\documentclass{beamer}
\usepackage{etex}
\usepackage{beamerthemesplit}
\setbeamertemplate{navigation symbols}{} 
\setbeamertemplate{headline}{}

\usepackage{verbatim,ulem}
\usepackage{wasysym}

\usepackage{tikz}
\definecolor{steelblue}{RGB}{70,130,180}
\tikzset{vertex/.style={circle,draw,fill=steelblue,inner sep=0pt,minimum size=1mm}}
\tikzset{edge/.style={color=steelblue,line width=.5mm}}

\usepackage[all,color]{xy}

\useoutertheme{infolines} 

\input{staeckermacros}


\newcommand{\ul}{\underline}
\newcommand{\ol}{\overline}
\newcommand{\E}{\mathcal E}

\newcommand{\fpause}{\pause\vfill}

\DeclareMathOperator{\Prob}{Prob}

%vertically aligned graphic
\newcommand{\vg}[2]{\vcenter{\hbox{\includegraphics[#1]{#2}}}}

\title[]{The expected difference between $N(f)$ and $\MF(f)$}
\author[Staecker]{P. Christopher Staecker\\ joint with Seung Won Kim}
\date[Nielsen 2016]{Nielsen Theory and Related Topics, July 2016\\ Rio Claro SP, Brazil}
\institute[Fairfield U.]{Fairfield University}

\begin{document}

\frame{\titlepage}

%\frame{
%\[ \text{\color{red}{ATEN\c{C}AO}} \]
%This talk involves some non-integers!
%\fpause
%Some are very small.
%}

\frame{
\begin{thm}[Wecken, 1940s]
If $X$ is a manifold of dimension not equal to 2, then $N(f) = \MF(f)$. 
\end{thm}
\fpause
What about surfaces?
\fpause
A quotation from a famous mathematician:
\fpause
Ulrich Koschorke, 2016: \pause ``in dimension 2 the Wecken theorem is \emph{very badly wrong}''.
\fpause
We all agree on the ``wrong''.\pause\ This talk is about the ``very badly''.
}

\frame{
We want to try to measure how badly wrong the situation is on surfaces with boundary.
\fpause
Kelly (1987) showed that $\MF(f) - N(f)$ can be arbitrarily large.
\fpause
Here is the example:
\[ \phi: \begin{array}{rcl} 
a & \mapsto & (bab^{-1}a^{-1})^k ba \\
b & \mapsto & 1
\end{array}
\]
\fpause
Kelly computes $N(f) = 0$ and $\MF(f) = 2k$.
}
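
\frame{
For instance, taking $k=1$:
\[ \phi: \begin{array}{rcl}
a & \mapsto & bab^{-1}a^{-1}ba \\
b & \mapsto & 1
\end{array}
\]
\fpause
so already the smallest case has $N(f) = 0$ but $\MF(f) = 2$.
}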

\frame{{The Big Question}
(Always on surfaces with boundary)
\fpause
When $N(f) \neq \MF(f)$,\pause\ or even when $N(f) \ll \MF(f)$,\pause\ is this behavior \emph{typical} or  \emph{exceptional}?
\fpause
If I choose $f$ ``at random'' on a surface with boundary, should I expect it to be Wecken?
\fpause
When $f$ is chosen at random, what is our statistical expectation for the quantity $\MF(f)-N(f)$?
}

\frame{{Randomness in free groups}
We always measure this randomness algebraically in terms of homotopy classes of maps.
\fpause
Let $X_n$ be a bouquet of $n$ circles, so $\pi_1(X_n)$ is a free group on $n$ generators; we write:
\[ \pi_1(X_n) =  G = \langle a_1, \dots, a_n \rangle. \]
\fpause
We can measure the size of a subset $S \subset G$ using the \emph{asymptotic density}.
}

\frame{
For a natural number $p$, let $G_p$ be the set of elements of $G$ with word length at most $p$.
\fpause
\[ D_p(S) = \frac{\#(S \cap G_p)}{\#G_p} \]
This is the probability that a random word of length at most $p$ is an element of $S$.
\fpause
The \emph{asymptotic density} of $S$ is 
\[ D(S) = \lim_{p\to\infty} D_p(S). \]
}
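
\frame{
A quick example: let $S$ be the set of elements whose reduced word begins with $a_1$.
\fpause
There are $2n(2n-1)^{\ell - 1}$ reduced words of length exactly $\ell \ge 1$, of which $(2n-1)^{\ell - 1}$ begin with $a_1$.
\fpause
So $D_p(S) \to \frac{1}{2n}$, and $D(S) = \frac{1}{2n}$.
}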

\frame{
\[ D(S) = \lim_{p\to\infty} D_p(S). \]
\fpause
This limit may not exist, so we also talk about these:
\[ \ul D(S) = \liminf_{p\to\infty} D_p(S), \quad \ol D(S) = \limsup_{p\to\infty} D_p(S). \]
\fpause
These always exist, since $0 \le D_p(S) \le 1$.
\fpause
When $D(S)=1$ we say $S$ is \emph{generic}.
\fpause
These densities represent the probability that a random ``very long'' element of $G$ is in $S$.
}

\frame{
This is how we measure probabilities for properties of random elements. 
\fpause
What about random homomorphisms $\phi:G\to G$? 
\fpause
Such a homomorphism, say on $G = \langle a,b,c\rangle$, looks something like this:
\[ \phi: \begin{array}{lcll}
a &\mapsto & bbac\\
b &\mapsto & ccbca^{-1}a^{-1} \\
c &\mapsto & bacca^{-1}
\end{array} \]
\pause
A homomorphism $\phi:G\to G$ is equivalent to the $n$-tuple of \emph{image words}.
\fpause
So combinatorially, the set of endomorphisms $G\to G$ is $G^n$.
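\fpause
For example, the homomorphism above is the point $(bbac,\ ccbca^{-1}a^{-1},\ bacca^{-1})$ of $G^3$.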
}

\frame{
When $S\subset G^n$ is a set of homomorphisms, we define
\[ D(S) = \lim_{p \to \infty} \frac{\#(S\cap G_p^n)}{\#G_p^n} \]
\fpause
And also $\ul D(S)$ and $\ol D(S)$, as before.
\fpause
These numbers represent the probability that a random ``long'' homomorphism is in $S$.
\fpause
If $R$ is the set of homomorphisms with Wagner's \emph{remnant} condition, Brown \& Wagner (1999) showed that $D(R) = 1$.
}
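
\frame{
One easy consequence of the definition: densities multiply across coordinates. \pause If $S = S_1 \times \dots \times S_n$ and each $D(S_i)$ exists, then
\[ \frac{\#(S \cap G_p^n)}{\#G_p^n} = \prod_{i=1}^n \frac{\#(S_i \cap G_p)}{\#G_p}, \]
so $D(S) = D(S_1) \cdots D(S_n)$.
}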

\frame{
We think of a set $S\subset G^n$ as a collection of induced homomorphisms from maps on bouquets of circles.
\fpause
We'll write $N(\phi)$ for the Nielsen number of any map whose induced homomorphism is $\phi$. \pause Similarly $\MF(\phi)$.
\fpause
A simple result: \pause let $\mathcal N_1 \subset G^n$ be the set of $\phi$ with $N(\phi) \ge 1$. 
\fpause
Then $D(\mathcal N_1)$ represents the probability that a random map has Nielsen number at least 1.
\fpause
Not hard to show that $D(\mathcal N_1) = 1$.
\fpause
So ``almost all'' maps have Nielsen number at least 1.
}

\frame{
In Kim \& S. (2014), we showed that if $\mathcal N_k$ is the set with $N(\phi) \ge k$, \pause then
\[ D(\mathcal N_k) = 1 \]
for \emph{any} $k$. 
\fpause
This means that for any fixed $k$, a random homomorphism will have $N(\phi) \ge k$ with probability 1.
\fpause
So ``almost all'' maps have Nielsen number ``arbitrarily large''.
}

\frame{
Another way to discuss this is in terms of the \emph{expectation} of the Nielsen number.
\fpause
Probabilistically we should say that the expected value of the Nielsen number is $\infty$.
\fpause
Generally, if $f:G^n \to \R$ is any real-valued function of homomorphisms, define the \emph{expectation} of $f$ as:
\[ \E(f) = \lim_{p\to\infty} \frac{1}{\#G_p^n} \sum_{\phi \in G_p^n} f(\phi). \]
And similarly $\ul \E(f)$ and $\ol \E(f)$.
}
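
\frame{
This generalizes the density: \pause if $\chi_S$ is the indicator function of $S \subset G^n$, then
\[ \E(\chi_S) = \lim_{p\to\infty} \frac{\#(S \cap G_p^n)}{\#G_p^n} = D(S). \]
}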

\frame{
So $\E(N) =\infty$. 
\fpause
And also $\E(\MF) = \infty$.
\fpause
{\color{red} I think $\E(L) = 0$\pause; I don't know about $\E(|L|)$.}
}

\frame{{Goals}
\pause
What is the expectation of $\MF(f) - N(f)$?
\fpause
We want to compute $\E(\MF - N)$. 
\fpause
We will also consider $\E(N/\MF)$.
\fpause
The ``best'' result would be that $\E(\MF-N) = 0$ and $\E(N/\MF) = 1$. 
}

\frame{
(footnote)
Actually $\E(\MF-N)=0$ implies $\E(N/\MF)=1$:
\fpause
If we expect $\MF(f)-N(f) = 0$, then we expect $\log \MF(f) = \log N(f)$.
\fpause
So we expect $\log \frac{N(f)}{\MF(f)} = 0$.
\fpause
So we expect $\frac{N(f)}{\MF(f)} = 1$.
}

\frame{
So $\E(\MF-N)=0$ is the stronger result.
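\fpause
(A hypothetical illustration: $N(\phi) = 998$ and $\MF(\phi) = 1000$ give ratio $.998 \approx 1$, but difference $2$.)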
\fpause
So we'll try to show that $\E(\MF-N) = 0$.
\fpause
If that's too hard, we will try to show $\E(N/\MF) = 1$.
}

\frame{{Past work}
Some past work dealt with the simpler question:
\fpause
What is the probability that $\MF(f) = N(f)$?
\fpause
Let $W_n$ be the set of $\phi$ with $\MF(\phi) = N(\phi)$, so we want to compute $D(W_n)$.
\fpause
It turns out that $D(W_n)$ depends on $n$ (\# of generators).
}

\frame{
Our main tool for telling when $f$ is Wecken or not is \emph{Wagner's algorithm}.
\fpause
Briefly: \pause\ Look at the homomorphism:
\[ \phi: \begin{array}{lcll}
a &\mapsto &bbac &= b\ul{ba}c\\
b &\mapsto &ccbca^{-1}a^{-1} &= c\ul{cbca^{-1}}a^{-1} \\
c &\mapsto & bacca^{-1} & = b\ul{acc}a^{-1}
\end{array} \]

\fpause
The \emph{remnant} is the middle subword in each position that doesn't cancel in any product.
}

\frame{
\begin{thm*}[Wagner 1999] If $\phi$ has nontrivial remnant in each generator, then it is the induced homomorphism for a map $f:X_n\to X_n$ with these properties:
\begin{itemize}
\item $f$ has a fixed point for each occurrence of $a_i^{\pm 1}$ inside $\phi(a_i)$
\item The fixed point classes are determined by equalities among the ``Wagner tails''
\end{itemize}
\end{thm*}
}

\frame{
For example:
\[ \phi: \begin{array}{lcll}
a &\mapsto & b\ul{ba}c\\
b &\mapsto & c\ul{cbca^{-1}}a^{-1} \\
c &\mapsto & b\ul{acc}a^{-1}
\end{array} \]

\fpause
Inside $\phi(a)$ we have one fixed point with Wagner tails $(b^2,c)$.
\fpause
Inside $\phi(b)$ we have one fixed point with Wagner tails $(c^2, ca^{-2})$
\fpause
Inside $\phi(c)$ we have two fixed points with Wagner tails $(ba, ca^{-1})$ and $(bac,a^{-1})$.
\fpause
We always get an extra fixed point with Wagner tail $(1,1)$.
\fpause
There are no equalities among these Wagner tails, so none of these are in the same classes, so $N(f) = 5$. 
\fpause
Also $\MF(f)=5$, since we have 5 fixed point classes of 1 point each.
}

\frame{
Simple fact: \pause If there are no equalities between the Wagner tails, \pause then the number of fixed points in Wagner's algorithm equals $N(f)$,\pause\ and also equals $\MF(f)$.
\fpause
\begin{lem}
If there are no equalities between the Wagner tails, then $\phi$ is Wecken.
\end{lem}
\fpause
This is something we should be able to count combinatorially.
}
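
\frame{
When there are no equalities among the Wagner tails, the count is completely explicit:
\[ N(\phi) = \MF(\phi) = 1 + \sum_{i=1}^n \#\{\text{occurrences of } a_i^{\pm 1} \text{ in } \phi(a_i)\}. \]
\fpause
In the example above this is $1 + (1 + 1 + 2) = 5$.
}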

\frame{
This led to:

\[\vg{width=200px}{china}\]

\fpause
\begin{thm}[Brimley, Griisser, Miller, S. 2012]
$\ul D(W_n) > 0$ for all $n$, increasing in $n$.
\end{thm}
}

\frame{
$\ul D(W_n) > 0$ for all $n$, increasing in $n$.
\fpause
So for each $n$, there is at least \emph{some} positive probability that a random map is Wecken.
\fpause
This was improved in
\begin{thm}[Kim, S. 2016] 
$\lim_{n\to\infty} \ul D(W_n) = 1.$
\end{thm}
\fpause
This means that when $n$ is very large, almost every map is Wecken.
\fpause
We stated as an open question: Is $D(W_n) = 1$ for each $n$?
\fpause
In the case $n=2$, the answer is \emph{no}.
}

\frame{
\begin{thm*}
$\ol D(W_2) < .99997$
\end{thm*}
\fpause
This bound could be improved a bit, but probably not much.
\fpause
The proof uses some work from Wagner's \emph{other} paper, ``Classes of Wecken maps''.
\fpause
She examines Kelly's class of homomorphisms $T_3$ and proves that they are not Wecken.
\fpause
We show that $\ul D(T_3) > .00003$, which gives the bound above.
}

\frame{
\begin{thm*}
$\ol D(W_2) < .99997$
\end{thm*}
The argument relies fundamentally on Kelly's techniques for $\MF(f)$ on the pants surface\pause, so we do not expect the proof to generalize to $n>2$.
\fpause
\begin{conj*}
For each $n\ge 2$, $\ol D(W_n) < 1$.
\end{conj*}
\fpause
This would mean that $0 < \ul D(W_n) \le \ol D(W_n) < 1$, which is interesting.
\fpause
For $n=2$ the best bounds we have are:
\[ .5 < \ul D(W_2) \le \ol D(W_2) < .99997 \]
}

\frame{{Expectations}
Let's discuss $\E(\MF - N)$ and $\E(N / \MF)$. 
\fpause
These probably will depend on $n$, so let's write $\E_n(\MF-N)$ and $\E_n(N/\MF)$.
\fpause
The ``best'' result would be $\E_n(\MF-N) = 0$ and $\E_n(N/\MF) =1$ for all $n$.
\fpause
But the fact that $\ol D(W_2)<1$ easily shows that $\ul \E_2(\MF-N) > 0$.
}

\frame{
\begin{thm*} $\ul \E_2(\MF-N) > 0 $
\end{thm*}
\fpause
\begin{proof}
\begin{align*} 
\ul \E_2(\MF-N) &= \liminf_{p\to\infty} \frac{1}{\#G_p^2} \sum_{\phi\in G_p^2} (\MF(\phi) - N(\phi))  \\ 
\onslide<3->{&\ge \liminf_{p\to\infty} \frac{1}{\#G_p^2} \sum_{\phi\in W^c_2 \cap G_p^2} (\MF(\phi) - N(\phi)) \\} 
\onslide<4->{&\ge \liminf_{p\to\infty} \frac{1}{\#G_p^2}  \sum_{\phi\in W^c_2 \cap G_p^2} 1 }
\onslide<5->{=  \liminf_{p\to\infty} \frac{1}{\#G_p^2}  \#( W^c_2 \cap G_p^2) \\ }
\onslide<6->{&= \ul D( W^c_2)}
\onslide<7->{  > .00003  > 0}
\end{align*}
\end{proof}
}

\frame{
So we expect $\ul \E_n(\MF-N) > 0$ for each $n$, but these are probably small.
\fpause
We have tried many times to prove that these are finite, but the combinatorics is very difficult.
\fpause
So we will instead look at $\E_n(N/\MF)$. \pause (We want this to be 1.)
}

\frame{{Approach}
Remember:
\begin{lem}
If there are no equalities between the Wagner tails, then $\phi$ is Wecken.
\end{lem}

\fpause

To measure $\E_n(N/\MF)$, we need to consider when there \emph{are} some equalities between the Wagner tails.

\fpause

We need some condition which lets us predict when a Wagner tail is repeated.
}

\frame{
A necessary condition for repeated Wagner tails:
\fpause
\begin{lem}
A repeated Wagner tail must be completely outside of the remnant subword.
\end{lem}
\fpause
\begin{proof}
If $w$ is a repeated Wagner tail, this means that we have something like 
\[ \phi(a) = waS, \quad \phi(b) = wbT \] \pause
and since $w$ will cancel in the product $\phi(a^{-1})\phi(b)$, it is outside of the remnant.
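\pause
Explicitly,
\[ \phi(a^{-1})\phi(b) = S^{-1}a^{-1}w^{-1}\,wbT = S^{-1}a^{-1}bT. \]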
\end{proof}
}

\frame{
So the Wagner tails outside the remnant may combine (cancel) with others. \pause
The ones inside the remnant can never cancel with others.
\fpause
This means that $N(f)$ and $\MF(f)$ are \emph{both} bounded between \# Wagner tails inside the remnant, \pause and \# Wagner tails.
\fpause
\begin{align*}
\#\text{WTs inside remnant} \le N(f) &\le \#\text{WTs} \\
\#\text{WTs inside remnant} \le \MF(f) &\le \#\text{WTs} 
\end{align*}
\fpause
This means
\[ \frac{N(f)}{\MF(f)} \ge \frac{\#\text{WTs in remnant}}{\#\text{WTs}} \]
\fpause
The right side above is the proportion of the WTs which appear inside the remnant.
}

\frame{
\[ \frac{N(f)}{\MF(f)} \ge \frac{\#\text{WTs in remnant}}{\#\text{WTs}} \]
We can use this to estimate $\frac{N(f)}{\MF(f)}$ without looking very specifically at our map.
\fpause
\[ \phi: \begin{array}{lcll}
a &\mapsto & b\ul{b{\color{red}a}}c\\
b &\mapsto & c\ul{c{\color{red}b}ca^{-1}}a^{-1} \\
c &\mapsto & b\ul{a{\color{red}cc}}a^{-1}
\end{array} \]
\pause
Here we have 4 WTs inside the remnant, and 5 total WTs, so 
\[ \frac{N(f)}{\MF(f)} \ge \frac45 \]
}

\frame{
\[ \phi: \begin{array}{lcll}
a &\mapsto & b\ul{b{\color{red}a}}c\\
b &\mapsto & c\ul{c{\color{red}b}ca^{-1}}a^{-1} \\
c &\mapsto & b\ul{a{\color{red}cc}}a^{-1}
\end{array} \]

\vfill
To get 
\[ \frac{\#\text{WTs in remnant}}{\#\text{WTs}} \approx 1,\] 
we want the remnant  subwords to be as large as possible.
}


\frame{
Everybody knows the remnant property is generic.
\fpause
Based on work in Arzhantseva (2000), we can derive stronger results.
\fpause
For any $r <1$, let $R_r$ be the set of $\phi$ with 
\[ |\Rem_\phi a_i| > r |\phi(a_i)| \]
for each $i$. \pause These are the maps with ``remnant ratio $r$''.
\fpause
\begin{thm}[S. 2010]
$D(R_r)=1$ for any $r < 1$.
\end{thm}
}

\frame{
So for any $r < 1$, we can expect the remnant to occupy at least proportion $r$ of the image words.

\fpause

We care about the proportion of the WTs inside the remnant.

\fpause

We should also expect proportion $r$ of the WTs to be inside the remnant, provided that the WTs are ``uniformly distributed'' in the image words.

\fpause
We need to make sure that the WTs do not cluster together, and have no bias in their locations.
}

\frame{
We get a WT for each appearance of $a_i^{\pm 1}$ inside $\phi(a_i)$. 
\fpause
If $\phi(a_i)$ is a long random word, we do indeed expect the letters $a_i^{\pm 1}$ to be ``uniformly distributed'' inside this word.
\fpause
In particular, if $w$ is a random word of length $p$, we expect roughly $\frac pn$ occurrences of $a_i^{\pm 1}$, distributed evenly throughout the word.
}

\frame{
This is a little tricky, and we'll use some probability. \pause Let $\#_{a_i}(w)$ be the number of $a_i^{\pm 1}$ in $w$.
\fpause
Choosing $w$ at random, we expect $\#_{a_i} (w) = \frac 1n |w|$.
\fpause
Here is the result:
\begin{lem}
Let $S_\epsilon$ be the set of words $w$ with 
\[ \#_{a_i}(w) > \frac{1-\epsilon}{n}|w| \] 
for each $i$. Then $D(S_\epsilon) = 1$ for any $\epsilon > 0$.
\end{lem}
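\fpause
For instance, with $n = 3$, $\epsilon = .1$, and $|w| = 300$: we expect $\#_{a_i}(w) = 100$, and the lemma says that generically $\#_{a_i}(w) > 90$ for each $i$.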
}

\frame{
This is an interesting result in its own right, and as far as I know it is new.
\fpause
The proof uses some basic probability theory. \pause We count the complementary set.
\fpause
When $|w|=p$, modeling each letter as $a_i^{\pm 1}$ independently with probability $\frac1n$, the word has $k$ occurrences of $a_i^{\pm 1}$ with probability:
\[ {p \choose k} \left(\frac1n\right)^k \left(\frac{n-1}{n}\right)^{p-k} \]
\fpause
The probability that $w$ has at most $k$ occurrences is
\[ \sum_{j=0}^k {p \choose j} \left(\frac1n\right)^j \left(\frac{n-1}{n}\right)^{p-j} \]
}

\frame{
\[ \sum_{j=0}^k {p \choose j} \left(\frac1n\right)^j \left(\frac{n-1}{n}\right)^{p-j} \]
This is the tail-end of the binomial distribution. 
\fpause
Theorem from probability: when $k < \frac pn$, we have an exponential bound
\[ \sum_{j=0}^k {p \choose j} \left(\frac1n\right)^j \left(\frac{n-1}{n}\right)^{p-j} \le \exp(-Ck) \]
\fpause
So:
\[ \Prob(\#_{a_i}(w) \le k) \le \exp(-Ck) \]
}
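
\frame{
One concrete such bound, under the independent-letter model above, is Hoeffding's inequality:
\[ \sum_{j=0}^k {p \choose j} \left(\frac1n\right)^j \left(\frac{n-1}{n}\right)^{p-j} \le \exp\left( -\frac{2\left(\frac pn - k\right)^2}{p} \right). \]
\fpause
Taking $k = \frac{1-\epsilon}{n} p$ gives the bound $\exp\left(-\frac{2\epsilon^2}{n^2}\,p\right)$, exponentially small in $p$.
}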

\frame{
\[ \Prob(\#_{a_i}(w) \le k) \le \exp(-Ck) \]
This exponential bound allows us to say that:\pause
\[ \Prob \left( \#_{a_i}(w) \le \frac{1-\epsilon}{n}|w| \right)\] 
is bounded above by $\exp(-C|w|)$ \pause, which goes to 0 as $|w|\to\infty$.
\fpause
So 
\[ \#_{a_i}(w) > \frac{1-\epsilon}{n}|w| \] 
holds with probability 1 as $|w|\to \infty$.
}
%\frame{
%\[ \Prob(\#_{a_i}(w) \le k) \le \exp(-Ck) \]
%Remember we are trying to measure 
%\[ \ul D(S_\epsilon) =  \liminf_{|w| \to \infty} \Prob(\#_{a_i}(w) > \frac{1-\epsilon}{n}|w|) \]
%So we have 
%\begin{align*}
%\ol D(S^c_\epsilon) &\le \limsup_{p\to\infty} \Prob\left(\#_{a_i}(w) \le \frac{1-\epsilon}{n}p\right) \\
%&\le \limsup_{p\to\infty}\exp(-C\frac{1-\epsilon}np) = 0.
%\end{align*}
%\fpause
%So $\ul D(S_\epsilon) \ge 1 - \ol D(S^c_\epsilon) = 1$
%}

\frame{
So generically we have
\[ \#_{a_i}(w) > \frac{1-\epsilon}{n}|w|. \] 
This means the number of $a_i$ is at least what we expect it to be. 
\fpause
For them to be ``uniformly distributed'', we actually need more:
\fpause
\begin{lem}
Let $S_\epsilon$ be the set of words $w$ with 
\[ \#_{a_i}(v) > \frac{1-\epsilon}{n}|v| \] 
for each $i$, \emph{for any subword} $v$ with $|v| > \frac12 |w|$. Then $D(S_\epsilon) = 1$ for any $\epsilon > 0$.
\end{lem}
\fpause
(The $\frac12$ is what we need: when the remnant ratio $r$ is bigger than $\frac12$, each remnant subword $\Rem_\phi a_i$ is such a $v$.)
}

\frame{
Actually we can improve this slightly again.
\fpause
We've been discussing a lower bound:
\[ \#_{a_i}(w) > \frac{1-\epsilon}{n}|w|. \] 
\fpause 
But also we can get a similar upper bound by a symmetric argument. 
\fpause
\begin{lem}
Let $S_\epsilon$ be the set of words $w$ with 
\[ \frac{1-\epsilon}{n}|v| < \#_{a_i}(v) < \frac{1+\epsilon}{n}|v| \] 
for each $i$, for any subword $v$ with $|v| > \frac12 |w|$. Then $D(S_\epsilon) = 1$ for any $\epsilon > 0$.
\end{lem}
}


\frame{{Put it all together}
Our result:
\begin{thm*}
For each $n>1$, we have $\E_n(N/\MF) = 1$.
\end{thm*}
\pause
No dependence on $n$, so we'll just write $\E(N/\MF)$.
\fpause
Choose any $\frac12 < r < 1$ and $\epsilon > 0$.
\pause
\begin{align*}
\ul \E(N/\MF) 
\onslide<4->{&\ge \ul \E_{R_r \cap S_\epsilon}(N/\MF)}
\onslide<5->{= \liminf_{p\to\infty} \frac{1}{\#(R_r \cap S_\epsilon \cap G_p^n)} \sum_{\phi \in R_r \cap S_\epsilon \cap G_p^n} \frac{N(\phi)}{\MF(\phi)} \\}
\onslide<6->{&\ge \liminf_{p\to\infty} \frac{1}{\#(R_r \cap S_\epsilon \cap G_p^n)} \sum_{\phi \in R_r \cap S_\epsilon \cap G_p^n} \frac{\#\text{WTs in remnant}}{\#\text{WTs}} \\}
\onslide<7->{&= \liminf_{p\to\infty} \frac{1}{\#(R_r \cap S_\epsilon \cap G_p^n)} \sum_{\phi \in R_r \cap S_\epsilon \cap G_p^n} \frac{\#_{a_i}(\Rem_\phi a_i)}{\#_{a_i}(\phi(a_i))} }
\end{align*}
}

\frame{
\begin{align*}
\ul\E(N/\MF) &\ge
\liminf_{p\to\infty} \frac{1}{\#(R_r \cap S_\epsilon \cap G_p^n)} \sum_{\phi \in R_r \cap S_\epsilon \cap G_p^n} \frac{\#_{a_i}(\Rem_\phi a_i)}{\#_{a_i}(\phi(a_i))} \\
\onslide<2->{&\ge \liminf_{p\to\infty} \frac{1}{\#(R_r \cap S_\epsilon \cap G_p^n)} \sum_{\phi\in R_r \cap S_\epsilon \cap G_p^n} \frac{\frac{1-\epsilon}{n}|\Rem_\phi a_i|}{\frac{1+\epsilon}{n}|\phi(a_i)|} \\}
\onslide<3->{&= \frac{1-\epsilon}{1+\epsilon} \liminf_{p\to\infty} \frac{1}{\#(R_r \cap S_\epsilon \cap G_p^n)} \sum_{\phi\in R_r \cap S_\epsilon \cap G_p^n} \frac{|\Rem_\phi a_i|}{|\phi(a_i)|} \\}
\onslide<4->{&\ge \frac{1-\epsilon}{1+\epsilon} \liminf_{p\to\infty} \frac{1}{\#(R_r \cap S_\epsilon \cap G_p^n)} \sum_{\phi\in R_r \cap S_\epsilon \cap G_p^n} r \\}
\onslide<5->{&= \frac{1-\epsilon}{1+\epsilon}\, r \liminf_{p\to\infty} \frac{1}{\#(R_r \cap S_\epsilon \cap G_p^n)} \sum_{\phi\in R_r \cap S_\epsilon \cap G_p^n} 1 \\}
\onslide<6->{&= \frac{1-\epsilon}{1+\epsilon}\, r}
\end{align*}
}

\frame{
So we have
\[ \ul\E(N/\MF) \ge \frac{1-\epsilon}{1+\epsilon}\, r \]
for \emph{any} $\epsilon > 0$ and $r < 1$. 
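\fpause
For instance, $\epsilon = .01$ and $r = .99$ already give $\ul\E(N/\MF) > \frac{.99}{1.01}(.99) > .97$.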
\fpause
Letting $\epsilon\to0$ and $r\to1$, and since $N/\MF \le 1$ always, this means $\ul \E(N/\MF) = 1$ and thus $\E(N/\MF)=1$.
\fpause
(no more proofs)\pause\ (obrigado!)
}

\frame{
So what about $\E(\MF - N)$? \pause This is harder.
\fpause
We have used the fact that:
\[ \# \text{WTs in remnant} \le \smiley \le  \# \text{WTs} \]
\fpause
In the generic situation, these two bounds are very large, but proportionally close together. 
\fpause
That is:
\[ \frac{ \# \text{WTs in remnant}}{\# \text{WTs} } \approx 1. \]
\fpause
But the absolute difference between these may still be fairly large.
}

\frame{
So $\MF(f) -N(f)$ can still be quite large, even when the ratio $N(f)/\MF(f)$ is close to 1.
\fpause
But still we conjecture:
\begin{conj}
For $n>1$, we have $0 < \ul \E_n(\MF - N) \le \ol \E_n (\MF - N) < \infty$, and 
\[ \lim_{n\to \infty} \ol \E_n(\MF-N) = 0. \]
\end{conj}
\fpause
This will require fancier counting arguments to prove.
}

\frame{
That's all!
}
\end{document}