diff --git a/main.tex b/main.tex index 82cc4e04920521810f3d8d73731453a4e9838366..87583670fa2c6e5c3d4e6a6c622d73584bcd89a7 100644 --- a/main.tex +++ b/main.tex @@ -57,7 +57,8 @@ \newcommand{\diam}[1]{\mathcal{D}\left(#1\right)} \newcommand{\paths}[1]{\mathcal{P}\left(#1\to\mathbf{1}\right)} -\newcommand{\gapsum}[1]{\mathrm{gapsum}\left(#1\right)} +\newcommand{\maxgap}[1]{\mathrm{maxgap}\left(#1\right)} +\newcommand{\gaps}[1]{#1_{\mathrm{gaps}}} \long\def\ignore#1{} @@ -220,21 +221,21 @@ \section{Quasiprobability method} Let us first introduce notation for paths of the Markov Chain \begin{definition}[Paths] - We define a \emph{path} of the Markov Chain as a sequence of states and resampling choices $\xi=((b_0,r_0),(b_1,r_1),...,(b_k,r_k)) \in (\{0,1\}^n\times[n])^k$ indicating that at time $t$ Markov Chain was in state $b_t\in\{0,1\}^n$ and then resampled site $r_t$. We denote by $|\xi|$ the length of such a path, i.e. the number of resamples that happened, and by $\mathbb{P}[\xi]$ the probability associated to this path. - We denote by $\paths{b}$ the set of all valid paths $\xi$ that start in state $b$ and end in state $\mathbf{1}$. + We define a \emph{path} of the Markov Chain as a sequence of states and resampling choices $\xi=((b_0,r_0),(b_1,r_1),\ldots,(b_{k-1},r_{k-1})) \in (\{0,1\}^n\times[n])^k$ indicating that at time $t$ the Markov Chain was in state $b_t\in\{0,1\}^n$ and then resampled site $r_t$. We denote by $|\xi|$ the length $k$ of such a path, i.e. the number of resamples that happened, and by $\mathbb{P}[\xi]$ the probability associated to this path. + We denote by $\paths{b}$ the set of all valid paths $\xi$ that start in state $b$ and end in state $\mathbf{1} := 1^n$. \end{definition} We can write the expected number of resamplings per site $R^{(n)}(p)$ as \begin{align} - R^{(n)}(p) &= \frac{1}{n}\sum_{b\in\{0,1\}^{n}} \rho_b \; R_b(p) \label{eq:originalsum} + R^{(n)}(p) &= \frac{1}{n}\sum_{b\in\{0,1\}^{n}} \rho_b \; R_b(p) \label{eq:originalsum} , \end{align} where $R_b(p)$ is the expected number of resamplings when starting from configuration $b$ \begin{align*} - R_b(p) &= \sum_{\xi \in \paths{b}} \mathbb{P}[\xi] \cdot |\xi| + R_b(p) &= \sum_{\xi \in \paths{b}} \mathbb{P}[\xi] \cdot |\xi| . \end{align*} -We consider $R^{(n)}(p)$ as a power series in $p$ and show that many terms in (\ref{eq:originalsum}) cancel out if we only consider the series up to some finite order $p^k$. Note that if a path samples a $0$ then $\mathbb{P}[\xi]$ gains a factor $p$.\\ +We consider $R^{(n)}(p)$ as a power series in $p$ and show that many terms in (\ref{eq:originalsum}) cancel out if we only consider the series up to some finite order $p^k$. The main idea is that if a path samples a $0$ then $\mathbb{P}[\xi]$ gains a factor $p$, so paths that contribute at order $p^k$ cannot be arbitrarily long.\\ -To see this, we split the sum in (\ref{eq:originalsum}) into parts that will later cancel out. The initial probabilities $\rho_b$ contain a factor $p$ for every $0$ and a factor $(1-p)$ for every $1$. When expanding this product of $p$s and $(1-p)$s, we see that the $1$s contribute a factor $1$ and a factor $(-p)$ and the $0$s only give a factor $p$. Therefore we no longer consider bitstrings $b\in\{0,1\}^n$ but bitstrings $b\in\{0,1,1'\}^n$. We view this as follows: every site can have one of $\{0,1,1'\}$ with `probabilities' $p$, $1$ and $-p$ respectively. A configuration $b=101'1'101'$ now has probability $\rho_{b} = 1\cdot p\cdot(-p)\cdot(-p)\cdot 1\cdot p\cdot(-p) = -p^5$ in the starting state $\rho$.
It should not be hard to see that we have +To see this, we split the sum in (\ref{eq:originalsum}) into parts that will later cancel out. The initial probabilities $\rho_b$ contain a factor $p$ for every $0$ and a factor $(1-p)$ for every $1$. When expanding this product of $p$s and $(1-p)$s, we see that the $1$s contribute a factor $1$ and a factor $(-p)$ and the $0$s only give a factor $p$. We want to expand this product explicitly and therefore we no longer consider bitstrings $b\in\{0,1\}^n$ but bitstrings $b\in\{0,1,1'\}^n$. We view this as follows: every site can have one of $\{0,1,1'\}$ with `probabilities' $p$, $1$ and $-p$ respectively. A configuration $b=101'1'101'$ now has probability $\rho_{b} = 1\cdot p\cdot(-p)\cdot(-p)\cdot 1\cdot p\cdot(-p) = -p^5$ in the starting state $\rho$. It should not be hard to see that we have \begin{align*} R^{(n)}(p) &= \frac{1}{n}\sum_{b\in\{0,1,1'\}^{n}} \rho_{b} \; R_{\bar{b}}(p) , \end{align*} @@ -248,15 +249,14 @@ We can further rewrite the sum over $b\in\{0,1,1'\}^n$ as a sum over all slot co \end{align*} where $C(f)\in\{0,1,1'\}^n$ denotes a configuration with slots on the sites $C$ filled with (anti)particles described by $f$. The non-slot positions are filled with $1$s. -\begin{definition}[Diameter] - For a slot configuration $C\subseteq[n]$, we define the diameter $\diam{C}$ to be the minimum size of an interval containing $C$ where the interval is also considered modulo $n$. In other words $\diam{C} = n - \max\{ j \vert \exists i : [i,i+j-1]\cap C = \emptyset \}$. Figure \ref{fig:diametergap} shows the diameter in a picture. +\begin{definition}[Diameter and gaps] \label{def:diameter} \label{def:gaps} + For a subset $C\subseteq[n]$, we define the \emph{diameter} $\diam{C}$ to be the minimum size of an interval $I$ containing $C$. Here we consider both $C$ and the interval modulo $n$. In other words $\diam{C} = \min\{ j \vert \exists i : C\subseteq [i,i+j-1] \}$. We define the \emph{gaps} of $C$ as $I\setminus C$ for such a minimizing interval $I$, and denote this by $\gaps{C}$. Note that $\diam{C} = |C| + |\gaps{C}|$. Define $\maxgap{C}$ as the size of the largest connected component of $\gaps{C}$. Figure \ref{fig:diametergap} illustrates these concepts. \end{definition} - \begin{figure} \begin{center} \includegraphics{diagram_gap.pdf} \end{center} - \caption{\label{fig:diametergap} A configuration $C=\{1,2,4,7,9\}\subseteq[n]$ consisting of 5 slots shown by the red dots. The blue squares denote the set $C_{><}$ which is all elements of $[n]\setminus C$ that lie within the interval spanned by $C$. The dotted line at the top depicts the rest of the circle which may be much larger. The diameter of this configuration is $\diam{C} = |C| + |C_{><}| =9$ as shown. The largest gap of $C$ is $\mathrm{gap}(C)=2$ which is the largest connected component of $C_{><}$. Note that we do not count the rest of the circle as a gap, we only consider gaps \emph{within} the diameter of $C$.} + \caption{\label{fig:diametergap} Illustration of Definition \ref{def:diameter}. A set $C=\{1,2,4,7,9\}\subseteq[n]$ consisting of 5 positions is shown by the red dots. The smallest interval containing $C$ is $[1,9]$, so the diameter is $\diam{C}=9$. The blue squares denote the set $\gaps{C} = \{3,5,6,8\}$. The dotted line at the top depicts the rest of the circle, which may be much larger.
The largest gap of $C$ is $\{5,6\}$, so $\maxgap{C}=2$, the size of the largest connected component of $\gaps{C}$.} \end{figure} \begin{claim}[Strong cancellation claim] \label{claim:strongcancel} @@ -283,13 +283,14 @@ A weaker version of the claim is that if $C$ contains a gap of size $k$, then th \begin{align*} \sum_{f\in\{0,1'\}^{|C|}} \rho_{C(f)} R_{C(f)} , \end{align*} - is at least $p^{|C|+\mathrm{gap}(C)}$ when $n$ is large enough. Here $\mathrm{gap}(C)$ is defined as in Figure \ref{fig:diametergap}, its the size of the largest gap of $C$ within the diameter of $C$. All lower order terms cancel out. + is at least $p^{|C|+\maxgap{C}}$ when $n$ is large enough. All lower order terms cancel out. \end{claim} This weaker version would imply \ref{it:const} but for $\mathcal{O}(k^2)$ as opposed to $k+1$. \newpage -The reason that claim \ref{claim:strongcancel} would prove \ref{it:const} is the following: -For a starting configuration that \emph{does} give a nonzero contribution, you can take that same starting configuration and translate it to get $n$ other configurations that give the same contribution. Therefore the coefficient in the expected number of resamplings is a multiple of $n$ which Andr\'as already divided out in the definition of $R^{(n)}(p)$. To show \ref{it:const} we argue that this is the \emph{only} dependency on $n$. This is because there are only finitely many (depending on $k$ but not on $n$) configurations where the $k$ slots are nearby regardless of the value of $n$. So there are only finitely many nonzero contributions after translation symmetry was taken out. For example, when considering all starting configurations with 5 slots one might think there are $\binom{n}{5}$ configurations to consider which would be a dependency on $n$ (more than only the translation symmetry). But since most of these configurations have a diameter larger than $k$, they do not contribute to $a_k$. Only finitely many do and that does not depend on $n$. +The reason that claim \ref{claim:strongcancel} would prove \ref{it:const} is the following: to know the value of $a_k^{(n)}$, for any $n\geq k+1$ it is enough to look at configurations $C$ with diameter at most $k$, since larger configurations do not contribute to $a_k^{(n)}$. +For a starting state $b\in\{0,1\}^n$ that \emph{does} give a nonzero contribution, you can take that same starting configuration and translate it to get $n-1$ other configurations that give the same contribution. (An exception is a periodic starting state like $1010101010...$, which has only two distinct translates; but we only have to consider configurations with small diameter, and those have exactly $n$ distinct translates.) +Therefore the coefficient in the expected number of resamplings is a multiple of $n$, which Andr\'as already divided out in the definition of $R^{(n)}(p)$. To show \ref{it:const} we argue that this is the \emph{only} dependency on $n$. This is because there are only finitely many (depending on $k$ but not on $n$) configurations where all slots are close together, regardless of the value of $n$. So there are only finitely many nonzero contributions after the translation symmetry is factored out. For example, when considering all starting configurations with 5 slots one might think there are $\binom{n}{5}$ configurations to consider, which would be a dependency on $n$ (more than only the translation symmetry). But since most of these configurations have a diameter larger than $k$, they do not contribute to $a_k$. Only finitely many do, and that number does not depend on $n$.
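+As a quick sanity check of Definition \ref{def:diameter} and of the quasiprobability weights above, the following Python sketch recomputes the quantities in Figure \ref{fig:diametergap} and the weight $\rho_b=-p^5$ of $b=101'1'101'$. This is an illustration, not code from the notes: the helper names are ours, \texttt{x} stands for $1'$, and the circle size $n=20$ is an arbitrary choice since the figure leaves $n$ open.
+\begin{verbatim}
def diam_and_maxgap(C, n):
    """Return (diam(C), maxgap(C)) for a nonempty subset C of the n-cycle."""
    C = sorted(C)
    # Circular runs of non-slots between consecutive elements of C.
    runs = [(C[(i + 1) % len(C)] - c - 1) % n for i, c in enumerate(C)]
    # Dropping the single largest run leaves the minimal covering interval,
    # so diam(C) = n - max(runs); the remaining runs are the gaps.
    inside = sorted(runs)[:-1] if len(runs) > 1 else [0]
    return n - max(runs), max(inside)

def rho(b, p):
    """Quasiprobability weight of a string over 0 / 1 / x, where x is 1'."""
    w = 1.0
    for s in b:
        w *= {"0": p, "1": 1.0, "x": -p}[s]
    return w

print(diam_and_maxgap({1, 2, 4, 7, 9}, 20))  # -> (9, 2), as in the figure
print(rho("10xx10x", 0.1))                   # -> -1e-05 = -p^5 (up to rounding)
+\end{verbatim}
+Here the largest circular run of non-slots is exactly the part of the circle outside the covering interval, which gives $\diam{C}=n-\max(\mathrm{runs})$; the remaining runs are the connected components of $\gaps{C}$.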
~ @@ -330,13 +331,14 @@ With this we can write a recursive formula for the expected number of resamples &\quad+ 73555.2 p^{18} + 123053 p^{19} + 205290 p^{20} + 341620 p^{21} + 567161 p^{22} \\ &\quad+ 939693 p^{23} + 1.5537\cdot10^{6} p^{24} + 2.56158\cdot10^{6} p^{25} + \mathcal{O}(p^{26}) \end{align*} -where the recursion steps were done with a computer. This assumes $n$ to be much larger than the largest power of $p$ considered. +where the recursion steps were done with a computer for an infinite line (or a circle where $n$ is assumed to be much larger than the largest power of $p$ considered). -Note: in the first line at the second term it uses that with probability $(3p-6p^2)$ the state goes to $\framebox{$101$}$ and then the expected number of resamplings is $1+R_{101}$. I (Tom) believe this requires the assumption $p_\mathrm{tot} := \sum_{\xi\in\paths{b}} \mathbb{P}[\xi] = 1$. To see why this is required, note that the actual term in the recursive formula should be $$(3p-6p^2)\cdot\left( \sum_{\xi\in\paths{101}} \mathbb{P}[\xi] \cdot \left( 1 + |\xi|\right) \right) = (3p-6p^2)\left( p_\mathrm{tot} + R_{101} \right)$$ When there would be a non-zero probability of never stopping the resample process then $p_\mathrm{tot}$ (the probability of ever reaching $\mathbf{1}$) could be less than one. Therefore I assume that $R^{(n)}(p)$ is finite which implies that the probability of ever reaching $\mathbf{1}$ is 1. +Note: the second term in the first line uses that with probability $(3p-6p^2 + 3p^3)$ the state goes to $\framebox{$101$}$ and then the expected number of resamplings is $1+R_{101}$. The actual term in the recursive formula should be +$$(3p-6p^2+3p^3)\cdot\left( \sum_{\xi\in\paths{101}} \mathbb{P}[\xi] \cdot \left( 1 + |\xi|\right) \right) = (3p-6p^2+3p^3)\left( p_\mathrm{tot} + R_{101} \right)$$ +where $p_\mathrm{tot} := \sum_{\xi\in\paths{101}} \mathbb{P}[\xi]$. However, since the state space is finite (for finite $n$) and there is always a non-vanishing probability to go to $\mathbf{1}$, we know that $p_\mathrm{tot}=1$, i.e. the process terminates almost surely. \newpage -\subsection{Cancellation of gapped configurations} +\subsection{Proving the weak cancellation claim} Here we prove claim \ref{claim:weakcancel}, the weaker version of the claim. We require the following definition \begin{definition}[Path independence] \label{def:independence} @@ -360,11 +362,11 @@ and indeed the sums agree up to order $p^{k-1}=p^2$. When going up to order $p^{ ~ \begin{proof} -Consider a path $\xi_1\in\paths{b_1}$ and a path $\xi_2\in\paths{b_2}$ such that $\xi_1$ and $\xi_2$ are independent (Definition \ref{def:independence}). The paths $\xi_1,\xi_2$ induce $\binom{|\xi_1|+|\xi_2|}{|\xi_1|}$ different paths of total length $|\xi_1|+|\xi_2|$ in $\paths{b_1\land b_2}$. In the sums $R_{b_1}$ and $R_{b_2}$, the contribution of these paths are $\mathbb{P}[\xi_1]\cdot |\xi_1|$ and $\mathbb{P}[\xi_2]\cdot |\xi_2|$. The next diagram shows how these $\binom{|\xi_1|+|\xi_2|}{|\xi_1|}$ paths contribute to $R_{b_1\land b_2}$. At every step one has to choose between doing a step of $\xi_1$ or a step of $\xi_2$. The number of zeroes in the current state determine the probabilities with which this happens (beside the probabilities associated to the two original paths already). The grid below shows that at every point one can choose to do a step of $\xi_1$ with probability $p_i$ or a step of $\xi_2$ with probability $1-p_i$. These $p_i$ could in principle be different at every point in this grid.
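+The grid identity used in this proof, namely that the weights $p_\mathrm{grid}$ of all $\binom{|\xi_1|+|\xi_2|}{|\xi_1|}$ monotone grid paths sum to $1$ for arbitrary choice probabilities at the interior points, is easy to check numerically. A minimal Python sketch, with names and grid size of our own choosing:
+\begin{verbatim}
import random
from functools import lru_cache

m1, m2 = 4, 3      # stand-ins for |xi_1| and |xi_2|
random.seed(0)
# Arbitrary probability of taking a xi_1-step at each interior grid point.
p = {(i, j): random.random() for i in range(m1) for j in range(m2)}

@lru_cache(maxsize=None)
def total(i, j):
    """Sum of p_grid over all monotone paths from (i, j) to (m1, m2)."""
    if i == m1 and j == m2:
        return 1.0
    if i == m1:                # right edge: only xi_2 steps remain (forced)
        return total(i, j + 1)
    if j == m2:                # top edge: only xi_1 steps remain (forced)
        return total(i + 1, j)
    return p[(i, j)] * total(i + 1, j) + (1 - p[(i, j)]) * total(i, j + 1)

print(total(0, 0))             # -> 1.0 for any choice of the p[(i, j)]
+\end{verbatim}
+The recursion also shows why the identity holds: at every interior point the two options have total weight $p_{ij}+(1-p_{ij})=1$, and the forced edge steps have weight $1$.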
+ Consider a path $\xi_1\in\paths{b_1}$ and a path $\xi_2\in\paths{b_2}$ such that $\xi_1$ and $\xi_2$ are independent (Definition \ref{def:independence} or \ref{def:independence2}). The paths $\xi_1,\xi_2$ induce $\binom{|\xi_1|+|\xi_2|}{|\xi_1|}$ different paths of total length $|\xi_1|+|\xi_2|$ in $\paths{b_1\land b_2}$. In the sums $R_{b_1}$ and $R_{b_2}$, the contributions of these paths are $\mathbb{P}[\xi_1]\cdot |\xi_1|$ and $\mathbb{P}[\xi_2]\cdot |\xi_2|$. The next diagram shows how these $\binom{|\xi_1|+|\xi_2|}{|\xi_1|}$ paths contribute to $R_{b_1\land b_2}$. Point $(i,j)$ in the grid indicates that $i$ steps of $\xi_1$ have been done and $j$ steps of $\xi_2$ have been done. At every point (except the top and right edges of the grid) one has to choose between doing a step of $\xi_1$ or a step of $\xi_2$. The number of zeroes in the current state determines the probabilities with which this happens (besides the probabilities already associated to the two original paths). The grid below shows that at a certain point one can choose to do a step of $\xi_1$ with probability $p_i$ or a step of $\xi_2$ with probability $1-p_i$. These $p_i$ could in principle be different at every point in this grid. The weight of such a new path $\xi\in\paths{b_1\land b_2}$ is $p_\mathrm{grid}\cdot\mathbb{P}[\xi_1]\cdot\mathbb{P}[\xi_2]$ where $p_\mathrm{grid}$ is the weight of the path in the diagram. By induction one can show that the sum over the $\binom{|\xi_1|+|\xi_2|}{|\xi_1|}$ different terms $p_\mathrm{grid}$ is $1$. \begin{center} \includegraphics{diagram_paths.pdf} \end{center} -The weight of such a new path is the weight of the path in the diagram, multiplied by $\mathbb{P}[\xi_1]\cdot\mathbb{P}[\xi_2]$. By induction one can show that the sum over all $\binom{|\xi_1|+|\xi_2|}{|\xi_1|}$ paths in the grid is $1$. Hence the contribution of all $\binom{|\xi_1|+|\xi_2|}{|\xi_1|}$ paths together to $R_{b_1\land b_2}$ is given by + Hence the contribution of all $\binom{|\xi_1|+|\xi_2|}{|\xi_1|}$ paths together to $R_{b_1\land b_2}$ is given by \[ \mathbb{P}[\xi_1]\cdot\mathbb{P}[\xi_2]\cdot(|\xi_1|+|\xi_2|) = \mathbb{P}[\xi_2]\cdot\mathbb{P}[\xi_1]\cdot|\xi_1| \;\; + \;\; \mathbb{P}[\xi_1]\cdot\mathbb{P}[\xi_2]\cdot|\xi_2|. \] @@ -400,18 +402,27 @@ we can do the same with the second term and this proves the claim. \end{align*} where we used the identity $\sum_{a\in\{0,1\}^l} (-1)^{|a|} = 0$. -~ - -It is useful to introduce some new notation: for any event $A$ (where an event is a set of paths), define -\begin{align*} - \mathbb{P}_b(A) &= \mathbb{P}(A \;|\; \text{start in }b) \\ - R_{b,A} &= \mathbb{E}( \#resamples \;|\; A\;,\; \text{start in }b) -\end{align*} -Denote by $\mathrm{Z}_j$ the event that site $j$ becomes zero at any point in time before the Markov Chain terminates. Denote the complement by $\mathrm{NZ}_j$, i.e. the event that site $j$ does \emph{not} become zero before it terminates. - -The proof of claim \ref{claim:expectationsum} also proves the following claim -\begin{claim}[Conditional independence] \label{claim:eventindependence} - As in \ref{claim:expectationsum}, let $b=b_1\land b_2\in\{0,1\}^n$ be a state with two groups ($b_1\lor b_2 = 1^n$) of zeroes. Let $j_1$, $j_2$ be indices `inbetween' the groups (or only one index in case of the infinite line).
Then we have +\newpage +\subsection{Proving the strong cancellation claim} +It is useful to introduce some new notation: +\begin{definition}[Events conditioned on starting state] \label{def:conditionedevents} + For any state $b\in\{0,1\}^n$ and any event $A$ (where an event is a subset of all possible paths of the Markov Chain), define + \begin{align*} + \mathbb{P}_b(A) &= \mathbb{P}(A \;|\; \text{start in }b) \\ + R_{b,A} &= \mathbb{E}( \#\text{resamples} \;|\; A \;\&\; \text{start in }b) + \end{align*} +\end{definition} +\begin{definition}[Vertex visiting event] \label{def:visitingResamplings} + Denote by $\mathrm{Z}^{(j)}$ the event that site $j$ becomes zero at any point in time before the Markov Chain terminates. Denote the complement by $\mathrm{NZ}^{(j)}$, i.e. the event that site $j$ does \emph{not} become zero before it terminates. +\end{definition} +\begin{figure} + \begin{center} + \includegraphics{diagram_groups.pdf} + \end{center} + \caption{\label{fig:separatedgroups} Illustration of the setup of Lemma \ref{lemma:eventindependence}. Here $b_1,b_2\in\{0,1\}^n$ are bitstrings such that all zeroes of $b_1$ and all zeroes of $b_2$ are separated by two indices $j_1,j_2$.} +\end{figure} +\begin{lemma}[Conditional independence] \label{lemma:eventindependence} \label{claim:eventindependence} + Let $b=b_1\land b_2\in\{0,1\}^n$ be a state with two groups ($b_1\lor b_2 = 1^n$) of zeroes that are separated as in Figure \ref{fig:separatedgroups}. Let $j_1$, $j_2$ be any indices `inbetween' the groups as shown in the figure. Then we have \begin{align*} \mathbb{P}_b(\mathrm{NZ}^{(j_1)} , \mathrm{NZ}^{(j_2)}) &= \mathbb{P}_{b_1}(\mathrm{NZ}^{(j_1)} , \mathrm{NZ}^{(j_2)}) \cdot \mathbb{P}_{b_2}(\mathrm{NZ}^{(j_1)} , \mathrm{NZ}^{(j_2)}) \\ R_{b,\mathrm{NZ}^{(j_1)},\mathrm{NZ}^{(j_2)}} &= R_{b_1,\mathrm{NZ}^{(j_1)},\mathrm{NZ}^{(j_2)}} + R_{b_2,\mathrm{NZ}^{(j_1)},\mathrm{NZ}^{(j_2)}} \end{align*} up to any order in $p$. -\end{claim} +\end{lemma} +The lemma says that, conditioned on the sites $j_1$ and $j_2$ never becoming $0$, the two halves of the circle behave independently. + \begin{proof} For clarity we do the proof for the infinite line, when there is only one index $j$. Simply replace $\mathrm{NZ}^{(j)}$ by $\mathrm{NZ}^{(j_1)}\cap\mathrm{NZ}^{(j_2)}$ for the case of the circle. - Note that any path $\xi\in\paths{b} \cap \mathrm{NZ}_j$ can be split into paths $\xi_1\in\paths{b_1}\cap \mathrm{NZ}_j$ and $\xi_2\in\paths{b_2}\cap\mathrm{NZ}_j$ and by the same reasoning as in the proof of claim \ref{claim:expectationsum}, we obtain + Note that any path $\xi\in\paths{b} \cap \mathrm{NZ}^{(j)}$ can be split into paths $\xi_1\in\paths{b_1}\cap \mathrm{NZ}^{(j)}$ and $\xi_2\in\paths{b_2}\cap\mathrm{NZ}^{(j)}$. This can be done by taking each resampling position $r_i$ in $\xi$: if it is ``on the $b_1$ side of $j_1,j_2$'' we add it to $\xi_1$, and if it is ``on the $b_2$ side of $j_1,j_2$'' we add it to $\xi_2$. Vice versa, all paths $\xi_1\in\paths{b_1}\cap \mathrm{NZ}^{(j)}$ and $\xi_2\in\paths{b_2}\cap\mathrm{NZ}^{(j)}$ also induce a path $\xi\in\paths{b} \cap \mathrm{NZ}^{(j)}$ by simply concatenating the resampling positions. By the same reasoning as in the proof of claim \ref{claim:expectationsum}, we obtain \begin{align*} \mathbb{P}_b(\mathrm{NZ}^{(j)}) = \sum_{\substack{\xi\in\paths{b}\\\xi \in \mathrm{NZ}^{(j)}}} \mathbb{P}[\xi] @@ -456,8 +469,7 @@ Dividing by $\mathbb{P}_b(\mathrm{NZ}^{(j)})$ and using the first equality gives the desired result. \end{proof}
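+Lemma \ref{lemma:eventindependence} can also be sanity checked by simulation. The sketch below estimates both sides of the first equality in the infinite-line case, using a finite window (harmless for small $p$, where the process stays local). The resampling rule used here (pick a uniformly random $0$-site and resample it together with its two neighbours) and all parameter values are our own assumptions for the illustration.
+\begin{verbatim}
import random

def run(zeros, n=40, p=0.05, j=20):
    """One run from the state with 0s at `zeros`; True iff NZ^(j) holds."""
    b = [1] * n
    for z in zeros:
        b[z] = 0
    ok = b[j] == 1
    while 0 in b:
        r = random.choice([i for i, v in enumerate(b) if v == 0])
        for i in (r - 1, r, r + 1):       # resample the site and neighbours
            if 0 <= i < n:
                b[i] = 0 if random.random() < p else 1
        if b[j] == 0:
            ok = False
    return ok

def estimate(zeros, trials=100000):
    return sum(run(zeros) for _ in range(trials)) / trials

# The zeroes of b_1 and of b_2 are separated by the index j = 20.
pb  = estimate([17, 18, 23])   # start from b = b_1 AND b_2
pb1 = estimate([17, 18])       # start from b_1 only
pb2 = estimate([23])           # start from b_2 only
print(pb, pb1 * pb2)           # the two numbers agree up to sampling error
+\end{verbatim}
+The second equality of the lemma can be checked the same way, by averaging the number of resamples over the runs where the conditioning event holds.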
-~ - +\begin{comment} TEST: Although a proof of claim \ref{claim:expectationsum} was already given, I'm trying to prove it in an alternate way using claim \ref{claim:eventindependence}. ~ @@ -505,49 +517,44 @@ Now observe that + \mathcal{O}(p^k) \\ &\overset{???}{=} R_{b_1} + R_{b_2} + \mathcal{O}(p^k) \end{align*} +\end{comment} -\newpage - \subsection{Attempt to prove the linear bound \ref{it:const}} - Consider the chain (instead of the cycle) for simplicity with vertices identified by $\mathbb{Z}$. \begin{definition}[Starting state dependent probability distribution.] Let $I\subset\mathbb{Z}$ be a finite set of vertices. - Let $b_I$ be the initial state where everything is $1$, apart from the vertices corresponding to $I$, which are set $0$. For an event $A$ representing a subset of all possible resample sequences let $P_I(A)$ denote the probability of seeing a resample sequence from $A$ when the whole procedure started in state $b_I$. -\end{definition} -\begin{definition}[Vertex visiting resamplings]\label{def:visitingResamplings} - Let $V^{(i)}$ be the event corresponding to ``Vertex $i$ gets resampled to $0$ before termination''. + Let $b_I$ be the initial state where everything is $1$, apart from the vertices corresponding to $I$, which are set $0$. Define $P_I(A)=\mathbb{P}_{b_I}(A)$ where the latter is defined in Definition \ref{def:conditionedevents}, i.e. the probability of seeing a resample sequence from $A$ when the whole procedure started in state $b_I$. \end{definition} The intuition of the following lemma is that the far right can only affect the zero vertex if there is an interaction chain forming, which means that every vertex should get resampled to $0$ at least once. \begin{lemma}\label{lemma:probIndep} Suppose we have a finite set $I\subset\mathbb{N}_+$ of vertices. Let $I_{\max}:=\max(I)$ and $I':=I\setminus\{I_{\max}\}$, and similarly let $I_{\min}:=\min(I)$. - Then $P_{I}(V^{(0)})=P_{I'}(V^{(0)}) + O(p^{I_{\max}+1-|I|})$. + Then $P_{I}(\mathrm{Z}^{(0)})=P_{I'}(\mathrm{Z}^{(0)}) + \mathcal{O}(p^{I_{\max}+1-|I|})$. \end{lemma} \begin{proof} - The proof uses induction on $|I|$. For $|I|=1$ the statement is easy, since every resample sequence that resamples the $0$ vertex must produce at least $I_{\max}$ number of $0$-s during the resamplings. + The proof uses induction on $|I|$. For $|I|=1$ the statement is easy, since every resample sequence that resamples vertex $0$ to zero must produce at least $I_{\max}$ zeroes in between. - Induction step: For an event $A$ and $k>0$ let us denote by $A_k=A\cap$``Each vertex in $0,1,2,\ldots, k-1$ gets $0$ before termination (either by resampling or initialisation), but not $k$''. Observe that $V^{(0)}=\dot{\bigcup}_{k=1}^{\infty}V^{(0)}_k$. - Let $I_{k}:=I\setminus[k]$, finally let $I_{><}:=\{I_{\min}+1,I_{\max}-1]\}\setminus I$ as shown in Figure \ref{fig:diametergap}. Suppose we proved the claim up to $|I|-1$, then the induction step can be shown by + Induction step: For an event $A$ and $k>0$ let us denote $A_k = A\cap\left(\bigcap_{j=0}^{k-1} \mathrm{Z}^{(j)}\right)\cap \mathrm{NZ}^{(k)}$, i.e. $A_k$ is the event $A$ \emph{and} ``Each vertex in $0,1,2,\ldots, k-1$ becomes $0$ at some point before termination (either by resampling or initialisation), but vertex $k$ does not''. Observe that these events form a partition, so $\mathrm{Z}^{(0)}=\dot{\bigcup}_{k=1}^{\infty}\mathrm{Z}^{(0)}_k$. + Let $I_{k}:=I\setminus[1,k]$, finally let $I_{><}:=[I_{\min}+1,I_{\max}-1]\setminus I$ (note that $I_{><} = \gaps{I}$ as shown in Figure \ref{fig:diametergap}).
Suppose we have proven the claim up to $|I|-1$; then the induction step can be shown by
\begin{align*}
-    P_{I}(V^{(0)})
-    &=\sum_{k=1}^{\infty}P(V^{(0)}_k)
-    =\sum_{k\in \mathbb{N}\setminus I}P(V^{(0)}_k)\\
-    &=\sum_{k\in\mathbb{N}\setminus I}P_{I_{k}}(\overline{V^{(k)}}) \tag{by Claim~\ref{claim:eventindependence}}\\
-    &=\sum_{k\in I_{><}}P_{I_{k}}(\overline{V^{(k)}})+\mathcal{O}(p^{I_{\max}+1-|I|})\\
-    &=\sum_{k\in I_{><}}\left(P_{I'_{k}}(\overline{V^{(k)}})+\mathcal{O}(p^{I_{\max}-k+1-|I_{>k}|})\right)+\mathcal{O}(p^{I_{\max}+1-|I|})\\
-    &=\sum_{k\in I_{><}}P_{I'_{k}}(\overline{V^{(k)}})+\mathcal{O}(p^{I_{\max}+1-|I|})\\
-    &=\sum_{k\in\mathbb{N}\setminus I'}P_{I'_{k}}(\overline{V^{(k)}})+\mathcal{O}(p^{I_{\max}+1-|I|})
+    P_{I}(\mathrm{Z}^{(0)})
+    &=\sum_{k=1}^{\infty}P(\mathrm{Z}^{(0)}_k) \tag{the events are a partition}\\
+    &=\sum_{k\in \mathbb{N}\setminus I}P(\mathrm{Z}^{(0)}_k) \tag{$\mathbb{P}(A_k)=0$ for $k\in I$}\\
+    &=\sum_{k\in\mathbb{N}\setminus I}P_{I_{k}}(\mathrm{NZ}^{(k)}) \tag{by Claim~\ref{claim:eventindependence}}\\
+    &=\sum_{k\in I_{><}}P_{I_{k}}(\mathrm{NZ}^{(k)})+\mathcal{O}(p^{I_{\max}+1-|I|}) \tag{$k<I_{\min}$ and $k>I_{\max}$ terms are absorbed in $\mathcal{O}(p^{I_{\max}+1-|I|})$}\\
+    &=\sum_{k\in I_{><}}\left(P_{I'_{k}}(\mathrm{NZ}^{(k)})+\mathcal{O}(p^{I_{\max}-k+1-|I_{>k}|})\right)+\mathcal{O}(p^{I_{\max}+1-|I|}) \tag{by induction, since for $k>I_{\min}$ we have $|I_{<k}|\geq 1$}\\
+    &=\sum_{k\in I_{><}}P_{I'_{k}}(\mathrm{NZ}^{(k)})+\mathcal{O}(p^{I_{\max}+1-|I|})\\
+    &=\sum_{k\in\mathbb{N}\setminus I'}P_{I'_{k}}(\mathrm{NZ}^{(k)})+\mathcal{O}(p^{I_{\max}+1-|I|}) \\