Changeset - aad4326b06f9
[Not reviewed]
Tom Bannink - 8 years ago 2017-07-02 19:57:50
tombannink@gmail.com
Add attempt at proving circle version of lemma
3 files changed with 117 insertions and 0 deletions:
0 comments (0 inline, 0 general)
diagram_circle_lemma.pdf
 
new file 100644
 
binary diff not shown
diagram_circle_lemma.tex
 
new file 100644
 
\documentclass{standalone}
 
\usepackage[T1]{fontenc}
 
\usepackage{amsmath}
 
\usepackage{amsfonts}
 
\usepackage{parskip}
 
\usepackage[usenames,dvipsnames]{color}
 
\usepackage[hidelinks]{hyperref}
 
\renewcommand*{\familydefault}{\sfdefault}
 

	
 
\usepackage{bbm} %For \mathbbm{1}
 
%\usepackage{bbold}
 
\usepackage{tikz}
 

	
 
\begin{document}
 

	
 
\begin{tikzpicture}[scale=0.8]
 
    \def\r{4};
 
    \def\step{15};
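    % \r is the drawing radius and \step the angular spacing in degrees;
    % 24 vertices at 15 degrees each close up the full circle (24*15 = 360).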
 

	
 
    % Line and circles
 
    \foreach \a in {0,...,23} {
 
        \draw[-] ({\a*\step}:\r) -- ({(\a+1)*\step}:\r);
 
        \draw ({\a*\step}:\r) circle (0.05);
 
    }
 

	
 
    % Red dots
 
    \foreach \a in {2,3,6,8,9,10,11,14,15,17,20} {
 
        \draw[fill,red] ({\a*\step}:\r) circle (0.09);
 
    }
 
    % Blue squares
 
    \foreach \a in {2,3,20} {
 
        \draw[fill,blue] ({\a*\step}:{\r-0.3})+(-0.1,-0.1) rectangle +(0.1,0.1);
 
    }
 
    % Green triangles
 
    \foreach \a in {6,8,9,10,11,14,15,17} {
 
        \draw[fill,green] ({\a*\step}:{\r-0.3}) +(-0.15,-0.1) -- +(0.15,-0.1) -- +(0,0.15);
 
    }
 

	
 
    % Legend
 
    \draw (-1,0)+(-0.5,-1) rectangle +(2.2,1);
 
    \draw[fill,red]   (-1.1,0.5)  circle (0.09);
 
    \draw[fill,blue]  (-1.1,0)    +(-0.1,-0.1) rectangle +(0.1,0.1);
 
    \draw[fill,green] (-1.1,-0.5) +(-0.15,-0.1) -- +(0.15,-0.1) -- +(0,0.15);
 
    \draw[anchor=west] (-1,0.5)  node {$I$};
 
    \draw[anchor=west] (-1,0)    node {$I_{\mathrm{in}(n-l,k)}$};
 
    \draw[anchor=west] (-1,-0.5) node {$I_{\mathrm{out}(n-l,k)}$};
 

	
 

	
 
    % Bigger circle
 
    \foreach \a in {-5,...,3} {
 
        \draw[-,dashed] ({\a*\step}:\r+1) -- ({(\a+1)*\step}:\r+1);
 
        %\draw ({\a*\step}:\r+1) circle (0.05);
 
    }
 
    \draw ({-5*\step}:\r+1) +(0.2,-0.2) node {$n-l$};
 
    \draw ({4*\step}:\r+1) +(0.2,0.2) node {$k$};
 

	
 

	
 
    %
 
    % Arrows with labels
 
    %
 

	
 
    % 0
 
    \draw ({0*\step}:\r-0.3) node {$0$};
 
    % 1
 
    \draw ({1*\step}:\r-0.3) node {$1$};
 
    % n/2
 
    \draw ({12*\step}:\r-0.3) node {$\frac{n}{2}$};
 
    % i_*
 
    \draw[->] ({10*\step}:\r-1) -- ({10*\step}:\r-0.2);
 
    \draw ({10*\step}:\r-1) +(0.3,0) node {$i_*$};
 
    % n-1
 
    \draw ({-1*\step}:\r-0.3) +(-0.4,0) node {$n-1$};
 
\end{tikzpicture}
 
\end{document}
main.tex
 
@@ -391,384 +391,427 @@ For all $\xi\in\paths{b_1\land b_2}$ we have that \emph{either} $\xi$ splits int
 
\]
 
where the last sum contains only terms of order $p^{k}$ or higher. Now for the first sum, note that
 
\[
 
	\sum_{\mathclap{\substack{\xi_{1,2}\in\paths{b_{1,2}}\\ \mathrm{independent}}}} \mathbb{P}[\xi_2]\mathbb{P}[\xi_1]|\xi_1|
 
    = \sum_{\xi_1\in\paths{b_1}} \sum_{\substack{\xi_2\in\paths{b_2}\\ \text{independent of }\xi_1}} \mathbb{P}[\xi_2]\mathbb{P}[\xi_1]|\xi_1|
 
\]
 
where the sum over independent paths could be empty for certain $\xi_1$. Now we replace this last sum by a sum over \emph{all} paths $\xi_2\in\paths{b_2}$. This will change the sum but only for terms where $\xi_1,\xi_2$ are dependent. For those terms we already know that $\mathbb{P}[\xi_1]\mathbb{P}[\xi_2]$ contains a factor $p^k$ and hence we have 
 
\begin{align*}
 
    \sum_{\mathclap{\substack{\xi_{1,2}\in\paths{b_{1,2}}\\ \mathrm{independent}}}} \mathbb{P}[\xi_2]\mathbb{P}[\xi_1]|\xi_1|
 
    &= \sum_{\xi_1\in\paths{b_1}} \sum_{\xi_2\in\paths{b_2}} \mathbb{P}[\xi_2]\mathbb{P}[\xi_1]|\xi_1| + \mathcal{O}(p^k) \\
 
    &= \sum_{\xi_1\in\paths{b_1}} \mathbb{P}[\xi_1]|\xi_1| + \mathcal{O}(p^k) \\
 
    &= R_{b_1} + \mathcal{O}(p^k)
 
\end{align*}
 
We can do the same with the second term, and this proves the claim.
 
\end{proof}
 

	
 
~\\
 
\textbf{Proof of claim \ref{claim:weakcancel}}: We can assume $C$ consists of a group on the left with $l$ slots and a group on the right with $r$ slots (so $r+l=|C|$), with a gap of size $k=\mathrm{gap}(C)$ between these groups. Then on the left we have strings in $\{0,1'\}^l$ as possibilities and on the right we have strings in $\{0,1'\}^r$. The combined configuration can be described by strings $f=(a,b)\in\{0,1'\}^{l+r}$. The initial probability of such a state $C(a,b)$ is $\rho_{C(a,b)} = (-1)^{|a|+|b|} p^{r+l}$ and by claim \ref{claim:expectationsum} we know $R_{C(a,b)} = R_{C(a)} + R_{C(b)} + \mathcal{O}(p^k)$ where $C(a)$ indicates that only the left slots have been filled by $a$ and the other slots are filled with $1$s. The total contribution of these configurations is therefore
 
\begin{align*}
 
    \sum_{f\in\{0,1'\}^{|C|}} \rho_{C(f)} R_{C(f)}
 
    &= \sum_{a\in\{0,1'\}^l} \sum_{b\in\{0,1'\}^r} (-1)^{|a|+|b|}p^{r+l} \left( R_{C(a)} + R_{C(b)} + \mathcal{O}(p^k) \right) \\
 
    &=\;\;\; p^{r+l}\sum_{a\in\{0,1'\}^l} (-1)^{|a|} R_{C(a)} \sum_{b\in\{0,1'\}^r} (-1)^{|b|} \\
 
    &\quad + p^{r+l}\sum_{b\in\{0,1'\}^r} (-1)^{|b|} R_{C(b)} \sum_{a\in\{0,1'\}^l} (-1)^{|a|}
 
        + \mathcal{O}(p^{r+l+k})\\
 
    &= 0 + \mathcal{O}(p^{|C|+k})
 
\end{align*}
 
where we used the identity $\sum_{a\in\{0,1\}^l} (-1)^{|a|} = 0$.
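For completeness, this identity is just the binomial theorem (using that $l,r\geq 1$):
\[
    \sum_{a\in\{0,1\}^l} (-1)^{|a|} = \sum_{j=0}^{l} \binom{l}{j} (-1)^{j} = (1-1)^l = 0 .
\]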
 

	
 
\newpage
 
\subsection{Proving the strong cancellation claim}
 
It is useful to introduce some new notation:
 
\begin{definition}[Events conditioned on starting state] \label{def:conditionedevents}
 
    For any state $b\in\{0,1\}^n$ and any event $A$ (where an event is a subset of all possible paths of the Markov Chain), define
 
    \begin{align*}
 
        \mathbb{P}_b(A) &= \mathbb{P}(A \;|\; \text{start in }b) \\
 
        R_{b,A} &= \mathbb{E}( \#\text{resamples} \;|\; A \;\&\; \text{start in }b)
 
    \end{align*}
 
\end{definition}
 
\begin{definition}[Vertex visiting event] \label{def:visitingResamplings}
 
    Denote by $\mathrm{Z}^{(j)}$ the event that site $j$ becomes zero at any point in time before the Markov Chain terminates. Denote the complement by $\mathrm{NZ}^{(j)}$, i.e. the event that site $j$ does \emph{not} become zero before it terminates. Furthermore define $\mathrm{NZ}^{(j_1,j_2)} := \mathrm{NZ}^{(j_1)} \cap \mathrm{NZ}^{(j_2)}$, i.e. the event that \emph{both} $j_1$ and $j_2$ do not become zero before termination.
 
\end{definition}
 
\begin{figure}
 
	\begin{center}
 
    	\includegraphics{diagram_groups.pdf}
 
    \end{center}
 
    \caption{\label{fig:separatedgroups} Illustration of setup of Lemma \ref{lemma:eventindependence}. Here $b_1,b_2\in\{0,1\}^n$ are bitstrings such that all zeroes of $b_1$ and all zeroes of $b_2$ are separated by two indices $j_1,j_2$.}
 
\end{figure}
 
\begin{lemma}[Conditional independence] \label{lemma:eventindependence} \label{claim:eventindependence}
 
    Let $b=b_1\land b_2\in\{0,1\}^n$ be a state with two groups ($b_1\lor b_2 = 1^n$) of zeroes that are separated by at least one site in between, as in Figure \ref{fig:separatedgroups}. Let $j_1$, $j_2$ be any indices in between the groups, such that $b_1$ lies on one side of them and $b_2$ on the other, as shown in the figure. Furthermore, let $A_1$ be any event that depends only on the sites ``on the $b_1$ side of $j_1,j_2$'', and similarly for $A_2$ (for example $\mathrm{Z}^{(i)}$ for an $i$ on the correct side). Then we have
 
    \begin{align*}
 
        \mathbb{P}_b(\mathrm{NZ}^{(j_1,j_2)}, A_1, A_2)
 
        &=
 
        \mathbb{P}_{b_1}(\mathrm{NZ}^{(j_1,j_2)}, A_1)
 
        \; \cdot \;
 
        \mathbb{P}_{b_2}(\mathrm{NZ}^{(j_1,j_2)}, A_2) \\
 
        R_{b,\mathrm{NZ}^{(j_1,j_2)},A_1,A_2}
 
        &=
 
        R_{b_1,\mathrm{NZ}^{(j_1,j_2)},A_1}
 
        \; + \;
 
        R_{b_2,\mathrm{NZ}^{(j_1,j_2)},A_2}
 
    \end{align*}
 
    up to any order in $p$.
 
\end{lemma}
 
The lemma says that, conditioned on the sites $j_1$ and $j_2$ never becoming zero, the two halves of the circle are independent.
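In particular, taking both $A_1$ and $A_2$ to be the trivial (always satisfied) event, the lemma specializes to
\begin{align*}
    \mathbb{P}_b(\mathrm{NZ}^{(j_1,j_2)})
    &=
    \mathbb{P}_{b_1}(\mathrm{NZ}^{(j_1,j_2)})
    \; \cdot \;
    \mathbb{P}_{b_2}(\mathrm{NZ}^{(j_1,j_2)}) \\
    R_{b,\mathrm{NZ}^{(j_1,j_2)}}
    &=
    R_{b_1,\mathrm{NZ}^{(j_1,j_2)}}
    \; + \;
    R_{b_2,\mathrm{NZ}^{(j_1,j_2)}}
\end{align*}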
 

	
 
\begin{proof}
 
    Note that any path $\xi\in\paths{b} \cap \mathrm{NZ}^{(j_1,j_2)}$ can be split into paths $\xi_1\in\paths{b_1}\cap \mathrm{NZ}^{(j_1,j_2)}$ and $\xi_2\in\paths{b_2}\cap\mathrm{NZ}^{(j_1,j_2)}$. This can be done by taking all resampling positions $r_i$ in $\xi$: if $r_i$ is ``on the $b_1$ side of $j_1,j_2$'' then add it to $\xi_1$, and if it is ``on the $b_2$ side of $j_1,j_2$'' then add it to $\xi_2$. Note that now $\xi_1$ is a path from $b_1$ to $\mathbf{1}$, because in the original path $\xi$, all zeroes ``on the $b_1$ side'' have been resampled by resamplings ``on the $b_1$ side''. Since the sites $j_1,j_2$ in between never become zero, there cannot be any zero ``on the $b_1$ side'' that was resampled by a resampling ``on the $b_2$ side''. Vice versa, all paths $\xi_1\in\paths{b_1}\cap \mathrm{NZ}^{(j_1,j_2)}$ and $\xi_2\in\paths{b_2}\cap\mathrm{NZ}^{(j_1,j_2)}$ also induce a path $\xi\in\paths{b} \cap \mathrm{NZ}^{(j_1,j_2)}$ by simply concatenating the resampling positions. Note that $\xi_1,\xi_2$ actually induce $\binom{|\xi_1|+|\xi_2|}{|\xi_1|}$ paths $\xi$, because of the possible orderings in which the resamplings of $\xi_1$ and $\xi_2$ can be interleaved. However, all these paths have smaller weight, and by the same reasoning as in the proof of claim \ref{claim:expectationsum} these weights sum to exactly $1$, so we obtain
 
    \begin{align*}
 
        \mathbb{P}_b(\mathrm{NZ}^{(j_1,j_2)},A_1,A_2)
 
        &= \sum_{\substack{\xi\in\paths{b} \cap \\ \mathrm{NZ}^{(j_1,j_2)}\cap A_1\cap A_2}} \mathbb{P}[\xi] \\
 
        &= \sum_{\substack{\xi_1\in\paths{b_1} \cap \\ \mathrm{NZ}^{(j_1,j_2)}\cap A_1}} \;\;
 
          \sum_{\substack{\xi_2\in\paths{b_2} \cap \\ \mathrm{NZ}^{(j_1,j_2)}\cap A_2}}
 
        \mathbb{P}[\xi_1]\cdot\mathbb{P}[\xi_2] \\
 
        &=
 
        \mathbb{P}_{b_1}(\mathrm{NZ}^{(j_1,j_2)},A_1)
 
        \; \cdot \;
 
        \mathbb{P}_{b_2}(\mathrm{NZ}^{(j_1,j_2)},A_2).
 
    \end{align*}
 
    For the second equality, note that again by the same reasoning as in the proof of claim \ref{claim:expectationsum} we have
 
    \begin{align*}
 
        \mathbb{P}_b(\mathrm{NZ}^{(j_1,j_2)},A_1,A_2) R_{b,\mathrm{NZ}^{(j_1,j_2)},A_1,A_2}
 
        &:= \sum_{\substack{\xi\in\paths{b}\\\xi \in \mathrm{NZ}^{(j_1,j_2)}\cap A_1\cap A_2}} \mathbb{P}[\xi] |\xi| \\
 
        &= \sum_{\substack{\xi_1\in\paths{b_1}\\\xi_1 \in \mathrm{NZ}^{(j_1,j_2)}\cap A_1}}
 
          \sum_{\substack{\xi_2\in\paths{b_2}\\\xi_2 \in \mathrm{NZ}^{(j_1,j_2)}\cap A_2}}
 
        \mathbb{P}[\xi_1]\mathbb{P}[\xi_2] (|\xi_1| + |\xi_2|) \\
 
        &=
 
        \mathbb{P}_{b_2}(\mathrm{NZ}^{(j_1,j_2)},A_2) \mathbb{P}_{b_1}(\mathrm{NZ}^{(j_1,j_2)},A_1) R_{b_1,\mathrm{NZ}^{(j_1,j_2)},A_1} \\
 
        &\quad +
 
        \mathbb{P}_{b_1}(\mathrm{NZ}^{(j_1,j_2)},A_1) \mathbb{P}_{b_2}(\mathrm{NZ}^{(j_1,j_2)},A_2) R_{b_2,\mathrm{NZ}^{(j_1,j_2)},A_2} .
 
    \end{align*}
 
    Dividing by $\mathbb{P}_b(\mathrm{NZ}^{(j_1,j_2)},A_1,A_2)$ and using the first equality gives the desired result.
 
\end{proof}
 

	
 
\begin{comment}
 
TEST: Although a proof of claim \ref{claim:expectationsum} was already given, I'm trying to prove it in an alternate way using claim \ref{claim:eventindependence}.
 

	
 
~
 

	
 
Assume that $b_1$ ranges up to site $0$, the gap ranges from sites $1,...,k$ and $b_2$ ranges from site $k+1$ and onwards. For $j=1,...,k$ define the ``partial-zeros'' event $\mathrm{PZ}_j = \mathrm{Z}_1 \cap \mathrm{Z}_2 \cap ... \cap \mathrm{Z}_{j-1} \cap \mathrm{NZ}_j$ i.e. the first $j-1$ sites of the gap become zero and site $j$ does not become zero. Also define the ``all-zeros'' event $\mathrm{AZ} = \mathrm{Z}_1 \cap ... \cap \mathrm{Z}_k$, where all sites of the gap become zero. Note that these events partition the space, so we have for all $b$ that $\sum_{j=1}^k \mathbb{P}_b(\mathrm{PZ}_j) = 1 - \mathbb{P}_b(\mathrm{AZ}) = 1 - \mathcal{O}(p^k)$.
 

	
 
~
 

	
 
Furthermore, if site $j$ becomes zero when starting from $b_1$ it means all sites to the left of $j$ become zero as well. Similarly, from $b_2$ it implies all the sites to the right of $j$ become zero.
 
Because of that, we have
 
\begin{align*}
 
    \mathbb{P}_{b_1}(\mathrm{PZ}_j) &= \mathbb{P}_{b_1}(\mathrm{Z}_{j-1} \cap \mathrm{NZ}_j) = \mathcal{O}(p^{j-1}) \\
 
    \mathbb{P}_{b_2}(\mathrm{NZ}_j) &= 1 - \mathbb{P}_{b_2}(\mathrm{Z}_j) = 1 - \mathcal{O}(p^{k-j+1})
 
\end{align*}
 
Following the proof of claim \ref{claim:eventindependence} we also have
 
\begin{align*}
 
    \mathbb{P}_b(\mathrm{PZ}_{j})
 
    &=
 
    \mathbb{P}_{b_1}(\mathrm{PZ}_{j})
 
    \; \cdot \;
 
    \mathbb{P}_{b_2}(\mathrm{NZ}_{j}) \\
 
    R_{b,\mathrm{PZ}_{j}}
 
    &=
 
    R_{b_1,\mathrm{PZ}_{j}}
 
    \; + \;
 
    R_{b_2,\mathrm{NZ}_{j}}
 
\end{align*}
 

	
 

	
 
Now observe that
 
\begin{align*}
 
    R_b &= \sum_{j=1}^k \mathbb{P}_b(\mathrm{PZ}_j) R_{b,\mathrm{PZ}_j} + \mathbb{P}_b(\mathrm{AZ}) R_{b,\mathrm{AZ}} \\
 
        &= \sum_{j=1}^k \mathbb{P}_{b_2}(\mathrm{NZ}_j)\mathbb{P}_{b_{1}}(\mathrm{PZ}_j) R_{b_1,\mathrm{PZ}_j}
 
        + \sum_{j=1}^k \mathbb{P}_{b_1}(\mathrm{PZ}_j)\mathbb{P}_{b_{2}}(\mathrm{NZ}_j) R_{b_2,\mathrm{NZ}_j}
 
        + \mathcal{O}(p^k) \\
 
        &= \sum_{j=1}^k \mathbb{P}_{b_{1}}(\mathrm{PZ}_j) R_{b_1,\mathrm{PZ}_j}
 
        - \sum_{j=1}^k \mathbb{P}_{b_2}(\mathrm{Z}_j)\mathbb{P}_{b_{1}}(\mathrm{PZ}_j) R_{b_1,\mathrm{PZ}_j}
 
        + \sum_{j=1}^k \mathbb{P}_{b_1}(\mathrm{PZ}_j)\mathbb{P}_{b_{2}}(\mathrm{NZ}_j) R_{b_2,\mathrm{NZ}_j}
 
        + \mathcal{O}(p^k) \\
 
        &= \sum_{j=1}^k \mathbb{P}_{b_{1}}(\mathrm{PZ}_j) R_{b_1,\mathrm{PZ}_j}
 
        + \sum_{j=1}^k \mathbb{P}_{b_1}(\mathrm{PZ}_j)\mathbb{P}_{b_{2}}(\mathrm{NZ}_j) R_{b_2,\mathrm{NZ}_j}
 
        + \mathcal{O}(p^k) \\
 
        &= R_{b_1}
 
        + \sum_{j=1}^k \mathbb{P}_{b_1}(\mathrm{PZ}_j)\mathbb{P}_{b_{2}}(\mathrm{NZ}_j) R_{b_2,\mathrm{NZ}_j}
 
        + \mathcal{O}(p^k) \\
 
        &\overset{???}{=} R_{b_1} + R_{b_2} + \mathcal{O}(p^k)
 
\end{align*}
 
\end{comment}
 

	
 
For simplicity, consider the chain (instead of the cycle), with vertices indexed by $\mathbb{Z}$.
 
\begin{definition}[Starting state dependent probability distribution.]
 
	Let $I\subset\mathbb{Z}$ be a finite set of vertices.
 
    Let $b_I$ be the initial state where everything is $1$, apart from the vertices corresponding to $I$, which are set to $0$. Define $P_I(A)=P_{b_I}(A)$, where the latter is defined in Definition \ref{def:conditionedevents}, i.e. the probability of seeing a resample sequence from $A$ when the whole procedure is started in state $b_I$.
 
\end{definition}
 

	
 
The intuition of the following lemma is that the far right can only affect the zero vertex if a chain of interactions forms, which means that every vertex in between should get resampled to $0$ at least once.
 
\begin{lemma}\label{lemma:probIndep}
 
	Suppose we have a finite set $I\subset\mathbb{N}_+$ of vertices.
 
    Let $I_{\max}:=\max(I)$ and $I':=I\setminus\{I_{\max}\}$, and similarly let $I_{\min}:=\min(I)$. These definitions are illustrated in Figure \ref{fig:lemmaillustration}.
 
	Then $P_{I}(Z^{(0)})=P_{I'}(Z^{(0)}) + O(p^{I_{\max}+1-|I|})$.
 
\end{lemma}
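For concreteness, an illustrative instance (with arbitrarily chosen values): taking $I=\{2,5\}$ we have $I_{\max}=5$, $I'=\{2\}$ and $|I|=2$, so the lemma states
\[
    P_{\{2,5\}}(Z^{(0)}) = P_{\{2\}}(Z^{(0)}) + \mathcal{O}(p^{5+1-2}) = P_{\{2\}}(Z^{(0)}) + \mathcal{O}(p^{4}).
\]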
 
\begin{proof}
 
\begin{figure}
 
	\begin{center}
 
    	\includegraphics{diagram_proborders.pdf}
 
    \end{center}
 
    \caption{\label{fig:lemmaillustration} Illustration of setup of Lemma \ref{lemma:probIndep}.}
 
\end{figure}
 
	The proof uses induction on $|I|$. For $|I|=1$ the statement is easy, since every resample sequence in which vertex $0$ becomes zero must produce at least $I_{\max}$ zeroes in between.
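    Spelled out: for $I=\{m\}$ we have $I'=\emptyset$, and starting from $b_{\emptyset}=\mathbf{1}$ nothing is ever resampled, so $P_{\emptyset}(Z^{(0)})=0$; on the other hand, every path in which vertex $0$ becomes zero must resample each of the $m$ vertices $0,1,\ldots,m-1$ to zero at least once, and each such zero contributes a factor $p$, so
    \[
        P_{\{m\}}(Z^{(0)}) = \mathcal{O}(p^{m}) = P_{\emptyset}(Z^{(0)}) + \mathcal{O}(p^{I_{\max}+1-|I|}).
    \]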
 
	
 
    Induction step: For an event $A$ and $k>0$ let us denote $A_k = A\cap\left(\cap_{j=0}^{k-1} \mathrm{Z}^{(j)}\right)\cap \mathrm{NZ}^{(k)}$, i.e. $A_k$ is the event $A$ \emph{and} ``Each vertex in $0,1,2,\ldots, k-1$ becomes $0$ at some point before termination (either by resampling or initialisation), but vertex $k$ does not''. Observe that these events form a partition, so $Z^{(0)}=\dot{\bigcup}_{k=1}^{\infty}Z^{(0)}_k$.
 
    Let $I_{<k}:=I\cap[1,k-1]$ and similarly $I_{>k}:=I\setminus[1,k]$, and finally let $I_{><}:=[I_{\min}+1,I_{\max}-1]\setminus I$ (note that $I_{><} = \gaps{I}$ as shown in Figure \ref{fig:diametergap}). Suppose we have proven the claim up to $|I|-1$; then the induction step can be shown by
 
	\begin{align*}
 
		P_{I}(Z^{(0)})
 
		&=\sum_{k=1}^{\infty}P_I(Z^{(0)}_k) \tag{the events are a partition}\\
 
        &=\sum_{k\in \mathbb{N}\setminus I}P_I(Z^{(0)}_k) \tag{$P_I(Z^{(0)}_k)=0$ for $k\in I$}\\
 
        &=\sum_{k\in\mathbb{N}\setminus I}P_{I_{<k}}(Z^{(0)}_k)\cdot P_{I_{>k}}(\mathrm{NZ}^{(k)}) \tag{by Claim~\ref{claim:eventindependence}}\\
 
        &=\sum_{k\in I_{><}}P_{I_{<k}}(Z^{(0)}_k)\cdot P_{I_{>k}}(\mathrm{NZ}^{(k)})+\mathcal{O}(p^{I_{\max}+1-|I|})
 
		\tag{$k<I_{\min}\Rightarrow P_{I_{<k}}(Z^{(0)}_k)=0$; the terms with $k>I_{\max}$ are absorbed in $\mathcal{O}(p^{I_{\max}+1-|I|})$}\\
 
        &=\sum_{k\in I_{><}}P_{I'_{<k}}(Z^{(0)}_k)\cdot P_{I_{>k}}(\mathrm{NZ}^{(k)})+\mathcal{O}(p^{I_{\max}+1-|I|})	
 
		\tag{$k< I_{\max}\Rightarrow I_{<k}=I'_{<k}$}\\
 
		&=\sum_{k\in I_{><}}P_{I'_{<k}}(Z^{(0)}_k)\cdot
 
        \left(P_{I'_{>k}}(\mathrm{NZ}^{(k)})+\mathcal{O}(p^{I_{\max}-k+1-|I_{>k}|})\right) +\mathcal{O}(p^{I_{\max}+1-|I|})	\tag{by induction, since for $k>I_{\min}$ we have $|I_{>k}|<|I|$}\\
 
		&=\sum_{k\in I_{><}}P_{I'_{<k}}(Z^{(0)}_k)\cdot
 
        P_{I'_{>k}}(\mathrm{NZ}^{(k)}) +\mathcal{O}(p^{I_{\max}+1-|I|})	
 
		\tag{as $P_{I'_{<k}}(Z^{(0)}_k)=\mathcal{O}(p^{k-|I'_{<k}|})$}\\
 
		&=\sum_{k\in\mathbb{N}\setminus I}P_{I'_{<k}}(Z^{(0)}_k)\cdot
 
        P_{I'_{>k}}(\mathrm{NZ}^{(k)}) +\mathcal{O}(p^{I_{\max}+1-|I|})\\
 
		&=\sum_{k\in\mathbb{N}\setminus I'}P_{I'_{<k}}(Z^{(0)}_k)\cdot
 
        P_{I'_{>k}}(\mathrm{NZ}^{(k)}) +\mathcal{O}(p^{I_{\max}+1-|I|})	\tag{$k=I_{\max}\Rightarrow P_{I'_{<k}}(Z^{(0)}_k)=\mathcal{O}(p^{I_{\max}-|I'|})=\mathcal{O}(p^{I_{\max}+1-|I|})$}\\
 
		&=P_{I'}(Z^{(0)}) +\mathcal{O}(p^{I_{\max}+1-|I|})	\tag{analogously to the beginning}			
 
	\end{align*}
 
\end{proof}
 

	
 
	The main insight that Lemma~\ref{lemma:probIndep} gives is that if we separate the slots into two halves, then in order to see the cancellation of the contribution of the expected resamples on the right, we can simply pair up the left configurations by the particle filling the leftmost slot. Similarly, for cancelling the left expectations we pair up the right configurations based on the rightmost filling.
 
	
 
	Also, this claim finally ``sees'' how many empty places lie between the slots. These properties make it possible to use this lemma to prove the sought linear bound. We show it for the infinite chain, but with a little care it should also translate to the circle.
 

	
 
~
 

	
 
Here, I (Tom) tried to set up the same Lemma but for the circle instead of the infinite chain.
 
This time it is no longer $I_\mathrm{max}$ that we remove but an arbitrary vertex $i_* \in I$, and $I' = I \setminus \{i_*\}$. Without loss of generality we can assume that $i_* \leq n/2$ (if not, we can relabel the vertices and count the other way around, so that $i_* \to n-i_*$). The goal is now to prove:
 
\begin{align*}
 
    P_I(Z^{(0)}) = P_{I'}(Z^{(0)}) + \mathcal{O}(p^{i_* + 1 - |I|})
 
\end{align*}
 
Note that when we refer to an interval $[a,b]$ on the circle, we could be referring to two possible intervals because of the periodicity of the circle. In the following, whenever we refer to an interval $[a,b]$ we mean the interval with vertex $0$ on the \emph{inside}.
 

	
 
For $a,b\in[n]$, define the event ``zeroes patch'' as the event of getting zeroes inside the interval $[a,b]$ but not on the boundary, i.e.  $\mathrm{ZP}^{[a,b]} = \mathrm{NZ}^{(a)} \cap \mathrm{Z}^{(a+1)} \cap \mathrm{Z}^{(a+2)} \cap \cdots \cap \mathrm{Z}^{(b-1)} \cap \mathrm{NZ}^{(b)}$ (where we assume that $\mathrm{Z}^{(0)}$ is part of this intersection).
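For example (with illustrative values $l=2$ and $k=3$), $\mathrm{ZP}^{[n-2,3]} = \mathrm{NZ}^{(n-2)} \cap \mathrm{Z}^{(n-1)} \cap \mathrm{Z}^{(0)} \cap \mathrm{Z}^{(1)} \cap \mathrm{Z}^{(2)} \cap \mathrm{NZ}^{(3)}$: zeroes appear at the four interior vertices $n-1,0,1,2$ but not at the two boundary vertices $n-2$ and $3$.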
 

	
 
Furthermore, define the `inside' and `outside' of $I$ as $I_{\mathrm{in}(a,b)} = I\cap[a,b]$ and $I_{\mathrm{out}(a,b)} = I \setminus [a,b]$.
 
The following diagram illustrates these definitions.
 
\begin{center}
 
    \includegraphics{diagram_circle_lemma.pdf}
 
\end{center}
 
\begin{align*}
 
    P_{I}(\mathrm{Z}^{(0)})
 
    &=\sum_{\substack{l,k=1\\k+l<n}}
 
    P_I(\mathrm{ZP}^{[n-l,k]}) \tag{the events are a partition}\\
 
    &=\sum_{\substack{l,k=1\\k+l<n\\k,n-l\notin I}}
 
    P_I(\mathrm{ZP}^{[n-l,k]}) \tag{$\mathbb{P}(\mathrm{ZP}^{[a,b]})=0$ for $a\in I$ or $b\in I$}
 
\end{align*}
 
Note that if $[n-l,k]$ does not `touch' $I$ then $P_I(\mathrm{ZP}^{[n-l,k]}) = 0$.
 
Furthermore, we have $P_I(\mathrm{ZP}^{[n-l,k]}) = \mathcal{O}(p^{k+l-1-|I_{\mathrm{in}(n-l,k)}|})$, since all $k+l-1$ interior vertices of $[n-l,k]$ must become zero while only $|I_{\mathrm{in}(n-l,k)}|$ of them start out as zero. If $k\geq i_*$ or $l\geq i_*$ then this gives $P_I(\mathrm{ZP}^{[n-l,k]}) = \mathcal{O}(p^{i_* - 1 - |I|})$ since $|I_\mathrm{in}| \leq |I|$. Therefore we have
 
\begin{align*}
 
    P_I(\mathrm{Z}^{(0)})
 
    &=\sum_{\substack{l,k=1\\k,n-l\notin I}}^{i_*-1}
 
    P_I(\mathrm{ZP}^{[n-l,k]})
 
    + \mathcal{O}(p^{i_* - 1 - |I|}) \\
 
    &=\sum_{\substack{l,k=1\\k,n-l\notin I}}^{i_*-1}
 
    P_{I_{\mathrm{in}(n-l,k)}}(\mathrm{ZP}^{[n-l,k]}) \cdot
 
    P_{I_{\mathrm{out}(n-l,k)}}(\mathrm{NZ}^{(n-l,k)})
 
    + \mathcal{O}(p^{i_* - 1 - |I|}) \tag{by Claim~\ref{claim:eventindependence} for $n-l,k\notin I$} \\
 
    &=\sum_{\substack{l,k=1\\k,n-l\notin I}}^{i_*-1}
 
    P_{I'_{\mathrm{in}(n-l,k)}}(\mathrm{ZP}^{[n-l,k]}) \cdot
 
    P_{I_{\mathrm{out}(n-l,k)}}(\mathrm{NZ}^{(n-l,k)})
 
    + \mathcal{O}(p^{i_* - 1 - |I|})
 
\end{align*}
 
Now we are supposed to use the induction step, but this is where I got stuck.
 

	
 

	
 
\begin{definition}[Connected patches]
 
	Let $\mathcal{P}\subset 2^{\mathbb{Z}}$ be a finite system of finite subsets of $\mathbb{Z}$. We say that the patch set of a resample sequence is $\mathcal{P}$,
 
	if the connected components of the vertices that have ever become $0$ are exactly the elements of $\mathcal{P}$. We denote by $A^{(\mathcal{P})}$ the event that the set of patches is $\mathcal{P}$. For a patch $P$ let $A^{(P)}=\bigcup_{\mathcal{P}:P\in \mathcal{P}}A^{(\mathcal{P})}$.
 
\end{definition} 
 
Note by Tom: So $A^{(\mathcal{P})}$ is the event that the set of all patches is \emph{exactly} $\mathcal{P}$, whereas $A^{(P)}$ is the event that one of the patches equals $P$, but there can be other patches as well.
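As an illustration (with arbitrarily chosen vertices): if the set of vertices that ever became $0$ during a run is $\{1,2,3,7,8\}$, then the patch set of that run is $\mathcal{P}=\{\{1,2,3\},\{7,8\}\}$; such a run belongs to $A^{(\{1,2,3\})}$ and to $A^{(\{7,8\})}$, but not to $A^{(\{1,2\})}$, since $\{1,2\}$ is not one of the connected components.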
 

	
 
\begin{definition}[Conditional expectations]
 
	Let $S\subset\mathbb{Z}$ be a finite slot configuration, and for $f\in\{0,1'\}^{|S|}$ let $I:=S(f)$ be the set of vertices filled with particles. 
 
	Then we define
 
	$$R_I:=\mathbb{E}[\#\{\text{resamplings when started from initial state }I\}].$$
 
	For a patch set $\mathcal{P}$ and some $P\in\mathcal{P}$ we define
 
	$$R^{(\mathcal{P})}_I:=\mathbb{E}[\#\{\text{resamplings when started from initial state }I\}|A^{(\mathcal{P})}]$$	
 
	and 
 
	$$R^{(P,\mathcal{P})}_I:=\mathbb{E}[\#\{\text{resamplings inside }P\text{ when started from initial state }I\}|A^{(\mathcal{P})}]$$		
 
	finally
 
	$$R^{(P)}_I:=\mathbb{E}[\#\{\text{resamplings inside }P\text{ when started from initial state }I\}|A^{(P)}].$$	
 
\end{definition} 
 

	
 
    Similarly to Mario's proof, I use the observation that 
 
    \begin{align*}
 
    R^{(n)} &= \frac{1}{n}\sum_{b\in\{0,1,1'\}^{n}} \rho_b \; R_{\bar{b}}(p)\\
 
    &= \frac{1}{n}\sum_{S\subseteq [n]}\sum_{f\in\{0,1'\}^{|S|}}\rho_{S(f)} R_{S(f)}\\
 
    &= \frac{1}{n}\sum_{S\subseteq [n]}\sum_{f\in\{0,1'\}^{|S|}}\rho_{S(f)}
 
    \sum_{\mathcal{P}\text{ patches}} \mathbb{P}_{S(f)}(A^{(\mathcal{P})}) R^{(\mathcal{P})}_{S(f)} \\
 
    &= \frac{1}{n}\sum_{S\subseteq [n]}\sum_{f\in\{0,1'\}^{|S|}}\rho_{S(f)}
 
    \sum_{\mathcal{P}\text{ patches}} \mathbb{P}_{S(f)}(A^{(\mathcal{P})}) \sum_{P\in\mathcal{P}} R^{(P,\mathcal{P})}_{S(f)}\\
 
    &= \frac{1}{n}\sum_{S\subseteq [n]}\sum_{f\in\{0,1'\}^{|S|}}\rho_{S(f)} 
 
    \sum_{\mathcal{P}\text{ patches}} \mathbb{P}_{S(f)}(A^{(\mathcal{P})}) \sum_{P\in\mathcal{P}} R^{(P)}_{S(f)\cap P}\tag{by Claim~\ref{claim:eventindependence}}\\ 
 
    &= \frac{1}{n}\sum_{S\subseteq [n]}\sum_{f\in\{0,1'\}^{|S|}}\rho_{S(f)} 
 
    \sum_{P\text{ patch}} R^{(P)}_{S(f)\cap P}\sum_{\mathcal{P}:P\in\mathcal{P}}\mathbb{P}_{S(f)}(A^{(\mathcal{P})})\\     
 
    &= \frac{1}{n}\sum_{S\subseteq [n]}\sum_{P\text{ patch}}\sum_{f\in\{0,1'\}^{|S|}}
 
     \rho_{S(f)} R^{(P)}_{S(f)\cap P}\mathbb{P}_{S(f)}(A^{(P)}) \tag{by definition}\\        
 
    &= \frac{1}{n}\sum_{S\subseteq [n]}\sum_{P\text{ patch}}\sum_{f\in\{0,1'\}^{|S|}}
 
    \rho_{S(f)} R^{(P)}_{S(f)\cap P}\mathbb{P}_{S(f)\cap P}(A^{(P)})\mathbb{P}_{S(f)\cap \overline{P}}(\overline{Z^{(P_{\min}-1)}}\cap\overline{Z^{(P_{\max}+1)}}) \tag{remember Definition~\ref{def:visitingResamplings} and use Claim~\ref{claim:eventindependence}}\\    
 
    &= \frac{1}{n}\sum_{S\subseteq [n]} \sum_{P\text{ patch}} \sum_{f_P\in\{0,1'\}^{|S\cap P|}}
 
    \rho_{S(f_P)} R^{(P)}_{S(f_P)} \mathbb{P}_{S(f_P)}(A^{(P)})
 
    \sum_{f_{\overline{P}}\in\{0,1'\}^{|S\cap \overline{P}|}} \rho_{S(f_{\overline{P}})} \mathbb{P}_{S(f_{\overline{P}})}(\overline{Z^{(P_{\min}-1)}}\cap\overline{Z^{(P_{\max}+1)}}) \\   
 
	&= \frac{1}{n}\sum_{S\subseteq [n]}\sum_{P\text{ patch}}\sum_{f_P\in\{0,1'\}^{|S\cap P|}}
 
	\rho_{S(f_P)}
 
        \sum_{f_{\overline{P}}\in\{0,1'\}^{|S\cap \overline{P}|}}\rho_{S(f_{\overline{P}})}\mathcal{O}(p^{|S_{><}|}) \tag{see below} \\
 
	&= \frac{1}{n}\sum_{S\subseteq [n]}\mathcal{O}(p^{|S|+|S_{><}|}).
 
    \end{align*}
 
\begin{figure}
 
	\begin{center}
 
    	\includegraphics{diagram_patches.pdf}
 
    \end{center}
 
    \caption{\label{fig:patches} Illustration of last steps of the proof.}
 
\end{figure}
 
    The penultimate step (tagged ``see below'') can be seen by case separation as follows: If $S\subseteq P$ then there is no splitting into $S\cap P$ and $S\setminus P$, and we already have $\mathbb{P}_{S(f_P)}(A^{(P)})=\mathcal{O}(p^{|S_{><}|})$, simply because the patch $P$ must be filled with zeroes that were not yet in $S$, which requires at least $|S_{><}|$ resampled zeroes. For the more general case, assume that $S$ extends beyond $P$ on both sides. This is illustrated in Figure \ref{fig:patches}. We will focus on the following sum that appeared in the equations above:
 
    \begin{align*}
 
        \sum_{f_{\overline{P}}\in\{0,1'\}^{|S \cap \overline{P}|}} \rho_{S(f_{\overline{P}})} \mathbb{P}_{S(f_{\overline{P}})}(\overline{Z^{(P_{\min}-1)}}\cap\overline{Z^{(P_{\max}+1)}})
 
    \end{align*}
 
    By Lemma \ref{lemma:eventindependence} we can split this sum into two parts: the part to the left of $P$ and the part to the right of $P$. Define $S_\mathrm{left}=S\cap[S_\mathrm{min},P_{\mathrm{min}}-1]$ and $S_\mathrm{right}=S\cap[P_{\mathrm{max}}+1,S_\mathrm{max}]$, so that $S\cap\overline{P} = S_\mathrm{left} \cup S_\mathrm{right}$. These are also illustrated in Figure \ref{fig:patches}. Then we have
 
    \begin{align*}
 
        \mathbb{P}_{S(f_{\overline{P}})}(\overline{Z^{(P_{\min}-1)}}\cap\overline{Z^{(P_{\max}+1)}})
 
        &= \mathbb{P}_{S(f_{\mathrm{left}})}(\overline{Z^{(P_{\min}-1)}}) \;\cdot\; \mathbb{P}_{S(f_{\mathrm{right}})}(\overline{Z^{(P_{\max}+1)}})
 
    \end{align*}
 
    and hence we can split the sum. Without loss of generality we now only consider the `right' part of the sum:
 
    \begin{align*}
 
        \sum_{f\in\{0,1'\}^{|S_\mathrm{right}|}} \rho_{S_\mathrm{right}(f)} \mathbb{P}_{S_\mathrm{right}(f)}(\overline{Z^{(P_{\max}+1)}})
 
    \end{align*}
 
    Now further split this sum over the value of $f$ at position $S_\mathrm{max}$:
 
    \begin{align*}
 
        \sum_{f\in\{0,1'\}^{|S_\mathrm{right}\setminus\{S_\mathrm{max}\}|}} \sum_{f'\in\{0,1'\}}
 
        \rho_{S_\mathrm{right}(f\,f')} \mathbb{P}_{S_\mathrm{right}(f\,f')}(\overline{Z^{(P_{\max}+1)}})
 
    \end{align*}
 
    and we use the definition of $\rho$ for the sum over $f'$:
 
    \begin{align*}
 
         \sum_{f\in\{0,1'\}^{|S_\mathrm{right}\setminus\{S_\mathrm{max}\}|}}
 
        \rho_{S_\mathrm{right}(f)} \left(p \mathbb{P}_{S_\mathrm{right}(f\, 0)}(\overline{Z^{(P_{\max}+1)}}) + (-p) \mathbb{P}_{S_\mathrm{right}(f\, 1)}(\overline{Z^{(P_{\max}+1)}}) \right)
 
    \end{align*}
 
    Now we recognize the setup of Lemma~\ref{lemma:probIndep} with $I=S_\mathrm{right}(f\,0)$ and $I'=S_\mathrm{right}(f\,1)$. The lemma yields
 
    \begin{align*}
 
        \mathbb{P}_{S_\mathrm{right}(f\, 0)}(\overline{Z^{(P_{\max}+1)}}) &= \mathbb{P}_{S_\mathrm{right}(f\, 1)}(\overline{Z^{(P_{\max}+1)}}) + \mathcal{O}(p^{S_\mathrm{max}-(P_{\mathrm{max}}+1)+1-|S_\mathrm{right}|}) \\
 
        &= \mathbb{P}_{S_\mathrm{right}(f\, 1)}(\overline{Z^{(P_{\max}+1)}}) + \mathcal{O}(p^{S_\mathrm{max}-P_{\mathrm{max}}-|S_\mathrm{right}|}) .
 
    \end{align*}
 
    Entering this back into the sum gives
 
    \begin{align*}
 
         \sum_{f\in\{0,1'\}^{|S_\mathrm{right}\setminus\{S_\mathrm{max}\}|}}
 
        \rho_{S_\mathrm{right}(f)} \mathcal{O}(p^{S_\mathrm{max}-P_{\mathrm{max}}-|S_\mathrm{right}|+1})
 
         = \sum_{f\in\{0,1'\}^{|S_\mathrm{right}|}}
 
        \rho_{S_\mathrm{right}(f)} \mathcal{O}(p^{S_\mathrm{max}-P_{\mathrm{max}}-|S_\mathrm{right}|})
 
    \end{align*}
 
    One can do the same for the `left' part, which gives a term $\mathcal{O}(p^{P_\mathrm{min}-S_{\mathrm{min}}-|S_\mathrm{left}|})$. The part of $S$ that was within $P$ gives $\mathbb{P}_{S(f_P)}(A^{(P)})=\mathcal{O}(p^{P_\mathrm{max}-P_\mathrm{min}+1-|S\cap P|})$. Combining these three factors yields
 
    \begin{align*}
 
        (\textrm{left part})(P\textrm{ part})(\textrm{right part}) &=
 
\mathcal{O}(p^{P_\mathrm{min}-S_{\mathrm{min}}-|S_\mathrm{left}|}) \cdot \mathcal{O}(p^{P_\mathrm{max}-P_\mathrm{min}+1-|S\cap P|}) \cdot \mathcal{O}(p^{S_\mathrm{max}-P_{\mathrm{max}}-|S_\mathrm{right}|}) \\
 
        &= \mathcal{O}(p^{S_\mathrm{max}-S_\mathrm{min}+1-|S_\mathrm{left}\cup S_\mathrm{right}\cup (S\cap P)|})\\
 
        &= \mathcal{O}(p^{S_\mathrm{max}-S_\mathrm{min}+1-|S|})
 
        = \mathcal{O}(p^{|S_{><}|})
 
    \end{align*}
 
    as required. This finishes the proof.
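    (For the last identification, note that, analogously to $I_{><}$ above, $S_{><}=[S_\mathrm{min}+1,S_\mathrm{max}-1]\setminus S$; this set has $S_\mathrm{max}-S_\mathrm{min}-1$ candidate vertices, of which $|S|-2$ lie in $S$, so indeed $|S_{><}| = S_\mathrm{max}-S_\mathrm{min}+1-|S|$.)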
 

	
 
    ~
 

	
 
	I think the same arguments would translate to the torus and other translationally invariant spaces, so we could go higher dimensional as Mario suggested. Then I think one would need to replace $|S_{><}|$ by the minimal number $k$ such that there is a set $C$ of size $k$ for which $S\cup C$ is connected. I am not entirely sure how to generalise Lemma~\ref{lemma:probIndep} though, which is of key importance in the present proof.
 
    
 
    Questions:
 
    \begin{itemize}
 
    	\item Is this proof finally flawless?
 
    	\item In view of this proof, can we better characterise $a_k^{(k+1)}$?
 
    	\item Why did Mario's and Tom's simulation show that for fixed $C$ the contribution coefficients have constant sign? Is it relevant for proving \ref{it:pos}-\ref{it:geq}?
 
    	\item Can we prove the conjectured formula for $a_k^{(3)}$?		
 
    \end{itemize} 
 
    
 
\begin{comment}
 
    \subsection{Sketch of the (false) proof of the linear bound \ref{it:const}}
 
    Let us interpret $[n]$ as the vertices of a length-$n$ cycle, and interpret operations on vertices mod $n$ s.t. $n+1\equiv 1$ and $1-1\equiv n$.
 
    %\begin{definition}[Resample sequences]
 
    %	A sequence of indices $(r_\ell)=(r_1,r_2,\ldots,r_k)\in[n]^k$ is called resample sequence if our procedure performs $k$ consequtive resampling, where the first resampling of the procedure resamples around the mid point $r_1$ the second around $r_2$ and so on. Let $RS(k)$ the denote the set of length $k$ resample sequences, and let $RS=\cup_{k\in\mathbb{N}}RS(k)$.
 
    %\end{definition}
 
    %\begin{definition}[Constrained resample sequence]\label{def:constrainedRes}
 
    %	Let $C\subseteq[n]$ denote a slot configuration, and let $a\in\{\text{res},\neg\text{res}\}^{n-|C|}$, where the elements correspond to labels ``resampled" vs. ``not resampled" respectively. 
 
    %	For $j\in[n-|C|]$ let $i_j$ denote the $j$-th index in $[n]\setminus C$.
 
    %	We define the set $A^{(C,a)}\subseteq RS$ as the set of resample sequences $(r_\ell)$ such that for all $j$ which has $a_j=\text{res}$ we have that $i_j$ appears in $(r_\ell)$ but for $j'$-s which have $a_{j'}=\neg\text{res}$ we have that $i_{j'}$ never appears in $(r_\ell)$. 
 
    %\end{definition}    
 
    \begin{definition}[Conditional expected number of resamples]
 
    	For a slot configuration $C\subseteq[n]$ and $a\in\{\!\text{ever},\text{ never}\}^{n-|C|}$ we define the event $A^{(C,a)}:=\bigwedge_{j\in[n-|C|]}\{i_j\text{ has }a_j\text{ become }0\text{ before reaching }\mathbf{1}\}$,
 
    	where $i_j$ is the $j$-th vertex of $[n]\setminus C$.
 
    	Then we also define
 
    	$$R^{(C,a)}_b:=\mathbb{E}[\#\{\text{resamplings when started from initial state }b\}|A^{(C,a)}].$$
 
    \end{definition}     
 
    
 
    As in Mario's proof I use the observation that 
 
    \begin{align*}
 
    R^{(n)}(p) &= \frac{1}{n}\sum_{b\in\{0,1,1'\}^{n}} \rho_b \; R_{\bar{b}}(p)\\
 
    &= \frac{1}{n}\sum_{C\subseteq [n]}\sum_{f\in\{0,1'\}^{|C|}} \rho_{C(f)} R_{C(f)}(p)\\
 
    &= \frac{1}{n}\sum_{C\subseteq [n]}\sum_{f\in\{0,1'\}^{|C|}}\sum_{a\in\{\!\text{ever},\text{ never}\}^{n-|C|}} \rho_{C(f)} R^{{(C,a)}}_{C(f)}(p)P_{C(f)}(A^{(C,a)})\\
 
    &= \frac{1}{n}\sum_{C\subseteq [n]}\sum_{a\in\{\!\text{ever},\text{ never}\}^{n-|C|}} \sum_{f\in\{0,1'\}^{|C|}} \rho_{C(f)} R^{{(C,a)}}_{C(f)}(p)P_{C(f)}(A^{(C,a)}), 
 
    \end{align*}
 
    where we denote by $C\subseteq[n]$ a slot configuration, whereas $C(f)$ denotes the slots of $C$ filled with the particles described by $f$, while all other locations in $[n]\setminus C$ are set to $1$. 
 
    When we write $R_{C(f)}$ we mean $R_{C(\bar{f})}$, i.e., replace $1'$-s with $1$-s. Since the notation is already heavy we dropped the bar from $f$, as it is clear from the context. Finally by $P_{C(f)}(A^{(C,a)})$ we denote the probability that the event $A^{(C,a)}$ holds.
 
    
 
    As in the definition above, for $j\in[n-|C|]$ let $i_j$ denote the $j$-th index in $[n]\setminus C$.
 
    Suppose that $a$ is such that there are two indices $j_1\neq j_2$ such that 
 
    $a_{j_1}=\text{never}=a_{j_2}$, moreover the sets $\{i_{j_1}+1,\ldots, i_{j_2}-1\}$ and $\{i_{j_2}+1,\ldots, i_{j_1}-1\}$ partition $C$ non-trivially, and we denote by $C_l$,$C_r$ the corresponding partitions. 
 
    I wanted to prove that
 
    \begin{equation}\label{eq:conditionalCancellation}
 
		\sum_{f\in\{0,1'\}^{|C|}} \rho_{C(f)} R^{{(C,a)}}_{C(f)}(p)=0,
 
    \end{equation}    
 
    based on the observation that for all $f\in\{0,1'\}^{|C|}$ we have 
 
    that 
 
    \begin{equation}\label{eq:keyIndependce}
 
    R^{{(C,a)}}_{C(f)}(p)=R^{{(C_l,a_l)}}_{C_l(f_l)}(p)+R^{{(C_r,a_r)}}_{C_r(f_r)}(p),
 
    \end{equation}
 
    where $f_l\in\{0,1'\}^{|C_l|}$ is defined by taking only the indices (and values) of $f$ corresponding to vertices of $C_l$, and $a_l\in\{\!\text{ever},\text{ never}\}^{n-|C_l|}$ is defined such that $a$ and $a_l$ agree on the vertices where $a$ is defined, while on the vertices where $a$ is not defined, i.e., the vertices of $C_r$, we define $a_l$ to contain ``never". We define things analogously for $f_r$ and $a_r$. 
 
    
 
    The reason why \eqref{eq:keyIndependce} holds is that, as before, the two halves of the cycle are conditionally independent because neither $i_{j_1}$ nor $i_{j_2}$ can become $0$. To be more precise, each resample sequence $\left(C(f)\rightarrow \mathbf{1} \right)\in A^{(C,a)}$ can be uniquely decomposed into resample sequences $\left(C_l(f_l)\rightarrow \mathbf{1}\right)\in A^{(C_l,a_l)}$ and $\left(C_r(f_r)\rightarrow \mathbf{1}\right)\in A^{(C_r,a_r)}$. The set of resample sequences $\{r\}$ which have decomposition $(r_l,r_r)$ has total probability equal to the product of the probabilities of $r_l$ and $r_r$, as shown in the proof of Claim~\ref{claim:expectationsum}. This proves that the set of all resample sequences $\left(C(f)\rightarrow \mathbf{1}\right)\in A^{(C,a)}$ can, for our purposes, be viewed as a product set with a product probability distribution. Therefore the halves can be treated independently, and so the expectation values just add up. 
 
    
 
    From here I wanted to mimic Mario's proof:
 
    \begin{align*}
 
    \sum_{f\in\{0,1'\}^{|C|}} \rho_{C(f)} R^{{(C,a)}}_{C(f)}(p)&=
 
    \sum_{f_l\in\{0,1'\}^{|C_l|}} \sum_{f_r\in\{0,1'\}^{|C_r|}}  (-1)^{|f_l|+|f_r|}p^{|C_l|+|C_r|} \left( R^{{(C_l,a_l)}}_{C_l(f_l)}(p) + R^{{(C_r,a_r)}}_{C_r(f_l)}(p) \right)\\
 
    &= p^{|C|}\sum_{f_l\in\{0,1'\}^{|C_l|}} (-1)^{|f_l|} R^{{(C_l,a_l)}}_{C_l(f_l)}(p) \sum_{f_r\in\{0,1'\}^{|C_r|}} (-1)^{|f_r|} \\
 
    &\quad + p^{|C|}\sum_{f_r\in\{0,1'\}^{|C_r|}} (-1)^{|f_r|} R^{{(C_r,a_r)}}_{C_r(f_r)}(p) \sum_{f_l\in\{0,1'\}^{|C_l|}} (-1)^{|f_l|} \\
 
    &= 0.
 
    \end{align*}
 
    The nasty issue which I did not realise is that the missing term $P_{C(f)}(A^{(C,a)})$ is non-constant: even though the event $A^{(C,a)}$ is independent of $f$, the probability $P_{C(f)}(A^{(C,a)})=P_{C(f_l)}(A^{(C_l,a_l)})\cdot P_{C(f_r)}(A^{(C_r,a_r)})$ is not, and so the above breaks down.
 
    
 
    Observe that if \eqref{eq:conditionalCancellation} held for configurations that cut the slot configuration into two halves, it would imply that the only non-zero contribution comes from pairs $(C,a)$ such that $C\cup\{i_j:a_j=\text{ever}\}$ is connected. This is because if this set is not connected, then either we can cut $C$ into two halves non-trivially along ``never" vertices, or there is an island of $\text{ever}$ vertices separated from any slots, and therefore from any $0$-s. This latter case has zero contribution since we cannot set these indices to $0$ without reaching them by some resamplings, and thereby building a path of $0$-s leading there.
 
    
 
    If $|C\cup\{i_j:a_j=\text{ever}\}|\geq k+1$ then all contributions have a power of at least $k+1$ in $p$, since $(C,a)$ requires the prior appearance of at least $k+1$ particles. If $n\geq k+1$ then all $(C,a)$ such that $|C\cup\{i_j:a_j=\text{ever}\}|\leq k$ appear exactly $n$ times, since $(C,a)$ cannot be translationally invariant. Moreover, the quantity $R^{{(C,a)}}_{C(f)}(p)$ is independent of $n$ due to the conditioning that every resampling happens on a connected component of length at most $k<n$. This would prove that $a_k^{(n)}$ is constant for $n\geq k+1$. The same arguments would directly translate to the torus and other translationally invariant objects, so we could go higher dimensional as Mario suggested.
 
    
 
    Questions:
 
    \begin{itemize}
 
    	\item Is it possible to somehow fix this proof?
 
    	\item In view of this (false) proof, can we better characterise $a_k^{(k+1)}$?
 
    	\item Why did Mario's and Tom's simulation show that for fixed $C$ the contribution coefficients have constant sign? Is it relevant for proving \ref{it:pos}-\ref{it:geq}?
 
    	\item Can we prove the conjectured formula for $a_k^{(3)}$?		
 
    \end{itemize} 
 

	
 
\begin{comment}
 
    \subsection{Sketch of the proof of the linear bound \ref{it:const}}
 
    Let us interpret $[n]$ as the vertices of a length-$n$ cycle, and interpret operations on vertices mod $n$ s.t. $n+1\equiv 1$ and $1-1\equiv n$.
 
    \begin{definition}[Resample sequences]
 
		A sequence of indices $(r_\ell)=(r_1,r_2,\ldots,r_k)\in[n]^k$ is called a resample sequence if our procedure performs $k$ consecutive resamplings, where the first resampling of the procedure resamples around the mid point $r_1$, the second around $r_2$, and so on. Let $RS(k)$ denote the set of length-$k$ resample sequences, and let $RS=\cup_{k\in\mathbb{N}}RS(k)$.
 
    \end{definition}
 
    \begin{definition}[Constrained resample sequence]\label{def:constrainedRes}
 
    	Let $C\subseteq[n]$ denote a slot configuration, and let $a\in\{\text{res},\neg\text{res}\}^{n-|C|}$, where the elements correspond to labels ``resampled" vs. ``not resampled" respectively. 
 
    	For $j\in[n-|C|]$ let $i_j$ denote the $j$-th index in $[n]\setminus C$.
 
		We define the set $A^{(C,a)}\subseteq RS$ as the set of resample sequences $(r_\ell)$ such that for all $j$ which has $a_j=\text{res}$ we have that $i_j$ appears in $(r_\ell)$ but for $j'$-s which have $a_{j'}=\neg\text{res}$ we have that $i_{j'}$ never appears in $(r_\ell)$. 
 
    \end{definition}    
 
    \begin{definition}[Expected number of resamples]
 
		For $b\in\{0,1\}^n$ we define 
 
		$$R_b=\mathbb{E}[\#\{\text{resamplings when started from initial state }b\}],$$
 
		and for $(C,a)$ as in the previous definition we also define
 
		$$R^{(C,a)}_b=\mathbb{E}[\#\{\text{resamplings }\in A^{(C,a)} \text{ when started from initial state }b\}].$$
 
		Here we mean by the latter that after each resampling we check whether the sequence of resamplings so far is in $A^{(C,a)}$, if yes we count it, otherwise we do not count.
 
    \end{definition}     
 
    
 
    As in Mario's proof I use the observation that 
 
    \begin{align*}