Changeset - d3ea27296b0f
András Gilyén - 8 years ago 2017-09-06 06:11:37
gilyen@cwi.nl
cycle lemma
1 file changed with 58 insertions and 0 deletions:
0 comments (0 inline, 0 general)
main.tex
 
@@ -637,384 +637,442 @@ Here, I (Tom) tried to set do the same Lemma but for the circle instead of the i
 
    \end{center}
 
    Note that by Claim~\ref{claim:eventindependence} we have
 
    \begin{align*}
 
        \P_{I}(\mathrm{ZP}^{[-l,r]_0}) = \P_{I \cap [-l,r]_0}(\mathrm{ZP}^{[-l,r]_0}) \;\cdot\; \P_{I\setminus [-l,r]_0}(\NZ{-l},\NZ{r})
 
    \end{align*}
 
    We have $i_*\in I \setminus[-l,r]_0$, and $I\cap[-l,r]_0 = I' \cap [-l,r]_0$. Define $J=I\setminus[-l,r]_0$ and $J'=I'\setminus[-l,r]_0$. We have $|J|<|I|$ so we can apply the induction hypothesis to $J$:
 
    \begin{align*}
 
        \P_{J}(\NZ{-l},\NZ{r})
 
        &=
 
        1
 
        - \P_{J}(\Z{-l},\NZ{r})
 
        - \P_{J}(\Z{r})
 
        \tag{partition of all events} \\
 
        &=
 
        1
 
        - \P_{J'}(\Z{-l},\NZ{r})
 
        - \P_{J'}(\Z{r}) \\
 
        &\quad + \mathcal{O}(p^{\min\left( \dist{r}(i_*,-l), \dist{-l}(i_*,r) \right) +1-|J|})
 
        + \mathcal{O}(p^{d(i_*,r)+1-|J|}) \\
 
        &=
 
        \P_{J'}(\NZ{-l},\NZ{r})
 
        + \mathcal{O}(p^{\min\left( \dist{r}(i_*,-l) , d(i_*,r)\right)+1-|J|})
 
    \end{align*}
 
    Note that the event $\mathrm{ZP}^{[-l,r]_0}$ contains $l+r-1$ zeroes, so $\P_{I \cap [-l,r]_0}(\mathrm{ZP}^{[-l,r]_0}) = \mathcal{O}(p^{l+r-1-|I\cap[-l,r]_0|})$. This means
 
    \begin{align*}
 
        \P_{I}(\mathrm{ZP}^{[-l,r]_0})
 
        &= \P_{I' \cap [-l,r]_0}(\mathrm{ZP}^{[-l,r]_0})
 
        \left( \P_{I' \setminus [-l,r]_0}(\NZ{-l},\NZ{r}) + \mathcal{O}(p^{\min\left( \dist{r}(i_*,-l) , d(i_*,r)\right)+1-|J|}) \right) \\

        &= \P_{I' \cap [-l,r]_0}(\mathrm{ZP}^{[-l,r]_0}) \;\cdot\; \P_{I'\setminus [-l,r]_0}(\NZ{-l},\NZ{r}) \\
 
        &\qquad + \mathcal{O}(p^{\min\left( \dist{r}(i_*,-l) , d(i_*,r)\right)+1-|J| + l+r-1-|I\cap[-l,r]_0|}) \\
 
        &= \P_{I'}(\mathrm{ZP}^{[-l,r]_0})
 
        + \mathcal{O}(p^{\min\left( \dist{r}(i_*,-l) , d(i_*,r)\right)+l+r-|I|})
 
    \end{align*}
 
    where we used Claim~\ref{claim:eventindependence} again.
 
    Case separation shows that
 
    $$\min\left( \dist{r}(i_*,-l) , d(i_*,r)\right) + l +r \geq d(i_*,0) + 1$$
 
    for $l,r\geq 1$, which proves the claim.
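    For concreteness, here is a sketch of that case separation, under the (assumed) reading that $d$ denotes distance on the cycle and that $\dist{r}(x,y)$ is the length of the arc from $x$ to $y$ avoiding $r$; this should be checked against the definitions above.
    \begin{itemize}
    	\item If the minimum is $d(i_*,r)$: by the triangle inequality $d(i_*,0)\leq d(i_*,r)+d(r,0)\leq d(i_*,r)+r$, so $d(i_*,r)+l+r\geq d(i_*,0)+l\geq d(i_*,0)+1$ since $l\geq 1$.
    	\item If the minimum is $\dist{r}(i_*,-l)$: concatenating the arc from $i_*$ to $-l$ avoiding $r$ with the $l$-step arc from $-l$ to $0$ gives a walk from $i_*$ to $0$, so $d(i_*,0)\leq \dist{r}(i_*,-l)+l$, and hence $\dist{r}(i_*,-l)+l+r\geq d(i_*,0)+r\geq d(i_*,0)+1$ since $r\geq 1$.
    \end{itemize}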
 

	
 
    The first equality that we have to prove now follows from the fact that the ``zeroes patch'' events are a partition of $\Z{0}$:
 
    \begin{align*}
 
        \P_{I}(\Z{0})
 
        &=\sum_{\substack{l,r\geq 1\\l+r\leq n}}
 
        \P_I(\mathrm{ZP}^{[-l,r]_0})
 
        \tag{the events are a partition of $\Z{0}$}\\
 
        &=\sum_{\substack{l,r\geq 1\\l+r\leq n}}
 
        \P_{I'}(\mathrm{ZP}^{[-l,r]_0})
 
        + \mathcal{O}(p^{d(i_*,0)+1-|I|})
 
        \tag{by claim} \\
 
        &= \P_{I'}(\Z{0}) + \mathcal{O}(p^{d(i_*,0)+1-|I|})
 
    \end{align*}
 
    Similarly, we have
 
    \begin{align*}
 
        \P_{I}(\Z{0} , \NZ{s})
 
        &=\sum_{l=1}^{n-s}\sum_{r=1}^{s}
 
        \P_{I}(\mathrm{ZP}^{[-l,r]_0},\NZ{s})
 
        \tag{partition of $\Z{0}$}\\
 
        &=\sum_{l=1}^{n-s}\sum_{r=1}^{i_*-1}
 
        \P_{I}(\mathrm{ZP}^{[-l,r]_0},\NZ{s})
 
        +\mathcal{O}(p^{i_*+1-|I|}) \\
 
        &=\sum_{l=1}^{n-s}\sum_{r=1}^{i_*-1}
 
        \P_{I\cap[s,r]_0}(\mathrm{ZP}^{[-l,r]_0},\NZ{s}) \cdot
 
        \P_{I\setminus [s,r]_0}(\NZ{r},\NZ{s})
 
        +\mathcal{O}(p^{i_*+1-|I|})
 
        \tag{Claim~\ref{claim:eventindependence}}\\
 
        &=\sum_{l=1}^{n-s}\sum_{r=1}^{i_*-1}
 
        \P_{I'\cap[s,r]_0}(\mathrm{ZP}^{[-l,r]_0},\NZ{s}) \cdot
 
        \P_{I\setminus [s,r]_0}(\NZ{r},\NZ{s})
 
        +\mathcal{O}(p^{i_*+1-|I|})
 
        \tag{$i_*\in I \setminus[s,r]_0$}\\
 
        &=\sum_{l=1}^{n-s}\sum_{r=1}^{i_*-1}
 
        \P_{I'\cap[s,r]_0}(\mathrm{ZP}^{[-l,r]_0},\NZ{s}) \cdot
 
        \P_{I'\setminus [s,r]_0}(\NZ{r},\NZ{s}) \\
 
        &\qquad +\mathcal{O}(p^{\min\left( \dist{r}(i_*,s) , d(i_*,r)\right)+l+r-|I|})
 
        +\mathcal{O}(p^{i_*+1-|I|})
 
        \tag{same argument as before}\\
 
        &=\sum_{l=1}^{n-s}\sum_{r=1}^{i_*-1}
 
        \P_{I'\cap[s,r]_0}(\mathrm{ZP}^{[-l,r]_0},\NZ{s}) \cdot
 
        \P_{I'\setminus [s,r]_0}(\NZ{r},\NZ{s}) \\
 
        &\qquad
 
        +\mathcal{O}(p^{\min\left( i_* , s-i_* \right) +1-|I|})
 
        \tag{case separation}\\
 
        &= \P_{I'}(\Z{0} , \NZ{s})
 
        +\mathcal{O}(p^{\min\left( i_* , s-i_* \right) +1-|I|})
 
    \end{align*}
 
    This finishes the proof.
 
\end{proof}
 

	
 
\begin{definition}[Connected patches]
 
	Let $\mathcal{P}\subset 2^{\mathbb{Z}}$ be a finite system of finite subsets of $\mathbb{Z}$. We say that the patch set of a resample sequence is $\mathcal{P}$

	if the connected components of the set of vertices that have ever become $0$ are exactly the elements of $\mathcal{P}$. We denote by $A^{(\mathcal{P})}$ the event that the set of patches is $\mathcal{P}$. For a patch $P$ let $A^{(P)}=\bigcup_{\mathcal{P}:P\in \mathcal{P}}A^{(\mathcal{P})}$.
 
\end{definition} 
 
Note by Tom: So $A^{(\mathcal{P})}$ is the event that the set of all patches is \emph{exactly} $\mathcal{P}$, whereas $A^{(P)}$ is the event that one of the patches is equal to $P$, but there can be other patches as well.
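To illustrate the definition: if for a given resample sequence the vertices that ever become $0$ are exactly $\{2,3,4\}\cup\{7\}$, then the patch set is $\mathcal{P}=\{\{2,3,4\},\{7\}\}$, so $A^{(\mathcal{P})}$ holds, and so do both $A^{(\{2,3,4\})}$ and $A^{(\{7\})}$.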
 

	
 
\begin{definition}[Conditional expectations]
 
	Let $S\subset\mathbb{Z}$ be a finite slot configuration, and for $f\in\{0,1'\}^{|S|}$ let $I:=S(f)$ be the set of vertices filled with particles. 
 
	Then we define
 
	$$R_I:=\mathbb{E}[\#\{\text{resamplings when started from initial state }I\}].$$
 
	For a patch set $\mathcal{P}$ and some $P\in\mathcal{P}$ we define
 
	$$R^{(\mathcal{P})}_I:=\mathbb{E}[\#\{\text{resamplings when started from initial state }I\}|A^{(\mathcal{P})}]$$	
 
	and 
 
	$$R^{(P,\mathcal{P})}_I:=\mathbb{E}[\#\{\text{resamplings inside }P\text{ when started from initial state }I\}|A^{(\mathcal{P})}],$$		

	and finally
 
	$$R^{(P)}_I:=\mathbb{E}[\#\{\text{resamplings inside }P\text{ when started from initial state }I\}|A^{(P)}].$$	
 
\end{definition} 
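A small remark that is used implicitly in the computation below (this is only a reading of the above definitions and should be checked against the precise description of the procedure): assuming every resampling is centred at a vertex that is currently $0$, each resampling takes place inside exactly one patch, so conditioned on $A^{(\mathcal{P})}$ the resamplings split over the patches and
$$R^{(\mathcal{P})}_I=\sum_{P\in\mathcal{P}}R^{(P,\mathcal{P})}_I.$$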
 

	
 
    Similarly to Mario's proof I use the observation that 
 
    \begin{align*}
 
    R^{(n)} &= \frac{1}{n}\sum_{b\in\{0,1,1'\}^{n}} \rho_b \; R_{\bar{b}}(p)\\
 
    &= \frac{1}{n}\sum_{S\subseteq [n]}\sum_{f\in\{0,1'\}^{|S|}}\rho_{S(f)} R_{S(f)}\\
 
    &= \frac{1}{n}\sum_{S\subseteq [n]}\sum_{f\in\{0,1'\}^{|S|}}\rho_{S(f)}
 
    \sum_{\mathcal{P}\text{ patches}} \mathbb{P}_{S(f)}(A^{(\mathcal{P})}) R^{(\mathcal{P})}_{S(f)} \\
 
    &= \frac{1}{n}\sum_{S\subseteq [n]}\sum_{f\in\{0,1'\}^{|S|}}\rho_{S(f)}
 
    \sum_{\mathcal{P}\text{ patches}} \mathbb{P}_{S(f)}(A^{(\mathcal{P})}) \sum_{P\in\mathcal{P}} R^{(P,\mathcal{P})}_{S(f)}\\

    &= \frac{1}{n}\sum_{S\subseteq [n]}\sum_{f\in\{0,1'\}^{|S|}}\rho_{S(f)} 

    \sum_{\mathcal{P}\text{ patches}} \mathbb{P}_{S(f)}(A^{(\mathcal{P})}) \sum_{P\in\mathcal{P}} R^{(P)}_{S(f)\cap P}\tag{by Claim~\ref{claim:eventindependence}}\\ 

    &= \frac{1}{n}\sum_{S\subseteq [n]}\sum_{f\in\{0,1'\}^{|S|}}\rho_{S(f)} 

    \sum_{P\text{ patch}} R^{(P)}_{S(f)\cap P}\sum_{\mathcal{P}:P\in\mathcal{P}}\mathbb{P}_{S(f)}(A^{(\mathcal{P})})\\     
 
    &= \frac{1}{n}\sum_{S\subseteq [n]}\sum_{P\text{ patch}}\sum_{f\in\{0,1'\}^{|S|}}
 
     \rho_{S(f)} R^{(P)}_{S(f)\cap P}\mathbb{P}_{S(f)}(A^{(P)}) \tag{by definition}\\        
 
    &= \frac{1}{n}\sum_{S\subseteq [n]}\sum_{P\text{ patch}}\sum_{f\in\{0,1'\}^{|S|}}
 
    \rho_{S(f)} R^{(P)}_{S(f)\cap P}\mathbb{P}_{S(f)\cap P}(A^{(P)})\mathbb{P}_{S(f)\cap \overline{P}}(\overline{Z^{(P_{\min}-1)}}\cap\overline{Z^{(P_{\max}+1)}}) \tag{remember Definition~\ref{def:visitingResamplings} and use Claim~\ref{claim:eventindependence}}\\    
 
    &= \frac{1}{n}\sum_{S\subseteq [n]} \sum_{P\text{ patch}} \sum_{f_P\in\{0,1'\}^{|S\cap P|}}
 
    \rho_{S(f_P)} R^{(P)}_{S(f_P)} \mathbb{P}_{S(f_P)}(A^{(P)})
 
    \sum_{f_{\overline{P}}\in\{0,1'\}^{|S\cap \overline{P}|}} \rho_{S(f_{\overline{P}})} \mathbb{P}_{S(f_{\overline{P}})}(\overline{Z^{(P_{\min}-1)}}\cap\overline{Z^{(P_{\max}+1)}}) \\   
 
	&= \frac{1}{n}\sum_{S\subseteq [n]}\sum_{P\text{ patch}}\sum_{f_P\in\{0,1'\}^{|S\cap P|}}
 
	\rho_{S(f_P)}
 
        \sum_{f_{\overline{P}}\in\{0,1'\}^{|S\cap \overline{P}|}}\rho_{S(f_{\overline{P}})}\mathcal{O}(p^{|S_{><}|}) \tag{see below} \\
 
	&= \frac{1}{n}\sum_{S\subseteq [n]}\mathcal{O}(p^{|S|+|S_{><}|}).
 
    \end{align*}
 
\begin{figure}
 
	\begin{center}
 
    	\includegraphics{diagram_patches.pdf}
 
    \end{center}
 
    \caption{\label{fig:patches} Illustration of the last steps of the proof.}
 
\end{figure}
 
    The penultimate step (tagged ``see below'') can be seen by case separation as follows: if $S\subseteq P$, then there is no splitting into $S\cap P$ and $S\setminus P$, and we already have $\mathbb{P}_{S(f_P)}(A^{(P)})=\mathcal{O}(p^{|S_{><}|})$, simply because the patch $P$ must be filled with zeroes that were not yet in $S$, which requires at least $|S_{><}|$ resampled zeroes. For the more general case, assume that $S$ extends beyond $P$ on both sides; this is illustrated in Figure~\ref{fig:patches}. We will focus on the following sum appearing in the computation above:
 
    \begin{align*}
 
        \sum_{f_{\overline{P}}\in\{0,1'\}^{|S \cap \overline{P}|}} \rho_{S(f_{\overline{P}})} \mathbb{P}_{S(f_{\overline{P}})}(\overline{Z^{(P_{\min}-1)}}\cap\overline{Z^{(P_{\max}+1)}})
 
    \end{align*}
 
    By Claim~\ref{claim:eventindependence} we can split this sum into two parts: the part to the left of $P$ and the part to the right of $P$. Define $S_\mathrm{left}=S\cap[S_\mathrm{min},P_{\mathrm{min}}-1]$ and $S_\mathrm{right}=S\cap[P_{\mathrm{max}}+1,S_\mathrm{max}]$, so that $S\cap\overline{P} = S_\mathrm{left} \cup S_\mathrm{right}$. These are also illustrated in Figure~\ref{fig:patches}. Then we have
 
    \begin{align*}
 
        \mathbb{P}_{S(f_{\overline{P}})}(\overline{Z^{(P_{\min}-1)}}\cap\overline{Z^{(P_{\max}+1)}})
 
        &= \mathbb{P}_{S(f_{\mathrm{left}})}(\overline{Z^{(P_{\min}-1)}}) \;\cdot\; \mathbb{P}_{S(f_{\mathrm{right}})}(\overline{Z^{(P_{\max}+1)}})
 
    \end{align*}
 
    and hence we can split the sum. Without loss of generality we now only consider the `right' part of the sum:
 
    \begin{align*}
 
        \sum_{f\in\{0,1'\}^{|S_\mathrm{right}|}} \rho_{S_\mathrm{right}(f)} \mathbb{P}_{S_\mathrm{right}(f)}(\overline{Z^{(P_{\max}+1)}})
 
    \end{align*}
 
    Now further split this sum over the value of $f$ at position $S_\mathrm{max}$:
 
    \begin{align*}
 
        \sum_{f\in\{0,1'\}^{|S_\mathrm{right}\setminus\{S_\mathrm{max}\}|}} \sum_{f'\in\{0,1'\}}
 
        \rho_{S_\mathrm{right}(f\,f')} \mathbb{P}_{S_\mathrm{right}(f\,f')}(\overline{Z^{(P_{\max}+1)}})
 
    \end{align*}
 
    and we use the definition of $\rho$ for the sum over $f'$:
 
    \begin{align*}
 
         \sum_{f\in\{0,1'\}^{|S_\mathrm{right}\setminus\{S_\mathrm{max}\}|}}
 
        \rho_{S_\mathrm{right}(f)} \left(p \mathbb{P}_{S_\mathrm{right}(f\, 0)}(\overline{Z^{(P_{\max}+1)}}) + (-p) \mathbb{P}_{S_\mathrm{right}(f\, 1)}(\overline{Z^{(P_{\max}+1)}}) \right)
 
    \end{align*}
 
    Now we recognize the setup of Lemma~\ref{lemma:probIndep} with $I=S_\mathrm{right}(f\,0)$ and $I'=S_\mathrm{right}(f\,1)$. The lemma yields
 
    \begin{align*}
 
        \mathbb{P}_{S_\mathrm{right}(f\, 0)}(\overline{Z^{(P_{\max}+1)}}) &= \mathbb{P}_{S_\mathrm{right}(f\, 1)}(\overline{Z^{(P_{\max}+1)}}) + \mathcal{O}(p^{S_\mathrm{max}-(P_{\mathrm{max}}+1)+1-|S_\mathrm{right}|}) \\
 
        &= \mathbb{P}_{S_\mathrm{right}(f\, 1)}(\overline{Z^{(P_{\max}+1)}}) + \mathcal{O}(p^{S_\mathrm{max}-P_{\mathrm{max}}-|S_\mathrm{right}|}) .
 
    \end{align*}
 
    Substituting this back into the sum gives
 
    \begin{align*}
 
         \sum_{f\in\{0,1'\}^{|S_\mathrm{right}\setminus\{S_\mathrm{max}\}|}}
 
        \rho_{S_\mathrm{right}(f)} \mathcal{O}(p^{S_\mathrm{max}-P_{\mathrm{max}}-|S_\mathrm{right}|+1})
 
         = \sum_{f\in\{0,1'\}^{|S_\mathrm{right}|}}
 
        \rho_{S_\mathrm{right}(f)} \mathcal{O}(p^{S_\mathrm{max}-P_{\mathrm{max}}-|S_\mathrm{right}|})
 
    \end{align*}
 
    Here we used that each $\rho_{S_\mathrm{right}(f\,f')}$ carries one extra factor of $\pm p$ compared to $\rho_{S_\mathrm{right}(f)}$, which absorbs the $+1$ in the exponent. One can do the same for the `left' part, which gives a term $\mathcal{O}(p^{P_\mathrm{min}-S_{\mathrm{min}}-|S_\mathrm{left}|})$. The part of $S$ that lies within $P$ gives $\mathbb{P}_{S(f_P)}(A^{(P)})=\mathcal{O}(p^{P_\mathrm{max}-P_\mathrm{min}+1-|S\cap P|})$. Combining these three factors yields
 
    \begin{align*}
 
        (\textrm{left part})(P\textrm{ part})(\textrm{right part}) &=
 
\mathcal{O}(p^{P_\mathrm{min}-S_{\mathrm{min}}-|S_\mathrm{left}|}) \cdot \mathcal{O}(p^{P_\mathrm{max}-P_\mathrm{min}+1-|S\cap P|}) \cdot \mathcal{O}(p^{S_\mathrm{max}-P_{\mathrm{max}}-|S_\mathrm{right}|}) \\
 
        &= \mathcal{O}(p^{S_\mathrm{max}-S_\mathrm{min}+1-|S_\mathrm{left}\cup S_\mathrm{right}\cup (S\cap P)|})\\
 
        &= \mathcal{O}(p^{S_\mathrm{max}-S_\mathrm{min}+1-|S|})
 
        = \mathcal{O}(p^{|S_{><}|})
 
    \end{align*}
 
    as required. This finishes the proof.
 

	
 
    ~
 

	
 
	I think the same arguments would translate to the torus and other translationally invariant spaces, so we could go higher dimensional as Mario suggested. Then I think one would need to replace $|S_{><}|$ by the minimal number $k$ such that there is a set $C$ of size $k$ for which $S\cup C$ is connected. I am not entirely sure how to generalise Lemma~\ref{lemma:probIndep} though, which is of key importance in the present proof.
 
    
 
    Questions:
 
    \begin{itemize}
 
    	\item Is this proof finally flawless?
 
    	\item In view of this proof, can we better characterise $a_k^{(k+1)}$?
 
    	\item Why did Mario's and Tom's simulation show that for fixed $C$ the contribution coefficients have constant sign? Is it relevant for proving \ref{it:pos}-\ref{it:geq}?
 
    	\item Can we prove the conjectured formula for $a_k^{(3)}$?		
 
    \end{itemize} 
 
    
 
	\begin{lemma} On the infinite chain,

		$$\P_I(Z^{0}\cap Z^{k})=\P_I(Z^{0})\P_I(Z^{k})+\mathcal{O}(p^{k-|I|+1})$$
 
	\end{lemma}   
 
    Note that using De Morgan's law and the inclusion-exclusion formula we can see that this is equivalent to saying:
 
    $$\P_I(NZ^{0}\cap NZ^{k})=\P_I(NZ^{0})\P_I(NZ^{k})+\mathcal{O}(p^{k-|I|+1})$$
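    Spelling this out: by De Morgan and inclusion--exclusion, $\P_I(NZ^{0}\cap NZ^{k})=1-\P_I(Z^{0})-\P_I(Z^{k})+\P_I(Z^{0}\cap Z^{k})$, while $\P_I(NZ^{0})\P_I(NZ^{k})=(1-\P_I(Z^{0}))(1-\P_I(Z^{k}))=1-\P_I(Z^{0})-\P_I(Z^{k})+\P_I(Z^{0})\P_I(Z^{k})$. Subtracting the two identities gives
    $$\P_I(NZ^{0}\cap NZ^{k})-\P_I(NZ^{0})\P_I(NZ^{k})=\P_I(Z^{0}\cap Z^{k})-\P_I(Z^{0})\P_I(Z^{k}),$$
    so the two $\mathcal{O}(p^{k-|I|+1})$ statements are indeed equivalent.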
 
    \begin{proof}
 
    We proceed by induction on $|I|$. For $|I|=0,1$ the statement is trivial.
 
    
 
    Now observe that:
 
    $$\P_I(Z^{0})=\sum_{P\text{ patch}\,:\,0\in P}\P_I(P\in\mathcal{P})$$
 
    $$\P_I(Z^{k})=\sum_{P\text{ patch}\,:\,k\in P}\P_I(P\in\mathcal{P})$$
 
    
 
    Suppose $|I|\geq 2$; then we proceed by induction similarly to the above:
 
    \begin{align*}
 
    &\P_I(Z^{0}\cap Z^{k})\\
 
    &=\sum_{\underset{P_l^{\max}+1<P_r^{\min}}{P_l,P_r\text{ patches}\,:\,0\in P_l,k\in P_r}}
 
    \P_I(P_l,P_r\in\mathcal{P})
 
    +\sum_{P\text{ patch}\,:\,0,k\in P}\P_I(P\in\mathcal{P})\\
 
    &=\sum_{\underset{P_l^{\max}+1<P_r^{\min}}{P_l,P_r\text{ patches}\,:\,0\in P_l,k\in P_r}}
 
    \P_I(P_l,P_r\in\mathcal{P})
 
    +\mathcal{O}(p^{k+1})\\
 
    &\overset{\text{Claim}~\ref{claim:eventindependence}}{=}\sum_{\underset{P_l^{\max}+1<P_r^{\min}}{P_l,P_r\text{ patches}\,:\,0\in P_l,k\in P_r}}
 
    \P_{I\cap P_l}(P_l\in\mathcal{P})
 
    \P_{> I <}(NZ^{P_l^{\max}+1}\cap NZ^{P_r^{\min}-1})
 
    \P_{I\cap P_r}(P_r\in\mathcal{P})
 
    +\mathcal{O}(p^{k+1})\\
 
    &\overset{\text{induction}}{=}\sum_{\underset{P_l^{\max}+1<P_r^{\min}}{P_l,P_r\text{ patches}\,:\,0\in P_l,k\in P_r}}
 
    \P_{I\cap P_l}(P_l\in\mathcal{P})
 
    \P_{> I <}(NZ^{P_l^{\max}+1})\P_{> I <}(NZ^{P_r^{\min}-1})
 
    \P_{I\cap P_r}(P_r\in\mathcal{P})
 
    +\mathcal{O}(p^{k-|I|+1})\\
 
    &\overset{\text{Lemma}~\ref{lemma:probIndep}}{=}\sum_{\underset{P_l^{\max}+1<P_r^{\min}}{P_l,P_r\text{ patches}\,:\,0\in P_l,k\in P_r}}
 
    \P_{I\cap P_l}(P_l\in\mathcal{P})
 
    \P_{I\setminus P_l}(NZ^{P_l^{\max}+1})\P_{I\setminus P_r}(NZ^{P_r^{\min}-1})
 
    \P_{I\cap P_r}(P_r\in\mathcal{P})
 
    +\mathcal{O}(p^{k-|I|+1})\\
 
    &\overset{\text{Claim}~\ref{claim:eventindependence}}{=}\sum_{\underset{P_l^{\max}+1<P_r^{\min}}{P_l,P_r\text{ patches}\,:\,0\in P_l,k\in P_r}}
 
    \P_{I}(P_l\in\mathcal{P})
 
    \P_{I}(P_r\in\mathcal{P})
 
    +\mathcal{O}(p^{k-|I|+1})\\
 
    &=\left(\sum_{P_l\text{ patch}\,:\,0\in P_l}
 
    \P_{I}(P_l\in\mathcal{P})\right)\left(\sum_{P_r\text{ patch}\,:\,k\in P_r}

    \P_{I}(P_r\in\mathcal{P})\right)
 
    +\mathcal{O}(p^{k-|I|+1})\\
 
    &=\P_I(Z^{0})\P_I(Z^{k})
 
    +\mathcal{O}(p^{k-|I|+1}).	
 
    \end{align*}
 
    \end{proof}
 

	
 
	Final observation: Suppose $S=\{a,b\}$. Then
 
	    \begin{align*}
 
	    &\sum_{f_{\overline{P}}\in\{0,1'\}^{2}} \rho_{S(f_{\overline{P}})} \mathbb{P}_{S(f_{\overline{P}})}(NZ^{(P_{\min}-1)}\cap NZ^{(P_{\max}+1)})  \\
 
	    &= \sum_{f_{\overline{P}}\in\{0,1'\}^{2}} \rho_{S(f_{\overline{P}})} \mathbb{P}_{S(f_{\overline{P}})}(NZ^{(P_{\min}-1)}) \mathbb{P}_{S(f_{\overline{P}})}(NZ^{(P_{\max}+1)})   +\mathcal{O}(p^{|\overline{P}|-|S|})\\
 
 	    &= \sum_{f_{\overline{P}}\in\{0,1'\}^{2}}  \rho_{a_f}\mathbb{P}_{S(f_{\overline{P}})}(NZ^{(P_{\min}-1)})  \rho_{b_f}\mathbb{P}_{S(f_{\overline{P}})}(NZ^{(P_{\max}+1)})   +\mathcal{O}(p^{|\overline{P}|-|S|})\\
 
	    \end{align*}
 

	
 

	
 
    
 
\begin{comment}
 
    \subsection{Sketch of the (false) proof of the linear bound \ref{it:const}}
 
    Let us interpret $[n]$ as the vertices of a length-$n$ cycle, and interpret operations on vertices mod $n$ s.t. $n+1\equiv 1$ and $1-1\equiv n$.
 
    %\begin{definition}[Resample sequences]
 
    %	A sequence of indices $(r_\ell)=(r_1,r_2,\ldots,r_k)\in[n]^k$ is called resample sequence if our procedure performs $k$ consequtive resampling, where the first resampling of the procedure resamples around the mid point $r_1$ the second around $r_2$ and so on. Let $RS(k)$ the denote the set of length $k$ resample sequences, and let $RS=\cup_{k\in\mathbb{N}}RS(k)$.
 
    %\end{definition}
 
    %\begin{definition}[Constrained resample sequence]\label{def:constrainedRes}
 
    %	Let $C\subseteq[n]$ denote a slot configuration, and let $a\in\{\text{res},\neg\text{res}\}^{n-|C|}$, where the elements correspond to labels ``resampled" vs. ``not resampled" respectively. 
 
    %	For $j\in[n-|C|]$ let $i_j$ denote the $j$-th index in $[n]\setminus C$.
 
    %	We define the set $A^{(C,a)}\subseteq RS$ as the set of resample sequences $(r_\ell)$ such that for all $j$ which has $a_j=\text{res}$ we have that $i_j$ appears in $(r_\ell)$ but for $j'$-s which have $a_{j'}=\neg\text{res}$ we have that $i_{j'}$ never appears in $(r_\ell)$. 
 
    %\end{definition}    
 
    \begin{definition}[Conditional expected number of resamples]
 
    	For a slot configuration $C\subseteq[n]$ and $a\in\{\!\text{ever},\text{ never}\}^{n-|C|}$ we define the event $A^{(C,a)}:=\bigwedge_{j\in[n-|C|]}\{i_j\text{ has }a_j\text{ become }0\text{ before reaching }\mathbf{1}\}$,
 
    	where $i_j$ is the $j$-th vertex of $[n]\setminus C$.
 
    	Then we also define
 
    	$$R^{(C,a)}_b:=\mathbb{E}[\#\{\text{resamplings when started from initial state }b\}|A^{(C,a)}].$$
 
    \end{definition}     
 
    
 
    As in Mario's proof I use the observation that 
 
    \begin{align*}
 
    R^{(n)}(p) &= \frac{1}{n}\sum_{b\in\{0,1,1'\}^{n}} \rho_b \; R_{\bar{b}}(p)\\
 
    &= \frac{1}{n}\sum_{C\subseteq [n]}\sum_{f\in\{0,1'\}^{|C|}} \rho_{C(f)} R_{C(f)}(p)\\
 
    &= \frac{1}{n}\sum_{C\subseteq [n]}\sum_{f\in\{0,1'\}^{|C|}}\sum_{a\in\{\!\text{ever},\text{ never}\}^{n-|C|}} \rho_{C(f)} R^{{(C,a)}}_{C(f)}(p)P_{C(f)}(A^{(C,a)})\\
 
    &= \frac{1}{n}\sum_{C\subseteq [n]}\sum_{a\in\{\!\text{ever},\text{ never}\}^{n-|C|}} \sum_{f\in\{0,1'\}^{|C|}} \rho_{C(f)} R^{{(C,a)}}_{C(f)}(p)P_{C(f)}(A^{(C,a)}), 
 
    \end{align*}
 
    where we denote by $C\subseteq[n]$ a slot configuration, whereas $C(f)$ denotes the slots of $C$ filled with the particles described by $f$, while all other locations in $[n]\setminus C$ are set to $1$. 
 
    When we write $R_{C(f)}$ we mean $R_{C(\bar{f})}$, i.e., replace $1'$-s with $1$-s. Since the notation is already heavy we dropped the bar from $f$, as it is clear from the context. Finally by $P_{C(f)}(A^{(C,a)})$ we denote the probability that the event $A^{(C,a)}$ holds.
 
    
 
    As before, for $j\in[n-|C|]$ let $i_j$ denote the $j$-th index in $[n]\setminus C$.
 
    Suppose that $a$ is such that there are two indices $j_1\neq j_2$ such that 
 
    $a_{j_1}=\text{never}=a_{j_2}$, moreover the sets $\{i_{j_1}+1,\ldots, i_{j_2}-1\}$ and $\{i_{j_2}+1,\ldots, i_{j_1}-1\}$ partition $C$ non-trivially, and we denote by $C_l$, $C_r$ the corresponding parts. 
 
    I wanted to prove that
 
    \begin{equation}\label{eq:conditionalCancellation}
 
		\sum_{f\in\{0,1'\}^{|C|}} \rho_{C(f)} R^{{(C,a)}}_{C(f)}(p)=0,
 
    \end{equation}    
 
    based on the observation that for all $f\in\{0,1'\}^{|C|}$ we have 
 
    that 
 
    \begin{equation}\label{eq:keyIndependce}
 
    R^{{(C,a)}}_{C(f)}(p)=R^{{(C_l,a_l)}}_{C_l(f_l)}(p)+R^{{(C_r,a_r)}}_{C_r(f_r)}(p),
 
    \end{equation}
 
    where $f_l\in\{0,1'\}^{|C_l|}$ is defined by taking only the indices (and values) of $f$ corresponding to vertices of $C_l$, and $a_l\in\{\text{ever},\text{ never}\}^{n-|C_l|}$ is defined such that $a_l$ and $a$ agree on the vertices where $a$ is defined, while on the vertices where $a$ is not defined, i.e., the vertices of $C_r$, we set $a_l$ to ``never". We define $f_r$ and $a_r$ analogously. 
 
    
 
    The reason why \eqref{eq:keyIndependce} holds is that, as before, the two halves of the cycle are conditionally independent because neither $i_{j_1}$ nor $i_{j_2}$ can become $0$. To be more precise, each resample sequence $\left(C(f)\rightarrow \mathbf{1} \right)\in A^{(C,a)}$ can be uniquely decomposed into resample sequences $\left(C_l(f_l)\rightarrow \mathbf{1}\right)\in A^{(C_l,a_l)}$ and $\left(C_r(f_r)\rightarrow \mathbf{1}\right)\in A^{(C_r,a_r)}$. Moreover, the total probability of the set of resample sequences $r$ that have decomposition $(r_l,r_r)$ is the product of the probabilities of $r_l$ and $r_r$, as shown in the proof of Claim~\ref{claim:expectationsum}. This proves that the set of all resample sequences $\left(C(f)\rightarrow \mathbf{1}\right)\in A^{(C,a)}$ can, for our purposes, be viewed as a product set with a product probability distribution. Therefore the halves can be treated independently, and so the expectation values just add up. 
 
    
 
    From here I wanted to mimic Mario's proof:
 
    \begin{align*}
 
    \sum_{f\in\{0,1'\}^{|C|}} \rho_{C(f)} R^{{(C,a)}}_{C(f)}(p)&=
 
    \sum_{f_l\in\{0,1'\}^{|C_l|}} \sum_{f_r\in\{0,1'\}^{|C_r|}}  (-1)^{|f_l|+|f_r|}p^{|C_l|+|C_r|} \left( R^{{(C_l,a_l)}}_{C_l(f_l)}(p) + R^{{(C_r,a_r)}}_{C_r(f_r)}(p) \right)\\
 
    &= p^{|C|}\sum_{f_l\in\{0,1'\}^{|C_l|}} (-1)^{|f_l|} R^{{(C_l,a_l)}}_{C_l(f_l)}(p) \sum_{f_r\in\{0,1'\}^{|C_r|}} (-1)^{|f_r|} \\
 
    &\quad + p^{|C|}\sum_{f_r\in\{0,1'\}^{|C_r|}} (-1)^{|f_r|} R^{{(C_r,a_r)}}_{C_r(f_r)}(p) \sum_{f_l\in\{0,1'\}^{|C_l|}} (-1)^{|f_l|} \\
 
    &= 0.
 
    \end{align*}
 
    The nasty issue, which I did not realise, is that the missing term $P_{C(f)}(A^{(C,a)})$ is non-constant: even though the event $A^{(C,a)}$ is independent of $f$, the probability $P_{C(f)}(A^{(C,a)})=P_{C(f_l)}(A^{(C_l,a_l)})\cdot P_{C(f_r)}(A^{(C_r,a_r)})$ is not, and so the above breaks down.
 
    
 
    Observe that if \eqref{eq:conditionalCancellation} held for configurations that cut the slot configuration into two halves, it would imply that the only non-zero contribution comes from pairs $(C,a)$ such that $C\cup\{i_j:a_j=\text{ever}\}$ is connected. This is because if this set is not connected, then either we can cut $C$ into two halves non-trivially along ``never" vertices, or there is an island of ``ever" vertices separated from all slots, and therefore from all $0$-s. The latter case has zero contribution, since we cannot set these indices to $0$ without reaching them by some resamplings, thereby building a path of $0$-s leading there.
 
    
 
    If $|C\cup\{i_j:a_j=\text{ever}\}|\geq k+1$ then every contribution has a power of at least $k+1$ in $p$, since $(C,a)$ requires the prior appearance of at least $k+1$ particles. If $n\geq k+1$ then every $(C,a)$ such that $|C\cup\{i_j:a_j=\text{ever}\}|\leq k$ appears exactly $n$ times, since $(C,a)$ cannot be translationally invariant. Moreover, the quantity $R^{{(C,a)}}_{C(f)}(p)$ is independent of $n$ due to the conditioning that every resampling happens on a connected component of length at most $k<n$. This would prove that $a_k^{(n)}$ is constant for $n\geq k+1$. The same arguments would directly translate to the torus and other translationally invariant objects, so we could go higher dimensional as Mario suggested.
 
    
 
    Questions:
 
    \begin{itemize}
 
    	\item Is it possible to somehow fix this proof?
 
    	\item In view of this (false) proof, can we better characterise $a_k^{(k+1)}$?
 
    	\item Why did Mario's and Tom's simulation show that for fixed $C$ the contribution coefficients have constant sign? Is it relevant for proving \ref{it:pos}-\ref{it:geq}?
 
    	\item Can we prove the conjectured formula for $a_k^{(3)}$?		
 
    \end{itemize} 
 

	
 
\begin{comment}
 
    \subsection{Sketch of the proof of the linear bound \ref{it:const}}
 
    Let us interpret $[n]$ as the vertices of a length-$n$ cycle, and interpret operations on vertices mod $n$ s.t. $n+1\equiv 1$ and $1-1\equiv n$.
 
    \begin{definition}[Resample sequences]
 
		A sequence of indices $(r_\ell)=(r_1,r_2,\ldots,r_k)\in[n]^k$ is called a resample sequence if our procedure performs $k$ consecutive resamplings, where the first resampling of the procedure resamples around the mid point $r_1$, the second around $r_2$, and so on. Let $RS(k)$ denote the set of length-$k$ resample sequences, and let $RS=\cup_{k\in\mathbb{N}}RS(k)$.
 
    \end{definition}
 
    \begin{definition}[Constrained resample sequence]\label{def:constrainedRes}
 
    	Let $C\subseteq[n]$ denote a slot configuration, and let $a\in\{\text{res},\neg\text{res}\}^{n-|C|}$, where the elements correspond to labels ``resampled" vs. ``not resampled" respectively. 
 
    	For $j\in[n-|C|]$ let $i_j$ denote the $j$-th index in $[n]\setminus C$.
 
		We define the set $A^{(C,a)}\subseteq RS$ as the set of resample sequences $(r_\ell)$ such that for all $j$ which has $a_j=\text{res}$ we have that $i_j$ appears in $(r_\ell)$ but for $j'$-s which have $a_{j'}=\neg\text{res}$ we have that $i_{j'}$ never appears in $(r_\ell)$. 
 
    \end{definition}    
 
    \begin{definition}[Expected number of resamples]
 
		For $b\in\{0,1\}^n$ we define 
 
		$$R_b=\mathbb{E}[\#\{\text{resamplings when started from initial state }b\}],$$
 
		and for $(C,a)$ as in the previous definition we also define
 
		$$R^{(C,a)}_b=\mathbb{E}[\#\{\text{resamplings }\in A^{(C,a)} \text{ when started from initial state }b\}].$$
 
		By the latter we mean that after each resampling we check whether the sequence of resamplings so far is in $A^{(C,a)}$; if yes, we count it, otherwise we do not.
 
    \end{definition}     
 
    
 
    As in Mario's proof I use the observation that 
 
    \begin{align*}
 
    R^{(n)}(p) &= \frac{1}{n}\sum_{b\in\{0,1,1'\}^{n}} \rho_b \; R_{\bar{b}}(p)\\
 
    &= \frac{1}{n}\sum_{C\subseteq [n]}\sum_{f\in\{0,1'\}^{|C|}} \rho_{C(f)} R_{C(f)}(p)\\
 
    &= \frac{1}{n}\sum_{C\subseteq [n]}\sum_{f\in\{0,1'\}^{|C|}}\sum_{a\in\{\text{res},\neg\text{res}\}^{n-|C|}} \rho_{C(f)} R^{{(C,a)}}_{C(f)}(p)\\
 
    &= \frac{1}{n}\sum_{C\subseteq [n]}\sum_{a\in\{\text{res},\neg\text{res}\}^{n-|C|}} \sum_{f\in\{0,1'\}^{|C|}} \rho_{C(f)} R^{{(C,a)}}_{C(f)}(p), 
 
    \end{align*}
 
    where we denote by $C\subseteq[n]$ a slot configuration, whereas $C(f)$ denotes the slots of $C$ filled with the particles described by $f$, while all other locations in $[n]\setminus C$ are set to $1$. 
 
	When we write $R_{C(f)}$ we mean $R_{C(\bar{f})}$, i.e., replace $1'$-s with $1$-s. Since the notation is already heavy we dropped the bar from $f$, as it is clear from the context.
 
    
 
    As in Definition~\ref{def:constrainedRes} for $j\in[n-|C|]$ let $i_j$ denote the $j$-th index in $[n]\setminus C$.
 
    Suppose that $a$ is such that there are two indices $j_1\neq j_2$ such that 
 
    $a_{j_1}=\neg\text{res}=a_{j_2}$, moreover the sets $\{i_{j_1}+1,\ldots, i_{j_2}-1\}$ and $\{i_{j_2}+1,\ldots, i_{j_1}-1\}$ partition $C$ non-trivially, and we denote by $C_l$,$C_r$ the corresponding partitions. 
 
    We claim that 
 
    $$\sum_{f\in\{0,1'\}^{|C|}} \rho_{C(f)} R^{{(C,a)}}_{C(f)}(p)=0.$$
 
    
 
	This is based on the observation that for all $f\in\{0,1'\}^{|C|}$ we have 
 
    that 
 
    \begin{equation}\label{eq:keyIndependceWrong}
 
    R^{{(C,a)}}_{C(f)}(p)=R^{{(C_l,a_l)}}_{C_l(f_l)}(p)+R^{{(C_r,a_r)}}_{C_r(f_r)}(p),
 
    \end{equation}
 
    where $f_l\in\{0,1'\}^{|C_l|}$ is defined by taking only the indices (and values) of $f$ corresponding to vertices of $C_l$, and $a_l\in\{\text{res},\neg\text{res}\}^{n-|C_l|}$ is defined such that $a_l$ and $a$ agree on the vertices where $a$ is defined, while on the vertices where $a$ is not defined, i.e., the vertices of $C_r$, we set $a_l$ to $\neg\text{res}$. We define $f_r$ and $a_r$ analogously.
 
    
 
    The reason why \eqref{eq:keyIndependceWrong} holds is, as before, that the two halves of the cycle are conditionally independent because neither $i_{j_1}$ nor $i_{j_2}$ is resampled. One could probably also argue along the lines of Tom's grid figure. 
 
    From here the proof goes just as in Mario's proof:
 
    \begin{align*}
 
    \sum_{f\in\{0,1'\}^{|C|}} \rho_{C(f)} R^{{(C,a)}}_{C(f)}(p)&=
 
    \sum_{f_l\in\{0,1'\}^{|C_l|}} \sum_{f_r\in\{0,1'\}^{|C_r|}}  (-1)^{|f_l|+|f_r|}p^{|C_l|+|C_r|} \left( R^{{(C_l,a_l)}}_{C_l(f_l)}(p) + R^{{(C_r,a_r)}}_{C_r(f_r)}(p) \right)\\
 
    &= p^{|C|}\sum_{f_l\in\{0,1'\}^{|C_l|}} (-1)^{|f_l|} R^{{(C_l,a_l)}}_{C_l(f_l)}(p) \sum_{f_r\in\{0,1'\}^{|C_r|}} (-1)^{|f_r|} \\
 
    &\quad + p^{|C|}\sum_{f_r\in\{0,1'\}^{|C_r|}} (-1)^{|f_r|} R^{{(C_r,a_r)}}_{C_r(f_r)}(p) \sum_{f_l\in\{0,1'\}^{|C_l|}} (-1)^{|f_l|} \\
 
    &= 0.
 
    \end{align*}
 
    
 
    Observe that this implies that the only non-zero contribution comes from pairs $(C,a)$ such that $C\cup\{i_j:a_j=\text{res}\}$ is connected. This is because if this set is not connected, then either we can cut $C$ into two halves non-trivially along $\neg\text{res}$ vertices, or there is an island of $\text{res}$ vertices separated from all slots, and therefore from all $0$-s. The latter case has zero contribution, since we cannot resample these indices without first setting them to $0$, but under the conditioning they can never be reached by any resampling, and therefore they always remain $1$.
 
    
 
    If $|C\cup\{i_j:a_j=\text{res}\}|\geq k+1$ then every contribution has a power of at least $k+1$ in $p$, since $(C,a)$ requires the prior appearance of at least $k+1$ particles. If $n\geq k+1$ then every $(C,a)$ such that $|C\cup\{i_j:a_j=\text{res}\}|\leq k$ appears exactly $n$ times, since $(C,a)$ cannot be translationally invariant. Moreover, the quantity $R^{{(C,a)}}_{C(f)}(p)$ is independent of $n$ due to the conditioning that every resampling happens on a connected component of length at most $k<n$. This proves that $a_k^{(n)}$ is constant for $n\geq k+1$.
 
    
 
    Note that the heart of the proof is \eqref{eq:keyIndependceWrong}, so this is what we should double check.    
 

	
 
	The same arguments directly translate to the torus and other translationally invariant objects, so we can go higher dimensional :-) as Mario suggested.
 
	
 
	Questions:
 
	\begin{itemize}
 
		\item In view of this proof, can we better characterise $a_k^{(k+1)}$?
 
		\item Why did Mario's and Tom's simulation show that for fixed $C$ the contribution coefficients have constant sign? Is it relevant for proving \ref{it:pos}-\ref{it:geq}?
 
		\item Can we prove the conjectured formula for $a_k^{(3)}$?		
 
	\end{itemize} 
 
\end{comment}
 
        
 
\begin{comment}    
 
    \begin{definition}[Neighborhood]
 
	   	For the length-$n$ cycle we identify sites with $[n]$. 
 
	   	For a subset $S\subseteq [n]$ we define the $k$-neighborhood of $S$ as

	   	$N_k(S):=\cup_{s\in S} \{s-k,s-k+1,\ldots,s+k\}$, where numbers are interpreted mod $n$ (and we represent the $\equiv 0$ class by $n$).
 
	\end{definition}
 
	\begin{definition}[Blocks and Gaps]
 
	   	For a configuration $C\subseteq [n]$ we call the connected components of $[n]\setminus N_1(C)$ the gaps. We denote by $m_C$ the number of gaps.
 
	   	We call a non-empty subset $B\subset C$ a block if $N_3(B)\cap C=B$ and $B$ is minimal, i.e., there is no proper subset $\emptyset\neq B'\subsetneq B$ satisfying $N_3(B')\cap C=B'$. 
 
	   	Observe that whenever $m_C\geq 2$ the number of blocks is the same as the number of gaps.  
 
    \end{definition}
 
    \begin{definition}[Crossings]
 
    	We say that a run (path) of the resampling procedure crosses $i\in[n]$ if there is ever a $0$ in $N_1(\{i\})$ during the run.
 
    \end{definition}
 
    \begin{definition}[Enumerating gaps and mid points]
 
		Let $G_1,G_2,\ldots, G_{m_C}$ be an enumeration of the gaps respecting the cyclic ordering, and let $g_i$ be the middle element of $G_i$; if there are two middle elements, we choose the smaller according to the cyclic ordering. (If $m_C=1$ and $G_1=[n]$, let $g_1=1$.)
 
		If $m_C\geq 2$ then for all $i\in[m_C]$ let $B_i$ be the block between $G_i$ and $G_{i+1}$.
 
    \end{definition}
 
    
 
    As in Mario's proof I use the observation that 
 
    \begin{align*}
 
    R^{(n)}(p) &= \frac{1}{n}\sum_{b\in\{0,1,1'\}^{n}} \rho_b \; R_b(p)\\
 
    &= \frac{1}{n}\sum_{C\subseteq [n]}\sum_{f\in\{0,1'\}^{|C|}} \rho_{C(f)} R_{C(f)}(p),
 
    \end{align*}
 
    where we denote by $C\subseteq[n]$ a slot configuration, whereas $C(f)$ denotes the slots of $C$ filled with the particles described by $f$. 
 
    For $a\in\{\text{crossed},\text{not crossed}\}^m$ we also introduce the notation $R^a_{C(f)}(p):=\mathbb{E}(\#\{\text{resamples before reaching }\mathbbm{1} \text{ from } C(f)\}|\bigwedge_{j\in[m_C]}g_j \text{ is } a_j)\cdot\mathbb{P}(\bigwedge_{j\in[m_C]}g_j \text{ is } a_j)$, which we define as $0$ if the conditioning event has $0$ probability. 
 
    Since $$R_{C(f)}(p)=\sum_{a\in\{\text{crossed},\text{not crossed}\}^{m_C}}R^a_{C(f)}(p),$$ we can further rewrite the expectation as
 
    \begin{align*}
 
	    R^{(n)}(p) &= \frac{1}{n}\sum_{C\subseteq [n]}\sum_{a\in\{\text{crossed},\text{not crossed}\}^{m_C}}\sum_{f\in\{0,1'\}^{|C|}} \rho_{C(f)} R^a_{C(f)}(p).
 
    \end{align*}
 
    Suppose that $a$ contains at least two ``not crossed'' labels; then we claim that $\sum_{f\in\{0,1'\}^{|C|}} \rho_{C(f)} R^a_{C(f)}(p)=0$. Let $j_1\neq j_2$ be two distinct indices such that $a_{j_1}$ and $a_{j_2}$ both say ``not crossed''. Let $B_l:=B_{j_1}\cup B_{j_1+1}\cup\cdots\cup B_{j_2-1}$ and $B_r:=B_{j_2}\cup B_{j_2+1}\cup\cdots\cup B_{j_1-1}$ (again we interpret indices mod $m_C$).
 
    Then we claim that for all $f\in\{0,1'\}^{|C|}$ we have 
 
    that 
 
    \begin{equation}\label{eq:keyIndependceOld}
 
		R^a_{C(f)}(p)=R^a_{B_l(f)}(p)+R^a_{B_r(f)}(p).
 
    \end{equation} 
 
    The reason is, as before, that the halves are independent because neither $g_{j_1}$ nor $g_{j_2}$ is crossed. One could probably also prove it along the lines of the grid figure. 
 
    From here the proof goes just as in Mario's proof:
 
    \begin{align*}
 
    \sum_{f\in\{0,1'\}^{|C|}} \rho_{C(f)} R^a_{C(f)}(p)&=
 
    \sum_{f_l\in\{0,1'\}^{|B_l|}} \sum_{f_r\in\{0,1'\}^{|B_r|}}  (-1)^{|f_l|+|f_r|}p^{|B_l|+|B_r|} \left( R^a_{B_l(f_l)} + R^a_{B_r(f_r)} \right)\\

    &= p^{|C|}\sum_{f_l\in\{0,1'\}^{|B_l|}} (-1)^{|f_l|} R^a_{B_l(f_l)} \sum_{f_r\in\{0,1'\}^{|B_r|}} (-1)^{|f_r|} \\

    &\quad + p^{|C|}\sum_{f_r\in\{0,1'\}^{|B_r|}} (-1)^{|f_r|} R^a_{B_r(f_r)} \sum_{f_l\in\{0,1'\}^{|B_l|}} (-1)^{|f_l|} \\
 
    &= 0 
 
    \end{align*}
 
    From this it follows that the only contribution comes from paths that cross all but one (or all) of the gap mid points. This then implies that it is enough to consider configurations of length $\mathcal{O}(k)$. (We define the length of a configuration $C$ as $n-\max_{j\in[m_C]}|G_j|$.)
 
    
 
    Note that the heart of the proof is \eqref{eq:keyIndependceOld}, so this is what we should double check.
 
    
 
    In fact I think the independence that we use in \eqref{eq:keyIndependceOld} can also be proven when we define a crossing as crossing the actual point, and not its $1$-neighborhood. This would make it possible to define blocks as consecutive slots. Then we could also use all points of the gaps, not only the mid points. The requirement for the cancellation would be that there are ``not crossed'' labels from at least two distinct gaps. This would probably lead to the optimal $k+1$ bound, giving the actual statement \ref{it:const}. 
 
    
 
    Speculation: The $n=k$ case would then probably not work because the all $0$ starting configuration is invariant under rotations.
 
    To actually go below $2k$ one needs to be careful, because there are periodic configurations that are invariant under some rotations, causing double-counting issues. This can probably be resolved by showing that when a pattern becomes periodic for some $n$, it actually produces periodicity-times more expectation due to symmetry. But this is all just speculation.
 
\end{comment}
 

	
 
	\bibliographystyle{alpha}
 
	\bibliography{Resample.bib}
 
	