Changeset 15598be578d9 (merge)
Tom Bannink <tom.bannink@cwi.nl> - 2017-06-01 18:00:22
1 file changed with 1 insertion and 1 deletion: main.tex
 
@@ -517,193 +517,193 @@ Now observe that
 
        + \mathcal{O}(p^k) \\
 
        &\overset{???}{=} R_{b_1} + R_{b_2} + \mathcal{O}(p^k)
 
\end{align*}
 
\end{comment}
 

	
 
For simplicity, consider the chain (instead of the cycle), with vertices identified with $\mathbb{Z}$.
 
\begin{definition}[Starting-state-dependent probability distribution]
 
	Let $I\subset\mathbb{Z}$ be a finite set of vertices.
 
    Let $b_I$ be the initial state where everything is $1$, apart from the vertices corresponding to $I$, which are set to $0$. Define $P_I(A):=P_{b_I}(A)$, where the latter is defined in Definition \ref{def:conditionedevents}, i.e., the probability of seeing a resample sequence from $A$ when the whole procedure is started in state $b_I$. 
 
\end{definition}
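
For illustration with, say, $I=\{1,3\}$: the initial state $b_{\{1,3\}}$ is $1$ everywhere except at vertices $1$ and $3$, which are $0$, and $P_{\{1,3\}}(Z^{(0)})$ is then the probability that vertex $0$ ever becomes $0$ when the procedure is started from this state.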
 

	
 
The intuition behind the following lemma is that the far right can only affect vertex $0$ if an interaction chain forms, which means that every vertex in between should get resampled to $0$ at least once.
 
\begin{lemma}\label{lemma:probIndep}
 
	Suppose we have a finite set $I\subset\mathbb{N}_+$ of vertices.
 
	Let $I_{\max}:=\max(I)$ and $I':=I\setminus\{I_{\max}\}$, and similarly let $I_{\min}:=\min(I)$.
 
	Then $P_{I}(Z^{(0)})=P_{I'}(Z^{(0)}) + \mathcal{O}(p^{I_{\max}+1-|I|})$.
 
\end{lemma}
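
As a sanity check of the exponent (with example values chosen here), take $I=\{2,5\}$, so that $I_{\max}=5$, $I'=\{2\}$ and $|I|=2$: the lemma then reads $P_{\{2,5\}}(Z^{(0)})=P_{\{2\}}(Z^{(0)})+\mathcal{O}(p^{4})$, and indeed $p^{I_{\max}+1-|I|}=p^{4}$ counts the four vertices $\{0,1,3,4\}=[0,I_{\max}]\setminus I$ that all have to be resampled to $0$ before the extra particle at $5$ can influence vertex $0$.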
 
\begin{proof}
 
	The proof uses induction on $|I|$. For $|I|=1$ the statement is easy, since every resample sequence that resamples vertex $0$ to zero must produce at least $I_{\max}$ zeroes in-between.
 
	
 
    Induction step: For an event $A$ and $k>0$ let us denote $A_k = A\cap\left(\bigcap_{j=0}^{k-1} \mathrm{Z}^{(j)}\right)\cap \mathrm{NZ}^{(k)}$, i.e., $A_k$ is the event $A$ \emph{and} ``each vertex in $0,1,2,\ldots, k-1$ becomes $0$ at some point before termination (either by resampling or initialisation), but vertex $k$ does not''. Observe that these events form a partition, so $Z^{(0)}=\dot{\bigcup}_{k=1}^{\infty}Z^{(0)}_k$.
 
    Let $I_{<k}:=I\cap[1,k-1]$ and similarly $I_{>k}:=I\setminus[1,k]$; finally let $I_{><}:=[I_{\min}+1,I_{\max}-1]\setminus I$ (note that $I_{><} = \gaps{I}$ as shown in Figure \ref{fig:diametergap}; for example, $I=\{2,5,9\}$ and $k=4$ give $I_{<k}=\{2\}$, $I_{>k}=\{5,9\}$ and $I_{><}=\{3,4,6,7,8\}$). Suppose we have proven the claim up to $|I|-1$; then the induction step can be shown by
 
	\begin{align*}
 
		P_{I}(Z^{(0)})
 
		&=\sum_{k=1}^{\infty}P_I(Z^{(0)}_k) \tag{the events are a partition}\\
 
        &=\sum_{k\in \mathbb{N}\setminus I}P_I(Z^{(0)}_k) \tag{$P_I(Z^{(0)}_k)=0$ for $k\in I$}\\
 
        &=\sum_{k\in\mathbb{N}\setminus I}P_{I_{<k}}(Z^{(0)}_k)\cdot P_{I_{>k}}(\mathrm{NZ}^{(k)}) \tag{by Claim~\ref{claim:eventindependence}}\\
 
        &=\sum_{k\in I_{><}}P_{I_{<k}}(Z^{(0)}_k)\cdot P_{I_{>k}}(\mathrm{NZ}^{(k)})+\mathcal{O}(p^{I_{\max}+1-|I|})
 
		\tag{$k<I_{\min}\Rightarrow P_{I_{<k}}(Z^{(0)}_k)=0$, and the terms with $k>I_{\max}$ sum to $\mathcal{O}(p^{I_{\max}+1-|I|})$}\\
 
        &=\sum_{k\in I_{><}}P_{I'_{<k}}(Z^{(0)}_k)\cdot P_{I_{>k}}(\mathrm{NZ}^{(k)})+\mathcal{O}(p^{I_{\max}+1-|I|})	
 
		\tag{$k< I_{\max}\Rightarrow I_{<k}=I'_{<k}$}\\
 
		&=\sum_{k\in I_{><}}P_{I'_{<k}}(Z^{(0)}_k)\cdot
 
        \left(P_{I'_{>k}}(\mathrm{NZ}^{(k)})+\mathcal{O}(p^{I_{\max}-k+1-|I_{>k}|})\right) +\mathcal{O}(p^{I_{\max}+1-|I|})	\tag{by induction, since for $k>I_{\min}$ we have $|I_{>k}|<|I|$}\\
 
		&=\sum_{k\in I_{><}}P_{I'_{<k}}(Z^{(0)}_k)\cdot
 
        P_{I'_{>k}}(\mathrm{NZ}^{(k)}) +\mathcal{O}(p^{I_{\max}+1-|I|})	
 
		\tag{as $P_{I'_{<k}}(Z^{(0)}_k)=\mathcal{O}(p^{k-|I'_{<k}|})$}\\
 
		&=\sum_{k\in\mathbb{N}\setminus I}P_{I'_{<k}}(Z^{(0)}_k)\cdot
 
        P_{I'_{>k}}(\mathrm{NZ}^{(k)}) +\mathcal{O}(p^{I_{\max}+1-|I|})\\
 
		&=\sum_{k\in\mathbb{N}\setminus I'}P_{I'_{<k}}(Z^{(0)}_k)\cdot
 
        P_{I'_{>k}}(\mathrm{NZ}^{(k)}) +\mathcal{O}(p^{I_{\max}+1-|I|})	\tag{$k=I_{\max}\Rightarrow P_{I'_{<k}}(Z^{(0)}_k)=\mathcal{O}(p^{I_{\max}-|I'|})=\mathcal{O}(p^{I_{\max}+1-|I|})$}\\
 
		&=P_{I'}(Z^{(0)}) +\mathcal{O}(p^{I_{\max}+1-|I|})	\tag{analogously to the beginning}			
 
	\end{align*}
 
\end{proof}
 

	
 
	The main insight that Lemma~\ref{lemma:probIndep} gives is that if we separate the slots into two halves, then in order to see the cancellation of the contribution of the expected resamplings on the right, we can simply pair up the left configurations by the particle filling the leftmost slot; similarly, for cancelling the left expectations we pair up the right configurations based on the rightmost filling. 
 
	
 
	Moreover, this claim finally ``sees'' how many empty places lie between slots. These properties make it possible to use this lemma to prove the sought linear bound. We show it for the infinite chain, but with a little care it should also translate to the cycle.
 

	
 
\begin{definition}[Connected patches]
 
	Let $\mathcal{P}\subset 2^{\mathbb{Z}}$ be a finite system of finite subsets of $\mathbb{Z}$. We say that the patch set of a resample sequence is $\mathcal{P}$,
 
	if the connected components of the set of vertices that have ever become $0$ are exactly the elements of $\mathcal{P}$. We denote by $A^{(\mathcal{P})}$ the event that the set of patches is $\mathcal{P}$. For a patch $P$ let $A^{(P)}:=\bigcup_{\mathcal{P}:P\in \mathcal{P}}A^{(\mathcal{P})}$.
 
\end{definition} 
 
Note by Tom: So $A^{(\mathcal{P})}$ is the event that the set of all patches is \emph{exactly} $\mathcal{P}$, whereas $A^{(P)}$ is the event that one of the patches is equal to $P$, but there can be other patches as well.
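
For a concrete (made-up) run: if the vertices that ever become $0$ are exactly $1,2,3$ and $7,8$, then the patch set is $\mathcal{P}=\{\{1,2,3\},\{7,8\}\}$, so this run lies in $A^{(\mathcal{P})}$ as well as in $A^{(\{1,2,3\})}$ and $A^{(\{7,8\})}$, but not in $A^{(\{\{1,2,3\}\})}$, since its patch set is not exactly $\{\{1,2,3\}\}$.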
 

	
 
\begin{definition}[Conditional expectations]
 
	Let $S\subset\mathbb{Z}$ be a finite slot configuration, and for $f\in\{0,1'\}^{|S|}$ let $I:=S(f)$ be the set of vertices filled with particles. 
 
	Then we define
 
	$$R_I:=\mathbb{E}[\#\{\text{resamplings when started from initial state }I\}].$$

	For a patch set $\mathcal{P}$ and some $P\in\mathcal{P}$ we define

	$$R^{(\mathcal{P})}_I:=\mathbb{E}[\#\{\text{resamplings when started from initial state }I\}\mid A^{(\mathcal{P})}]$$

	and

	$$R^{(P,\mathcal{P})}_I:=\mathbb{E}[\#\{\text{resamplings inside }P\text{ when started from initial state }I\}\mid A^{(\mathcal{P})}],$$

	and finally

	$$R^{(P)}_I:=\mathbb{E}[\#\{\text{resamplings inside }P\text{ when started from initial state }I\}\mid A^{(P)}].$$
 
\end{definition} 
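
A small consistency check of these definitions (under the reading, which I believe is the intended one, that each resampling is counted in the patch containing its midpoint, so that on $A^{(\mathcal{P})}$ every resampling belongs to exactly one patch of $\mathcal{P}$): by linearity of conditional expectation
$$R^{(\mathcal{P})}_I=\sum_{P\in\mathcal{P}}R^{(P,\mathcal{P})}_I,$$
which is exactly the step used in the fourth equality of the computation below.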
 

	
 
    Similarly to Mario's proof, I use the observation that 
 
    \begin{align*}
 
    R^{(n)} &= \frac{1}{n}\sum_{b\in\{0,1,1'\}^{n}} \rho_b \; R_{\bar{b}}(p)\\
 
    &= \frac{1}{n}\sum_{S\subseteq [n]}\sum_{f\in\{0,1'\}^{|S|}}\rho_{S(f)} R_{S(f)}\\
 
    &= \frac{1}{n}\sum_{S\subseteq [n]}\sum_{f\in\{0,1'\}^{|S|}}\rho_{S(f)}
 
    \sum_{\mathcal{P}\text{ patches}} \mathbb{P}_{S(f)}(A^{(\mathcal{P})}) R^{(\mathcal{P})}_{S(f)} \\
 
    &= \frac{1}{n}\sum_{S\subseteq [n]}\sum_{f\in\{0,1'\}^{|S|}}\rho_{S(f)}
 
    \sum_{\mathcal{P}\text{ patches}} \mathbb{P}_{S(f)}(A^{(\mathcal{P})}) \sum_{P\in\mathcal{P}} R^{(P,\mathcal{P})}_{S(f)}\\

    &= \frac{1}{n}\sum_{S\subseteq [n]}\sum_{f\in\{0,1'\}^{|S|}}\rho_{S(f)} 

    \sum_{\mathcal{P}\text{ patches}} \mathbb{P}_{S(f)}(A^{(\mathcal{P})}) \sum_{P\in\mathcal{P}} R^{(P)}_{S(f)\cap P}\tag{by Claim~\ref{claim:eventindependence}}\\ 

    &= \frac{1}{n}\sum_{S\subseteq [n]}\sum_{f\in\{0,1'\}^{|S|}}\rho_{S(f)} 

    \sum_{P\text{ patch}} R^{(P)}_{S(f)\cap P}\sum_{\mathcal{P}:P\in\mathcal{P}}\mathbb{P}_{S(f)}(A^{(\mathcal{P})})\\     
 
    &= \frac{1}{n}\sum_{S\subseteq [n]}\sum_{P\text{ patch}}\sum_{f\in\{0,1'\}^{|S|}}
 
     \rho_{S(f)} R^{(P)}_{S(f)\cap P}\mathbb{P}_{S(f)}(A^{(P)}) \tag{by definition}\\        
 
    &= \frac{1}{n}\sum_{S\subseteq [n]}\sum_{P\text{ patch}}\sum_{f\in\{0,1'\}^{|S|}}
 
    \rho_{S(f)} R^{(P)}_{S(f)\cap P}\mathbb{P}_{S(f)\cap P}(A^{(P)})\mathbb{P}_{S(f)\cap \overline{P}}(\overline{Z^{(P_{\min}-1)}}\cap\overline{Z^{(P_{\max}+1)}}) \tag{remember Definition~\ref{def:visitingResamplings} and use Claim~\ref{claim:eventindependence}}\\    
 
    &= \frac{1}{n}\sum_{S\subseteq [n]}\sum_{P\text{ patch}}\sum_{f_P\in\{0,1'\}^{|S\cap P|}}
 
    \rho_{S(f_P)}  R^{(P)}_{S(f_P)}\mathbb{P}_{S(f_P)}(A^{(P)})
 
    \sum_{f_{\overline{P}}\in\{0,1'\}^{|S\cap \overline{P}|}}\rho_{S(f_{\overline{P}})}\mathbb{P}_{S(f_{\overline{P}})}(\overline{Z^{(P_{\min}-1)}}\cap\overline{Z^{(P_{\max}+1)}}) \\   
 
	&= \frac{1}{n}\sum_{S\subseteq [n]}\sum_{P\text{ patch}}\sum_{f_P\in\{0,1'\}^{|S\cap P|}}
 
	\rho_{S(f_P)}
 
	\sum_{f_{\overline{P}}\in\{0,1'\}^{|S\cap \overline{P}|}}\rho_{S(f_{\overline{P}})}\mathcal{O}(p^{|S_{><}|}) \\             
 
	&= \frac{1}{n}\sum_{S\subseteq [n]}\mathcal{O}(p^{|S|+|S_{><}|}).
 
    \end{align*}
 
   	
 
   	The penultimate equality can be seen by case separation.

   	If $S_{><}\subseteq P$ then already $\mathbb{P}_{S(f_P)}(A^{(P)})=\mathcal{O}(p^{|S_{><}|})$.

   	Otherwise, if all elements of $S_{><}\setminus P$ are larger than $P_{\max}$, then we view the last summation as $\sum_{f'_{\overline{P}}\in\{0,1'\}^{|S\cap \overline{P}\setminus\{S_{\max}\}|}}\sum_{f''_{\overline{P}}\in\{0,1'\}^{1}}$ and use Lemma~\ref{lemma:probIndep} to obtain the cancellations pairwise with respect to the filling of $S_{\max}$, i.e., the value of $f''_{\overline{P}}$. We proceed similarly when all elements of $S_{><}\setminus P$ are smaller than $P_{\min}$. In the remaining case we again proceed similarly, but now the cancellations come from the interplay of $4$ fillings, corresponding to the possible fillings of $S_{\min}$ and $S_{\max}$ simultaneously.
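
	For completeness, here is the bookkeeping behind the \emph{last} equality, assuming (as in the commented-out computation further below) that $|\rho_{S(f)}|=p^{|S|}$ for every filling $f$, so that the two $\rho$-factors have magnitudes $p^{|S\cap P|}$ and $p^{|S\cap \overline{P}|}$. For a fixed patch $P$,
	$$\sum_{f_P\in\{0,1'\}^{|S\cap P|}}\;\sum_{f_{\overline{P}}\in\{0,1'\}^{|S\cap \overline{P}|}}\left|\rho_{S(f_P)}\right|\left|\rho_{S(f_{\overline{P}})}\right|\mathcal{O}(p^{|S_{><}|})=\mathcal{O}\!\left(p^{|S\cap P|+|S\cap\overline{P}|+|S_{><}|}\right)=\mathcal{O}\!\left(p^{|S|+|S_{><}|}\right),$$
	and I believe the remaining sum over patches $P$ only affects the constant, since a larger patch requires more vertices to become $0$ and therefore carries a higher power of $p$.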
 
   	   
 
 
	I think the same arguments would translate to the torus and other translationally invariant spaces, so we could go higher-dimensional, as Mario suggested. Then I think one would need to replace $|S_{><}|$ by the minimal number $k$ such that there is a set $C$ of size $k$ for which $S\cup C$ is connected. I am not entirely sure how to generalise Lemma~\ref{lemma:probIndep}, though, which is of key importance in the present proof.
 
    
 
    Questions:
 
    \begin{itemize}
 
    	\item Is this proof finally flawless?
 
    	\item In view of this proof, can we better characterise $a_k^{(k+1)}$?
 
    	\item Why did Mario's and Tom's simulation show that for fixed $C$ the contribution coefficients have constant sign? Is it relevant for proving \ref{it:pos}-\ref{it:geq}?
 
    	\item Can we prove the conjectured formula for $a_k^{(3)}$?		
 
    \end{itemize} 
 
    
 
\begin{comment}
 
    \subsection{Sketch of the (false) proof of the linear bound \ref{it:const}}
 
    Let us interpret $[n]$ as the vertices of a length-$n$ cycle, and interpret operations on vertices mod $n$ s.t. $n+1\equiv 1$ and $1-1\equiv n$.
 
    %\begin{definition}[Resample sequences]
 
    %	A sequence of indices $(r_\ell)=(r_1,r_2,\ldots,r_k)\in[n]^k$ is called resample sequence if our procedure performs $k$ consequtive resampling, where the first resampling of the procedure resamples around the mid point $r_1$ the second around $r_2$ and so on. Let $RS(k)$ the denote the set of length $k$ resample sequences, and let $RS=\cup_{k\in\mathbb{N}}RS(k)$.
 
    %\end{definition}
 
    %\begin{definition}[Constrained resample sequence]\label{def:constrainedRes}
 
    %	Let $C\subseteq[n]$ denote a slot configuration, and let $a\in\{\text{res},\neg\text{res}\}^{n-|C|}$, where the elements correspond to labels ``resampled" vs. ``not resampled" respectively. 
 
    %	For $j\in[n-|C|]$ let $i_j$ denote the $j$-th index in $[n]\setminus C$.
 
    %	We define the set $A^{(C,a)}\subseteq RS$ as the set of resample sequences $(r_\ell)$ such that for all $j$ which has $a_j=\text{res}$ we have that $i_j$ appears in $(r_\ell)$ but for $j'$-s which have $a_{j'}=\neg\text{res}$ we have that $i_{j'}$ never appears in $(r_\ell)$. 
 
    %\end{definition}    
 
    \begin{definition}[Conditional expected number of resamples]
 
    	For a slot configuration $C\subseteq[n]$ and $a\in\{\!\text{ever},\text{ never}\}^{n-|C|}$ we define the event $A^{(C,a)}:=\bigwedge_{j\in[n-|C|]}\{i_j\text{ has }a_j\text{ become }0\text{ before reaching }\mathbf{1}\}$,
 
    	where $i_j$ is the $j$-th vertex of $[n]\setminus C$.
 
    	Then we also define
 
    	$$R^{(C,a)}_b:=\mathbb{E}[\#\{\text{resamplings when started from initial state }b\}\mid A^{(C,a)}].$$
 
    \end{definition}     
 
    
 
    As in Mario's proof I use the observation that 
 
    \begin{align*}
 
    R^{(n)}(p) &= \frac{1}{n}\sum_{b\in\{0,1,1'\}^{n}} \rho_b \; R_{\bar{b}}(p)\\
 
    &= \frac{1}{n}\sum_{C\subseteq [n]}\sum_{f\in\{0,1'\}^{|C|}} \rho_{C(f)} R_{C(f)}(p)\\
 
    &= \frac{1}{n}\sum_{C\subseteq [n]}\sum_{f\in\{0,1'\}^{|C|}}\sum_{a\in\{\!\text{ever},\text{ never}\}^{n-|C|}} \rho_{C(f)} R^{{(C,a)}}_{C(f)}(p)P_{C(f)}(A^{(C,a)})\\
 
    &= \frac{1}{n}\sum_{C\subseteq [n]}\sum_{a\in\{\!\text{ever},\text{ never}\}^{n-|C|}} \sum_{f\in\{0,1'\}^{|C|}} \rho_{C(f)} R^{{(C,a)}}_{C(f)}(p)P_{C(f)}(A^{(C,a)}), 
 
    \end{align*}
 
    where we denote by $C\subseteq[n]$ a slot configuration, whereas $C(f)$ denotes the slots of $C$ filled with the particles described by $f$, while all other locations in $[n]\setminus C$ are set to $1$. 
 
    When we write $R_{C(f)}$ we mean $R_{C(\bar{f})}$, i.e., we replace the $1'$-s with $1$-s. Since the notation is already heavy, we drop the bar from $f$, as it is clear from the context. Finally, by $P_{C(f)}(A^{(C,a)})$ we denote the probability of the event $A^{(C,a)}$ when the procedure is started from $C(f)$.
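
    As a small (made-up) example of this notation with $n=5$: for $C=\{1,3\}$ and $f=(0,1')$ we get $C(f)=(0,1,1',1,1)$, and $R_{C(f)}$ then stands for $R_{(0,1,1,1,1)}$, i.e., with the $1'$ replaced by $1$.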
 
    
 
    As in the definition above, for $j\in[n-|C|]$ let $i_j$ denote the $j$-th index in $[n]\setminus C$.
 
    Suppose that $a$ is such that there are two indices $j_1\neq j_2$ such that 
 
    $a_{j_1}=\text{never}=a_{j_2}$, and moreover the sets $\{i_{j_1}+1,\ldots, i_{j_2}-1\}$ and $\{i_{j_2}+1,\ldots, i_{j_1}-1\}$ partition $C$ non-trivially; we denote by $C_l$, $C_r$ the corresponding parts. 
 
    I wanted to prove that
 
    \begin{equation}\label{eq:conditionalCancellation}
 
		\sum_{f\in\{0,1'\}^{|C|}} \rho_{C(f)} R^{{(C,a)}}_{C(f)}(p)=0,
 
    \end{equation}    
 
    based on the observation that for all $f\in\{0,1'\}^{|C|}$ we have 
 
    that 
 
    \begin{equation}\label{eq:keyIndependce}
 
    R^{{(C,a)}}_{C(f)}(p)=R^{{(C_l,a_l)}}_{C_l(f_l)}(p)+R^{{(C_r,a_r)}}_{C_r(f_r)}(p),
 
    \end{equation}
 
    where $f_l\in\{0,1'\}^{|C_l|}$ is defined by taking only the indices (and values) of $f$ corresponding to vertices of $C_l$, and $a_l\in\{\text{ever},\text{ never}\}^{n-|C_l|}$ is defined such that $a$ and $a_l$ agree on the vertices where $a$ is defined, while on the vertices where $a$ is not defined, i.e., the vertices of $C_r$, we set $a_l$ to ``never". We define $f_r$ and $a_r$ analogously. 
 
    
 
    The reason why \eqref{eq:keyIndependce} holds is that, as before, the two halves of the cycle are conditionally independent, because neither $i_{j_1}$ nor $i_{j_2}$ can become $0$. To be more precise, each resample sequence $\left(C(f)\rightarrow \mathbf{1} \right)\in A^{(C,a)}$ can be uniquely decomposed into resample sequences $\left(C_l(f_l)\rightarrow \mathbf{1}\right)\in A^{(C_l,a_l)}$ and $\left(C_r(f_r)\rightarrow \mathbf{1}\right)\in A^{(C_r,a_r)}$. The set of resample sequences $r$ which have decomposition $(r_l,r_r)$ has total probability equal to the product of the probabilities of $r_l$ and $r_r$, as shown in the proof of Claim~\ref{claim:expectationsum}. This proves that the set of all resample sequences $\left(C(f)\rightarrow \mathbf{1}\right)\in A^{(C,a)}$ can, for our purposes, be viewed as a product set with a product probability distribution. Therefore the halves can be treated independently, and so the expectation values just add up. 
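
    Spelled out (a short sketch of the step just described, writing $|r|$ for the length of a resample sequence $r$): under the product decomposition $r\mapsto(r_l,r_r)$ we have $|r|=|r_l|+|r_r|$, and since the conditioned distribution is a product distribution, linearity of expectation gives
    $$\mathbb{E}\big[|r|\;\big|\;A^{(C,a)}\big]=\mathbb{E}\big[|r_l|\;\big|\;A^{(C_l,a_l)}\big]+\mathbb{E}\big[|r_r|\;\big|\;A^{(C_r,a_r)}\big],$$
    which is \eqref{eq:keyIndependce}.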
 
    
 
    From here I wanted to mimic Mario's proof:
 
    \begin{align*}
 
    \sum_{f\in\{0,1'\}^{|C|}} \rho_{C(f)} R^{{(C,a)}}_{C(f)}(p)&=
 
    \sum_{f_l\in\{0,1'\}^{|C_l|}} \sum_{f_r\in\{0,1'\}^{|C_r|}}  (-1)^{|f_l|+|f_r|}p^{|C_l|+|C_r|} \left( R^{{(C_l,a_l)}}_{C_l(f_l)}(p) + R^{{(C_r,a_r)}}_{C_r(f_r)}(p) \right)\\
 
    &= p^{|C|}\sum_{f_l\in\{0,1'\}^{|C_l|}} (-1)^{|f_l|} R^{{(C_l,a_l)}}_{C_l(f_l)}(p) \sum_{f_r\in\{0,1'\}^{|C_r|}} (-1)^{|f_r|} \\
 
    &\quad + p^{|C|}\sum_{f_r\in\{0,1'\}^{|C_r|}} (-1)^{|f_r|} R^{{(C_r,a_r)}}_{C_r(f_r)}(p) \sum_{f_l\in\{0,1'\}^{|C_l|}} (-1)^{|f_l|} \\
 
    &= 0.
 
    \end{align*}
 
    The nasty issue, which I did not realise, is that the missing term $P_{C(f)}(A^{(C,a)})$ is non-constant: even though the event $A^{(C,a)}$ is independent of $f$, the probability $P_{C(f)}(A^{(C,a)})=P_{C_l(f_l)}(A^{(C_l,a_l)})\cdot P_{C_r(f_r)}(A^{(C_r,a_r)})$ is not, and so the above breaks down.
 
    
 
    Observe that if \eqref{eq:conditionalCancellation} held for configurations that cut the slot configuration into two halves, it would imply that the only non-zero contribution comes from pairs $(C,a)$ such that $C\cup\{i_j:a_j=\text{ever}\}$ is connected. This is because if this set is not connected, then either we can cut $C$ into two halves non-trivially along ``never" vertices, or there is an island of $\text{ever}$ vertices separated from all slots, and therefore from all $0$-s. The latter case has zero contribution, since we cannot set these indices to $0$ without reaching them by some resamplings, thereby building a path of $0$-s leading there.
 
    
 
    If $|C\cup\{i_j:a_j=\text{ever}\}|\geq k+1$ then every contribution has a power of at least $k+1$ in $p$, since $(C,a)$ requires the prior appearance of at least $k+1$ particles. If $n\geq k+1$ then every $(C,a)$ with $|C\cup\{i_j:a_j=\text{ever}\}|\leq k$ appears exactly $n$ times, since $(C,a)$ cannot be translationally invariant. Moreover, the quantity $R^{{(C,a)}}_{C(f)}(p)$ is independent of $n$ due to the conditioning that every resampling happens on a connected component of length at most $k<n$. This would prove that $a_k^{(n)}$ is constant for $n\geq k+1$. The same arguments would directly translate to the torus and other translationally invariant objects, so we could go higher dimensional as Mario suggested.
 
    
 
    Questions:
 
    \begin{itemize}
 
    	\item Is it possible to somehow fix this proof?
 
    	\item In view of this (false) proof, can we better characterise $a_k^{(k+1)}$?
 
    	\item Why did Mario's and Tom's simulation show that for fixed $C$ the contribution coefficients have constant sign? Is it relevant for proving \ref{it:pos}-\ref{it:geq}?
 
    	\item Can we prove the conjectured formula for $a_k^{(3)}$?		
 
    \end{itemize} 
 

	
 
\begin{comment}
 
    \subsection{Sketch of the proof of the linear bound \ref{it:const}}
 
    Let us interpret $[n]$ as the vertices of a length-$n$ cycle, and interpret operations on vertices mod $n$ s.t. $n+1\equiv 1$ and $1-1\equiv n$.
 
    \begin{definition}[Resample sequences]
 
		A sequence of indices $(r_\ell)=(r_1,r_2,\ldots,r_k)\in[n]^k$ is called a resample sequence if our procedure performs $k$ consecutive resamplings, where the first resampling of the procedure resamples around the midpoint $r_1$, the second around $r_2$, and so on. Let $RS(k)$ denote the set of length-$k$ resample sequences, and let $RS=\cup_{k\in\mathbb{N}}RS(k)$.
 
    \end{definition}
 
    \begin{definition}[Constrained resample sequence]\label{def:constrainedRes}
 
    	Let $C\subseteq[n]$ denote a slot configuration, and let $a\in\{\text{res},\neg\text{res}\}^{n-|C|}$, where the elements correspond to labels ``resampled" vs. ``not resampled" respectively. 
 
    	For $j\in[n-|C|]$ let $i_j$ denote the $j$-th index in $[n]\setminus C$.
 
		We define the set $A^{(C,a)}\subseteq RS$ as the set of resample sequences $(r_\ell)$ such that for all $j$ with $a_j=\text{res}$ the index $i_j$ appears in $(r_\ell)$, while for all $j'$ with $a_{j'}=\neg\text{res}$ the index $i_{j'}$ never appears in $(r_\ell)$. 
 
    \end{definition}    
 
    \begin{definition}[Expected number of resamples]
 
		For $b\in\{0,1\}^n$ we define 
 
		$$R_b=\mathbb{E}[\#\{\text{resamplings when started from initial state }b\}],$$
 
		and for $(C,a)$ as in the previous definition we also define
 
		$$R^{(C,a)}_b=\mathbb{E}[\#\{\text{resamplings }\in A^{(C,a)} \text{ when started from initial state }b\}].$$
 
		Here we mean by the latter that after each resampling we check whether the sequence of resamplings so far is in $A^{(C,a)}$; if so, we count it, otherwise we do not.
 
    \end{definition}     
 
    
 
    As in Mario's proof I use the observation that 
 
    \begin{align*}