\documentclass[a4paper,11pt,english,final]{article}
\pdfoutput=1

\usepackage[utf8]{inputenc}
\usepackage[english]{babel}
\usepackage{fullpage}

\usepackage{graphics}
\usepackage{diagbox}
\usepackage[table]{xcolor}% http://ctan.org/pkg/xcolor
\usepackage{graphicx}
\usepackage{caption}
\captionsetup{compatibility=false}
\graphicspath{{./}}


\usepackage{tikz}
\usepackage{amssymb}
\usepackage{mathtools}
\usepackage{bm}
\usepackage{bbm}
%\usepackage{bbold}
\usepackage{verbatim}

%for correcting large brackets spacing
\usepackage{mleftright}\mleftright

\usepackage{algorithm}
\usepackage{algorithmic}
\usepackage{enumitem}
\usepackage{float}

%\usepackage{titling}

%\setlength{\droptitle}{-5mm}  

%\usepackage{MnSymbol}
\newcommand{\cupdot}{\overset{.}{\cup}}
\newcommand{\pvp}{\vec{p}{\kern 0.45mm}'}

\DeclarePairedDelimiter\bra{\langle}{\rvert}
\DeclarePairedDelimiter\ket{\lvert}{\rangle}
\DeclarePairedDelimiterX\braket[2]{\langle}{\rangle}{#1 \delimsize\vert #2}
\newcommand{\underflow}[2]{\underset{\kern-60mm \overbrace{#1} \kern-60mm}{#2}}

\def\Ind(#1){{{\tt Ind}({#1})}}
\def\Id{\mathrm{Id}}
\def\Pr{\mathrm{Pr}}
\def\Tr{\mathrm{Tr}}
\def\im{\mathrm{im}}
\newcommand{\bOt}[1]{\widetilde{\mathcal O}\left(#1\right)}

\newcommand{\QMAo}{\textsf{QMA$_1$}}
\newcommand{\BQP}{\textsf{BQP}}
\newcommand{\NP}{\textsf{NP}}
\newcommand{\SharpP}{\textsf{\# P}}

\newcommand{\diam}[1]{\mathcal{D}\left(#1\right)}
\newcommand{\paths}[1]{\mathcal{P}\left(#1\to\mathbf{1}\right)}
\newcommand{\maxgap}[1]{\mathrm{maxgap}\left(#1\right)}
\newcommand{\gaps}[1]{#1_{\mathrm{gaps}}}

\long\def\ignore#1{}

\newtheorem{theorem}{Theorem}
\newtheorem{corollary}[theorem]{Corollary}%[theorem]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{prop}[theorem]{Proposition}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{claim}[theorem]{Claim}
\newtheorem{remark}[theorem]{Remark}

\newenvironment{proof}
{\noindent {\bf Proof. }}
{{\hfill $\Box$}\\	\smallskip}

\usepackage[final]{hyperref}
\hypersetup{
	colorlinks = true,
	allcolors = {blue},
}
\usepackage{ifpdf} 
\ifpdf
	\typeout{^^J *** PDF mode *** } 
%	\input{myBiblatex.tex}
%	\addbibresource{LLL.bib}	
%\else
%	\typeout{^^J *** DVI mode ***} 
%	\hypersetup{breaklinks = true}
%	\usepackage[quadpoints=false]{hypdvips}
	\let\oldthebibliography=\thebibliography
	\let\endoldthebibliography=\endthebibliography
	\renewenvironment{thebibliography}[1]{%
		\begin{oldthebibliography}{#1}%
			\setlength{\itemsep}{-.3ex}%
	}%
	{%
		\end{oldthebibliography}%
	}
\fi 

%opening
\title{Criticality of resampling on the cycle / in the evolution model}
%\author{?\thanks{QuSoft, CWI and University of Amsterdam, the Netherlands. \texttt{?@cwi.nl} }
	%\and
	%?%
%}
%\thanksmarkseries{arabic}
%\renewcommand{\thefootnote}{\fnsymbol{footnote}}
%\date{\vspace{-12mm}}

\begin{document}
	
	\maketitle

	\begin{abstract}
		The model we consider is the following~\cite{ResampleLimit}: We have a cycle of length $n\geq 3$. Initially each site is set to $0$ or $1$ independently, with probability $p$ of being $0$. After that, in each step we select a uniformly random site with value $0$ and resample it together with its two neighbours, assigning $0$ to each of the three sites independently with probability $p$, just as initially. The question we try to answer is: what is the expected number of resamplings performed before reaching the all-$1$ state?
		
		We present strong evidence for a remarkable critical behaviour. We conjecture that there exists some $p_c\approx0.62$ such that for all $p\in[0,p_c)$ the expected number of resamplings is bounded by a $p$-dependent constant times $n$, whereas for all $p\in(p_c,1]$ the expected number of resamplings grows exponentially in $n$.
	\end{abstract}
	%Let $R(n)$ denote this quantity for a length $n\geq 3$ cycle.
	
	We can think about the resampling procedure as a Markov chain. To describe the corresponding matrix we introduce some notation. For $b\in\{0,1\}^n$ let $r(b,i,(x_{-1},x_0,x_1))$ denote the bit string which differs from $b$ by replacing the bits at indices $i-1$, $i$ and $i+1$ with the values in $x$, interpreting the indices $\!\!\!\!\mod n$. Also, for $x\in\{0,1\}^k$ let $p(x)=p((x_1,\ldots,x_k))=\prod_{i=1}^{k}p^{(1-x_i)}(1-p)^{x_i}$. Now we can describe the matrix of the Markov chain. We use row vectors for the probability distributions, indexed by bitstrings of length $n$. Let $M_{(n)}$ denote the matrix of the leaking Markov chain:
	$$
		M_{(n)}=\sum_{b\in\{0,1\}^n\setminus{\{1\}^n}}\sum_{i\in[n]:b_i=0}\sum_{x\in\{0,1\}^3}E_{(b,r(b,i,x))}\frac{p(x)}{n-|b|},
	$$
	where $E_{(i,j)}$ denotes the matrix that is all $0$ except $1$ at the $(i,j)$th entry.

	We want to calculate the average number of resamplings $R^{(n)}$, which we define as the expected number of resamplings divided by $n$. For this let $\rho,\mathbbm{1}\in[0,1]^{2^n}$ be indexed with elements of $\{0,1\}^n$ such that $\rho_b=p(b)$ and $\mathbbm{1}_b=1$. Then we use that the expected number of resamplings is just the hitting time of the Markov chain:
	\begin{align*}
		R^{(n)}&:=\mathbb{E}(\#\{\text{resamplings before termination}\})/n\\
		&=\sum_{k=1}^{\infty}P(\text{at least } k \text{ resamplings are performed})/n\\
		&=\sum_{k=1}^{\infty}\rho M_{(n)}^k \mathbbm{1}/n\\
		&=:\sum_{k=0}^{\infty}a^{(n)}_k p^k
	\end{align*}
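
	For small $n$ this hitting-time formula can be evaluated directly with a computer. The following is a minimal numerical sketch (Python with NumPy; the function name \texttt{R} is ours and hypothetical): it builds $M_{(n)}$ entry by entry from the definition above and evaluates $\sum_{k\geq1}\rho M_{(n)}^k \mathbbm{1}/n = \rho M_{(n)}(\Id-M_{(n)})^{-1}\mathbbm{1}/n$.
\begin{verbatim}
import numpy as np
from itertools import product

def R(n, p):
    """R^(n)(p) from the hitting time: rho M (I - M)^{-1} 1 / n."""
    states = list(product((0, 1), repeat=n))
    idx = {s: i for i, s in enumerate(states)}
    M = np.zeros((2**n, 2**n))
    for s in states:
        zeros = [i for i in range(n) if s[i] == 0]
        if not zeros:
            continue                      # the all-ones row stays zero: the chain leaks
        for i in zeros:                   # resampled site, uniform among the zeros
            for x in product((0, 1), repeat=3):
                t = list(s)
                for off, v in zip((-1, 0, 1), x):
                    t[(i + off) % n] = v
                w = 1.0
                for v in x:               # p(x) = p^{#zeros in x} (1-p)^{#ones in x}
                    w *= p if v == 0 else 1 - p
                M[idx[s], idx[tuple(t)]] += w / len(zeros)
    rho = np.array([p**s.count(0) * (1 - p)**s.count(1) for s in states])
    return rho @ M @ np.linalg.solve(np.eye(2**n) - M, np.ones(2**n)) / n

print(R(3, 0.2))   # ~0.31771, matching the closed form for R^(3)(p) given below
\end{verbatim}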

	\begin{table}[!htb]
	\centering
	\caption{Table of the coefficients $a^{(n)}_k$}
	\label{tab:coeffs}
	\resizebox{\columnwidth}{!}{%
		\begin{tabular}{c|ccccccccccccccccccccc}
			\backslashbox[10mm]{$n$}{$k$} & 0 & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9 & 10 & 11 & 12 & 13 & 14 & 15 & 16 & 17 & 18 & 19 & 20 \\		\hline
			3 &	0 & 1 & \cellcolor{blue!25}2 & 3+1/3 & 5.00 & 7.00 & 9.33 & 12.00 & 15.00 & 18.33 & 22.00 & 26.00 & 30.33 & 35.00 & 40.00 & 45.333 & 51.000 & 57.000 & 63.333 & 70.000 & 77.000 \\
			4 &	0 & 1 & 2 & \cellcolor{blue!25}3+2/3 & 6.16 & 9.66 & 14.3 & 20.33 & 27.83 & 37.00 & 48.00 & 61.00 & 76.16 & 93.66 & 113.6 & 136.33 & 161.83 & 190.33 & 222.00 & 257.00 & 295.50 \\
			5 &	0 & 1 & 2 & 3+2/3 & \cellcolor{blue!25}6.44 & 10.8 & 17.3 & 26.65 & 39.43 & 56.48 & 78.65 & 106.9 & 142.2 & 185.8 & 238.7 & 302.41 & 378.05 & 467.13 & 571.14 & 691.69 & 830.44 \\
			6 &	0 & 1 & 2 & 3+2/3 & 6.44 & \cellcolor{blue!25}11.0 & 18.5 & 30.02 & 47.10 & 71.68 & 106.0 & 152.9 & 215.4 & 297.4 & 403.1 & 537.21 & 705.25 & 913.31 & 1168.2 & 1477.4 & 1849.1 \\
			7 &	0 & 1 & 2 & 3+2/3 & 6.44 & 11.0 & \cellcolor{blue!25}18.7 & 31.21 & 50.83 & 80.80 & 125.3 & 189.7 & 280.8 & 407.0 & 578.6 & 808.13 & 1110.2 & 1502.6 & 2005.6 & 2643.2 & 3443.1 \\
			8 &	0 & 1 & 2 & 3+2/3 & 6.44 & 11.0 & 18.7 & \cellcolor{blue!25}31.44 & 52.08 & 84.95 & 136.0 & 213.6 & 328.9 & 496.5 & 735.6 & 1070.7 & 1532.5 & 2159.5 & 2998.8 & 4108.1 & 5556.7 \\
			9 &	0 & 1 & 2 & 3+2/3 & 6.44 & 11.0 & 18.7 & 31.44 & \cellcolor{blue!25}52.30 & 86.27 & 140.7 & 226.3 & 358.4 & 558.4 & 855.4 & 1289.0 & 1911.5 & 2791.4 & 4017.2 & 5701.4 & 7985.9 \\
			10&	0 & 1 & 2 & 3+2/3 & 6.44 & 11.0 & 18.7 & 31.44 & 52.30 & \cellcolor{blue!25}86.49 & 142.1 & 231.6 & 373.4 & 594.8 & 934.4 & 1447.1 & 2209.0 & 3324.6 & 4934.8 & 7226.9 & 10447. \\
            \vdots \\
            15& 0 & 1 & 2 & 3+2/3 & 6.44 & 11.08 & 18.76 & 31.45 & 52.31 & 86.49 & 142.33 & 233.31 & 381.17 & 621.02 & \cellcolor{blue!25}1009.38 & 1637.13 & % 2650.74 & 4285.68 & 6913.55 & 11171.2 & 18052.2
        \end{tabular}
	}
	\end{table}

	We observe that this is a power series in $p$, and we discovered a very regular structure in it. It seems that for all $k\in\mathbb{N}$ the coefficient $a^{(n)}_k$ is the same for all $n>k$; we verified this conjecture by computer up to $n=14$. 
	\newpage
	\noindent Based on our calculations presented in Table~\ref{tab:coeffs} and Figure~\ref{fig:coeffs_conv_radius} we make the following conjectures:
	\begin{enumerate}[label=(\roman*)]
		\item $\forall k\in\mathbb{N}, \forall n\geq 3 : a^{(n)}_k\geq 0$	\label{it:pos}	
        (A simpler version: $\forall k>0: a_k^{(3)}=(k+1)(k+2)/6$)
		\item $\forall k\in\mathbb{N}, \forall n>m\geq 3 : a^{(n)}_k\geq a^{(m)}_k$ \label{it:geq}		
		\item $\forall k\in\mathbb{N}, \forall n,m\geq \max(k,3) : a^{(n)}_k=a^{(m)}_k$ \label{it:const}		
  		\item $\exists p_c=\lim\limits_{k\rightarrow\infty}1\left/\sqrt[k]{a_{k}^{(k+1)}}\right.$ \label{it:lim}			
	\end{enumerate}
	We also conjecture that $p_c\approx0.61$, see Figure~\ref{fig:coeffs_conv_radius}.

	\begin{figure}[!htb]\centering
	\includegraphics[width=0.5\textwidth]{coeffs_conv_radius.pdf}
	%\includegraphics[width=0.5\textwidth]{log_coeffs.pdf}	
	\caption{The quantity $1\left/\sqrt[k]{a_{k}^{(k+1)}}\right.$ plotted against $k$; conjecture \ref{it:lim} states that it converges to $p_c$.}
	\label{fig:coeffs_conv_radius}
	\end{figure}
    
    For reference, we also explicitly give formulas for $R^{(n)}(p)$ for small $n$. We also give them in terms of $q=1-p$ because they sometimes look nicer that way.
    \begin{align*}
    	R^{(3)}(p) &= \frac{1-(1-p)^3}{3(1-p)^3}
        			= \frac{1-q^3}{3q^3}\\
    	R^{(4)}(p) &= \frac{p(6-12p+10p^2-3p^3)}{6(1-p)^4}
                    = \frac{(1-q)(1+q+q^2+3q^3)}{6q^4}\\
        R^{(5)}(p) &= \frac{p(90-300p+435p^2-325p^3+136p^4-36p^5+6p^6)}{15(1-p)^5(6-2p+p^2)}\\
                   &= \frac{(1-q)(6+5q+6q^2+21q^3+46q^4+6q^6)}{15q^5(5+q^2)}
    \end{align*}
    For $n=3$ the system becomes very simple because regardless of the current state, the probability of going to $111$ is always equal to $(1-p)^3$. Therefore the expected number of resamplings is simply the expectation of a geometric distribution. This gives the formula for $R^{(3)}(p)$ as shown above. Note that the $k$-th coefficient of the power series of a function $f(p)$ is given by $\frac{1}{k!}\left.\frac{d^k f}{dp^k}\right|_{p=0}$, i.e.\ the $k$-th derivative with respect to $p$ evaluated at $0$, divided by $k!$. For the function $R^{(3)}(p) = \frac{1}{3}\left((1-p)^{-3} - 1\right)$ this yields $a^{(3)}_k = (k+2)(k+1)/6$ for $k\geq 1$ and $a^{(3)}_0=0$.
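
    This coefficient extraction is easy to check with a computer algebra system. A minimal sketch (Python with SymPy; our own variable names) expands $R^{(3)}(p)$ and compares the coefficients with $(k+2)(k+1)/6$:
\begin{verbatim}
import sympy as sp

p = sp.symbols('p')
R3 = ((1 - p)**-3 - 1) / 3                      # closed form for R^(3)(p)
f = sp.expand(sp.series(R3, p, 0, 8).removeO())
coeffs = [f.coeff(p, k) for k in range(8)]
print(coeffs)                                   # [0, 1, 2, 10/3, 5, 7, 28/3, 12]
assert all(coeffs[k] == sp.Rational((k + 2) * (k + 1), 6) for k in range(1, 8))
\end{verbatim}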

    We can do the same for $n=4,5$, which gives, for $k\geq 1$ (with Mathematica):
    \begin{align*}
        a^{(3)}_k &= \frac{(k+2)(k+1)}{6}\\
        a^{(4)}_k &= \frac{1}{6}\left(2+\frac{(k+3)(k+2)(k+1)}{6}\right)\\
        a^{(5)}_k &= \frac{1}{15}\left(\frac{(k+4)(k+3)(k+2)(k+1)}{20} - \frac{(k+3)(k+2)(k+1)}{30} - \frac{(k+2)(k+1)}{50} + \frac{76(k+1)}{25}\right.\\
                  &  \qquad\quad \left. + \frac{626}{125} - \frac{4}{250}
                  \left( \left(\frac{1+i\sqrt{5}}{6}\right)^k(94-25\sqrt{5}i)+\left(\frac{1-i\sqrt{5}}{6}\right)^k(94+25\sqrt{5}i) \right)
                  \right)
    \end{align*}
    and from $n=6$ onwards the expressions become complicated: Mathematica can only give expressions involving roots of polynomials.
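
    The closed forms for $a^{(3)}_k$ and $a^{(4)}_k$ are easy to check against Table~\ref{tab:coeffs}; a small sketch in exact arithmetic (our own helper names):
\begin{verbatim}
from fractions import Fraction

def a3(k):   # (k+2)(k+1)/6
    return Fraction((k + 2) * (k + 1), 6)

def a4(k):   # (2 + (k+3)(k+2)(k+1)/6)/6
    return Fraction(2, 6) + Fraction((k + 3) * (k + 2) * (k + 1), 36)

for k in (3, 4, 12, 20):
    print(k, float(a3(k)), float(a4(k)))
# k=3: 3.33/3.67   k=4: 5.0/6.17   k=12: 30.33/76.17   k=20: 77.0/295.5
# matching the rows n=3 and n=4 of the table
\end{verbatim}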

    ~

	If statements \ref{it:pos}-\ref{it:lim} are true, then we can define the function 
	$$R^{(\infty)}(p):=\sum_{k=0}^{\infty}a^{(k+1)}_k p^k,$$
	which would then have radius of convergence $p_c$; moreover, it would satisfy for all $p\in[0,p_c)$ that $R^{(n)}(p)\leq R^{(\infty)}(p)$ and $\lim\limits_{n\rightarrow\infty}R^{(n)}(p)=R^{(\infty)}(p)$.
	It would also imply that for all $p\in(p_c,1]$ we get $R^{(n)}(p)=\Omega\left(\left(\frac{p}{p_c}\right)^{n/2}\right)$.
	This would then imply a very strong critical behaviour: for all $p\in[0,p_c)$ the expected number of resamplings is bounded by a constant $R^{(\infty)}(p)$ times $n$, whereas for all $p\in(p_c,1]$ the expected number of resamplings grows exponentially in $n$.
	
	Now we turn to the possible proof techniques for justifying the conjectures \ref{it:pos}-\ref{it:lim}.
	First note that $\forall n\geq 3$ we have $a^{(n)}_0=0$, since for $p=0$ the expected number of resamplings is $0$.
	Also note that the expected number of initial $0$s is $p\cdot n$. If $p\ll1/n$, then with high probability there is at most a single $0$ initially, and the first resampling fixes it with probability $1-\mathcal{O}(p)$, so the linear term in the expected number of resamplings is $np$; therefore $\forall n\geq 3$, $a^{(n)}_1=1$. 
	
	For the second order coefficient it is a bit harder to argue, but one can use the structure of $M_{(n)}$ to come up with a combinatorial proof. To see this, first consider the standard basis vector $e_b$ whose single non-zero entry is a $1$ at the index given by bitstring $b$.
	Observe that $e_bM_{(n)}$ is a vector with polynomial entries such that the only indices $b'$ with a non-zero constant term must have $|b'|\geq|b|+1$, since if a resampling produces a $0$ entry it also introduces a factor of $p$. Using this observation one can see that the second order term can be read off from $\rho M_{(n)}\mathbbm{1}+\rho M_{(n)}^2\mathbbm{1}$,
	which happens to be $2n$. (Note that this is already a bit surprising: from the steps of the combinatorial proof one would expect $n^2$ terms to appear, but they happen to cancel each other.) Using similar logic one should be able to prove the claim for $k=3$, but for larger $k$ it seems to quickly get more involved.
	
	The question is how could we prove the statements \ref{it:pos}-\ref{it:lim} for a general $k$?
	
    \appendix
    
    \section{Lower bound on $R^{(n)}(p)$}
    We show that \ref{it:pos} and \ref{it:lim} imply that for any fixed $p>p_c$ we have $R^{(n)}(p)\in\Omega\left(\left(\frac{p}{p_c}\right)^{n/2}\right)$. 
    
    By definition of $p_c = \lim_{k\to\infty} 1\left/ \sqrt[k]{a_k^{(k+1)}} \right.$ we know that for any $\epsilon>0$ there exists a $k_\epsilon$ such that for all $k\geq k_\epsilon$ we have $a_k^{(k+1)}\geq (p_c + \epsilon)^{-k}$. Now note that $R^{(n)}(p) \geq a_{n-1}^{(n)}p^{n-1}$ since all terms of the power series are positive, so for $n-1\geq k_\epsilon$ we have $R^{(n)}(p)\geq (p_c +\epsilon)^{-(n-1)}p^{n-1}$. Note that
    \begin{align*}
    	R^{(n)}(p)\geq(p_c+\epsilon)^{-(n-1)}p^{n-1}=\left(\frac{p}{p_c+\epsilon}\right)^{n-1} \geq \left(\frac{p}{p_c}\right)^{\frac{n-1}{2}},
    \end{align*}
    where the last inequality holds for $\epsilon\leq\sqrt{p_c}(\sqrt{p}-\sqrt{p_c})$.
    
    \section{Calculating the coefficients $a_k^{(n)}$}
    Let $\rho'\in\mathbb{R}[p]^{2^n}$ be a vector of polynomials, and let $\text{rank}(\rho')$ be defined in the following way: 
    $$\text{rank}(\rho'):=\min_{b\in\{0,1\}^n}\left( |b|+ \text{maximal } k\in\mathbb{N} \text{ such that } p^k \text{ divides } \rho'_b\right).$$
	Clearly for any $\rho'$ we have that $\text{rank}(\rho' M_{(n)})\geq \text{rank}(\rho') + 1$. Another observation is that all elements of $\rho'$ are divisible by $p^{\text{rank}(\rho')-n}$.
    We observe that for the initial $\rho$ we have $\text{rank}(\rho)=n$, therefore $\text{rank}(\rho M_{(n)}^k)\geq n+k$, and so $\rho M_{(n)}^k \mathbbm{1}$ is divisible by $p^{k}$. This implies that $a_k^{(n)}$ can be calculated by only looking at $\rho M_{(n)}\mathbbm{1}, \ldots, \rho M_{(n)}^k\mathbbm{1}$.
    
\newpage
\section{Quasiprobability method}
Let us first introduce notation for paths of the Markov Chain
\begin{definition}[Paths]
    We define a \emph{path} of the Markov Chain as a sequence of states and resampling choices $\xi=((b_0,r_0),(b_1,r_1),\ldots,(b_{k-1},r_{k-1}),b_k)$ with $(b_t,r_t)\in\{0,1\}^n\times[n]$, indicating that at time $t$ the Markov Chain was in state $b_t\in\{0,1\}^n$ and then resampled site $r_t$. We denote by $|\xi|$ the length $k$ of such a path, i.e.\ the number of resamples that happened, and by $\mathbb{P}[\xi]$ the probability associated to this path.
    We denote by $\paths{b}$ the set of all valid paths $\xi$ that start in state $b$ and end in state $\mathbf{1} := 1^n$.
\end{definition}
We can write the expected number of resamplings per site $R^{(n)}(p)$ as
\begin{align}
    R^{(n)}(p) &= \frac{1}{n}\sum_{b\in\{0,1\}^{n}} \rho_b \; R_b(p) \label{eq:originalsum} ,
\end{align}
where $R_b(p)$ is the expected number of resamplings when starting from configuration $b$
\begin{align*}
	R_b(p) &= \sum_{\xi \in \paths{b}} \mathbb{P}[\xi] \cdot |\xi| .
\end{align*}

We consider $R^{(n)}(p)$ as a power series in $p$ and show that many terms in (\ref{eq:originalsum}) cancel out if we only consider the series up to some finite order $p^k$. The main idea is that if a path samples a $0$ then $\mathbb{P}[\xi]$ gains a factor $p$, so paths that contribute to the coefficient of $p^k$ cannot be arbitrarily long.\\

To see this, we split the sum in (\ref{eq:originalsum}) into parts that will later cancel out. The initial probabilities $\rho_b$ contain a factor $p$ for every $0$ and a factor $(1-p)$ for every $1$. When expanding this product of $p$s and $(1-p)$s, each $1$ contributes two terms, $1$ and $(-p)$, whereas each $0$ only gives a factor $p$. We want to expand this product explicitly and therefore we no longer consider bitstrings $b\in\{0,1\}^n$ but strings $b\in\{0,1,1'\}^n$. We view this as follows: every site can have one of the values $\{0,1,1'\}$ with `probabilities' $p$, $1$ and $-p$ respectively. A configuration $b=101'1'101'$ now has probability $\rho_{b} = 1\cdot p\cdot(-p)\cdot(-p)\cdot 1\cdot p\cdot(-p) = -p^5$ in the starting state $\rho$. It should not be hard to see that we have
\begin{align*}
    R^{(n)}(p) &= \frac{1}{n}\sum_{b\in\{0,1,1'\}^{n}} \rho_{b} \; R_{\bar{b}}(p) ,
\end{align*}
where $\bar{b}$ is the bitstring obtained by changing every $1'$ in it back to a $1$. It is simply the same sum as (\ref{eq:originalsum}) but now every factor $(1-p)$ is explicitly split into $1$ and $(-p)$.
   
Some terminology: for any configuration we call a $0$ a \emph{particle} (probability $p$) and a $1'$ an \emph{antiparticle} (probability $-p$). We use the word \emph{slot} for a position that is occupied by either a particle or an antiparticle ($0$ or $1'$). In the initial state, the probability of a configuration is given by $\pm p^{\#\mathrm{slots}}$, where the $\pm$ sign depends on the parity of the number of antiparticles.
    
We can further rewrite the sum over $b\in\{0,1,1'\}^n$ as a sum over all slot configurations $C\subseteq[n]$ and over all possible fillings of these slots.
\begin{align*}
	R^{(n)}(p) &= \frac{1}{n} \sum_{C\subseteq[n]} \sum_{f\in\{0,1'\}^{|C|}} \rho_{C(f)} R_{C(f)} ,
\end{align*}
where $C(f)\in\{0,1,1'\}^n$ denotes a configuration with slots on the sites $C$ filled with (anti)particles described by $f$. The non-slot positions are filled with $1$s.

\begin{definition}[Diameter and gaps] \label{def:diameter} \label{def:gaps}
    For a subset $C\subseteq[n]$, we define the \emph{diameter} $\diam{C}$ to be the minimum size of an interval $I$ containing $C$. Here we consider both $C$ and the interval modulo $n$. In other words, $\diam{C} = \min\{ j \vert \exists i : C\subseteq [i,i+j-1] \}$. We define the \emph{gaps} of $C$ as $I\setminus C$ and denote this by $\gaps{C}$. Note that $\diam{C} = |C| + |\gaps{C}|$. Define $\maxgap{C}$ as the size of the largest connected component of $\gaps{C}$. Figure \ref{fig:diametergap} illustrates these concepts.
\end{definition}
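
Both quantities are easy to compute by brute force; the following small sketch (our own hypothetical helper) reproduces the example of Figure \ref{fig:diametergap}:
\begin{verbatim}
def diameter_stats(C, n):
    """diam, gaps and maxgap of C on the length-n cycle, by brute force."""
    best = None
    for i in range(n):
        for j in range(1, n + 1):             # candidate interval [i, i+j-1] mod n
            I = {(i + t) % n for t in range(j)}
            if set(C) <= I and (best is None or j < best[0]):
                best = (j, I)
    diam, I = best
    gaps = I - set(C)
    maxgap = 0
    for g in gaps:                            # largest run of consecutive gap sites
        run = 0
        while (g + run) % n in gaps:
            run += 1
        maxgap = max(maxgap, run)
    return diam, sorted(gaps), maxgap

print(diameter_stats({1, 2, 4, 7, 9}, 20))    # (9, [3, 5, 6, 8], 2)
\end{verbatim}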
\begin{figure}
	\begin{center}
    	\includegraphics{diagram_gap.pdf}
    \end{center}
    \caption{\label{fig:diametergap} Illustration of Definition \ref{def:diameter}. A set $C=\{1,2,4,7,9\}\subseteq[n]$ consisting of 5 positions is shown by the red dots. The smallest interval containing $C$ is $[1,9]$, so the diameter is $\diam{C}=9$. The blue squares denote the set $\gaps{C} = \{3,5,6,8\}$. The dotted line at the top depicts the rest of the circle which may be much larger. The largest gap of $C$ is $\maxgap{C}=2$ which is the largest connected component of $\gaps{C}$.}
\end{figure}

\begin{claim}[Strong cancellation claim] \label{claim:strongcancel}
	The lowest order term in
    \begin{align*}
        \sum_{f\in\{0,1'\}^{|C|}} \rho_{C(f)} R_{C(f)} ,
    \end{align*}
	is $p^{\diam{C}}$ when $n$ is large enough. All lower order terms cancel out.
\end{claim}

Example: for $C_0=\{1,2,4,7,9\}$ (the configuration shown in Figure \ref{fig:diametergap}) we computed the quantity up to order $p^{20}$ in an infinite system:
\begin{align*}
	\sum_{f\in\{0,1'\}^{|C_0|}} \rho_{C_0(f)} R_{C_0(f)} &= 0.0240278 p^{9} + 0.235129 p^{10} + 1.24067 p^{11} + 4.71825 p^{12} \\
    &\quad + 14.5555 p^{13} + 38.8307 p^{14} + 93.2179 p^{15} + 206.837 p^{16}\\
    &\quad + 432.302 p^{17} + 862.926 p^{18} + 1662.05 p^{19} + 3112.9 p^{20} + \mathcal{O}(p^{21})
\end{align*}
and indeed the lowest order is $\diam{C_0}=9$.

~

A weaker version of the claim is that if $C$ contains a gap of size $k$, then the sum is zero up to and including order $p^{|C|+k-1}$.
\begin{claim}[Weak cancellation claim] \label{claim:weakcancel}
	For $C\subseteq[n]$ a configuration of slot positions, the lowest order term in
    \begin{align*}
        \sum_{f\in\{0,1'\}^{|C|}} \rho_{C(f)} R_{C(f)} ,
    \end{align*}
    is at least $p^{|C|+\maxgap{C}}$ when $n$ is large enough. All lower order terms cancel out.
\end{claim}
This weaker version would imply a variant of \ref{it:const} with the condition $n,m\geq\mathcal{O}(k^2)$ in place of $n,m\geq k+1$.

\newpage
The reason that Claim \ref{claim:strongcancel} would prove \ref{it:const} is the following: to know the value of $a_k^{(n)}$ for any $n\geq k+1$, it is enough to look at configurations $C$ with diameter at most $k$, since larger configurations do not contribute to $a_k^{(n)}$.
For a starting state $b\in\{0,1\}^n$ that \emph{does} give a nonzero contribution, one can translate that same starting configuration to get $n$ configurations (including itself) that give the same contribution. (An exception is a starting state like $1010101010\ldots$, which has only two distinct translates, but we only have to consider configurations with small diameter, for which all $n$ translates are distinct.)
Therefore the coefficient in the expected number of resamplings is a multiple of $n$, which Andr\'as already divided out in the definition of $R^{(n)}(p)$. To show \ref{it:const} we argue that this is the \emph{only} dependency on $n$. This is because there are only finitely many (depending on $k$ but not on $n$) configurations in which the at most $k$ slots are close together, regardless of the value of $n$. So there are only finitely many nonzero contributions once the translation symmetry is taken out. For example, when considering all starting configurations with 5 slots one might think there are $\binom{n}{5}$ configurations to consider, which would be a dependency on $n$ (beyond the translation symmetry). But since most of these configurations have a diameter larger than $k$, they do not contribute to $a_k$. Only finitely many do, and that number does not depend on $n$.

~

Section \ref{sec:computerb} shows how to compute $R_b$ (this is not relevant for showing the claim) and the section after that shows how to prove the weaker claim.

\newpage
\subsection{Computation of $R_b$} \label{sec:computerb}

By $R_{101}$ we denote $R_b(p)$ for a $b$ that consists of only $1$s except for a single zero. We compute $R_{101}$ up to second order in $p$. This requires the following transitions.
\begin{align*}
    \framebox{$1 0 1$} &\to \framebox{$1 1 1$} & (1-p)^3 = 1-3p+3p^2-p^3\\
    \hline
    \framebox{$1 0 1$} &\to
        \begin{cases}
            \framebox{$0 1 1$}\\
            \framebox{$1 0 1$}\\
            \framebox{$1 1 0$}
        \end{cases}
        & 3p(1-p)^2 = 3p-6p^2+3p^3\\
    \hline
    \framebox{$1 0 1$} &\to \framebox{$0 1 0$} & p^2(1-p) = p^2-p^3\\
    \framebox{$1 0 1$} &\to
        \begin{cases}
            \framebox{$1 0 0$}\\
            \framebox{$0 0 1$}
        \end{cases}
        & 2p^2(1-p) = 2p^2 - 2p^3\\
    \hline
    \framebox{$1 0 1$} &\to \framebox{$0 0 0$} & p^3
\end{align*}
With this we can write a recursive formula for the expected number of resamples from $101$:
\begin{align*}
    R_{101} &= (1-3p+3p^2 - p^3)(1) + (3p -6p^2 +3p^3) (1+R_{101}) \\
            &\quad + (p^2 - p^3) (1+R_{10101}) + (2p^2-2p^3) (1+R_{1001}) \\
			&= 1 + 3 p + 7 p^2 + 14.6667 p^3 + 29 p^4 + 55.2222 p^5 + 102.444 p^6 + 186.36 p^7 \\
            &\quad + 333.906 p^8 + 590.997 p^9 + 1035.58 p^{10} + 1799.39 p^{11} + 3104.2 p^{12} \\
            &\quad+ 5322.18 p^{13} + 9075.83 p^{14} + 15403.6 p^{15} + 26033.4 p^{16} + 43833.5 p^{17} \\
            &\quad+ 73555.2 p^{18} + 123053 p^{19} + 205290 p^{20} + 341620 p^{21} + 567161 p^{22} \\
            &\quad+ 939693 p^{23} + 1.5537\cdot10^{6} p^{24} + 2.56158\cdot10^{6} p^{25} + \mathcal{O}(p^{26})
\end{align*}
where the recursion steps were done with a computer for an infinite line (or a circle where $n$ is assumed to be much larger than the largest power of $p$ considered).

Note: the second term in the first line uses that with probability $(3p-6p^2 + 3p^3)$ the state goes to $\framebox{$101$}$ (up to translation), and then the expected number of resamplings is $1+R_{101}$. Note that the actual term in the recursive formula should be
$$(3p-6p^2+3p^3)\cdot\left( \sum_{\xi\in\paths{101}} \mathbb{P}[\xi] \cdot \left( 1 + |\xi|\right) \right) = (3p-6p^2+3p^3)\left( p_\mathrm{tot} + R_{101} \right)$$
where $p_\mathrm{tot} := \sum_{\xi\in\paths{b}} \mathbb{P}[\xi]$. However, since the state space is finite (for finite $n$) and there is always a non-vanishing probability to go to $\mathbf{1}$, we know that $p_\mathrm{tot}=1$, i.e. the process terminates almost surely.
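
The recursion above is straightforward to mechanise with exact arithmetic. The following minimal sketch (Python; the function \texttt{coeff} is our own naming) computes the coefficient of $p^k$ in $R_Z$ on the infinite line, where the state $Z$ is the set of zero sites: it expands each transition probability $p^m(1-p)^{3-m}$ binomially in the identity $R_Z = 1 + \sum_t P_t(p) R_{Z_t}$. The recursion terminates because every term either lowers the order $k$ or, via the all-ones resample $x=111$, strictly shrinks $Z$.
\begin{verbatim}
from fractions import Fraction
from functools import lru_cache
from itertools import product
from math import comb

@lru_cache(maxsize=None)
def coeff(Z, k):
    """Coefficient of p^k in R_Z on the infinite line (Z = frozenset of zeros)."""
    if not Z or k < 0:
        return Fraction(0)
    total = Fraction(1 if k == 0 else 0)      # the "+1" for the resample done now
    for i in Z:                               # resampled site, uniform among zeros
        for x in product((0, 1), repeat=3):   # new values at sites i-1, i, i+1
            m = 3 - sum(x)                    # p(x) = p^m (1-p)^(3-m)
            Zt = set(Z)
            for off, v in zip((-1, 0, 1), x):
                (Zt.discard if v else Zt.add)(i + off)
            Zt = frozenset(Zt)
            for j in range(k - m + 1):        # j-th binomial term of (1-p)^(3-m)
                total += Fraction(comb(3 - m, j) * (-1)**j, len(Z)) \
                         * coeff(Zt, k - m - j)
    return total

print([coeff(frozenset({0}), k) for k in range(6)])
# should give 1, 3, 7, 44/3, 29, 497/9, matching the series for R_101 above
\end{verbatim}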

\newpage
\subsection{Weak cancellation proof}

Here we prove Claim \ref{claim:weakcancel}, the weaker version of the claim. We require the following definition.
\begin{definition}[Path independence] \label{def:independence}
	We say two paths $\xi_i\in\paths{b_i}$ ($i=1,2$) of the Markov Chain are \emph{independent} if $\xi_1$ never resamples a site that was ever zero in $\xi_2$ and the other way around. It is allowed that $\xi_1$ resamples a $1$ to a $1$ that was also resampled from $1$ to $1$ by $\xi_2$ and vice versa. If the paths are not independent then we call the paths \emph{dependent}.
\end{definition}
\begin{definition}[Path independence - alternative] \label{def:independence2}
    Equivalently, on the infinite line $\xi_1$ and $\xi_2$ are independent if there is a site `in between' them that was never zero in $\xi_1$ and never zero in $\xi_2$. On the circle, $\xi_1$ and $\xi_2$ are independent if there are \emph{two} sites in between them that are never zero.
\end{definition}
\begin{claim}[Sum of expectation values] \label{claim:expectationsum}
When $b=b_1\land b_2\in\{0,1\}^n$ is a state with two groups ($b_1\lor b_2 = 1^n$) of zeroes with $k$ $1$s in between the groups, then we have $R_b(p) = R_{b_1}(p) + R_{b_2}(p) + \mathcal{O}(p^{k})$, where $b_1$ and $b_2$ are the configurations in which only one of the groups is present and the other group has been replaced by $1$s. To be precise, the sums agree up to and including order $p^{k-1}$.
\end{claim}
\textbf{Example}: For $b_1 = 0111111$ and $b_2 = 1111010$ we have $b=0111010$ and $k=3$. The claim says that the expected time to reach $\mathbf{1}$ from $b$ is the time to make the first group $1$ plus the time to make the second group $1$, as if they are independent. Simulation shows that
\begin{align*}
    R_{b_1} &= 1 + 3p + 7p^2 + 14.67p^3 + 29p^4 + \mathcal{O}(p^5)\\
    R_{b_2} &= 2 + 5p + 10.67p^2 + 21.11p^3+40.26p^4 + \mathcal{O}(p^5)\\
    R_{b} &= 3 + 8p + 17.67p^2 + 34.78p^3+65.27p^4 + \mathcal{O}(p^5)\\
    R_{b_1} + R_{b_2} &= 3 + 8p + 17.67p^2+35.78p^3 + 69.26p^4 +\mathcal{O}(p^5)
\end{align*}
and indeed the sums agree up to order $p^{k-1}=p^2$. When going up to order $p^{k}$ or higher, there will be terms where the groups interfere so they are no longer independent.
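
Using the sketch from Section~\ref{sec:computerb} (the hypothetical \texttt{coeff} helper), the same check can be done with exact coefficients: translating so that the leftmost site is $0$, the zero sets of $b_1$, $b_2$ and $b$ become $\{0\}$, $\{4,6\}$ and $\{0,4,6\}$ on the line.
\begin{verbatim}
b1, b2, b = frozenset({0}), frozenset({4, 6}), frozenset({0, 4, 6})
for k in range(5):
    print(k, coeff(b, k), coeff(b1, k) + coeff(b2, k))
# the two columns should agree for k = 0, 1, 2 and first differ at k = 3 = gap size
\end{verbatim}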

~

\begin{proof}
    Consider a path $\xi_1\in\paths{b_1}$ and a path $\xi_2\in\paths{b_2}$ such that $\xi_1$ and $\xi_2$ are independent (Definition \ref{def:independence} or \ref{def:independence2}). The paths $\xi_1,\xi_2$ induce $\binom{|\xi_1|+|\xi_2|}{|\xi_1|}$ different paths of total length $|\xi_1|+|\xi_2|$ in $\paths{b_1\land b_2}$. In the sums $R_{b_1}$ and $R_{b_2}$, the contribution of these paths is $\mathbb{P}[\xi_1]\cdot |\xi_1|$ and $\mathbb{P}[\xi_2]\cdot |\xi_2|$ respectively. The next diagram shows how these $\binom{|\xi_1|+|\xi_2|}{|\xi_1|}$ paths contribute to $R_{b_1\land b_2}$. Point $(i,j)$ in the grid indicates that $i$ steps of $\xi_1$ and $j$ steps of $\xi_2$ have been done. At every point (except on the top and right edges of the grid) one has to choose between doing a step of $\xi_1$ or a step of $\xi_2$. The number of zeroes in the current state determines the probabilities with which this happens (on top of the probabilities already associated with the two original paths). The grid below shows that at a certain point one can choose to do a step of $\xi_1$ with probability $p_i$ or a step of $\xi_2$ with probability $1-p_i$. These $p_i$ could in principle be different at every point in the grid. The weight of such a new path $\xi\in\paths{b_1\land b_2}$ is $p_\mathrm{grid}\cdot\mathbb{P}[\xi_1]\cdot\mathbb{P}[\xi_2]$, where $p_\mathrm{grid}$ is the weight of the path in the diagram. By induction one can show that the $\binom{|\xi_1|+|\xi_2|}{|\xi_1|}$ different terms $p_\mathrm{grid}$ sum to $1$.
\begin{center}
\includegraphics{diagram_paths.pdf}
\end{center}
 Hence the contribution of all $\binom{|\xi_1|+|\xi_2|}{|\xi_1|}$ paths together to $R_{b_1\land b_2}$ is given by
\[
\mathbb{P}[\xi_1]\cdot\mathbb{P}[\xi_2]\cdot(|\xi_1|+|\xi_2|) = \mathbb{P}[\xi_2]\cdot\mathbb{P}[\xi_1]\cdot|\xi_1| \;\; + \;\; \mathbb{P}[\xi_1]\cdot\mathbb{P}[\xi_2]\cdot|\xi_2|.
\]
Ideally we would now like to sum this expression over all possible paths $\xi_1,\xi_2$ and use $p_\mathrm{tot}:=\sum_{\xi\in\paths{b_i}} \mathbb{P}[\xi] = 1$ (which also holds up to arbitrary order in $p$). The above expression would then become $R_{b_1} + R_{b_2}$. However, not all paths in the sum would satisfy the independence condition so it seems we can't do this. We now argue that it works up to order $p^{k-1}$.
For all $\xi\in\paths{b_1\land b_2}$ we have that \emph{either} $\xi$ splits into two independent paths $\xi_1,\xi_2$ as above, \emph{or} it does not. In the latter case, when $\xi$ can not be split like that, we know $\mathbb{P}[\xi]$ contains a power $p^k$ or higher because there is a gap of size $k$  and the paths must have moved at least $k$ times `towards each other' (for example one path moves $m$ times to the right and the other path moves $k-m$ times to the left). So the total weight of such a combined path is at least order $p^k$. Therefore we have
\[
	R_{b_1\land b_2} = \sum_{\mathclap{\substack{\xi_{1,2}\in\paths{b_{1,2}}\\ \mathrm{independent}}}} \mathbb{P}[\xi_2]\mathbb{P}[\xi_1]|\xi_1| + \sum_{\mathclap{\substack{\xi_{1,2}\in\paths{b_{1,2}}\\ \mathrm{independent}}}} \mathbb{P}[\xi_1]\mathbb{P}[\xi_2]|\xi_2| + \sum_{\mathclap{\xi\;\mathrm{dependent}}} \mathbb{P}[\xi]|\xi|.
\]
where the last sum contains only terms of order $p^{k}$ or higher. Now for the first sum, note that
\[
	\sum_{\mathclap{\substack{\xi_{1,2}\in\paths{b_{1,2}}\\ \mathrm{independent}}}} \mathbb{P}[\xi_2]\mathbb{P}[\xi_1]|\xi_1|
    = \sum_{\xi_1\in\paths{b_1}} \sum_{\substack{\xi_2\in\paths{b_2}\\ \text{independent of }\xi_1}} \mathbb{P}[\xi_2]\mathbb{P}[\xi_1]|\xi_1|
\]
where the sum over independent paths could be empty for certain $\xi_1$. Now we replace this last sum by a sum over \emph{all} paths $\xi_2\in\paths{b_2}$. This will change the sum but only for terms where $\xi_1,\xi_2$ are dependent. For those terms we already know that $\mathbb{P}[\xi_1]\mathbb{P}[\xi_2]$ contains a factor $p^k$ and hence we have 
\begin{align*}
    \sum_{\mathclap{\substack{\xi_{1,2}\in\paths{b_{1,2}}\\ \mathrm{independent}}}} \mathbb{P}[\xi_2]\mathbb{P}[\xi_1]|\xi_1|
    &= \sum_{\xi_1\in\paths{b_1}} \sum_{\xi_2\in\paths{b_2}} \mathbb{P}[\xi_2]\mathbb{P}[\xi_1]|\xi_1| + \mathcal{O}(p^k) \\
    &= \sum_{\xi_1\in\paths{b_1}} \mathbb{P}[\xi_1]|\xi_1| + \mathcal{O}(p^k) \\
    &= R_{b_1} + \mathcal{O}(p^k)
\end{align*}
We can do the same with the second term, and this proves the claim.
\end{proof}
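
The inductive step that the $\binom{|\xi_1|+|\xi_2|}{|\xi_1|}$ terms $p_\mathrm{grid}$ sum to $1$ can also be checked mechanically: assign an arbitrary branching probability to every interior grid point and sum over all monotone paths. A minimal sketch (all names ours):
\begin{verbatim}
import random
from functools import lru_cache

k1, k2 = 4, 3                           # numbers of steps of xi_1 and xi_2
P = {(i, j): random.random() for i in range(k1) for j in range(k2)}

@lru_cache(maxsize=None)
def weight(i, j):
    """Total p_grid weight of all monotone paths from (i, j) to (k1, k2)."""
    if i == k1 and j == k2:
        return 1.0
    if i == k1:                         # edge of the grid: only xi_2 steps remain
        return weight(i, j + 1)
    if j == k2:                         # edge of the grid: only xi_1 steps remain
        return weight(i + 1, j)
    return P[i, j] * weight(i + 1, j) + (1 - P[i, j]) * weight(i, j + 1)

print(weight(0, 0))                     # 1.0 (up to rounding), whatever P was drawn
\end{verbatim}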

~\\
\textbf{Proof of Claim \ref{claim:weakcancel}}: We can assume $C$ consists of a group on the left with $l$ slots and a group on the right with $r$ slots (so $r+l=|C|$), with a gap of size $k=\maxgap{C}$ between these groups. Then on the left we have strings in $\{0,1'\}^l$ as possibilities and on the right strings in $\{0,1'\}^r$. The combined configuration can be described by strings $f=(a,b)\in\{0,1'\}^{l+r}$. The initial probability of such a state $C(a,b)$ is $\rho_{C(a,b)} = (-1)^{|a|+|b|} p^{r+l}$, where $|a|$ counts the antiparticles in $a$, and by Claim \ref{claim:expectationsum} we know $R_{C(a,b)} = R_{C(a)} + R_{C(b)} + \mathcal{O}(p^k)$, where $C(a)$ indicates that only the left slots have been filled by $a$ and the other slots are filled with $1$s. The total contribution of these configurations is therefore
\begin{align*}
    \sum_{f\in\{0,1'\}^{|C|}} \rho_{C(f)} R_{C(f)}
    &= \sum_{a\in\{0,1'\}^l} \sum_{b\in\{0,1'\}^r} (-1)^{|a|+|b|}p^{r+l} \left( R_{C(a)} + R_{C(b)} + \mathcal{O}(p^k) \right) \\
    &=\;\;\; p^{r+l}\sum_{a\in\{0,1'\}^l} (-1)^{|a|} R_{C(a)} \sum_{b\in\{0,1'\}^r} (-1)^{|b|} \\
    &\quad + p^{r+l}\sum_{b\in\{0,1'\}^r} (-1)^{|b|} R_{C(b)} \sum_{a\in\{0,1'\}^l} (-1)^{|a|}
        + \mathcal{O}(p^{r+l+k})\\
    &= 0 + \mathcal{O}(p^{|C|+k})
\end{align*}
where we used the identity $\sum_{a\in\{0,1'\}^l} (-1)^{|a|} = 0$, valid for $l\geq 1$.
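
Claim \ref{claim:weakcancel} itself can be tested the same way with the \texttt{coeff} sketch from Section~\ref{sec:computerb}, by summing over all fillings of a slot configuration; the factor $\rho_{C(f)}$ contributes the sign and a global $p^{|C|}$:
\begin{verbatim}
from fractions import Fraction
from itertools import product

C = (0, 1, 4)                      # |C| = 3, maxgap(C) = 2, so cancellation up to p^4
for k in range(len(C), 8):
    total = Fraction(0)
    for f in product((0, 1), repeat=len(C)):        # f[i] = 1 marks an antiparticle
        particles = frozenset(c for c, v in zip(C, f) if v == 0)
        total += (-1)**sum(f) * coeff(particles, k - len(C))
    print(k, total)
# the coefficients of p^3 and p^4 should vanish; the first nonzero order is p^5
\end{verbatim}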

\newpage
\subsection{Proving the strong cancellation claim}
It is useful to introduce some new notation:
\begin{definition}[Events conditioned on starting state] \label{def:conditionedevents}
    For any state $b\in\{0,1\}^n$ and any event $A$ (where an event is a subset of all possible paths of the Markov Chain), define
    \begin{align*}
        \mathbb{P}_b(A) &= \mathbb{P}(A \mid \text{start in }b) \\
        R_{b,A} &= \mathbb{E}( \#\text{resamples} \mid A \;\&\; \text{start in }b)
    \end{align*}
\end{definition}
\begin{definition}[Vertex visiting event] \label{def:visitingResamplings}
    Denote by $\mathrm{Z}^{(j)}$ the event that site $j$ becomes zero at any point in time (including initialisation) before the Markov Chain terminates. Denote the complement by $\mathrm{NZ}^{(j)}$, i.e.\ the event that site $j$ does \emph{not} become zero before termination. Furthermore define $\mathrm{NZ}^{(j_1,j_2)} := \mathrm{NZ}^{(j_1)} \cap \mathrm{NZ}^{(j_2)}$, i.e.\ the event that \emph{both} $j_1$ and $j_2$ do not become zero before termination.
\end{definition}
\begin{figure}
	\begin{center}
    	\includegraphics{diagram_groups.pdf}
    \end{center}
    \caption{\label{fig:separatedgroups} Illustration of the setup of Lemma \ref{lemma:eventindependence}. Here $b_1,b_2\in\{0,1\}^n$ are bitstrings such that all zeroes of $b_1$ and all zeroes of $b_2$ are separated by the two indices $j_1,j_2$.}
\end{figure}
\begin{lemma}[Conditional independence] \label{lemma:eventindependence} \label{claim:eventindependence}
    Let $b=b_1\land b_2\in\{0,1\}^n$ be a state with two groups ($b_1\lor b_2 = 1^n$) of zeroes that are separated by at least one site in between, as in Figure \ref{fig:separatedgroups}. Let $j_1$, $j_2$ be any indices in between the groups, such that $b_1$ lies on one side of them and $b_2$ on the other, as shown in the figure. Furthermore, let $A_1$ be any event that depends only on the sites ``on the $b_1$ side of $j_1,j_2$'', and similarly for $A_2$ (for example $\mathrm{Z}^{(i)}$ for an $i$ on the correct side). Then we have
    \begin{align*}
        \mathbb{P}_b(\mathrm{NZ}^{(j_1,j_2)}, A_1, A_2)
        &=
        \mathbb{P}_{b_1}(\mathrm{NZ}^{(j_1,j_2)}, A_1)
        \; \cdot \;
        \mathbb{P}_{b_2}(\mathrm{NZ}^{(j_1,j_2)}, A_2) \\
        \mathbb{P}_b(A_1, A_2 \mid \mathrm{NZ}^{(j_1,j_2)})
        &=
        \mathbb{P}_{b_1}(A_1 \mid \mathrm{NZ}^{(j_1,j_2)})
        \; \cdot \;
        \mathbb{P}_{b_2}(A_2 \mid \mathrm{NZ}^{(j_1,j_2)}) \\
        R_{b,\mathrm{NZ}^{(j_1,j_2)},A_1,A_2}
        &=
        R_{b_1,\mathrm{NZ}^{(j_1,j_2)},A_1}
        \; + \;
        R_{b_2,\mathrm{NZ}^{(j_1,j_2)},A_2}
    \end{align*}
    up to any order in $p$.
\end{lemma}
The lemma says that conditioned on $j_1$ and $j_2$ not being crossed, the two halves of the circle are independent. 

\begin{proof}
    Note that any path $\xi\in\paths{b} \cap \mathrm{NZ}^{(j_1,j_2)}$ can be split into paths $\xi_1\in\paths{b_1}\cap \mathrm{NZ}^{(j_1,j_2)}$ and $\xi_2\in\paths{b_2}\cap\mathrm{NZ}^{(j_1,j_2)}$. This can be done by taking all resampling positions $r_i$ in $\xi$ and adding them to $\xi_1$ if $r_i$ is ``on the $b_1$ side of $j_1,j_2$'' and to $\xi_2$ if it is ``on the $b_2$ side of $j_1,j_2$''. Note that $\xi_1$ is then a path from $b_1$ to $\mathbf{1}$, because in the original path $\xi$, all zeroes ``on the $b_1$ side'' have been resampled by resamplings ``on the $b_1$ side''. Since the sites $j_1,j_2$ in between never become zero, there cannot be any zero ``on the $b_1$ side'' that was resampled by a resampling ``on the $b_2$ side''. Vice versa, all paths $\xi_1\in\paths{b_1}\cap \mathrm{NZ}^{(j_1,j_2)}$ and $\xi_2\in\paths{b_2}\cap\mathrm{NZ}^{(j_1,j_2)}$ also induce a path $\xi\in\paths{b} \cap \mathrm{NZ}^{(j_1,j_2)}$ by simply concatenating the resampling positions. Note that $\xi_1,\xi_2$ actually induce $\binom{|\xi_1|+|\xi_2|}{|\xi_1|}$ paths $\xi$ because of the possible orderings when interleaving the resamplings of $\xi_1$ and $\xi_2$. However, all these paths have correspondingly smaller weight, and by the same reasoning as in the proof of Claim \ref{claim:expectationsum} these weights sum to exactly $1$, so we obtain
    \begin{align*}
        \mathbb{P}_b(\mathrm{NZ}^{(j_1,j_2)},A_1,A_2)
        &= \sum_{\substack{\xi\in\paths{b} \cap \\ \mathrm{NZ}^{(j_1,j_2)}\cap A_1\cap A_2}} \mathbb{P}[\xi] \\
        &= \sum_{\substack{\xi_1\in\paths{b_1} \cap \\ \mathrm{NZ}^{(j_1,j_2)}\cap A_1}} \;\;
          \sum_{\substack{\xi_2\in\paths{b_2} \cap \\ \mathrm{NZ}^{(j_1,j_2)}\cap A_2}}
        \mathbb{P}[\xi_1]\cdot\mathbb{P}[\xi_2] \\
        &=
        \mathbb{P}_{b_1}(\mathrm{NZ}^{(j_1,j_2)},A_1)
        \; \cdot \;
        \mathbb{P}_{b_2}(\mathrm{NZ}^{(j_1,j_2)},A_2).
    \end{align*}
    The second equality follows directly from Bayes' rule and removing $A_1,A_2$.
    For the third equality, note that again by the same reasoning as in the proof of claim \ref{claim:expectationsum} we have
    \begin{align*}
        \mathbb{P}_b(\mathrm{NZ}^{(j_1,j_2)},A_1,A_2) R_{b,\mathrm{NZ}^{(j_1,j_2)},A_1,A_2}
        &:= \sum_{\substack{\xi\in\paths{b}\\\xi \in \mathrm{NZ}^{(j_1,j_2)}\cap A_1\cap A_2}} \mathbb{P}[\xi] |\xi| \\
        &= \sum_{\substack{\xi_1\in\paths{b_1}\\\xi_1 \in \mathrm{NZ}^{(j_1,j_2)}\cap A_1}}
          \sum_{\substack{\xi_2\in\paths{b_2}\\\xi_2 \in \mathrm{NZ}^{(j_1,j_2)}\cap A_2}}
        \mathbb{P}[\xi_1]\mathbb{P}[\xi_2] (|\xi_1| + |\xi_2|) \\
        &=
        \mathbb{P}_{b_2}(\mathrm{NZ}^{(j_1,j_2)},A_2) \mathbb{P}_{b_1}(\mathrm{NZ}^{(j_1,j_2)},A_1) R_{b_1,\mathrm{NZ}^{(j_1,j_2)},A_1} \\
        &\quad +
        \mathbb{P}_{b_1}(\mathrm{NZ}^{(j_1,j_2)},A_1) \mathbb{P}_{b_2}(\mathrm{NZ}^{(j_1,j_2)},A_2) R_{b_2,\mathrm{NZ}^{(j_1,j_2)},A_2} .
    \end{align*}
    Dividing by $\mathbb{P}_b(\mathrm{NZ}^{(j_1,j_2)},A_1,A_2)$ and using the first equality gives the desired result.
\end{proof}

\begin{comment}
TEST: Although a proof of claim \ref{claim:expectationsum} was already given, I'm trying to prove it in an alternate way using claim \ref{claim:eventindependence}.

~

Assume that $b_1$ ranges up to site $0$, the gap ranges from sites $1,...,k$ and $b_2$ ranges from site $k+1$ and onwards. For $j=1,...,k$ define the ``partial-zeros'' event $\mathrm{PZ}_j = \mathrm{Z}_1 \cap \mathrm{Z}_2 \cap ... \cap \mathrm{Z}_{j-1} \cap \mathrm{NZ}_j$ i.e. the first $j-1$ sites of the gap become zero and site $j$ does not become zero. Also define the ``all-zeros'' event $\mathrm{AZ} = \mathrm{Z}_1 \cap ... \cap \mathrm{Z}_k$, where all sites of the gap become zero. Note that these events partition the space, so we have for all $b$ that $\sum_{j=1}^k \mathbb{P}_b(\mathrm{PZ}_j) = 1 - \mathbb{P}_b(\mathrm{AZ}) = 1 - \mathcal{O}(p^k)$.

~

Furthermore, if site $j$ becomes zero when starting from $b_1$ it means all sites to the left of $j$ become zero as well. Similarly, from $b_2$ it implies all the sites to the right of $j$ become zero.
Because of that, we have
\begin{align*}
    \mathbb{P}_{b_1}(\mathrm{PZ}_j) &= \mathbb{P}_{b_1}(\mathrm{Z}_{j-1} \cap \mathrm{NZ}_j) = \mathcal{O}(p^{j-1}) \\
    \mathbb{P}_{b_2}(\mathrm{NZ}_j) &= 1 - \mathbb{P}_{b_2}(\mathrm{Z}_j) = 1 - \mathcal{O}(p^{k-j+1})
\end{align*}
Following the proof of claim \ref{claim:eventindependence} we also have
\begin{align*}
    \mathbb{P}_b(\mathrm{PZ}_{j})
    &=
    \mathbb{P}_{b_1}(\mathrm{PZ}_{j})
    \; \cdot \;
    \mathbb{P}_{b_2}(\mathrm{NZ}_{j}) \\
    R_{b,\mathrm{PZ}_{j}}
    &=
    R_{b_1,\mathrm{PZ}_{j}}
    \; + \;
    R_{b_2,\mathrm{NZ}_{j}}
\end{align*}


Now observe that
\begin{align*}
    R_b &= \sum_{j=1}^k \mathbb{P}_b(\mathrm{PZ}_j) R_{b,\mathrm{PZ}_j} + \mathbb{P}_b(\mathrm{AZ}) R_{b,\mathrm{AZ}} \\
        &= \sum_{j=1}^k \mathbb{P}_{b_2}(\mathrm{NZ}_j)\mathbb{P}_{b_{1}}(\mathrm{PZ}_j) R_{b_1,\mathrm{PZ}_j}
        + \sum_{j=1}^k \mathbb{P}_{b_1}(\mathrm{PZ}_j)\mathbb{P}_{b_{2}}(\mathrm{NZ}_j) R_{b_2,\mathrm{NZ}_j}
        + \mathcal{O}(p^k) \\
        &= \sum_{j=1}^k \mathbb{P}_{b_{1}}(\mathrm{PZ}_j) R_{b_1,\mathrm{PZ}_j}
        - \sum_{j=1}^k \mathbb{P}_{b_2}(\mathrm{Z}_j)\mathbb{P}_{b_{1}}(\mathrm{PZ}_j) R_{b_1,\mathrm{PZ}_j}
        + \sum_{j=1}^k \mathbb{P}_{b_1}(\mathrm{PZ}_j)\mathbb{P}_{b_{2}}(\mathrm{NZ}_j) R_{b_2,\mathrm{NZ}_j}
        + \mathcal{O}(p^k) \\
        &= \sum_{j=1}^k \mathbb{P}_{b_{1}}(\mathrm{PZ}_j) R_{b_1,\mathrm{PZ}_j}
        + \sum_{j=1}^k \mathbb{P}_{b_1}(\mathrm{PZ}_j)\mathbb{P}_{b_{2}}(\mathrm{NZ}_j) R_{b_2,\mathrm{NZ}_j}
        + \mathcal{O}(p^k) \\
        &= R_{b_1}
        + \sum_{j=1}^k \mathbb{P}_{b_1}(\mathrm{PZ}_j)\mathbb{P}_{b_{2}}(\mathrm{NZ}_j) R_{b_2,\mathrm{NZ}_j}
        + \mathcal{O}(p^k) \\
        &\overset{???}{=} R_{b_1} + R_{b_2} + \mathcal{O}(p^k)
\end{align*}
\end{comment}

Consider the chain (instead of the cycle) for simplicity, with vertices identified with $\mathbb{Z}$.
\begin{definition}[Starting state dependent probability distribution.]
	Let $I\subset\mathbb{Z}$ be a finite set of vertices.
    Let $b_I$ be the initial state where everything is $1$, apart from the vertices corresponding to $I$, which are set to $0$. Define $P_I(A)=\mathbb{P}_{b_I}(A)$, where the latter is defined in Definition \ref{def:conditionedevents}, i.e.\ the probability of seeing a resample sequence from $A$ when the whole procedure is started in state $b_I$. 
\end{definition}

The intuition behind the following lemma is that the far right can only affect vertex $0$ if a chain of interactions forms, which means that every vertex in between should become $0$ at least once.
\begin{lemma}\label{lemma:probIndep}
	Suppose we have a finite set $I\subset\mathbb{N}_+$ of vertices.
    Let $I_{\max}:=\max(I)$ and $I':=I\setminus\{I_{\max}\}$, and similarly let $I_{\min}:=\min(I)$. These definitions are illustrated in Figure \ref{fig:lemmaillustration}.
	Then $P_{I}(Z^{(0)})=P_{I'}(Z^{(0)}) + \mathcal{O}(p^{I_{\max}+1-|I|})$.
\end{lemma}
\begin{proof}
\begin{figure}
	\begin{center}
    	\includegraphics{diagram_proborders.pdf}
    \end{center}
    \caption{\label{fig:lemmaillustration} Illustration of setup of Lemma \ref{lemma:probIndep}.}
\end{figure}
	The proof uses induction on $|I|$. For $|I|=1$ the statement is easy, since every resample sequence that resamples vertex $0$ to zero must produce at least $I_{\max}$ zeroes in-between.
	
    Induction step: For an event $A$ and $k>0$ let us denote $A_k = A\cap\left(\cap_{j=0}^{k-1} \mathrm{Z}^{(j)}\right)\cap \mathrm{NZ}^{(k)}$, i.e. $A_k$ is the event $A$ \emph{and} ``Each vertex in $0,1,2,\ldots, k-1$ becomes $0$ at some point before termination (either by resampling or initialisation), but vertex $k$ does not''. Observe that these events form a partition, so $Z^{(0)}=\dot{\bigcup}_{k=1}^{\infty}Z^{(0)}_k$.
    Let $I_{<k}:=I\cap[1,k-1]$ and similarly $I_{>k}:=I\setminus[1,k]$; finally let $I_{><}:=[I_{\min}+1,I_{\max}-1]\setminus I$ (note that $I_{><} = \gaps{I}$ as shown in Figure \ref{fig:diametergap}). Suppose we have proven the claim up to $|I|-1$; then the induction step can be shown by
	\begin{align*}
		P_{I}(Z^{(0)})
		&=\sum_{k=1}^{\infty}P_I(Z^{(0)}_k) \tag{the events are a partition}\\
        &=\sum_{k\in \mathbb{N}\setminus I}P_I(Z^{(0)}_k) \tag{$P_I(Z^{(0)}_k)=0$ for $k\in I$}\\
        &=\sum_{k\in\mathbb{N}\setminus I}P_{I_{<k}}(Z^{(0)}_k)\cdot P_{I_{>k}}(\mathrm{NZ}^{(k)}) \tag{by Claim~\ref{claim:eventindependence}}\\
        &=\sum_{k\in I_{><}}P_{I_{<k}}(Z^{(0)}_k)\cdot P_{I_{>k}}(\mathrm{NZ}^{(k)})+\mathcal{O}(p^{I_{\max}+1-|I|})
		\tag{$k<I_{\min}\Rightarrow P_{I_{<k}}(Z^{(0)}_k)=0$, and $k>I_{\max}\Rightarrow P_{I_{<k}}(Z^{(0)}_k)=\mathcal{O}(p^{k-|I|})$}\\
        &=\sum_{k\in I_{><}}P_{I'_{<k}}(Z^{(0)}_k)\cdot P_{I_{>k}}(\mathrm{NZ}^{(k)})+\mathcal{O}(p^{I_{\max}+1-|I|})	
		\tag{$k< I_{\max}\Rightarrow I_{<k}=I'_{<k}$}\\
		&=\sum_{k\in I_{><}}P_{I'_{<k}}(Z^{(0)}_k)\cdot
        \left(P_{I'_{>k}}(\mathrm{NZ}^{(k)})+\mathcal{O}(p^{I_{\max}-k+1-|I_{>k}|})\right) +\mathcal{O}(p^{I_{\max}+1-|I|})	\tag{by induction, since for $k>I_{\min}$ we have $|I_{>k}|<|I|$}\\
		&=\sum_{k\in I_{><}}P_{I'_{<k}}(Z^{(0)}_k)\cdot
        P_{I'_{>k}}(\mathrm{NZ}^{(k)}) +\mathcal{O}(p^{I_{\max}+1-|I|})	
		\tag{as $P_{I'_{<k}}(Z^{(0)}_k)=\mathcal{O}(p^{k-|I'_{<k}|})$}\\
		&=\sum_{k\in\mathbb{N}\setminus I}P_{I'_{<k}}(Z^{(0)}_k)\cdot
        P_{I'_{>k}}(\mathrm{NZ}^{(k)}) +\mathcal{O}(p^{I_{\max}+1-|I|})\\
		&=\sum_{k\in\mathbb{N}\setminus I'}P_{I'_{<k}}(Z^{(0)}_k)\cdot
        P_{I'_{>k}}(\mathrm{NZ}^{(k)}) +\mathcal{O}(p^{I_{\max}+1-|I|})	\tag{$k=I_{\max}\Rightarrow P_{I'_{<k}}(Z^{(0)}_k)=\mathcal{O}(p^{I_{\max}-|I'|})=\mathcal{O}(p^{I_{\max}+1-|I|})$}\\
		&=P_{I'}(Z^{(0)}) +\mathcal{O}(p^{I_{\max}+1-|I|})	\tag{analogously to the beginning}			
	\end{align*}
\end{proof}

	The main insight that Lemma~\ref{lemma:probIndep} gives is that if we separate the slots into two halves, then in order to see the cancellation of the contribution of the expected resamples on the right, we can simply pair up the left configurations by the particle filling the leftmost slot. Similarly, for cancelling the left expectations we pair up the right configurations based on the rightmost filling. 
	
	Also, this claim finally ``sees'' how many empty places there are between the slots. These properties make it possible to use this lemma to prove the sought linear bound. We show it for the infinite chain, but with a little care it should also translate to the circle.

~

Here, I (Tom) tried to set up the same lemma but for the circle instead of the infinite chain.
This time it is no longer $I_\mathrm{max}$ that is removed but an arbitrary vertex $i_* \in I$, and $I' = I \setminus \{i_*\}$. Without loss of generality we can assume that $i_* \leq n/2$, so that the distance to $0$ is simply $d(i_*,0)=i_*$ (if not, we can relabel the vertices and count the other way around, so that $i_* \to n-i_*$). The goal is now to prove:
\begin{align*}
    P_I(Z^{(0)}) = P_{I'}(Z^{(0)}) + \mathcal{O}(p^{\mathrm{d}(i_*,0) + 1 - |I|})
\end{align*}
Note that when we refer to an interval $[a,b]$ on the circle we could be referring to two possible intervals because of the periodicity of the circle. In the following, whenever we refer to an interval $[a,b]$ we refer to the interval with vertex 0 on the \emph{inside}.

For $a,b\in[n]$, define the ``zeroes patch'' event as the event that every site strictly inside the interval $[a,b]$ becomes zero but the two boundary sites do not, i.e.\ $\mathrm{ZP}^{[a,b]} = \mathrm{NZ}^{(a)} \cap \mathrm{Z}^{(a+1)} \cap \mathrm{Z}^{(a+2)} \cap \cdots \cap \mathrm{Z}^{(b-1)} \cap \mathrm{NZ}^{(b)}$ (note that $\mathrm{Z}^{(0)}$ is part of this intersection, since vertex $0$ lies inside the interval).

Furthermore, define the `inside' and `outside' of $I$ as $I_{\mathrm{in}(a,b)} = I\cap[a,b]$ and $I_{\mathrm{out}(a,b)} = I \setminus [a,b]$.
The following diagram illustrates these definitions.
\begin{center}
    \includegraphics{diagram_circle_lemma.pdf}
\end{center}
\begin{align*}
    P_{I}(\mathrm{Z}^{(0)})
    &=\sum_{\substack{l,k=1\\k+l<n}}
    P_I(\mathrm{ZP}^{[n-l,k]}) \tag{the events are a partition}\\
    &=\sum_{\substack{l,k=1\\k+l<n\\k,n-l\notin I}}
    P_I(\mathrm{ZP}^{[n-l,k]}) \tag{$\mathbb{P}(\mathrm{ZP}^{[a,b]})=0$ for $a\in I$ or $b\in I$}
\end{align*}
Note that if $[n-l,k]$ does not `touch' $I$ then $P_I(\mathrm{ZP}^{[n-l,k]}) = 0$.
Furthermore, we have $P_I(\mathrm{ZP}^{[n-l,k]}) = \mathcal{O}(p^{k+l-1-|I_{\mathrm{in}(n-l,k)}|})$. If $k > \mathrm{d}(i_*,0)$ or $l > \mathrm{d}(i_*,0)$ then this gives $P_I(\mathrm{ZP}^{[n-l,k]}) = \mathcal{O}(p^{\mathrm{d}(i_*,0) + 1 - |I|})$ since $|I_\mathrm{in}| \leq |I|$. Therefore we have
\begin{align*}
    P_I(\mathrm{Z}^{(0)})
    &=\sum_{\substack{l,k=1\\k,n-l\notin I}}^{i_*-1}
    P_I(\mathrm{ZP}^{[n-l,k]})
    + \mathcal{O}(p^{i_* + 1 - |I|}) \\
    &=\sum_{\substack{l,k=1\\k,n-l\notin I}}^{i_*-1}
    P_{I_{\mathrm{in}(n-l,k)}}(\mathrm{ZP}^{[n-l,k]}) \cdot
    P_{I_{\mathrm{out}(n-l,k)}}(\mathrm{NZ}^{(n-l,k)})
    + \mathcal{O}(p^{i_* + 1 - |I|}) \tag{by Claim~\ref{claim:eventindependence} for $n-l,k\notin I$} \\
    &=\sum_{\substack{l,k=1\\k,n-l\notin I}}^{i_*-1}
    P_{I'_{\mathrm{in}(n-l,k)}}(\mathrm{ZP}^{[n-l,k]}) \cdot
    P_{I_{\mathrm{out}(n-l,k)}}(\mathrm{NZ}^{(n-l,k)})
    + \mathcal{O}(p^{i_* + 1 - |I|}) \tag{$k,l<i_*\leq n/2$, so $i_*\notin[n-l,k]$ and $I_{\mathrm{in}(n-l,k)}=I'_{\mathrm{in}(n-l,k)}$}
\end{align*}
Now we are supposed to use the induction step, but this is where I got stuck.


\begin{definition}[Connected patches]
	Let $\mathcal{P}\subset 2^{\mathbb{Z}}$ be a finite system of finite subsets of $\mathbb{Z}$. We say that the patch set of a resample sequence is $\mathcal{P}$
	if the connected components of the set of vertices that have ever become $0$ are exactly the elements of $\mathcal{P}$. We denote by $A^{(\mathcal{P})}$ the event that the set of patches is $\mathcal{P}$. For a patch $P$ let $A^{(P)}=\bigcup_{\mathcal{P}:P\in \mathcal{P}}A^{(\mathcal{P})}$.
\end{definition} 
Note by Tom: So $A^{(\mathcal{P})}$ is the event that the set of all patches is \emph{exactly} $\mathcal{P}$ whereas $A^{(P)}$ is the event that one of the patches is equal to $P$ but there can be other patches as well.

\begin{definition}[Conditional expectations]
	Let $S\subset\mathbb{Z}$ be a finite slot configuration, and for $f\in\{0,1'\}^{|S|}$ let $I:=S(f)$ be the set of vertices filled with particles. 
	Then we define
	$$R_I:=\mathbb{E}[\#\{\text{resamplings when started from initial state }I\}].$$
	For a patch set $\mathcal{P}$ and some $P\in\mathcal{P}$ we define
	$$R^{(\mathcal{P})}_I:=\mathbb{E}[\#\{\text{resamplings when started from initial state }I\}\mid A^{(\mathcal{P})}],$$
	and 
	$$R^{(P,\mathcal{P})}_I:=\mathbb{E}[\#\{\text{resamplings inside }P\text{ when started from initial state }I\}\mid A^{(\mathcal{P})}],$$
	and finally
	$$R^{(P)}_I:=\mathbb{E}[\#\{\text{resamplings inside }P\text{ when started from initial state }I\}\mid A^{(P)}].$$
\end{definition} 

    Similarly to Mario's proof, I use the observation that 
    \begin{align*}
    R^{(n)} &= \frac{1}{n}\sum_{b\in\{0,1,1'\}^{n}} \rho_b \; R_{\bar{b}}(p)\\
    &= \frac{1}{n}\sum_{S\subseteq [n]}\sum_{f\in\{0,1'\}^{|S|}}\rho_{S(f)} R_{S(f)}\\
    &= \frac{1}{n}\sum_{S\subseteq [n]}\sum_{f\in\{0,1'\}^{|S|}}\rho_{S(f)}
    \sum_{\mathcal{P}\text{ patches}} \mathbb{P}_{S(f)}(A^{(\mathcal{P})}) R^{(\mathcal{P})}_{S(f)} \\
    &= \frac{1}{n}\sum_{S\subseteq [n]}\sum_{f\in\{0,1'\}^{|S|}}\rho_{S(f)}
    \sum_{\mathcal{P}\text{ patches}} \mathbb{P}_{S(f)}(A^{(\mathcal{P})}) \sum_{P\in\mathcal{P}} R^{(P,\mathcal{P})}_{S(f)}\\
    &= \frac{1}{n}\sum_{S\subseteq [n]}\sum_{f\in\{0,1'\}^{|S|}}\rho_{S(f)} 
    \sum_{\mathcal{P}\text{ patches}} \mathbb{P}_{S(f)}(A^{(\mathcal{P})}) \sum_{P\in\mathcal{P}} R^{(P)}_{S(f)\cap P}\tag{by Claim~\ref{claim:eventindependence}}\\ 
    &= \frac{1}{n}\sum_{S\subseteq [n]}\sum_{f\in\{0,1'\}^{|S|}}\rho_{S(f)} 
    \sum_{P\text{ patch}} R^{(P)}_{S(f)\cap P}\sum_{\mathcal{P}:P\in\mathcal{P}}\mathbb{P}_{S(f)}(A^{(\mathcal{P})})\\
    &= \frac{1}{n}\sum_{S\subseteq [n]}\sum_{P\text{ patch}}\sum_{f\in\{0,1'\}^{|S|}}
     \rho_{S(f)} R^{(P)}_{S(f)\cap P}\mathbb{P}_{S(f)}(A^{(P)}) \tag{by definition}\\        
    &= \frac{1}{n}\sum_{S\subseteq [n]}\sum_{P\text{ patch}}\sum_{f\in\{0,1'\}^{|S|}}
    \rho_{S(f)} R^{(P)}_{S(f)\cap P}\mathbb{P}_{S(f)\cap P}(A^{(P)})\mathbb{P}_{S(f)\cap \overline{P}}(\overline{Z^{(P_{\min}-1)}}\cap\overline{Z^{(P_{\max}+1)}}) \tag{recall $\overline{Z^{(j)}}=\mathrm{NZ}^{(j)}$ from Definition~\ref{def:visitingResamplings} and use Claim~\ref{claim:eventindependence}}\\    
    &= \frac{1}{n}\sum_{S\subseteq [n]} \sum_{P\text{ patch}} \sum_{f_P\in\{0,1'\}^{|S\cap P|}}
    \rho_{S(f_P)} R^{(P)}_{S(f_P)} \mathbb{P}_{S(f_P)}(A^{(P)})
    \sum_{f_{\overline{P}}\in\{0,1'\}^{|S\cap \overline{P}|}} \rho_{S(f_{\overline{P}})} \mathbb{P}_{S(f_{\overline{P}})}(\overline{Z^{(P_{\min}-1)}}\cap\overline{Z^{(P_{\max}+1)}}) \\   
	&= \frac{1}{n}\sum_{S\subseteq [n]}\sum_{P\text{ patch}}\sum_{f_P\in\{0,1'\}^{|S\cap P|}}
	\rho_{S(f_P)}
        \sum_{f_{\overline{P}}\in\{0,1'\}^{|S\cap \overline{P}|}}\rho_{S(f_{\overline{P}})}\mathcal{O}(p^{|S_{><}|}) \tag{see below} \\
	&= \frac{1}{n}\sum_{S\subseteq [n]}\mathcal{O}(p^{|S|+|S_{><}|}).
    \end{align*}
\begin{figure}
	\begin{center}
    	\includegraphics{diagram_patches.pdf}
    \end{center}
    \caption{\label{fig:patches} Illustration of the last steps of the proof.}
\end{figure}
    The final equality follows since $|\rho_{S(f_P)}|\cdot|\rho_{S(f_{\overline{P}})}|=p^{|S|}$. The penultimate equality can be seen by a case distinction as follows. If $S\subseteq P$, then there is no splitting into $S\cap P$ and $S\setminus P$, and we already have $\mathbb{P}_{S(f_P)}(A^{(P)})=\mathcal{O}(p^{|S_{><}|})$, simply because every vertex of the patch $P$ must become $0$ at some point, and at least $|S_{><}|$ of these zeroes occur at vertices outside $S$, so they must be produced by resamplings. For the more general case, assume that $S$ extends beyond $P$ on both sides; this is illustrated in Figure~\ref{fig:patches}. We will focus on the following sum that appeared in the above equations:
    \begin{align*}
        \sum_{f_{\overline{P}}\in\{0,1'\}^{|S \cap \overline{P}|}} \rho_{S(f_{\overline{P}})} \mathbb{P}_{S(f_{\overline{P}})}(\overline{Z^{(P_{\min}-1)}}\cap\overline{Z^{(P_{\max}+1)}})
    \end{align*}
    By Lemma~\ref{lemma:eventindependence} we can split this sum into two parts: the part to the left of $P$ and the part to the right of $P$. Define $S_\mathrm{left}=S\cap[S_\mathrm{min},P_{\mathrm{min}}-1]$ and $S_\mathrm{right}=S\cap[P_{\mathrm{max}}+1,S_\mathrm{max}]$, so that $S\cap\overline{P} = S_\mathrm{left} \cup S_\mathrm{right}$. These are also illustrated in Figure~\ref{fig:patches}. Then we have
    \begin{align*}
        \mathbb{P}_{S(f_{\overline{P}})}(\overline{Z^{(P_{\min}-1)}}\cap\overline{Z^{(P_{\max}+1)}})
        &= \mathbb{P}_{S(f_{\mathrm{left}})}(\overline{Z^{(P_{\min}-1)}}) \;\cdot\; \mathbb{P}_{S(f_{\mathrm{right}})}(\overline{Z^{(P_{\max}+1)}})
    \end{align*}
    and hence we can split the sum. Without loss of generality we now only consider the `right' part of the sum:
    \begin{align*}
        \sum_{f\in\{0,1'\}^{|S_\mathrm{right}|}} \rho_{S_\mathrm{right}(f)} \mathbb{P}_{S_\mathrm{right}(f)}(\overline{Z^{(P_{\max}+1)}})
    \end{align*}
    Now further split this sum over the value of $f$ at position $S_\mathrm{max}$:
    \begin{align*}
        \sum_{f\in\{0,1'\}^{|S_\mathrm{right}\setminus\{S_\mathrm{max}\}|}} \sum_{f'\in\{0,1'\}}
        \rho_{S_\mathrm{right}(f\,f')} \mathbb{P}_{S_\mathrm{right}(f\,f')}(\overline{Z^{(P_{\max}+1)}})
    \end{align*}
    and we use the definition of $\rho$ for the sum over $f'$:
    \begin{align*}
         \sum_{f\in\{0,1'\}^{|S_\mathrm{right}\setminus\{S_\mathrm{max}\}|}}
        \rho_{S_\mathrm{right}(f)} \left(p \mathbb{P}_{S_\mathrm{right}(f\, 0)}(\overline{Z^{(P_{\max}+1)}}) + (-p) \mathbb{P}_{S_\mathrm{right}(f\, 1)}(\overline{Z^{(P_{\max}+1)}}) \right)
    \end{align*}
    Now we recognize the setup of Lemma~\ref{lemma:probIndep} with $I=S_\mathrm{right}(f\,0)$ and $I'=S_\mathrm{right}(f\,1)$. The lemma yields
    \begin{align*}
        \mathbb{P}_{S_\mathrm{right}(f\, 0)}(\overline{Z^{(P_{\max}+1)}}) &= \mathbb{P}_{S_\mathrm{right}(f\, 1)}(\overline{Z^{(P_{\max}+1)}}) + \mathcal{O}(p^{S_\mathrm{max}-(P_{\mathrm{max}}+1)+1-|S_\mathrm{right}|}) \\
        &= \mathbb{P}_{S_\mathrm{right}(f\, 1)}(\overline{Z^{(P_{\max}+1)}}) + \mathcal{O}(p^{S_\mathrm{max}-P_{\mathrm{max}}-|S_\mathrm{right}|}) .
    \end{align*}
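    (My heuristic reading of this exponent, not part of the lemma statement: for the extra particle at $S_\mathrm{max}$ to influence the event $\overline{Z^{(P_{\max}+1)}}$, the resampled zeroes have to connect $P_{\max}+1$ to $S_\mathrm{max}$; the interval $[P_{\max}+1,S_\mathrm{max}]$ contains $S_\mathrm{max}-P_{\max}$ vertices, of which at most $|S_\mathrm{right}|$ already carry particles, so at least $S_\mathrm{max}-P_{\max}-|S_\mathrm{right}|$ fresh zeroes are needed, each costing a factor $p$.)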
    Substituting this back into the sum gives
    \begin{align*}
         \sum_{f\in\{0,1'\}^{|S_\mathrm{right}\setminus\{S_\mathrm{max}\}|}}
        \rho_{S_\mathrm{right}(f)} \mathcal{O}(p^{S_\mathrm{max}-P_{\mathrm{max}}-|S_\mathrm{right}|+1})
         = \sum_{f\in\{0,1'\}^{|S_\mathrm{right}|}}
        \rho_{S_\mathrm{right}(f)} \mathcal{O}(p^{S_\mathrm{max}-P_{\mathrm{max}}-|S_\mathrm{right}|})
    \end{align*}
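    (In the last step one factor of $p$ moved from the $\mathcal{O}$-term into $\rho$: extending $f$ by the coordinate at $S_\mathrm{max}$ multiplies $|\rho_{S_\mathrm{right}(f)}|$ by $p$, since $|\rho_{C(f)}|=p^{|C|}$ for any filling $f$ of a slot configuration $C$.)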
    One can do the same for the `left' part, which gives a factor $\mathcal{O}(p^{P_\mathrm{min}-S_{\mathrm{min}}-|S_\mathrm{left}|})$. The part of $S$ that lies within $P$ gives $\mathbb{P}_{S(f_P)}(A^{(P)})=\mathcal{O}(p^{P_\mathrm{max}-P_\mathrm{min}+1-|S\cap P|})$. Combining these three factors yields
    \begin{align*}
        (\textrm{left part})(P\textrm{ part})(\textrm{right part}) &=
\mathcal{O}(p^{P_\mathrm{min}-S_{\mathrm{min}}-|S_\mathrm{left}|}) \cdot \mathcal{O}(p^{P_\mathrm{max}-P_\mathrm{min}+1-|S\cap P|}) \cdot \mathcal{O}(p^{S_\mathrm{max}-P_{\mathrm{max}}-|S_\mathrm{right}|}) \\
        &= \mathcal{O}(p^{S_\mathrm{max}-S_\mathrm{min}+1-|S_\mathrm{left}\cup S_\mathrm{right}\cup (S\cap P)|})\\
        &= \mathcal{O}(p^{S_\mathrm{max}-S_\mathrm{min}+1-|S|})
        = \mathcal{O}(p^{|S_{><}|})
    \end{align*}
    as required. This finishes the proof.
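    As a quick sanity check of this bookkeeping with made-up numbers: take $S=\{1,2,6,9\}$ and $P=\{4,5,6,7\}$, so that $S_\mathrm{left}=\{1,2\}$, $S\cap P=\{6\}$ and $S_\mathrm{right}=\{9\}$. The three exponents are $4-1-2=1$, $7-4+1-1=3$ and $9-7-1=1$, and indeed $1+3+1=5=9-1+1-4=|S_{><}|$.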

    ~

	I think the same arguments would translate to the torus and other translation-invariant spaces, so we could go higher-dimensional as Mario suggested. There I think one would need to replace $|S_{><}|$ by the minimal size of a set $C$ for which $S\cup C$ is connected. I am not entirely sure how to generalise Lemma~\ref{lemma:probIndep}, though, which is of key importance in the present proof.
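    For instance, on $\mathbb{Z}$ this minimal completion size recovers $|S_{><}|$: for $S=\{1,4,5\}$ the smallest set $C$ making $S\cup C$ connected is $C=\{2,3\}$, of size $2=5-1+1-3=|S_{><}|$.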
    
    Questions:
    \begin{itemize}
    	\item Is this proof finally flawless?
    	\item In view of this proof, can we better characterise $a_k^{(k+1)}$?
    	\item Why did Mario's and Tom's simulation show that for fixed $C$ the contribution coefficients have constant sign? (A small simulation sketch follows this list.) Is it relevant for proving \ref{it:pos}-\ref{it:geq}?
    	\item Can we prove the conjectured formula for $a_k^{(3)}$?		
    \end{itemize} 
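
    Regarding the simulation question above, here is a minimal Monte Carlo sketch (mine, for re-running such experiments), which only estimates $R_I$ on the length-$n$ cycle. It assumes a particular resampling rule, namely that a $0$-vertex is picked uniformly at random and its closed neighbourhood is resampled, each site independently becoming $0$ with probability $p$; all names are ad hoc.
\begin{verbatim}
import random

def estimate_R(n, p, zeros, trials=10000, seed=0):
    """Monte Carlo estimate of R_I on the length-n cycle.

    Assumed dynamics: while some vertex is 0, pick a 0-vertex uniformly
    at random and resample its closed neighbourhood {v-1, v, v+1}
    (mod n), each site independently becoming 0 with probability p.
    Counts resamplings until the all-ones state is reached.
    """
    rng = random.Random(seed)
    total = 0
    for _ in range(trials):
        state = [1] * n
        for v in zeros:
            state[v % n] = 0          # initial particles (zeroes)
        steps = 0
        while any(s == 0 for s in state):
            v = rng.choice([i for i in range(n) if state[i] == 0])
            for w in (v - 1, v, v + 1):
                state[w % n] = 0 if rng.random() < p else 1
            steps += 1
        total += steps
    return total / trials

# Example: one initial particle on a 10-cycle, p = 0.1.
print(estimate_R(10, 0.1, [0]))
\end{verbatim}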
    
\begin{comment}
    \subsection{Sketch of the (false) proof of the linear bound \ref{it:const}}
    Let us interpret $[n]$ as the vertices of a length-$n$ cycle, and interpret operations on vertices mod $n$ s.t. $n+1\equiv 1$ and $1-1\equiv n$.
    %\begin{definition}[Resample sequences]
    %	A sequence of indices $(r_\ell)=(r_1,r_2,\ldots,r_k)\in[n]^k$ is called resample sequence if our procedure performs $k$ consequtive resampling, where the first resampling of the procedure resamples around the mid point $r_1$ the second around $r_2$ and so on. Let $RS(k)$ the denote the set of length $k$ resample sequences, and let $RS=\cup_{k\in\mathbb{N}}RS(k)$.
    %\end{definition}
    %\begin{definition}[Constrained resample sequence]\label{def:constrainedRes}
    %	Let $C\subseteq[n]$ denote a slot configuration, and let $a\in\{\text{res},\neg\text{res}\}^{n-|C|}$, where the elements correspond to labels ``resampled" vs. ``not resampled" respectively. 
    %	For $j\in[n-|C|]$ let $i_j$ denote the $j$-th index in $[n]\setminus C$.
    %	We define the set $A^{(C,a)}\subseteq RS$ as the set of resample sequences $(r_\ell)$ such that for all $j$ which has $a_j=\text{res}$ we have that $i_j$ appears in $(r_\ell)$ but for $j'$-s which have $a_{j'}=\neg\text{res}$ we have that $i_{j'}$ never appears in $(r_\ell)$. 
    %\end{definition}    
    \begin{definition}[Conditional expected number of resamples]
    	For a slot configuration $C\subseteq[n]$ and $a\in\{\!\text{ever},\text{ never}\}^{n-|C|}$ we define the event $A^{(C,a)}:=\bigwedge_{j\in[n-|C|]}\{i_j\text{ has }a_j\text{ become }0\text{ before reaching }\mathbf{1}\}$,
    	where $i_j$ is the $j$-th vertex of $[n]\setminus C$.
    	Then we also define
    	$$R^{(C,a)}_b:=\mathbb{E}[\#\{\text{resamplings when started from initial state }b\}|A^{(C,a)}].$$
    \end{definition}     
    
    As in Mario's proof I use the observation that 
    \begin{align*}
    R^{(n)}(p) &= \frac{1}{n}\sum_{b\in\{0,1,1'\}^{n}} \rho_b \; R_{\bar{b}}(p)\\
    &= \frac{1}{n}\sum_{C\subseteq [n]}\sum_{f\in\{0,1'\}^{|C|}} \rho_{C(f)} R_{C(f)}(p)\\
    &= \frac{1}{n}\sum_{C\subseteq [n]}\sum_{f\in\{0,1'\}^{|C|}}\sum_{a\in\{\!\text{ever},\text{ never}\}^{n-|C|}} \rho_{C(f)} R^{{(C,a)}}_{C(f)}(p)P_{C(f)}(A^{(C,a)})\\
    &= \frac{1}{n}\sum_{C\subseteq [n]}\sum_{a\in\{\!\text{ever},\text{ never}\}^{n-|C|}} \sum_{f\in\{0,1'\}^{|C|}} \rho_{C(f)} R^{{(C,a)}}_{C(f)}(p)P_{C(f)}(A^{(C,a)}), 
    \end{align*}
    where we denote by $C\subseteq[n]$ a slot configuration, whereas $C(f)$ denotes the slots of $C$ filled with the particles described by $f$, while all other locations in $[n]\setminus C$ are set to $1$. 
    When we write $R_{C(f)}$ we mean $R_{C(\bar{f})}$, i.e., we replace $1'$-s with $1$-s. Since the notation is already heavy, we drop the bar from $f$, as it is clear from the context. Finally, by $P_{C(f)}(A^{(C,a)})$ we denote the probability that the event $A^{(C,a)}$ holds.
    
    As in the definition above, for $j\in[n-|C|]$ let $i_j$ denote the $j$-th index in $[n]\setminus C$.
    Suppose that $a$ is such that there are two indices $j_1\neq j_2$ such that 
    $a_{j_1}=\text{never}=a_{j_2}$; moreover, the sets $\{i_{j_1}+1,\ldots, i_{j_2}-1\}$ and $\{i_{j_2}+1,\ldots, i_{j_1}-1\}$ partition $C$ non-trivially, and we denote by $C_l$ and $C_r$ the corresponding parts. 
    I wanted to prove that
    \begin{equation}\label{eq:conditionalCancellation}
		\sum_{f\in\{0,1'\}^{|C|}} \rho_{C(f)} R^{{(C,a)}}_{C(f)}(p)=0,
    \end{equation}    
    based on the observation that for all $f\in\{0,1'\}^{|C|}$ we have 
    that 
    \begin{equation}\label{eq:keyIndependce}
    R^{{(C,a)}}_{C(f)}(p)=R^{{(C_l,a_l)}}_{C_l(f_l)}(p)+R^{{(C_r,a_r)}}_{C_r(f_r)}(p),
    \end{equation}
    where $f_l\in\{0,1'\}^{|C_l|}$ is defined by taking only the indices (and values) of $f$ corresponding to vertices of $C_l$; also $a_l\in\{\text{ever},\text{never}\}^{n-|C_l|}$ is defined such that $a$ and $a_l$ agree on the vertices where $a$ is defined, while on the vertices where $a$ is not defined, i.e., the vertices of $C_r$, we set $a_l$ to ``never". We define $f_r$ and $a_r$ analogously. 
    
    The reason why \eqref{eq:keyIndependce} holds is that, as before, the two halves of the cycle are conditionally independent because neither $i_{j_1}$ nor $i_{j_2}$ can become $0$. To be more precise, each resample sequence $\left(C(f)\rightarrow \mathbf{1} \right)\in A^{(C,a)}$ can be uniquely decomposed into resample sequences $\left(C_l(f_l)\rightarrow \mathbf{1}\right)\in A^{(C_l,a_l)}$ and $\left(C_r(f_r)\rightarrow \mathbf{1}\right)\in A^{(C_r,a_r)}$. The total probability of the set of resample sequences $r$ that share a given decomposition $(r_l,r_r)$ is the product of the probabilities of $r_l$ and $r_r$, as shown in the proof of Claim~\ref{claim:expectationsum}. This proves that the set of all resample sequences $\left(C(f)\rightarrow \mathbf{1}\right)\in A^{(C,a)}$ can, for our purposes, be viewed as a product set with a product probability distribution. Therefore the halves can be treated independently, and so the expectation values simply add up. 
    
    From here I wanted to mimic Mario's proof:
    \begin{align*}
    \sum_{f\in\{0,1'\}^{|C|}} \rho_{C(f)} R^{{(C,a)}}_{C(f)}(p)&=
    \sum_{f_l\in\{0,1'\}^{|C_l|}} \sum_{f_r\in\{0,1'\}^{|C_r|}}  (-1)^{|f_l|+|f_r|}p^{|C_l|+|C_r|} \left( R^{{(C_l,a_l)}}_{C_l(f_l)}(p) + R^{{(C_r,a_r)}}_{C_r(f_r)}(p) \right)\\
    &= p^{|C|}\sum_{f_l\in\{0,1'\}^{|C_l|}} (-1)^{|f_l|} R^{{(C_l,a_l)}}_{C_l(f_l)}(p) \sum_{f_r\in\{0,1'\}^{|C_r|}} (-1)^{|f_r|} \\
    &\quad + p^{|C|}\sum_{f_r\in\{0,1'\}^{|C_r|}} (-1)^{|f_r|} R^{{(C_r,a_r)}}_{C_r(f_r)}(p) \sum_{f_l\in\{0,1'\}^{|C_l|}} (-1)^{|f_l|} \\
    &= 0.
    \end{align*}
    The nasty issue, which I did not realise, is that the omitted factor $P_{C(f)}(A^{(C,a)})$ is non-constant in $f$: even though the event $A^{(C,a)}$ does not depend on $f$, the probability $P_{C(f)}(A^{(C,a)})=P_{C_l(f_l)}(A^{(C_l,a_l)})\cdot P_{C_r(f_r)}(A^{(C_r,a_r)})$ does, and so the above breaks down.
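    Concretely, keeping the probability factor in place and using \eqref{eq:keyIndependce} together with the product formula above gives
    \begin{align*}
    \sum_{f\in\{0,1'\}^{|C|}} \rho_{C(f)}\, P_{C(f)}(A^{(C,a)})\, R^{{(C,a)}}_{C(f)}(p)
    = p^{|C|}\sum_{f_l,f_r}(-1)^{|f_l|+|f_r|}\, P_{C_l(f_l)}(A^{(C_l,a_l)})\, P_{C_r(f_r)}(A^{(C_r,a_r)}) \left( R^{{(C_l,a_l)}}_{C_l(f_l)}(p) + R^{{(C_r,a_r)}}_{C_r(f_r)}(p) \right),
    \end{align*}
    and the inner sums $\sum_{f_r}(-1)^{|f_r|}P_{C_r(f_r)}(A^{(C_r,a_r)})$ no longer vanish, so the cancellation argument fails.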
    
    Observe that if \eqref{eq:conditionalCancellation} held for configurations that cut the slot configuration into two halves, it would imply that the only non-zero contribution comes from pairs $(C,a)$ such that $C\cup\{i_j:a_j=\text{ever}\}$ is connected. This is because if this set is not connected, then either we can cut $C$ into two halves non-trivially along ``never" vertices, or there is an island of $\text{ever}$ vertices separated from all slots, and therefore from all $0$-s. This latter case has zero contribution, since we cannot set these indices to $0$ without reaching them by some resamplings, and thereby building a path of $0$-s leading there.
    
    If $|C\cup\{i_j:a_j=\text{ever}\}|\geq k+1$ then every contribution has a power of at least $k+1$ in $p$, since $(C,a)$ requires the prior appearance of at least $k+1$ particles. If $n\geq k+1$ then every $(C,a)$ such that $|C\cup\{i_j:a_j=\text{ever}\}|\leq k$ appears exactly $n$ times, since $(C,a)$ cannot be translationally invariant. Moreover, the quantity $R^{{(C,a)}}_{C(f)}(p)$ is independent of $n$ due to the conditioning that every resampling happens on a connected component of length at most $k<n$. This would prove that $a_k^{(n)}$ is constant for $n\geq k+1$. The same arguments would directly translate to the torus and other translationally invariant objects, so we could go higher dimensional as Mario suggested.
    
    Questions:
    \begin{itemize}
    	\item Is it possible to somehow fix this proof?
    	\item In view of this (false) proof, can we better characterise $a_k^{(k+1)}$?
    	\item Why did Mario's and Tom's simulation show that for fixed $C$ the contribution coefficients have constant sign? Is it relevant for proving \ref{it:pos}-\ref{it:geq}?
    	\item Can we prove the conjectured formula for $a_k^{(3)}$?		
    \end{itemize} 

\begin{comment}
    \subsection{Sketch of the proof of the linear bound \ref{it:const}}
    Let us interpret $[n]$ as the vertices of a length-$n$ cycle, and interpret operations on vertices mod $n$ s.t. $n+1\equiv 1$ and $1-1\equiv n$.
    \begin{definition}[Resample sequences]
		A sequence of indices $(r_\ell)=(r_1,r_2,\ldots,r_k)\in[n]^k$ is called a resample sequence if our procedure performs $k$ consecutive resamplings, where the first resampling of the procedure resamples around the mid point $r_1$, the second around $r_2$, and so on. Let $RS(k)$ denote the set of length-$k$ resample sequences, and let $RS=\cup_{k\in\mathbb{N}}RS(k)$.
    \end{definition}
    \begin{definition}[Constrained resample sequence]\label{def:constrainedRes}
    	Let $C\subseteq[n]$ denote a slot configuration, and let $a\in\{\text{res},\neg\text{res}\}^{n-|C|}$, where the elements correspond to labels ``resampled" vs. ``not resampled" respectively. 
    	For $j\in[n-|C|]$ let $i_j$ denote the $j$-th index in $[n]\setminus C$.
		We define the set $A^{(C,a)}\subseteq RS$ as the set of resample sequences $(r_\ell)$ such that for all $j$ which has $a_j=\text{res}$ we have that $i_j$ appears in $(r_\ell)$ but for $j'$-s which have $a_{j'}=\neg\text{res}$ we have that $i_{j'}$ never appears in $(r_\ell)$. 
    \end{definition}    
    \begin{definition}[Expected number of resamples]
		For $b\in\{0,1\}^n$ we define 
		$$R_b=\mathbb{E}[\#\{\text{resamplings when started from initial state }b\}],$$
		and for $(C,a)$ as in the previous definition we also define
		$$R^{(C,a)}_b=\mathbb{E}[\#\{\text{resamplings }\in A^{(C,a)} \text{ when started from initial state }b\}].$$
		Here we mean by the latter that after each resampling we check whether the sequence of resamplings so far is in $A^{(C,a)}$; if it is, we count that resampling, otherwise we do not.
    \end{definition}     
    
    As in Mario's proof I use the observation that 
    \begin{align*}
    R^{(n)}(p) &= \frac{1}{n}\sum_{b\in\{0,1,1'\}^{n}} \rho_b \; R_{\bar{b}}(p)\\
    &= \frac{1}{n}\sum_{C\subseteq [n]}\sum_{f\in\{0,1'\}^{|C|}} \rho_{C(f)} R_{C(f)}(p)\\
    &= \frac{1}{n}\sum_{C\subseteq [n]}\sum_{f\in\{0,1'\}^{|C|}}\sum_{a\in\{\text{res},\neg\text{res}\}^{n-|C|}} \rho_{C(f)} R^{{(C,a)}}_{C(f)}(p)\\
    &= \frac{1}{n}\sum_{C\subseteq [n]}\sum_{a\in\{\text{res},\neg\text{res}\}^{n-|C|}} \sum_{f\in\{0,1'\}^{|C|}} \rho_{C(f)} R^{{(C,a)}}_{C(f)}(p), 
    \end{align*}
    where we denote by $C\subseteq[n]$ a slot configuration, whereas $C(f)$ denotes the slots of $C$ filled with the particles described by $f$, while all other locations in $[n]\setminus C$ are set to $1$. 
	When we write $R_{C(f)}$ we mean $R_{C(\bar{f})}$, i.e., we replace $1'$-s with $1$-s. Since the notation is already heavy, we dropped the bar from $f$, as it is clear from the context.
    
    As in Definition~\ref{def:constrainedRes} for $j\in[n-|C|]$ let $i_j$ denote the $j$-th index in $[n]\setminus C$.
    Suppose that $a$ is such that there are two indices $j_1\neq j_2$ such that 
    $a_{j_1}=\neg\text{res}=a_{j_2}$; moreover, the sets $\{i_{j_1}+1,\ldots, i_{j_2}-1\}$ and $\{i_{j_2}+1,\ldots, i_{j_1}-1\}$ partition $C$ non-trivially, and we denote by $C_l$ and $C_r$ the corresponding parts. 
    We claim that 
    $$\sum_{f\in\{0,1'\}^{|C|}} \rho_{C(f)} R^{{(C,a)}}_{C(f)}(p)=0.$$
    
	This is based on the observation that for all $f\in\{0,1'\}^{|C|}$ we have 
    that 
    \begin{equation}\label{eq:keyIndependceWrong}
    R^{{(C,a)}}_{C(f)}(p)=R^{{(C_l,a_l)}}_{C_l(f_l)}(p)+R^{{(C_r,a_r)}}_{C_r(f_r)}(p),
    \end{equation}
    where $f_l\in\{0,1'\}^{|C_l|}$ is defined by taking only the indices (and values) of $f$ corresponding to vertices of $C_l$; also $a_l\in\{\text{res},\neg\text{res}\}^{n-|C_l|}$ is defined such that $a$ and $a_l$ agree on the vertices where $a$ is defined, while on the vertices where $a$ is not defined, i.e., the vertices of $C_r$, we set $a_l$ to $\neg\text{res}$. We define $f_r$ and $a_r$ analogously.
    
    The reason why \eqref{eq:keyIndependceWrong} holds is, as before, that the two halves of the cycle are conditionally independent because neither $i_{j_1}$ nor $i_{j_2}$ is resampled. One could probably also argue along the lines of Tom's grid figure. 
    From here the proof goes just as in Mario's proof:
    \begin{align*}
    \sum_{f\in\{0,1'\}^{|C|}} \rho_{C(f)} R^{{(C,a)}}_{C(f)}(p)&=
    \sum_{f_l\in\{0,1'\}^{|C_l|}} \sum_{f_r\in\{0,1'\}^{|C_r|}}  (-1)^{|f_l|+|f_r|}p^{|C_l|+|C_r|} \left( R^{{(C_l,a_l)}}_{C_l(f_l)}(p) + R^{{(C_r,a_r)}}_{C_r(f_r)}(p) \right)\\
    &= p^{|C|}\sum_{f_l\in\{0,1'\}^{|C_l|}} (-1)^{|f_l|} R^{{(C_l,a_l)}}_{C_l(f_l)}(p) \sum_{f_r\in\{0,1'\}^{|C_r|}} (-1)^{|f_r|} \\
    &\quad + p^{|C|}\sum_{f_r\in\{0,1'\}^{|C_r|}} (-1)^{|f_r|} R^{{(C_r,a_r)}}_{C_r(f_r)}(p) \sum_{f_l\in\{0,1'\}^{|C_l|}} (-1)^{|f_l|} \\
    &= 0.
    \end{align*}
    
    Observe that it implies that the only non-zero contribution comes from pairs $(C,a)$ such that $C\cup\{i_j:a_j=\text{res}\}$ is connected. This is because if this set is not connected, then either we can cut $C$ into two halves non-trivially along $\neg\text{res}$ vertices, or there is an island of $\text{res}$ vertices separated from all slots, and therefore from all $0$-s. This latter case has zero contribution, since we cannot resample these indices without first setting them to $0$, but under the conditioning they can never be reached by any resampling, and therefore they always remain $1$.
    
    If $|C\cup\{i_j:a_j=\text{res}\}|\geq k+1$ then every contribution has a power of at least $k+1$ in $p$, since $(C,a)$ requires the prior appearance of at least $k+1$ particles. If $n\geq k+1$ then every $(C,a)$ such that $|C\cup\{i_j:a_j=\text{res}\}|\leq k$ appears exactly $n$ times, since $(C,a)$ cannot be translationally invariant. Moreover, the quantity $R^{{(C,a)}}_{C(f)}(p)$ is independent of $n$ due to the conditioning that every resampling happens on a connected component of length at most $k<n$. This proves that $a_k^{(n)}$ is constant for $n\geq k+1$.
    
    Note that the heart of the proof is \eqref{eq:keyIndependceWrong}, so this is what we should double check.    

	The same arguments directly translate to the torus and other translationally invariant objects, so we can go higher dimensional :-) as Mario suggested.
	
	Questions:
	\begin{itemize}
		\item In view of this proof, can we better characterise $a_k^{(k+1)}$?
		\item Why did Mario's and Tom's simulation show that for fixed $C$ the contribution coefficients have constant sign? Is it relevant for proving \ref{it:pos}-\ref{it:geq}?
		\item Can we prove the conjectured formula for $a_k^{(3)}$?		
	\end{itemize} 
\end{comment}
        
\begin{comment}    
    \begin{definition}[Neighborhood]
	   	For the length-$n$ cycle we identify sites with $[n]$. 
	   	For a subset $S\subseteq [n]$ we define the $k$ neighborhood of $S$ as
	   	$N_k(S):=\cup_{s\in S} \{s-k,s-k+1,\ldots,s+k\}$, where numbers are interpreted mod $n$ (and we represent the $\equiv 0$ class by $n$).
	\end{definition}
	\begin{definition}[Blocks and Gaps]
	   	For a configuration $C\subseteq [n]$ we call the connected components of $[n]\setminus N_1(C)$ the gaps. We denote by $m_C$ the number of gaps.
	   	We call a non-empty subset $B\subset C$ a block if $N_3(B)\cap C=B$ and $B$ is minimal, i.e., there is no proper subset $\emptyset\neq B'\subsetneq B$ satisfying $N_3(B')\cap C=B'$. 
	   	Observe that whenever $m_C\geq 2$ the number of blocks is the same as the number of gaps.  
    \end{definition}
    \begin{definition}[Crossings]
    	We say that a run (path) of the resampling procedure crosses $i\in[n]$ if there is ever a $0$ in $N_1({i})$ during the run.
    \end{definition}
    \begin{definition}[Enumerating gaps and mid points]
		Let $G_1,G_2,\ldots, G_{m_C}$ be an enumeration of the gaps respecting the cyclic ordering, and let $g_i$ be the middle element of $G_i$, if there are two middle elements we choose the smaller according to the cyclic ordering. (If $m_C=1$ and $G_1=[n]$ let $g_1=1$.)
		If $m_C\geq 2$ then for all $i\in[m_C]$ let $B_i$ be the block between $G_i$ and $G_{i+1}$.
    \end{definition}
    
    As in Mario's proof I use the observation that 
    \begin{align*}
    R^{(n)}(p) &= \frac{1}{n}\sum_{b\in\{0,1,1'\}^{n}} \rho_b \; R_b(p)\\
    &= \frac{1}{n}\sum_{C\subseteq [n]}\sum_{f\in\{0,1'\}^{|C|}} \rho_{C(f)} R_{C(f)}(p),
    \end{align*}
    where we denote by $C\subseteq[n]$ a slot configuration, whereas $C(f)$ denotes the slots of $C$ filled with the particles described by $f$. 
    For $a\in\{\text{crossed},\text{not crossed}\}^{m_C}$ we also introduce the notation $R^a_{C(f)}(p):=\mathbb{E}(\#\{\text{resamples before reaching }\mathbf{1} \text{ from } C(f)\}|\bigwedge_{j\in[m_C]}g_j \text{ is } a_j)\cdot\mathbb{P}(\bigwedge_{j\in[m_C]}g_j \text{ is } a_j)$, which we define as $0$ if the conditioning event has $0$ probability. 
    Since $$R_{C(f)}(p)=\sum_{a\in\{\text{crossed},\text{not crossed}\}^{m_C}}R^a_{C(f)}(p),$$ we can further rewrite the expectation as
    \begin{align*}
	    R^{(n)}(p) &= \frac{1}{n}\sum_{C\subseteq [n]}\sum_{a\in\{\text{crossed},\text{not crossed}\}^{m_C}}\sum_{f\in\{0,1'\}^{|C|}} \rho_{C(f)} R^a_{C(f)}(p).
    \end{align*}
    Suppose that $a$ contains at least two ``not crossed'' labels; then we claim that $\sum_{f\in\{0,1'\}^{|C|}} \rho_{C(f)} R^a_{C(f)}(p)=0$. Let $j_1\neq j_2$ be two distinct indices such that $a_{j_1}$ and $a_{j_2}$ both say ``not crossed''. Let $B_l:=B_{j_1}\cup B_{j_1+1}\cup\cdots\cup B_{j_2-1}$ and $B_r:=B_{j_2}\cup B_{j_2+1}\cup\cdots\cup B_{j_1-1}$ (again we interpret indices mod $m_C$).
    Then we claim that for all $f\in\{0,1'\}^{|C|}$ we have 
    that 
    \begin{equation}\label{eq:keyIndependceOld}
		R^a_{C(f)}(p)=R^a_{B_l(f_l)}(p)+R^a_{B_r(f_r)}(p),
    \end{equation} 
    where $f_l$ and $f_r$ denote the restrictions of $f$ to $B_l$ and $B_r$. The reason is as before that the halves are independent, because neither $g_{j_1}$ nor $g_{j_2}$ is crossed. One could probably also prove it along the lines of the grid figure. 
    From here the proof goes just as in Mario's proof:
    \begin{align*}
    \sum_{f\in\{0,1'\}^{|C|}} \rho_{C(f)} R^a_{C(f)}(p)&=
    \sum_{f_l\in\{0,1'\}^{|B_l|}} \sum_{f_r\in\{0,1'\}^{|B_r|}}  (-1)^{|f_l|+|f_r|}p^{|B_l|+|B_r|} \left( R^a_{B_l(f_l)} + R^a_{B_r(f_r)} \right)\\
    &= p^{|C|}\sum_{f_l\in\{0,1'\}^{|B_l|}} (-1)^{|f_l|} R^a_{B_l(f_l)} \sum_{f_r\in\{0,1'\}^{|B_r|}} (-1)^{|f_r|} \\
    &\quad + p^{|C|}\sum_{f_r\in\{0,1'\}^{|B_r|}} (-1)^{|f_r|} R^a_{B_r(f_r)} \sum_{f_l\in\{0,1'\}^{|B_l|}} (-1)^{|f_l|} \\
    &= 0 
    \end{align*}
    From this it follows that the only contribution comes from paths that cross all but one (or all) of the gap mid points. This then implies that it is enough to consider configurations of length $\mathcal{O}(k)$. (We define the length of a configuration $C$ as $n-\max_{j\in[m_C]}|G_j|$.)
    
    Note that the heart of the proof is \eqref{eq:keyIndependceOld}, so this is what we should double check.
    
    In fact, I think the independence that we use in \eqref{eq:keyIndependceOld} can also be proven when we define a crossing as crossing the actual point, and not its $1$-neighborhood. It would then make it possible to define blocks as consecutive slacks. Also, we could then actually use all points of the gaps, not only the mid points. The requirement for the cancellation would be that there are ``not crossed'' labels from at least two distinct gaps. This would probably lead to the optimal $k+1$ bound, giving the actual statement \ref{it:const}. 
    
    Speculation: the $n=k$ case would then probably not work, because the all-$0$ starting configuration is invariant under rotations.
    To actually go below $2k$ one needs to be careful, because there are periodic configurations that are invariant under some rotations, causing double-counting issues. This can probably be resolved by showing that when a pattern becomes periodic for some $n$, it actually produces periodicity-times more expectation due to symmetry. But this is all just speculation.
\end{comment}

	\bibliographystyle{alpha}
	\bibliography{Resample.bib}
	
\end{document}