Changeset - 527e0b602063
Shravas K. Rao - 2016-10-05 14:34:35
shravas@gup-130-157.cwi.nl
fixed normalization error in main proof
1 file changed with 4 insertions and 4 deletions:
disctospec.tex
 
@@ -178,41 +178,41 @@ Decompose $P$ into a sum of linear forms as in Lemma~\ref{lem:classtonon}.  For
 
\sum_{j=0}^d \alpha^i_j T_i(x+jy, \ldots, x+jy) = T_i(y, \ldots, y).\label{eq:need}
 
\end{equation}
 
We can then construct $P_j$ satisfying the lemma as follows.
 
\[
 
P_j(x) = \sum_{i=0}^d \alpha^i_j T_i(x, \ldots, x).
 
\]
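
To see that this suffices, write $P(y) = \sum_{i=0}^d T_i(y, \ldots, y)$ for the decomposition above; then, granting~\eqref{eq:need}, summing over $i$ gives

\[

\sum_{j=0}^d P_j(x+jy) = \sum_{i=0}^d \sum_{j=0}^d \alpha^i_j T_i(x+jy, \ldots, x+jy) = \sum_{i=0}^d T_i(y, \ldots, y) = P(y),

\]

which after exponentiating is the identity $e(P_0(x))e(P_1(x+y))\cdots e(P_d(x+dy)) = e(P(y))$ that the lemma requires.  It remains to choose the $\alpha^i_j$ so that~\eqref{eq:need} holds.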
 
Because $T_i$ is an $i$-linear form, we can rewrite the left-hand side of~\eqref{eq:need} as
 
\[
 
\sum_{j=0}^d \sum_{s \in \{0, 1\}^i} \alpha^i_j T_i((1-s_1)x+js_1y, \ldots, (1-s_i)x+js_iy) = 
 
\sum_{j=0}^d \sum_{s \in \{0, 1\}^i} \alpha^i_j j^{|s|}T_i((1-s_1)x+s_1y, \ldots, (1-s_i)x+s_iy)
 
\]
 
where $|s|$ denotes the Hamming weight of $s$.  The only $s$ of weight $i$ is $(1, \ldots, 1)$, whose total contribution to the right-hand side is $\bigl(\sum_{j=0}^d \alpha^i_j j^i\bigr) T_i(y, \ldots, y)$, so~\eqref{eq:need} holds if for all $0 \leq k < i$,
 
\[
 
\sum_{j=0}^d \alpha^i_j j^{k} = 0
 
\]
 
and
 
\[
 
\sum_{j=0}^d \alpha^i_j j^{i} = 1.
 
\]
 
This is a system of $i+1$ linear equations in the unknowns $\alpha^i_0, \ldots, \alpha^i_d$, whose coefficient matrix consists of the first $i+1$ rows of the $(d+1) \times (d+1)$ Vandermonde matrix with nodes $0, 1, \ldots, d$.  Because $d < p$, these nodes are distinct modulo $p$, so the determinant of this Vandermonde matrix is non-zero; the matrix is therefore invertible, and the desired $\alpha^i_j$ exist.
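
For concreteness, take $d = 2$ and $i = 1$: the conditions read

\[

\alpha^1_0 + \alpha^1_1 + \alpha^1_2 = 0 \qquad \text{and} \qquad 0 \cdot \alpha^1_0 + 1 \cdot \alpha^1_1 + 2 \cdot \alpha^1_2 = 1,

\]

and one solution is $(\alpha^1_0, \alpha^1_1, \alpha^1_2) = (-1, 1, 0)$; indeed, $-T_1(x) + T_1(x+y) = T_1(y)$ by linearity, which is~\eqref{eq:need} for $i = 1$.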
 
\end{proof}
 
\begin{proof}[Proof of Lemma~\ref{lem:disctospec}]
 
-We prove the contrapositive.  Assume that $\cay^{(t)}(\Gamma, q, S')$ does not have spectral expansion $\lambda$; that is, there exist vectors $x_1, \ldots, x_t$ such that $\|x_i\|_{\ell_t} = 1$ for all $i$, but $(A_H-A_K)(x_1, \ldots, x_t) \geq \lambda$.  Let $1_{H, K} = \left(\frac{1}{|S'|}-\frac{1}{|S|}\right)(1_{S'}-1_{S})$.  Then

+We prove the contrapositive.  Assume that $\cay^{(t)}(\Gamma, q, S')$ does not have spectral expansion $\lambda$; that is, there exist vectors $x_1, \ldots, x_t$ such that $\|x_i\|_{\ell_t} = 1$ for all $i$, but $(A_H-A_K)(x_1, \ldots, x_t) \geq \lambda$.  Let $1_{H, K} = \frac{1_{S'}}{|S'|}-\frac{1_{S}}{|S|}$.  Then
 
 \begin{equation}
 
-(A_H-A_K)(x_1, \ldots, x_t) = \frac{1}{t!} \sum_{\sigma \in S_t} \mathbb{E}_{u, g}[x_1(ug^{\sigma(1)-1})x_2(ug^{\sigma(2)-1})\cdots x_t(ug^{\sigma(t)-1})1_{H, K}(g)]. \label{eq:unravel}

+(A_H-A_K)(x_1, \ldots, x_t) = \frac{|\Gamma|^2}{t!} \sum_{\sigma \in S_t} \mathbb{E}_{u, g}[x_1(ug^{\sigma(1)-1})x_2(ug^{\sigma(2)-1})\cdots x_t(ug^{\sigma(t)-1})1_{H, K}(g)]. \label{eq:unravel}
 
\end{equation}
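
For instance, when $t = 2$ the two permutations in~\eqref{eq:unravel} give

\[

(A_H-A_K)(x_1, x_2) = \frac{|\Gamma|^2}{2} \mathbb{E}_{u, g}[(x_1(u)x_2(ug) + x_1(ug)x_2(u))1_{H, K}(g)].

\]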
 
By Lemma~\ref{lem:genvn}, \eqref{eq:unravel} is bounded above by $\|1_{H, K}\|_{U^t}$.  By the inverse Gowers theorem, there exists a polynomial $P$ of degree $t-1$ such that $\mathbb{E}_{g}[e(P(g)) 1_{H, K}(g)] \geq \epsilon$ for some $\epsilon > 0$ depending on $\lambda$ and $t$.  By Lemma~\ref{lem:decomp}, there exist $P_0, \ldots, P_{t-1}$ such that \[e(P_0(x))e(P_1(x+y))\cdots e(P_{t-1}(x+(t-1)y)) = e(P(y)),\] and therefore, by the substitution spelled out below, \[\mathbb{E}_{u, g}[e(P_0(u))e(P_1(ug))\cdots e(P_{t-1}(ug^{t-1})) 1_{H, K}(g)] = \mathbb{E}_{g}[e(P(g)) 1_{H, K}(g)] \geq \epsilon,\]
 
and
 
\[
 
-(A_H-A_K)(e(P_0), \ldots, e(P_{t-1})) \geq \frac{\epsilon}{t!}.

+(A_H-A_K)(e(P_0), \ldots, e(P_{t-1})) \geq \frac{\epsilon|\Gamma|^2}{t!}.
 
\]
 
-Because the coordinates of $e(P_i)$ are bounded in absolute value by $1$ for all $i$, Lemma~\ref{lem:disctoinfty} implies that $H$ has discrepancy at least $\epsilon/(t!\pi^t)$ with respect to $K$.

+Because the coordinates of $e(P_i)$ are bounded in absolute value by $1$ for all $i$, Lemma~\ref{lem:disctoinfty} implies that $H$ has discrepancy at least $\epsilon|\Gamma|^2/(t!\pi^t)$ with respect to $K$.
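
To spell out the substitution used above: writing the group additively, so that $ug^{j}$ corresponds to $u + jg$ (the setting in which Lemma~\ref{lem:decomp} applies), we have for every fixed $u$ and $g$

\[

e(P_0(u))e(P_1(u+g))\cdots e(P_{t-1}(u+(t-1)g)) = e(P(g)),

\]

which is independent of $u$; multiplying by $1_{H, K}(g)$ and averaging over $u$ and $g$ gives $\mathbb{E}_{u, g}[e(P_0(u))\cdots e(P_{t-1}(ug^{t-1}))1_{H, K}(g)] = \mathbb{E}_{g}[e(P(g))1_{H, K}(g)]$, as claimed.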
 
\end{proof}
 
 
\bibliographystyle{alphaabbrv}
 
\bibliography{disctospec}
 
\end{document}
 
\ No newline at end of file