%A Plain TeX file for an 18 page document
%Two New Extensions of the Hales-Jewett theorem
%Randall McCutcheon
% --- Fonts and page layout (plain TeX preamble) ---
% Small fonts used in the abstract block and the running head.
\font\abs=cmr8
\font\abbf=cmbx8
\font\captain=cmr7
\font\smcp=cmcsc8
%\font\nw=cmbx8
% Running headline: journal banner + page number, suppressed on page 1.
\headline={\ifnum\pageno>1{\smcp the electronic journal
of combinatorics {\bf 7} (2000),\#R49\hfill\folio}\fi}
\magnification=\magstep 1
% Page numbers are carried by the headline above, so the footline is off.
\nopagenumbers
%\baselineskip=14pt
%\font\it=cmti12
%\font\bf=cmbx12
%\font\mainf=cmr12
% Text block dimensions ("true" units ignore \magnification scaling).
\hsize=6.2 true in\vsize=8.5 true in
% --- Notation shorthands ---
% NOTE(review): several of these \def's deliberately override plain TeX
% primitives/accents (\th, \t, \b, \c, \i is redefined below, etc.);
% this is intentional for this document, which never uses the originals.
\def\calMtwo{{\cal M}}\def\calN{{\cal N}}
\def\oneT{\{1,\ldots, T\}}\def\wk{{\cal W}_k}
\def\mkt{{\cal M}_k^T}\def\lt{left topological }
\def\ob{\overline{b}}\def\oay{\overline{a}}
\def\wkn{{\cal W}_k^N}\def\jay{{\cal J}}
\def\bg{\bigskip}
\def\hfon{\noindent\hfonn}
\def\range{\hbox{range}}
\def\Q{{\cal Q}}
\def\sol{ \{ 1,2,\cdots ,l\} }
\def\soN{ \{ 1,2,\cdots ,N\} }
\def\soM{ \{ 1,2,\cdots ,M\} }
\def\calh{{\cal H}}
\def\valo{ (V_\alpha^{(1)})_{\alpha\in \fone}}
\def\valt{ (V_\alpha^{(2)})_{\alpha\in \fone}}
\def\valk{ (V_\alpha^{(k)})_{\alpha\in \fone}}
\def\zdxr{(Z,{\cal D},\xi,R)}\def\bl{\langle}\def\br{\rangle}
\def\linftyx{L^\infty(X,{\cal A}, \mu)}
% Cesaro-type averaging limits.
\def\clim {\lim_{N-M\rightarrow\infty} {1\over N-M} \sum_{n=M}^{N-1}}
\def\climinf {\liminf_{N-M\rightarrow\infty} {1\over N-M} \sum_{n=M}^{N-1}}
\def\climh{\lim_{H\rightarrow\infty} {1\over H} \sum_{h=1}^H}
\def\ybnt{(Y,{\cal B},\nu,S)}
\def\ni{\noindent}
\def\ybns{(Y,{\cal B},\nu,S)}\def\H{{\bf H}}
\def\xtil{(\tilde{X}, \tilde{\a}, \tilde{\mu}, \tilde{T})}
\def\ltwoxtil{L^2(X\times X, \a\otimes \a, \mu\times_Y\mu)}
\def\na{n_\alpha}\def\ma{2n_\alpha}
\def\ybns{(Y,{\cal B},\nu,S)}
\def\xaut{(X, {\cal A},\mu,T)}\def\aktom{\alpha_k\cup\cdots \cup\alpha_m}
\def\xauxau{( X\times X, {\cal A}\otimes {\cal A}, \mu\times \mu)}
\def\pam{P} %_{{\cal M}}}
\def\LL{{\cal L}}\def\L{{\cal L}}
\def\calM{{\cal M}}
\def\aitch{{\cal H}}\def\snoi{_{n=1}^\infty}
\def\ar{{\bf R}}
\def\ss{{1\over N} \sum_{n=1}^N}\def\ga{\gamma}\def\eee{{\cal E}}
\def\ssr{{1\over R} \sum_{r=1}^R}\def\emm{{\cal M}}
\def\Bnorm{\bigg\vert \bigg\vert}
\def\Lt{ L^2(X,\a,\mu)} \def\ra{\rightarrow}\def\soi{_{i=1}^\infty}
\def\pf{{\bf Proof. }}\def\half{{1\over 2}}
\def\sumoN{{1\over N}\sum_{n=1}^N}\def\goinf{\rightarrow\infty}
\def\xt{(X,{\cal A},\mu,T)}
\def\mps{measure preserving system}
\def\od{\overline{d}}\def\supp{\hbox{supp }}
\def\eye{{\cal I}}
\def\wone{{\cal W}^{(1)}}
\def\wtwo{{\cal W}^{(2)}}
\def\gee{{\cal G}}
\def\vee{{\cal V}}
\def\vone{{\cal V}^{(1)}}
\def\vtwo{{\cal V}^{(2)}}
\def\gtwo{{\cal G}^{(2)}}
\def\gthree{{\cal G}^{(3)}}
% IP-families of transformations indexed by finite sets alpha.
\def\tai {\{ T_\alpha ^{(i)}\}_{\alpha\in\f} }
\def\tee{{\cal T}}
\def\pee{{\cal P}}
\def\zee{{\cal Z}}
\def\iv {\Lambda}
\def\ta {\{ T_\alpha \}_{\alpha\in\f} }
\def\taone {\{ T_\alpha ^{(1)}\}_{\alpha\in\f} }
\def\tak {\{ T_\alpha ^{(k)}\}_{\alpha\in\f} }
\def\eh {{\epsilon\over 2}}
\def\t {{\cal T}}
\def\s {{\cal S}}
\def\Th {\theta}
\def\th {\theta}
\def\be{\beta}
\def\al{\alpha}
\def\emp{\emptyset}
% IP-limit with the index tuple typeset beneath "lim" via negative kerns.
\def\ipmt{{\hbox{\rm IP-}\kern-1pt\lim{{\kern-59 pt\lower1ex\hbox{$_
{(\alpha_1,\cdots ,\alpha_{m})\in({\cal F}^{(1)})^{m}_<}$}}}\;\;\; }}
\def\aub{ \alpha\cup\beta}
\def\en{{\bf n}}
\def\xau{(X,{\cal A},\mu)}
\def\bft{{\bf t}}
\def\q {{\bf Q}}
\def\e {\epsilon}
\def\bfn {{\bf n}}
\def\fempty{ {\cal F}_\emptyset}
% Shifted IP*-set terminology (with built-in surrounding spaces).
\def\shiftip { IP$^*_+$-set }
\def\shiftips { IP$^*_+$-sets }
\def\ttil { \tilde{T}}
\def\a {{\cal A}}
\def\aonetom {\alpha_1,\cdots ,\alpha_m}
\def\overg {\overline{G}}
\def\mod {\hbox{ mod}}
\def\aa {{\cal A}}
\def\bb {{\cal B}}
\def\cc {{\cal C}}
\def\c {{\cal C}}
\def\w {{\cal W}}
\def\tgamma {\tilde{\gamma}}
\def\f {{\cal F}}
\def\n {{\bf N}}
\def\h {{\cal H}}
\def\zed {{\bf Z}}
\def\zpoly {{\bf Z}[x_1,\cdots ,x_k]}
\def\p {{\bf P}}
% --- Chapter / equation / exercise numbering ---
\newcount\chapter\chapter =0
\newcount\equation\equation =0
\newcount\exer\exer =0
% \newchapter: advance the chapter counter, reset the equation and
% exercise counters, and start a fresh page.
\def\newchapter{\global\advance \chapter by 1\global\equation =0
\global\exer =0\vfill\eject\noindent}
% \i: advance the equation counter and tag the current display as
% (chapter.equation) at the right margin.  (Overrides plain TeX's
% dotless-i accent \i, which this document does not use.)
\def\i {\global\advance\equation by 1
\eqno{ (\number \chapter .\number \equation) }}
% \ex: advance the exercise counter and emit a bold
% "Exercise chapter.exer." heading.
\def\ex {\noindent
\global \advance\exer by 1 \bf Exercise \number\chapter .\number
\exer.}
% --- Further shorthands: brackets, norms, families, spaces ---
\def\l {\big\langle}
\def\r {\big\rangle}
\def\ab {\alpha\cup\beta}
\def\norm {\big\vert\big\vert}
\def\snorm {\vert\vert}
\def\bnorm {\Big\vert\Big\vert}
\def\fone {{\cal F}^{(1)}}
\def\aoneton {\alpha_1,\cdots ,\alpha_N}
\def\bonetom {\beta_1,\cdots ,\beta_m}
\def\ftwo {{\cal F}^{(2)}}
\def\fthree {{\cal F}^{(3)}}
\def\ffour {{\cal F}^{(4)}}
\def\pe {{\bf PE}}
\def\xy {{X\over Y}}
\def\zy {{Z\over Y}}
\def\ainfone {\alpha\in {\cal F}^{(1)}}
\def\ainf {\alpha\in {\cal F}}
\def\one {^{(1)}}
\def\j {^{(j)}}
\def\bet {^{(\beta)}}
\def\verse {^{-1}}
\def\dlim{D\hbox{-}\lim}
\def\ltwoc{L^2_{{\bf C}}}
\def\es {^{(s)}}
\def\ltwo {L^2}
\def\ztimesz {Z\times_Y Z}
\def\lessd {_{\leq d}}
\def\z {{\bf Z}}
\def\y {(Y,\bb,\nu,\Omega)}
\def\x {(X,\aa,\mu,\Omega)}
\def\b {{\cal B}}
% \Box: hand-built end-of-proof box (open square), flush right.
\def\Box {\smallskip\hfill\vbox {\hrule \hbox{\vrule \kern 3pt
\vbox{\kern 6pt}\kern 3pt\vrule }\hrule}\medskip}
\def\med {\medskip}
% \part: the generic r-cell partition \bigcup_{i=1}^r C_i.
\def\part {\bigcup_{i=1}^r C_i}
\def\ap {arithmetic progression }
\def\ay{{\cal A}}\def\ol{\overline}
\def\cms{compact metric space }
\def\tx{ T:X\rightarrow X}
\def\breaky{\break\vskip - 10 pt}
% Display fonts for the title page.
\font\hugester=cmbx10 scaled \magstep5
\font\hugest=cmbx10 scaled \magstep4
\font\huger=cmbx10 scaled \magstep3
\font\huge=cmbx10 scaled \magstep1
\font\larger=cmcsc10 scaled \magstep0
\font\large=cmbx10 scaled \magstep0
\font\names=cmr10 scaled \magstep2
\font\namescap=cmr10 scaled\magstep4
\font\hfonn=cmsl10
% \ok: the k-letter alphabet \{0,1,...,k-1\}.
\def\ok{\{0,1,\ldots, k-1\}}
\hbox{ }
\footnote{}{\noindent
$^*$The author acknowledges support from the National Science
Foundation via a post doctoral fellowship administered by the
University of Maryland.}
%\mainf
\bigskip
\centerline{\huge Two New Extensions of the Hales-Jewett Theorem}
%\centerline{\huge TWO NEW EXTENSIONS OF THE HALES-JEWETT THEOREM}
\bigskip\bigskip
\centerline{\larger Randall McCutcheon$^*$}
\centerline{Department of Mathematics}
\centerline{University of Maryland}
\centerline{College Park, MD 20742}
\centerline{randall@math.umd.edu}
\smallskip
\centerline{Submitted: June 30, 2000; Accepted: September 28, 2000}
\bigskip
\bigskip
\centerline{\vbox{\hsize = 5 true in
\noindent {\abbf Abstract:}
\abs\baselineskip = 10 pt We prove two extensions of the
Hales-Jewett coloring theorem.
The first is a polynomial version
of a finitary case of Furstenberg and
Katznelson's multiparameter
elaboration of a theorem, due to Carlson, about variable words.
The second is an ``idempotent'' version of
a result of Carlson and Simpson.}}
\smallskip
\centerline{\vbox{\hsize = 5 true in
\noindent
{\abbf MSC2000:} {\abs Primary 05D10; Secondary
22A15.}}}
\bigskip
\ni
For $k,N\in \n$, let $\wkn$ denote the set of length-$N$ words
on the alphabet $\{ 0,1,\cdots ,k-1\}$. A {\it variable word}
over $\wkn$ is a word $w(x)$ of length $N$ on the alphabet
$\{ 0,1,\cdots ,k-1,x\}$ in which the letter $x$ appears at
least once. If $w(x)$ is a variable word and $i\in\ok$, we denote
by $w(i)$ the word that is obtained by replacing each occurrence of
$x$ in $w(x)$ by an $i$. The Hales-Jewett theorem states that
for every $k,r\in\n$, there exists $N=N(k,r)\in\n$ such that for
any partition $\wkn=\part$, there exist $j$, $1\leq j\leq r$, and
a variable word $w(x)$ over $\wkn$ such that $\big\{ w(i):i\in
\ok\big\}\subset C_j$.
\med\ni
{\bf 1. Finitary extensions.}
\med\ni
In [BL], V. Bergelson and A. Leibman provided a ``polynomial'' version of
the Hales-Jewett theorem. In order to formulate their result, we must
develop some terminology. Let $l\in \n$.
A {\it set-monomial} (over $\n^l$)
in the variable $X$ is an expression
$m(X)= S_1\times S_2\times \cdots \times S_l$, where for each $i$, $1\leq i
\leq l$, $S_i$ is either the symbol $X$ or a non-empty
singleton subset of $\n$ (these are called {\it coordinate
coefficients}). The
{\it degree} of the monomial is the number of times the symbol $X$ appears
in the list $S_1,\cdots ,S_l$.
For example, taking $l=3$, $m(X) = \{ 5\} \times X\times X$ is a
set-monomial of degree 2, while $m(X)=X\times \{ 17\} \times \{ 2\}$
is a set-monomial of degree 1. A {\it set-polynomial} is an expression of
the form $P(X)=m_1(X)\cup m_2(X)\cup\cdots \cup m_k(X)$,
where $k\in \n$ and $m_1(X),\cdots ,m_k(X)$
are set-monomials. The degree of a set-polynomial is the largest degree
of its set-monomial ``summands'', and its {\it constant term} consists of the
``sum'' of those $m_i$ that are constant, i.e. of degree zero.
Finally, we say that two set polynomials are {\it disjoint} if they share
no set-monomial summands in common.
Let $\f(S)$ denote the family of non-empty
finite subsets of a set $S$. Any non-empty set
polynomial $P(X)$ determines a function from $\f(\n)$ to $\f(\n^l)$ in
the obvious way (interpreting the symbol $\times$ as Cartesian product
and the symbol $\cup$ as union).
Notice that if $P(X)$ and $Q(X)$ are disjoint set-polynomials and
$B\in \f(\n)$ contains no coordinate coefficients of either $P$ or
$Q$ then $P(B)\cap Q(B)=\emp$.
Here now is the Bergelson-Leibman
coloring theorem.
\med\ni
{\bf Theorem 1.1.} Let $l\in \n$ and let
$\pee$ be a finite family of set-polynomials over $\n^l$ whose
constant terms are empty. Let $I\subset \n$ be any finite set and let
$r\in \n$.
There exists a finite set $S\subset \n$, with $S\cap I=\emptyset$,
such that if $\f\big( \bigcup _{P\in \pee} P(S)\big)=\part$
then there exists $i$, $1\leq i\leq r$, some non-empty
$B\subset S$, and some $A\subset \bigcup _{P\in \pee} P(S)$ with
$A\cap P(B)=\emptyset $ for all $P\in \pee$ and
$\big\{ A\cup P(B): P\in \pee \big\} \subset C_i.$
\med
Although the ``polynomial'' nature of Theorem 1.1 is at once clear,
it is not immediately obvious that it includes the Hales-Jewett
theorem as a special case, so we shall give a different formulation,
and derive it from Theorem 1.1.
% \mkNd: the "matrices" {\cal M}_k^N(d), i.e. functions
% \{1,...,N\}^d -> \{0,...,k-1\} as defined in the next paragraph.
\def\mkNd{ {\cal M}_k^N(d)}
Let $k,N,d\in \n$. We denote by $\mkNd$ the set of all functions
$\phi: \{ 1,2,\ldots ,N\} ^d\rightarrow \ok$. When $d=2$, one may
identify this with the set of $N\times N$ matrices with entries
belonging to $\ok$, so in general we shall refer to the members of
$\mkNd$ as matrices, even when $d>2$. A {\it variable matrix}
over $\mkNd$ is a function
$\psi: \{ 1,2,\ldots ,N\} ^d\rightarrow \{ 0,1,\ldots ,k-1,x\}$
for which $x$ appears in the range. The {\it support} of $\psi$
is the set $\psi\verse (x)$; that is, the set of
locations in the matrix where the symbol $x$ appears. If $\psi$ is
a variable matrix over $\mkNd$, $\psi$ is said to be {\it
standard} if its support has the form $B^d$ for some $B\subset \{ 1,2,
\ldots ,N\}$.
We shall also consider multi-variable matrices
$\psi: \{ 1,2,\ldots ,N\} ^d\rightarrow \{ 0,1,\ldots
,k-1,x_1,x_2,\ldots , x_t\} $. In this case we require that all the
$x_i$ appear in the range, and we call $\psi\verse (x_i)$ the
{\it $i$th} support of $\psi$. If $\psi$ is a $t$-variable matrix
then $\psi$ gives rise, via substitution, to a function
$w(x_1,\ldots ,x_t): \{ 0,\ldots ,k-1\}^t\ra {\cal M}_k^N(d)$, and
we will often refer to this induced $w$ instead of to $\psi$
when dealing with variable matrices.
We require the following nonconventional notion of addition of
matrices. We will introduce
this notion in the context of dimension 2, although the obvious
analogs are valid in arbitrary dimension.
%Also Lemma 1.5 will
%be in the context of dimension 2.
Let $w=(w_{ij})_{i,j=1}^M$
and $y=(y_{ij})_{i,j=1}^M$ be matrices (variable or otherwise).
If there exist disjoint sets $W$ and $Y$, whose union is
$\{ 1,\ldots ,M\}^2$, such that $w_{ij}=0$ for $(i,j)\in W$
and $y_{ij}=0$ for $(i,j)\in Y$, then we define $w+y=(z_{ij})
_{i,j=1}^M$, where $z_{ij}=w_{ij}$ if $(i,j)\in Y$ and $z_{ij}=
y_{ij}$ if $(i,j)\in W$. If however there exists $(i,j)\in
\{1,\ldots ,M\}^2$ such that $w_{ij}\neq 0\neq y_{ij}$ then
the sum $w+y$ is undefined.
\med\noindent
{\bf Theorem 1.2} The following are equivalent:
\smallskip\noindent (a) Theorem 1.1.
\noindent
(b) Let $d\in\n$ and let
$\big(P_i(X)\big)_{i=1}^t$ be pairwise disjoint set-polynomials
over $\n^d$ having empty constant term
and let $J$ be any finite subset of $\n$ containing
all coordinate coefficients
represented in the $P_i$'s. Let $k,r\in\n$. There exists $N\in\n$
having the property that if $\mkNd=\part$ then there exists a set
$B\subset \{ 1,2,\ldots ,N\} \setminus J$, a variable matrix
$w(x_1,\ldots ,x_t)$, and $n$, with $1\leq n\leq r$, such that
(i) The $i$th support of $w$ is
$P_i(B)$, $1\leq i\leq t$,
(ii) $\{ w(i_1,\ldots ,i_t): i_j\in
\ok, 1\leq j\leq t\}\subset C_n$, and
(iii) $w$ is $0$ on $J^d$.
\noindent
(c) Let $k,r,d\in \n$. There exists $N$ such that for every partition
$\mkNd=\part$ there is a standard variable matrix
$w(x)$ over $\mkNd$ such that $\{ w(i):i\in\ok\}$ lies in one cell $C_j$.
\med\ni
\pf First we show (a) implies (b). Choose $b\in\n$ with $2^b\geq k$ and
consider the set
$$\pee = \{ \bigcup _{s=1}^t \big(E_s\times P_s(X)\big):
E_s\subset \{ 1,\ldots ,b\}, \; 1\leq s\leq t \} .$$
$\pee$ is a finite family of set polynomials over $\n^{d+1}$.
Let $I=J\cup \{ 1,\ldots ,b\}$ and let $l=d+1$. Now pick a finite
subset $S\subset \n$ as guaranteed by Theorem 1.1.
Notice in particular that $S\cap I=\emp$.
Pick $N\in\n$
such that $S\cup I\subset \{ 1,\ldots ,N\}$. Suppose that
${\cal M}_k^N(d)=\part$. Form a map
$\pi: \f\big( \{ 1,\ldots ,b\} \times \{ 1,\ldots ,N\}^d\big)
\ra \mkNd$ as follows:
$$\big( \pi(A)\big) (a_1,\ldots ,a_d)=
\min\big\{ \sum_{(j,a_1,\ldots ,a_d)\in A} 2^{j-1}, k-1\big\} .$$
Now put $D_i=\pi\verse (C_i)$, $1\leq i\leq r$. Then
$\f\big( \bigcup_{P\in \pee} P(S)\big) \subset \bigcup_{i=1}^r
D_i$ so there exist $B\subset S$ and $A\subset \bigcup _{P\in \pee}
P(S)$ with $A\cap P(B)=\emp$ for all $P\in \pee$ (in particular
$A\cap \big( \{ 1,\ldots ,b\} \times P_i(B)\big)
=\emp, 1\leq i\leq t$) and such that for
some $z$, $1\leq z\leq r$,
$$\big\{ A\cup \bigcup _{s=1}^t \big( E_s\times P_s(B)\big) :
E_s\subset \{ 1,\ldots ,b\}, 1\leq s\leq t \big\} \subset D_z .$$
Define a variable matrix $\psi=w(x_1,\ldots ,x_t)$ over $\mkNd$
by
\smallskip
\noindent 1. $\psi\big( (a_1,\ldots ,a_d)\big) = x_i$ if
$(a_1,\ldots ,a_d)\in P_i(B)$, and
\noindent
2. $\psi\big( (a_1,\ldots ,a_d)\big) =
\pi(A)(a_1,\ldots ,a_d)$ otherwise.
\smallskip
\noindent
(Recall that the sets $\{ P_i(B):1\leq i\leq t\}$ are pairwise disjoint,
owing to the fact that the $P_i$'s are pairwise disjoint and $B$ contains
no coordinate coefficients of any $P_i$.)
The $i$th support of $w$ is clearly $P_i(B)$, $1\leq i\leq t$.
Now for any $i_1,\ldots ,i_t\in \ok$, we pick sets
$E_s\subset \{ 1,\ldots b\}$ such that $\sum_{n\in E_s}2^{n-1}=i_s$,
$1\leq s\leq t$, and note that
$$w(i_1,\ldots ,i_t)=\pi(A)+\sum_{s=1}^t \pi\big( E_s\times P_s(B)\big)
=\pi\Big( A\cup \bigcup _{s=1}^t \big( E_s\times
P_s(B)\big)\Big) \in C_z .$$
Since $J\subset I$, $S\cap I=\emp$ and $A\subset \bigcup_{P\in \pee}
P(S)$, we have
$A\cap \big( \{ 1,\ldots ,b\} \times J^d\big) =\emp$, so that
$w$ is zero on $J^d$.
This finishes the proof that (a) implies (b).
Letting $t=1$ and $P_1(X)=X^d$, one sees that (b) implies (c).
Therefore all that remains is to show (c) implies (a).
Let $\{ Q_1,\cdots ,Q_t\}$ be the family of all set-monomials that appear
in any of the set-polynomials of $\pee$, and write
$Q_i(X)=S_1^{(i)}\times \cdots \times S_d^{(i)}$, where each
$S_j^{(i)}$ is either a singleton or the symbol $X$. Let $k=2^t$ and
put $d=l$.
Let $N$ be as promised by (c) and choose $y\in \n$ larger than all
coordinate coefficients in question and larger than any member of
$I$. Set $S=\{ y+1,\ldots ,y+N\}$. Suppose now that
$\f\big( \bigcup _{P\in\pee} P(S)\big) =\part$.
Let $Y$ be the family of $t$-tuples
of subsets of $\{ 1,\ldots ,N\}^d$. We identify $Y$ with $\mkNd$
by
$$(A_1,\ldots ,A_t)\leftrightarrow w
\;\; \hbox{ if and only if }\;\; w(i_1,\ldots ,i_d)=\sum_{s=1}^t
2^{s-1}\, 1_{A_s}\big( (i_1,\ldots ,i_d)\big) .$$
Our next task is to
construct a map $\pi$ sending $Y$ (and thus, effectively,
$\mkNd$) to
$\f\big( \bigcup _{s=1}^t Q_s(S)\big)=
\f\big( \bigcup _{P\in\pee} P(S)\big)$. First we define $\pi$ for
$t$-tuples of sets, one of which is a singleton and the rest of which are
empty. Suppose then that $i$ is fixed, $A_j=\emp$ for $i\neq j$
and $A_i=\{ (a_1,\ldots ,a_d)\}$. Recall that $Q_i(X)=
S_1^{(i)}\times \cdots \times S_d^{(i)}$, where some of the $S_j^{(i)}$ are
singletons and some are $X$. Let $T=\{ j: S_j^{(i)}=X\}$. Suppose
that for all $j\in \{1,\ldots ,d\}\setminus T$,
$a_j=\min\big\{ a_i:i\in T\big\}$. If this
condition is not met, we set $\pi\big( (A_1,\cdots ,A_t)\big) =\emp$.
If the condition is met, put $b_j=S_j^{(i)}$ if $S_j^{(i)}$
is a singleton and $b_j=a_j+y$ if $S_j^{(i)}=X$, $1\leq j\leq d$,
and set $\pi (A_1,\ldots ,A_t)=\{(b_1,\ldots ,b_d)\} $. We now extend
$\pi$ to the desired domain by requiring that
$\pi(A_1\cup B_1,\ldots ,A_t\cup B_t)=\pi(A_1,\ldots ,A_t)
\cup \pi(B_1,\ldots ,B_t)$. (This extension is unique.)
We now confirm that $\pi$ has the following two properties. First,
if $C\subset \{ 1,\ldots ,N\}$, then letting $B=C+y=\{c+y:c\in C\}$,
fixing $i$ and putting $A_i=C^d$ and
$A_j=\emp$ for all $j\neq i$, $\pi(A_1,\ldots ,A_t)
=Q_i(B)$. Second, if $A_i\cap B_i=\emp$ for all $i$,
$\pi\big( (A_1,\ldots ,A_t)\big)\cap \pi\big( (B_1,\ldots ,B_t)\big)
=\emp$.
We now use the map $\pi$ to draw back the partition. Namely, let
$D_i=\pi\verse (C_i)$, $1\leq i\leq r$. Then $Y=\bigcup _{i=1}^rD_i$.
But $Y$ is identified with $\mkNd$, so by (c) there exists a standard
variable matrix $w(x)$ and some $z$, $1\leq z\leq r$, such that
$W=\big\{ w(i):i\in\ok\big\}\subset D_z$. (After the identification,
of course.)
Let $C^d$ be the support of $w(x)$. Let $(A_1,\ldots,A_t)$ be the
member of $Y$ that is identified with $w(0)$. Then $A_i\cap C^d=\emp$
for $1\leq i\leq t$, so that
$\pi\big( (A_1,\ldots ,A_t)\big) \cap \pi\big( (C^d,\ldots ,C^d)\big)
=\emp$. Moreover, in $Y$, $W$ takes the form
$$W=\big\{ (A_1,\ldots ,A_t)\cup (F_1,\ldots ,F_t):\; F_i\in \{ \emp,
C^d\}, \; 1\leq i\leq t\big\}.$$
Let $A=\pi\big( (A_1,\ldots ,A_t)\big)$ and let $B=C+y$. Let
$P\in\pee$ and choose a set $E\subset \{ 1,\ldots ,t\}$ such that
$P(X)=\bigcup _{i\in E}Q_i(X)$. Next put $F_j=C^d$ if $j\in E$ and
$F_j=\emp$ otherwise. Then
$(A_1,\ldots ,A_t)\cup (F_1,\ldots ,F_t)\in W$. But $\pi(W)\subset
C_z$, so $\big(A\cup \bigcup _{i\in E} Q_i(B)\big)\in C_z$.
\Box
Formulations (a) and (b) in Theorem 1.2 are more powerful, on the
surface, than formulation (c) and hence it is good to have them on hand
for some applications, but formulation (c) has aesthetic advantages.
For one, when $d=1$ it gives precisely the Hales-Jewett theorem.
We now shift our focus slightly.
Let $A$ be a finite field and let $n\in \n$. Then $A^n$ is a vector space
over $A$. A translate of a $t$-dimensional vector
subspace of $A^n$ is called a {\it $t$-space}. The following theorem
was proved by Graham, Leeb and Rothschild ([GLR]).
\med\ni
{\bf Theorem 1.3} Let $r,n,t\in \n$. There exists $N=N(r,n,t)$ such that
for any $r$-coloring of the $n$-spaces of $A^N$ there exists a $t$-space
$V$ such that the family of $n$-spaces contained in $V$ is monochromatic.
\med
We mention this result because it is so well known. It is not
quite in keeping with our theme, namely extensions of the
Hales-Jewett theorem, but if
we restrict attention to a certain sub-class of $n$-spaces, the
situation becomes much more ``Hales-Jewett-like''.
Recall that a variable word over $\wk$ is a word on the alphabet
$\{ 1,2,\cdots ,k,x\}$ in which the symbol $x$ appears at least once.
An $n$-variable word is a word on the alphabet $\{ 1,\cdots ,k,x_1,
\cdots ,x_n\}$ in which all the $x_i$'s occur and for which no
occurrence of $x_{i+1}$ precedes an occurrence of $x_{i}$, $1\leq i\leq n-1$.
If $w(x_1,\cdots ,x_n)$ is an $n$-variable
word over $\w_k^M$ then the set $\{ w(t_1,t_2,\cdots ,t_n):
1\leq t_i\leq k, i=1,\cdots ,n \}$ will be called the space associated
with $w$. (Notice now that if $k=p^s$ for some prime $p$ and $s\in\n$
and we identify $\ok$ with a field $A$ having $p^s$ elements, choose a
basis $\{ v_1,\cdots ,v_M\}$ for $A^M$ and identify the word
$w_1w_2\cdots w_M$ with the vector $\sum_{i=1}^M w_iv_i$, then the
space associated with an $n$-variable word is indeed an $n$-space in
$A^N$. However, not all $n$-spaces can be obtained in this way.)
If $w$ is a $t$-variable word and $v$ is an $n$-variable word
and the space associated with $v$ is contained in the space associated
with $w$, $v$ will be called an $n$-{\it subword} of $w$. Another way
of seeing this is, if $w(y_1,\cdots ,y_t)$ is a $t$-variable word then
the $n$-variable subwords of it (in the variables $x_1,\cdots ,x_n$)
are of the form $w(z_1,\cdots ,z_t)$, where $z_1\cdots z_t$ is an
$n$-variable word over $\w_k(t)$.
The following theorem is a finitary consequence of a generalization
of T. Carlson's theorem ([C, Lemma 5.9])
due to H. Furstenberg and Y. Katznelson (see [FK,
Theorem 3.1]).
It extends the Hales-Jewett theorem in the
following sense. If we call regular words (that is, elements
of $\w_k^M$) $0$-variable words, then the Hales-Jewett theorem
corresponds to the case $n=0$, $t=1$ of Theorem 1.4.
\med\ni
{\bf Theorem 1.4} Let $k,r,n,t\in \n$ be given. There exists $M=M(k,r,n,t)$
such that for every $r$-cell partition of the $n$-variable words over
$\w_k^M$ there
exists a $t$-variable word all of whose $n$-subwords lie in the same cell.
\med
% \ee: the collection of matrices in Lemma 1.5; \stm: subscript
% _{i,j=1}^{T+M}; \onet: the index set \{1,...,T\}.
\def\ee{{\cal E}}\def\stm{ _{i,j=1}^{T+M}}\def\onet{\{1,\ldots ,T\}}
We seek now to give a polynomial analog of Theorem 1.4. To this end,
let $k,N,d,n\in\n$ and suppose we have non-empty sets $B_i\subset
\{1,\ldots ,N\}$, $1\leq i\leq n$, with $B_1<\cdots <B_n$.
% NOTE(review): source text appears to be missing here -- the definition
% of the standard variable matrices making up $\ee$ and the opening of
% the statement of Lemma 1.5.  The surviving fragment below resumes in
% mid-hypothesis; the lost clause was presumably of the form
% ``$a_{ij}=0$ whenever $i>T$ or $j>T$'' -- confirm against the
% published version.
$a_{ij}=0$ whenever $i>T$ or $j>T$ (that is, all the supports
of $(a_{ij})\stm$ lie in $\onet^2$).
\smallskip\noindent
Then for any $R$-coloring $\ga$ of $\ee$ there exists a
$(2T+1)$-variable matrix $w(x_1,\ldots ,x_{2T+1})$ = $(b_{ij})\stm$
over ${\calM}_k^{T+M}(2)$ that satisfies:
\smallskip\noindent
(1) $b_{ij}=0$ if $(i,j)\in\onet^2$.\def\wkt{{\cal W}_k^T}
\noindent
(2) There exists a non-empty set $B\subset \{ T+1,\ldots ,T+M\}$
such that the supports of $w$ are $\{ i\}\times B$ and $B\times
\{i\}$, $i\in\onet$, $B\times B$.
\noindent
(3) For any standard $n^2$-variable matrix $m=(c_{ij})\stm$ satisfying
$c_{ij}=0$ if $(i,j)\not\in \onet^2$, the set
$\{ m+w(i_1,\ldots ,i_{2T+1}):i_j\in\ok, 1\leq j\leq 2T+1\}$ is
$\ga$-monochromatic.
\medskip\def\mkN{{\cal M}_k^N}
\ni
\pf Let $P_i(X)$, $1\leq i\leq 2T+1$, denote the set polynomials
$\{i\}\times X$ and $X\times \{i\}$, $i\in \onet$, and $X\times X$.
These are pairwise disjoint set-polynomials (in fact, distinct
set-monomials). Let $\gee$ be the set of all standard $n^2$-variable
matrices over $\mkt(2)$. Let $J=\onet$, $t=2T+1$, $r=R^{|\gee|}+1$,
$d=2$, and put $M=N-T$, where $N$ is the number guaranteed by
Theorem 1.2 (b). Let $\ga$ be an $R$-coloring of $\ee$.
% \NN: subscript _{i,j=1}^N for N-by-N matrix entries.
\def\NN{_{i,j=1}^N}
We now construct an $(R^{|\gee|}+1)$-cell partition of $\mkN(2)$.
For $(d_{ij})\NN, (f_{ij})\NN\in \mkN(2)$, we write
$(d_{ij})\NN\sim (f_{ij})\NN$ if for every standard $n^2$-variable
matrix $m=(e_{ij})\stm$ satisfying $e_{ij}=0$ for all $(i,j)\not\in
\oneT^2$, we have $\ga\big( m+(d_{ij})\NN\big)=\ga\big( m+
(f_{ij})\NN\big)$, in the sense that if either side of this
expression is defined then so is the other and they are equal.
(Hence in particular all matrices that have a non-zero entry for
any index point in $\oneT^2$ are relegated to the same equivalence
class. The other equivalence classes are characterized by the value
of $\ga$ at $|\gee|$ points, hence the equivalence classes of
$\sim$ form an $r$-cell partition.)
According to the conditions whereby $M$ was chosen, there exists a
non-empty set $B\subset \{1,\ldots ,N\}\setminus J =\{ T+1,\ldots ,
T+M\}$ and a variable matrix $w(x_1,\ldots , x_{2T+1})
=(b_{ij})\stm$ such that
the supports of $w$ are $P_i(B)$, $1\leq i\leq 2T+1$, and the set
$\{ w(i_1,\ldots ,i_{2T+1}): i_j\in\ok, 1\leq j\leq 2T+1\}$ lies
entirely in a single equivalence class of $\sim$ and such that
moreover $b_{ij}=0$ for all $(i,j)\in J^2=
%\{1,\ldots ,N\}^2\setminus
%\bigcup_{i=1}^{2T+1} P_i(\{ 1,\ldots ,N\}\setminus J)=
\oneT^2$. The variable matrix thus chosen satisfies (1), (2) and (3).
\Box
Our second lemma is a finitary version of a theorem proved
independently by Milliken ([Mi]) and Taylor ([T]). Recall that if $A$ is a set
then $\f(A)$ is the family of
non-empty finite subsets of $A$. We write $\f=\f(\n)$ as a kind
of shorthand. Recall that for $\al,\be\in\f$, we
write $\al<\be$ if $\max \al<\min \be$. For $k\in\n$, and
a sequence $(\al_i)_{i=1}^\infty\subset \f$, we write
$FU(<\al_i>_{i=1}^\infty)=\{ \bigcup _{i\in A} \al_i: A\in \f\}$.
($FU$ stands for ``finite unions.'' One may consider the set of
finite unions of a finite sequence as well, of course.)
If $\gee\subset \f$, let
$\gee^k_<$ be the set of $k$-tuples $(\al_1,\ldots ,\al_k)$
in $\gee^k$ for which $\al_1<\al_2<\cdots <\al_k$. The Milliken-Taylor
theorem states that for any finite partition $\f^k_<=\part$, there
exists $j$, with $1\leq j\leq r$, and a sequence
$(\al_i)_{i=1}^\infty$, with $\al_1<\al_2<\cdots $, such that
$\big( FU(<\al_i>_{i=1}^\infty)\big)^k_< \subset C_j$.
We shall not need the full strength of the Milliken-Taylor theorem,
but only the following finitary version of it.
\medskip
\noindent {\bf Lemma 1.6}
Let $r,n,t\in \n$. There exists $L=L(r,n,t)\in\n$ such that if
$\{ (\al_1,\ldots ,\al_n): \emp\neq \al_i\subset \{ 1,\ldots ,L\},
\al_1<\al_2<\cdots <\al_n\} =\part$ then there exist non-empty sets $
\al_i\subset \{ 1,\ldots ,L\}$, $1\leq i\leq t$, with $\al_1<\al_2<
\cdots <\al_t$, and $j$, $1\leq j\leq r$, with
$\big( FU(<\al_i>_{i=1}^t)\big) ^n_<\subset C_j$.
\medskip
Here now is the main theorem of this section.
\medskip\noindent
{\bf Theorem 1.7} Let $k,r,n,t,d\in \n$. There exists $N=N(k,r,n,t,d)$
such that for every $r$-cell partition of the standard $n^d$-variable
matrices over ${\cal M}_k^N(d)$, there exists a standard
$t^d$-variable matrix over $\mkNd$ all of whose standard
$n^d$-variable submatrices lie in the same cell.
\medskip
Before giving the proof of Theorem 1.7, let us make a few remarks
about notation and also Lemma 1.5. First, the object
$\ee$ defined in the lemma
consists of variable words with supports in $\oneT^2$, and
the variable word that is found must have zero entries over
$\oneT^2$. We note that there is nothing remarkable here about
the set $\oneT^2$. Once $M$ has been chosen, any set $S^2\subset
\{ 1,\ldots ,M+T\}^2$ where $|S|=T$, would serve just as nicely
in this capacity. This is a simple result of the fact that
standard variable matrices remain such upon permuting the
indices $\{1,\ldots ,M+T\}$.
Next, the lemma as stated applies to $\calM_k^{T+M}(2)$ and variable
words over it. In our application of it, we shall be applying it
in the context of an isomorphic copy of $\calM_k^{T+M}$, namely
the space determined by an appropriate standard $(M+T)^2$-variable
matrix. Notationally, it is convenient to write such a variable
matrix with a matrix of variables,
namely as $w\big( (x_{ij})\stm\big)$, where it is understood that the
variable $x_{ij}$ has support $B_i\times B_j$ for some non-empty
sets $B_1