% A latex2e file for a 16 page document.
\documentclass[12pt]{article}
% EJC page size:
\setlength{\textwidth}{6.2in}
\setlength{\oddsidemargin}{.15in}\setlength{\evensidemargin}{.15in}
\setlength{\textheight}{8.5in}\setlength{\topmargin}{0in}
\usepackage{amssymb}
\title{Tournament Sequences and Meeussen Sequences}
\author{
Matthew Cook\\
Computational and Neural Systems Program\\
California Institute of Technology\\
Pasadena, CA 91125\\
{\tt cook@paradise.caltech.edu}
\and
Michael Kleber\thanks{Partially supported by an NSF Mathematical
Sciences Postdoctoral Research Fellowship}\\
Department of Mathematics\\
Massachusetts Institute of Technology\\
Cambridge, MA 02139\\
{\tt kleber@math.mit.edu}
}
\date{Submitted: March 22, 2000; Accepted: September 5, 2000}
\newcommand{\phito}{\stackrel{\phi}{\mapsto}} % Extra {}s mess up spacing!
\newcommand{\pf}{\noindent{\bf Proof:\,}\,}
\newcommand{\qed}{\hfill $\blacksquare$ \bigskip}
\newcommand{\tm}{{\raisebox{.75ex}{\sc\small tm}}}
\newcommand{\e}{\varepsilon}
\newtheorem{thm}{Theorem}
\newtheorem{defn}[thm]{Definition}
\newtheorem{prop}[thm]{Proposition}
\newtheorem{cor}[thm]{Corollary}
\newtheorem{conj}[thm]{Conjecture}
\newtheorem{lemma}[thm]{Lemma}
\newtheorem{recur}{Recurrence}
%\renewcommand{\theequation}{\fnsymbol{equation}}
\setcounter{secnumdepth}{1}
\begin{document}
\pagestyle{myheadings}
\markright{\rm{\sc the electronic journal of combinatorics} \textbf{7} (2000), \#R44\hfill}
\thispagestyle{empty}
\maketitle
\begin{abstract}
A {\em tournament sequence} is an increasing sequence of positive
integers $(t_1,t_2,\ldots)$ such that $t_1=1$ and $t_{i+1} \leq 2t_i$.
A {\em Meeussen sequence} is an increasing sequence of positive
integers $(m_1,m_2,\ldots)$ such that $m_1=1$, every nonnegative
integer is the sum of a subset of the $\{m_i\}$, and each integer
$m_i-1$ is the sum of a unique such subset.
We show that these two properties are isomorphic. That is, we present
a bijection between tournament and Meeussen sequences which respects
the natural tree structure on each set. We also present an efficient
technique for counting the number of tournament sequences of length
$n$, and discuss the asymptotic growth of this number. The counting
technique we introduce is suitable for application to other
well-behaved counting problems of the same sort where a closed form or
generating function cannot be found.
\medskip \noindent {\bf MSC:} 11B99 (Primary), 05A15, 05A16 (Secondary).
\end{abstract}
\section{Introduction}
\label{sec_intro}
An infinite {\em tournament sequence} $T$ is an infinite sequence of
positive integers $T=(t_1,t_2,\ldots)$ such that
\begin{itemize}
\item $t_1=1$ and $t_i < t_{i+1} \leq 2t_i$ for $i=1,2,\ldots$
\end{itemize}
For example, the first infinite tournament sequence in lexicographic
order is $t_i=i$, and the last is $t_i=2^{i-1}$. A finite tournament
sequence $T=(t_1,\ldots,t_n)$ is a truncated infinite tournament
sequence.
An infinite {\em Meeussen sequence} $M$ is an infinite sequence of
positive integers $M=(m_1,m_2,\ldots)$ such that
\begin{itemize} \addtolength{\itemsep}{-6pt}
\item $m_1=1$ and $m_i<m_{i+1}$ for $i=1,2,\ldots$,
\item every nonnegative integer is the sum of a subset of the $\{m_i\}$, and
\item each integer $m_i-1$ is the sum of a unique such subset.
\end{itemize}
% [NOTE: a large span of the source is missing here, from the middle of
% this definition through the construction of the bijection $\phi$.  The
% recurrence below for $u(n,k)$ is reconstructed from the surviving
% fragment; the first case should be checked against the original.]
\begin{eqnarray*}
u(n,k) &=& \left\{ \begin{array}{ll}
u(n-1,\,k-(t_n-t_{n-1})), & \mbox{if } k > t_n-t_{n-1} \\[4pt]
S_n - u(n-1,\, t_n-t_{n-1}+1-k), & \mbox{if } k\leq t_n-t_{n-1}
\end{array} \right. \\
&& \mbox{\qquad where $S_n=m_1+\cdots+m_n$.}
\end{eqnarray*}
Beginning with $m_1=1$ and $u(1,1)=1$, we can quickly calculate
each successive term of $\phi(T)$ in linear time.
\section{Properties of the Bijection}
\label{sec_fib}
In the introduction, we stated without proof that the beheaded
Fibonacci sequence $(1,2,3,5,8,13,\ldots)$ is a Meeussen sequence, and
moreover that it is the smallest one in lexicographic order, the image
of $(1,2,3,4,5,6,\ldots)$ under the map $\phi$ defined above. This is
a special case of a more general surprising property of $\phi$, which
we prove in this section.
Since $(1,2,3,5,8,13,\ldots)$ is, coincidentally, again a tournament
sequence, we can apply $\phi$ to it as well. This leads to the
following computational observation:
$$
(1, 2, 3, 5, 8, 13, 21,\ldots) \phito
(1, 2, 3, 6, 11, 20, 37,\ldots) \phito
(1, 2, 3, 7, 13, 25, 48,\ldots)
$$
The middle sequence is the ``3-bonacci'' sequence beginning $(1,2,3)$
and the last is the ``4-bonacci'' sequence beginning $(1,2,3,7)$,
where we say a sequence is $k$-bonacci if each term (after the first
$k$) is the sum of the previous $k$ terms.
Further experimentation reveals that, for example,
$$
(1, 2, 4, 7, 12, 20, 33, 54, 88, 143,\ldots) \phito
(1, 2, 4, 7, 13, 24, 44, 81, 149, 274,\ldots).
$$
The first sequence begins $(1,2)$ and thereafter each term is one plus
the sum of the previous two, while the second sequence is the
3-bonacci sequence beginning $(1,2,4)$, with no additive constant in
the recurrence.
Inspired by examples of this type, we make the following observation.
\begin{prop}
\label{fib}
Suppose $(t_1,\ldots,t_{n+1})$ is a finite tournament sequence, and
suppose that for some integers $k,c$, it happens that both
$$
\begin{array}{llll}
t_{n+1} &=& t_{n }+t_{n-1}+\cdots+t_{n-k+1}+c & \mbox{and} \\
t_{n } &=& t_{n-1}+t_{n-2}+\cdots+t_{n-k }+c.
\end{array}
$$
Then $(t_1,\ldots,t_{n+1})\phito(m_1,\ldots,m_{n+1})$, where
$$
\begin{array}{rcl}
m_{n+1} &=& m_{n }+m_{n-1}+\cdots+m_{n-k+1}+m_{n-k }.
\end{array}
$$
\end{prop}
While the statement of the proposition is designed to mimic the
examples above, the hypothesis simplifies to $t_{n+1} =
2t_{n}-t_{n-k}$. Since our map $\phi$ respects extending or
truncating a sequence, we are really just assuming that $t_{n+1} =
2t_{n}-t_{n-k}$ for some single pair $n,k$; the effect is purely local.
To prove the proposition, it would help to have a more concrete
relationship between terms of the tournament and Meeussen sequences
associated to one another by our bijection.
\begin{lemma}
\label{lemma_tm}
Let $T=(t_1,t_2,\ldots)$ be a (finite or infinite) tournament sequence
with associated Meeussen sequence $\phi(T)=M=(m_1,m_2,\ldots)$. Write
$ur(M)$, the uniquely representable sums of $M$, as
$\{u_1<u_2<u_3<\cdots\}$.
% [NOTE: text is missing from the source here: the rest of
% Lemma~\ref{lemma_tm}, its proof, and the opening of the counting
% section that defines $c(n,k)$, the number of nodes labelled $(k)$ on
% row $n$ of the tree.  The recurrence below is reconstructed: a node
% $(k)$ on row $n>1$ has parent $(j)$ on row $n-1$ with $j<k\leq 2j$,
% consistent with the generating-function relation for $g(x,y)$ below.]
\end{lemma}
\begin{recur}
\label{rec_c}
\begin{eqnarray*}
c(1,1) &=& 1, \mbox{\quad and} \\
c(n,k) &=& \sum_{j=\lceil k/2 \rceil}^{k-1} c(n-1,j) \mbox{\quad for $n>1$.}
\end{eqnarray*}
\end{recur}
We could then find the number of nodes on row $n$ by summing $c(n,k)$
for all $k$ up to $2^{n-1}$. The work involved grows exponentially in
$n$, though, so for large $n$ this is impractical. We could define a
generating function in two independent variables
$$
g(x,y) = \sum_{n,k} x^n y^k c(n,k) = xy + x^2y^2 + x^3(y^3+y^4) + \cdots
$$
which, based on Recurrence~\ref{rec_c}, must satisfy
$$
g(x,y) = xy + \frac{xy}{1-y}\, g(x,y) - \frac{xy}{1-y}\, g(x,y^2).
$$
Rewriting this as $g(x,y) = \frac{xy(y-1)}{xy+y-1} + \frac{xy}{xy+y-1}\,
g(x,y^2)$, we can solve formally by iterated substitution to get
$$
g(x,y) = \sum_{n=0}^{\infty} \left( y^{2^n}-1 \right)
\prod_{k=0}^n \frac{x y^{2^k}}{x y^{2^k} + y^{2^k} - 1}.
$$
This seems to offer dim prospects for a nice form for $s(n)$, the
coefficient of $x^n$ at $y=1$.
Alternatively, one could hope to work with the function $d(n,k)$ which
counts the number of $n$th-generation descendents of a node labelled
$(k)$. We can count these descendents by summing the number of
$(n-1)$-generation descendents of the node's $k$ children:
\begin{recur}
\label{rec_d}
\begin{eqnarray*}
d(1,k) &=& k \mbox{\quad for all $k\geq1$,} \\
d(n,0) &=& 0 \mbox{\quad for all $n\geq1$, and otherwise,} \\
d(n,k) &=& \sum_{j=k+1}^{2k} d(n-1,j).
\end{eqnarray*}
\end{recur}
The number of nodes on row $n$ of our tree is then $s(n)=d(n-1,1)$.
Torelli points out that this recurrence can be expressed in closed
form, by replacing the last line with
$$
d(n,k) \,=\, d(n,k-1) - d(n-1,k) + d(n-1,2k-1) + d(n-1,2k)
$$
This alternate version embodies the notion that the tree below a node
labelled $(k)$ looks just like the tree below a $(k-1)$, but modified
by pruning the branch beginning with the child $(k)$ and grafting on
branches beginning with $(2k-1)$ and $(2k)$ instead. However, as $n$
increases, either version still involves calculating an exponentially
growing set of values.
We offer instead the following technique for calculating the growth of
the tree in polynomial time. Consider the family of functions
$p_n$ for $n=1,2,3,\ldots$ such that $p_n(k)$ is the number of
$n$th-generation descendents of a node labelled $(k)$, what we called
$d(n,k)$ above. Then in the spirit of Recurrence~\ref{rec_d}, we can
get a recurrence relation for the functions $p_n$ themselves:
\begin{recur}
\label{rec_p}
\begin{eqnarray*}
p_1(k) &=& k, \\
p_n(k) &=& \sum_{j=k+1}^{2k} p_{n-1}(j).
\end{eqnarray*}
\end{recur}
Purists would start the recurrence with $p_0(k)=1$ instead.
The key observation is that each $p_n$ is in fact a degree $n$
polynomial in $k$, which we obtain by symbolic summation of a range of
values of $p_{n-1}$. Recall that the sum $\sum_{j=1}^k j^n$ is a
polynomial in $k$ of degree $n+1$. Our recurrence states that
$p_n(k)$ is the sum of the first $2k$ values of $p_{n-1}$ minus the
sum of the first $k$ values. Since $p_{n-1}$ is a polynomial in $k$
by induction, so is $p_n$. The next few polynomials after $p_1=k$ are
$$
p_2 = \frac{3k^2+k}{2}, \quad
p_3 = \frac{7k^3+6k^2+k}{2}, \quad
p_4 = \frac{105k^4+154k^3+63k^2+6k}{8},\ldots
$$
Modern computer algebra packages can generally carry out this
type of symbolic summation quickly, so we can use this polynomial
recurrence directly to find $p_n$, and evaluate $p_{n-1}(1)$ to find the
number of nodes on level $n$. For example, in Maple\tm:
\begin{verbatim}
p := proc(n) option remember;
if (n=1) then k else sum( p(n-1), 'k'=k+1..2*k ) fi; end;
s := n -> eval( p(n-1), k=1 );
\end{verbatim}
Then \verb"p(n)" returns the polynomial $p_n$ in the variable
\verb"k", and \verb"s(n)" evaluates $p_{n-1}$ at $k=1$, giving us the
number of nodes on level $n$ of the tree.
Now that we know that $p_n$ is an $n$th-degree polynomial, we need not
calculate the polynomial explicitly just to find some of its values.
For example, $p_n$ is determined by its values at the $n+1$ points
$k=0,1,\ldots,n$, which we can find (using Recurrence~\ref{rec_d})
once we know $p_{n-1}$ at $k=0,1,\ldots,2n$. We could then fit an
interpolating polynomial to those points to find other desired values
of $p_n$. In this case, another trick presents itself; we can use the
linear dependence among $n+2$ equally-spaced values of a polynomial
$p$ of degree $n$:
$$
\forall a,b: \,\,
\sum_{i=0}^{n+1} (-1)^i {n \choose i}\, p(a+bi) = 0.
$$
Combining all of these tricks, we can efficiently calculate the number
of nodes on row $n$ of our tree as follows:
\begin{recur}
\label{rec_good}
\begin{eqnarray*}
d(0,k) &=& 1 \mbox{\quad for all $k$,} \\
d(n,0) &=& 0 \mbox{\quad for all $n>0$,} \\
d(n,k) &=& d(n,k-1) - d(n-1,k) + d(n-1,2k-1) + d(n-1,2k) \\
&& \hspace{2.25in} \mbox{for $k\leq n$, and} \\
d(n,k) &=& \sum_{j=1}^{n+1} (-1)^{(j-1)} {n+1 \choose j} \, d(n,k-j)
\mbox{\qquad for $n<k$.}
\end{eqnarray*}
\end{recur}
% [NOTE: text is missing from the source here, including the body of the
% Maple implementation of Recurrence~\ref{rec_good}; only its final line
% survives.]
\begin{verbatim}
s := n -> d(n-1,1);
\end{verbatim}
This code seems empirically to calculate $s(n)$ after having done the
work for $s(n-1)$ in time $O(n^2)$ even when $n$ is around 190, when
$s(n)$ has over 5000 decimal digits. This is the behavior one would
expect if multiplication had a constant unit cost and addition were
free.
On a modest desktop Pentium II, this computes up to $s(30)$ in under a
second, $s(85)$ in under a minute, and $s(190)$ in about an hour; a
little extra work to avoid computing the binomial coefficients
multiple times speeds it up even more. We record $s(n)$ for $1\leq
n\leq 22$ here. The sequence also appears as entry A008934 in
Sloane's On-Line Encyclopedia of Integer Sequences~\cite{EIS}.
\begin{quote} \flushleft \sloppy
1, 1, 2, 7, 41, 397, 6377, 171886, 7892642, 627340987, 87635138366,
21808110976027, 9780286524758582, 7981750158298108606,
11950197013167283686587, 33046443615914736611839942,
169758733825407174485685959261,
1627880269212042994531083889564192,
29264239787495935863325877024506142042,
989901541366810465070950556260422637919176,
63214893835996484808167529681187283166038800097,
7643667309922877343580868981767361594845888953165967,
\ldots
\end{quote}
\subsection{Asymptotic Behavior}
Now we turn to the asymptotic growth of $s(n)$. We know from
Lemma~\ref{lem_bound} that ${n-1 \choose 2}$ is an upper bound for
$\lg s(n)$, where $\lg$ denotes $\log_2$. We will first prove that a
lower bound for $s(n)$ is $\alpha\,2^{n\choose2} / (n-1)!$ for a
certain constant $\alpha$, and then we will show that $\lg s(n)$ is
asymptotic to ${n\choose2}-\lg n!+O(\log n)^2$.
We are grateful to Donald Knuth for suggesting the method used here.
The technique rests on the following observation:
\begin{thm}[Knuth]
\label{thm_knuth}
Let $T$ be a rooted tree in which we want to know the number of
vertices on the $n$th level. Select $n-1$ vertices
$v_1,\ldots,v_{n-1}$ in $T$ by choosing $v_1$ to be the root and
picking $v_{i+1}$ uniformly at random from among the children of
$v_i$, of which there are $\deg(v_i)$. Then the expected value
$$
E(\deg(v_1)\deg(v_2)\cdots\deg(v_{n-1}))
$$
is exactly the number of vertices on the $n$th level of $T$.
\end{thm}
\pf
See \cite{knuth} for a discussion of this technique in greater
generality. In this case, a proof by induction is straightforward: if
this technique works for counting the $n$th level of each of $k$ trees
$T_1,\ldots,T_k$, then it clearly also works for counting level $n+1$
of the tree $T$ whose root has $k$ children, the roots of
$T_1,\ldots,T_k$.
\qed
We will apply Theorem~\ref{thm_knuth} to the tree of tournament
sequences, in which, conveniently, each vertex is already labelled
with its degree. This means we can calculate $s(n)$ by finding the
expected value of the product $t_1t_2\cdots t_{n-1}$, where
$(t_1,t_2,\ldots,t_{n-1})$ is a tournament sequence selected at random
by setting $t_1=1$ and picking $t_{i+1}$ uniformly at random from
among $t_i+1,t_i+2,\ldots,2t_i$. For the remainder of this section,
whenever we talk about a distribution for $t_i$, it is implicitly with
respect to this way of picking a tournament sequence.
\begin{lemma}
\label{lem_lb}
Let $s(n)$ be the number of tournament sequences of length $n$. Then
$$
s(n) \geq \alpha\,\frac{2^{n\choose 2}}{(n-1)!}
$$
where $\alpha=(1-\frac12)(1-\frac14)(1-\frac18)\cdots\approx.28878837\ldots$
\end{lemma}
\pf
Observe that there is a natural continuous analogue to the expected
value $E(t_1t_2\cdots t_{n-1})=s(n)$. Consider instead the expected
value $E(r_1r_2\cdots r_{n-1})$, where $r_1=1$ and $r_{i+1}$ is a real
number chosen uniformly at random from the interval $(r_i,2r_i]$.
Equivalently, we are taking random variables $u_i$ $(1\leq i\leq n-2)$
each with a uniform distribution over $(1,2]$, and setting
$r_{i+1}=u_ir_i$.
The expected value in the continuous case will give an underestimate
of the expected value for the discrete version, in which the $u_i$ are
distributed the same way but we set $t_{i+1}=\lceil u_it_i \rceil$.
The independence of the various $u_i$ make the expected value easy to
calculate:
\begin{eqnarray*}
E(r_1r_2r_3\cdots r_{n-1})
&=& E((1)(u_1)(u_1u_2)\cdots(u_1u_2\cdots u_{n-2})) \\
&=& E(u_1^{n-2} \, u_2^{n-3} \cdots u_{n-2}^1) \\
&=& \frac{2^{n-1}-1}{n-1}\,\frac{2^{n-2}-1}{n-2}\cdots\frac{2^2-1}{2} \\
&\geq& \alpha\,\frac{2^{n\choose 2}}{(n-1)!}
\end{eqnarray*}
\qed
Now we show that this lower bound is quite good, by finding the rate
of growth of the error. We use $\lg$ to denote $\log_2$.
\begin{thm}
\label{thm_growth}
\quad $\lg s(n) = {n \choose 2} - \lg n! + O(\log n)^2$
\end{thm}
\pf
In the proof of Lemma~\ref{lem_lb}, we calculated $E(u_k^{n-k-1})$,
where $u_k=r_{k+1}/r_k$ was uniformly distributed over $(1,2]$. In
the original problem, $t_k$ and $t_{k+1}$ are integers, so the ratio
$t_{k+1}/t_k$ instead takes on a discrete set of values, each with
equal probability:
$$
E\left( \left(\frac{t_{k+1}}{t_k}\right)^{\!\!p} \,\right) =
\frac{ \left(\frac{t+1}{t}\right)^p +
\left(\frac{t+2}{t}\right)^p + \cdots +
\left(\frac{t+t}{t}\right)^p }{t}
$$
where $t=t_k$ throughout. We can rewrite this and take advantage of
the ability to sum consecutive $p$th powers:
\begin{eqnarray*}
E\left( \left(\frac{t_{k+1}}{t_k}\right)^{\!\!p} \,\right)
&=&
\frac{ (t+1)^p + (t+2)^p +\cdots+ (t+t)^p }{t^{p+1}} \\
&\leq&
\frac{\frac{(2t)^{p+1}}{p+1} + \frac{(2t)^p}{2} + O(2t)^{p-1}}{t^{p+1}} \\
&&=\,
\frac{2^{p+1}}{p+1} + \frac{2^p}{2t} + O\!\left(\frac{2^p}{t^2}\right)
\end{eqnarray*}
The $2^{p+1}/(p+1)$ term is exactly the lower bound we used in
Lemma~\ref{lem_lb}, and we now have an idea of how much error this
introduced:
\begin{eqnarray*}
E\left( \left(\frac{t_{k+1}}{t_k}\right)^{\!\!n-k-1} \right)
&=&
\frac{2^{n-k}}{n-k} + \frac{2^{n-k}}{4t_k}
+ O\!\left(\frac{2^{n-k}}{t_k^2}\right)
\\ &=&
E(u_k^{n-k-1}) \left( 1 + \frac{n-k}{4t_k}
+ O\!\left(\frac{n-k}{t_k^2}\right) \right)
\end{eqnarray*}
The expected value now depends on $t_k$, and to bound the error, we
need some idea of how large we expect $t_k$ to be.
Consider again the continuous analogue used in the proof of
Lemma~\ref{lem_lb}. We expect $r_k$ to grow exponentially, as $c^k$
for some $c$, so look at the distribution of $r_k^{1/k}$. Taking
logs, we see that $\log r_k^{1/k}$ is distributed as the average of
$k$ copies of $\log x$ on $(1,2]$, each with mean $2\log2-1$. Thus as
$k$ increases without bound, the distribution of $r_k^{1/k}$ converges
towards a point distribution at $4/e$. As we already noted, the $t_k$
certainly grow no slower than the $r_k$. We conclude that for any
constant $c<4/e$ and probability $p<1$, there is a sufficiently
large $k_0$ such that $\Pr(t_k>c^k)>p$ for all $k\geq k_0$.
The error we want to bound is the product of the $n$ error terms $e_k
= 1+\frac{n-k}{4t_k}+O(\frac{n-k}{t_k^2})$ for $k=1,\ldots,n$. To
simplify bookkeeping, we will instead think about $\lg s(n)$ and bound
the sum of the logs of the error terms. We separate the work into two
cases, depending on whether $k$ is less or greater than $2\log n$.
When $k<2\log n$, the denominator $t_k$ may be small, and the error
terms $\log e_k$ may be as much as $O(\log n)$. Adding up all $2\log
n$ of these terms gives a total which is $O(\log n)^2$, the error term
in the statement of our theorem.
Finally, when $k=2\log n$, we know that as long as $n$ is sufficiently
large, with high probability $t_k>c^{2\log n}$, which is on the order
of $n^2$. Thus with high probability, $e_k$ is $1+O(1/n)$, and $\log
e_k=O(1/n)$. Since the error terms monotonically decrease, the sum of
all the terms $\log e_k$ with $k\geq 2\log n$ is bounded by
$n\log e_{2\log n}=O(1)$.
So taking $n$ sufficiently large ensures that the error in the lower
bound is concentrated almost entirely in the first $2\log n$ error
terms, and we are done.
\qed
Computational evidence based on the actual values of $s(n)$ for $n$ up
to 190 indicates that the constant needed to make $\lg s(n) <
{n\choose 2} - \lg n! + c (\log n)^2$ reaches a peak of $c\approx
1.18304060\ldots$ at $n=32$ and decreases slowly thereafter.
\begin{thebibliography}{19}
\bibitem{ant}
Bach, E.; Shallit, J.
{\em Algorithmic Number Theory\/}, vol.~1.
MIT Press, Cambridge, MA, 1996.
\bibitem{mbm}
Banderier, C.; Bousquet-M\'elou, M.; Denise, A.; Flajolet, P.;
Gardy, D.; Gouyou-Beauchamps, D.
Generating functions for generating trees.
Preprint, 2000.
% Preprint on-line at MBM's web site:
% http://dept-info.labri.u-bordeaux.fr/~bousquet/publis.html
\bibitem{bdpp}
Barcucci, E.; Del Lungo, A.; Pergola, E; Pinzani, R.
ECO: a methodology for the enumeration of combinatorial objects.
{\em J. Differ. Equations Appl.} {\bf 5} (1999), no. 4--5, 435--490.
\bibitem{knockout}
Capelli, P.; Narayana, T.~V.
On Knock-Out Tournaments.
{\em Canad. Math. Bull.} {\bf 13} (1970), 105--109
\bibitem{CGHK}
Chung, F.~R.~K.; Graham, R.~L.; Hoggatt, V.~E.,~Jr.; Kleiman, M.
The number of Baxter permutations.
{\em J. Combin. Theory Ser. A} {\bf 24} (1978), 382--394.
\bibitem{regular}
Fishburn, P.~C.; Odlyzko, A.~M.
Unique subjective probability of finite sets.
{\em J. Ramanujan Math. Soc.} {\bf 4} (1989), 1--23
\bibitem{additive}
Finch, S.
Conjectures about 1-additive sequences.
{\em Fibonacci Quart.} {\bf 29} (1991), 209--214.
\bibitem{knuth}
Knuth, D.
Estimating the Efficiency of Backtrack Programs.
{\em Math. Comput.} {\bf 29} \#129 (1975), 121--136.
\bibitem{EIS}
Sloane, N.~J.~A.
Sloane's On-Line Encyclopedia of Integer Sequences. \\
\verb"http://www.research.att.com/~njas/sequences/"
\bibitem{T}
Torelli, M.
Increasing Integer Sequences and Goldbach's Conjecture.
Preprint, 1996.
\bibitem{subword}
Tromp, J.; Shallit, J.
Subword complexity of a generalized Thue-Morse word.
{\em Information Processing Lett.} {\bf 54} (1995), 313--316.
\bibitem{W1}
West, J.
Generating trees and the Catalan and Schr\"oder numbers.
{\em Discrete Math.} {\bf 146} (1995), 247--262.
\bibitem{W2}
West, J.
Generating trees and forbidden subsequences.
Proceedings of the 6th Conference on Formal Power Series and Algebraic
Combinatorics (New Brunswick, NJ, 1994).
{\em Discrete Math.} {\bf 157} (1996), 363--374.
\end{thebibliography}
\end{document}