\documentclass{article}
\usepackage{amsmath,amsthm,amssymb,psfig}
\theoremstyle{plain} %% This is the default
\newtheorem{thm}{Theorem}[section]
\newtheorem{cor}[thm]{Corollary}
\newtheorem{lem}[thm]{Lemma}
\newtheorem{fact}[thm]{Fact}
\newtheorem{claim}{Claim}
\newtheorem{prop}[thm]{Proposition}
\newtheorem{ax}{Axiom}
\theoremstyle{definition}
\newtheorem{defn}{Definition}[section]
\theoremstyle{remark}
\newtheorem{rem}{Remark}[section]
\newtheorem{example}{Example}[section]
\newtheorem{notation}{Notation}
\renewcommand{\thenotation}{} % to make the notation environment unnumbered
\numberwithin{equation}{section}
\newcommand{\thmref}[1]{Theorem~\ref{#1}}
\newcommand{\secref}[1]{\S\ref{#1}}
\newcommand{\lemref}[1]{Lemma~\ref{#1}}
% Math definitions
\newcommand{\bslash}{{\setminus}}
\newcommand{\lamu}{{\lambda / \mu}}
\newcommand{\row}{\operatorname{row}}
\newcommand{\gr}{\operatorname{gr}}
\newcommand{\Ax}{{A^{(x)}_f}}
\newcommand{\Al}{{A^\Lambda_f}}
\newcommand{\Dl}{D^\Lambda_f}
\newcommand{\sgn}{\operatorname{sgn}}
\newcommand{\End}{\operatorname{End}}
\newcommand{\even}{\operatorname{even}}
\newcommand{\sh}{\operatorname{shape}}
\newcommand{\Sym}{\operatorname{Sym}}
\newcommand{\evac}{\operatorname{evac}}
\newcommand{\lp}{\operatorname{lp}}
\newcommand{\Z}{{\mathbb Z}}
\newcommand{\R}{{\mathbb R}}
\pagestyle{myheadings}
\markright{\sc the electronic journal of combinatorics 2 (1995),\#R23\hfill}
\thispagestyle{empty}
\begin{document}
\input epsf
\title{Matrices connected with Brauer's centralizer algebras
\footnote{Subject Class 05E15, 05E10}
}
\author{{Mark D. McKerihan
\thanks{This research was supported in part by a Department of
Education graduate fellowship at the University of Michigan}
}\\
{\small \em Department of Mathematics}\\
{\small \em University of Michigan}\\
{\small \em Ann Arbor, MI 48109}\\
{\small \em e-mail: \tt mckerihn@math.lsa.umich.edu}\\
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\date{\small Submitted: October 9, 1995; Accepted: October 31, 1995}
\maketitle
\begin{abstract}
In a 1989 paper \cite{HW1}, Hanlon and Wales showed that the algebra structure
of the Brauer Centralizer Algebra $A_f^{(x)}$ is completely determined by the
ranks of certain combinatorially defined square matrices $Z^{\lamu}$,
whose entries are polynomials in the parameter $x$. We consider a set of
matrices $M^{\lamu}$ found by Jockusch that have a similar combinatorial
description. These new matrices can be obtained from the original matrices by
extracting the terms that are of ``highest degree'' in a certain sense.
Furthermore, the $M^{\lamu}$ have analogues ${\cal M}^\lamu$ that play the
same role that the $Z^{\lamu}$ play in $A_f^{(x)}$, for another algebra that
arises naturally in this context.
We find very simple formulas for the determinants of the matrices
$M^{\lambda/\mu}$ and ${\cal M}^\lamu$, which prove Jockusch's original
conjecture that $\det M^\lamu$ has only integer roots. We define a Jeu de
Taquin algorithm for standard matchings, and compare this algorithm to the
usual Jeu de Taquin algorithm defined by Sch\"utzenberger for standard
tableaux. The formulas for the determinants
of $M^{\lambda/\mu}$ and ${\cal M}^\lamu$ have elegant statements in terms of
this new Jeu de Taquin algorithm.
\end{abstract}
\tableofcontents
\section{Introduction}
\label{intro}
Brauer's Centralizer Algebras were introduced by Richard Brauer \cite{Brauer}
in 1937 for the purpose of studying the centralizer algebras of orthogonal and
symplectic groups on the tensor powers of their defining representations. An
interesting problem that has been open for many years now is to determine
the algebra structure of the Brauer centralizer algebras $A^{(x)}_f$. Some results about the
semisimplicity of these algebras were found by Brauer, Brown and Weyl, and have
been known for quite a long time (see \cite{Brauer},\cite{Brown},\cite{Weyl}).
More recently,
Hanlon and Wales \cite{HW1} have been able to reduce the question of the
structure of $A^{(x)}_f$ to finding the ranks of certain matrices $Z^\lamu(x)$.
Finding these ranks has proved very difficult in general. They have
been found in several special cases, and there are many conjectures about
these matrices which are supported by large amounts of computational evidence.
One conjecture arising out of this work was that $A^{(x)}_f$ is
semisimple unless $x$ is a rational integer. Wenzl \cite{Wenzl} has used a
different approach (involving ``the tower construction'' due to Vaughn Jones
\cite{Jones}) to prove this important result. In our work we take the point
of view taken by Hanlon and Wales in \cite{HW1}-\cite{HW4}, and we pay
particular attention to the case where $x$ is a rational integer.
We consider subsets of $\Z^+ \times \Z^+$, which we will
think of as the set of positions in an infinite matrix, whose rows are
numbered from top to bottom, and whose columns are numbered from left to
right. Thus, the element $(i,j)$ will be thought of as the position in the
$i$th row, and $j$th column of the matrix. These positions will be called
{\it boxes}.
\begin{defn}
Define the partial order $<_s$, ``the standard order'' on $\Z^+ \times \Z^+$,
by $x \leq _s y$ if $x$ appears weakly North and weakly West of $y$.
\end{defn}
\begin{defn}
Define the total order $<_h$, ``the Hebrew order'' on $\Z^+ \times \Z^+$, by
$x <_h y$ if $y$ is either strictly South of $x$, or if $y$ is in the same
row as $x$ and strictly West of $x$ in that row.
\end{defn}
\begin{defn}
A finite subset $D \subset \Z^+ \times \Z^+$ will be called a {\it diagram}.
A {\it matching} of the diagram $D$ is a fixed point free involution
$\epsilon:D \rightarrow D$. A matching $\delta$ of the diagram $D$ is called
{\it standard} if for every $x,y \in D$, $x<_s y$ implies that
$\delta(x)<_h \delta(y)$.
\end{defn}
We will usually use $\epsilon$ to denote
an arbitrary matching, while $\delta$ will be reserved for standard matchings.
It will sometimes be convenient to think of matchings in a slightly different
way, namely as 1-{\it factors}. A 1-factor is a graph such that every
vertex is incident with exactly one edge. If $\epsilon$ is a matching of $D$,
then we can think of $\epsilon$ as a 1-factor by putting an edge between $x$
and $\epsilon(x)$ for all $x \in D$. Note that if there is a matching of
shape $D$, then $D$ must contain an even number of boxes.
\begin{example}
There are three matchings of shape (4,2)/(2). They are represented below as
the 1-factors $\delta$, $\delta'$ and $\epsilon$. The matchings $\delta$ and
$\delta'$ are both standard, while $\epsilon$ is not standard.
\end{example}
\begin{figure}[!htb]
%%\centerline{\psfig{figure=stdeg.eps,width=\linewidth}}
\epsfxsize = \linewidth
\epsffile{stdeg.eps}
\end{figure}
\begin{rem}
An immediate consequence of the definition for a standard matching is that
one can never have an edge between boxes $x$ and $y$ if both
$x <_s y$ and $x <_h y$. This means that there can be no NW-SE edges in a
standard matching, nor N-S (or vertical) edges. There can be E-W (horizontal)
edges however.
\end{rem}
Let $F_D$ be the set of matchings of $D$, and let $V_D$ be the real vector
space with basis $F_D$. Let $A_D$ be the set of standard matchings of $D$.
If $\lamu$ is a skew shape then let $[\lamu] \subset \Z^+ \times \Z^+$ be
the set of boxes $(i,j)$ such that $\mu_i < j \leq \lambda_i$. If
$D = [\lamu]$ for some skew shape $\lamu$, then we will sometimes drop the
brackets, especially in subscripts. For example, by convention
$F_\lamu = F_{[\lamu]}$.
Suppose that $\lamu$ is a skew shape. Let $S_\lamu$ denote the symmetric
group on the set $[\lamu]$. There is an $S_\lamu$ action
on $F_\lamu$ given by
\begin{equation}
(\pi \epsilon)(x) = \pi (\epsilon (\pi^{-1} x))
\end{equation}
where $\pi \in S_\lamu$.
In terms of 1-factors, this is equivalent to saying that $x$ and $y$ are
adjacent in $\epsilon$ if and only if $\pi(x)$ and $\pi(y)$
are adjacent in $\pi \epsilon$.
Let $C_\lamu$ (resp. $R_\lamu$), the column stabilizer (resp. row
stabilizer) of $[\lamu]$, be the subgroup of
$S_\lamu$, consisting of permutations $\pi$, such that $\pi(x)$ is in
the same column (resp. row) as $x$, for all $x \in [\lamu]$.
If $\epsilon_1$ and $\epsilon_2$ are matchings of shape $[\lamu]$, we obtain
a new graph on the vertex set $[\lamu]$ by simply superimposing the two
matchings. We denote this new graph by $\epsilon_1 \cup \epsilon_2$. We
define $\gamma(\epsilon_1,\epsilon_2)$ to be the number of cycles in
$\epsilon_1 \cup \epsilon_2$ (which is the same as the number of connected
components in $\epsilon_1 \cup \epsilon_2$).
\begin{example}
Below are two matchings of shape (5,4,2)/(2,1), $\epsilon_1$ and $\epsilon_2$.
Here $\gamma(\epsilon_1,\epsilon_2) = 2$.
\begin{figure}[!htb]
%%\centerline{\psfig{figure=union1.eps,height=2.0in}}
\begin{center}
\begin{minipage}{10cm}
\epsfxsize = 10cm
\epsffile{union1.eps}
\end{minipage}
\end{center}
\end{figure}
\end{example}
\newpage
We define the $A_{\lamu} \times A_{\lamu}$ matrix $M = M^\lamu(x)$ as follows:
\begin{equation}
M_{ij}=M_{\delta_i, \delta_j}=\sum_{\sigma \in C_{\lambda / \mu}} \
\sum_{\tau \in R_{\lambda / \mu}}
\sgn(\sigma) x^{\gamma(\sigma \tau \delta_i, \delta_j)}
\end{equation}
where $A_{\lamu} = \{\delta_1, \dots, \delta_s\}$.
We have
defined a matching of shape $\lamu$ to be a fixed point free involution of
$[\lamu]$, or equivalently a 1-factor on $[\lamu]$. If
$|\mu| = m$, and $|\lamu| = 2k$, then we can also think of a matching of
shape $\lamu$ as a {\it labelled $(m,k)$ partial 1-factor} on the set
$[\lambda]$. A labeled $(m,k)$ partial 1-factor is a graph on $f=m+2k$
points, where $2k$ points are incident with exactly one edge, and $m$
points (called {\it free points}) are incident with no edges. These free
points are labelled with the numbers $1,2,\dots,m$. For a matching
of shape $\lamu$, the free points are the boxes in $[\mu]$, and we label them
in order from left to right in each row, from the top row to the bottom row.
Let $P_{\lambda,m}$ be the set of labelled $(m,k)$ partial 1-factors on
$[\lambda]$.
There is an $S_\lambda$ action on $P_{\lambda,m}$ given by saying that $x$
and $y$ are adjacent in $\epsilon$, if and only if $\pi(x)$ and $\pi(y)$
are adjacent in $\pi \epsilon$, and if $x$ is a free point in $\epsilon$
with label $i$, then $\pi(x)$ is a free point in $\pi \epsilon$ with label
$i$. Note that $F_\lamu \subseteq P_{\lambda,m}$ and that the $S_\lamu$ action
we defined on $F_\lamu$ is equivalent to the restriction of the $S_\lambda$ action on $F_\lamu$ to those permutations in $S_\lambda$ that fix $[\mu]$
pointwise. As before, we define $R_\lambda$ (resp. $C_\lambda$) to be the
subgroup of $S_\lambda$ that stabilizes the rows (resp. columns) of $\lambda$.
Suppose that $\epsilon_1, \epsilon_2$ are labelled $(m,k)$ partial 1-factors
in $P_{\lambda,m}$. Then $\epsilon_1 \cup \epsilon_2$ is a graph on the
vertex set $[\lambda]$ consisting of exactly $m$ paths (an isolated point is
considered a path of length zero), and some number
$\gamma(\epsilon_1,\epsilon_2)$ of cycles, each of which has even length.
Each of the $m$ paths is a path
from one labelled point to another. Let $\zeta(\epsilon_1,\epsilon_2)$ equal
$1$ if each path has the same label at both endpoints, and $0$ otherwise. We
can now define $Z = Z^\lamu(x)$ as follows.
\begin{equation}
Z_{ij}=Z_{\delta_i, \delta_j}=\sum_{\sigma \in C_\lambda} \
\sum_{\tau \in R_\lambda}
\sgn(\sigma) \zeta(\sigma \tau \delta_i,\delta_j)
x^{\gamma(\sigma \tau \delta_i, \delta_j)}
\end{equation}
The terms that appear in $M$ are a subset of those that
appear in $Z$ because if $\sigma \in C_\lamu$ then $\sigma$ fixes $[\mu]$
pointwise, and the same is true for all $\tau \in R_\lamu$. Thus,
$\sigma \tau$ fixes $[\mu]$ pointwise, and it follows that
$\zeta(\sigma \tau \delta_i, \delta_j) = 1$ for all $i,j$. One can think of
$M$ as the component of $Z$ that leaves $[\mu]$ fixed. In this paper, we
are able to find the determinant of $M$ precisely. In order to find the
determinant of $Z$, one might try to get an intermediate result which would
involve matrices which only allowed the boxes in $[\mu]$ to move in certain
restricted ways. If one could get results about such matrices, and then find
a way to remove the restrictions, one might finally arrive at the determinant
of $Z$. This would be a powerful tool for finding the rank of $Z$, which is
equivalent to determining the algebra structure of $A^{(x)}_f$ completely.
If $x = n \in \Z^+$, one can generalize the definition of the matrix $Z$ by
introducing power sum symmetric functions to keep track of the lengths of
cycles. Recall that for $i \geq 0$, the $i$th power sum on $y_1,\dots,y_n$
is given by
\begin{equation}
p_i(y_1,\dots,y_n) = \sum_{j=1}^n y_j^i.
\end{equation}
Note that $p_i(1,\dots,1) = n$ for all $i$. For any partition
$\nu=(\nu_1,\nu_2,\dots, \nu_l)$, we
define $p_\nu = \prod p_{\nu_i}$. If $\epsilon_1$ and $\epsilon_2$
are labelled $(m,k)$ partial 1-factors in $P_{\lambda,m}$, then define
$\Gamma(\epsilon_1,\epsilon_2)$ to be the partition having one part for each
cycle in $\epsilon_1 \cup \epsilon_2$. If a cycle in
$\epsilon_1 \cup \epsilon_2$ has length $2r$, then its corresponding part in
$\Gamma(\epsilon_1,\epsilon_2)$ is $r$.
\begin{example}
Below are two $(3,4)$ partial 1-factors, $\epsilon_1$ and $\epsilon_2$.
In this example $\gamma(\epsilon_1,\epsilon_2) = 2$,
$\Gamma(\epsilon_1,\epsilon_2) = (2,1)$ and $\zeta(\epsilon_1,\epsilon_2)=1$.
\end{example}
\begin{figure}[!htb]
%%\centerline{\psfig{figure=union2.eps,height=2.0in}}
\begin{center}
\begin{minipage}{10cm}
\epsfxsize = 10cm
\epsffile{union2.eps}
\end{minipage}
\end{center}
\end{figure}
Define ${\cal Z} = {\cal Z}^\lamu(y_1,\dots,y_n)$ by
\begin{equation}
{\cal Z}_{ij}={\cal Z}_{\delta_i, \delta_j}=\sum_{\sigma \in C_\lambda} \
\sum_{\tau \in R_\lambda}
\sgn(\sigma) \zeta(\sigma \tau \delta_i,\delta_j)
p_{\Gamma(\sigma \tau \delta_i, \delta_j)}.
\end{equation}
It is clear that ${\cal Z}^\lamu(1,\dots,1) = Z^\lamu(n)$.
In \cite{HW1}, Hanlon and Wales show that if $\mu = \emptyset$ and
$\lambda$ consists entirely of even parts (such a partition is called
{\it even}), then ${\cal Z}^\lamu(y_1,\dots,y_n)$ is a one by one matrix
whose only entry is a scalar multiple of the {\it zonal polynomial}
$z_\nu(y_1,\dots,y_n)$ where $\lambda = 2\nu$ (i.e. $\lambda_i = 2\nu_i$ for
all $i$). Zonal polynomials were introduced by
A.T. James (see \cite{Ja1}-\cite{Ja3}) in connection with his work on
multidimensional generalizations of the chi-squared distribution. Here we will
just say that $z_\nu$ is a homogeneous symmetric function of degree
$|\nu|$, and
$z_\nu(1,\dots,1) = h_\lambda(n)$, where $h_\lambda(x)$ is the single
entry of the matrix $Z^{\lambda/\emptyset}(x)$. A formula for $h_\lambda(x)$
is given in \cite{HW1} which we state as Theorem \ref{t:HWh}.
If $|\lamu| = 2k$, then in terms of the monomials $y_1^{i_1} \cdots y_n^{i_n}$,
the entries of the matrix ${\cal Z}$ have degree at most $k$. To see this,
observe that the degree of $p_{\Gamma(\sigma \tau \delta_i, \delta_j)}$ is
equal to $|\Gamma(\sigma \tau \delta_i, \delta_j)|$, and that
$|\Gamma(\sigma \tau \delta_i, \delta_j)| \leq k$ because there are $2k$
edges in $\sigma \tau \delta_i \cup \delta_j$. If all $2k$ edges are contained
in cycles then the term will have degree $k$, but if any edges are contained
in paths, then the degree will be smaller than $k$.
Define ${\cal M} = {\cal M}^\lamu(y_1,\dots,y_n)$ to be the matrix obtained
from ${\cal Z}$ by extracting the terms of degree $k$. Thus, ${\cal M}$ will
be a matrix consisting entirely of zeroes and terms of degree $k$. As we
noted above, in order for a term in ${\cal Z}$ to have degree $k$, every edge
in $\sigma \tau \delta_i \cup \delta_j$ must be contained in a cycle, or
equivalently every path must be an isolated point (and this point must have
the same label in $\sigma \tau \delta_i$ and $\delta_j$). It is not hard to
see that this happens if and only if $\tau$ and $\sigma$ are both
in $S_\lamu$, i.e. both fix $[\mu]$ pointwise. Hence
\begin{equation}
{\cal M}_{ij}={\cal M}_{\delta_i, \delta_j}=\sum_{\sigma \in C_\lamu} \
\sum_{\tau \in R_\lamu}
\sgn(\sigma) p_{\Gamma(\sigma \tau \delta_i, \delta_j)}.
\end{equation}
It follows that ${\cal M}^\lamu(1,\dots,1) = M^\lamu(n)$. In this sense,
the matrix $M$ can be considered the ``highest degree'' part of the matrix
$Z$, where we are computing the ``degree'' of the terms using the homogeneous
degree of the corresponding terms in the matrix ${\cal Z}$. For example,
the terms
$p_2 p_1$ and $p_1^3$ have the same homogeneous degree three. But when we
specialize $p_2 = p_1 = n$, the corresponding terms are $n^2$ and $n^3$
respectively. In the sense described above however, both terms have
``degree'' equal to three.
Also, there is more than one way to obtain $n^i$ for any $i>1$.
This means that one generally cannot reconstruct ${\cal M}$ by substitution
in $M(n)$.
The matrix ${\cal M}$ has an interesting algebraic interpretation which we
briefly describe here. To do this we give a short description of the
algebra $A^{(x)}_f$ and the closely related algebra $A^\Lambda_f$.
See \cite{HW1} for a more complete description. Both algebras have
the same basis, namely the set of $1$-factors on $2f$ points. To define the
product of two such $1$-factors $\delta_1$ and $\delta_2$, we construct a
graph $B(\delta_1,\delta_2)$. We can think of this graph as a $1$-factor
$\beta(\delta_1,\delta_2)$ together with some number
$\gamma(\delta_1,\delta_2)$ of cycles of even lengths
$2l_1, 2l_2,\dots, 2l_{\gamma(\delta_1,\delta_2)}$.
The product in $A^{(x)}_f$ is given by
$$\delta_1 * \delta_2 = x^{\gamma(\delta_1,\delta_2)}\beta(\delta_1,\delta_2).$$
In $A^\Lambda_f$, the product is
$$\delta_1 \cdot \delta_2 =
\left(\prod_{j=1}^{\gamma(\delta_1,\delta_2)} p_{l_j}(y_1,\dots,y_n)\right)
\beta(\delta_1,\delta_2)$$
where $p_i(y_1,\dots,y_n)$ is the $i$th power sum.
Since $p_i(1,\dots,1)=n$ for all $i$, the
specialization of $A^\Lambda_f$ to $y_1=\cdots=y_n=1$ is isomorphic to
$A^{(n)}_f$.
In $\Al$ there is an important tower of ideals
$\Al=\Al(0) \supset \Al(1) \supset \Al(2) \supset \cdots$. Let
$\Dl(k)=\Al(k)/\Al(k+1)$. In \cite{HW1}, Hanlon and Wales express the
multiplication in $\Dl(k)$ in terms of a matrix
${\cal Z}_{m,k} = {\cal Z}_{m,k}(y_1,\dots,y_n)$
where $f=m+2k$. Furthermore, they show that the algebra structure of $\Dl(k)$
for particular values of $y_1,\dots,y_n$ is completely determined by the
rank of ${\cal Z}_{m,k}$. Their work implies that $\det({\cal Z}_{m,k}) \neq 0$
for the values $y_1,\dots,y_n$ if and only if $\Dl(k)$ is semisimple for those
values.
A typical element of $\Dl(k)$ is a sum of terms of the form $f\delta$ where
$f$ is a homogeneous symmetric function and $\delta$ is a certain type of
$1$-factor. Let $\gr(f\delta)=\deg(f) + k$. The
multiplication in $\Dl(k)$ respects this grading in the sense that
$\gr(f_1\delta_1 \cdot f_2\delta_2) \leq \gr(f_1\delta_1)+\gr(f_2\delta_2)$.
Let $\widetilde{\Dl}(k)$ be the associated graded algebra. One can construct
matrices ${\cal M}_{m,k} = {\cal M}_{m,k}(y_1,\dots,y_n)$ that play
the same role in $\widetilde{\Dl}(k)$ that the ${\cal Z}_{m,k}$ play in
$\Dl(k)$. It turns out that ${\cal M}_{m,k}$ is the matrix obtained from
${\cal Z}_{m,k}$ by extracting highest degree terms.
Using the representation theory of the symmetric group, one can show that
the matrix ${\cal M}_{m,k}$ is similar to a direct sum of matrices
${\cal M}^{\lamu}$ where $\lambda$ and $\mu$ are partitions of $f$ and $m$
respectively. These matrices ${\cal M}^\lamu$ are precisely the matrices
${\cal M}$ defined above. The main result of this paper is a formula for
the determinant of ${\cal M}$, which can be interpreted as a discriminant for
the algebra $\widetilde{\Dl}(k)$ in the same way that $\det {\cal Z}$ is a
discriminant for $\Dl(k)$.
This paper is split into two main sections. In section \ref{roots}, we prove
several basic facts about standard matchings which are needed to compute the
determinant of $M$. In particular, we find an ordering on
matchings (defined in \ref{s:Trescol}) such that if $\delta$ is a standard
matching, then any column permutation of $\delta$ yields a matching which is
weakly greater than $\delta$ in this order (see Theorem \ref{t:order}). In
Theorem \ref{t:Wbasis} we show that the standard matchings of shape
$\lamu$ index a basis for an important vector space associated to $\lamu$.
This part of the paper is very similar in flavor to standard constructions
of the irreducible representations of the symmetric group. Using these two
theorems, we are able to give an explicit product formula for the determinant of
$M$ in Theorem \ref{t:main1}. This formula has the following form:
$$
\det M = C \prod_{2\nu \vdash |\lamu|} h_{2\nu}(x)^{c^\lambda_{\mu \ 2\nu}}
$$
where $C$ is some nonzero real number, and
$c^\lambda_{\mu \nu}$ is a Littlewood-Richardson coefficient. The same
argument also shows that for $x = n \in \Z^+$,
$$
\det {\cal M} = C \prod_{2\nu \vdash |\lamu|}
z_\nu(y_1,\dots,y_n)^{c^\lambda_{\mu \ 2\nu}}
$$
where the constant $C$ is the same as the one above.
In section \ref{JdT}, we introduce a Jeu de Taquin algorithm for standard
matchings. Much of this section is devoted to a comparison of this algorithm
with the well known Jeu de Taquin algorithm for standard tableaux invented by
Sch\" utzenberger. This makes
sense because there is a natural way to think about any matching as a tableau
such that a matching is standard if and only if it is standard as a tableau.
In Theorem \ref{t:JdTstandard} we show that if Jeu de Taquin for matchings is
applied to a standard matching of a skew shape with one box removed, then the
output is another standard matching. Theorem \ref{t:dKequiv}
gives a description of how the two Jeu de Taquin algorithms compare in terms
of the dual Knuth equivalence for permutations. Theorem \ref{t:dKequiv} is
used to show in Theorem \ref{t:sameshape} that if Jeu de Taquin is used to
bring a standard matching of a skew shape to a standard matching of a
normal shape (the shape of a partition), then both algorithms arrive at the
same normal shape, and as a consequence of this, the standard matching of
normal shape obtained from any standard matching is independent of the
sequence of Jeu de Taquin moves chosen. Finally, using Theorem \ref{t:white},
a result of Dennis White \cite{White} we find that the number of times the
normal shape $\nu$ appears as the shape obtained from a standard matching of
shape $\lamu$ using Jeu de Taquin is the Littlewood-Richardson coefficient
$c^\lambda_{\mu \nu}$ (Theorem \ref{t:LRequiv}). Using this theorem we obtain
elegant restatements of the main results from section \ref{roots} (Theorems
\ref{t:main1} and \ref{t:main1gen}) in terms of the Jeu de Taquin algorithm
for standard matchings.
\subsection{Acknowledgments}
The author would like to thank William Jockusch for suggesting the problem
discussed in this paper, and Phil Hanlon for valuable discussions leading to
the results described here.
\section{Determinants of $M$ and ${\cal M}$}
\label{roots}
\subsection{Column permutations of standard matchings}
\label{s:colperm}
\begin{defn}
\label{d:T}
Suppose $\epsilon$ is a matching of shape $\lamu$. Define $T(\epsilon)$
to be the filling of $[ \lamu ]$ that puts $i$ in box $x$ if $\epsilon(x)$
is in row $i$. Define $\overline{T(\epsilon)}$ to be the filling obtained
by rearranging the elements in each row of $T(\epsilon)$ in increasing order
from left to right.
\end{defn}
\begin{example}
Here are $T(\epsilon)$ and $\overline{T(\epsilon)}$ for the matching
$\epsilon$ shown below.
\begin{figure}[!htb]
\epsfxsize=\linewidth
\epsffile{Teg.eps}
\end{figure}
\end{example}
\begin{lem}
\label{l:semistandard}
If $\delta$ is a standard matching then $T(\delta)$ is a semistandard tableau.
\end{lem}
\begin{proof}
Suppose $x \in [\lamu]$. If $y \in [\lamu]$ is immediately below or to the right
of $x$ then $x <_s y$. Hence $\delta(x) <_h \delta(y)$, and it follows
that $\delta(y)$ must be in the same row as $\delta(x)$ or below.
If $y \in [\lamu]$ is immediately below $x$, then $\delta(y)$ cannot be in the same
row as $\delta(x)$ because if that were the case, then $\delta(y)$ would have to
be to the left of $\delta(x)$, i.e. $\delta(y) <_s \delta(x)$. But this implies
that $y <_h x$, a contradiction.
Thus, $T(\delta)$ increases weakly in its rows, and strictly in its columns.
\end{proof}
\begin{rem}
\label{r:bij}
Lemma \ref{l:semistandard} implies that if $\delta$ is a standard matching,
then $\overline{T(\delta)} = T(\delta)$.
Also, it is not hard to see that $\delta \mapsto T(\delta)$ is a
bijection between standard matchings of shape $\lamu$ and semistandard
tableaux of shape $\lamu$ satisfying
1. Row $i$ has an even number of $i$'s, and
2. Row $i$ has $k$ $j$'s if and only if row $j$ has $k$ $i$'s.
\end{rem}
For the notation described below and the following two lemmas, let $\delta$
be some fixed standard matching of shape $\lamu$.
\begin{notation}
Let $R_i$ denote the $i$th row of $[\lamu]$. If $x \in [\lamu]$ then let
$\row(x)$ denote the row number in which $x$ appears. For all $i \in \Z$ let
$C_i$ denote the subset of $[\lamu]$ defined as follows:
If $i \geq 0$ then $C_i$ is the union of the columns of $[\lamu]$ that have an
$i+1$ in the first row of $\overline{T(\delta)}=T(\delta)$.
If $i<0$ then $C_i$ is the union of the columns of $[\lamu]$ whose top box
is in row $|i|+1 = 1-i$.
\end{notation}
\newpage
\begin{example} Below is $T(\delta)$ for a standard matching for which
the sets $R_i$ and $C_i$ are shown.
\begin{figure}[!htb]
\begin{center}
\begin{minipage}{11cm}
\epsfxsize=11cm
\epsffile{Ceg.eps}
\end{minipage}
\end{center}
\end{figure}
\end{example}
\begin{rem}
Note that $R_i \cap C_j = \emptyset$ unless $j \geq 1-i$.
\end{rem}
\begin{lem}
\label{l:strip}
Suppose $x \in C_i$, and $\delta(x) \in C_j$. Then
{\it a}. \ \ $\row(\delta(x)) \geq \row(x) + i$, and
$\row(x) \geq \row(\delta(x)) + j$,
{\it b}. \ \ $i + j \leq 0$, and
{\it c}. \ \ If \ $i = -j$, then $\row(x)=\row(\delta(x))+i$, i.e. $x$ is
exactly $i$ rows below $\delta(x)$.
\end{lem}
\begin{proof}
Suppose $x \in R_k$, and $\delta(x) \in R_l$. In $T(\delta)$, position $x$
has an $l$ in it. Lemma \ref{l:semistandard} implies that $l \geq k + i$,
which is precisely the first statement in {\it a}. The second statement
in {\it a} follows from the first by noting that $\delta(\delta(x))=x$.
Both {\it b} and {\it c} are immediate consequences of {\it a}.
\end{proof}
\begin{lem}
\label{l:leftcol}
Suppose $i \geq 0$, and $x \in C_i$ satisfies $\row(\delta(x))=\row(x) + i$.
Suppose also that there are $k$ columns in $C_i$ to the right of $x$. Then
there are no more than $k$ columns in $C_{-i}$ to the left of $\delta(x)$.
\end{lem}
\begin{proof}
Let $z$ be a box in $R_{\row(\delta(x))} \cap C_{-i}$ that lies to the left
of $\delta(x)$. We have $z <_s \delta(x)$, so $\delta(z) <_h x$. Thus,
$\delta(z)$ lies in $R_{\row(x)}$ or above. But Lemma
\ref{l:strip} {\it a} implies that
\begin{equation}
\row(\delta(z)) \geq \row(z) - i = \row(x)
\end{equation}
i.e. $\delta(z)$ lies in
$R_{\row(x)}$ or below. Furthermore, Lemma \ref{l:strip}
{\it b} says that $\delta(z)$ lies in $C_i$ or to its left. Therefore,
$\delta(z)$ lies in $R_{\row(x)} \cap C_i$ and to the right of $x$.
The number of such boxes $z$ is obviously bounded by $k$.
\end{proof}
One useful consequence of these two lemmas is the following Corollary.
\begin{cor}
\label{c:oneofevenshape}
Suppose $\lambda \vdash 2k$. Then, if $\lambda$ is even, there
is exactly one standard matching of shape $\lambda$. If $\lambda$ is not
even, then there are no standard matchings of shape $\lambda$.
\end{cor}
\begin{proof}
Suppose that $\delta$ is a standard matching of shape $\lambda$.
Observe that because $\lambda$ is normal, $C_i = \emptyset$ for all
$i < 0$. But now, Lemma \ref{l:strip} {\it b} implies that $C_i = \emptyset$
for all $i > 0$ as well. Thus $[\lambda] = C_0$.
Now, by Lemma \ref{l:strip} {\it c}, all edges of $\delta$ must be horizontal,
which implies immediately that $\lambda$ is even. Furthermore, for a single
row of an even number of boxes, it is not hard to see that there is exactly
one matching that is standard, namely the one that connects the $i$th box
from the left to the $i$th box from the right of the row. Putting all of
these rows together, we see that the resulting matching is indeed standard.
\end{proof}
\begin{defn}
\label{d:order}
Define the total order $\prec$ on tableaux of
shape $\lamu$ with weakly increasing rows as follows:
Write $S \prec T$, if the word $w_S$ obtained by reading the rows of $S$
from left to right, from the top row to the bottom row, lexicographically
precedes the corresponding word $w_T$, obtained by reading $T$ in the same
order.
\end{defn}
\begin{rem}
Note that $\prec$ induces a total order on standard matchings (but not all
matchings) of shape $\lamu$, via $\delta_1 \prec \delta_2$ if and only if
$\overline{T(\delta_1)} \prec \overline{T(\delta_2)}$. This follows from
Remark \ref{r:bij}, which implies that
a standard matching $\delta$ is completely determined by $\overline{T(\delta)}$. Note that this is not true of matchings in general. In other words, there
could be several matchings with the same associated tableau, but there can
be at most one standard matching associated to any tableau.
\end{rem}
\begin{thm}
\label{t:order}
Suppose $\sigma \in C_{\lamu}$, and
suppose $\delta$ is a standard matching of shape $\lamu$. Then
$\overline{T(\sigma \delta)} \succeq \overline{T(\delta)}$, and
$\overline{T(\sigma \delta)} = \overline{T(\delta)}$ if and only if
$\sigma \delta = \delta$.
\end{thm}
\begin{proof}
We induct on $| [\lamu] |$. If $| [\lamu] | = 2$, the statement of the
theorem is trivially true since there is only one matching of shape
$\lamu$ in that case.
If $| [\lamu] | > 2$, then let $k$ be the smallest nonnegative integer such
that $C_k \neq \emptyset$, i.e. $k+1$ is the entry of
$\overline{T(\delta)}$ in the
leftmost box of the first row. Let $H$ be the subgroup of the column
stabilizer of $[\lamu]$ consisting of permutations $\tau$ such that
$(\tau \delta)(x) = \delta(x)$ for all $x \in R_1 \cap C_k$.
Let $A = R_1 \cap C_k$, and let $\alpha$ (resp. $\beta$) be the word obtained
by reading the entries corresponding to the positions of $A$ in
$\overline{T(\delta)}$ (resp. $\overline{T(\sigma \delta)}$) from left to
right. Note that $\alpha$ is a word consisting entirely of $k+1$'s.
First, we will show that $\beta$ cannot precede $\alpha$ in the lexicographic
order. Then, we will show that if $\beta = \alpha$, then $\sigma \in H$.
Once we have shown these two things, we will be able to finish the proof of
the theorem by induction.
To show that $\beta$ does not precede $\alpha$ lexicographically, we show that
$\beta$ cannot contain any letters smaller than $k+1$. If $\beta$ contained
$l < k+1$, then for some $x \in R_1$, $(\sigma \delta)(x) \in R_l$. Suppose
$x \in C_i$. Note that $i \geq k$, and
$(\sigma \delta)(x) \in C_j$ for some $j \geq 1-l > -k$.
(Recall that $R_l \cap C_j = \emptyset$ unless $j \geq 1-l$). Since $\sigma$
stabilizes the columns of $[\lamu]$, this implies that there exists some
$z \in C_i$ such that $\delta(z) \in C_j$. But $i + j > k - k = 0$,
which contradicts Lemma \ref{l:strip} {\it b}. We conclude that $\beta$
cannot precede $\alpha$ lexicographically. Observe that this same argument
shows that if $x \in R_1$ and $(\sigma \delta)(x) \in R_{k+1}$, then
$x \in C_k$, and $(\sigma \delta)(x) \in C_{-k}$.
Suppose now that $\beta = \alpha$, and suppose that $\sigma \notin H$. Write
the elements of $A$ as $x_1, x_2, \dots ,x_a$ in order from right to left, and
let $x_i$ be the rightmost box in $A$ such that
$y = (\sigma \delta)(x_i) \neq \delta(x_i)$. In $R_{k+1}$, let
$y_1, y_2, \dots ,y_a$ be the first $a$ boxes from left to right. Note that
for all $j \in \{ 1,\dots,a \}$, $\delta(x_j) = y_j$. Furthermore, for each
$j \in \{ 1,\dots,a \}$, $\delta(y_j) = x_j \in R_1$, and hence
$\overline{T(\delta)}$
has a $1$ in position $y_j$ for all such $j$. This implies that each such
$y_j$ must be at the top of its column, and hence in $C_{-k}$.
Since $\beta = \alpha$, $y \in R_{k+1}$. Also, $y \neq y_j$ for any $j \leq i$.
Thus $y$ is farther right in $R_{k+1}$ than $y_i$. Since $\sigma$ stabilizes
the columns of $[\lamu]$, this implies that there is some $z \in C_k$ in the
same column as $x_i$, such that $\delta(z)$ is in the same column as $y$. Now,
by the comments above, $\delta(z)$ must be in $C_{-k}$ or
to its right. Lemma \ref{l:strip} {\it b} implies that $\delta(z)$ must be in
$C_{-k}$ or to its left. Thus $\delta(z) \in C_{-k}$, and now Lemma
\ref{l:strip} {\it c} implies that $\delta(z)$ is exactly $k$ rows below $z$.
By Lemma \ref{l:leftcol}, $\delta(z)$ must be in the same column as $y_i$, or
to its left. But this contradicts the fact that $y$ lies to the right of $y_i$
and $\delta(z)$ is in the same column as $y$. It follows that $\sigma \in H$
as desired.
We have shown that
\begin{equation}
\label{e:succ}
\sigma \notin H \implies \overline{T(\sigma \delta)} \succ \overline{T(\delta)}.
\end{equation}
If $\sigma \in H$ then let $\mu^*$ be the partition such that
$[\mu^*] = [\mu] \cup A \cup \delta(A)$. Let $\delta^*$ be the standard
matching of shape $\lambda/\mu^*$ that is $\delta$ restricted to $[\lambda/\mu^*]$. Let
$\sigma^* \in H$ be a column permutation of $[\lamu]$ such that $\sigma^*$
fixes $A \cup \delta(A)$ pointwise, and $\sigma^* \delta = \sigma \delta$.
It is clear that such a $\sigma^*$ exists because of the definition of $H$.
We can think of $\sigma^*$ as a column permutation of $[\lamu^*]$.
By induction, we have
\begin{equation}
\label{e:ind1}
\overline{T(\sigma^* \delta^*)} \succeq \overline{T(\delta^*)}
\end{equation}
and
\begin{equation}
\label{e:ind2}
\overline{T(\sigma^* \delta^*)} = \overline{T(\delta^*)} \iff
\sigma^* \delta^* = \delta^*.
\end{equation}
Now, $\overline{T(\sigma \delta)} = \overline{T(\sigma^* \delta)}$
has the same letters as $\overline{T(\delta)}$ in the positions in
$A \cup \delta(A)$, so it is clear from (\ref{e:ind1}) that
$T(\sigma \delta) \succeq T(\delta)$. Moreover, (\ref{e:ind2}) implies
that $T(\sigma \delta) = T(\delta)$ if and only if $\sigma \delta = \delta$.
\end{proof}
\subsection{Product formulas for $M$ and ${\cal M}$}
For the remainder of this paper, we assume that $|\lamu| = 2k$.
It will be convenient to think of the matrices $M$ and ${\cal M}$ as products
of three matrices,
\begin{equation}
\label{e:Matprod}
\begin{split}
M &= P^t T_k(x) J \\
{\cal M} &= P^t {\cal T}_k(y_1,\dots,y_n) J.
\end{split}
\end{equation}
Here, $P$ is the $F_{\lamu} \times A_{\lamu}$ matrix with $(\epsilon, \delta)$
entry equal to the coefficient of $\epsilon$ in $e(\delta) \in V_{\lamu}$,
where $e \in \R[S_{\lamu}]$ is given by
\begin{equation}
\label{e:edefn}
e = \sum_{\sigma \in C_{\lambda / \mu}} \
\sum_{\tau \in R_{\lambda / \mu}}
\sgn(\sigma) \sigma \tau.
\end{equation}
In other words, $P$ is the matrix whose $i$th column is the expansion of
$e(\delta_i)$ in terms of the basis $F_{\lamu}$, where
$\delta_i$ is the $i$th standard matching of shape $\lamu$.
The matrix $T_k(x)$ is the $F_{\lamu} \times F_{\lamu}$ matrix with
$(\epsilon_1, \epsilon_2)$ entry given by
\begin{equation}
\label{e:T}
T_k(x)_{\epsilon_1, \epsilon_2} = x^{\gamma(\epsilon_1, \epsilon_2)}.
\end{equation}
We define ${\cal T}_k(y_1,\dots,y_n)$ by
\begin{equation}
\label{e:calT}
{\cal T}_k(y_1,\dots,y_n)_{\epsilon_1, \epsilon_2} =
p_{\Gamma(\epsilon_1, \epsilon_2)}.
\end{equation}
Note that $T_k(x)$ and ${\cal T}_k(y_1,\dots,y_n)$ are symmetric matrices.
The $F_{\lamu} \times A_{\lamu}$ matrix $J$ is defined as follows:
\begin{equation}
\label{e:J}
J_{\epsilon, \delta} =
\begin{cases} 1 & \epsilon = \delta, \\
0 & \epsilon \neq \delta.
\end{cases}
\end{equation}
We want to show that the $(\delta_i,\delta_j)$ entry of $P^t T_k(x) J$ is
equal to the $(\delta_i,\delta_j)$ entry of $M$. If
$Y$ is any matrix, then let $Y_{\epsilon}$ denote the column of $Y$ indexed
by $\epsilon$. We have
\begin{equation}
\label{e:ijprod}
\begin{split}
(P^t T_k(x) J)_{\delta_i,\delta_j} &= P_{\delta_i}^t T_k(x) J_{\delta_j} \\
&= P_{\delta_i}^t T_k(x)_{\delta_j} \\
&= \sum_{\sigma \in C_{\lambda / \mu}} \
\sum_{\tau \in R_{\lambda / \mu}}
\sgn(\sigma) T_k(x)_{\sigma \tau \delta_i,\delta_j} \\
&= \sum_{\sigma \in C_{\lambda / \mu}} \
\sum_{\tau \in R_{\lambda / \mu}}
\sgn(\sigma) x^{\gamma(\sigma \tau \delta_i, \delta_j)} \\
&= M_{\delta_i,\delta_j}. \\
\end{split}
\end{equation}
Exactly the same argument shows that
${\cal M} = P^t {\cal T}_k(y_1,\dots,y_n) J$ as well.
\subsection{Eigenvalues of $T_k(x)$ and ${\cal T}_k(y_1,\dots,y_n)$}
\label{s:evalsofT}
Using the product formulas (\ref{e:Matprod}) for $M$ and ${\cal M}$, we can
find product formulas for $\det M$ and $\det {\cal M}$. We can do this
because the matrices $T_k(x)$ and ${\cal T}_k(y_1,\dots,y_n)$ are known
to have certain nice properties. What follows is a brief discussion of these
properties. For a more detailed discussion, see \cite{HW1}.
Recall that there is an $S_{\lamu}$ action on $F_{\lamu}$ given by
\begin{equation}
\label{e:Saction}
(\sigma \epsilon) (x) = \sigma(\epsilon(\sigma^{-1}x)).
\end{equation}
This action can be linearly extended to $V_{\lamu}$, which
defines a representation $\rho: S_{\lamu} \rightarrow \End(V_{\lamu})$ as
follows:
\begin{equation}
\label{e:repn}
(\rho(\sigma))(\epsilon) = \sigma \epsilon.
\end{equation}
The key fact about $T_k(x)$ and ${\cal T}_k(y_1,\dots,y_n)$ that we need is
that both commute with the action
of $S_{\lamu}$. In other words, $T_k(x)$ and ${\cal T}_k(y_1,\dots,y_n)$ can
be regarded as endomorphisms of $V_{\lamu}$ satisfying the following equations for all $\sigma \in S_{\lamu}$:
\begin{equation}
\label{e:commute}
\begin{split}
\rho(\sigma) T_k(x) &= T_k(x) \rho(\sigma) \\
\rho(\sigma) {\cal T}_k(y_1,\dots,y_n)&={\cal T}_k(y_1,\dots,y_n) \rho(\sigma).
\end{split}
\end{equation}
This is an easy consequence of the following easy to prove identities (see
\cite{HW1}).
\begin{equation}
\label{e:HW3.1}
\begin{split}
\gamma(\sigma \epsilon_1, \sigma \epsilon_2) &= \gamma(\epsilon_1, \epsilon_2)\\
\Gamma(\sigma \epsilon_1, \sigma \epsilon_2) &= \Gamma(\epsilon_1, \epsilon_2)
\end{split}
\end{equation}
Furthermore, the $S_{\lamu}$ action on $F_{\lamu}$ is equivalent to the
action on the conjugacy class of fixed point free involutions of $S_{\lamu}$.
It follows from \cite[Ex.~5, p.~45]{MacDonald} that
\begin{equation}
\label{e:Vdecomp}
V_{\lamu} = \bigoplus_{2\nu} V_{2\nu}
\end{equation}
where the sum runs over all even partitions $2\nu \vdash 2k$.
Here, $V_{2\nu}$ denotes a submodule of $V_{\lamu}$, isomorphic
to the irreducible $S_{\lamu}$ module indexed by $2\nu$. In this notation,
$V_{(2k)}$ is isomorphic to the trivial representation, while $V_{(1^{2k})}$ is
isomorphic to the sign representation.
Since $V_{\lamu}$ decomposes into a direct sum of irreducibles $V_{2\nu}$,
each of which has multiplicity 1, it follows from Schur's Lemma that $T_k(x)$
restricted to $V_{2\nu}$ is a scalar operator denoted $h_{2\nu}(x) I$.
Similarly, ${\cal T}_k(y_1,\dots,y_n)$ restricted to $V_{2\nu}$ is a scalar
operator. Hanlon and Wales \cite{HW1} compute both of these scalars.
\begin{thm}
\label{t:HWh}
\cite{HW1} Let $2\nu \vdash 2k$. Then
$$h_{2\nu}(x) = \prod_{(i,2j)\in [2\nu]} (x + 2j - i - 1).$$
\end{thm}
\begin{thm}
\label{t:HWz}
\cite{HW1} Let $2\nu \vdash 2k$. Then
${\cal T}_k(y_1,\dots,y_n)$ restricted to $V_{2\nu}$ is the scalar operator
$z_{\nu}(y_1,\dots,y_n) I$, where $z_{\nu}(y_1,\dots,y_n)$ is the zonal
polynomial indexed by $\nu$.
\end{thm}
\subsection{The column span of $P$}
\label{s:Trescol}
In order to analyze $M$ and ${\cal M}$, we will consider how $T_k(x)$ and
${\cal T}_k(y_1,\dots,y_n)$ act
on the column span of $P$. It turns out that this span is the same as the
space $e(V_{\lamu}) = \langle e(\epsilon): \epsilon \in F_{\lamu} \rangle$,
where $e$ is defined in equation (\ref{e:edefn}).
We will show, in fact, that the columns of $P$ form a basis for this space.
\begin{defn}
\label{d:increasing}
We say that a matching $\epsilon$ of shape $\lamu$ is
{\it row (column) increasing} if every pair $x <_s y \in [\lamu]$,
that lie in the same row (column), satisfies $\epsilon(x) <_h \epsilon(y)$.
\end{defn}
\begin{rem}
By Lemma \ref{l:equiv}, a matching $\delta$ of shape $\lamu$ is standard if and
only if it is both row and column increasing.
\end{rem}
\begin{lem}
\label{l:rowinc}
For any matching $\epsilon \in F_{\lamu}$, there exists a row
permutation $\tau \in R_{\lamu}$, such that $\tau \epsilon$ is row
increasing.
\end{lem}
\begin{proof}
For this proof we use the description of a matching as a 1-factor. For every
$i$, we can choose a permutation $\pi^{(i)}$ which permutes row $i$ as follows.
All the boxes in row $i$ that are attached to row $1$ are moved to the far
left of row $i$. The boxes that are attached to row $2$ are moved to the
right of those attached to row $1$ but to the left of every other box, and
so on. So in $\pi^{(i)} \epsilon$, if $x$ is to the left of $y$ in row $i$,
then
\begin{equation}
\row(\pi^{(i)} \epsilon (x)) \leq \row(\pi^{(i)} \epsilon (y)).
\end{equation}
If $j \neq i$, then $\pi^{(i)}$ has no effect on the row to which any
box in row $j$ is attached. Hence, if we let
\begin{equation}
\pi = \pi^{(1)} \pi^{(2)} \dots ,
\end{equation}
then $\pi \epsilon$ is a matching such that if $x <_s y$ are in the same row
then
\begin{equation}
\label{e:block}
\row(\pi \epsilon (x)) \leq \row(\pi \epsilon(y)).
\end{equation}
Let $D_{ij}$ be the set of boxes in row $i$ that are adjacent to a box in
row $j$ in $\pi\epsilon$. Note that $\epsilon(D_{ij})=D_{ji}$. Equation
(\ref{e:block}) implies that $D_{ij}$ and $D_{ji}$ are sets of consecutive
boxes in their respective rows.
We can assume that $i \leq j$ here, and choose a permutation $\sigma^{(ij)}$,
of $D_{ij}$ such that the $k$th box from the left of $D_{ij}$ is attached to
the $k$th box from the right of $D_{ji}$ in $\sigma^{(ij)} \pi \epsilon$.
It is clear that $\sigma^{(ij)} \pi \epsilon$ restricted to
$D_{ij} \cup D_{ji}$ is row increasing.
Furthermore, $\sigma^{(ij)}$ has no effect on
$[\lamu] \setminus (D_{ij} \cup D_{ji})$, so if we let
\begin{equation}
\sigma = \prod_{i \leq j} \sigma^{(ij)},
\end{equation}
then $\sigma \pi \epsilon$ is row increasing everywhere. Therefore,
$\tau = \sigma \pi$ is the desired permutation.
\end{proof}
Now, if $\tau \in R_{\lamu}$, it is clear from the definition of $e$ in
(\ref{e:edefn}) that $e(\tau \epsilon) = e(\epsilon)$. This fact, together
with Lemma \ref{l:rowinc} implies that we
can reduce our spanning set for $e(V_{\lamu})$ as follows
\begin{equation}
\label{e:Wspan}
e(V_{\lamu}) = \langle e(\epsilon): \epsilon \in F_{\lamu}, \epsilon \ \
\text{row increasing} \rangle
\end{equation}
Suppose that the matching $\epsilon$ of shape $\lamu$ is row increasing, but
not column increasing. Then there is a box $x$ lying immediately above a
box $y$ such that $\epsilon(x) >_h \epsilon(y)$. Let $A$ be the set of
boxes in $[\lamu]$ that are in the same row as $y$, and at least as far
West as $y$. Let $B$ be the set of boxes in $[\lamu]$ that are in the
same row as $x$, and at least as far East as $x$.
\begin{figure}[!htb]
%%\centerline{\psfig{figure=garnir.eps,height=1.2in}}
\begin{center}
\begin{minipage}{7cm}
\epsfxsize=7cm
\epsffile{garnir.eps}
\end{minipage}
\end{center}
\end{figure}
Let $\Sym(A \cup B)$
denote the symmetric group on the set $A \cup B$. It is a well known fact
that the following relation, called the Garnir relation, holds in the group
algebra $\R S_\lamu$.
\begin{thm}
\label{t:Garnir}
(Garnir relation) Let $\lamu$ be a skew shape with $A, B \subset [\lamu]$
defined as above. Then
$$\sum_{\pi \in \Sym(A \cup B)} e \pi = 0.$$
\end{thm}
It is useful to observe here that a row increasing matching $\epsilon$ is
completely determined by $T(\epsilon)$ (see Definition \ref{d:T}). Define
$\epsilon_{i,j}$ to be the number of boxes in row $i$ that are attached to
a box in row $j$ in $\epsilon$. Since $\epsilon$ is row
increasing, the boxes in row $i$ that are attached to row $j$ occur together
in a block. Thus, it is not hard to see that a row increasing matching
$\epsilon$, is uniquely determined by the numbers $\epsilon_{i,j}$, or
equivalently by $T(\epsilon)$ because $\epsilon_{i,j}$ is also equal to
the number of $j$'s in row $i$ of $T(\epsilon)$. Note that for all
$i$ and $j$ we have $\epsilon_{i,j} = \epsilon_{j,i}$.
\begin{lem}
\label{l:pidec}
Suppose that the matching $\epsilon$ of shape $\lamu$ is row increasing, and
that $x$ lies immediately above $y$ in $[\lamu]$ with
$\epsilon(x) >_h \epsilon(y)$. Define the subsets $A ,B \subset [\lamu]$ as
above. If $\pi \in \Sym(A \cup B)$, let $\overline{\pi \epsilon}$ be the row
increasing matching obtained from $\pi \epsilon$ using Lemma \ref{l:rowinc}.
Then $T(\overline{\pi \epsilon}) \preceq T(\epsilon)$.
\end{lem}
\begin{proof}
By the definitions of $A$ and $B$, if
$a \in A$ and $b \in B$, then $\epsilon(a) <_h \epsilon(b)$.
Suppose that $x$ is in row $i$ (so $y$ is in row $i+1$), and suppose that
the first difference between $w_{T(\epsilon)}$ and
$w_{T(\overline{\pi \epsilon})}$ occurs in row $j$.
If $j < i$, then the only boxes in row $j$ that can be affected by $\pi$ are
those that are attached in $\epsilon$ to boxes in rows $i$ and $i+1$. Thus,
for all $k \neq i,i+1$, we have
$(\overline{\pi \epsilon})_{j,k} = \epsilon_{j,k}$.
It follows that
\begin{equation}
\label{e:pidec1}
(\overline{\pi \epsilon})_{j,i} + (\overline{\pi \epsilon})_{j,i+1}
= \epsilon_{j,i} + \epsilon_{j,i+1}.
\end{equation}
Since the two words differ in row $j$, we must have
$(\overline{\pi \epsilon})_{j,i} \neq \epsilon_{j,i}$. If
\begin{equation}
\label{e:pidec2}
(\overline{\pi \epsilon})_{j,i} < \epsilon_{j,i}
\end{equation}
then there must be at least one box in row $j$ that is attached in
$\epsilon$ to $B$. Obviously, this means that there is a box in $B$ that is
attached to row $j$.
The observation in the first paragraph of the proof implies that in
$\epsilon$, the boxes in
$A$ must be attached to row $j$ or above. If (\ref{e:pidec2}) holds, then by
(\ref{e:pidec1}) we have
$(\overline{\pi \epsilon})_{j,i+1} > \epsilon_{j,i+1}$, which implies
\begin{equation}
(\overline{\pi \epsilon})_{i+1,j} > \epsilon_{i+1,j}.
\end{equation}
This in turn implies that for some $k < j$, the number of boxes in row
$i+1$ that are attached to row $k$ must be smaller in $\overline{\pi \epsilon}$
than in $\epsilon$, i.e.
\begin{equation}
(\overline{\pi \epsilon})_{k,i+1} = (\overline{\pi \epsilon})_{i+1,k}
< \epsilon_{i+1,k} = \epsilon_{k,i+1},
\end{equation}
which contradicts the assumption that $j$ is the first row in which
$w_{T(\epsilon)}$ and $w_{T(\overline{\pi \epsilon})}$ differ.
We conclude that
\begin{equation}
(\overline{\pi \epsilon})_{j,i} > \epsilon_{j,i}
\end{equation}
which implies that $T(\overline{\pi \epsilon}) \prec T(\epsilon)$.
If $j = i$, then we know that for all $k < i$, we have
\begin{equation}
(\overline{\pi \epsilon})_{i,k} = (\overline{\pi \epsilon})_{k,i}
= \epsilon_{k,i} = \epsilon_{i,k},
\end{equation}
and
\begin{equation}
(\overline{\pi \epsilon})_{i+1,k} = (\overline{\pi \epsilon})_{k,i+1}
= \epsilon_{k,i+1} = \epsilon_{i+1,k}.
\end{equation}
If
\begin{equation}
\label{e:pidec3}
(\overline{\pi \epsilon})_{i,i} < \epsilon_{i,i}
\end{equation}
then there must be at
least one box in $B$ which is attached in $\epsilon$ to another box in
row $i$. Hence, by the observation in the first paragraph, every box in
$A$ must be attached in $\epsilon$ to row $i$ or above.
So, for all $k > i+1$, the only boxes in $A \cup B$ that are
attached in $\epsilon$ to row $k$ are in $B$. It is easy to see that for
all $k > i+1$
\begin{equation}
\label{e:pidec4}
(\overline{\pi \epsilon})_{i,k} \leq \epsilon_{i,k}.
\end{equation}
It follows that
\begin{equation}
\label{e:pidec5}
(\overline{\pi \epsilon})_{i,i} + (\overline{\pi \epsilon})_{i,i+1}
\geq \epsilon_{i,i} + \epsilon_{i,i+1}.
\end{equation}
If (\ref{e:pidec3}) holds, then by (\ref{e:pidec5}) we must have
\begin{equation}
(\overline{\pi \epsilon})_{i+1,i} = (\overline{\pi \epsilon})_{i,i+1}
> \epsilon_{i,i+1} = \epsilon_{i+1,i}.
\end{equation}
This implies that for some $k < i$, the number of boxes in row
$i+1$ that are attached to row $k$ must be smaller in $\overline{\pi \epsilon}$
than in $\epsilon$, i.e.
\begin{equation}
(\overline{\pi \epsilon})_{k,i+1} = (\overline{\pi \epsilon})_{i+1,k}
< \epsilon_{i+1,k} = \epsilon_{k,i+1},
\end{equation}
which contradicts the assumption that $i$ is the first row in which
$w_{T(\epsilon)}$ and $w_{T(\overline{\pi \epsilon})}$ differ.
We conclude that
\begin{equation}
(\overline{\pi \epsilon})_{i,i} \geq \epsilon_{i,i}.
\end{equation}
If $(\overline{\pi \epsilon})_{i,i} > \epsilon_{i,i}$, then
$T(\overline{\pi \epsilon}) \prec T(\epsilon)$, so we can assume that
$(\overline{\pi \epsilon})_{i,i} = \epsilon_{i,i}$. In this case, by
(\ref{e:pidec5}) we know that
$(\overline{\pi \epsilon})_{i,i+1} \geq \epsilon_{i,i+1}$. If
$(\overline{\pi \epsilon})_{i,i+1} > \epsilon_{i,i+1}$, then
$T(\overline{\pi \epsilon}) \prec T(\epsilon)$, so we can assume that
$(\overline{\pi \epsilon})_{i,i+1} = \epsilon_{i,i+1}$. But now, (\ref{e:pidec4})
implies that $(\overline{\pi \epsilon})_{i,k} = \epsilon_{i,k}$ for all
$k > i+1$. This means that $w_{T(\epsilon)}$ and
$w_{T(\overline{\pi \epsilon})}$ are the same through row $i$, i.e. $j > i$.
If $j = i+1$, then for all $k \leq i$ we have
\begin{equation}
(\overline{\pi \epsilon})_{i+1,k} = \epsilon_{i+1,k}.
\end{equation}
Furthermore, for all $k > i+1$, the number of boxes in $A \cup B$ that
are attached to row $k$ is the same in both $\overline{\pi \epsilon}$ and
$\epsilon$. By assumption, the number of boxes in row $i$ that are attached
to row $k$ is the same in $\overline{\pi \epsilon}$ and $\epsilon$, so the same
is true in row $i+1$. It follows that
$(\overline{\pi \epsilon})_{i+1,i+1} = \epsilon_{i+1,i+1}$ as well, which
means that $w_{T(\epsilon)}$ and $w_{T(\overline{\pi \epsilon})}$ are the
same through row $i+1$, contradicting our assumption.
Similarly, we can show that $j$ cannot be greater than $i+1$, because in
that case we have
\begin{equation}
\label{e:pidec6}
(\overline{\pi \epsilon})_{k,l} = \epsilon_{k,l}
\end{equation}
whenever either $k \leq i+1$, or $l \leq i+1$. If $k > i+1$ and $l > i+1$,
then (\ref{e:pidec6}) holds as well since $\pi$ has no effect on edges
between rows $k$ and $l$ in that case. We conclude that if there is a
difference between $w_{T(\epsilon)}$ and $w_{T(\overline{\pi \epsilon})}$,
then it must occur in row $i$ or above, and we have already shown that
the lemma holds in that case.
\end{proof}
\begin{thm}
\label{t:Wbasis}
The set $\{ e(\delta): \delta \in A_\lamu \}$ forms a basis for the vector
space $e(V_{\lamu})$.
\end{thm}
\begin{proof}
We already know that the set of $e(\epsilon)$ such that $\epsilon$ is a
row increasing matching of shape $\lamu$ spans $e(V_{\lamu})$ (see
(\ref{e:Wspan})). We can improve this slightly as follows:
\begin{equation}
e(V_{\lamu}) = \langle e(\epsilon): \epsilon \in F_{\lamu}, \epsilon \ \
\text{row increasing}, e(\epsilon) \neq 0 \rangle
\end{equation}
Define $W_\lamu = \langle e(\delta): \delta \in A_{\lamu} \rangle$. We
will induct on the order $\prec$, to show that $e(\epsilon) \in W_\lamu$
for all row increasing matchings $\epsilon$ of shape $\lamu$. If
$e(\epsilon) = 0$ for all such matchings, then this is trivially true. If
not, let $\epsilon_0$ be the row increasing matching that minimizes
$T(\epsilon_0)$ with respect to $\prec$ among the row increasing matchings
$\epsilon$ satisfying $e(\epsilon) \neq 0$.
We will show that $\epsilon_0$ is a standard matching of shape $\lamu$.
If not, then there is some box $x$ and a box $y$ immediately below $x$
with $\epsilon_0(x) >_h \epsilon_0(y)$. Define the sets $A$ and $B$ as in
Lemma \ref{l:pidec}. Let $C$ be the number of permutations
$\pi \in \Sym(A \cup B)$ such that $\overline{\pi \epsilon_0} = \epsilon_0$. Note that $C \neq 0$, since the identity clearly works. Furthermore, note
that for any matching $\epsilon$ we have
\begin{equation}
e(\overline{\epsilon}) = e(\epsilon).
\end{equation}
This follows from the definition of $e$, and the fact that
$\overline{\epsilon}$ differs from $\epsilon$ by a row permutation.
Using the Garnir relation (Theorem \ref{t:Garnir}) we now obtain
\begin{equation}
\label{e:eps0}
\begin{split}
C e(\epsilon_0) &= - \sum_{\pi \in \Sym(A \cup B),
\overline{\pi \epsilon_0} \neq \epsilon_0}
e(\pi \epsilon_0) \\
&= - \sum_{\pi \in \Sym(A \cup B),\overline{\pi \epsilon_0} \neq \epsilon_0}
e(\overline{\pi \epsilon_0}). \\
\end{split}
\end{equation}
By Lemma \ref{l:pidec}, each $\overline{\pi \epsilon_0}$ that appears in the
right hand side of (\ref{e:eps0}) satisfies
$T(\overline{\pi \epsilon_0}) \prec T(\epsilon_0)$, so by our choice of
$\epsilon_0$, we have $e(\overline{\pi \epsilon_0}) = 0$. Hence,
\begin{equation}
e(\epsilon_0) = 0,
\end{equation}
which contradicts our choice of $\epsilon_0$. We deduce that there can be
no such boxes $x$ and $y$, and therefore $\epsilon_0$ must be a standard
matching, i.e. $\epsilon_0 \in A_\lamu$, and $e(\epsilon_0) \in W_\lamu$.
Now, if $\epsilon$ is a row increasing matching with
$T(\epsilon) \succ T(\epsilon_0)$, such that $\epsilon \notin A_\lamu$, then
there is some box $x$ immediately above a box $y$ with
$\epsilon(x) >_h \epsilon(y)$. Define $A$ and $B$ as before, and let $C$ be
the number of permutations $\pi \in \Sym(A \cup B)$ such that
$\overline{\pi \epsilon} = \epsilon$. Note that $C \neq 0$. Using the
Garnir relation we obtain
\begin{equation}
\label{e:eps}
C e(\epsilon) = - \sum_{\pi \in \Sym(A \cup B),
\overline{\pi \epsilon} \neq \epsilon}
e(\pi \epsilon).
\end{equation}
By Lemma \ref{l:pidec}, every $\overline{\pi \epsilon}$ that appears in the
right hand side of (\ref{e:eps}) satisfies
$T(\overline{\pi \epsilon}) \prec T(\epsilon)$, so by induction every term
on the right hand side is in $W_\lamu$, which implies that
$e(\epsilon) \in W_\lamu$ as well.
We have shown that $e(V_\lamu) = W_\lamu$. It remains only to show that
the set $\{ e(\delta) : \delta \in A_\lamu \}$ is linearly independent.
Suppose that there is a linear relation
\begin{equation}
\sum_{\delta \in A_\lamu} \alpha_\delta e(\delta) = 0.
\end{equation}
We want to show that $\alpha_\delta = 0$ for all $\delta \in A_\lamu$.
If not, then let $\delta_0$ be the standard matching that maximizes
$T(\delta_0)$ among those $\delta \in A_\lamu$ with $\alpha_\delta \neq 0$.
In the proof of Lemma \ref{l:detN} below, we show that for any standard
matching $\delta$, $e(\delta)$ has a non-zero $\delta$ coefficient in
$V_\lamu$, and that if $\delta_1$ and $\delta_2$ are two standard matchings
with $T(\delta_1) \prec T(\delta_2)$, then the $\delta_2$ coefficient of
$e(\delta_1)$ is zero.
In particular, for any $\delta \in A_\lamu$ such that
$T(\delta) \prec T(\delta_0)$, the $\delta_0$ coefficient of $e(\delta)$ is
zero. Since the $\delta_0$ coefficient of $e(\delta_0)$ is not zero, we
must have $\alpha_{\delta_0} = 0$, contradicting our choice of $\delta_0$.
Hence, we must have $\alpha_\delta = 0$ for all $\delta \in A_\lamu$.
\end{proof}
Another way to state Theorem \ref{t:Wbasis} is that the columns of $P$
form a basis for the space $e(V_{\lamu})$.
\subsection{Computation of $\det M$ and $\det {\cal M}$}
\label{s:detM}
The computations of $\det M$ and $\det {\cal M}$ are virtually identical, so
we will only compute $\det M$ explicitly, and merely state the corresponding
result for $\det {\cal M}$.
Suppose that $v \in V_{2\nu}$ is an eigenvector of $T_k(x)$ with
eigenvalue $h_{2\nu}(x)$. Since
$T_k(x)$ commutes with the action of $S_{\lamu}$, we have
\begin{equation}
\begin{split}
T_k(x) (e (v)) &= e (T_k(x) v) \\
&= e (h_{2\nu}(x) v) \\
&= h_{2\nu}(x) e (v). \\
\end{split}
\end{equation}
i.e. $e (v)$ is also an eigenvector of $T_k(x)$ with eigenvalue $h_{2\nu}(x)$.
Furthermore, it is not hard to see that the $h_{2\nu}(x)$ are all distinct, so
we must have $e (v) \in V_{2\nu}$. We have shown that
\begin{equation}
e( V_{2\nu}) \subseteq V_{2\nu}.
\end{equation}
It now follows that
\begin{equation}
\begin{split}
e(V_\lamu) &= e \left[ \bigoplus_{2\nu \vdash 2k} V_{2\nu} \right] \\
&= \bigoplus_{2\nu \vdash 2k} e(V_{2\nu}). \\
\end{split}
\end{equation}
Let $d_\nu = d^\lambda_{\mu \nu}$ be the dimension of $e(V_\nu)$ (thus, if
$\nu$ is not even then $d_\nu = 0$).
For every even partition $\nu \vdash 2k$, let
$B^\nu = \{ e(v_1^\nu), \dots , e(v_{d_\nu}^\nu) \}$ be a basis for $e (V_\nu)$.
Let $B_\lamu = \bigcup B^\nu$. We now have two
bases for $e(V_\lamu)$, namely $e(A_\lamu)$, and $B_\lamu$.
Let $Q$ be the
$F_\lamu \times B_\lamu$ matrix whose column indexed by $e(v_i^\nu)$ is the
expansion of $e(v_i^\nu)$ in $V_\lamu$ in terms of the basis $F_\lamu$.
Let $S$ be the $A_\lamu \times B_\lamu$ matrix such that
\begin{equation}
PS = Q.
\end{equation}
The matrix $S$ is an invertible transition matrix from
the basis $e(A_\lamu)$, to the basis $B_\lamu$ for $e(V_\lamu)$.
The importance of the matrix $Q$ is the fact that its columns are eigenvectors
for $T_k(x)$. Thus,
\begin{equation}
(T_k(x) Q)_{e(v_i^\nu)} = h_\nu(x) Q_{e(v_i^\nu)}.
\end{equation}
This is equivalent to saying that $T_k(x) Q = Q D$, where $D$ is a
$B_\lamu \times B_\lamu$ diagonal matrix whose diagonal entry in the column
indexed by $e(v_i^\nu)$, is $h_\nu(x)$.
We can use these facts to study the product formulation of the matrix $M$ as
follows:
\begin{equation}
\begin{split}
S^t M &= S^t P^t T_k(x) J \\
&= Q^t T_k(x) J \\
&= D Q^t J \\
&= D S^t P^t J. \\
\end{split}
\end{equation}
From this we obtain
\begin{equation}
\label{e:detMprod}
\begin{split}
\det M &= \det D \det (P^t J) \\
&= \det (P^t J) \prod_{2\nu \vdash 2k} h_{2\nu}(x)^{d_{2\nu}}. \\
\end{split}
\end{equation}
Let $N = P^t J$. Then, $N$ is an $A_\lamu \times A_\lamu$ matrix, and
$\det(N)$ is a scalar because the matrices $P$ and $J$ both have entries in
$\R$. It is not hard to see that the $(\delta_i,\delta_j)$ entry of $N$ is
the coefficient of $\delta_j$ in $e(\delta_i)$. We have
\begin{equation}
\begin{split}
N_{ij} = N_{\delta_i, \delta_j} &= \sum_{\sigma \in C_{\lambda / \mu}} \
\sum_{\tau \in R_{\lambda / \mu}}
\sgn(\sigma) \chi(\sigma \tau \delta_i = \delta_j) \\
&= \sum_{\sigma \in C_{\lambda / \mu}} \
\sum_{\tau \in R_{\lambda / \mu}}
\sgn(\sigma) \chi(\tau \delta_i = \sigma \delta_j) \\
\end{split}
\end{equation}
where $\chi($statement$) = 1$ if the statement is true and $0$ otherwise.
\begin{notation}
Let $R_\delta$ (resp.
$C_\delta$) be the subgroup of $R_{\lamu}$ (resp. $C_{\lamu}$) that fixes
the standard matching $\delta$.
\end{notation}
\begin{lem}
\label{l:detN}
$$\det N = \prod_{\delta \in A_\lamu} |R_\delta| |C_\delta|.$$
\end{lem}
\begin{proof}
Renumber the standard matchings so that they appear in increasing order
with respect to $\prec$. Note that for any row permutation $\tau$ of
$[\lamu]$, and any matching $\epsilon$ of shape $\lamu$,
$\overline{T(\tau \epsilon)} = \overline{T(\epsilon)}$.
If $N_{ij} \neq 0$, then for some $\tau \in R_{\lamu}$, and some
$\sigma \in C_{\lamu}$, we have $\tau \delta_i = \sigma \delta_j$. By
Theorem \ref{t:order} we have
\begin{equation}
\label{e:triangular}
\begin{split}
\overline{T(\delta_j)} & \preceq \overline{T(\sigma \delta_j)} \\
& = \overline{T(\tau \delta_i)} \\
& = \overline{T(\delta_i)}. \\
\end{split}
\end{equation}
Suppose that either
\begin{equation}
\label{e:cviolates}
\delta(c) = \delta'(c) >_h \delta'(y')=z,
\end{equation}
or
\begin{equation}
\label{e:dviolates}
\delta(d) = \delta'(d) >_h \delta'(y')=z,
\end{equation}
and find that these assumptions lead to a contradiction.
We need
to define two sets contained in $D$. Let $A$ be the set of boxes
in the same row as $z$ that are strictly West of $z$ and contained in $D$. Let
$B$ be the set of boxes in the same row as $y$ that are strictly East of $y$
and contained in $D$. Observe that $A \cup B = (z,y)_{<_h} \cap D$.
\pagebreak
\begin{figure}[!htb]
%%\centerline{\psfig{figure=JdT1.eps,height=1.8in}}
\begin{center}
\begin{minipage}{10cm}
\epsfxsize=10cm
\epsffile{JdT1.eps}
\end{minipage}
\end{center}
\end{figure}
By the standardness
of $\delta$, we know that $\delta(c), \delta(d) <_h \delta(y') = y$. This
fact, together with (\ref{e:cviolates}) or (\ref{e:dviolates}) implies
that either
\begin{equation}
z <_h \delta(c) <_h y
\end{equation}
or
\begin{equation}
z <_h \delta(d) <_h y.
\end{equation}
In other words, we must have $\delta(c) \in A \cup B$ or
$\delta(d) \in A \cup B$. We now break into cases to show that these cannot
happen.
\
\noindent {\bf Case 1:} $\delta(c)$ or $\delta(d) \in B$:
Suppose $f = \delta(d) \in B$. By Lemma \ref{l:UsefulFact}, since $z$ and $f$
are in the skew shape, and $z <_s f$, $x$ is in the skew shape as well.
Hence $x \in D$.
But now, $x <_s f$ so we must have
\begin{equation}
\delta(x) <_h \delta(f) = d.
\end{equation}
But this implies that $\delta(x) \leq_h y' = \delta(y)$, a contradiction, since
we are moving $y \rightarrow z$, and hence $\delta(y) <_h \delta(x)$.
The same argument shows that $\delta(c) \notin B$.
\
\noindent {\bf Case 2:} $\delta(d) \in A$:
In this case $\delta(d) <_s y$, but $\delta(\delta(d)) = d >_h y' = \delta(y)$, contradicting the standardness of $\delta$.
\
\noindent {\bf Case 3:} $\delta(c) \in A$:
This case is more complicated. We will show that $\delta(c) \in A$ implies that
there is an infinite sequence of distinct elements in $A$. Essentially, a
cycle of elements in $D$ is created (some of which are in $A$) that can never
end.
Before we begin with this part of the proof we need a little more notation.
Define $F$ to be the set of boxes in the same
row as $c$ that are strictly West of $c$ and contained in $D$. Let $G$ be the
set of boxes in the same row as $y'$ that are strictly East of $y'$ and
contained in $D$. We have $F \cup G = (c,y')_{<_h} \cap D$.
\begin{figure}[!htb]
%%\centerline{\psfig{figure=JdT2.eps,height=1.6in}}
\begin{center}
\begin{minipage}{10cm}
\epsfxsize=10cm
\epsffile{JdT2.eps}
\end{minipage}
\end{center}
\end{figure}
Let $r_1 = \delta(c) \in A$, and let $s_1$ be the box directly below $r_1$.
By Lemma \ref{l:UsefulFact}, we have $s_1 \in D$ (using $r_1 <_s y$). Let
$s_1' = \delta(s_1)$. Since $r_1 <_s s_1 <_s y$ we must have
\begin{equation}
c <_h s_1' <_h y',
\end{equation}
i.e. $s_1' \in F \cup G$.
But, if $s_1' \in F$ then we have $s_1' <_s c$, and $s_1 >_h r_1 = \delta(c)$
which contradicts the standardness of $\delta$. Thus, $s_1' \in G$.
Now, for $i = 2,3, \dots $ we define the following: let $r_i'$ be the box
directly above $s_{i-1}'$, let $r_i = \delta(r_i')$, let $s_i$ be the box
directly below $r_i$ and let $s_i' = \delta(s_i)$.
\
\noindent {\bf Claim.}
{\it
For all $i$, $r_i \in A$, and $s_i' \in G$. Moreover,
$$r_1 <_h r_2 <_h r_3 <_h \cdots$$
and
$$s_1' >_h s_2' >_h s_3' >_h \cdots$$}
\begin{proof}
We proceed by induction on $i$. We have already shown that $r_1 \in A$ and
$s_1' \in G$. Suppose now that $r_k \in A$ and $s_k' \in G$ for all
$k < i$, and that $r_1 <_h \cdots <_h r_{i-1}$, and
$s_1' >_h \cdots >_h s_{i-1}'$.
First, note that since $s_{i-1}' \in G \subseteq D$, by Lemma
\ref{l:UsefulFact},
(using $c <_s s_{i-1}'$) we have that $r_i' \in D$. Note also that since
$s_{i-2}' >_h s_{i-1}'$, $r_i'$ lies to the East of $r_{i-1}'$. Thus,
$r_{i-1}' <_s r_i'$. Now, since
\begin{equation}
r_{i-1}' <_s r_i' <_s s_{i-1}',
\end{equation}
we must have
\begin{equation}
r_{i-1} <_h r_i <_h s_{i-1}.
\end{equation}
We cannot have $s_{i-1} <_s r_i$ because this would imply that
$s_{i-1}' <_h r_i'$. Since $r_i'$ lies North of $s_{i-1}'$ this
clearly can't happen. We conclude that $r_i$ must be in the same row as
$r_{i-1}$ and strictly to the West of $r_{i-1}$, i.e. $r_i \in A$, and
$r_1 <_h \cdots <_h r_i$.
Now, using Lemma \ref{l:UsefulFact} (with $r_i <_s y$) we conclude that
$s_i \in D$. Also, we have $s_i <_s s_{i-1}$ because they are in the
same row, and
$s_i$ appears to the West of $s_{i-1}$. So, since
\begin{equation}
r_i <_s s_i <_s s_{i-1}
\end{equation}
we must have
\begin{equation}
r_i' <_h s_i' <_h s_{i-1}'.
\end{equation}
We cannot have $s_i' <_s r_i'$ because this would imply that
$s_i <_h r_i$. Since $r_i$ lies North of $s_i$ this clearly can't happen.
We conclude that $s_i'$ must be in the same row as $s_{i-1}'$ and
strictly to the East of $s_{i-1}'$, i.e. $s_i' \in G$, and
$s_1' >_h \cdots >_h s_i'$.
\end{proof}
The claim just proved is an obvious contradiction because it implies that the
sizes of $A$ and $G$ are infinite. So,
$\delta'(c), \delta'(d) <_h \delta'(y')$ as desired.
We have shown that $a <_s b \in D'$ implies that $\delta'(a) <_h \delta'(b)$,
which means that $\delta'$ is a standard matching for $D'$.
\end{proof}
\subsection{Dual Knuth equivalence with JdT for tableaux}
Our objective in this section is to describe how the outputs of the two JdT
algorithms are related. It turns out that this relationship can be
expressed in terms of the dual Knuth equivalence for $S_{n}$.
The dual Knuth equivalence for $S_{n}$ is defined as follows.
Let $\sigma ,\tau \in S_{n}$. They differ by a dual Knuth
relation of the first kind
if for some $k$
\begin{equation}
\sigma = \dots k+1 \dots k \dots k+2 \dots
\end{equation}
and
\begin{equation}
\tau = \dots k+2 \dots k \dots k+1 \dots
\end{equation}
They differ by a dual Knuth relation of the second kind if for some $k$
\begin{equation}
\sigma = \dots k \dots k+2 \dots k+1 \dots
\end{equation}
and
\begin{equation}
\tau = \dots k+1 \dots k+2 \dots k \dots
\end{equation}
The permutations $\sigma$ and $\tau$ are dual Knuth equivalent (written
$\sigma \cong^{K^*} \tau$)
if one can get from $\sigma$ to $\tau$ via a sequence of dual Knuth relations
of the first and second kinds.
The following easy Lemma will simplify the proof of
Theorem~\ref{t:dKequiv}.
\begin{lem}
\label{l:dKlem}
Suppose $a,s \in \Z^+$. Then
$$a+1 \ \ a \ \ a+2 \cdots a+s
\cong ^{K^*} a+s \ \ a \ \ a+1 \cdots a+s-1,$$
and
$$a+1 \cdots a+s-1 \ \ a+s+1 \ \ a+s
\cong ^{K^*} a+2 \cdots a+s \ \ a+s+1 \ \ a+1.$$
\end{lem}
\begin{thm}
\label{t:dKequiv}
Suppose $\delta$ is a standard matching for $D$, where $D$ is a skew shape
with one box $z$
removed. Let $T'$ be the standard tableau for $D'$ obtained by performing a
NW-JdT (resp. SE-JdT) move for tableaux on $\delta$ at $z$. Let $\delta'$ be
the standard matching for $D'$
obtained by performing a NW-JdT (SE-JdT) move for matchings on $\delta$ at $z$.
Then $\pi_{T'} \cong ^{K^*} \pi_{\delta'}$.
\end{thm}
\begin{proof}
We prove this for a NW-JdT move. The proof for a SE-JdT move is essentially
the same.
Assume that $x$ is the box moved into position $z$ in both JdT algorithms,
and that $x$ is the box adjacent to $z$ and to the East. If this is the
case, then the Hebrew positions of the boxes in $D'$ remain the same as they
were in $D$, with $z$ taking on the Hebrew position in $D'$ that $x$ had in
$D$. Clearly, in this case, we have $\pi_\delta = \pi_{\delta'} = \pi_{T'}$,
so $\pi_{T'} \cong ^{K^*} \pi_{\delta'}$ trivially.
On the other hand, if $y$ is moved into position $z$, and $y$ is the box
immediately below $z$, then the Hebrew position of each box in $(z,y)_{<_h}$
increases by one, and the Hebrew position of $z$ is the Hebrew position of
$y$ minus the number of boxes in $(z,y)_{<_h}$. This means that $\delta'$ is
numerically given by
\begin{equation}
\label{e:dprimenum}
\delta' (b) =
\begin{cases}
\delta(b) & b \neq z,\ \delta(b) \notin (z,y]_{<_h},\\
\delta(b)+1 & \delta(b) \in (z,y)_{<_h},\\
\delta(y) & b = z,\\
y- |(z , y)_{<_h}| & \delta(b) = y.
\end{cases}
\end{equation}
Compare this to $T'$, for which
\begin{equation}
\label{e:Tprimenum}
T' (b) =
\begin{cases}
\delta(b) & b \neq z,\\
\delta(y) & b = z.\\
\end{cases}
\end{equation}
Observe that the places where $T'$ and $\delta'$ can differ are exactly those
$b \in D'$ that satisfy $\delta(b) \in (z , y]_{<_h}$. Thus, the row words
$\pi_{T'}$ and $\pi_{\delta'}$ are the same except at the positions
corresponding to those $b \in D'$ such that $\delta(b) \in (z,y]_{<_h}$.
We define four subsets of $D'$ as follows:
\
Let $B$ be the set of boxes in $D$ strictly West of $z$ and in the same row
as $z$.
Let $G$ be the set of boxes in $D$ that have a box in $B$ immediately above.
Let $F$ be the set of boxes in $D$ strictly East of $x$ and in the same row
as $x$.
Let $A$ be the set of boxes in $D$ that have a box in $F$ immediately below.
\
\noindent Write
\begin{equation}
\begin{split}
A &= \{ a_1 <_h a_2 <_h \dots <_h a_r \} \\
B &= \{ b_1 <_h b_2 <_h \dots <_h b_l \} \\
F &= \{ f_1 <_h f_2 <_h \dots <_h f_r \} \\
G &= \{ g_1 <_h g_2 <_h \dots <_h g_l \} \\
\end{split}
\end{equation}
\begin{figure}[!htb]
%%\centerline{\psfig{figure=JdT3.eps,height=1.3in}}
\begin{center}
\begin{minipage}{10cm}
\epsfxsize=10cm
\epsffile{JdT3.eps}
\end{minipage}
\end{center}
\end{figure}
Note that $B \cup F = (z , y)_{<_h}$, and
observe that $M = A \cup B \cup F \cup G \cup \{ z \} $ is a $<_h$ interval
in $D'$. By the comments above, if $\delta(b) \notin M$, then
numerically, $\delta'(b) = T'(b)$. For convenience, we will shift the
numbering of all the boxes in
$D'$ so that the boxes of $M$ are numbered $1 \dots |M|$. So, the tableaux
$T'$ and $\delta'$ may have nonpositive numbers in them, but the subsequences
$\sigma_{T'}$ of $\pi_{T'}$, and $\sigma_{\delta'}$ of $\pi_{\delta'}$,
corresponding to the positions $b$ with $\delta(b) \in M$, will be permutations
of $\{ 1 \dots |M| \}$.
We have the following inequalities:
\begin{equation}
T'(g_1) < T'(z) < T'(a_r)
\end{equation}
and
\begin{equation}
\delta'(g_1) < \delta'(z) < \delta'(a_r).
\end{equation}
The only inequality that isn't obvious here is $\delta'(g_1) < \delta'(z)$.
One can see this by noting that $\delta(g_1) < \delta(y) = \delta'(z)$, and
that $\delta'(g_1) \leq \delta(g_1) + 1$ (this follows from equations
(\ref{e:dprimenum}) and (\ref{e:Tprimenum})), so we must have
$\delta'(g_1) \leq \delta'(z)$. But equality is not allowed, so
$\delta'(g_1) < \delta'(z)$.
Now, for any tableau $U$, if $c <_h d$, then $U(d)$ comes before $U(c)$ in
the row word $\pi_U$. In our case, if $a \in B \cup G$ and
$b \in A \cup F$, then we have
\begin{equation}
\delta'(a) \leq_h \delta'(g_1) <_h \delta'(z) <_h \delta'(a_r)
\leq_h \delta'(b),
\end{equation}
which implies that $b=\delta'(\delta'(b))$ occurs before
$a=\delta'(\delta'(a))$ in $\pi_{\delta'}$, and $z$ occurs between them.
Furthermore, $\delta'(b_i) <_h \delta'(g_i)$ so $g_i$ occurs before $b_i$ in
$\pi_{\delta'}$. Similarly, $f_i$ occurs before $a_i$ in $\pi_{\delta'}$.
Finally, the $a_i$'s, $b_i$'s, $f_i$'s and $g_i$'s all occur in increasing
order of their subscripts. So, the subsequence $\sigma_{\delta'}$ looks like
\begin{equation}
\label{e:sigmadeltaprime}
\sigma_{\delta'} =
\begin{matrix}
f_1 \dots f_r \\ a_1 \dots a_r
\end{matrix}
\enskip
z \enskip
\begin{matrix}
g_1 \dots g_l \\ b_1 \dots b_l
\end{matrix}
\end{equation}
where the notation used is interpreted as follows: if $a$ sits directly above
$b$ then $a$ comes before $b$. If $a$ is to the left of $b$ and in the same row
as $b$, then $a$ comes before $b$. All $f_i$'s and $a_i$'s come before $z$.
All $g_i$'s and $b_i$'s come after $z$.
\begin{example}
\begin{equation*}
7 \ 8 \ 1 \ 2 \ 9 \ 3 \ 4 \ 10 \ 5 \ 11 \ 6
=\begin{matrix}
7 & 8 & 9 \\ 1 & 2 & 3
\end{matrix}
\enskip
4 \enskip
\begin{matrix}
10 & 11 \\ 5 & 6
\end{matrix}
\end{equation*}
whereas
\begin{equation*}
7 \ 1 \ 2 \ 8 \ 9 \ 3 \ 4 \ 10 \ 11 \ 5 \ 6
\neq \begin{matrix}
7 & 8 & 9 \\ 1 & 2 & 3
\end{matrix}
\enskip
4 \enskip
\begin{matrix}
10 & 11 \\ 5 & 6
\end{matrix}
\end{equation*}
because $2$ comes before $8$.
\end{example}
Of course, there are many permutations that are of this form, so
in the future when we have an equivalence between two permutations denoted
this way, we will always assume that the order in which we read off the
letters for the two permutations is the same.
Numerically we have
\begin{equation}
\sigma_{\delta'} =
\begin{matrix}
r+l+2 & \dots & 2r+l+1 \\ 1 & \dots & r
\end{matrix}
\enskip
r+1 \enskip
\begin{matrix}
2r+l+2 & \dots & 2r+2l+1 \\ r+2 & \dots & r+l+1
\end{matrix}
\end{equation}
Using equations (\ref{e:dprimenum}), (\ref{e:Tprimenum}) and
(\ref{e:sigmadeltaprime}), we find that
\begin{equation}
\sigma_{T'} =
\begin{matrix}
r+l+1 & \dots & 2r+l \\ 1 & \dots & r
\end{matrix}
\enskip
2r+l+1 \enskip
\begin{matrix}
2r+l+2 & \dots & 2r+2l+1 \\ r+1 & \dots & r+l
\end{matrix}
\end{equation}
To prove the theorem, it suffices to show that
$\sigma_{T'} \cong^{K^*} \sigma_{\delta'}$. We do this by induction on
$r + l$. If $r+l=0$, then $\sigma_{T'} = 1 = \sigma_{\delta'}$, and the
theorem is trivially true.
If $l>0$ then both permutations have letters to the right of the ``middle''
position. Using Lemma \ref{l:dKlem} (the sequence where the Lemma is used
is underlined), we obtain
\begin{equation}
\begin{split}
\sigma_{\delta'} &=
\begin{matrix}
\underbar{r+l+2} & \dots & \underbar{2r+l+1} \\
1 & \dots & r
\end{matrix}
\enskip
r+1 \enskip
\begin{matrix}
\underbar{2r+l+2} & \dots & \underbar{2r+2l} & \underbar{2r+2l+1} \\
r+2 & \dots & r+l & \underbar{r+l+1}
\end{matrix}
\\
\\
&\cong^{K^*}
\begin{matrix}
\underbar{r+l+1} & \dots & \underbar{2r+l} \\
1 & \dots & r
\end{matrix}
\enskip
r+1 \enskip
\begin{matrix}
\underbar{2r+l+1} & \dots & \underbar{2r+2l-1} & \underbar{2r+2l+1} \\
r+2 & \dots & r+l & \underbar{2r+2l}
\end{matrix} \\
\end{split}
\end{equation}
Now, by induction in the first step, and Lemma \ref{l:dKlem} in the second, we
have
\begin{equation}
\begin{split}
\sigma_{\delta'} &\cong^{K^*}
\begin{matrix}
\underbar{r+l} & \dots & \underbar{2r+l-1} \\
1 & \dots & r
\end{matrix}
\enskip
\begin{matrix}
\underbar{2r+l}
\end{matrix}
\enskip
\begin{matrix}
\underbar{2r+l+1} & \dots & \underbar{2r+2l-1} & \underbar{2r+2l+1} \\
r+1 & \dots & r+l-1 & \underbar{2r+2l}
\end{matrix}
\enskip
\\
\\
&\cong^{K^*}
\begin{matrix}
\underbar{r+l+1} & \dots & \underbar{2r+l} \\
1 & \dots & r
\end{matrix}
\enskip
\begin{matrix}
\underbar{2r+l+1}
\end{matrix}
\enskip
\begin{matrix}
\underbar{2r+l+2} & \dots & \underbar{2r+2l} & \underbar{2r+2l+1} \\
r+1 & \dots & r+l-1 & \underbar{r+l}
\end{matrix}
\\
\\
&= \sigma_{T'}.\\
\end{split}
\end{equation}
A symmetric argument proves that $\sigma_{\delta'} \cong^{K^*} \sigma_{T'}$ if
$r>0$.
\end{proof}
\subsection{The normal shape obtained via JdT}
\label{s:normal}
Before stating the main theorem of this section, some discussion of the
Robinson-Schensted-Knuth insertion algorithm (RSK) is necessary. What we do here will leave out many details (including the definition of RSK) and will
focus only on what is needed. We will consider only {\it row} insertion, so
RSK means RSK row insertion (and not column insertion) in the discussion
below.
RSK provides a bijection between elements of $S_{n}$ and pairs $(P,Q)$ of
standard Young tableaux of the same shape (having $n$ boxes) \cite{Schensted}.
Denote the pair obtained from the permutation $\sigma$ by
$(P(\sigma),Q(\sigma))$.
We will need several standard facts about RSK, JdT for tableaux, and dual
Knuth equivalence which we state below. For a more complete discussion, see
Sagan's book \cite{Sagan}.
\begin{fact}
\label{f:RSKinv}
RSK applied to $\sigma^{-1}$ produces the same pair of tableaux that are
produced by $\sigma$ in the reverse order, i.e.
$P(\sigma^{-1}) = Q(\sigma)$ and
$Q(\sigma^{-1}) = P(\sigma)$. Also, if $\sigma^r$ is the reverse of
$\sigma$ then $P(\sigma^r) = P(\sigma)^t$.
\end{fact}
In particular, this implies that if $\sigma$ is an involution in $S_{n}$,
then $P(\sigma) = Q(\sigma)$. In fact, RSK provides a bijection between
the set of involutions of $S_{n}$ and the set of standard tableaux of
all normal shapes $\lambda \vdash n$. This is refined even further by the
following fact, which was proved by Sch\"utzenberger.
\begin{fact}
\label{f:RSKbij}
\cite{Schutzenberger}
Let $0 \leq k \leq n$ be fixed. Then $\sigma \mapsto P(\sigma)$ is a
bijective map between involutions of $S_{n}$ with $k$ fixed points, and
standard tableaux of all normal shapes $\lambda \vdash n$ such that
$\lambda$ has exactly $k$ columns of odd length.
\end{fact}
\begin{fact}
\label{f:JdTPequiv}
Let $T$ be a standard tableau of shape $D$, which is a skew shape with one box
removed. If we use JdT to bring $T$ to some
standard tableau of normal shape $T'$, then $T' = P(\pi_T)$. Hence, if one
can get from the standard tableau $T$ to the standard tableau $T''$ via JdT,
then $P(\pi_{T''}) = P(\pi_T)$.
\end{fact}
\begin{fact}
\label{f:dKQequiv}
If $\sigma$ and $\tau$ are elements of $S_{n}$, then $\sigma \cong^{K^*} \tau$
if and only if $Q(\sigma) = Q(\tau)$.
\end{fact}
\begin{thm}
\label{t:sameshape}
Suppose $\delta$ is a standard matching of shape $D$, a skew shape with one
box $z$ removed. If we use
JdT for tableaux to bring $\delta$ to a standard tableau of normal shape
$\nu_1$, and we apply JdT for matchings to bring $\delta$ to a standard
matching of normal shape $\nu_2$, then $\nu_1 = \nu_2$. Moreover, $\nu_1$
(and hence $\nu_2$) is even.
\end{thm}
\begin{proof}
We show first that $\nu_1$ is even. By Fact \ref{f:JdTPequiv},
$\nu_1 = \sh P(\pi_\delta)$. Also, $\pi_\delta$ is the reverse
of the Hebrew word $h_\delta$ which is a fixed point free involution in
$S_{2n}$. Thus, by Fact \ref{f:RSKbij}, the columns of $P(h_\delta)$
all have even length. Finally, Fact \ref{f:RSKinv} implies that
$\nu_1 = \sh P(\pi_\delta) = \sh P(h_\delta)^t$, and therefore every row of
$\nu_1$ has even length.
Let $T'$ be the standard tableau for $D'$ obtained by a JdT move for
tableaux at $z$, and let $\delta'$ be the standard matching
for $D'$ obtained by a JdT move for matchings at $z$. The following
are consequences of Facts \ref{f:JdTPequiv} and \ref{f:dKQequiv}, and Theorem
\ref{t:dKequiv}.
\begin{equation}
\begin{split}
P(\pi_\delta) &= P(\pi_{T'}) \\
Q(\pi_{T'}) &= Q(\pi_{\delta'}).
\end{split}
\end{equation}
These imply the following string of equalities:
\begin{equation}
\begin{split}
\sh P(\pi_\delta) &= \sh P(\pi_{T'}) \\
&= \sh Q(\pi_{T'}) \\
&= \sh Q(\pi_{\delta'}) \\
&= \sh P(\pi_{\delta'}). \\
\end{split}
\end{equation}
From this, we conclude that if $\delta'$ is obtained from $\delta$ using
JdT for matchings, then $\sh P(\pi_{\delta'}) = \sh P(\pi_\delta)$. In
particular, suppose that $\delta'$ is a standard matching of normal shape
$\nu_2$. In that case
\begin{equation}
\begin{split}
\nu_2 &= \sh \delta' \\
&= \sh P(\pi_{\delta'}) \\
&= \sh P(\pi_{\delta}) \\
&= \nu_1. \\
\end{split}
\end{equation}
Here, the second equality comes from the fact that since $\sh \delta'$ is
normal, Fact \ref{f:JdTPequiv} implies that we must have
$P(\pi_{\delta'}) = \delta'$.
\end{proof}
\begin{cor}
\label{c:unique}
If the standard matching $\delta$ is taken to a standard matching $\delta'$
for a normal even shape $\nu$ via JdT, then $\delta'$ is independent of the
sequence of JdT moves chosen. In fact, $\nu = \sh P(\pi_{\delta})$, and
$\delta'$ is the unique standard matching of shape $\nu$.
\end{cor}
\begin{proof}
This follows from Theorem \ref{t:sameshape} and Corollary
\ref{c:oneofevenshape}.
\end{proof}
Because of Corollary \ref{c:unique}, we can make the following definition.
\begin{defn}
If $\delta$ is a standard matching, then let $\nu(\delta)$ denote the
shape of the standard matching of normal shape obtained from $\delta$ via JdT.
\end{defn}
For the normal shape $\nu \vdash 2n$, define $g^\lambda_{\mu \nu}$ to be the
number of times $\nu$ appears as $\nu(\delta)$ for some
$\delta \in A_\lamu$. We would like to compute these numbers
$g^\lambda_{\mu \nu}$. For this, we need a modified version of a result of
Dennis White. This version follows easily from the main result in \cite{White}.
Before we state the theorem, we need to make some definitions.
\begin{defn}
A word $w = w_1 w_2 \dots w_n$ in the alphabet $\Z^+$ is called a
{\it lattice permutation} if for every $j = 1,\dots,n$, the initial segment
$w_1 \dots w_j$ of $w$ contains at least as many $i$'s as it does $(i+1)$'s,
for every $i \in \Z^+$. The {\it weight} of $w$ is the vector
$(v_1,\dots,v_n)$, where $v_i$ is the number of $i$'s in $w$. Clearly,
the weight of a lattice permutation is always a partition.
\end{defn}
\begin{defn}
If $T$ is any standard tableau of the normal shape $\nu \vdash n$, define
$\lp(T)$ to be the word $w = w_1 \dots w_n$, where $w_i$ is the row in which
$i$ appears in $T$. Clearly, $\lp(T)$ is a lattice permutation of weight
$\nu$. On the other hand, given a lattice permutation $w$ of weight $\nu$,
there is a unique standard tableau $T$ of shape $\nu$, satisfying
$\lp(T) = w$.
\end{defn}
\begin{defn}
\label{d:LRfilling}
A tableau $T$ of shape $\lamu$ is called {\it semistandard}, if $T$ strictly
increases down columns, and weakly increases from left to right along rows.
A semistandard tableau $T$ is called a {\it Littlewood-Richardson filling}
({\it LR filling}) if $h_T$ is a lattice permutation. Let
$c^\lambda_{\mu \nu}$ denote the number of LR fillings $T$ of shape $\lamu$
such that $h_T$ has weight $\nu$. It is a well-known fact that the number
$c^\lambda_{\mu \nu}$ is equal to the Littlewood-Richardson coefficient
discussed in Lemma \ref{l:intersection} \cite{MacDonald}.
\end{defn}
Let $\omega_0$ denote the involution in $S_n$ with one-line form
$n \ n-1 \dots 2 \ 1$.
\begin{thm}
\label{t:white}
\cite{White} Let $\pi$ be any permutation in $S_n$, and $\lamu$ a skew shape
of size $n$. Then $\lp(P(\pi)) = h_T$ for some LR filling
$T$ of shape $\lamu$ if and only if $\omega_0 \pi^{-1} = h_U$ for some
standard tableau $U$ of shape $\lamu$.
\end{thm}
We need a few more standard facts about the RSK (row) insertion algorithm.
\begin{fact}
\label{f:evac}
For any $\sigma \in S_n$, the following properties of RSK hold:
(1) $(P(\omega_0 \sigma \omega_0),Q(\omega_0 \sigma \omega_0))
= (P(\sigma)_{\evac}, Q(\sigma)_{\evac})$, where $P_{\evac}$ is obtained
from $P$ by a process known as evacuation. This fact can be used as a
definition of evacuation for standard tableaux.
(2) $(P_{\evac})^t = (P^t)_{\evac}$.
(3) \cite{Schutzenberger} $(P_{\evac})_{\evac} = P$.
(4) $\sh P_{\evac} = \sh P$.
(5) $(P(\sigma \omega_0),Q(\sigma \omega_0)) =
(P(\sigma)^t,Q(\sigma)^t_{\evac})$.
\end{fact}
We can now state the Theorem which gives the values of the numbers
$g^\lambda_{\mu \nu}$.
\begin{thm}
\label{t:LRequiv}
For any skew shape $\lamu$ and any normal even shape $\nu$ with
$|\lamu| = |\nu|$, we have
$g^\lambda_{\mu \nu} = c^\lambda_{\mu \nu}.$
\end{thm}
\begin{proof}
We find a bijection $\phi$, between the set of standard matchings $\delta$
with $\nu(\delta) = \nu$, and the set of LR fillings $T$ of shape $\lamu$
such that $h_T$ has weight $\nu$.
Suppose $\delta \in A_\lamu$, and $\nu(\delta) = \nu$. Then $h_\delta$ is
a fixed point free involution of $S_{2n}$ where $\lamu$ has size $2n$.
Furthermore, $h_\delta = \pi_\delta \omega_0$. Taking inverses, we obtain
\begin{equation}
h_\delta = \omega_0 \pi_\delta^{-1}.
\end{equation}
Since $\delta$ is a standard tableau, by Theorem \ref{t:white}, we have
$\lp(P(\pi_\delta)) = h_T$ for some LR filling $T$ of shape $\lamu$. Since
$\sh P(\pi_\delta) = \nu$, it follows that $h_T$ has weight $\nu$. We
define $\phi(\delta) = T$.
Now, if $T$ is a LR filling of shape $\lamu$ such that $h_T$ has weight
$\nu$, then let $P$ be the standard tableau of shape $\nu$ such that
$\lp(P) = h_T$. Let $\pi \in S_{2n}$ be the permutation such that
\begin{equation}
\begin{split}
P(\pi) &= P, \\
Q(\pi) &= P_{\evac}.
\end{split}
\end{equation}
Using Fact \ref{f:evac} (5), (2) and (3), we obtain
\begin{equation}
\begin{split}
P(\pi \omega_0) &= P^t, \quad \text{and} \\
Q(\pi \omega_0) &= (P_{\evac})^t_{\evac} = P^t.
\end{split}
\end{equation}
Since $P^t$ consists entirely of even columns, Facts \ref{f:RSKinv} and
\ref{f:RSKbij} imply that $\pi \omega_0$ is a fixed point free involution
in $S_{2n}$. Thus,
\begin{equation}
\pi \omega_0 = (\pi \omega_0)^{-1} = \omega_0 \pi^{-1}.
\end{equation}
We have $\lp(P(\pi)) = h_T$, so by Theorem \ref{t:white},
$\omega_0 \pi^{-1} = h_\delta$ for some standard tableau $\delta$ of shape
$\lamu$. Moreover, since $\omega_0 \pi^{-1}$ is a fixed point free involution
of $S_{2n}$, $\delta$ is a matching of shape $\lamu$, and hence
$\delta \in A_\lamu$. To show that $\nu(\delta) = \nu$, we compute
\begin{equation}
\begin{split}
\nu(\delta) &= \sh P(\pi_\delta) \\
&= \sh P(h_\delta \omega_0) \\
&= \sh P(\omega_0 \pi^{-1} \omega_0) \\
&= \sh P(\pi^{-1})_{\evac} \\
&= \sh P(\pi^{-1}) \\
&= \sh Q(\pi) \\
&= \nu. \\
\end{split}
\end{equation}
Clearly, $\phi(\delta) = T$. In fact, it is not hard
to see that $\phi$ is a bijection, which finishes the proof of the theorem.
\end{proof}
\subsection{An alternate statement of the main theorem}
\label{s:alt}
Theorem \ref{t:LRequiv} allows us to restate Theorem \ref{t:main1}
in terms of Jeu de Taquin for matchings as follows.
\begin{thm}
\label{t:main2}
$$\det M =\prod_{\delta \in A_\lamu}
|R_\delta| |C_\delta| h_{\nu(\delta)}(x).$$
\end{thm}
Similarly, we can restate Theorem \ref{t:main1gen}.
\begin{thm}
\label{t:main2gen}
$$\det {\cal M} =\prod_{\delta \in A_\lamu}
|R_\delta| |C_\delta| z_{1/2(\nu(\delta))}(y_1,\dots,y_n).$$
\end{thm}
\bibliographystyle{amsplain}
\begin{thebibliography}{HW3}
\bibitem[Brr]{Brauer}
R. Brauer, On algebras which are connected to the semisimple continuous
groups, Ann. of Math. 38 (1937), 857-872.
\bibitem[Brn]{Brown}
W. P. Brown, An algebra related to the orthogonal group, Michigan Math. J.
3 (1955-1956), 1-22.
\bibitem[HW1]{HW1}
P. Hanlon and D. Wales, On the decomposition of Brauer's centralizer algebras,
J. Algebra 121 (1989), 409-445.
\bibitem[HW2]{HW2}
P. Hanlon and D. Wales, Eigenvalues connected with Brauer's centralizer
Algebras, J. Algebra 121 (1989), 446-476.
\bibitem[HW3]{HW3}
P. Hanlon and D. Wales, Computing the discriminants of Brauer's centralizer
algebras, Math. Comput. 54, No. 190 (1990), 771-796.
\bibitem[HW4]{HW4}
P. Hanlon and D. Wales, A Tower Construction for the Radical in Brauer's
centralizer algebras, J. Algebra 164 (1994), 773-830.
\bibitem[Ja1]{Ja1}
A. T. James, The distribution of the latent roots of the covariance matrix,
Ann. Math. Statist. 31 (1960), 151-158.
\bibitem[Ja2]{Ja2}
A. T. James, Zonal polynomials of the real positive definite symmetric
matrices, Ann. of Math. 74 (1961), 456-469.
\bibitem[Ja3]{Ja3}
A.T. James, Distributions of matrix variates and latent roots derived from
normal samples, Ann. Math. Statist. 35 (1964), 475-501.
\bibitem[Jo]{Jones}
V. F. R. Jones, Index for Subfactors, Inv. Math. 72 (1983), 1-25.
\bibitem[M]{MacDonald}
I. G. Macdonald, ``Symmetric Functions and Hall Polynomials,'' Clarendon,
London, 1979.
\bibitem[Sa]{Sagan}
B. Sagan, ``The Symmetric Group,'' Wadsworth \& Brooks/Cole Math. Series,
Pacific Grove, CA, 1991.
\bibitem[S]{Schensted}
C. Schensted, Longest increasing and decreasing subsequences, Canad. J. Math.
13 (1961), 179-191.
\bibitem[Sch]{Schutzenberger}
M.-P. Sch\" utzenberger, La correspondance de Robinson, in ``Combinatoire et
Repr\' esentation du Groupe Sym\' etrique,'' Lecture Notes in Math., Vol. 579,
Springer-Verlag, New York/Berlin, 1977.
\bibitem[Wz]{Wenzl}
H. Wenzl, On the structure of Brauer's centralizer algebras, Ann. of Math.
128 (1988), 173-193.
\bibitem[Wl]{Weyl}
H. Weyl, ``The Classical Groups,'' Princeton Univ. Press, Princeton, NJ, 1939.
\bibitem[Wh]{White}
D. E. White, Some connections between the Littlewood-Richardson rule and the
construction of Schensted, J. Combin. Theory Ser. A 30 (1981), 237-247.
\end{thebibliography}
\end{document}