%Paper: ewp-game/9503003
%From: "Vijay Krishna" <VXK5@PSUVM.PSU.EDU>
%Date: Mon, 13 Mar 95 14:37 EST

%% This document created by Scientific Word (R)
%% Version 2.0
 
 
\documentstyle[amsfonts,12pt,thmsb,sw20lart]{article}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%TCIDATA{TCIstyle=Article/art4.lat,lart,article}
 
\input tcilatex
\begin{document}
 
\author{Vijay Krishna \\
%EndAName
Penn State University \and Tomas Sj\"ostr\"om \\
%EndAName
Harvard University}
\title{ON THE CONVERGENCE OF FICTITIOUS PLAY}
\date{February 20, 1995}
\maketitle
 
\begin{abstract}
We study the continuous time Brown-Robinson fictitious play process for
non-zero sum games. We show that, in general, fictitious play cannot
converge cyclically to a mixed strategy equilibrium in which both players
use more than two pure strategies.
\end{abstract}
 
\section{Introduction}
 
This paper studies the ``fictitious play'' (FP) learning process due to
Brown \cite{Brown} and Robinson \cite{Robinson}. The FP process was
originally proposed as a computational tool for determining the value of a
two-person zero-sum game. However, it can also be interpreted as a learning
process for boundedly rational agents in which each player plays a myopic
best response in each period, on the assumption that the opponent's future
actions will resemble the past.
 
Robinson \cite{Robinson} established the result that the FP process
converges in finite two-person zero-sum games. Miyasawa \cite{Miyasawa}
showed the convergence of FP in $2\times 2$ games. However, the convergence
cannot be guaranteed in general non-zero sum games as an example due to
Shapley \cite{Shapley} shows.
 
We find it convenient to work with a continuous time formulation of
fictitious play (referred to as CFP) rather than the discrete time
formulation proposed by Brown \cite{Brown}. While the convergence results
cited above were for the discrete process, both hold for the continuous time
process also, as does Shapley's counterexample. Our main result (Theorem \ref
{Main}) is that CFP almost never converges cyclically to a mixed strategy
equilibrium in which both players use more than two pure strategies. In a
recent paper, Hofbauer \cite{Hofbauer} has made a related conjecture: if CFP
converges to a regular mixed strategy equilibrium, then the game is zero-sum.
 
As is well-known, the interpretation of mixed strategy equilibria is
problematic (see, for instance, Rubinstein \cite{Rubinstein}). In two person
zero sum games a justification for mixed strategies is that the ``correct''
probabilities provide the best defense against the opponent. But in non-zero
sum games a justification on defensive grounds cannot be made. A point of
view originating with Harsanyi \cite{Harsanyi} takes the position that the
equilibrium probabilities represent only the subjective beliefs of other
players about the behavior of a particular player; thus it is not necessary
to assume that players actually choose randomized strategies. Fictitious
play and associated learning procedures suggest a way in which such beliefs
can form over time by means of a gradual process. However, learning
procedures can serve to justify mixed strategy equilibria only in
circumstances in which the procedures converge to an equilibrium. Our result
shows that, in general non-zero sum games, mixed strategy equilibria cannot
be limits of a fictitious play process and thus are inherently unstable.
 
The behavior of dynamical processes in the presence of mixed equilibria has
previously been examined in a related context by Crawford \cite{Crawford}.
Crawford \cite{Crawford} studies a class of learning procedures in which (a)
players have a finite memory; and (b) play mixed strategies which are
adjusted in response to the difference in payoffs from playing a particular
pure strategy and the mixed strategy against the actual play in the recent
past. Crawford \cite{Crawford} then shows that mixed strategy equilibria are
generally unstable. The procedures considered do not include the CFP; they
are more akin to evolutionary processes like the so called ``replicator
dynamics.''
 
Evolutionary dynamical systems are considered in more detail by Hofbauer and
Sigmund \cite{HofbSigm}. Their results also suggest that mixed strategy
equilibria are unstable in general (asymmetric) bimatrix games (see Section
27.5 of \cite{HofbSigm}).
 
In other, more closely related work, Fudenberg and Kreps \cite{FudenKreps}
study interpretational issues concerning mixed strategies and learning
processes like FP. They propose some alternative systems based on ideas
stemming from Harsanyi's \cite{Harsanyi} purification theorem and derive
convergence results for $2\times 2$ games. Jordan \cite{Jordan} points out
other difficulties in interpreting the convergence of learning processes to
mixed equilibria. In particular, he points out that the convergence concerns
players' expectations and not strategies or payoffs.
 
\section{Fictitious Play}
 
Let $G=(A,B)$ be a two-player{\em \ game} where $A$ and $B$ are $I\times J$
matrices. We will refer to $I=\{1,2,\ldots ,I\}$ and $J=\{1,2,\ldots ,J\}$
as the sets of pure strategies available to players 1 and 2 respectively. As
usual, if player 1 chooses strategy $i$ and player 2 chooses strategy $j$,
the payoff to player 1 is $a_{ij}$ and the payoff to player 2 is $b_{ij}$.
The sets of mixed strategies are denoted by $\Delta (I)$ and $\Delta (J)$
respectively. Let $\delta _i\in \Delta (I)$ be the mixed strategy that
assigns weight 1 to $i$. We will identify $i$ with $\delta _i$ and write $%
i\in \Delta (I)$ instead of $\delta _i\in \Delta (I)$.
 
For all $q\in \Delta (J)$, let $BR(q)$ be the set of {\em pure strategy best
responses} for player{\it \ }1 and denote by $\limfunc{supp}q=\left\{
j:q_j>0\right\} $ the {\em support} of $q$. The mixed strategy pair $%
(p^{*},q^{*})$ is a {\em Nash equilibrium} if $\limfunc{supp}p^{*}\subseteq
BR(q^{*})$ and $\limfunc{supp}q^{*}\subseteq BR(p^{*})$.
 
\begin{definition}
For $t=0,1,2,...,$ the sequence $(p(t),q(t))$ is a{\em \ discrete time
fictitious play process (DFP)}{\it \ }if 
\[
(p(0),q(0))\in \Delta (I)\times \Delta (J);
\]
and for all $t\geq 0$, 
\[
p(t+1)=\frac{tp(t)+i(t)}{t+1},\,\;q(t+1)=\frac{tq(t)+j(t)}{t+1}
\]
where $i\left( t\right) \in BR\left( q\left( t\right) \right) $ and $j\left(
t\right) \in BR\left( p\left( t\right) \right) .$\medskip\ 
\end{definition}
 
Thus, $p(t+1)$ is a weighted average of $p(t)$ and $i\left( t\right) $ where
the weights are $\frac t{t+1}$ and $\frac 1{t+1}$. New strategies are chosen
in each ``period.'' Now suppose $\delta >0$ is the time between adjustments.
Replacing the weights by $\frac t{t+\delta }$ and $\frac \delta {t+\delta },$
we get 
\[
p(t+\delta )=\frac{tp(t)+\delta i\left( t\right) }{t+\delta } 
\]
 
As $\delta \rightarrow 0$, we obtain 
\[
\frac{dp(t)}{dt}=\frac{i\left( t\right) -p(t)}t 
\]
 
This is not defined for $t=0$, so the continuous time version should start
at some $t_0>0,$ say $t_0=1$. This leads to the following definition:
 
\begin{definition}
For $t\geq 1,$ the path $(p(t),q(t))$ is a {\em continuous time fictitious
play process (CFP)} if 
\[
(p(1),q(1))\in \Delta (I)\times \Delta (J); 
\]
and 
\[
\frac{dp(t)}{dt}=\frac{i\left( t\right) -p(t)}t,\,\,\frac{dq(t)}{dt}=\frac{%
j\left( t\right) -q(t)}t 
\]
where $i\left( t\right) \in BR\left( q\left( t\right) \right) $ and $j\left(
t\right) \in BR\left( p\left( t\right) \right) .$\medskip
\end{definition}
 
The discrete time fictitious play process (DFP) is also known as the
``Brown-Robinson Learning Process'' (\cite{Brown}, \cite{Robinson}). In this
paper, we find it convenient to work with its continuous time version (CFP).
We hope to explore whether our results continue to hold for the discrete
process (DFP) later.
 
It is well known that if the DFP (or CFP) $(p(t),q(t))$ converges to $%
(p^{*},q^{*}),$ then $(p^{*},q^{*})$ is a Nash equilibrium of $G.$
 
\paragraph{Cyclic Play}
 
Under fictitious play, each player always plays a best response against the
empirical distribution of the opponent's play. Under CFP, therefore, when a
player switches from one pure strategy to another he is precisely
indifferent between these two strategies. This fact is crucial for our
analysis. Let $t_0=1$ and let $(t_1,t_2,t_3,...)$ be the times when some
player switches his/her strategy. (In an exceptional case, both players may
switch at the same instant.) Let 
\[
(i_{t_n},j_{t_n})\equiv (i(t),j(t))\;\;\;\text{for\ \ \ }t\in (t_n,t_{n+1}) 
\]
denote the choices in the interval $(t_n,t_{n+1})$. The interval $%
(t_n,t_{n+1})$ consists of a string of consecutive plays of $%
(i_{t_n},j_{t_n}),$ referred to as a {\em run}. The {\em run-length} is $%
t_{n+1}-t_n$.
 
The {\em sequence of play} is the sequence of pure strategy combinations: 
\[
(i_{t_0},j_{t_0}),(i_{t_1},j_{t_1}),(i_{t_2},j_{t_2}),...,(i_{t_n},j_{t_n}),...
\]
 
The sequence of play is eventually {\em cyclic} if there is $K$ and $N$ such
that $(i_{t_n},j_{t_n})=(i_{t_{n+K}},j_{t_{n+K}})$ for all $n>N$. Cyclic
play has been called ``quasi-periodic'' play by Rosenm\"uller \cite
{Rosenmuller}. If the CFP converges to some Nash equilibrium $(p^{*},q^{*})$
and the sequence of play is eventually a cycle, we will refer to it as {\em %
cyclic convergence}.
 
We wish to alert the reader that cyclic play refers to the fact that pure
strategy combinations are played in a fixed pattern and not that the
trajectory $\left( p\left( t\right) ,q\left( t\right) \right) $ reaches a
limit cycle. As a simple example, note that for ``matching pennies'' the
sequence of play resulting from a CFP may be, for instance, $\left(
H,H\right) ,\left( H,T\right) ,\left( T,T\right) ,\left( T,H\right) $ and is
thus cyclic while the trajectory $\left( p\left( t\right) ,q\left( t\right)
\right) $ converges to the unique Nash equilibrium.
 
\section{The Main Result}
 
Our main result is that it is rare for CFP to converge cyclically to a mixed
strategy equilibrium in which both players use more than two pure strategies.
 
Let $\Gamma $ denote the set of all $I\times J$ games. Each game $G\in
\Gamma $ can be associated with a point in the Euclidean space ${\bf {\Bbb R}%
}^{I\times J}\times {\bf {\Bbb R}}^{I\times J}$. A property $P$ is said to
hold {\em generically} in $\Gamma $ if there is an open and dense subset $%
\Gamma ^{\prime }$ of $\Gamma $ in which the property holds. Similarly, a
property is said to hold for generic initial conditions if there is an open
and dense subset $Z$ of $\Delta \left( I\right) \times \Delta \left(
J\right) $ such that if $\left( p\left( 1\right) ,q\left( 1\right) \right) $
belong to $Z$ the property holds.\bigskip\ 
 
\begin{theorem}
\label{Main}For generic games and initial conditions, if a CFP converges
cyclically to $\left( p^{*},q^{*}\right) $ then $\#\limfunc{supp}p^{*}=\#%
\limfunc{supp}q^{*}\leq 2$. \medskip
\end{theorem}
 
The proof of Theorem \ref{Main} is somewhat involved and so we first present
a brief outline of the argument.
 
\subsection{An Outline of the Proof}
 
Fictitious play (CFP) is a continuous time {\em non-linear} and {\em %
non-autonomous} dynamical system. The first step is to reformulate the
system so that the problem reduces to the study of an associated (discrete) 
{\em linear} and {\em autonomous} system. Once this is done, standard tools
can be brought to bear on the problem. In the second step, these tools are
employed to analyze the linear difference equation system and obtain the
main result.
 
The method we employ is to fix a particular (arbitrary) cycle of play, where
each player uses at least three different pure strategies.\footnote{%
As is well-known, if $(p^{*},q^{*})$ is an equilibrium of a generic game
then $\#\limfunc{supp}p^{*}=\#\limfunc{supp}q^{*}$.} We then show that, for
generic games, if this cycle is played the CFP does not converge. Since
there are only countably many possible cycles, for generic games, cyclic
convergence involves at most two pure strategies for each player.
 
\paragraph{Step 1: Reduction}
 
When the play is cyclic, a sequence of choices 
\[
(i_1,j_1),(i_2,j_2),(i_3,j_3),...,(i_K,j_K)
\]
are repeated over and over in the same order. $K$ consecutive runs
corresponding to the choices $(i_1,j_1),(i_2,j_2),(i_3,j_3),...,(i_K,j_K)$
is a {\em round}. Thus, cyclic play consists of rounds $r=1,2,3,...$ A run
corresponding to the choice $(i_k,j_k)$ is referred to as a {\em k-run}. Let 
$n_k(r)$ denote the length of the $k$-run in round $r$, that is, $n_k(r)$ is
the amount of time spent playing $(i_k,j_k)$ in round $r$. Let $%
n(r)=(n_1(r),n_2(r),\ldots n_K(r))$.
 
We will argue that if the CFP is cyclic, then there exists a $K\times K$
matrix $F$ such that for all $r$%
\begin{equation}  \label{f}
n(r+1)=Fn(r)
\end{equation}
 
Since CFP is completely determined by the associated system determining the
run-lengths, the problem has been reduced to the study of a linear
difference equation. This reduction also appears in Rosenm\"uller \cite
{Rosenmuller}.
 
\paragraph{Step 2: Analysis of $F$}
 
The behavior of the discrete linear dynamical system (\ref{f}) is determined
by Eigen roots of $F$, and in the long run the evolution is determined by
the dominant Eigen root. The crucial fact (Lemma \ref{prod}) is that the
product of the non-zero Eigen roots of $F$ is one. Suppose each player uses
at least three pure strategies in the cycle. We show that generically, not
all Eigen roots of $F$ can have absolute value equal to one. Thus, there
exists an Eigen root $\lambda $ of $F$ such that $\left| \lambda \right| >1$%
. Then, for generic initial conditions, the run-lengths increase
exponentially, as in Shapley's \cite{Shapley} example, and CFP does not
converge.
 
It is important to note that, in this proof, we fix a particular cycle
(where each player uses at least three pure strategies) and show that for
generic games, CFP does not converge along this cycle. But since there are
only countably many possible cycles, generically there is {\em no} cycle
such that CFP will converge.
 
For {\em non-generic} classes of games (such as zero sum games) it may well
happen that {\em all} non-zero Eigen roots of $F$ have absolute value equal
to one, which allows for convergence. We consider this issue in Section \ref
{discussion}.
 
\section{The Determination of the Run-Lengths}
 
We start by assuming that CFP proceeds along a cycle 
\[
\left( (i_1,j_1),(i_2,j_2),(i_3,j_3),...,(i_K,j_K)\right) 
\]
We will argue that there exists a $K\times K$ matrix $F$ (which depends on
the particular cycle and on the payoff matrices) such that for all $r,$%
\begin{equation}
n(r+1)=Fn(r).
\end{equation}
 
Let $(\alpha _1,\alpha _2,\ldots ,\alpha _I)$ denote the $I$ rows of $A$ and
let $(\beta _1,\beta _2,\ldots ,\beta _J)$ denote the $J$ columns of $B$.
Let $P^0$ and $Q^0$ be vectors denoting the total amount of time each player
has used each strategy prior to the start of round $r$. It is convenient to
write $n=n(r)$ and $n^{\prime }=n(r+1).$
 
Define an $I\times K$ matrix $P$ by: 
\begin{equation}
P_{ik}=\left\{ 
\begin{array}{l}
1\text{ if }i_k=i \\ 
0\text{ if }i_k\neq i
\end{array}
\right.  \label{defP}
\end{equation}
and a $J\times K$ matrix $Q$ by: 
\begin{equation}
Q_{jk}=\left\{ 
\begin{array}{l}
1\text{ if }j_k=j \\ 
0\text{ if }j_k\neq j
\end{array}
\right.  \label{defQ}
\end{equation}
 
Observe that $(Pn)_i$ is the amount of time player 1 played strategy $i$ in
round $r$ and $(Qn)_j$ is the amount of time player 2 played strategy $j$ in
round $r$. Notice also that $\alpha _{i_k}Q=\left(
a_{i_kj_1},a_{i_kj_2},...,a_{i_kj_K}\right) $ and $\beta _{j_k}P=\left(
b_{i_1j_k},b_{i_2j_k},...,b_{i_Kj_k}\right) $.
 
Let $e_k$ denote the $k$th $K$-dimensional unit vector. It is convenient to
define the $K\times K$ matrix: 
\begin{equation}  \label{defEk}
E_k=(e_1,e_2,\ldots ,e_k,0,\ldots ,0)
\end{equation}
whose first $k$ columns are the first $k$ unit vectors and the last $(K-k)$
columns are $0$. By definition, $E_K=I$, the identity matrix. We also have $%
E_kn=\left( n_1,n_2,...,n_k,0,...,0\right) $.
 
\paragraph{Round $r$ Equations}
 
Under CFP, when a player switches from one pure strategy to another he is
precisely indifferent between these two strategies. Using this fact, we find
that the players switch from $(i_1,j_1)$ to $(i_2,j_2)$ in round $r$ when: 
\begin{equation}
\alpha _{i_2}Q^0+a_{i_2j_1}n_1=\alpha _{i_1}Q^0+a_{i_1j_1}n_1
\end{equation}
and 
\begin{equation}
\beta _{j_2}P^0+b_{i_1j_2}n_1=\beta _{j_1}P^0+b_{i_1j_1}n_1
\end{equation}
 
It is convenient to rewrite these as: 
\begin{equation}  \label{sw1.1}
(\alpha _{i_2}-\alpha _{i_1})QE_1n=-(\alpha _{i_2}-\alpha _{i_1})Q^0
\end{equation}
and 
\begin{equation}  \label{sw1.2}
(\beta _{j_2}-\beta _{j_1})PE_1n=-(\beta _{j_2}-\beta _{j_1})P^0
\end{equation}
 
Typically, of course, only one of the two players will switch strategies in
the transition from $(i_1,j_1)$ to $(i_2,j_2)$, and thus only one of
equations (\ref{sw1.1}) or (\ref{sw1.2}) will be non-trivial. For instance,
if only player 1 switches strategies, that is, if $i_2\neq i_1$ but $j_2=j_1$%
, then (\ref{sw1.2}) is trivially satisfied and hence redundant.
 
In general, for $k=1,2,...,K$, when the players switch from $(i_k,j_k)$ to $%
(i_{k+1},j_{k+1})$ we have: 
\begin{equation}
\alpha _{i_{k+1}}Q^0+\sum_{s=1}^ka_{i_{k+1}j_s}n_s=\alpha
_{i_k}Q^0+\sum_{s=1}^ka_{i_kj_s}n_s  \label{k}
\end{equation}
and 
\begin{equation}
\beta _{j_{k+1}}P^0+\sum_{s=1}^kb_{i_sj_{k+1}}n_s=\beta
_{j_k}P^0+\sum_{s=1}^kb_{i_sj_k}n_s
\end{equation}
which can be rewritten as: 
\begin{equation}
(\alpha _{i_{k+1}}-\alpha _{i_k})QE_kn=-(\alpha _{i_{k+1}}-\alpha _{i_k})Q^0
\label{swk.1}
\end{equation}
and 
\begin{equation}
(\beta _{j_{k+1}}-\beta _{j_k})PE_kn=-(\beta _{j_{k+1}}-\beta _{j_k})P^0
\label{swk.2}
\end{equation}
where we always write $K+1\equiv 1.$
 
\paragraph{Round ($r$+1) Equations}
 
By the earlier arguments, for $k=1,2,...,K,$ when the players switch from $%
(i_k,j_k)$ to $(i_{k+1},j_{k+1})$ in round $r+1$ we have: 
\begin{equation}  \label{swk'.1}
(\alpha _{i_{k+1}}-\alpha _{i_k})QE_kn^{\prime }=-(\alpha _{i_{k+1}}-\alpha
_{i_k})Qn-(\alpha _{i_{k+1}}-\alpha _{i_k})Q^0
\end{equation}
and 
\begin{equation}  \label{swk'.2}
(\beta _{j_{k+1}}-\beta _{j_k})PE_kn^{\prime }=-(\beta _{j_{k+1}}-\beta
_{j_k})Pn-(\beta _{j_{k+1}}-\beta _{j_k})P^0
\end{equation}
 
\paragraph{The Basic Difference Equation}
 
By substituting (\ref{swk.1}) and (\ref{swk.2}) into (\ref{swk'.1}) and (\ref
{swk'.2}) respectively, we obtain for $k=1,2,...,K:$ 
\begin{equation}
(\alpha _{i_{k+1}}-\alpha _{i_k})QE_kn^{\prime }=-(\alpha _{i_{k+1}}-\alpha
_{i_k})Q(I-E_k)n  \label{rowk.1}
\end{equation}
\begin{equation}
(\beta _{j_{k+1}}-\beta _{j_k})PE_kn^{\prime }=-(\beta _{j_{k+1}}-\beta
_{j_k})P(I-E_k)n  \label{rowk.2}
\end{equation}
where 
\[
I-E_k=\left[ 0,0,...,e_{k+1},e_{k+2},...,e_K\right] . 
\]
 
\begin{description}
\item[Assumption ]  {\em The game }$(A,B)${\em \ lies in an open and dense
subset of }$\Gamma ${\em \ such that for all }$k,${\em \ in the switch from }%
$(i_k,j_k)${\em \ to }$(i_{k+1},j_{k+1})${\em \ only one player switches
strategies.}
\end{description}
 
Let $\sigma (k)\in \{1,2\}$ denote the player who switches after $k$: 
\begin{equation}
\sigma (k)=\left\{ 
\begin{array}{c}
1\text{ if }i_k\neq i_{k+1} \\ 
2\text{ if }j_k\neq j_{k+1}
\end{array}
\right.  \label{sigma}
\end{equation}
 
For convenience, we usually assume player one is the first to switch in each
round ($\sigma (1)=1$) and player 2 the last ($\sigma (K)=2$).
 
Consider the system of equations that results when out of equations (\ref
{rowk.1}) and (\ref{rowk.2}), for each $k$, only the $k$th equation
corresponding to the switching player $\sigma (k)$ is considered. This
results in a system of equations of the form 
\begin{equation}  \label{Cn'Dn}
Cn^{\prime }=Dn
\end{equation}
 
The $k$th row of $C$ is 
\begin{equation}
\begin{array}{c}
(\alpha _{i_{k+1}}-\alpha _{i_k})QE_k= \\ 
\left[
a_{i_{k+1}j_1}-a_{i_k\;j_1},a_{i_{k+1}j_2}-a_{i_k%
\;j_2},...,a_{i_{k+1}j_k}-a_{i_k\;j_k},0,0,...,0\right]
\end{array}
\label{apa.1}
\end{equation}
if $\sigma (k)=1$, and is 
\begin{equation}
\begin{array}{c}
(\beta _{j_{k+1}}-\beta _{j_k})PE_k= \\ 
\left[
b_{i_1j_{k+1}}-b_{i_1\;j_k},b_{i_2j_{k+1}}-b_{i_2%
\;j_k},...,b_{i_kj_{k+1}}-b_{i_k\;j_k},0,0,...,0\right]
\end{array}
\label{apa.2}
\end{equation}
if $\sigma (k)=2$. Therefore, $C$ is a {\em lower-triangular} matrix (that
is, for all $k<l,\ c_{kl}=0$).
 
The $k$th row of $D$ is 
\begin{equation}
\begin{array}{c}
-(\alpha _{i_{k+1}}-\alpha _{i_k})Q(I-E_k)= \\ 
-\left[
0,0,...,0,a_{i_{k+1}j_{k+1}}-a_{i_k\;j_{k+1}},a_{i_{k+1}j_{k+2}}-a_{i_k%
\;j_{k+2}},...,a_{i_{k+1}j_K}-a_{i_k\;j_K}\right]
\end{array}
\label{apa.3}
\end{equation}
if $\sigma (k)=1$, and is 
\begin{equation}
\begin{array}{c}
-(\beta _{j_{k+1}}-\beta _{j_k})P(I-E_k)= \\ 
-\left[
0,0,...,0,b_{i_{k+1}j_{k+1}}-b_{i_{k+1}\;j_k},b_{i_{k+2}j_{k+1}}-b_{i_{k+2}%
\;j_k},...,b_{i_Kj_{k+1}}-b_{i_K\;j_k}\right]
\end{array}
\label{apa.4}
\end{equation}
if $\sigma (k)=2$. Thus, $D$ is a {\em strictly upper-triangular} matrix
(that is, for all $k\geq l,\ d_{kl}=0$).
 
The diagonal elements of $C$ are all strictly positive (Monderer and Sela 
\cite{MondSella} call this the ``better response property''). Thus, $C$ is
invertible and we can write 
\begin{equation}
n^{\prime }=C^{-1}Dn  \label{nCDn}
\end{equation}
 
This establishes that the run-lengths are determined by a first-order linear
difference equation. The behavior of the difference equation (\ref{nCDn}) is
determined by the Eigen roots of the matrix $C^{-1}D.$
 
\section{The Eigen Roots of $C^{-1}D$}
 
Notice from (\ref{apa.4}) that the first column of $D$ is $0$. Thus $C^{-1}D$
is singular and $\lambda _0=0$ is an Eigen root of $C^{-1}D.$ Our first
result concerns the non-zero roots of $C^{-1}D.$
 
\begin{lemma}
\label{prod}For generic games, the product of the non-zero Eigen roots of $%
C^{-1}D$ is 1.
\end{lemma}
 
\TeXButton{Proof}{\proof}Write: 
\[
C=\left[ 
\begin{array}{cc}
c_{11} & 0 \\ 
c & \overline{C}
\end{array}
\right] 
\]
where $\overline{C}$ is the triangular $\left( K-1\right) \times \left(
K-1\right) $ submatrix of $C$ in the lower right corner and $c=\left[
c_{21},c_{31},...,c_{K1}\right] ^T$. The diagonal elements of $C$ are all
strictly positive.
 
Similarly, write: 
\[
D=\left[ 
\begin{array}{ll}
0 & d \\ 
0 & \overline{D}
\end{array}
\right] 
\]
where $\overline{D}$ is the $\left( K-1\right) \times \left( K-1\right) $
submatrix of $D$ in the lower right corner and $d=\left[
d_{12},d_{13},...,d_{1K}\right] $. Now: 
\begin{equation}
C^{-1}D=\left[ 
\begin{array}{cc}
0 & \frac 1{c_{11}}d \\ 
&  \\ 
0 & \overline{C}^{-1}\left( \overline{D}-\frac 1{c_{11}}cd\right)
\end{array}
\right]  \label{CD}
\end{equation}
 
Let $I_k$ denote the $k\times k$ identity matrix. The characteristic
polynomial of $C^{-1}D$ is: 
\begin{eqnarray}
0 &=&\det \left( \lambda I_K-C^{-1}D\right)  \nonumber \\
&=&\det \left[ 
\begin{array}{cc}
\lambda & -\frac 1{c_{11}}d \\ 
&  \\ 
0 & \lambda I_{K-1}-\overline{C}^{-1}\left( \overline{D}-\frac
1{c_{11}}cd\right)
\end{array}
\right]  \nonumber \\
\ &=&\lambda \times \det \left( \lambda I_{K-1}-\overline{C}^{-1}\left( 
\overline{D}-\frac 1{c_{11}}cd\right) \right)  \label{poly}
\end{eqnarray}
using (\ref{CD}).
 
\begin{description}
\item[Claim:] 
\begin{equation}
\det \left( \overline{C}^{-1}\left( \overline{D}-\frac 1{c_{11}}cd\right)
\right) =1  \label{det}
\end{equation}
or equivalently: $\det \left( \overline{D}-\frac 1{c_{11}}cd\right) =\det 
\overline{C}$.
\end{description}
 
{\it Proof of claim.} Observe that 
\[
\overline{D}-\frac 1{c_{11}}cd 
\]
\[
=\left[ 
\begin{array}{ccccc}
0 & d_{23} & d_{24} &  & d_{2K} \\ 
0 & 0 & d_{34} & \cdots & d_{3K} \\ 
0 & 0 & 0 &  & d_{4K} \\ 
& \vdots &  & \ddots &  \\ 
0 & 0 & 0 & \cdots & 0
\end{array}
\right] -\frac 1{c_{11}}\left[ 
\begin{array}{ccccc}
c_{21}d_{12} & c_{21}d_{13} & c_{21}d_{14} &  & c_{21}d_{1K} \\ 
c_{31}d_{12} & c_{31}d_{13} & c_{31}d_{14} & \cdots & c_{31}d_{1K} \\ 
c_{41}d_{12} & c_{41}d_{13} & c_{41}d_{14} &  & c_{41}d_{1K} \\ 
& \vdots &  & \ddots &  \\ 
c_{K1}d_{12} & c_{K1}d_{13} & c_{K1}d_{14} & \cdots & c_{K1}d_{1K}
\end{array}
\right] 
\]
 
By repeated use of the rule that if a column of a matrix is the sum of two
column vectors then the determinant is the sum of two determinants, we
obtain: 
\[
\det \left( \overline{D}-\frac 1{c_{11}}cd\right) =\frac 1{c_{11}}\det
\left[ 
\begin{array}{ccccc}
-c_{21}d_{12} & d_{23} & d_{24} &  & d_{2K} \\ 
-c_{31}d_{12} & 0 & d_{34} & \cdots  & d_{3K} \\ 
-c_{41}d_{12} & 0 & 0 &  & d_{4K} \\ 
& \vdots  &  & \ddots  &  \\ 
-c_{K1}d_{12} & 0 & 0 & \cdots  & 0
\end{array}
\right] 
\]
Evaluating the determinant by expanding along the last row yields: 
\[
\det \left( \overline{D}-\frac 1{c_{11}}cd\right) =\left( -1\right)
^{K+1}\frac 1{c_{11}}c_{K1}d_{12}d_{23}d_{34}...d_{K-1,K}
\]
But now recognize that from equations (\ref{apa.1}) to (\ref{apa.4}), for
all $k,$ $d_{k,k+1}=-c_{kk}$ and $c_{K1}=c_{KK}$. (Recall that if $\sigma
(k)=1$ then $j_k=j_{k+1}$ and if $\sigma (k)=2$ then $i_k=i_{k+1}$.) Thus: 
\begin{eqnarray}
\det \left( \overline{D}-\frac 1{c_{11}}cd\right)  &=&\left( -1\right)
^{K+1}\frac 1{c_{11}}c_{K1}\left( -c_{11}\right) \left( -c_{22}\right)
\left( -c_{33}\right) ...\left( -c_{K-1,K-1}\right)   \nonumber \\
&=&\left( -1\right) ^{2K}c_{22}c_{33}...c_{K-1,K-1}c_{KK}  \nonumber \\
&=&\det \overline{C}  \label{detD}
\end{eqnarray}
 
This establishes the claim.
 
By (\ref{poly}), $\lambda \neq 0$ is an Eigen root of $C^{-1}D$ if and only
if it is an Eigen root of $\overline{C}^{-1}\left( \overline{D}-\frac
1{c_{11}}cd\right) $. Thus, the claim implies that the product of the
non-zero roots of $C^{-1}D$ is one. \TeXButton{End Proof}{\endproof}
 
\section{Unit Roots of\ $C^{-1}D$}
 
Observe that the Eigen roots of $C^{-1}D$ are determined by the solutions to
the equation: 
\begin{equation}
\lambda x=C^{-1}Dx  \label{chareq}
\end{equation}
where $x\neq 0$ is an Eigen vector, which are the same as the solutions to: 
\begin{equation}
(\lambda C-D)x=0  \label{lcd}
\end{equation}
 
Suppose player 1 plays strategy $i_{k_0}$ in run $k_0$, then plays some
other strategies (but not $i_{k_0}$) for a while, and then returns to
playing $i_{k_1}=i_{k_0}$ in run $k_1$ in the same round. That is, $%
k_0<k_1-1 $, $i_{k_0}=i_{k_1}$, and $i_k\neq i_{k_0}$ for all $k$ such that $%
k_0<k<k_1$. If this happens we say that a {\em reversion} to strategy $%
i_{k_0}$ occurs in run $k_1$. Suppose that there are $\rho _i$ reversions
for player $i$ in each round. Let $\rho =\rho _1+\rho _2$. Notice that there
is always at least one reversion for each player, since $K+1\equiv 1$ by
definition and hence $\rho \geq 2.$
 
Suppose the matrix $C^{-1}D$ has $S$ distinct Eigen roots $\lambda
_1,\lambda _2,...,\lambda _S$ so that we can write the characteristic
polynomial of $C^{-1}D$ in the form: 
\[
\left| \lambda I-C^{-1}D\right| =\prod_{s=1}^S(\lambda _s-\lambda )^{\alpha
_s}=0 
\]
 
The number $\alpha _s$ is called the {\em algebraic multiplicity} of the
root $\lambda _s.$
 
\begin{lemma}
\label{algeb}For generic games, if each player uses at least three different
pure strategies in the cycle, the algebraic multiplicity of the unit root of 
$C^{-1}D$ is $\rho .$
\end{lemma}
 
\TeXButton{Proof}{\proof}The proof is by induction on $\rho ,$ the number of
reversions in the cycle of play.
 
\paragraph{Initial Step $\left( \rho =2\right) $}
 
In this step we show that, generically, if $\rho =2$ then the algebraic
multiplicity of the unit root is two. It is useful to initially consider the 
{\em simple cycle} where the players alternate in switching strategies. That
is, for all $k,$ $\sigma (k)\neq \sigma (k+1)$.
 
We may assume without loss of generality that $\sigma (k)=1$ if and only if $%
k$ is odd, and perhaps after a relabeling of strategies, write the cycle as 
\begin{eqnarray}
&&\left\langle
(i_1,j_1),(i_2,j_2),(i_3,j_3),(i_4,j_4),...,(i_K,j_K)\right\rangle
\label{hyll} \\
&=&\left\langle (1,1),(2,1),(2,2),(3,2),...,(\kappa ,\kappa ),(1,\kappa
)\right\rangle  \nonumber
\end{eqnarray}
where by assumption the number of pure strategies used by each player is $%
\kappa =\frac K2>2$. In this case, $(i_k,j_k)=\left( \frac{k+1}2,\frac{k+1%
}2\right) $ if $k$ is odd, $(i_k,j_k)=\left( \frac k2+1,\frac k2\right) $ if 
$k$ is even. Call such a cycle {\em simple}. For a simple cycle we have: 
\[
\begin{array}{c}
C= \\ 
\\ 
\left[ 
\begin{array}{cccccccc}
a_{21}-a_{11} & 0 & 0 & \cdots & 0 & \cdots & 0 & 0 \\ 
b_{12}-b_{11} & b_{22}-b_{21} & 0 &  & 0 &  & 0 & 0 \\ 
a_{31}-a_{21} & a_{31}-a_{21} & a_{32}-a_{22} &  & 0 &  & 0 & 0 \\ 
b_{13}-b_{12} & b_{23}-b_{22} & b_{23}-b_{22} &  & 0 &  & 0 & 0 \\ 
\vdots &  &  & \ddots & \vdots & \ddots &  & \vdots \\ 
b_{1\kappa }-b_{1\kappa -1} & b_{2\kappa }-b_{2\kappa -1} & b_{2\kappa
}-b_{2\kappa -1} &  & b_{i_k\kappa }-b_{i_k\kappa -1} &  & 0 & 0 \\ 
a_{11}-a_{\kappa 1} & a_{11}-a_{\kappa 1} & a_{12}-a_{\kappa 2} &  & 
a_{1j_k}-a_{\kappa j_k} &  & a_{1\kappa }-a_{\kappa \kappa } & 0 \\ 
b_{11}-b_{1\kappa } & b_{21}-b_{2\kappa } & b_{21}-b_{2\kappa } & \cdots & 
b_{i_k1}-b_{i_k\kappa } & \cdots & b_{\kappa 1}-b_{\kappa \kappa } & 
b_{11}-b_{1\kappa }
\end{array}
\right]
\end{array}
\]
and 
\[
\begin{array}{c}
-D= \\ 
\\ 
\left[ 
\begin{array}{cccccccc}
0 & a_{21}-a_{11} & a_{22}-a_{12} & \cdots & a_{2j_k}-a_{1\;j_k} & \cdots & 
a_{2\kappa }-a_{1\kappa } & a_{2\kappa }-a_{1\kappa } \\ 
0 & 0 & b_{22}-b_{21} &  & b_{i_k2}-b_{i_k1} &  & b_{\kappa 2}-b_{\kappa 1}
& b_{12}-b_{11} \\ 
0 & 0 & 0 &  & a_{3j_k}-a_{2j_k} &  & a_{3\kappa }-a_{2\kappa } & a_{3\kappa
}-a_{2\kappa } \\ 
0 & 0 & 0 &  & b_{i_k3}-b_{i_k2} &  & b_{\kappa 3}-b_{\kappa 2} & 
b_{13}-b_{12} \\ 
\vdots &  &  & \ddots & \vdots & \ddots &  & \vdots \\ 
0 & 0 & 0 &  & 0 &  & b_{\kappa \kappa }-b_{\kappa \kappa -1} & b_{1\kappa
}-b_{1\kappa -1} \\ 
0 & 0 & 0 &  & 0 &  & 0 & a_{1\kappa }-a_{\kappa \kappa } \\ 
0 & 0 & 0 & \cdots & 0 & \cdots & 0 & 0
\end{array}
\right]
\end{array}
\]
 
Thus, 
\[
\begin{array}{c}
\left[ \lambda C-D\right] = \\ 
\\ 
\left[ 
\begin{array}{ccccccc}
\lambda \left( a_{21}-a_{11}\right) & a_{21}-a_{11} & a_{22}-a_{12} & \cdots
& a_{2j_k}-a_{1\;j_k} & \cdots & a_{2\kappa }-a_{1\kappa } \\ 
\lambda \left( b_{12}-b_{11}\right) & \lambda \left( b_{22}-b_{21}\right) & 
b_{22}-b_{21} &  & b_{i_k2}-b_{i_k1} &  & b_{12}-b_{11} \\ 
\lambda \left( a_{31}-a_{21}\right) & \lambda \left( a_{31}-a_{21}\right) & 
\lambda \left( a_{32}-a_{22}\right) &  & a_{3j_k}-a_{2j_k} &  & a_{3\kappa
}-a_{2\kappa } \\ 
\lambda \left( b_{13}-b_{12}\right) & \lambda \left( b_{23}-b_{22}\right) & 
\lambda \left( b_{23}-b_{22}\right) &  & b_{i_k3}-b_{i_k2} &  & b_{13}-b_{12}
\\ 
\vdots &  &  & \ddots & \vdots & \ddots & \vdots \\ 
\lambda \left( b_{1\kappa }-b_{1\kappa -1}\right) & \lambda \left(
b_{2\kappa }-b_{2\kappa -1}\right) & \lambda \left( b_{2\kappa }-b_{2\kappa
-1}\right) &  & \lambda \left( b_{i_k\kappa }-b_{i_k\kappa -1}\right) &  & 
b_{1\kappa }-b_{1\kappa -1} \\ 
\lambda \left( a_{11}-a_{\kappa 1}\right) & \lambda \left( a_{11}-a_{\kappa
1}\right) & \lambda \left( a_{12}-a_{\kappa 2}\right) &  & \lambda \left(
a_{1j_k}-a_{\kappa j_k}\right) &  & a_{1\kappa }-a_{\kappa \kappa } \\ 
\lambda \left( b_{11}-b_{1\kappa }\right) & \lambda \left( b_{21}-b_{2\kappa
}\right) & \lambda \left( b_{21}-b_{2\kappa }\right) & \cdots & \lambda
\left( b_{i_k1}-b_{i_k\kappa }\right) & \cdots & \lambda \left(
b_{11}-b_{1\kappa }\right)
\end{array}
\right]
\end{array}
\]
 
Observe that for this matrix: 
\begin{eqnarray*}
&&\ \text{row}\ 1+\text{row}\ 3+\text{row }5+...+\text{row }K-1 \\
\ &=&{\em \ }\text{{\em $\left( 0,\left( 1-\lambda \right) \left(
a_{21}-a_{11}\right) ,\left( 1-\lambda \right) \left( a_{22}-a_{12}\right)
,...,(1-\lambda )\left( a_{i_kj_k}-a_{1j_k}\right) ,..,0\right) $}}
\end{eqnarray*}
and 
\begin{eqnarray*}
&&\text{row }2+\text{row }4+\text{row }6+...+\text{row }K \\
&=&\left( 0,0,\left( 1-\lambda \right) \left( b_{22}-b_{21}\right)
,...,(1-\lambda )\left( b_{i_kj_k}-b_{i_k1}\right) ,...,\left( 1-\lambda
\right) \left( b_{1\kappa }-b_{11}\right) \right)
\end{eqnarray*}
 
Therefore, we can add the odd-numbered rows to row 1 and the even-numbered
rows to row 2 to obtain 
\[
\begin{array}{c}
\left| \lambda C-D\right| = \\ 
\\ 
\left| 
\begin{array}{ccccccc}
0 & 
\begin{array}{c}
\left( 1-\lambda \right) \times \\ 
\left( a_{21}-a_{11}\right)
\end{array}
& \cdots & 
\begin{array}{c}
\left( 1-\lambda \right) \times \\ 
\left( a_{i_kj_k}-a_{1j_k}\right)
\end{array}
& \cdots & 
\begin{array}{c}
\left( 1-\lambda \right) \times \\ 
\left( a_{\kappa \kappa }-a_{1\kappa }\right)
\end{array}
& 0 \\ 
&  &  &  &  &  &  \\ 
0 & 0 &  & 
\begin{array}{c}
\left( 1-\lambda \right) \times \\ 
\left( b_{i_kj_k}-b_{i_k1}\right)
\end{array}
&  & 
\begin{array}{c}
\left( 1-\lambda \right) \times \\ 
\left( b_{\kappa \kappa }-b_{\kappa 1}\right)
\end{array}
& 
\begin{array}{c}
\left( 1-\lambda \right) \times \\ 
\left( b_{1\kappa }-b_{11}\right)
\end{array}
\\ 
&  &  &  &  &  &  \\ 
\lambda \left( a_{31}-a_{21}\right) & \lambda \left( a_{31}-a_{21}\right) & 
& a_{3j_k}-a_{2j_k} &  & a_{3\kappa }-a_{2\kappa } & a_{3\kappa }-a_{2\kappa
} \\ 
\lambda \left( b_{13}-b_{12}\right) & \lambda \left( b_{23}-b_{22}\right) & 
\cdots & b_{i_k3}-b_{i_k2} & \cdots & b_{\kappa 3}-b_{\kappa 2} & 
b_{13}-b_{12} \\ 
\vdots & \vdots & \ddots & \vdots & \ddots &  & \vdots \\ 
\lambda \left( b_{1\kappa }-b_{1\kappa -1}\right) & \lambda \left(
b_{2\kappa }-b_{2\kappa -1}\right) & \cdots & \lambda \left( b_{i_k\kappa
}-b_{i_k\kappa -1}\right) & \cdots & b_{\kappa \kappa }-b_{\kappa \kappa -1}
& b_{1\kappa }-b_{1\kappa -1} \\ 
\lambda \left( a_{11}-a_{\kappa 1}\right) & \lambda \left( a_{11}-a_{\kappa
1}\right) &  & \lambda \left( a_{1j_k}-a_{\kappa j_k}\right) &  & \lambda
\left( a_{1\kappa }-a_{\kappa \kappa }\right) & a_{1\kappa }-a_{\kappa
\kappa } \\ 
\lambda \left( b_{11}-b_{1\kappa }\right) & \lambda \left( b_{21}-b_{2\kappa
}\right) & \cdots & \lambda \left( b_{i_k1}-b_{i_k\kappa }\right) & \cdots & 
\lambda \left( b_{\kappa 1}-b_{\kappa \kappa }\right) & \lambda \left(
b_{11}-b_{1\kappa }\right)
\end{array}
\right|
\end{array}
\]
\[
=\lambda \left( 1-\lambda \right) ^2\times \left| L(\lambda )\right| 
\]
where 
\[
\begin{array}{c}
\left| L(\lambda )\right| \equiv \\ 
\\ 
\left| 
\begin{array}{ccccccc}
0 & a_{21}-a_{11} & \cdots & a_{i_kj_k}-a_{1j_k} & \cdots & a_{\kappa \kappa
}-a_{1\kappa } & 0 \\ 
0 & 0 &  & b_{i_kj_k}-b_{i_k1} &  & b_{\kappa \kappa }-b_{\kappa 1} & 
b_{1\kappa }-b_{11} \\ 
a_{31}-a_{21} & \lambda \left( a_{31}-a_{21}\right) &  & a_{3j_k}-a_{2j_k} & 
& a_{3\kappa }-a_{2\kappa } & a_{3\kappa }-a_{2\kappa } \\ 
b_{13}-b_{12} & \lambda \left( b_{23}-b_{22}\right) &  & b_{i_k3}-b_{i_k2} & 
& b_{\kappa 3}-b_{\kappa 2} & b_{13}-b_{12} \\ 
\vdots &  & \ddots & \vdots & \ddots &  & \vdots \\ 
b_{1\kappa }-b_{1\kappa -1} & \lambda \left( b_{2\kappa }-b_{2\kappa
-1}\right) &  & \lambda \left( b_{i_k\kappa }-b_{i_k\kappa -1}\right) &  & 
b_{\kappa \kappa }-b_{\kappa \kappa -1} & b_{1\kappa }-b_{1\kappa -1} \\ 
a_{11}-a_{\kappa 1} & \lambda \left( a_{11}-a_{\kappa 1}\right) & \cdots & 
\lambda \left( a_{1j_k}-a_{\kappa j_k}\right) & \cdots & \lambda \left(
a_{1\kappa }-a_{\kappa \kappa }\right) & a_{1\kappa }-a_{\kappa \kappa } \\ 
b_{11}-b_{1\kappa } & \lambda \left( b_{21}-b_{2\kappa }\right) &  & \lambda
\left( b_{i_k1}-b_{i_k\kappa }\right) &  & \lambda \left( b_{\kappa
1}-b_{\kappa \kappa }\right) & \lambda \left( b_{11}-b_{1\kappa }\right)
\end{array}
\right|
\end{array}
\]
\smallskip\ 
 
Our claim is that for generic games 
\[
\left| L(1)\right| \neq 0 
\]
and this implies that there does not exist a third unit root. Since $\left|
L(1)\right| $ is a polynomial function of the payoffs $(a_{ij},b_{ij}),$ $%
\left| L(1)\right| =0$ on an open set in ${\bf {\Bbb R}}^{\kappa ^2}\times 
{\bf {\Bbb R}}^{\kappa ^2}$ if and only if $\left| L(1)\right| =0\,$
identically on ${\bf {\Bbb R}}^{\kappa ^2}\times {\bf {\Bbb R}}^{\kappa ^2}.$
But this is false since if $A$ and $B$ are defined by 
\begin{equation}
a_{ij}=\left\{ 
\begin{array}{ll}
0 & \text{if }i=j=1 \\ 
1 & \text{if }i=j>1 \\ 
0 & \text{if }i\neq j
\end{array}
\right.  \label{aaaa}
\end{equation}
\begin{equation}
b_{ij}=\left\{ 
\begin{array}{rl}
-1 & \text{if }i=j \\ 
0 & \text{if }i\neq j
\end{array}
\right.  \label{bbbb}
\end{equation}
then we obtain 
\begin{equation}
\begin{array}{c}
\left| L(1)\right| =\left| 
\begin{array}{ccccccccccc}
0 & 0 & 1 & 0 & 1 & 0 & 1 & \cdots & 0 & 1 & 0 \\ 
0 & 0 & -1 & 0 & -1 & 0 & -1 &  & 0 & -1 & 1 \\ 
0 & 0 & -1 & -1 & 1 & 1 & 0 &  & 0 & 0 & 0 \\ 
0 & 1 & 1 & -1 & -1 & 0 & 0 &  & 0 & 0 & 0 \\ 
0 & 0 & 0 & 0 & -1 & -1 & 1 &  & 0 & 0 & 0 \\ 
0 & 0 & 0 & 1 & 1 & -1 & -1 &  & 0 & 0 & 0 \\ 
0 & 0 & 0 & 0 & 0 & 0 & -1 &  & 0 & 0 & 0 \\ 
\vdots &  &  &  &  &  &  & \ddots &  &  & \vdots \\ 
0 & 0 & 0 & 0 & 0 & 0 & 0 &  & 1 & 0 & 0 \\ 
0 & 0 & 0 & 0 & 0 & 0 & 0 &  & 0 & 0 & 0 \\ 
0 & 0 & 0 & 0 & 0 & 0 & 0 &  & -1 & 1 & 1 \\ 
0 & 0 & 0 & 0 & 0 & 0 & 0 &  & -1 & -1 & 0 \\ 
0 & 0 & 0 & 0 & 0 & 0 & 0 &  & 0 & -1 & -1 \\ 
-1 & 0 & 0 & 0 & 0 & 0 & 0 & \cdots & 1 & 1 & -1
\end{array}
\right| \\ 
\\ 
=-\frac 12(\kappa -2)(\kappa -1)\neq 0
\end{array}
\label{elle}
\end{equation}
recalling that $\kappa >2$. (See Appendix A for the explicit evaluation of
this determinant.)
 
So far we have only considered the simple cycle (\ref{hyll}). However, the
analysis for more complicated switching patterns is similar. A cycle where
players switch several times consecutively results, after some rearrangement
of rows and columns, in a matrix similar to $L\left( 1\right) .$ One then
checks that a determinant similar to (\ref{elle}) is not zero. The somewhat
laborious details can be found in Appendix B.
 
We have thus established that for generic games, when $\rho =2$, the
algebraic multiplicity of the unit root is $2.$
 
\paragraph{The Induction Step}
 
Suppose there exists $\overline{\rho }\geq 2$ such that the statement of
Lemma \ref{algeb} is true for any cycle such that $\rho _1+\rho _2=\overline{%
\rho }$. Now consider an arbitrary cycle $c$ of length $K$ in which the
number of reversals is $\rho _1+\rho _2=\overline{\rho }+1$. Since $%
\overline{\rho }+1\geq 3$, there is a player, say player $1$, who switches
``back'' to some strategy during the cycle. We may, for simplicity, suppose
that this occurs in run $K-1$. Moreover, we can relabel the strategies so
that the cycle $c$ is of the form: 
\[
c=\left\langle \stackunder{1}{11},\;...\;,\;\stackunder{k_0}{\kappa j_{k_0}}%
\;,\;...\;,\;\stackunder{K-2}{\left( \kappa -1\right) \kappa },\;\stackunder{%
K-1}{\kappa \kappa },\stackunder{K}{\;\kappa 1}\right\rangle 
\]
 
The numbers under the strategy labels are the runs; thus player 1 uses
strategy $\kappa \neq 1$ in run $k_0,$ then plays some other strategies, and
then switches back (``reverts'') to playing $\kappa $ again in run $K-1.$%
\footnote{%
For ease of notation, we are assuming that the players alternate in
switching in the last three runs of the cycle (that is, player 1 switches
from $\kappa -1$ to $\kappa $, then player 2 switches from $\kappa $ to $1$,
and finally player 1 switches from $\kappa $ to 1), but it should be clear
that results do not depend on this.}
 
Consider the matrix $C-D$ that corresponds to the cycle $c.$
 
Add column $K-2$ of $C-D$ to column $K$ and subtract column $K-1$ from
column $K.$ Add row $K-2$ to row $K.$ Call the resulting matrix $\widehat{X}.
$
 
For $k\neq K$, the $k$th element in the $K$th column of $\widehat{X}$ is 
\begin{equation}
\left( a_{i_{k+1}\kappa }-a_{i_k\kappa }\right) -\left( a_{i_{k+1}\kappa
}-a_{i_k\kappa }\right) +\left( a_{i_{k+1}1}-a_{i_k1}\right) =\left(
a_{i_{k+1}1}-a_{i_k1}\right)  \label{nya}
\end{equation}
if $\sigma (k)=1$, and 
\begin{equation}
\left( b_{\left( \kappa -1\right) j_{k+1}}-b_{\left( \kappa -1\right)
j_k}\right) -\left( b_{1j_{k+1}}-b_{1j_k}\right) +\left(
b_{1j_{k+1}}-b_{1j_k}\right) =\left( b_{\left( \kappa -1\right)
j_{k+1}}-b_{\left( \kappa -1\right) j_k}\right)  \label{nyb}
\end{equation}
if $\sigma (k)=2$.
 
Similarly, for $k\neq K$, the $k$th element in the $K$th row of $\widehat{X}$
is 
\begin{equation}
\left( a_{1j_k}-a_{\kappa j_k}\right) +\left( a_{\kappa j_k}-a_{\kappa
-1,j_k}\right) =a_{1j_k}-a_{\kappa -1,j_k}  \label{asa}
\end{equation}
 
Finally, delete row $K-2$ and column $K-1$ from $\widehat{X}$. Call the new
matrix $\overline{X}$.
 
Now consider the $K-1$ cycle 
\begin{equation}
\overline{c}=\left\langle \stackunder{1}{11},\;...\;,\;\stackunder{k_0}{%
\kappa j_{k_0}}\;,\;...\;,\;\stackunder{K-2}{\left( \kappa -1\right) \kappa }%
,\;\stackunder{K-1}{\left( \kappa -1\right) 1}\right\rangle  \label{sd}
\end{equation}
which is the same as $c$ except that the sequence $\left( (\kappa -1)\kappa
,\;\kappa \kappa ,\;\kappa 1\right) $ at the end has been replaced by the
shorter sequence $\left( (\kappa -1)\kappa ,\;(\kappa -1)1\right) $. In $%
\overline{c}$ player 1 makes a direct transition from strategy $(\kappa -1)$
to strategy $1$. Otherwise the cycle is as before.
 
Note that the number of reversions in $\overline{c}$ is $\overline{\rho }$.
Let $\overline{C}-\overline{D}$ denote the matrix corresponding to the cycle 
$\bar c.$
 
We now claim that $\overline{X}=\overline{C}-\overline{D}$. Indeed, the
elements in (\ref{nya}) and (\ref{nyb}) are the entries in the last column
of $\overline{X}$, and they correspond to a run where player 1 uses $\kappa
-1$ and player 2 uses $1$. This is precisely the last run in the cycle $\bar
c.$ Similarly, the last row of the matrix $\overline{X}$ corresponds to a
switch from strategy $\kappa -1$ to strategy $1$ by player 1. The other rows
and columns have not been disturbed, and hence $\overline{X}=\overline{C}-%
\overline{D}$.
 
We have shown that we can write 
\begin{equation}
\left| C-D\right| =\left| 
\begin{array}{cc}
\overline{C}-\overline{D} & \gamma \\ 
\alpha & \beta
\end{array}
\right|  \label{DetCD}
\end{equation}
since the operations we have performed on $C-D$ do not affect the
determinant. In (\ref{DetCD}), \thinspace $\overline{C}-\overline{D}$ is the
matrix resulting from the smaller cycle $\bar c$, $\left[ 
\begin{array}{c}
\gamma \\ 
\beta
\end{array}
\right] $ is the $\left( K-1\right) $th column of $\widehat{X}$, and $\left[ 
\begin{array}{cc}
\alpha & \beta
\end{array}
\right] $ is the $(K-2)$th row of $\widehat{X}$. Thus in particular, $\beta
=a_{\kappa \kappa }-a_{\kappa -1,\kappa }$.
 
In the form (\ref{DetCD}), $\gamma $ is a linear combination of the columns
of $\overline{C}-\overline{D}$, and $\alpha $ is a linear combination of the
rows. That is, there exist $\delta $ and $\eta \,$ such that: 
\begin{equation}
\begin{array}{c}
\alpha +\delta \left( \overline{C}-\overline{D}\right) =0 \\ 
\gamma +\left( \overline{C}-\overline{D}\right) \eta =0
\end{array}
\label{alpha1}
\end{equation}
More precisely, we can assume (without loss of generality) that $\sigma
(k_0)=1$. Then $\eta $ is defined by: 
\[
\eta _k=\left\{ 
\begin{array}{cl}
-1 & k=k_0 \\ 
-1 & \text{if }k_0<k\leq K-2,\sigma (k-1)=2\text{ and }\sigma (k)=1 \\ 
1 & \text{if }k_0<k\leq K-2,\sigma (k-1)=1\text{ and }\sigma (k)=2 \\ 
0 & \text{otherwise}
\end{array}
\right. 
\]
and $\delta $ is defined by: 
\[
\delta _k=\left\{ 
\begin{array}{ll}
1 & \text{if }k_0\leq k<K-2\text{ and }\sigma (k)=1 \\ 
0 & \text{otherwise.}
\end{array}
\right. 
\]
 
It can then be checked that (\ref{alpha1}) holds.
 
Now consider the matrix $T(\lambda )$ that results when the row and column
operations described above are performed on $\lambda C-D$ (rather than on $%
C-D$). To be precise, we have 
\[
\begin{array}{c}
\left[ \lambda C-D\right] = \\ 
\\ 
\left[ 
\begin{array}{ccccccc}
\lambda \left( a_{21}-a_{11}\right) & \cdots & a_{2j_k}-a_{1\;j_k} & \cdots
& a_{2\kappa }-a_{1\kappa } & a_{2\kappa }-a_{1\kappa } & a_{21}-a_{11} \\ 
\vdots & \ddots &  & \ddots &  &  & \vdots \\ 
\lambda (a_{\kappa 1}-a_{\kappa -1,1}) &  & \lambda (a_{\kappa j_k}-a_{\kappa
-1\;j_k}) &  & \lambda (a_{\kappa \kappa }-a_{\kappa -1\kappa }) & a_{\kappa
\kappa }-a_{\kappa -1\kappa } & a_{\kappa 1}-a_{\kappa -1,1} \\ 
\lambda \left( b_{11}-b_{1\kappa }\right) &  & \lambda \left(
b_{i_k1}-b_{i_k\kappa }\right) &  & \lambda (b_{\kappa -1,1}-b_{\kappa
-1,\kappa }) & \lambda (b_{\kappa 1}-b_{\kappa \kappa }) & b_{\kappa
1}-b_{\kappa \kappa } \\ 
\lambda \left( a_{11}-a_{\kappa 1}\right) & \cdots & \lambda \left(
a_{1j_k}-a_{\kappa j_k}\right) & \cdots & \lambda \left( a_{1\kappa
}-a_{\kappa \kappa }\right) & \lambda \left( a_{1\kappa }-a_{\kappa \kappa
}\right) & \lambda (a_{11}-a_{\kappa 1})
\end{array}
\right]
\end{array}
\]
 
The operations are: add column $K-2$ to column $K$ and subtract column $K-1$
from column $K$. Add row $K-2$ to row $K$. Finally interchange row $K-2$ and
row $K,$ and column $K-1$ and column $K$. The result is 
\[
\begin{array}{c}
T(\lambda )= \\ 
\\ 
\left[ 
\begin{array}{cccccc}
\lambda \left( a_{21}-a_{11}\right) & \cdots & a_{2j_k}-a_{1\;j_k} & \cdots
& a_{21}-a_{11} & a_{2\kappa }-a_{1\kappa } \\ 
&  &  &  &  &  \\ 
\vdots & \ddots &  & \ddots & \vdots & \vdots \\ 
\lambda \left( a_{11}-a_{\kappa -1,1}\right) &  & \lambda \left(
a_{1j_k}-a_{\kappa -1j_k}\right) &  & 
\begin{array}{c}
\lambda (a_{11}-a_{\kappa -1\kappa })+ \\ 
\left( 1-\lambda \right) \left( a_{\kappa 1}-a_{\kappa \kappa }\right)
\end{array}
& 
\begin{array}{c}
\lambda \left( a_{1\kappa }-a_{\kappa \kappa }\right) + \\ 
a_{\kappa \kappa }-a_{\kappa -1\kappa }
\end{array}
\\ 
&  &  &  &  &  \\ 
\lambda \left( b_{11}-b_{1\kappa }\right) &  & \lambda \left(
b_{i_k1}-b_{i_k\kappa }\right) &  & 
\begin{array}{c}
(1-\lambda )\left( b_{\kappa 1}-b_{\kappa \kappa }\right) + \\ 
\lambda (b_{\kappa -1,1}-b_{\kappa -1,\kappa })
\end{array}
& \lambda (b_{\kappa 1}-b_{\kappa \kappa }) \\ 
&  &  &  &  &  \\ 
\lambda (a_{\kappa 1}-a_{\kappa -1,1}) & \cdots & \lambda (a_{\kappa
j_k}-a_{\kappa -1\;j_k}) & \cdots & 
\begin{array}{c}
a_{\kappa 1}-a_{\kappa -1,1}+ \\ 
(\lambda -1)(a_{\kappa \kappa }-a_{\kappa -1\kappa })
\end{array}
& a_{\kappa \kappa }-a_{\kappa -1\kappa }
\end{array}
\right]
\end{array}
\]
 
Observe that $\left| T(\lambda )\right| =\left| \lambda C-D\right| .$ From (%
\ref{DetCD}) it follows that: 
\begin{equation}
T(\lambda )=\left[ 
\begin{array}{cc}
\overline{L}_1(\lambda ) & \gamma (\lambda ) \\ 
\alpha (\lambda ) & \beta (\lambda )
\end{array}
\right]  \label{TLambda}
\end{equation}
where $\overline{L}_1(\lambda )$ is a\thinspace $\left( K-1\right) \times
\left( K-1\right) $ matrix with the property that $\overline{L}_1(1)=%
\overline{C}-\overline{D}.$
 
Now add $\delta \left[ 
\begin{array}{cc}
\overline{L}_1(\lambda ) & \gamma (\lambda )
\end{array}
\right] $ to the last row of $T(\lambda )$, and add $\left[ 
\begin{array}{c}
\overline{L}_1(\lambda ) \\ 
\alpha (\lambda )
\end{array}
\right] \eta $ to the last column of $T(\lambda ).$ By (\ref{alpha1}), this
results in a matrix: 
\begin{equation}
T_1(\lambda )=\left[ 
\begin{array}{cc}
\overline{L}_1(\lambda ) & (1-\lambda )\gamma _0 \\ 
(1-\lambda )\alpha _0 & (1-\lambda )\beta _0
\end{array}
\right]  \label{TTLambda}
\end{equation}
where again we have that $\left| T_1(\lambda )\right| =\left| \lambda
C-D\right| $.
 
Consider the form of $\beta _0$. After adding $\delta \left[ 
\begin{array}{cc}
\overline{L}_1(\lambda ) & \gamma (\lambda )
\end{array}
\right] $ to the last row of $T(\lambda )$, the last row is: 
\[
\left( 0,...,0,...,\left( 1-\lambda \right) (a_{i_kj_k}-a_{\kappa
\;j_k}),...,\left( 1-\lambda \right) (a_{\kappa -1\kappa }-a_{\kappa \kappa
}),\left( 1-\lambda \right) (a_{\kappa -1\kappa }-a_{\kappa \kappa
}),0\right) 
\]
where we observe that the $j$th entry is zero if $j\leq k_0$. Multiplying
this row by $\eta $, the result is 
\[
(1-\lambda )\beta _0=(1-\lambda )\sum \eta _k(a_{i_kj_k}-a_{\kappa \;j_k}) 
\]
 
Observe that for generic games, $\beta _0\neq 0$ .
 
We now investigate the unit roots of the matrix $C^{-1}D$. Since 
\[
\left| \lambda I-C^{-1}D\right| =\left| C^{-1}\right| \left| \lambda
C-D\right| 
\]
we can equally well investigate the roots of the polynomial $\left| \lambda
C-D\right| =0$. We can write: 
\begin{equation}
\begin{array}{cl}
\left| \lambda C-D\right| & =\left| T_1(\lambda )\right| \\ 
& =(1-\lambda )\left| L_1(\lambda )\right|
\end{array}
\label{lambdacd}
\end{equation}
where 
\[
L_1(\lambda )=\left[ 
\begin{array}{cc}
\overline{L}_1(\lambda ) & (1-\lambda )\gamma _0 \\ 
\alpha _0 & \beta _0
\end{array}
\right] 
\]
 
Hence there is at least one unit root. If there is another one, then $\left|
L_1(1)\right| =0$ and there exists a vector $y=(\bar y,y_K)$ $\neq 0$ such
that 
\begin{equation}
L_1(1)y=\left[ 
\begin{array}{cc}
\overline{L}_1(1) & 0 \\ 
\alpha _0 & \beta _0
\end{array}
\right] y=0  \label{yyk}
\end{equation}
 
Since $\overline{L}_1(1)=$ $\overline{C}-\overline{D}$, this implies that 
\begin{equation}
\begin{array}{r}
\left( \overline{C}-\overline{D}\right) \bar y=0 \\ 
\alpha _0\bar y+y_K\beta _0=0
\end{array}
\label{y(C-D)}
\end{equation}
 
If $\bar y=0$, then $y_K\neq 0$. Since $\beta _0\neq 0$, this contradicts (%
\ref{y(C-D)}). Thus, $\bar y\neq 0.$ Suppose without loss of generality that 
$\bar y_j=1$ for some $j<K.$
 
Since $L_1(1)y=0$, we have 
\[
L_1(\lambda )y=(1-\lambda )v 
\]
for some vector $v$. Replace the $j$th column of $L_1(\lambda )$ by $%
(1-\lambda )v$ and call the resulting matrix $T_2(\lambda )$. Let 
\[
L_2(\lambda )=\left[ 
\begin{array}{cc}
\overline{L}_2(\lambda ) & (1-\lambda )\gamma _0 \\ 
\alpha (\lambda ) & \beta _0
\end{array}
\right] 
\]
be the matrix that obtains when the $j$th column of $L_1(\lambda )$ is
replaced by $v$. Then 
\begin{equation}
\left| \lambda C-D\right| =(1-\lambda )\left| L_1(\lambda )\right|
=(1-\lambda )\left| T_2(\lambda )\right| =(1-\lambda )^2\left| L_2(\lambda
)\right|  \label{yyy}
\end{equation}
 
Suppose that there is a third unit root. Then $\left| L_2(1)\right| =0$ and
there is $z=\left( \bar z,z_K\right) \neq 0$ such that 
\begin{equation}
L_2(1)z=\left[ 
\begin{array}{cc}
\overline{L}_2(1) & 0 \\ 
\alpha (1) & \beta _0
\end{array}
\right] z=0  \label{zzk}
\end{equation}
 
Again $\beta _0\neq 0\,$implies that $\bar z\neq 0$. Therefore, as in (\ref
{yyy}), we can use $z$ to show that 
\[
\left| \lambda C-D\right| =(1-\lambda )^3\left| L_3(\lambda )\right| 
\]
for some matrix $L_3(\lambda )$. This procedure can be repeated until $%
\left| L_k(1)\right| \neq 0$ for some $k$.
 
Now we note that one unit root resulted directly from (\ref{lambdacd}).
After that each step of the procedure corresponds not only to a unit root of 
$\left| \lambda C-D\right| =0$, but clearly also to a unit root of $\left|
\lambda \overline{C}-\overline{D}\right| =0$, where the matrix $[\lambda 
\overline{C}-\overline{D}]$ comes from the smaller cycle $\bar c$. By the
induction hypothesis, $\overline{C}^{-1}\overline{D}$ has exactly $%
\overline{\rho }$ unit roots. Therefore, we can repeat the argument
precisely $\overline{\rho }$ times. Therefore, 
\[
\left| \lambda C-D\right| =(1-\lambda )^{\overline{\rho }+1}\left| L_{%
\overline{\rho }}(\lambda )\right| 
\]
and $\left| L_{\overline{\rho }}(1)\right| \neq 0$. This completes the
induction step and the proof of Lemma \ref{algeb}. \TeXButton{End Proof}
{\endproof}
 
\section{Non-convergence}
 
We can now complete the proof of Theorem \ref{Main}.
 
\paragraph{Proof of Theorem \ref{Main}}
 
Let the cycle be such that there are $\rho $ reversions. As a result of
Lemma \ref{algeb} we know that for generic games, $\lambda _0=0$ is an Eigen
root of $C^{-1}D$ and there are exactly $\rho $ unit roots: 
\[
\lambda _1=\lambda _2=...=\lambda _\rho =1 
\]
 
There are $K-\rho -1$ remaining Eigen roots. Since $K=2\kappa +\rho -2$, the
number of remaining roots is odd. Since the number of complex roots is
always even, there is an odd number of {\em real} roots. Not all of these
can equal $-1$ since from Lemma \ref{prod} we have, 
\[
\lambda _{\rho +1}\times \lambda _{\rho +2}\times ...\times \lambda _{K-1}=1 
\]
 
This implies that there is a non-zero real root, say $\lambda _{\rho +1},$
that is neither $1$ nor $-1$, and hence $\left| \lambda
_{\rho +1}\right| \neq 1$. Since the product of all non-zero roots is one,
there exists a root, $\lambda _s$ such that $\left| \lambda _s\right| >1.$
Let $\lambda _S$ be the dominant root, that is, the root with the largest
absolute value. If $\lambda _S$ is either negative or complex, the cycle
cannot persist since run-lengths would become negative. Thus $\lambda _S$
must be real and positive and hence $\lambda _S>1$.
 
The final step of the argument is similar to that used by Shapley \cite
{Shapley}.
 
Consider the (arbitrary) vectors $P^0$ and $Q^0$ that describe the ``initial
conditions'' before the cycle begins. Let $n(0)$ be the vector of run
lengths in the initial round that the cycle is played. We know from (\ref
{swk.1}) and (\ref{swk.2}) that: 
\[
(\alpha _{i_{k+1}}-\alpha _{i_k})QE_kn(0)=-(\alpha _{i_{k+1}}-\alpha
_{i_k})Q^0 
\]
and 
\[
(\beta _{j_{k+1}}-\beta _{j_k})PE_kn(0)=-(\beta _{j_{k+1}}-\beta _{j_k})P^0 
\]
which can be rewritten as: 
\[
n(0)=\left( I-C^{-1}D\right) m\left( 0\right) 
\]
where $m\left( 0\right) \in {\Bbb R}^K$ is a vector determined by the
initial conditions $P^0$ and $Q^0.$
 
Thus: 
\[
n(r)=\left( C^{-1}D\right) ^r\left( I-C^{-1}D\right) m\left( 0\right)
=\left( I-C^{-1}D\right) \left( C^{-1}D\right) ^rm\left( 0\right) 
\]
 
Define $m\left( r\right) \equiv \left( C^{-1}D\right) ^rm\left( 0\right) $.
By the Jordan decomposition theorem (see Hirsch and Smale \cite{Hirsch}) we
can write: 
\[
m\left( r\right) =c_1m_1\left( r\right) +c_2m_2\left( r\right)
+...+c_Sm_S\left( r\right) 
\]
where each $m_s\left( r\right) $ corresponds to a different Eigen root $%
\lambda _s$ of $C^{-1}D$ and is of the form: 
\[
m_s\left( r\right) =\sum_{i=0}^{k_s}\frac{r^i}{i!}\lambda _s^{r-i}x_{si} 
\]
where each $x_{si}$ is a generalized Eigen vector ($x_{si}\in \ker \left(
\lambda _sI-C^{-1}D\right) ^i$)$.$
 
Then $n\left( r\right) =\left( I-C^{-1}D\right) m\left( r\right) $ is also a
sum of similar terms, one of which is: 
\[
c_S\lambda _S^r\left( I-C^{-1}D\right) x_{S0}\neq 0 
\]
where as above, $\lambda _S$ is the dominant root.
 
Since $\lambda _S>1$ the run-lengths grow exponentially. Hence, as in
Shapley \cite{Shapley}, for generic initial conditions, CFP cannot converge.
 
This completes the proof of Theorem \ref{Main}. \TeXButton{End Proof}
{\endproof}
 
\section{Discussion\label{discussion}}
 
The class of $2\times 2$ games forms an exception to our main result: there
is an open set of $2\times 2$ games with a unique equilibrium in mixed
strategies for which every CFP is convergent. However, it can be shown that
every $2\times 2$ game with a unique equilibrium in mixed strategies has the
same best-response correspondence as a zero-sum game. Since CFP depends only
on the best-response correspondence, the $2\times 2$ exception is a
consequence of this equivalence.
 
We now present an example in order to demonstrate that there exist
(non-generic) non-zero sum games and initial conditions for which CFP can
converge to a mixed strategy equilibrium in which more than two strategies
are used.
 
\begin{example}
``Rock-Paper-Scissors'': 
\[
\begin{tabular}{cccc|}
\multicolumn{1}{c|}{} & $R$ & $P$ & \multicolumn{1}{c}{$S$} \\ 
\cline{2-4}\cline{1-1}
\multicolumn{1}{c|}{$R$} & \multicolumn{1}{c|}{$\;0,\;0$} & 
\multicolumn{1}{c|}{$-1,\;\alpha $} & $\;\alpha ,-1$ \\ \cline{2-4}
\multicolumn{1}{c|}{$P$} & \multicolumn{1}{c|}{$\;\alpha ,-1$} & 
\multicolumn{1}{c|}{$\;0,\;0$} & $-1,\;\alpha $ \\ \cline{2-4}
\multicolumn{1}{c|}{$S$} & \multicolumn{1}{c|}{$-1,\;\alpha $} & 
\multicolumn{1}{c|}{$\;\alpha ,-1$} & $\;0,\;0$ \\ \cline{2-4}
\end{tabular}
\]
\end{example}
 
There exists a unique equilibrium which is completely mixed: each player
assigns equal probabilities to each of the three pure strategies.
 
The game is symmetric and hence non-generic. Consider a CFP process with an
initial condition satisfying $p(1)=q(1).$ Notice that the initial condition
is also non-generic.
 
It can be shown that if $\alpha \geq 1,$ any such CFP {\em converges} in a
cyclical manner; both players play the same pure strategy at all times and
follow the cycle: $\left( R,R\right) \rightarrow \left( P,P\right)
\rightarrow \left( S,S\right) .$
 
For this cyclical CFP we have: 
\[
C^{-1}D=\left[ 
\begin{array}{ccc}
0 & -\alpha ^{-1} & 1+\alpha ^{-1} \\ 
0 & -\left( \alpha ^{-1}+\alpha ^{-2}\right) & 1+\alpha ^{-1}+\alpha ^{-2}
\\ 
0 & -\left( \alpha ^{-1}+\alpha ^{-2}+\alpha ^{-3}\right) & 1+\alpha
^{-1}+\alpha ^{-2}+\alpha ^{-3}
\end{array}
\right] 
\]
and the Eigen roots of this matrix are: $0,1$ and $\alpha ^{-3}.$ For $%
\alpha \geq 1,$ the largest root is, therefore, $1.$ This implies that the
run-lengths are, in the limit, constant and the CFP converges.
 
Notice that if $\alpha >1,$ the product of the non-zero roots is {\em less}
than $1$; and thus the conclusion of Lemma \ref{prod} does not hold. This is
because in the cycle given above, both players switch simultaneously. Recall
that the assumption that the players did not switch simultaneously played an
important role in the proof of Lemma \ref{prod}.
 
Thus we have a family of games in which CFP converges cyclically to a mixed
strategy equilibrium with more than two strategies; of course, the class of
games is non-generic, as are the initial conditions.\newpage\ \ 
 
\section{Appendix A}
 
Consider the determinant of the form 
\begin{equation}
D=\left| 
\begin{array}{ccccccccccc}
0 & 0 & \delta _1 & 0 & \delta _2 & 0 & \delta _3 & \cdots & 0 & \delta
_{\kappa -1} & 0 \\ 
0 & 0 & -\delta _1 & 0 & -\delta _2 & 0 & -\delta _3 &  & 0 & -\delta
_{\kappa -1} & \delta _0 \\ 
0 & 0 & -1 & -1 & 1 & 1 & 0 &  & 0 & 0 & 0 \\ 
0 & 1 & 1 & -1 & -1 & 0 & 0 &  & 0 & 0 & 0 \\ 
0 & 0 & 0 & 0 & -1 & -1 & 1 &  & 0 & 0 & 0 \\ 
0 & 0 & 0 & 1 & 1 & -1 & -1 &  & 0 & 0 & 0 \\ 
0 & 0 & 0 & 0 & 0 & 0 & -1 &  & 0 & 0 & 0 \\ 
\vdots &  &  &  &  &  &  & \ddots &  &  & \vdots \\ 
0 & 0 & 0 & 0 & 0 & 0 & 0 &  & 1 & 0 & 0 \\ 
0 & 0 & 0 & 0 & 0 & 0 & 0 &  & 0 & 0 & 0 \\ 
0 & 0 & 0 & 0 & 0 & 0 & 0 &  & -1 & 1 & 1 \\ 
0 & 0 & 0 & 0 & 0 & 0 & 0 &  & -1 & -1 & 0 \\ 
0 & 0 & 0 & 0 & 0 & 0 & 0 &  & 0 & -1 & -1 \\ 
-1 & 0 & 0 & 0 & 0 & 0 & 0 & \cdots & 1 & 1 & -1
\end{array}
\right|  \label{ellehat}
\end{equation}
where each $\delta _j$ is either zero or one.
 
\begin{lemma}
\label{jujj}$D=\sum_{j=1}^{\kappa -1}j\delta _j-\sum_{j=1}^{\kappa
-1}(\kappa -1)\delta _j$.
\end{lemma}
 
\TeXButton{Proof}{\proof}Multiply the even-numbered columns by $(-1)$, add
row 1 to row 2, and finally expand by the first column (which has only
one non-zero entry). The result is:
 
\[
D=\left| 
\begin{array}{cccccccccc}
0 & \delta _1 & 0 & \delta _2 & 0 & \delta _3 & \cdots & 0 & \delta _{\kappa
-1} & 0 \\ 
0 & 0 & 0 & 0 & 0 & 0 &  & 0 & 0 & -1 \\ 
0 & -1 & 1 & 1 & -1 & 0 &  & 0 & 0 & 0 \\ 
-1 & 1 & 1 & -1 & 0 & 0 &  & 0 & 0 & 0 \\ 
0 & 0 & 0 & -1 & 1 & 1 &  & 0 & 0 & 0 \\ 
0 & 0 & -1 & 1 & 1 & -1 &  & 0 & 0 & 0 \\ 
0 & 0 & 0 & 0 & 0 & -1 &  & 0 & 0 & 0 \\ 
\vdots &  &  &  &  &  & \ddots &  &  & \vdots \\ 
0 & 0 & 0 & 0 & 0 & 0 &  & -1 & 0 & 0 \\ 
0 & 0 & 0 & 0 & 0 & 0 &  & 0 & 0 & 0 \\ 
0 & 0 & 0 & 0 & 0 & 0 &  & 1 & 1 & -1 \\ 
0 & 0 & 0 & 0 & 0 & 0 &  & 1 & -1 & 0 \\ 
0 & 0 & 0 & 0 & 0 & 0 & \cdots & 0 & -1 & 1
\end{array}
\right| 
\]
 
To column 1, add all the remaining columns. To column 2, add all the
columns except column 1, etc. The result is:
 
\[
D=\left| 
\begin{array}{cccccccccc}
\sum_{j=1}^{\kappa -1}\delta _j & \sum_{j=1}^{\kappa -1}\delta _j & 
\sum_{j=2}^{\kappa -1}\delta _j & \sum_{j=2}^{\kappa -1}\delta _j & \cdots & 
\delta _{\kappa -1}+\delta _{\kappa -2} & \delta _{\kappa -1}+\delta
_{\kappa -2} & \delta _{\kappa -1} & \delta _{\kappa -1} & 0 \\ 
-1 & -1 & -1 & -1 &  & -1 & -1 & -1 & -1 & -1 \\ 
0 & 0 & 1 & 0 &  & 0 & 0 & 0 & 0 & 0 \\ 
0 & 1 & 0 & -1 &  & 0 & 0 & 0 & 0 & 0 \\ 
\vdots &  &  &  & \ddots &  &  &  &  & \vdots \\ 
0 & 0 & 0 & 0 &  & 1 & 0 & -1 & 0 & 0 \\ 
0 & 0 & 0 & 0 &  & 0 & -1 & 0 & 0 & 0 \\ 
0 & 0 & 0 & 0 &  & 0 & 0 & 1 & 0 & -1 \\ 
0 & 0 & 0 & 0 &  & 0 & 1 & 0 & -1 & 0 \\ 
0 & 0 & 0 & 0 & \cdots & 0 & 0 & 0 & 0 & 1
\end{array}
\right| 
\]
 
Add the second last column to the fourth last, the fourth last to the sixth
last etc. to get 
\[
D=\left| 
\begin{array}{ccccccccc}
\sum_{j=1}^{\kappa -1}\delta _j & \sum_{j=1}^{\kappa -1}j\delta _j & 
\sum_{j=2}^{\kappa -1}\delta _j & \sum_{j=2}^{\kappa -1}(j-1)\delta _j & 
\cdots & 2\delta _{\kappa -1}+\delta _{\kappa -2} & \delta _{\kappa -1} & 
\delta _{\kappa -1} & 0 \\ 
-1 & -(\kappa -1) & -1 & -(\kappa -2) &  & -2 & -1 & -1 & -1 \\ 
0 & 0 & 1 & 0 &  & 0 & 0 & 0 & 0 \\ 
0 & 0 & 0 & -1 &  & 0 & 0 & 0 & 0 \\ 
\vdots &  &  &  & \ddots &  &  &  & \vdots \\ 
0 & 0 & 0 & 0 &  & 0 & -1 & 0 & 0 \\ 
0 & 0 & 0 & 0 &  & -1 & 0 & 0 & 0 \\ 
0 & 0 & 0 & 0 &  & 0 & 1 & 0 & -1 \\ 
0 & 0 & 0 & 0 &  & 0 & 0 & -1 & 0 \\ 
0 & 0 & 0 & 0 & \cdots & 0 & 0 & 0 & 1
\end{array}
\right| 
\]
 
Thus, 
\begin{equation}
D=\left| 
\begin{array}{cc}
\sum_{j=1}^{\kappa -1}\delta _j & \sum_{j=1}^{\kappa -1}j\delta _j \\ 
&  \\ 
-1 & -(\kappa -1)
\end{array}
\right| =\sum_{j=1}^{\kappa -1}j\delta _j-\sum_{j=1}^{\kappa -1}(\kappa
-1)\delta _j  \label{d}
\end{equation}
 
\TeXButton{End Proof}{\endproof}\bigskip\ 
 
Since $\delta _j\in \{0,1\}$ by assumption, we have:
 
\begin{corollary}
$D=0$ if and only if $\delta _1=\delta _2=...=\delta _{\kappa -2}=0$.
\end{corollary}
 
Finally, observe that $\left| L(1)\right| $ from (\ref{elle}) is of the form
(\ref{ellehat}) with each $\delta _j=1$. Thus, 
\[
\left| L(1)\right| =\sum_{j=1}^{\kappa -1}j-\sum_{j=1}^{\kappa -1}(\kappa
-1)=\frac{\kappa (\kappa -1)}2-(\kappa -1)^2=-\frac 12(\kappa -1)\left(
\kappa -2\right) 
\]
\newpage\ 
 
\section{Appendix B}
 
In this appendix we consider the case when $\rho _1+\rho _2=2$ but there is
at least one $k$ such that $\sigma (k)=\sigma (k+1)$. That is, at least one
player switches twice in succession. We claim that the algebraic
multiplicity of the unit root of $C^{-1}D\,$ is still 2.
 
Consider first a cycle 
\[
\left\langle 11,21,31,32,33,43,...,\kappa \kappa ,1\kappa \right\rangle 
\]
where player 1 switches twice in succession (from $1$ to $2$ to $3$), after
which player 2 also switches twice in succession (from $1$ to $2$ to $3$).
For this cycle, we have 
\[
\begin{array}{c}
\left[ \lambda C-D\right] = \\ 
\\ 
\left[ 
\begin{array}{cccccc}
\lambda \left( a_{21}-a_{11}\right) & a_{21}-a_{11} & a_{21}-a_{11} & 
a_{22}-a_{12} & \cdots & a_{2\kappa }-a_{1\kappa } \\ 
\lambda \left( a_{31}-a_{21}\right) & \lambda \left( a_{31}-a_{21}\right) & 
a_{31}-a_{21} & a_{32}-a_{22} &  & a_{3\kappa }-a_{2\kappa } \\ 
\lambda \left( b_{12}-b_{11}\right) & \lambda \left( b_{22}-b_{21}\right) & 
\lambda \left( b_{32}-b_{31}\right) & b_{32}-b_{31} &  & b_{12}-b_{11} \\ 
\lambda \left( b_{13}-b_{12}\right) & \lambda \left( b_{23}-b_{22}\right) & 
\lambda \left( b_{33}-b_{32}\right) & \lambda \left( b_{33}-b_{32}\right) & 
& b_{13}-b_{12} \\ 
\lambda (a_{41}-a_{31}) & \lambda (a_{41}-a_{31}) & \lambda (a_{41}-a_{31})
& \lambda (a_{42}-a_{32}) &  &  \\ 
\vdots &  &  & \vdots & \ddots & \vdots \\ 
\lambda \left( b_{1\kappa }-b_{1\kappa -1}\right) & \lambda \left(
b_{2\kappa }-b_{2\kappa -1}\right) & \lambda \left( b_{3\kappa }-b_{3\kappa
-1}\right) & \lambda \left( b_{3\kappa }-b_{3\kappa -1}\right) &  & 
b_{1\kappa }-b_{1\kappa -1} \\ 
\lambda \left( a_{11}-a_{\kappa 1}\right) & \lambda \left( a_{11}-a_{\kappa
1}\right) & \lambda \left( a_{11}-a_{\kappa 1}\right) & \lambda \left(
a_{12}-a_{\kappa 2}\right) &  & a_{1\kappa }-a_{\kappa \kappa } \\ 
\lambda \left( b_{11}-b_{1\kappa }\right) & \lambda \left( b_{21}-b_{2\kappa
}\right) & \lambda \left( b_{31}-b_{3\kappa }\right) & \lambda \left(
b_{31}-b_{3\kappa }\right) & \cdots & \lambda \left( b_{11}-b_{1\kappa
}\right)
\end{array}
\right]
\end{array}
\]
 
If column $3$ of this matrix is replaced by $($column $2-$ column $3+$column 
$4),$ and rows $2$ and $3$ are interchanged, we obtain the matrix: 
\[
\left[ 
\begin{array}{ccccc}
\lambda \left( a_{21}-a_{11}\right) & a_{21}-a_{11} & a_{22}-a_{12} & \cdots
& a_{2\kappa }-a_{1\kappa } \\ 
\lambda \left( b_{12}-b_{11}\right) & \lambda \left( b_{22}-b_{21}\right) & 
\lambda \left( b_{22}-b_{21}\right) +(1-\lambda )\left( b_{32}-b_{31}\right)
&  & b_{12}-b_{11} \\ 
\lambda \left( a_{31}-a_{21}\right) & \lambda \left( a_{31}-a_{21}\right) & 
(\lambda -1)\left( a_{31}-a_{21}\right) +a_{32}-a_{22} &  & a_{3\kappa
}-a_{2\kappa } \\ 
\lambda \left( b_{13}-b_{12}\right) & \lambda \left( b_{23}-b_{22}\right) & 
\lambda \left( b_{23}-b_{22}\right) &  & b_{13}-b_{12} \\ 
\vdots &  & \vdots & \ddots & \vdots \\ 
\lambda \left( b_{1\kappa }-b_{1\kappa -1}\right) & \lambda \left(
b_{2\kappa }-b_{2\kappa -1}\right) & \lambda \left( b_{2\kappa }-b_{2\kappa
-1}\right) &  & b_{1\kappa }-b_{1\kappa -1} \\ 
\lambda \left( a_{11}-a_{\kappa 1}\right) & \lambda \left( a_{11}-a_{\kappa
1}\right) & \lambda \left( a_{12}-a_{\kappa 2}\right) &  & a_{1\kappa
}-a_{\kappa \kappa } \\ 
\lambda \left( b_{11}-b_{1\kappa }\right) & \lambda \left( b_{21}-b_{2\kappa
}\right) & \lambda \left( b_{21}-b_{2\kappa }\right) & \cdots & \lambda
\left( b_{11}-b_{1\kappa }\right)
\end{array}
\right] 
\]
 
For this matrix: 
\begin{eqnarray*}
&&\text{row }1+\text{row }3+\text{row }5+\cdots +\text{row }K-1 \\
&=&\left( 0,\left( 1-\lambda \right) \left( a_{21}-a_{11}\right) ,\left(
1-\lambda \right) \left( a_{32}-a_{12}-\left( a_{31}-a_{21}\right) \right)
,\ldots ,(1-\lambda )\left( a_{i_kj_k}-a_{1j_k}\right) ,\ldots ,0\right)
\end{eqnarray*}
and 
\begin{eqnarray*}
&&\text{row }2+\text{row }4+\text{row }6+\cdots +\text{row }K \\
&=&\left( 0,0,\left( 1-\lambda \right) \left( b_{32}-b_{31}\right) ,\ldots
,(1-\lambda )\left( b_{i_kj_k}-b_{i_k1}\right) ,\ldots ,\left( 1-\lambda
\right) \left( b_{1\kappa }-b_{11}\right) \right)
\end{eqnarray*}
 
Therefore, we can add the odd-numbered rows to row 1 and the even-numbered
rows to row 2 to obtain 
\begin{equation}
\begin{array}{c}
\left| \lambda C-D\right| =\lambda \left( 1-\lambda \right) ^2\times \\ 
\\ 
\left| 
\begin{array}{cccccc}
0 & a_{21}-a_{11} & [\left( a_{32}-a_{12}\right) & \cdots & a_{\kappa \kappa
}-a_{1\kappa } & 0 \\ 
&  & -(a_{31}-a_{21})] &  &  &  \\ 
0 & 0 & b_{32}-b_{31} &  & b_{\kappa \kappa }-b_{\kappa 1} & b_{1\kappa
}-b_{11} \\ 
a_{31}-a_{21} & \lambda \left( a_{31}-a_{21}\right) & [(\lambda -1)\left(
a_{31}-a_{21}\right) &  & a_{3\kappa }-a_{2\kappa } & a_{3\kappa
}-a_{2\kappa } \\ 
&  & +\left( a_{32}-a_{22}\right) ] &  &  &  \\ 
b_{13}-b_{12} & \lambda \left( b_{23}-b_{22}\right) & \lambda \left(
b_{23}-b_{22}\right) &  & b_{\kappa 3}-b_{\kappa 2} & b_{13}-b_{12} \\ 
\vdots &  & \vdots & \ddots &  & \vdots \\ 
b_{1\kappa }-b_{1\kappa -1} & \lambda \left( b_{2\kappa }-b_{2\kappa
-1}\right) & \lambda \left( b_{2\kappa }-b_{2\kappa -1}\right) &  & 
b_{\kappa \kappa }-b_{\kappa \kappa -1} & b_{1\kappa }-b_{1\kappa -1} \\ 
a_{11}-a_{\kappa 1} & \lambda \left( a_{11}-a_{\kappa 1}\right) & \lambda
\left( a_{12}-a_{\kappa 2}\right) &  & \lambda \left( a_{1\kappa }-a_{\kappa
\kappa }\right) & a_{1\kappa }-a_{\kappa \kappa } \\ 
b_{11}-b_{1\kappa } & \lambda \left( b_{21}-b_{2\kappa }\right) & \lambda
\left( b_{21}-b_{2\kappa }\right) & \cdots & \lambda \left( b_{\kappa
1}-b_{\kappa \kappa }\right) & \lambda \left( b_{11}-b_{1\kappa }\right)
\end{array}
\right|
\end{array}
\label{hujj}
\end{equation}
\[
=\lambda \left( 1-\lambda \right) ^2\times \left| M(\lambda )\right| 
\]
 
If we use the payoff matrices given by (\ref{aaaa}) and (\ref{bbbb}), i.e.
 
\[
a_{ij}=\left\{ 
\begin{array}{ll}
0 & \text{if }i=j=1 \\ 
1 & \text{if }i=j>1 \\ 
0 & \text{if }i\neq j
\end{array}
\right. 
\]
\[
b_{ij}=\left\{ 
\begin{array}{rl}
-1 & \text{if }i=j \\ 
0 & \text{if }i\neq j
\end{array}
\right. 
\]
then 
\begin{eqnarray}
\left| M(1)\right| =\left| 
\begin{array}{ccccccccccc}
0 & 0 & 0 & 0 & 1 & 0 & 1 & \cdots & 0 & 1 & 0 \\ 
0 & 0 & 0 & 0 & -1 & 0 & -1 &  & 0 & -1 & 1 \\ 
0 & 0 & -1 & -1 & 1 & 1 & 0 &  & 0 & 0 & 0 \\ 
0 & 1 & 1 & -1 & -1 & 0 & 0 &  & 0 & 0 & 0 \\ 
0 & 0 & 0 & 0 & -1 & -1 & 1 &  & 0 & 0 & 0 \\ 
0 & 0 & 0 & 1 & 1 & -1 & -1 &  & 0 & 0 & 0 \\ 
0 & 0 & 0 & 0 & 0 & 0 & -1 &  & 0 & 0 & 0 \\ 
\vdots &  &  &  &  &  &  & \ddots &  &  & \vdots \\ 
0 & 0 & 0 & 0 & 0 & 0 & 0 &  & 1 & 0 & 0 \\ 
0 & 0 & 0 & 0 & 0 & 0 & 0 &  & 0 & 0 & 0 \\ 
0 & 0 & 0 & 0 & 0 & 0 & 0 &  & -1 & 1 & 1 \\ 
0 & 0 & 0 & 0 & 0 & 0 & 0 &  & -1 & -1 & 0 \\ 
0 & 0 & 0 & 0 & 0 & 0 & 0 &  & 0 & -1 & -1 \\ 
-1 & 0 & 0 & 0 & 0 & 0 & 0 & \cdots & 1 & 1 & -1
\end{array}
\right|  \label{gnu}
\end{eqnarray}
 
Thus, $\left| M(1)\right| $ is of the form (\ref{ellehat}), and by the
corollary to Lemma \ref{jujj}, $\left| M(1)\right| \neq 0$. Thus, again the
algebraic multiplicity of the unit root is 2 (for generic games).
 
Similarly, if there are several places in the cycle where a player switches
twice in a row, we can reduce the matrix $\left| \lambda C-D\right| $ to the
form (\ref{ellehat}). This procedure is the same as the one that resulted in
(\ref{hujj}). Each time we ``reduce'' a sequential switch, in the first two
rows one of the $\delta _j$ will change from one to zero, just as $\delta _1$
became zero in (\ref{gnu}). However, because sometime between run 2 and run $%
K-1$ there {\em must} exist two consecutive runs where first player 2
switches and then player 1 switches, not {\em all} of $\delta _1,\delta
_2,...,\delta _{\kappa -2}$ will be zero. For example, if the cycle is 
\begin{equation}
\left\langle 11,21,31,32,33,43,53,54,...\right\rangle  \label{cykel}
\end{equation}
then the ``reduction'' will simulate the cycle $\left\langle
11,21,22,32,33,43,44,54,...\right\rangle $. Then, in columns 3 and 7, the $%
\delta _1$ and $\delta _3$ will change from 1 to zero, but in column 5, $%
\delta _2=1$. By the corollary to Lemma \ref{jujj}, the determinant is still
non-zero. Thus, if during the cycle any number of times players switch twice
in a row, the algebraic multiplicity of the unit root is still 2 (for
generic games).
 
If some player switches three or more consecutive times, the procedure is
the same. By rearranging rows and columns, we can obtain a matrix of the
form (\ref{ellehat}) which is non-singular. Note that there must exist a run 
$k^{*}$, $1<k^{*}<\kappa $, such that (i) $\sigma (k^{*})=2$ and $\sigma
(k^{*}+1)=1$, and (ii) $i_{k^{*}}\equiv i^{*}\neq 1$ and $j_{k^{*}}\equiv
j^{*}\neq 1$. This is because player 1 cannot switch from strategy 1 to 2
to ... back to 1 again consecutively --- player 2 must make some switch
in-between --- and, similarly, player 2 cannot make consecutive switches
from 1 to 2 to ... back to 1. (In the case of (\ref{cykel}) take $k^{*}=5$,
whence $i^{*}=j^{*}=3$).
 
It will suffice to illustrate this with the cycle 
\[
\left\langle
11,21,31,41,42,43,...(i^{*},j^{*}-1),(i^{*},j^{*}),(i^{*}+1,j^{*}),....,1%
\kappa \right\rangle 
\]
 
Here player 1 switches thrice in succession (from $1$ to $2$ to $3$ to $4$)
and then player 2 switches three times. In this case, we operate on the
matrix $\lambda C-D$ as follows. If we replace column 4 by $\left( \text{%
column }3-\text{column }4+\text{column }5\right) $ and interchange rows 3
and 4, we obtain a matrix $M_1(\lambda )$ which, for $\lambda =1$, is
identical to the matrix $\left| C-D\right| $ corresponding to the cycle 
\[
\left\langle
11,21,31,32,42,43,...,(i^{*},j^{*}-1),(i^{*},j^{*}),(i^{*}+1,j^{*}),....,%
\kappa \kappa ,1\kappa \right\rangle 
\]
Next, in $M_1(\lambda )$ replace column 3 by (column $2-$ column $3+$ column 
$4$) and interchange rows 2 and 3. The result is a matrix $M_2(\lambda )$
which, for $\lambda =1$, is identical to the matrix $\left| C-D\right| $
corresponding to the cycle 
\[
\left\langle
11,21,22,32,42,43,...,(i^{*},j^{*}-1),(i^{*},j^{*}),(i^{*}+1,j^{*}),....,%
\kappa \kappa ,1\kappa \right\rangle 
\]
Finally, in $M_2(\lambda )$ replace column 5 by $($column $4-$ column $5+$
column $6)$ and interchange rows 4 and 5. The result is a matrix $%
M_3(\lambda )$ which, for $\lambda =1$, is identical to the matrix $\left|
C-D\right| $ corresponding to the cycle 
\[
\left\langle
11,21,22,32,33,43,...,(i^{*},j^{*}-1),(i^{*},j^{*}),(i^{*}+1,j^{*}),....,%
\kappa \kappa ,1\kappa \right\rangle 
\]
 
It is clear how to proceed this way, to obtain a matrix $M(\lambda )$ which,
when $\lambda =1$, corresponds to the matrix $\lambda C-D$ for the simple
cycle where $\sigma (k)\neq \sigma (k+1)$. In fact, for any $\lambda $,
columns $k^{*}-1,k^{*}$ and $k^{*}+1$ of $M(\lambda )$ will be identical to
columns $k^{*}-1,k^{*},k^{*}+1$ of the matrix $\lambda C-D$ for the simple
cycle. This is because none of the operations will have affected these
columns. Refer to the case (\ref{cykel}), where columns 4, 5 and 6 remained
unchanged.
 
If we then, in the matrix $M(1)$, sum the rows corresponding to player 1's
(resp. player 2's) switches to obtain a matrix of the form (\ref{ellehat}),
at least one of the $\delta _j$ for $j<\kappa -1$ will be non-zero (viz. in
the column $k^{*}$, since this was true for the matrix corresponding to the
simple cycle). By the corollary to Lemma \ref{jujj}, $\left| M(1)\right|
\neq 0$.\newpage\ 
 
\begin{thebibliography}{99}
\bibitem{Brown}  Brown, G. W. (1951). ``Iterative Solutions of Games by
Fictitious Play,'' in {\em Activity Analysis of Production and Allocation. }%
New York: Wiley.
 
\bibitem{Crawford}  Crawford, V. P. (1985). ``Learning Behavior and
Mixed-Strategy Nash Equilibria,'' {\em Journal of Economic Behavior and
Organization}, {\bf 6}, 69-78.
 
\bibitem{FudenKreps}  Fudenberg, D. and Kreps, D. M. (1993). ``Learning
Mixed Strategy Equilibria,'' {\em Games and Economic Behavior}, {\bf 5},
320-367.
 
\bibitem{Harsanyi}  Harsanyi, J.C. (1973). ``Games with Randomly Disturbed
Payoffs,'' {\em International Journal of Game Theory}, {\bf 2}, 1-23.
 
\bibitem{Hirsch}  Hirsch, M. and S. Smale (1974). {\em Differential
Equations, Dynamical Systems and Linear Algebra}, New York: Academic Press.
 
\bibitem{Hofbauer}  Hofbauer, J. (1994). ``Stability for Best Response
Dynamics,'' mimeo, Institut f\"ur Mathematik, Universit\"at Wien, Vienna.
 
\bibitem{HofbSigm}  Hofbauer, J. and Sigmund, K. (1988). {\em The Theory of
Evolution and Dynamical Systems}, Cambridge: Cambridge University Press.
 
\bibitem{Jordan}  Jordan, J. S. (1993). ``Three Problems in Learning Mixed
Strategy Equilibria,'' {\em Games and Economic Behavior}, {\bf 5}, 368-386.
 
\bibitem{Miyasawa}  Miyasawa, K. (1963). ``On the Convergence of the
Learning Process in a $2\times 2$ Non-zero-sum Game,'' Princeton University
Econometric Research Program, Research Memorandum No. 33, Princeton.
 
\bibitem{MondSella}  Monderer, D. and Sela, A. (1993). ``Fictitious Play
and the No-Cycling Conditions,'' mimeo, The Technion, Haifa.
 
\bibitem{Robinson}  Robinson, J. (1951). ``An Iterative Method of Solving a
Game,'' {\em Annals of Mathematics,} {\bf 54}, 296-301.
 
\bibitem{Rosenmuller}  Rosenm\"uller, J. (1971). ``\"Uber
Periodizit\"atseigenschaften Spieltheoretischer Lernprozesse,'' {\em Z.
Wahrscheinlichkeitstheorie Verw. Geb.,} {\bf 17}, 259-308.
 
\bibitem{Rubinstein}  Rubinstein, A. (1991), ``Comments on the
Interpretation of Game Theory,'' {\em Econometrica}, {\bf 59}, 909-924.
 
\bibitem{Shapley}  Shapley, L. (1964). ``Some Topics in Two-Person Games,''
in {\em Advances in Game Theory, }Annals of Mathematics Studies, Number 
{\bf 52}, 1-28.
\end{thebibliography}
\end{thebibliography}
 
\end{document}
