\documentclass[11pt]{amsart}
\usepackage{amsmath,amsfonts}
\renewcommand{\vec}[1]{\mathbf{#1}}
\newcommand{\tR}[1]{\ensuremath{\mathbb{R}^{#1}}}
\renewcommand{\paragraph}[1]{\vspace{0.5\baselineskip}\noindent\textbf{#1}\nopagebreak\par}
\setlength{\baselineskip}{1cm}
\renewcommand{\baselinestretch}{1.3}
\newcommand{\Ie}{\textsl{I.e.\,}}
\newcommand{\ie}{\textsl{i.e.\,}}
\newcommand{\etc}{\textsl{etc.\,}}
\newcommand{\eg}{\textsl{e.g.\,}}
\newcommand{\E}{\mathbb{E}}
\setlength{\parskip}{0.5\baselineskip}
\setlength{\parindent}{0cm}
\setlength{\evensidemargin}{-.3cm}
\setlength{\oddsidemargin}{-.3cm}
\setlength{\textheight}{24.2cm}
\setlength{\textwidth}{16.2cm}
%\setlength{\topmargin}{-.5cm}

\newenvironment{displaycode}{\begin{quote}\ttfamily}{\end{quote}}
\title{{\sf SOC\,{\tt Sol}-I} \\
a Matlab Package for Approximating the Solution to a 
Continuous-Time Stochastic Optimal Control Problem}  
\author{Alistair Windsor\thanks{ Honours Student, Mathematics, Victoria
University of Wellington, New Zealand.
Email: \mbox{\tt Alistair.Windsor@vuw.ac.nz }}
 \and Jacek B. Krawczyk\thanks{Commerce \& Administration Faculty
Victoria University of Wellington, PO Box 600, Wellington, New
Zealand; fax +64-4-4955014. Email: \mbox{\tt Jacek.Krawczyk@vuw.ac.nz\,;}\\
\underline{http://www.vuw.ac.nz/\~{}jacek} }  } 


\begin{document}

\begin{abstract}
\begin{large}
Computing the solution to a stochastic optimal control problem is 
a difficult problem.  A method of approximating a solution to a 
given stochastic optimal control problem using Markov 
chains was developed in \cite{Paper1}.  This paper describes a 
suite of Matlab functions implementing this method of 
approximating a solution to a given continuous stochastic optimal 
control problem. 

\end{large}
\end{abstract}
\maketitle
\begin{center}
\vspace{.5 cm}

Working Paper \\
{\sf SOC\,{\tt Sol}-I}/1a \\


\begin{small}

\begin{itemize}
\item[{\em JEL}] Classification:  C63 (Computational Techniques), 
C87 (Economic Software).


\item[{\em AMS}] Categories:  93E25 (Computational methods in 
stochastic optimal control).

\item[{\em Authors}'] Keywords: Computational economics;
Approximating Markov decision chains. \end{itemize} 
\end{small}
\end{center}
\pagestyle{empty} 
\thispagestyle{empty} 
\begin{center}
\vspace{.5cm}
Research supported by VUW GSBGM and IGC. \\
\vspace{1cm}
{\em Correspondence should be addressed to:} Jacek B. Krawczyk.
\end{center}
\hrulefill
\vspace{.5cm}

\begin{small}
\noindent About the authors:\,
{\sc Alistair Windsor.} 
 Honours Student, Mathematics, Victoria University of Wellington. 
Email: \mbox{\tt Alistair.Windsor@vuw.ac.nz\,} \\
{\sc Jacek B. Krawczyk.} Commerce \&
Administration Faculty,
Victoria University of Wellington,
PO Box 600, Wellington, New Zealand; fax +64-4-4712200. Email:
\mbox{\tt Jacek.Krawczyk@vuw.ac.nz\,;}\\
\underline{http://www.vuw.ac.nz/\~{}jacek} 


\end{small}
\newpage
\tableofcontents
\newpage
\pagestyle{plain}
\begin{center}
\begin{Large}
{\sf SOC\,{\tt Sol}-I} \\
a Matlab Package for Approximating the Solution to a 
Continuous-Time Stochastic Optimal Control Problem 
\end{Large}
\end{center}


%\begin{document}
%\maketitle

\section{Introduction}

Computing the solution to a stochastic optimal control problem is 
a difficult problem.  A method of approximating a solution to a 
given stochastic optimal control ({\sf SOC}) problem using Markov 
chains was developed in \cite{Paper1}.  This paper describes a 
suite of Matlab\footnote{See \cite{Matlab} for the introduction
to Matlab.}  functions implementing this method of 
approximating a solution to a given continuous 
{\sf SOC}\, problem.  This should be read and used in 
conjunction with \cite{Paper1}. 

\section{The Stochastic Optimal Control Problem}

The method deals with finite horizon, free terminal state 
stochastic optimal control problems.  Let the {\sf SOC}\, problem be 
written in the following form
\begin{align}
	\min J(\vec{u},\vec{x}_{0}) &= \E \Bigl\{\int_{0}^{T}f \bigl( 
	\vec{x}(t),\vec{u}(t),t \bigr) \mathrm{d}t+ h \bigl( 
	\vec{x}(T) \bigr)\Bigm|\vec{x}(0)=\vec{x}_{0}   \Bigr\} 
	\label{e:perform1} \\
	\intertext{subject to}
	\mathrm{d}\vec{x}&=g \bigl( \vec{x}(t),\vec{u}(t),t \bigr) 
	\mathrm{d}t+ b \bigl( \vec{x}(t),\vec{u}(t),t \bigr) \mathrm{d} 
	\mathbf{W}   \label{stateq}
\end{align}
where $\mathbf{W}$ is a standard Wiener process. In the optimisation 
method, we also allow for constraints on control and state (local and mixed).  

Throughout the paper, we shall denote the dimension of the state 
space by $d$, the dimension of the control by $c$, the length of 
the horizon by $T$, and the number of variables which are affected 
by noise by $N$.

To solve \eqref{e:perform1} subject to \eqref{stateq} and local
constraints, we developed a package of Matlab programmes under the name
{\sf SOC\,{\tt Sol}-I}. The package is composed of the 
four main modules: \begin{enumerate}
\item \texttt{GenSolve}
\item \texttt{GenSim}
\item \texttt{ContRule}
\item \texttt{ValGraph} \end{enumerate}

%\newpage
\section{\texttt{GenSolve}}

\subsection{Purpose} \texttt{GenSolve} takes the given {\sf SOC}\, 
problem, approximates it with a Markov decision chain, and then 
solves the Markov decision chain.  The result of this process is a 
discrete-time, discrete-space control rule.  \texttt{GenSolve} 
does not perform the interpolation to convert the discrete-time, 
discrete-space control rule to a continuous-time, 
continuous-state control rule.

\subsection{Syntax}
\begin{displaycode}
	GenSolve( DeltaFunctionFile, InstantaneousCostFunctionFile,\\
	\hspace*{1 cm}TerminalStateFunctionFile, Minimum, Maximum, 
	StateStep,\\ 
	\hspace*{1 cm}TimeStep, ProblemFile,Options)
\end{displaycode}

\paragraph{\texttt{DeltaFunctionFile}:} A string containing the 
name (no \texttt{.m} extension) of a file containing a Matlab 
function representing the equations of motion.  

If the problem is stochastic then the function returns a vector of 
length $2d$\,: the first $d$ elements are $g \bigl( \vec{x}(t),\vec{u}(t),t \bigr)$;
the second $d$ elements are $b \bigl( \vec{x}(t),\vec{u}(t),t \bigr)$. 
If the problem is stochastic but 
some of the variables are undisturbed by noise, \ie $N<d$, then 
the variables for which the diffusion term is constantly $0$ must 
come after the variables which are disturbed by noise.

If the problem is deterministic then the function returns a vector 
of length $d$ corresponding to the value of $g \bigl( 
\vec{x}(t),\vec{u}(t),t \bigr)$ only.

In any case the function should have a header of the form:
\begin{displaycode}
	function Delta = DeltaFunction(\,Control\,,\,StateVars\,,\,t\,)
\end{displaycode}
where \texttt{Control} is a vector of length $c$, 
\texttt{StateVars} is a vector of length $d$, and \texttt{t} is a 
scalar.

\paragraph{\texttt{InstantaneousCostFunctionFile}:} A string 
containing the name (no \texttt{.m} extension) of a file 
containing a Matlab function representing the instantaneous cost 
function, $f \bigl( \vec{x}(t),\vec{u}(t),t \bigr)$ and the 
(local and mixed) inequality constraints on control and state. 

The function returns a vector of $I+1$ values. The first value is 
the value of $f \bigl( \vec{x}(t),\vec{u}(t),t \bigr)$. The 
remaining $I$ values are the values of the inequality constraints 
which should be set up in an appropriate way for the minimization 
routine \texttt{constr}. The function should have a header of the 
form:
\begin{displaycode}
	function StageReturn = f(\,Control\,,\,StateVars\,,\,t\,)
\end{displaycode}
where \texttt{Control} is a vector of length $c$, 
\texttt{StateVars} is a vector of length $d$, and \texttt{t} is a 
scalar.

\paragraph{\texttt{TerminalStateFunctionFile}:} A string 
containing the name (no \texttt{.m} extension) of a file 
containing a Matlab function representing the terminal state 
function, $h \bigl( \vec{x}(T) \bigr)$.

This function should return only the single real value given by $h 
\bigl( \vec{x}(T) \bigr)$ (even if the function is zero). 
The function should have a header of the 
form:
\begin{displaycode}
	function TermReturn = h(\,StateVars\,)  
\end{displaycode}
where 
\texttt{StateVars} is a vector of length $d$.


\paragraph{\texttt{Minimum}, \texttt{Maximum}, and 
\texttt{StateStep}:} These functions determine the finite state 
grid for the Markov chain which we hope will approximate the 
{\sf SOC}\, problem. 

The value of \texttt{Minimum} is the least possible state.

The value of \texttt{Maximum} is the maximum possible state. 

The value of \texttt{StateStep} determines the distance between 
states. It  has to   be chosen so that the difference between 
\texttt{Minimum} and \texttt{Maximum} is divisible by 
\texttt{StateStep}.

Each of these values can be given in two different 
ways:
\begin{enumerate}
	\item  as a vector of length $d$ where the same set of numbers 
	will apply to every decision stage of the Markov chain.
	
	\item  as a matrix with $S+1$ rows and $d$ columns, where $S$ 
	is the number of decision stages in the Markov Chain and is 
	determined by \texttt{TimeStep} below. The $i$-th row is then used to 
	determine the state grid in the $i$-th stage.
\end{enumerate}

\paragraph{\texttt{TimeStep}:} This determines the number of 
decision stages and their associated times.

\texttt{TimeStep} is a vector of step lengths that partition 
interval $[0,T]$; \ie, their sum should be $T$. The number of 
elements of \texttt{TimeStep} is the number of decision stages in 
the Markov decision chain, which we denote $S$.

\paragraph{\texttt{ProblemFile}:} A string containing the name 
(with no extension) of the problem.  This name is used to store 
the solution on disk.  \texttt{GenSolve} produces two files with 
this name; one with the extension \texttt{.DPP}, which contains the 
parameters used to compute the Markov decision chain, and one with 
the extension \texttt{.DPS}, which contains the solution itself. 

These files together 
 produce the continuous-time, continuous-state control rule. Notice
that  the 
routines \texttt{GenSim} and \texttt{ValGraph} (explained below) 
also require that 
the function files \texttt{.DPP} and \texttt{.DPS} still exist 
and are unchanged.

\paragraph{\texttt{Options}:} This vector controls various 
internal settings which need only be adjusted infrequently.  The 
\texttt{Options} vector is optional.  If you do not pass an 
\texttt{Options} vector then the default values are used.

The first 18 entries are the same as those for the Matlab Optimization 
Toolbox routines and are just passed directly to the 
\texttt{constr} routine used within the algorithm. The remaining 
entries are
\begin{enumerate}\setcounter{enumi}{18}
	\item  this contains the value $c$ for 
	your problem. The default is 1.

	\item  this should be set non-zero if your 
	problem is stochastic. The default value is 0, \ie the problem 
	is assumed to be deterministic.

	\item this should be set to the number $N$ if $N<d$.  The 
	default value is 0 which \texttt{GenSolve} interprets as 
	meaning all variables are noisy. (If the problem is deterministic
\ie\, \texttt{Option(20)}=0, then this option should be 0.) 

\end{enumerate}

To change some of the \texttt{Options} values you should first 
call the function \texttt{GenOpts} which returns the default 
\texttt{Options} vector, and then change the appropriate values.

\newpage\section{\texttt{GenSim}}

\subsection{Purpose} \texttt{GenSim} takes the solution computed 
by \texttt{GenSolve}, derives a continuous-time, continuous-state 
control rule and simulates the continuous system using this
rule. It 
returns graphs of the timepaths of the state and control variables 
and the associated value.

\subsection{Implementation} The derivation of the continuous-time, 
continuous-state control rule from the solution computed by 
\texttt{GenSolve} requires some form of interpolation in both 
state and time.  In the effort to keep the script simple the 
interpolation in state is linear.  States which are outside the 
state grid simply move to the nearest state grid point.  For times 
between Markov chain times, the control profile for the most recent 
Markov chain time is used.

The differential equation which governs the evolution of the 
system is approximated using the Euler--Maruyama approximation 
scheme. The integral for the performance criterion is approximated 
using the left-hand endpoint rectangle rule.

\subsection{Syntax}
\begin{displaycode}
	SimulatedValue = GenSim(ProblemFile, InitialCondition, \\ 
	\hspace*{1cm}SimulationTimeStep, NumberofSimulations, 
	UserSuppliedNoise)
\end{displaycode}

\paragraph{\texttt{ProblemFile}:} A string containing the name 
(with no extension) of the problem.  This name is used to retrieve 
the solution produced by \texttt{GenSolve} from the disk.  
\texttt{GenSim} requires that the function files used by 
\texttt{GenSolve} still exist and are unchanged.

\paragraph{\texttt{InitialCondition}:} This is a vector of 
length $d$ which contains the initial condition. The initial 
condition is the point from which the simulation starts.

\paragraph{\texttt{SimulationTimeStep}:} This is a vector of step 
lengths that partition interval $[0,T]$; \ie, their sum should 
be $T$. 

The more simulation steps the less error there is from 
approximating the equations of motion using the Euler--Maruyama 
scheme and from approximating the performance criterion using the 
left-hand endpoint rectangle method. In general the simulation step 
is much smaller than the time step used to compute the solution 
(\ie the \texttt{TimeStep} argument to \texttt{GenSolve}).

If no \texttt{SimulationTimeStep} vector is given, or if the 
\texttt{SimulationTimeStep} given is empty, then the 
\texttt{TimeStep} vector used in computing the 
\texttt{ProblemFile} is used.

\paragraph{\texttt{NumberofSimulations}:} This is the number of 
simulations that should be performed. Making the number negative 
suppresses plotting of the timepaths; the number of simulations 
performed is then the absolute value of the argument. 

Multiple simulations are normally performed when dealing with a 
stochastic system.  Each simulation uses a randomly determined 
noise realization (unless this is suppressed by the \\
\texttt{UserSuppliedNoise} argument).

If no \texttt{NumberofSimulations} is given then it defaults to 1.

\paragraph{\texttt{UserSuppliedNoise}:} This enables the user to 
override the random generation of noise realizations.  

Passing 0 as this argument uses a constantly 0 noise realization.  
  

This argument accepts a matrix with one row for each entry of 
\texttt{SimulationTimeStep} and $N$ columns. 

\texttt{NumberofSimulations} should be 1 if this argument is used.  
This argument is entirely optional; if no 
\texttt{UserSuppliedNoise} is given the noise realization is 
selected randomly for each simulation.

This argument has no effect for deterministic 
problems.\footnote{The current location of some of 
the noise code is inefficient for deterministic problems.
The code should be moved within stochastic only block.}


\paragraph{\texttt{SimulatedValue}:} The output consists of a 
vector of the values of the performance criterion for each of the 
simulations performed. 

If the problem is stochastic, and the noise realizations are 
random, then the average of the values from a large number of 
simulations is an approximation to the expected value of the 
continuous stochastic system (under the continuous-time, 
continuous-state control rule derived from the solution computed 
by \texttt{GenSolve}). However, this average is left for the user
to compute.

\newpage\section{\texttt{ContRule}}

\subsection{Purpose} The purpose of \texttt{ContRule} is to 
produce graphs of the continuous-time, continuous-state control 
rule derived from the solution computed by \texttt{GenSolve}. The 
control rule profile is produced for a given time and holding all but 
one state variable constant.

\subsection{Syntax}
\begin{displaycode}
	ContRule( ProblemFile, Time, InitialCondition, 
	IndependentVariable);
\end{displaycode}

\paragraph{\texttt{ProblemFile}:} A string containing the name 
(with no extension) of the problem.  This name is used to retrieve 
the solution produced by \texttt{GenSolve} from the disk.  

\paragraph{\texttt{Time}:} A scalar telling the routine for what 
time you want to compute the control profile.  If \texttt{Time} is 
not a Markov chain time then the first Markov chain time before 
\texttt{Time} is used.

\paragraph{\texttt{InitialCondition}:} This vector determines the 
values of the fixed state variables. The entry for the 
\texttt{IndependentVariable} is not used. 

\paragraph{\texttt{IndependentVariable}:} This scalar tells the 
routine which of the state variables to vary.  \Ie, numbers
like ``1'' or ``2'' \etc\, have to be entered following the 
state variables' ordering  determined in the function 
\texttt{DeltaFunctionFile}. The control rule
profile appears with this state variable along the x-axis.

\newpage\section{\texttt{ValGraph}}

\subsection{Purpose} The purpose of \texttt{ValGraph} is to 
automate the process of computing the expected values for the 
continuous system (under the continuous-time, continuous-state 
control rule derived from the solution computed by 
\texttt{GenSolve}) as the initial conditions change. In a similar 
spirit to \texttt{ContRule} above, it deals with one state variable
at a time (indicated in \texttt{IndependentVariable}), with 
the other state variables remaining fixed.

%fixes all but one state variable.

\subsection{Syntax}
\begin{displaycode}
	ValGraph(ProblemFile, InitialCondition, IndependentVariable,\\ 
	\hspace*{1 cm}IndependentVariableVector, SimSteps, NumberOfSimulations, ScaleFactor)
\end{displaycode}

\paragraph{\texttt{ProblemFile}:} A string containing the name 
(with no extension) of the problem.  This name is used to retrieve 
the solution produced by \texttt{GenSolve} from the disk. 

\paragraph{\texttt{InitialCondition}:} This vector determines the 
values of the fixed state variables. The entry for the 
\texttt{IndependentVariable} is not used. 

\paragraph{\texttt{IndependentVariable}:} This scalar tells the 
routine which of the state variables to vary.  The value graphs 
appear with this state variable along the x-axis.

\paragraph{\texttt{IndependentVariableVector}:} This vector 
contains the  values 
of the \texttt{IndependentVariable} at which you wish to 
calculate the value of the system.

\paragraph{\texttt{SimSteps}:} 
This is as for \texttt{GenSim} above, \texttt{ValGraph} calls 
\texttt{GenSim} for each of your initial conditions and then takes 
the mean of the resulting values.

\paragraph{\texttt{NumberOfSimulations}:} This is similar to 
\texttt{NumberOfSimulations} for \texttt{GenSim} above.  If you 
specify only one simulation for a stochastic problem then a 
constantly zero noise realization is used.  \texttt{ValGraph} 
automatically suppresses the output of timepaths. 

This defaults to 1.

\paragraph{\texttt{ScaleFactor}:} This simply multiplies all the 
resulting values.  

Maximization problems are treated by \texttt{GenSolve} by taking 
the negative of all the payoffs and then minimizing.  The 
\texttt{ScaleFactor} can be set to -1 thus producing the payoffs 
that you expect.

This defaults to 1. 

\newpage\section{Technical Information}

\subsection{Encoding the State Space}\label{s:Encodi} Within the 
\texttt{GenSolve} routine each point of the state grid is 
assigned a unique number. This state number is then used as an 
index in all the result matrices. 

For each stage a \texttt{CodingVector} is computed. As a precursor 
to this, we compute the number of states along each of the $d$ 
state variable axes
\begin{displaycode}
	States=round((Maximum-Minimum)./StateStep + 1);
\end{displaycode}
where \texttt{Maximum}, \texttt{Minimum}, and \texttt{StateStep} 
are the appropriate vectors for the current stage.  Taking the 
product of entries of \texttt{States} we get the 
\texttt{TotalStates} for the current stage. Now we define the 
\texttt{CodingVector} as follows
\begin{displaycode}
	c=cumprod(States);\\
	CodingVector=[1,c(1:Dimension-1)];
\end{displaycode}
where \texttt{Dimension} is the internal name for $d$, the number 
of state variables. This vector is used to compute the state 
number for each point in the state grid for the current stage.

Suppose \texttt{x} is a point on the state grid. We convert 
\texttt{x} into a vector of numbers, \texttt{StateVect}, with 
$1\leq \texttt{StateVect(i)}\leq\texttt{States(i)}$
\begin{displaycode}
	StateVect=round((x-Min)./StateStep+1);
\end{displaycode}
Finally the state number, \texttt{State}, for \texttt{x} is 
computed by taking the dot product of \texttt{StateVect-1} with 
the \texttt{CodingVector} and then adding 1
\begin{displaycode}
	State=(StateVect-1)*CodingVector'+1;
\end{displaycode}

This process assigns a unique \texttt{State} to each point of the 
state grid. Every number between 1 and \texttt{TotalStates} 
corresponds to a point of the state grid. By construction 1 
corresponds to \texttt{Min} and \texttt{TotalStates} to \texttt{Max}.


Converting a state number back into a point of the state grid is 
equally simple.  The first step is to convert the \texttt{State} 
into a \texttt{StateVect}, this 
is carried out by a function 
\texttt{SNumToSVect}
\begin{displaycode}
	function StateVect=SNumToSVect(SNum,CVect,Dimension)\\
	\\
	SNum=SNum-1;\\
	StateVect=zeros(1,Dimension);\\
	for i=Dimension:-1:1\\
	\hspace*{1 cm}StateVect(i)=floor(SNum/CVect(i));\\
	\hspace*{1 cm}SNum=SNum-StateVect(i)*CVect(i);\\
	end\\
	StateVect=StateVect+1;
\end{displaycode}
Converting a \texttt{StateVect} to its associated \texttt{x} is 
as simple as
\begin{displaycode}
	x=(StateVect-1).*StateStep+Min;
\end{displaycode}

\subsection{The Solution Files}

\texttt{GenSolve} writes two files to disk.  The file with the 
\texttt{.DPP} extension contains the parameters used to compute 
the approximating Markov chain.  The file with the \texttt{.DPS} 
extension contains the solution matrices.  These matrices are 
indexed using the state number encoding of the state space, see 
section \ref{s:Encodi}.

\paragraph{The \texttt{.DPP} File:} The first three lines of this 
file hold, as strings, the variables \texttt{DeltaFunctionFile}, 
\newline\texttt{InstantaneousCostFunctionFile}, and
\texttt{TerminalStateFunctionFile}. These variables are retrieved 
using \texttt{fscanf} with a call to \texttt{fgets} after the 
third \texttt{fscanf} to remove the remaining newline. 

The remaining portion of the file holds the matrices 
\texttt{Minimum}, \texttt{Maximum}, \texttt{StateStep}, and 
\texttt{TimeStep}, with space at the end for any additional 
information (currently \texttt{GenSolve} appends the computation 
time, in seconds, taken to compute the solution). All these 
variables (with the exception of the computation time) are stored 
and retrieved using the \texttt{matwrite} and \texttt{matread} 
functions. These simple functions store the matrices in a binary 
form which is more time and space efficient than a text 
representation. 

\paragraph{The \texttt{.DPS} File:} This file contains only 
matrices, written and retrieved by the \texttt{matwrite} and \texttt{matread} 
functions. The file begins with the matrices representing the 
optimal control, there is one matrix for each dimension of the 
control space. This can be retrieved using a call of the form
\begin{displaycode}
	\% Read Optimal Decision Matrices for each control\\
	\% dimension.\\
	\\
	for i=1:ControlDimension\\
	\hspace*{1 cm}eval(['ODM',int2str(i),'=matread(fid);'])\\
	end\\
	fclose(fid);
\end{displaycode}

This is followed by a single matrix holding the cost-to-go for 
each state of each stage of the Markov chain.
% Finally there is a matrix holding th


\begin{thebibliography}{99}

\bibitem{Paper1}{\sc Krawczyk J.B. \&  A. Windsor}, 1997,
{\bf An Approximated Solution to Continuous-Time Stochastic Optimal
Control Problems Through Markov Decision Chains}, Working Paper
{\em Version 8a}. School of Economics and Finance, Victoria
University of Wellington.

\bibitem{Matlab} 1992, {\bf Matlab. High-Performance Numeric Computation
and Visualization Software}, {\em The MathWorks Inc.}  
\end{thebibliography}
\end{document}


%--Message-Boundary-15483--  (mail-attachment boundary; not part of the LaTeX source)

