\documentclass[a4paper,12pt]{article}
\newcommand{\ds}{\displaystyle}
\parindent=0pt
\begin{document}

{\bf Question} Let $A$ be a real $2 \times 2$ matrix.

\begin{description}
\item[(i)]
Show that if the eigenvalues of $A$ are real, and there exist two
linearly independent eigenvectors $u,\ v$, then the matrix $P$
whose columns are $\{u,v\}$ satisfies $AP=PD$ where $D$ is the $2
\times 2$ diagonal matrix whose diagonal entries are the
eigenvalues of $A$.

\item[(ii)]
Show that if $A$ has repeated real eigenvalue $\lambda$ and only
one independent eigenvector $u$, then by taking $v$ to be a vector
that satisfies $(A-\lambda I)v=u$ and building $P$ from $u,\ v$,
as above we have $AP=PE$ where $E$ is the $2 \times 2$ matrix
$\left(\begin{array} {cc} \lambda & 1\\ 0 & \lambda \end{array}
\right)$.

\item[(iii)]
Show that if $A$ has a pair of complex eigenvalues $\alpha \pm i
\beta$ then, denoting a complex eigenvector corresponding to
$\alpha + i \beta$ by $\xi + i \eta$ ($\xi,\eta$ real vectors),
the matrix $P$ whose columns are $\{\xi,\eta\}$ satisfies
$AP=P\left(\begin{array}{rc} \alpha & \beta\\ -\beta & \alpha
\end{array}\right)$.


[The above is the proof that in suitable coordinates every
\underline{linear} system in $\mathbf{R}^2$ takes one of the 3
standard forms.]
\end{description}
\medskip

{\bf Answer}
\begin{description}
\item[(i)]
If $Au=\lambda u,\ Av=\mu v$ (maybe $\lambda=\mu$) then the
columns of $AP$ are $\{\lambda u,\mu v\}$, so \underline{$AP=PD$}
with $D=\left(\begin{array} {cc} \lambda & 0\\ 0 & \mu
\end{array} \right)$.

\item[(ii)]
Here $Au=\lambda u$ and $Av=\lambda v+u$ so we see
\underline{$AP=PE$},\

$E=\left(\begin{array} {cc} \lambda & 1\\ 0 & \lambda
\end{array} \right)$.

\item[(iii)]
We have $A(\xi+i\eta)=(\alpha+i\beta)(\xi+i\eta)$, and $A$ is
\underline {real}, so taking real and imaginary parts we see
$A\xi=\alpha\xi-\beta\eta,\ A\eta=\beta\xi+\alpha\eta$. Thus
\underline{$AP=P\left(\begin{array}{rc} \alpha & \beta\\ -\beta &
\alpha \end{array} \right)$}.
\end{description}

In (ii) $u \ne 0$ (by definition of eigenvector), so $v \ne 0$.
Indeed, $v$ is not a scalar multiple of $u$, as $(A-\lambda I)u=0$
but $(A-\lambda I)v \ne 0$. So $\{u,\ v\}$ are linearly
independent.  In (iii) neither $\xi$ nor $\eta$ is $0$ (else
$A(\xi+i\eta)=(\alpha+i\beta)(\xi+i\eta)$ gives $\beta=0$), and
if $\xi=k\eta,\ k\in \mathbf{R}$, then $(\alpha
k-\beta)\eta=A\xi=kA\eta=k(k\beta+\alpha)\eta$, giving
$k^2=-1$: contradiction.  So in all cases $P$ is invertible, so
$P^{-1}AP=D,E$ or $\left(\begin{array}{rc} \alpha & \beta\\ -\beta
& \alpha \end{array} \right)$.

\end{document}
