// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2010 Jitse Niesen <jitse@maths.leeds.ac.uk>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
| 10 |
|
|---|
| 11 | #ifndef EIGEN_SELFADJOINTEIGENSOLVER_H
|
|---|
| 12 | #define EIGEN_SELFADJOINTEIGENSOLVER_H
|
|---|
| 13 |
|
|---|
| 14 | #include "./Tridiagonalization.h"
|
|---|
| 15 |
|
|---|
| 16 | namespace Eigen {
|
|---|
| 17 |
|
|---|
| 18 | template<typename _MatrixType>
|
|---|
| 19 | class GeneralizedSelfAdjointEigenSolver;
|
|---|
| 20 |
|
|---|
| 21 | namespace internal {
|
|---|
| 22 | template<typename SolverType,int Size,bool IsComplex> struct direct_selfadjoint_eigenvalues;
|
|---|
| 23 | }
|
|---|
| 24 |
|
|---|
/** \eigenvalues_module \ingroup Eigenvalues_Module
  *
  *
  * \class SelfAdjointEigenSolver
  *
  * \brief Computes eigenvalues and eigenvectors of selfadjoint matrices
  *
  * \tparam _MatrixType the type of the matrix of which we are computing the
  * eigendecomposition; this is expected to be an instantiation of the Matrix
  * class template.
  *
  * A matrix \f$ A \f$ is selfadjoint if it equals its adjoint. For real
  * matrices, this means that the matrix is symmetric: it equals its
  * transpose. This class computes the eigenvalues and eigenvectors of a
  * selfadjoint matrix. These are the scalars \f$ \lambda \f$ and vectors
  * \f$ v \f$ such that \f$ Av = \lambda v \f$. The eigenvalues of a
  * selfadjoint matrix are always real. If \f$ D \f$ is a diagonal matrix with
  * the eigenvalues on the diagonal, and \f$ V \f$ is a matrix with the
  * eigenvectors as its columns, then \f$ A = V D V^{-1} \f$ (for selfadjoint
  * matrices, the matrix \f$ V \f$ is always invertible). This is called the
  * eigendecomposition.
  *
  * The algorithm exploits the fact that the matrix is selfadjoint, making it
  * faster and more accurate than the general purpose eigenvalue algorithms
  * implemented in EigenSolver and ComplexEigenSolver.
  *
  * Only the \b lower \b triangular \b part of the input matrix is referenced.
  *
  * Call the function compute() to compute the eigenvalues and eigenvectors of
  * a given matrix. Alternatively, you can use the
  * SelfAdjointEigenSolver(const MatrixType&, int) constructor which computes
  * the eigenvalues and eigenvectors at construction time. Once the eigenvalue
  * and eigenvectors are computed, they can be retrieved with the eigenvalues()
  * and eigenvectors() functions.
  *
  * The documentation for SelfAdjointEigenSolver(const MatrixType&, int)
  * contains an example of the typical use of this class.
  *
  * To solve the \em generalized eigenvalue problem \f$ Av = \lambda Bv \f$ and
  * the likes, see the class GeneralizedSelfAdjointEigenSolver.
  *
  * \sa MatrixBase::eigenvalues(), class EigenSolver, class ComplexEigenSolver
  */
|
|---|
template<typename _MatrixType> class SelfAdjointEigenSolver
{
  public:

    typedef _MatrixType MatrixType;
    enum {
      Size = MatrixType::RowsAtCompileTime,
      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
      Options = MatrixType::Options,
      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
    };

    /** \brief Scalar type for matrices of type \p _MatrixType. */
    typedef typename MatrixType::Scalar Scalar;
    typedef typename MatrixType::Index Index;

    // Square, column-major matrix type used to store the eigenvectors.
    typedef Matrix<Scalar,Size,Size,ColMajor,MaxColsAtCompileTime,MaxColsAtCompileTime> EigenvectorsType;

    /** \brief Real scalar type for \p _MatrixType.
      *
      * This is just \c Scalar if #Scalar is real (e.g., \c float or
      * \c double), and the type of the real part of \c Scalar if #Scalar is
      * complex.
      */
    typedef typename NumTraits<Scalar>::Real RealScalar;

    // The direct (closed-form) solvers write m_eivec/m_eivalues/m_info directly.
    friend struct internal::direct_selfadjoint_eigenvalues<SelfAdjointEigenSolver,Size,NumTraits<Scalar>::IsComplex>;

    /** \brief Type for vector of eigenvalues as returned by eigenvalues().
      *
      * This is a column vector with entries of type #RealScalar.
      * The length of the vector is the size of \p _MatrixType.
      */
    typedef typename internal::plain_col_type<MatrixType, RealScalar>::type RealVectorType;
    typedef Tridiagonalization<MatrixType> TridiagonalizationType;

    /** \brief Default constructor for fixed-size matrices.
      *
      * The default constructor is useful in cases in which the user intends to
      * perform decompositions via compute(). This constructor
      * can only be used if \p _MatrixType is a fixed-size matrix; use
      * SelfAdjointEigenSolver(Index) for dynamic-size matrices.
      *
      * Example: \include SelfAdjointEigenSolver_SelfAdjointEigenSolver.cpp
      * Output: \verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver.out
      */
    SelfAdjointEigenSolver()
        : m_eivec(),
          m_eivalues(),
          m_subdiag(),
          m_isInitialized(false)
    { }

    /** \brief Constructor, pre-allocates memory for dynamic-size matrices.
      *
      * \param [in]  size  Positive integer, size of the matrix whose
      * eigenvalues and eigenvectors will be computed.
      *
      * This constructor is useful for dynamic-size matrices, when the user
      * intends to perform decompositions via compute(). The \p size
      * parameter is only used as a hint. It is not an error to give a wrong
      * \p size, but it may impair performance.
      *
      * \sa compute() for an example
      */
    SelfAdjointEigenSolver(Index size)
        : m_eivec(size, size),
          m_eivalues(size),
          // The subdiagonal of an n x n tridiagonal matrix has n-1 entries;
          // allocate at least 1 so size==1 does not request a 0-sized vector.
          m_subdiag(size > 1 ? size - 1 : 1),
          m_isInitialized(false)
    {}

    /** \brief Constructor; computes eigendecomposition of given matrix.
      *
      * \param[in]  matrix  Selfadjoint matrix whose eigendecomposition is to
      *    be computed. Only the lower triangular part of the matrix is referenced.
      * \param[in]  options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly.
      *
      * This constructor calls compute(const MatrixType&, int) to compute the
      * eigenvalues of the matrix \p matrix. The eigenvectors are computed if
      * \p options equals #ComputeEigenvectors.
      *
      * Example: \include SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType.cpp
      * Output: \verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType.out
      *
      * \sa compute(const MatrixType&, int)
      */
    SelfAdjointEigenSolver(const MatrixType& matrix, int options = ComputeEigenvectors)
      : m_eivec(matrix.rows(), matrix.cols()),
        m_eivalues(matrix.cols()),
        m_subdiag(matrix.rows() > 1 ? matrix.rows() - 1 : 1),
        m_isInitialized(false)
    {
      compute(matrix, options);
    }

    /** \brief Computes eigendecomposition of given matrix.
      *
      * \param[in]  matrix  Selfadjoint matrix whose eigendecomposition is to
      *    be computed. Only the lower triangular part of the matrix is referenced.
      * \param[in]  options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly.
      * \returns    Reference to \c *this
      *
      * This function computes the eigenvalues of \p matrix.  The eigenvalues()
      * function can be used to retrieve them.  If \p options equals #ComputeEigenvectors,
      * then the eigenvectors are also computed and can be retrieved by
      * calling eigenvectors().
      *
      * This implementation uses a symmetric QR algorithm. The matrix is first
      * reduced to tridiagonal form using the Tridiagonalization class. The
      * tridiagonal matrix is then brought to diagonal form with implicit
      * symmetric QR steps with Wilkinson shift. Details can be found in
      * Section 8.3 of Golub \& Van Loan, <i>%Matrix Computations</i>.
      *
      * The cost of the computation is about \f$ 9n^3 \f$ if the eigenvectors
      * are required and \f$ 4n^3/3 \f$ if they are not required.
      *
      * This method reuses the memory in the SelfAdjointEigenSolver object that
      * was allocated when the object was constructed, if the size of the
      * matrix does not change.
      *
      * Example: \include SelfAdjointEigenSolver_compute_MatrixType.cpp
      * Output: \verbinclude SelfAdjointEigenSolver_compute_MatrixType.out
      *
      * \sa SelfAdjointEigenSolver(const MatrixType&, int)
      */
    SelfAdjointEigenSolver& compute(const MatrixType& matrix, int options = ComputeEigenvectors);

    /** \brief Computes eigendecomposition of given matrix using a direct algorithm
      *
      * This is a variant of compute(const MatrixType&, int options) which
      * directly solves the underlying polynomial equation.
      *
      * Currently only 3x3 matrices for which the sizes are known at compile time are supported (e.g., Matrix3d).
      *
      * This method is usually significantly faster than the QR algorithm
      * but it might also be less accurate. It is also worth noting that
      * for 3x3 matrices it involves trigonometric operations which are
      * not necessarily available for all scalar types.
      *
      * \sa compute(const MatrixType&, int options)
      */
    SelfAdjointEigenSolver& computeDirect(const MatrixType& matrix, int options = ComputeEigenvectors);

    /** \brief Returns the eigenvectors of given matrix.
      *
      * \returns  A const reference to the matrix whose columns are the eigenvectors.
      *
      * \pre The eigenvectors have been computed before.
      *
      * Column \f$ k \f$ of the returned matrix is an eigenvector corresponding
      * to eigenvalue number \f$ k \f$ as returned by eigenvalues().  The
      * eigenvectors are normalized to have (Euclidean) norm equal to one. If
      * this object was used to solve the eigenproblem for the selfadjoint
      * matrix \f$ A \f$, then the matrix returned by this function is the
      * matrix \f$ V \f$ in the eigendecomposition \f$ A = V D V^{-1} \f$.
      *
      * Example: \include SelfAdjointEigenSolver_eigenvectors.cpp
      * Output: \verbinclude SelfAdjointEigenSolver_eigenvectors.out
      *
      * \sa eigenvalues()
      */
    const EigenvectorsType& eigenvectors() const
    {
      eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
      eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
      return m_eivec;
    }

    /** \brief Returns the eigenvalues of given matrix.
      *
      * \returns A const reference to the column vector containing the eigenvalues.
      *
      * \pre The eigenvalues have been computed before.
      *
      * The eigenvalues are repeated according to their algebraic multiplicity,
      * so there are as many eigenvalues as rows in the matrix. The eigenvalues
      * are sorted in increasing order.
      *
      * Example: \include SelfAdjointEigenSolver_eigenvalues.cpp
      * Output: \verbinclude SelfAdjointEigenSolver_eigenvalues.out
      *
      * \sa eigenvectors(), MatrixBase::eigenvalues()
      */
    const RealVectorType& eigenvalues() const
    {
      eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
      return m_eivalues;
    }

    /** \brief Computes the positive-definite square root of the matrix.
      *
      * \returns the positive-definite square root of the matrix
      *
      * \pre The eigenvalues and eigenvectors of a positive-definite matrix
      * have been computed before.
      *
      * The square root of a positive-definite matrix \f$ A \f$ is the
      * positive-definite matrix whose square equals \f$ A \f$. This function
      * uses the eigendecomposition \f$ A = V D V^{-1} \f$ to compute the
      * square root as \f$ A^{1/2} = V D^{1/2} V^{-1} \f$.
      *
      * Example: \include SelfAdjointEigenSolver_operatorSqrt.cpp
      * Output: \verbinclude SelfAdjointEigenSolver_operatorSqrt.out
      *
      * \sa operatorInverseSqrt(),
      *     \ref MatrixFunctions_Module "MatrixFunctions Module"
      */
    MatrixType operatorSqrt() const
    {
      eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
      eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
      return m_eivec * m_eivalues.cwiseSqrt().asDiagonal() * m_eivec.adjoint();
    }

    /** \brief Computes the inverse square root of the matrix.
      *
      * \returns the inverse positive-definite square root of the matrix
      *
      * \pre The eigenvalues and eigenvectors of a positive-definite matrix
      * have been computed before.
      *
      * This function uses the eigendecomposition \f$ A = V D V^{-1} \f$ to
      * compute the inverse square root as \f$ V D^{-1/2} V^{-1} \f$. This is
      * cheaper than first computing the square root with operatorSqrt() and
      * then its inverse with MatrixBase::inverse().
      *
      * Example: \include SelfAdjointEigenSolver_operatorInverseSqrt.cpp
      * Output: \verbinclude SelfAdjointEigenSolver_operatorInverseSqrt.out
      *
      * \sa operatorSqrt(), MatrixBase::inverse(),
      *     \ref MatrixFunctions_Module "MatrixFunctions Module"
      */
    MatrixType operatorInverseSqrt() const
    {
      eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
      eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
      return m_eivec * m_eivalues.cwiseInverse().cwiseSqrt().asDiagonal() * m_eivec.adjoint();
    }

    /** \brief Reports whether previous computation was successful.
      *
      * \returns \c Success if computation was succesful, \c NoConvergence otherwise.
      */
    ComputationInfo info() const
    {
      eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
      return m_info;
    }

    /** \brief Maximum number of iterations.
      *
      * The algorithm terminates if it does not converge within m_maxIterations * n iterations, where n
      * denotes the size of the matrix. This value is currently set to 30 (copied from LAPACK).
      */
    static const int m_maxIterations = 30;

    #ifdef EIGEN2_SUPPORT
    // Deprecated Eigen2-compatibility API: bool flag instead of an options bitmask.
    SelfAdjointEigenSolver(const MatrixType& matrix, bool computeEigenvectors)
      : m_eivec(matrix.rows(), matrix.cols()),
        m_eivalues(matrix.cols()),
        m_subdiag(matrix.rows() > 1 ? matrix.rows() - 1 : 1),
        m_isInitialized(false)
    {
      compute(matrix, computeEigenvectors);
    }

    // Eigen2-compatibility generalized eigenproblem constructor.
    SelfAdjointEigenSolver(const MatrixType& matA, const MatrixType& matB, bool computeEigenvectors = true)
        : m_eivec(matA.cols(), matA.cols()),
          m_eivalues(matA.cols()),
          m_subdiag(matA.cols() > 1 ? matA.cols() - 1 : 1),
          m_isInitialized(false)
    {
      // NOTE(review): this downcasts *this to the derived GeneralizedSelfAdjointEigenSolver
      // to reuse its compute(); presumably the derived class adds no data members,
      // otherwise this cast is UB — confirm against GeneralizedSelfAdjointEigenSolver.
      static_cast<GeneralizedSelfAdjointEigenSolver<MatrixType>*>(this)->compute(matA, matB, computeEigenvectors ? ComputeEigenvectors : EigenvaluesOnly);
    }

    // Eigen2-compatibility overload: translate the bool flag to the options bitmask.
    void compute(const MatrixType& matrix, bool computeEigenvectors)
    {
      compute(matrix, computeEigenvectors ? ComputeEigenvectors : EigenvaluesOnly);
    }

    // Eigen2-compatibility overload for the generalized problem.
    void compute(const MatrixType& matA, const MatrixType& matB, bool computeEigenvectors = true)
    {
      compute(matA, matB, computeEigenvectors ? ComputeEigenvectors : EigenvaluesOnly);
    }
    #endif // EIGEN2_SUPPORT

  protected:
    // Reject integer scalar types at compile time; the algorithm needs a field.
    static void check_template_parameters()
    {
      EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
    }

    EigenvectorsType m_eivec;                                // eigenvectors, one per column
    RealVectorType m_eivalues;                               // eigenvalues, sorted increasing
    typename TridiagonalizationType::SubDiagonalType m_subdiag; // workspace: subdiagonal of the tridiagonal form
    ComputationInfo m_info;                                  // Success or NoConvergence
    bool m_isInitialized;                                    // true once compute() has run
    bool m_eigenvectorsOk;                                   // true if eigenvectors were requested and computed
};
|
|---|
| 368 |
|
|---|
/** \internal
  *
  * \eigenvalues_module \ingroup Eigenvalues_Module
  *
  * Performs a QR step on a tridiagonal symmetric matrix represented as a
  * pair of two vectors \a diag and \a subdiag.
  *
  * \param diag the diagonal part of the input selfadjoint tridiagonal matrix
  * \param subdiag the sub-diagonal part of the input selfadjoint tridiagonal matrix
  * \param start starting index of the active (unreduced) block
  * \param end last index of the active (unreduced) block
  * \param matrixQ pointer to the matrix accumulating the rotations, or 0 if eigenvectors are not needed
  * \param n size of the input matrix
  *
  * For compilation efficiency reasons, this procedure does not use eigen expression
  * for its arguments.
  *
  * Implemented from Golub's "Matrix Computations", algorithm 8.3.2:
  * "implicit symmetric QR step with Wilkinson shift"
  */
|---|
| 385 | namespace internal {
|
|---|
| 386 | template<typename RealScalar, typename Scalar, typename Index>
|
|---|
| 387 | static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n);
|
|---|
| 388 | }
|
|---|
| 389 |
|
|---|
// Iterative eigendecomposition: tridiagonalize, then deflating implicit
// symmetric QR steps with Wilkinson shift (Golub & Van Loan, Section 8.3).
template<typename MatrixType>
SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
::compute(const MatrixType& matrix, int options)
{
  check_template_parameters();

  using std::abs;
  eigen_assert(matrix.cols() == matrix.rows());
  eigen_assert((options&~(EigVecMask|GenEigMask))==0
            && (options&EigVecMask)!=EigVecMask
            && "invalid option parameter");
  bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;
  Index n = matrix.cols();
  m_eivalues.resize(n,1);

  if(n==1)
  {
    // Trivial 1x1 case: the eigenvalue is the (real part of the) only entry,
    // and the single eigenvector is [1].
    m_eivalues.coeffRef(0,0) = numext::real(matrix.coeff(0,0));
    if(computeEigenvectors)
      m_eivec.setOnes(n,n);
    m_info = Success;
    m_isInitialized = true;
    m_eigenvectorsOk = computeEigenvectors;
    return *this;
  }

  // declare some aliases
  RealVectorType& diag = m_eivalues;
  EigenvectorsType& mat = m_eivec;

  // map the matrix coefficients to [-1:1] to avoid over- and underflow.
  mat = matrix.template triangularView<Lower>();
  RealScalar scale = mat.cwiseAbs().maxCoeff();
  if(scale==RealScalar(0)) scale = RealScalar(1);  // avoid dividing by zero for the zero matrix
  mat.template triangularView<Lower>() /= scale;
  m_subdiag.resize(n-1);
  // Reduce to tridiagonal form; 'mat' is overwritten with the accumulated
  // orthogonal transform when eigenvectors are requested.
  internal::tridiagonalization_inplace(mat, diag, m_subdiag, computeEigenvectors);

  Index end = n-1;
  Index start = 0;
  Index iter = 0; // total number of iterations

  while (end>0)
  {
    // Zero out negligible subdiagonal entries so the problem deflates.
    for (Index i = start; i<end; ++i)
      if (internal::isMuchSmallerThan(abs(m_subdiag[i]),(abs(diag[i])+abs(diag[i+1]))))
        m_subdiag[i] = 0;

    // find the largest unreduced block
    while (end>0 && m_subdiag[end-1]==0)
    {
      end--;
    }
    if (end<=0)
      break;

    // if we spent too many iterations, we give up
    iter++;
    if(iter > m_maxIterations * n) break;

    // Locate the start of the trailing unreduced block [start, end].
    start = end - 1;
    while (start>0 && m_subdiag[start-1]!=0)
      start--;

    internal::tridiagonal_qr_step(diag.data(), m_subdiag.data(), start, end, computeEigenvectors ? m_eivec.data() : (Scalar*)0, n);
  }

  if (iter <= m_maxIterations * n)
    m_info = Success;
  else
    m_info = NoConvergence;

  // Sort eigenvalues and corresponding vectors.
  // TODO make the sort optional ?
  // TODO use a better sort algorithm !!
  // (Selection sort: O(n^2) swaps, negligible next to the O(n^3) decomposition.)
  if (m_info == Success)
  {
    for (Index i = 0; i < n-1; ++i)
    {
      Index k;
      m_eivalues.segment(i,n-i).minCoeff(&k);
      if (k > 0)
      {
        std::swap(m_eivalues[i], m_eivalues[k+i]);
        if(computeEigenvectors)
          m_eivec.col(i).swap(m_eivec.col(k+i));
      }
    }
  }

  // scale back the eigen values
  m_eivalues *= scale;

  m_isInitialized = true;
  m_eigenvectorsOk = computeEigenvectors;
  return *this;
}
|
|---|
| 487 |
|
|---|
| 488 |
|
|---|
| 489 | namespace internal {
|
|---|
| 490 |
|
|---|
// Generic fallback: no closed-form path exists for this size/scalar combination,
// so delegate to the iterative QR algorithm in SolverType::compute().
template<typename SolverType,int Size,bool IsComplex> struct direct_selfadjoint_eigenvalues
{
  static inline void run(SolverType& eig, const typename SolverType::MatrixType& A, int options)
  { eig.compute(A,options); }
};
|
|---|
| 496 |
|
|---|
| 497 | template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3,false>
|
|---|
| 498 | {
|
|---|
| 499 | typedef typename SolverType::MatrixType MatrixType;
|
|---|
| 500 | typedef typename SolverType::RealVectorType VectorType;
|
|---|
| 501 | typedef typename SolverType::Scalar Scalar;
|
|---|
| 502 | typedef typename MatrixType::Index Index;
|
|---|
| 503 | typedef typename SolverType::EigenvectorsType EigenvectorsType;
|
|---|
| 504 |
|
|---|
| 505 | /** \internal
|
|---|
| 506 | * Computes the roots of the characteristic polynomial of \a m.
|
|---|
| 507 | * For numerical stability m.trace() should be near zero and to avoid over- or underflow m should be normalized.
|
|---|
| 508 | */
|
|---|
| 509 | static inline void computeRoots(const MatrixType& m, VectorType& roots)
|
|---|
| 510 | {
|
|---|
| 511 | using std::sqrt;
|
|---|
| 512 | using std::atan2;
|
|---|
| 513 | using std::cos;
|
|---|
| 514 | using std::sin;
|
|---|
| 515 | const Scalar s_inv3 = Scalar(1.0)/Scalar(3.0);
|
|---|
| 516 | const Scalar s_sqrt3 = sqrt(Scalar(3.0));
|
|---|
| 517 |
|
|---|
| 518 | // The characteristic equation is x^3 - c2*x^2 + c1*x - c0 = 0. The
|
|---|
| 519 | // eigenvalues are the roots to this equation, all guaranteed to be
|
|---|
| 520 | // real-valued, because the matrix is symmetric.
|
|---|
| 521 | Scalar c0 = m(0,0)*m(1,1)*m(2,2) + Scalar(2)*m(1,0)*m(2,0)*m(2,1) - m(0,0)*m(2,1)*m(2,1) - m(1,1)*m(2,0)*m(2,0) - m(2,2)*m(1,0)*m(1,0);
|
|---|
| 522 | Scalar c1 = m(0,0)*m(1,1) - m(1,0)*m(1,0) + m(0,0)*m(2,2) - m(2,0)*m(2,0) + m(1,1)*m(2,2) - m(2,1)*m(2,1);
|
|---|
| 523 | Scalar c2 = m(0,0) + m(1,1) + m(2,2);
|
|---|
| 524 |
|
|---|
| 525 | // Construct the parameters used in classifying the roots of the equation
|
|---|
| 526 | // and in solving the equation for the roots in closed form.
|
|---|
| 527 | Scalar c2_over_3 = c2*s_inv3;
|
|---|
| 528 | Scalar a_over_3 = (c2*c2_over_3 - c1)*s_inv3;
|
|---|
| 529 | if(a_over_3<Scalar(0))
|
|---|
| 530 | a_over_3 = Scalar(0);
|
|---|
| 531 |
|
|---|
| 532 | Scalar half_b = Scalar(0.5)*(c0 + c2_over_3*(Scalar(2)*c2_over_3*c2_over_3 - c1));
|
|---|
| 533 |
|
|---|
| 534 | Scalar q = a_over_3*a_over_3*a_over_3 - half_b*half_b;
|
|---|
| 535 | if(q<Scalar(0))
|
|---|
| 536 | q = Scalar(0);
|
|---|
| 537 |
|
|---|
| 538 | // Compute the eigenvalues by solving for the roots of the polynomial.
|
|---|
| 539 | Scalar rho = sqrt(a_over_3);
|
|---|
| 540 | Scalar theta = atan2(sqrt(q),half_b)*s_inv3; // since sqrt(q) > 0, atan2 is in [0, pi] and theta is in [0, pi/3]
|
|---|
| 541 | Scalar cos_theta = cos(theta);
|
|---|
| 542 | Scalar sin_theta = sin(theta);
|
|---|
| 543 | // roots are already sorted, since cos is monotonically decreasing on [0, pi]
|
|---|
| 544 | roots(0) = c2_over_3 - rho*(cos_theta + s_sqrt3*sin_theta); // == 2*rho*cos(theta+2pi/3)
|
|---|
| 545 | roots(1) = c2_over_3 - rho*(cos_theta - s_sqrt3*sin_theta); // == 2*rho*cos(theta+ pi/3)
|
|---|
| 546 | roots(2) = c2_over_3 + Scalar(2)*rho*cos_theta;
|
|---|
| 547 | }
|
|---|
| 548 |
|
|---|
| 549 | static inline bool extract_kernel(MatrixType& mat, Ref<VectorType> res, Ref<VectorType> representative)
|
|---|
| 550 | {
|
|---|
| 551 | using std::abs;
|
|---|
| 552 | Index i0;
|
|---|
| 553 | // Find non-zero column i0 (by construction, there must exist a non zero coefficient on the diagonal):
|
|---|
| 554 | mat.diagonal().cwiseAbs().maxCoeff(&i0);
|
|---|
| 555 | // mat.col(i0) is a good candidate for an orthogonal vector to the current eigenvector,
|
|---|
| 556 | // so let's save it:
|
|---|
| 557 | representative = mat.col(i0);
|
|---|
| 558 | Scalar n0, n1;
|
|---|
| 559 | VectorType c0, c1;
|
|---|
| 560 | n0 = (c0 = representative.cross(mat.col((i0+1)%3))).squaredNorm();
|
|---|
| 561 | n1 = (c1 = representative.cross(mat.col((i0+2)%3))).squaredNorm();
|
|---|
| 562 | if(n0>n1) res = c0/std::sqrt(n0);
|
|---|
| 563 | else res = c1/std::sqrt(n1);
|
|---|
| 564 |
|
|---|
| 565 | return true;
|
|---|
| 566 | }
|
|---|
| 567 |
|
|---|
| 568 | static inline void run(SolverType& solver, const MatrixType& mat, int options)
|
|---|
| 569 | {
|
|---|
| 570 | eigen_assert(mat.cols() == 3 && mat.cols() == mat.rows());
|
|---|
| 571 | eigen_assert((options&~(EigVecMask|GenEigMask))==0
|
|---|
| 572 | && (options&EigVecMask)!=EigVecMask
|
|---|
| 573 | && "invalid option parameter");
|
|---|
| 574 | bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;
|
|---|
| 575 |
|
|---|
| 576 | EigenvectorsType& eivecs = solver.m_eivec;
|
|---|
| 577 | VectorType& eivals = solver.m_eivalues;
|
|---|
| 578 |
|
|---|
| 579 | // Shift the matrix to the mean eigenvalue and map the matrix coefficients to [-1:1] to avoid over- and underflow.
|
|---|
| 580 | Scalar shift = mat.trace() / Scalar(3);
|
|---|
| 581 | // TODO Avoid this copy. Currently it is necessary to suppress bogus values when determining maxCoeff and for computing the eigenvectors later
|
|---|
| 582 | MatrixType scaledMat = mat.template selfadjointView<Lower>();
|
|---|
| 583 | scaledMat.diagonal().array() -= shift;
|
|---|
| 584 | Scalar scale = scaledMat.cwiseAbs().maxCoeff();
|
|---|
| 585 | if(scale > 0) scaledMat /= scale; // TODO for scale==0 we could save the remaining operations
|
|---|
| 586 |
|
|---|
| 587 | // compute the eigenvalues
|
|---|
| 588 | computeRoots(scaledMat,eivals);
|
|---|
| 589 |
|
|---|
| 590 | // compute the eigenvectors
|
|---|
| 591 | if(computeEigenvectors)
|
|---|
| 592 | {
|
|---|
| 593 | if((eivals(2)-eivals(0))<=Eigen::NumTraits<Scalar>::epsilon())
|
|---|
| 594 | {
|
|---|
| 595 | // All three eigenvalues are numerically the same
|
|---|
| 596 | eivecs.setIdentity();
|
|---|
| 597 | }
|
|---|
| 598 | else
|
|---|
| 599 | {
|
|---|
| 600 | MatrixType tmp;
|
|---|
| 601 | tmp = scaledMat;
|
|---|
| 602 |
|
|---|
| 603 | // Compute the eigenvector of the most distinct eigenvalue
|
|---|
| 604 | Scalar d0 = eivals(2) - eivals(1);
|
|---|
| 605 | Scalar d1 = eivals(1) - eivals(0);
|
|---|
| 606 | Index k(0), l(2);
|
|---|
| 607 | if(d0 > d1)
|
|---|
| 608 | {
|
|---|
| 609 | std::swap(k,l);
|
|---|
| 610 | d0 = d1;
|
|---|
| 611 | }
|
|---|
| 612 |
|
|---|
| 613 | // Compute the eigenvector of index k
|
|---|
| 614 | {
|
|---|
| 615 | tmp.diagonal().array () -= eivals(k);
|
|---|
| 616 | // By construction, 'tmp' is of rank 2, and its kernel corresponds to the respective eigenvector.
|
|---|
| 617 | extract_kernel(tmp, eivecs.col(k), eivecs.col(l));
|
|---|
| 618 | }
|
|---|
| 619 |
|
|---|
| 620 | // Compute eigenvector of index l
|
|---|
| 621 | if(d0<=2*Eigen::NumTraits<Scalar>::epsilon()*d1)
|
|---|
| 622 | {
|
|---|
| 623 | // If d0 is too small, then the two other eigenvalues are numerically the same,
|
|---|
| 624 | // and thus we only have to ortho-normalize the near orthogonal vector we saved above.
|
|---|
| 625 | eivecs.col(l) -= eivecs.col(k).dot(eivecs.col(l))*eivecs.col(l);
|
|---|
| 626 | eivecs.col(l).normalize();
|
|---|
| 627 | }
|
|---|
| 628 | else
|
|---|
| 629 | {
|
|---|
| 630 | tmp = scaledMat;
|
|---|
| 631 | tmp.diagonal().array () -= eivals(l);
|
|---|
| 632 |
|
|---|
| 633 | VectorType dummy;
|
|---|
| 634 | extract_kernel(tmp, eivecs.col(l), dummy);
|
|---|
| 635 | }
|
|---|
| 636 |
|
|---|
| 637 | // Compute last eigenvector from the other two
|
|---|
| 638 | eivecs.col(1) = eivecs.col(2).cross(eivecs.col(0)).normalized();
|
|---|
| 639 | }
|
|---|
| 640 | }
|
|---|
| 641 |
|
|---|
| 642 | // Rescale back to the original size.
|
|---|
| 643 | eivals *= scale;
|
|---|
| 644 | eivals.array() += shift;
|
|---|
| 645 |
|
|---|
| 646 | solver.m_info = Success;
|
|---|
| 647 | solver.m_isInitialized = true;
|
|---|
| 648 | solver.m_eigenvectorsOk = computeEigenvectors;
|
|---|
| 649 | }
|
|---|
| 650 | };
|
|---|
| 651 |
|
|---|
| 652 | // 2x2 direct eigenvalues decomposition, code from Hauke Heibel
|
|---|
| 653 | template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,2,false>
|
|---|
| 654 | {
|
|---|
| 655 | typedef typename SolverType::MatrixType MatrixType;
|
|---|
| 656 | typedef typename SolverType::RealVectorType VectorType;
|
|---|
| 657 | typedef typename SolverType::Scalar Scalar;
|
|---|
| 658 | typedef typename SolverType::EigenvectorsType EigenvectorsType;
|
|---|
| 659 |
|
|---|
| 660 | static inline void computeRoots(const MatrixType& m, VectorType& roots)
|
|---|
| 661 | {
|
|---|
| 662 | using std::sqrt;
|
|---|
| 663 | const Scalar t0 = Scalar(0.5) * sqrt( numext::abs2(m(0,0)-m(1,1)) + Scalar(4)*numext::abs2(m(1,0)));
|
|---|
| 664 | const Scalar t1 = Scalar(0.5) * (m(0,0) + m(1,1));
|
|---|
| 665 | roots(0) = t1 - t0;
|
|---|
| 666 | roots(1) = t1 + t0;
|
|---|
| 667 | }
|
|---|
| 668 |
|
|---|
| 669 | static inline void run(SolverType& solver, const MatrixType& mat, int options)
|
|---|
| 670 | {
|
|---|
| 671 | using std::sqrt;
|
|---|
| 672 | using std::abs;
|
|---|
| 673 |
|
|---|
| 674 | eigen_assert(mat.cols() == 2 && mat.cols() == mat.rows());
|
|---|
| 675 | eigen_assert((options&~(EigVecMask|GenEigMask))==0
|
|---|
| 676 | && (options&EigVecMask)!=EigVecMask
|
|---|
| 677 | && "invalid option parameter");
|
|---|
| 678 | bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;
|
|---|
| 679 |
|
|---|
| 680 | EigenvectorsType& eivecs = solver.m_eivec;
|
|---|
| 681 | VectorType& eivals = solver.m_eivalues;
|
|---|
| 682 |
|
|---|
| 683 | // map the matrix coefficients to [-1:1] to avoid over- and underflow.
|
|---|
| 684 | Scalar scale = mat.cwiseAbs().maxCoeff();
|
|---|
| 685 | scale = (std::max)(scale,Scalar(1));
|
|---|
| 686 | MatrixType scaledMat = mat / scale;
|
|---|
| 687 |
|
|---|
| 688 | // Compute the eigenvalues
|
|---|
| 689 | computeRoots(scaledMat,eivals);
|
|---|
| 690 |
|
|---|
| 691 | // compute the eigen vectors
|
|---|
| 692 | if(computeEigenvectors)
|
|---|
| 693 | {
|
|---|
| 694 | if((eivals(1)-eivals(0))<=abs(eivals(1))*Eigen::NumTraits<Scalar>::epsilon())
|
|---|
| 695 | {
|
|---|
| 696 | eivecs.setIdentity();
|
|---|
| 697 | }
|
|---|
| 698 | else
|
|---|
| 699 | {
|
|---|
| 700 | scaledMat.diagonal().array () -= eivals(1);
|
|---|
| 701 | Scalar a2 = numext::abs2(scaledMat(0,0));
|
|---|
| 702 | Scalar c2 = numext::abs2(scaledMat(1,1));
|
|---|
| 703 | Scalar b2 = numext::abs2(scaledMat(1,0));
|
|---|
| 704 | if(a2>c2)
|
|---|
| 705 | {
|
|---|
| 706 | eivecs.col(1) << -scaledMat(1,0), scaledMat(0,0);
|
|---|
| 707 | eivecs.col(1) /= sqrt(a2+b2);
|
|---|
| 708 | }
|
|---|
| 709 | else
|
|---|
| 710 | {
|
|---|
| 711 | eivecs.col(1) << -scaledMat(1,1), scaledMat(1,0);
|
|---|
| 712 | eivecs.col(1) /= sqrt(c2+b2);
|
|---|
| 713 | }
|
|---|
| 714 |
|
|---|
| 715 | eivecs.col(0) << eivecs.col(1).unitOrthogonal();
|
|---|
| 716 | }
|
|---|
| 717 | }
|
|---|
| 718 |
|
|---|
| 719 | // Rescale back to the original size.
|
|---|
| 720 | eivals *= scale;
|
|---|
| 721 |
|
|---|
| 722 | solver.m_info = Success;
|
|---|
| 723 | solver.m_isInitialized = true;
|
|---|
| 724 | solver.m_eigenvectorsOk = computeEigenvectors;
|
|---|
| 725 | }
|
|---|
| 726 | };
|
|---|
| 727 |
|
|---|
| 728 | }
|
|---|
| 729 |
|
|---|
| 730 | template<typename MatrixType>
|
|---|
| 731 | SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
|
|---|
| 732 | ::computeDirect(const MatrixType& matrix, int options)
|
|---|
| 733 | {
|
|---|
| 734 | internal::direct_selfadjoint_eigenvalues<SelfAdjointEigenSolver,Size,NumTraits<Scalar>::IsComplex>::run(*this,matrix,options);
|
|---|
| 735 | return *this;
|
|---|
| 736 | }
|
|---|
| 737 |
|
|---|
| 738 | namespace internal {
|
|---|
// Performs one implicit symmetric QR step on the active tridiagonal block
// [start..end] of (diag, subdiag), where
//   - diag[i]    holds T(i,i);
//   - subdiag[i] holds the off-diagonal entry T(i+1,i).
// The Givens rotations are chased down the block ("bulge chasing") so that T
// remains tridiagonal. When matrixQ is non-null, it points to the column-major
// n x n matrix of accumulated transformations, updated as Q = Q * G for every
// rotation G applied to T.
template<typename RealScalar, typename Scalar, typename Index>
static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n)
{
  using std::abs;
  // Wilkinson shift mu: the eigenvalue of the trailing 2x2 block
  //   [ diag[end-1]  e ; e  diag[end] ]
  // closest to diag[end], where td is the half-difference of its diagonal.
  RealScalar td = (diag[end-1] - diag[end])*RealScalar(0.5);
  RealScalar e = subdiag[end-1];
  // Note that thanks to scaling, e^2 or td^2 cannot overflow, however they can still
  // underflow thus leading to inf/NaN values when using the following commented code:
  // RealScalar e2 = numext::abs2(subdiag[end-1]);
  // RealScalar mu = diag[end] - e2 / (td + (td>0 ? 1 : -1) * sqrt(td*td + e2));
  // This explains the following, somewhat more complicated, version:
  RealScalar mu = diag[end];
  if(td==0)
    mu -= abs(e);
  else
  {
    RealScalar e2 = numext::abs2(subdiag[end-1]);
    RealScalar h = numext::hypot(td,e);
    // When e2 underflowed to zero (or e is exactly zero) the exact correction
    // e2/(td +/- h) is negligible anyway; this factored form only serves to
    // avoid producing 0/0 or inf in that regime.
    if(e2==0)  mu -= (e / (td + (td>0 ? 1 : -1))) * (e / h);
    else       mu -= e2 / (td + (td>0 ? h : -h));
  }

  // Chase the bulge: x,z form the vector annihilated by the next rotation;
  // initially they implicitly apply the shift mu to the leading column.
  RealScalar x = diag[start] - mu;
  RealScalar z = subdiag[start];
  for (Index k = start; k < end; ++k)
  {
    JacobiRotation<RealScalar> rot;
    rot.makeGivens(x, z);

    // do T = G' T G
    RealScalar sdk = rot.s() * diag[k] + rot.c() * subdiag[k];
    RealScalar dkp1 = rot.s() * subdiag[k] + rot.c() * diag[k+1];

    diag[k] = rot.c() * (rot.c() * diag[k] - rot.s() * subdiag[k]) - rot.s() * (rot.c() * subdiag[k] - rot.s() * diag[k+1]);
    diag[k+1] = rot.s() * sdk + rot.c() * dkp1;
    subdiag[k] = rot.c() * sdk - rot.s() * dkp1;


    if (k > start)
      subdiag[k - 1] = rot.c() * subdiag[k-1] - rot.s() * z;

    x = subdiag[k];

    // The rotation creates a new bulge z just below the subdiagonal;
    // it is annihilated by the rotation of the next iteration.
    if (k < end - 1)
    {
      z = -rot.s() * subdiag[k+1];
      subdiag[k + 1] = rot.c() * subdiag[k+1];
    }

    // apply the givens rotation to the unit matrix Q = Q * G
    if (matrixQ)
    {
      Map<Matrix<Scalar,Dynamic,Dynamic,ColMajor> > q(matrixQ,n,n);
      q.applyOnTheRight(k,k+1,rot);
    }
  }
}
|
|---|
| 796 |
|
|---|
| 797 | } // end namespace internal
|
|---|
| 798 |
|
|---|
| 799 | } // end namespace Eigen
|
|---|
| 800 |
|
|---|
| 801 | #endif // EIGEN_SELFADJOINTEIGENSOLVER_H
|
|---|