source: pacpussensors/trunk/Vislab/lib3dv/eigen/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h@136

// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CONJUGATE_GRADIENT_H
#define EIGEN_CONJUGATE_GRADIENT_H

namespace Eigen {

namespace internal {

/** \internal Low-level conjugate gradient algorithm
  * \param mat The matrix A
  * \param rhs The right hand side vector b
  * \param x On input an initial solution, on output the computed solution.
  * \param precond A preconditioner being able to efficiently solve for an
  *                approximation of Ax=b (regardless of b)
  * \param iters On input the max number of iterations, on output the number of performed iterations.
  * \param tol_error On input the tolerance error, on output an estimation of the relative error.
  */
template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
EIGEN_DONT_INLINE
void conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x,
                        const Preconditioner& precond, int& iters,
                        typename Dest::RealScalar& tol_error)
{
  using std::sqrt;
  using std::abs;
  typedef typename Dest::RealScalar RealScalar;
  typedef typename Dest::Scalar Scalar;
  typedef Matrix<Scalar,Dynamic,1> VectorType;

  RealScalar tol = tol_error;
  int maxIters = iters;

  int n = mat.cols();

  VectorType residual = rhs - mat * x; // initial residual

  RealScalar rhsNorm2 = rhs.squaredNorm();
  if(rhsNorm2 == 0)
  {
    x.setZero();
    iters = 0;
    tol_error = 0;
    return;
  }
  RealScalar threshold = tol*tol*rhsNorm2; // converge when ||residual||^2 < tol^2 * ||rhs||^2
  RealScalar residualNorm2 = residual.squaredNorm();
  if (residualNorm2 < threshold)
  {
    iters = 0;
    tol_error = sqrt(residualNorm2 / rhsNorm2);
    return;
  }

  VectorType p(n);
  p = precond.solve(residual); // initial search direction

  VectorType z(n), tmp(n);
  RealScalar absNew = numext::real(residual.dot(p)); // r' * invM * r, i.e. the squared residual norm in the preconditioned inner product
  int i = 0;
  while(i < maxIters)
  {
    tmp.noalias() = mat * p; // the bottleneck of the algorithm

    Scalar alpha = absNew / p.dot(tmp); // the amount we travel along the search direction
    x += alpha * p; // update solution
    residual -= alpha * tmp; // update residual

    residualNorm2 = residual.squaredNorm();
    if(residualNorm2 < threshold)
      break;

    z = precond.solve(residual); // approximately solve for "A z = residual"

    RealScalar absOld = absNew;
    absNew = numext::real(residual.dot(z)); // update r' * invM * r
    RealScalar beta = absNew / absOld; // calculate the Gram-Schmidt value used to create the new search direction
    p = z + beta * p; // update search direction
    i++;
  }
  tol_error = sqrt(residualNorm2 / rhsNorm2);
  iters = i;
}
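
// Illustrative sketch (not part of the original file): the low-level routine above can be
// called directly, for instance with a dense matrix and the identity preconditioner.
// All names below are local to this example.
//
//   int n = 100;
//   Eigen::MatrixXd M = Eigen::MatrixXd::Random(n,n);
//   Eigen::MatrixXd A = M.transpose()*M + double(n)*Eigen::MatrixXd::Identity(n,n); // selfadjoint positive definite
//   Eigen::VectorXd b = Eigen::VectorXd::Random(n);
//   Eigen::VectorXd x = Eigen::VectorXd::Zero(n); // initial guess
//   Eigen::IdentityPreconditioner precond;
//   int iters = 2*n;          // max number of iterations in, performed iterations out
//   double tol_error = 1e-10; // tolerance in, estimated relative error out
//   Eigen::internal::conjugate_gradient(A, b, x, precond, iters, tol_error);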

}

template< typename _MatrixType, int _UpLo=Lower,
          typename _Preconditioner = DiagonalPreconditioner<typename _MatrixType::Scalar> >
class ConjugateGradient;

namespace internal {

template< typename _MatrixType, int _UpLo, typename _Preconditioner>
struct traits<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> >
{
  typedef _MatrixType MatrixType;
  typedef _Preconditioner Preconditioner;
};

}

/** \ingroup IterativeLinearSolvers_Module
  * \brief A conjugate gradient solver for sparse self-adjoint problems
  *
  * This class allows solving sparse linear problems of the form A.x = b using a conjugate gradient algorithm.
  * The sparse matrix A must be selfadjoint. The vectors x and b can be either dense or sparse.
  *
  * \tparam _MatrixType the type of the matrix A, can be a dense or a sparse matrix.
  * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower,
  *               Upper, or Lower|Upper, in which case the full matrix entries will be considered
  *               (see the sketch below). Default is Lower.
  * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
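  *
  * As an illustrative sketch (not part of the original documentation), a solver working on both
  * triangular parts of a fully stored matrix would be declared as:
  * \code
  * ConjugateGradient<SparseMatrix<double>, Lower|Upper> cg;
  * \endcode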
  *
  * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
  * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations
  * and NumTraits<Scalar>::epsilon() for the tolerance.
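  * For instance, for a solver instance \c cg (illustrative values, not the defaults):
  * \code
  * cg.setMaxIterations(500);
  * cg.setTolerance(1e-8);
  * \endcode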
  *
  * This class can be used in the same way as the direct solver classes. Here is a typical usage example:
  * \code
  * int n = 10000;
  * VectorXd x(n), b(n);
  * SparseMatrix<double> A(n,n);
  * // fill A and b
  * ConjugateGradient<SparseMatrix<double> > cg;
  * cg.compute(A);
  * x = cg.solve(b);
  * std::cout << "#iterations: " << cg.iterations() << std::endl;
  * std::cout << "estimated error: " << cg.error() << std::endl;
  * // update b, and solve again
  * x = cg.solve(b);
  * \endcode
  *
  * By default the iterations start with x=0 as an initial guess of the solution.
  * One can control the start using the solveWithGuess() method.
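  * For example (illustrative; x0 being an initial guess of the same size as x):
  * \code
  * x = cg.solveWithGuess(b, x0);
  * \endcode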
  *
  * ConjugateGradient can also be used in a matrix-free context, see the following \link MatrixfreeSolverExample example \endlink.
  *
  * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
  */
template< typename _MatrixType, int _UpLo, typename _Preconditioner>
class ConjugateGradient : public IterativeSolverBase<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> >
{
  typedef IterativeSolverBase<ConjugateGradient> Base;
  using Base::mp_matrix;
  using Base::m_error;
  using Base::m_iterations;
  using Base::m_info;
  using Base::m_isInitialized;
public:
  typedef _MatrixType MatrixType;
  typedef typename MatrixType::Scalar Scalar;
  typedef typename MatrixType::Index Index;
  typedef typename MatrixType::RealScalar RealScalar;
  typedef _Preconditioner Preconditioner;

  enum {
    UpLo = _UpLo
  };

public:

  /** Default constructor. */
  ConjugateGradient() : Base() {}

  /** Initialize the solver with matrix \a A for further \c Ax=b solving.
    *
    * This constructor is a shortcut for the default constructor followed
    * by a call to compute().
    *
    * \warning this class stores a reference to the matrix A as well as some
    * precomputed values that depend on it. Therefore, if \a A is changed
    * this class becomes invalid. Call compute() to update it with the new
    * matrix A, or modify a copy of A.
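    *
    * A minimal sketch of this shortcut (illustrative; A, b and x as in the class-level example above):
    * \code
    * ConjugateGradient<SparseMatrix<double> > cg(A);
    * x = cg.solve(b);
    * \endcode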
    */
  template<typename MatrixDerived>
  explicit ConjugateGradient(const EigenBase<MatrixDerived>& A) : Base(A.derived()) {}

  ~ConjugateGradient() {}

  /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A
    * and \a x0 as an initial solution.
    *
    * \sa compute()
    */
  template<typename Rhs,typename Guess>
  inline const internal::solve_retval_with_guess<ConjugateGradient, Rhs, Guess>
  solveWithGuess(const MatrixBase<Rhs>& b, const Guess& x0) const
  {
    eigen_assert(m_isInitialized && "ConjugateGradient is not initialized.");
    eigen_assert(Base::rows()==b.rows()
              && "ConjugateGradient::solve(): invalid number of rows of the right hand side matrix b");
    return internal::solve_retval_with_guess
            <ConjugateGradient, Rhs, Guess>(*this, b.derived(), x0);
  }

  /** \internal */
  template<typename Rhs,typename Dest>
  void _solveWithGuess(const Rhs& b, Dest& x) const
  {
    typedef typename internal::conditional<UpLo==(Lower|Upper),
                                           const MatrixType&,
                                           SparseSelfAdjointView<const MatrixType, UpLo>
                                          >::type MatrixWrapperType;
    m_iterations = Base::maxIterations();
    m_error = Base::m_tolerance;

    for(int j=0; j<b.cols(); ++j)
    {
      m_iterations = Base::maxIterations();
      m_error = Base::m_tolerance;

      typename Dest::ColXpr xj(x,j);
      internal::conjugate_gradient(MatrixWrapperType(*mp_matrix), b.col(j), xj, Base::m_preconditioner, m_iterations, m_error);
    }

    m_isInitialized = true;
    m_info = m_error <= Base::m_tolerance ? Success : NoConvergence;
  }

  /** \internal */
  template<typename Rhs,typename Dest>
  void _solve(const Rhs& b, Dest& x) const
  {
    x.setZero();
    _solveWithGuess(b,x);
  }

protected:

};


namespace internal {

template<typename _MatrixType, int _UpLo, typename _Preconditioner, typename Rhs>
struct solve_retval<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner>, Rhs>
  : solve_retval_base<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner>, Rhs>
{
  typedef ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> Dec;
  EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)

  template<typename Dest> void evalTo(Dest& dst) const
  {
    dec()._solve(rhs(),dst);
  }
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_CONJUGATE_GRADIENT_H