source: pacpussensors/trunk/Vislab/lib3dv/eigen/unsupported/Eigen/src/IterativeSolvers/MINRES.h@ 136

// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Giacomo Po <gpo@ucla.edu>
// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.


#ifndef EIGEN_MINRES_H_
#define EIGEN_MINRES_H_


namespace Eigen {

    namespace internal {

        /** \internal Low-level MINRES algorithm
         * \param mat The matrix A
         * \param rhs The right hand side vector b
         * \param x On input an initial solution, on output the computed solution.
         * \param precond A right preconditioner able to efficiently solve for an
         *                approximation of Ax=b (regardless of b)
         * \param iters On input the maximum number of iterations, on output the number of performed iterations.
         * \param tol_error On input the tolerance error, on output an estimation of the relative error.
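         *
         * A minimal call sketch (assuming A is a symmetric SparseMatrix<double>, b and x
         * are VectorXd objects of matching size, and the identity preconditioner is used):
         * \code
         * int iters = 100;     // maximum number of iterations on input
         * double tol = 1e-10;  // tolerance on input, estimated relative error on output
         * IdentityPreconditioner precond;
         * x.setZero();
         * internal::minres(A, b, x, precond, iters, tol);
         * \endcode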
         */
        template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
        EIGEN_DONT_INLINE
        void minres(const MatrixType& mat, const Rhs& rhs, Dest& x,
                    const Preconditioner& precond, int& iters,
                    typename Dest::RealScalar& tol_error)
        {
            using std::sqrt;
            typedef typename Dest::RealScalar RealScalar;
            typedef typename Dest::Scalar Scalar;
            typedef Matrix<Scalar,Dynamic,1> VectorType;

            // Check for zero rhs
            const RealScalar rhsNorm2(rhs.squaredNorm());
            if(rhsNorm2 == 0)
            {
                x.setZero();
                iters = 0;
                tol_error = 0;
                return;
            }

            // initialize
            const int maxIters(iters);  // initialize maxIters to iters
            const int N(mat.cols());    // the size of the matrix
            const RealScalar threshold2(tol_error*tol_error*rhsNorm2); // convergence threshold (compared to residualNorm2)

            // Initialize preconditioned Lanczos
            VectorType v_old(N); // will be initialized inside loop
            VectorType v( VectorType::Zero(N) ); // initialize v
            VectorType v_new(rhs-mat*x); // initialize v_new
            RealScalar residualNorm2(v_new.squaredNorm());
            VectorType w(N); // will be initialized inside loop
            VectorType w_new(precond.solve(v_new)); // initialize w_new
//            RealScalar beta; // will be initialized inside loop
            RealScalar beta_new2(v_new.dot(w_new));
            eigen_assert(beta_new2 >= 0.0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE");
            RealScalar beta_new(sqrt(beta_new2));
            const RealScalar beta_one(beta_new);
            v_new /= beta_new;
            w_new /= beta_new;
            // Initialize other variables
            RealScalar c(1.0); // the cosine of the Givens rotation
            RealScalar c_old(1.0);
            RealScalar s(0.0); // the sine of the Givens rotation
            RealScalar s_old(0.0); // the sine of the Givens rotation
            VectorType p_oold(N); // will be initialized in loop
            VectorType p_old(VectorType::Zero(N)); // initialize p_old=0
            VectorType p(p_old); // initialize p=0
            RealScalar eta(1.0);

            iters = 0; // reset iters
            while ( iters < maxIters )
            {
                // Preconditioned Lanczos
                /* Note that there are 4 variants on the Lanczos algorithm. These are
                 * described in Paige, C. C. (1972). Computational variants of
                 * the Lanczos method for the eigenproblem. IMA Journal of Applied
                 * Mathematics, 10(3), 373–381. The current implementation corresponds
                 * to the case A(2,7) in the paper. It also corresponds to
                 * algorithm 6.14 in Y. Saad, Iterative Methods for Sparse Linear
                 * Systems, 2003 p.173. For the preconditioned version see
                 * A. Greenbaum, Iterative Methods for Solving Linear Systems, SIAM (1997).
                 */
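                /* In exact arithmetic the updates below realize the preconditioned Lanczos
                 * recurrence  beta_{k+1} v_{k+1} = A M^{-1} v_k - alpha_k v_k - beta_k v_{k-1},
                 * with w_k = M^{-1} v_k and the v_k normalized so that v_k' M^{-1} v_k = 1. */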
                const RealScalar beta(beta_new);
                v_old = v; // update: at first time step, this makes v_old = 0 so value of beta doesn't matter
//                const VectorType v_old(v); // NOT SURE IF CREATING v_old EVERY ITERATION IS EFFICIENT
                v = v_new; // update
                w = w_new; // update
//                const VectorType w(w_new); // NOT SURE IF CREATING w EVERY ITERATION IS EFFICIENT
                v_new.noalias() = mat*w - beta*v_old; // compute v_new
                const RealScalar alpha = v_new.dot(w);
                v_new -= alpha*v; // overwrite v_new
                w_new = precond.solve(v_new); // overwrite w_new
                beta_new2 = v_new.dot(w_new); // compute beta_new
                eigen_assert(beta_new2 >= 0.0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE");
                beta_new = sqrt(beta_new2); // compute beta_new
                v_new /= beta_new; // overwrite v_new for next iteration
                w_new /= beta_new; // overwrite w_new for next iteration

                // Givens rotation
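                /* The previous two rotations are applied to the new column of the
                 * tridiagonal Lanczos matrix (yielding r3, r2 and r1_hat); a new rotation
                 * (c, s) then annihilates the subdiagonal entry beta_new, leaving the
                 * diagonal entry r1 of the R factor of the incremental QR factorization. */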
                const RealScalar r2 = s*alpha + c*c_old*beta; // s, s_old, c and c_old are still from previous iteration
                const RealScalar r3 = s_old*beta;             // s, s_old, c and c_old are still from previous iteration
                const RealScalar r1_hat = c*alpha - c_old*s*beta;
                const RealScalar r1 = sqrt( std::pow(r1_hat,2) + std::pow(beta_new,2) );
                c_old = c; // store for next iteration
                s_old = s; // store for next iteration
                c = r1_hat/r1; // new cosine
                s = beta_new/r1; // new sine

                // Update solution
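                /* The search directions p obey a three-term recurrence obtained by
                 * back-substitution with the R factor, while beta_one*c*eta is the
                 * current entry of the reduced right-hand side beta_one*e_1 after the
                 * accumulated Givens rotations. */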
                p_oold = p_old;
//                const VectorType p_oold(p_old); // NOT SURE IF CREATING p_oold EVERY ITERATION IS EFFICIENT
                p_old = p;
                p.noalias() = (w - r2*p_old - r3*p_oold) / r1; // IS NOALIAS REQUIRED?
                x += beta_one*c*eta*p;

                /* Update the squared residual. Note that this is the estimated residual.
                   The real residual |Ax-b|^2 may be slightly larger */
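                /* (In MINRES the residual norm satisfies |r_k| = |s_k| * |r_{k-1}|,
                   hence the multiplicative update below.) */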
                residualNorm2 *= s*s;

                if ( residualNorm2 < threshold2)
                {
                    break;
                }

                eta = -s*eta; // update eta
                iters++; // increment iteration number (for output purposes)
            }

            /* Compute error. Note that this is the estimated error. The real
               error |Ax-b|/|b| may be slightly larger */
            tol_error = std::sqrt(residualNorm2 / rhsNorm2);
        }

    }

    template< typename _MatrixType, int _UpLo=Lower,
              typename _Preconditioner = IdentityPreconditioner>
//              typename _Preconditioner = IdentityPreconditioner<typename _MatrixType::Scalar> > // preconditioner must be positive definite
    class MINRES;

    namespace internal {

        template< typename _MatrixType, int _UpLo, typename _Preconditioner>
        struct traits<MINRES<_MatrixType,_UpLo,_Preconditioner> >
        {
            typedef _MatrixType MatrixType;
            typedef _Preconditioner Preconditioner;
        };

    }

    /** \ingroup IterativeLinearSolvers_Module
     * \brief A minimal residual solver for sparse symmetric problems
     *
     * This class allows solving sparse linear problems A.x = b using the MINRES algorithm
     * of Paige and Saunders (1975). The matrix A must be symmetric (possibly indefinite).
     * The vectors x and b can be either dense or sparse.
     *
     * \tparam _MatrixType the type of the matrix A; it can be a dense or a sparse matrix.
     * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
     *               or Upper. Default is Lower.
     * \tparam _Preconditioner the type of the preconditioner. Default is IdentityPreconditioner.
     *
     * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
     * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations
     * and NumTraits<Scalar>::epsilon() for the tolerance.
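     * For example, assuming mr is a MINRES solver instance as in the example below:
     * \code
     * mr.setMaxIterations(500);
     * mr.setTolerance(1e-8);
     * \endcode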
     *
     * This class can be used in the same way as the direct solver classes. Here is a typical usage example:
     * \code
     * int n = 10000;
     * VectorXd x(n), b(n);
     * SparseMatrix<double> A(n,n);
     * // fill A and b
     * MINRES<SparseMatrix<double> > mr;
     * mr.compute(A);
     * x = mr.solve(b);
     * std::cout << "#iterations: " << mr.iterations() << std::endl;
     * std::cout << "estimated error: " << mr.error() << std::endl;
     * // update b, and solve again
     * x = mr.solve(b);
     * \endcode
     *
     * By default the iterations start with x=0 as an initial guess of the solution.
     * One can control the start using the solveWithGuess() method.
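     * For instance (a minimal sketch, assuming x0 holds a previously computed solution of
     * compatible size):
     * \code
     * x = mr.solveWithGuess(b, x0);
     * \endcode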
     *
     * \sa class ConjugateGradient, BiCGSTAB, SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
     */
    template< typename _MatrixType, int _UpLo, typename _Preconditioner>
    class MINRES : public IterativeSolverBase<MINRES<_MatrixType,_UpLo,_Preconditioner> >
    {

        typedef IterativeSolverBase<MINRES> Base;
        using Base::mp_matrix;
        using Base::m_error;
        using Base::m_iterations;
        using Base::m_info;
        using Base::m_isInitialized;
    public:
        typedef _MatrixType MatrixType;
        typedef typename MatrixType::Scalar Scalar;
        typedef typename MatrixType::Index Index;
        typedef typename MatrixType::RealScalar RealScalar;
        typedef _Preconditioner Preconditioner;

        enum {UpLo = _UpLo};

    public:

        /** Default constructor. */
        MINRES() : Base() {}

        /** Initialize the solver with matrix \a A for further \c Ax=b solving.
         *
         * This constructor is a shortcut for the default constructor followed
         * by a call to compute().
         *
         * \warning this class stores a reference to the matrix A as well as some
         * precomputed values that depend on it. Therefore, if \a A is changed
         * this class becomes invalid. Call compute() to update it with the new
         * matrix A, or modify a copy of A.
         */
        template<typename MatrixDerived>
        explicit MINRES(const EigenBase<MatrixDerived>& A) : Base(A.derived()) {}

        /** Destructor. */
        ~MINRES(){}

        /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A
         * and \a x0 as an initial solution.
         *
         * \sa compute()
         */
        template<typename Rhs,typename Guess>
        inline const internal::solve_retval_with_guess<MINRES, Rhs, Guess>
        solveWithGuess(const MatrixBase<Rhs>& b, const Guess& x0) const
        {
            eigen_assert(m_isInitialized && "MINRES is not initialized.");
            eigen_assert(Base::rows()==b.rows()
                         && "MINRES::solve(): invalid number of rows of the right hand side matrix b");
            return internal::solve_retval_with_guess
                   <MINRES, Rhs, Guess>(*this, b.derived(), x0);
        }

        /** \internal */
        template<typename Rhs,typename Dest>
        void _solveWithGuess(const Rhs& b, Dest& x) const
        {
            typedef typename internal::conditional<UpLo==(Lower|Upper),
                                                   const MatrixType&,
                                                   SparseSelfAdjointView<const MatrixType, UpLo>
                                                  >::type MatrixWrapperType;
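            // When only one triangular part of A is referenced (UpLo != Lower|Upper), the
            // matrix is wrapped in a SparseSelfAdjointView so that the products computed in
            // internal::minres act on the full symmetric operator.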

            m_iterations = Base::maxIterations();
            m_error = Base::m_tolerance;

            for(int j=0; j<b.cols(); ++j)
            {
                m_iterations = Base::maxIterations();
                m_error = Base::m_tolerance;

                typename Dest::ColXpr xj(x,j);
                internal::minres(MatrixWrapperType(*mp_matrix), b.col(j), xj,
                                 Base::m_preconditioner, m_iterations, m_error);
            }

            m_isInitialized = true;
            m_info = m_error <= Base::m_tolerance ? Success : NoConvergence;
        }

        /** \internal */
        template<typename Rhs,typename Dest>
        void _solve(const Rhs& b, Dest& x) const
        {
            x.setZero();
            _solveWithGuess(b,x);
        }

    protected:

    };

    namespace internal {

        template<typename _MatrixType, int _UpLo, typename _Preconditioner, typename Rhs>
        struct solve_retval<MINRES<_MatrixType,_UpLo,_Preconditioner>, Rhs>
            : solve_retval_base<MINRES<_MatrixType,_UpLo,_Preconditioner>, Rhs>
        {
            typedef MINRES<_MatrixType,_UpLo,_Preconditioner> Dec;
            EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)

            template<typename Dest> void evalTo(Dest& dst) const
            {
                dec()._solve(rhs(),dst);
            }
        };

    } // end namespace internal

} // end namespace Eigen

#endif // EIGEN_MINRES_H_