source: pacpussensors/trunk/Vislab/lib3dv/eigen/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h @ revision 136

Last change on this file was made in revision 136, checked in by ldecherf, 7 years ago: documentation update.

File size: 22.5 KB
1// This file is part of Eigen, a lightweight C++ template library
2// for linear algebra.
3//
4// Copyright (C) 2011 Jitse Niesen <jitse@maths.leeds.ac.uk>
5// Copyright (C) 2011 Chen-Pang He <jdh8@ms63.hinet.net>
6//
7// This Source Code Form is subject to the terms of the Mozilla
8// Public License v. 2.0. If a copy of the MPL was not distributed
9// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10
11#ifndef EIGEN_MATRIX_LOGARITHM
12#define EIGEN_MATRIX_LOGARITHM
13
14#ifndef M_PI
15#define M_PI 3.141592653589793238462643383279503L
16#endif
17
18namespace Eigen {
19
/** \ingroup MatrixFunctions_Module
  * \class MatrixLogarithmAtomic
  * \brief Helper class for computing matrix logarithm of atomic matrices.
  *
  * \internal
  * Here, an atomic matrix is a triangular matrix whose diagonal
  * entries are close to each other.
  *
  * \sa class MatrixFunctionAtomic, MatrixBase::log()
  */
template <typename MatrixType>
class MatrixLogarithmAtomic
{
public:

  typedef typename MatrixType::Scalar Scalar;
  // typedef typename MatrixType::Index Index;
  typedef typename NumTraits<Scalar>::Real RealScalar;
  // typedef typename internal::stem_function<Scalar>::type StemFunction;
  // typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> VectorType;

  /** \brief Constructor. */
  MatrixLogarithmAtomic() { }

  /** \brief Compute matrix logarithm of atomic matrix
    * \param[in] A argument of matrix logarithm, should be upper triangular and atomic
    * \returns The logarithm of \p A.
    */
  MatrixType compute(const MatrixType& A);

private:

  // Direct formula for the 2x2 triangular case.
  void compute2x2(const MatrixType& A, MatrixType& result);
  // Inverse scaling-and-squaring algorithm for matrices larger than 2x2.
  void computeBig(const MatrixType& A, MatrixType& result);
  // Pick the smallest Pade degree accurate enough for the given ||T - I||;
  // one overload per supported RealScalar precision.
  int getPadeDegree(float normTminusI);
  int getPadeDegree(double normTminusI);
  int getPadeDegree(long double normTminusI);
  // Dispatch to the computePade<degree> implementation below.
  void computePade(MatrixType& result, const MatrixType& T, int degree);
  void computePade3(MatrixType& result, const MatrixType& T);
  void computePade4(MatrixType& result, const MatrixType& T);
  void computePade5(MatrixType& result, const MatrixType& T);
  void computePade6(MatrixType& result, const MatrixType& T);
  void computePade7(MatrixType& result, const MatrixType& T);
  void computePade8(MatrixType& result, const MatrixType& T);
  void computePade9(MatrixType& result, const MatrixType& T);
  void computePade10(MatrixType& result, const MatrixType& T);
  void computePade11(MatrixType& result, const MatrixType& T);

  // Smallest degree returned by getPadeDegree().
  static const int minPadeDegree = 3;
  // Largest degree ever needed, selected from the mantissa width of RealScalar.
  static const int maxPadeDegree = std::numeric_limits<RealScalar>::digits<= 24?  5:  // single precision
                                   std::numeric_limits<RealScalar>::digits<= 53?  7:  // double precision
                                   std::numeric_limits<RealScalar>::digits<= 64?  8:  // extended precision
                                   std::numeric_limits<RealScalar>::digits<=106? 10:  // double-double
                                                                                 11;  // quadruple precision

  // Prevent copying (pre-C++11 idiom: private and never defined).
  MatrixLogarithmAtomic(const MatrixLogarithmAtomic&);
  MatrixLogarithmAtomic& operator=(const MatrixLogarithmAtomic&);
};
79
80/** \brief Compute logarithm of triangular matrix with clustered eigenvalues. */
81template <typename MatrixType>
82MatrixType MatrixLogarithmAtomic<MatrixType>::compute(const MatrixType& A)
83{
84 using std::log;
85 MatrixType result(A.rows(), A.rows());
86 if (A.rows() == 1)
87 result(0,0) = log(A(0,0));
88 else if (A.rows() == 2)
89 compute2x2(A, result);
90 else
91 computeBig(A, result);
92 return result;
93}
94
/** \brief Compute logarithm of 2x2 triangular matrix.
  *
  * The diagonal entries of the result are the scalar logarithms of the
  * diagonal of \p A; only the (0,1) entry needs care.  Three formulas are
  * used depending on how close A(0,0) and A(1,1) are to each other.
  */
template <typename MatrixType>
void MatrixLogarithmAtomic<MatrixType>::compute2x2(const MatrixType& A, MatrixType& result)
{
  using std::abs;
  using std::ceil;
  using std::imag;
  using std::log;

  Scalar logA00 = log(A(0,0));
  Scalar logA11 = log(A(1,1));

  result(0,0) = logA00;
  result(1,0) = Scalar(0);        // input is upper triangular, so is the log
  result(1,1) = logA11;

  if (A(0,0) == A(1,1)) {
    // Equal diagonal entries: off-diagonal entry is the derivative A(0,1)/A(0,0).
    result(0,1) = A(0,1) / A(0,0);
  } else if ((abs(A(0,0)) < 0.5*abs(A(1,1))) || (abs(A(0,0)) > 2*abs(A(1,1)))) {
    // Well-separated diagonal entries: divided-difference formula is accurate.
    result(0,1) = A(0,1) * (logA11 - logA00) / (A(1,1) - A(0,0));
  } else {
    // computation in previous branch is inaccurate if A(1,1) \approx A(0,0)
    // Use atanh plus an unwinding number to stay on the correct branch of log.
    // NOTE(review): Scalar(0, ...) uses the two-argument (real, imag) constructor,
    // so this branch assumes Scalar is a complex type — confirm with callers
    // (MatrixLogarithmReturnValue::evalTo instantiates with complex scalars).
    int unwindingNumber = static_cast<int>(ceil((imag(logA11 - logA00) - M_PI) / (2*M_PI)));
    Scalar y = A(1,1) - A(0,0), x = A(1,1) + A(0,0);
    result(0,1) = A(0,1) * (Scalar(2) * numext::atanh2(y,x) + Scalar(0,2*M_PI*unwindingNumber)) / y;
  }
}
122
/** \brief Compute logarithm of triangular matrices with size > 2.
  * \details This uses a inverse scale-and-square algorithm: repeatedly take
  * square roots of \p A until the result is close enough to the identity for
  * a Pade approximant of log, then undo the square roots by scaling by a
  * power of two (log A = 2^s log A^(1/2^s)).
  */
template <typename MatrixType>
void MatrixLogarithmAtomic<MatrixType>::computeBig(const MatrixType& A, MatrixType& result)
{
  using std::pow;
  int numberOfSquareRoots = 0;
  int numberOfExtraSquareRoots = 0;
  int degree;                       // always assigned before the loop breaks (see below)
  MatrixType T = A, sqrtT;
  // Largest ||T - I|| for which the highest available Pade degree is still
  // accurate at the working precision; thresholds mirror maxPadeDegree.
  const RealScalar maxNormForPade = maxPadeDegree<= 5? 5.3149729967117310e-1:                    // single precision
                                    maxPadeDegree<= 7? 2.6429608311114350e-1:                    // double precision
                                    maxPadeDegree<= 8? 2.32777776523703892094e-1L:               // extended precision
                                    maxPadeDegree<=10? 1.05026503471351080481093652651105e-1L:   // double-double
                                                       1.1880960220216759245467951592883642e-1L; // quadruple precision

  while (true) {
    // Maximum absolute column sum (induced 1-norm) of T - I.
    RealScalar normTminusI = (T - MatrixType::Identity(T.rows(), T.rows())).cwiseAbs().colwise().sum().maxCoeff();
    if (normTminusI < maxNormForPade) {
      degree = getPadeDegree(normTminusI);
      int degree2 = getPadeDegree(normTminusI / RealScalar(2));
      // Stop if one more square root would lower the Pade degree by at most
      // one (not worth the cost), or if one extra root was already taken.
      // 'degree' is assigned just above, so it is defined whenever we break.
      if ((degree - degree2 <= 1) || (numberOfExtraSquareRoots == 1))
        break;
      ++numberOfExtraSquareRoots;
    }
    // T <- sqrt(T), keeping the upper triangular structure.
    MatrixSquareRootTriangular<MatrixType>(T).compute(sqrtT);
    T = sqrtT.template triangularView<Upper>();
    ++numberOfSquareRoots;
  }

  computePade(result, T, degree);
  result *= pow(RealScalar(2), numberOfSquareRoots);  // undo the square roots
}
156
157/* \brief Get suitable degree for Pade approximation. (specialized for RealScalar = float) */
158template <typename MatrixType>
159int MatrixLogarithmAtomic<MatrixType>::getPadeDegree(float normTminusI)
160{
161 const float maxNormForPade[] = { 2.5111573934555054e-1 /* degree = 3 */ , 4.0535837411880493e-1,
162 5.3149729967117310e-1 };
163 int degree = 3;
164 for (; degree <= maxPadeDegree; ++degree)
165 if (normTminusI <= maxNormForPade[degree - minPadeDegree])
166 break;
167 return degree;
168}
169
170/* \brief Get suitable degree for Pade approximation. (specialized for RealScalar = double) */
171template <typename MatrixType>
172int MatrixLogarithmAtomic<MatrixType>::getPadeDegree(double normTminusI)
173{
174 const double maxNormForPade[] = { 1.6206284795015624e-2 /* degree = 3 */ , 5.3873532631381171e-2,
175 1.1352802267628681e-1, 1.8662860613541288e-1, 2.642960831111435e-1 };
176 int degree = 3;
177 for (; degree <= maxPadeDegree; ++degree)
178 if (normTminusI <= maxNormForPade[degree - minPadeDegree])
179 break;
180 return degree;
181}
182
/* \brief Get suitable degree for Pade approximation. (specialized for RealScalar = long double)
 *
 * The accuracy table is chosen at compile time from LDBL_MANT_DIG, since the
 * width of long double varies between platforms.  maxNormForPade[i] is the
 * largest admissible ||T - I|| for degree minPadeDegree + i.
 */
template <typename MatrixType>
int MatrixLogarithmAtomic<MatrixType>::getPadeDegree(long double normTminusI)
{
#if   LDBL_MANT_DIG == 53         // double precision
  const long double maxNormForPade[] = { 1.6206284795015624e-2L /* degree = 3 */ , 5.3873532631381171e-2L,
            1.1352802267628681e-1L, 1.8662860613541288e-1L, 2.642960831111435e-1L };
#elif LDBL_MANT_DIG <= 64         // extended precision
  const long double maxNormForPade[] = { 5.48256690357782863103e-3L /* degree = 3 */, 2.34559162387971167321e-2L,
            5.84603923897347449857e-2L, 1.08486423756725170223e-1L, 1.68385767881294446649e-1L,
            2.32777776523703892094e-1L };
#elif LDBL_MANT_DIG <= 106        // double-double
  const long double maxNormForPade[] = { 8.58970550342939562202529664318890e-5L /* degree = 3 */,
            9.34074328446359654039446552677759e-4L, 4.26117194647672175773064114582860e-3L,
            1.21546224740281848743149666560464e-2L, 2.61100544998339436713088248557444e-2L,
            4.66170074627052749243018566390567e-2L, 7.32585144444135027565872014932387e-2L,
            1.05026503471351080481093652651105e-1L };
#else                             // quadruple precision
  const long double maxNormForPade[] = { 4.7419931187193005048501568167858103e-5L /* degree = 3 */,
            5.8853168473544560470387769480192666e-4L, 2.9216120366601315391789493628113520e-3L,
            8.8415758124319434347116734705174308e-3L, 1.9850836029449446668518049562565291e-2L,
            3.6688019729653446926585242192447447e-2L, 5.9290962294020186998954055264528393e-2L,
            8.6998436081634343903250580992127677e-2L, 1.1880960220216759245467951592883642e-1L };
#endif
  // Smallest degree whose bound covers normTminusI.
  int degree = 3;
  for (; degree <= maxPadeDegree; ++degree)
    if (normTminusI <= maxNormForPade[degree - minPadeDegree])
      break;
  return degree;
}
213
214/* \brief Compute Pade approximation to matrix logarithm */
215template <typename MatrixType>
216void MatrixLogarithmAtomic<MatrixType>::computePade(MatrixType& result, const MatrixType& T, int degree)
217{
218 switch (degree) {
219 case 3: computePade3(result, T); break;
220 case 4: computePade4(result, T); break;
221 case 5: computePade5(result, T); break;
222 case 6: computePade6(result, T); break;
223 case 7: computePade7(result, T); break;
224 case 8: computePade8(result, T); break;
225 case 9: computePade9(result, T); break;
226 case 10: computePade10(result, T); break;
227 case 11: computePade11(result, T); break;
228 default: assert(false); // should never happen
229 }
230}
231
// Degree-3 Pade approximant of log in partial-fraction form:
// log(T) ~ sum_k w_k * (I + x_k*(T-I))^{-1} * (T-I), with (x_k, w_k) the
// nodes/weights of 3-point Gauss-Legendre quadrature on [0,1].
template <typename MatrixType>
void MatrixLogarithmAtomic<MatrixType>::computePade3(MatrixType& result, const MatrixType& T)
{
  const int degree = 3;
  const RealScalar nodes[]   = { 0.1127016653792583114820734600217600L, 0.5000000000000000000000000000000000L,
            0.8872983346207416885179265399782400L };
  const RealScalar weights[] = { 0.2777777777777777777777777777777778L, 0.4444444444444444444444444444444444L,
            0.2777777777777777777777777777777778L };
  eigen_assert(degree <= maxPadeDegree);
  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
  result.setZero(T.rows(), T.rows());
  // Each term is obtained by a triangular solve rather than an explicit inverse.
  for (int k = 0; k < degree; ++k)
    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
                           .template triangularView<Upper>().solve(TminusI);
}
247
// Degree-4 Pade approximant of log; nodes/weights of 4-point Gauss-Legendre
// quadrature on [0,1] (see computePade3 pattern).
template <typename MatrixType>
void MatrixLogarithmAtomic<MatrixType>::computePade4(MatrixType& result, const MatrixType& T)
{
  const int degree = 4;
  const RealScalar nodes[]   = { 0.0694318442029737123880267555535953L, 0.3300094782075718675986671204483777L,
            0.6699905217924281324013328795516223L, 0.9305681557970262876119732444464048L };
  const RealScalar weights[] = { 0.1739274225687269286865319746109997L, 0.3260725774312730713134680253890003L,
            0.3260725774312730713134680253890003L, 0.1739274225687269286865319746109997L };
  eigen_assert(degree <= maxPadeDegree);
  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
  result.setZero(T.rows(), T.rows());
  // Accumulate w_k * (I + x_k*(T-I))^{-1} * (T-I) via triangular solves.
  for (int k = 0; k < degree; ++k)
    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
                           .template triangularView<Upper>().solve(TminusI);
}
263
// Degree-5 Pade approximant of log; 5-point Gauss-Legendre nodes/weights on [0,1].
template <typename MatrixType>
void MatrixLogarithmAtomic<MatrixType>::computePade5(MatrixType& result, const MatrixType& T)
{
  const int degree = 5;
  const RealScalar nodes[]   = { 0.0469100770306680036011865608503035L, 0.2307653449471584544818427896498956L,
            0.5000000000000000000000000000000000L, 0.7692346550528415455181572103501044L,
            0.9530899229693319963988134391496965L };
  const RealScalar weights[] = { 0.1184634425280945437571320203599587L, 0.2393143352496832340206457574178191L,
            0.2844444444444444444444444444444444L, 0.2393143352496832340206457574178191L,
            0.1184634425280945437571320203599587L };
  eigen_assert(degree <= maxPadeDegree);
  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
  result.setZero(T.rows(), T.rows());
  // Accumulate w_k * (I + x_k*(T-I))^{-1} * (T-I) via triangular solves.
  for (int k = 0; k < degree; ++k)
    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
                           .template triangularView<Upper>().solve(TminusI);
}
281
// Degree-6 Pade approximant of log; 6-point Gauss-Legendre nodes/weights on [0,1].
template <typename MatrixType>
void MatrixLogarithmAtomic<MatrixType>::computePade6(MatrixType& result, const MatrixType& T)
{
  const int degree = 6;
  const RealScalar nodes[]   = { 0.0337652428984239860938492227530027L, 0.1693953067668677431693002024900473L,
            0.3806904069584015456847491391596440L, 0.6193095930415984543152508608403560L,
            0.8306046932331322568306997975099527L, 0.9662347571015760139061507772469973L };
  const RealScalar weights[] = { 0.0856622461895851725201480710863665L, 0.1803807865240693037849167569188581L,
            0.2339569672863455236949351719947755L, 0.2339569672863455236949351719947755L,
            0.1803807865240693037849167569188581L, 0.0856622461895851725201480710863665L };
  eigen_assert(degree <= maxPadeDegree);
  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
  result.setZero(T.rows(), T.rows());
  // Accumulate w_k * (I + x_k*(T-I))^{-1} * (T-I) via triangular solves.
  for (int k = 0; k < degree; ++k)
    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
                           .template triangularView<Upper>().solve(TminusI);
}
299
// Degree-7 Pade approximant of log; 7-point Gauss-Legendre nodes/weights on [0,1].
template <typename MatrixType>
void MatrixLogarithmAtomic<MatrixType>::computePade7(MatrixType& result, const MatrixType& T)
{
  const int degree = 7;
  const RealScalar nodes[]   = { 0.0254460438286207377369051579760744L, 0.1292344072003027800680676133596058L,
            0.2970774243113014165466967939615193L, 0.5000000000000000000000000000000000L,
            0.7029225756886985834533032060384807L, 0.8707655927996972199319323866403942L,
            0.9745539561713792622630948420239256L };
  const RealScalar weights[] = { 0.0647424830844348466353057163395410L, 0.1398526957446383339507338857118898L,
            0.1909150252525594724751848877444876L, 0.2089795918367346938775510204081633L,
            0.1909150252525594724751848877444876L, 0.1398526957446383339507338857118898L,
            0.0647424830844348466353057163395410L };
  eigen_assert(degree <= maxPadeDegree);
  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
  result.setZero(T.rows(), T.rows());
  // Accumulate w_k * (I + x_k*(T-I))^{-1} * (T-I) via triangular solves.
  for (int k = 0; k < degree; ++k)
    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
                           .template triangularView<Upper>().solve(TminusI);
}
319
// Degree-8 Pade approximant of log; 8-point Gauss-Legendre nodes/weights on [0,1].
template <typename MatrixType>
void MatrixLogarithmAtomic<MatrixType>::computePade8(MatrixType& result, const MatrixType& T)
{
  const int degree = 8;
  const RealScalar nodes[]   = { 0.0198550717512318841582195657152635L, 0.1016667612931866302042230317620848L,
            0.2372337950418355070911304754053768L, 0.4082826787521750975302619288199080L,
            0.5917173212478249024697380711800920L, 0.7627662049581644929088695245946232L,
            0.8983332387068133697957769682379152L, 0.9801449282487681158417804342847365L };
  const RealScalar weights[] = { 0.0506142681451881295762656771549811L, 0.1111905172266872352721779972131204L,
            0.1568533229389436436689811009933007L, 0.1813418916891809914825752246385978L,
            0.1813418916891809914825752246385978L, 0.1568533229389436436689811009933007L,
            0.1111905172266872352721779972131204L, 0.0506142681451881295762656771549811L };
  eigen_assert(degree <= maxPadeDegree);
  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
  result.setZero(T.rows(), T.rows());
  // Accumulate w_k * (I + x_k*(T-I))^{-1} * (T-I) via triangular solves.
  for (int k = 0; k < degree; ++k)
    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
                           .template triangularView<Upper>().solve(TminusI);
}
339
// Degree-9 Pade approximant of log; 9-point Gauss-Legendre nodes/weights on [0,1].
template <typename MatrixType>
void MatrixLogarithmAtomic<MatrixType>::computePade9(MatrixType& result, const MatrixType& T)
{
  const int degree = 9;
  const RealScalar nodes[]   = { 0.0159198802461869550822118985481636L, 0.0819844463366821028502851059651326L,
            0.1933142836497048013456489803292629L, 0.3378732882980955354807309926783317L,
            0.5000000000000000000000000000000000L, 0.6621267117019044645192690073216683L,
            0.8066857163502951986543510196707371L, 0.9180155536633178971497148940348674L,
            0.9840801197538130449177881014518364L };
  const RealScalar weights[] = { 0.0406371941807872059859460790552618L, 0.0903240803474287020292360156214564L,
            0.1303053482014677311593714347093164L, 0.1561735385200014200343152032922218L,
            0.1651196775006298815822625346434870L, 0.1561735385200014200343152032922218L,
            0.1303053482014677311593714347093164L, 0.0903240803474287020292360156214564L,
            0.0406371941807872059859460790552618L };
  eigen_assert(degree <= maxPadeDegree);
  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
  result.setZero(T.rows(), T.rows());
  // Accumulate w_k * (I + x_k*(T-I))^{-1} * (T-I) via triangular solves.
  for (int k = 0; k < degree; ++k)
    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
                           .template triangularView<Upper>().solve(TminusI);
}
361
// Degree-10 Pade approximant of log; 10-point Gauss-Legendre nodes/weights on [0,1].
template <typename MatrixType>
void MatrixLogarithmAtomic<MatrixType>::computePade10(MatrixType& result, const MatrixType& T)
{
  const int degree = 10;
  const RealScalar nodes[]   = { 0.0130467357414141399610179939577740L, 0.0674683166555077446339516557882535L,
            0.1602952158504877968828363174425632L, 0.2833023029353764046003670284171079L,
            0.4255628305091843945575869994351400L, 0.5744371694908156054424130005648600L,
            0.7166976970646235953996329715828921L, 0.8397047841495122031171636825574368L,
            0.9325316833444922553660483442117465L, 0.9869532642585858600389820060422260L };
  const RealScalar weights[] = { 0.0333356721543440687967844049466659L, 0.0747256745752902965728881698288487L,
            0.1095431812579910219977674671140816L, 0.1346333596549981775456134607847347L,
            0.1477621123573764350869464973256692L, 0.1477621123573764350869464973256692L,
            0.1346333596549981775456134607847347L, 0.1095431812579910219977674671140816L,
            0.0747256745752902965728881698288487L, 0.0333356721543440687967844049466659L };
  eigen_assert(degree <= maxPadeDegree);
  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
  result.setZero(T.rows(), T.rows());
  // Accumulate w_k * (I + x_k*(T-I))^{-1} * (T-I) via triangular solves.
  for (int k = 0; k < degree; ++k)
    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
                           .template triangularView<Upper>().solve(TminusI);
}
383
// Degree-11 Pade approximant of log; 11-point Gauss-Legendre nodes/weights on [0,1].
template <typename MatrixType>
void MatrixLogarithmAtomic<MatrixType>::computePade11(MatrixType& result, const MatrixType& T)
{
  const int degree = 11;
  const RealScalar nodes[]   = { 0.0108856709269715035980309994385713L, 0.0564687001159523504624211153480364L,
            0.1349239972129753379532918739844233L, 0.2404519353965940920371371652706952L,
            0.3652284220238275138342340072995692L, 0.5000000000000000000000000000000000L,
            0.6347715779761724861657659927004308L, 0.7595480646034059079628628347293048L,
            0.8650760027870246620467081260155767L, 0.9435312998840476495375788846519636L,
            0.9891143290730284964019690005614287L };
  const RealScalar weights[] = { 0.0278342835580868332413768602212743L, 0.0627901847324523123173471496119701L,
            0.0931451054638671257130488207158280L, 0.1165968822959952399592618524215876L,
            0.1314022722551233310903444349452546L, 0.1364625433889503153572417641681711L,
            0.1314022722551233310903444349452546L, 0.1165968822959952399592618524215876L,
            0.0931451054638671257130488207158280L, 0.0627901847324523123173471496119701L,
            0.0278342835580868332413768602212743L };
  eigen_assert(degree <= maxPadeDegree);
  MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
  result.setZero(T.rows(), T.rows());
  // Accumulate w_k * (I + x_k*(T-I))^{-1} * (T-I) via triangular solves.
  for (int k = 0; k < degree; ++k)
    result += weights[k] * (MatrixType::Identity(T.rows(), T.rows()) + nodes[k] * TminusI)
                           .template triangularView<Upper>().solve(TminusI);
}
407
/** \ingroup MatrixFunctions_Module
  *
  * \brief Proxy for the matrix logarithm of some matrix (expression).
  *
  * \tparam Derived  Type of the argument to the matrix function.
  *
  * This class holds the argument to the matrix function until it is
  * assigned or evaluated for some other reason (so the argument
  * should not be changed in the meantime). It is the return type of
  * MatrixBase::log() and most of the time this is the only way it
  * is used.
  */
template<typename Derived> class MatrixLogarithmReturnValue
: public ReturnByValue<MatrixLogarithmReturnValue<Derived> >
{
public:

  typedef typename Derived::Scalar Scalar;
  typedef typename Derived::Index Index;

  /** \brief Constructor.
    *
    * \param[in] A  %Matrix (expression) forming the argument of the matrix logarithm.
    */
  MatrixLogarithmReturnValue(const Derived& A) : m_A(A) { }

  /** \brief Compute the matrix logarithm.
    *
    * \param[out] result  Logarithm of \p A, where \p A is as specified in the constructor.
    */
  template <typename ResultType>
  inline void evalTo(ResultType& result) const
  {
    typedef typename Derived::PlainObject PlainObject;
    typedef internal::traits<PlainObject> Traits;
    static const int RowsAtCompileTime = Traits::RowsAtCompileTime;
    static const int ColsAtCompileTime = Traits::ColsAtCompileTime;
    static const int Options = PlainObject::Options;
    // The computation is carried out on a dynamically-sized matrix of
    // complex scalars (compute2x2 relies on complex arithmetic).
    typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
    typedef Matrix<ComplexScalar, Dynamic, Dynamic, Options, RowsAtCompileTime, ColsAtCompileTime> DynMatrixType;
    typedef MatrixLogarithmAtomic<DynMatrixType> AtomicType;
    AtomicType atomic;

    // Force evaluation of the (possibly lazy) argument expression, then let
    // MatrixFunction handle the Schur decomposition / block recursion.
    const PlainObject Aevaluated = m_A.eval();
    MatrixFunction<PlainObject, AtomicType> mf(Aevaluated, atomic);
    mf.compute(result);
  }

  Index rows() const { return m_A.rows(); }
  Index cols() const { return m_A.cols(); }

private:
  typename internal::nested<Derived>::type m_A;

  // Assignment is meaningless for a proxy; declared private and never defined.
  MatrixLogarithmReturnValue& operator=(const MatrixLogarithmReturnValue&);
};
464
namespace internal {
  // Tells ReturnByValue what the proxy evaluates to: the plain (owning)
  // matrix type of the argument expression.
  template<typename Derived>
  struct traits<MatrixLogarithmReturnValue<Derived> >
  {
    typedef typename Derived::PlainObject ReturnType;
  };
}
472
473
/********** MatrixBase method **********/


/** \brief Matrix logarithm of \c *this.
  *
  * Returns a lightweight proxy; the actual computation happens when the
  * proxy is assigned or otherwise evaluated (see MatrixLogarithmReturnValue).
  */
template <typename Derived>
const MatrixLogarithmReturnValue<Derived> MatrixBase<Derived>::log() const
{
  eigen_assert(rows() == cols());  // the logarithm is only defined for square matrices
  return MatrixLogarithmReturnValue<Derived>(derived());
}
483
484} // end namespace Eigen
485
486#endif // EIGEN_MATRIX_LOGARITHM
Note: See TracBrowser for help on using the repository browser.