[136] | 1 | // This file is part of Eigen, a lightweight C++ template library
|
---|
| 2 | // for linear algebra.
|
---|
| 3 | //
|
---|
| 4 | // Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
|
---|
| 5 | //
|
---|
| 6 | // This Source Code Form is subject to the terms of the Mozilla
|
---|
| 7 | // Public License v. 2.0. If a copy of the MPL was not distributed
|
---|
| 8 | // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
---|
| 9 |
|
---|
| 10 | #ifndef EIGEN_SPARSEMATRIX_H
|
---|
| 11 | #define EIGEN_SPARSEMATRIX_H
|
---|
| 12 |
|
---|
| 13 | namespace Eigen {
|
---|
| 14 |
|
---|
/** \ingroup SparseCore_Module
  *
  * \class SparseMatrix
  *
  * \brief A versatile sparse matrix representation
  *
  * This class implements a more versatile variant of the common \em compressed row/column storage format.
  * Each column's (resp. row's) non zeros are stored as a pair of value with associated row (resp. column) index.
  * All the non zeros are stored in a single large buffer. Unlike the \em compressed format, there might be extra
  * space in between the nonzeros of two successive columns (resp. rows) such that insertion of new non-zeros
  * can be done with limited memory reallocation and copies.
  *
  * A call to the function makeCompressed() turns the matrix into the standard \em compressed format
  * compatible with many libraries.
  *
  * More details on this storage scheme are given in the \ref TutorialSparse "manual pages".
  *
  * \tparam _Scalar the scalar type, i.e. the type of the coefficients
  * \tparam _Options Union of bit flags controlling the storage scheme. Currently the only possibility
  *                  is ColMajor or RowMajor. The default is 0 which means column-major.
  * \tparam _Index the type of the indices. It has to be a \b signed type (e.g., short, int, std::ptrdiff_t). Default is \c int.
  *
  * This class can be extended with the help of the plugin mechanism described on the page
  * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEMATRIX_PLUGIN.
  */
|
---|
| 40 |
|
---|
| 41 | namespace internal {
|
---|
| 42 | template<typename _Scalar, int _Options, typename _Index>
|
---|
| 43 | struct traits<SparseMatrix<_Scalar, _Options, _Index> >
|
---|
| 44 | {
|
---|
| 45 | typedef _Scalar Scalar;
|
---|
| 46 | typedef _Index Index;
|
---|
| 47 | typedef Sparse StorageKind;
|
---|
| 48 | typedef MatrixXpr XprKind;
|
---|
| 49 | enum {
|
---|
| 50 | RowsAtCompileTime = Dynamic,
|
---|
| 51 | ColsAtCompileTime = Dynamic,
|
---|
| 52 | MaxRowsAtCompileTime = Dynamic,
|
---|
| 53 | MaxColsAtCompileTime = Dynamic,
|
---|
| 54 | Flags = _Options | NestByRefBit | LvalueBit,
|
---|
| 55 | CoeffReadCost = NumTraits<Scalar>::ReadCost,
|
---|
| 56 | SupportedAccessPatterns = InnerRandomAccessPattern
|
---|
| 57 | };
|
---|
| 58 | };
|
---|
| 59 |
|
---|
// Traits for an expression extracting a diagonal of a const SparseMatrix.
// Note the Dense StorageKind: the diagonal is exposed as a dense column vector.
template<typename _Scalar, int _Options, typename _Index, int DiagIndex>
struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
{
  typedef SparseMatrix<_Scalar, _Options, _Index> MatrixType;
  typedef typename nested<MatrixType>::type MatrixTypeNested;
  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;

  typedef _Scalar Scalar;
  typedef Dense StorageKind;
  typedef _Index Index;
  typedef MatrixXpr XprKind;

  enum {
    RowsAtCompileTime = Dynamic,     // diagonal length known at run time only
    ColsAtCompileTime = 1,           // a diagonal is a column vector
    MaxRowsAtCompileTime = Dynamic,
    MaxColsAtCompileTime = 1,
    Flags = 0,
    // Reading a diagonal entry requires a search in the sparse storage,
    // hence the pessimistic x10 factor on the nested read cost.
    CoeffReadCost = _MatrixTypeNested::CoeffReadCost*10
  };
};
|
---|
| 81 |
|
---|
| 82 | } // end namespace internal
|
---|
| 83 |
|
---|
| 84 | template<typename _Scalar, int _Options, typename _Index>
|
---|
| 85 | class SparseMatrix
|
---|
| 86 | : public SparseMatrixBase<SparseMatrix<_Scalar, _Options, _Index> >
|
---|
| 87 | {
|
---|
| 88 | public:
|
---|
| 89 | EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
|
---|
| 90 | EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, +=)
|
---|
| 91 | EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, -=)
|
---|
| 92 |
|
---|
    // Sparse-matrix map type sharing this matrix's scalar type and storage order.
    typedef MappedSparseMatrix<Scalar,Flags> Map;
    using Base::IsRowMajor;
    // Underlying storage: parallel arrays of values and inner indices.
    typedef internal::CompressedStorage<Scalar,Index> Storage;
    enum {
      Options = _Options
    };
|
---|
| 99 |
|
---|
  protected:

    // Matrix type with the opposite storage order (used by transpose-related code).
    typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;

    Index m_outerSize;        // number of outer vectors (cols if ColMajor, rows if RowMajor)
    Index m_innerSize;        // size of each inner vector
    Index* m_outerIndex;      // m_outerSize+1 entries: start of each inner vector in m_data
    Index* m_innerNonZeros;   // optional, if null then the data is compressed
    Storage m_data;           // value/inner-index pair buffer

    // Eigen vector views over the m_innerNonZeros array (zero-sized in compressed mode).
    Eigen::Map<Matrix<Index,Dynamic,1> > innerNonZeros() { return Eigen::Map<Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
    const Eigen::Map<const Matrix<Index,Dynamic,1> > innerNonZeros() const { return Eigen::Map<const Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
|
---|
| 112 |
|
---|
| 113 | public:
|
---|
| 114 |
|
---|
    /** \returns whether \c *this is in compressed form,
      * i.e. no per-inner-vector counts are kept (m_innerNonZeros is null). */
    inline bool isCompressed() const { return m_innerNonZeros==0; }

    /** \returns the number of rows of the matrix */
    inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
    /** \returns the number of columns of the matrix */
    inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }

    /** \returns the number of rows (resp. columns) of the matrix if the storage order is column major (resp. row major) */
    inline Index innerSize() const { return m_innerSize; }
    /** \returns the number of columns (resp. rows) of the matrix if the storage order is column major (resp. row major) */
    inline Index outerSize() const { return m_outerSize; }
|
---|
| 127 |
|
---|
    /** \returns a const pointer to the array of values.
      * This function is aimed at interoperability with other libraries.
      * \sa innerIndexPtr(), outerIndexPtr() */
    inline const Scalar* valuePtr() const { return m_data.valuePtr(); }
    /** \returns a non-const pointer to the array of values.
      * This function is aimed at interoperability with other libraries.
      * \sa innerIndexPtr(), outerIndexPtr() */
    inline Scalar* valuePtr() { return m_data.valuePtr(); }

    /** \returns a const pointer to the array of inner indices.
      * This function is aimed at interoperability with other libraries.
      * \sa valuePtr(), outerIndexPtr() */
    inline const Index* innerIndexPtr() const { return m_data.indexPtr(); }
    /** \returns a non-const pointer to the array of inner indices.
      * This function is aimed at interoperability with other libraries.
      * \sa valuePtr(), outerIndexPtr() */
    inline Index* innerIndexPtr() { return m_data.indexPtr(); }

    /** \returns a const pointer to the array of the starting positions of the inner vectors.
      * This function is aimed at interoperability with other libraries.
      * \sa valuePtr(), innerIndexPtr() */
    inline const Index* outerIndexPtr() const { return m_outerIndex; }
    /** \returns a non-const pointer to the array of the starting positions of the inner vectors.
      * This function is aimed at interoperability with other libraries.
      * \sa valuePtr(), innerIndexPtr() */
    inline Index* outerIndexPtr() { return m_outerIndex; }

    /** \returns a const pointer to the array of the number of non zeros of the inner vectors.
      * This function is aimed at interoperability with other libraries.
      * \warning it returns the null pointer 0 in compressed mode */
    inline const Index* innerNonZeroPtr() const { return m_innerNonZeros; }
    /** \returns a non-const pointer to the array of the number of non zeros of the inner vectors.
      * This function is aimed at interoperability with other libraries.
      * \warning it returns the null pointer 0 in compressed mode */
    inline Index* innerNonZeroPtr() { return m_innerNonZeros; }

    /** \internal Direct access to the underlying compressed storage. */
    inline Storage& data() { return m_data; }
    /** \internal Direct (const) access to the underlying compressed storage. */
    inline const Storage& data() const { return m_data; }
|
---|
| 168 |
|
---|
| 169 | /** \returns the value of the matrix at position \a i, \a j
|
---|
| 170 | * This function returns Scalar(0) if the element is an explicit \em zero */
|
---|
| 171 | inline Scalar coeff(Index row, Index col) const
|
---|
| 172 | {
|
---|
| 173 | eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
|
---|
| 174 |
|
---|
| 175 | const Index outer = IsRowMajor ? row : col;
|
---|
| 176 | const Index inner = IsRowMajor ? col : row;
|
---|
| 177 | Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
|
---|
| 178 | return m_data.atInRange(m_outerIndex[outer], end, inner);
|
---|
| 179 | }
|
---|
| 180 |
|
---|
    /** \returns a non-const reference to the value of the matrix at position \a i, \a j
      *
      * If the element does not exist then it is inserted via the insert(Index,Index) function
      * which itself turns the matrix into a non compressed form if that was not the case.
      *
      * This is a O(log(nnz_j)) operation (binary search) plus the cost of insert(Index,Index)
      * function if the element does not already exist.
      */
    inline Scalar& coeffRef(Index row, Index col)
    {
      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

      // Translate (row,col) into storage-order coordinates.
      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      // Valid entry range of this inner vector; in uncompressed mode the number
      // of valid entries is given by m_innerNonZeros.
      Index start = m_outerIndex[outer];
      Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
      eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
      if(end<=start)
        return insert(row,col);  // empty inner vector: the element cannot exist yet
      // Binary search for `inner` within [start,end).
      const Index p = m_data.searchLowerIndex(start,end-1,inner);
      if((p<end) && (m_data.index(p)==inner))
        return m_data.value(p);
      else
        return insert(row,col);  // not found: create the element
    }
|
---|
| 207 |
|
---|
    /** \returns a reference to a novel non zero coefficient with coordinates \a row x \a col.
      * The non zero coefficient must \b not already exist.
      *
      * If the matrix \c *this is in compressed mode, then \c *this is turned into uncompressed
      * mode while reserving room for 2 non zeros per inner vector. It is strongly recommended to first
      * call reserve(const SizesType &) to reserve a more appropriate number of elements per
      * inner vector that better match your scenario.
      *
      * This function performs a sorted insertion in O(1) if the elements of each inner vector are
      * inserted in increasing inner index order, and in O(nnz_j) for a random insertion.
      *
      */
    Scalar& insert(Index row, Index col)
    {
      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

      if(isCompressed())
      {
        // Switch to uncompressed mode, pre-reserving room for 2 extra
        // non zeros in every inner vector.
        reserve(Matrix<Index,Dynamic,1>::Constant(outerSize(), 2));
      }
      return insertUncompressed(row,col);
    }
|
---|
| 230 |
|
---|
| 231 | public:
|
---|
| 232 |
|
---|
| 233 | class InnerIterator;
|
---|
| 234 | class ReverseInnerIterator;
|
---|
| 235 |
|
---|
    /** Removes all non zeros but keep allocated memory */
    inline void setZero()
    {
      m_data.clear();
      // Reset all outer start positions...
      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
      // ...and, in uncompressed mode, the per-inner-vector non-zero counts.
      if(m_innerNonZeros)
        memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(Index));
    }
|
---|
| 244 |
|
---|
| 245 | /** \returns the number of non zero coefficients */
|
---|
| 246 | inline Index nonZeros() const
|
---|
| 247 | {
|
---|
| 248 | if(m_innerNonZeros)
|
---|
| 249 | return innerNonZeros().sum();
|
---|
| 250 | return static_cast<Index>(m_data.size());
|
---|
| 251 | }
|
---|
| 252 |
|
---|
    /** Preallocates \a reserveSize non zeros.
      *
      * Precondition: the matrix must be in compressed mode. */
    inline void reserve(Index reserveSize)
    {
      eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
      m_data.reserve(reserveSize);
    }
|
---|
| 261 |
|
---|
#ifdef EIGEN_PARSED_BY_DOXYGEN
    /** Preallocates \a reserveSize[\c j] non zeros for each column (resp. row) \c j.
      *
      * This function turns the matrix in non-compressed mode */
    template<class SizesType>
    inline void reserve(const SizesType& reserveSizes);
#else
    // Overload selected for size containers exposing a nested value_type
    // (e.g. std::vector<int>); the dummy defaulted parameter acts as SFINAE.
    template<class SizesType>
    inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif = typename SizesType::value_type())
    {
      EIGEN_UNUSED_VARIABLE(enableif);
      reserveInnerVectors(reserveSizes);
    }
    // Overload selected for types exposing a nested Scalar type (Eigen vectors).
    template<class SizesType>
    inline void reserve(const SizesType& reserveSizes, const typename SizesType::Scalar& enableif =
    #if (!defined(_MSC_VER)) || (_MSC_VER>=1500) // MSVC 2005 fails to compile with this typename
    typename
    #endif
    SizesType::Scalar())
    {
      EIGEN_UNUSED_VARIABLE(enableif);
      reserveInnerVectors(reserveSizes);
    }
#endif // EIGEN_PARSED_BY_DOXYGEN
|
---|
  protected:
    /** \internal
      * Reserves \a reserveSizes[j] extra slots in inner vector \a j, switching the
      * matrix to uncompressed mode if needed. Existing non zeros are shifted to
      * their new positions (last inner vector first, so nothing is overwritten
      * before it has been moved). */
    template<class SizesType>
    inline void reserveInnerVectors(const SizesType& reserveSizes)
    {
      if(isCompressed())
      {
        std::size_t totalReserveSize = 0;
        // turn the matrix into non-compressed mode
        m_innerNonZeros = static_cast<Index*>(std::malloc(m_outerSize * sizeof(Index)));
        if (!m_innerNonZeros) internal::throw_std_bad_alloc();

        // temporarily use m_innerNonZeros to hold the new starting points.
        Index* newOuterIndex = m_innerNonZeros;

        // New start of vector j = cumulated sizes + reserved room of vectors 0..j-1.
        Index count = 0;
        for(Index j=0; j<m_outerSize; ++j)
        {
          newOuterIndex[j] = count;
          count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
          totalReserveSize += reserveSizes[j];
        }
        m_data.reserve(totalReserveSize);
        // Shift existing entries backward, processing inner vectors from the last
        // one down to the first.
        Index previousOuterIndex = m_outerIndex[m_outerSize];
        for(Index j=m_outerSize-1; j>=0; --j)
        {
          Index innerNNZ = previousOuterIndex - m_outerIndex[j];
          for(Index i=innerNNZ-1; i>=0; --i)
          {
            m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
            m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
          }
          previousOuterIndex = m_outerIndex[j];
          m_outerIndex[j] = newOuterIndex[j];
          m_innerNonZeros[j] = innerNNZ;
        }
        // The past-the-end entry accounts for the last vector's reserved room too.
        m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];

        m_data.resize(m_outerIndex[m_outerSize]);
      }
      else
      {
        // Already uncompressed: compute brand new outer starts, keeping at least
        // the free room each inner vector already has.
        Index* newOuterIndex = static_cast<Index*>(std::malloc((m_outerSize+1)*sizeof(Index)));
        if (!newOuterIndex) internal::throw_std_bad_alloc();

        Index count = 0;
        for(Index j=0; j<m_outerSize; ++j)
        {
          newOuterIndex[j] = count;
          Index alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
          Index toReserve = std::max<Index>(reserveSizes[j], alreadyReserved);
          count += toReserve + m_innerNonZeros[j];
        }
        newOuterIndex[m_outerSize] = count;

        m_data.resize(count);
        // Move stored entries backward to open the requested gaps.
        for(Index j=m_outerSize-1; j>=0; --j)
        {
          Index offset = newOuterIndex[j] - m_outerIndex[j];
          if(offset>0)
          {
            Index innerNNZ = m_innerNonZeros[j];
            for(Index i=innerNNZ-1; i>=0; --i)
            {
              m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
              m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
            }
          }
        }

        // Install the new outer index array and release the old one.
        std::swap(m_outerIndex, newOuterIndex);
        std::free(newOuterIndex);
      }

    }
|
---|
| 360 | public:
|
---|
| 361 |
|
---|
| 362 | //--- low level purely coherent filling ---
|
---|
| 363 |
|
---|
    /** \internal
      * \returns a reference to the non zero coefficient at position \a row, \a col assuming that:
      * - the nonzero does not already exist
      * - the new coefficient is the last one according to the storage order
      *
      * Before filling a given inner vector you must call the startVec(Index) function.
      *
      * After an insertion session, you should call the finalize() function.
      *
      * \sa insert, insertBackByOuterInner, startVec */
    inline Scalar& insertBack(Index row, Index col)
    {
      // Forward to the storage-order-agnostic version.
      return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
    }
|
---|
| 378 |
|
---|
| 379 | /** \internal
|
---|
| 380 | * \sa insertBack, startVec */
|
---|
| 381 | inline Scalar& insertBackByOuterInner(Index outer, Index inner)
|
---|
| 382 | {
|
---|
| 383 | eigen_assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
|
---|
| 384 | eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
|
---|
| 385 | Index p = m_outerIndex[outer+1];
|
---|
| 386 | ++m_outerIndex[outer+1];
|
---|
| 387 | m_data.append(0, inner);
|
---|
| 388 | return m_data.value(p);
|
---|
| 389 | }
|
---|
| 390 |
|
---|
| 391 | /** \internal
|
---|
| 392 | * \warning use it only if you know what you are doing */
|
---|
| 393 | inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
|
---|
| 394 | {
|
---|
| 395 | Index p = m_outerIndex[outer+1];
|
---|
| 396 | ++m_outerIndex[outer+1];
|
---|
| 397 | m_data.append(0, inner);
|
---|
| 398 | return m_data.value(p);
|
---|
| 399 | }
|
---|
| 400 |
|
---|
    /** \internal
      * Starts filling inner vector \a outer; vectors must be started sequentially.
      * \sa insertBack, insertBackByOuterInner */
    inline void startVec(Index outer)
    {
      eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
      eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
      // The new inner vector starts empty, right where the previous one ends.
      m_outerIndex[outer+1] = m_outerIndex[outer];
    }
|
---|
| 409 |
|
---|
    /** \internal
      * Must be called after inserting a set of non zero entries using the low level compressed API.
      */
    inline void finalize()
    {
      if(isCompressed())
      {
        Index size = static_cast<Index>(m_data.size());
        Index i = m_outerSize;
        // find the last filled column
        while (i>=0 && m_outerIndex[i]==0)
          --i;
        ++i;
        // Close every trailing (never started) inner vector by pointing it
        // past the end of the data.
        while (i<=m_outerSize)
        {
          m_outerIndex[i] = size;
          ++i;
        }
      }
    }
|
---|
| 430 |
|
---|
| 431 | //---
|
---|
| 432 |
|
---|
| 433 | template<typename InputIterators>
|
---|
| 434 | void setFromTriplets(const InputIterators& begin, const InputIterators& end);
|
---|
| 435 |
|
---|
| 436 | void sumupDuplicates();
|
---|
| 437 |
|
---|
| 438 | //---
|
---|
| 439 |
|
---|
| 440 | /** \internal
|
---|
| 441 | * same as insert(Index,Index) except that the indices are given relative to the storage order */
|
---|
| 442 | Scalar& insertByOuterInner(Index j, Index i)
|
---|
| 443 | {
|
---|
| 444 | return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
|
---|
| 445 | }
|
---|
| 446 |
|
---|
    /** Turns the matrix into the \em compressed format.
      *
      * Removes the free room between inner vectors and discards m_innerNonZeros,
      * yielding the standard compressed row/column representation. */
    void makeCompressed()
    {
      if(isCompressed())
        return;

      // Pack inner vectors from left to right: vector j is moved so that it
      // starts right after the already-packed vector j-1.
      Index oldStart = m_outerIndex[1];
      m_outerIndex[1] = m_innerNonZeros[0];
      for(Index j=1; j<m_outerSize; ++j)
      {
        Index nextOldStart = m_outerIndex[j+1];
        Index offset = oldStart - m_outerIndex[j];
        if(offset>0)
        {
          for(Index k=0; k<m_innerNonZeros[j]; ++k)
          {
            m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
            m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
          }
        }
        m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
        oldStart = nextOldStart;
      }
      // A null m_innerNonZeros signals compressed mode.
      std::free(m_innerNonZeros);
      m_innerNonZeros = 0;
      // Drop the now-unused trailing storage.
      m_data.resize(m_outerIndex[m_outerSize]);
      m_data.squeeze();
    }
|
---|
| 476 |
|
---|
| 477 | /** Turns the matrix into the uncompressed mode */
|
---|
| 478 | void uncompress()
|
---|
| 479 | {
|
---|
| 480 | if(m_innerNonZeros != 0)
|
---|
| 481 | return;
|
---|
| 482 | m_innerNonZeros = static_cast<Index*>(std::malloc(m_outerSize * sizeof(Index)));
|
---|
| 483 | for (Index i = 0; i < m_outerSize; i++)
|
---|
| 484 | {
|
---|
| 485 | m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
|
---|
| 486 | }
|
---|
| 487 | }
|
---|
| 488 |
|
---|
    /** Suppresses all nonzeros which are \b much \b smaller \b than \a reference under the tolerance \a epsilon */
    void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
    {
      // Delegate to the generic predicate-based overload.
      prune(default_prunning_func(reference,epsilon));
    }
|
---|
| 494 |
|
---|
    /** Turns the matrix into compressed format, and suppresses all nonzeros which do not satisfy the predicate \a keep.
      * The functor type \a KeepFunc must implement the following function:
      * \code
      * bool operator() (const Index& row, const Index& col, const Scalar& value) const;
      * \endcode
      * \sa prune(Scalar,RealScalar)
      */
    template<typename KeepFunc>
    void prune(const KeepFunc& keep = KeepFunc())
    {
      // TODO optimize the uncompressed mode to avoid moving and allocating the data twice
      // TODO also implement a unit test
      makeCompressed();

      // Compact the kept entries in place; k is the write cursor (always <= read
      // cursor i, so entries are never overwritten before being read).
      Index k = 0;
      for(Index j=0; j<m_outerSize; ++j)
      {
        Index previousStart = m_outerIndex[j];
        m_outerIndex[j] = k;
        Index end = m_outerIndex[j+1];
        for(Index i=previousStart; i<end; ++i)
        {
          if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
          {
            m_data.value(k) = m_data.value(i);
            m_data.index(k) = m_data.index(i);
            ++k;
          }
        }
      }
      m_outerIndex[m_outerSize] = k;
      m_data.resize(k,0);
    }
|
---|
| 528 |
|
---|
    /** Resizes the matrix to a \a rows x \a cols matrix leaving old values untouched.
      * \sa resizeNonZeros(Index), reserve(), setZero()
      */
    void conservativeResize(Index rows, Index cols)
    {
      // No change
      if (this->rows() == rows && this->cols() == cols) return;

      // If one dimension is null, then there is nothing to be preserved
      if(rows==0 || cols==0) return resize(rows,cols);

      Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
      Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
      Index newInnerSize = IsRowMajor ? cols : rows;

      // Deals with inner non zeros
      if (m_innerNonZeros)
      {
        // Resize m_innerNonZeros
        Index *newInnerNonZeros = static_cast<Index*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(Index)));
        if (!newInnerNonZeros) internal::throw_std_bad_alloc();
        m_innerNonZeros = newInnerNonZeros;

        // Newly appended inner vectors start empty.
        for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
          m_innerNonZeros[i] = 0;
      }
      else if (innerChange < 0)
      {
        // Inner size decreased: allocate a new m_innerNonZeros
        m_innerNonZeros = static_cast<Index*>(std::malloc((m_outerSize+outerChange+1) * sizeof(Index)));
        if (!m_innerNonZeros) internal::throw_std_bad_alloc();
        for(Index i = 0; i < m_outerSize; i++)
          m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
      }

      // Change the m_innerNonZeros in case of a decrease of inner size
      if (m_innerNonZeros && innerChange < 0)
      {
        for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
        {
          // Drop trailing entries whose inner index falls out of the new range
          // (this assumes entries of a vector are sorted by inner index).
          Index &n = m_innerNonZeros[i];
          Index start = m_outerIndex[i];
          while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
        }
      }

      m_innerSize = newInnerSize;

      // Re-allocate outer index structure if necessary
      if (outerChange == 0)
        return;

      Index *newOuterIndex = static_cast<Index*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(Index)));
      if (!newOuterIndex) internal::throw_std_bad_alloc();
      m_outerIndex = newOuterIndex;
      if (outerChange > 0)
      {
        // New inner vectors are empty: they all start (and end) at the current end.
        Index last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
        for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
          m_outerIndex[i] = last;
      }
      m_outerSize += outerChange;
    }
|
---|
| 592 |
|
---|
    /** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero.
      * \sa resizeNonZeros(Index), reserve(), setZero()
      */
    void resize(Index rows, Index cols)
    {
      const Index outerSize = IsRowMajor ? rows : cols;
      m_innerSize = IsRowMajor ? cols : rows;
      m_data.clear();
      // Reallocate the outer index array only when its size actually changes.
      if (m_outerSize != outerSize || m_outerSize==0)
      {
        std::free(m_outerIndex);
        m_outerIndex = static_cast<Index*>(std::malloc((outerSize + 1) * sizeof(Index)));
        if (!m_outerIndex) internal::throw_std_bad_alloc();

        m_outerSize = outerSize;
      }
      // A full resize always returns to compressed (and empty) state.
      if(m_innerNonZeros)
      {
        std::free(m_innerNonZeros);
        m_innerNonZeros = 0;
      }
      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
    }
|
---|
| 616 |
|
---|
    /** \internal
      * Resize the nonzero vector to \a size */
    void resizeNonZeros(Index size)
    {
      // TODO remove this function
      m_data.resize(size);
    }
|
---|
| 624 |
|
---|
    /** \returns a const expression of the diagonal coefficients */
    const Diagonal<const SparseMatrix> diagonal() const { return *this; }
|
---|
| 627 |
|
---|
| 628 | /** Default constructor yielding an empty \c 0 \c x \c 0 matrix */
|
---|
| 629 | inline SparseMatrix()
|
---|
| 630 | : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
|
---|
| 631 | {
|
---|
| 632 | check_template_parameters();
|
---|
| 633 | resize(0, 0);
|
---|
| 634 | }
|
---|
| 635 |
|
---|
| 636 | /** Constructs a \a rows \c x \a cols empty matrix */
|
---|
| 637 | inline SparseMatrix(Index rows, Index cols)
|
---|
| 638 | : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
|
---|
| 639 | {
|
---|
| 640 | check_template_parameters();
|
---|
| 641 | resize(rows, cols);
|
---|
| 642 | }
|
---|
| 643 |
|
---|
| 644 | /** Constructs a sparse matrix from the sparse expression \a other */
|
---|
| 645 | template<typename OtherDerived>
|
---|
| 646 | inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
|
---|
| 647 | : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
|
---|
| 648 | {
|
---|
| 649 | EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
|
---|
| 650 | YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
|
---|
| 651 | check_template_parameters();
|
---|
| 652 | *this = other.derived();
|
---|
| 653 | }
|
---|
| 654 |
|
---|
| 655 | /** Constructs a sparse matrix from the sparse selfadjoint view \a other */
|
---|
| 656 | template<typename OtherDerived, unsigned int UpLo>
|
---|
| 657 | inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
|
---|
| 658 | : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
|
---|
| 659 | {
|
---|
| 660 | check_template_parameters();
|
---|
| 661 | *this = other;
|
---|
| 662 | }
|
---|
| 663 |
|
---|
/** Copy constructor (it performs a deep copy) */
inline SparseMatrix(const SparseMatrix& other)
  : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
{
  check_template_parameters();
  // delegate to operator=(const SparseMatrix&), which deep-copies the buffers
  *this = other.derived();
}
| 671 |
|
---|
/** \brief Copy constructor with in-place evaluation */
template<typename OtherDerived>
SparseMatrix(const ReturnByValue<OtherDerived>& other)
  : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
{
  check_template_parameters();
  // size *this like the expression result, then let the expression write
  // directly into *this (avoids a temporary)
  initAssignment(other);
  other.evalTo(*this);
}
| 681 |
|
---|
| 682 | /** Swaps the content of two sparse matrices of the same type.
|
---|
| 683 | * This is a fast operation that simply swaps the underlying pointers and parameters. */
|
---|
| 684 | inline void swap(SparseMatrix& other)
|
---|
| 685 | {
|
---|
| 686 | //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
|
---|
| 687 | std::swap(m_outerIndex, other.m_outerIndex);
|
---|
| 688 | std::swap(m_innerSize, other.m_innerSize);
|
---|
| 689 | std::swap(m_outerSize, other.m_outerSize);
|
---|
| 690 | std::swap(m_innerNonZeros, other.m_innerNonZeros);
|
---|
| 691 | m_data.swap(other.m_data);
|
---|
| 692 | }
|
---|
| 693 |
|
---|
/** Sets *this to the identity matrix.
  * This function also turns the matrix into compressed mode, and drop any reserved memory. */
inline void setIdentity()
{
  eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
  // exactly one nonzero per inner vector
  this->m_data.resize(rows());
  // inner indices 0,1,...,rows()-1 (entry k of vector k), written through a
  // dense Map over the raw index buffer
  Eigen::Map<Matrix<Index, Dynamic, 1> >(this->m_data.indexPtr(), rows()).setLinSpaced(0, rows()-1);
  // all stored values are 1
  Eigen::Map<Matrix<Scalar, Dynamic, 1> >(this->m_data.valuePtr(), rows()).setOnes();
  // outer index table of the compressed format: 0,1,...,rows()
  Eigen::Map<Matrix<Index, Dynamic, 1> >(this->m_outerIndex, rows()+1).setLinSpaced(0, rows());
  // drop the per-vector nonzero counts => the matrix is in compressed mode
  std::free(m_innerNonZeros);
  m_innerNonZeros = 0;
}
/** Assignment from another SparseMatrix: steals the buffers when \a other is
  * a marked r-value, performs a deep copy otherwise. */
inline SparseMatrix& operator=(const SparseMatrix& other)
{
  if (other.isRValue())
  {
    // the source is a temporary: swap buffers instead of copying them
    swap(other.const_cast_derived());
  }
  else if(this!=&other)
  {
    initAssignment(other);
    if(other.isCompressed())
    {
      // compressed source: raw copy of the outer index table plus a copy of
      // the index/value storage
      memcpy(m_outerIndex, other.m_outerIndex, (m_outerSize+1)*sizeof(Index));
      m_data = other.m_data;
    }
    else
    {
      // uncompressed source: fall back to the generic sparse assignment
      Base::operator=(other);
    }
  }
  return *this;
}
| 727 |
|
---|
#ifndef EIGEN_PARSED_BY_DOXYGEN
// Assignment from a sparse-sparse product expression: forwarded to the base
// class implementation.
template<typename Lhs, typename Rhs>
inline SparseMatrix& operator=(const SparseSparseProduct<Lhs,Rhs>& product)
{ return Base::operator=(product); }

// Assignment from an expression evaluated by value: size *this first, then
// let the base class perform the evaluation.
template<typename OtherDerived>
inline SparseMatrix& operator=(const ReturnByValue<OtherDerived>& other)
{
  initAssignment(other);
  return Base::operator=(other.derived());
}

// Catch-all assignment from any EigenBase expression.
template<typename OtherDerived>
inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
{ return Base::operator=(other.derived()); }
#endif
| 744 |
|
---|
/** Generic assignment from any sparse expression (defined out-of-line);
  * handles the transposed copy required when storage orders differ. */
template<typename OtherDerived>
EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);
| 747 |
|
---|
/** Stream output. When sparse debugging is enabled (EIGEN_DBG_SPARSE), the
  * raw storage is dumped first (nonzeros, outer pointers, and, in
  * uncompressed mode, the per-vector nonzero counts), then the matrix is
  * printed through the base-class formatter. */
friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
{
  EIGEN_DBG_SPARSE(
    s << "Nonzero entries:\n";
    if(m.isCompressed())
      // compressed mode: the nonzeros form a single contiguous run
      for (Index i=0; i<m.nonZeros(); ++i)
        s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
    else
      // uncompressed mode: print each inner vector, marking unused reserved
      // slots as "(_,_)"
      for (Index i=0; i<m.outerSize(); ++i)
      {
        Index p = m.m_outerIndex[i];
        Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
        Index k=p;
        for (; k<pe; ++k)
          s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
        for (; k<m.m_outerIndex[i+1]; ++k)
          s << "(_,_) ";
      }
    s << std::endl;
    s << std::endl;
    s << "Outer pointers:\n";
    for (Index i=0; i<m.outerSize(); ++i)
      s << m.m_outerIndex[i] << " ";
    s << " $" << std::endl;
    if(!m.isCompressed())
    {
      s << "Inner non zeros:\n";
      for (Index i=0; i<m.outerSize(); ++i)
        s << m.m_innerNonZeros[i] << " ";
      s << " $" << std::endl;
    }
    s << std::endl;
  );
  s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
  return s;
}
| 784 |
|
---|
/** Destructor */
inline ~SparseMatrix()
{
  // m_data is a regular member and cleans up after itself; only the manually
  // allocated index arrays must be released here (std::free(0) is a no-op)
  std::free(m_outerIndex);
  std::free(m_innerNonZeros);
}
| 791 |
|
---|
#ifndef EIGEN_PARSED_BY_DOXYGEN
/** Overloaded for performance */
Scalar sum() const;
#endif

// hook for user-provided extensions of this class
# ifdef EIGEN_SPARSEMATRIX_PLUGIN
# include EIGEN_SPARSEMATRIX_PLUGIN
# endif
| 800 |
|
---|
| 801 | protected:
|
---|
| 802 |
|
---|
| 803 | template<typename Other>
|
---|
| 804 | void initAssignment(const Other& other)
|
---|
| 805 | {
|
---|
| 806 | resize(other.rows(), other.cols());
|
---|
| 807 | if(m_innerNonZeros)
|
---|
| 808 | {
|
---|
| 809 | std::free(m_innerNonZeros);
|
---|
| 810 | m_innerNonZeros = 0;
|
---|
| 811 | }
|
---|
| 812 | }
|
---|
| 813 |
|
---|
/** \internal
  * Low-level sorted insertion for a matrix in compressed mode
  * (defined out-of-line).
  * \sa insert(Index,Index) */
EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);
| 817 |
|
---|
| 818 | /** \internal
|
---|
| 819 | * A vector object that is equal to 0 everywhere but v at the position i */
|
---|
| 820 | class SingletonVector
|
---|
| 821 | {
|
---|
| 822 | Index m_index;
|
---|
| 823 | Index m_value;
|
---|
| 824 | public:
|
---|
| 825 | typedef Index value_type;
|
---|
| 826 | SingletonVector(Index i, Index v)
|
---|
| 827 | : m_index(i), m_value(v)
|
---|
| 828 | {}
|
---|
| 829 |
|
---|
| 830 | Index operator[](Index i) const { return i==m_index ? m_value : 0; }
|
---|
| 831 | };
|
---|
| 832 |
|
---|
/** \internal
  * Low-level sorted insertion for a matrix in uncompressed mode
  * (defined out-of-line).
  * \sa insert(Index,Index) */
EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);
| 836 |
|
---|
public:
/** \internal
  * Appends a zero coefficient at (\a row, \a col) assuming enough room has
  * already been reserved in the target inner vector, and returns a reference
  * to its value so the caller can overwrite it.
  * \sa insert(Index,Index) */
EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
{
  const Index outer = IsRowMajor ? row : col;
  const Index inner = IsRowMajor ? col : row;

  eigen_assert(!isCompressed());
  // sanity check: the nonzero count must not exceed the reserved room of
  // this inner vector
  eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));

  // claim the next free slot; the post-increment bumps the nonzero count
  Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
  m_data.index(p) = inner;
  return (m_data.value(p) = 0);
}
| 852 |
|
---|
private:
/** \internal Compile-time validation of the template parameters: the index
  * type must be signed, and _Options may only contain ColMajor/RowMajor. */
static void check_template_parameters()
{
  EIGEN_STATIC_ASSERT(NumTraits<Index>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
  EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
}
| 859 |
|
---|
/** \internal Default predicate for pruning: an entry is kept when it is NOT
  * much smaller than \c reference relatively to \c epsilon. */
struct default_prunning_func {
  default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
  // returns true when the coefficient must be kept (row/col are ignored)
  inline bool operator() (const Index&, const Index&, const Scalar& value) const
  {
    return !internal::isMuchSmallerThan(value, reference, epsilon);
  }
  Scalar reference;   // reference magnitude
  RealScalar epsilon; // relative tolerance
};
| 869 | };
|
---|
| 870 |
|
---|
| 871 | template<typename Scalar, int _Options, typename _Index>
|
---|
| 872 | class SparseMatrix<Scalar,_Options,_Index>::InnerIterator
|
---|
| 873 | {
|
---|
| 874 | public:
|
---|
| 875 | InnerIterator(const SparseMatrix& mat, Index outer)
|
---|
| 876 | : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_id(mat.m_outerIndex[outer])
|
---|
| 877 | {
|
---|
| 878 | if(mat.isCompressed())
|
---|
| 879 | m_end = mat.m_outerIndex[outer+1];
|
---|
| 880 | else
|
---|
| 881 | m_end = m_id + mat.m_innerNonZeros[outer];
|
---|
| 882 | }
|
---|
| 883 |
|
---|
| 884 | inline InnerIterator& operator++() { m_id++; return *this; }
|
---|
| 885 |
|
---|
| 886 | inline const Scalar& value() const { return m_values[m_id]; }
|
---|
| 887 | inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id]); }
|
---|
| 888 |
|
---|
| 889 | inline Index index() const { return m_indices[m_id]; }
|
---|
| 890 | inline Index outer() const { return m_outer; }
|
---|
| 891 | inline Index row() const { return IsRowMajor ? m_outer : index(); }
|
---|
| 892 | inline Index col() const { return IsRowMajor ? index() : m_outer; }
|
---|
| 893 |
|
---|
| 894 | inline operator bool() const { return (m_id < m_end); }
|
---|
| 895 |
|
---|
| 896 | protected:
|
---|
| 897 | const Scalar* m_values;
|
---|
| 898 | const Index* m_indices;
|
---|
| 899 | const Index m_outer;
|
---|
| 900 | Index m_id;
|
---|
| 901 | Index m_end;
|
---|
| 902 | };
|
---|
| 903 |
|
---|
| 904 | template<typename Scalar, int _Options, typename _Index>
|
---|
| 905 | class SparseMatrix<Scalar,_Options,_Index>::ReverseInnerIterator
|
---|
| 906 | {
|
---|
| 907 | public:
|
---|
| 908 | ReverseInnerIterator(const SparseMatrix& mat, Index outer)
|
---|
| 909 | : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_start(mat.m_outerIndex[outer])
|
---|
| 910 | {
|
---|
| 911 | if(mat.isCompressed())
|
---|
| 912 | m_id = mat.m_outerIndex[outer+1];
|
---|
| 913 | else
|
---|
| 914 | m_id = m_start + mat.m_innerNonZeros[outer];
|
---|
| 915 | }
|
---|
| 916 |
|
---|
| 917 | inline ReverseInnerIterator& operator--() { --m_id; return *this; }
|
---|
| 918 |
|
---|
| 919 | inline const Scalar& value() const { return m_values[m_id-1]; }
|
---|
| 920 | inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id-1]); }
|
---|
| 921 |
|
---|
| 922 | inline Index index() const { return m_indices[m_id-1]; }
|
---|
| 923 | inline Index outer() const { return m_outer; }
|
---|
| 924 | inline Index row() const { return IsRowMajor ? m_outer : index(); }
|
---|
| 925 | inline Index col() const { return IsRowMajor ? index() : m_outer; }
|
---|
| 926 |
|
---|
| 927 | inline operator bool() const { return (m_id > m_start); }
|
---|
| 928 |
|
---|
| 929 | protected:
|
---|
| 930 | const Scalar* m_values;
|
---|
| 931 | const Index* m_indices;
|
---|
| 932 | const Index m_outer;
|
---|
| 933 | Index m_id;
|
---|
| 934 | const Index m_start;
|
---|
| 935 | };
|
---|
| 936 |
|
---|
| 937 | namespace internal {
|
---|
| 938 |
|
---|
/** \internal Implementation of SparseMatrix::setFromTriplets.
  * The triplets are accumulated into a temporary matrix \c trMat with the
  * opposite storage order, duplicates are summed there, and the final
  * transposed copy back into \a mat implicitly sorts the inner vectors. */
template<typename InputIterator, typename SparseMatrixType>
void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, int Options = 0)
{
  EIGEN_UNUSED_VARIABLE(Options);
  enum { IsRowMajor = SparseMatrixType::IsRowMajor };
  typedef typename SparseMatrixType::Scalar Scalar;
  typedef typename SparseMatrixType::Index Index;
  // temporary with the opposite storage order
  SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,Index> trMat(mat.rows(),mat.cols());

  if(begin!=end)
  {
    // pass 1: count the nnz per inner-vector
    Matrix<Index,Dynamic,1> wi(trMat.outerSize());
    wi.setZero();
    for(InputIterator it(begin); it!=end; ++it)
    {
      eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
      wi(IsRowMajor ? it->col() : it->row())++;
    }

    // pass 2: insert all the elements into trMat (room was reserved exactly,
    // so the cheap append path can be used)
    trMat.reserve(wi);
    for(InputIterator it(begin); it!=end; ++it)
      trMat.insertBackUncompressed(it->row(),it->col()) = it->value();

    // pass 3: sum up duplicate entries and compress trMat
    trMat.sumupDuplicates();
  }

  // pass 4: transposed copy -> implicit sorting
  mat = trMat;
}
| 971 |
|
---|
| 972 | }
|
---|
| 973 |
|
---|
| 974 |
|
---|
/** Fill the matrix \c *this with the list of \em triplets defined by the iterator range \a begin - \a end.
  *
  * A \em triplet is a tuple (i,j,value) defining a non-zero element.
  * The input list of triplets does not have to be sorted, and can contain duplicated elements.
  * In any case, the result is a \b sorted and \b compressed sparse matrix where the duplicates have been summed up.
  * This is a \em O(n) operation, with \em n the number of triplet elements.
  * The initial contents of \c *this is destroyed.
  * The matrix \c *this must be properly resized beforehand using the SparseMatrix(Index,Index) constructor,
  * or the resize(Index,Index) method. The sizes are not extracted from the triplet list.
  *
  * The \a InputIterators value_type must provide the following interface:
  * \code
  * Scalar value() const; // the value
  * Index row() const;    // the row index i
  * Index col() const;    // the column index j
  * \endcode
  * See for instance the Eigen::Triplet template class.
  *
  * Here is a typical usage example:
  * \code
    typedef Triplet<double> T;
    std::vector<T> tripletList;
    tripletList.reserve(estimation_of_entries);
    for(...)
    {
      // ...
      tripletList.push_back(T(i,j,v_ij));
    }
    SparseMatrixType m(rows,cols);
    m.setFromTriplets(tripletList.begin(), tripletList.end());
    // m is ready to go!
  * \endcode
  *
  * \warning The list of triplets is read multiple times (at least twice). Therefore, it is not recommended to define
  * an abstract iterator over a complex data-structure that would be expensive to evaluate. The triplets should rather
  * be explicitly stored into a std::vector for instance.
  */
template<typename Scalar, int _Options, typename _Index>
template<typename InputIterators>
void SparseMatrix<Scalar,_Options,_Index>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
{
  // the heavy lifting is done by the internal free function
  internal::set_from_triplets(begin, end, *this);
}
| 1018 |
|
---|
/** \internal
  * Collapses duplicate entries of an uncompressed matrix: within each inner
  * vector, entries sharing the same inner index are summed together and the
  * storage is compacted in place; the matrix ends up in compressed mode. */
template<typename Scalar, int _Options, typename _Index>
void SparseMatrix<Scalar,_Options,_Index>::sumupDuplicates()
{
  eigen_assert(!isCompressed());
  // TODO, in practice we should be able to use m_innerNonZeros for that task
  // wi(i) caches the destination position of the entry with inner index i for
  // the inner vector being processed; a value < start means "not seen yet"
  Matrix<Index,Dynamic,1> wi(innerSize());
  wi.fill(-1);
  Index count = 0;
  // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers
  for(Index j=0; j<outerSize(); ++j)
  {
    Index start = count;
    Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
    for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
    {
      Index i = m_data.index(k);
      if(wi(i)>=start)
      {
        // we already met this entry => accumulate it
        m_data.value(wi(i)) += m_data.value(k);
      }
      else
      {
        // first occurrence in this inner vector: compact it in place
        // (count <= k always holds, so this never overwrites pending data)
        m_data.value(count) = m_data.value(k);
        m_data.index(count) = m_data.index(k);
        wi(i) = count;
        ++count;
      }
    }
    m_outerIndex[j] = start;
  }
  m_outerIndex[m_outerSize] = count;

  // turn the matrix into compressed form
  std::free(m_innerNonZeros);
  m_innerNonZeros = 0;
  m_data.resize(m_outerIndex[m_outerSize]);
}
| 1058 |
|
---|
/** Generic assignment from any sparse expression. When the storage orders of
  * the two sides differ, a two-pass counting-sort style transposed copy is
  * performed (yielding a sorted, compressed result); otherwise the generic
  * base-class assignment is used. */
template<typename Scalar, int _Options, typename _Index>
template<typename OtherDerived>
EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_Index>& SparseMatrix<Scalar,_Options,_Index>::operator=(const SparseMatrixBase<OtherDerived>& other)
{
  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
    YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)

  const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
  if (needToTranspose)
  {
    // two passes algorithm:
    //  1 - compute the number of coeffs per dest inner vector
    //  2 - do the actual copy/eval
    // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
    typedef typename internal::nested<OtherDerived,2>::type OtherCopy;
    typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
    OtherCopy otherCopy(other.derived());

    SparseMatrix dest(other.rows(),other.cols());
    // zero the counters before the counting pass
    Eigen::Map<Matrix<Index, Dynamic, 1> > (dest.m_outerIndex,dest.outerSize()).setZero();

    // pass 1: count the entries of each dest inner vector
    // FIXME the above copy could be merged with that pass
    for (Index j=0; j<otherCopy.outerSize(); ++j)
      for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
        ++dest.m_outerIndex[it.index()];

    // prefix sum: turn the per-vector counts into start offsets; positions[]
    // keeps a working copy used as write cursors in pass 2
    Index count = 0;
    Matrix<Index,Dynamic,1> positions(dest.outerSize());
    for (Index j=0; j<dest.outerSize(); ++j)
    {
      Index tmp = dest.m_outerIndex[j];
      dest.m_outerIndex[j] = count;
      positions[j] = count;
      count += tmp;
    }
    dest.m_outerIndex[dest.outerSize()] = count;
    // alloc
    dest.m_data.resize(count);
    // pass 2: scatter the coefficients into their dest inner vectors; within
    // a vector, entries arrive in increasing j, hence already sorted
    for (Index j=0; j<otherCopy.outerSize(); ++j)
    {
      for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
      {
        Index pos = positions[it.index()]++;
        dest.m_data.index(pos) = j;
        dest.m_data.value(pos) = it.value();
      }
    }
    // adopt the buffers of the temporary (O(1))
    this->swap(dest);
    return *this;
  }
  else
  {
    if(other.isRValue())
      initAssignment(other.derived());
    // there is no special optimization
    return Base::operator=(other.derived());
  }
}
| 1120 |
|
---|
/** \internal Sorted insertion of a zero coefficient at (\a row, \a col) for a
  * matrix in uncompressed mode; returns a reference to the stored value so
  * the caller can overwrite it. Reallocates only when the target inner
  * vector has no free slot left. */
template<typename _Scalar, int _Options, typename _Index>
EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertUncompressed(Index row, Index col)
{
  eigen_assert(!isCompressed());

  const Index outer = IsRowMajor ? row : col;
  const Index inner = IsRowMajor ? col : row;

  // number of slots reserved for this inner vector vs. slots actually used
  Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
  Index innerNNZ = m_innerNonZeros[outer];
  if(innerNNZ>=room)
  {
    // this inner vector is full, we need to reallocate the whole buffer :(
    // (reserve extra room for this outer vector only, at least 2 slots)
    reserve(SingletonVector(outer,std::max<Index>(2,innerNNZ)));
  }

  // shift the entries with a larger inner index one slot to the right so the
  // vector stays sorted
  Index startId = m_outerIndex[outer];
  Index p = startId + m_innerNonZeros[outer];
  while ( (p > startId) && (m_data.index(p-1) > inner) )
  {
    m_data.index(p) = m_data.index(p-1);
    m_data.value(p) = m_data.value(p-1);
    --p;
  }
  eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exist, you must call coeffRef to this end");

  m_innerNonZeros[outer]++;

  m_data.index(p) = inner;
  // the caller overwrites this zero with the actual value
  return (m_data.value(p) = 0);
}
| 1152 |
|
---|
/** \internal Sorted insertion of a zero coefficient at (\a row, \a col) for a
  * matrix in compressed mode; returns a reference to the stored value so the
  * caller can overwrite it. All entries located after the insertion point
  * have to be shifted, so this can be an expensive operation. */
template<typename _Scalar, int _Options, typename _Index>
EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertCompressed(Index row, Index col)
{
  eigen_assert(isCompressed());

  const Index outer = IsRowMajor ? row : col;
  const Index inner = IsRowMajor ? col : row;

  Index previousOuter = outer;
  if (m_outerIndex[outer+1]==0)
  {
    // we start a new inner vector: back-fill the zero-initialized entries of
    // the outer index table with the current data size
    while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
    {
      m_outerIndex[previousOuter] = static_cast<Index>(m_data.size());
      --previousOuter;
    }
    m_outerIndex[outer+1] = m_outerIndex[outer];
  }

  // here we have to handle the tricky case where the outerIndex array
  // starts with: [ 0 0 0 0 0 1 ...] and we are inserted in, e.g.,
  // the 2nd inner vector...
  bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
                && (size_t(m_outerIndex[outer+1]) == m_data.size());

  size_t startId = m_outerIndex[outer];
  // FIXME let's make sure sizeof(long int) == sizeof(size_t)
  size_t p = m_outerIndex[outer+1];
  ++m_outerIndex[outer+1];

  double reallocRatio = 1;
  if (m_data.allocatedSize()<=m_data.size())
  {
    // if there is no preallocated memory, let's reserve a minimum of 32 elements
    if (m_data.size()==0)
    {
      m_data.reserve(32);
    }
    else
    {
      // we need to reallocate the data, to reduce multiple reallocations
      // we use a smart resize algorithm based on the current filling ratio
      // in addition, we use double to avoid integers overflows
      double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
      reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
      // furthermore we bound the realloc ratio to:
      //   1) reduce multiple minor realloc when the matrix is almost filled
      //   2) avoid to allocate too much memory when the matrix is almost empty
      reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
    }
  }
  m_data.resize(m_data.size()+1,reallocRatio);

  if (!isLastVec)
  {
    if (previousOuter==-1)
    {
      // oops wrong guess.
      // let's correct the outer offsets
      for (Index k=0; k<=(outer+1); ++k)
        m_outerIndex[k] = 0;
      Index k=outer+1;
      while(m_outerIndex[k]==0)
        m_outerIndex[k++] = 1;
      while (k<=m_outerSize && m_outerIndex[k]!=0)
        m_outerIndex[k++]++;
      p = 0;
      --k;
      k = m_outerIndex[k]-1;
      // shift every stored entry one slot to the right
      while (k>0)
      {
        m_data.index(k) = m_data.index(k-1);
        m_data.value(k) = m_data.value(k-1);
        k--;
      }
    }
    else
    {
      // we are not inserting into the last inner vec
      // update outer indices:
      Index j = outer+2;
      while (j<=m_outerSize && m_outerIndex[j]!=0)
        m_outerIndex[j++]++;
      --j;
      // shift data of last vecs:
      Index k = m_outerIndex[j]-1;
      while (k>=Index(p))
      {
        m_data.index(k) = m_data.index(k-1);
        m_data.value(k) = m_data.value(k-1);
        k--;
      }
    }
  }

  // finally, shift the entries of the target inner vector whose inner index
  // is larger than `inner`, so the vector stays sorted
  while ( (p > startId) && (m_data.index(p-1) > inner) )
  {
    m_data.index(p) = m_data.index(p-1);
    m_data.value(p) = m_data.value(p-1);
    --p;
  }

  m_data.index(p) = inner;
  // the caller overwrites this zero with the actual value
  return (m_data.value(p) = 0);
}
| 1259 |
|
---|
| 1260 | } // end namespace Eigen
|
---|
| 1261 |
|
---|
| 1262 | #endif // EIGEN_SPARSEMATRIX_H
|
---|