* \endcode
* Here is a C++11 example keeping the latest entry only:
* \code
* mat.setFromTriplets(triplets.begin(), triplets.end(), [] (const Scalar&,const Scalar &b) { return b; });
* \endcode
*/
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename InputIterators,typename DupFunctor>
void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
{
  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex>, DupFunctor>(begin, end, *this, dup_func);
}
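/* A minimal usage sketch for the functor overload above (illustrative only;
 * the matrix size and triplet values are made up):
 * \code
 * #include <Eigen/SparseCore>
 * #include <functional>
 * #include <vector>
 *
 * int main()
 * {
 *   typedef Eigen::Triplet<double> T;
 *   std::vector<T> triplets;
 *   triplets.push_back(T(0,0,1.0));
 *   triplets.push_back(T(1,2,2.0));
 *   triplets.push_back(T(0,0,5.0)); // duplicate of (0,0)
 *
 *   Eigen::SparseMatrix<double> mat(3,3);
 *   // sum duplicates: coefficient (0,0) becomes 6
 *   mat.setFromTriplets(triplets.begin(), triplets.end(), std::plus<double>());
 *   // keep the latest entry only: coefficient (0,0) becomes 5
 *   mat.setFromTriplets(triplets.begin(), triplets.end(),
 *                       [](const double&, const double& b) { return b; });
 *   return 0;
 * }
 * \endcode
 */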
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename DupFunctor>
void SparseMatrix<Scalar,_Options,_StorageIndex>::collapseDuplicates(DupFunctor dup_func)
{
  eigen_assert(!isCompressed());
  // TODO, in practice we should be able to use m_innerNonZeros for that task
  IndexVector wi(innerSize());
  wi.fill(-1);
  StorageIndex count = 0;
  // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers
  for(Index j=0; j<outerSize(); ++j)
  {
    StorageIndex start = count;
    Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
    for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
    {
      Index i = m_data.index(k);
      if(wi(i)>=start)
      {
        // we already met this entry => accumulate it
        m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
      }
      else
      {
        m_data.value(count) = m_data.value(k);
        m_data.index(count) = m_data.index(k);
        wi(i) = count;
        ++count;
      }
    }
    m_outerIndex[j] = start;
  }
  m_outerIndex[m_outerSize] = count;

  // turn the matrix into compressed form
  std::free(m_innerNonZeros);
  m_innerNonZeros = 0;
  m_data.resize(m_outerIndex[m_outerSize]);
}
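/* To clarify the workspace trick: wi maps an inner index to the position where
 * it was first written while scanning the current outer vector, so a second
 * occurrence can be folded in place and the surviving entries compacted to the
 * front. A standalone sketch on plain vectors (hypothetical helper, not
 * Eigen's internal storage):
 * \code
 * #include <vector>
 *
 * // Collapse duplicates within one inner vector stored as parallel
 * // index/value arrays; returns the new number of stored entries.
 * template<typename Scalar, typename DupFunctor>
 * int collapse_one_vector(std::vector<int>& idx, std::vector<Scalar>& val,
 *                         int innerSize, DupFunctor dup)
 * {
 *   std::vector<int> wi(innerSize, -1);
 *   int count = 0;
 *   for(std::size_t k=0; k<idx.size(); ++k)
 *   {
 *     int i = idx[k];
 *     if(wi[i] >= 0)                      // already met => accumulate
 *       val[wi[i]] = dup(val[wi[i]], val[k]);
 *     else                                // first occurrence => compact it
 *     {
 *       val[count] = val[k];
 *       idx[count] = idx[k];
 *       wi[i] = count++;
 *     }
 *   }
 *   return count;
 * }
 * \endcode
 */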
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename OtherDerived>
EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_StorageIndex>& SparseMatrix<Scalar,_Options,_StorageIndex>::operator=(const SparseMatrixBase<OtherDerived>& other)
{
  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)

  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
    EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
  #endif

  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
  if (needToTranspose)
  {
    #ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
      EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
    #endif
    // two passes algorithm:
    //  1 - compute the number of coeffs per dest inner vector
    //  2 - do the actual copy/eval
    // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
    typedef typename internal::nested_eval<OtherDerived,2,typename internal::plain_matrix_type<OtherDerived>::type >::type OtherCopy;
    typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
    typedef internal::evaluator<_OtherCopy> OtherCopyEval;
    OtherCopy otherCopy(other.derived());
    OtherCopyEval otherCopyEval(otherCopy);

    SparseMatrix dest(other.rows(),other.cols());
    Eigen::Map<IndexVector> (dest.m_outerIndex,dest.outerSize()).setZero();

    // pass 1
    // FIXME the above copy could be merged with that pass
    for (Index j=0; j<otherCopy.outerSize(); ++j)
      for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
        ++dest.m_outerIndex[it.index()];

    // prefix sum: turn per-vector counts into start offsets
    StorageIndex count = 0;
    IndexVector positions(dest.outerSize());
    for (Index j=0; j<dest.outerSize(); ++j)
    {
      StorageIndex tmp = dest.m_outerIndex[j];
      dest.m_outerIndex[j] = count;
      positions[j] = count;
      count += tmp;
    }
    dest.m_outerIndex[dest.outerSize()] = count;
    // alloc
    dest.m_data.resize(count);

    // pass 2
    for (StorageIndex j=0; j<otherCopy.outerSize(); ++j)
    {
      for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
      {
        Index pos = positions[it.index()]++;
        dest.m_data.index(pos) = j;
        dest.m_data.value(pos) = it.value();
      }
    }
    this->swap(dest);
    return *this;
  }
  else
  {
    if(other.isRValue())
    {
      initAssignment(other.derived());
    }
    // there is no special optimization
    return Base::operator=(other.derived());
  }
}
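/* The transposed copy above is essentially a counting sort: count entries per
 * destination inner vector, prefix-sum the counts into offsets, then scatter.
 * A standalone sketch of the same structure on raw CSC/CSR arrays
 * (hypothetical helper, not part of Eigen):
 * \code
 * #include <vector>
 *
 * void csc_to_csr(int rows, int cols,
 *                 const std::vector<int>& colPtr, const std::vector<int>& rowIdx,
 *                 const std::vector<double>& vals,
 *                 std::vector<int>& rowPtr, std::vector<int>& colIdx,
 *                 std::vector<double>& outVals)
 * {
 *   rowPtr.assign(rows+1, 0);
 *   // pass 1: count entries per destination row
 *   for(std::size_t k=0; k<rowIdx.size(); ++k)
 *     ++rowPtr[rowIdx[k]+1];
 *   // prefix sum turns counts into start offsets
 *   for(int i=0; i<rows; ++i)
 *     rowPtr[i+1] += rowPtr[i];
 *   colIdx.resize(rowIdx.size());
 *   outVals.resize(vals.size());
 *   std::vector<int> positions(rowPtr.begin(), rowPtr.end()-1);
 *   // pass 2: scatter each entry to its final position; visiting the source
 *   // column by column keeps each destination row sorted by column index
 *   for(int j=0; j<cols; ++j)
 *     for(int k=colPtr[j]; k<colPtr[j+1]; ++k)
 *     {
 *       int pos = positions[rowIdx[k]]++;
 *       colIdx[pos] = j;
 *       outVals[pos] = vals[k];
 *     }
 * }
 * \endcode
 */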
template<typename _Scalar, int _Options, typename _StorageIndex>
typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insert(Index row, Index col)
{
  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

  const Index outer = IsRowMajor ? row : col;
  const Index inner = IsRowMajor ? col : row;

  if(isCompressed())
  {
    if(nonZeros()==0)
    {
      // reserve space if not already done
      if(m_data.allocatedSize()==0)
        m_data.reserve(2*m_innerSize);

      // turn the matrix into non-compressed mode
      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
      if(!m_innerNonZeros) internal::throw_std_bad_alloc();

      memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));

      // pack all inner-vectors to the end of the pre-allocated space
      // and allocate the entire free-space to the first inner-vector
      StorageIndex end = convert_index(m_data.allocatedSize());
      for(Index j=1; j<=m_outerSize; ++j)
        m_outerIndex[j] = end;
    }
    else
    {
      // turn the matrix into non-compressed mode
      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
      if(!m_innerNonZeros) internal::throw_std_bad_alloc();
      for(Index j=0; j<m_outerSize; ++j)
        m_innerNonZeros[j] = m_outerIndex[j+1]-m_outerIndex[j];
    }
  }

  // check whether we can do a fast "push back" insertion
  Index data_end = m_data.allocatedSize();

  // First case: we are filling a new inner vector which is packed at the end.
  // We assume that all remaining inner-vectors are also empty and packed to the end.
  if(m_outerIndex[outer]==data_end)
  {
    eigen_internal_assert(m_innerNonZeros[outer]==0);

    // pack previous empty inner-vectors to end of the used-space
    // and allocate the entire free-space to the current inner-vector.
    StorageIndex p = convert_index(m_data.size());
    Index j = outer;
    while(j>=0 && m_innerNonZeros[j]==0)
      m_outerIndex[j--] = p;

    // push back the new element
    ++m_innerNonZeros[outer];
    m_data.append(Scalar(0), inner);

    // check for reallocation
    if(data_end != m_data.allocatedSize())
    {
      // m_data has been reallocated
      //  -> move remaining inner-vectors back to the end of the free-space
      //     so that the entire free-space is allocated to the current inner-vector.
      eigen_internal_assert(data_end < m_data.allocatedSize());
      StorageIndex new_end = convert_index(m_data.allocatedSize());
      for(Index k=outer+1; k<=m_outerSize; ++k)
        if(m_outerIndex[k]==data_end)
          m_outerIndex[k] = new_end;
    }
    return m_data.value(p);
  }

  // Second case: the next inner-vector is packed to the end
  // and the current inner-vector end matches the used-space.
  if(m_outerIndex[outer+1]==data_end && m_outerIndex[outer]+m_innerNonZeros[outer]==m_data.size())
  {
    eigen_internal_assert(outer+1==m_outerSize || m_innerNonZeros[outer+1]==0);

    // add space for the new element
    ++m_innerNonZeros[outer];
    m_data.resize(m_data.size()+1);

    // check for reallocation
    if(data_end != m_data.allocatedSize())
    {
      // m_data has been reallocated
      //  -> move remaining inner-vectors back to the end of the free-space
      //     so that the entire free-space is allocated to the current inner-vector.
      eigen_internal_assert(data_end < m_data.allocatedSize());
      StorageIndex new_end = convert_index(m_data.allocatedSize());
      for(Index k=outer+1; k<=m_outerSize; ++k)
        if(m_outerIndex[k]==data_end)
          m_outerIndex[k] = new_end;
    }

    // and insert it at the right position (sorted insertion)
    Index startId = m_outerIndex[outer];
    Index p = m_outerIndex[outer]+m_innerNonZeros[outer]-1;
    while ( (p > startId) && (m_data.index(p-1) > inner) )
    {
      m_data.index(p) = m_data.index(p-1);
      m_data.value(p) = m_data.value(p-1);
      --p;
    }

    m_data.index(p) = convert_index(inner);
    return (m_data.value(p) = 0);
  }

  if(m_data.size() != m_data.allocatedSize())
  {
    // make sure the matrix is compatible to random un-compressed insertion:
    m_data.resize(m_data.allocatedSize());
    this->reserveInnerVectors(Array<StorageIndex,Dynamic,1>::Constant(m_outerSize, 2));
  }

  return insertUncompressed(row,col);
}
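/* A usage sketch for the random-insertion API above (illustrative; the size
 * and per-column budget are made up). Reserving space up front keeps insert()
 * on its cheap "push back" paths instead of triggering reallocations:
 * \code
 * #include <Eigen/SparseCore>
 * #include <algorithm>
 *
 * int main()
 * {
 *   Eigen::SparseMatrix<double> mat(1000,1000);
 *   // budget ~6 entries per column before inserting
 *   mat.reserve(Eigen::VectorXi::Constant(1000, 6));
 *   for(int j=0; j<1000; ++j)
 *     for(int i=j; i<std::min(1000, j+6); ++i)
 *       mat.insert(i,j) = i+j;   // returns a reference to a fresh zero coeff
 *   mat.makeCompressed();        // pack back into compressed (CSC) form
 *   return 0;
 * }
 * \endcode
 */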
template<typename _Scalar, int _Options, typename _StorageIndex>
EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertUncompressed(Index row, Index col)
{
  eigen_assert(!isCompressed());

  const Index outer = IsRowMajor ? row : col;
  const StorageIndex inner = convert_index(IsRowMajor ? col : row);

  Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
  StorageIndex innerNNZ = m_innerNonZeros[outer];
  if(innerNNZ>=room)
  {
    // this inner vector is full, we need to reallocate the whole buffer :(
    reserve(SingletonVector(outer,std::max<StorageIndex>(2,innerNNZ)));
  }

  Index startId = m_outerIndex[outer];
  Index p = startId + m_innerNonZeros[outer];
  while ( (p > startId) && (m_data.index(p-1) > inner) )
  {
    m_data.index(p) = m_data.index(p-1);
    m_data.value(p) = m_data.value(p-1);
    --p;
  }
  eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");

  m_innerNonZeros[outer]++;

  m_data.index(p) = inner;
  return (m_data.value(p) = 0);
}
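/* The shifting loop above is a plain sorted insertion into a segment that has
 * spare room at its end. A standalone sketch on plain vectors (hypothetical
 * helper, not Eigen's internal storage):
 * \code
 * #include <vector>
 *
 * // entries [startId, startId+nnz) are sorted by index and followed by at
 * // least one unused slot; shift larger entries right and drop the new one in.
 * template<typename Scalar>
 * Scalar& sorted_insert(std::vector<int>& idx, std::vector<Scalar>& val,
 *                       int startId, int& nnz, int inner)
 * {
 *   int p = startId + nnz;
 *   while(p > startId && idx[p-1] > inner)
 *   {
 *     idx[p] = idx[p-1];
 *     val[p] = val[p-1];
 *     --p;
 *   }
 *   ++nnz;
 *   idx[p] = inner;
 *   return (val[p] = Scalar(0));
 * }
 * \endcode
 */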
template<typename _Scalar, int _Options, typename _StorageIndex>
EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertCompressed(Index row, Index col)
{
  eigen_assert(isCompressed());

  const Index outer = IsRowMajor ? row : col;
  const Index inner = IsRowMajor ? col : row;

  Index previousOuter = outer;
  if (m_outerIndex[outer+1]==0)
  {
    // we start a new inner vector
    while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
    {
      m_outerIndex[previousOuter] = convert_index(m_data.size());
      --previousOuter;
    }
    m_outerIndex[outer+1] = m_outerIndex[outer];
  }

  // here we have to handle the tricky case where the outerIndex array
  // starts with: [ 0 0 0 0 0 1 ...] and we are inserting in, e.g.,
  // the 2nd inner vector...
  bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
                && (std::size_t(m_outerIndex[outer+1]) == m_data.size());

  std::size_t startId = m_outerIndex[outer];
  // FIXME let's make sure sizeof(long int) == sizeof(std::size_t)
  std::size_t p = m_outerIndex[outer+1];
  ++m_outerIndex[outer+1];

  double reallocRatio = 1;
  if (m_data.allocatedSize()<=m_data.size())
  {
    // if there is no preallocated memory, let's reserve a minimum of 32 elements
    if (m_data.size()==0)
    {
      m_data.reserve(32);
    }
    else
    {
      // we need to reallocate the data; to reduce the number of reallocations
      // we use a smart resize algorithm based on the current filling ratio
      // in addition, we use double to avoid integer overflows
      double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
      reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
      // furthermore we bound the realloc ratio to:
      //   1) reduce multiple minor reallocs when the matrix is almost filled
      //   2) avoid allocating too much memory when the matrix is almost empty
      reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
    }
  }
  m_data.resize(m_data.size()+1,reallocRatio);

  if (!isLastVec)
  {
    if (previousOuter==-1)
    {
      // oops, wrong guess.
      // let's correct the outer offsets
      for (Index k=0; k<=(outer+1); ++k)
        m_outerIndex[k] = 0;
      Index k=outer+1;
      while(m_outerIndex[k]==0)
        m_outerIndex[k++] = 1;
      while (k<=m_outerSize && m_outerIndex[k]!=0)
        m_outerIndex[k++]++;
      p = 0;
      --k;
      k = m_outerIndex[k]-1;
      while (k>0)
      {
        m_data.index(k) = m_data.index(k-1);
        m_data.value(k) = m_data.value(k-1);
        k--;
      }
    }
    else
    {
      // we are not inserting into the last inner vector
      // update outer indices:
      Index j = outer+2;
      while (j<=m_outerSize && m_outerIndex[j]!=0)
        m_outerIndex[j++]++;
      --j;
      // shift data of the last vectors:
      Index k = m_outerIndex[j]-1;
      while (k>=Index(p))
      {
        m_data.index(k) = m_data.index(k-1);
        m_data.value(k) = m_data.value(k-1);
        k--;
      }
    }
  }

  while ( (p > startId) && (m_data.index(p-1) > inner) )
  {
    m_data.index(p) = m_data.index(p-1);
    m_data.value(p) = m_data.value(p-1);
    --p;
  }

  m_data.index(p) = inner;
  return (m_data.value(p) = 0);
}
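/* The growth heuristic above extrapolates the final nonzero count from the
 * filling ratio observed so far: after visiting outer+1 of m_outerSize inner
 * vectors with m_outerIndex[outer] entries stored, it estimates
 * nnzEstimate = m_outerIndex[outer] * m_outerSize / (outer+1) and clamps the
 * resulting growth factor to [1.5, 8]. The same formula as a standalone sketch
 * (hypothetical helper, parameter names are made up):
 * \code
 * #include <algorithm>
 *
 * double realloc_ratio(double filledSoFar, double outersVisited,
 *                      double outerSize, double currentSize)
 * {
 *   if(currentSize == 0.0) return 1.0;  // the reserve(32) path above
 *   double nnzEstimate = filledSoFar * outerSize / outersVisited;
 *   double ratio = (nnzEstimate - currentSize) / currentSize;
 *   // clamp: avoid many small reallocs near the end, and over-allocation
 *   // while the matrix is still almost empty
 *   return (std::min)((std::max)(ratio, 1.5), 8.0);
 * }
 * \endcode
 */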
namespace internal {

template<typename _Scalar, int _Options, typename _StorageIndex>
struct evaluator<SparseMatrix<_Scalar,_Options,_StorageIndex> >
  : evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > >
{
  typedef evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > > Base;
  typedef SparseMatrix<_Scalar,_Options,_StorageIndex> SparseMatrixType;
  evaluator() : Base() {}
  explicit evaluator(const SparseMatrixType &mat) : Base(mat) {}
};

} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SPARSEMATRIX_H