// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
// Releases the current buffer and allocates storage for a (sub-)vector of
// \a size coefficients, usable in both dense and sparse (linked-list) mode.
// FIX: the comment below had swallowed `delete[] m_buffer; if (size<1000)`
// on a single joined line, which made the function ill-formed.
void reallocate(Index size)
{
  // if the size of the matrix is not too large, let's allocate a bit more than needed such
  // that we can handle dense vector even in sparse mode.
  delete[] m_buffer;
  if (size<1000)
  {
    // Enough Scalars to hold `size` ListEl nodes (rounded up), so the same
    // buffer can also back a dense vector of length `size`.
    Index allocSize = (size * sizeof(ListEl) + sizeof(Scalar) - 1)/sizeof(Scalar);
    m_allocatedElements = convert_index((allocSize*sizeof(Scalar))/sizeof(ListEl));
    m_buffer = new Scalar[allocSize];
  }
  else
  {
    // Large case: allocate exactly `size` Scalars and record how many
    // ListEl nodes fit into that storage (the sparse-mode capacity).
    m_allocatedElements = convert_index((size*sizeof(Scalar))/sizeof(ListEl));
    m_buffer = new Scalar[size];
  }
  m_size = convert_index(size);
  m_start = 0;
  m_end = m_size;
}
protected:
  // Element type of the linked list used in sparse mode.
  // FIX: the comment had swallowed `struct ListEl` on a single joined line,
  // leaving the `{` below without a declaration.
  struct ListEl
  {
    StorageIndex next;   // position of the next node in m_buffer, -1 for the list tail
    StorageIndex index;  // coefficient index within the vector
    Scalar value;        // coefficient value
  };
// used to store data in both mode
Scalar* m_buffer;                 // raw storage: Scalars in dense mode, ListEl nodes (via reinterpret_cast) in sparse mode
Scalar m_zero;                    // NOTE(review): presumably returned as a dummy reference for absent coefficients in read access — confirm against coeff()
StorageIndex m_size;              // logical size of the vector
StorageIndex m_start;             // first index of the current sub-vector (dense mode)
StorageIndex m_end;               // one past the last index of the current sub-vector (dense mode)
StorageIndex m_allocatedSize;     // NOTE(review): not referenced in this chunk — presumably the buffer capacity in Scalars; confirm in reallocateSparse()
StorageIndex m_allocatedElements; // number of ListEl nodes that fit in m_buffer (sparse-mode capacity)
StorageIndex m_mode;              // IsDense or IsSparse
/** \returns the number of non zeros in the current sub vector */
template<typename _Scalar,typename _StorageIndex>
Index AmbiVector<_Scalar,_StorageIndex>::nonZeros() const
{
  // Sparse mode tracks the linked-list length; dense mode counts the active range.
  return m_mode==IsSparse ? Index(m_llSize) : Index(m_end - m_start);
}
/** Switches the vector to \a mode (IsDense or IsSparse) and resets the
  * linked-list bookkeeping. */
template<typename _Scalar,typename _StorageIndex>
void AmbiVector<_Scalar,_StorageIndex>::init(int mode)
{
  m_mode = mode;
  // Only meaningful when m_mode==IsSparse, but reset unconditionally to
  // silence maybe-uninitialized warnings.
  m_llSize = 0;
  m_llStart = -1;
}
/** Must be called whenever we might perform a write access
  * with an index smaller than the previous one.
  *
  * Don't worry, this function is extremely cheap.
  */
template<typename _Scalar,typename _StorageIndex>
void AmbiVector<_Scalar,_StorageIndex>::restart()
{
  // Rewind the sparse-mode search cursor to the head of the linked list.
  m_llCurrent = m_llStart;
}
/** Set all coefficients of current subvector to zero */ template<typename _Scalar,typename _StorageIndex> void AmbiVector<_Scalar,_StorageIndex>::setZero()
{ if (m_mode==IsDense)
{ for (Index i=m_start; i<m_end; ++i)
m_buffer[i] = Scalar(0);
} else
{
eigen_assert(m_mode==IsSparse);
m_llSize = 0;
m_llStart = -1;
}
}
/** \returns a writable reference to the coefficient \a i.
  *
  * Dense mode is a direct buffer access. Sparse mode searches the linked
  * list starting at m_llCurrent and inserts a zero-initialized node if the
  * coefficient is not present yet; between two calls to restart(), write
  * accesses must therefore use non-decreasing indices (see the assert).
  *
  * FIX: the invalid token `elseif` is replaced by `else if`, and the
  * `return llElements[m_llCurrent].value;` that had been swallowed by the
  * "already exists" comment on a joined line is restored.
  */
template<typename _Scalar,typename _StorageIndex>
_Scalar& AmbiVector<_Scalar,_StorageIndex>::coeffRef(Index i)
{
  if (m_mode==IsDense)
    return m_buffer[i];
  else
  {
    ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_buffer);
    // TODO factorize the following code to reduce code generation
    eigen_assert(m_mode==IsSparse);
    if (m_llSize==0)
    {
      // this is the first element
      m_llStart = 0;
      m_llCurrent = 0;
      ++m_llSize;
      llElements[0].value = Scalar(0);
      llElements[0].index = convert_index(i);
      llElements[0].next = -1;
      return llElements[0].value;
    }
    else if (i<llElements[m_llStart].index)
    {
      // this is going to be the new first element of the list
      ListEl& el = llElements[m_llSize];
      el.value = Scalar(0);
      el.index = convert_index(i);
      el.next = m_llStart;
      m_llStart = m_llSize;
      ++m_llSize;
      m_llCurrent = m_llStart;
      return el.value;
    }
    else
    {
      StorageIndex nextel = llElements[m_llCurrent].next;
      eigen_assert(i>=llElements[m_llCurrent].index && "you must call restart() before inserting an element with lower or equal index");
      // advance the cursor to the last node with index <= i
      while (nextel >= 0 && llElements[nextel].index<=i)
      {
        m_llCurrent = nextel;
        nextel = llElements[nextel].next;
      }

      if (llElements[m_llCurrent].index==i)
      {
        // the coefficient already exists and we found it !
        return llElements[m_llCurrent].value;
      }
      else
      {
        if (m_llSize>=m_allocatedElements)
        {
          reallocateSparse();
          // the buffer may have moved: refresh the node pointer
          llElements = reinterpret_cast<ListEl*>(m_buffer);
        }
        eigen_internal_assert(m_llSize<m_allocatedElements && "internal error: overflow in sparse mode");
        // let's insert a new coefficient
        ListEl& el = llElements[m_llSize];
        el.value = Scalar(0);
        el.index = convert_index(i);
        el.next = llElements[m_llCurrent].next;
        llElements[m_llCurrent].next = m_llSize;
        ++m_llSize;
        return el.value;
      }
    }
  }
}
if (llElements[elid].index==i) return llElements[m_llCurrent].value; else return m_zero;
}
}
}
/** Iterator over the nonzero coefficients */
template<typename _Scalar,typename _StorageIndex>
class AmbiVector<_Scalar,_StorageIndex>::Iterator
{
  public:
    typedef _Scalar Scalar;
    // FIX: was the invalid token `typedeftypename` (missing space).
    typedef typename NumTraits<Scalar>::Real RealScalar;

    /** Default constructor
      * \param vec the vector on which we iterate
      * \param epsilon the minimal value used to prune zero coefficients.
      * In practice, all coefficients having a magnitude smaller than \a epsilon
      * are skipped.
      */
    explicit Iterator(const AmbiVector& vec, const RealScalar& epsilon = 0)
      : m_vector(vec)
    {
      using std::abs;
      m_epsilon = epsilon;
      m_isDense = m_vector.m_mode==IsDense;
      if (m_isDense)
      {
        m_currentEl = 0;   // this is to avoid a compilation warning
        m_cachedValue = 0; // this is to avoid a compilation warning
        // start just before the range so that operator++ finds the first nonzero
        m_cachedIndex = m_vector.m_start-1;
        ++(*this);
      }
      else
      {
        ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
        // skip leading nodes whose magnitude does not exceed epsilon
        m_currentEl = m_vector.m_llStart;
        while (m_currentEl>=0 && abs(llElements[m_currentEl].value)<=m_epsilon)
          m_currentEl = llElements[m_currentEl].next;
        if (m_currentEl<0)
        {
          m_cachedValue = 0; // this is to avoid a compilation warning
          m_cachedIndex = -1;
        }
        else
        {
          m_cachedIndex = llElements[m_currentEl].index;
          m_cachedValue = llElements[m_currentEl].value;
        }
      }
    }

    // FIX(review): the cached index/value were computed but unreachable — the
    // class exposed no accessors at all. These are the conventional read-only
    // accessors for an Eigen-style iterator (backward-compatible addition).
    StorageIndex index() const { return m_cachedIndex; }
    Scalar value() const { return m_cachedValue; }
    operator bool() const { return m_cachedIndex>=0; }

    /** Advances to the next coefficient with magnitude > epsilon;
      * m_cachedIndex becomes -1 when the end is reached. */
    Iterator& operator++()
    {
      using std::abs;
      if (m_isDense)
      {
        do {
          ++m_cachedIndex;
        } while (m_cachedIndex<m_vector.m_end && abs(m_vector.m_buffer[m_cachedIndex])<=m_epsilon);
        if (m_cachedIndex<m_vector.m_end)
          m_cachedValue = m_vector.m_buffer[m_cachedIndex];
        else
          m_cachedIndex=-1;
      }
      else
      {
        ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
        do {
          m_currentEl = llElements[m_currentEl].next;
        } while (m_currentEl>=0 && abs(llElements[m_currentEl].value)<=m_epsilon);
        if (m_currentEl<0)
        {
          m_cachedIndex = -1;
        }
        else
        {
          m_cachedIndex = llElements[m_currentEl].index;
          m_cachedValue = llElements[m_currentEl].value;
        }
      }
      return *this;
    }

  protected:
    const AmbiVector& m_vector; // the target vector
    StorageIndex m_currentEl;   // the current element in sparse/linked-list mode
    RealScalar m_epsilon;       // epsilon used to prune zero coefficients
    StorageIndex m_cachedIndex; // current coordinate
    Scalar m_cachedValue;       // current value
    // FIX: this declaration had been swallowed by the comment on the
    // preceding joined line.
    bool m_isDense;             // mode of the vector
};
// NOTE(review): the following German website disclaimer is an extraction
// artifact and was never part of this source file; it is kept here commented
// out so the file compiles:
// "Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig
//  zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
//  noch Qualität der bereit gestellten Informationen zugesichert.
//  Bemerkung: Die farbliche Syntaxdarstellung ist noch experimentell."