+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_QTMALLOC_MODULE_H
#define EIGEN_QTMALLOC_MODULE_H
@@ -8,7 +14,7 @@
#include "src/Core/util/DisableStupidWarnings.h"
-void *qMalloc(size_t size)
+void *qMalloc(std::size_t size)
{
return Eigen::internal::aligned_malloc(size);
}
@@ -18,7 +24,7 @@ void qFree(void *ptr)
Eigen::internal::aligned_free(ptr);
}
-void *qRealloc(void *ptr, size_t size)
+void *qRealloc(void *ptr, std::size_t size)
{
void* newPtr = Eigen::internal::aligned_malloc(size);
memcpy(newPtr, ptr, size);
...
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPQRSUPPORT_MODULE_H
#define EIGEN_SPQRSUPPORT_MODULE_H
@@ -21,8 +28,6 @@
*
*/
-#include "src/misc/Solve.h"
-#include "src/misc/SparseSolve.h"
#include "src/CholmodSupport/CholmodSupport.h"
#include "src/SPQRSupport/SuiteSparseQRSupport.h"
...
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SVD_MODULE_H
#define EIGEN_SVD_MODULE_H
@@ -12,23 +19,26 @@
*
*
* This module provides SVD decomposition for matrices (both real and complex).
-* This decomposition is accessible via the following MatrixBase method:
+* Two decomposition algorithms are provided:
+*  - JacobiSVD implementing two-sided Jacobi iterations is numerically very accurate, fast for small matrices, but very slow for larger ones.
+*  - BDCSVD implementing a recursive divide & conquer strategy on top of an upper-bidiagonalization which remains fast for large problems.
+* These decompositions are accessible via the respective classes and following MatrixBase methods:
* - MatrixBase::jacobiSvd()
+* - MatrixBase::bdcSvd()
*
* \code
* #include <Eigen/SVD>
* \endcode
*/
-#include "src/misc/Solve.h"
+#include "src/misc/RealSvd2x2.h"
+#include "src/SVD/UpperBidiagonalization.h"
+#include "src/SVD/SVDBase.h"
#include "src/SVD/JacobiSVD.h"
+#include "src/SVD/BDCSVD.h"
#if defined(EIGEN_USE_LAPACKE) && !defined(EIGEN_USE_LAPACKE_STRICT)
-#include "src/SVD/JacobiSVD_MKL.h"
-#endif
-#include "src/SVD/UpperBidiagonalization.h"
-#ifdef EIGEN2_SUPPORT
-#include "src/Eigen2Support/SVD.h"
+#include "src/misc/lapacke.h"
+#include "src/SVD/JacobiSVD_LAPACKE.h"
#endif
#include "src/Core/util/ReenableStupidWarnings.h"
...
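For context, here is a minimal usage sketch of the two SVD implementations named in the module documentation above (JacobiSVD and BDCSVD, reached through MatrixBase::jacobiSvd() and MatrixBase::bdcSvd()); the matrix sizes and the thin-unitary flags are illustrative assumptions, not taken from this changeset:

    #include <Eigen/Dense>   // also brings in <Eigen/SVD>
    #include <iostream>

    int main()
    {
      // Overdetermined least-squares problem with random data (illustrative only).
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(100, 40);
      Eigen::VectorXd b = Eigen::VectorXd::Random(100);

      // Two-sided Jacobi: very accurate, best suited to small matrices.
      Eigen::JacobiSVD<Eigen::MatrixXd> jsvd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);

      // Divide & conquer on top of an upper-bidiagonalization: stays fast for large problems.
      Eigen::VectorXd x = A.bdcSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(b);

      std::cout << "largest singular value: " << jsvd.singularValues()(0) << "\n";
      std::cout << "least-squares residual: " << (A * x - b).norm() << "\n";
    }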
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSE_MODULE_H
#define EIGEN_SPARSE_MODULE_H
@@ -11,14 +18,16 @@
* - \ref SparseQR_Module
* - \ref IterativeLinearSolvers_Module
*
-* \code
-* #include <Eigen/Sparse>
-* \endcode
+\code
+#include <Eigen/Sparse>
+\endcode
*/
#include "SparseCore"
#include "OrderingMethods"
+#ifndef EIGEN_MPL2_ONLY
#include "SparseCholesky"
+#endif
#include "SparseLU"
#include "SparseQR"
#include "IterativeLinearSolvers"
...
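As a usage note for the meta-module above, the sketch below assembles a small sparse system and solves it with SimplicialLDLT from the SparseCholesky module; everything here, including the 1-D Laplacian-style test matrix, is an illustrative assumption rather than part of the patch:

    #include <Eigen/Sparse>
    #include <vector>

    int main()
    {
      typedef Eigen::SparseMatrix<double> SpMat;
      typedef Eigen::Triplet<double> T;

      const int n = 5;
      std::vector<T> coeffs;
      for (int i = 0; i < n; ++i) {
        coeffs.push_back(T(i, i, 4.0));
        if (i + 1 < n) {
          coeffs.push_back(T(i, i + 1, -1.0));
          coeffs.push_back(T(i + 1, i, -1.0));
        }
      }
      SpMat A(n, n);
      A.setFromTriplets(coeffs.begin(), coeffs.end());

      Eigen::VectorXd b = Eigen::VectorXd::Ones(n);
      Eigen::SimplicialLDLT<SpMat> solver(A);   // from SparseCholesky, guarded by EIGEN_MPL2_ONLY above
      Eigen::VectorXd x = solver.solve(b);
      return solver.info() == Eigen::Success ? 0 : 1;
    }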
@@ -34,8 +34,6 @@
#error The SparseCholesky module has nothing to offer in MPL2 only mode
#endif
-#include "src/misc/Solve.h"
-#include "src/misc/SparseSolve.h"
#include "src/SparseCholesky/SimplicialCholesky.h"
#ifndef EIGEN_MPL2_ONLY
...
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSECORE_MODULE_H
#define EIGEN_SPARSECORE_MODULE_H
@@ -26,37 +33,35 @@
* This module depends on: Core.
*/
-namespace Eigen {
-/** The type used to identify a general sparse storage. */
-struct Sparse {};
-}
#include "src/SparseCore/SparseUtil.h"
#include "src/SparseCore/SparseMatrixBase.h"
+#include "src/SparseCore/SparseAssign.h"
#include "src/SparseCore/CompressedStorage.h"
#include "src/SparseCore/AmbiVector.h"
+#include "src/SparseCore/SparseCompressedBase.h"
#include "src/SparseCore/SparseMatrix.h"
+#include "src/SparseCore/SparseMap.h"
#include "src/SparseCore/MappedSparseMatrix.h"
#include "src/SparseCore/SparseVector.h"
-#include "src/SparseCore/SparseBlock.h"
-#include "src/SparseCore/SparseTranspose.h"
+#include "src/SparseCore/SparseRef.h"
#include "src/SparseCore/SparseCwiseUnaryOp.h"
#include "src/SparseCore/SparseCwiseBinaryOp.h"
+#include "src/SparseCore/SparseTranspose.h"
+#include "src/SparseCore/SparseBlock.h"
#include "src/SparseCore/SparseDot.h"
-#include "src/SparseCore/SparsePermutation.h"
#include "src/SparseCore/SparseRedux.h"
-#include "src/SparseCore/SparseFuzzy.h"
+#include "src/SparseCore/SparseView.h"
+#include "src/SparseCore/SparseDiagonalProduct.h"
#include "src/SparseCore/ConservativeSparseSparseProduct.h"
#include "src/SparseCore/SparseSparseProductWithPruning.h"
#include "src/SparseCore/SparseProduct.h"
#include "src/SparseCore/SparseDenseProduct.h"
-#include "src/SparseCore/SparseDiagonalProduct.h"
-#include "src/SparseCore/SparseTriangularView.h"
#include "src/SparseCore/SparseSelfAdjointView.h"
+#include "src/SparseCore/SparseTriangularView.h"
#include "src/SparseCore/TriangularSolver.h"
-#include "src/SparseCore/SparseView.h"
+#include "src/SparseCore/SparsePermutation.h"
+#include "src/SparseCore/SparseFuzzy.h"
+#include "src/SparseCore/SparseSolverBase.h"
#include "src/Core/util/ReenableStupidWarnings.h"
...
@@ -20,9 +20,6 @@
* Please, see the documentation of the SparseLU class for more details.
*/
-#include "src/misc/Solve.h"
-#include "src/misc/SparseSolve.h"
// Ordering interface
#include "OrderingMethods"
...
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSEQR_MODULE_H
#define EIGEN_SPARSEQR_MODULE_H
@@ -21,9 +28,6 @@
*
*/
-#include "src/misc/Solve.h"
-#include "src/misc/SparseSolve.h"
#include "OrderingMethods"
#include "src/SparseCore/SparseColEtree.h"
#include "src/SparseQR/SparseQR.h"
...
@@ -14,7 +14,7 @@
#include "Core"
#include <deque>
-#if (defined(_MSC_VER) && defined(_WIN64)) /* MSVC auto aligns in 64 bit builds */
+#if EIGEN_COMP_MSVC && EIGEN_OS_WIN64 && (EIGEN_MAX_STATIC_ALIGN_BYTES<=16) /* MSVC auto aligns up to 16 bytes in 64 bit builds */
#define EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(...)
...
@@ -13,7 +13,7 @@
#include "Core"
#include <list>
-#if (defined(_MSC_VER) && defined(_WIN64)) /* MSVC auto aligns in 64 bit builds */
+#if EIGEN_COMP_MSVC && EIGEN_OS_WIN64 && (EIGEN_MAX_STATIC_ALIGN_BYTES<=16) /* MSVC auto aligns up to 16 bytes in 64 bit builds */
#define EIGEN_DEFINE_STL_LIST_SPECIALIZATION(...)
...
@@ -14,7 +14,7 @@
#include "Core"
#include <vector>
-#if (defined(_MSC_VER) && defined(_WIN64)) /* MSVC auto aligns in 64 bit builds */
+#if EIGEN_COMP_MSVC && EIGEN_OS_WIN64 && (EIGEN_MAX_STATIC_ALIGN_BYTES<=16) /* MSVC auto aligns up to 16 bytes in 64 bit builds */
#define EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(...)
...
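The three hunks above only change when these macros collapse to a no-op (MSVC on Win64 with at most 16-byte static alignment). For reference, a hedged sketch of the documented usage pattern on compilers that do need the specialization; Vector4f and the container contents are illustrative assumptions:

    #include <Eigen/StdVector>
    #include <vector>

    // Must appear at global scope, after the include and before the first use of
    // std::vector with a fixed-size vectorizable Eigen type.
    EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(Eigen::Vector4f)

    int main()
    {
      std::vector<Eigen::Vector4f> pts(8, Eigen::Vector4f::Zero());
      pts[3] = Eigen::Vector4f(1.f, 2.f, 3.f, 4.f);
      return static_cast<int>(pts.size()) - 8;   // 0 on success
    }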
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SUPERLUSUPPORT_MODULE_H
#define EIGEN_SUPERLUSUPPORT_MODULE_H
@@ -36,6 +43,8 @@ namespace Eigen { struct SluMatrix; }
* - class SuperLU: a supernodal sequential LU factorization.
* - class SuperILU: a supernodal sequential incomplete LU factorization (to be used as a preconditioner for iterative methods).
*
+* \warning This wrapper requires at least versions 4.0 of SuperLU. The 3.x versions are not supported.
+*
* \warning When including this module, you have to use SUPERLU_EMPTY instead of EMPTY which is no longer defined because it is too polluting.
*
* \code
@@ -48,12 +57,8 @@ namespace Eigen { struct SluMatrix; }
*
*/
-#include "src/misc/Solve.h"
-#include "src/misc/SparseSolve.h"
#include "src/SuperLUSupport/SuperLUSupport.h"
#include "src/Core/util/ReenableStupidWarnings.h"
#endif // EIGEN_SUPERLUSUPPORT_MODULE_H
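A hedged sketch of how the two wrapper classes listed above are typically used (it assumes SuperLU >= 4.0 is installed and linked, per the new warning; the fill_system() helper is hypothetical):

    #include <Eigen/SuperLUSupport>

    // Hypothetical helper, assumed to build some sparse system elsewhere.
    void fill_system(Eigen::SparseMatrix<double>& A, Eigen::VectorXd& b);

    int solve_with_superlu()
    {
      Eigen::SparseMatrix<double> A;
      Eigen::VectorXd b;
      fill_system(A, b);

      Eigen::SuperLU<Eigen::SparseMatrix<double> > solver;
      solver.compute(A);                         // supernodal sequential LU
      if (solver.info() != Eigen::Success) return 1;

      Eigen::VectorXd x = solver.solve(b);
      return solver.info() == Eigen::Success ? 0 : 1;
    }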
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_UMFPACKSUPPORT_MODULE_H
#define EIGEN_UMFPACKSUPPORT_MODULE_H
@@ -26,9 +33,6 @@ extern "C" {
*
*/
-#include "src/misc/Solve.h"
-#include "src/misc/SparseSolve.h"
#include "src/UmfPackSupport/UmfPackSupport.h"
#include "src/Core/util/ReenableStupidWarnings.h"
...
file(GLOB Eigen_src_subdirectories "*")
escape_string_as_regex(ESCAPED_CMAKE_CURRENT_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}")
foreach(f ${Eigen_src_subdirectories})
if(NOT f MATCHES "\\.txt" AND NOT f MATCHES "${ESCAPED_CMAKE_CURRENT_SOURCE_DIR}/[.].+" )
add_subdirectory(${f})
endif()
endforeach()
FILE(GLOB Eigen_Cholesky_SRCS "*.h")
INSTALL(FILES
${Eigen_Cholesky_SRCS}
DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/Cholesky COMPONENT Devel
)
@@ -13,7 +13,7 @@
#ifndef EIGEN_LDLT_H
#define EIGEN_LDLT_H
namespace Eigen {
namespace internal {
template<typename MatrixType, int UpLo> struct LDLT_Traits;
@@ -28,8 +28,8 @@ namespace internal {
*
* \brief Robust Cholesky decomposition of a matrix with pivoting
*
-* \param MatrixType the type of the matrix of which to compute the LDL^T Cholesky decomposition
-* \param UpLo the triangular part that will be used for the decompositon: Lower (default) or Upper.
+* \tparam _MatrixType the type of the matrix of which to compute the LDL^T Cholesky decomposition
+* \tparam _UpLo the triangular part that will be used for the decompositon: Lower (default) or Upper.
* The other triangular part won't be read.
*
* Perform a robust Cholesky decomposition of a positive semidefinite or negative semidefinite
@@ -43,7 +43,9 @@ namespace internal {
* Remember that Cholesky decompositions are not rank-revealing. Also, do not use a Cholesky
* decomposition to determine whether a system of equations has a solution.
*
-* \sa MatrixBase::ldlt(), class LLT
+* This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism.
+*
+* \sa MatrixBase::ldlt(), SelfAdjointView::ldlt(), class LLT
*/
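To make the factorization described above concrete, here is a small sketch that reconstructs A from the P^T L D L^* P factors exposed by the class; the random semidefinite test matrix is an illustrative assumption:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd B = Eigen::MatrixXd::Random(4, 4);
      Eigen::MatrixXd A = B * B.transpose();                 // positive semidefinite by construction

      Eigen::LDLT<Eigen::MatrixXd> ldlt(A);
      Eigen::MatrixXd L = ldlt.matrixL();
      Eigen::VectorXd D = ldlt.vectorD();
      Eigen::MatrixXd P = ldlt.transpositionsP() * Eigen::MatrixXd::Identity(4, 4);

      // A should equal P^T * L * D * L^* * P up to rounding.
      Eigen::MatrixXd R = P.transpose() * L * D.asDiagonal() * L.adjoint() * P;
      std::cout << "reconstruction error: " << (R - A).norm() << "\n";
      std::cout << "isPositive(): " << ldlt.isPositive() << "\n";
    }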
template<typename _MatrixType, int _UpLo> class LDLT
{
@@ -52,15 +54,15 @@ template<typename _MatrixType, int _UpLo> class LDLT
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
-Options = MatrixType::Options & ~RowMajorBit, // these are the options for the TmpMatrixType, we need a ColMajor matrix here!
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
UpLo = _UpLo
};
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
-typedef typename MatrixType::Index Index;
-typedef Matrix<Scalar, RowsAtCompileTime, 1, Options, MaxRowsAtCompileTime, 1> TmpMatrixType;
+typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
+typedef typename MatrixType::StorageIndex StorageIndex;
+typedef Matrix<Scalar, RowsAtCompileTime, 1, 0, MaxRowsAtCompileTime, 1> TmpMatrixType;
typedef Transpositions<RowsAtCompileTime, MaxRowsAtCompileTime> TranspositionType;
typedef PermutationMatrix<RowsAtCompileTime, MaxRowsAtCompileTime> PermutationType;
@@ -72,11 +74,11 @@ template<typename _MatrixType, int _UpLo> class LDLT
* The default constructor is useful in cases in which the user intends to
* perform decompositions via LDLT::compute(const MatrixType&).
*/
LDLT()
: m_matrix(),
m_transpositions(),
m_sign(internal::ZeroSign),
m_isInitialized(false)
{}
/** \brief Default Constructor with memory preallocation
@@ -85,7 +87,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
* according to the specified problem \a size.
* \sa LDLT()
*/
-LDLT(Index size)
+explicit LDLT(Index size)
: m_matrix(size, size),
m_transpositions(size),
m_temporary(size),
@@ -96,16 +98,35 @@ template<typename _MatrixType, int _UpLo> class LDLT
/** \brief Constructor with decomposition
*
* This calculates the decomposition for the input \a matrix.
+*
* \sa LDLT(Index size)
*/
-LDLT(const MatrixType& matrix)
+template<typename InputType>
+explicit LDLT(const EigenBase<InputType>& matrix)
: m_matrix(matrix.rows(), matrix.cols()),
m_transpositions(matrix.rows()),
m_temporary(matrix.rows()),
m_sign(internal::ZeroSign),
m_isInitialized(false)
{
-compute(matrix);
+compute(matrix.derived());
+}
+/** \brief Constructs a LDLT factorization from a given matrix
+*
+* This overloaded constructor is provided for \link InplaceDecomposition inplace decomposition \endlink when \c MatrixType is a Eigen::Ref.
+*
+* \sa LDLT(const EigenBase&)
+*/
+template<typename InputType>
+explicit LDLT(EigenBase<InputType>& matrix)
+: m_matrix(matrix.derived()),
+m_transpositions(matrix.rows()),
+m_temporary(matrix.rows()),
+m_sign(internal::ZeroSign),
+m_isInitialized(false)
+{
+compute(matrix.derived());
}
/** Clear any existing decomposition
@@ -151,13 +172,6 @@ template<typename _MatrixType, int _UpLo> class LDLT
eigen_assert(m_isInitialized && "LDLT is not initialized.");
return m_sign == internal::PositiveSemiDef || m_sign == internal::ZeroSign;
}
-#ifdef EIGEN2_SUPPORT
-inline bool isPositiveDefinite() const
-{
-return isPositive();
-}
-#endif
/** \returns true if the matrix is negative (semidefinite) */
inline bool isNegative(void) const
@@ -173,37 +187,38 @@ template<typename _MatrixType, int _UpLo> class LDLT
* \note_about_checking_solutions
*
* More precisely, this method solves \f$ A x = b \f$ using the decomposition \f$ A = P^T L D L^* P \f$
* by solving the systems \f$ P^T y_1 = b \f$, \f$ L y_2 = y_1 \f$, \f$ D y_3 = y_2 \f$,
* \f$ L^* y_4 = y_3 \f$ and \f$ P x = y_4 \f$ in succession. If the matrix \f$ A \f$ is singular, then
* \f$ D \f$ will also be singular (all the other matrices are invertible). In that case, the
* least-square solution of \f$ D y_3 = y_2 \f$ is computed. This does not mean that this function
* computes the least-square solution of \f$ A x = b \f$ is \f$ A \f$ is singular.
*
-* \sa MatrixBase::ldlt()
+* \sa MatrixBase::ldlt(), SelfAdjointView::ldlt()
*/
template<typename Rhs>
-inline const internal::solve_retval<LDLT, Rhs>
+inline const Solve<LDLT, Rhs>
solve(const MatrixBase<Rhs>& b) const
{
eigen_assert(m_isInitialized && "LDLT is not initialized.");
eigen_assert(m_matrix.rows()==b.rows()
&& "LDLT::solve(): invalid number of rows of the right hand side matrix b");
-return internal::solve_retval<LDLT, Rhs>(*this, b.derived());
+return Solve<LDLT, Rhs>(*this, b.derived());
}
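A short sketch of the solve() path shown above together with the info() and rcond() members touched by this patch; the random symmetric test matrix is an illustrative assumption:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      const int n = 50;
      Eigen::MatrixXd B = Eigen::MatrixXd::Random(n, n);
      Eigen::MatrixXd A = B + B.transpose();                 // symmetric, possibly indefinite
      Eigen::VectorXd b = Eigen::VectorXd::Random(n);

      Eigen::LDLT<Eigen::MatrixXd> ldlt(A);
      if (ldlt.info() != Eigen::Success) {                   // NumericalIssue with the new m_info logic
        std::cerr << "LDLT factorization reported a problem\n";
        return 1;
      }
      Eigen::VectorXd x = ldlt.solve(b);
      std::cout << "relative residual: " << (A * x - b).norm() / b.norm() << "\n";
      std::cout << "rcond() estimate:  " << ldlt.rcond() << "\n";
    }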
-#ifdef EIGEN2_SUPPORT
-template<typename OtherDerived, typename ResultType>
-bool solve(const MatrixBase<OtherDerived>& b, ResultType *result) const
-{
-*result = this->solve(b);
-return true;
-}
-#endif
template<typename Derived>
bool solveInPlace(MatrixBase<Derived> &bAndX) const;
-LDLT& compute(const MatrixType& matrix);
+template<typename InputType>
+LDLT& compute(const EigenBase<InputType>& matrix);
+/** \returns an estimate of the reciprocal condition number of the matrix of
+* which \c *this is the LDLT decomposition.
+*/
+RealScalar rcond() const
+{
+eigen_assert(m_isInitialized && "LDLT is not initialized.");
+return internal::rcond_estimate_helper(m_l1_norm, *this);
+}
template <typename Derived>
LDLT& rankUpdate(const MatrixBase<Derived>& w, const RealScalar& alpha=1);
@@ -220,6 +235,13 @@ template<typename _MatrixType, int _UpLo> class LDLT
MatrixType reconstructedMatrix() const;
+/** \returns the adjoint of \c *this, that is, a const reference to the decomposition itself as the underlying matrix is self-adjoint.
+*
+* This method is provided for compatibility with other matrix decompositions, thus enabling generic code such as:
+* \code x = decomposition.adjoint().solve(b) \endcode
+*/
+const LDLT& adjoint() const { return *this; };
inline Index rows() const { return m_matrix.rows(); }
inline Index cols() const { return m_matrix.cols(); }
@@ -231,11 +253,17 @@ template<typename _MatrixType, int _UpLo> class LDLT
ComputationInfo info() const
{
eigen_assert(m_isInitialized && "LDLT is not initialized.");
-return Success;
+return m_info;
}
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+template<typename RhsType, typename DstType>
+EIGEN_DEVICE_FUNC
+void _solve_impl(const RhsType &rhs, DstType &dst) const;
+#endif
protected:
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
@@ -248,10 +276,12 @@ template<typename _MatrixType, int _UpLo> class LDLT
* is not stored), and the diagonal entries correspond to D.
*/
MatrixType m_matrix;
+RealScalar m_l1_norm;
TranspositionType m_transpositions;
TmpMatrixType m_temporary;
internal::SignMatrix m_sign;
bool m_isInitialized;
+ComputationInfo m_info;
};
namespace internal {
@@ -266,15 +296,17 @@ template<> struct ldlt_inplace<Lower>
using std::abs;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
-typedef typename MatrixType::Index Index;
+typedef typename TranspositionType::StorageIndex IndexType;
eigen_assert(mat.rows()==mat.cols());
const Index size = mat.rows();
+bool found_zero_pivot = false;
+bool ret = true;
if (size <= 1)
{
transpositions.setIdentity();
-if (numext::real(mat.coeff(0,0)) > 0) sign = PositiveSemiDef;
-else if (numext::real(mat.coeff(0,0)) < 0) sign = NegativeSemiDef;
+if (numext::real(mat.coeff(0,0)) > static_cast<RealScalar>(0) ) sign = PositiveSemiDef;
+else if (numext::real(mat.coeff(0,0)) < static_cast<RealScalar>(0)) sign = NegativeSemiDef;
else sign = ZeroSign;
return true;
}
@@ -286,7 +318,7 @@ template<> struct ldlt_inplace<Lower>
mat.diagonal().tail(size-k).cwiseAbs().maxCoeff(&index_of_biggest_in_corner);
index_of_biggest_in_corner += k;
-transpositions.coeffRef(k) = index_of_biggest_in_corner;
+transpositions.coeffRef(k) = IndexType(index_of_biggest_in_corner);
if(k != index_of_biggest_in_corner)
{
// apply the transposition while taking care to consider only
@@ -295,7 +327,7 @@ template<> struct ldlt_inplace<Lower>
mat.row(k).head(k).swap(mat.row(index_of_biggest_in_corner).head(k));
mat.col(k).tail(s).swap(mat.col(index_of_biggest_in_corner).tail(s));
std::swap(mat.coeffRef(k,k),mat.coeffRef(index_of_biggest_in_corner,index_of_biggest_in_corner));
-for(int i=k+1;i<index_of_biggest_in_corner;++i)
+for(Index i=k+1;i<index_of_biggest_in_corner;++i)
{
Scalar tmp = mat.coeffRef(i,k);
mat.coeffRef(i,k) = numext::conj(mat.coeffRef(index_of_biggest_in_corner,i));
@@ -321,26 +353,44 @@ template<> struct ldlt_inplace<Lower>
if(rs>0)
A21.noalias() -= A20 * temp.head(k);
}
// In some previous versions of Eigen (e.g., 3.2.1), the scaling was omitted if the pivot
-// was smaller than the cutoff value. However, soince LDLT is not rank-revealing
-// we should only make sure we do not introduce INF or NaN values.
-// LAPACK also uses 0 as the cutoff value.
+// was smaller than the cutoff value. However, since LDLT is not rank-revealing
+// we should only make sure that we do not introduce INF or NaN values.
+// Remark that LAPACK also uses 0 as the cutoff value.
RealScalar realAkk = numext::real(mat.coeffRef(k,k));
-if((rs>0) && (abs(realAkk) > RealScalar(0)))
+bool pivot_is_valid = (abs(realAkk) > RealScalar(0));
+if(k==0 && !pivot_is_valid)
+{
+// The entire diagonal is zero, there is nothing more to do
+// except filling the transpositions, and checking whether the matrix is zero.
+sign = ZeroSign;
+for(Index j = 0; j<size; ++j)
+{
+transpositions.coeffRef(j) = IndexType(j);
+ret = ret && (mat.col(j).tail(size-j-1).array()==Scalar(0)).all();
+}
+return ret;
+}
+if((rs>0) && pivot_is_valid)
A21 /= realAkk;
+if(found_zero_pivot && pivot_is_valid) ret = false; // factorization failed
+else if(!pivot_is_valid) found_zero_pivot = true;
if (sign == PositiveSemiDef) {
-if (realAkk < 0) sign = Indefinite;
+if (realAkk < static_cast<RealScalar>(0)) sign = Indefinite;
} else if (sign == NegativeSemiDef) {
-if (realAkk > 0) sign = Indefinite;
+if (realAkk > static_cast<RealScalar>(0)) sign = Indefinite;
} else if (sign == ZeroSign) {
-if (realAkk > 0) sign = PositiveSemiDef;
-else if (realAkk < 0) sign = NegativeSemiDef;
+if (realAkk > static_cast<RealScalar>(0)) sign = PositiveSemiDef;
+else if (realAkk < static_cast<RealScalar>(0)) sign = NegativeSemiDef;
}
}
-return true;
+return ret;
}
// Reference for the algorithm: Davis and Hager, "Multiple Rank
@@ -356,7 +406,6 @@ template<> struct ldlt_inplace<Lower>
using numext::isfinite;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
-typedef typename MatrixType::Index Index;
const Index size = mat.rows();
eigen_assert(mat.cols() == size && w.size()==size);
@@ -420,16 +469,16 @@ template<typename MatrixType> struct LDLT_Traits<MatrixType,Lower>
{
typedef const TriangularView<const MatrixType, UnitLower> MatrixL;
typedef const TriangularView<const typename MatrixType::AdjointReturnType, UnitUpper> MatrixU;
-static inline MatrixL getL(const MatrixType& m) { return m; }
-static inline MatrixU getU(const MatrixType& m) { return m.adjoint(); }
+static inline MatrixL getL(const MatrixType& m) { return MatrixL(m); }
+static inline MatrixU getU(const MatrixType& m) { return MatrixU(m.adjoint()); }
};
template<typename MatrixType> struct LDLT_Traits<MatrixType,Upper>
{
typedef const TriangularView<const typename MatrixType::AdjointReturnType, UnitLower> MatrixL;
typedef const TriangularView<const MatrixType, UnitUpper> MatrixU;
-static inline MatrixL getL(const MatrixType& m) { return m.adjoint(); }
-static inline MatrixU getU(const MatrixType& m) { return m; }
+static inline MatrixL getL(const MatrixType& m) { return MatrixL(m.adjoint()); }
+static inline MatrixU getU(const MatrixType& m) { return MatrixU(m); }
};
} // end namespace internal
@@ -437,21 +486,35 @@ template<typename MatrixType> struct LDLT_Traits<MatrixType,Upper>
/** Compute / recompute the LDLT decomposition A = L D L^* = U^* D U of \a matrix
*/
template<typename MatrixType, int _UpLo>
-LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::compute(const MatrixType& a)
+template<typename InputType>
+LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::compute(const EigenBase<InputType>& a)
{
check_template_parameters();
eigen_assert(a.rows()==a.cols());
const Index size = a.rows();
-m_matrix = a;
+m_matrix = a.derived();
+// Compute matrix L1 norm = max abs column sum.
+m_l1_norm = RealScalar(0);
+// TODO move this code to SelfAdjointView
+for (Index col = 0; col < size; ++col) {
+RealScalar abs_col_sum;
+if (_UpLo == Lower)
+abs_col_sum = m_matrix.col(col).tail(size - col).template lpNorm<1>() + m_matrix.row(col).head(col).template lpNorm<1>();
+else
+abs_col_sum = m_matrix.col(col).head(col).template lpNorm<1>() + m_matrix.row(col).tail(size - col).template lpNorm<1>();
+if (abs_col_sum > m_l1_norm)
+m_l1_norm = abs_col_sum;
+}
m_transpositions.resize(size);
m_isInitialized = false;
m_temporary.resize(size);
m_sign = internal::ZeroSign;
-internal::ldlt_inplace<UpLo>::unblocked(m_matrix, m_transpositions, m_temporary, m_sign);
+m_info = internal::ldlt_inplace<UpLo>::unblocked(m_matrix, m_transpositions, m_temporary, m_sign) ? Success : NumericalIssue;
m_isInitialized = true;
return *this;
@@ -466,18 +529,19 @@ template<typename MatrixType, int _UpLo>
template<typename Derived>
LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::rankUpdate(const MatrixBase<Derived>& w, const typename LDLT<MatrixType,_UpLo>::RealScalar& sigma)
{
+typedef typename TranspositionType::StorageIndex IndexType;
const Index size = w.rows();
if (m_isInitialized)
{
eigen_assert(m_matrix.rows()==size);
}
else
{
m_matrix.resize(size,size);
m_matrix.setZero();
m_transpositions.resize(size);
for (Index i = 0; i < size; i++)
-m_transpositions.coeffRef(i) = i;
+m_transpositions.coeffRef(i) = IndexType(i);
m_temporary.resize(size);
m_sign = sigma>=0 ? internal::PositiveSemiDef : internal::NegativeSemiDef;
m_isInitialized = true;
@@ -488,53 +552,45 @@ LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::rankUpdate(const MatrixBase<Deri
return *this;
}
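The rankUpdate() path defined above refreshes an existing factorization for A + sigma * w * w^T instead of recomputing it from scratch (see the Davis and Hager reference earlier in the file); a hedged sketch, with purely illustrative test data:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      const int n = 20;
      Eigen::MatrixXd B = Eigen::MatrixXd::Random(n, n);
      Eigen::MatrixXd A = B * B.transpose() + Eigen::MatrixXd::Identity(n, n);
      Eigen::VectorXd w = Eigen::VectorXd::Random(n);
      const double sigma = 2.0;

      Eigen::LDLT<Eigen::MatrixXd> ldlt(A);
      ldlt.rankUpdate(w, sigma);                             // now factors A + sigma * w * w^T

      Eigen::MatrixXd updated = A + sigma * w * w.transpose();
      std::cout << (ldlt.reconstructedMatrix() - updated).norm() << "\n";
    }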
-namespace internal {
-template<typename _MatrixType, int _UpLo, typename Rhs>
-struct solve_retval<LDLT<_MatrixType,_UpLo>, Rhs>
-  : solve_retval_base<LDLT<_MatrixType,_UpLo>, Rhs>
-{
-typedef LDLT<_MatrixType,_UpLo> LDLTType;
-EIGEN_MAKE_SOLVE_HELPERS(LDLTType,Rhs)
-template<typename Dest> void evalTo(Dest& dst) const
-{
-eigen_assert(rhs().rows() == dec().matrixLDLT().rows());
-// dst = P b
-dst = dec().transpositionsP() * rhs();
-// dst = L^-1 (P b)
-dec().matrixL().solveInPlace(dst);
-// dst = D^-1 (L^-1 P b)
-// more precisely, use pseudo-inverse of D (see bug 241)
-using std::abs;
-using std::max;
-typedef typename LDLTType::MatrixType MatrixType;
-typedef typename LDLTType::RealScalar RealScalar;
-const typename Diagonal<const MatrixType>::RealReturnType vectorD(dec().vectorD());
-// In some previous versions, tolerance was set to the max of 1/highest and the maximal diagonal entry * epsilon
-// as motivated by LAPACK's xGELSS:
-// RealScalar tolerance = (max)(vectorD.array().abs().maxCoeff() *NumTraits<RealScalar>::epsilon(),RealScalar(1) / NumTraits<RealScalar>::highest());
-// However, LDLT is not rank revealing, and so adjusting the tolerance wrt to the highest
-// diagonal element is not well justified and to numerical issues in some cases.
-// Moreover, Lapack's xSYTRS routines use 0 for the tolerance.
-RealScalar tolerance = RealScalar(1) / NumTraits<RealScalar>::highest();
-for (Index i = 0; i < vectorD.size(); ++i) {
-if(abs(vectorD(i)) > tolerance)
-dst.row(i) /= vectorD(i);
-else
-dst.row(i).setZero();
-}
-// dst = L^-T (D^-1 L^-1 P b)
-dec().matrixU().solveInPlace(dst);
-// dst = P^-1 (L^-T D^-1 L^-1 P b) = A^-1 b
-dst = dec().transpositionsP().transpose() * dst;
-}
-};
-}
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+template<typename _MatrixType, int _UpLo>
+template<typename RhsType, typename DstType>
+void LDLT<_MatrixType,_UpLo>::_solve_impl(const RhsType &rhs, DstType &dst) const
+{
+eigen_assert(rhs.rows() == rows());
+// dst = P b
+dst = m_transpositions * rhs;
+// dst = L^-1 (P b)
+matrixL().solveInPlace(dst);
+// dst = D^-1 (L^-1 P b)
+// more precisely, use pseudo-inverse of D (see bug 241)
+using std::abs;
+const typename Diagonal<const MatrixType>::RealReturnType vecD(vectorD());
+// In some previous versions, tolerance was set to the max of 1/highest and the maximal diagonal entry * epsilon
+// as motivated by LAPACK's xGELSS:
+// RealScalar tolerance = numext::maxi(vecD.array().abs().maxCoeff() * NumTraits<RealScalar>::epsilon(),RealScalar(1) / NumTraits<RealScalar>::highest());
+// However, LDLT is not rank revealing, and so adjusting the tolerance wrt to the highest
+// diagonal element is not well justified and leads to numerical issues in some cases.
+// Moreover, Lapack's xSYTRS routines use 0 for the tolerance.
+RealScalar tolerance = RealScalar(1) / NumTraits<RealScalar>::highest();
+for (Index i = 0; i < vecD.size(); ++i)
+{
+if(abs(vecD(i)) > tolerance)
+dst.row(i) /= vecD(i);
+else
+dst.row(i).setZero();
+}
+// dst = L^-T (D^-1 L^-1 P b)
+matrixU().solveInPlace(dst);
+// dst = P^-1 (L^-T D^-1 L^-1 P b) = A^-1 b
+dst = m_transpositions.transpose() * dst;
+}
+#endif
/** \internal use x = ldlt_object.solve(x);
*
@@ -588,6 +644,7 @@ MatrixType LDLT<MatrixType,_UpLo>::reconstructedMatrix() const
/** \cholesky_module
* \returns the Cholesky decomposition with full pivoting without square root of \c *this
+* \sa MatrixBase::ldlt()
*/
template<typename MatrixType, unsigned int UpLo>
inline const LDLT<typename SelfAdjointView<MatrixType, UpLo>::PlainObject, UpLo>
@@ -598,6 +655,7 @@ SelfAdjointView<MatrixType, UpLo>::ldlt() const
/** \cholesky_module
* \returns the Cholesky decomposition with full pivoting without square root of \c *this
+* \sa SelfAdjointView::ldlt()
*/
template<typename Derived>
inline const LDLT<typename MatrixBase<Derived>::PlainObject>
...
@@ -10,7 +10,7 @@
#ifndef EIGEN_LLT_H
#define EIGEN_LLT_H
namespace Eigen {
namespace internal{
template<typename MatrixType, int UpLo> struct LLT_Traits;
@@ -22,8 +22,8 @@ template<typename MatrixType, int UpLo> struct LLT_Traits;
*
* \brief Standard Cholesky decomposition (LL^T) of a matrix and associated features
*
-* \param MatrixType the type of the matrix of which we are computing the LL^T Cholesky decomposition
-* \param UpLo the triangular part that will be used for the decompositon: Lower (default) or Upper.
+* \tparam _MatrixType the type of the matrix of which we are computing the LL^T Cholesky decomposition
+* \tparam _UpLo the triangular part that will be used for the decompositon: Lower (default) or Upper.
* The other triangular part won't be read.
*
* This class performs a LL^T Cholesky decomposition of a symmetric, positive definite
@@ -40,8 +40,10 @@ template<typename MatrixType, int UpLo> struct LLT_Traits;
*
* Example: \include LLT_example.cpp
* Output: \verbinclude LLT_example.out
*
-* \sa MatrixBase::llt(), class LDLT
+* This class supports the \link InplaceDecomposition inplace decomposition \endlink mechanism.
+*
+* \sa MatrixBase::llt(), SelfAdjointView::llt(), class LDLT
*/
/* HEY THIS DOX IS DISABLED BECAUSE THERE's A BUG EITHER HERE OR IN LDLT ABOUT THAT (OR BOTH)
* Note that during the decomposition, only the upper triangular part of A is considered. Therefore,
@@ -54,12 +56,12 @@ template<typename _MatrixType, int _UpLo> class LLT
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
-Options = MatrixType::Options,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
-typedef typename MatrixType::Index Index;
+typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
+typedef typename MatrixType::StorageIndex StorageIndex;
enum {
PacketSize = internal::packet_traits<Scalar>::size,
@@ -83,14 +85,30 @@ template<typename _MatrixType, int _UpLo> class LLT
* according to the specified problem \a size.
* \sa LLT()
*/
-LLT(Index size) : m_matrix(size, size),
+explicit LLT(Index size) : m_matrix(size, size),
m_isInitialized(false) {}
-LLT(const MatrixType& matrix)
+template<typename InputType>
+explicit LLT(const EigenBase<InputType>& matrix)
: m_matrix(matrix.rows(), matrix.cols()),
m_isInitialized(false)
{
-compute(matrix);
+compute(matrix.derived());
+}
+/** \brief Constructs a LDLT factorization from a given matrix
+*
+* This overloaded constructor is provided for \link InplaceDecomposition inplace decomposition \endlink when
+* \c MatrixType is a Eigen::Ref.
+*
+* \sa LLT(const EigenBase&)
+*/
+template<typename InputType>
+explicit LLT(EigenBase<InputType>& matrix)
+: m_matrix(matrix.derived()),
+m_isInitialized(false)
+{
+compute(matrix.derived());
}
/** \returns a view of the upper triangular matrix U */
@@ -115,33 +133,33 @@ template<typename _MatrixType, int _UpLo> class LLT
* Example: \include LLT_solve.cpp
* Output: \verbinclude LLT_solve.out
*
-* \sa solveInPlace(), MatrixBase::llt()
+* \sa solveInPlace(), MatrixBase::llt(), SelfAdjointView::llt()
*/
template<typename Rhs>
-inline const internal::solve_retval<LLT, Rhs>
+inline const Solve<LLT, Rhs>
solve(const MatrixBase<Rhs>& b) const
{
eigen_assert(m_isInitialized && "LLT is not initialized.");
eigen_assert(m_matrix.rows()==b.rows()
&& "LLT::solve(): invalid number of rows of the right hand side matrix b");
-return internal::solve_retval<LLT, Rhs>(*this, b.derived());
+return Solve<LLT, Rhs>(*this, b.derived());
}
-#ifdef EIGEN2_SUPPORT
-template<typename OtherDerived, typename ResultType>
-bool solve(const MatrixBase<OtherDerived>& b, ResultType *result) const
-{
-*result = this->solve(b);
-return true;
-}
-bool isPositiveDefinite() const { return true; }
-#endif
template<typename Derived>
void solveInPlace(MatrixBase<Derived> &bAndX) const;
-LLT& compute(const MatrixType& matrix);
+template<typename InputType>
+LLT& compute(const EigenBase<InputType>& matrix);
+/** \returns an estimate of the reciprocal condition number of the matrix of
+* which \c *this is the Cholesky decomposition.
+*/
+RealScalar rcond() const
+{
+eigen_assert(m_isInitialized && "LLT is not initialized.");
+eigen_assert(m_info == Success && "LLT failed because matrix appears to be negative");
+return internal::rcond_estimate_helper(m_l1_norm, *this);
+}
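For the LLT counterpart, a minimal sketch exercising the solve() path and the newly added rcond() member; the symmetric positive definite test matrix is an illustrative assumption:

    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      const int n = 30;
      Eigen::MatrixXd B = Eigen::MatrixXd::Random(n, n);
      Eigen::MatrixXd A = B * B.transpose() + n * Eigen::MatrixXd::Identity(n, n);  // SPD
      Eigen::VectorXd b = Eigen::VectorXd::Random(n);

      Eigen::LLT<Eigen::MatrixXd> llt(A);
      if (llt.info() != Eigen::Success) {
        std::cerr << "matrix does not appear to be positive definite\n";
        return 1;
      }
      Eigen::VectorXd x = llt.solve(b);
      std::cout << "residual: " << (A * x - b).norm() << "\n";
      std::cout << "rcond():  " << llt.rcond() << "\n";
    }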
/** \returns the LLT decomposition matrix
*
@@ -167,24 +185,38 @@ template<typename _MatrixType, int _UpLo> class LLT
return m_info;
}
+/** \returns the adjoint of \c *this, that is, a const reference to the decomposition itself as the underlying matrix is self-adjoint.
+*
+* This method is provided for compatibility with other matrix decompositions, thus enabling generic code such as:
+* \code x = decomposition.adjoint().solve(b) \endcode
+*/
+const LLT& adjoint() const { return *this; };
inline Index rows() const { return m_matrix.rows(); }
inline Index cols() const { return m_matrix.cols(); }
template<typename VectorType>
LLT rankUpdate(const VectorType& vec, const RealScalar& sigma = 1);
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+template<typename RhsType, typename DstType>
+EIGEN_DEVICE_FUNC
+void _solve_impl(const RhsType &rhs, DstType &dst) const;
+#endif
protected:
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}
/** \internal
* Used to compute and store L
* The strict upper part is not used and even not initialized.
*/
MatrixType m_matrix;
+RealScalar m_l1_norm;
bool m_isInitialized;
ComputationInfo m_info;
};
@@ -194,12 +226,11 @@ namespace internal {
template<typename Scalar, int UpLo> struct llt_inplace;
template<typename MatrixType, typename VectorType>
-static typename MatrixType::Index llt_rank_update_lower(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma)
+static Index llt_rank_update_lower(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma)
{
using std::sqrt;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
-typedef typename MatrixType::Index Index;
typedef typename MatrixType::ColXpr ColXpr;
typedef typename internal::remove_all<ColXpr>::type ColXprCleaned;
typedef typename ColXprCleaned::SegmentReturnType ColXprSegment;
@@ -268,11 +299,10 @@ template<typename Scalar> struct llt_inplace<Scalar, Lower>
{
typedef typename NumTraits<Scalar>::Real RealScalar;
template<typename MatrixType>
-static typename MatrixType::Index unblocked(MatrixType& mat)
+static Index unblocked(MatrixType& mat)
{
using std::sqrt;
-typedef typename MatrixType::Index Index;
eigen_assert(mat.rows()==mat.cols());
const Index size = mat.rows();
for(Index k = 0; k < size; ++k)
@@ -295,9 +325,8 @@ template<typename Scalar> struct llt_inplace<Scalar, Lower>
}
template<typename MatrixType>
-static typename MatrixType::Index blocked(MatrixType& m)
+static Index blocked(MatrixType& m)
{
-typedef typename MatrixType::Index Index;
eigen_assert(m.rows()==m.cols());
Index size = m.rows();
if(size<32)
@@ -322,36 +351,36 @@ template<typename Scalar> struct llt_inplace<Scalar, Lower>
Index ret;
if((ret=unblocked(A11))>=0) return k+ret;
if(rs>0) A11.adjoint().template triangularView<Upper>().template solveInPlace<OnTheRight>(A21);
-if(rs>0) A22.template selfadjointView<Lower>().rankUpdate(A21,-1); // bottleneck
+if(rs>0) A22.template selfadjointView<Lower>().rankUpdate(A21,typename NumTraits<RealScalar>::Literal(-1)); // bottleneck
}
return -1;
}
template<typename MatrixType, typename VectorType>
-static typename MatrixType::Index rankUpdate(MatrixType& mat, const VectorType& vec, const RealScalar& sigma)
+static Index rankUpdate(MatrixType& mat, const VectorType& vec, const RealScalar& sigma)
{
return Eigen::internal::llt_rank_update_lower(mat, vec, sigma);
}
};
template<typename Scalar> struct llt_inplace<Scalar, Upper>
{
typedef typename NumTraits<Scalar>::Real RealScalar;
template<typename MatrixType>
-static EIGEN_STRONG_INLINE typename MatrixType::Index unblocked(MatrixType& mat)
+static EIGEN_STRONG_INLINE Index unblocked(MatrixType& mat)
{
Transpose<MatrixType> matt(mat);
return llt_inplace<Scalar, Lower>::unblocked(matt);
}
template<typename MatrixType>
-static EIGEN_STRONG_INLINE typename MatrixType::Index blocked(MatrixType& mat)
+static EIGEN_STRONG_INLINE Index blocked(MatrixType& mat)
{
Transpose<MatrixType> matt(mat);
return llt_inplace<Scalar, Lower>::blocked(matt);
}
template<typename MatrixType, typename VectorType>
-static typename MatrixType::Index rankUpdate(MatrixType& mat, const VectorType& vec, const RealScalar& sigma)
+static Index rankUpdate(MatrixType& mat, const VectorType& vec, const RealScalar& sigma)
{
Transpose<MatrixType> matt(mat);
return llt_inplace<Scalar, Lower>::rankUpdate(matt, vec.conjugate(), sigma);
@@ -362,8 +391,8 @@ template<typename MatrixType> struct LLT_Traits<MatrixType,Lower>
{
typedef const TriangularView<const MatrixType, Lower> MatrixL;
typedef const TriangularView<const typename MatrixType::AdjointReturnType, Upper> MatrixU;
-static inline MatrixL getL(const MatrixType& m) { return m; }
-static inline MatrixU getU(const MatrixType& m) { return m.adjoint(); }
+static inline MatrixL getL(const MatrixType& m) { return MatrixL(m); }
+static inline MatrixU getU(const MatrixType& m) { return MatrixU(m.adjoint()); }
static bool inplace_decomposition(MatrixType& m)
{ return llt_inplace<typename MatrixType::Scalar, Lower>::blocked(m)==-1; }
};
@@ -372,8 +401,8 @@ template<typename MatrixType> struct LLT_Traits<MatrixType,Upper>
{
typedef const TriangularView<const typename MatrixType::AdjointReturnType, Lower> MatrixL;
typedef const TriangularView<const MatrixType, Upper> MatrixU;
-static inline MatrixL getL(const MatrixType& m) { return m.adjoint(); }
-static inline MatrixU getU(const MatrixType& m) { return m; }
+static inline MatrixL getL(const MatrixType& m) { return MatrixL(m.adjoint()); }
+static inline MatrixU getU(const MatrixType& m) { return MatrixU(m); }
static bool inplace_decomposition(MatrixType& m)
{ return llt_inplace<typename MatrixType::Scalar, Upper>::blocked(m)==-1; }
};
@@ -388,14 +417,28 @@ template<typename MatrixType> struct LLT_Traits<MatrixType,Upper>
* Output: \verbinclude TutorialLinAlgComputeTwice.out
*/
template<typename MatrixType, int _UpLo> template<typename MatrixType, int _UpLo>
LLT<MatrixType,_UpLo>& LLT<MatrixType,_UpLo>::compute(const MatrixType& a) template<typename InputType>
LLT<MatrixType,_UpLo>& LLT<MatrixType,_UpLo>::compute(const EigenBase<InputType>& a)
{ {
check_template_parameters(); check_template_parameters();
eigen_assert(a.rows()==a.cols()); eigen_assert(a.rows()==a.cols());
const Index size = a.rows(); const Index size = a.rows();
m_matrix.resize(size, size); m_matrix.resize(size, size);
m_matrix = a; m_matrix = a.derived();
// Compute matrix L1 norm = max abs column sum.
m_l1_norm = RealScalar(0);
// TODO move this code to SelfAdjointView
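  // Only the _UpLo triangle of the self-adjoint input is stored, so each absolute column
  // sum is assembled from two pieces: the stored part of the column plus the mirrored
  // part read from the corresponding row of the stored triangle.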
for (Index col = 0; col < size; ++col) {
RealScalar abs_col_sum;
if (_UpLo == Lower)
abs_col_sum = m_matrix.col(col).tail(size - col).template lpNorm<1>() + m_matrix.row(col).head(col).template lpNorm<1>();
else
abs_col_sum = m_matrix.col(col).head(col).template lpNorm<1>() + m_matrix.row(col).tail(size - col).template lpNorm<1>();
if (abs_col_sum > m_l1_norm)
m_l1_norm = abs_col_sum;
}
m_isInitialized = true; m_isInitialized = true;
bool ok = Traits::inplace_decomposition(m_matrix); bool ok = Traits::inplace_decomposition(m_matrix);
@@ -423,33 +466,24 @@ LLT<_MatrixType,_UpLo> LLT<_MatrixType,_UpLo>::rankUpdate(const VectorType& v, c
return *this; return *this;
} }
namespace internal {
template<typename _MatrixType, int UpLo, typename Rhs>
struct solve_retval<LLT<_MatrixType, UpLo>, Rhs>
: solve_retval_base<LLT<_MatrixType, UpLo>, Rhs>
{
typedef LLT<_MatrixType,UpLo> LLTType;
EIGEN_MAKE_SOLVE_HELPERS(LLTType,Rhs)
template<typename Dest> void evalTo(Dest& dst) const #ifndef EIGEN_PARSED_BY_DOXYGEN
{ template<typename _MatrixType,int _UpLo>
dst = rhs(); template<typename RhsType, typename DstType>
dec().solveInPlace(dst); void LLT<_MatrixType,_UpLo>::_solve_impl(const RhsType &rhs, DstType &dst) const
} {
}; dst = rhs;
solveInPlace(dst);
} }
#endif
/** \internal use x = llt_object.solve(x); /** \internal use x = llt_object.solve(x);
* *
* This is the \em in-place version of solve(). * This is the \em in-place version of solve().
* *
* \param bAndX represents both the right-hand side matrix b and result x. * \param bAndX represents both the right-hand side matrix b and result x.
* *
* \returns true always! If you need to check for existence of solutions, use another decomposition like LU, QR, or SVD. * This version avoids a copy when the right hand side matrix b is not needed anymore.
*
* This version avoids a copy when the right hand side matrix b is not
* needed anymore.
* *
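  * A minimal in-place sketch (illustrative; A is assumed symmetric positive definite and b a compatible right-hand side):
  * \code
  * Eigen::LLT<Eigen::MatrixXd> llt(A);
  * Eigen::MatrixXd bAndX = b;   // holds b on entry,
  * llt.solveInPlace(bAndX);     // and the solution x on exit, without an extra temporary
  * \endcode
  *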
* \sa LLT::solve(), MatrixBase::llt() * \sa LLT::solve(), MatrixBase::llt()
*/ */
@@ -475,6 +509,7 @@ MatrixType LLT<MatrixType,_UpLo>::reconstructedMatrix() const
/** \cholesky_module /** \cholesky_module
* \returns the LLT decomposition of \c *this * \returns the LLT decomposition of \c *this
* \sa SelfAdjointView::llt()
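  *
  * For instance (a minimal sketch; A and b are illustrative, with A symmetric positive definite):
  * \code
  * Eigen::VectorXd x = A.llt().solve(b);
  * \endcode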
*/ */
template<typename Derived> template<typename Derived>
inline const LLT<typename MatrixBase<Derived>::PlainObject> inline const LLT<typename MatrixBase<Derived>::PlainObject>
@@ -485,6 +520,7 @@ MatrixBase<Derived>::llt() const
/** \cholesky_module /** \cholesky_module
* \returns the LLT decomposition of \c *this * \returns the LLT decomposition of \c *this
* \sa SelfAdjointView::llt()
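  *
  * For instance (a minimal sketch; only the lower triangle of the illustrative matrix A is referenced):
  * \code
  * Eigen::VectorXd x = A.selfadjointView<Eigen::Lower>().llt().solve(b);
  * \endcode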
*/ */
template<typename MatrixType, unsigned int UpLo> template<typename MatrixType, unsigned int UpLo>
inline const LLT<typename SelfAdjointView<MatrixType, UpLo>::PlainObject, UpLo> inline const LLT<typename SelfAdjointView<MatrixType, UpLo>::PlainObject, UpLo>
......
@@ -25,41 +25,38 @@
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************************************************************** ********************************************************************************
* Content : Eigen bindings to Intel(R) MKL * Content : Eigen bindings to LAPACKe
* LLt decomposition based on LAPACKE_?potrf function. * LLt decomposition based on LAPACKE_?potrf function.
******************************************************************************** ********************************************************************************
*/ */
#ifndef EIGEN_LLT_MKL_H #ifndef EIGEN_LLT_LAPACKE_H
#define EIGEN_LLT_MKL_H #define EIGEN_LLT_LAPACKE_H
#include "Eigen/src/Core/util/MKL_support.h"
#include <iostream>
namespace Eigen { namespace Eigen {
namespace internal { namespace internal {
template<typename Scalar> struct mkl_llt; template<typename Scalar> struct lapacke_llt;
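// The EIGEN_LAPACKE_LLT macro below specializes llt_inplace<> so that the blocked Cholesky
// factorization is forwarded to LAPACKE_?potrf. Note the return-value remapping: LAPACK
// reports 0 on success and i>0 when the i-th (1-based) leading minor is not positive
// definite, whereas Eigen expects -1 on success and the zero-based index of the failing
// column otherwise (invalid arguments, info<0, are mapped to size).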
#define EIGEN_MKL_LLT(EIGTYPE, MKLTYPE, MKLPREFIX) \ #define EIGEN_LAPACKE_LLT(EIGTYPE, BLASTYPE, LAPACKE_PREFIX) \
template<> struct mkl_llt<EIGTYPE> \ template<> struct lapacke_llt<EIGTYPE> \
{ \ { \
template<typename MatrixType> \ template<typename MatrixType> \
static inline typename MatrixType::Index potrf(MatrixType& m, char uplo) \ static inline Index potrf(MatrixType& m, char uplo) \
{ \ { \
lapack_int matrix_order; \ lapack_int matrix_order; \
lapack_int size, lda, info, StorageOrder; \ lapack_int size, lda, info, StorageOrder; \
EIGTYPE* a; \ EIGTYPE* a; \
eigen_assert(m.rows()==m.cols()); \ eigen_assert(m.rows()==m.cols()); \
/* Set up parameters for ?potrf */ \ /* Set up parameters for ?potrf */ \
size = m.rows(); \ size = convert_index<lapack_int>(m.rows()); \
StorageOrder = MatrixType::Flags&RowMajorBit?RowMajor:ColMajor; \ StorageOrder = MatrixType::Flags&RowMajorBit?RowMajor:ColMajor; \
matrix_order = StorageOrder==RowMajor ? LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR; \ matrix_order = StorageOrder==RowMajor ? LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR; \
a = &(m.coeffRef(0,0)); \ a = &(m.coeffRef(0,0)); \
lda = m.outerStride(); \ lda = convert_index<lapack_int>(m.outerStride()); \
\ \
info = LAPACKE_##MKLPREFIX##potrf( matrix_order, uplo, size, (MKLTYPE*)a, lda ); \ info = LAPACKE_##LAPACKE_PREFIX##potrf( matrix_order, uplo, size, (BLASTYPE*)a, lda ); \
info = (info==0) ? -1 : info>0 ? info-1 : size; \ info = (info==0) ? -1 : info>0 ? info-1 : size; \
return info; \ return info; \
} \ } \
@@ -67,36 +64,36 @@ template<> struct mkl_llt<EIGTYPE> \
template<> struct llt_inplace<EIGTYPE, Lower> \ template<> struct llt_inplace<EIGTYPE, Lower> \
{ \ { \
template<typename MatrixType> \ template<typename MatrixType> \
static typename MatrixType::Index blocked(MatrixType& m) \ static Index blocked(MatrixType& m) \
{ \ { \
return mkl_llt<EIGTYPE>::potrf(m, 'L'); \ return lapacke_llt<EIGTYPE>::potrf(m, 'L'); \
} \ } \
template<typename MatrixType, typename VectorType> \ template<typename MatrixType, typename VectorType> \
static typename MatrixType::Index rankUpdate(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) \ static Index rankUpdate(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) \
{ return Eigen::internal::llt_rank_update_lower(mat, vec, sigma); } \ { return Eigen::internal::llt_rank_update_lower(mat, vec, sigma); } \
}; \ }; \
template<> struct llt_inplace<EIGTYPE, Upper> \ template<> struct llt_inplace<EIGTYPE, Upper> \
{ \ { \
template<typename MatrixType> \ template<typename MatrixType> \
static typename MatrixType::Index blocked(MatrixType& m) \ static Index blocked(MatrixType& m) \
{ \ { \
return mkl_llt<EIGTYPE>::potrf(m, 'U'); \ return lapacke_llt<EIGTYPE>::potrf(m, 'U'); \
} \ } \
template<typename MatrixType, typename VectorType> \ template<typename MatrixType, typename VectorType> \
static typename MatrixType::Index rankUpdate(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) \ static Index rankUpdate(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) \
{ \ { \
Transpose<MatrixType> matt(mat); \ Transpose<MatrixType> matt(mat); \
return llt_inplace<EIGTYPE, Lower>::rankUpdate(matt, vec.conjugate(), sigma); \ return llt_inplace<EIGTYPE, Lower>::rankUpdate(matt, vec.conjugate(), sigma); \
} \ } \
}; };
EIGEN_MKL_LLT(double, double, d) EIGEN_LAPACKE_LLT(double, double, d)
EIGEN_MKL_LLT(float, float, s) EIGEN_LAPACKE_LLT(float, float, s)
EIGEN_MKL_LLT(dcomplex, MKL_Complex16, z) EIGEN_LAPACKE_LLT(dcomplex, lapack_complex_double, z)
EIGEN_MKL_LLT(scomplex, MKL_Complex8, c) EIGEN_LAPACKE_LLT(scomplex, lapack_complex_float, c)
} // end namespace internal } // end namespace internal
} // end namespace Eigen } // end namespace Eigen
#endif // EIGEN_LLT_MKL_H #endif // EIGEN_LLT_LAPACKE_H
FILE(GLOB Eigen_CholmodSupport_SRCS "*.h")
INSTALL(FILES
${Eigen_CholmodSupport_SRCS}
DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/CholmodSupport COMPONENT Devel
)
@@ -14,46 +14,52 @@ namespace Eigen {
namespace internal { namespace internal {
template<typename Scalar, typename CholmodType> template<typename Scalar> struct cholmod_configure_matrix;
void cholmod_configure_matrix(CholmodType& mat)
{ template<> struct cholmod_configure_matrix<double> {
if (internal::is_same<Scalar,float>::value) template<typename CholmodType>
{ static void run(CholmodType& mat) {
mat.xtype = CHOLMOD_REAL;
mat.dtype = CHOLMOD_SINGLE;
}
else if (internal::is_same<Scalar,double>::value)
{
mat.xtype = CHOLMOD_REAL; mat.xtype = CHOLMOD_REAL;
mat.dtype = CHOLMOD_DOUBLE; mat.dtype = CHOLMOD_DOUBLE;
} }
else if (internal::is_same<Scalar,std::complex<float> >::value) };
{
mat.xtype = CHOLMOD_COMPLEX; template<> struct cholmod_configure_matrix<std::complex<double> > {
mat.dtype = CHOLMOD_SINGLE; template<typename CholmodType>
} static void run(CholmodType& mat) {
else if (internal::is_same<Scalar,std::complex<double> >::value)
{
mat.xtype = CHOLMOD_COMPLEX; mat.xtype = CHOLMOD_COMPLEX;
mat.dtype = CHOLMOD_DOUBLE; mat.dtype = CHOLMOD_DOUBLE;
} }
else };
{
eigen_assert(false && "Scalar type not supported by CHOLMOD"); // Other scalar types are not yet suppotred by Cholmod
} // template<> struct cholmod_configure_matrix<float> {
} // template<typename CholmodType>
// static void run(CholmodType& mat) {
// mat.xtype = CHOLMOD_REAL;
// mat.dtype = CHOLMOD_SINGLE;
// }
// };
//
// template<> struct cholmod_configure_matrix<std::complex<float> > {
// template<typename CholmodType>
// static void run(CholmodType& mat) {
// mat.xtype = CHOLMOD_COMPLEX;
// mat.dtype = CHOLMOD_SINGLE;
// }
// };
} // namespace internal } // namespace internal
/** Wraps the Eigen sparse matrix \a mat into a Cholmod sparse matrix object. /** Wraps the Eigen sparse matrix \a mat into a Cholmod sparse matrix object.
* Note that the data are shared. * Note that the data are shared.
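  *
  * A minimal sketch (illustrative; a compressed, column-major, double-precision matrix is used, and the data are shared rather than copied):
  * \code
  * Eigen::SparseMatrix<double> S(3,3);
  * S.insert(0,0) = 1.0;  S.insert(1,1) = 2.0;  S.insert(2,2) = 3.0;
  * S.makeCompressed();
  * cholmod_sparse cs = Eigen::viewAsCholmod(Eigen::Ref<Eigen::SparseMatrix<double> >(S));
  * \endcode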
*/ */
template<typename _Scalar, int _Options, typename _Index> template<typename _Scalar, int _Options, typename _StorageIndex>
cholmod_sparse viewAsCholmod(SparseMatrix<_Scalar,_Options,_Index>& mat) cholmod_sparse viewAsCholmod(Ref<SparseMatrix<_Scalar,_Options,_StorageIndex> > mat)
{ {
cholmod_sparse res; cholmod_sparse res;
res.nzmax = mat.nonZeros(); res.nzmax = mat.nonZeros();
res.nrow = mat.rows();; res.nrow = mat.rows();
res.ncol = mat.cols(); res.ncol = mat.cols();
res.p = mat.outerIndexPtr(); res.p = mat.outerIndexPtr();
res.i = mat.innerIndexPtr(); res.i = mat.innerIndexPtr();
@@ -74,11 +80,11 @@ cholmod_sparse viewAsCholmod(SparseMatrix<_Scalar,_Options,_Index>& mat)
res.dtype = 0; res.dtype = 0;
res.stype = -1; res.stype = -1;
if (internal::is_same<_Index,int>::value) if (internal::is_same<_StorageIndex,int>::value)
{ {
res.itype = CHOLMOD_INT; res.itype = CHOLMOD_INT;
} }
else if (internal::is_same<_Index,SuiteSparse_long>::value) else if (internal::is_same<_StorageIndex,long>::value)
{ {
res.itype = CHOLMOD_LONG; res.itype = CHOLMOD_LONG;
} }
@@ -88,7 +94,7 @@ cholmod_sparse viewAsCholmod(SparseMatrix<_Scalar,_Options,_Index>& mat)
} }
// setup res.xtype // setup res.xtype
internal::cholmod_configure_matrix<_Scalar>(res); internal::cholmod_configure_matrix<_Scalar>::run(res);
res.stype = 0; res.stype = 0;
@@ -98,16 +104,23 @@ cholmod_sparse viewAsCholmod(SparseMatrix<_Scalar,_Options,_Index>& mat)
template<typename _Scalar, int _Options, typename _Index> template<typename _Scalar, int _Options, typename _Index>
const cholmod_sparse viewAsCholmod(const SparseMatrix<_Scalar,_Options,_Index>& mat) const cholmod_sparse viewAsCholmod(const SparseMatrix<_Scalar,_Options,_Index>& mat)
{ {
cholmod_sparse res = viewAsCholmod(mat.const_cast_derived()); cholmod_sparse res = viewAsCholmod(Ref<SparseMatrix<_Scalar,_Options,_Index> >(mat.const_cast_derived()));
return res;
}
template<typename _Scalar, int _Options, typename _Index>
const cholmod_sparse viewAsCholmod(const SparseVector<_Scalar,_Options,_Index>& mat)
{
cholmod_sparse res = viewAsCholmod(Ref<SparseMatrix<_Scalar,_Options,_Index> >(mat.const_cast_derived()));
return res; return res;
} }
/** Returns a view of the Eigen sparse matrix \a mat as Cholmod sparse matrix. /** Returns a view of the Eigen sparse matrix \a mat as Cholmod sparse matrix.
* The data are not copied but shared. */ * The data are not copied but shared. */
template<typename _Scalar, int _Options, typename _Index, unsigned int UpLo> template<typename _Scalar, int _Options, typename _Index, unsigned int UpLo>
cholmod_sparse viewAsCholmod(const SparseSelfAdjointView<SparseMatrix<_Scalar,_Options,_Index>, UpLo>& mat) cholmod_sparse viewAsCholmod(const SparseSelfAdjointView<const SparseMatrix<_Scalar,_Options,_Index>, UpLo>& mat)
{ {
cholmod_sparse res = viewAsCholmod(mat.matrix().const_cast_derived()); cholmod_sparse res = viewAsCholmod(Ref<SparseMatrix<_Scalar,_Options,_Index> >(mat.matrix().const_cast_derived()));
if(UpLo==Upper) res.stype = 1; if(UpLo==Upper) res.stype = 1;
if(UpLo==Lower) res.stype = -1; if(UpLo==Lower) res.stype = -1;
@@ -131,19 +144,19 @@ cholmod_dense viewAsCholmod(MatrixBase<Derived>& mat)
res.x = (void*)(mat.derived().data()); res.x = (void*)(mat.derived().data());
res.z = 0; res.z = 0;
internal::cholmod_configure_matrix<Scalar>(res); internal::cholmod_configure_matrix<Scalar>::run(res);
return res; return res;
} }
/** Returns a view of the Cholmod sparse matrix \a cm as an Eigen sparse matrix. /** Returns a view of the Cholmod sparse matrix \a cm as an Eigen sparse matrix.
* The data are not copied but shared. */ * The data are not copied but shared. */
template<typename Scalar, int Flags, typename Index> template<typename Scalar, int Flags, typename StorageIndex>
MappedSparseMatrix<Scalar,Flags,Index> viewAsEigen(cholmod_sparse& cm) MappedSparseMatrix<Scalar,Flags,StorageIndex> viewAsEigen(cholmod_sparse& cm)
{ {
return MappedSparseMatrix<Scalar,Flags,Index> return MappedSparseMatrix<Scalar,Flags,StorageIndex>
(cm.nrow, cm.ncol, static_cast<Index*>(cm.p)[cm.ncol], (cm.nrow, cm.ncol, static_cast<StorageIndex*>(cm.p)[cm.ncol],
static_cast<Index*>(cm.p), static_cast<Index*>(cm.i),static_cast<Scalar*>(cm.x) ); static_cast<StorageIndex*>(cm.p), static_cast<StorageIndex*>(cm.i),static_cast<Scalar*>(cm.x) );
} }
enum CholmodMode { enum CholmodMode {
@@ -157,29 +170,39 @@ enum CholmodMode {
* \sa class CholmodSupernodalLLT, class CholmodSimplicialLDLT, class CholmodSimplicialLLT * \sa class CholmodSupernodalLLT, class CholmodSimplicialLDLT, class CholmodSimplicialLLT
*/ */
template<typename _MatrixType, int _UpLo, typename Derived> template<typename _MatrixType, int _UpLo, typename Derived>
class CholmodBase : internal::noncopyable class CholmodBase : public SparseSolverBase<Derived>
{ {
protected:
typedef SparseSolverBase<Derived> Base;
using Base::derived;
using Base::m_isInitialized;
public: public:
typedef _MatrixType MatrixType; typedef _MatrixType MatrixType;
enum { UpLo = _UpLo }; enum { UpLo = _UpLo };
typedef typename MatrixType::Scalar Scalar; typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar; typedef typename MatrixType::RealScalar RealScalar;
typedef MatrixType CholMatrixType; typedef MatrixType CholMatrixType;
typedef typename MatrixType::Index Index; typedef typename MatrixType::StorageIndex StorageIndex;
enum {
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
};
public: public:
CholmodBase() CholmodBase()
: m_cholmodFactor(0), m_info(Success), m_isInitialized(false) : m_cholmodFactor(0), m_info(Success), m_factorizationIsOk(false), m_analysisIsOk(false)
{ {
m_shiftOffset[0] = m_shiftOffset[1] = RealScalar(0.0); EIGEN_STATIC_ASSERT((internal::is_same<double,RealScalar>::value), CHOLMOD_SUPPORTS_DOUBLE_PRECISION_ONLY);
m_shiftOffset[0] = m_shiftOffset[1] = 0.0;
cholmod_start(&m_cholmod); cholmod_start(&m_cholmod);
} }
CholmodBase(const MatrixType& matrix) explicit CholmodBase(const MatrixType& matrix)
: m_cholmodFactor(0), m_info(Success), m_isInitialized(false) : m_cholmodFactor(0), m_info(Success), m_factorizationIsOk(false), m_analysisIsOk(false)
{ {
m_shiftOffset[0] = m_shiftOffset[1] = RealScalar(0.0); EIGEN_STATIC_ASSERT((internal::is_same<double,RealScalar>::value), CHOLMOD_SUPPORTS_DOUBLE_PRECISION_ONLY);
m_shiftOffset[0] = m_shiftOffset[1] = 0.0;
cholmod_start(&m_cholmod); cholmod_start(&m_cholmod);
compute(matrix); compute(matrix);
} }
@@ -191,11 +214,8 @@ class CholmodBase : internal::noncopyable
cholmod_finish(&m_cholmod); cholmod_finish(&m_cholmod);
} }
inline Index cols() const { return m_cholmodFactor->n; } inline StorageIndex cols() const { return internal::convert_index<StorageIndex, Index>(m_cholmodFactor->n); }
inline Index rows() const { return m_cholmodFactor->n; } inline StorageIndex rows() const { return internal::convert_index<StorageIndex, Index>(m_cholmodFactor->n); }
Derived& derived() { return *static_cast<Derived*>(this); }
const Derived& derived() const { return *static_cast<const Derived*>(this); }
/** \brief Reports whether previous computation was successful. /** \brief Reports whether previous computation was successful.
* *
@@ -216,34 +236,6 @@
return derived(); return derived();
} }
/** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
*
* \sa compute()
*/
template<typename Rhs>
inline const internal::solve_retval<CholmodBase, Rhs>
solve(const MatrixBase<Rhs>& b) const
{
eigen_assert(m_isInitialized && "LLT is not initialized.");
eigen_assert(rows()==b.rows()
&& "CholmodDecomposition::solve(): invalid number of rows of the right hand side matrix b");
return internal::solve_retval<CholmodBase, Rhs>(*this, b.derived());
}
/** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A.
*
* \sa compute()
*/
template<typename Rhs>
inline const internal::sparse_solve_retval<CholmodBase, Rhs>
solve(const SparseMatrixBase<Rhs>& b) const
{
eigen_assert(m_isInitialized && "LLT is not initialized.");
eigen_assert(rows()==b.rows()
&& "CholmodDecomposition::solve(): invalid number of rows of the right hand side matrix b");
return internal::sparse_solve_retval<CholmodBase, Rhs>(*this, b.derived());
}
/** Performs a symbolic decomposition on the sparsity pattern of \a matrix. /** Performs a symbolic decomposition on the sparsity pattern of \a matrix.
* *
* This function is particularly useful when solving for several problems having the same structure. * This function is particularly useful when solving for several problems having the same structure.
@@ -277,7 +269,7 @@ class CholmodBase : internal::noncopyable
eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
cholmod_sparse A = viewAsCholmod(matrix.template selfadjointView<UpLo>()); cholmod_sparse A = viewAsCholmod(matrix.template selfadjointView<UpLo>());
cholmod_factorize_p(&A, m_shiftOffset, 0, 0, m_cholmodFactor, &m_cholmod); cholmod_factorize_p(&A, m_shiftOffset, 0, 0, m_cholmodFactor, &m_cholmod);
// If the factorization failed, minor is the column at which it did. On success minor == n. // If the factorization failed, minor is the column at which it did. On success minor == n.
this->m_info = (m_cholmodFactor->minor == m_cholmodFactor->n ? Success : NumericalIssue); this->m_info = (m_cholmodFactor->minor == m_cholmodFactor->n ? Success : NumericalIssue);
m_factorizationIsOk = true; m_factorizationIsOk = true;
@@ -290,20 +282,22 @@ class CholmodBase : internal::noncopyable
#ifndef EIGEN_PARSED_BY_DOXYGEN #ifndef EIGEN_PARSED_BY_DOXYGEN
/** \internal */ /** \internal */
template<typename Rhs,typename Dest> template<typename Rhs,typename Dest>
void _solve(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const void _solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest> &dest) const
{ {
eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()"); eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()");
const Index size = m_cholmodFactor->n; const Index size = m_cholmodFactor->n;
EIGEN_UNUSED_VARIABLE(size); EIGEN_UNUSED_VARIABLE(size);
eigen_assert(size==b.rows()); eigen_assert(size==b.rows());
      // Cholmod needs column-major storage without inner-stride, which corresponds to the default behavior of Ref.
Ref<const Matrix<typename Rhs::Scalar,Dynamic,Dynamic,ColMajor> > b_ref(b.derived());
// note: cd stands for Cholmod Dense
Rhs& b_ref(b.const_cast_derived());
cholmod_dense b_cd = viewAsCholmod(b_ref); cholmod_dense b_cd = viewAsCholmod(b_ref);
cholmod_dense* x_cd = cholmod_solve(CHOLMOD_A, m_cholmodFactor, &b_cd, &m_cholmod); cholmod_dense* x_cd = cholmod_solve(CHOLMOD_A, m_cholmodFactor, &b_cd, &m_cholmod);
if(!x_cd) if(!x_cd)
{ {
this->m_info = NumericalIssue; this->m_info = NumericalIssue;
return;
} }
// TODO optimize this copy by swapping when possible (be careful with alignment, etc.) // TODO optimize this copy by swapping when possible (be careful with alignment, etc.)
dest = Matrix<Scalar,Dest::RowsAtCompileTime,Dest::ColsAtCompileTime>::Map(reinterpret_cast<Scalar*>(x_cd->x),b.rows(),b.cols()); dest = Matrix<Scalar,Dest::RowsAtCompileTime,Dest::ColsAtCompileTime>::Map(reinterpret_cast<Scalar*>(x_cd->x),b.rows(),b.cols());
@@ -311,8 +305,8 @@ class CholmodBase : internal::noncopyable
} }
/** \internal */ /** \internal */
template<typename RhsScalar, int RhsOptions, typename RhsIndex, typename DestScalar, int DestOptions, typename DestIndex> template<typename RhsDerived, typename DestDerived>
void _solve(const SparseMatrix<RhsScalar,RhsOptions,RhsIndex> &b, SparseMatrix<DestScalar,DestOptions,DestIndex> &dest) const void _solve_impl(const SparseMatrixBase<RhsDerived> &b, SparseMatrixBase<DestDerived> &dest) const
{ {
eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()"); eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()");
const Index size = m_cholmodFactor->n; const Index size = m_cholmodFactor->n;
@@ -320,14 +314,16 @@ class CholmodBase : internal::noncopyable
eigen_assert(size==b.rows()); eigen_assert(size==b.rows());
// note: cs stands for Cholmod Sparse // note: cs stands for Cholmod Sparse
cholmod_sparse b_cs = viewAsCholmod(b); Ref<SparseMatrix<typename RhsDerived::Scalar,ColMajor,typename RhsDerived::StorageIndex> > b_ref(b.const_cast_derived());
cholmod_sparse b_cs = viewAsCholmod(b_ref);
cholmod_sparse* x_cs = cholmod_spsolve(CHOLMOD_A, m_cholmodFactor, &b_cs, &m_cholmod); cholmod_sparse* x_cs = cholmod_spsolve(CHOLMOD_A, m_cholmodFactor, &b_cs, &m_cholmod);
if(!x_cs) if(!x_cs)
{ {
this->m_info = NumericalIssue; this->m_info = NumericalIssue;
return;
} }
// TODO optimize this copy by swapping when possible (be careful with alignment, etc.) // TODO optimize this copy by swapping when possible (be careful with alignment, etc.)
dest = viewAsEigen<DestScalar,DestOptions,DestIndex>(*x_cs); dest.derived() = viewAsEigen<typename DestDerived::Scalar,ColMajor,typename DestDerived::StorageIndex>(*x_cs);
cholmod_free_sparse(&x_cs, &m_cholmod); cholmod_free_sparse(&x_cs, &m_cholmod);
} }
#endif // EIGEN_PARSED_BY_DOXYGEN #endif // EIGEN_PARSED_BY_DOXYGEN
@@ -344,10 +340,61 @@ class CholmodBase : internal::noncopyable
*/ */
Derived& setShift(const RealScalar& offset) Derived& setShift(const RealScalar& offset)
{ {
m_shiftOffset[0] = offset; m_shiftOffset[0] = double(offset);
return derived(); return derived();
} }
/** \returns the determinant of the underlying matrix from the current factorization */
Scalar determinant() const
{
using std::exp;
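      // Working in the log domain avoids overflow/underflow when the diagonal entries of
      // the factor are very large or very small.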
return exp(logDeterminant());
}
/** \returns the log determinant of the underlying matrix from the current factorization */
Scalar logDeterminant() const
{
using std::log;
using numext::real;
eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()");
RealScalar logDet = 0;
Scalar *x = static_cast<Scalar*>(m_cholmodFactor->x);
if (m_cholmodFactor->is_super)
{
        // Supernodal factorization stored as a packed list of dense column-major blocks,
        // as described by the following structure:
        // super[k] == index of the first column of the k-th supernode
StorageIndex *super = static_cast<StorageIndex*>(m_cholmodFactor->super);
// pi[k] == offset to the description of row indices
StorageIndex *pi = static_cast<StorageIndex*>(m_cholmodFactor->pi);
// px[k] == offset to the respective dense block
StorageIndex *px = static_cast<StorageIndex*>(m_cholmodFactor->px);
Index nb_super_nodes = m_cholmodFactor->nsuper;
for (Index k=0; k < nb_super_nodes; ++k)
{
StorageIndex ncols = super[k + 1] - super[k];
StorageIndex nrows = pi[k + 1] - pi[k];
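          // Within the k-th supernode's nrows x ncols dense block, consecutive diagonal
          // entries are nrows+1 apart in memory; the strided Map below selects exactly them.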
Map<const Array<Scalar,1,Dynamic>, 0, InnerStride<> > sk(x + px[k], ncols, InnerStride<>(nrows+1));
logDet += sk.real().log().sum();
}
}
else
{
// Simplicial factorization stored as standard CSC matrix.
StorageIndex *p = static_cast<StorageIndex*>(m_cholmodFactor->p);
Index size = m_cholmodFactor->n;
for (Index k=0; k<size; ++k)
logDet += log(real( x[p[k]] ));
}
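      // For an LL^T factorization det(A) = det(L)^2, so the accumulated log of the diagonal
      // must be doubled; for LDL^T the diagonal of D already yields log det(A) directly.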
if (m_cholmodFactor->is_ll)
logDet *= 2.0;
return logDet;
    }
template<typename Stream> template<typename Stream>
void dumpMemory(Stream& /*s*/) void dumpMemory(Stream& /*s*/)
{} {}
@@ -355,9 +402,8 @@ class CholmodBase : internal::noncopyable
protected: protected:
mutable cholmod_common m_cholmod; mutable cholmod_common m_cholmod;
cholmod_factor* m_cholmodFactor; cholmod_factor* m_cholmodFactor;
RealScalar m_shiftOffset[2]; double m_shiftOffset[2];
mutable ComputationInfo m_info; mutable ComputationInfo m_info;
bool m_isInitialized;
int m_factorizationIsOk; int m_factorizationIsOk;
int m_analysisIsOk; int m_analysisIsOk;
}; };
@@ -376,9 +422,13 @@ class CholmodBase : internal::noncopyable
* \tparam _UpLo the triangular part that will be used for the computations. It can be Lower * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
* or Upper. Default is Lower. * or Upper. Default is Lower.
* *
* \implsparsesolverconcept
*
* This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed. * This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed.
* *
* \sa \ref TutorialSparseDirectSolvers, class CholmodSupernodalLLT, class SimplicialLLT * \warning Only double precision real and complex scalar types are supported by Cholmod.
*
* \sa \ref TutorialSparseSolverConcept, class CholmodSupernodalLLT, class SimplicialLLT
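  *
  * A minimal usage sketch (illustrative names; A and b are assumed to be filled elsewhere, with A sparse, square, symmetric positive definite and double precision):
  * \code
  * #include <Eigen/CholmodSupport>
  * Eigen::SparseMatrix<double> A;            // assumed filled and compressed
  * Eigen::VectorXd b;                        // assumed compatible right-hand side
  * Eigen::CholmodSimplicialLLT<Eigen::SparseMatrix<double> > solver(A);
  * Eigen::VectorXd x;
  * if(solver.info() == Eigen::Success)
  *   x = solver.solve(b);
  * \endcode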
*/ */
template<typename _MatrixType, int _UpLo = Lower> template<typename _MatrixType, int _UpLo = Lower>
class CholmodSimplicialLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLLT<_MatrixType, _UpLo> > class CholmodSimplicialLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLLT<_MatrixType, _UpLo> >
@@ -395,7 +445,7 @@ class CholmodSimplicialLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimpl
CholmodSimplicialLLT(const MatrixType& matrix) : Base() CholmodSimplicialLLT(const MatrixType& matrix) : Base()
{ {
init(); init();
Base::compute(matrix); this->compute(matrix);
} }
~CholmodSimplicialLLT() {} ~CholmodSimplicialLLT() {}
@@ -423,9 +473,13 @@ class CholmodSimplicialLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimpl
* \tparam _UpLo the triangular part that will be used for the computations. It can be Lower * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
* or Upper. Default is Lower. * or Upper. Default is Lower.
* *
* \implsparsesolverconcept
*
* This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed. * This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed.
* *
* \sa \ref TutorialSparseDirectSolvers, class CholmodSupernodalLLT, class SimplicialLDLT * \warning Only double precision real and complex scalar types are supported by Cholmod.
*
* \sa \ref TutorialSparseSolverConcept, class CholmodSupernodalLLT, class SimplicialLDLT
*/ */
template<typename _MatrixType, int _UpLo = Lower> template<typename _MatrixType, int _UpLo = Lower>
class CholmodSimplicialLDLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLDLT<_MatrixType, _UpLo> > class CholmodSimplicialLDLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLDLT<_MatrixType, _UpLo> >
@@ -442,7 +496,7 @@ class CholmodSimplicialLDLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimp
CholmodSimplicialLDLT(const MatrixType& matrix) : Base() CholmodSimplicialLDLT(const MatrixType& matrix) : Base()
{ {
init(); init();
Base::compute(matrix); this->compute(matrix);
} }
~CholmodSimplicialLDLT() {} ~CholmodSimplicialLDLT() {}
@@ -468,9 +522,13 @@ class CholmodSimplicialLDLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimp
* \tparam _UpLo the triangular part that will be used for the computations. It can be Lower * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
* or Upper. Default is Lower. * or Upper. Default is Lower.
* *
* \implsparsesolverconcept
*
* This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed. * This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed.
* *
* \sa \ref TutorialSparseDirectSolvers * \warning Only double precision real and complex scalar types are supported by Cholmod.
*
* \sa \ref TutorialSparseSolverConcept
*/ */
template<typename _MatrixType, int _UpLo = Lower> template<typename _MatrixType, int _UpLo = Lower>
class CholmodSupernodalLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSupernodalLLT<_MatrixType, _UpLo> > class CholmodSupernodalLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSupernodalLLT<_MatrixType, _UpLo> >
@@ -487,7 +545,7 @@ class CholmodSupernodalLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSuper
CholmodSupernodalLLT(const MatrixType& matrix) : Base() CholmodSupernodalLLT(const MatrixType& matrix) : Base()
{ {
init(); init();
Base::compute(matrix); this->compute(matrix);
} }
~CholmodSupernodalLLT() {} ~CholmodSupernodalLLT() {}
@@ -515,9 +573,13 @@ class CholmodSupernodalLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSuper
* \tparam _UpLo the triangular part that will be used for the computations. It can be Lower * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
* or Upper. Default is Lower. * or Upper. Default is Lower.
* *
* \implsparsesolverconcept
*
* This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed. * This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed.
* *
* \sa \ref TutorialSparseDirectSolvers * \warning Only double precision real and complex scalar types are supported by Cholmod.
*
* \sa \ref TutorialSparseSolverConcept
*/ */
template<typename _MatrixType, int _UpLo = Lower> template<typename _MatrixType, int _UpLo = Lower>
class CholmodDecomposition : public CholmodBase<_MatrixType, _UpLo, CholmodDecomposition<_MatrixType, _UpLo> > class CholmodDecomposition : public CholmodBase<_MatrixType, _UpLo, CholmodDecomposition<_MatrixType, _UpLo> >
@@ -534,7 +596,7 @@ class CholmodDecomposition : public CholmodBase<_MatrixType, _UpLo, CholmodDecom
CholmodDecomposition(const MatrixType& matrix) : Base() CholmodDecomposition(const MatrixType& matrix) : Base()
{ {
init(); init();
Base::compute(matrix); this->compute(matrix);
} }
~CholmodDecomposition() {} ~CholmodDecomposition() {}
@@ -572,36 +634,6 @@ class CholmodDecomposition : public CholmodBase<_MatrixType, _UpLo, CholmodDecom
} }
}; };
namespace internal {
template<typename _MatrixType, int _UpLo, typename Derived, typename Rhs>
struct solve_retval<CholmodBase<_MatrixType,_UpLo,Derived>, Rhs>
: solve_retval_base<CholmodBase<_MatrixType,_UpLo,Derived>, Rhs>
{
typedef CholmodBase<_MatrixType,_UpLo,Derived> Dec;
EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
template<typename Dest> void evalTo(Dest& dst) const
{
dec()._solve(rhs(),dst);
}
};
template<typename _MatrixType, int _UpLo, typename Derived, typename Rhs>
struct sparse_solve_retval<CholmodBase<_MatrixType,_UpLo,Derived>, Rhs>
: sparse_solve_retval_base<CholmodBase<_MatrixType,_UpLo,Derived>, Rhs>
{
typedef CholmodBase<_MatrixType,_UpLo,Derived> Dec;
EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs)
template<typename Dest> void evalTo(Dest& dst) const
{
dec()._solve(rhs(),dst);
}
};
} // end namespace internal
} // end namespace Eigen } // end namespace Eigen
#endif // EIGEN_CHOLMODSUPPORT_H #endif // EIGEN_CHOLMODSUPPORT_H