Skip to content
...@@ -25,13 +25,13 @@ ...@@ -25,13 +25,13 @@
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************************************************************** ********************************************************************************
* Content : Eigen bindings to Intel(R) MKL * Content : Eigen bindings to BLAS F77
* General matrix-vector product functionality based on ?GEMV. * General matrix-vector product functionality based on ?GEMV.
******************************************************************************** ********************************************************************************
*/ */
#ifndef EIGEN_GENERAL_MATRIX_VECTOR_MKL_H #ifndef EIGEN_GENERAL_MATRIX_VECTOR_BLAS_H
#define EIGEN_GENERAL_MATRIX_VECTOR_MKL_H #define EIGEN_GENERAL_MATRIX_VECTOR_BLAS_H
namespace Eigen { namespace Eigen {
...@@ -46,47 +46,46 @@ namespace internal { ...@@ -46,47 +46,46 @@ namespace internal {
// gemv specialization
// Forward declaration only: the concrete instantiations are provided by the
// EIGEN_BLAS_GEMV_SPECIALIZATION macro (further down in this file) for the
// four scalar types covered by BLAS (float, double, scomplex, dcomplex).
template<typename Index, typename LhsScalar, int StorageOrder, bool ConjugateLhs, typename RhsScalar, bool ConjugateRhs>
struct general_matrix_vector_product_gemv;
#define EIGEN_MKL_GEMV_SPECIALIZE(Scalar) \ #define EIGEN_BLAS_GEMV_SPECIALIZE(Scalar) \
template<typename Index, bool ConjugateLhs, bool ConjugateRhs> \ template<typename Index, bool ConjugateLhs, bool ConjugateRhs> \
struct general_matrix_vector_product<Index,Scalar,ColMajor,ConjugateLhs,Scalar,ConjugateRhs,Specialized> { \ struct general_matrix_vector_product<Index,Scalar,const_blas_data_mapper<Scalar,Index,ColMajor>,ColMajor,ConjugateLhs,Scalar,const_blas_data_mapper<Scalar,Index,RowMajor>,ConjugateRhs,Specialized> { \
static void run( \ static void run( \
Index rows, Index cols, \ Index rows, Index cols, \
const Scalar* lhs, Index lhsStride, \ const const_blas_data_mapper<Scalar,Index,ColMajor> &lhs, \
const Scalar* rhs, Index rhsIncr, \ const const_blas_data_mapper<Scalar,Index,RowMajor> &rhs, \
Scalar* res, Index resIncr, Scalar alpha) \ Scalar* res, Index resIncr, Scalar alpha) \
{ \ { \
if (ConjugateLhs) { \ if (ConjugateLhs) { \
general_matrix_vector_product<Index,Scalar,ColMajor,ConjugateLhs,Scalar,ConjugateRhs,BuiltIn>::run( \ general_matrix_vector_product<Index,Scalar,const_blas_data_mapper<Scalar,Index,ColMajor>,ColMajor,ConjugateLhs,Scalar,const_blas_data_mapper<Scalar,Index,RowMajor>,ConjugateRhs,BuiltIn>::run( \
rows, cols, lhs, lhsStride, rhs, rhsIncr, res, resIncr, alpha); \ rows, cols, lhs, rhs, res, resIncr, alpha); \
} else { \ } else { \
general_matrix_vector_product_gemv<Index,Scalar,ColMajor,ConjugateLhs,Scalar,ConjugateRhs>::run( \ general_matrix_vector_product_gemv<Index,Scalar,ColMajor,ConjugateLhs,Scalar,ConjugateRhs>::run( \
rows, cols, lhs, lhsStride, rhs, rhsIncr, res, resIncr, alpha); \ rows, cols, lhs.data(), lhs.stride(), rhs.data(), rhs.stride(), res, resIncr, alpha); \
} \ } \
} \ } \
}; \ }; \
template<typename Index, bool ConjugateLhs, bool ConjugateRhs> \ template<typename Index, bool ConjugateLhs, bool ConjugateRhs> \
struct general_matrix_vector_product<Index,Scalar,RowMajor,ConjugateLhs,Scalar,ConjugateRhs,Specialized> { \ struct general_matrix_vector_product<Index,Scalar,const_blas_data_mapper<Scalar,Index,RowMajor>,RowMajor,ConjugateLhs,Scalar,const_blas_data_mapper<Scalar,Index,ColMajor>,ConjugateRhs,Specialized> { \
static void run( \ static void run( \
Index rows, Index cols, \ Index rows, Index cols, \
const Scalar* lhs, Index lhsStride, \ const const_blas_data_mapper<Scalar,Index,RowMajor> &lhs, \
const Scalar* rhs, Index rhsIncr, \ const const_blas_data_mapper<Scalar,Index,ColMajor> &rhs, \
Scalar* res, Index resIncr, Scalar alpha) \ Scalar* res, Index resIncr, Scalar alpha) \
{ \ { \
general_matrix_vector_product_gemv<Index,Scalar,RowMajor,ConjugateLhs,Scalar,ConjugateRhs>::run( \ general_matrix_vector_product_gemv<Index,Scalar,RowMajor,ConjugateLhs,Scalar,ConjugateRhs>::run( \
rows, cols, lhs, lhsStride, rhs, rhsIncr, res, resIncr, alpha); \ rows, cols, lhs.data(), lhs.stride(), rhs.data(), rhs.stride(), res, resIncr, alpha); \
} \ } \
}; \ }; \
EIGEN_MKL_GEMV_SPECIALIZE(double) EIGEN_BLAS_GEMV_SPECIALIZE(double)
EIGEN_MKL_GEMV_SPECIALIZE(float) EIGEN_BLAS_GEMV_SPECIALIZE(float)
EIGEN_MKL_GEMV_SPECIALIZE(dcomplex) EIGEN_BLAS_GEMV_SPECIALIZE(dcomplex)
EIGEN_MKL_GEMV_SPECIALIZE(scomplex) EIGEN_BLAS_GEMV_SPECIALIZE(scomplex)
#define EIGEN_MKL_GEMV_SPECIALIZATION(EIGTYPE,MKLTYPE,MKLPREFIX) \ #define EIGEN_BLAS_GEMV_SPECIALIZATION(EIGTYPE,BLASTYPE,BLASPREFIX) \
template<typename Index, int LhsStorageOrder, bool ConjugateLhs, bool ConjugateRhs> \ template<typename Index, int LhsStorageOrder, bool ConjugateLhs, bool ConjugateRhs> \
struct general_matrix_vector_product_gemv<Index,EIGTYPE,LhsStorageOrder,ConjugateLhs,EIGTYPE,ConjugateRhs> \ struct general_matrix_vector_product_gemv<Index,EIGTYPE,LhsStorageOrder,ConjugateLhs,EIGTYPE,ConjugateRhs> \
{ \ { \
...@@ -98,16 +97,15 @@ static void run( \ ...@@ -98,16 +97,15 @@ static void run( \
const EIGTYPE* rhs, Index rhsIncr, \ const EIGTYPE* rhs, Index rhsIncr, \
EIGTYPE* res, Index resIncr, EIGTYPE alpha) \ EIGTYPE* res, Index resIncr, EIGTYPE alpha) \
{ \ { \
MKL_INT m=rows, n=cols, lda=lhsStride, incx=rhsIncr, incy=resIncr; \ BlasIndex m=convert_index<BlasIndex>(rows), n=convert_index<BlasIndex>(cols), \
MKLTYPE alpha_, beta_; \ lda=convert_index<BlasIndex>(lhsStride), incx=convert_index<BlasIndex>(rhsIncr), incy=convert_index<BlasIndex>(resIncr); \
const EIGTYPE *x_ptr, myone(1); \ const EIGTYPE beta(1); \
const EIGTYPE *x_ptr; \
char trans=(LhsStorageOrder==ColMajor) ? 'N' : (ConjugateLhs) ? 'C' : 'T'; \ char trans=(LhsStorageOrder==ColMajor) ? 'N' : (ConjugateLhs) ? 'C' : 'T'; \
if (LhsStorageOrder==RowMajor) { \ if (LhsStorageOrder==RowMajor) { \
m=cols; \ m = convert_index<BlasIndex>(cols); \
n=rows; \ n = convert_index<BlasIndex>(rows); \
}\ }\
assign_scalar_eig2mkl(alpha_, alpha); \
assign_scalar_eig2mkl(beta_, myone); \
GEMVVector x_tmp; \ GEMVVector x_tmp; \
if (ConjugateRhs) { \ if (ConjugateRhs) { \
Map<const GEMVVector, 0, InnerStride<> > map_x(rhs,cols,1,InnerStride<>(incx)); \ Map<const GEMVVector, 0, InnerStride<> > map_x(rhs,cols,1,InnerStride<>(incx)); \
...@@ -115,17 +113,17 @@ static void run( \ ...@@ -115,17 +113,17 @@ static void run( \
x_ptr=x_tmp.data(); \ x_ptr=x_tmp.data(); \
incx=1; \ incx=1; \
} else x_ptr=rhs; \ } else x_ptr=rhs; \
MKLPREFIX##gemv(&trans, &m, &n, &alpha_, (const MKLTYPE*)lhs, &lda, (const MKLTYPE*)x_ptr, &incx, &beta_, (MKLTYPE*)res, &incy); \ BLASPREFIX##gemv_(&trans, &m, &n, &numext::real_ref(alpha), (const BLASTYPE*)lhs, &lda, (const BLASTYPE*)x_ptr, &incx, &numext::real_ref(beta), (BLASTYPE*)res, &incy); \
}\ }\
}; };
EIGEN_MKL_GEMV_SPECIALIZATION(double, double, d) EIGEN_BLAS_GEMV_SPECIALIZATION(double, double, d)
EIGEN_MKL_GEMV_SPECIALIZATION(float, float, s) EIGEN_BLAS_GEMV_SPECIALIZATION(float, float, s)
EIGEN_MKL_GEMV_SPECIALIZATION(dcomplex, MKL_Complex16, z) EIGEN_BLAS_GEMV_SPECIALIZATION(dcomplex, double, z)
EIGEN_MKL_GEMV_SPECIALIZATION(scomplex, MKL_Complex8, c) EIGEN_BLAS_GEMV_SPECIALIZATION(scomplex, float, c)
} // end namespace internal
} // end namespace Eigen } // end namespace Eigen
#endif // EIGEN_GENERAL_MATRIX_VECTOR_MKL_H #endif // EIGEN_GENERAL_MATRIX_VECTOR_BLAS_H
...@@ -10,7 +10,7 @@ ...@@ -10,7 +10,7 @@
#ifndef EIGEN_PARALLELIZER_H #ifndef EIGEN_PARALLELIZER_H
#define EIGEN_PARALLELIZER_H #define EIGEN_PARALLELIZER_H
namespace Eigen { namespace Eigen {
namespace internal { namespace internal {
...@@ -49,8 +49,8 @@ inline void initParallel() ...@@ -49,8 +49,8 @@ inline void initParallel()
{ {
int nbt; int nbt;
internal::manage_multi_threading(GetAction, &nbt); internal::manage_multi_threading(GetAction, &nbt);
std::ptrdiff_t l1, l2; std::ptrdiff_t l1, l2, l3;
internal::manage_caching_sizes(GetAction, &l1, &l2); internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
} }
/** \returns the max number of threads reserved for Eigen /** \returns the max number of threads reserved for Eigen
...@@ -73,17 +73,17 @@ namespace internal { ...@@ -73,17 +73,17 @@ namespace internal {
/** \internal Per-thread bookkeeping used by the parallel GEMM driver.
  *
  * Each worker thread publishes the range of LHS rows it packs
  * (lhs_start / lhs_length) and coordinates with the other threads
  * through \c sync and \c users.
  */
template<typename Index> struct GemmParallelInfo
{
  GemmParallelInfo() : sync(-1), users(0), lhs_start(0), lhs_length(0) {}

  // -1 means "nothing published yet".
  // NOTE(review): precise handshake semantics of sync/users are defined by
  // the product kernel that consumes this struct — confirm there.
  Index volatile sync;
  int volatile users;

  Index lhs_start;   ///< first LHS row owned by this thread
  Index lhs_length;  ///< number of LHS rows owned by this thread
};
template<bool Condition, typename Functor, typename Index> template<bool Condition, typename Functor, typename Index>
void parallelize_gemm(const Functor& func, Index rows, Index cols, bool transpose) void parallelize_gemm(const Functor& func, Index rows, Index cols, Index depth, bool transpose)
{ {
// TODO when EIGEN_USE_BLAS is defined, // TODO when EIGEN_USE_BLAS is defined,
// we should still enable OMP for other scalar types // we should still enable OMP for other scalar types
...@@ -92,6 +92,7 @@ void parallelize_gemm(const Functor& func, Index rows, Index cols, bool transpos ...@@ -92,6 +92,7 @@ void parallelize_gemm(const Functor& func, Index rows, Index cols, bool transpos
// the matrix product when multithreading is enabled. This is a temporary // the matrix product when multithreading is enabled. This is a temporary
// fix to support row-major destination matrices. This whole // fix to support row-major destination matrices. This whole
// parallelizer mechanism has to be redesigned anyway.
EIGEN_UNUSED_VARIABLE(depth);
EIGEN_UNUSED_VARIABLE(transpose); EIGEN_UNUSED_VARIABLE(transpose);
func(0,rows, 0,cols); func(0,rows, 0,cols);
#else #else
...@@ -102,56 +103,56 @@ void parallelize_gemm(const Functor& func, Index rows, Index cols, bool transpos ...@@ -102,56 +103,56 @@ void parallelize_gemm(const Functor& func, Index rows, Index cols, bool transpos
// - we are not already in a parallel code // - we are not already in a parallel code
// - the sizes are large enough // - the sizes are large enough
// 1- are we already in a parallel session? // compute the maximal number of threads from the size of the product:
// FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp? // This first heuristic takes into account that the product kernel is fully optimized when working with nr columns at once.
if((!Condition) || (omp_get_num_threads()>1)) Index size = transpose ? rows : cols;
return func(0,rows, 0,cols); Index pb_max_threads = std::max<Index>(1,size / Functor::Traits::nr);
Index size = transpose ? cols : rows; // compute the maximal number of threads from the total amount of work:
double work = static_cast<double>(rows) * static_cast<double>(cols) *
static_cast<double>(depth);
double kMinTaskSize = 50000; // FIXME improve this heuristic.
pb_max_threads = std::max<Index>(1, std::min<Index>(pb_max_threads, work / kMinTaskSize));
// 2- compute the maximal number of threads from the size of the product: // compute the number of threads we are going to use
// FIXME this has to be fine tuned Index threads = std::min<Index>(nbThreads(), pb_max_threads);
Index max_threads = std::max<Index>(1,size / 32);
// 3 - compute the number of threads we are going to use // if multi-threading is explicitely disabled, not useful, or if we already are in a parallel session,
Index threads = std::min<Index>(nbThreads(), max_threads); // then abort multi-threading
// FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp?
if(threads==1) if((!Condition) || (threads==1) || (omp_get_num_threads()>1))
return func(0,rows, 0,cols); return func(0,rows, 0,cols);
Eigen::initParallel(); Eigen::initParallel();
func.initParallelSession(); func.initParallelSession(threads);
if(transpose) if(transpose)
std::swap(rows,cols); std::swap(rows,cols);
GemmParallelInfo<Index>* info = new GemmParallelInfo<Index>[threads]; ei_declare_aligned_stack_constructed_variable(GemmParallelInfo<Index>,info,threads,0);
#pragma omp parallel num_threads(threads) #pragma omp parallel num_threads(threads)
{ {
Index i = omp_get_thread_num(); Index i = omp_get_thread_num();
// Note that the actual number of threads might be lower than the number of request ones. // Note that the actual number of threads might be lower than the number of request ones.
Index actual_threads = omp_get_num_threads(); Index actual_threads = omp_get_num_threads();
Index blockCols = (cols / actual_threads) & ~Index(0x3); Index blockCols = (cols / actual_threads) & ~Index(0x3);
Index blockRows = (rows / actual_threads) & ~Index(0x7); Index blockRows = (rows / actual_threads);
blockRows = (blockRows/Functor::Traits::mr)*Functor::Traits::mr;
Index r0 = i*blockRows; Index r0 = i*blockRows;
Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows; Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows;
Index c0 = i*blockCols; Index c0 = i*blockCols;
Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols; Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols;
info[i].rhs_start = c0; info[i].lhs_start = r0;
info[i].rhs_length = actualBlockCols; info[i].lhs_length = actualBlockRows;
if(transpose) if(transpose) func(c0, actualBlockCols, 0, rows, info);
func(0, cols, r0, actualBlockRows, info); else func(0, rows, c0, actualBlockCols, info);
else
func(r0, actualBlockRows, 0,cols, info);
} }
delete[] info;
#endif #endif
} }
......