// [extraction artifact — repository web-viewer chrome, not file content]
// Skip to content
// TensorConvolution.h 46.5 KiB
// Newer Older
// Luker's avatar
// Luker committed
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONVOLUTION_H
#define EIGEN_CXX11_TENSOR_TENSOR_CONVOLUTION_H

namespace Eigen {

/** \class TensorConvolution
  * \ingroup CXX11_Tensor_Module
  *
  * \brief Tensor convolution class.
  *
  *
  */
namespace internal {

template <typename Index, typename InputDims, int NumKernelDims, int Layout>
class IndexMapper {
 public:
  // Precomputes the stride bookkeeping needed to translate between the
  // permuted ("CUDA") dimension ordering used by the GPU convolution kernels
  // and the tensor's native dimension ordering.
  //
  // input_dims:  dimensions of the input tensor.
  // kernel_dims: extents of the convolution kernel, one per convolved dim.
  // indices:     for each kernel dimension, the index of the input dimension
  //              it convolves over.
  //
  // The permuted ordering places the convolved (kernel) dimensions in the
  // innermost positions: the first NumKernelDims slots for ColMajor, the
  // last NumKernelDims slots for RowMajor. The remaining ("plane")
  // dimensions keep their relative order in the other slots.
  IndexMapper(const InputDims& input_dims, const array<Index, NumKernelDims>& kernel_dims,
              const array<Index, NumKernelDims>& indices) {

    // Output dimensions: each convolved dimension shrinks to
    // input_dim - kernel_dim + 1 (a "valid" convolution); the others are
    // unchanged.
    array<Index, NumDims> dimensions = input_dims;
    for (int i = 0; i < NumKernelDims; ++i) {
      const Index index = indices[i];
      const Index input_dim = input_dims[index];
      const Index kernel_dim = kernel_dims[i];
      const Index result_dim = input_dim - kernel_dim + 1;
      dimensions[index] = result_dim;
    }

    // Strides of the input and output tensors in their native layout.
    array<Index, NumDims> inputStrides;
    array<Index, NumDims> outputStrides;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      inputStrides[0] = 1;
      outputStrides[0] = 1;
      for (int i = 1; i < NumDims; ++i) {
        inputStrides[i] = inputStrides[i-1] * input_dims[i-1];
        outputStrides[i] = outputStrides[i-1] * dimensions[i-1];
      }
    } else {
      inputStrides[NumDims - 1] = 1;
      outputStrides[NumDims - 1] = 1;
      for (int i = static_cast<int>(NumDims) - 2; i >= 0; --i) {
        inputStrides[i] = inputStrides[i + 1] * input_dims[i + 1];
        outputStrides[i] = outputStrides[i + 1] * dimensions[i + 1];
      }
    }

    // Build the permuted dimension ordering: kernel dimensions go to slots
    // [offset, offset + NumKernelDims); `tmp` marks which native dimensions
    // they consumed.
    array<Index, NumDims> cudaInputDimensions;
    array<Index, NumDims> cudaOutputDimensions;
    array<Index, NumDims> tmp = dimensions;
    array<Index, NumDims> ordering;
    const size_t offset = static_cast<int>(Layout) == static_cast<int>(ColMajor)
                              ? 0
                              : NumDims - NumKernelDims;
    for (int i = 0; i < NumKernelDims; ++i) {
      const Index index = i + offset;
      ordering[index] = indices[i];
      tmp[indices[i]] = -1;  // mark as consumed by a kernel dimension
      cudaInputDimensions[index] = input_dims[indices[i]];
      cudaOutputDimensions[index] = dimensions[indices[i]];
    }

    // Remaining (plane) dimensions fill the slots not taken by the kernel
    // dimensions, preserving their relative order.
    int written = static_cast<int>(Layout) == static_cast<int>(ColMajor)
                      ? NumKernelDims
                      : 0;
    for (int i = 0; i < NumDims; ++i) {
      if (tmp[i] >= 0) {  // not consumed by a kernel dimension
        ordering[written] = i;
        cudaInputDimensions[written] = input_dims[i];
        cudaOutputDimensions[written] = dimensions[i];
        ++written;
      }
    }

    // Native strides, reordered into the permuted layout.
    for (int i = 0; i < NumDims; ++i) {
      m_inputStrides[i] = inputStrides[ordering[i]];
      m_outputStrides[i] = outputStrides[ordering[i]];
    }

    // Strides of the permuted layout itself. Kernel dimensions (and the
    // innermost plane dimension) get stride 1; outer plane dimensions
    // accumulate products of the permuted extents.
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = 0; i < NumDims; ++i) {
        if (i > NumKernelDims) {
          m_cudaInputStrides[i] =
              m_cudaInputStrides[i - 1] * cudaInputDimensions[i - 1];
          m_cudaOutputStrides[i] =
              m_cudaOutputStrides[i - 1] * cudaOutputDimensions[i - 1];
        } else {
          m_cudaInputStrides[i] = 1;
          m_cudaOutputStrides[i] = 1;
        }
      }
    } else {
      for (int i = NumDims - 1; i >= 0; --i) {
        // Cast avoids a signed/unsigned comparison against the size_t
        // offset; i + 1 >= 1 here, so the cast is always safe.
        if (static_cast<size_t>(i + 1) < offset) {
          m_cudaInputStrides[i] =
              m_cudaInputStrides[i + 1] * cudaInputDimensions[i + 1];
          m_cudaOutputStrides[i] =
              m_cudaOutputStrides[i + 1] * cudaOutputDimensions[i + 1];
        } else {
          m_cudaInputStrides[i] = 1;
          m_cudaOutputStrides[i] = 1;
        }
      }
    }
  }

  // Converts a linear index `p` over the permuted non-kernel ("plane")
  // dimensions into an offset into the input tensor: each plane coordinate
  // is peeled off with the permuted strides and re-weighted with the native
  // input strides.
  EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaInputPlaneToTensorInputOffset(Index p) const {
    Index result = 0;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      // Plane dimensions sit above the kernel dimensions; walk them from
      // outermost to innermost. The innermost one needs no division.
      for (int d = NumDims - 1; d > NumKernelDims; --d) {
        const Index coord = p / m_cudaInputStrides[d];
        p -= coord * m_cudaInputStrides[d];
        result += coord * m_inputStrides[d];
      }
      result += p * m_inputStrides[NumKernelDims];
    } else {
      // Plane dimensions occupy the leading slots; the last of them needs
      // no division.
      const std::ptrdiff_t limit =
          (NumKernelDims < NumDims) ? NumDims - NumKernelDims - 1 : 0;
      for (int d = 0; d < limit; ++d) {
        const Index coord = p / m_cudaInputStrides[d];
        p -= coord * m_cudaInputStrides[d];
        result += coord * m_inputStrides[d];
      }
      result += p * m_inputStrides[limit];
    }
    return result;
  }

  // Converts a linear index `p` over the permuted non-kernel ("plane")
  // dimensions into an offset into the output tensor: each plane coordinate
  // is peeled off with the permuted strides and re-weighted with the native
  // output strides.
  EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaOutputPlaneToTensorOutputOffset(Index p) const {
    Index result = 0;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      // Plane dimensions sit above the kernel dimensions; walk them from
      // outermost to innermost. The innermost one needs no division.
      for (int d = NumDims - 1; d > NumKernelDims; --d) {
        const Index coord = p / m_cudaOutputStrides[d];
        p -= coord * m_cudaOutputStrides[d];
        result += coord * m_outputStrides[d];
      }
      result += p * m_outputStrides[NumKernelDims];
    } else {
      // Plane dimensions occupy the leading slots; the last of them needs
      // no division.
      const std::ptrdiff_t limit =
          (NumKernelDims < NumDims) ? NumDims - NumKernelDims - 1 : 0;
      for (int d = 0; d < limit; ++d) {
        const Index coord = p / m_cudaOutputStrides[d];
        p -= coord * m_cudaOutputStrides[d];
        result += coord * m_outputStrides[d];
      }
      result += p * m_outputStrides[limit];
    }
    return result;
  }

  // Maps kernel coordinate `i` to an input-tensor offset using the native
  // stride of the first kernel dimension in the permuted ordering.
  EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaInputKernelToTensorInputOffset(Index i) const {
    size_t base;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      base = 0;
    } else {
      base = NumDims - NumKernelDims;
    }
    return i * m_inputStrides[base];
  }

  // Maps kernel coordinate `i` to an output-tensor offset using the native
  // stride of the first kernel dimension in the permuted ordering.
  EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaOutputKernelToTensorOutputOffset(Index i) const {
    size_t base;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      base = 0;
    } else {
      base = NumDims - NumKernelDims;
    }
    return i * m_outputStrides[base];
  }

  // Maps 2-D kernel coordinates (i, j) to an input-tensor offset using the
  // native strides of the first two kernel dimensions in the permuted
  // ordering.
  EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaInputKernelToTensorInputOffset(Index i, Index j) const {
    size_t base;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      base = 0;
    } else {
      base = NumDims - NumKernelDims;
    }
    return i * m_inputStrides[base] + j * m_inputStrides[base + 1];
  }

  // Maps 2-D kernel coordinates (i, j) to an output-tensor offset using the
  // native strides of the first two kernel dimensions in the permuted
  // ordering.
  EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaOutputKernelToTensorOutputOffset(Index i, Index j) const {
    size_t base;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      base = 0;
    } else {
      base = NumDims - NumKernelDims;
    }
    return i * m_outputStrides[base] + j * m_outputStrides[base + 1];
  }

  // Maps 3-D kernel coordinates (i, j, k) to an input-tensor offset using
  // the native strides of the first three kernel dimensions in the permuted
  // ordering.
  EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaInputKernelToTensorInputOffset(Index i, Index j, Index k) const {
    size_t base;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      base = 0;
    } else {
      base = NumDims - NumKernelDims;
    }
    return i * m_inputStrides[base]
         + j * m_inputStrides[base + 1]
         + k * m_inputStrides[base + 2];
  }

  EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapCudaOutputKernelToTensorOutputOffset(Index i, Index j, Index k) const {
Loading full blame...