// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2011 Benoit Jacob
// Copyright (C) 2011-2014 Gael Guennebaud
// Copyright (C) 2011-2012 Jitse Niesen
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_COREEVALUATORS_H
#define EIGEN_COREEVALUATORS_H

namespace Eigen {

namespace internal {

// This class returns the evaluator kind from the expression storage kind.
// Default assumes index based accessors
template<typename StorageKind>
struct storage_kind_to_evaluator_kind {
  typedef IndexBased Kind;
};

// This class returns the evaluator shape from the expression storage kind.
// It can be Dense, Sparse, Triangular, Diagonal, SelfAdjoint, Band, etc.
template<typename StorageKind> struct storage_kind_to_shape;

template<> struct storage_kind_to_shape<Dense>                  { typedef DenseShape Shape;           };
template<> struct storage_kind_to_shape<SolverStorage>          { typedef SolverShape Shape;          };
template<> struct storage_kind_to_shape<PermutationStorage>     { typedef PermutationShape Shape;     };
template<> struct storage_kind_to_shape<TranspositionsStorage>  { typedef TranspositionsShape Shape;  };

// Evaluators have to be specialized with respect to various criteria such as:
//  - storage/structure/shape
//  - scalar type
//  - etc.
// Therefore, we need specializations of evaluator providing additional template arguments for each kind of evaluator.
// We currently distinguish the following kinds of evaluators:
// - unary_evaluator    for expressions taking only one argument (CwiseUnaryOp, CwiseUnaryView, Transpose, MatrixWrapper, ArrayWrapper, Reverse, Replicate)
// - binary_evaluator   for expressions taking two arguments (CwiseBinaryOp)
// - ternary_evaluator  for expressions taking three arguments (CwiseTernaryOp)
// - product_evaluator  for linear algebra products (Product); a special case of binary_evaluator because it requires additional tags for dispatching.
// - mapbase_evaluator  for Map, Block, Ref
// - block_evaluator    for Block (special dispatching to a mapbase_evaluator or unary_evaluator)
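//
// For illustration: assignment and reduction kernels consume evaluators through this
// uniform coefficient/packet interface. A minimal sketch, assuming some dense
// expression `xpr` of type Xpr:
//
//   internal::evaluator<Xpr> eval(xpr);              // build the evaluator once
//   typename Xpr::Scalar s = eval.coeff(row, col);   // index-based coefficient access
//   // and, when Flags expose PacketAccessBit:
//   //   PacketType p = eval.template packet<LoadMode, PacketType>(row, col);
//
// Which concrete evaluator gets instantiated is decided by the specializations
// declared below.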

template< typename T,
          typename Arg1Kind   = typename evaluator_traits<typename T::Arg1>::Kind,
          typename Arg2Kind   = typename evaluator_traits<typename T::Arg2>::Kind,
          typename Arg3Kind   = typename evaluator_traits<typename T::Arg3>::Kind,
          typename Arg1Scalar = typename traits<typename T::Arg1>::Scalar,
          typename Arg2Scalar = typename traits<typename T::Arg2>::Scalar,
          typename Arg3Scalar = typename traits<typename T::Arg3>::Scalar> struct ternary_evaluator;

template< typename T,
          typename LhsKind   = typename evaluator_traits<typename T::Lhs>::Kind,
          typename RhsKind   = typename evaluator_traits<typename T::Rhs>::Kind,
          typename LhsScalar = typename traits<typename T::Lhs>::Scalar,
          typename RhsScalar = typename traits<typename T::Rhs>::Scalar> struct binary_evaluator;

template< typename T,
          typename Kind   = typename evaluator_traits<T>::Kind,
          typename Scalar = typename T::Scalar> struct unary_evaluator;

// evaluator_traits<T> contains traits for evaluator<T>

template<typename T>
struct evaluator_traits_base
{
  // by default, get evaluator kind and shape from storage
  typedef typename storage_kind_to_evaluator_kind<typename traits<T>::StorageKind>::Kind Kind;
  typedef typename storage_kind_to_shape<typename traits<T>::StorageKind>::Shape Shape;
};

// Default evaluator traits
template<typename T>
struct evaluator_traits : public evaluator_traits_base<T>
{
};

template<typename T, typename Shape = typename evaluator_traits<T>::Shape >
struct evaluator_assume_aliasing {
  static const bool value = false;
};

// By default, we assume a unary expression:
template<typename T>
struct evaluator : public unary_evaluator<T>
{
  typedef unary_evaluator<T> Base;
  EIGEN_DEVICE_FUNC explicit evaluator(const T& xpr) : Base(xpr) {}
};

// TODO: Think about const-correctness
template<typename T>
struct evaluator<const T>
  : evaluator<T>
{
  EIGEN_DEVICE_FUNC
  explicit evaluator(const T& xpr) : evaluator<T>(xpr) {}
};

// ---------- base class for all evaluators ----------

template<typename ExpressionType>
struct evaluator_base : public noncopyable
{
  // TODO that's not very nice to have to propagate all these traits. They are currently only needed to handle outer,inner indices.
  typedef traits<ExpressionType> ExpressionTraits;

  enum {
    Alignment = 0
  };
};

// -------------------- Matrix and Array --------------------
//
// evaluator<PlainObjectBase> is a common base class for the
// Matrix and Array evaluators.
// Here we directly specialize evaluator. This is not really a unary expression, and it is, by definition, dense,
// so no need for more sophisticated dispatching.

template<typename Derived>
struct evaluator<PlainObjectBase<Derived> >
  : evaluator_base<Derived>
{
  typedef PlainObjectBase<Derived> PlainObjectType;
  typedef typename PlainObjectType::Scalar Scalar;
  typedef typename PlainObjectType::CoeffReturnType CoeffReturnType;

  enum {
    IsRowMajor = PlainObjectType::IsRowMajor,
    IsVectorAtCompileTime = PlainObjectType::IsVectorAtCompileTime,
    RowsAtCompileTime = PlainObjectType::RowsAtCompileTime,
    ColsAtCompileTime = PlainObjectType::ColsAtCompileTime,

    CoeffReadCost = NumTraits<Scalar>::ReadCost,
    Flags = traits<Derived>::EvaluatorFlags,
    Alignment = traits<Derived>::Alignment
  };

  EIGEN_DEVICE_FUNC evaluator()
    : m_data(0),
      m_outerStride(IsVectorAtCompileTime  ? 0
                                           : int(IsRowMajor) ? ColsAtCompileTime
                                           : RowsAtCompileTime)
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  EIGEN_DEVICE_FUNC explicit evaluator(const PlainObjectType& m)
    : m_data(m.data()), m_outerStride(IsVectorAtCompileTime ? 0 : m.outerStride())
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index row, Index col) const
  {
    if (IsRowMajor)
      return m_data[row * m_outerStride.value() + col];
    else
      return m_data[row + col * m_outerStride.value()];
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index index) const
  {
    return m_data[index];
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& coeffRef(Index row, Index col)
  {
    if (IsRowMajor)
      return const_cast<Scalar*>(m_data)[row * m_outerStride.value() + col];
    else
      return const_cast<Scalar*>(m_data)[row + col * m_outerStride.value()];
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& coeffRef(Index index)
  {
    return const_cast<Scalar*>(m_data)[index];
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index row, Index col) const
  {
    if (IsRowMajor)
      return ploadt<PacketType, LoadMode>(m_data + row * m_outerStride.value() + col);
    else
      return ploadt<PacketType, LoadMode>(m_data + row + col * m_outerStride.value());
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index index) const
  {
    return ploadt<PacketType, LoadMode>(m_data + index);
  }

  template<int StoreMode, typename PacketType>
  EIGEN_STRONG_INLINE
  void writePacket(Index row, Index col, const PacketType& x)
  {
    if (IsRowMajor)
      return pstoret<Scalar, PacketType, StoreMode>
                    (const_cast<Scalar*>(m_data) + row * m_outerStride.value() + col, x);
    else
      return pstoret<Scalar, PacketType, StoreMode>
                    (const_cast<Scalar*>(m_data) + row + col * m_outerStride.value(), x);
  }

  template<int StoreMode, typename PacketType>
  EIGEN_STRONG_INLINE
  void writePacket(Index index, const PacketType& x)
  {
    return pstoret<Scalar, PacketType, StoreMode>(const_cast<Scalar*>(m_data) + index, x);
  }

protected:
  const Scalar *m_data;

  // We do not need to know the outer stride for vectors
  variable_if_dynamic<Index, IsVectorAtCompileTime  ? 0
                                                    : int(IsRowMajor) ? ColsAtCompileTime
                                                    : RowsAtCompileTime> m_outerStride;
};

template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
struct evaluator<Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> >
  : evaluator<PlainObjectBase<Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> > >
{
  typedef Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> XprType;

  EIGEN_DEVICE_FUNC evaluator() {}

  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& m)
    : evaluator<PlainObjectBase<XprType> >(m)
  { }
};

template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
struct evaluator<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> >
  : evaluator<PlainObjectBase<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> > >
{
  typedef Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> XprType;

  EIGEN_DEVICE_FUNC evaluator() {}

  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& m)
    : evaluator<PlainObjectBase<XprType> >(m)
  { }
};
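// For illustration, a small worked example of the addressing scheme above (assuming the
// default column-major storage): for a dynamic-size 3x4 MatrixXf the outer stride is 3,
// so coeff(1,2) reads m_data[1 + 2*3] == m_data[7]; a row-major variant would use an
// outer stride of 4 and read m_data[1*4 + 2] == m_data[6].
//
//   MatrixXf M(3,4);
//   internal::evaluator<MatrixXf> e(M);
//   // e.coeff(1,2) == M.data()[7] == M(1,2)
//
// For vectors the outer stride is irrelevant, which is why m_outerStride collapses to a
// compile-time constant in that case.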

// -------------------- Transpose --------------------

template<typename ArgType>
struct unary_evaluator<Transpose<ArgType>, IndexBased>
  : evaluator_base<Transpose<ArgType> >
{
  typedef Transpose<ArgType> XprType;

  enum {
    CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
    Flags = evaluator<ArgType>::Flags ^ RowMajorBit,
    Alignment = evaluator<ArgType>::Alignment
  };

  EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& t) : m_argImpl(t.nestedExpression()) {}

  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index row, Index col) const
  {
    return m_argImpl.coeff(col, row);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index index) const
  {
    return m_argImpl.coeff(index);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& coeffRef(Index row, Index col)
  {
    return m_argImpl.coeffRef(col, row);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  typename XprType::Scalar& coeffRef(Index index)
  {
    return m_argImpl.coeffRef(index);
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index row, Index col) const
  {
    return m_argImpl.template packet<LoadMode,PacketType>(col, row);
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index index) const
  {
    return m_argImpl.template packet<LoadMode,PacketType>(index);
  }

  template<int StoreMode, typename PacketType>
  EIGEN_STRONG_INLINE
  void writePacket(Index row, Index col, const PacketType& x)
  {
    m_argImpl.template writePacket<StoreMode,PacketType>(col, row, x);
  }

  template<int StoreMode, typename PacketType>
  EIGEN_STRONG_INLINE
  void writePacket(Index index, const PacketType& x)
  {
    m_argImpl.template writePacket<StoreMode,PacketType>(index, x);
  }

protected:
  evaluator<ArgType> m_argImpl;
};

// -------------------- CwiseNullaryOp --------------------
// Like Matrix and Array, this is not really a unary expression, so we directly
specialize evaluator. // Likewise, there is not need to more sophisticated dispatching here. template::value, bool has_unary = has_unary_operator::value, bool has_binary = has_binary_operator::value> struct nullary_wrapper { template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const { return op(i,j); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const { return op(i); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const { return op.template packetOp(i,j); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const { return op.template packetOp(i); } }; template struct nullary_wrapper { template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType=0, IndexType=0) const { return op(); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType=0, IndexType=0) const { return op.template packetOp(); } }; template struct nullary_wrapper { template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j=0) const { return op(i,j); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j=0) const { return op.template packetOp(i,j); } }; // We need the following specialization for vector-only functors assigned to a runtime vector, // for instance, using linspace and assigning a RowVectorXd to a MatrixXd or even a row of a MatrixXd. // In this case, i==0 and j is used for the actual iteration. template struct nullary_wrapper { template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const { eigen_assert(i==0 || j==0); return op(i+j); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const { eigen_assert(i==0 || j==0); return op.template packetOp(i+j); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const { return op(i); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const { return op.template packetOp(i); } }; template struct nullary_wrapper {}; #if 0 && EIGEN_COMP_MSVC>0 // Disable this ugly workaround. This is now handled in traits::match, // but this piece of code might still become handly if some other weird compilation // erros pop up again. // MSVC exhibits a weird compilation error when // compiling: // Eigen::MatrixXf A = MatrixXf::Random(3,3); // Ref R = 2.f*A; // and that has_*ary_operator> have not been instantiated yet. // The "problem" is that evaluator<2.f*A> is instantiated by traits::match<2.f*A> // and at that time has_*ary_operator returns true regardless of T. // Then nullary_wrapper is badly instantiated as nullary_wrapper<.,.,true,true,true>. // The trick is thus to defer the proper instantiation of nullary_wrapper when coeff(), // and packet() are really instantiated as implemented below: // This is a simple wrapper around Index to enforce the re-instantiation of // has_*ary_operator when needed. 
template struct nullary_wrapper_workaround_msvc { nullary_wrapper_workaround_msvc(const T&); operator T()const; }; template struct nullary_wrapper { template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const { return nullary_wrapper >::value, has_unary_operator >::value, has_binary_operator >::value>().operator()(op,i,j); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const { return nullary_wrapper >::value, has_unary_operator >::value, has_binary_operator >::value>().operator()(op,i); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const { return nullary_wrapper >::value, has_unary_operator >::value, has_binary_operator >::value>().template packetOp(op,i,j); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const { return nullary_wrapper >::value, has_unary_operator >::value, has_binary_operator >::value>().template packetOp(op,i); } }; #endif // MSVC workaround template struct evaluator > : evaluator_base > { typedef CwiseNullaryOp XprType; typedef typename internal::remove_all::type PlainObjectTypeCleaned; enum { CoeffReadCost = internal::functor_traits::Cost, Flags = (evaluator::Flags & ( HereditaryBits | (functor_has_linear_access::ret ? LinearAccessBit : 0) | (functor_traits::PacketAccess ? PacketAccessBit : 0))) | (functor_traits::IsRepeatable ? 0 : EvalBeforeNestingBit), Alignment = AlignedMax }; EIGEN_DEVICE_FUNC explicit evaluator(const XprType& n) : m_functor(n.functor()), m_wrapper() { EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } typedef typename XprType::CoeffReturnType CoeffReturnType; template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(IndexType row, IndexType col) const { return m_wrapper(m_functor, row, col); } template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(IndexType index) const { return m_wrapper(m_functor,index); } template EIGEN_STRONG_INLINE PacketType packet(IndexType row, IndexType col) const { return m_wrapper.template packetOp(m_functor, row, col); } template EIGEN_STRONG_INLINE PacketType packet(IndexType index) const { return m_wrapper.template packetOp(m_functor, index); } protected: const NullaryOp m_functor; const internal::nullary_wrapper m_wrapper; }; // -------------------- CwiseUnaryOp -------------------- template struct unary_evaluator, IndexBased > : evaluator_base > { typedef CwiseUnaryOp XprType; enum { CoeffReadCost = evaluator::CoeffReadCost + functor_traits::Cost, Flags = evaluator::Flags & (HereditaryBits | LinearAccessBit | (functor_traits::PacketAccess ? 
PacketAccessBit : 0)), Alignment = evaluator::Alignment }; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit unary_evaluator(const XprType& op) : m_functor(op.functor()), m_argImpl(op.nestedExpression()) { EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits::Cost); EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } typedef typename XprType::CoeffReturnType CoeffReturnType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { return m_functor(m_argImpl.coeff(row, col)); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_functor(m_argImpl.coeff(index)); } template EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { return m_functor.packetOp(m_argImpl.template packet(row, col)); } template EIGEN_STRONG_INLINE PacketType packet(Index index) const { return m_functor.packetOp(m_argImpl.template packet(index)); } protected: const UnaryOp m_functor; evaluator m_argImpl; }; // -------------------- CwiseTernaryOp -------------------- // this is a ternary expression template struct evaluator > : public ternary_evaluator > { typedef CwiseTernaryOp XprType; typedef ternary_evaluator > Base; EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : Base(xpr) {} }; template struct ternary_evaluator, IndexBased, IndexBased> : evaluator_base > { typedef CwiseTernaryOp XprType; enum { CoeffReadCost = evaluator::CoeffReadCost + evaluator::CoeffReadCost + evaluator::CoeffReadCost + functor_traits::Cost, Arg1Flags = evaluator::Flags, Arg2Flags = evaluator::Flags, Arg3Flags = evaluator::Flags, SameType = is_same::value && is_same::value, StorageOrdersAgree = (int(Arg1Flags)&RowMajorBit)==(int(Arg2Flags)&RowMajorBit) && (int(Arg1Flags)&RowMajorBit)==(int(Arg3Flags)&RowMajorBit), Flags0 = (int(Arg1Flags) | int(Arg2Flags) | int(Arg3Flags)) & ( HereditaryBits | (int(Arg1Flags) & int(Arg2Flags) & int(Arg3Flags) & ( (StorageOrdersAgree ? LinearAccessBit : 0) | (functor_traits::PacketAccess && StorageOrdersAgree && SameType ? 
PacketAccessBit : 0) ) ) ), Flags = (Flags0 & ~RowMajorBit) | (Arg1Flags & RowMajorBit), Alignment = EIGEN_PLAIN_ENUM_MIN( EIGEN_PLAIN_ENUM_MIN(evaluator::Alignment, evaluator::Alignment), evaluator::Alignment) }; EIGEN_DEVICE_FUNC explicit ternary_evaluator(const XprType& xpr) : m_functor(xpr.functor()), m_arg1Impl(xpr.arg1()), m_arg2Impl(xpr.arg2()), m_arg3Impl(xpr.arg3()) { EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits::Cost); EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } typedef typename XprType::CoeffReturnType CoeffReturnType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { return m_functor(m_arg1Impl.coeff(row, col), m_arg2Impl.coeff(row, col), m_arg3Impl.coeff(row, col)); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_functor(m_arg1Impl.coeff(index), m_arg2Impl.coeff(index), m_arg3Impl.coeff(index)); } template EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { return m_functor.packetOp(m_arg1Impl.template packet(row, col), m_arg2Impl.template packet(row, col), m_arg3Impl.template packet(row, col)); } template EIGEN_STRONG_INLINE PacketType packet(Index index) const { return m_functor.packetOp(m_arg1Impl.template packet(index), m_arg2Impl.template packet(index), m_arg3Impl.template packet(index)); } protected: const TernaryOp m_functor; evaluator m_arg1Impl; evaluator m_arg2Impl; evaluator m_arg3Impl; }; // -------------------- CwiseBinaryOp -------------------- // this is a binary expression template struct evaluator > : public binary_evaluator > { typedef CwiseBinaryOp XprType; typedef binary_evaluator > Base; EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : Base(xpr) {} }; template struct binary_evaluator, IndexBased, IndexBased> : evaluator_base > { typedef CwiseBinaryOp XprType; enum { CoeffReadCost = evaluator::CoeffReadCost + evaluator::CoeffReadCost + functor_traits::Cost, LhsFlags = evaluator::Flags, RhsFlags = evaluator::Flags, SameType = is_same::value, StorageOrdersAgree = (int(LhsFlags)&RowMajorBit)==(int(RhsFlags)&RowMajorBit), Flags0 = (int(LhsFlags) | int(RhsFlags)) & ( HereditaryBits | (int(LhsFlags) & int(RhsFlags) & ( (StorageOrdersAgree ? LinearAccessBit : 0) | (functor_traits::PacketAccess && StorageOrdersAgree && SameType ? 
PacketAccessBit : 0) ) ) ), Flags = (Flags0 & ~RowMajorBit) | (LhsFlags & RowMajorBit), Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator::Alignment,evaluator::Alignment) }; EIGEN_DEVICE_FUNC explicit binary_evaluator(const XprType& xpr) : m_functor(xpr.functor()), m_lhsImpl(xpr.lhs()), m_rhsImpl(xpr.rhs()) { EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits::Cost); EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } typedef typename XprType::CoeffReturnType CoeffReturnType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { return m_functor(m_lhsImpl.coeff(row, col), m_rhsImpl.coeff(row, col)); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_functor(m_lhsImpl.coeff(index), m_rhsImpl.coeff(index)); } template EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { return m_functor.packetOp(m_lhsImpl.template packet(row, col), m_rhsImpl.template packet(row, col)); } template EIGEN_STRONG_INLINE PacketType packet(Index index) const { return m_functor.packetOp(m_lhsImpl.template packet(index), m_rhsImpl.template packet(index)); } protected: const BinaryOp m_functor; evaluator m_lhsImpl; evaluator m_rhsImpl; }; // -------------------- CwiseUnaryView -------------------- template struct unary_evaluator, IndexBased> : evaluator_base > { typedef CwiseUnaryView XprType; enum { CoeffReadCost = evaluator::CoeffReadCost + functor_traits::Cost, Flags = (evaluator::Flags & (HereditaryBits | LinearAccessBit | DirectAccessBit)), Alignment = 0 // FIXME it is not very clear why alignment is necessarily lost... }; EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& op) : m_unaryOp(op.functor()), m_argImpl(op.nestedExpression()) { EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits::Cost); EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { return m_unaryOp(m_argImpl.coeff(row, col)); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_unaryOp(m_argImpl.coeff(index)); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) { return m_unaryOp(m_argImpl.coeffRef(row, col)); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { return m_unaryOp(m_argImpl.coeffRef(index)); } protected: const UnaryOp m_unaryOp; evaluator m_argImpl; }; // -------------------- Map -------------------- // FIXME perhaps the PlainObjectType could be provided by Derived::PlainObject ? 
// but that might complicate template specialization template struct mapbase_evaluator; template struct mapbase_evaluator : evaluator_base { typedef Derived XprType; typedef typename XprType::PointerType PointerType; typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; enum { IsRowMajor = XprType::RowsAtCompileTime, ColsAtCompileTime = XprType::ColsAtCompileTime, CoeffReadCost = NumTraits::ReadCost }; EIGEN_DEVICE_FUNC explicit mapbase_evaluator(const XprType& map) : m_data(const_cast(map.data())), m_innerStride(map.innerStride()), m_outerStride(map.outerStride()) { EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(evaluator::Flags&PacketAccessBit, internal::inner_stride_at_compile_time::ret==1), PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1); EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { return m_data[col * colStride() + row * rowStride()]; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_data[index * m_innerStride.value()]; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) { return m_data[col * colStride() + row * rowStride()]; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { return m_data[index * m_innerStride.value()]; } template EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { PointerType ptr = m_data + row * rowStride() + col * colStride(); return internal::ploadt(ptr); } template EIGEN_STRONG_INLINE PacketType packet(Index index) const { return internal::ploadt(m_data + index * m_innerStride.value()); } template EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketType& x) { PointerType ptr = m_data + row * rowStride() + col * colStride(); return internal::pstoret(ptr, x); } template EIGEN_STRONG_INLINE void writePacket(Index index, const PacketType& x) { internal::pstoret(m_data + index * m_innerStride.value(), x); } protected: EIGEN_DEVICE_FUNC inline Index rowStride() const { return XprType::IsRowMajor ? m_outerStride.value() : m_innerStride.value(); } EIGEN_DEVICE_FUNC inline Index colStride() const { return XprType::IsRowMajor ? m_innerStride.value() : m_outerStride.value(); } PointerType m_data; const internal::variable_if_dynamic m_innerStride; const internal::variable_if_dynamic m_outerStride; }; template struct evaluator > : public mapbase_evaluator, PlainObjectType> { typedef Map XprType; typedef typename XprType::Scalar Scalar; // TODO: should check for smaller packet types once we can handle multi-sized packet types typedef typename packet_traits::type PacketScalar; enum { InnerStrideAtCompileTime = StrideType::InnerStrideAtCompileTime == 0 ? int(PlainObjectType::InnerStrideAtCompileTime) : int(StrideType::InnerStrideAtCompileTime), OuterStrideAtCompileTime = StrideType::OuterStrideAtCompileTime == 0 ? int(PlainObjectType::OuterStrideAtCompileTime) : int(StrideType::OuterStrideAtCompileTime), HasNoInnerStride = InnerStrideAtCompileTime == 1, HasNoOuterStride = StrideType::OuterStrideAtCompileTime == 0, HasNoStride = HasNoInnerStride && HasNoOuterStride, IsDynamicSize = PlainObjectType::SizeAtCompileTime==Dynamic, PacketAccessMask = bool(HasNoInnerStride) ? ~int(0) : ~int(PacketAccessBit), LinearAccessMask = bool(HasNoStride) || bool(PlainObjectType::IsVectorAtCompileTime) ? 
~int(0) : ~int(LinearAccessBit), Flags = int( evaluator::Flags) & (LinearAccessMask&PacketAccessMask), Alignment = int(MapOptions)&int(AlignedMask) }; EIGEN_DEVICE_FUNC explicit evaluator(const XprType& map) : mapbase_evaluator(map) { } }; // -------------------- Ref -------------------- template struct evaluator > : public mapbase_evaluator, PlainObjectType> { typedef Ref XprType; enum { Flags = evaluator >::Flags, Alignment = evaluator >::Alignment }; EIGEN_DEVICE_FUNC explicit evaluator(const XprType& ref) : mapbase_evaluator(ref) { } }; // -------------------- Block -------------------- template::ret> struct block_evaluator; template struct evaluator > : block_evaluator { typedef Block XprType; typedef typename XprType::Scalar Scalar; // TODO: should check for smaller packet types once we can handle multi-sized packet types typedef typename packet_traits::type PacketScalar; enum { CoeffReadCost = evaluator::CoeffReadCost, RowsAtCompileTime = traits::RowsAtCompileTime, ColsAtCompileTime = traits::ColsAtCompileTime, MaxRowsAtCompileTime = traits::MaxRowsAtCompileTime, MaxColsAtCompileTime = traits::MaxColsAtCompileTime, ArgTypeIsRowMajor = (int(evaluator::Flags)&RowMajorBit) != 0, IsRowMajor = (MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1) ? 1 : (MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1) ? 0 : ArgTypeIsRowMajor, HasSameStorageOrderAsArgType = (IsRowMajor == ArgTypeIsRowMajor), InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime), InnerStrideAtCompileTime = HasSameStorageOrderAsArgType ? int(inner_stride_at_compile_time::ret) : int(outer_stride_at_compile_time::ret), OuterStrideAtCompileTime = HasSameStorageOrderAsArgType ? int(outer_stride_at_compile_time::ret) : int(inner_stride_at_compile_time::ret), MaskPacketAccessBit = (InnerStrideAtCompileTime == 1 || HasSameStorageOrderAsArgType) ? PacketAccessBit : 0, FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1 || (InnerPanel && (evaluator::Flags&LinearAccessBit))) ? LinearAccessBit : 0, FlagsRowMajorBit = XprType::Flags&RowMajorBit, Flags0 = evaluator::Flags & ( (HereditaryBits & ~RowMajorBit) | DirectAccessBit | MaskPacketAccessBit), Flags = Flags0 | FlagsLinearAccessBit | FlagsRowMajorBit, PacketAlignment = unpacket_traits::alignment, Alignment0 = (InnerPanel && (OuterStrideAtCompileTime!=Dynamic) && (OuterStrideAtCompileTime!=0) && (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % int(PacketAlignment)) == 0)) ? int(PacketAlignment) : 0, Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator::Alignment, Alignment0) }; typedef block_evaluator block_evaluator_type; EIGEN_DEVICE_FUNC explicit evaluator(const XprType& block) : block_evaluator_type(block) { EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } }; // no direct-access => dispatch to a unary evaluator template struct block_evaluator : unary_evaluator > { typedef Block XprType; EIGEN_DEVICE_FUNC explicit block_evaluator(const XprType& block) : unary_evaluator(block) {} }; template struct unary_evaluator, IndexBased> : evaluator_base > { typedef Block XprType; EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& block) : m_argImpl(block.nestedExpression()), m_startRow(block.startRow()), m_startCol(block.startCol()), m_linear_offset(InnerPanel?(XprType::IsRowMajor ? 
block.startRow()*block.cols() : block.startCol()*block.rows()):0) { } typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; enum { RowsAtCompileTime = XprType::RowsAtCompileTime, ForwardLinearAccess = InnerPanel && bool(evaluator::Flags&LinearAccessBit) }; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { return m_argImpl.coeff(m_startRow.value() + row, m_startCol.value() + col); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { if (ForwardLinearAccess) return m_argImpl.coeff(m_linear_offset.value() + index); else return coeff(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) { return m_argImpl.coeffRef(m_startRow.value() + row, m_startCol.value() + col); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { if (ForwardLinearAccess) return m_argImpl.coeffRef(m_linear_offset.value() + index); else return coeffRef(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0); } template EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { return m_argImpl.template packet(m_startRow.value() + row, m_startCol.value() + col); } template EIGEN_STRONG_INLINE PacketType packet(Index index) const { if (ForwardLinearAccess) return m_argImpl.template packet(m_linear_offset.value() + index); else return packet(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0); } template EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketType& x) { return m_argImpl.template writePacket(m_startRow.value() + row, m_startCol.value() + col, x); } template EIGEN_STRONG_INLINE void writePacket(Index index, const PacketType& x) { if (ForwardLinearAccess) return m_argImpl.template writePacket(m_linear_offset.value() + index, x); else return writePacket(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0, x); } protected: evaluator m_argImpl; const variable_if_dynamic m_startRow; const variable_if_dynamic m_startCol; const variable_if_dynamic m_linear_offset; }; // TODO: This evaluator does not actually use the child evaluator; // all action is via the data() as returned by the Block expression. template struct block_evaluator : mapbase_evaluator, typename Block::PlainObject> { typedef Block XprType; typedef typename XprType::Scalar Scalar; EIGEN_DEVICE_FUNC explicit block_evaluator(const XprType& block) : mapbase_evaluator(block) { // TODO: for the 3.3 release, this should be turned to an internal assertion, but let's keep it as is for the beta lifetime eigen_assert(((internal::UIntPtr(block.data()) % EIGEN_PLAIN_ENUM_MAX(1,evaluator::Alignment)) == 0) && "data is not aligned"); } }; // -------------------- Select -------------------- // NOTE shall we introduce a ternary_evaluator? 
// TODO enable vectorization for Select template struct evaluator > : evaluator_base > { typedef Select XprType; enum { CoeffReadCost = evaluator::CoeffReadCost + EIGEN_PLAIN_ENUM_MAX(evaluator::CoeffReadCost, evaluator::CoeffReadCost), Flags = (unsigned int)evaluator::Flags & evaluator::Flags & HereditaryBits, Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator::Alignment, evaluator::Alignment) }; EIGEN_DEVICE_FUNC explicit evaluator(const XprType& select) : m_conditionImpl(select.conditionMatrix()), m_thenImpl(select.thenMatrix()), m_elseImpl(select.elseMatrix()) { EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } typedef typename XprType::CoeffReturnType CoeffReturnType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { if (m_conditionImpl.coeff(row, col)) return m_thenImpl.coeff(row, col); else return m_elseImpl.coeff(row, col); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { if (m_conditionImpl.coeff(index)) return m_thenImpl.coeff(index); else return m_elseImpl.coeff(index); } protected: evaluator m_conditionImpl; evaluator m_thenImpl; evaluator m_elseImpl; }; // -------------------- Replicate -------------------- template struct unary_evaluator > : evaluator_base > { typedef Replicate XprType; typedef typename XprType::CoeffReturnType CoeffReturnType; enum { Factor = (RowFactor==Dynamic || ColFactor==Dynamic) ? Dynamic : RowFactor*ColFactor }; typedef typename internal::nested_eval::type ArgTypeNested; typedef typename internal::remove_all::type ArgTypeNestedCleaned; enum { CoeffReadCost = evaluator::CoeffReadCost, LinearAccessMask = XprType::IsVectorAtCompileTime ? LinearAccessBit : 0, Flags = (evaluator::Flags & (HereditaryBits|LinearAccessMask) & ~RowMajorBit) | (traits::Flags & RowMajorBit), Alignment = evaluator::Alignment }; EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& replicate) : m_arg(replicate.nestedExpression()), m_argImpl(m_arg), m_rows(replicate.nestedExpression().rows()), m_cols(replicate.nestedExpression().cols()) {} EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { // try to avoid using modulo; this is a pure optimization strategy const Index actual_row = internal::traits::RowsAtCompileTime==1 ? 0 : RowFactor==1 ? row : row % m_rows.value(); const Index actual_col = internal::traits::ColsAtCompileTime==1 ? 0 : ColFactor==1 ? col : col % m_cols.value(); return m_argImpl.coeff(actual_row, actual_col); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { // try to avoid using modulo; this is a pure optimization strategy const Index actual_index = internal::traits::RowsAtCompileTime==1 ? (ColFactor==1 ? index : index%m_cols.value()) : (RowFactor==1 ? index : index%m_rows.value()); return m_argImpl.coeff(actual_index); } template EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { const Index actual_row = internal::traits::RowsAtCompileTime==1 ? 0 : RowFactor==1 ? row : row % m_rows.value(); const Index actual_col = internal::traits::ColsAtCompileTime==1 ? 0 : ColFactor==1 ? col : col % m_cols.value(); return m_argImpl.template packet(actual_row, actual_col); } template EIGEN_STRONG_INLINE PacketType packet(Index index) const { const Index actual_index = internal::traits::RowsAtCompileTime==1 ? (ColFactor==1 ? index : index%m_cols.value()) : (RowFactor==1 ? 
                                                                                    index : index%m_rows.value());
    return m_argImpl.template packet<LoadMode,PacketType>(actual_index);
  }

protected:
  const ArgTypeNested m_arg;
  evaluator<ArgTypeNestedCleaned> m_argImpl;
  const variable_if_dynamic<Index, ArgType::RowsAtCompileTime> m_rows;
  const variable_if_dynamic<Index, ArgType::ColsAtCompileTime> m_cols;
};

// -------------------- PartialReduxExpr --------------------

template< typename ArgType, typename MemberOp, int Direction>
struct evaluator<PartialReduxExpr<ArgType, MemberOp, Direction> >
  : evaluator_base<PartialReduxExpr<ArgType, MemberOp, Direction> >
{
  typedef PartialReduxExpr<ArgType, MemberOp, Direction> XprType;
  typedef typename internal::nested_eval<ArgType,1>::type ArgTypeNested;
  typedef typename internal::remove_all<ArgTypeNested>::type ArgTypeNestedCleaned;
  typedef typename ArgType::Scalar InputScalar;
  typedef typename XprType::Scalar Scalar;
  enum {
    TraversalSize = Direction==int(Vertical) ? int(ArgType::RowsAtCompileTime) :  int(ArgType::ColsAtCompileTime)
  };
  typedef typename MemberOp::template Cost<InputScalar,int(TraversalSize)> CostOpType;
  enum {
    CoeffReadCost = TraversalSize==Dynamic ? HugeCost
                  : TraversalSize * evaluator<ArgType>::CoeffReadCost + int(CostOpType::value),

    Flags = (traits<XprType>::Flags&RowMajorBit) | (evaluator<ArgType>::Flags&(HereditaryBits&(~RowMajorBit))) | LinearAccessBit,

    Alignment = 0 // FIXME this will need to be improved once PartialReduxExpr is vectorized
  };

  EIGEN_DEVICE_FUNC explicit evaluator(const XprType xpr)
    : m_arg(xpr.nestedExpression()), m_functor(xpr.functor())
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(TraversalSize==Dynamic ? HugeCost : int(CostOpType::value));
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  typedef typename XprType::CoeffReturnType CoeffReturnType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  const Scalar coeff(Index i, Index j) const
  {
    if (Direction==Vertical)
      return m_functor(m_arg.col(j));
    else
      return m_functor(m_arg.row(i));
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  const Scalar coeff(Index index) const
  {
    if (Direction==Vertical)
      return m_functor(m_arg.col(index));
    else
      return m_functor(m_arg.row(index));
  }

protected:
  typename internal::add_const_on_value_type<ArgTypeNested>::type m_arg;
  const MemberOp m_functor;
};

// -------------------- MatrixWrapper and ArrayWrapper --------------------
//
// evaluator_wrapper_base<T> is a common base class for the
// MatrixWrapper and ArrayWrapper evaluators.
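//
// For illustration: m.array() wraps a matrix expression into an ArrayWrapper and
// a.matrix() does the converse. The evaluators below do not compute anything themselves;
// every coeff()/packet() call is forwarded unchanged to the evaluator of the nested
// expression, so the wrapping is free at evaluation time. A minimal sketch:
//
//   MatrixXf m(2,2);
//   ArrayXXf a = m.array() + 1.f;   // unary_evaluator<ArrayWrapper<MatrixXf> > forwards to evaluator<MatrixXf>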
template struct evaluator_wrapper_base : evaluator_base { typedef typename remove_all::type ArgType; enum { CoeffReadCost = evaluator::CoeffReadCost, Flags = evaluator::Flags, Alignment = evaluator::Alignment }; EIGEN_DEVICE_FUNC explicit evaluator_wrapper_base(const ArgType& arg) : m_argImpl(arg) {} typedef typename ArgType::Scalar Scalar; typedef typename ArgType::CoeffReturnType CoeffReturnType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { return m_argImpl.coeff(row, col); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_argImpl.coeff(index); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) { return m_argImpl.coeffRef(row, col); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { return m_argImpl.coeffRef(index); } template EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { return m_argImpl.template packet(row, col); } template EIGEN_STRONG_INLINE PacketType packet(Index index) const { return m_argImpl.template packet(index); } template EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketType& x) { m_argImpl.template writePacket(row, col, x); } template EIGEN_STRONG_INLINE void writePacket(Index index, const PacketType& x) { m_argImpl.template writePacket(index, x); } protected: evaluator m_argImpl; }; template struct unary_evaluator > : evaluator_wrapper_base > { typedef MatrixWrapper XprType; EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& wrapper) : evaluator_wrapper_base >(wrapper.nestedExpression()) { } }; template struct unary_evaluator > : evaluator_wrapper_base > { typedef ArrayWrapper XprType; EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& wrapper) : evaluator_wrapper_base >(wrapper.nestedExpression()) { } }; // -------------------- Reverse -------------------- // defined in Reverse.h: template struct reverse_packet_cond; template struct unary_evaluator > : evaluator_base > { typedef Reverse XprType; typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; enum { IsRowMajor = XprType::IsRowMajor, IsColMajor = !IsRowMajor, ReverseRow = (Direction == Vertical) || (Direction == BothDirections), ReverseCol = (Direction == Horizontal) || (Direction == BothDirections), ReversePacket = (Direction == BothDirections) || ((Direction == Vertical) && IsColMajor) || ((Direction == Horizontal) && IsRowMajor), CoeffReadCost = evaluator::CoeffReadCost, // let's enable LinearAccess only with vectorization because of the product overhead // FIXME enable DirectAccess with negative strides? Flags0 = evaluator::Flags, LinearAccess = ( (Direction==BothDirections) && (int(Flags0)&PacketAccessBit) ) || ((ReverseRow && XprType::ColsAtCompileTime==1) || (ReverseCol && XprType::RowsAtCompileTime==1)) ? LinearAccessBit : 0, Flags = int(Flags0) & (HereditaryBits | PacketAccessBit | LinearAccess), Alignment = 0 // FIXME in some rare cases, Alignment could be preserved, like a Vector4f. }; EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& reverse) : m_argImpl(reverse.nestedExpression()), m_rows(ReverseRow ? reverse.nestedExpression().rows() : 1), m_cols(ReverseCol ? reverse.nestedExpression().cols() : 1) { } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const { return m_argImpl.coeff(ReverseRow ? m_rows.value() - row - 1 : row, ReverseCol ? 
m_cols.value() - col - 1 : col); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_argImpl.coeff(m_rows.value() * m_cols.value() - index - 1); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) { return m_argImpl.coeffRef(ReverseRow ? m_rows.value() - row - 1 : row, ReverseCol ? m_cols.value() - col - 1 : col); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { return m_argImpl.coeffRef(m_rows.value() * m_cols.value() - index - 1); } template EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const { enum { PacketSize = unpacket_traits::size, OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1, OffsetCol = ReverseCol && IsRowMajor ? PacketSize : 1 }; typedef internal::reverse_packet_cond reverse_packet; return reverse_packet::run(m_argImpl.template packet( ReverseRow ? m_rows.value() - row - OffsetRow : row, ReverseCol ? m_cols.value() - col - OffsetCol : col)); } template EIGEN_STRONG_INLINE PacketType packet(Index index) const { enum { PacketSize = unpacket_traits::size }; return preverse(m_argImpl.template packet(m_rows.value() * m_cols.value() - index - PacketSize)); } template EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketType& x) { // FIXME we could factorize some code with packet(i,j) enum { PacketSize = unpacket_traits::size, OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1, OffsetCol = ReverseCol && IsRowMajor ? PacketSize : 1 }; typedef internal::reverse_packet_cond reverse_packet; m_argImpl.template writePacket( ReverseRow ? m_rows.value() - row - OffsetRow : row, ReverseCol ? m_cols.value() - col - OffsetCol : col, reverse_packet::run(x)); } template EIGEN_STRONG_INLINE void writePacket(Index index, const PacketType& x) { enum { PacketSize = unpacket_traits::size }; m_argImpl.template writePacket (m_rows.value() * m_cols.value() - index - PacketSize, preverse(x)); } protected: evaluator m_argImpl; // If we do not reverse rows, then we do not need to know the number of rows; same for columns // Nonetheless, in this case it is important to set to 1 such that the coeff(index) method works fine for vectors. 
const variable_if_dynamic m_rows; const variable_if_dynamic m_cols; }; // -------------------- Diagonal -------------------- template struct evaluator > : evaluator_base > { typedef Diagonal XprType; enum { CoeffReadCost = evaluator::CoeffReadCost, Flags = (unsigned int)(evaluator::Flags & (HereditaryBits | DirectAccessBit) & ~RowMajorBit) | LinearAccessBit, Alignment = 0 }; EIGEN_DEVICE_FUNC explicit evaluator(const XprType& diagonal) : m_argImpl(diagonal.nestedExpression()), m_index(diagonal.index()) { } typedef typename XprType::Scalar Scalar; typedef typename XprType::CoeffReturnType CoeffReturnType; EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index) const { return m_argImpl.coeff(row + rowOffset(), row + colOffset()); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { return m_argImpl.coeff(index + rowOffset(), index + colOffset()); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index) { return m_argImpl.coeffRef(row + rowOffset(), row + colOffset()); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { return m_argImpl.coeffRef(index + rowOffset(), index + colOffset()); } protected: evaluator m_argImpl; const internal::variable_if_dynamicindex m_index; private: EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rowOffset() const { return m_index.value() > 0 ? 0 : -m_index.value(); } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index colOffset() const { return m_index.value() > 0 ? m_index.value() : 0; } }; //---------------------------------------------------------------------- // deprecated code //---------------------------------------------------------------------- // -------------------- EvalToTemp -------------------- // expression class for evaluating nested expression to a temporary template class EvalToTemp; template struct traits > : public traits { }; template class EvalToTemp : public dense_xpr_base >::type { public: typedef typename dense_xpr_base::type Base; EIGEN_GENERIC_PUBLIC_INTERFACE(EvalToTemp) explicit EvalToTemp(const ArgType& arg) : m_arg(arg) { } const ArgType& arg() const { return m_arg; } Index rows() const { return m_arg.rows(); } Index cols() const { return m_arg.cols(); } private: const ArgType& m_arg; }; template struct evaluator > : public evaluator { typedef EvalToTemp XprType; typedef typename ArgType::PlainObject PlainObject; typedef evaluator Base; EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : m_result(xpr.arg()) { ::new (static_cast(this)) Base(m_result); } // This constructor is used when nesting an EvalTo evaluator in another evaluator EIGEN_DEVICE_FUNC evaluator(const ArgType& arg) : m_result(arg) { ::new (static_cast(this)) Base(m_result); } protected: PlainObject m_result; }; } // namespace internal } // end namespace Eigen #endif // EIGEN_COREEVALUATORS_H