// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
/** \internal
  * A general matrix-matrix product kernel optimized for the SparseLU factorization.
  *  - A, B, and C must be column major
  *  - lda and ldc must be multiples of the respective packet size
  *  - C must have the same alignment as A
  */
template<typename Scalar>
EIGEN_DONT_INLINE void sparselu_gemm(Index m, Index n, Index d, const Scalar* A, Index lda, const Scalar* B, Index ldb, Scalar* C, Index ldc)
{
  using namespace Eigen::internal;

  typedef typename packet_traits<Scalar>::type Packet;
  enum {
NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
PacketSize = packet_traits<Scalar>::size,
PM = 8, // peeling in M
RN = 2, // register blocking
RK = NumberOfRegisters>=16 ? 4 : 2, // register blocking
BM = 4096/sizeof(Scalar), // number of rows of A-C per chunk
SM = PM*PacketSize // step along M
};
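  // In short: C is updated in chunks of BM rows; within a chunk, RN columns of B-C are
  // processed at once, RK columns of A (rows of B) are kept in registers, and the rows
  // are vectorized PacketSize at a time with the row loop peeled PM-fold (SM rows per step).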
Index d_end = (d/RK)*RK; // number of columns of A (rows of B) suitable for full register blocking
Index n_end = (n/RN)*RN; // number of columns of B-C suitable for processing RN columns at once
Index i0 = internal::first_default_aligned(A,m);
  // handle the non aligned rows of A and C without any optimization:
  for(Index i=0; i<i0; ++i)
  {
    for(Index j=0; j<n; ++j)
{
      Scalar c = C[i+j*ldc];
      for(Index k=0; k<d; ++k)
c += B[k+j*ldb] * A[i+k*lda];
C[i+j*ldc] = c;
}
  }
  // process the remaining rows per chunk of BM rows
  for(Index ib=i0; ib<m; ib+=BM)
{
Index actual_b = std::min<Index>(BM, m-ib); // actual number of rows
Index actual_b_end1 = (actual_b/SM)*SM; // actual number of rows suitable for peeling
Index actual_b_end2 = (actual_b/PacketSize)*PacketSize; // actual number of rows suitable for vectorization
    // Let's process two columns of B-C at once
    for(Index j=0; j<n_end; j+=RN)
    {
      const Scalar* Bc0 = B+(j+0)*ldb;
      const Scalar* Bc1 = B+(j+1)*ldb;
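      // Reconstructed: the loop on k, the 2 x RK load of B, the A/C pointer setup and the
      // KMADD/WORK helpers that the WORK(0..7) calls, the #undef WORK and the
      // "peeled loop on k" marker below rely on. A sketch following the structure implied
      // by the surrounding code, not verbatim upstream code.
      for(Index k=0; k<d_end; k+=RK)
      {
        // load and expand a RN x RK block of B
        Packet b00, b10, b20, b30, b01, b11, b21, b31;
                  b00 = pset1<Packet>(Bc0[0]);
                  b10 = pset1<Packet>(Bc0[1]);
        if(RK==4) b20 = pset1<Packet>(Bc0[2]);
        if(RK==4) b30 = pset1<Packet>(Bc0[3]);
                  b01 = pset1<Packet>(Bc1[0]);
                  b11 = pset1<Packet>(Bc1[1]);
        if(RK==4) b21 = pset1<Packet>(Bc1[2]);
        if(RK==4) b31 = pset1<Packet>(Bc1[3]);

        Packet a0, a1, a2, a3, c0, c1, t0, t1;

        // pointers to the RK active columns of A and to the two destination columns of C
        const Scalar* A0 = A+ib+(k+0)*lda;
        const Scalar* A1 = A+ib+(k+1)*lda;
        const Scalar* A2 = A+ib+(k+2)*lda;
        const Scalar* A3 = A+ib+(k+3)*lda;

        Scalar* C0 = C+ib+(j+0)*ldc;
        Scalar* C1 = C+ib+(j+1)*ldc;

        a0 = pload<Packet>(A0);
        a1 = pload<Packet>(A1);
        if(RK==4)
        {
          a2 = pload<Packet>(A2);
          a3 = pload<Packet>(A3);
        }
        else
        {
          // workaround "may be used uninitialized in this function" warning
          a2 = a3 = a0;
        }

// multiply-accumulate: c += a*b, using tmp as a scratch register
#define KMADD(c, a, b, tmp) {tmp = b; tmp = pmul(a,tmp); c = padd(c,tmp);}
// update the I-th packet of the two destination columns and preload the next packet of A
#define WORK(I)  \
                   c0 = pload<Packet>(C0+i+(I)*PacketSize);    \
                   c1 = pload<Packet>(C1+i+(I)*PacketSize);    \
                   KMADD(c0, a0, b00, t0)                      \
                   KMADD(c1, a0, b01, t1)                      \
                   a0 = pload<Packet>(A0+i+(I+1)*PacketSize);  \
                   KMADD(c0, a1, b10, t0)                      \
                   KMADD(c1, a1, b11, t1)                      \
                   a1 = pload<Packet>(A1+i+(I+1)*PacketSize);  \
        if(RK==4){ KMADD(c0, a2, b20, t0)                     }\
        if(RK==4){ KMADD(c1, a2, b21, t1)                     }\
        if(RK==4){ a2 = pload<Packet>(A2+i+(I+1)*PacketSize); }\
        if(RK==4){ KMADD(c0, a3, b30, t0)                     }\
        if(RK==4){ KMADD(c1, a3, b31, t1)                     }\
        if(RK==4){ a3 = pload<Packet>(A3+i+(I+1)*PacketSize); }\
                   pstore(C0+i+(I)*PacketSize, c0);            \
                   pstore(C1+i+(I)*PacketSize, c1)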
        // process rows of A' - C' with aggressive vectorization and peeling
        for(Index i=0; i<actual_b_end1; i+=PacketSize*8)
{
EIGEN_ASM_COMMENT("SPARSELU_GEMML_KERNEL1");
prefetch((A0+i+(5)*PacketSize));
          prefetch((A1+i+(5)*PacketSize));
          if(RK==4) prefetch((A2+i+(5)*PacketSize));
          if(RK==4) prefetch((A3+i+(5)*PacketSize));
WORK(0);
WORK(1);
WORK(2);
WORK(3);
WORK(4);
WORK(5);
WORK(6);
WORK(7);
        }
        // process the remaining rows with vectorization only
        for(Index i=actual_b_end1; i<actual_b_end2; i+=PacketSize)
{
WORK(0);
        }
#undef WORK
        // process the remaining rows without vectorization
        for(Index i=actual_b_end2; i<actual_b; ++i)
        {
          if(RK==4)
{
C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1]+A2[i]*Bc0[2]+A3[i]*Bc0[3];
C1[i] += A0[i]*Bc1[0]+A1[i]*Bc1[1]+A2[i]*Bc1[2]+A3[i]*Bc1[3];
          }
          else
{
C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1];
C1[i] += A0[i]*Bc1[0]+A1[i]*Bc1[1];
}
}
Bc0 += RK;
Bc1 += RK;
} // peeled loop on k
    } // peeled loop on the columns j

    // process the last column (we now perform a matrix-vector product)
    if((n-n_end)>0)
    {
      const Scalar* Bc0 = B+(n-1)*ldb;
for(Index k=0; k<d_end; k+=RK)
{
// load and expand a 1 x RK block of B
Packet b00, b10, b20, b30;
b00 = pset1<Packet>(Bc0[0]);
        b10 = pset1<Packet>(Bc0[1]);
        if(RK==4) b20 = pset1<Packet>(Bc0[2]);
        if(RK==4) b30 = pset1<Packet>(Bc0[3]);
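        // Reconstructed: the A/C pointer setup, a single-column WORK helper and the row
        // loops of this matrix-vector tail, mirroring the two-column kernel above
        // (a sketch, not verbatim upstream code).
        Packet a0, a1, a2, a3, c0, t0;

        const Scalar* A0 = A+ib+(k+0)*lda;
        const Scalar* A1 = A+ib+(k+1)*lda;
        const Scalar* A2 = A+ib+(k+2)*lda;
        const Scalar* A3 = A+ib+(k+3)*lda;

        Scalar* C0 = C+ib+(n_end)*ldc;

        a0 = pload<Packet>(A0);
        a1 = pload<Packet>(A1);
        if(RK==4)
        {
          a2 = pload<Packet>(A2);
          a3 = pload<Packet>(A3);
        }
        else
        {
          // workaround "may be used uninitialized in this function" warning
          a2 = a3 = a0;
        }

// update the I-th packet of the single destination column and preload the next packet of A
#define WORK(I) \
                   c0 = pload<Packet>(C0+i+(I)*PacketSize);    \
                   KMADD(c0, a0, b00, t0)                      \
                   a0 = pload<Packet>(A0+i+(I+1)*PacketSize);  \
                   KMADD(c0, a1, b10, t0)                      \
                   a1 = pload<Packet>(A1+i+(I+1)*PacketSize);  \
        if(RK==4){ KMADD(c0, a2, b20, t0)                     }\
        if(RK==4){ a2 = pload<Packet>(A2+i+(I+1)*PacketSize); }\
        if(RK==4){ KMADD(c0, a3, b30, t0)                     }\
        if(RK==4){ a3 = pload<Packet>(A3+i+(I+1)*PacketSize); }\
                   pstore(C0+i+(I)*PacketSize, c0)

        // aggressive vectorization and peeling
        for(Index i=0; i<actual_b_end1; i+=PacketSize*8)
        {
          EIGEN_ASM_COMMENT("SPARSELU_GEMML_KERNEL2");
          WORK(0); WORK(1); WORK(2); WORK(3);
          WORK(4); WORK(5); WORK(6); WORK(7);
        }
        // remaining rows with vectorization only
        for(Index i=actual_b_end1; i<actual_b_end2; i+=PacketSize)
        {
          WORK(0);
        }
        // remaining rows without vectorization
        for(Index i=actual_b_end2; i<actual_b; ++i)
        {
          if(RK==4)
            C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1]+A2[i]*Bc0[2]+A3[i]*Bc0[3];
          else
            C0[i] += A0[i]*Bc0[0]+A1[i]*Bc0[1];
        }

        Bc0 += RK;
#undef WORK
      } // loop on k
    } // if((n-n_end)>0)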
// process the last columns of A, corresponding to the last rows of B
    Index rd = d-d_end;
    if(rd>0)
    {
      for(Index j=0; j<n; ++j)
      {
        enum {
          Alignment = PacketSize>1 ? Aligned : 0
        };
        typedef Map<Matrix<Scalar,Dynamic,1>, Alignment > MapVector;
        typedef Map<const Matrix<Scalar,Dynamic,1>, Alignment > ConstMapVector;
        if(rd==1)       MapVector(C+j*ldc+ib,actual_b) += B[0+d_end+j*ldb] * ConstMapVector(A+(d_end+0)*lda+ib, actual_b);
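        // Reconstructed: the rd==2 and rd==3 fallbacks and the closing braces of this loop,
        // of the chunk loop on ib and of the function, following the rd==1 case above
        // (a sketch, not verbatim upstream code).
        else if(rd==2)  MapVector(C+j*ldc+ib,actual_b) += B[0+d_end+j*ldb] * ConstMapVector(A+(d_end+0)*lda+ib, actual_b)
                                                        + B[1+d_end+j*ldb] * ConstMapVector(A+(d_end+1)*lda+ib, actual_b);

        else            MapVector(C+j*ldc+ib,actual_b) += B[0+d_end+j*ldb] * ConstMapVector(A+(d_end+0)*lda+ib, actual_b)
                                                        + B[1+d_end+j*ldb] * ConstMapVector(A+(d_end+1)*lda+ib, actual_b)
                                                        + B[2+d_end+j*ldb] * ConstMapVector(A+(d_end+2)*lda+ib, actual_b);
      }
    }

  } // blocking on the rows of A and C
}
#undef KMADD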