// TODO(bsteiner): also templatize on the input type since we have users // for int8 as well as floats. template <typename Device, typename T> class BenchmarkSuite { public:
BenchmarkSuite(const Device& device, size_t m, size_t k, size_t n)
: m_(m), k_(k), n_(n), device_(device) {
initialize();
}
Eigen::array<int, 2> shuffle;
shuffle[0] = 1;
shuffle[1] = 0; #ifdef EIGEN_USE_SYCL // warmup for sycl for (int iter = 0; iter < 10; ++iter) {
B.device(device_) = A.shuffle(shuffle);
} #endif
StartBenchmarkTiming(); for (int iter = 0; iter < num_iters; ++iter) {
B.device(device_) = A.shuffle(shuffle);
} // Record the number of values shuffled from A and copied to B each second
finalizeBenchmark(static_cast<int64_t>(m_) * k_ * num_iters);
}
#ifdefined(EIGEN_HAS_INDEX_LIST)
Eigen::IndexPairList<Eigen::type2indexpair<0, 0>,
Eigen::type2indexpair<2, 1> > paddings; #else
Eigen::array<Eigen::IndexPair<TensorIndex>, 2> paddings;
paddings[0] = Eigen::IndexPair<TensorIndex>(0, 0);
paddings[1] = Eigen::IndexPair<TensorIndex>(2, 1); #endif #ifdef EIGEN_USE_SYCL // warmup for sycl for (int iter = 0; iter < 10; ++iter) {
B.device(device_) = A.pad(paddings);
} #endif
StartBenchmarkTiming(); for (int iter = 0; iter < num_iters; ++iter) {
B.device(device_) = A.pad(paddings);
} // Record the number of values copied from the padded tensor A each second
finalizeBenchmark(static_cast<int64_t>(m_) * k_ * num_iters);
}
#ifndef EIGEN_HAS_INDEX_LIST
Eigen::array<TensorIndex, 2> strides;
strides[0] = 1;
strides[1] = 2; #else // Take advantage of cxx11 to give the compiler information it can use to // optimize the code.
Eigen::IndexList<Eigen::type2index<1>, Eigen::type2index<2> > strides; #endif
#ifdef EIGEN_USE_SYCL // warmup for sycl for (int iter = 0; iter < 10; ++iter) {
B.device(device_) = A.stride(strides);
} #endif
StartBenchmarkTiming(); for (int iter = 0; iter < num_iters; ++iter) {
B.device(device_) = A.stride(strides);
} // Record the number of values copied from the padded tensor A each second
finalizeBenchmark(static_cast<int64_t>(m_) * k_ * num_iters);
}
#ifndef EIGEN_HAS_INDEX_LIST
Eigen::array<int, 2> broadcast;
broadcast[0] = 1;
broadcast[1] = n_; #else // Take advantage of cxx11 to give the compiler information it can use to // optimize the code.
Eigen::IndexList<Eigen::type2index<1>, int> broadcast;
broadcast.set(1, n_); #endif
#ifdef EIGEN_USE_SYCL // warmup for sycl for (int iter = 0; iter < 10; ++iter) {
C.device(device_) = A.broadcast(broadcast);
} #endif
StartBenchmarkTiming(); for (int iter = 0; iter < num_iters; ++iter) {
C.device(device_) = A.broadcast(broadcast);
} // Record the number of values broadcasted from A and copied to C each second
finalizeBenchmark(static_cast<int64_t>(m_) * n_ * num_iters);
}
void coeffWiseOp(int num_iters) {
eigen_assert(m_ == k_ && k_ == n_);
Eigen::array<TensorIndex, 2> sizes;
sizes[0] = m_;
sizes[1] = m_; const TensorMap<Tensor<T, 2>, Eigen::Aligned> A(a_, sizes); const TensorMap<Tensor<T, 2>, Eigen::Aligned> B(b_, sizes);
TensorMap<Tensor<T, 2>, Eigen::Aligned> C(c_, sizes); #ifdef EIGEN_USE_SYCL // warmup for sycl for (int iter = 0; iter < 10; ++iter) {
C.device(device_) = A * A.constant(static_cast<T>(3.14)) + B * B.constant(static_cast<T>(2.7));
} #endif
StartBenchmarkTiming(); for (int iter = 0; iter < num_iters; ++iter) {
C.device(device_) = A * A.constant(static_cast<T>(3.14)) + B * B.constant(static_cast<T>(2.7));
} // Record the number of FLOP executed per second (2 multiplications and // 1 addition per value)
finalizeBenchmark(static_cast<int64_t>(3) * m_ * m_ * num_iters);
}
#ifdef EIGEN_USE_SYCL // warmup for sycl for (int iter = 0; iter < 10; ++iter) {
C.device(device_) = A.rsqrt() + B.sqrt() * B.square();
} #endif
StartBenchmarkTiming(); for (int iter = 0; iter < num_iters; ++iter) {
C.device(device_) = A.rsqrt() + B.sqrt() * B.square();
} // Record the number of FLOP executed per second (assuming one operation // per value)
finalizeBenchmark(static_cast<int64_t>(m_) * m_ * num_iters);
}
void transcendentalFunc(int num_iters) {
eigen_assert(m_ == k_ && k_ == n_);
Eigen::array<TensorIndex, 2> sizes;
sizes[0] = m_;
sizes[1] = m_; const TensorMap<Tensor<T, 2>, Eigen::Aligned> A(a_, sizes); const TensorMap<Tensor<T, 2>, Eigen::Aligned> B(b_, sizes);
TensorMap<Tensor<T, 2>, Eigen::Aligned> C(c_, sizes); #ifdef EIGEN_USE_SYCL // warmup for sycl for (int iter = 0; iter < 10; ++iter) {
C.device(device_) = A.exp() + B.log();
} #endif
StartBenchmarkTiming(); for (int iter = 0; iter < num_iters; ++iter) {
C.device(device_) = A.exp() + B.log();
} // Record the number of FLOP executed per second (assuming one operation // per value)
finalizeBenchmark(static_cast<int64_t>(m_) * m_ * num_iters);
}
#ifndef EIGEN_HAS_INDEX_LIST
Eigen::array<TensorIndex, 1> sum_along_dim;
sum_along_dim[0] = 0; #else // Take advantage of cxx11 to give the compiler information it can use to // optimize the code.
Eigen::IndexList<Eigen::type2index<0>> sum_along_dim; #endif #ifdef EIGEN_USE_SYCL // warmup for sycl for (int iter = 0; iter < 10; ++iter) {
C.device(device_) = B.sum(sum_along_dim);
} #endif
StartBenchmarkTiming(); for (int iter = 0; iter < num_iters; ++iter) {
C.device(device_) = B.sum(sum_along_dim);
} // Record the number of FLOP executed per second (assuming one operation // per value)
finalizeBenchmark(static_cast<int64_t>(k_) * n_ * num_iters);
}
#ifndef EIGEN_HAS_INDEX_LIST
Eigen::array<TensorIndex, 1> sum_along_dim;
sum_along_dim[0] = 1; #else // Take advantage of cxx11 to give the compiler information it can use to // optimize the code.
Eigen::IndexList<Eigen::type2index<1>> sum_along_dim; #endif #ifdef EIGEN_USE_SYCL // warmup for sycl for (int iter = 0; iter < 10; ++iter) {
A.device(device_) = B.sum(sum_along_dim);
} #endif
StartBenchmarkTiming(); for (int iter = 0; iter < num_iters; ++iter) {
A.device(device_) = B.sum(sum_along_dim);
} // Record the number of FLOP executed per second (assuming one operation // per value)
finalizeBenchmark(static_cast<int64_t>(k_) * n_ * num_iters);
}
// Full reduction void fullReduction(int num_iters) {
Eigen::array<TensorIndex, 2> input_size;
input_size[0] = k_;
input_size[1] = n_; const TensorMap<Tensor<T, 2, 0, TensorIndex>, Eigen::Aligned> B(
b_, input_size);
Eigen::array<TensorIndex, 0> output_size;
TensorMap<Tensor<T, 0, 0, TensorIndex>, Eigen::Aligned> C(
c_, output_size); #ifdef EIGEN_USE_SYCL // warmup for sycl for (int iter = 0; iter < 10; ++iter) {
C.device(device_) = B.sum();
} #endif
StartBenchmarkTiming(); for (int iter = 0; iter < num_iters; ++iter) {
C.device(device_) = B.sum();
} // Record the number of FLOP executed per second (assuming one operation // per value)
finalizeBenchmark(static_cast<int64_t>(k_) * n_ * num_iters);
}
// do a contraction which is equivalent to a matrix multiplication void contraction(int num_iters) {
contraction<static_cast<int>(Eigen::ColMajor)>(num_iters, false, false);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.