namespace Experimental {

/// Memory layout type
enum class MemoryLayout : uint8_t {
   RowMajor = 0x01,
   ColumnMajor = 0x02
};

namespace Internal {

/// Get the total number of elements described by a shape vector.
template <typename T>
inline std::size_t GetSizeFromShape(const T &shape)
{
   if (shape.size() == 0)
      return 0;
   std::size_t size = 1;
   for (const auto &dim : shape)
      size *= dim;
   return size;
}
/// Compute the strides of a tensor from its shape and memory layout.
template <typename T>
inline std::vector<std::size_t> ComputeStridesFromShape(const T &shape, MemoryLayout layout)
{
   const auto size = shape.size();
   std::vector<std::size_t> strides(size);
   if (layout == MemoryLayout::RowMajor) {
      for (std::size_t i = 0; i < size; i++) {
         if (i == 0) {
            strides[size - 1 - i] = 1;
         } else {
            strides[size - 1 - i] = strides[size - 1 - i + 1] * shape[size - 1 - i + 1];
         }
      }
   } else if (layout == MemoryLayout::ColumnMajor) {
      for (std::size_t i = 0; i < size; i++) {
         if (i == 0) {
            strides[i] = 1;
         } else {
            strides[i] = strides[i - 1] * shape[i - 1];
         }
      }
   } else {
      std::stringstream ss;
      ss << "Memory layout type is not valid for calculating strides.";
      throw std::runtime_error(ss.str());
   }
   return strides;
}
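// Illustrative sketch (not part of the original header): for shape {2, 3, 4}
// the strides are {12, 4, 1} in row-major layout and {1, 2, 6} in column-major
// layout.
//
//   std::vector<std::size_t> shape = {2, 3, 4};
//   auto rm = Internal::ComputeStridesFromShape(shape, MemoryLayout::RowMajor);    // {12, 4, 1}
//   auto cm = Internal::ComputeStridesFromShape(shape, MemoryLayout::ColumnMajor); // {1, 2, 6}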
/// Compute the multi-dimensional indices that correspond to a global (flat) index.
template <typename T>
inline T ComputeIndicesFromGlobalIndex(const T &shape, MemoryLayout layout, const typename T::value_type idx)
{
   const auto size = shape.size();
   auto strides = ComputeStridesFromShape(shape, layout);
   T indices(size);
   auto r = idx;
   for (std::size_t i = 0; i < size; i++) {
      indices[i] = int(r / strides[i]);
      r = r % strides[i];
   }
   return indices;
}
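// Illustrative sketch (not part of the original header): with shape {2, 3} in
// row-major layout (strides {3, 1}), the global index 4 maps to indices {1, 1}.
//
//   std::vector<std::size_t> shape = {2, 3};
//   auto idx = Internal::ComputeIndicesFromGlobalIndex(shape, MemoryLayout::RowMajor, 4u);
//   // idx == {1, 1}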
/// Compute the global (flat) index from strides and multi-dimensional indices.
template <typename U, typename V>
inline std::size_t ComputeGlobalIndex(const U &strides, const V &idx)
{
   std::size_t globalIndex = 0;
   const auto size = idx.size();
   for (std::size_t i = 0; i < size; i++) {
      globalIndex += strides[size - 1 - i] * idx[size - 1 - i];
   }
   return globalIndex;
}
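// Illustrative sketch (not part of the original header): the global index is
// the dot product of strides and indices, the inverse of the mapping above.
//
//   std::vector<std::size_t> strides = {3, 1};
//   std::vector<std::size_t> idx = {1, 1};
//   auto flat = Internal::ComputeGlobalIndex(strides, idx); // 1*3 + 1*1 == 4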
/// Compile-time conjunction over a parameter pack of type traits,
/// e.g., used in combination with std::is_convertible.
template <class... Ts>
struct and_types : std::true_type {
};

template <class T0, class... Ts>
struct and_types<T0, Ts...> : std::integral_constant<bool, T0{} && and_types<Ts...>{}> {
};
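// Illustrative sketch (not part of the original header): and_types folds the
// pack with logical AND at compile time; the variadic operator() of RTensor
// below uses it to check that every index type is convertible to std::size_t.
//
//   static_assert(Internal::and_types<std::is_convertible<int, std::size_t>,
//                                     std::is_convertible<unsigned, std::size_t>>{},
//                 "all index types are convertible");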
/// Recursively copy the elements in the index window [mins, maxs) from the
/// tensor `here` to the tensor `there`, shifting each index so that the window
/// starts at the origin of `there`.
template <typename T>
void RecursiveCopy(T &here, T &there,
                   const std::vector<std::size_t> &mins, const std::vector<std::size_t> &maxs,
                   std::vector<std::size_t> idx, std::size_t active)
{
   const auto size = idx.size();
   for (std::size_t i = mins[active]; i < maxs[active]; i++) {
      idx[active] = i;
      if (active == size - 1) {
         auto idxThere = idx;
         for (std::size_t j = 0; j < size; j++) {
            idxThere[j] -= mins[j];
         }
         there(idxThere) = here(idx);
      } else {
         Internal::RecursiveCopy(here, there, mins, maxs, idx, active + 1);
      }
   }
}

} // namespace Internal
/// \brief RTensor is a container with contiguous memory and shape information.
template <typename V, typename C = std::vector<V>>
class RTensor {
public:
   // Typedefs
   using Value_t = V;
   using Shape_t = std::vector<std::size_t>;
   using Index_t = Shape_t;
   using Slice_t = std::vector<Shape_t>;
   using Container_t = C;

private:
   Shape_t fShape;
   Shape_t fStrides;
   std::size_t fSize;
   MemoryLayout fLayout;
   Value_t *fData;
   std::shared_ptr<Container_t> fContainer;

protected:
   void ReshapeInplace(const Shape_t &shape);

public:
   /// Construct a tensor as a view on externally managed data.
   RTensor(Value_t *data, Shape_t shape, MemoryLayout layout = MemoryLayout::RowMajor)
      : fShape(shape), fLayout(layout), fData(data), fContainer(NULL)
   {
      fSize = Internal::GetSizeFromShape(shape);
      fStrides = Internal::ComputeStridesFromShape(shape, layout);
   }

   /// Construct a tensor as a view on externally managed data with explicit strides.
   RTensor(Value_t *data, Shape_t shape, Shape_t strides, MemoryLayout layout = MemoryLayout::RowMajor)
      : fShape(shape), fStrides(strides), fLayout(layout), fData(data), fContainer(NULL)
   {
      fSize = Internal::GetSizeFromShape(shape);
   }

   /// Construct a tensor adopting a shared container that holds the data.
   RTensor(std::shared_ptr<Container_t> container, Shape_t shape,
           MemoryLayout layout = MemoryLayout::RowMajor)
      : fShape(shape), fLayout(layout), fContainer(container)
   {
      fSize = Internal::GetSizeFromShape(shape);
      fStrides = Internal::ComputeStridesFromShape(shape, layout);
      fData = &(*container->begin());
   }

   /// Construct a tensor owning a newly allocated container of the given shape.
   RTensor(Shape_t shape, MemoryLayout layout = MemoryLayout::RowMajor)
      : fShape(shape), fLayout(layout)
   {
      fSize = Internal::GetSizeFromShape(shape);
      fStrides = Internal::ComputeStridesFromShape(shape, layout);
      fContainer = std::make_shared<Container_t>(fSize);
      fData = &(*fContainer->begin());
   }
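   // Usage sketch (illustrative, not part of the original header): an owning
   // tensor allocates its own container, while the pointer constructor only
   // adopts a view on an external buffer.
   //
   //   RTensor<float> owned({2, 3});      // allocates 6 zero-initialized floats
   //   float buf[6] = {0, 1, 2, 3, 4, 5};
   //   RTensor<float> view(buf, {2, 3});  // no copy; buf must outlive the view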
   Value_t &operator()(const Index_t &idx);
   const Value_t &operator()(const Index_t &idx) const;
   template <typename... Idx> Value_t &operator()(Idx... idx);
   template <typename... Idx> const Value_t &operator()(Idx... idx) const;
   std::size_t GetSize() const { return fSize; }
   const Shape_t &GetShape() const { return fShape; }
   const Shape_t &GetStrides() const { return fStrides; }
   Value_t *GetData() { return fData; }
   const Value_t *GetData() const { return fData; }
   std::shared_ptr<Container_t> GetContainer() { return fContainer; }
   const std::shared_ptr<Container_t> GetContainer() const { return fContainer; }
   MemoryLayout GetMemoryLayout() const { return fLayout; }
   bool IsView() const { return fContainer == NULL; }
   bool IsOwner() const { return !IsView(); }
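   // Usage sketch (illustrative, not part of the original header): shape,
   // strides and size of a 2x3 row-major tensor.
   //
   //   RTensor<float> t({2, 3});
   //   t.GetShape();    // {2, 3}
   //   t.GetStrides();  // {3, 1}
   //   t.GetSize();     // 6
   //   t.IsOwner();     // true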
   RTensor<Value_t, Container_t> Copy(MemoryLayout layout = MemoryLayout::RowMajor);
   RTensor<Value_t, Container_t> Transpose();
   RTensor<Value_t, Container_t> Squeeze();
   RTensor<Value_t, Container_t> ExpandDims(int idx);
   RTensor<Value_t, Container_t> Reshape(const Shape_t &shape);
   RTensor<Value_t, Container_t> Slice(const Slice_t &slice);
   // Random-access iterator over the elements in global (flat) index order.
   class Iterator : public std::iterator<std::random_access_iterator_tag, Value_t> {
   private:
      RTensor<Value_t, Container_t> &fTensor;
      Index_t::value_type fGlobalIndex;

   public:
      using difference_type = typename std::iterator<std::random_access_iterator_tag, Value_t>::difference_type;

      Iterator(RTensor<Value_t, Container_t> &x, typename Index_t::value_type idx) : fTensor(x), fGlobalIndex(idx) {}
      Iterator &operator++() { fGlobalIndex++; return *this; }
      Iterator operator++(int) { auto tmp = *this; operator++(); return tmp; }
      Iterator &operator--() { fGlobalIndex--; return *this; }
      Iterator operator--(int) { auto tmp = *this; operator--(); return tmp; }
      Iterator operator+(difference_type rhs) const { return Iterator(fTensor, fGlobalIndex + rhs); }
      Iterator operator-(difference_type rhs) const { return Iterator(fTensor, fGlobalIndex - rhs); }
      difference_type operator-(const Iterator &rhs) { return fGlobalIndex - rhs.GetGlobalIndex(); }
      Iterator &operator+=(difference_type rhs) { fGlobalIndex += rhs; return *this; }
      Iterator &operator-=(difference_type rhs) { fGlobalIndex -= rhs; return *this; }
      Value_t &operator*()
      {
         auto idx = Internal::ComputeIndicesFromGlobalIndex(fTensor.GetShape(), fTensor.GetMemoryLayout(), fGlobalIndex);
         return fTensor(idx);
      }
      bool operator==(const Iterator &rhs) const
      {
         if (fGlobalIndex == rhs.GetGlobalIndex())
            return true;
         return false;
      }
      bool operator!=(const Iterator &rhs) const { return !operator==(rhs); };
      bool operator>(const Iterator &rhs) const { return fGlobalIndex > rhs.GetGlobalIndex(); }
      bool operator<(const Iterator &rhs) const { return fGlobalIndex < rhs.GetGlobalIndex(); }
      bool operator>=(const Iterator &rhs) const { return fGlobalIndex >= rhs.GetGlobalIndex(); }
      bool operator<=(const Iterator &rhs) const { return fGlobalIndex <= rhs.GetGlobalIndex(); }
      typename Index_t::value_type GetGlobalIndex() const { return fGlobalIndex; };
   };
   Iterator begin() noexcept { return Iterator(*this, 0); }
   Iterator end() noexcept { return Iterator(*this, fSize); }
};
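// Usage sketch (illustrative, not part of the original header): begin()/end()
// enable range-based iteration in global index order, independent of the
// memory layout.
//
//   RTensor<float> t({2, 2});
//   float sum = 0.f;
//   for (auto &v : t) sum += v;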
/// Reshape the tensor in place; the new shape must describe the same number of elements.
template <typename Value_t, typename Container_t>
inline void RTensor<Value_t, Container_t>::ReshapeInplace(const Shape_t &shape)
{
   const auto size = Internal::GetSizeFromShape(shape);
   if (size != fSize) {
      std::stringstream ss;
      ss << "Cannot reshape tensor with size " << fSize << " into shape { ";
      for (std::size_t i = 0; i < shape.size(); i++) {
         if (i != shape.size() - 1) {
            ss << shape[i] << ", ";
         } else {
            ss << shape[i] << " }.";
         }
      }
      throw std::runtime_error(ss.str());
   }

   // Compute new strides from the new shape
   auto strides = Internal::ComputeStridesFromShape(shape, fLayout);
   fShape = shape;
   fStrides = strides;
}
/// Access an element by an index vector.
template <typename Value_t, typename Container_t>
inline Value_t &RTensor<Value_t, Container_t>::operator()(const Index_t &idx)
{
   const auto globalIndex = Internal::ComputeGlobalIndex(fStrides, idx);
   return fData[globalIndex];
}

template <typename Value_t, typename Container_t>
inline const Value_t &RTensor<Value_t, Container_t>::operator()(const Index_t &idx) const
{
   const auto globalIndex = Internal::ComputeGlobalIndex(fStrides, idx);
   return fData[globalIndex];
}
/// Access an element by a variadic list of indices convertible to std::size_t.
template <typename Value_t, typename Container_t>
template <typename... Idx>
Value_t &RTensor<Value_t, Container_t>::operator()(Idx... idx)
{
   static_assert(Internal::and_types<std::is_convertible<Idx, std::size_t>...>{},
                 "Indices are not convertible to std::size_t.");
   return operator()({static_cast<std::size_t>(idx)...});
}

template <typename Value_t, typename Container_t>
template <typename... Idx>
const Value_t &RTensor<Value_t, Container_t>::operator()(Idx... idx) const
{
   static_assert(Internal::and_types<std::is_convertible<Idx, std::size_t>...>{},
                 "Indices are not convertible to std::size_t.");
   return operator()({static_cast<std::size_t>(idx)...});
}
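// Usage sketch (illustrative, not part of the original header): the variadic
// overload forwards to the index-vector overload, so both forms address the
// same element.
//
//   RTensor<float> t({2, 3});
//   t(1, 2) = 5.f;
//   float v = t({1, 2}); // v == 5.f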
/// Transpose by flipping the memory layout and reversing shape and strides.
template <typename Value_t, typename Container_t>
inline RTensor<Value_t, Container_t> RTensor<Value_t, Container_t>::Transpose()
{
   if (fLayout == MemoryLayout::RowMajor) {
      fLayout = MemoryLayout::ColumnMajor;
   } else if (fLayout == MemoryLayout::ColumnMajor) {
      fLayout = MemoryLayout::RowMajor;
   } else {
      throw std::runtime_error("Memory layout is not known.");
   }

   // Create copy of tensor
   RTensor<Value_t, Container_t> x(*this);

   // Reverse shape and strides
   std::reverse(x.fShape.begin(), x.fShape.end());
   std::reverse(x.fStrides.begin(), x.fStrides.end());

   return x;
}
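// Usage sketch (illustrative, not part of the original header): transposing
// reverses shape and strides and flips the layout flag without touching the
// underlying buffer.
//
//   RTensor<float> t({2, 3});   // shape {2, 3}
//   auto tt = t.Transpose();    // shape {3, 2}; tt(j, i) aliases t(i, j)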
/// Squeeze the tensor by removing all dimensions of size one.
template <typename Value_t, typename Container_t>
inline RTensor<Value_t, Container_t> RTensor<Value_t, Container_t>::Squeeze()
{
   // Remove dimensions of one and the associated strides
   Shape_t shape;
   Shape_t strides;
   for (std::size_t i = 0; i < fShape.size(); i++) {
      if (fShape[i] != 1) {
         shape.emplace_back(fShape[i]);
         strides.emplace_back(fStrides[i]);
      }
   }

   // If all dimensions are one, keep a single dimension of size one
   // (unless the initial shape was already empty).
   if (shape.size() == 0 && fShape.size() != 0) {
      shape.emplace_back(1);
      strides.emplace_back(1);
   }

   // Create copy, attach new shape and strides and return
   RTensor<Value_t, Container_t> x(*this);
   x.fShape = shape;
   x.fStrides = strides;
   return x;
}
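// Usage sketch (illustrative, not part of the original header):
//
//   RTensor<float> t({1, 3, 1});
//   auto s = t.Squeeze();   // shape {3}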
/// Expand the shape by inserting a dimension of size one at position idx.
/// Negative indices count from the end, with -1 appending a trailing dimension.
template <typename Value_t, typename Container_t>
inline RTensor<Value_t, Container_t> RTensor<Value_t, Container_t>::ExpandDims(int idx)
{
   // Compose shape vector with an additional dimension and adjust the strides
   const int len = fShape.size();
   auto shape = fShape;
   auto strides = fStrides;
   if (idx < 0) {
      if (len + idx + 1 < 0) {
         throw std::runtime_error("Given negative index is invalid.");
      }
      shape.insert(shape.end() + 1 + idx, 1);
      strides.insert(strides.begin() + 1 + idx, 1);
   } else {
      if (idx > len) {
         throw std::runtime_error("Given index is invalid.");
      }
      shape.insert(shape.begin() + idx, 1);
      strides.insert(strides.begin() + idx, 1);
   }

   // Create copy, attach new shape and strides and return
   RTensor<Value_t, Container_t> x(*this);
   x.fShape = shape;
   x.fStrides = strides;
   return x;
}
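// Usage sketch (illustrative, not part of the original header):
//
//   RTensor<float> t({2, 3});
//   auto a = t.ExpandDims(0);    // shape {1, 2, 3}
//   auto b = t.ExpandDims(-1);   // shape {2, 3, 1}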
/// Return a reshaped copy of the tensor metadata pointing to the same data.
template <typename Value_t, typename Container_t>
inline RTensor<Value_t, Container_t> RTensor<Value_t, Container_t>::Reshape(const Shape_t &shape)
{
   // Create copy, reshape and return
   RTensor<Value_t, Container_t> x(*this);
   x.ReshapeInplace(shape);
   return x;
}
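// Usage sketch (illustrative, not part of the original header): the element
// count must stay the same, otherwise ReshapeInplace throws std::runtime_error.
//
//   RTensor<float> t({2, 3});
//   auto r = t.Reshape({6});   // shape {6}
//   // t.Reshape({4});         // throws: 6 elements cannot be reshaped to { 4 }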
/// Create a slice of the tensor. Each entry of the slice is a pair
/// {begin, end} of indices along the corresponding dimension.
template <typename Value_t, typename Container_t>
inline RTensor<Value_t, Container_t> RTensor<Value_t, Container_t>::Slice(const Slice_t &slice)
{
   // Sanitize size of slice
   const auto sliceSize = slice.size();
   const auto shapeSize = fShape.size();
   if (sliceSize != shapeSize) {
      std::stringstream ss;
      ss << "Size of slice (" << sliceSize << ") is unequal number of dimensions (" << shapeSize << ").";
      throw std::runtime_error(ss.str());
   }

   // Recompute shape and size
   Shape_t shape(sliceSize);
   for (std::size_t i = 0; i < sliceSize; i++) {
      shape[i] = slice[i][1] - slice[i][0];
   }
   auto size = Internal::GetSizeFromShape(shape);

   // Determine the first element of the slice
   Shape_t idx(sliceSize);
   for (std::size_t i = 0; i < sliceSize; i++) {
      idx[i] = slice[i][0];
   }
   Value_t *data = &operator()(idx);

   // Create copy and modify the properties
   RTensor<Value_t, Container_t> x(*this);
   x.fData = data;
   x.fShape = shape;
   x.fSize = size;

   // Squeeze the result and return
   return x.Squeeze();
}
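// Usage sketch (illustrative, not part of the original header): a slice is
// given as one {begin, end} pair per dimension; the result is squeezed, so
// dimensions of size one are dropped.
//
//   RTensor<float> t({4, 3});
//   auto s = t.Slice({{1, 3}, {0, 2}});   // shape {2, 2}, shares the data of t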
/// Return a copy of the tensor with newly allocated data in the given memory layout.
template <typename Value_t, typename Container_t>
inline RTensor<Value_t, Container_t> RTensor<Value_t, Container_t>::Copy(MemoryLayout layout)
{
   // Create a tensor with zero-initialized data
   RTensor<Value_t, Container_t> r(fShape, layout);

   // Copy the data over element by element with the correct ordering
   const auto mins = Shape_t(fShape.size());
   const auto maxs = fShape;
   auto idx = mins;
   Internal::RecursiveCopy(*this, r, mins, maxs, idx, 0);

   return r;
}
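// Usage sketch (illustrative, not part of the original header): unlike the
// transformations above, Copy allocates new storage via RecursiveCopy, so the
// result owns its data.
//
//   float buf[4] = {1, 2, 3, 4};
//   RTensor<float> view(buf, {2, 2});
//   auto owned = view.Copy();   // owned.IsOwner() == true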
/// Pretty printing for rank-1 and rank-2 tensors.
template <typename T>
std::ostream &operator<<(std::ostream &os, RTensor<T> &x)
{
   const auto shapeSize = x.GetShape().size();
   if (shapeSize == 1) {
      os << "{ ";
      const auto size = x.GetSize();
      for (std::size_t i = 0; i < size; i++) {
         os << x({i});
         if (i != size - 1)
            os << ", ";
      }
      os << " }";
   } else if (shapeSize == 2) {
      os << "{";
      const auto shape = x.GetShape();
      for (std::size_t i = 0; i < shape[0]; i++) {
         os << " { ";
         for (std::size_t j = 0; j < shape[1]; j++) {
            os << x({i, j});
            if (j < shape[1] - 1) {
               os << ", ";
            } else {
               os << " ";
            }
         }
         os << "}";
      }
      os << " }";
   } else {
      os << "{ printing not yet implemented for this rank }";
   }
   return os;
}
} // namespace Experimental
} // namespace TMVA

// Integration with the cling interpreter: print the tensor via operator<<.
namespace cling {
template <typename T>
std::string printValue(TMVA::Experimental::RTensor<T> *x)
{
   std::stringstream ss;
   ss << *x;
   return ss.str();
}
} // namespace cling

#endif // TMVA_RTENSOR