Documentation flint.hpp

Tensor<T, n>

Overview

Types and Functions
 • template <typename T, unsigned int n> struct Tensor
  • Tensor() : node(nullptr)
  • Tensor(init_type data)
  • Tensor(storage_type data)
  • Tensor(FGraphNode *node, std::array shape) : node(node), shape(shape)
  • Tensor(FGraphNode *node) : node(node)
  • Tensor(const Tensor &other)
  • Tensor<T, n> &operator=(const Tensor &other)
  • Tensor(Tensor &&other)
  • std::array<size_t, n> get_shape() const
  • Tensor<T, n> &operator=(Tensor &&other)
  • ~Tensor()
  • void inverse_broadcasting()
  • void disable_inverse_broadcasting()
  • storage_type operator*()
  • std::vector<char> serialize()
  • static Tensor<T, n> deserialize(char *data, size_t *bytes_read = nullptr)
  • static Tensor<T, n> deserialize(std::vector data)
  • void execute()
  • void execute_cpu()
  • void execute_gpu()
  • Tensor<T, n> operator()()
  • Tensor<T, n> operator-() const
  • Tensor<int, n> sign() const
  • Tensor<int, n> even() const
  • TensorView<T, n - 1> operator[](const size_t index)
  • operator std::string()
  • friend std::ofstream &operator<<(std::ofstream &os, Tensor t)
  • static Tensor<T, n> read_from(std::ifstream &is)
  • friend std::ostream &operator<<(std::ostream &os, Tensor t)
  • template <typename K, unsigned int k> Tensor<stronger_return<K>, k >= n ? k : n> operator+(const Tensor &other) const
  • template <typename K> Tensor<stronger_return<K>, n> operator+(const K other) const
  • template <typename K, unsigned int k> Tensor<stronger_return<K>, k >= n ? k : n> operator-(const Tensor &other) const
  • template <typename K> Tensor<stronger_return<K>, n> operator-(const K other) const
  • template <typename K, unsigned int k> Tensor<stronger_return<K>, k >= n ? k : n> operator*(const Tensor &other) const
  • template <typename K> Tensor<stronger_return<K>, n> operator*(const K other) const
  • template <typename K, unsigned int k> Tensor<stronger_return<K>, k >= n ? k : n> operator/(const Tensor &other) const
  • template <typename K> Tensor<stronger_return<K>, n> operator/(const K other) const
  • Tensor<T, 1> flattened() const
  • Tensor<T, n - 1> flattened(const int dimension) const
  • template <typename K, unsigned int k> Tensor<stronger_return<K>, k >= n ? k : n> pow(const Tensor &other)
  • template <typename K> Tensor<stronger_return<K>, n> pow(const K other)
  • Tensor<to_float<T>, n> log()
  • Tensor<to_float<T>, n> log2()
  • Tensor<to_float<T>, n> log10()
  • Tensor<to_float<T>, n> sqrt()
  • Tensor<to_float<T>, n> exp()
  • Tensor<to_float<T>, n> sin()
  • Tensor<to_float<T>, n> cos()
  • Tensor<to_float<T>, n> tan()
  • Tensor<to_float<T>, n> asin()
  • Tensor<to_float<T>, n> acos()
  • Tensor<to_float<T>, n> atan()
  • template <typename K, unsigned int k> Tensor<stronger_return<K>, k >= n ? k : n> matmul(Tensor &other)
  • template <typename K> Tensor<K, n> convert() const
  • template <typename... args> Tensor<T, sizeof...(args)> reshape(args... shape)
  • template <size_t k> Tensor<T, k> reshape_array(std::array new_shape)
  • Tensor<T, n + 1> expand(int ax = n, int ax_size = 0)
  • template <typename K, unsigned int k> Tensor<stronger_return<K>, k >= n ? k : n> min(const Tensor &other) const
  • template <typename K> Tensor<stronger_return<K>, n> min(const K other) const
  • template <typename K, unsigned int k> Tensor<stronger_return<K>, k >= n ? k : n> max(const Tensor &other) const
  • template <typename K> Tensor<stronger_return<K>, n> max(const K other) const
  • template <typename K, unsigned int k> Tensor<int, k >= n ? k : n> operator<(const Tensor &other) const
  • template <typename K> Tensor<int, n> operator<(const K other) const
  • template <typename K, unsigned int k> Tensor<int, k >= n ? k : n> operator>(const Tensor &other) const
  • template <typename K> Tensor<int, n> operator>(const K other) const
  • template <typename K, unsigned int k> Tensor<int, k >= n ? k : n> equal(const Tensor &other) const
  • template <typename K> Tensor<int, n> equal(const K other) const
  • Tensor<T, n - 1> reduce_sum(int dimension)
  • Tensor<T, n - 1> reduce_mul(int dimension)
  • Tensor<T, 1> reduce_mul()
  • Tensor<T, 1> reduce_sum()
  • Tensor<T, n - 1> reduce_min(int dimension)
  • Tensor<T, 1> reduce_min()
  • Tensor<T, n - 1> reduce_max(int dimension)
  • Tensor<T, 1> reduce_max()
  • Tensor<T, n> abs() const
  • template <size_t k> Tensor<T, n> slice_array(std::array ranges) const
  • template <typename... args> Tensor<T, n> slice(const args... dim_ranges) const
  • Tensor<T, n> extend(std::array new_shape, std::array indices)
  • Tensor<T, n> extend(std::array new_shape, std::array indices, std::array steps)
  • Tensor<T, n> repeat_array(std::array repetitions) const
  • template <typename... args> Tensor<T, n> repeat(const args... repetitions) const
  • Tensor<T, n> transpose(std::initializer_list transposition =
  • Tensor<T, n> transpose_array(std::array transposition)
  • template <typename K, unsigned int k> Tensor<stronger_return<K>, n == k ? n - 1 : n> convolve_array(const Tensor &kernel, const std::array steps) const
  • template <typename K, unsigned int k, typename... args> Tensor<stronger_return<K>, k == n ? n - 1 : n> convolve(const Tensor &kernel, const args... steps) const
  • template <typename K, unsigned int k> Tensor<T, n> index(const Tensor &indices) const
  • template <typename K, unsigned int k> Tensor<T, n> index_set(const Tensor &b, const Tensor &indices) const
  • Tensor<T, n + 1> sliding_window(std::array window_size, std::array step_size =
  • Tensor<T, n - 1> unslide_window(std::array result_size, std::array step_size =
  • Tensor<T, n - 1> pooling_max(std::array window_size, std::array step_size =
  • Tensor<T, n - 1> pooling_sum(std::array window_size, std::array step_size =
  • Tensor<T, n> permutate(unsigned int ax) const
  • Tensor<T, n> dropout(double p) const
  • FGraphNode *get_graph_node() const
  • void set_graph_node(FGraphNode *node)
  • template <typename K, unsigned int k> Tensor<to_float<stronger_return<K>>, k> gradient(const Tensor &dx) const
  • void watch()
  • void unwatch()

The use of Tensors alone isn't enough to correctly use Flint. Especially cleaning up the library when you are finished is important to allow the backends to deallocate resources and to join threads. The function Flint::cleanup() automatically cleans up all initialized backends, or you can construct an instance of FlintContext.
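A minimal sketch of both approaches (assuming FlintContext performs the cleanup when it is destroyed):
 // explicit cleanup
 {
  Tensor<float, 2> t{{1., 2.}, {3., 4.}};
  std::cout << (t + 1.0f)() << std::endl;
 }
 Flint::cleanup(); // deallocates backend resources and joins threads

 // or RAII style with a FlintContext
 {
  FlintContext ctx;
  Tensor<float, 2> t{{1., 2.}, {3., 4.}};
  std::cout << (t + 1.0f)() << std::endl;
 } // backends are cleaned up when ctx goes out of scope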
template <typename T, unsigned int n> struct Tensor 

The multidimensional implementation of Tensor.
Tensor() : node(nullptr) 

Uninitialized Tensor
Tensor(init_type data) 

Creates a Tensor from an n-times nested std::initializer_list (init_type is a recursively defined type definition). E.g.
 Tensor<float, 2> t1{{-1., 0.}, {1., 2.}};
 Tensor<float, 3> t2 = {{{0, 1}, {1, 2}}, {{3, 4}, {5, 6}}};
Tensor(storage_type data) 

Creates a Tensor from an n-times nested std::vector (storage_type is a recursively defined type definition). E.g.
 std::vector<std::vector<float>> s1 = {{-1., 0.}, {1., 2.}};
 Tensor<float, 2> t1(s1);
Tensor(FGraphNode *node, std::array shape)
			: node(node), shape(shape) 

Constructs a Tensor directly from an FGraphNode and a shape.
Tensor(FGraphNode *node) : node(node) 

Constructs a Tensor directly from an FGraphNode.
Tensor(const Tensor &other) 

Copy constructor. Copies the underlying graph structure by creating a new node with the same operation, shape and data types. The new predecessor array points to the same predecessors (memory safety is ensured with reference counting).
If other has result data or if it is a storage node, the complete CPU data is directly copied. Since this operation is expensive it is advised to only use it if it is completely necessary.
Tensor<T, n> &operator=(const Tensor &other) 

Copy operator. Copies the underlying graph structure by creating a new node with the same operation, shape and data types. If there was any operation node previously allocated by this Tensor, it is cleaned up. The new predecessor array points to the same predecessors (memory safety is ensured with reference counting).
If other has result data or if it is a storage node, the complete CPU data is directly copied. Since this operation is expensive it is advised to only use it if it is completely necessary.
Tensor(Tensor &&other) 

Move constructor. Moves every important field from other to this Tensor. other is invalidated after this operation.
std::array<size_t, n> get_shape() const 

Returns the shape of this Tensor as an array with n entries. Each entry describes the size of the corresponding dimension. E.g.
 Tensor<float, 2> t1{{-1., 0.}, {1., 2.}};
 std::array<size_t, 2> shape1 = t1.get_shape();
 // shape1 = {2, 2}
Tensor<T, n> &operator=(Tensor &&other) 

Move operator. Moves every important field from other to this Tensor. other is invalidated after this operation. If there was any operation node previously allocated by this Tensor, it is cleaned up.
~Tensor() 

Cleans up this tensor and frees all underlying data by reference counting.
void inverse_broadcasting() 

Sometimes there are combinations of nodes where both normal and inverse broadcasting are possible, but yield different results, e.g. multiplication for two nodes with shapes [3, 5, 3, 5] and [3, 5]. The framework chooses normal broadcasting over inverse broadcasting if both are possible; this function allows you to alter this behaviour and mark a node to be inversely broadcasted. After the call to this function the given node will from then on only be inversely broadcasted (in cases where only normal broadcasting is available an error will occur!). It has no effect in operations that don't use broadcasting. You can "unmark" the node with disable_inverse_broadcasting.
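A small sketch using the shapes from the example above (constant is the factory used in the operator* example further below; "matched against the leading dimensions" is the reading of inverse broadcasting described here):
 Tensor<float, 4> a = Tensor<float, 4>::constant(2.0f, 3, 5, 3, 5);
 Tensor<float, 2> b = Tensor<float, 2>::constant(3.0f, 3, 5);
 b.inverse_broadcasting();         // b is now matched against the leading dimensions of a
 Tensor<float, 4> c = a * b;
 b.disable_inverse_broadcasting(); // restore the default behaviour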
void disable_inverse_broadcasting() 

Undoes inverse_broadcasting.
storage_type operator*() 

Retrieves the data of the current node and converts it into a multidimensional vector. Executes the node if necessary (if it was not executed prior). This operation has to duplicate the complete data. Since that is a memory-heavy and slow operation, it is recommended to use the index operator operator[] whenever possible instead. E.g.
 Tensor<int, 3> foo = Tensor<int, 3>::constant(42, 2, 2, 1);
 std::vector<std::vector<std::vector<int>>> foo_res = *foo;
 // foo_res = {{{42}, {42}}, {{42}, {42}}}
std::vector<char> serialize() 

Serializes the underlying data of the Tensor to a binary vector. If the Tensor has no result data, it is executed.
static Tensor<T, n> deserialize(char *data,
										size_t *bytes_read = nullptr) 

Deserializes the binary representation of Tensor data back to a Tensor object. The number of bytes read is stored in bytes_read.
static Tensor<T, n> deserialize(std::vector data) 

Deserializes the binary representation of Tensor data back to a Tensor object.
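A round trip through the binary representation might look like this (a sketch; only the calls documented here are used):
 Tensor<float, 2> a{{-1., 0.}, {1., 2.}};
 std::vector<char> bytes = a.serialize();  // executes a if it has no result data yet
 size_t read = 0;
 Tensor<float, 2> b = Tensor<float, 2>::deserialize(bytes.data(), &read);
 // b contains the same data and shape as a, read holds the number of consumed bytes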
void execute() 

Executes the underlying operation (and lazily the operations of the parents if needed) if it was not already executed prior (in that case the operation does nothing). If Flint was initialized implicitly (without ever calling flintInit) or with FLINT_BACKEND_BOTH, the backend is chosen automatically by heuristics and initialized if it was not prior.
void execute_cpu() 

Executes the underlying operation (and lazily the operations of the parents if needed) if it was not already executed prior (in that case the operation does nothing). Uses the CPU backend and initializes it if it was not initialized.
void execute_gpu() 

Executes the underlying operation (and lazily the operations of the parents if needed) if it was not already executed prior (in that case the operation does nothing). Uses the GPU backend and initializes it if it was not initialized.
Tensor<T, n> operator()() 

Convenience method that calls execute and returns a lightweight copy of the Tensor. E.g.
  Tensor<float, 2> t = ...;
  std::cout << t() << std::endl;
Tensor<T, n> operator-() const 

Negates the elements of this Tensor. E.g.
 Tensor<float, 2> foo = {{-3, 3.141592}, {42.0798, -4.3}};
 std::cout << (-foo)() << std::endl;
 // Tensor<FLOAT32, shape: [2, 2]>(
 //  [[3.000000, -3.141592],
 //   [-42.079800, 4.300000]])
Tensor<int, n> sign() const 

Returns a tensor x with the shape of this Tensor with x[i] = 1 if this[i] >= 0 else x[i] = -1. If you additionally need to distinguish 0 values, take a look at equal. E.g.
 Tensor<float, 2> foo = {{-3, 3.141592}, {42.0798, -4.3}};
 std::cout << (foo.sign())() << std::endl;
 // Tensor<INT32, shape: [2, 2]>(
 //  [[-1, 1],
 //   [1, -1]])
Tensor<int, n> even() const 

Returns an int tensor x with the shape of this with x[i] = 1 if this[i] % 2 == 0 else x[i] = 0. This Tensor needs to have an integer type. E.g.
 Tensor<int, 2> foo = {{2, 3}, {42, 7}};
 std::cout << (foo.even())() << std::endl;
 // Tensor<INT32, shape: [2, 2]>(
 //  [[1, 0],
 //   [1, 0]])
TensorView<T, n - 1> operator[](const size_t index) 

Indexes the Tensor in its first dimension by index. The returned type TensorView stores the underlying data of this Tensor and the given index. It is only valid and functional as long as the underlying data of this Tensor is not destructed (i.e. as long as this object is alive or as long as it is attached as the parent of another Tensor). If the underlying data is not yet computed, executes this Tensor. E.g.
 Tensor<int, 3> foo{{{0,1}, {2,3}}, {{4,5}, {6,7}}};
 TensorView<int, 2> bar = foo[1];
 TensorView<int, 1> baz = bar[1];
 std::cout << baz[0] << std::endl; // 6
 std::cout << foo[0][1][1] << std::endl; // 3
operator std::string() 

Converts this Tensor to a string representation. If the Tensor was not yet executed, it will not be executed; instead of the data the string will contain "<not yet executed>".
friend std::ofstream &operator<<(std::ofstream &os, Tensor t) 

Calls serialize on this Tensor and pipes the returned data to the stream.
static Tensor<T, n> read_from(std::ifstream &is) 

Reads one Tensor representation from an input stream. The input should have been created with serialize or the pipe operator.
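A sketch of a file round trip using the two stream functions (the file name and the binary open mode are illustrative choices):
 Tensor<int, 2> a{{0, 1}, {2, 3}};
 {
  std::ofstream out("tensor.bin", std::ios::binary);
  out << a;                                         // serializes a into the file
 }
 std::ifstream in("tensor.bin", std::ios::binary);
 Tensor<int, 2> b = Tensor<int, 2>::read_from(in);  // reconstructs the Tensor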
friend std::ostream &operator<<(std::ostream &os, Tensor t) 

Calls std::string() on this Tensor and pipes the returned string to the stream.
template <typename K, unsigned int k>
		Tensor<stronger_return<K>, k >= n ? k : n>
		operator+(const Tensor &other) const 

Elementwise addition of this Tensor and other. If the dimensions differ, the smaller Tensor is broadcasted along the leading dimensions of the larger one which the two do not share. The datatype of the result is the datatype with higher precedence. E.g.
 Tensor<int, 3> a{{{0,1}, {2,3}}, {{4,5}, {6,7}}};
 Tensor<float, 2> b{{4,2},{0.5f,1}};
 std::cout << (a + b)() << std::endl;
 // Tensor<FLOAT32, shape: [2, 2, 2]>(
 // [[[4.000000, 3.000000],
 //   [2.500000, 4.000000]],
 //  [[8.000000, 7.000000],
 //   [6.500000, 8.000000]]])
template <typename K>
		Tensor<stronger_return<K>, n> operator+(const K other) const 

Elementwise addition of the constant other to this Tensor. If the datatype of K has stronger precedence than the datatype of this Tensor T, K will be the result type, else T.
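For example (the float constant has higher type precedence than int):
 Tensor<int, 2> a{{0, 1}, {2, 3}};
 std::cout << (a + 1.5f)() << std::endl;
 // result is a FLOAT32 Tensor with the values {{1.5, 2.5}, {3.5, 4.5}}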
template <typename K, unsigned int k>
		Tensor<stronger_return<K>, k >= n ? k : n>
		operator-(const Tensor &other) const 

Elementwise subtraction of this Tensor and other. If the dimensions differ, the smaller Tensor is broadcasted along the leading dimensions of the larger one which the two do not share. The datatype of the result is the datatype with higher precedence. E.g.
 Tensor<int, 3> a{{{0,1}, {2,3}}, {{4,5}, {6,7}}};
 Tensor<float, 2> b{{4,2},{0.5f,1}};
 std::cout << (a - b)() << std::endl;
 // Tensor<FLOAT32, shape: [2, 2, 2]>(
 // [[[-4.000000, -1.000000],
 //   [1.500000, 2.000000]],
 //  [[0.000000, 3.000000],
 //   [5.500000, 6.000000]]])
template <typename K>
		Tensor<stronger_return<K>, n> operator-(const K other) const 

Elementwise subtraction of the constant other from this Tensor. If the datatype of K has stronger precedence than the datatype of this Tensor T, K will be the result type, else T.
template <typename K, unsigned int k>
		Tensor<stronger_return<K>, k >= n ? k : n>
		operator*(const Tensor &other) const 

Elementwise multiplication of this Tensor and other. If the dimensions differ, the smaller Tensor is broadcasted along the leading dimensions of the larger one which the two do not share. The datatype of the result is the datatype with higher precedence. E.g.
 Tensor<int, 3> a{{{0,1}, {2,3}}, {{4,5}, {6,7}}};
 Tensor<float, 2> b{{4,2},{0.5f,1}};
 std::cout << (a * b)() << std::endl;
 // Tensor<FLOAT32, shape: [2, 2, 2]>(
 // [[[0.000000, 2.000000],
 //   [1.000000, 3.000000]],
 //  [[16.000000, 10.000000],
 //   [3.000000, 7.000000]]])
template <typename K>
		Tensor<stronger_return<K>, n> operator*(const K other) const 

Elementwise multiplication of this Tensor with the constant other. If the datatype of K has stronger precedence than the datatype of this Tensor T, K will be the result type, else T.
template <typename K, unsigned int k>
		Tensor<stronger_return<K>, k >= n ? k : n>
		operator/(const Tensor &other) const 

Elementwise division of this Tensor and other. If the dimensions differ, the smaller Tensor is broadcasted along the leading dimensions of the larger one which the two do not share. The datatype of the result is the datatype with higher precedence. E.g.
 Tensor<int, 3> a{{{0,1}, {2,3}}, {{4,5}, {6,7}}};
 Tensor<float, 2> b{{4,2},{0.5f,1}};
 std::cout << (a / b)() << std::endl;
 // Tensor<FLOAT32, shape: [2, 2, 2]>(
 // [[[0.000000, 0.500000],
 //   [4.000000, 3.000000]],
 //  [[1.000000, 2.500000],
 //   [12.000000, 7.000000]]])
template <typename K>
		Tensor<stronger_return<K>, n> operator/(const K other) const 

Elementwise division of this Tensor by the constant other. If the datatype of K has stronger precedence than the datatype of this Tensor T, K will be the result type, else T.
Tensor<T, 1> flattened() const 

Flattens the complete tensor to a tensor with one dimension. E.g.
 Tensor<long, 3> a = {{{3, 1, 4}, {2, 1, 5}}, {{0, 4, 2}, {4, 7, 9}}};
 std::cout << (a.flattened())() << std::endl;
 // Tensor<INT64, shape: 12>([3, 1, 4, 2, 1, 5, 0, 4, 2, 4, 7, 9])
Tensor<T, n - 1> flattened(const int dimension) const 

Flattens this tensor with n dimensions along dimension, resulting in a tensor with n-1 dimensions. Flattening a dimension will remove it from the shape of the tensor. The data stays the same; you can imagine the elements along the flattened dimension to be appended to each other. E.g.
 Tensor<long, 3> a = {{{3, 1, 4}, {2, 1, 5}}, {{0, 4, 2}, {4, 7, 9}}};
 std::cout << (a.flattened(1))() << std::endl;
 // Tensor<INT64, shape: [4, 3]>(
 // [[3, 1, 4],
 //  [2, 1, 5],
 //  [0, 4, 2],
 //  [4, 7, 9]])
template <typename K, unsigned int k>
		Tensor<stronger_return<K>, k >= n ? k : n>
		pow(const Tensor &other) 

Elementwise power of this Tensor to other. If the dimensions differ, the smaller Tensor is broadcasted along the leading dimensions of the larger one which the two do not share. The datatype of the result is the datatype with higher precedence. E.g.
 Tensor<int, 3> a{{{0, 1}, {2, 3}}, {{4, 5}, {6, 7}}};
 Tensor<double, 2> b{{4, 2}, {0.5f, 1}};
 std::cout << (a.pow(b))() << std::endl;
 // Tensor<FLOAT64, shape: [2, 2, 2]>(
 // [[[0.000000, 1.000000],
 //   [1.414214, 3.000000]],
 //  [[256.000000, 25.000000],
 //   [2.449490, 7.000000]]])
template <typename K> Tensor<stronger_return<K>, n> pow(const K other) 

Elementwise power of this tensor to the constant other. If the datatype of K has stronger precedence than the datatype of this Tensor T, K will be the result type, else T.
Tensor<to_float<T>, n> log() 

Takes the elementwise natural logarithm of this Tensor.
Tensor<to_float<T>, n> log2() 

Takes the elementwise base-2 logarithm of this Tensor.
Tensor<to_float<T>, n> log10() 

Takes the elementwise base-10 logarithm of this Tensor.
Tensor<to_float<T>, n> sqrt() 

Takes the elementwise square root of this Tensor.
Tensor<to_float<T>, n> exp() 

Takes the elementwise exponential of this Tensor (the constant e raised to the power of each element).
Tensor<to_float<T>, n> sin() 

Takes the elementwise sine of this Tensor.
Tensor<to_float<T>, n> cos() 

Takes the elementwise cosine of this Tensor.
Tensor<to_float<T>, n> tan() 

Takes the elementwise tangent of this Tensor.
Tensor<to_float<T>, n> asin() 

Takes the elementwise arcsine of this Tensor (sin^(-1)).
Tensor<to_float<T>, n> acos() 

Takes the elementwise arccosine of this Tensor (cos^(-1)).
Tensor<to_float<T>, n> atan() 

Takes the elementwise arctangent of this Tensor (tan^(-1)).
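A short example for the unary math functions above (all of them return a floating-point Tensor):
 Tensor<int, 1> a = {1, 4, 9};
 std::cout << (a.sqrt())() << std::endl; // approximately [1.0, 2.0, 3.0]
 std::cout << (a.log())() << std::endl;  // natural logarithm of each element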
template <typename K, unsigned int k>
		Tensor<stronger_return<K>, k >= n ? k : n> matmul(Tensor &other) 

Carries out matrix multiplication on the last two dimensions of the tensors (broadcasts all others). E.g. a matrix multiplication of two tensors with shapes (64, 32, 16) and (16, 24) will yield a tensor with shape (64, 32, 24).
Since multiple previous entries are needed for one entry of the result, the operand tensors need to be executed first. Therefore the method will implicitly (or eagerly) execute this Tensor and other if their data is not already present. E.g.
 Tensor<int, 3> a{{{0, 1},
                   {2, 3}},
                  {{4, 5},
                   {6, 7}}};
 Tensor<double, 2> b{{4,    2, 3.5f},
                     {0.5f, 1, 0}};
 std::cout << (a.matmul(b))() << std::endl;
 // Tensor<FLOAT64, shape: [2, 2, 3]>(
 // [[[0.500000, 1.000000, 0.000000],
 //   [9.500000, 7.000000, 7.000000]],
 //  [[18.500000, 13.000000, 14.000000],
 //   [27.500000, 19.000000, 21.000000]]])
template <typename K> Tensor<K, n> convert() const 

Converts this Tensor (and the underlying data) to type K given in the template. K must be one of int, long, float, double. The data is converted, not reinterpreted.
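For example:
 Tensor<int, 2> a{{0, 1}, {2, 3}};
 Tensor<double, 2> b = a.convert<double>();
 // b holds the values 0.0, 1.0, 2.0, 3.0 as 64 bit floating point numbers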
template <typename... args>
		Tensor<T, sizeof...(args)> reshape(args... shape) 

Reshapes this Tensor to a new shape with arbitrary dimensions. It can have fewer dimensions, more dimensions and a completely different shape; the only assumption that has to hold is that the product of the new shape is the same as the product of the old shape (the new shape represents as many elements as the old).
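For example (both calls keep the 6 elements of the original shape [2, 3]):
 Tensor<int, 2> a{{0, 1, 2}, {3, 4, 5}};
 Tensor<int, 3> b = a.reshape(3, 2, 1); // new shape [3, 2, 1]
 Tensor<int, 1> c = a.reshape(6);       // new shape [6]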
template <size_t k>
		Tensor<T, k> reshape_array(std::array new_shape) 

Reshapes this Tensor to a new shape with arbitrary dimensions. It can have fewer dimensions, more dimensions and a completely different shape; the only assumption that has to hold is that the product of the new shape is the same as the product of the old shape (the new shape represents as many elements as the old).
Tensor<T, n + 1> expand(int ax = n, int ax_size = 0) 

Adds a new dimension at an arbitrary position to the tensor and repeats the following dimensions to match a given shape.
  • ax
    the dimension prior to which the new dimension will be inserted (0 means a new dimension in the front, n + 1 means a new last dimension).
  • ax_size
    the new size of that dimension (repeats the following dimensions ax_size - 1 times).
E.g.
 Tensor<double, 2> a = {{0, 1}, {2, 3}};
 std::cout << a.expand(0, 3)() << std::endl;
 // Tensor<FLOAT64, shape: [3, 2, 2]>(
 // [[[0.000000, 1.000000],
 //   [2.000000, 3.000000]],
 //  [[0.000000, 1.000000],
 //   [2.000000, 3.000000]],
 //  [[0.000000, 1.000000],
 //   [2.000000, 3.000000]]])
 std::cout << a.expand(1, 3)() << std::endl;
 // Tensor<FLOAT64, shape: [2, 3, 2]>(
 // [[[0.000000, 1.000000],
 //   [0.000000, 1.000000],
 //   [0.000000, 1.000000]],
 //  [[2.000000, 3.000000],
 //   [2.000000, 3.000000],
 //   [2.000000, 3.000000]]])
 std::cout << a.expand(2, 3)() << std::endl;
 // Tensor<FLOAT64, shape: [2, 2, 3]>(
 // [[[0.000000, 0.000000, 0.000000],
 //   [1.000000, 1.000000, 1.000000]],
 //  [[2.000000, 2.000000, 2.000000],
 //   [3.000000, 3.000000, 3.000000]]])
template <typename K, unsigned int k>
		Tensor<stronger_return<K>, k >= n ? k : n>
		min(const Tensor &other) const 

Takes the minimum of this tensor and other elementwise (the lower value is the result; if one tensor is smaller it will be broadcasted).
template <typename K>
		Tensor<stronger_return<K>, n> min(const K other) const 

Takes the minimum of this Tensor and the constant value other for each element.
template <typename K, unsigned int k>
		Tensor<stronger_return<K>, k >= n ? k : n>
		max(const Tensor &other) const 

Takes the maximum of this tensor and other elementwise (the higher value is the result; if one tensor is smaller it will be broadcasted).
template <typename K>
		Tensor<stronger_return<K>, n> max(const K other) const 

Takes the maximum of this Tensor and the constant value other for each element.
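For example:
 Tensor<int, 2> a{{1, 7}, {3, 5}};
 Tensor<int, 2> b{{4, 2}, {3, 6}};
 std::cout << (a.min(b))() << std::endl; // {{1, 2}, {3, 5}}
 std::cout << (a.max(4))() << std::endl; // {{4, 7}, {4, 5}}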
template <typename K, unsigned int k>
		Tensor<int, k >= n ? k : n> operator<(const Tensor &other) const 

Compares this tensor and other elementwise and returns a 0/1 integer Tensor. 0 denotes that this >= other, 1 that this < other.
template <typename K> Tensor<int, n> operator<(const K other) const 

Compares this tensor and the constant other elementwise and returns a 0/1 integer Tensor. 0 denotes that this >= other, 1 that this < other.
template <typename K, unsigned int k>
		Tensor<int, k >= n ? k : n> operator>(const Tensor &other) const 

Compares this tensor and other elementwise and returns a 0/1 integer Tensor. 0 denotes that this <= other, 1 that this > other.
template <typename K> Tensor<int, n> operator>(const K other) const 

Compares this tensor and the constant other elementwise and returns a 0/1 integer Tensor. 0 denotes that this <= other, 1 that this > other.
template <typename K, unsigned int k>
		Tensor<int, k >= n ? k : n> equal(const Tensor &other) const 

Compares this tensor and other elementwise and returns a 0/1 integer Tensor. 0 denotes that this != other, 1 that this == other.
template <typename K> Tensor<int, n> equal(const K other) const 

Compares this tensor and the constant other elementwise and returns a 0/1 integer Tensor. 0 denotes that this != other, 1 that this == other.
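For example, combining the comparison operations above:
 Tensor<int, 2> a{{1, 7}, {3, 5}};
 Tensor<int, 2> b{{4, 2}, {3, 6}};
 std::cout << (a < b)() << std::endl;      // {{1, 0}, {0, 1}}
 std::cout << (a > 4)() << std::endl;      // {{0, 1}, {0, 1}}
 std::cout << (a.equal(b))() << std::endl; // {{0, 0}, {1, 0}}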
Tensor<T, n - 1> reduce_sum(int dimension) 

Reduces one dimension of the tensor by additive folding e.g.
 Tensor<int, 3> a{{{0, 1, 32}, {2, 3, 4}},
                  {{4, 5, -6}, {6, 7, -1}}};
 std::cout << (a.reduce_sum(0))() << std::endl;
 // Tensor<INT32, shape: [2, 3]>(
 // [[4, 6, 26],
 //  [8, 10, 3]])
 std::cout << (a.reduce_sum(1))() << std::endl;
 // Tensor<INT32, shape: [2, 3]>(
 // [[2, 4, 36],
 //  [10, 12, -7]])
 std::cout << (a.reduce_sum(2))() << std::endl;
 // Tensor<INT32, shape: [2, 2]>(
 // [[33, 9],
 //  [3, 12]])
The result of this Tensor must be available; to ensure that, the method may execute this Tensor.
Tensor<T, n - 1> reduce_mul(int dimension) 

Reduces one dimension of the tensor by multiplicative folding e.g.
 Tensor<int, 3> a{{{0, 1, 32}, {2, 3, 4}}, {{4, 5, -6}, {6, 7, -1}}};
 std::cout << (a.reduce_mul(0))() << std::endl;
 // Tensor<INT32, shape: [2, 3]>(
 // [[0, 5, -192],
 //  [12, 21, -4]])
 std::cout << (a.reduce_mul(1))() << std::endl;
 // Tensor<INT32, shape: [2, 3]>(
 // [[0, 3, 128],
 //  [24, 35, 6]])
 std::cout << (a.reduce_mul(2))() << std::endl;
 // Tensor<INT32, shape: [2, 2]>(
 // [[0, 24],
 //  [-120, -42]])
The result of this Tensor must be available; to ensure that, the method may execute this Tensor.
Tensor<T, 1> reduce_mul() 

Reduces all dimensions of the tensor by multiplication, e.g.
 Tensor<int, 3> a{{{0, 1, 32}, {2, 3, 4}}, {{4, 5, -6}, {6, 7, -1}}};
 std::cout << a.reduce_mul()() << std::endl;
 // Tensor<INT32, shape: [1]>([0])
The result of this Tensor must be available; to ensure that, the method may execute this Tensor.
Tensor<T, 1> reduce_sum() 

Reduces all dimensions of the tensor by summation, e.g.
 Tensor<int, 3> a{{{0, 1, 32}, {2, 3, 4}}, {{4, 5, -6}, {6, 7, -1}}};
 std::cout << a.reduce_sum()() << std::endl;
 // Tensor<INT32, shape: [1]>([57])
The result of this Tensor must be available; to ensure that, the method may execute this Tensor.
Tensor<T, n - 1> reduce_min(int dimension) 

Reduces one dimension of the tensor by keeping the minimum, e.g.
 Tensor<int, 3> a{{{0, 1, 32}, {2, 3, 4}}, {{4, 5, -6}, {6, 7, -1}}};
 std::cout << a.reduce_min(0)() << std::endl;
 // Tensor<INT32, shape: [2, 3]>(
 // [[0, 1, -6],
 //  [2, 3, -1]])
 std::cout << a.reduce_min(1)() << std::endl;
 // Tensor<INT32, shape: [2, 3]>(
 // [[0, 1, 4],
 //  [4, 5, -6]])
 std::cout << a.reduce_min(2)() << std::endl;
 // Tensor<INT32, shape: [2, 2]>(
 // [[0, 2],
 //  [-6, -1]])
The result of this Tensor must be available; to ensure that, the method may execute this Tensor.
Tensor<T, 1> reduce_min() 

Reduces all dimensions of the tensor by keeping the minimum value, e.g.
 Tensor<int, 3> a{{{0, 1, 32}, {2, 3, 4}}, {{4, 5, -6}, {6, 7, -1}}};
 std::cout << a.reduce_min()() << std::endl;
 // Tensor<INT32, shape: [1]>([-6])
The result of this Tensor must be available; to ensure that, the method may execute this Tensor.
Tensor<T, n - 1> reduce_max(int dimension) 

Reduces one dimension of the tensor by keeping the maximum, e.g.
 Tensor<int, 3> a{{{0, 1, 32}, {2, 3, 4}}, {{4, 5, -6}, {6, 7, -1}}};
 std::cout << a.reduce_max(0)() << std::endl;
 // Tensor<INT32, shape: [2, 3]>(
 // [[4, 5, 32],
 //  [6, 7, 4]])
 std::cout << a.reduce_max(1)() << std::endl;
 // Tensor<INT32, shape: [2, 3]>(
 // [[2, 3, 32],
 //  [6, 7, -1]])
 std::cout << a.reduce_max(2)() << std::endl;
 // Tensor<INT32, shape: [2, 2]>(
 // [[32, 4],
 //  [5, 7]])
The result of this Tensor must be available; to ensure that, the method may execute this Tensor.
Tensor<T, 1> reduce_max() 

Reduces all dimensions of the tensor by keeping the maximum value, e.g.
 Tensor<int, 3> a{{{0, 1, 32}, {2, 3, 4}}, {{4, 5, -6}, {6, 7, -1}}};
 std::cout << a.reduce_max()() << std::endl;
 // Tensor<INT32, shape: [1]>([32])
The result of this Tensor must be available; to ensure that, the method may execute this Tensor.
Tensor<T, n> abs() const 

Takes the elementwise absolute value of this Tensor (negative signs are removed).
template <size_t k>
		Tensor<T, n> slice_array(std::array ranges) const 

Like slice but with an array of TensorRanges instead of variadic arguments.
template <typename... args>
		Tensor<T, n> slice(const args... dim_ranges) const 

Selects a slice of the tensor with a dimension-wise start index, end index and step size. The arguments of this function are objects of the type TensorRange; there may be as many arguments as dimensions or fewer. The arguments start with the first one describing the first dimension, the second one describing the second and so on. If there are fewer arguments than dimensions, all elements of the missing last dimensions will be selected.
Each TensorRange contains a start, end and step member. start and end may be negative values, which are then subtracted from the end of the tensor (e.g. -1 means the element before the last element). start is inclusive and describes the start index of the selection per dimension, and end describes the end index per dimension and is exclusive. step contains the per-dimension step size (e.g. 2 meaning every second element will be selected etc.) and may be negative as well, which reverses the traversal order (the first elements are selected as the last ones). For a negative step size, start > end must hold (for a positive one of course end > start) for each dimension. E.g.
 Tensor<int, 3> a{{{0, 1, 32}, {2, 3, 4}}, {{4, 5, -6}, {6, 7, -1}}};
 std::cout << (a.slice(TensorRange(0, 2), TensorRange(0, -1),
                       TensorRange(2, 0, -1)))()
           << std::endl;
 // Tensor<INT32, shape: [2, 1, 2]>(
 // [[[32, 1]],
 //  [[-6, 5]]])
To help with indexing there is the value TensorRange::MAX_SCOPE which describes an index depending on the traversal order in that dimension (i.e. the sign of step):
  • for forward traversal it denotes in start the shape of that dimension - 1 (which is the last element start can index) and in end the shape of that dimension
  • for backward traversal it denotes in start 0 and in end the element before 0 (this is necessary since otherwise it would not be possible to just reverse a dimension without eliminating values). E.g.
 Tensor<int, 2> a{{0, 1, 2, 3}, {4, 5, 6, 7}};
 std::cout << (a.slice(
               TensorRange(TensorRange::MAX_SCOPE,
                           TensorRange::MAX_SCOPE),
               TensorRange(TensorRange::MAX_SCOPE,
                           TensorRange::MAX_SCOPE, -1)))()
           << std::endl;
 // Tensor<INT32, shape: [2, 4]>(
 // [[3, 2, 1, 0],
 //  [7, 6, 5, 4]])
Tensor<T, n> extend(std::array new_shape,
							std::array indices) 

Creates a new tensor of zeroes with the requested shape. The original tensor is embedded at the given indices. Typically used to add padding to a Tensor.
  • new_shape
    an array describing the new shape (the number of dimensions stays the same).
  • indices
    an array of indices per dimension where the Tensor should be placed inside of that dimension. Each entry describes the start index (meaning every prior index in that dimension will only contain 0s). It is important that the size in that dimension (denoted in new_shape) is large enough to hold the size of this Tensor plus the start index. E.g.
 Tensor<int, 3> a{{{0, 1, 2}, {3, 4, 5}}, {{6, 7, 8}, {9, -2, -1}}};
 std::cout << (a.extend(std::array<size_t, 3>{3, 4, 4},
                        std::array<size_t, 3>{1, 0, 1}))()
           << std::endl;
 // Tensor<INT32, shape: [3, 4, 4]>(
 // [[[0, 0, 0, 0],
 //   [0, 0, 0, 0],
 //   [0, 0, 0, 0],
 //   [0, 0, 0, 0]],
 //  [[0, 0, 1, 2],
 //   [0, 3, 4, 5],
 //   [0, 0, 0, 0],
 //   [0, 0, 0, 0]],
 //  [[0, 6, 7, 8],
 //   [0, 9, -2, -1],
 //   [0, 0, 0, 0],
 //   [0, 0, 0, 0]]])
Tensor<T, n> extend(std::array new_shape,
							std::array indices,
							std::array steps) 

Creates a new tensor of zeroes with the requested shape. The original tensor is embedded at the given indices and with a step size.
  • new_shape
    an array describing the new shape (the number of dimensions stays the same).
  • indices
    an array of indices per dimension where the Tensor should be placed inside of that dimension. Each entry describes the start index (meaning every prior index in that dimension will only contain 0s). It is important that the size in that dimension (denoted in new_shape) is large enough to hold the size of this Tensor plus the start index.
  • steps
    an array of step sizes per dimension. A step size of 2 means that between each value in that dimension of the original Tensor an additional 0 is placed. May be negative to reverse the traversal order; in that case the index denotes the end of the traversal (it still describes the index of the first value that occurs in the result tensor). E.g.
 Tensor<int, 3> a{{{0, 1, 2}, {3, 4, 5}}, {{6, 7, 8}, {9, -2, -1}}};
 std::cout << (a.extend(std::array<size_t, 3>{3, 4, 4},
                        std::array<size_t, 3>{0, 0, 1},
                        std::array<long, 3>{2, 3, -1}))()
           << std::endl;
 // Tensor<INT32, shape: [3, 4, 4]>(
 // [[[0, 2, 1, 0],
 //   [0, 0, 0, 0],
 //   [0, 0, 0, 0],
 //   [0, 5, 4, 3]],
 //  [[0, 0, 0, 0],
 //   [0, 0, 0, 0],
 //   [0, 0, 0, 0],
 //   [0, 0, 0, 0]],
 //  [[0, 8, 7, 6],
 //   [0, 0, 0, 0],
 //   [0, 0, 0, 0],
 //   [0, -1, -2, 9]]])
Tensor<T, n>
		repeat_array(std::array repetitions) const 

Repeats dimensions of a tensor multiple times. repetitions is an array with the same number of entries as the tensor has dimensions. If repetitions has in a dimension a value x, the resulting shape in that dimension is x + 1 times larger than that of the original Tensor (because it is concatenated with itself x times), such that 0 results in no change of the shape of that dimension and a 1 repeats the Tensor one time. E.g.
 Tensor<int, 3> a{{{0, 1}, {1, 2}}, {{2, 3}, {3, 4}}};
 std::cout << (a.repeat_array(std::array<int, 3>{0, 1, 2}))() << std::endl;
 // Tensor<INT32, shape: [2, 4, 6]>(
 // [[[0, 1, 0, 1, 0, 1],
 //   [1, 2, 1, 2, 1, 2],
 //   [0, 1, 0, 1, 0, 1],
 //   [1, 2, 1, 2, 1, 2]],
 //  [[2, 3, 2, 3, 2, 3],
 //   [3, 4, 3, 4, 3, 4],
 //   [2, 3, 2, 3, 2, 3],
 //   [3, 4, 3, 4, 3, 4]]])
template <typename... args>
		Tensor<T, n> repeat(const args... repetitions) const 

Repeats dimensions of a tensor multiple times. This function allows one repetition argument per dimension (missing dimensions will be filled with 0s). For one such repetition argument x, the resulting shape in that dimension is x + 1 times larger than that of the original Tensor (because it is concatenated with itself x times), such that 0 results in no change of the shape of that dimension and a 1 repeats the Tensor one time. E.g.
 Tensor<int, 3> a{{{0, 1}, {1, 2}}, {{2, 3}, {3, 4}}};
 std::cout << (a.repeat(0, 1, 2))() << std::endl;
 // Tensor<INT32, shape: [2, 4, 6]>(
 // [[[0, 1, 0, 1, 0, 1],
 //   [1, 2, 1, 2, 1, 2],
 //   [0, 1, 0, 1, 0, 1],
 //   [1, 2, 1, 2, 1, 2]],
 //  [[2, 3, 2, 3, 2, 3],
 //   [3, 4, 3, 4, 3, 4],
 //   [2, 3, 2, 3, 2, 3],
 //   [3, 4, 3, 4, 3, 4]]])
Tensor<T, n> transpose(std::initializer_list transposition = 

Transposes this tensor along multiple dimensions. transposition is an array with the same number of entries as the tensor has dimensions, which gives the permutation of dimensions. The tensor will have a resulting shape in which the size in dimension i corresponds to the former size in dimension transposition[i]. transposition may be smaller than the number of dimensions of the original Tensor, in which case the remaining dimensions will be fully transposed (0 with n-1, 1 with n-2, ...). E.g.
 Tensor<int, 3> a{{{0, 1}, {1, 2}}, {{2, 3}, {3, 4}}};
 std::cout << (a.transpose({1, 0, 2}))() << std::endl;
 // Tensor<INT32, shape: [2, 2, 2]>(
 // [[[0, 1],
 //   [2, 3]],
 //  [[1, 2],
 //   [3, 4]]])
 std::cout << (a.transpose())() << std::endl;
 // Tensor<INT32, shape: [2, 2, 2]>(
 // [[[0, 2],
 //   [1, 3]],
 //  [[1, 3],
 //   [2, 4]]])
Tensor<T, n> transpose_array(std::array transposition) 

Same as transpose, but with the transposition given as an array.
template <typename K, unsigned int k>
		Tensor<stronger_return<K>, n == k ? n - 1 : n>
		convolve_array(const Tensor &kernel,
					   const std::array steps) const 

Same as convolve, but with steps as an array.
template <typename K, unsigned int k, typename... args>
		Tensor<stronger_return<K>, k == n ? n - 1 : n>
		convolve(const Tensor &kernel, const args... steps) const 

Convolves the n-dimensional input tensor with an n- or (n+1)-dimensional filter kernel kernel and a per-dimension step size steps with size of n-1. This operation basically has two modi:
  • one where there is exactly one filter, s.t. kernel has dimensionality n, the same size in the last dimension as the input tensor (since that dimension will be completely reduced) and in all other dimensions kernel should have the same or smaller size than the input tensor.
  • one where kernel represents an array of filters in its first dimension, s.t. kernel has dimensionality of n+1. Each kernel in the first dimension is convolved like it would be in the first modus. The convolution results of each kernel are concatenated in the last dimension of the result Tensor. Let's say the input tensor has a shape of [x, y, c] and the kernel has [f, a, b, c]; the shape of the result will be [convolveshape(x, a, steps[0]), convolveshape(y, b, steps[1]), f] (for the semantics of convolveshape() look at the end of this documentation).
It is expected that the input and kernel have the same size in their last dimension (which will be completely reduced by the convolution). In all other dimensions the size of the input tensor should be larger or equal to the size of kernel. The kernel will be 'slid' over the tensor in each dimension, multiplying all values of kernel with the corresponding ones in the tensor, summing them up to a single value and moving the kernel further by the value given in steps in that corresponding dimension.
The implementation does not include any padding, meaning only convolutions where the complete kernel still fits into the array will be executed (the shape will be calculated correspondingly). If you want to modify this behaviour (i.e. include padding) you can use extend, slice or similar.
The resulting Tensor will therefore have a shape with dimensionality n - 1 (or in the case of multiple kernels n) and size of (shape[i] - kernel.get_shape()[i] - 1) / steps[i] if (shape[i] - kernel.get_shape()[i] - 1) is divisible by steps[i], else (shape[i] - kernel.get_shape()[i] - 1) / steps[i] + 1. E.g.
 Tensor<float, 3> t1{{{0, 1}, {1, 2}, {3, 4}},
                     {{5, 6}, {7, 8}, {9, 0}},
                     {{-1,-2},{-3,-4},{-5,-6}}};
 Tensor<float, 3> k1{{{1, 1}, {2, 2}}};
 std::cout << t1.convolve(k1, 2, 1)() << std::endl;
 // Tensor<FLOAT32, shape: [2, 3]>(
 // [[7.000000, 17.000000, 7.000000],
 //  [-17.000000, -29.000000, -11.000000]])
 Tensor<float, 3> t1{{{0, 1}, {1, 2}, {2, 3}},
                    {{3, 4}, {5, 6}, {7, 8}}};
 Tensor<float, 4> k1{
     {{{1, 1}, {2, -1}}},
     {{{-1, 1}, {1, 0}}},
     {{{-2, 1}, {2, -1}}}};
 std::cout << t1.convolve(k1, 1, 1)() << std::endl;
 // Tensor<FLOAT32, shape: [2, 2, 3]>(
 // [[[1.000000, 2.000000, 1.000000],
 //   [4.000000, 3.000000, 1.000000]],
 //  [[11.000000, 6.000000, 2.000000],
 //   [17.000000, 8.000000, 2.000000]]])
template <typename K, unsigned int k>
		Tensor<T, n> index(const Tensor &indices) const 

Selects single elements with an index tensor (an integer tensor containing indices for the selected dimension). It indexes a dimension of the input tensor and the result has the shape of the input tensor except for the indexed dimension. It is assumed that, except for the last entry, the shape of indices is a prefix of the shape of the input tensor, and the indexing will occur in the matched subsets (the last dimension of the indices Tensor is the one indexed in the input tensor). E.g.
 Tensor<double, 3> a = {
     {{0, 1}, {2, 3}},
     {{4, 5}, {6, 7}},
     {{8, 9}, {10, 11}}};
 Tensor<int, 1> i1 = {0, 2};
 std::cout << a.index(i1)() << std::endl;
 // Tensor<FLOAT64, shape: [2, 2, 2]>(
 // [[[0.000000, 1.000000],
 //   [2.000000, 3.000000]],
 //  [[8.000000, 9.000000],
 //   [10.000000, 11.000000]]])
 Tensor<int, 1> i2 = {0, 1, 1, 2};
 std::cout << a.index(i2)() << std::endl;
 // Tensor<FLOAT64, shape: [4, 2, 2]>(
 // [[[0.000000, 1.000000],
 //   [2.000000, 3.000000]],
 //  [[4.000000, 5.000000],
 //   [6.000000, 7.000000]],
 //  [[4.000000, 5.000000],
 //   [6.000000, 7.000000]],
 //  [[8.000000, 9.000000],
 //   [10.000000, 11.000000]]])
template <typename K, unsigned int k>
		Tensor<T, n> index_set(const Tensor &b,
							   const Tensor &indices) const 

Assigns to each element in b one element in the input tensor to which that element will be "sent", i.e. the place in the input tensor the index points to will be set to the corresponding element from b. If multiple elements from b are sent to the same place in the input tensor they will be summed up. The shape of indices must be a prefix of the shape of b, meaning it can have as many dimensions as b or fewer, but the sizes of those dimensions must be the same as the first ones of the shape of b. E.g.
 Tensor<int, 2> a3 = {{0, 1}, {2, 3}, {4, 5}, {6, 7}};
 Tensor<int, 2> b3 = {{4, 5}, {6, 7}, {8, 9}};
 Tensor<int, 1> i3 = {0, 0, 2};
 std::cout << a3.index_set(b3, i3)() << std::endl;
 // Tensor<INT32, shape: [4, 2]>(
 // [[10, 12],
 //  [2, 3],
 //  [8, 9],
 //  [6, 7]])
 Tensor<int, 2> i4 = {{-1, 0}, {1, 1}, {1, 0}, {1, -1}};
 Tensor<int, 2> b4 = {{4, 5}, {6, 7}, {8, 9}, {10, 11}};
 std::cout << a3.index_set(b4, i4)() << std::endl;
 // Tensor<INT32, shape: [4, 2]>(
 // [[5, 1],
 //  [2, 13],
 //  [9, 8],
 //  [6, 10]])
Tensor<T, n + 1>
		sliding_window(std::array window_size,
					   std::array step_size = 

Creates "views" in an additional dimension of a fixed size windows. The window of size
window_size
is slid in each dimension along the Tensor starting from the beginning and moving
step_size
entries in each dimension (the last dimension is moved first until it is fully traversed, then the next dimension is moved - i.e. the tensor is traversed for the windows like a nested for loop for each dimension). The windows are concatenated in a extra dimension, which becomes the first dimension of the result Tensor.
E.g.
 Tensor<int, 3> a = {{{1, 2}, {3, 4}, {5, 6}, {7, 8}},
                      {{9, 10}, {11, 12}, {13, 14}, {15, 16}},
                      {{17, 18}, {19, 20}, {21, 22}, {23, 24}}};
 Tensor<int, 4> b = a.sliding_window(std::array<size_t, 3>{2, 2, 2},
                                     std::array<unsigned int, 3>{1, 2, 1});
 std::cout << b << std::endl;
 // Tensor<INT32, shape: [4, 2, 2, 2]>(
 // [[[[1, 2],
 //    [3, 4]],
 //   [[9, 10],
 //    [11, 12]]],
 //  [[[5, 6],
 //    [7, 8]],
 //   [[13, 14],
 //    [15, 16]]],
 //  [[[9, 10],
 //    [11, 12]],
 //   [[17, 18],
 //    [19, 20]]],
 //  [[[13, 14],
 //    [15, 16]],
 //   [[21, 22],
 //    [23, 24]]]])
Tensor<T, n - 1>
		unslide_window(std::array result_size,
					   std::array step_size = 

Takes an array of windows (like e.g. the result of sliding_window) and reconstructs the original tensor.
  • result_size
    the shape of the original tensor the views were taken from
  • step_size
    the step size in each dimension with which the view was slid over the tensor to create the windows
The shape of the window is inferred from the shape of the input tensor. Overlapping elements will be summed up. If in a dimension step_size was larger than the window size, the resulting tensor will have 0 elements where the "gaps" between the windows were. If in a dimension step_size was smaller than the window size (the windows were "overlapping"), the overlapping elements are summed up in the result. result_size and step_size therefore have one entry less than the input tensor has dimensions.
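A sketch of a sliding_window / unslide_window round trip (the array element types are assumptions, following the sliding_window example above; with these step sizes the windows do not overlap, so the original values are recovered exactly):
 Tensor<int, 2> a{{0, 1, 2, 3}, {4, 5, 6, 7}};
 Tensor<int, 3> windows = a.sliding_window(std::array<size_t, 2>{2, 2},
                                           std::array<unsigned int, 2>{1, 2});
 // windows has shape [2, 2, 2]: two non-overlapping 2x2 views of a
 Tensor<int, 2> b = windows.unslide_window(std::array<size_t, 2>{2, 4},
                                           std::array<unsigned int, 2>{1, 2});
 // b has shape [2, 4] and the same values as a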
Tensor<T, n - 1>
		pooling_max(std::array window_size,
					std::array step_size = 

Slides a window along the Tensor and reduces all elements inside that window to their maximum value (only that one remains in the result tensor), and then slides the window in each dimension by step_size forward (like sliding_window). The last dimension is completely pooled, and the result has one dimension fewer than the original tensor.
Tensor<T, n - 1>
		pooling_sum(std::array window_size,
					std::array step_size = 

Slides a window along the Tensor and reduces all elements inside that window to their sum, and then slides the window in each dimension by step_size forward (like sliding_window). The last dimension is completely pooled, and the result has one dimension fewer than the original tensor.
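A small sketch of pooling (the array element types and sizes are assumptions: the window is given for the first n-1 dimensions; only the result shape is commented):
 Tensor<float, 3> img = Tensor<float, 3>::constant(1.0f, 4, 4, 2);
 Tensor<float, 2> pooled = img.pooling_max(std::array<size_t, 2>{2, 2},
                                           std::array<unsigned int, 2>{2, 2});
 // the window is slid with step 2 in the first two dimensions and the last
 // dimension is completely pooled, so pooled has shape [2, 2]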
Tensor<T, n> permutate(unsigned int ax) const 

Randomly permutes (i.e. swaps multiple elements with each other without creating, copying or deleting any) one axis of the input tensor.
Tensor<T, n> dropout(double p) const 

Randomly sets elements in the tensor to 0 with probability p.
FGraphNode *get_graph_node() const 

Returns the underlying FGraphNode for use with the C frontend. It is still memory-managed by this Tensor instance, so be careful about variable lifetimes.
void set_graph_node(FGraphNode *node) 

Changes the managed node to the given parameter without freeing the current node or changing its reference counter (the reference counter of the new node is not modified either). If the node is not nullptr, its reference counter should have been incremented before this method is called; be careful about variable lifetimes!
template <typename K, unsigned int k>
		Tensor<to_float<stronger_return<K>>, k>
		gradient(const Tensor &dx) const 

Calculates the gradient of this Tensor with respect to dx. A gradient is always a floating-point Tensor; if both this tensor and dx are of type float, the gradient is also of type float, else of type double. dx needs to have been marked with watch before the construction of this Tensor, and this Tensor must be constructed inside a gradient context, either started by fStartGradientContext or by a GradientContext object.
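A sketch of the intended usage (only watch, GradientContext and gradient as documented here are used; the values are purely illustrative):
 Tensor<double, 2> w{{1.0, 2.0}, {3.0, 4.0}};
 w.watch();                         // mark w so gradients towards it can be computed
 GradientContext gc;                // the tensors below are constructed inside the gradient context
 Tensor<double, 2> x{{0.5, 0.5}, {0.5, 0.5}};
 Tensor<double, 1> loss = (w * x).reduce_sum();
 Tensor<double, 2> dw = loss.gradient(w); // d loss / d w, here elementwise equal to x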
void watch() 

Watches this node, i.e. collects information needed to calculate the gradient with this node as a derivative
void unwatch() 

Removes the gradient mark (and subsequent memory overhead) for this node. After a call to this method no subsequent gradient calculations with this node as a derivative will be possible.