Skip to content

Multilinear

Tensor arithmetic.

Prefixed by ten... (tensor).

Handle multiaxis vectors, that for example represent multivariate polynomials.

Tensors are accepted as numpy.array_like and returned as numpy.ndarrays.

Broadcasting happens similarly to numpy's broadcasting, but the axes are matched in ascending order instead of descending order, and the arrays don't get stretched but rather padded with zeros.

creation

tenzero = np.zeros((0,), dtype=object)

Zero tensor.

\[ 0 \qquad \mathbb{K}^0 \]

An empty array.

Notes

Why shape (0,) (=one dimensional, zero length) instead of () (zero dimensional)?

Shape () would be size one (empty product) and a scalar that could have any nonzero value.

Dimensionality of one isn't perfect, but at least its size is then zero and it couldn't be any arbitrary value.

tenbasis(i, c=1)

Return a basis tensor.

\[ ce_i \]

Returns a numpy.ndarray with i+1 zeros in each direction and a c in the outer corner.

Source code in vector\multilinear\creation.py
28
29
30
31
32
33
34
35
36
37
38
39
40
def tenbasis(i, c=1):
    """Return a basis tensor.

    $$
        ce_i
    $$

    Returns a `numpy.ndarray` with `i+1` zeros in each direction and a `c` in
    the outer corner.
    """
    shape = np.add(i, 1)
    basis = np.zeros(shape, dtype=np.result_type(c))
    #index with i as-is; it may be a scalar, so it must not be unpacked
    basis[i] = c
    return basis

tenrand(*d)

Return a random tensor of uniform sampled float coefficients.

\[ t \sim \mathcal{U}^d([0, 1[) \]

The coefficients are sampled from a uniform distribution in [0, 1[.

Notes

Naming like numpy.random, because seems more concise (not random & gauss as in the stdlib).

See also
Source code in vector\multilinear\creation.py
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
def tenrand(*d):
    r"""Return a random tensor of uniform sampled `float` coefficients.

    $$
        t \sim \mathcal{U}^d([0, 1[)
    $$

    The coefficients are drawn from a uniform distribution over `[0, 1[`.

    Notes
    -----
    Naming like [`numpy.random`](https://numpy.org/doc/stable/reference/random/legacy.html),
    because seems more concise (not `random` & `gauss` as in the stdlib).

    See also
    --------
    - wraps: [`numpy.random.rand`](https://numpy.org/doc/stable/reference/generated/numpy.random.rand.html)
    """
    sample = np.random.rand(*d)
    return sample

tenrandn(*d)

Return a random tensor of normal sampled float coefficients.

\[ t \sim \mathcal{N}^d(0, 1) \]

The coefficients are sampled from a normal distribution.

Notes

Naming like numpy.random, because seems more concise (not random & gauss as in the stdlib).

See also
Source code in vector\multilinear\creation.py
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
def tenrandn(*d):
    r"""Return a random tensor of normal sampled `float` coefficients.

    $$
        t \sim \mathcal{N}^d(0, 1)
    $$

    The coefficients are drawn from a standard normal distribution.

    Notes
    -----
    Naming like [`numpy.random`](https://numpy.org/doc/stable/reference/random/legacy.html),
    because seems more concise (not `random` & `gauss` as in the stdlib).

    See also
    --------
    - wraps: [`numpy.random.randn`](https://numpy.org/doc/stable/reference/generated/numpy.random.randn.html)
    """
    sample = np.random.randn(*d)
    return sample

utility

tenrank(t)

Return the rank.

\[ \text{rank}\,t \]
See also
Source code in vector\multilinear\utility.py
 9
10
11
12
13
14
15
16
17
18
19
20
def tenrank(t):
    r"""Return the rank.

    $$
        \text{rank}\,t
    $$

    See also
    --------
    - wraps: [`numpy.ndim`](https://numpy.org/doc/stable/reference/generated/numpy.ndim.html)
    """
    #np.ndim coerces array_likes exactly like np.asarray(t).ndim
    return np.ndim(t)

tendim(t)

Return the dimensionalities.

\[ \dim t \]
See also
Source code in vector\multilinear\utility.py
22
23
24
25
26
27
28
29
30
31
32
33
def tendim(t):
    r"""Return the dimensionalities.

    $$
        \dim t
    $$

    See also
    --------
    - wraps: [`numpy.shape`](https://numpy.org/doc/stable/reference/generated/numpy.shape.html)
    """
    #np.shape coerces array_likes exactly like np.asarray(t).shape
    return np.shape(t)

teneq(s, t)

Return whether two tensors are equal.

\[ s\overset{?}{=}t \]
Source code in vector\multilinear\utility.py
35
36
37
38
39
40
41
42
def teneq(s, t):
    r"""Return whether two tensors are equal.

    $$
        s\overset{?}{=}t
    $$

    Tensors of different shapes are compared under the module's broadcasting
    convention: axes are matched in ascending order and missing coefficients
    are treated as zeros, so e.g. `[1, 2]` equals `[1, 2, 0]`.
    """
    s, t = np.asarray(s), np.asarray(t)
    #match ranks: extra axes of the higher-rank tensor are entered at index 0,
    #i.e. the lower-rank tensor gets trailing length-one axes (as in tenadd)
    ndim = max(s.ndim, t.ndim)
    s = s.reshape(s.shape + (1,)*(ndim-s.ndim))
    t = t.reshape(t.shape + (1,)*(ndim-t.ndim))
    #embed both into a zero tensor of the elementwise maximum shape
    shape = tuple(map(max, s.shape, t.shape))
    sp = np.zeros(shape, dtype=s.dtype)
    tp = np.zeros(shape, dtype=t.dtype)
    sp[tuple(map(slice, s.shape))] = s
    tp[tuple(map(slice, t.shape))] = t
    return bool(np.all(sp == tp))

tentrim(t, tol=None)

Remove all trailing near zero (abs(t_i)<=tol) coefficients.

tol may also be None, then all coefficients that evaluate to False are trimmed.

Notes
  • Cutting off elements that are abs(t_i)<=tol instead of abs(t_i)<tol to allow cutting off elements that are exactly zero by trim(t, 0) instead of trim(t, sys.float_info.min).
Source code in vector\multilinear\utility.py
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
def tentrim(t, tol=None):
    """Remove all trailing near zero (`abs(t_i)<=tol`) coefficients.

    `tol` may also be `None`,
    then all coefficients that evaluate to `False` are trimmed.

    After trimming, an empty result is normalised to shape `(0,)` and
    trailing length-one axes are removed, reducing the rank.

    Notes
    -----
    - Cutting off elements that are `abs(t_i)<=tol` instead of `abs(t_i)<tol` to
    allow cutting off elements that are exactly zero by `trim(t, 0)` instead
    of `trim(t, sys.float_info.min)`.
    """
    t = np.asarray(t)
    for d in range(t.ndim): #reduce dimension
        #last hyperplane along axis d, and the same tensor without it
        slc_idx = (slice(None),)*d + (-1,) + (...,)
        slc_drop = (slice(None),)*d + (slice(-1),) + (...,)
        #drop trailing hyperplanes while every coefficient in them is (near) zero
        while t.shape[d]>0 and np.all(np.logical_not(t[slc_idx].astype(bool)) if tol is None else np.abs(t[slc_idx])<=tol):
            t = t[slc_drop]
    if t.size == 0:
        #canonical empty (zero) tensor shape
        return t.reshape((0,))
    while t.ndim>1 and t.shape[-1]==1: #reduce rank
        t = t[..., 0]
    return t

tenrshift(t, n)

Shift coefficients up.

See also
Source code in vector\multilinear\utility.py
68
69
70
71
72
73
74
75
def tenrshift(t, n):
    """Shift coefficients up.

    Prepends `n[k]` zeros along each axis `k`.

    See also
    --------
    - wraps: [`numpy.pad`](https://numpy.org/doc/stable/reference/generated/numpy.pad.html)
    """
    widths = tuple((amount, 0) for amount in n)
    return np.pad(t, widths)

tenlshift(t, n)

Shift coefficients down.

See also
Source code in vector\multilinear\utility.py
77
78
79
80
81
82
83
84
def tenlshift(t, n):
    """Shift coefficients down.

    Drops the first `n[k]` coefficients along each axis `k` and returns
    an independent copy.
    """
    index = tuple(slice(amount, None) for amount in n)
    return np.array(t)[index].copy()

hilbert_space

tenconj(t)

Return the elementwise complex conjugate.

\[ t^* \]
See also
Source code in vector\multilinear\hilbert_space.py
 9
10
11
12
13
14
15
16
17
18
19
20
21
def tenconj(t):
    """Return the elementwise complex conjugate.

    $$
        t^*
    $$

    See also
    --------
    - one-dimensional: [`vecconj`][vector.dense.hilbert_space.vecconj]
    - wraps: [`numpy.conjugate`](https://numpy.org/doc/stable/reference/generated/numpy.conjugate.html)
    """
    #np.conj is the documented alias of np.conjugate
    return np.conj(t)

tenprod(s, t)

Return the tensor product.

\[ s \otimes t \]
See also
Source code in vector\multilinear\hilbert_space.py
23
24
25
26
27
28
29
30
31
32
33
34
def tenprod(s, t):
    r"""Return the tensor product.

    $$
        s \otimes t
    $$

    See also
    --------
    - wraps: [`numpy.tensordot`](https://numpy.org/doc/stable/reference/generated/numpy.tensordot.html)
    """
    #axes=0 means no contraction: the outer (tensor) product
    return np.tensordot(s, t, axes=0)

vector_space

tenpos(t)

Return the identity.

\[ +t \]
See also
Source code in vector\multilinear\vector_space.py
11
12
13
14
15
16
17
18
19
20
21
22
def tenpos(t):
    """Return the identity.

    $$
        +t
    $$

    See also
    --------
    - wraps: [`numpy.positive`](https://numpy.org/doc/stable/reference/generated/numpy.positive.html)
    """
    identity = np.positive(t)
    return identity

tenneg(t)

Return the negation.

\[ -t \]
See also
Source code in vector\multilinear\vector_space.py
24
25
26
27
28
29
30
31
32
33
34
35
def tenneg(t):
    """Return the negation.

    $$
        -t
    $$

    See also
    --------
    - wraps: [`numpy.negative`](https://numpy.org/doc/stable/reference/generated/numpy.negative.html)
    """
    negated = np.negative(t)
    return negated

tenadd(*ts)

Return the sum.

\[ t_0 + t_1 + \cdots \]
See also
  • for sum on a single coefficient: tenaddc
Source code in vector\multilinear\vector_space.py
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
def tenadd(*ts):
    r"""Return the sum.

    $$
        t_0 + t_1 + \cdots
    $$

    See also
    --------
    - for sum on a single coefficient: [`tenaddc`][vector.multilinear.vector_space.tenaddc]
    """
    arrays = tuple(np.asarray(t) for t in ts)
    dtype = np.result_type(*arrays) if arrays else object
    total = np.zeros(vechadamardmax(*(a.shape for a in arrays)), dtype=dtype)
    for a in arrays:
        #embed a at index 0 along the axes it does not span
        region = tuple(slice(None, n) for n in a.shape) + (0,)*(total.ndim-a.ndim)
        total[region] += a
    return total

tenaddc(t, c, i=(0,))

Return the sum with a basis tensor.

\[ t+ce_i \]

More efficient than tenadd(t, tenbasis(i, c)).

See also
  • for sum on more coefficients: tenadd
Source code in vector\multilinear\vector_space.py
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
def tenaddc(t, c, i=(0,)):
    """Return the sum with a basis tensor.

    $$
        t+ce_i
    $$

    More efficient than `tenadd(t, tenbasis(i, c))`.

    See also
    --------
    - for sum on more coefficients: [`tenadd`][vector.multilinear.vector_space.tenadd]
    """
    t = np.asarray(t)
    #raise the rank until the index i fits
    while t.ndim < len(i):
        t = np.expand_dims(t, axis=-1)
    #grow the leading axes so i is in bounds (np.pad always returns a copy,
    #so the caller's array is never mutated)
    t = np.pad(t, tuple((0, max(ii-s+1, 0)) for s, ii in zip(t.shape, i)))
    #pad i with zeros for the trailing axes it does not address; the previous
    #(0,)*(len(i)-t.ndim) was never positive after the rank expansion above,
    #so c was broadcast over whole subarrays when t.ndim > len(i)
    t[i + (0,)*(t.ndim-len(i))] += c
    return t

tensub(s, t)

Return the difference.

\[ s - t \]
See also
  • for difference on a single coefficient: tensubc
Source code in vector\multilinear\vector_space.py
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
def tensub(s, t):
    """Return the difference.

    $$
        s - t
    $$

    See also
    --------
    - for difference on a single coefficient: [`tensubc`][vector.multilinear.vector_space.tensubc]
    """
    s, t = np.asarray(s), np.asarray(t)
    result = np.zeros(vechadamardmax(s.shape, t.shape), dtype=np.result_type(s, t))
    #embed each operand at index 0 along the axes it does not span
    region_s = tuple(slice(None, n) for n in s.shape) + (0,)*(result.ndim-s.ndim)
    region_t = tuple(slice(None, n) for n in t.shape) + (0,)*(result.ndim-t.ndim)
    result[region_s] = s
    result[region_t] -= t
    return result

tensubc(t, c, i=(0,))

Return the difference with a basis tensor.

\[ t-ce_i \]

More efficient than tensub(t, tenbasis(i, c)).

See also
  • for difference on more coefficients: tensub
Source code in vector\multilinear\vector_space.py
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
def tensubc(t, c, i=(0,)):
    """Return the difference with a basis tensor.

    $$
        t-ce_i
    $$

    More efficient than `tensub(t, tenbasis(i, c))`.

    See also
    --------
    - for difference on more coefficients: [`tensub`][vector.multilinear.vector_space.tensub]
    """
    t = np.asarray(t)
    #raise the rank until the index i fits
    while t.ndim < len(i):
        t = np.expand_dims(t, axis=-1)
    #grow the leading axes so i is in bounds (np.pad always returns a copy,
    #so the caller's array is never mutated)
    t = np.pad(t, tuple((0, max(ii-s+1, 0)) for s, ii in zip(t.shape, i)))
    #pad i with zeros for the trailing axes it does not address; the previous
    #(0,)*(len(i)-t.ndim) was never positive after the rank expansion above,
    #so c was broadcast over whole subarrays when t.ndim > len(i)
    t[i + (0,)*(t.ndim-len(i))] -= c
    return t

tenmul(t, a)

Return the product.

\[ ta \]
See also
Source code in vector\multilinear\vector_space.py
113
114
115
116
117
118
119
120
121
122
123
124
def tenmul(t, a):
    """Return the product.

    $$
        ta
    $$

    See also
    --------
    - wraps: [`numpy.multiply`](https://numpy.org/doc/stable/reference/generated/numpy.multiply.html)
    """
    scaled = np.multiply(t, a)
    return scaled

tenrmul(a, t)

Return the product.

\[ at \]
See also
Source code in vector\multilinear\vector_space.py
126
127
128
129
130
131
132
133
134
135
136
137
def tenrmul(a, t):
    """Return the product.

    $$
        at
    $$

    See also
    --------
    - wraps: [`numpy.multiply`](https://numpy.org/doc/stable/reference/generated/numpy.multiply.html)
    """
    scaled = np.multiply(a, t)
    return scaled

tentruediv(t, a)

Return the true quotient.

\[ \frac{t}{a} \]
Notes

Why called truediv instead of div?

  • div would be more appropriate for an absolute clean mathematical implementation, that doesn't care about the language used. But the package might be used for pure integers/integer arithmetic, so both, truediv and floordiv operations have to be provided, and none should be privileged over the other by getting the universal div name.
  • truediv/floordiv is unambiguous, like Python operators.
See also
Source code in vector\multilinear\vector_space.py
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
def tentruediv(t, a):
    r"""Return the true quotient.

    $$
        \frac{t}{a}
    $$

    Notes
    -----
    Why called `truediv` instead of `div`?

    - `div` would be more appropriate for an absolute clean mathematical
    implementation, that doesn't care about the language used. But the package
    might be used for pure integers/integer arithmetic, so both, `truediv`
    and `floordiv` operations have to be provided, and none should be
    privileged over the other by getting the universal `div` name.
    - `truediv`/`floordiv` is unambiguous, like Python `operator`s.

    See also
    --------
    - wraps: [`numpy.divide`](https://numpy.org/doc/stable/reference/generated/numpy.divide.html)
    """
    #np.true_divide is the documented alias of np.divide
    return np.true_divide(t, a)

tenfloordiv(t, a)

Return the floor quotient.

\[ \left\lfloor\frac{t}{a}\right\rfloor \]
See also
Source code in vector\multilinear\vector_space.py
163
164
165
166
167
168
169
170
171
172
173
174
def tenfloordiv(t, a):
    r"""Return the floor quotient.

    $$
        \left\lfloor\frac{t}{a}\right\rfloor
    $$

    See also
    --------
    - wraps: [`numpy.floor_divide`](https://numpy.org/doc/stable/reference/generated/numpy.floor_divide.html)
    """
    quotient = np.floor_divide(t, a)
    return quotient

tenmod(t, a)

Return the remainder.

\[ t \bmod a \]
See also
Source code in vector\multilinear\vector_space.py
176
177
178
179
180
181
182
183
184
185
186
187
def tenmod(t, a):
    r"""Return the remainder.

    $$
        t \bmod a
    $$

    See also
    --------
    - wraps: [`numpy.mod`](https://numpy.org/doc/stable/reference/generated/numpy.mod.html)
    """
    #np.remainder is the same ufunc as np.mod
    return np.remainder(t, a)

tendivmod(t, a)

Return the floor quotient and remainder

\[ \left\lfloor\frac{t}{a}\right\rfloor, \ \left(t \bmod a\right) \]
See also
Source code in vector\multilinear\vector_space.py
189
190
191
192
193
194
195
196
197
198
199
200
def tendivmod(t, a):
    r"""Return the floor quotient and remainder.

    $$
        \left\lfloor\frac{t}{a}\right\rfloor, \ \left(t \bmod a\right)
    $$

    See also
    --------
    - wraps: [`numpy.divmod`](https://numpy.org/doc/stable/reference/generated/numpy.divmod.html)
    """
    quotient, remainder = np.divmod(t, a)
    return quotient, remainder

elementwise

tenhadamard(*ts)

Return the elementwise product.

\[ \left((t_0)_i \cdot (t_1)_i \cdot \cdots\right)_i \]
Source code in vector\multilinear\elementwise.py
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
def tenhadamard(*ts):
    r"""Return the elementwise product.

    $$
        \left((t_0)_i \cdot (t_1)_i \cdot \cdots\right)_i
    $$

    The result has the elementwise minimum shape of the operands
    (coefficients outside any operand are zero and vanish in the product).
    """
    ts = tuple(map(np.asarray, ts))
    shape = tuple(map(min, zip(*(t.shape for t in ts))))
    r = np.zeros(shape, dtype=np.result_type(*ts) if ts else object)
    slc = tuple(map(slice, shape)) + (...,)
    if ts:
        #copy into the promoted-dtype result; rebinding r to ts[0][slc]
        #(as before) aliased the input, so *= mutated the caller's array
        #and int*float raised a casting error
        r[...] = ts[0][slc]
    for t in ts[1:]:
        r *= t[slc]
    return r

tenhadamardtruediv(s, t)

Return the elementwise true quotient.

\[ \left(\frac{s_i}{t_i}\right)_i \]
Source code in vector\multilinear\elementwise.py
29
30
31
32
33
34
35
36
37
def tenhadamardtruediv(s, t):
    r"""Return the elementwise true quotient.

    $$
        \left(\frac{s_i}{t_i}\right)_i
    $$
    """
    s, t = np.asarray(s), np.asarray(t)
    #crop t to the shape of s; the index must be one flat tuple — the former
    #nested tuple `t[tuple(...), ...]` is rejected by numpy with an IndexError
    return np.divide(s, t[tuple(map(slice, s.shape)) + (...,)])

tenhadamardfloordiv(s, t)

Return the elementwise floor quotient.

\[ \left(\left\lfloor\frac{s_i}{t_i}\right\rfloor\right)_i \]
Source code in vector\multilinear\elementwise.py
39
40
41
42
43
44
45
46
47
def tenhadamardfloordiv(s, t):
    r"""Return the elementwise floor quotient.

    $$
        \left(\left\lfloor\frac{s_i}{t_i}\right\rfloor\right)_i
    $$
    """
    s, t = np.asarray(s), np.asarray(t)
    #crop t to the shape of s; the index must be one flat tuple — the former
    #nested tuple `t[tuple(...), ...]` is rejected by numpy with an IndexError
    return np.floor_divide(s, t[tuple(map(slice, s.shape)) + (...,)])

tenhadamardmod(s, t)

Return the elementwise remainder.

\[ \left(s_i \bmod t_i\right)_i \]
Source code in vector\multilinear\elementwise.py
49
50
51
52
53
54
55
56
57
def tenhadamardmod(s, t):
    r"""Return the elementwise remainder.

    $$
        \left(s_i \bmod t_i\right)_i
    $$
    """
    s, t = np.asarray(s), np.asarray(t)
    #crop t to the shape of s; the index must be one flat tuple — the former
    #nested tuple `t[tuple(...), ...]` is rejected by numpy with an IndexError
    return np.mod(s, t[tuple(map(slice, s.shape)) + (...,)])

tenhadamarddivmod(s, t)

Return the elementwise floor quotient and remainder.

\[ \left(\left\lfloor\frac{s_i}{t_i}\right\rfloor\right)_i, \ \left(s_i \bmod t_i\right)_i \]
Source code in vector\multilinear\elementwise.py
59
60
61
62
63
64
65
66
67
def tenhadamarddivmod(s, t):
    r"""Return the elementwise floor quotient and remainder.

    $$
        \left(\left\lfloor\frac{s_i}{t_i}\right\rfloor\right)_i, \ \left(s_i \bmod t_i\right)_i
    $$
    """
    s, t = np.asarray(s), np.asarray(t)
    #crop t to the shape of s; the index must be one flat tuple — the former
    #nested tuple `t[tuple(...), ...]` is rejected by numpy with an IndexError
    return np.divmod(s, t[tuple(map(slice, s.shape)) + (...,)])

tenhadamardmin(*ts, key=None)

Return the elementwise minimum.

\[ \left(\min((t_0)_i, (t_1)_i, \cdots)\right)_i \]
Source code in vector\multilinear\elementwise.py
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
def tenhadamardmin(*ts, key=None):
    r"""Return the elementwise minimum.

    $$
        \left(\min((t_0)_i, (t_1)_i, \cdots)\right)_i
    $$

    `key` optionally extracts a comparison key per coefficient, like the
    `key` of the builtin `min`. Positions not covered by any input tensor
    are set to `0`.
    """
    ts = tuple(map(np.asarray, ts))
    shape = vechadamardmax(*(t.shape for t in ts))
    r = np.empty(shape, dtype=np.result_type(*ts) if ts else object)
    #tracks which cells of r hold a real value (np.empty leaves garbage)
    filled = np.zeros(shape, dtype=bool)
    if key is None:
        for t in ts:
            #embed t at index 0 of the axes it does not span
            slc = tuple(map(slice, t.shape)) + (0,)*(r.ndim-t.ndim)
            #where already filled take the minimum, otherwise take t as-is
            r[slc] = np.where(filled[slc], np.minimum(r[slc], t), t)
            filled[slc] = True
    else:
        #per-cell cache of key(value) so each key is computed only once
        kcache = np.empty(shape, dtype=object)
        for t in ts:
            slc = tuple(map(slice, t.shape)) + (0,)*(r.ndim-t.ndim)
            #r[slc]/filled[slc]/kcache[slc] are views, so writes propagate
            it = np.nditer(
                [r[slc], t, filled[slc], kcache[slc]],
                flags = ['refs_ok'],
                op_flags = [['readwrite'], ['readonly'], ['readonly'], ['readwrite']]
            )
            for r_cell, t_cell, is_filled, k_cell in it:
                t_val = t_cell.item()
                t_key = key(t_val)
                if not bool(is_filled):
                    #first value seen at this position
                    r_cell[...] = t_val
                    k_cell[...] = t_key
                else:
                    if t_key < k_cell.item():
                        r_cell[...] = t_val
                        k_cell[...] = t_key
            filled[slc] = True
    #positions no input tensor covered default to zero
    r[~filled] = 0
    return r

tenhadamardmax(*ts, key=None)

Return the elementwise maximum.

\[ \left(\max((t_0)_i, (t_1)_i, \cdots)\right)_i \]
Source code in vector\multilinear\elementwise.py
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
def tenhadamardmax(*ts, key=None):
    r"""Return the elementwise maximum.

    $$
        \left(\max((t_0)_i, (t_1)_i, \cdots)\right)_i
    $$

    `key` optionally extracts a comparison key per coefficient, like the
    `key` of the builtin `max`. Positions not covered by any input tensor
    are set to `0`.
    """
    ts = tuple(map(np.asarray, ts))
    shape = vechadamardmax(*(t.shape for t in ts))
    r = np.empty(shape, dtype=np.result_type(*ts) if ts else object)
    #tracks which cells of r hold a real value (np.empty leaves garbage)
    filled = np.zeros(shape, dtype=bool)
    if key is None:
        for t in ts:
            #embed t at index 0 of the axes it does not span
            slc = tuple(map(slice, t.shape)) + (0,)*(r.ndim-t.ndim)
            #where already filled take the maximum, otherwise take t as-is
            r[slc] = np.where(filled[slc], np.maximum(r[slc], t), t)
            filled[slc] = True
    else:
        #per-cell cache of key(value) so each key is computed only once
        kcache = np.empty(shape, dtype=object)
        for t in ts:
            slc = tuple(map(slice, t.shape)) + (0,)*(r.ndim-t.ndim)
            #r[slc]/filled[slc]/kcache[slc] are views, so writes propagate
            it = np.nditer(
                [r[slc], t, filled[slc], kcache[slc]],
                flags = ['refs_ok'],
                op_flags = [['readwrite'], ['readonly'], ['readonly'], ['readwrite']]
            )
            for r_cell, t_cell, is_filled, k_cell in it:
                t_val = t_cell.item()
                t_key = key(t_val)
                if not bool(is_filled):
                    #first value seen at this position
                    r_cell[...] = t_val
                    k_cell[...] = t_key
                else:
                    if t_key > k_cell.item():
                        r_cell[...] = t_val
                        k_cell[...] = t_key
            filled[slc] = True
    #positions no input tensor covered default to zero
    r[~filled] = 0
    return r