Skip to content

Multilinear Sparse

Sparse tensor arithmetic.

Prefixed by `tens` (tensor, sparse).

Handle sparse multiaxis tensors, that for example represent multivariate polynomials.

Sparse tensors are accepted and returned as dicts whose keys are trimmed (no trailing zeros), non-negative int tuples.

creation

tenszero = {}

Zero tensor.

\[ 0 \]

An empty dictionary: {}.

tensbasis(i, c=1)

Return a basis tensor.

\[ ce_i \]

Returns a dictionary with a single element i:c.

Source code in vector\multilinear_sparse\creation.py
21
22
23
24
25
26
27
28
29
30
def tensbasis(i, c=1):
    """Return a basis tensor.

    $$
        ce_i
    $$

    Returns a dictionary with a single element `i:c`.
    """
    index = vectrim(i)
    return {index: c}

tensrand(*d)

Return a random tensor of uniform sampled float coefficients.

\[ t \sim \mathcal{U}^d([0, 1[) \]

The coefficients are sampled from a uniform distribution in [0, 1[.

Notes

Naming like numpy.random, because seems more concise (not random & gauss as in the stdlib).

Source code in vector\multilinear_sparse\creation.py
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
def tensrand(*d):
    r"""Return a random tensor of uniform sampled `float` coefficients.

    $$
        t \sim \mathcal{U}^d([0, 1[)
    $$

    The coefficients are sampled from a uniform distribution in `[0, 1[`.

    Notes
    -----
    Naming like [`numpy.random`](https://numpy.org/doc/stable/reference/random/legacy.html),
    because seems more concise (not `random` & `gauss` as in the stdlib).
    """
    r = {}
    for i in ndindex(*d):
        r[vectrim(i)] = random()
    return r

tensrandn(*d, mu=0, sigma=1)

Return a random tensor of normal sampled float coefficients.

\[ t \sim \mathcal{N}^d(\mu, \sigma) \]

The coefficients are sampled from a normal distribution.

Notes

Naming like numpy.random, because seems more concise (not random & gauss as in the stdlib).

Source code in vector\multilinear_sparse\creation.py
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
def tensrandn(*d, mu=0, sigma=1):
    r"""Return a random tensor of normal sampled `float` coefficients.

    $$
        t \sim \mathcal{N}^d(\mu, \sigma)
    $$

    The coefficients are sampled from a normal distribution.

    Notes
    -----
    Naming like [`numpy.random`](https://numpy.org/doc/stable/reference/random/legacy.html),
    because seems more concise (not `random` & `gauss` as in the stdlib).
    """
    r = {}
    for i in ndindex(*d):
        r[vectrim(i)] = gauss(mu, sigma)
    return r

conversion

tenstod(t, zero=0)

Return a sparse tensor (dict) as a dense tensor (numpy.ndarray).

Source code in vector\multilinear_sparse\conversion.py
11
12
13
14
15
16
def tenstod(t, zero=0):
    """Return a sparse tensor (`dict`) as a dense tensor (`numpy.ndarray`).

    `zero` is the fill value for coefficients absent from `t`.
    The result dtype is inferred from the coefficient values.
    """
    # t.values is a bound method and must be CALLED: np.array(t.values)
    # wrapped the method object itself and always inferred dtype=object.
    r = np.full(tensdim(t), zero, dtype=np.array(list(t.values())).dtype)
    for i, ti in t.items():
        r[i] = ti
    return r

tendtos(t)

Return a dense tensor (numpy.ndarray) as a sparse tensor (dict).

The resulting dict is not trimmed.

Source code in vector\multilinear_sparse\conversion.py
18
19
20
21
22
23
def tendtos(t):
    """Return a dense tensor (`numpy.ndarray`) as a sparse tensor (`dict`).

    The resulting `dict` is not [trimmed][vector.multilinear_sparse.tenstrim].
    """
    r = {}
    for i, ti in np.ndenumerate(t):
        r[vectrim(i)] = ti
    return r

utility

tensrank(t)

Return the rank.

\[ \text{rank}\,t \]
Source code in vector\multilinear_sparse\utility.py
13
14
15
16
17
18
19
20
def tensrank(t):
    r"""Return the rank.

    $$
        \text{rank}\,t
    $$
    """
    # The rank is the length of the longest index tuple; 0 for the zero tensor.
    lengths = (len(i) for i in t)
    return max(lengths, default=0)

tensdim(t)

Return the dimensionalities.

\[ \dim t \]
Source code in vector\multilinear_sparse\utility.py
22
23
24
25
26
27
28
29
def tensdim(t):
    r"""Return the dimensionalities.

    $$
        \dim t
    $$
    """
    # Each dimensionality is one more than the largest index along that axis.
    dims = []
    for si in vechadamardmax(*t.keys()):
        dims.append(si + 1)
    return tuple(dims)

tenseq(s, t)

Return whether two tensors are equal.

\[ s\overset{?}{=}t \]
Source code in vector\multilinear_sparse\utility.py
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
def tenseq(s, t):
    r"""Return whether two tensors are equal.

    $$
        s\overset{?}{=}t
    $$

    A coefficient present in only one tensor is considered equal
    if and only if it evaluates to `False` (i.e. it is effectively zero).
    """
    # Iterate the UNION of the key sets: with the original intersection the
    # one-sided branches below were unreachable, so non-zero coefficients
    # present in only one tensor were silently ignored.
    for i in s.keys() | t.keys():
        if i not in t:
            if bool(s[i]):
                return False
        elif i not in s:
            if bool(t[i]):
                return False
        else:
            if s[i] != t[i]:
                return False
    return True

tenstrim(t, tol=None)

Remove all near zero (abs(t_i)<=tol) coefficients.

tol may also be None, then all coefficients that evaluate to False are trimmed.

Notes
  • Cutting of elements that are abs(t_i)<=tol instead of abs(t_i)<tol to allow cutting of elements that are exactly zero by trim(t, 0) instead of trim(t, sys.float_info.min).
Source code in vector\multilinear_sparse\utility.py
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
def tenstrim(t, tol=None):
    """Remove all near zero (`abs(t_i)<=tol`) coefficients.

    `tol` may also be `None`,
    then all coefficients that evaluate to `False` are trimmed.

    Notes
    -----
    - Cutting of elements that are `abs(t_i)<=tol` instead of `abs(t_i)<tol` to
    allow cutting of elements that are exactly zero by `trim(t, 0)` instead
    of `trim(t, sys.float_info.min)`.
    """
    if tol is None:
        keep = bool
    else:
        def keep(ti):
            return abs(ti) > tol
    return {i: ti for i, ti in t.items() if keep(ti)}

tensitrim(t, tol=None)

Remove all near zero (abs(t_i)<=tol) coefficients.

tol may also be None, then all coefficients that evaluate to False are trimmed.

Notes
  • Cutting of elements that are abs(t_i)<=tol instead of abs(t_i)<tol to allow cutting of elements that are exactly zero by trim(t, 0) instead of trim(t, sys.float_info.min).
Source code in vector\multilinear_sparse\utility.py
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
def tensitrim(t, tol=None):
    """Remove all near zero (`abs(t_i)<=tol`) coefficients, in place.

    `tol` may also be `None`,
    then all coefficients that evaluate to `False` are trimmed.

    Notes
    -----
    - Cutting of elements that are `abs(t_i)<=tol` instead of `abs(t_i)<tol` to
    allow cutting of elements that are exactly zero by `trim(t, 0)` instead
    of `trim(t, sys.float_info.min)`.
    """
    # Collect doomed keys first; deleting while iterating a dict is illegal.
    if tol is None:
        doomed = [i for i, ti in t.items() if not ti]
    else:
        doomed = [i for i, ti in t.items() if abs(ti) <= tol]
    for i in doomed:
        t.pop(i)
    return t

tensrshift(t, n)

Shift coefficients up.

Source code in vector\multilinear_sparse\utility.py
88
89
90
91
92
def tensrshift(t, n):
    """Shift coefficients up."""
    # Plain per-component index addition; cheaper than going through vecadd.
    r = {}
    for i, ti in t.items():
        j = tuple(a + b for a, b in zip_longest(i, n, fillvalue=0))
        r[j] = ti
    return r

tenslshift(t, n)

Shift coefficients down.

Source code in vector\multilinear_sparse\utility.py
 94
 95
 96
 97
 98
 99
100
101
def tenslshift(t, n):
    """Shift coefficients down.

    Coefficients shifted below index 0 along any axis are dropped.
    """
    r = {}
    for i, ti in t.items():
        j = tuple(a - b for a, b in zip_longest(i, n, fillvalue=0))
        if min(j, default=0) >= 0:
            r[j] = ti
    return r

hilbert_space

tensconj(t)

Return the complex conjugate.

\[ t^* \]

Tries to call a method conjugate on each element. If not found, simply keeps the element as is.

Source code in vector\multilinear_sparse\hilbert_space.py
 9
10
11
12
13
14
15
16
17
18
19
def tensconj(t):
    """Return the complex conjugate.

    $$
        t^*
    $$

    Tries to call a method `conjugate` on each element.
    If not found, simply keeps the element as is.
    """
    r = {}
    for i, ti in t.items():
        r[i] = try_conjugate(ti)
    return r

tensiconj(t)

Complex conjugate.

\[ t = t^* \]

Tries to call a method conjugate on each element. If not found, simply keeps the element as is.

Source code in vector\multilinear_sparse\hilbert_space.py
21
22
23
24
25
26
27
28
29
30
31
32
33
def tensiconj(t):
    """Complex conjugate, in place.

    $$
        t = t^*
    $$

    Tries to call a method `conjugate` on each element.
    If not found, simply keeps the element as is.
    """
    # .items() (not .values()): iterating the values and unpacking them as
    # (i, ti) pairs raised on any non-pair coefficient and never yielded the
    # key needed for the write-back.
    for i, ti in t.items():
        t[i] = try_conjugate(ti)
    return t

vector_space

tenspos(t)

Return the identity.

\[ +t \]
Source code in vector\multilinear_sparse\vector_space.py
15
16
17
18
19
20
21
22
def tenspos(t):
    """Return the identity.

    $$
        +t
    $$
    """
    r = {}
    for i, ti in t.items():
        r[i] = +ti
    return r

tensipos(t)

Apply unary plus.

\[ t = +t \]
Source code in vector\multilinear_sparse\vector_space.py
24
25
26
27
28
29
30
31
32
33
def tensipos(t):
    """Apply unary plus, in place.

    $$
        t = +t
    $$
    """
    for i in tuple(t):
        t[i] = +t[i]
    return t

tensneg(t)

Return the negation.

\[ -t \]
Source code in vector\multilinear_sparse\vector_space.py
35
36
37
38
39
40
41
42
def tensneg(t):
    """Return the negation.

    $$
        -t
    $$
    """
    r = {}
    for i, ti in t.items():
        r[i] = -ti
    return r

tensineg(t)

Negate.

\[ t = -t \]
Source code in vector\multilinear_sparse\vector_space.py
44
45
46
47
48
49
50
51
52
53
def tensineg(t):
    """Negate, in place.

    $$
        t = -t
    $$
    """
    for i in tuple(t):
        t[i] = -t[i]
    return t

tensadd(*ts)

Return the sum.

\[ t_0 + t_1 + \cdots \]
See also
  • for sum on a single coefficient: tensaddc
Source code in vector\multilinear_sparse\vector_space.py
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
def tensadd(*ts):
    r"""Return the sum.

    $$
        t_0 + t_1 + \cdots
    $$

    See also
    --------
    - for sum on a single coefficient: [`tensaddc`][vector.multilinear_sparse.vector_space.tensaddc]
    """
    if not ts:
        return {}
    r = dict(ts[0])
    for t in ts[1:]:
        for i, ti in t.items():
            try:
                r[i] += ti
            except KeyError:
                r[i] = +ti
    return r

tensiadd(s, *ts)

Add.

\[ s += t_0 + t_1 + \cdots \]
See also
Source code in vector\multilinear_sparse\vector_space.py
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
def tensiadd(s, *ts):
    r"""Add, in place.

    $$
        s += t_0 + t_1 + \cdots
    $$

    See also
    --------
    - for sum on a single coefficient: [`tensiaddc`][vector.multilinear_sparse.vector_space.tensiaddc]
    """
    for t in ts:
        for i, ti in t.items():
            try:
                s[i] += ti
            except KeyError:
                s[i] = +ti
    return s

tensaddc(t, c, i=())

Return the sum with a basis tensor.

\[ t + ce_i \]
See also
  • for sum on more coefficients: tensadd
Source code in vector\multilinear_sparse\vector_space.py
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
def tensaddc(t, c, i=()):
    """Return the sum with a basis tensor.

    $$
        t + ce_i
    $$

    See also
    --------
    - for sum on more coefficients: [`tensadd`][vector.multilinear_sparse.vector_space.tensadd]
    """
    r = dict(t)
    try:
        r[i] += c
    except KeyError:
        r[i] = +c
    return r

tensiaddc(t, c, i=())

Add a basis tensor.

\[ t += ce_i \]
See also
  • for sum on more coefficients: tensiadd
Source code in vector\multilinear_sparse\vector_space.py
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
def tensiaddc(t, c, i=()):
    """Add a basis tensor, in place.

    $$
        t += ce_i
    $$

    See also
    --------
    - for sum on more coefficients: [`tensiadd`][vector.multilinear_sparse.vector_space.tensiadd]
    """
    try:
        t[i] += c
    except KeyError:
        t[i] = +c
    return t

tenssub(s, t)

Return the difference.

\[ s - t \]
See also
  • for difference on a single coefficient: tenssubc
Source code in vector\multilinear_sparse\vector_space.py
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
def tenssub(s, t):
    """Return the difference.

    $$
        s - t
    $$

    See also
    --------
    - for difference on a single coefficient: [`tenssubc`][vector.multilinear_sparse.vector_space.tenssubc]
    """
    r = dict(s)
    for i, ti in t.items():
        try:
            r[i] -= ti
        except KeyError:
            r[i] = -ti
    return r

tensisub(s, t)

Subtract.

\[ s -= t \]
See also
  • for difference on a single coefficient: tensisubc
Source code in vector\multilinear_sparse\vector_space.py
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
def tensisub(s, t):
    """Subtract, in place.

    $$
        s -= t
    $$

    See also
    --------
    - for difference on a single coefficient: [`tensisubc`][vector.multilinear_sparse.vector_space.tensisubc]
    """
    for i, ti in t.items():
        try:
            s[i] -= ti
        except KeyError:
            s[i] = -ti
    return s

tenssubc(t, c, i=())

Return the difference with a basis tensor.

\[ t - ce_i \]
See also
  • for difference on more coefficients: tenssub
Source code in vector\multilinear_sparse\vector_space.py
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
def tenssubc(t, c, i=()):
    """Return the difference with a basis tensor.

    $$
        t - ce_i
    $$

    See also
    --------
    - for difference on more coefficients: [`tenssub`][vector.multilinear_sparse.vector_space.tenssub]
    """
    r = dict(t)
    try:
        r[i] -= c
    except KeyError:
        r[i] = -c
    return r

tensisubc(t, c, i=())

Subtract a basis tensor.

\[ t -= ce_i \]
See also
  • for difference on more coefficients: tensisub
Source code in vector\multilinear_sparse\vector_space.py
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
def tensisubc(t, c, i=()):
    """Subtract a basis tensor, in place.

    $$
        t -= ce_i
    $$

    See also
    --------
    - for difference on more coefficients: [`tensisub`][vector.multilinear_sparse.vector_space.tensisub]
    """
    try:
        t[i] -= c
    except KeyError:
        t[i] = -c
    return t

tensmul(t, a)

Return the product.

\[ ta \]
Source code in vector\multilinear_sparse\vector_space.py
201
202
203
204
205
206
207
208
def tensmul(t, a):
    """Return the product.

    $$
        ta
    $$
    """
    r = {}
    for i, ti in t.items():
        r[i] = ti * a
    return r

tensrmul(a, t)

Return the product.

\[ at \]
Source code in vector\multilinear_sparse\vector_space.py
210
211
212
213
214
215
216
217
def tensrmul(a, t):
    """Return the product.

    $$
        at
    $$
    """
    r = {}
    for i, ti in t.items():
        r[i] = a * ti
    return r

tensimul(t, a)

Multiply.

\[ t \cdot= a \]
Source code in vector\multilinear_sparse\vector_space.py
219
220
221
222
223
224
225
226
227
228
def tensimul(t, a):
    r"""Multiply, in place.

    $$
        t \cdot= a
    $$
    """
    for key in tuple(t):
        t[key] *= a
    return t

tenstruediv(t, a)

Return the true quotient.

\[ \frac{t}{a} \]
Source code in vector\multilinear_sparse\vector_space.py
230
231
232
233
234
235
236
237
def tenstruediv(t, a):
    r"""Return the true quotient.

    $$
        \frac{t}{a}
    $$
    """
    r = {}
    for i, ti in t.items():
        r[i] = ti / a
    return r

tensitruediv(t, a)

True divide.

\[ t /= a \]
Notes

Why called truediv instead of div?

  • div would be more appropriate for an absolutely clean mathematical implementation, that doesn't care about the language used. But the package might be used for pure integers/integer arithmetic, so both, truediv and floordiv operations have to be provided, and none should be privileged over the other by getting the universal div name.
  • truediv/floordiv is unambiguous, like Python operators.
Source code in vector\multilinear_sparse\vector_space.py
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
def tensitruediv(t, a):
    """True divide, in place.

    $$
        t /= a
    $$

    Notes
    -----
    Why called `truediv` instead of `div`?

    - `div` would be more appropriate for an absolutely clean mathematical
    implementation, that doesn't care about the language used. But the package
    might be used for pure integers/integer arithmetic, so both, `truediv`
    and `floordiv` operations have to be provided, and none should be
    privileged over the other by getting the universal `div` name.
    - `truediv`/`floordiv` is unambiguous, like Python `operator`s.
    """
    for key in tuple(t):
        t[key] /= a
    return t

tensfloordiv(t, a)

Return the floor quotient.

\[ \left\lfloor\frac{t}{a}\right\rfloor \]
Source code in vector\multilinear_sparse\vector_space.py
261
262
263
264
265
266
267
268
def tensfloordiv(t, a):
    r"""Return the floor quotient.

    $$
        \left\lfloor\frac{t}{a}\right\rfloor
    $$
    """
    r = {}
    for i, ti in t.items():
        r[i] = ti // a
    return r

tensifloordiv(t, a)

Floor divide.

\[ t //= a \]
Source code in vector\multilinear_sparse\vector_space.py
270
271
272
273
274
275
276
277
278
279
def tensifloordiv(t, a):
    """Floor divide, in place.

    $$
        t //= a
    $$
    """
    for key in tuple(t):
        t[key] //= a
    return t

tensmod(t, a)

Return the remainder.

\[ t \bmod a \]
Source code in vector\multilinear_sparse\vector_space.py
281
282
283
284
285
286
287
288
def tensmod(t, a):
    r"""Return the remainder.

    $$
        t \bmod a
    $$
    """
    r = {}
    for i, ti in t.items():
        r[i] = ti % a
    return r

tensimod(t, a)

Mod.

\[ t \%= a \]
Source code in vector\multilinear_sparse\vector_space.py
290
291
292
293
294
295
296
297
298
299
def tensimod(t, a):
    r"""Mod, in place.

    $$
        t \%= a
    $$
    """
    for key in tuple(t):
        t[key] %= a
    return t

tensdivmod(t, a)

Return the floor quotient and remainder.

\[ \left\lfloor\frac{t}{a}\right\rfloor, \ \left(t \bmod a\right) \]
Source code in vector\multilinear_sparse\vector_space.py
301
302
303
304
305
306
307
308
309
310
311
def tensdivmod(t, a):
    r"""Return the floor quotient and remainder.

    $$
        \left\lfloor\frac{t}{a}\right\rfloor, \ \left(t \bmod a\right)
    $$
    """
    pairs = {i: divmod(ti, a) for i, ti in t.items()}
    q = {i: qi for i, (qi, _) in pairs.items()}
    r = {i: ri for i, (_, ri) in pairs.items()}
    return q, r

elementwise

tenshadamard(*ts)

Return the elementwise product.

\[ \left((t_0)_i \cdot (t_1)_i \cdot \cdots\right)_i \]
Source code in vector\multilinear_sparse\elementwise.py
11
12
13
14
15
16
17
18
19
20
21
22
23
def tenshadamard(*ts):
    r"""Return the elementwise product.

    $$
        \left((t_0)_i \cdot (t_1)_i \cdot \cdots\right)_i
    $$
    """
    if not ts:
        return {}
    # Only indices present in EVERY tensor contribute; a missing factor
    # would make the product zero (absent) anyway.
    shared = set(ts[0].keys()).intersection(*(t.keys() for t in ts[1:]))
    return {i: prod_default((t[i] for t in ts), initial=MISSING, default=MISSING)
            for i in shared}

tenshadamardtruediv(s, t)

Return the elementwise true quotient.

\[ \left(\frac{s_i}{t_i}\right)_i \]
Source code in vector\multilinear_sparse\elementwise.py
25
26
27
28
29
30
31
32
def tenshadamardtruediv(s, t):
    r"""Return the elementwise true quotient.

    $$
        \left(\frac{s_i}{t_i}\right)_i
    $$
    """
    r = {}
    for i, si in s.items():
        r[i] = si / t[i]
    return r

tenshadamardfloordiv(s, t)

Return the elementwise floor quotient.

\[ \left(\left\lfloor\frac{s_i}{t_i}\right\rfloor\right)_i \]
Source code in vector\multilinear_sparse\elementwise.py
34
35
36
37
38
39
40
41
def tenshadamardfloordiv(s, t):
    r"""Return the elementwise floor quotient.

    $$
        \left(\left\lfloor\frac{s_i}{t_i}\right\rfloor\right)_i
    $$
    """
    r = {}
    for i, si in s.items():
        r[i] = si // t[i]
    return r

tenshadamardmod(s, t)

Return the elementwise remainder.

\[ \left(s_i \bmod t_i\right)_i \]
Source code in vector\multilinear_sparse\elementwise.py
43
44
45
46
47
48
49
50
def tenshadamardmod(s, t):
    r"""Return the elementwise remainder.

    $$
        \left(s_i \bmod t_i\right)_i
    $$
    """
    r = {}
    for i, si in s.items():
        r[i] = si % t[i]
    return r

tenshadamarddivmod(s, t)

Return the elementwise floor quotient and remainder.

\[ \left(\left\lfloor\frac{s_i}{t_i}\right\rfloor\right)_i, \ \left(s_i \bmod t_i\right)_i \]
Source code in vector\multilinear_sparse\elementwise.py
52
53
54
55
56
57
58
59
60
61
62
63
def tenshadamarddivmod(s, t):
    r"""Return the elementwise floor quotient and remainder.

    $$
        \left(\left\lfloor\frac{s_i}{t_i}\right\rfloor\right)_i, \ \left(s_i \bmod t_i\right)_i
    $$
    """
    q, r = {}, {}
    # divmod(s_i, t_i), in that order: the original divmod(ti, si) computed
    # t/s, contradicting both the docstring and the sibling elementwise
    # divisions (tenshadamardtruediv/-floordiv/-mod), which all iterate s
    # and divide by t.
    for i, si in s.items():
        q[i], r[i] = divmod(si, t[i])
    return q, r

tenshadamardmin(*ts, key=None)

Return the elementwise minimum.

\[ \left(\min((t_0)_i, (t_1)_i, \cdots)\right)_i \]
Source code in vector\multilinear_sparse\elementwise.py
65
66
67
68
69
70
71
72
73
74
75
76
77
def tenshadamardmin(*ts, key=None):
    r"""Return the elementwise minimum.

    $$
        \left(\min((t_0)_i, (t_1)_i, \cdots)\right)_i
    $$

    `key` is forwarded to `min`.

    Notes
    -----
    Coefficients absent from a tensor are skipped, i.e. NOT treated
    as implicit zeros.
    """
    r = {}
    if not ts:
        return r
    for i in set(ts[0].keys()).union(*(t.keys() for t in ts[1:])):
        # Only tensors that actually hold coefficient i take part: the
        # unconditional t[i] lookup raised KeyError whenever the key sets
        # differed, even though the loop deliberately runs over the union.
        # Forward key, which was accepted but silently ignored before.
        r[i] = min((t[i] for t in ts if i in t), key=key)
    return r

tenshadamardmax(*ts, key=None)

Return the elementwise maximum.

\[ \left(\max((t_0)_i, (t_1)_i, \cdots)\right)_i \]
Source code in vector\multilinear_sparse\elementwise.py
79
80
81
82
83
84
85
86
87
88
89
90
91
def tenshadamardmax(*ts, key=None):
    r"""Return the elementwise maximum.

    $$
        \left(\max((t_0)_i, (t_1)_i, \cdots)\right)_i
    $$

    `key` is forwarded to `max`.

    Notes
    -----
    Coefficients absent from a tensor are skipped, i.e. NOT treated
    as implicit zeros.
    """
    r = {}
    if not ts:
        return r
    for i in set(ts[0].keys()).union(*(t.keys() for t in ts[1:])):
        # Only tensors that actually hold coefficient i take part: the
        # unconditional t[i] lookup raised KeyError whenever the key sets
        # differed, even though the loop deliberately runs over the union.
        # Forward key, which was accepted but silently ignored before.
        # (Docstring fixed: it said \min for the maximum.)
        r[i] = max((t[i] for t in ts if i in t), key=key)
    return r