Skip to content

Multilinear Sparse

Sparse tensor arithmetic.

Prefixed by tens... (tensor - sparse).

Handle sparse multiaxis tensors, that for example represent multivariate polynomials.

Sparse tensors are accepted and returned as dicts whose keys are trimmed (no trailing zeros), non-negative int tuples.

creation

tenszero: dict[Never, Never] = {}

Zero tensor.

\[ 0 \]

An empty dictionary: {}.

tensbasis(i: tuple[int, ...], c: Any = 1) -> dict[tuple[int, ...], Any]

Return a basis tensor.

\[ ce_i \]

Returns a dictionary with a single element i:c.

Source code in vector\multilinear_sparse\creation.py
22
23
24
25
26
27
28
29
30
31
def tensbasis(i:tuple[int,...], c:Any=1) -> dict[tuple[int,...],Any]:
    """Return a basis tensor.

    $$
        ce_i
    $$

    Returns a dictionary holding the single entry `i:c`
    (with the index trimmed of trailing zeros).
    """
    key = vectrim(i)
    return {key: c}

tensrand(*d: int) -> dict[tuple[int, ...], float]

Return a random tensor of uniformly sampled float coefficients.

\[ t \sim \mathcal{U}^d([0, 1[) \]

The coefficients are sampled from a uniform distribution in [0, 1[.

Notes

Naming like numpy.random, because seems more concise (not random & gauss as in the stdlib).

Source code in vector\multilinear_sparse\creation.py
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
def tensrand(*d:int) -> dict[tuple[int,...],float]:
    r"""Return a random tensor of uniformly sampled `float` coefficients.

    $$
        t \sim \mathcal{U}^d([0, 1[)
    $$

    Each coefficient is drawn independently from the uniform
    distribution on `[0, 1[`.

    Notes
    -----
    Naming like [`numpy.random`](https://numpy.org/doc/stable/reference/random/legacy.html),
    because seems more concise (not `random` & `gauss` as in the stdlib).
    """
    r = {}
    for i in ndindex(*d):
        r[vectrim(i)] = random()
    return r

tensrandn(*d: int, mu: float = 0, sigma: float = 1) -> dict[tuple[int, ...], float]

Return a random tensor of normally sampled float coefficients.

\[ t \sim \mathcal{N}^d(\mu, \sigma) \]

The coefficients are sampled from a normal distribution.

Notes

Naming like numpy.random, because seems more concise (not random & gauss as in the stdlib).

Source code in vector\multilinear_sparse\creation.py
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
def tensrandn(*d:int, mu:float=0, sigma:float=1) -> dict[tuple[int,...],float]:
    r"""Return a random tensor of normally sampled `float` coefficients.

    $$
        t \sim \mathcal{N}^d(\mu, \sigma)
    $$

    Each coefficient is drawn independently from a normal distribution.

    Notes
    -----
    Naming like [`numpy.random`](https://numpy.org/doc/stable/reference/random/legacy.html),
    because seems more concise (not `random` & `gauss` as in the stdlib).
    """
    r = {}
    for i in ndindex(*d):
        r[vectrim(i)] = gauss(mu, sigma)
    return r

conversion

tenstod(t: Mapping[tuple[int, ...], Any], zero: Any = 0) -> np.ndarray

Return a sparse tensor (dict) as a dense tensor (numpy.ndarray).

Source code in vector\multilinear_sparse\conversion.py
13
14
15
16
17
18
def tenstod(t:Mapping[tuple[int,...],Any], zero:Any=0) -> np.ndarray:
    """Return a sparse tensor (`dict`) as a dense tensor (`numpy.ndarray`).

    The dense shape is `tensdim(t)`; all positions without a stored
    coefficient are filled with `zero`.
    """
    # Bug fix: `np.array(t.values)` passed the *bound method* (no call),
    # which numpy wraps as a 0-d object array, so the result was always
    # dtype=object. Materialise the values to let numpy infer a real dtype.
    dtype = np.array(list(t.values())).dtype
    r = np.full(tensdim(t), zero, dtype=dtype)
    for i, ti in t.items():
        r[i] = ti
    return r

tendtos(t: np.ndarray) -> dict[tuple[int, ...], Any]

Return a dense tensor (numpy.ndarray) as a sparse tensor (dict).

The resulting dict is not trimmed.

Source code in vector\multilinear_sparse\conversion.py
20
21
22
23
24
25
def tendtos(t:np.ndarray) -> dict[tuple[int,...],Any]:
    """Return a dense tensor (`numpy.ndarray`) as a sparse tensor (`dict`).

    The resulting `dict` is not [trimmed][vector.multilinear_sparse.tenstrim]:
    zero coefficients are kept (only the index tuples are trimmed).
    """
    r = {}
    for i, ti in np.ndenumerate(t):
        r[vectrim(i)] = ti
    return r

utility

tensrank(t: Mapping[tuple[int, ...], Any]) -> int

Return the rank.

\[ \text{rank}\,t \]
Source code in vector\multilinear_sparse\utility.py
15
16
17
18
19
20
21
22
def tensrank(t:Mapping[tuple[int,...],Any]) -> int:
    r"""Return the rank.

    $$
        \text{rank}\,t
    $$

    The rank of the zero tensor (`{}`) is `0`.
    """
    lengths = (len(i) for i in t)
    return max(lengths, default=0)

tensdim(t: Mapping[tuple[int, ...], Any]) -> tuple[int, ...]

Return the dimensionalities.

\[ \dim t \]
Source code in vector\multilinear_sparse\utility.py
24
25
26
27
28
29
30
31
def tensdim(t:Mapping[tuple[int,...],Any]) -> tuple[int,...]:
    r"""Return the dimensionalities.

    $$
        \dim t
    $$

    One entry per axis: the largest index on that axis plus one.
    """
    largest = vechadamardmax(*t.keys())
    return tuple(1 + si for si in largest)

tenseq(s: Mapping[tuple[int, ...], Any], t: Mapping[tuple[int, ...], Any]) -> bool

Return whether two tensors are equal.

\[ s\overset{?}{=}t \]
Source code in vector\multilinear_sparse\utility.py
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
def tenseq(s:Mapping[tuple[int,...],Any], t:Mapping[tuple[int,...],Any]) -> bool:
    r"""Return whether two tensors are equal.

    $$
        s\overset{?}{=}t
    $$

    Coefficients stored in only one operand count as equal if they are
    falsy (zero), so e.g. `{(1,): 0}` equals `{}`.
    """
    # Bug fix: iterated the *intersection* of the key sets before, so the
    # `i not in t` / `i not in s` branches were unreachable and keys present
    # in only one tensor were never compared. Iterate the union instead.
    for i in s.keys() | t.keys():
        if i not in t:
            if bool(s[i]):
                return False
        elif i not in s:
            if bool(t[i]):
                return False
        elif s[i] != t[i]:
            return False
    return True

tenstrim(t: Mapping[tuple[int, ...], Any], tol: Any | None = None) -> dict[tuple[int, ...], Any]

Remove all near zero (abs(t_i)<=tol) coefficients.

tol may also be None, then all coefficients that evaluate to False are trimmed.

Notes
  • Cutting of elements that are abs(t_i)<=tol instead of abs(t_i)<tol to allow cutting of elements that are exactly zero by trim(t, 0) instead of trim(t, sys.float_info.min).
Source code in vector\multilinear_sparse\utility.py
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
def tenstrim(t:Mapping[tuple[int,...],Any], tol:Any|None=None) -> dict[tuple[int,...],Any]:
    """Remove all near zero (`abs(t_i)<=tol`) coefficients.

    `tol` may also be `None`,
    then all coefficients that evaluate to `False` are trimmed.

    Notes
    -----
    - Cutting of elements that are `abs(t_i)<=tol` instead of `abs(t_i)<tol` to
    allow cutting of elements that are exactly zero by `trim(t, 0)` instead
    of `trim(t, sys.float_info.min)`.
    """
    if tol is None:
        return {i:ti for i, ti in t.items() if ti}
    else:
        return {i:ti for i, ti in t.items() if abs(ti)>tol}

tensitrim(t: MutableMapping[tuple[int, ...], Any], tol: Any | None = None) -> MutableMapping[tuple[int, ...], Any]

Remove all near zero (abs(t_i)<=tol) coefficients.

tol may also be None, then all coefficients that evaluate to False are trimmed.

Notes
  • Cutting of elements that are abs(t_i)<=tol instead of abs(t_i)<tol to allow cutting of elements that are exactly zero by trim(t, 0) instead of trim(t, sys.float_info.min).
Source code in vector\multilinear_sparse\utility.py
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
def tensitrim(t:MutableMapping[tuple[int,...],Any], tol:Any|None=None) -> MutableMapping[tuple[int,...],Any]:
    """Remove all near zero (`abs(t_i)<=tol`) coefficients.

    `tol` may also be `None`,
    then all coefficients that evaluate to `False` are trimmed.

    Notes
    -----
    - Cutting of elements that are `abs(t_i)<=tol` instead of `abs(t_i)<tol` to
    allow cutting of elements that are exactly zero by `trim(t, 0)` instead
    of `trim(t, sys.float_info.min)`.
    """
    if tol is None:
        indices_to_del = tuple(i for i, ti in t.items() if not ti)
    else:
        indices_to_del = tuple(i for i, ti in t.items() if abs(ti)<=tol)

    for i in indices_to_del:
        del t[i]
    return t

tensrshift(t: Mapping[tuple[int, ...], Any], n: tuple[int, ...]) -> dict[tuple[int, ...], Any]

Shift coefficients up.

Source code in vector\multilinear_sparse\utility.py
90
91
92
93
94
def tensrshift(t:Mapping[tuple[int,...],Any], n:tuple[int,...]) -> dict[tuple[int,...],Any]:
    """Shift coefficients up (add `n` to every index, padding with zeros)."""
    r = {}
    for i, ti in t.items():
        shifted = tuple(a + b for a, b in zip_longest(i, n, fillvalue=0))
        r[shifted] = ti
    return r

tenslshift(t: Mapping[tuple[int, ...], Any], n: tuple[int, ...]) -> dict[tuple[int, ...], Any]

Shift coefficients down.

Source code in vector\multilinear_sparse\utility.py
 96
 97
 98
 99
100
101
102
103
def tenslshift(t:Mapping[tuple[int,...],Any], n:tuple[int,...]) -> dict[tuple[int,...],Any]:
    """Shift coefficients down (subtract `n` from every index).

    Coefficients whose shifted index would become negative on any axis
    are dropped.
    """
    r = {}
    for i, ti in t.items():
        shifted = tuple(a - b for a, b in zip_longest(i, n, fillvalue=0))
        if min(shifted, default=0) >= 0:
            r[shifted] = ti
    return r

hilbertspace

tensconj(t: Mapping[tuple[int, ...], Any]) -> dict[tuple[int, ...], Any]

Return the complex conjugate.

\[ t^* \]

Tries to call a method conjugate on each element. If not found, simply keeps the element as is.

Source code in vector\multilinear_sparse\hilbertspace.py
11
12
13
14
15
16
17
18
19
20
21
def tensconj(t:Mapping[tuple[int,...],Any]) -> dict[tuple[int,...],Any]:
    """Return the complex conjugate.

    $$
        t^*
    $$

    Tries to call a method `conjugate` on each element.
    If not found, simply keeps the element as is.
    """
    r = {}
    for i, ti in t.items():
        r[i] = try_conjugate(ti)
    return r

tensiconj(t: MutableMapping[tuple[int, ...], Any]) -> MutableMapping[tuple[int, ...], Any]

Complex conjugate.

\[ t = t^* \]

Tries to call a method conjugate on each element. If not found, simply keeps the element as is.

Source code in vector\multilinear_sparse\hilbertspace.py
23
24
25
26
27
28
29
30
31
32
33
34
35
def tensiconj(t:MutableMapping[tuple[int,...],Any]) -> MutableMapping[tuple[int,...],Any]:
    """Complex conjugate in place.

    $$
        t = t^*
    $$

    Tries to call a method `conjugate` on each element.
    If not found, simply keeps the element as is.

    Returns `t` for chaining.
    """
    # Bug fix: iterated `t.values()` before, which either raised on unpacking
    # or silently mis-unpacked; index/coefficient pairs come from `t.items()`.
    for i, ti in t.items():
        t[i] = try_conjugate(ti)
    return t

vectorspace

tenspos(t: Mapping[tuple[int, ...], Any]) -> dict[tuple[int, ...], Any]

Return the identity.

\[ +t \]
Source code in vector\multilinear_sparse\_pyvectorspace.py
20
21
22
23
24
25
26
27
def tenspos(t:Mapping[tuple[int,...],Any]) -> dict[tuple[int,...],Any]:
    """Return the identity.

    $$
        +t
    $$

    Applies unary plus to every coefficient, in a new `dict`.
    """
    out = {}
    for i, ti in t.items():
        out[i] = +ti
    return out

tensipos(t: MutableMapping[tuple[int, ...], Any]) -> MutableMapping[tuple[int, ...], Any]

Apply unary plus.

\[ t = +t \]
Source code in vector\multilinear_sparse\_pyvectorspace.py
29
30
31
32
33
34
35
36
37
38
def tensipos(t:MutableMapping[tuple[int,...],Any]) -> MutableMapping[tuple[int,...],Any]:
    """Apply unary plus in place.

    $$
        t = +t
    $$

    Returns `t` for chaining.
    """
    for key in t:
        t[key] = +t[key]
    return t

tensneg(t: Mapping[tuple[int, ...], Any]) -> dict[tuple[int, ...], Any]

Return the negation.

\[ -t \]
Source code in vector\multilinear_sparse\_pyvectorspace.py
40
41
42
43
44
45
46
47
def tensneg(t:Mapping[tuple[int,...],Any]) -> dict[tuple[int,...],Any]:
    """Return the negation.

    $$
        -t
    $$

    Negates every coefficient, in a new `dict`.
    """
    out = {}
    for i, ti in t.items():
        out[i] = -ti
    return out

tensineg(t: dict[tuple[int, ...], Any]) -> dict[tuple[int, ...], Any]

Negate.

\[ t = -t \]
Source code in vector\multilinear_sparse\_pyvectorspace.py
49
50
51
52
53
54
55
56
57
58
def tensineg(t:dict[tuple[int,...],Any]) -> dict[tuple[int,...],Any]:
    """Negate in place.

    $$
        t = -t
    $$

    Returns `t` for chaining.
    """
    for key in t:
        t[key] = -t[key]
    return t

tensadd(*ts: Mapping[tuple[int, ...], Any]) -> dict[tuple[int, ...], Any]

Return the sum.

\[ t_0 + t_1 + \cdots \]
See also
  • for sum on a single coefficient: tensaddc
Source code in vector\multilinear_sparse\_pyvectorspace.py
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
def tensadd(*ts:Mapping[tuple[int,...],Any]) -> dict[tuple[int,...],Any]:
    r"""Return the sum.

    $$
        t_0 + t_1 + \cdots
    $$

    See also
    --------
    - for sum on a single coefficient: [`tensaddc`][vector.multilinear_sparse.vectorspace.tensaddc]
    """
    if not ts:
        return {}
    first, *rest = ts
    r:dict[tuple[int,...],Any] = dict(first)
    for t in rest:
        for i, ti in t.items():
            if i in r:
                r[i] += ti
            else:
                r[i] = +ti
    return r

tensiadd(s: MutableMapping[tuple[int, ...], Any], *ts: Mapping[tuple[int, ...], Any]) -> MutableMapping[tuple[int, ...], Any]

Add.

\[ s += t_0 + t_1 + \cdots \]
See also
Source code in vector\multilinear_sparse\_pyvectorspace.py
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
def tensiadd(s:MutableMapping[tuple[int,...],Any], *ts:Mapping[tuple[int,...],Any]) -> MutableMapping[tuple[int,...],Any]:
    r"""Add in place.

    $$
        s += t_0 + t_1 + \cdots
    $$

    Returns `s` for chaining.

    See also
    --------
    - for sum on a single coefficient: [`tensiaddc`][vector.multilinear_sparse.vectorspace.tensiaddc]
    """
    for other in ts:
        for idx, coeff in other.items():
            if idx in s:
                s[idx] += coeff
            else:
                s[idx] = +coeff
    return s

tensaddc(t: Mapping[tuple[int, ...], Any], c: Any, i: tuple[int, ...] = ()) -> dict[tuple[int, ...], Any]

Return the sum with a basis tensor.

\[ t + ce_i \]
See also
  • for sum on more coefficients: tensadd
Source code in vector\multilinear_sparse\_pyvectorspace.py
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
def tensaddc(t:Mapping[tuple[int,...],Any], c:Any, i:tuple[int,...]=()) -> dict[tuple[int,...],Any]:
    """Return the sum with a basis tensor.

    $$
        t + ce_i
    $$

    See also
    --------
    - for sum on more coefficients: [`tensadd`][vector.multilinear_sparse.vectorspace.tensadd]
    """
    out:dict[tuple[int,...],Any] = dict(t)
    if i not in out:
        out[i] = +c
    else:
        out[i] += c
    return out

tensiaddc(t: MutableMapping[tuple[int, ...], Any], c: Any, i: tuple[int, ...] = ()) -> MutableMapping[tuple[int, ...], Any]

Add a basis tensor.

\[ t += ce_i \]
See also
  • for sum on more coefficients: tensiadd
Source code in vector\multilinear_sparse\_pyvectorspace.py
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
def tensiaddc(t:MutableMapping[tuple[int,...],Any], c:Any, i:tuple[int,...]=()) -> MutableMapping[tuple[int,...],Any]:
    """Add a basis tensor in place.

    $$
        t += ce_i
    $$

    Returns `t` for chaining.

    See also
    --------
    - for sum on more coefficients: [`tensiadd`][vector.multilinear_sparse.vectorspace.tensiadd]
    """
    if i not in t:
        t[i] = +c
    else:
        t[i] += c
    return t

tenssub(s: Mapping[tuple[int, ...], Any], t: Mapping[tuple[int, ...], Any]) -> dict[tuple[int, ...], Any]

Return the difference.

\[ s - t \]
See also
  • for difference on a single coefficient: tenssubc
Source code in vector\multilinear_sparse\_pyvectorspace.py
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
def tenssub(s:Mapping[tuple[int,...],Any], t:Mapping[tuple[int,...],Any]) -> dict[tuple[int,...],Any]:
    """Return the difference.

    $$
        s - t
    $$

    See also
    --------
    - for difference on a single coefficient: [`tenssubc`][vector.multilinear_sparse.vectorspace.tenssubc]
    """
    out:dict[tuple[int,...],Any] = dict(s)
    for idx, coeff in t.items():
        if idx not in out:
            out[idx] = -coeff
        else:
            out[idx] -= coeff
    return out

tensisub(s: MutableMapping[tuple[int, ...], Any], t: Mapping[tuple[int, ...], Any]) -> MutableMapping[tuple[int, ...], Any]

Subtract.

\[ s -= t \]
See also
  • for difference on a single coefficient: tensisubc
Source code in vector\multilinear_sparse\_pyvectorspace.py
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
def tensisub(s:MutableMapping[tuple[int,...],Any], t:Mapping[tuple[int,...],Any]) -> MutableMapping[tuple[int,...],Any]:
    """Subtract in place.

    $$
        s -= t
    $$

    Returns `s` for chaining.

    See also
    --------
    - for difference on a single coefficient: [`tensisubc`][vector.multilinear_sparse.vectorspace.tensisubc]
    """
    for idx, coeff in t.items():
        if idx not in s:
            s[idx] = -coeff
        else:
            s[idx] -= coeff
    return s

tenssubc(t: Mapping[tuple[int, ...], Any], c: Any, i: tuple[int, ...] = ()) -> dict[tuple[int, ...], Any]

Return the difference with a basis tensor.

\[ t - ce_i \]
See also
  • for difference on more coefficients: tenssub
Source code in vector\multilinear_sparse\_pyvectorspace.py
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
def tenssubc(t:Mapping[tuple[int,...],Any], c:Any, i:tuple[int,...]=()) -> dict[tuple[int,...],Any]:
    """Return the difference with a basis tensor.

    $$
        t - ce_i
    $$

    See also
    --------
    - for difference on more coefficients: [`tenssub`][vector.multilinear_sparse.vectorspace.tenssub]
    """
    out:dict[tuple[int,...],Any] = dict(t)
    if i not in out:
        out[i] = -c
    else:
        out[i] -= c
    return out

tensisubc(t: MutableMapping[tuple[int, ...], Any], c: Any, i: tuple[int, ...] = ()) -> MutableMapping[tuple[int, ...], Any]

Subtract a basis tensor.

\[ t -= ce_i \]
See also
  • for difference on more coefficients: tensisub
Source code in vector\multilinear_sparse\_pyvectorspace.py
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
def tensisubc(t:MutableMapping[tuple[int,...],Any], c:Any, i:tuple[int,...]=()) -> MutableMapping[tuple[int,...],Any]:
    """Subtract a basis tensor in place.

    $$
        t -= ce_i
    $$

    Returns `t` for chaining.

    See also
    --------
    - for difference on more coefficients: [`tensisub`][vector.multilinear_sparse.vectorspace.tensisub]
    """
    if i not in t:
        t[i] = -c
    else:
        t[i] -= c
    return t

tensmul(t: Mapping[tuple[int, ...], Any], a: Any) -> dict[tuple[int, ...], Any]

Return the product.

\[ ta \]
Source code in vector\multilinear_sparse\_pyvectorspace.py
206
207
208
209
210
211
212
213
def tensmul(t:Mapping[tuple[int,...],Any], a:Any) -> dict[tuple[int,...],Any]:
    """Return the product (scalar on the right).

    $$
        ta
    $$
    """
    out = {}
    for i, ti in t.items():
        out[i] = ti*a
    return out

tensrmul(a: Any, t: Mapping[tuple[int, ...], Any]) -> dict[tuple[int, ...], Any]

Return the product.

\[ at \]
Source code in vector\multilinear_sparse\_pyvectorspace.py
215
216
217
218
219
220
221
222
def tensrmul(a:Any, t:Mapping[tuple[int,...],Any]) -> dict[tuple[int,...],Any]:
    """Return the product (scalar on the left).

    $$
        at
    $$
    """
    out = {}
    for i, ti in t.items():
        out[i] = a*ti
    return out

tensimul(t: MutableMapping[tuple[int, ...], Any], a: Any) -> MutableMapping[tuple[int, ...], Any]

Multiply.

\[ t \cdot= a \]
Source code in vector\multilinear_sparse\_pyvectorspace.py
224
225
226
227
228
229
230
231
232
233
def tensimul(t:MutableMapping[tuple[int,...],Any], a:Any) -> MutableMapping[tuple[int,...],Any]:
    r"""Multiply in place.

    $$
        t \cdot= a
    $$

    Returns `t` for chaining.
    """
    for key in t:
        t[key] *= a
    return t

tenstruediv(t: Mapping[tuple[int, ...], Any], a: Any) -> dict[tuple[int, ...], Any]

Return the true quotient.

\[ \frac{t}{a} \]
Notes

Why called truediv instead of div?

  • div would be more appropriate for an absolutely clean mathematical implementation, that doesn't care about the language used. But the package might be used for pure integers/integer arithmetic, so both, truediv and floordiv operations have to be provided, and none should be privileged over the other by getting the universal div name.
  • truediv/floordiv is unambiguous, like Python operators.
Source code in vector\multilinear_sparse\_pyvectorspace.py
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
def tenstruediv(t:Mapping[tuple[int,...],Any], a:Any) -> dict[tuple[int,...],Any]:
    r"""Return the true quotient.

    $$
        \frac{t}{a}
    $$

    Notes
    -----
    Why called `truediv` instead of `div`?

    - `div` would be more appropriate for an absolutely clean mathematical
    implementation, that doesn't care about the language used. But the package
    might be used for pure integers/integer arithmetic, so both, `truediv`
    and `floordiv` operations have to be provided, and none should be
    privileged over the other by getting the universal `div` name.
    - `truediv`/`floordiv` is unambiguous, like Python `operator`s.
    """
    out = {}
    for i, ti in t.items():
        out[i] = ti/a
    return out

tensitruediv(t: MutableMapping[tuple[int, ...], Any], a: Any) -> MutableMapping[tuple[int, ...], Any]

True divide.

\[ t /= a \]
Source code in vector\multilinear_sparse\_pyvectorspace.py
255
256
257
258
259
260
261
262
263
264
def tensitruediv(t:MutableMapping[tuple[int,...],Any], a:Any) -> MutableMapping[tuple[int,...],Any]:
    """True divide in place.

    $$
        t /= a
    $$

    Returns `t` for chaining.
    """
    for key in t:
        t[key] /= a
    return t

tensfloordiv(t: Mapping[tuple[int, ...], Any], a: Any) -> dict[tuple[int, ...], Any]

Return the floor quotient.

\[ \left\lfloor\frac{t}{a}\right\rfloor \]
Source code in vector\multilinear_sparse\_pyvectorspace.py
266
267
268
269
270
271
272
273
def tensfloordiv(t:Mapping[tuple[int,...],Any], a:Any) -> dict[tuple[int,...],Any]:
    r"""Return the floor quotient.

    $$
        \left\lfloor\frac{t}{a}\right\rfloor
    $$
    """
    out = {}
    for i, ti in t.items():
        out[i] = ti//a
    return out

tensifloordiv(t: MutableMapping[tuple[int, ...], Any], a: Any) -> MutableMapping[tuple[int, ...], Any]

Floor divide.

\[ t //= a \]
Source code in vector\multilinear_sparse\_pyvectorspace.py
275
276
277
278
279
280
281
282
283
284
def tensifloordiv(t:MutableMapping[tuple[int,...],Any], a:Any) -> MutableMapping[tuple[int,...],Any]:
    """Floor divide in place.

    $$
        t //= a
    $$

    Returns `t` for chaining.
    """
    for key in t:
        t[key] //= a
    return t

tensmod(t: Mapping[tuple[int, ...], Any], a: Any) -> dict[tuple[int, ...], Any]

Return the remainder.

\[ t \bmod a \]
Source code in vector\multilinear_sparse\_pyvectorspace.py
286
287
288
289
290
291
292
293
def tensmod(t:Mapping[tuple[int,...],Any], a:Any) -> dict[tuple[int,...],Any]:
    r"""Return the remainder.

    $$
        t \bmod a
    $$
    """
    out = {}
    for i, ti in t.items():
        out[i] = ti%a
    return out

tensimod(t: MutableMapping[tuple[int, ...], Any], a: Any) -> MutableMapping[tuple[int, ...], Any]

Mod.

\[ t \%= a \]
Source code in vector\multilinear_sparse\_pyvectorspace.py
295
296
297
298
299
300
301
302
303
304
def tensimod(t:MutableMapping[tuple[int,...],Any], a:Any) -> MutableMapping[tuple[int,...],Any]:
    r"""Mod in place.

    $$
        t \%= a
    $$

    Returns `t` for chaining.
    """
    for key in t:
        t[key] %= a
    return t

tensdivmod(t: Mapping[tuple[int, ...], Any], a: Any) -> tuple[dict[tuple[int, ...], Any], dict[tuple[int, ...], Any]]

Return the floor quotient and remainder.

\[ \left\lfloor\frac{t}{a}\right\rfloor, \ \left(t \bmod a\right) \]
Source code in vector\multilinear_sparse\_pyvectorspace.py
306
307
308
309
310
311
312
313
314
315
316
317
def tensdivmod(t:Mapping[tuple[int,...],Any], a:Any) -> tuple[dict[tuple[int,...],Any], dict[tuple[int,...],Any]]:
    r"""Return the floor quotient and remainder.

    $$
        \left\lfloor\frac{t}{a}\right\rfloor, \ \left(t \bmod a\right)
    $$
    """
    quotients:dict[tuple[int,...],Any] = {}
    remainders:dict[tuple[int,...],Any] = {}
    for i, ti in t.items():
        qi, ri = divmod(ti, a)
        quotients[i] = qi
        remainders[i] = ri
    return quotients, remainders

elementwise

tenshadamard(*ts: Mapping[tuple[int, ...], Any]) -> dict[tuple[int, ...], Any]

Return the elementwise product.

\[ \left((t_0)_i \cdot (t_1)_i \cdot \cdots\right)_i \]
Source code in vector\multilinear_sparse\elementwise.py
13
14
15
16
17
18
19
20
21
22
23
24
25
def tenshadamard(*ts:Mapping[tuple[int,...],Any]) -> dict[tuple[int,...],Any]:
    r"""Return the elementwise product.

    $$
        \left((t_0)_i \cdot (t_1)_i \cdot \cdots\right)_i
    $$

    Only indices present in every operand appear in the result.
    """
    if not ts:
        return {}
    head, *tail = ts
    common = set(head.keys()).intersection(*(t.keys() for t in tail))
    return {i: prod_default((t[i] for t in ts), initial=MISSING, default=MISSING)
            for i in common}

tenshadamardtruediv(s: Mapping[tuple[int, ...], Any], t: Mapping[tuple[int, ...], Any]) -> dict[tuple[int, ...], Any]

Return the elementwise true quotient.

\[ \left(\frac{s_i}{t_i}\right)_i \]
Source code in vector\multilinear_sparse\elementwise.py
27
28
29
30
31
32
33
34
def tenshadamardtruediv(s:Mapping[tuple[int,...],Any], t:Mapping[tuple[int,...],Any]) -> dict[tuple[int,...],Any]:
    r"""Return the elementwise true quotient.

    $$
        \left(\frac{s_i}{t_i}\right)_i
    $$

    Iterates the indices of `s`; each divisor is looked up in `t`.
    """
    out = {}
    for i, si in s.items():
        out[i] = si/t[i]
    return out

tenshadamardfloordiv(s: Mapping[tuple[int, ...], Any], t: Mapping[tuple[int, ...], Any]) -> dict[tuple[int, ...], Any]

Return the elementwise floor quotient.

\[ \left(\left\lfloor\frac{s_i}{t_i}\right\rfloor\right)_i \]
Source code in vector\multilinear_sparse\elementwise.py
36
37
38
39
40
41
42
43
def tenshadamardfloordiv(s:Mapping[tuple[int,...],Any], t:Mapping[tuple[int,...],Any]) -> dict[tuple[int,...],Any]:
    r"""Return the elementwise floor quotient.

    $$
        \left(\left\lfloor\frac{s_i}{t_i}\right\rfloor\right)_i
    $$

    Iterates the indices of `s`; each divisor is looked up in `t`.
    """
    out = {}
    for i, si in s.items():
        out[i] = si//t[i]
    return out

tenshadamardmod(s: Mapping[tuple[int, ...], Any], t: Mapping[tuple[int, ...], Any]) -> dict[tuple[int, ...], Any]

Return the elementwise remainder.

\[ \left(s_i \bmod t_i\right)_i \]
Source code in vector\multilinear_sparse\elementwise.py
45
46
47
48
49
50
51
52
def tenshadamardmod(s:Mapping[tuple[int,...],Any], t:Mapping[tuple[int,...],Any]) -> dict[tuple[int,...],Any]:
    r"""Return the elementwise remainder.

    $$
        \left(s_i \bmod t_i\right)_i
    $$

    Iterates the indices of `s`; each divisor is looked up in `t`.
    """
    out = {}
    for i, si in s.items():
        out[i] = si%t[i]
    return out

tenshadamarddivmod(s: Mapping[tuple[int, ...], Any], t: Mapping[tuple[int, ...], Any]) -> dict[tuple[int, ...], Any]

Return the elementwise floor quotient and remainder.

\[ \left(\left\lfloor\frac{s_i}{t_i}\right\rfloor\right)_i, \ \left(s_i \bmod t_i\right)_i \]
Source code in vector\multilinear_sparse\elementwise.py
54
55
56
57
58
59
60
61
62
63
64
65
def tenshadamarddivmod(s:Mapping[tuple[int,...],Any], t:Mapping[tuple[int,...],Any]) -> tuple[dict[tuple[int,...],Any], dict[tuple[int,...],Any]]:
    r"""Return the elementwise floor quotient and remainder.

    $$
        \left(\left\lfloor\frac{s_i}{t_i}\right\rfloor\right)_i, \ \left(s_i \bmod t_i\right)_i
    $$

    Like the other elementwise divisions, iterates the indices of `s` and
    looks each divisor up in `t` (`KeyError` if one is missing).
    """
    # Bug fix: previously iterated `t` and computed `divmod(t_i, s_i)`, i.e.
    # the operands were swapped relative to the documented s_i // t_i (and to
    # the sibling tenshadamard{truediv,floordiv,mod}); the return annotation
    # also claimed `dict` although a (quotient, remainder) pair is returned.
    q:dict[tuple[int,...],Any] = {}
    r:dict[tuple[int,...],Any] = {}
    for i, si in s.items():
        q[i], r[i] = divmod(si, t[i])
    return q, r

tenshadamardmin(*ts: Mapping[tuple[int, ...], Any], key: Callable[[Any], Any] | None = None) -> dict[tuple[int, ...], Any]

Return the elementwise minimum.

\[ \left(\min((t_0)_i, (t_1)_i, \cdots)\right)_i \]
Source code in vector\multilinear_sparse\elementwise.py
67
68
69
70
71
72
73
74
75
76
77
78
79
def tenshadamardmin(*ts:Mapping[tuple[int,...],Any], key:Callable[[Any],Any]|None=None) -> dict[tuple[int,...],Any]:
    r"""Return the elementwise minimum.

    $$
        \left(\min((t_0)_i, (t_1)_i, \cdots)\right)_i
    $$
    """
    r = {}
    if not ts:
        return r
    for i in set(ts[0].keys()).union(*(t.keys() for t in ts[1:])):
        r[i] = min(t[i] for t in ts)
    return r

tenshadamardmax(*ts: Mapping[tuple[int, ...], Any], key: Callable[[Any], Any] | None = None) -> dict[tuple[int, ...], Any]

Return the elementwise maximum.

\[ \left(\max((t_0)_i, (t_1)_i, \cdots)\right)_i \]
Source code in vector\multilinear_sparse\elementwise.py
81
82
83
84
85
86
87
88
89
90
91
92
93
def tenshadamardmax(*ts:Mapping[tuple[int,...],Any], key:Callable[[Any],Any]|None=None) -> dict[tuple[int,...],Any]:
    r"""Return the elementwise maximum.

    $$
        \left(\min((t_0)_i, (t_1)_i, \cdots)\right)_i
    $$
    """
    r = {}
    if not ts:
        return r
    for i in set(ts[0].keys()).union(*(t.keys() for t in ts[1:])):
        r[i] = max(t[i] for t in ts)
    return r