
Sparse

Sparse vectors.

>>> from vector import vecsadd
>>> v = {0:1}
>>> w = {0:2, 3:4}
>>> vecsadd(v, w)
{0: 3, 3: 4}

All function names are prefixed with vecs... (vector, sparse).

All functions accept and return vectors as dicts mapping index to coefficient (index: coefficient).

The functions are type-independent; however, the coefficient types must support the scalar operations a given function needs. For vector addition, for instance, the coefficients must be addable.

Index keys are expected to be integers.
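
As an illustration, coefficients may for example be fractions.Fraction values, since they support addition (doctest in the style of the opening example):

>>> from fractions import Fraction
>>> from vector import vecsadd
>>> vecsadd({0: Fraction(1, 2)}, {0: Fraction(1, 3)})
{0: Fraction(5, 6)}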

Docstring conventions

  • Summary
  • Math notation (vector notation if possible, index notation, domain & codomain)
  • More information ("More efficient than ...")
  • Complexity ("For a vector with \(n\) elements there will be \(x\) scalar additions (add), ...")
  • Notes (design choices)
  • See also (similar functions)
  • References (Wikipedia, numpy, ...)

creation

vecszero = {}

Zero vector.

\[ \vec{0} \]

An empty dictionary: {}.

vecsbasis(i, c=1)

Return a basis vector.

\[ c\vec{e}_i \]

Returns a dictionary with a single element i:c.

See also
  • for all basis vectors: vecsbases
Source code in vector\sparse\creation.py
def vecsbasis(i, c=1):
    r"""Return a basis vector.

    $$
        c\vec{e}_i
    $$

    Returns a dictionary with a single element `i:c`.

    See also
    --------
    - for all basis vectors: [`vecsbases`][vector.sparse.creation.vecsbases]
    """
    return {i:c}
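
Example (illustrative doctest; assumes vecsbasis is importable from the top-level vector package, as in the opening example):

>>> from vector import vecsbasis
>>> vecsbasis(2)
{2: 1}
>>> vecsbasis(1, c=3.5)
{1: 3.5}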

vecsbases(start=0, c=1)

Yield all basis vectors.

\[ \left(\vec{e}_n\right)_{n\in\mathbb{N}_{\geq\text{start}}} = \left(\vec{e}_\text{start}, \vec{e}_{\text{start}+1}, \dots \right) \]
See also
  • for single basis vector: vecsbasis
Source code in vector\sparse\creation.py
def vecsbases(start=0, c=1):
    r"""Yield all basis vectors.

    $$
        \left(\vec{e}_n\right)_{n\in\mathbb{N}_{\geq\text{start}}} = \left(\vec{e}_\text{start}, \vec{e}_{\text{start}+1}, \dots \right)
    $$

    See also
    --------
    - for single basis vector: [`vecsbasis`][vector.sparse.creation.vecsbasis]
    """
    for i in count(start=start):
        yield vecsbasis(i, c=c)
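
Example (illustrative doctest; vecsbases is a generator, so it is sliced here):

>>> from itertools import islice
>>> from vector import vecsbases
>>> list(islice(vecsbases(), 3))
[{0: 1}, {1: 1}, {2: 1}]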

vecsrand(n)

Return a random vector of uniform sampled float coefficients.

\[ \vec{v}\sim\mathcal{U}^n([0, 1[) \]

The coefficients are sampled from a uniform distribution in [0, 1[.

Notes

Naming follows numpy.random, because it seems more concise (not random & gauss as in the stdlib).

Source code in vector\sparse\creation.py
def vecsrand(n):
    r"""Return a random vector of uniform sampled `float` coefficients.

    $$
        \vec{v}\sim\mathcal{U}^n([0, 1[)
    $$

    The coefficients are sampled from a uniform distribution in `[0, 1[`.

    Notes
    -----
    Naming like [`numpy.random`](https://numpy.org/doc/stable/reference/random/legacy.html),
    because seems more concise (not `random` & `gauss` as in the stdlib).
    """
    return {i:random() for i in range(n)}

vecsrandn(n, normed=True, mu=0, sigma=1, weights=None)

Return a random vector of normal sampled float coefficients.

\[ \vec{v}\sim\mathcal{N}^n(\mu, \sigma) \]

The coefficients are sampled from a normal distribution.

Notes

Naming follows numpy.random, because it seems more concise (not random & gauss as in the stdlib).

Source code in vector\sparse\creation.py
def vecsrandn(n, normed=True, mu=0, sigma=1, weights=None):
    r"""Return a random vector of normal sampled `float` coefficients.

    $$
        \vec{v}\sim\mathcal{N}^n(\mu, \sigma)
    $$

    The coefficients are sampled from a normal distribution.

    Notes
    -----
    Naming like [`numpy.random`](https://numpy.org/doc/stable/reference/random/legacy.html),
    because seems more concise (not `random` & `gauss` as in the stdlib).
    """
    v = {i:gauss(mu, sigma) for i in range(n)}
    return vecstruediv(v, vecsabs(v, weights)) if normed else v

conversion

vecstod(v, zero=0)

Return a sparse vector (dict) as a dense vector (tuple).

Source code in vector\sparse\conversion.py
def vecstod(v, zero=0):
    """Return a sparse vector (`dict`) as a dense vector (`tuple`)."""
    d = [zero] * (max(v.keys(), default=-1)+1)
    for i, vi in v.items():
        d[i] = vi
    return tuple(d)
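
Example (illustrative doctest; indices that are not set become zero in the dense tuple):

>>> from vector import vecstod
>>> vecstod({0: 1, 3: 4})
(1, 0, 0, 4)
>>> vecstod({1: 2.5}, zero=0.0)
(0.0, 2.5)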

vecdtos(v)

Return a dense vector (tuple) as a sparse vector (dict).

The resulting dict is not trimmed.

Source code in vector\sparse\conversion.py
def vecdtos(v):
    """Return a dense vector (`tuple`) as a sparse vector (`dict`).

    The resulting `dict` is not [trimmed][vector.sparse.utility.vecstrim].
    """
    return {i:vi for i, vi in enumerate(v)}
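
Example (illustrative doctest; combine with vecstrim to drop explicitly stored zeros):

>>> from vector import vecdtos, vecstrim
>>> vecdtos((1, 0, 0, 4))
{0: 1, 1: 0, 2: 0, 3: 4}
>>> vecstrim(vecdtos((1, 0, 0, 4)))
{0: 1, 3: 4}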

utility

vecslen(v)

Return the maximum set index plus one.

Doesn't handle trailing zeros, use vecstrim if needed.

Source code in vector\sparse\utility.py
def vecslen(v):
    """Return the maximum set index plus one.

    Doesn't handle trailing zeros, use [`vecstrim`][vector.sparse.utility.vecstrim]
    if needed.
    """
    return max(v.keys(), default=-1) + 1  # empty vector -> length 0
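
Example (illustrative doctest, assuming the documented "maximum set index plus one" semantics):

>>> from vector import vecslen
>>> vecslen({0: 1, 3: 4})
4
>>> vecslen({})
0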

vecseq(v, w)

Return whether two vectors are equal.

\[ \vec{v} \overset{?}{=} \vec{w} \]
Complexity

For two vectors of lengths \(n\) & \(m\) there will be at most

  • \(\min\{n, m\}\) scalar comparisons (eq) &
  • \(|n-m|\) scalar boolean evaluations (bool).
Source code in vector\sparse\utility.py
def vecseq(v, w):
    r"""Return whether two vectors are equal.

    $$
        \vec{v} \overset{?}{=} \vec{w}
    $$

    Complexity
    ----------
    For two vectors of lengths $n$ & $m$ there will be at most

    - $\min\{n, m\}$ scalar comparisons (`eq`) &
    - $|n-m|$ scalar boolean evaluations (`bool`).
    """
    for k in v.keys() | w.keys():
        if k not in w:
            if bool(v[k]):
                return False
        elif k not in v:
            if bool(w[k]):
                return False
        else:
            if not v[k]==w[k]:
                return False
    return True
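
Example (illustrative doctest; explicitly stored zeros do not affect equality):

>>> from vector import vecseq
>>> vecseq({0: 1, 2: 0}, {0: 1})
True
>>> vecseq({0: 1}, {0: 2})
False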

vecstrim(v, tol=None)

Remove all near zero (abs(v_i)<=tol) coefficients.

\[ \begin{pmatrix} v_0 \\ \vdots \\ v_m \end{pmatrix} \ \text{where} \ m=\max\{\, j\mid |v_j|>\text{tol}\,\}\cup\{-1\} \]

tol may also be None, then all coefficients that evaluate to False are trimmed.

Complexity

For a vector of \(n\) elements there will be

  • \(n\) scalar absolute evaluations (abs) &
  • \(n\) scalar comparisons (gt).
Notes
  • Cutting of elements that are abs(v_i)<=tol instead of abs(v_i)<tol to allow cutting of elements that are exactly zero by trim(v, 0) instead of trim(v, sys.float_info.min).
Source code in vector\sparse\utility.py
def vecstrim(v, tol=None):
    r"""Remove all near zero (`abs(v_i)<=tol`) coefficients.

    $$
        \begin{pmatrix}
            v_0 \\
            \vdots \\
            v_m
        \end{pmatrix} \ \text{where} \ m=\max\{\, j\mid |v_j|>\text{tol}\,\}\cup\{-1\}
    $$

    `tol` may also be `None`,
    then all coefficients that evaluate to `False` are trimmed.

    Complexity
    ----------
    For a vector of $n$ elements there will be

    - $n$ scalar absolute evaluations (`abs`) &
    - $n$ scalar comparisons (`gt`).

    Notes
    -----
    - Cutting of elements that are `abs(v_i)<=tol` instead of `abs(v_i)<tol` to
    allow cutting of elements that are exactly zero by `trim(v, 0)` instead
    of `trim(v, sys.float_info.min)`.
    """
    if tol is None:
        return {i:vi for i, vi in v.items() if vi}
    else:
        return {i:vi for i, vi in v.items() if abs(vi)>tol}
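
Example (illustrative doctest; with tol=None only coefficients that evaluate to False are removed):

>>> from vector import vecstrim
>>> vecstrim({0: 1.0, 1: 0.0, 2: 1e-12})
{0: 1.0, 2: 1e-12}
>>> vecstrim({0: 1.0, 1: 0.0, 2: 1e-12}, tol=1e-9)
{0: 1.0}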

vecsitrim(v, tol=None)

Remove all near zero (abs(v_i)<=tol) coefficients.

\[ \begin{pmatrix} v_0 \\ \vdots \\ v_m \end{pmatrix} \ \text{where} \ m=\max\{\, j\mid |v_j|>\text{tol}\,\}\cup\{-1\} \]

tol may also be None, then all coefficients that evaluate to False are trimmed.

Complexity

For a vector of \(n\) elements there will be

  • \(n\) scalar absolute evaluations (abs) &
  • \(n\) scalar comparisons (gt).
Notes
  • Cutting of elements that are abs(v_i)<=tol instead of abs(v_i)<tol to allow cutting of elements that are exactly zero by trim(v, 0) instead of trim(v, sys.float_info.min).
Source code in vector\sparse\utility.py
def vecsitrim(v, tol=None):
    r"""Remove all near zero (`abs(v_i)<=tol`) coefficients.

    $$
        \begin{pmatrix}
            v_0 \\
            \vdots \\
            v_m
        \end{pmatrix} \ \text{where} \ m=\max\{\, j\mid |v_j|>\text{tol}\,\}\cup\{-1\}
    $$

    `tol` may also be `None`,
    then all coefficients that evaluate to `False` are trimmed.

    Complexity
    ----------
    For a vector of $n$ elements there will be

    - $n$ scalar absolute evaluations (`abs`) &
    - $n$ scalar comparisons (`gt`).

    Notes
    -----
    - Cutting of elements that are `abs(v_i)<=tol` instead of `abs(v_i)<tol` to
    allow cutting of elements that are exactly zero by `trim(v, 0)` instead
    of `trim(v, sys.float_info.min)`.
    """
    if tol is None:
        indices_to_del = tuple(i for i, vi in v.items() if not vi)
    else:
        indices_to_del = tuple(i for i, vi in v.items() if abs(vi)<=tol)

    for i in indices_to_del:
        del v[i]
    return v

vecsrshift(v, n)

Shift coefficients up.

\[ (v_{i-n})_i \qquad \begin{pmatrix} 0 \\ \vdots \\ 0 \\ v_0 \\ v_1 \\ \vdots \end{pmatrix} \qquad \mathbb{K}^m\to\mathbb{K}^{m+n} \]
Source code in vector\sparse\utility.py
def vecsrshift(v, n):
    r"""Shift coefficients up.

    $$
        (v_{i-n})_i \qquad \begin{pmatrix}
            0 \\
            \vdots \\
            0 \\
            v_0 \\
            v_1 \\
            \vdots
        \end{pmatrix} \qquad \mathbb{K}^m\to\mathbb{K}^{m+n}
    $$
    """
    return {i+n:vi for i, vi in v.items()}
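
Example (illustrative doctest):

>>> from vector import vecsrshift
>>> vecsrshift({0: 1, 2: 3}, 2)
{2: 1, 4: 3}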

vecsirshift(v, n)

Shift coefficients up.

\[ (v_{i-n})_i \qquad \begin{pmatrix} 0 \\ \vdots \\ 0 \\ v_0 \\ v_1 \\ \vdots \end{pmatrix} \qquad \mathbb{K}^m\to\mathbb{K}^{m+n} \]
Source code in vector\sparse\utility.py
def vecsirshift(v, n):
    r"""Shift coefficients up.

    $$
        (v_{i-n})_i \qquad \begin{pmatrix}
            0 \\
            \vdots \\
            0 \\
            v_0 \\
            v_1 \\
            \vdots
        \end{pmatrix} \qquad \mathbb{K}^m\to\mathbb{K}^{m+n}
    $$
    """
    for i in sorted(v.keys(), reverse=True):
        v[i+n] = v.pop(i)
    return v

vecslshift(v, n)

Shift coefficients down.

\[ (v_{i+n})_i \qquad \begin{pmatrix} v_n \\ v_{n+1} \\ \vdots \end{pmatrix} \qquad \mathbb{K}^m\to\mathbb{K}^{\max\{m-n, 0\}} \]
Source code in vector\sparse\utility.py
def vecslshift(v, n):
    r"""Shift coefficients down.

    $$
        (v_{i+n})_i \qquad \begin{pmatrix}
            v_n \\
            v_{n+1} \\
            \vdots
        \end{pmatrix} \qquad \mathbb{K}^m\to\mathbb{K}^{\max\{m-n, 0\}}
    $$
    """
    return {i-n:vi for i, vi in v.items() if i-n>=0}
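
Example (illustrative doctest; coefficients shifted below index 0 are dropped):

>>> from vector import vecslshift
>>> vecslshift({0: 1, 2: 3, 5: 7}, 2)
{0: 3, 3: 7}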

vecsilshift(v, n)

Shift coefficients down.

\[ (v_{i+n})_i \qquad \begin{pmatrix} v_n \\ v_{n+1} \\ \vdots \end{pmatrix} \qquad \mathbb{K}^m\to\mathbb{K}^{\max\{m-n, 0\}} \]
Source code in vector\sparse\utility.py
def vecsilshift(v, n):
    r"""Shift coefficients down.

    $$
        (v_{i+n})_i \qquad \begin{pmatrix}
            v_n \\
            v_{n+1} \\
            \vdots
        \end{pmatrix} \qquad \mathbb{K}^m\to\mathbb{K}^{\max\{m-n, 0\}}
    $$
    """
    for i in sorted(v.keys()):
        vi = v.pop(i)
        if i-n >= 0:
            v[i-n] = vi
    return v

hilbert_space

vecsconj(v)

Return the complex conjugate.

\[ \vec{v}^* \]

Tries to call a method conjugate on each element. If not found, simply keeps the element as is.

Complexity

For a vector of \(n\) elements there will be

  • \(n\) scalar conjugations.
Source code in vector\sparse\hilbert_space.py
def vecsconj(v):
    r"""Return the complex conjugate.

    $$
        \vec{v}^*
    $$

    Tries to call a method `conjugate` on each element.
    If not found, simply keeps the element as is.

    Complexity
    ----------
    For a vector of $n$ elements there will be

    - $n$ scalar conjugations.
    """
    return {i:try_conjugate(vi) for i, vi in v.items()}
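
Example (illustrative doctest; real coefficients are unchanged by conjugation):

>>> from vector import vecsconj
>>> vecsconj({0: 1+2j, 1: 3.0})
{0: (1-2j), 1: 3.0}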

vecsiconj(v)

Complex conjugate.

\[ \vec{v} = \vec{v}^* \]

Tries to call a method conjugate on each element. If not found, simply keeps the element as is.

Complexity

For a vector of \(n\) elements there will be

  • \(n\) scalar conjugations.
Source code in vector\sparse\hilbert_space.py
def vecsiconj(v):
    r"""Complex conjugate.

    $$
        \vec{v} = \vec{v}^*
    $$

    Tries to call a method `conjugate` on each element.
    If not found, simply keeps the element as is.

    Complexity
    ----------
    For a vector of $n$ elements there will be

    - $n$ scalar conjugations.
    """
    for i, vi in v.items():
        v[i] = try_conjugate(vi)
    return v

vecsabs(v, weights=None, conjugate=False, zero=0)

Return the Euclidean/\(\ell_{\mathbb{N}_0}^2\)-norm.

\[ ||\vec{v}||_{\ell_{\mathbb{N}_0}^2}=\sqrt{\sum_iv_i^{(*)}v_i\omega_i} \]

Returns the square root of vecsabsq.

Complexity

For a vector of \(n\) elements there will be

  • \(n\) scalar conjugations (conjugate) (if selected),
  • \(n\)/\(2n\) scalar multiplications (mul) without/with weights,
  • \(\begin{cases}n-1&n\ge1\\0&n\le1\end{cases}\) scalar additions (add) &
  • one ^0.5 call.
See also
  • squared version without square root: vecsabsq
Source code in vector\sparse\hilbert_space.py
def vecsabs(v, weights=None, conjugate=False, zero=0):
    r"""Return the Euclidean/$\ell_{\mathbb{N}_0}^2$-norm.

    $$
        ||\vec{v}||_{\ell_{\mathbb{N}_0}^2}=\sqrt{\sum_iv_i^{(*)}v_i\omega_i}
    $$

    Returns the square root of [`vecsabsq`][vector.sparse.hilbert_space.vecsabsq].

    Complexity
    ----------
    For a vector of $n$ elements there will be

    - $n$ scalar conjugations (`conjugate`) (if selected),
    - $n$/$2n$ scalar multiplications (`mul`) without/with weights,
    - $\begin{cases}n-1&n\ge1\\0&n\le1\end{cases}$ scalar additions (`add`) &
    - one `^0.5` call.

    See also
    --------
    - squared version without square root: [`vecsabsq`][vector.sparse.hilbert_space.vecsabsq]
    """
    return vecsabsq(v, weights=weights, conjugate=conjugate, zero=zero)**0.5
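
Example (illustrative doctest, assuming the package's summation helpers behave like plain sums of products):

>>> from vector import vecsabs
>>> vecsabs({0: 3.0, 1: 4.0})
5.0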

vecsabsq(v, weights=None, conjugate=False, zero=0)

Return the sum of absolute squares.

\[ ||\vec{v}||_{\ell_{\mathbb{N}_0}^2}^2=\sum_iv_i^{(*)}v_i\omega_i \qquad \mathbb{K}^n\to\mathbb{K}_0^+ \]
Notes

Reasons why it exists:

  • Occurs in math.
  • Most importantly: type independent because it doesn't use sqrt.
References
  • Python itertools recipes (https://docs.python.org/3/library/itertools.html#itertools-recipes): sum_of_squares
Source code in vector\sparse\hilbert_space.py
def vecsabsq(v, weights=None, conjugate=False, zero=0):
    r"""Return the sum of absolute squares.

    $$
        ||\vec{v}||_{\ell_{\mathbb{N}_0}^2}^2=\sum_iv_i^{(*)}v_i\omega_i \qquad \mathbb{K}^n\to\mathbb{K}_0^+
    $$

    Notes
    -----
    Reasons why it exists:

    - Occurs in math.
    - Most importantly: type independent because it doesn't use `sqrt`.

    References
    ----------
    - <https://docs.python.org/3/library/itertools.html#itertools-recipes>: `sum_of_squares`
    """
    if weights is None:
        if not conjugate:
            return sumprod_default(v.values(), v.values(), default=zero)
        else:
            return sumprod_default(map(try_conjugate, v.values()), v.values(), default=zero)
    else:
        if not conjugate:
            return sum_default((vi*vi*weights[i] for i, vi in v.items()), default=zero)
        else:
            return sum_default((try_conjugate(vi)*vi*weights[i] for i, vi in v.items()), default=zero)

vecsdot(v, w, weights=None, conjugate=False, zero=0)

Return the inner product.

\[ \left<\vec{v}\mid\vec{w}\right>_{\ell_{\mathbb{N}_0}^2}=\sum_iv_i^{(*)}w_i\omega_i \]
Source code in vector\sparse\hilbert_space.py
def vecsdot(v, w, weights=None, conjugate=False, zero=0):
    r"""Return the inner product.

    $$
        \left<\vec{v}\mid\vec{w}\right>_{\ell_{\mathbb{N}_0}^2}=\sum_iv_i^{(*)}w_i\omega_i
    $$
    """
    if weights is None:
        if not conjugate:
            return sum_default((v[k]*w[k] for k in v.keys()&w.keys()), default=zero)
        else:
            return sum_default((try_conjugate(v[k])*w[k] for k in v.keys()&w.keys()), default=zero)
    else:
        if not conjugate:
            return sum_default((v[k]*w[k]*weights[k] for k in v.keys()&w.keys()), default=zero)
        else:
            return sum_default((try_conjugate(v[k])*w[k]*weights[k] for k in v.keys()&w.keys()), default=zero)
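
Example (illustrative doctest; only indices present in both vectors contribute, assuming sum_default behaves like a plain sum):

>>> from vector import vecsdot
>>> vecsdot({0: 1, 2: 3}, {0: 2, 1: 5, 2: 4})
14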

vector_space

vecspos(v)

Return the identity.

\[ +\vec{v} \]
Complexity

For a vector with \(n\) elements there will be

  • \(n\) scalar unary plus operations (pos).
Source code in vector\sparse\vector_space.py
def vecspos(v):
    r"""Return the identity.

    $$
        +\vec{v}
    $$

    Complexity
    ----------
    For a vector with $n$ elements there will be

    - $n$ scalar unary plus operations (`pos`).
    """
    return {i:+vi for i, vi in v.items()}

vecsipos(v)

Apply unary plus.

\[ \vec{v} = +\vec{v} \]
Complexity

For a vector with \(n\) elements there will be

  • \(n\) scalar unary plus operations (pos).
Source code in vector\sparse\vector_space.py
def vecsipos(v):
    r"""Apply unary plus.

    $$
        \vec{v} = +\vec{v}
    $$

    Complexity
    ----------
    For a vector with $n$ elements there will be

    - $n$ scalar unary plus operations (`pos`).
    """
    for i, vi in v.items():
        v[i] = +vi
    return v

vecsneg(v)

Return the negation.

\[ -\vec{v} \]
Complexity

For a vector with \(n\) elements there will be

  • \(n\) scalar negations (neg).
Source code in vector\sparse\vector_space.py
def vecsneg(v):
    r"""Return the negation.

    $$
        -\vec{v}
    $$

    Complexity
    ----------
    For a vector with $n$ elements there will be

    - $n$ scalar negations (`neg`).
    """
    return {i:-vi for i, vi in v.items()}

vecsineg(v)

Negate.

\[ \vec{v} = -\vec{v} \]
Complexity

For a vector with \(n\) elements there will be

  • \(n\) scalar negations (neg).
Source code in vector\sparse\vector_space.py
def vecsineg(v):
    r"""Negate.

    $$
        \vec{v} = -\vec{v}
    $$

    Complexity
    ----------
    For a vector with $n$ elements there will be

    - $n$ scalar negations (`neg`).
    """
    for i, vi in v.items():
        v[i] = -vi
    return v

vecsadd(*vs)

Return the sum.

\[ \vec{v}_0+\vec{v}_1+\cdots \]
Complexity

For two vectors with \(n\) & \(m\) elements there will be

  • \(\min\{n, m\}\) scalar additions (iadd) &
  • \(\begin{cases}m-n&m\ge n\\0&m\le n\end{cases}\) unary plus operations (pos).
See also
  • for sum on a single coefficient: vecsaddc
Source code in vector\sparse\vector_space.py
def vecsadd(*vs):
    r"""Return the sum.

    $$
        \vec{v}_0+\vec{v}_1+\cdots
    $$

    Complexity
    ----------
    For two vectors with $n$ & $m$ elements there will be

    - $\min\{n, m\}$ scalar additions (`iadd`) &
    - $\begin{cases}m-n&m\ge n\\0&m\le n\end{cases}$ unary plus operations (`pos`).

    See also
    --------
    - for sum on a single coefficient: [`vecsaddc`][vector.sparse.vector_space.vecsaddc]
    """
    r = dict(vs[0]) if vs else {}
    for v in vs[1:]:
        for i, vi in v.items():
            if i in r:
                r[i] += vi
            else:
                r[i] = +vi
    return r
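
Example (illustrative doctest; any number of vectors can be summed):

>>> from vector import vecsadd
>>> vecsadd({0: 1}, {0: 2, 3: 4}, {3: 1})
{0: 3, 3: 5}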

vecsiadd(v, *ws)

Add.

\[ \vec{v} += \vec{w}_0+\vec{w}_1+\cdots \]
Complexity

For two vectors with \(n\) & \(m\) elements there will be

  • \(\min\{n, m\}\) scalar additions (iadd) &
  • \(\begin{cases}m-n&m\ge n\\0&m\le n\end{cases}\) unary plus operations (pos).
See also
  • for sum on a single coefficient: vecsiaddc
Source code in vector\sparse\vector_space.py
def vecsiadd(v, *ws):
    r"""Add.

    $$
        \vec{v} += \vec{w}_0+\vec{w}_1+\cdots
    $$

    Complexity
    ----------
    For two vectors with $n$ & $m$ elements there will be

    - $\min\{n, m\}$ scalar additions (`iadd`) &
    - $\begin{cases}m-n&m\ge n\\0&m\le n\end{cases}$ unary plus operations (`pos`).

    See also
    --------
    - for sum on a single coefficient: [`vecsiaddc`][vector.sparse.vector_space.vecsiaddc]
    """
    for w in ws:
        for i, wi in w.items():
            if i in v:
                v[i] += wi
            else:
                v[i] = +wi
    return v

vecsaddc(v, c, i=0)

Return the sum with a basis vector.

\[ \vec{v}+c\vec{e}_i \]
Complexity

There will be

  • one scalar addition (iadd) if \(i\in\vec{v}\) or
  • one unary plus operation (pos) otherwise.
See also
  • for sum on more coefficients: vecsadd
Source code in vector\sparse\vector_space.py
def vecsaddc(v, c, i=0):
    r"""Return the sum with a basis vector.

    $$
        \vec{v}+c\vec{e}_i
    $$

    Complexity
    ----------
    There will be

    - one scalar addition (`iadd`) if $i\in\vec{v}$ or
    - one unary plus operation (`pos`) otherwise.

    See also
    --------
    - for sum on more coefficients: [`vecsadd`][vector.sparse.vector_space.vecsadd]
    """
    r = dict(v)
    if i in r:
        r[i] += c
    else:
        r[i] = +c
    return r

vecsiaddc(v, c, i=0)

Add a basis vector.

\[ \vec{v} += c\vec{e}_i \]
Complexity

There will be

  • one scalar addition (iadd) if \(i\in\vec{v}\) or
  • one unary plus operation (pos) otherwise.
See also
  • for sum on more coefficients: vecsiadd
Source code in vector\sparse\vector_space.py
def vecsiaddc(v, c, i=0):
    r"""Add a basis vector.

    $$
        \vec{v} += c\vec{e}_i
    $$

    Complexity
    ----------
    There will be

    - one scalar addition (`iadd`) if $i\in\vec{v}$ or
    - one unary plus operation (`pos`) otherwise.

    See also
    --------
    - for sum on more coefficients: [`vecsiadd`][vector.sparse.vector_space.vecsiadd]
    """
    if i in v:
        v[i] += c
    else:
        v[i] = +c
    return v

vecssub(v, w)

Return the difference.

\[ \vec{v}-\vec{w} \]
Complexity

For two vectors with \(n\) & \(m\) elements there will be

  • \(\min\{n, m\}\) scalar subtractions (isub) &
  • \(\begin{cases}m-n&m\ge n\\0&m\le n\end{cases}\) negations (neg).
See also
  • for difference on a single coefficient: vecssubc
Source code in vector\sparse\vector_space.py
def vecssub(v, w):
    r"""Return the difference.

    $$
        \vec{v}-\vec{w}
    $$

    Complexity
    ----------
    For two vectors with $n$ & $m$ elements there will be

    - $\min\{n, m\}$ scalar subtractions (`isub`) &
    - $\begin{cases}m-n&m\ge n\\0&m\le n\end{cases}$ negations (`neg`).

    See also
    --------
    - for difference on a single coefficient: [`vecssubc`][vector.sparse.vector_space.vecssubc]
    """
    r = dict(v)
    for i, wi in w.items():
        if i in r:
            r[i] -= wi
        else:
            r[i] = -wi
    return r
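
Example (illustrative doctest; indices missing from v appear negated in the result):

>>> from vector import vecssub
>>> vecssub({0: 5, 1: 2}, {0: 3, 2: 7})
{0: 2, 1: 2, 2: -7}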

vecsisub(v, w)

Subtract.

\[ \vec{v} -= \vec{w} \]
Complexity

For two vectors with \(n\) & \(m\) elements there will be

  • \(\min\{n, m\}\) scalar subtractions (isub) &
  • \(\begin{cases}m-n&m\ge n\\0&m\le n\end{cases}\) negations (neg).
See also
  • for difference on a single coefficient: vecsisubc
Source code in vector\sparse\vector_space.py
def vecsisub(v, w):
    r"""Subtract.

    $$
        \vec{v} -= \vec{w}
    $$

    Complexity
    ----------
    For two vectors with $n$ & $m$ elements there will be

    - $\min\{n, m\}$ scalar subtractions (`isub`) &
    - $\begin{cases}m-n&m\ge n\\0&m\le n\end{cases}$ negations (`neg`).

    See also
    --------
    - for difference on a single coefficient: [`vecsisubc`][vector.sparse.vector_space.vecsisubc]
    """
    for i, wi in w.items():
        if i in v:
            v[i] -= wi
        else:
            v[i] = -wi
    return v

vecssubc(v, c, i=0)

Return the difference with a basis vector.

\[ \vec{v}-c\vec{e}_i \]
Complexity

There will be

  • one scalar subtraction (isub) if \(i\in\vec{v}\) or
  • one scalar negation (neg) otherwise.
See also
  • for difference on more coefficients: vecssub
Source code in vector\sparse\vector_space.py
def vecssubc(v, c, i=0):
    r"""Return the difference with a basis vector.

    $$
        \vec{v}-c\vec{e}_i
    $$

    Complexity
    ----------
    There will be

    - one scalar subtraction (`isub`) if $i\in\vec{v}$ or
    - one scalar negation (`neg`) otherwise.

    See also
    --------
    - for difference on more coefficients: [`vecssub`][vector.sparse.vector_space.vecssub]
    """
    r = dict(v)
    if i in r:
        r[i] -= c
    else:
        r[i] = -c
    return r

vecsisubc(v, c, i=0)

Subtract a basis vector.

\[ \vec{v} -= c\vec{e}_i \]
Complexity

There will be

  • one scalar subtraction (isub) if \(i\in\vec{v}\) or
  • one scalar negation (neg) otherwise.
See also
  • for difference on more coefficients: vecsisub
Source code in vector\sparse\vector_space.py
def vecsisubc(v, c, i=0):
    r"""Subtract a basis vector.

    $$
        \vec{v} -= c\vec{e}_i
    $$

    Complexity
    ----------
    There will be

    - one scalar subtraction (`isub`) if $i\in\vec{v}$ or
    - one scalar negation (`neg`) otherwise.

    See also
    --------
    - for difference on more coefficients: [`vecsisub`][vector.sparse.vector_space.vecsisub]
    """
    if i in v:
        v[i] -= c
    else:
        v[i] = -c
    return v

vecsmul(v, a)

Return the product.

\[ \vec{v}a \]
Complexity

For a vector with \(n\) elements there will be

  • \(n\) scalar multiplications (mul).
Source code in vector\sparse\vector_space.py
def vecsmul(v, a):
    r"""Return the product.

    $$
        \vec{v}a
    $$

    Complexity
    ----------
    For a vector with $n$ elements there will be

    - $n$ scalar multiplications (`mul`).
    """
    return {i:vi*a for i, vi in v.items()}

vecsrmul(a, v)

Return the product.

\[ a\vec{v} \]
Complexity

For a vector with \(n\) elements there will be

  • \(n\) scalar multiplications (rmul).
Source code in vector\sparse\vector_space.py
def vecsrmul(a, v):
    r"""Return the product.

    $$
        a\vec{v}
    $$

    Complexity
    ----------
    For a vector with $n$ elements there will be

    - $n$ scalar multiplications (`rmul`).
    """
    return {i:a*vi for i, vi in v.items()}

vecsimul(v, a)

Multiply.

\[ \vec{v} \cdot= a \]
Complexity

For a vector with \(n\) elements there will be

  • \(n\) scalar multiplications (imul).
Source code in vector\sparse\vector_space.py
def vecsimul(v, a):
    r"""Multiply.

    $$
        \vec{v} \cdot= a
    $$

    Complexity
    ----------
    For a vector with $n$ elements there will be

    - $n$ scalar multiplications (`imul`).
    """
    for i in v:
        v[i] *= a
    return v

vecstruediv(v, a)

Return the true quotient.

\[ \frac{\vec{v}}{a} \]
Complexity

For a vector with \(n\) elements there will be

  • \(n\) scalar true divisions (truediv).
Notes

Why called truediv instead of div?

  • div would be more appropriate for an absolutely clean mathematical implementation that doesn't care about the language used. But the package might be used for pure integer arithmetic, so both truediv and floordiv operations have to be provided, and neither should be privileged over the other by getting the universal div name.
  • truediv/floordiv is unambiguous, like Python operators.
Source code in vector\sparse\vector_space.py
def vecstruediv(v, a):
    r"""Return the true quotient.

    $$
        \frac{\vec{v}}{a}
    $$

    Complexity
    ----------
    For a vector with $n$ elements there will be

    - $n$ scalar true divisions (`truediv`).

    Notes
    -----
    Why called `truediv` instead of `div`?

    - `div` would be more appropriate for an absolutely clean mathematical
    implementation, that doesn't care about the language used. But the package
    might be used for pure integers/integer arithmetic, so both, `truediv`
    and `floordiv` operations have to be provided, and none should be
    privileged over the other by getting the universal `div` name.
    - `truediv`/`floordiv` is unambiguous, like Python `operator`s.
    """
    return {i:vi/a for i, vi in v.items()}
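
Example (illustrative doctest):

>>> from vector import vecstruediv
>>> vecstruediv({0: 1, 3: 4}, 2)
{0: 0.5, 3: 2.0}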

vecsitruediv(v, a)

True divide.

\[ \vec{v} /= a \]
Complexity

For a vector with \(n\) elements there will be

  • \(n\) scalar true divisions (itruediv).
Notes

Why called truediv instead of div?

  • div would be more appropriate for an absolutely clean mathematical implementation that doesn't care about the language used. But the package might be used for pure integer arithmetic, so both truediv and floordiv operations have to be provided, and neither should be privileged over the other by getting the universal div name.
  • truediv/floordiv is unambiguous, like Python operators.
Source code in vector\sparse\vector_space.py
def vecsitruediv(v, a):
    r"""True divide.

    $$
        \vec{v} /= a
    $$

    Complexity
    ----------
    For a vector with $n$ elements there will be

    - $n$ scalar true divisions (`itruediv`).

    Notes
    -----
    Why called `truediv` instead of `div`?

    - `div` would be more appropriate for an absolutely clean mathematical
    implementation, that doesn't care about the language used. But the package
    might be used for pure integers/integer arithmetic, so both, `truediv`
    and `floordiv` operations have to be provided, and none should be
    privileged over the other by getting the universal `div` name.
    - `truediv`/`floordiv` is unambiguous, like Python `operator`s.
    """
    for i in v:
        v[i] /= a
    return v

vecsfloordiv(v, a)

Return the floor quotient.

\[ \left\lfloor\frac{\vec{v}}{a}\right\rfloor \]
Complexity

For a vector with \(n\) elements there will be

  • \(n\) scalar floor divisions (floordiv).
Source code in vector\sparse\vector_space.py
def vecsfloordiv(v, a):
    r"""Return the floor quotient.

    $$
        \left\lfloor\frac{\vec{v}}{a}\right\rfloor
    $$

    Complexity
    ----------
    For a vector with $n$ elements there will be

    - $n$ scalar floor divisions (`floordiv`).
    """
    return {i:vi//a for i, vi in v.items()}

vecsifloordiv(v, a)

Floor divide.

\[ \vec{v} //= a \]
Complexity

For a vector with \(n\) elements there will be

  • \(n\) scalar floor divisions (ifloordiv).
Source code in vector\sparse\vector_space.py
def vecsifloordiv(v, a):
    r"""Floor divide.

    $$
        \vec{v} //= a
    $$

    Complexity
    ----------
    For a vector with $n$ elements there will be

    - $n$ scalar floor divisions (`ifloordiv`).
    """
    for i in v:
        v[i] //= a
    return v

vecsmod(v, a)

Return the remainder.

\[ \vec{v} \bmod a \]
Complexity

For a vector with \(n\) elements there will be

  • \(n\) scalar modulos (mod).
Source code in vector\sparse\vector_space.py
def vecsmod(v, a):
    r"""Return the remainder.

    $$
        \vec{v} \bmod a
    $$

    Complexity
    ----------
    For a vector with $n$ elements there will be

    - $n$ scalar modulos (`mod`).
    """
    return {i:vi%a for i, vi in v.items()}

vecsimod(v, a)

Mod.

\[ \vec{v} \%= a \]
Complexity

For a vector with \(n\) elements there will be

  • \(n\) scalar modulos (imod).
Source code in vector\sparse\vector_space.py
def vecsimod(v, a):
    r"""Mod.

    $$
        \vec{v} \%= a
    $$

    Complexity
    ----------
    For a vector with $n$ elements there will be

    - $n$ scalar modulos (`imod`).
    """
    for i in v:
        v[i] %= a
    return v

vecsdivmod(v, a)

Return the floor quotient and remainder.

\[ \left\lfloor\frac{\vec{v}}{a}\right\rfloor, \ \left(\vec{v} \bmod a\right) \]
Complexity

For a vector with \(n\) elements there will be

  • \(n\) scalar divmods (divmod).
Source code in vector\sparse\vector_space.py
def vecsdivmod(v, a):
    r"""Return the floor quotient and remainder.

    $$
        \left\lfloor\frac{\vec{v}}{a}\right\rfloor, \ \left(\vec{v} \bmod a\right)
    $$

    Complexity
    ----------
    For a vector with $n$ elements there will be

    - $n$ scalar divmods (`divmod`).
    """
    q, r = {}, {}
    for i, vi in v.items():
        q[i], r[i] = divmod(vi, a)
    return q, r
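
Example (illustrative doctest; returns the quotient vector and the remainder vector):

>>> from vector import vecsdivmod
>>> vecsdivmod({0: 7, 1: -3}, 2)
({0: 3, 1: -2}, {0: 1, 1: 1})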

elementwise

vecshadamard(*vs)

Return the elementwise product.

\[ \left((\vec{v}_0)_i\cdot(\vec{v}_1)_i\cdot\cdots\right)_i \]
Source code in vector\sparse\elementwise.py
def vecshadamard(*vs):
    r"""Return the elementwise product.

    $$
        \left((\vec{v}_0)_i\cdot(\vec{v}_1)_i\cdot\cdots\right)_i
    $$
    """
    r = {}
    if not vs:
        return r
    for k in set(vs[0].keys()).intersection(*(v.keys() for v in vs[1:])):
        r[k] = prod_default((v[k] for v in vs), initial=MISSING, default=MISSING)
    return r
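
Example (illustrative doctest; only indices present in every vector appear in the product, assuming prod_default multiplies the collected coefficients):

>>> from vector import vecshadamard
>>> vecshadamard({0: 2, 1: 3}, {1: 4, 2: 5})
{1: 12}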

vecshadamardtruediv(v, w)

Return the elementwise true quotient.

\[ \left(\frac{v_i}{w_i}\right)_i \]
Source code in vector\sparse\elementwise.py
def vecshadamardtruediv(v, w):
    r"""Return the elementwise true quotient.

    $$
        \left(\frac{v_i}{w_i}\right)_i
    $$
    """
    r = {}
    for i, vi in v.items():
        try:
            wi = w[i]
        except KeyError:
            raise ZeroDivisionError
        r[i] = vi / wi
    return r

vecshadamardfloordiv(v, w)

Return the elementwise floor quotient.

\[ \left(\left\lfloor\frac{v_i}{w_i}\right\rfloor\right)_i \]
Source code in vector\sparse\elementwise.py
def vecshadamardfloordiv(v, w):
    r"""Return the elementwise floor quotient.

    $$
        \left(\left\lfloor\frac{v_i}{w_i}\right\rfloor\right)_i
    $$
    """
    r = {}
    for i, vi in v.items():
        try:
            wi = w[i]
        except KeyError:
            raise ZeroDivisionError
        r[i] = vi // wi
    return r

vecshadamardmod(v, w)

Return the elementwise remainder.

\[ \left(v_i \bmod w_i\right)_i \]
Source code in vector\sparse\elementwise.py
def vecshadamardmod(v, w):
    r"""Return the elementwise remainder.

    $$
        \left(v_i \bmod w_i\right)_i
    $$
    """
    r = {}
    for i, vi in v.items():
        try:
            wi = w[i]
        except KeyError:
            raise ZeroDivisionError
        r[i] = vi % wi
    return r

vecshadamarddivmod(v, w)

Return the elementwise floor quotient and remainder.

\[ \left(\left\lfloor\frac{v_i}{w_i}\right\rfloor\right)_i, \ \left(v_i \bmod w_i\right)_i \]
Source code in vector\sparse\elementwise.py
def vecshadamarddivmod(v, w):
    r"""Return the elementwise floor quotient and remainder.

    $$
        \left(\left\lfloor\frac{v_i}{w_i}\right\rfloor\right)_i, \ \left(v_i \bmod w_i\right)_i
    $$
    """
    q, r = {}, {}
    for i, vi in v.items():
        try:
            wi = w[i]
        except KeyError:
            raise ZeroDivisionError
        q[i], r[i] = divmod(vi, wi)
    return q, r

vecshadamardmin(*vs, key=None)

Return the elementwise minimum.

\[ \left(\min((\vec{v}_0)_i,(\vec{v}_1)_i,\cdots)\right)_i \]
Source code in vector\sparse\elementwise.py
def vecshadamardmin(*vs, key=None):
    r"""Return the elementwise minimum.

    $$
        \left(\min((\vec{v}_0)_i,(\vec{v}_1)_i,\cdots)\right)_i
    $$
    """
    r = {}
    if not vs:
        return r
    for k in set(vs[0].keys()).union(*(v.keys() for v in vs[1:])):
        r[k] = min((v[k] for v in vs if k in v), key=key)  # pass key through to min
    return r

vecshadamardmax(*vs, key=None)

Return the elementwise maximum.

\[ \left(\max((\vec{v}_0)_i,(\vec{v}_1)_i,\cdots)\right)_i \]
Source code in vector\sparse\elementwise.py
def vecshadamardmax(*vs, key=None):
    r"""Return the elementwise maximum.

    $$
        \left(\max((\vec{v}_0)_i,(\vec{v}_1)_i,\cdots)\right)_i
    $$
    """
    r = {}
    if not vs:
        return r
    for k in set(vs[0].keys()).union(*(v.keys() for v in vs[1:])):
        r[k] = max((v[k] for v in vs if k in v), key=key)  # pass key through to max
    return r