1 // SPDX-License-Identifier: GPL-2.0-only
2 /* tnum: tracked (or tristate) numbers
4 * A tnum tracks knowledge about the bits of a value. Each bit can be either
5 * known (0 or 1), or unknown (x). Arithmetic operations on tnums will
6 * propagate the unknown bits such that the tnum result represents all the
 * possible results for possible values of the operands.
 */
9 #include <linux/kernel.h>
10 #include <linux/tnum.h>
/* Construct a tnum from a value (known-1 bits) and a mask (unknown bits). */
#define TNUM(_v, _m)	(struct tnum){.value = _v, .mask = _m}
/* A completely unknown value */
const struct tnum tnum_unknown = { .value = 0, .mask = -1 };
16 struct tnum
tnum_const(u64 value
)
18 return TNUM(value
, 0);
21 struct tnum
tnum_range(u64 min
, u64 max
)
23 u64 chi
= min
^ max
, delta
;
26 /* special case, needed because 1ULL << 64 is undefined */
29 /* e.g. if chi = 4, bits = 3, delta = (1<<3) - 1 = 7.
30 * if chi = 0, bits = 0, delta = (1<<0) - 1 = 0, so we return
31 * constant min (since min == max).
33 delta
= (1ULL << bits
) - 1;
34 return TNUM(min
& ~delta
, delta
);
37 struct tnum
tnum_lshift(struct tnum a
, u8 shift
)
39 return TNUM(a
.value
<< shift
, a
.mask
<< shift
);
42 struct tnum
tnum_rshift(struct tnum a
, u8 shift
)
44 return TNUM(a
.value
>> shift
, a
.mask
>> shift
);
47 struct tnum
tnum_arshift(struct tnum a
, u8 min_shift
, u8 insn_bitness
)
49 /* if a.value is negative, arithmetic shifting by minimum shift
50 * will have larger negative offset compared to more shifting.
51 * If a.value is nonnegative, arithmetic shifting by minimum shift
52 * will have larger positive offset compare to more shifting.
54 if (insn_bitness
== 32)
55 return TNUM((u32
)(((s32
)a
.value
) >> min_shift
),
56 (u32
)(((s32
)a
.mask
) >> min_shift
));
58 return TNUM((s64
)a
.value
>> min_shift
,
59 (s64
)a
.mask
>> min_shift
);
62 struct tnum
tnum_add(struct tnum a
, struct tnum b
)
64 u64 sm
, sv
, sigma
, chi
, mu
;
67 sv
= a
.value
+ b
.value
;
70 mu
= chi
| a
.mask
| b
.mask
;
71 return TNUM(sv
& ~mu
, mu
);
74 struct tnum
tnum_sub(struct tnum a
, struct tnum b
)
76 u64 dv
, alpha
, beta
, chi
, mu
;
78 dv
= a
.value
- b
.value
;
82 mu
= chi
| a
.mask
| b
.mask
;
83 return TNUM(dv
& ~mu
, mu
);
86 struct tnum
tnum_and(struct tnum a
, struct tnum b
)
90 alpha
= a
.value
| a
.mask
;
91 beta
= b
.value
| b
.mask
;
92 v
= a
.value
& b
.value
;
93 return TNUM(v
, alpha
& beta
& ~v
);
96 struct tnum
tnum_or(struct tnum a
, struct tnum b
)
100 v
= a
.value
| b
.value
;
101 mu
= a
.mask
| b
.mask
;
102 return TNUM(v
, mu
& ~v
);
105 struct tnum
tnum_xor(struct tnum a
, struct tnum b
)
109 v
= a
.value
^ b
.value
;
110 mu
= a
.mask
| b
.mask
;
111 return TNUM(v
& ~mu
, mu
);
114 /* Generate partial products by multiplying each bit in the multiplier (tnum a)
115 * with the multiplicand (tnum b), and add the partial products after
116 * appropriately bit-shifting them. Instead of directly performing tnum addition
117 * on the generated partial products, equivalenty, decompose each partial
118 * product into two tnums, consisting of the value-sum (acc_v) and the
119 * mask-sum (acc_m) and then perform tnum addition on them. The following paper
120 * explains the algorithm in more detail: https://arxiv.org/abs/2105.05398.
122 struct tnum
tnum_mul(struct tnum a
, struct tnum b
)
124 u64 acc_v
= a
.value
* b
.value
;
125 struct tnum acc_m
= TNUM(0, 0);
127 while (a
.value
|| a
.mask
) {
128 /* LSB of tnum a is a certain 1 */
130 acc_m
= tnum_add(acc_m
, TNUM(0, b
.mask
));
131 /* LSB of tnum a is uncertain */
133 acc_m
= tnum_add(acc_m
, TNUM(0, b
.value
| b
.mask
));
134 /* Note: no case for LSB is certain 0 */
135 a
= tnum_rshift(a
, 1);
136 b
= tnum_lshift(b
, 1);
138 return tnum_add(TNUM(acc_v
, 0), acc_m
);
141 /* Note that if a and b disagree - i.e. one has a 'known 1' where the other has
142 * a 'known 0' - this will return a 'known 1' for that bit.
144 struct tnum
tnum_intersect(struct tnum a
, struct tnum b
)
148 v
= a
.value
| b
.value
;
149 mu
= a
.mask
& b
.mask
;
150 return TNUM(v
& ~mu
, mu
);
153 struct tnum
tnum_cast(struct tnum a
, u8 size
)
155 a
.value
&= (1ULL << (size
* 8)) - 1;
156 a
.mask
&= (1ULL << (size
* 8)) - 1;
160 bool tnum_is_aligned(struct tnum a
, u64 size
)
164 return !((a
.value
| a
.mask
) & (size
- 1));
167 bool tnum_in(struct tnum a
, struct tnum b
)
169 if (b
.mask
& ~a
.mask
)
172 return a
.value
== b
.value
;
175 int tnum_sbin(char *str
, size_t size
, struct tnum a
)
179 for (n
= 64; n
; n
--) {
183 else if (a
.value
& 1)
191 str
[min(size
- 1, (size_t)64)] = 0;
195 struct tnum
tnum_subreg(struct tnum a
)
197 return tnum_cast(a
, 4);
200 struct tnum
tnum_clear_subreg(struct tnum a
)
202 return tnum_lshift(tnum_rshift(a
, 32), 32);
205 struct tnum
tnum_with_subreg(struct tnum reg
, struct tnum subreg
)
207 return tnum_or(tnum_clear_subreg(reg
), tnum_subreg(subreg
));
210 struct tnum
tnum_const_subreg(struct tnum a
, u32 value
)
212 return tnum_with_subreg(a
, tnum_const(value
));