// SPDX-License-Identifier: GPL-2.0-only
/* tnum: tracked (or tristate) numbers
 *
 * A tnum tracks knowledge about the bits of a value. Each bit can be either
 * known (0 or 1), or unknown (x). Arithmetic operations on tnums will
 * propagate the unknown bits such that the tnum result represents all the
 * possible results for possible values of the operands.
 */
#include <linux/kernel.h>
#include <linux/tnum.h>
12 #define TNUM(_v, _m) (struct tnum){.value = _v, .mask = _m}
13 /* A completely unknown value */
14 const struct tnum tnum_unknown
= { .value
= 0, .mask
= -1 };
16 struct tnum
tnum_const(u64 value
)
18 return TNUM(value
, 0);
21 struct tnum
tnum_range(u64 min
, u64 max
)
23 u64 chi
= min
^ max
, delta
;
26 /* special case, needed because 1ULL << 64 is undefined */
29 /* e.g. if chi = 4, bits = 3, delta = (1<<3) - 1 = 7.
30 * if chi = 0, bits = 0, delta = (1<<0) - 1 = 0, so we return
31 * constant min (since min == max).
33 delta
= (1ULL << bits
) - 1;
34 return TNUM(min
& ~delta
, delta
);
37 struct tnum
tnum_lshift(struct tnum a
, u8 shift
)
39 return TNUM(a
.value
<< shift
, a
.mask
<< shift
);
42 struct tnum
tnum_rshift(struct tnum a
, u8 shift
)
44 return TNUM(a
.value
>> shift
, a
.mask
>> shift
);
47 struct tnum
tnum_arshift(struct tnum a
, u8 min_shift
, u8 insn_bitness
)
49 /* if a.value is negative, arithmetic shifting by minimum shift
50 * will have larger negative offset compared to more shifting.
51 * If a.value is nonnegative, arithmetic shifting by minimum shift
52 * will have larger positive offset compare to more shifting.
54 if (insn_bitness
== 32)
55 return TNUM((u32
)(((s32
)a
.value
) >> min_shift
),
56 (u32
)(((s32
)a
.mask
) >> min_shift
));
58 return TNUM((s64
)a
.value
>> min_shift
,
59 (s64
)a
.mask
>> min_shift
);
62 struct tnum
tnum_add(struct tnum a
, struct tnum b
)
64 u64 sm
, sv
, sigma
, chi
, mu
;
67 sv
= a
.value
+ b
.value
;
70 mu
= chi
| a
.mask
| b
.mask
;
71 return TNUM(sv
& ~mu
, mu
);
74 struct tnum
tnum_sub(struct tnum a
, struct tnum b
)
76 u64 dv
, alpha
, beta
, chi
, mu
;
78 dv
= a
.value
- b
.value
;
82 mu
= chi
| a
.mask
| b
.mask
;
83 return TNUM(dv
& ~mu
, mu
);
86 struct tnum
tnum_and(struct tnum a
, struct tnum b
)
90 alpha
= a
.value
| a
.mask
;
91 beta
= b
.value
| b
.mask
;
92 v
= a
.value
& b
.value
;
93 return TNUM(v
, alpha
& beta
& ~v
);
96 struct tnum
tnum_or(struct tnum a
, struct tnum b
)
100 v
= a
.value
| b
.value
;
101 mu
= a
.mask
| b
.mask
;
102 return TNUM(v
, mu
& ~v
);
105 struct tnum
tnum_xor(struct tnum a
, struct tnum b
)
109 v
= a
.value
^ b
.value
;
110 mu
= a
.mask
| b
.mask
;
111 return TNUM(v
& ~mu
, mu
);
114 /* half-multiply add: acc += (unknown * mask * value).
115 * An intermediate step in the multiply algorithm.
117 static struct tnum
hma(struct tnum acc
, u64 value
, u64 mask
)
121 acc
= tnum_add(acc
, TNUM(0, value
));
128 struct tnum
tnum_mul(struct tnum a
, struct tnum b
)
133 pi
= a
.value
* b
.value
;
134 acc
= hma(TNUM(pi
, 0), a
.mask
, b
.mask
| b
.value
);
135 return hma(acc
, b
.mask
, a
.value
);
138 /* Note that if a and b disagree - i.e. one has a 'known 1' where the other has
139 * a 'known 0' - this will return a 'known 1' for that bit.
141 struct tnum
tnum_intersect(struct tnum a
, struct tnum b
)
145 v
= a
.value
| b
.value
;
146 mu
= a
.mask
& b
.mask
;
147 return TNUM(v
& ~mu
, mu
);
150 struct tnum
tnum_cast(struct tnum a
, u8 size
)
152 a
.value
&= (1ULL << (size
* 8)) - 1;
153 a
.mask
&= (1ULL << (size
* 8)) - 1;
157 bool tnum_is_aligned(struct tnum a
, u64 size
)
161 return !((a
.value
| a
.mask
) & (size
- 1));
164 bool tnum_in(struct tnum a
, struct tnum b
)
166 if (b
.mask
& ~a
.mask
)
169 return a
.value
== b
.value
;
172 int tnum_strn(char *str
, size_t size
, struct tnum a
)
174 return snprintf(str
, size
, "(%#llx; %#llx)", a
.value
, a
.mask
);
176 EXPORT_SYMBOL_GPL(tnum_strn
);
178 int tnum_sbin(char *str
, size_t size
, struct tnum a
)
182 for (n
= 64; n
; n
--) {
186 else if (a
.value
& 1)
194 str
[min(size
- 1, (size_t)64)] = 0;