/* tnum: tracked (or tristate) numbers
 *
 * A tnum tracks knowledge about the bits of a value. Each bit can be either
 * known (0 or 1), or unknown (x). Arithmetic operations on tnums will
 * propagate the unknown bits such that the tnum result represents all the
 * possible results for possible values of the operands.
 */
8 #include <linux/kernel.h>
9 #include <linux/tnum.h>
11 #define TNUM(_v, _m) (struct tnum){.value = _v, .mask = _m}
/* A completely unknown value: no known bits (all mask bits set). */
const struct tnum tnum_unknown = { .value = 0, .mask = -1 };
15 struct tnum
tnum_const(u64 value
)
17 return TNUM(value
, 0);
20 struct tnum
tnum_range(u64 min
, u64 max
)
22 u64 chi
= min
^ max
, delta
;
25 /* special case, needed because 1ULL << 64 is undefined */
28 /* e.g. if chi = 4, bits = 3, delta = (1<<3) - 1 = 7.
29 * if chi = 0, bits = 0, delta = (1<<0) - 1 = 0, so we return
30 * constant min (since min == max).
32 delta
= (1ULL << bits
) - 1;
33 return TNUM(min
& ~delta
, delta
);
36 struct tnum
tnum_lshift(struct tnum a
, u8 shift
)
38 return TNUM(a
.value
<< shift
, a
.mask
<< shift
);
41 struct tnum
tnum_rshift(struct tnum a
, u8 shift
)
43 return TNUM(a
.value
>> shift
, a
.mask
>> shift
);
/* Arithmetic (sign-extending) right shift of a tnum by @min_shift.
 *
 * if a.value is negative, arithmetic shifting by minimum shift
 * will have larger negative offset compared to more shifting.
 * If a.value is nonnegative, arithmetic shifting by minimum shift
 * will have larger positive offset compared to more shifting.
 */
struct tnum tnum_arshift(struct tnum a, u8 min_shift)
{
	/* Both value and mask are sign-extended; a set sign bit in the mask
	 * (unknown sign) smears unknown bits down from the top.
	 */
	return TNUM((s64)a.value >> min_shift, (s64)a.mask >> min_shift);
}
56 struct tnum
tnum_add(struct tnum a
, struct tnum b
)
58 u64 sm
, sv
, sigma
, chi
, mu
;
61 sv
= a
.value
+ b
.value
;
64 mu
= chi
| a
.mask
| b
.mask
;
65 return TNUM(sv
& ~mu
, mu
);
68 struct tnum
tnum_sub(struct tnum a
, struct tnum b
)
70 u64 dv
, alpha
, beta
, chi
, mu
;
72 dv
= a
.value
- b
.value
;
76 mu
= chi
| a
.mask
| b
.mask
;
77 return TNUM(dv
& ~mu
, mu
);
80 struct tnum
tnum_and(struct tnum a
, struct tnum b
)
84 alpha
= a
.value
| a
.mask
;
85 beta
= b
.value
| b
.mask
;
86 v
= a
.value
& b
.value
;
87 return TNUM(v
, alpha
& beta
& ~v
);
90 struct tnum
tnum_or(struct tnum a
, struct tnum b
)
94 v
= a
.value
| b
.value
;
96 return TNUM(v
, mu
& ~v
);
99 struct tnum
tnum_xor(struct tnum a
, struct tnum b
)
103 v
= a
.value
^ b
.value
;
104 mu
= a
.mask
| b
.mask
;
105 return TNUM(v
& ~mu
, mu
);
108 /* half-multiply add: acc += (unknown * mask * value).
109 * An intermediate step in the multiply algorithm.
111 static struct tnum
hma(struct tnum acc
, u64 value
, u64 mask
)
115 acc
= tnum_add(acc
, TNUM(0, value
));
122 struct tnum
tnum_mul(struct tnum a
, struct tnum b
)
127 pi
= a
.value
* b
.value
;
128 acc
= hma(TNUM(pi
, 0), a
.mask
, b
.mask
| b
.value
);
129 return hma(acc
, b
.mask
, a
.value
);
132 /* Note that if a and b disagree - i.e. one has a 'known 1' where the other has
133 * a 'known 0' - this will return a 'known 1' for that bit.
135 struct tnum
tnum_intersect(struct tnum a
, struct tnum b
)
139 v
= a
.value
| b
.value
;
140 mu
= a
.mask
& b
.mask
;
141 return TNUM(v
& ~mu
, mu
);
144 struct tnum
tnum_cast(struct tnum a
, u8 size
)
146 a
.value
&= (1ULL << (size
* 8)) - 1;
147 a
.mask
&= (1ULL << (size
* 8)) - 1;
151 bool tnum_is_aligned(struct tnum a
, u64 size
)
155 return !((a
.value
| a
.mask
) & (size
- 1));
158 bool tnum_in(struct tnum a
, struct tnum b
)
160 if (b
.mask
& ~a
.mask
)
163 return a
.value
== b
.value
;
166 int tnum_strn(char *str
, size_t size
, struct tnum a
)
168 return snprintf(str
, size
, "(%#llx; %#llx)", a
.value
, a
.mask
);
170 EXPORT_SYMBOL_GPL(tnum_strn
);
172 int tnum_sbin(char *str
, size_t size
, struct tnum a
)
176 for (n
= 64; n
; n
--) {
180 else if (a
.value
& 1)
188 str
[min(size
- 1, (size_t)64)] = 0;