/*
 * The MIT License (MIT)
 *
 * Copyright (c) 2024 by Henry Gabryjelski
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "id48_internals.h"

typedef struct _INPUT_BITS2 {
    // least significant 55 bits are valid/used; lsb == input₀₀
    uint64_t Raw;
} INPUT_BITS2;

typedef struct _OUTPUT_BITS2 {
    // least significant 55 bits valid
    // Raw₅₄..Raw₄₈ == ignored bits to get to s₀₇
    // Raw₄₇..Raw₂₀ == 28-bit challenge value frn
    // Raw₁₉..Raw₀₀ == 20-bit response value grn
    uint64_t Raw;
} OUTPUT_BITS2;

typedef struct _OUTPUT_INDEX2 {
    // Opaque value for use in lookup of the output bit
    // only least significant 20 bits are valid
    uint32_t Raw;
} OUTPUT_INDEX2;

#define nullptr ((void*)0)
#pragma region // reverse_bits()
static inline uint8_t reverse_bits_08(uint8_t n) {
    uint8_t bitsToSwap = sizeof(n) * 8;
    uint8_t mask = (uint8_t)(~((uint8_t)(0u))); // equivalent to uint8_t mask = 0b11111111;
    while (bitsToSwap >>= 1) {
        mask ^= mask << (bitsToSwap); // first pass converts mask to 0b00001111;
        n = (uint8_t)(((n & ~mask) >> bitsToSwap) | ((n & mask) << bitsToSwap)); // divide and conquer
    }
    return n;
}
static inline uint16_t reverse_bits_16(uint16_t n) {
    uint8_t bitsToSwap = sizeof(n) * 8;
    uint16_t mask = (uint16_t)(~((uint16_t)(0u))); // equivalent to uint16_t mask = 0b1111111111111111;
    while (bitsToSwap >>= 1) {
        mask ^= mask << (bitsToSwap); // first pass converts mask to 0b0000000011111111;
        n = (uint16_t)(((n & ~mask) >> bitsToSwap) | ((n & mask) << bitsToSwap)); // divide and conquer
    }
    return n;
}
static inline uint32_t reverse_bits_32(uint32_t n) {
    uint8_t bitsToSwap = sizeof(n) * 8;
    uint32_t mask = (uint32_t)(~((uint32_t)(0u))); // equivalent to uint32_t mask = 0b11111111111111111111111111111111;
    while (bitsToSwap >>= 1) {
        mask ^= mask << (bitsToSwap); // first pass converts mask to 0b00000000000000001111111111111111;
        n = (uint32_t)(((n & ~mask) >> bitsToSwap) | ((n & mask) << bitsToSwap)); // divide and conquer
    }
    return n;
}
static inline uint64_t reverse_bits_64(uint64_t n) {
    uint8_t bitsToSwap = sizeof(n) * 8;
    uint64_t mask = (uint64_t)(~((uint64_t)(0u))); // equivalent to a uint64_t mask with all 64 bits set
    while (bitsToSwap >>= 1) {
        mask ^= mask << (bitsToSwap); // first pass converts mask to the lower 32 bits set
        n = (uint64_t)(((n & ~mask) >> bitsToSwap) | ((n & mask) << bitsToSwap)); // divide and conquer
    }
    return n;
}
#pragma endregion // reverse_bits()
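// Illustrative only (not part of the original library): a disabled self-check
// sketch for the reverse_bits_* helpers above. It spot-checks that a single set
// bit moves to the mirrored position and that reversing twice is the identity.
// Enable locally if a quick sanity check is ever desired.
#if 0
static inline bool reverse_bits_example_check(void) {
    bool ok = true;
    ok = ok && (reverse_bits_08(0x01u) == 0x80u);
    ok = ok && (reverse_bits_16(0x0001u) == 0x8000u);
    ok = ok && (reverse_bits_32(0x00000001u) == 0x80000000u);
    ok = ok && (reverse_bits_64(reverse_bits_64(0x0123456789ABCDEFull)) == 0x0123456789ABCDEFull);
    return ok;
}
#endif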
#pragma region // id48lib state register
// Bit:  ₆₃  ₆₂  ₆₁  ₆₀  ₅₉  ₅₈  ₅₇  ₅₆  ₅₅  ₅₄  ₅₃  ₅₂  ₅₁  ₅₀  ₄₉  ₄₈  ₄₇  ₄₆  ₄₅  ₄₄  ₄₃  ₄₂  ₄₁  ₄₀  ₃₉  ₃₈  ₃₇  ₃₆  ₃₅  ₃₄  ₃₃  ₃₂
// Reg:   x   x   x r₀₆ r₀₅ r₀₄ r₀₃ r₀₂ r₀₁ r₀₀ m₀₆ m₀₅ m₀₄ m₀₃ m₀₂ m₀₁ m₀₀ l₀₆ l₀₅ l₀₄ l₀₃ l₀₂ l₀₁ l₀₀ g₂₂ g₂₁ g₂₀ g₁₉ g₁₈ g₁₇ g₁₆ g₁₅
//
// Bit:  ₃₁  ₃₀  ₂₉  ₂₈  ₂₇  ₂₆  ₂₅  ₂₄  ₂₃  ₂₂  ₂₁  ₂₀  ₁₉  ₁₈  ₁₇  ₁₆  ₁₅  ₁₄  ₁₃  ₁₂  ₁₁  ₁₀  ₀₉  ₀₈  ₀₇  ₀₆  ₀₅  ₀₄  ₀₃  ₀₂  ₀₁  ₀₀
// Reg: g₁₄ g₁₃ g₁₂ g₁₁ g₁₀ g₀₉ g₀₈ g₀₇ g₀₆ g₀₅ g₀₄ g₀₃ g₀₂ g₀₁ g₀₀ h₁₂ h₁₁ h₁₀ h₀₉ h₀₈ h₀₇ h₀₆ h₀₅ h₀₄ h₀₃ h₀₂ h₀₁ h₀₀   x   x   x   1
#pragma endregion // id48lib state register
#pragma region // bit definitions for the (stable) id48lib state register
// #define SSR_BIT_i 62 -- could do this ... one fewer parameter
#define SSR_BIT_R06 60
#define SSR_BIT_R05 59
#define SSR_BIT_R04 58
#define SSR_BIT_R03 57
#define SSR_BIT_R02 56
#define SSR_BIT_R01 55
#define SSR_BIT_R00 54
#define SSR_BIT_M06 53
#define SSR_BIT_M05 52
#define SSR_BIT_M04 51
#define SSR_BIT_M03 50
#define SSR_BIT_M02 49
#define SSR_BIT_M01 48
#define SSR_BIT_M00 47
#define SSR_BIT_L06 46
#define SSR_BIT_L05 45
#define SSR_BIT_L04 44
#define SSR_BIT_L03 43
#define SSR_BIT_L02 42
#define SSR_BIT_L01 41
#define SSR_BIT_L00 40
#define SSR_BIT_G22 39
#define SSR_BIT_G21 38
#define SSR_BIT_G20 37
#define SSR_BIT_G19 36
#define SSR_BIT_G18 35
#define SSR_BIT_G17 34
#define SSR_BIT_G16 33
#define SSR_BIT_G15 32
#define SSR_BIT_G14 31
#define SSR_BIT_G13 30
#define SSR_BIT_G12 29
#define SSR_BIT_G11 28
#define SSR_BIT_G10 27
#define SSR_BIT_G09 26
#define SSR_BIT_G08 25
#define SSR_BIT_G07 24
#define SSR_BIT_G06 23
#define SSR_BIT_G05 22
#define SSR_BIT_G04 21
#define SSR_BIT_G03 20
#define SSR_BIT_G02 19
#define SSR_BIT_G01 18
#define SSR_BIT_G00 17
#define SSR_BIT_H12 16
#define SSR_BIT_H11 15
#define SSR_BIT_H10 14
#define SSR_BIT_H09 13
#define SSR_BIT_H08 12
#define SSR_BIT_H07 11
#define SSR_BIT_H06 10
#define SSR_BIT_H05  9
#define SSR_BIT_H04  8
#define SSR_BIT_H03  7
#define SSR_BIT_H02  6
#define SSR_BIT_H01  5
#define SSR_BIT_H00  4
//                   3 // used only when unstable (during calculations)
//                   2 // used only when unstable (during calculations)
//                   1 // used only when unstable (during calculations)
//                   0 // 1 == stable, 0 == unstable (during calculations)
#pragma endregion // bit definitions for the (stable) id48lib state register
#pragma region // Unstable (during calculations) id48lib state register
// Bit:  ₆₃  ₆₂  ₆₁  ₆₀  ₅₉  ₅₈  ₅₇  ₅₆  ₅₅  ₅₄  ₅₃  ₅₂  ₅₁  ₅₀  ₄₉  ₄₈  ₄₇  ₄₆  ₄₅  ₄₄  ₄₃  ₄₂  ₄₁  ₄₀  ₃₉  ₃₈  ₃₇  ₃₆  ₃₅  ₃₄  ₃₃  ₃₂
// Reg:   i   j r₀₆ r₀₅ r₀₄ r₀₃ r₀₂ r₀₁ r₀₀ m₀₆ m₀₅ m₀₄ m₀₃ m₀₂ m₀₁ m₀₀ l₀₆ l₀₅ l₀₄ l₀₃ l₀₂ l₀₁ l₀₀ g₂₂ g₂₁ g₂₀ g₁₉ g₁₈ g₁₇ g₁₆ g₁₅ g₁₄
//
// Bit:  ₃₁  ₃₀  ₂₉  ₂₈  ₂₇  ₂₆  ₂₅  ₂₄  ₂₃  ₂₂  ₂₁  ₂₀  ₁₉  ₁₈  ₁₇  ₁₆  ₁₅  ₁₄  ₁₃  ₁₂  ₁₁  ₁₀  ₀₉  ₀₈  ₀₇  ₀₆  ₀₅  ₀₄  ₀₃  ₀₂  ₀₁  ₀₀
// Reg: g₁₃ g₁₂ g₁₁ g₁₀ g₀₉ g₀₈ g₀₇ g₀₆ g₀₅ g₀₄ g₀₃ g₀₂ g₀₁ g₀₀ h₁₂ h₁₁ h₁₀ h₀₉ h₀₈ h₀₇ h₀₆ h₀₅ h₀₄ h₀₃ h₀₂ h₀₁ h₀₀   _   a   b   c   0
#pragma endregion // Unstable (during calculations) id48lib state register

// Summary of XOR baseline that can be excluded because they are part of a single 64-bit `<< 1` operation:
#pragma region // bit definitions for the (unstable) id48lib state register
#define SSR_UNSTABLE_BIT_i       63
#define SSR_UNSTABLE_BIT_j       62
#define SSR_UNSTABLE_OLD_BIT_R06 61 // valid only during calculations aka R07 ... just has to have a name... doesn't matter what
#define SSR_UNSTABLE_OLD_BIT_R05 60
#define SSR_UNSTABLE_OLD_BIT_R04 59
#define SSR_UNSTABLE_OLD_BIT_R03 58
#define SSR_UNSTABLE_OLD_BIT_R02 57
#define SSR_UNSTABLE_OLD_BIT_R01 56
#define SSR_UNSTABLE_OLD_BIT_R00 55
#define SSR_UNSTABLE_OLD_BIT_M06 54
#define SSR_UNSTABLE_OLD_BIT_M05 53
#define SSR_UNSTABLE_OLD_BIT_M04 52
#define SSR_UNSTABLE_OLD_BIT_M03 51
#define SSR_UNSTABLE_OLD_BIT_M02 50
#define SSR_UNSTABLE_OLD_BIT_M01 49
#define SSR_UNSTABLE_OLD_BIT_M00 48
#define SSR_UNSTABLE_OLD_BIT_L06 47
#define SSR_UNSTABLE_OLD_BIT_L05 46
#define SSR_UNSTABLE_OLD_BIT_L04 45
#define SSR_UNSTABLE_OLD_BIT_L03 44
#define SSR_UNSTABLE_OLD_BIT_L02 43
#define SSR_UNSTABLE_OLD_BIT_L01 42
#define SSR_UNSTABLE_OLD_BIT_L00 41
#define SSR_UNSTABLE_OLD_BIT_G22 40
#define SSR_UNSTABLE_OLD_BIT_G21 39
#define SSR_UNSTABLE_OLD_BIT_G20 38
#define SSR_UNSTABLE_OLD_BIT_G19 37
#define SSR_UNSTABLE_OLD_BIT_G18 36
#define SSR_UNSTABLE_OLD_BIT_G17 35
#define SSR_UNSTABLE_OLD_BIT_G16 34
#define SSR_UNSTABLE_OLD_BIT_G15 33
#define SSR_UNSTABLE_OLD_BIT_G14 32
#define SSR_UNSTABLE_OLD_BIT_G13 31
#define SSR_UNSTABLE_OLD_BIT_G12 30
#define SSR_UNSTABLE_OLD_BIT_G11 29
#define SSR_UNSTABLE_OLD_BIT_G10 28
#define SSR_UNSTABLE_OLD_BIT_G09 27
#define SSR_UNSTABLE_OLD_BIT_G08 26
#define SSR_UNSTABLE_OLD_BIT_G07 25
#define SSR_UNSTABLE_OLD_BIT_G06 24
#define SSR_UNSTABLE_OLD_BIT_G05 23
#define SSR_UNSTABLE_OLD_BIT_G04 22
#define SSR_UNSTABLE_OLD_BIT_G03 21
#define SSR_UNSTABLE_OLD_BIT_G02 20
#define SSR_UNSTABLE_OLD_BIT_G01 19
#define SSR_UNSTABLE_OLD_BIT_G00 18
#define SSR_UNSTABLE_OLD_BIT_H12 17
#define SSR_UNSTABLE_OLD_BIT_H11 16
#define SSR_UNSTABLE_OLD_BIT_H10 15
#define SSR_UNSTABLE_OLD_BIT_H09 14
#define SSR_UNSTABLE_OLD_BIT_H08 13
#define SSR_UNSTABLE_OLD_BIT_H07 12
#define SSR_UNSTABLE_OLD_BIT_H06 11
#define SSR_UNSTABLE_OLD_BIT_H05 10
#define SSR_UNSTABLE_OLD_BIT_H04  9
#define SSR_UNSTABLE_OLD_BIT_H03  8
#define SSR_UNSTABLE_OLD_BIT_H02  7
#define SSR_UNSTABLE_OLD_BIT_H01  6
#define SSR_UNSTABLE_OLD_BIT_H00  5
#define SSR_UNSTABLE_NEW_BIT_H00  4 // ... new value of H00 goes here ...
#define SSR_UNSTABLE_BIT_a        3 // valid only during calculations (ssr & 0b1 == 0b0), else ???
#define SSR_UNSTABLE_BIT_b        2 // valid only during calculations (ssr & 0b1 == 0b0), else ???
#define SSR_UNSTABLE_BIT_c        1 // valid only during calculations (ssr & 0b1 == 0b0), else ???
//                                0 // a value of 0 here marks the unstable state
#pragma endregion // bit definitions for the (unstable) id48lib state register
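// Illustrative only (not in the original source): disabled compile-time spot
// checks of the relationship between the two naming schemes above. After the
// single 64-bit `<< 1` that starts a calculation, every "OLD" bit sits exactly
// one position above its stable location. (_Static_assert requires C11, per
// the note later in this file, so these stay disabled by default.)
#if 0
_Static_assert(SSR_UNSTABLE_OLD_BIT_G22 == SSR_BIT_G22 + 1, "shifted G22 position");
_Static_assert(SSR_UNSTABLE_OLD_BIT_H00 == SSR_BIT_H00 + 1, "shifted H00 position");
_Static_assert(SSR_UNSTABLE_OLD_BIT_R06 == SSR_BIT_R06 + 1, "shifted R06 position");
#endif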
#pragma region // single bit test/set/clear/flip/assign
static inline bool is_ssr_state_stable(const ID48LIBX_STATE_REGISTERS* ssr) { ASSERT(ssr != nullptr); return ((ssr->Raw & 1u) == 1u); }
static inline bool test_single_ssr_bit(const ID48LIBX_STATE_REGISTERS* ssr, size_t bit_index) { ASSERT(ssr != nullptr); ASSERT(bit_index < (sizeof(uint64_t) * 8)); return ((ssr->Raw) >> bit_index) & 1; }
static inline void set_single_ssr_bit(ID48LIBX_STATE_REGISTERS* ssr, size_t bit_index) { ASSERT(ssr != nullptr); ASSERT(bit_index < (sizeof(uint64_t) * 8)); ssr->Raw |= ((uint64_t)(1ull << bit_index)); }
static inline void clear_single_ssr_bit(ID48LIBX_STATE_REGISTERS* ssr, size_t bit_index) { ASSERT(ssr != nullptr); ASSERT(bit_index < (sizeof(uint64_t) * 8)); ssr->Raw &= ~((uint64_t)(1ull << bit_index)); }
static inline void flip_single_ssr_bit(ID48LIBX_STATE_REGISTERS* ssr, size_t bit_index) { ASSERT(ssr != nullptr); ASSERT(bit_index < (sizeof(uint64_t) * 8)); ssr->Raw ^= ((uint64_t)(1ull << bit_index)); }
static inline void assign_single_ssr_bit(ID48LIBX_STATE_REGISTERS* ssr, size_t bit_index, bool value) {
    ASSERT(ssr != nullptr);
    ASSERT(bit_index < (sizeof(uint64_t) * 8));
    if (value) {
        set_single_ssr_bit(ssr, bit_index);
    } else {
        clear_single_ssr_bit(ssr, bit_index);
    }
}
#pragma endregion // single bit test/set/clear/flip/assign
#pragma region // test/assign of temporaries a/b/c/i/j
static inline bool test_temporary_a(const ID48LIBX_STATE_REGISTERS* ssr) { ASSERT(!is_ssr_state_stable(ssr)); return test_single_ssr_bit(ssr, SSR_UNSTABLE_BIT_a); }
static inline bool test_temporary_b(const ID48LIBX_STATE_REGISTERS* ssr) { ASSERT(!is_ssr_state_stable(ssr)); return test_single_ssr_bit(ssr, SSR_UNSTABLE_BIT_b); }
static inline bool test_temporary_c(const ID48LIBX_STATE_REGISTERS* ssr) { ASSERT(!is_ssr_state_stable(ssr)); return test_single_ssr_bit(ssr, SSR_UNSTABLE_BIT_c); }
static inline bool test_temporary_i(const ID48LIBX_STATE_REGISTERS* ssr) { ASSERT(!is_ssr_state_stable(ssr)); return test_single_ssr_bit(ssr, SSR_UNSTABLE_BIT_i); }
static inline bool test_temporary_j(const ID48LIBX_STATE_REGISTERS* ssr) { ASSERT(!is_ssr_state_stable(ssr)); return test_single_ssr_bit(ssr, SSR_UNSTABLE_BIT_j); }

static inline void assign_temporary_a(ID48LIBX_STATE_REGISTERS* ssr, bool v) { ASSERT(!is_ssr_state_stable(ssr)); assign_single_ssr_bit(ssr, SSR_UNSTABLE_BIT_a, v); }
static inline void assign_temporary_b(ID48LIBX_STATE_REGISTERS* ssr, bool v) { ASSERT(!is_ssr_state_stable(ssr)); assign_single_ssr_bit(ssr, SSR_UNSTABLE_BIT_b, v); }
static inline void assign_temporary_c(ID48LIBX_STATE_REGISTERS* ssr, bool v) { ASSERT(!is_ssr_state_stable(ssr)); assign_single_ssr_bit(ssr, SSR_UNSTABLE_BIT_c, v); }
static inline void assign_temporary_i(ID48LIBX_STATE_REGISTERS* ssr, bool v) { ASSERT(!is_ssr_state_stable(ssr)); assign_single_ssr_bit(ssr, SSR_UNSTABLE_BIT_i, v); }
static inline void assign_temporary_j(ID48LIBX_STATE_REGISTERS* ssr, bool v) { ASSERT(!is_ssr_state_stable(ssr)); assign_single_ssr_bit(ssr, SSR_UNSTABLE_BIT_j, v); }
#pragma endregion // test/assign of temporaries a/b/c/i/j
#pragma region // Mask & Macro to get registers (in minimal bit form)
// ------------------------>   60   56   52   48   44   40   36   32   28   24   20   16   12    8    4    0
//                              |    |    |    |    |    |    |    |    |    |    |    |    |    |    |    |
#define SSR_BITMASK_REG_H   (0x000000000001FFF0ull) // (0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0001'1111'1111'1111'0000ull)
#define SSR_BITMASK_REG_G   (0x000000FFFFFE0000ull) // (0b0000'0000'0000'0000'0000'0000'1111'1111'1111'1111'1111'1110'0000'0000'0000'0000ull)
#define SSR_BITMASK_REG_L   (0x00007F0000000000ull) // (0b0000'0000'0000'0000'0111'1111'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000ull)
#define SSR_BITMASK_REG_M   (0x003F800000000000ull) // (0b0000'0000'0011'1111'1000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000ull)
#define SSR_BITMASK_REG_R   (0x1FC0000000000000ull) // (0b0001'1111'1100'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000ull)
#define SSR_BITMASK_REG_ALL (0x1FFFFFFFFFFFFFF0ull) // (0b0001'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'0000ull)
//                              |    |    |    |    |    |    |    |    |    |    |    |    |    |    |    |
// ------------------------>   60   56   52   48   44   40   36   32   28   24   20   16   12    8    4    0

#define SSR_BITMASK_WITHOUT_REG_H    (~(SSR_BITMASK_REG_H))
#define SSR_BITMASK_WITHOUT_REG_G    (~(SSR_BITMASK_REG_G))
#define SSR_BITMASK_WITHOUT_REG_L    (~(SSR_BITMASK_REG_L))
#define SSR_BITMASK_WITHOUT_REG_M    (~(SSR_BITMASK_REG_M))
#define SSR_BITMASK_WITHOUT_REG_R    (~(SSR_BITMASK_REG_R))
#define SSR_BITMASK_WITHOUT_ANY_REGS (~(SSR_BITMASK_REG_ALL))
#define SSR_SHIFT_COUNT_REG_H ( 4)
#define SSR_SHIFT_COUNT_REG_G (17)
#define SSR_SHIFT_COUNT_REG_L (40)
#define SSR_SHIFT_COUNT_REG_M (47)
#define SSR_SHIFT_COUNT_REG_R (54)
#define SSR_VALUE_MASK_REG_H (0x001FFFu) // 13 bits
#define SSR_VALUE_MASK_REG_G (0x7FFFFFu) // 23 bits
#define SSR_VALUE_MASK_REG_L (0x00007Fu) //  7 bits
#define SSR_VALUE_MASK_REG_M (0x00007Fu) //  7 bits
#define SSR_VALUE_MASK_REG_R (0x00007Fu) //  7 bits
static inline uint16_t get_register_h(const ID48LIBX_STATE_REGISTERS* ssr) { return ((uint16_t)(ssr->Raw >> SSR_SHIFT_COUNT_REG_H)) & (SSR_VALUE_MASK_REG_H); }
static inline uint32_t get_register_g(const ID48LIBX_STATE_REGISTERS* ssr) { return ((uint32_t)(ssr->Raw >> SSR_SHIFT_COUNT_REG_G)) & (SSR_VALUE_MASK_REG_G); }
static inline uint8_t  get_register_l(const ID48LIBX_STATE_REGISTERS* ssr) { return ((uint8_t)(ssr->Raw >> SSR_SHIFT_COUNT_REG_L)) & (SSR_VALUE_MASK_REG_L); }
static inline uint8_t  get_register_m(const ID48LIBX_STATE_REGISTERS* ssr) { return ((uint8_t)(ssr->Raw >> SSR_SHIFT_COUNT_REG_M)) & (SSR_VALUE_MASK_REG_M); }
static inline uint8_t  get_register_r(const ID48LIBX_STATE_REGISTERS* ssr) { return ((uint8_t)(ssr->Raw >> SSR_SHIFT_COUNT_REG_R)) & (SSR_VALUE_MASK_REG_R); }
static inline void set_register_h(ID48LIBX_STATE_REGISTERS* ssr, uint16_t v) { ASSERT((v & SSR_VALUE_MASK_REG_H) == v); ssr->Raw = (ssr->Raw & SSR_BITMASK_WITHOUT_REG_H) | (((uint64_t)(v & SSR_VALUE_MASK_REG_H)) << SSR_SHIFT_COUNT_REG_H); }
static inline void set_register_g(ID48LIBX_STATE_REGISTERS* ssr, uint32_t v) { ASSERT((v & SSR_VALUE_MASK_REG_G) == v); ssr->Raw = (ssr->Raw & SSR_BITMASK_WITHOUT_REG_G) | (((uint64_t)(v & SSR_VALUE_MASK_REG_G)) << SSR_SHIFT_COUNT_REG_G); }
static inline void set_register_l(ID48LIBX_STATE_REGISTERS* ssr, uint8_t v)  { ASSERT((v & SSR_VALUE_MASK_REG_L) == v); ssr->Raw = (ssr->Raw & SSR_BITMASK_WITHOUT_REG_L) | (((uint64_t)(v & SSR_VALUE_MASK_REG_L)) << SSR_SHIFT_COUNT_REG_L); }
static inline void set_register_m(ID48LIBX_STATE_REGISTERS* ssr, uint8_t v)  { ASSERT((v & SSR_VALUE_MASK_REG_M) == v); ssr->Raw = (ssr->Raw & SSR_BITMASK_WITHOUT_REG_M) | (((uint64_t)(v & SSR_VALUE_MASK_REG_M)) << SSR_SHIFT_COUNT_REG_M); }
static inline void set_register_r(ID48LIBX_STATE_REGISTERS* ssr, uint8_t v)  { ASSERT((v & SSR_VALUE_MASK_REG_R) == v); ssr->Raw = (ssr->Raw & SSR_BITMASK_WITHOUT_REG_R) | (((uint64_t)(v & SSR_VALUE_MASK_REG_R)) << SSR_SHIFT_COUNT_REG_R); }
#pragma endregion // Mask & Macro to get registers (in minimal bit form)
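// Illustrative only (not in the original source): disabled compile-time checks
// that the per-register masks defined above are consistent with one another --
// pairwise non-overlapping and together covering exactly SSR_BITMASK_REG_ALL.
#if 0
_Static_assert((SSR_BITMASK_REG_H | SSR_BITMASK_REG_G | SSR_BITMASK_REG_L |
                SSR_BITMASK_REG_M | SSR_BITMASK_REG_R) == SSR_BITMASK_REG_ALL,
               "register masks must cover exactly the register area");
_Static_assert((SSR_BITMASK_REG_L & SSR_BITMASK_REG_M) == 0, "L and M masks must not overlap");
_Static_assert((SSR_BITMASK_REG_M & SSR_BITMASK_REG_R) == 0, "M and R masks must not overlap");
#endif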
/// Calculates and returns 56-bit value p₅₅..p₀₀
/// per Definition 3.11:
///     p = p₀₀..p₅₅ = ( K₄₀..K₉₅ ) + ( N₀₀..N₅₅ )
/// <param name="k96">key in pm3 order</param>
/// <param name="n56">nonce in pm3 order</param>
/// <returns>56-bit value p₅₅..p₀₀</returns>
static inline uint64_t calculate__p55_p00(const ID48LIB_KEY* k96, const ID48LIB_NONCE* n56) {
    // messy ... have to reverse the bits AND shift them into position,
    // perform the addition, and then reverse bits again to return to
    // native bit order (subscript is same as bit position).
    //
    // 1. for each byte, reverse bit order and shift into 64-bit tmp
    // 2. add the two 56-bit tmp values
    // 3. keeping only the low 56 bits ... reverse the bits
    ASSERT(k96 != nullptr);
    ASSERT(n56 != nullptr);
    uint64_t k40_k95 = 0;
    uint64_t n00_n55 = 0;
    // k [ 6] :== K₄₇..K₄₀
    // ...
    // k [ 0] :== K₉₅..K₈₈
    //
    // rn[ 6] :== N₀₇..N₀₀
    // ...
    // rn[ 0] :== N₅₅..N₄₈
    for (int8_t i = 6; i >= 0; --i) {
        uint8_t t1 = reverse_bits_08(k96->k[i]);
        k40_k95 = (k40_k95 << 8) | t1;
        uint8_t t2 = reverse_bits_08(n56->rn[i]);
        n00_n55 = (n00_n55 << 8) | t2;
    }
    uint64_t result = k40_k95 + n00_n55;
    // shift so msb == p₀₀ (p₀₀..p₅₅0⁸)
    result <<= 8;
    // reverse the 64-bit value to get: 0⁸p₅₅..p₀₀
    result = reverse_bits_64(result);
    return result;
}
/// Calculate and return q₄₃..q₀₀
/// per Definition 3.11:
///     bitstream_q = (p₀₂ ... p₄₅) ⊕ (p₀₈ ... p₅₁) ⊕ (p₁₂ ... p₅₅)
///                   <--  44b  -->   <--  44b  -->   <--  44b  -->
///     q43_q00     = (p₄₅ ... p₀₂) ⊕ (p₅₁ ... p₀₈) ⊕ (p₅₅ ... p₁₂)
/// <param name="p55_p00">56-bit value: p₅₅..p₀₀</param>
/// <returns>44-bit value: q₄₃..q₀₀</returns>
static inline uint64_t calculate__q43_q00(const uint64_t* p55_p00) {
    ASSERT(p55_p00 != nullptr);
    static const uint64_t C_BITMASK44 = (1ull << 44) - 1u;
    uint64_t result = (*p55_p00 >> 2);
    result ^= (*p55_p00 >> 8);
    result ^= (*p55_p00 >> 12);
    result &= C_BITMASK44;
    return result;
}
/// Relies on old g22 bit (now in L00).
/// May modify G00, G03, G04, G05, G06, G13, G16
static inline void g_successor(ID48LIBX_STATE_REGISTERS* ssr) {
    ASSERT(ssr != nullptr);
    ASSERT(!is_ssr_state_stable(ssr));
    assign_single_ssr_bit(ssr, SSR_BIT_G00, test_single_ssr_bit(ssr, SSR_UNSTABLE_BIT_j));
    //alternatively: set to zero, because `j` includes the start bit state
    //if (test_single_ssr_bit(ssr, SSR_UNSTABLE_BIT_j)) {
    //    flip_single_ssr_bit(ssr, SSR_BIT_G00);
    //}
    if (test_single_ssr_bit(ssr, SSR_UNSTABLE_OLD_BIT_G22)) {
        // taps ==> [ n, 16, 13, 6, 5, 3, 0 ]
        // 0b000'0001'0010'0000'0110'1001 == 0x012069
        static const uint64_t G22_XOR_MASK = 0x0000000240D20000ull;
        // static assert is only available in C11 (or C++11) and later...
        // _Static_assert(G22_XOR_MASK == (0x012069ull << SSR_SHIFT_COUNT_REG_G), "G22 XOR Mask invalid");
        ssr->Raw ^= G22_XOR_MASK;
    }
    if (test_single_ssr_bit(ssr, SSR_UNSTABLE_BIT_i)) {
        flip_single_ssr_bit(ssr, SSR_BIT_G04);
    }
}
static inline ID48LIBX_STATE_REGISTERS init_id48libx_state_register(const ID48LIB_KEY* k96, const ID48LIB_NONCE* n56) {
    ASSERT(k96 != nullptr);
    ASSERT(n56 != nullptr);
    ID48LIBX_STATE_REGISTERS result;
    ID48LIBX_STATE_REGISTERS* const ssr = &result; // the pointer is constant ... not the value it points to
    ssr->Raw = 1ull; // start from an all-zero register state, marked stable

    const uint64_t p55_p00 = calculate__p55_p00(k96, n56);
    // p55_p00 is used to set initial value of register l
    static const uint8_t C_BITMASK7 = ((1u << 7) - 1u);
    const uint8_t l = (uint8_t)((
        ((uint8_t)(p55_p00 >> 55)) ^ //   0   0   0   0   0   0 p55
        ((uint8_t)(p55_p00 >> 51)) ^ //   0   0 p55 p54 p53 p52 p51
        ((uint8_t)(p55_p00 >> 45))   // p51 p50 p49 p48 p47 p46 p45
        ) & C_BITMASK7);
    set_register_l(ssr, l);
    ASSERT(l == get_register_l(ssr));

    // p is used to calculate q
    const uint64_t q43_q00 = calculate__q43_q00(&p55_p00);

    // init( q₂₀..q₄₂, q₀₀..q₁₉ )
    // ===>      G(q₂₀..q₄₂, 0, q₀₀..q₁₉)
    // ===>      g₀₀..g₂₂ :=== q₂₀..q₄₂
    //      and  j₀₀..j₁₉ :=== q₀₀..q₁₉
    //
    // But, since I'm storing the register with g₀₀ as lsb:
    // ===>      g₂₂..g₀₀ :=== q₄₂..q₂₀
    static const uint32_t C_BITMASK23 = ((1u << 23) - 1u);
    const uint32_t g = ((uint32_t)(q43_q00 >> 20)) & C_BITMASK23;
    set_register_g(ssr, g);
    ASSERT(g == get_register_g(ssr));

    // input bits for `j` during init are q00..q19, with q19 used first
    // For ease of use, I'll generate this as q00..q19, so the loop
    // can test the lsb (and then shift it right one bit)
    uint32_t q00_q19 = reverse_bits_32(((uint32_t)q43_q00) << 12);
    uint32_t q_lsb_next = q00_q19;

    // G(g,0,j) twenty times, using q19, q18, ... q00 for `j`
    for (uint8_t ix = 0; ix < 20; ++ix) {
        ASSERT(is_ssr_state_stable(ssr));
        ssr->Raw <<= 1; // starts the process ... it's now an unstable value
        ASSERT(!is_ssr_state_stable(ssr));
        assign_single_ssr_bit(ssr, SSR_UNSTABLE_BIT_j, (q_lsb_next & 1u) != 0);
        assign_single_ssr_bit(ssr, SSR_UNSTABLE_BIT_i, 0);
        g_successor(ssr);
        q_lsb_next >>= 1;
        // save only the register bits
        ssr->Raw &= SSR_BITMASK_REG_ALL;
        // mark this as a stable value
        ssr->Raw |= 1ull;
    }

    // h00..h12 is defined as 0 p00..p11
    // but since we're storing h as h12..h00: p11..p00 0
    //
    // NOTE: delay `h` until loops done, else low bits
    //       will shift into / break calculation of g() above
    static const uint16_t C_BITMASK_H_INIT = (1u << 13) - 2u; // 0b1'1111'1111'1110
    const uint16_t h = (((uint16_t)p55_p00) << 1) & C_BITMASK_H_INIT;
    set_register_h(ssr, h);
    ASSERT(h == get_register_h(ssr));

    return result;
}
/// H(h) matches the research paper, definition 3.3
/// Reads bits H01, H08, H09, H11, H12.
///
/// If ssr is in unstable state, caller is responsible for ensuring
/// the values have not changed.
static inline bool calculate_feedback_h(const ID48LIBX_STATE_REGISTERS* ssr) {
    ASSERT(ssr != nullptr);
    // ( h₀₁ && h₀₈ ) || ( h₀₉ && h₁₁ ) || ( !h₁₂ )
    // \____ a1 ____/    \____ a2 ____/    \_ a3 _/
    // result == xor(a1,a2,a3)
    bool a1 = is_ssr_state_stable(ssr) ?
        test_single_ssr_bit(ssr, SSR_BIT_H01) && test_single_ssr_bit(ssr, SSR_BIT_H08) :
        test_single_ssr_bit(ssr, SSR_UNSTABLE_OLD_BIT_H01) && test_single_ssr_bit(ssr, SSR_UNSTABLE_OLD_BIT_H08);
    bool a2 = is_ssr_state_stable(ssr) ?
        test_single_ssr_bit(ssr, SSR_BIT_H09) && test_single_ssr_bit(ssr, SSR_BIT_H11) :
        test_single_ssr_bit(ssr, SSR_UNSTABLE_OLD_BIT_H09) && test_single_ssr_bit(ssr, SSR_UNSTABLE_OLD_BIT_H11);
    bool a3 = is_ssr_state_stable(ssr) ?
        !test_single_ssr_bit(ssr, SSR_BIT_H12) :
        !test_single_ssr_bit(ssr, SSR_UNSTABLE_OLD_BIT_H12);
    bool result = false;
    if (a1) result = !result;
    if (a2) result = !result;
    if (a3) result = !result;
    return result;
}
/// fₗ(...) matches the research paper, definition 3.4
/// hard-coded to use bits for calculation of 'a'
static inline bool calculate_feedback_l(const ID48LIBX_STATE_REGISTERS* ssr) {
    ASSERT(ssr != nullptr);
    // a = fₗ( g00 g04 g06 g13 g18 h03 ) ⊕ g22 ⊕ r02 ⊕ r06
    //     fₗ(  x₀  x₁  x₂  x₃  x₄  x₅ )
    bool x0 = test_single_ssr_bit(ssr, is_ssr_state_stable(ssr) ? SSR_BIT_G00 : SSR_UNSTABLE_OLD_BIT_G00);
    bool x1 = test_single_ssr_bit(ssr, is_ssr_state_stable(ssr) ? SSR_BIT_G04 : SSR_UNSTABLE_OLD_BIT_G04);
    bool x2 = test_single_ssr_bit(ssr, is_ssr_state_stable(ssr) ? SSR_BIT_G06 : SSR_UNSTABLE_OLD_BIT_G06);
    bool x3 = test_single_ssr_bit(ssr, is_ssr_state_stable(ssr) ? SSR_BIT_G13 : SSR_UNSTABLE_OLD_BIT_G13);
    bool x4 = test_single_ssr_bit(ssr, is_ssr_state_stable(ssr) ? SSR_BIT_G18 : SSR_UNSTABLE_OLD_BIT_G18);
    bool x5 = test_single_ssr_bit(ssr, is_ssr_state_stable(ssr) ? SSR_BIT_H03 : SSR_UNSTABLE_OLD_BIT_H03);

    bool line1 = !x0 && !x2 &&  x3;
    bool line2 =  x2 &&  x4 && !x5;
    bool line3 =  x0 && !x1 && !x4;
    bool line4 =  x1 && !x3 &&  x5;

    bool result = line1 || line2 || line3 || line4;
    return result;
}
/// fₘ(...) matches the research paper, definition 3.5
/// hard-coded to use bits for calculation of 'b'
static inline bool calculate_feedback_m(const ID48LIBX_STATE_REGISTERS* ssr) {
    ASSERT(ssr != nullptr);
    // b = fₘ( g01 g05 g10 g15 h00 h07 ) ⊕ l00 ⊕ l03 ⊕ l06
    //     fₘ(  x₀  x₁  x₂  x₃  x₄  x₅ )
    bool x0 = test_single_ssr_bit(ssr, is_ssr_state_stable(ssr) ? SSR_BIT_G01 : SSR_UNSTABLE_OLD_BIT_G01);
    bool x1 = test_single_ssr_bit(ssr, is_ssr_state_stable(ssr) ? SSR_BIT_G05 : SSR_UNSTABLE_OLD_BIT_G05);
    bool x2 = test_single_ssr_bit(ssr, is_ssr_state_stable(ssr) ? SSR_BIT_G10 : SSR_UNSTABLE_OLD_BIT_G10);
    bool x3 = test_single_ssr_bit(ssr, is_ssr_state_stable(ssr) ? SSR_BIT_G15 : SSR_UNSTABLE_OLD_BIT_G15);
    bool x4 = test_single_ssr_bit(ssr, is_ssr_state_stable(ssr) ? SSR_BIT_H00 : SSR_UNSTABLE_OLD_BIT_H00);
    bool x5 = test_single_ssr_bit(ssr, is_ssr_state_stable(ssr) ? SSR_BIT_H07 : SSR_UNSTABLE_OLD_BIT_H07);

    bool line1 =  x1 && !x2 && !x4;
    bool line2 =  x0 &&  x2 && !x3;
    bool line3 = !x1 &&  x3 &&  x5;
    bool line4 = !x0 &&  x4 && !x5;

    bool result = line1 || line2 || line3 || line4;
    return result;
}
/// fᵣ(...) matches the research paper, definition 3.6
/// hard-coded to use bits for calculation of 'c'
static inline bool calculate_feedback_r(const ID48LIBX_STATE_REGISTERS* ssr) {
    ASSERT(ssr != nullptr);
    ASSERT(!is_ssr_state_stable(ssr));
    // c = fᵣ( g02 g03⊕i g09 g14 g16 h01 ) ⊕ m00 ⊕ m03 ⊕ m06
    //     fᵣ(  x₀    x₁  x₂  x₃  x₄  x₅ )
    bool x0 = test_single_ssr_bit(ssr, SSR_UNSTABLE_OLD_BIT_G02);
    bool x1 = test_single_ssr_bit(ssr, SSR_UNSTABLE_OLD_BIT_G03);
    if (test_single_ssr_bit(ssr, SSR_UNSTABLE_BIT_i)) { x1 = !x1; }
    bool x2 = test_single_ssr_bit(ssr, SSR_UNSTABLE_OLD_BIT_G09);
    bool x3 = test_single_ssr_bit(ssr, SSR_UNSTABLE_OLD_BIT_G14);
    bool x4 = test_single_ssr_bit(ssr, SSR_UNSTABLE_OLD_BIT_G16);
    bool x5 = test_single_ssr_bit(ssr, SSR_UNSTABLE_OLD_BIT_H01);

    bool line1 =  x1 &&  x3 && !x5;
    bool line2 =  x2 && !x3 && !x4;
    bool line3 = !x0 && !x2 &&  x5;
    bool line4 =  x0 && !x1 &&  x4;
    bool result = line1 || line2 || line3 || line4;
    return result;
}
/// Matches the research paper, definition 3.7
/// See also Definition 3.2, defining that parameter as `j`.
static inline bool calculate_j(const ID48LIBX_STATE_REGISTERS* ssr) {
    ASSERT(ssr != nullptr);
    // g′ := G(g, i, l₀₁ ⊕ m₀₆ ⊕ h₀₂ ⊕ h₀₈ ⊕ h₁₂)
    //               ^^^^^^^^^^^^^^^^^^^^^^^^^^^------ calculates `j`
    bool result = false;
    if (test_single_ssr_bit(ssr, is_ssr_state_stable(ssr) ? SSR_BIT_L01 : SSR_UNSTABLE_OLD_BIT_L01)) result = !result;
    if (test_single_ssr_bit(ssr, is_ssr_state_stable(ssr) ? SSR_BIT_M06 : SSR_UNSTABLE_OLD_BIT_M06)) result = !result;
    if (test_single_ssr_bit(ssr, is_ssr_state_stable(ssr) ? SSR_BIT_H02 : SSR_UNSTABLE_OLD_BIT_H02)) result = !result;
    if (test_single_ssr_bit(ssr, is_ssr_state_stable(ssr) ? SSR_BIT_H08 : SSR_UNSTABLE_OLD_BIT_H08)) result = !result;
    if (test_single_ssr_bit(ssr, is_ssr_state_stable(ssr) ? SSR_BIT_H12 : SSR_UNSTABLE_OLD_BIT_H12)) result = !result;
    return result;
}
/// REQUIRES INPUT BIT `i` TO BE VALID.
/// Calculates a, b, c, j and new value for H₀₀.
/// These are the only bits changed by this function.
static inline void calculate_temporaries(ID48LIBX_STATE_REGISTERS* ssr) {
    ASSERT(ssr != nullptr);
#pragma region // to be removed after all is validated
    static const uint64_t bits_must_remain_same_mask =
        ~(
            (1ull << SSR_UNSTABLE_BIT_a) |
            (1ull << SSR_UNSTABLE_BIT_b) |
            (1ull << SSR_UNSTABLE_BIT_c) |
            (1ull << SSR_UNSTABLE_BIT_j) |
            (1ull << SSR_UNSTABLE_NEW_BIT_H00)
        );
    const uint64_t backup = ssr->Raw & bits_must_remain_same_mask;
    (void)backup; // to avoid warning about unused variable
#pragma endregion // to be removed after all is validated

    // Only bits that change value: H00, a, b, c, j
    ASSERT(!is_ssr_state_stable(ssr)); // assigning temp values directly in ssr, so...
    assign_single_ssr_bit(ssr, SSR_UNSTABLE_NEW_BIT_H00, calculate_feedback_h(ssr));
    assign_single_ssr_bit(ssr, SSR_UNSTABLE_BIT_a, calculate_feedback_l(ssr));
    assign_single_ssr_bit(ssr, SSR_UNSTABLE_BIT_b, calculate_feedback_m(ssr));
    assign_single_ssr_bit(ssr, SSR_UNSTABLE_BIT_c, calculate_feedback_r(ssr));
    assign_single_ssr_bit(ssr, SSR_UNSTABLE_BIT_j, calculate_j(ssr));

    // NOTE: Could scramble the below nine lines into any order desired.
    //       If starting by setting the outputs all to zero, could also scramble the above into this mix.
    // a = fₗ() ⊕ g22 ⊕ r02 ⊕ r06
    if (test_single_ssr_bit(ssr, SSR_UNSTABLE_OLD_BIT_G22)) flip_single_ssr_bit(ssr, SSR_UNSTABLE_BIT_a);
    if (test_single_ssr_bit(ssr, SSR_UNSTABLE_OLD_BIT_R02)) flip_single_ssr_bit(ssr, SSR_UNSTABLE_BIT_a);
    if (test_single_ssr_bit(ssr, SSR_UNSTABLE_OLD_BIT_R06)) flip_single_ssr_bit(ssr, SSR_UNSTABLE_BIT_a);
    // b = fₘ() ⊕ l00 ⊕ l03 ⊕ l06
    if (test_single_ssr_bit(ssr, SSR_UNSTABLE_OLD_BIT_L00)) flip_single_ssr_bit(ssr, SSR_UNSTABLE_BIT_b);
    if (test_single_ssr_bit(ssr, SSR_UNSTABLE_OLD_BIT_L03)) flip_single_ssr_bit(ssr, SSR_UNSTABLE_BIT_b);
    if (test_single_ssr_bit(ssr, SSR_UNSTABLE_OLD_BIT_L06)) flip_single_ssr_bit(ssr, SSR_UNSTABLE_BIT_b);
    // c = fᵣ() ⊕ m00 ⊕ m03 ⊕ m06
    if (test_single_ssr_bit(ssr, SSR_UNSTABLE_OLD_BIT_M00)) flip_single_ssr_bit(ssr, SSR_UNSTABLE_BIT_c);
    if (test_single_ssr_bit(ssr, SSR_UNSTABLE_OLD_BIT_M03)) flip_single_ssr_bit(ssr, SSR_UNSTABLE_BIT_c);
    if (test_single_ssr_bit(ssr, SSR_UNSTABLE_OLD_BIT_M06)) flip_single_ssr_bit(ssr, SSR_UNSTABLE_BIT_c);

#pragma region // to be removed after all is validated
    const uint64_t chk = ssr->Raw & bits_must_remain_same_mask;
    (void)chk; // to avoid warning about unused variable
    ASSERT(chk == backup);
#pragma endregion // to be removed after all is validated
}
static inline OUTPUT_INDEX2 calculate_output_index(const ID48LIBX_STATE_REGISTERS* ssr) {
    // Fₒ( abc l₀l₂l₃l₄l₅l₆ m₀m₁m₃m₅ r₀r₁r₂r₃r₄r₅r₆ )
    //     msb 19 ---^                  lsb 00 ---^^
    ASSERT(ssr != nullptr);
    ASSERT(!is_ssr_state_stable(ssr));
    OUTPUT_INDEX2 result;
    result.Raw = 0;
    if (test_single_ssr_bit(ssr, SSR_UNSTABLE_BIT_a      )) result.Raw |= (1u << 19);
    if (test_single_ssr_bit(ssr, SSR_UNSTABLE_BIT_b      )) result.Raw |= (1u << 18);
    if (test_single_ssr_bit(ssr, SSR_UNSTABLE_BIT_c      )) result.Raw |= (1u << 17);
    //bool bit17 = test_single_ssr_bit(ssr, SSR_UNSTABLE_BIT_c);
    //if (test_single_ssr_bit(ssr, SSR_UNSTABLE_BIT_i)) bit17 = !bit17;
    //if (bit17                                       ) result.Raw |= (1u << 17);
    if (test_single_ssr_bit(ssr, SSR_UNSTABLE_OLD_BIT_L00)) result.Raw |= (1u << 16);
    if (test_single_ssr_bit(ssr, SSR_UNSTABLE_OLD_BIT_L02)) result.Raw |= (1u << 15);
    if (test_single_ssr_bit(ssr, SSR_UNSTABLE_OLD_BIT_L03)) result.Raw |= (1u << 14);
    if (test_single_ssr_bit(ssr, SSR_UNSTABLE_OLD_BIT_L04)) result.Raw |= (1u << 13);
    if (test_single_ssr_bit(ssr, SSR_UNSTABLE_OLD_BIT_L05)) result.Raw |= (1u << 12);
    if (test_single_ssr_bit(ssr, SSR_UNSTABLE_OLD_BIT_L06)) result.Raw |= (1u << 11);
    if (test_single_ssr_bit(ssr, SSR_UNSTABLE_OLD_BIT_M00)) result.Raw |= (1u << 10);
    if (test_single_ssr_bit(ssr, SSR_UNSTABLE_OLD_BIT_M01)) result.Raw |= (1u <<  9);
    if (test_single_ssr_bit(ssr, SSR_UNSTABLE_OLD_BIT_M03)) result.Raw |= (1u <<  8);
    if (test_single_ssr_bit(ssr, SSR_UNSTABLE_OLD_BIT_M05)) result.Raw |= (1u <<  7);
    if (test_single_ssr_bit(ssr, SSR_UNSTABLE_OLD_BIT_R00)) result.Raw |= (1u <<  6);
    if (test_single_ssr_bit(ssr, SSR_UNSTABLE_OLD_BIT_R01)) result.Raw |= (1u <<  5);
    if (test_single_ssr_bit(ssr, SSR_UNSTABLE_OLD_BIT_R02)) result.Raw |= (1u <<  4);
    if (test_single_ssr_bit(ssr, SSR_UNSTABLE_OLD_BIT_R03)) result.Raw |= (1u <<  3);
    if (test_single_ssr_bit(ssr, SSR_UNSTABLE_OLD_BIT_R04)) result.Raw |= (1u <<  2);
    if (test_single_ssr_bit(ssr, SSR_UNSTABLE_OLD_BIT_R05)) result.Raw |= (1u <<  1);
    if (test_single_ssr_bit(ssr, SSR_UNSTABLE_OLD_BIT_R06)) result.Raw |= (1u <<  0);
    return result;
}
// returns a single bit corresponding to the output bit for this transition
static inline bool calculate_successor_state(ID48LIBX_STATE_REGISTERS* ssr, bool i) {
    ASSERT(ssr != nullptr);
    ASSERT(is_ssr_state_stable(ssr));

    // HACK -- ORDER OF THESE OPERATIONS MATTERS ...
    //         to avoid overwriting bits needed for calculation of temporaries
    //
    // 1. ssr_new = ssr_old << 1;           // all prior values still available (even r₀₆)
    // 2. store input bit `i`               // required many places
    // 3. calculate and store a/b/c/j h'00  // can use SSR_UNSTABLE_OLD_BIT_... to get old values
    // 4. calculate and save output index   // relies on a/b/c AND the bits that get modified using a/b/c,
    //                                      // so must be after calculate a/b/c and before setting new L00,M00,R00 values
    // 5. G(g, i, j)                        // relies on SSR_UNSTABLE_OLD_BIT_G22, which is now L00 ... aka must do before L()
    // 6. L()                               // overwrite L00 with `a`
    // 7. M()                               // overwrite M00 with `b`
    // 8. R()                               // overwrite R00 with `c`

    // 1. ssr_new = ssr_old << 1;
    ssr->Raw <<= 1; // begin!

    // 2. store input bit `i`
    assign_temporary_i(ssr, i);

    // 3. calculate and store a/b/c/j and new H00 bits
    calculate_temporaries(ssr); // updates new H00, stores a/b/c and j

    // 4. calculate and save output index
    OUTPUT_INDEX2 output_index = calculate_output_index(ssr); // note: does *NOT* rely on new H00 value
    bool output_result = id48libx_output_lookup(output_index.Raw);

    // 5. g --> g', aka G(g, i, j)
    g_successor(ssr);

    // 6. L() -- overwrite L00 with `a`
    assign_single_ssr_bit(ssr, SSR_BIT_L00, test_single_ssr_bit(ssr, SSR_UNSTABLE_BIT_a));

    // 7. M() -- overwrite M00 with `b`
    assign_single_ssr_bit(ssr, SSR_BIT_M00, test_single_ssr_bit(ssr, SSR_UNSTABLE_BIT_b));

    // 8. R() -- overwrite R00 with `c`
    assign_single_ssr_bit(ssr, SSR_BIT_R00, test_single_ssr_bit(ssr, SSR_UNSTABLE_BIT_c));

    // Done!  Clear temporaries and indicate this is a final state

    // Keep only the registers (no temporaries)
    ssr->Raw &= SSR_BITMASK_REG_ALL;

    // Mark as stable view of the SSR
    ssr->Raw |= 1ull;

    return output_result;
}
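// Illustrative only (not in the original source): a minimal sketch of how a
// single stable-to-stable transition is driven, mirroring one iteration of the
// 55-round loop in retro_generator_impl() below.
#if 0
static inline bool example_single_transition(ID48LIBX_STATE_REGISTERS* ssr, bool input_bit) {
    ASSERT(is_ssr_state_stable(ssr));                      // must start from a stable view
    bool out = calculate_successor_state(ssr, input_bit);  // advances all registers by one step
    ASSERT(is_ssr_state_stable(ssr));                      // ends in a stable view again
    return out;                                            // the output bit for this transition
}
#endif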
/// Returns a value where the least significant bit is the
/// first input bit, so that the value can be right-shifted
/// by one bit each iteration (allowing least significant bit
/// to always be the input bit).
static inline INPUT_BITS2 get_key_input_bits(const ID48LIB_KEY* k) {
    ASSERT(k != nullptr);
    INPUT_BITS2 result;
    result.Raw = 0;
    // Per research paper, key bit 39 is used first.
    // So, what should end up in result is: 0²⁴ k₀₀..K₃₉
    // This allows simply shifting the lsb out each cycle....
    //
    // k[ 0] :== K₉₅..K₈₈
    // ...
    // k[ 7] :== K₃₉..K₃₂
    // ...
    // k[11] :== K₀₇..K₀₀
    for (uint8_t i = 0; i < 5; ++i) {
        uint8_t tmp = k->k[11 - i];            // e.g., first loop will contain K₀₇..K₀₀
        tmp = reverse_bits_08(tmp);            // e.g., first loop will contain K₀₀..K₀₇
        result.Raw = (result.Raw << 8) | tmp;  // accumulate, ending with K₃₉ as the lsb
    }
    static const uint64_t INPUT_MASK = (1ull << 40) - 1u;
    (void)INPUT_MASK; // to avoid warning about unused variable
    ASSERT((result.Raw & (~INPUT_MASK)) == 0ull);
    return result;
}
static inline bool shift_out_next_input_bit(INPUT_BITS2* inputs) {
    ASSERT(inputs != nullptr);
    bool result = inputs->Raw & 1ull;
    inputs->Raw >>= 1;
    return result;
}
static inline void shift_in_next_output_bit(OUTPUT_BITS2* outputs, bool v) {
    ASSERT(outputs != nullptr);
    outputs->Raw <<= 1;
    if (v) outputs->Raw |= 1ull;
}
static inline void extract_frn(const OUTPUT_BITS2* outputs, ID48LIB_FRN* frn28_out) {
    ASSERT(outputs != nullptr);
    ASSERT(frn28_out != nullptr);
    static const uint64_t C_MASK28 = (1ull << 28) - 1u;
    uint64_t tmp = outputs->Raw;
    tmp >>= 20;      // remove the 20-bit grn (but still has 7 ignored bits)
    tmp &= C_MASK28; // tmp now has exactly 28 valid bits
    tmp <<= 4;       // align to 32-bits for easier assignment to output
    // tmp now :== O₀₀..O₂₇ 0000
    frn28_out->frn[0] = (uint8_t)((tmp >> (8 * 3)) & 0xFFu);
    frn28_out->frn[1] = (uint8_t)((tmp >> (8 * 2)) & 0xFFu);
    frn28_out->frn[2] = (uint8_t)((tmp >> (8 * 1)) & 0xFFu);
    frn28_out->frn[3] = (uint8_t)((tmp >> (8 * 0)) & 0xFFu);
}
static inline void extract_grn(const OUTPUT_BITS2* outputs, ID48LIB_GRN* grn20_out) {
    ASSERT(outputs != nullptr);
    ASSERT(grn20_out != nullptr);
    memset(grn20_out, 0, sizeof(ID48LIB_GRN));
    static const uint64_t C_MASK20 = (1ull << 20) - 1u;
    uint64_t tmp = outputs->Raw;
    tmp &= C_MASK20; // tmp now has exactly 20 valid bits
    tmp <<= 4;       // align to 24-bits for easier assignment to output
    grn20_out->grn[0] = (uint8_t)((tmp >> (8 * 2)) & 0xFFu);
    grn20_out->grn[1] = (uint8_t)((tmp >> (8 * 1)) & 0xFFu);
    grn20_out->grn[2] = (uint8_t)((tmp >> (8 * 0)) & 0xFFu);
}
static void retro_generator_impl(
    const ID48LIB_KEY*   k,
    const ID48LIB_NONCE* n,
    ID48LIB_FRN*         frn28_out,
    ID48LIB_GRN*         grn20_out
    ) {
    ASSERT(k != nullptr);
    ASSERT(n != nullptr);
    ASSERT(frn28_out != nullptr);
    ASSERT(grn20_out != nullptr);
    memset(frn28_out, 0, sizeof(ID48LIB_FRN));
    memset(grn20_out, 0, sizeof(ID48LIB_GRN));

    ID48LIBX_STATE_REGISTERS ssr = init_id48libx_state_register(k, n);

    // get 55-bit successor state input
    INPUT_BITS2 inputs = get_key_input_bits(k);
    OUTPUT_BITS2 outputs;
    outputs.Raw = 0;

    for (uint8_t ix = 0; ix < 55; ix++) {
        ASSERT(is_ssr_state_stable(&ssr));

        // input bit `i` is not valid in stable state...
        bool input_bit = shift_out_next_input_bit(&inputs);
        // calculate the next state... (note: logs calculations for this state)
        bool output_bit = calculate_successor_state(&ssr, input_bit);
        ASSERT(is_ssr_state_stable(&ssr));

        // store the output bit
        shift_in_next_output_bit(&outputs, output_bit);
    }

    // convert the output bits into frn/grn
    extract_frn(&outputs, frn28_out);
    extract_grn(&outputs, grn20_out);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// ******************************************************************************************************************** //
// ***  Everything above this line in the file is declared static,                                                   *** //
// ***  which avoids polluting the global namespace.                                                                 *** //
// ***  Everything below is technically visible, but not necessarily an exported API.                                *** //
// ***  In C++, this separation is much more easily achieved using an anonymous namespace.  C'est la vie!            *** //
// ******************************************************************************************************************** //
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
ID48LIBX_SUCCESSOR_RESULT id48libx_retro003_successor(const ID48LIBX_STATE_REGISTERS* initial_state, uint8_t input_bit) {
    ASSERT(initial_state != nullptr);
    ID48LIBX_SUCCESSOR_RESULT r;
    memset(&r, 0, sizeof(ID48LIBX_SUCCESSOR_RESULT));
    ID48LIBX_STATE_REGISTERS s = *initial_state;
    bool output_bit = calculate_successor_state(&s, !!input_bit);
    r.output = output_bit;
    return r;
}
ID48LIBX_STATE_REGISTERS id48libx_retro003_init(const ID48LIB_KEY* key, const ID48LIB_NONCE* nonce) {
    ASSERT(key != nullptr);
    ASSERT(nonce != nullptr);

    ID48LIBX_STATE_REGISTERS ssr = init_id48libx_state_register(key, nonce);
    ID48LIBX_STATE_REGISTERS result;
    memset(&result, 0, sizeof(ID48LIBX_STATE_REGISTERS));
    result.Raw = ssr.Raw;
    return result;
}
void id48lib_generator(
    const ID48LIB_KEY*   k,
    const ID48LIB_NONCE* n,
    ID48LIB_FRN*         frn28_out,
    ID48LIB_GRN*         grn20_out
    ) {
    retro_generator_impl(k, n, frn28_out, grn20_out);
}
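// Illustrative only (not in the original source): a disabled sketch of how a
// caller might use id48lib_generator(). The key and nonce bytes are arbitrary
// placeholders (not test vectors), and the brace initialization assumes the
// ID48LIB_KEY / ID48LIB_NONCE types simply wrap the byte arrays k[12] and rn[7]
// accessed elsewhere in this file.
#if 0
static void example_generator_usage(void) {
    ID48LIB_KEY key = { { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55,
                          0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB } }; // k[0] == K₉₅..K₈₈ ... k[11] == K₀₇..K₀₀
    ID48LIB_NONCE nonce = { { 0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD } }; // rn[0] == N₅₅..N₄₈ ... rn[6] == N₀₇..N₀₀
    ID48LIB_FRN frn;
    ID48LIB_GRN grn;
    id48lib_generator(&key, &nonce, &frn, &grn);
    // frn now holds the 28-bit frn value; grn holds the 20-bit grn value.
}
#endif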