/** @file
 *
 * Definitions for extracting and translating integers safely and portably
 * via pointers.
 *
 * Wireshark - Network traffic analyzer
 * By Gerald Combs <gerald@wireshark.org>
 * Copyright 1998 Gerald Combs
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#ifndef __PINT_H__
#define __PINT_H__

#include <inttypes.h>
#include <string.h>     /* for memcpy() */

#include <glib.h>
/* Routines that take a possibly-unaligned pointer to a 16-bit, 24-bit,
 * 32-bit, 40-bit, ... 64-bit integral quantity, in a particular byte
 * order, and fetch the value and return it in host byte order.
 *
 * The pntohN() routines fetch big-endian values; the pletohN() routines
 * fetch little-endian values.
 */
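
/* Illustrative usage (a minimal sketch; the buffer contents below are
 * made up for the example):
 *
 *   static const uint8_t buf[4] = { 0x12, 0x34, 0x56, 0x78 };
 *
 *   uint16_t be16 = pntoh16(buf);   // 0x1234     (big-endian fetch)
 *   uint32_t be32 = pntoh32(buf);   // 0x12345678
 *   uint32_t le32 = pletoh32(buf);  // 0x78563412 (little-endian fetch)
 */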
/* On most architectures, accesses of 16, 32, and 64 bit quantities can be
 * heavily optimized. gcc and clang recognize the portable versions below
 * and, at -Os and higher, optimize them appropriately (for gcc, that
 * includes for z/Architecture, PPC64, MIPS, etc.). Older versions don't
 * do as good a job with 16 bit accesses, though.
 *
 * Unfortunately, MSVC and icc (both the "classic" version and the new
 * LLVM-based Intel C Compiler) do not, according to Matt Godbolt's
 * Compiler Explorer (https://godbolt.org) as of the end of 2022. They
 * *do* recognize and optimize a memcpy-based approach (which avoids
 * unaligned accesses on, say, ARM32), though that requires byteswapping
 * appropriately.
 */
#if (defined(_MSC_VER) && !defined(__clang__)) || defined(__INTEL_COMPILER) || defined(__INTEL_LLVM_COMPILER)
/* MSVC or Intel C Compiler (Classic or new LLVM version), but not
 * clang-cl on Windows.
 */

/* Unfortunately, C23 did not fully accept the N3022 Modern Bit Utilities
 * proposal, so a standard bytereverse function has been deferred for some
 * future version:
 * https://www.open-std.org/jtc1/sc22/wg14/www/docs/n3048.htm
 * https://www.open-std.org/jtc1/sc22/wg14/www/docs/n3022.htm
 *
 * So choose byteswap intrinsics we know we have.
 */
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) && !defined(__INTEL_LLVM_COMPILER) && !defined(__clang__)
/* Intel and clang-cl both define _MSC_VER when compiling on Windows for
 * greater compatibility (just as they define __GNUC__ on other platforms).
 * However, at least on some versions, while including the MSVC <stdlib.h>
 * provides access to the _byteswap_ intrinsics, they are not actually
 * optimized into a single x86 BSWAP instruction, unlike the gcc-style
 * intrinsics (which both support). See: https://stackoverflow.com/q/72327906
 */
#include <stdlib.h> // For MSVC _byteswap intrinsics
#define pint_bswap16(x) _byteswap_ushort(x)
#define pint_bswap32(x) _byteswap_ulong(x)
/* Hopefully MSVC never decides that a long is 64 bit. */
#define pint_bswap64(x) _byteswap_uint64(x)
#elif defined(__INTEL_COMPILER)
/* The (deprecated) Intel C++ Compiler Classic has these byteswap intrinsics.
 * It also has the GCC-style intrinsics, though __builtin_bswap16 wasn't
 * added until some point after icc 13.0 but at least by 16.0, reflecting
 * that it wasn't added to gcc until 4.8.
 */
#define pint_bswap16(x) _bswap16(x)
#define pint_bswap32(x) _bswap32(x)
#define pint_bswap64(x) _bswap64(x)
#else
/* GCC-style __builtin_bswap intrinsics */
/* The new LLVM-based Intel C++ Compiler doesn't have the above intrinsics,
 * but it always has all the GCC intrinsics.
 *
 * The __builtin_bswap32 and __builtin_bswap64 intrinsics have been
 * supported for a long time on gcc (4.1) and clang (pre 3.0), versions
 * that predate the C11 and C++11 support we require, so we can assume
 * we have them.
 *
 * __builtin_bswap16 was added a bit later, in gcc 4.8 and clang 3.2. While
 * those versions or later are required for full C11 and C++11 support,
 * some earlier versions claim to support C11 and C++11 in ways that might
 * allow them to get past CMake. We don't use this code path for those
 * compilers, though, because they heavily optimize the portable versions.
 */
#define pint_bswap16(x) __builtin_bswap16(x)
#define pint_bswap32(x) __builtin_bswap32(x)
#define pint_bswap64(x) __builtin_bswap64(x)
#endif
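
/* Whichever intrinsic is selected, the effect is a plain byte reversal.
 * As a quick illustrative check (values here are just examples):
 *
 *   pint_bswap16(0x1234)             == 0x3412
 *   pint_bswap32(0x11223344)         == 0x44332211
 *   pint_bswap64(0x0102030405060708) == 0x0807060504030201
 */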
static inline uint16_t pntoh16(const void *p)
{
    uint16_t ret;
    memcpy(&ret, p, sizeof(ret));
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
    ret = pint_bswap16(ret);
#endif
    return ret;
}

static inline uint32_t pntoh32(const void *p)
{
    uint32_t ret;
    memcpy(&ret, p, sizeof(ret));
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
    ret = pint_bswap32(ret);
#endif
    return ret;
}

static inline uint64_t pntoh64(const void *p)
{
    uint64_t ret;
    memcpy(&ret, p, sizeof(ret));
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
    ret = pint_bswap64(ret);
#endif
    return ret;
}

static inline uint16_t pletoh16(const void *p)
{
    uint16_t ret;
    memcpy(&ret, p, sizeof(ret));
#if G_BYTE_ORDER == G_BIG_ENDIAN
    ret = pint_bswap16(ret);
#endif
    return ret;
}

static inline uint32_t pletoh32(const void *p)
{
    uint32_t ret;
    memcpy(&ret, p, sizeof(ret));
#if G_BYTE_ORDER == G_BIG_ENDIAN
    ret = pint_bswap32(ret);
#endif
    return ret;
}

static inline uint64_t pletoh64(const void *p)
{
    uint64_t ret;
    memcpy(&ret, p, sizeof(ret));
#if G_BYTE_ORDER == G_BIG_ENDIAN
    ret = pint_bswap64(ret);
#endif
    return ret;
}

static inline void phton16(uint8_t *p, uint16_t v)
{
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
    v = pint_bswap16(v);
#endif
    memcpy(p, &v, sizeof(v));
}

static inline void phton32(uint8_t *p, uint32_t v)
{
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
    v = pint_bswap32(v);
#endif
    memcpy(p, &v, sizeof(v));
}

static inline void phton64(uint8_t *p, uint64_t v)
{
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
    v = pint_bswap64(v);
#endif
    memcpy(p, &v, sizeof(v));
}

static inline void phtole32(uint8_t *p, uint32_t v)
{
#if G_BYTE_ORDER == G_BIG_ENDIAN
    v = pint_bswap32(v);
#endif
    memcpy(p, &v, sizeof(v));
}

static inline void phtole64(uint8_t *p, uint64_t v)
{
#if G_BYTE_ORDER == G_BIG_ENDIAN
    v = pint_bswap64(v);
#endif
    memcpy(p, &v, sizeof(v));
}
#else
/* Portable functions */
static inline uint16_t pntoh16(const void *p)
{
    return (uint16_t)*((const uint8_t *)(p)+0)<<8|
           (uint16_t)*((const uint8_t *)(p)+1)<<0;
}

static inline uint32_t pntoh32(const void *p)
{
    return (uint32_t)*((const uint8_t *)(p)+0)<<24|
           (uint32_t)*((const uint8_t *)(p)+1)<<16|
           (uint32_t)*((const uint8_t *)(p)+2)<<8|
           (uint32_t)*((const uint8_t *)(p)+3)<<0;
}

static inline uint64_t pntoh64(const void *p)
{
    return (uint64_t)*((const uint8_t *)(p)+0)<<56|
           (uint64_t)*((const uint8_t *)(p)+1)<<48|
           (uint64_t)*((const uint8_t *)(p)+2)<<40|
           (uint64_t)*((const uint8_t *)(p)+3)<<32|
           (uint64_t)*((const uint8_t *)(p)+4)<<24|
           (uint64_t)*((const uint8_t *)(p)+5)<<16|
           (uint64_t)*((const uint8_t *)(p)+6)<<8|
           (uint64_t)*((const uint8_t *)(p)+7)<<0;
}

static inline uint16_t pletoh16(const void *p)
{
    return (uint16_t)*((const uint8_t *)(p)+1)<<8|
           (uint16_t)*((const uint8_t *)(p)+0)<<0;
}

static inline uint32_t pletoh32(const void *p)
{
    return (uint32_t)*((const uint8_t *)(p)+3)<<24|
           (uint32_t)*((const uint8_t *)(p)+2)<<16|
           (uint32_t)*((const uint8_t *)(p)+1)<<8|
           (uint32_t)*((const uint8_t *)(p)+0)<<0;
}

static inline uint64_t pletoh64(const void *p)
{
    return (uint64_t)*((const uint8_t *)(p)+7)<<56|
           (uint64_t)*((const uint8_t *)(p)+6)<<48|
           (uint64_t)*((const uint8_t *)(p)+5)<<40|
           (uint64_t)*((const uint8_t *)(p)+4)<<32|
           (uint64_t)*((const uint8_t *)(p)+3)<<24|
           (uint64_t)*((const uint8_t *)(p)+2)<<16|
           (uint64_t)*((const uint8_t *)(p)+1)<<8|
           (uint64_t)*((const uint8_t *)(p)+0)<<0;
}
/* Pointer routines to put items out in a particular byte order.
 * These will work regardless of the byte alignment of the pointer.
 */
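
/* Illustrative usage (a minimal sketch; the values are made up for the
 * example):
 *
 *   uint8_t out[6];
 *
 *   phton16(out + 0, 0xABCD);      // out[0..1] = AB CD
 *   phton32(out + 2, 0x01020304);  // out[2..5] = 01 02 03 04
 */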
static inline void phton16(uint8_t *p, uint16_t v)
{
    p[0] = (uint8_t)(v >> 8);
    p[1] = (uint8_t)(v >> 0);
}

static inline void phton32(uint8_t *p, uint32_t v)
{
    p[0] = (uint8_t)(v >> 24);
    p[1] = (uint8_t)(v >> 16);
    p[2] = (uint8_t)(v >> 8);
    p[3] = (uint8_t)(v >> 0);
}

static inline void phton64(uint8_t *p, uint64_t v)
{
    p[0] = (uint8_t)(v >> 56);
    p[1] = (uint8_t)(v >> 48);
    p[2] = (uint8_t)(v >> 40);
    p[3] = (uint8_t)(v >> 32);
    p[4] = (uint8_t)(v >> 24);
    p[5] = (uint8_t)(v >> 16);
    p[6] = (uint8_t)(v >> 8);
    p[7] = (uint8_t)(v >> 0);
}

static inline void phtole32(uint8_t *p, uint32_t v)
{
    p[0] = (uint8_t)(v >> 0);
    p[1] = (uint8_t)(v >> 8);
    p[2] = (uint8_t)(v >> 16);
    p[3] = (uint8_t)(v >> 24);
}

static inline void phtole64(uint8_t *p, uint64_t v)
{
    p[0] = (uint8_t)(v >> 0);
    p[1] = (uint8_t)(v >> 8);
    p[2] = (uint8_t)(v >> 16);
    p[3] = (uint8_t)(v >> 24);
    p[4] = (uint8_t)(v >> 32);
    p[5] = (uint8_t)(v >> 40);
    p[6] = (uint8_t)(v >> 48);
    p[7] = (uint8_t)(v >> 56);
}
#endif
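
/* Fetch routines for the odd widths (24, 40, 48, and 56 bits). There is no
 * native integer type of these sizes, so they are always assembled
 * byte-by-byte and returned in the next-larger type, regardless of the
 * compiler branch above.
 *
 * Illustrative usage (a sketch; the buffer is made up for the example):
 *
 *   static const uint8_t len_field[3] = { 0x00, 0x01, 0x02 };
 *   uint32_t len = pntoh24(len_field);   // 0x000102 = 258
 */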
static inline uint32_t pntoh24(const void *p)
{
    return (uint32_t)*((const uint8_t *)(p)+0)<<16|
           (uint32_t)*((const uint8_t *)(p)+1)<<8|
           (uint32_t)*((const uint8_t *)(p)+2)<<0;
}

static inline uint64_t pntoh40(const void *p)
{
    return (uint64_t)*((const uint8_t *)(p)+0)<<32|
           (uint64_t)*((const uint8_t *)(p)+1)<<24|
           (uint64_t)*((const uint8_t *)(p)+2)<<16|
           (uint64_t)*((const uint8_t *)(p)+3)<<8|
           (uint64_t)*((const uint8_t *)(p)+4)<<0;
}

static inline uint64_t pntoh48(const void *p)
{
    return (uint64_t)*((const uint8_t *)(p)+0)<<40|
           (uint64_t)*((const uint8_t *)(p)+1)<<32|
           (uint64_t)*((const uint8_t *)(p)+2)<<24|
           (uint64_t)*((const uint8_t *)(p)+3)<<16|
           (uint64_t)*((const uint8_t *)(p)+4)<<8|
           (uint64_t)*((const uint8_t *)(p)+5)<<0;
}

static inline uint64_t pntoh56(const void *p)
{
    return (uint64_t)*((const uint8_t *)(p)+0)<<48|
           (uint64_t)*((const uint8_t *)(p)+1)<<40|
           (uint64_t)*((const uint8_t *)(p)+2)<<32|
           (uint64_t)*((const uint8_t *)(p)+3)<<24|
           (uint64_t)*((const uint8_t *)(p)+4)<<16|
           (uint64_t)*((const uint8_t *)(p)+5)<<8|
           (uint64_t)*((const uint8_t *)(p)+6)<<0;
}

static inline uint32_t pletoh24(const void *p)
{
    return (uint32_t)*((const uint8_t *)(p)+2)<<16|
           (uint32_t)*((const uint8_t *)(p)+1)<<8|
           (uint32_t)*((const uint8_t *)(p)+0)<<0;
}

static inline uint64_t pletoh40(const void *p)
{
    return (uint64_t)*((const uint8_t *)(p)+4)<<32|
           (uint64_t)*((const uint8_t *)(p)+3)<<24|
           (uint64_t)*((const uint8_t *)(p)+2)<<16|
           (uint64_t)*((const uint8_t *)(p)+1)<<8|
           (uint64_t)*((const uint8_t *)(p)+0)<<0;
}

static inline uint64_t pletoh48(const void *p)
{
    return (uint64_t)*((const uint8_t *)(p)+5)<<40|
           (uint64_t)*((const uint8_t *)(p)+4)<<32|
           (uint64_t)*((const uint8_t *)(p)+3)<<24|
           (uint64_t)*((const uint8_t *)(p)+2)<<16|
           (uint64_t)*((const uint8_t *)(p)+1)<<8|
           (uint64_t)*((const uint8_t *)(p)+0)<<0;
}

static inline uint64_t pletoh56(const void *p)
{
    return (uint64_t)*((const uint8_t *)(p)+6)<<48|
           (uint64_t)*((const uint8_t *)(p)+5)<<40|
           (uint64_t)*((const uint8_t *)(p)+4)<<32|
           (uint64_t)*((const uint8_t *)(p)+3)<<24|
           (uint64_t)*((const uint8_t *)(p)+2)<<16|
           (uint64_t)*((const uint8_t *)(p)+1)<<8|
           (uint64_t)*((const uint8_t *)(p)+0)<<0;
}
#endif /* __PINT_H__ */
/*
 * Editor modelines - https://www.wireshark.org/tools/modelines.html
 *
 * Local Variables:
 * c-basic-offset: 4
 * tab-width: 8
 * indent-tabs-mode: nil
 * End:
 *
 * ex: set shiftwidth=4 tabstop=8 expandtab:
 * :indentSize=4:tabSize=8:noTabs=true:
 */