/**
 * \file alignment.h
 *
 * \brief Utility code for dealing with unaligned memory accesses
 */
/*
 * Copyright The Mbed TLS Contributors
 * SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
 */

#ifndef MBEDTLS_LIBRARY_ALIGNMENT_H
#define MBEDTLS_LIBRARY_ALIGNMENT_H

#include <stdint.h>
#include <string.h>
#include <stdlib.h>

/*
 * Define MBEDTLS_EFFICIENT_UNALIGNED_ACCESS for architectures where unaligned memory
 * accesses are known to be efficient.
 *
 * All functions defined here will behave correctly regardless, but might be less
 * efficient when this is not defined.
 */
#if defined(__ARM_FEATURE_UNALIGNED) \
    || defined(MBEDTLS_ARCH_IS_X86) || defined(MBEDTLS_ARCH_IS_X64) \
    || defined(MBEDTLS_PLATFORM_IS_WINDOWS_ON_ARM64)
/*
 * __ARM_FEATURE_UNALIGNED is defined where appropriate by armcc, gcc 7, clang 9
 * (and later versions) for Arm v7 and later; all x86 platforms should have
 * efficient unaligned access.
 *
 * https://learn.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions?view=msvc-170#alignment
 * specifies that on Windows-on-Arm64, unaligned access is safe (except for uncached
 * device memory).
 */
#define MBEDTLS_EFFICIENT_UNALIGNED_ACCESS
#endif

#if defined(__IAR_SYSTEMS_ICC__) && \
    (defined(MBEDTLS_ARCH_IS_ARM64) || defined(MBEDTLS_ARCH_IS_ARM32) \
     || defined(__ICCRX__) || defined(__ICCRL78__) || defined(__ICCRISCV__))
#pragma language=save
#pragma language=extended
#define MBEDTLS_POP_IAR_LANGUAGE_PRAGMA
/* IAR recommends this technique for accessing unaligned data in
 * https://www.iar.com/knowledge/support/technical-notes/compiler/accessing-unaligned-data
 * This results in a single load / store instruction (if unaligned access is supported).
 * According to that document, this is only supported on certain architectures.
 */
#define UINT_UNALIGNED
typedef uint16_t __packed mbedtls_uint16_unaligned_t;
typedef uint32_t __packed mbedtls_uint32_unaligned_t;
typedef uint64_t __packed mbedtls_uint64_unaligned_t;
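
/*
 * Illustrative sketch only (hypothetical name, not part of the mbedtls API):
 * with the __packed typedefs above, an unaligned access is expressed simply
 * by dereferencing a pointer to the packed type, which IAR compiles to a
 * single load instruction on architectures that support unaligned access.
 */
static inline uint16_t mbedtls_example_iar_unaligned_load16(const void *p)
{
    /* The cast tells the compiler that the pointee may be unaligned. */
    return *((const mbedtls_uint16_unaligned_t *) p);
}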
#elif defined(MBEDTLS_COMPILER_IS_GCC) && (MBEDTLS_GCC_VERSION >= 40504) && \
    ((MBEDTLS_GCC_VERSION < 60300) || (!defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS)))
/*
 * gcc may generate a branch to memcpy for calls like `memcpy(dest, src, 4)` rather than
 * generating some LDR or LDRB instructions (similar for stores).
 *
 * This is architecture dependent: x86-64 seems fine even with old gcc; 32-bit Arm
 * is affected. To keep it simple, we enable for all architectures.
 *
 * For versions of gcc < 5.4.0 this issue always happens.
 * For gcc < 6.3.0, this issue happens at -O0.
 * For all versions, this issue happens iff unaligned access is not supported.
 *
 * For gcc 4.x, this implementation will generate byte-by-byte loads even if unaligned access is
 * supported, which is correct but not optimal.
 *
 * For performance (and code size, in some cases), we want to avoid the branch and just generate
 * some inline load/store instructions since the access is small and constant-size.
 *
 * The manual states:
 * "The packed attribute specifies that a variable or structure field should have the smallest
 *  possible alignment -- one byte for a variable"
 * https://gcc.gnu.org/onlinedocs/gcc-4.5.4/gcc/Variable-Attributes.html
 *
 * Previous implementations used __attribute__((__aligned__(1))), but had issues with a gcc bug:
 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94662
 *
 * Tested with several versions of GCC from 4.5.0 up to 13.2.0.
 * We don't enable for older than 4.5.0 as this has not been tested.
 */
#define UINT_UNALIGNED_STRUCT
typedef struct {
    uint16_t x;
} __attribute__((packed)) mbedtls_uint16_unaligned_t;
typedef struct {
    uint32_t x;
} __attribute__((packed)) mbedtls_uint32_unaligned_t;
typedef struct {
    uint64_t x;
} __attribute__((packed)) mbedtls_uint64_unaligned_t;
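
/*
 * Illustrative sketch only (hypothetical name, not part of the mbedtls API):
 * with the packed structs above, an unaligned 32-bit read is expressed by
 * casting the pointer and reading the single member. The struct has
 * alignment 1, so gcc emits an unaligned load (or byte-by-byte loads where
 * unaligned access is unsupported) without branching to memcpy.
 */
static inline uint32_t mbedtls_example_packed_struct_load32(const void *p)
{
    /* Valid for any p, regardless of its alignment. */
    return ((const mbedtls_uint32_unaligned_t *) p)->x;
}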
#endif

/*
 * We try to force mbedtls_(get|put)_unaligned_uintXX to be always inline, because this results
 * in code that is both smaller and faster. IAR and gcc both benefit from this when optimising
 * for size.
 */

/**
 * Read the unsigned 16 bits integer from the given address, which need not
 * be aligned.
 *
 * \param p pointer to 2 bytes of data
 * \return Data at the given address
 */
uint16_t mbedtls_get_unaligned_uint16(const void *p);

/**
 * Write the unsigned 16 bits integer to the given address, which need not
 * be aligned.
 *
 * \param p pointer to 2 bytes of data
 * \param x data to write
 */
void mbedtls_put_unaligned_uint16(void *p, uint16_t x);

/**
 * Read the unsigned 32 bits integer from the given address, which need not
 * be aligned.
 *
 * \param p pointer to 4 bytes of data
 * \return Data at the given address
 */
uint32_t mbedtls_get_unaligned_uint32(const void *p);

/**
 * Write the unsigned 32 bits integer to the given address, which need not
 * be aligned.
 *
 * \param p pointer to 4 bytes of data
 * \param x data to write
 */
void mbedtls_put_unaligned_uint32(void *p, uint32_t x);

/**
 * Read the unsigned 64 bits integer from the given address, which need not
 * be aligned.
 *
 * \param p pointer to 8 bytes of data
 * \return Data at the given address
 */
uint64_t mbedtls_get_unaligned_uint64(const void *p);

/**
 * Write the unsigned 64 bits integer to the given address, which need not
 * be aligned.
 *
 * \param p pointer to 8 bytes of data
 * \param x data to write
 */
void mbedtls_put_unaligned_uint64(void *p, uint64_t x);
#if defined(MBEDTLS_POP_IAR_LANGUAGE_PRAGMA)
#pragma language=restore
#endif
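
/*
 * For context, a minimal sketch (hypothetical name, not part of the mbedtls
 * API) of the portable technique behind the accessors declared above:
 * memcpy with a small, constant size is well defined for any alignment, and
 * compilers typically lower it to a single load/store on
 * MBEDTLS_EFFICIENT_UNALIGNED_ACCESS targets.
 */
static inline uint32_t mbedtls_example_memcpy_load32(const void *p)
{
    uint32_t r;
    /* Copy 4 bytes from a possibly unaligned address into an aligned local. */
    memcpy(&r, p, sizeof(r));
    return r;
}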

/** Byte Reading Macros
 *
 * Given a multi-byte integer \p x, MBEDTLS_BYTE_n retrieves the n-th
 * byte from x, where byte 0 is the least significant byte.
 */
#define MBEDTLS_BYTE_0(x) ((uint8_t) ((x) & 0xff))
#define MBEDTLS_BYTE_1(x) ((uint8_t) (((x) >> 8) & 0xff))
#define MBEDTLS_BYTE_2(x) ((uint8_t) (((x) >> 16) & 0xff))
#define MBEDTLS_BYTE_3(x) ((uint8_t) (((x) >> 24) & 0xff))
#define MBEDTLS_BYTE_4(x) ((uint8_t) (((x) >> 32) & 0xff))
#define MBEDTLS_BYTE_5(x) ((uint8_t) (((x) >> 40) & 0xff))
#define MBEDTLS_BYTE_6(x) ((uint8_t) (((x) >> 48) & 0xff))
#define MBEDTLS_BYTE_7(x) ((uint8_t) (((x) >> 56) & 0xff))
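
/*
 * Usage sketch (hypothetical helper, not part of the mbedtls API): the byte
 * macros above extract bytes by value, independent of host endianness, e.g.
 * to serialise a 32-bit value most-significant byte first.
 */
static inline void mbedtls_example_store32_be(unsigned char out[4], uint32_t x)
{
    out[0] = MBEDTLS_BYTE_3(x); /* most significant byte */
    out[1] = MBEDTLS_BYTE_2(x);
    out[2] = MBEDTLS_BYTE_1(x);
    out[3] = MBEDTLS_BYTE_0(x); /* least significant byte */
}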

/*
 * Detect GCC built-in byteswap routines
 */
#if defined(__GNUC__) && defined(__GNUC_PREREQ)
#if __GNUC_PREREQ(4, 8)
#define MBEDTLS_BSWAP16 __builtin_bswap16
#endif /* __GNUC_PREREQ(4,8) */
#if __GNUC_PREREQ(4, 3)
#define MBEDTLS_BSWAP32 __builtin_bswap32
#define MBEDTLS_BSWAP64 __builtin_bswap64
#endif /* __GNUC_PREREQ(4,3) */
#endif /* defined(__GNUC__) && defined(__GNUC_PREREQ) */

/*
 * Detect Clang built-in byteswap routines
 */
#if defined(__clang__) && defined(__has_builtin)
#if __has_builtin(__builtin_bswap16) && !defined(MBEDTLS_BSWAP16)
#define MBEDTLS_BSWAP16 __builtin_bswap16
#endif /* __has_builtin(__builtin_bswap16) */
#if __has_builtin(__builtin_bswap32) && !defined(MBEDTLS_BSWAP32)
#define MBEDTLS_BSWAP32 __builtin_bswap32
#endif /* __has_builtin(__builtin_bswap32) */
#if __has_builtin(__builtin_bswap64) && !defined(MBEDTLS_BSWAP64)
#define MBEDTLS_BSWAP64 __builtin_bswap64
#endif /* __has_builtin(__builtin_bswap64) */
#endif /* defined(__clang__) && defined(__has_builtin) */

/*
 * Detect MSVC built-in byteswap routines
 */
#if defined(_MSC_VER)
#if !defined(MBEDTLS_BSWAP16)
#define MBEDTLS_BSWAP16 _byteswap_ushort
#endif
#if !defined(MBEDTLS_BSWAP32)
#define MBEDTLS_BSWAP32 _byteswap_ulong
#endif
#if !defined(MBEDTLS_BSWAP64)
#define MBEDTLS_BSWAP64 _byteswap_uint64
#endif
#endif /* defined(_MSC_VER) */

/* Detect armcc built-in byteswap routine */
#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 410000) && !defined(MBEDTLS_BSWAP32)
#if defined(__ARM_ACLE)  /* ARM Compiler 6 - earlier versions don't need a header */
#include <arm_acle.h>
#endif
#define MBEDTLS_BSWAP32 __rev
#endif

/* Detect IAR built-in byteswap routine */
#if defined(__IAR_SYSTEMS_ICC__)
#if defined(__ARM_ACLE)
#include <arm_acle.h>
#define MBEDTLS_BSWAP16(x) ((uint16_t) __rev16((uint32_t) (x)))
#define MBEDTLS_BSWAP32 __rev
#define MBEDTLS_BSWAP64 __revll
#endif
#endif

/*
 * Where compiler built-ins are not present, fall back to C code that the
 * compiler may be able to detect and transform into the relevant bswap or
 * similar instruction.
 */
#if !defined(MBEDTLS_BSWAP16)
static inline uint16_t mbedtls_bswap16(uint16_t x)
{
    return
        (x & 0x00ff) << 8 |
        (x & 0xff00) >> 8;
}
#define MBEDTLS_BSWAP16 mbedtls_bswap16
#endif /* !defined(MBEDTLS_BSWAP16) */

#if !defined(MBEDTLS_BSWAP32)
static inline uint32_t mbedtls_bswap32(uint32_t x)
{
    return
        (x & 0x000000ff) << 24 |
        (x & 0x0000ff00) << 8 |
        (x & 0x00ff0000) >> 8 |
        (x & 0xff000000) >> 24;
}
#define MBEDTLS_BSWAP32 mbedtls_bswap32
#endif /* !defined(MBEDTLS_BSWAP32) */

#if !defined(MBEDTLS_BSWAP64)
static inline uint64_t mbedtls_bswap64(uint64_t x)
{
    return
        (x & 0x00000000000000ffULL) << 56 |
        (x & 0x000000000000ff00ULL) << 40 |
        (x & 0x0000000000ff0000ULL) << 24 |
        (x & 0x00000000ff000000ULL) << 8 |
        (x & 0x000000ff00000000ULL) >> 8 |
        (x & 0x0000ff0000000000ULL) >> 24 |
        (x & 0x00ff000000000000ULL) >> 40 |
        (x & 0xff00000000000000ULL) >> 56;
}
#define MBEDTLS_BSWAP64 mbedtls_bswap64
#endif /* !defined(MBEDTLS_BSWAP64) */

#if !defined(__BYTE_ORDER__)

#if defined(__LITTLE_ENDIAN__)
/* IAR defines __xxx_ENDIAN__, but not __BYTE_ORDER__ */
#define MBEDTLS_IS_BIG_ENDIAN 0
#elif defined(__BIG_ENDIAN__)
#define MBEDTLS_IS_BIG_ENDIAN 1
#else
static const uint16_t mbedtls_byte_order_detector = { 0x100 };
#define MBEDTLS_IS_BIG_ENDIAN (*((unsigned char *) (&mbedtls_byte_order_detector)) == 0x01)
#endif

#else

#if (__BYTE_ORDER__) == (__ORDER_BIG_ENDIAN__)
#define MBEDTLS_IS_BIG_ENDIAN 1
#else
#define MBEDTLS_IS_BIG_ENDIAN 0
#endif

#endif /* !defined(__BYTE_ORDER__) */

/**
 * Get the unsigned 32 bits integer corresponding to four bytes in
 * big-endian order (MSB first).
 *
 * \param data   Base address of the memory to get the four bytes from.
 * \param offset Offset from \p data of the first and most significant
 *               byte of the four bytes to build the 32 bits unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT32_BE(data, offset)                                \
    ((MBEDTLS_IS_BIG_ENDIAN)                                               \
        ? mbedtls_get_unaligned_uint32((data) + (offset))                  \
        : MBEDTLS_BSWAP32(mbedtls_get_unaligned_uint32((data) + (offset))) \
    )

/**
 * Put in memory a 32 bits unsigned integer in big-endian order.
 *
 * \param n      32 bits unsigned integer to put in memory.
 * \param data   Base address of the memory where to put the 32
 *               bits unsigned integer in.
 * \param offset Offset from \p data where to put the most significant
 *               byte of the 32 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT32_BE(n, data, offset)                                                \
    {                                                                                         \
        if (MBEDTLS_IS_BIG_ENDIAN)                                                            \
        {                                                                                     \
            mbedtls_put_unaligned_uint32((data) + (offset), (uint32_t) (n));                  \
        }                                                                                     \
        else                                                                                  \
        {                                                                                     \
            mbedtls_put_unaligned_uint32((data) + (offset), MBEDTLS_BSWAP32((uint32_t) (n))); \
        }                                                                                     \
    }
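
/*
 * Usage sketch (hypothetical function, not part of the mbedtls API): reading
 * and writing a big-endian 32-bit field at an arbitrary, possibly unaligned
 * offset in a byte buffer, e.g. incrementing a counter stored MSB first.
 */
static inline void mbedtls_example_increment_be32(unsigned char *buf, size_t offset)
{
    uint32_t v = MBEDTLS_GET_UINT32_BE(buf, offset);
    MBEDTLS_PUT_UINT32_BE(v + 1, buf, offset);
}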

/**
 * Get the unsigned 32 bits integer corresponding to four bytes in
 * little-endian order (LSB first).
 *
 * \param data   Base address of the memory to get the four bytes from.
 * \param offset Offset from \p data of the first and least significant
 *               byte of the four bytes to build the 32 bits unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT32_LE(data, offset)                                \
    ((MBEDTLS_IS_BIG_ENDIAN)                                               \
        ? MBEDTLS_BSWAP32(mbedtls_get_unaligned_uint32((data) + (offset))) \
        : mbedtls_get_unaligned_uint32((data) + (offset))                  \
    )

/**
 * Put in memory a 32 bits unsigned integer in little-endian order.
 *
 * \param n      32 bits unsigned integer to put in memory.
 * \param data   Base address of the memory where to put the 32
 *               bits unsigned integer in.
 * \param offset Offset from \p data where to put the least significant
 *               byte of the 32 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT32_LE(n, data, offset)                                                \
    {                                                                                         \
        if (MBEDTLS_IS_BIG_ENDIAN)                                                            \
        {                                                                                     \
            mbedtls_put_unaligned_uint32((data) + (offset), MBEDTLS_BSWAP32((uint32_t) (n))); \
        }                                                                                     \
        else                                                                                  \
        {                                                                                     \
            mbedtls_put_unaligned_uint32((data) + (offset), ((uint32_t) (n)));                \
        }                                                                                     \
    }

/**
 * Get the unsigned 16 bits integer corresponding to two bytes in
 * little-endian order (LSB first).
 *
 * \param data   Base address of the memory to get the two bytes from.
 * \param offset Offset from \p data of the first and least significant
 *               byte of the two bytes to build the 16 bits unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT16_LE(data, offset)                                \
    ((MBEDTLS_IS_BIG_ENDIAN)                                               \
        ? MBEDTLS_BSWAP16(mbedtls_get_unaligned_uint16((data) + (offset))) \
        : mbedtls_get_unaligned_uint16((data) + (offset))                  \
    )

/**
 * Put in memory a 16 bits unsigned integer in little-endian order.
 *
 * \param n      16 bits unsigned integer to put in memory.
 * \param data   Base address of the memory where to put the 16
 *               bits unsigned integer in.
 * \param offset Offset from \p data where to put the least significant
 *               byte of the 16 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT16_LE(n, data, offset)                                                \
    {                                                                                         \
        if (MBEDTLS_IS_BIG_ENDIAN)                                                            \
        {                                                                                     \
            mbedtls_put_unaligned_uint16((data) + (offset), MBEDTLS_BSWAP16((uint16_t) (n))); \
        }                                                                                     \
        else                                                                                  \
        {                                                                                     \
            mbedtls_put_unaligned_uint16((data) + (offset), (uint16_t) (n));                  \
        }                                                                                     \
    }

/**
 * Get the unsigned 16 bits integer corresponding to two bytes in
 * big-endian order (MSB first).
 *
 * \param data   Base address of the memory to get the two bytes from.
 * \param offset Offset from \p data of the first and most significant
 *               byte of the two bytes to build the 16 bits unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT16_BE(data, offset)                                \
    ((MBEDTLS_IS_BIG_ENDIAN)                                               \
        ? mbedtls_get_unaligned_uint16((data) + (offset))                  \
        : MBEDTLS_BSWAP16(mbedtls_get_unaligned_uint16((data) + (offset))) \
    )

/**
 * Put in memory a 16 bits unsigned integer in big-endian order.
 *
 * \param n      16 bits unsigned integer to put in memory.
 * \param data   Base address of the memory where to put the 16
 *               bits unsigned integer in.
 * \param offset Offset from \p data where to put the most significant
 *               byte of the 16 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT16_BE(n, data, offset)                                                \
    {                                                                                         \
        if (MBEDTLS_IS_BIG_ENDIAN)                                                            \
        {                                                                                     \
            mbedtls_put_unaligned_uint16((data) + (offset), (uint16_t) (n));                  \
        }                                                                                     \
        else                                                                                  \
        {                                                                                     \
            mbedtls_put_unaligned_uint16((data) + (offset), MBEDTLS_BSWAP16((uint16_t) (n))); \
        }                                                                                     \
    }

/**
 * Get the unsigned 24 bits integer corresponding to three bytes in
 * big-endian order (MSB first).
 *
 * \param data   Base address of the memory to get the three bytes from.
 * \param offset Offset from \p data of the first and most significant
 *               byte of the three bytes to build the 24 bits unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT24_BE(data, offset)          \
    (                                                \
        ((uint32_t) (data)[(offset)] << 16)          \
        | ((uint32_t) (data)[(offset) + 1] << 8)     \
        | ((uint32_t) (data)[(offset) + 2])          \
    )

/**
 * Put in memory a 24 bits unsigned integer in big-endian order.
 *
 * \param n      24 bits unsigned integer to put in memory.
 * \param data   Base address of the memory where to put the 24
 *               bits unsigned integer in.
 * \param offset Offset from \p data where to put the most significant
 *               byte of the 24 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT24_BE(n, data, offset)       \
    {                                                \
        (data)[(offset)] = MBEDTLS_BYTE_2(n);        \
        (data)[(offset) + 1] = MBEDTLS_BYTE_1(n);    \
        (data)[(offset) + 2] = MBEDTLS_BYTE_0(n);    \
    }

/**
 * Get the unsigned 24 bits integer corresponding to three bytes in
 * little-endian order (LSB first).
 *
 * \param data   Base address of the memory to get the three bytes from.
 * \param offset Offset from \p data of the first and least significant
 *               byte of the three bytes to build the 24 bits unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT24_LE(data, offset)          \
    (                                                \
        ((uint32_t) (data)[(offset)])                \
        | ((uint32_t) (data)[(offset) + 1] << 8)     \
        | ((uint32_t) (data)[(offset) + 2] << 16)    \
    )

/**
 * Put in memory a 24 bits unsigned integer in little-endian order.
 *
 * \param n      24 bits unsigned integer to put in memory.
 * \param data   Base address of the memory where to put the 24
 *               bits unsigned integer in.
 * \param offset Offset from \p data where to put the least significant
 *               byte of the 24 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT24_LE(n, data, offset)       \
    {                                                \
        (data)[(offset)] = MBEDTLS_BYTE_0(n);        \
        (data)[(offset) + 1] = MBEDTLS_BYTE_1(n);    \
        (data)[(offset) + 2] = MBEDTLS_BYTE_2(n);    \
    }

/**
 * Get the unsigned 64 bits integer corresponding to eight bytes in
 * big-endian order (MSB first).
 *
 * \param data   Base address of the memory to get the eight bytes from.
 * \param offset Offset from \p data of the first and most significant
 *               byte of the eight bytes to build the 64 bits unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT64_BE(data, offset)                                \
    ((MBEDTLS_IS_BIG_ENDIAN)                                               \
        ? mbedtls_get_unaligned_uint64((data) + (offset))                  \
        : MBEDTLS_BSWAP64(mbedtls_get_unaligned_uint64((data) + (offset))) \
    )

/**
 * Put in memory a 64 bits unsigned integer in big-endian order.
 *
 * \param n      64 bits unsigned integer to put in memory.
 * \param data   Base address of the memory where to put the 64
 *               bits unsigned integer in.
 * \param offset Offset from \p data where to put the most significant
 *               byte of the 64 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT64_BE(n, data, offset)                                                \
    {                                                                                         \
        if (MBEDTLS_IS_BIG_ENDIAN)                                                            \
        {                                                                                     \
            mbedtls_put_unaligned_uint64((data) + (offset), (uint64_t) (n));                  \
        }                                                                                     \
        else                                                                                  \
        {                                                                                     \
            mbedtls_put_unaligned_uint64((data) + (offset), MBEDTLS_BSWAP64((uint64_t) (n))); \
        }                                                                                     \
    }

/**
 * Get the unsigned 64 bits integer corresponding to eight bytes in
 * little-endian order (LSB first).
 *
 * \param data   Base address of the memory to get the eight bytes from.
 * \param offset Offset from \p data of the first and least significant
 *               byte of the eight bytes to build the 64 bits unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT64_LE(data, offset)                                \
    ((MBEDTLS_IS_BIG_ENDIAN)                                               \
        ? MBEDTLS_BSWAP64(mbedtls_get_unaligned_uint64((data) + (offset))) \
        : mbedtls_get_unaligned_uint64((data) + (offset))                  \
    )

/**
 * Put in memory a 64 bits unsigned integer in little-endian order.
 *
 * \param n      64 bits unsigned integer to put in memory.
 * \param data   Base address of the memory where to put the 64
 *               bits unsigned integer in.
 * \param offset Offset from \p data where to put the least significant
 *               byte of the 64 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT64_LE(n, data, offset)                                                \
    {                                                                                         \
        if (MBEDTLS_IS_BIG_ENDIAN)                                                            \
        {                                                                                     \
            mbedtls_put_unaligned_uint64((data) + (offset), MBEDTLS_BSWAP64((uint64_t) (n))); \
        }                                                                                     \
        else                                                                                  \
        {                                                                                     \
            mbedtls_put_unaligned_uint64((data) + (offset), (uint64_t) (n));                  \
        }                                                                                     \
    }

#endif /* MBEDTLS_LIBRARY_ALIGNMENT_H */