arch/c6x/include/asm/unaligned.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Port on Texas Instruments TMS320C6x architecture
 *
 * Copyright (C) 2004, 2009, 2010 Texas Instruments Incorporated
 * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
 * Rewritten for 2.6.3x: Mark Salter <msalter@redhat.com>
 */
#ifndef _ASM_C6X_UNALIGNED_H
#define _ASM_C6X_UNALIGNED_H

#include <linux/swab.h>

/*
 * The C64x+ can do unaligned word and dword accesses in hardware
 * using special load/store instructions.
 */
static inline u16 get_unaligned_le16(const void *p)
{
	const u8 *_p = p;
	return _p[0] | _p[1] << 8;
}

static inline u16 get_unaligned_be16(const void *p)
{
	const u8 *_p = p;
	return _p[0] << 8 | _p[1];
}

static inline void put_unaligned_le16(u16 val, void *p)
{
	u8 *_p = p;
	_p[0] = val;
	_p[1] = val >> 8;
}

static inline void put_unaligned_be16(u16 val, void *p)
{
	u8 *_p = p;
	_p[0] = val >> 8;
	_p[1] = val;
}
/* 32-bit non-aligned load: ldnw, followed by nop 4 to fill the load delay slots. */
static inline u32 get_unaligned32(const void *p)
{
	u32 val = (u32) p;
	asm (" ldnw	.d1t1	*%0,%0\n"
	     " nop     4\n"
	     : "+a"(val));
	return val;
}

/* 32-bit non-aligned store via stnw. */
static inline void put_unaligned32(u32 val, void *p)
{
	asm volatile (" stnw	.d2t1	%0,*%1\n"
		      : : "a"(val), "b"(p) : "memory");
}

/* 64-bit non-aligned load via ldndw. */
static inline u64 get_unaligned64(const void *p)
{
	u64 val;
	asm volatile (" ldndw	.d1t1	*%1,%0\n"
		      " nop     4\n"
		      : "=a"(val) : "a"(p));
	return val;
}

/* 64-bit non-aligned store via stndw. */
static inline void put_unaligned64(u64 val, const void *p)
{
	asm volatile (" stndw	.d2t1	%0,*%1\n"
		      : : "a"(val), "b"(p) : "memory");
}
#ifdef CONFIG_CPU_BIG_ENDIAN

#define get_unaligned_le32(p)	 __swab32(get_unaligned32(p))
#define get_unaligned_le64(p)	 __swab64(get_unaligned64(p))
#define get_unaligned_be32(p)	 get_unaligned32(p)
#define get_unaligned_be64(p)	 get_unaligned64(p)
#define put_unaligned_le32(v, p) put_unaligned32(__swab32(v), (p))
#define put_unaligned_le64(v, p) put_unaligned64(__swab64(v), (p))
#define put_unaligned_be32(v, p) put_unaligned32((v), (p))
#define put_unaligned_be64(v, p) put_unaligned64((v), (p))
#define get_unaligned	__get_unaligned_be
#define put_unaligned	__put_unaligned_be

#else

#define get_unaligned_le32(p)	 get_unaligned32(p)
#define get_unaligned_le64(p)	 get_unaligned64(p)
#define get_unaligned_be32(p)	 __swab32(get_unaligned32(p))
#define get_unaligned_be64(p)	 __swab64(get_unaligned64(p))
#define put_unaligned_le32(v, p) put_unaligned32((v), (p))
#define put_unaligned_le64(v, p) put_unaligned64((v), (p))
#define put_unaligned_be32(v, p) put_unaligned32(__swab32(v), (p))
#define put_unaligned_be64(v, p) put_unaligned64(__swab64(v), (p))
#define get_unaligned	__get_unaligned_le
#define put_unaligned	__put_unaligned_le

#endif
/*
 * Cause a link-time error if we try an unaligned access other than
 * 1, 2, 4 or 8 bytes long
 */
extern int __bad_unaligned_access_size(void);

#define __get_unaligned_le(ptr) (typeof(*(ptr)))({			\
	sizeof(*(ptr)) == 1 ? *(ptr) :					\
	  (sizeof(*(ptr)) == 2 ? get_unaligned_le16((ptr)) :		\
	     (sizeof(*(ptr)) == 4 ? get_unaligned_le32((ptr)) :	\
		(sizeof(*(ptr)) == 8 ? get_unaligned_le64((ptr)) :	\
		   __bad_unaligned_access_size())));			\
	})

#define __get_unaligned_be(ptr) (__force typeof(*(ptr)))({		\
	sizeof(*(ptr)) == 1 ? *(ptr) :					\
	  (sizeof(*(ptr)) == 2 ? get_unaligned_be16((ptr)) :		\
	     (sizeof(*(ptr)) == 4 ? get_unaligned_be32((ptr)) :	\
		(sizeof(*(ptr)) == 8 ? get_unaligned_be64((ptr)) :	\
		   __bad_unaligned_access_size())));			\
	})
#define __put_unaligned_le(val, ptr) ({					\
	void *__gu_p = (ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		*(u8 *)__gu_p = (__force u8)(val);			\
		break;							\
	case 2:								\
		put_unaligned_le16((__force u16)(val), __gu_p);		\
		break;							\
	case 4:								\
		put_unaligned_le32((__force u32)(val), __gu_p);		\
		break;							\
	case 8:								\
		put_unaligned_le64((__force u64)(val), __gu_p);		\
		break;							\
	default:							\
		__bad_unaligned_access_size();				\
		break;							\
	}								\
	(void)0; })
#define __put_unaligned_be(val, ptr) ({					\
	void *__gu_p = (ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		*(u8 *)__gu_p = (__force u8)(val);			\
		break;							\
	case 2:								\
		put_unaligned_be16((__force u16)(val), __gu_p);		\
		break;							\
	case 4:								\
		put_unaligned_be32((__force u32)(val), __gu_p);		\
		break;							\
	case 8:								\
		put_unaligned_be64((__force u64)(val), __gu_p);		\
		break;							\
	default:							\
		__bad_unaligned_access_size();				\
		break;							\
	}								\
	(void)0; })

#endif /* _ASM_C6X_UNALIGNED_H */
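
For reference, a minimal usage sketch of the accessors defined above. It is not part of the header; the function and parameter names (read_wire_len, pkt) are hypothetical. It shows the typical case these helpers exist for: reading a little-endian 32-bit field that sits at an odd byte offset inside a packed buffer, where a plain pointer dereference could fault or misbehave on an alignment-sensitive machine.

	/* Hypothetical caller, assuming this header is in scope. */
	static inline u32 read_wire_len(const u8 *pkt)
	{
		/*
		 * pkt + 3 is generally not 4-byte aligned; get_unaligned_le32()
		 * expands to the ldnw-based helper (plus a byte swap on
		 * big-endian kernels), so the access is safe and the result is
		 * in CPU byte order.
		 */
		return get_unaligned_le32(pkt + 3);
	}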