/*
 * linux/fs/befs/endian.h
 *
 * Copyright (C) 2001 Will Dyson <will_dyson@pobox.com>
 *
 * Partially based on similar functions in the sysv driver.
 */

#ifndef LINUX_BEFS_ENDIAN
#define LINUX_BEFS_ENDIAN

#include <asm/byteorder.h>
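
/*
 * BeFS volumes can be either little- or big-endian; the byte order is
 * recorded per volume, so every conversion below dispatches on
 * BEFS_SB(sb)->byte_order rather than assuming a fixed on-disk order.
 * BEFS_SB(), BEFS_BYTESEX_LE and the fs64/fs32/fs16 bitwise types are
 * expected to come from befs.h, which callers include before this
 * header.
 */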

static inline u64
fs64_to_cpu(const struct super_block *sb, fs64 n)
{
        if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
                return le64_to_cpu((__force __le64)n);
        else
                return be64_to_cpu((__force __be64)n);
}

static inline fs64
cpu_to_fs64(const struct super_block *sb, u64 n)
{
        if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
                return (__force fs64)cpu_to_le64(n);
        else
                return (__force fs64)cpu_to_be64(n);
}

static inline u32
fs32_to_cpu(const struct super_block *sb, fs32 n)
{
        if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
                return le32_to_cpu((__force __le32)n);
        else
                return be32_to_cpu((__force __be32)n);
}

static inline fs32
cpu_to_fs32(const struct super_block *sb, u32 n)
{
        if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
                return (__force fs32)cpu_to_le32(n);
        else
                return (__force fs32)cpu_to_be32(n);
}

static inline u16
fs16_to_cpu(const struct super_block *sb, fs16 n)
{
        if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
                return le16_to_cpu((__force __le16)n);
        else
                return be16_to_cpu((__force __be16)n);
}

static inline fs16
cpu_to_fs16(const struct super_block *sb, u16 n)
{
        if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
                return (__force fs16)cpu_to_le16(n);
        else
                return (__force fs16)cpu_to_be16(n);
}

/* Composite types below here */
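
/*
 * fsrun_to_cpu()/cpu_to_fsrun() translate a BeFS block run (the
 * allocation_group/start/len triple used to address a run of blocks
 * on disk) between the volume's byte order and CPU byte order,
 * field by field.
 */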
static inline befs_block_run
fsrun_to_cpu(const struct super_block *sb, befs_disk_block_run n)
{
        befs_block_run run;

        if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE) {
                run.allocation_group = le32_to_cpu((__force __le32)n.allocation_group);
                run.start = le16_to_cpu((__force __le16)n.start);
                run.len = le16_to_cpu((__force __le16)n.len);
        } else {
                run.allocation_group = be32_to_cpu((__force __be32)n.allocation_group);
                run.start = be16_to_cpu((__force __be16)n.start);
                run.len = be16_to_cpu((__force __be16)n.len);
        }
        return run;
}

static inline befs_disk_block_run
cpu_to_fsrun(const struct super_block *sb, befs_block_run n)
{
        befs_disk_block_run run;

        if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE) {
                run.allocation_group = cpu_to_le32(n.allocation_group);
                run.start = cpu_to_le16(n.start);
                run.len = cpu_to_le16(n.len);
        } else {
                run.allocation_group = cpu_to_be32(n.allocation_group);
                run.start = cpu_to_be16(n.start);
                run.len = cpu_to_be16(n.len);
        }
        return run;
}
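
/*
 * fsds_to_cpu() converts a whole on-disk data stream: each direct
 * block run, the indirect and double-indirect runs, the max_*_range
 * limits and the stream size.  Unlike the smaller helpers it takes a
 * pointer rather than a by-value argument.
 */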
static inline befs_data_stream
fsds_to_cpu(const struct super_block *sb, const befs_disk_data_stream *n)
{
        befs_data_stream data;
        int i;

        for (i = 0; i < BEFS_NUM_DIRECT_BLOCKS; ++i)
                data.direct[i] = fsrun_to_cpu(sb, n->direct[i]);

        data.max_direct_range = fs64_to_cpu(sb, n->max_direct_range);
        data.indirect = fsrun_to_cpu(sb, n->indirect);
        data.max_indirect_range = fs64_to_cpu(sb, n->max_indirect_range);
        data.double_indirect = fsrun_to_cpu(sb, n->double_indirect);
        data.max_double_indirect_range = fs64_to_cpu(sb,
                                                     n->max_double_indirect_range);
        data.size = fs64_to_cpu(sb, n->size);

        return data;
}

#endif //LINUX_BEFS_ENDIAN