/*
 * linux/fs/befs/endian.h
 *
 * Functions for converting disk and memory data to the native byte order.
 */
#ifndef LINUX_BEFS_ENDIAN
#define LINUX_BEFS_ENDIAN

#include <asm/byteorder.h>
0014
0015 static inline u64
0016 fs64_to_cpu(const struct super_block *sb, fs64 n)
0017 {
0018 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
0019 return le64_to_cpu((__force __le64)n);
0020 else
0021 return be64_to_cpu((__force __be64)n);
0022 }
0023
0024 static inline fs64
0025 cpu_to_fs64(const struct super_block *sb, u64 n)
0026 {
0027 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
0028 return (__force fs64)cpu_to_le64(n);
0029 else
0030 return (__force fs64)cpu_to_be64(n);
0031 }
0032
0033 static inline u32
0034 fs32_to_cpu(const struct super_block *sb, fs32 n)
0035 {
0036 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
0037 return le32_to_cpu((__force __le32)n);
0038 else
0039 return be32_to_cpu((__force __be32)n);
0040 }
0041
0042 static inline fs32
0043 cpu_to_fs32(const struct super_block *sb, u32 n)
0044 {
0045 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
0046 return (__force fs32)cpu_to_le32(n);
0047 else
0048 return (__force fs32)cpu_to_be32(n);
0049 }
0050
0051 static inline u16
0052 fs16_to_cpu(const struct super_block *sb, fs16 n)
0053 {
0054 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
0055 return le16_to_cpu((__force __le16)n);
0056 else
0057 return be16_to_cpu((__force __be16)n);
0058 }
0059
0060 static inline fs16
0061 cpu_to_fs16(const struct super_block *sb, u16 n)
0062 {
0063 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
0064 return (__force fs16)cpu_to_le16(n);
0065 else
0066 return (__force fs16)cpu_to_be16(n);
0067 }
0068
0069
0070
0071 static inline befs_block_run
0072 fsrun_to_cpu(const struct super_block *sb, befs_disk_block_run n)
0073 {
0074 befs_block_run run;
0075
0076 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE) {
0077 run.allocation_group = le32_to_cpu((__force __le32)n.allocation_group);
0078 run.start = le16_to_cpu((__force __le16)n.start);
0079 run.len = le16_to_cpu((__force __le16)n.len);
0080 } else {
0081 run.allocation_group = be32_to_cpu((__force __be32)n.allocation_group);
0082 run.start = be16_to_cpu((__force __be16)n.start);
0083 run.len = be16_to_cpu((__force __be16)n.len);
0084 }
0085 return run;
0086 }
0087
0088 static inline befs_disk_block_run
0089 cpu_to_fsrun(const struct super_block *sb, befs_block_run n)
0090 {
0091 befs_disk_block_run run;
0092
0093 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE) {
0094 run.allocation_group = cpu_to_le32(n.allocation_group);
0095 run.start = cpu_to_le16(n.start);
0096 run.len = cpu_to_le16(n.len);
0097 } else {
0098 run.allocation_group = cpu_to_be32(n.allocation_group);
0099 run.start = cpu_to_be16(n.start);
0100 run.len = cpu_to_be16(n.len);
0101 }
0102 return run;
0103 }
0104
0105 static inline befs_data_stream
0106 fsds_to_cpu(const struct super_block *sb, const befs_disk_data_stream *n)
0107 {
0108 befs_data_stream data;
0109 int i;
0110
0111 for (i = 0; i < BEFS_NUM_DIRECT_BLOCKS; ++i)
0112 data.direct[i] = fsrun_to_cpu(sb, n->direct[i]);
0113
0114 data.max_direct_range = fs64_to_cpu(sb, n->max_direct_range);
0115 data.indirect = fsrun_to_cpu(sb, n->indirect);
0116 data.max_indirect_range = fs64_to_cpu(sb, n->max_indirect_range);
0117 data.double_indirect = fsrun_to_cpu(sb, n->double_indirect);
0118 data.max_double_indirect_range = fs64_to_cpu(sb,
0119 n->
0120 max_double_indirect_range);
0121 data.size = fs64_to_cpu(sb, n->size);
0122
0123 return data;
0124 }
0125
#endif				/* LINUX_BEFS_ENDIAN */