/*
 * lz4defs.h -- architecture specific defines
 *
 * Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef __XEN__
#include <asm/byteorder.h>
#endif

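/*
 * Read 16/32-bit little-endian values from the compressed stream,
 * converting them to host byte order.
 */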
static inline u16 INIT get_unaligned_le16(const void *p)
{
	return le16_to_cpup(p);
}

static inline u32 INIT get_unaligned_le32(const void *p)
{
	return le32_to_cpup(p);
}

/*
 * Detect 64-bit mode.
 */
#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) \
	|| defined(__ppc64__) || defined(__LP64__))
#define LZ4_ARCH64 1
#else
#define LZ4_ARCH64 0
#endif

/*
 * Architecture-specific macros
 */
#define BYTE u8
typedef struct _U16_S { u16 v; } U16_S;
typedef struct _U32_S { u32 v; } U32_S;
typedef struct _U64_S { u64 v; } U64_S;
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) \
	|| (defined(CONFIG_ARM) && __LINUX_ARM_ARCH__ >= 6 \
	&& defined(ARM_EFFICIENT_UNALIGNED_ACCESS))

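/*
 * The target handles unaligned accesses efficiently, so the stream can be
 * read and written directly through the wrapper structs.
 */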
#define A16(x) (((U16_S *)(x))->v)
#define A32(x) (((U32_S *)(x))->v)
#define A64(x) (((U64_S *)(x))->v)

#define PUT4(s, d) (A32(d) = A32(s))
#define PUT8(s, d) (A64(d) = A64(s))
/* Write v as little-endian regardless of host byte order. */
#define LZ4_WRITE_LITTLEENDIAN_16(p, v)	\
	do {	\
		A16(p) = cpu_to_le16(v);	\
		p += 2;	\
	} while (0)
#else /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */

#define A64(x) get_unaligned((u64 *)&(((U64_S *)(x))->v))
#define A32(x) get_unaligned((u32 *)&(((U32_S *)(x))->v))
#define A16(x) get_unaligned((u16 *)&(((U16_S *)(x))->v))

#define PUT4(s, d) \
	put_unaligned(get_unaligned((const u32 *) s), (u32 *) d)
#define PUT8(s, d) \
	put_unaligned(get_unaligned((const u64 *) s), (u64 *) d)

/* Write v as little-endian regardless of host byte order. */
#define LZ4_WRITE_LITTLEENDIAN_16(p, v)	\
	do {	\
		put_unaligned(cpu_to_le16(v), (u16 *)(p));	\
		p += 2;	\
	} while (0)
#endif

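/*
 * LZ4 token layout: the upper RUN_BITS of each token hold the literal run
 * length and the lower ML_BITS hold the match length; a field saturated at
 * RUN_MASK/ML_MASK signals that extra length bytes follow.
 */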
#define COPYLENGTH 8
#define ML_BITS 4
#define ML_MASK ((1U << ML_BITS) - 1)
#define RUN_BITS (8 - ML_BITS)
#define RUN_MASK ((1U << RUN_BITS) - 1)
#define MEMORY_USAGE 14
#define MINMATCH 4
#define SKIPSTRENGTH 6
#define LASTLITERALS 5
#define MFLIMIT (COPYLENGTH + MINMATCH)
#define MINLENGTH (MFLIMIT + 1)
#define MAXD_LOG 16
#define MAXD (1 << MAXD_LOG)
#define MAXD_MASK (u32)(MAXD - 1)
#define MAX_DISTANCE (MAXD - 1)
#define HASH_LOG (MAXD_LOG - 1)
#define HASHTABLESIZE (1 << HASH_LOG)
#define MAX_NB_ATTEMPTS 256
#define OPTIMAL_ML (int)((ML_MASK - 1) + MINMATCH)
#define LZ4_64KLIMIT ((1 << 16) + (MFLIMIT - 1))
#define HASHLOG64K ((MEMORY_USAGE - 2) + 1)
#define HASH64KTABLESIZE (1U << HASHLOG64K)
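/*
 * Multiplicative (Fibonacci) hashing of the first MINMATCH bytes at p:
 * 2654435761U is a prime close to 2^32 divided by the golden ratio, and
 * the right shift keeps only the top bits of the product.
 */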
#define LZ4_HASH_VALUE(p) (((A32(p)) * 2654435761U) >> \
	((MINMATCH * 8) - (MEMORY_USAGE - 2)))
#define LZ4_HASH64K_VALUE(p) (((A32(p)) * 2654435761U) >> \
	((MINMATCH * 8) - HASHLOG64K))
#define HASH_VALUE(p) (((A32(p)) * 2654435761U) >> \
	((MINMATCH * 8) - HASH_LOG))

#if LZ4_ARCH64 /* 64-bit */
#define STEPSIZE 8

#define LZ4_COPYSTEP(s, d)	\
	do {	\
		PUT8(s, d);	\
		d += 8;	\
		s += 8;	\
	} while (0)

#define LZ4_COPYPACKET(s, d) LZ4_COPYSTEP(s, d)

#define LZ4_SECURECOPY(s, d, e)	\
	do {	\
		if (d < e) {	\
			LZ4_WILDCOPY(s, d, e);	\
		}	\
	} while (0)
#define HTYPE u32

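/*
 * Given the XOR of two STEPSIZE-byte words, count how many of their
 * leading bytes match: equal bytes XOR to zero, so the match length is
 * the number of zero bits at the low end (little-endian) or high end
 * (big-endian) divided by 8.
 */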
#ifdef __BIG_ENDIAN
#define LZ4_NBCOMMONBYTES(val) (__builtin_clzll(val) >> 3)
#else
#define LZ4_NBCOMMONBYTES(val) (__builtin_ctzll(val) >> 3)
#endif

#else /* 32-bit */
#define STEPSIZE 4

#define LZ4_COPYSTEP(s, d)	\
	do {	\
		PUT4(s, d);	\
		d += 4;	\
		s += 4;	\
	} while (0)

#define LZ4_COPYPACKET(s, d)	\
	do {	\
		LZ4_COPYSTEP(s, d);	\
		LZ4_COPYSTEP(s, d);	\
	} while (0)

#define LZ4_SECURECOPY LZ4_WILDCOPY
#define HTYPE const u8*

#ifdef __BIG_ENDIAN
#define LZ4_NBCOMMONBYTES(val) (__builtin_clz(val) >> 3)
#else
#define LZ4_NBCOMMONBYTES(val) (__builtin_ctz(val) >> 3)
#endif

#endif /* LZ4_ARCH64 */

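/*
 * Read the 2-byte little-endian match offset at p and point d at the
 * match source, i.e. s minus that offset.
 */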
#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
	(d = s - get_unaligned_le16(p))

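/*
 * Copy 8-byte packets from s to d until d reaches e. The final packet may
 * overrun e by up to COPYLENGTH - 1 bytes, which is why the format keeps
 * MFLIMIT/LASTLITERALS bytes of slack at the end of each block.
 */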
#define LZ4_WILDCOPY(s, d, e)	\
	do {	\
		LZ4_COPYPACKET(s, d);	\
	} while (d < e)

#define LZ4_BLINDCOPY(s, d, l)	\
	do {	\
		u8 *e = (d) + l;	\
		LZ4_WILDCOPY(s, d, e);	\
		d = e;	\
	} while (0)

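/*
 * Illustrative sketch, not part of the build, of how a decoder inner loop
 * might combine these macros (identifiers ip/op/ref/cpy are hypothetical):
 *
 *	unsigned int token = *ip++;
 *	size_t length = token >> ML_BITS;	// literal run length
 *	u8 *cpy = op + length;
 *	LZ4_WILDCOPY(ip, op, cpy);	// op advances to >= cpy, ip in lockstep
 *	ip -= (op - cpy);		// undo the input-side overshoot
 *	op = cpy;
 *	LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip);	// ref = cpy - offset
 *	ip += 2;			// skip the offset field
 */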