/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3
4 #ifndef _MLXSW_ITEM_H
5 #define _MLXSW_ITEM_H
6
7 #include <linux/types.h>
8 #include <linux/string.h>
9 #include <linux/bitops.h>
10
/* Describes one field ("item") inside a firmware command mailbox or
 * register payload, so the accessor helpers below can locate and
 * mask it inside a raw byte buffer.
 */
struct mlxsw_item {
	unsigned short offset;		/* bytes in container */
	short step;			/* step in bytes for indexed items */
	unsigned short in_step_offset;	/* offset within one step */
	unsigned char shift;		/* shift in bits */
	unsigned char element_size;	/* size of element in bit array */
	bool no_real_shift;		/* value already carries the shift */
	union {
		unsigned char bits;	/* field width, for scalar items */
		unsigned short bytes;	/* length, for buf/bit-array items */
	} size;
	const char *name;		/* used in pr_err() diagnostics */
};
24
/* Return the offset of @item occurrence @index inside the container,
 * expressed in units of @typesize bytes (i.e. as an index into an
 * array of that element type).  Alignment of all three byte offsets
 * to @typesize is a hard invariant of the item tables; violating it
 * is a driver bug, hence BUG().
 */
static inline unsigned int
__mlxsw_item_offset(const struct mlxsw_item *item, unsigned short index,
		    size_t typesize)
{
	/* A non-zero index only makes sense for stepped (indexed) items. */
	BUG_ON(index && !item->step);
	if (item->offset % typesize != 0 ||
	    item->step % typesize != 0 ||
	    item->in_step_offset % typesize != 0) {
		pr_err("mlxsw: item bug (name=%s,offset=%x,step=%x,in_step_offset=%x,typesize=%zx)\n",
		       item->name, item->offset, item->step,
		       item->in_step_offset, typesize);
		BUG();
	}

	return ((item->offset + item->step * index + item->in_step_offset) /
		typesize);
}
42
__mlxsw_item_get8(const char * buf,const struct mlxsw_item * item,unsigned short index)43 static inline u8 __mlxsw_item_get8(const char *buf,
44 const struct mlxsw_item *item,
45 unsigned short index)
46 {
47 unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u8));
48 u8 *b = (u8 *) buf;
49 u8 tmp;
50
51 tmp = b[offset];
52 tmp >>= item->shift;
53 tmp &= GENMASK(item->size.bits - 1, 0);
54 if (item->no_real_shift)
55 tmp <<= item->shift;
56 return tmp;
57 }
58
/* Store an up-to-8-bit item at occurrence @index in @buf, preserving
 * the neighbouring bits of the containing byte (read-modify-write).
 */
static inline void __mlxsw_item_set8(char *buf, const struct mlxsw_item *item,
				     unsigned short index, u8 val)
{
	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u8));
	u8 *bytes = (u8 *) buf;
	u8 mask = GENMASK(item->size.bits - 1, 0) << item->shift;

	if (!item->no_real_shift)
		val <<= item->shift;
	bytes[offset] = (bytes[offset] & ~mask) | (val & mask);
}
76
__mlxsw_item_get16(const char * buf,const struct mlxsw_item * item,unsigned short index)77 static inline u16 __mlxsw_item_get16(const char *buf,
78 const struct mlxsw_item *item,
79 unsigned short index)
80 {
81 unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u16));
82 __be16 *b = (__be16 *) buf;
83 u16 tmp;
84
85 tmp = be16_to_cpu(b[offset]);
86 tmp >>= item->shift;
87 tmp &= GENMASK(item->size.bits - 1, 0);
88 if (item->no_real_shift)
89 tmp <<= item->shift;
90 return tmp;
91 }
92
/* Store an up-to-16-bit item at occurrence @index in @buf via a
 * big-endian read-modify-write of the containing 16-bit word.
 */
static inline void __mlxsw_item_set16(char *buf, const struct mlxsw_item *item,
				      unsigned short index, u16 val)
{
	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u16));
	__be16 *words = (__be16 *) buf;
	u16 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
	u16 old = be16_to_cpu(words[offset]);

	if (!item->no_real_shift)
		val <<= item->shift;
	words[offset] = cpu_to_be16((old & ~mask) | (val & mask));
}
110
__mlxsw_item_get32(const char * buf,const struct mlxsw_item * item,unsigned short index)111 static inline u32 __mlxsw_item_get32(const char *buf,
112 const struct mlxsw_item *item,
113 unsigned short index)
114 {
115 unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u32));
116 __be32 *b = (__be32 *) buf;
117 u32 tmp;
118
119 tmp = be32_to_cpu(b[offset]);
120 tmp >>= item->shift;
121 tmp &= GENMASK(item->size.bits - 1, 0);
122 if (item->no_real_shift)
123 tmp <<= item->shift;
124 return tmp;
125 }
126
/* Store an up-to-32-bit item at occurrence @index in @buf via a
 * big-endian read-modify-write of the containing 32-bit word.
 */
static inline void __mlxsw_item_set32(char *buf, const struct mlxsw_item *item,
				      unsigned short index, u32 val)
{
	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u32));
	__be32 *words = (__be32 *) buf;
	u32 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
	u32 old = be32_to_cpu(words[offset]);

	if (!item->no_real_shift)
		val <<= item->shift;
	words[offset] = cpu_to_be32((old & ~mask) | (val & mask));
}
144
__mlxsw_item_get64(const char * buf,const struct mlxsw_item * item,unsigned short index)145 static inline u64 __mlxsw_item_get64(const char *buf,
146 const struct mlxsw_item *item,
147 unsigned short index)
148 {
149 unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64));
150 __be64 *b = (__be64 *) buf;
151 u64 tmp;
152
153 tmp = be64_to_cpu(b[offset]);
154 tmp >>= item->shift;
155 tmp &= GENMASK_ULL(item->size.bits - 1, 0);
156 if (item->no_real_shift)
157 tmp <<= item->shift;
158 return tmp;
159 }
160
/* Store an up-to-64-bit item at occurrence @index in @buf via a
 * big-endian read-modify-write of the containing 64-bit word.
 */
static inline void __mlxsw_item_set64(char *buf, const struct mlxsw_item *item,
				      unsigned short index, u64 val)
{
	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64));
	__be64 *words = (__be64 *) buf;
	u64 mask = GENMASK_ULL(item->size.bits - 1, 0) << item->shift;
	u64 old = be64_to_cpu(words[offset]);

	if (!item->no_real_shift)
		val <<= item->shift;
	words[offset] = cpu_to_be64((old & ~mask) | (val & mask));
}
177
__mlxsw_item_memcpy_from(const char * buf,char * dst,const struct mlxsw_item * item,unsigned short index)178 static inline void __mlxsw_item_memcpy_from(const char *buf, char *dst,
179 const struct mlxsw_item *item,
180 unsigned short index)
181 {
182 unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char));
183
184 memcpy(dst, &buf[offset], item->size.bytes);
185 }
186
__mlxsw_item_memcpy_to(char * buf,const char * src,const struct mlxsw_item * item,unsigned short index)187 static inline void __mlxsw_item_memcpy_to(char *buf, const char *src,
188 const struct mlxsw_item *item,
189 unsigned short index)
190 {
191 unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char));
192
193 memcpy(&buf[offset], src, item->size.bytes);
194 }
195
/* Return a pointer to the in-place payload of a buffer-type item at
 * occurrence @index of @buf, for callers that access it directly.
 */
static inline char *__mlxsw_item_data(char *buf, const struct mlxsw_item *item,
				      unsigned short index)
{
	return buf + __mlxsw_item_offset(item, index, sizeof(char));
}
203
/* For a bit-array item, translate logical element @index into the byte
 * offset inside the container (returned) and the bit shift inside that
 * byte (via @shift).  Elements are laid out big-endian: logical index 0
 * lives in the *last* element slot of the array, hence the max_index
 * reversal below.  element_size must divide BITS_PER_BYTE so that no
 * element straddles a byte boundary.
 */
static inline u16
__mlxsw_item_bit_array_offset(const struct mlxsw_item *item,
			      u16 index, u8 *shift)
{
	u16 max_index, be_index;
	u16 offset; /* byte offset inside the array */
	u8 in_byte_index;

	/* Indexed access requires a configured element size. */
	BUG_ON(index && !item->element_size);
	if (item->offset % sizeof(u32) != 0 ||
	    BITS_PER_BYTE % item->element_size != 0) {
		pr_err("mlxsw: item bug (name=%s,offset=%x,element_size=%x)\n",
		       item->name, item->offset, item->element_size);
		BUG();
	}

	/* Reverse the index: element 0 is at the big-endian tail. */
	max_index = (item->size.bytes << 3) / item->element_size - 1;
	be_index = max_index - index;
	offset = be_index * item->element_size >> 3;
	in_byte_index = index % (BITS_PER_BYTE / item->element_size);
	*shift = in_byte_index * item->element_size;

	return item->offset + offset;
}
228
__mlxsw_item_bit_array_get(const char * buf,const struct mlxsw_item * item,u16 index)229 static inline u8 __mlxsw_item_bit_array_get(const char *buf,
230 const struct mlxsw_item *item,
231 u16 index)
232 {
233 u8 shift, tmp;
234 u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift);
235
236 tmp = buf[offset];
237 tmp >>= shift;
238 tmp &= GENMASK(item->element_size - 1, 0);
239 return tmp;
240 }
241
/* Write one element of a bit-array item, preserving its neighbours in
 * the shared byte (read-modify-write).
 */
static inline void __mlxsw_item_bit_array_set(char *buf,
					      const struct mlxsw_item *item,
					      u16 index, u8 val)
{
	u8 shift;
	u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift);
	u8 mask = GENMASK(item->element_size - 1, 0) << shift;
	u8 byte = buf[offset];

	buf[offset] = (byte & ~mask) | ((val << shift) & mask);
}
257
/* Compose the identifier of the static struct mlxsw_item instance that
 * the MLXSW_ITEM* macros below emit for a given field.
 */
#define __ITEM_NAME(_type, _cname, _iname) \
	mlxsw_##_type##_##_cname##_##_iname##_item

/* _type: cmd_mbox, reg, etc.
 * _cname: container name (e.g. command name, register name)
 * _iname: item name within the container
 */
265
/* Define an 8-bit non-indexed item plus its _get()/_set() accessors. */
#define MLXSW_ITEM8(_type, _cname, _iname, _offset, _shift, _sizebits)		\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
	.offset = _offset,							\
	.shift = _shift,							\
	.size = {.bits = _sizebits,},						\
	.name = #_type "_" #_cname "_" #_iname,					\
};										\
static inline u8 __maybe_unused							\
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf)			\
{										\
	return __mlxsw_item_get8(buf, &__ITEM_NAME(_type, _cname, _iname), 0);	\
}										\
static inline void __maybe_unused						\
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u8 val)			\
{										\
	__mlxsw_item_set8(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val);	\
}
283
/* Define an 8-bit indexed item (repeated every _step bytes) plus its
 * index-taking _get()/_set() accessors.
 */
#define MLXSW_ITEM8_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits,	\
			    _step, _instepoffset, _norealshift)			\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
	.offset = _offset,							\
	.step = _step,								\
	.in_step_offset = _instepoffset,					\
	.shift = _shift,							\
	.no_real_shift = _norealshift,						\
	.size = {.bits = _sizebits,},						\
	.name = #_type "_" #_cname "_" #_iname,					\
};										\
static inline u8 __maybe_unused							\
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
{										\
	return __mlxsw_item_get8(buf, &__ITEM_NAME(_type, _cname, _iname),	\
				 index);					\
}										\
static inline void __maybe_unused						\
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index,	\
					  u8 val)				\
{										\
	__mlxsw_item_set8(buf, &__ITEM_NAME(_type, _cname, _iname),		\
			  index, val);						\
}
308
/* Define a 16-bit non-indexed item plus its _get()/_set() accessors. */
#define MLXSW_ITEM16(_type, _cname, _iname, _offset, _shift, _sizebits)		\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
	.offset = _offset,							\
	.shift = _shift,							\
	.size = {.bits = _sizebits,},						\
	.name = #_type "_" #_cname "_" #_iname,					\
};										\
static inline u16 __maybe_unused						\
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf)			\
{										\
	return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), 0);	\
}										\
static inline void __maybe_unused						\
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 val)			\
{										\
	__mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val);	\
}
326
/* Define a 16-bit indexed item (repeated every _step bytes) plus its
 * index-taking _get()/_set() accessors.
 */
#define MLXSW_ITEM16_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits,	\
			     _step, _instepoffset, _norealshift)		\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
	.offset = _offset,							\
	.step = _step,								\
	.in_step_offset = _instepoffset,					\
	.shift = _shift,							\
	.no_real_shift = _norealshift,						\
	.size = {.bits = _sizebits,},						\
	.name = #_type "_" #_cname "_" #_iname,					\
};										\
static inline u16 __maybe_unused						\
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
{										\
	return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname),	\
				  index);					\
}										\
static inline void __maybe_unused						\
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index,	\
					  u16 val)				\
{										\
	__mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname),		\
			   index, val);						\
}
351
/* Define a 32-bit non-indexed item plus its _get()/_set() accessors. */
#define MLXSW_ITEM32(_type, _cname, _iname, _offset, _shift, _sizebits)		\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
	.offset = _offset,							\
	.shift = _shift,							\
	.size = {.bits = _sizebits,},						\
	.name = #_type "_" #_cname "_" #_iname,					\
};										\
static inline u32 __maybe_unused						\
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf)			\
{										\
	return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), 0);	\
}										\
static inline void __maybe_unused						\
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u32 val)			\
{										\
	__mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val);	\
}
369
/* Define a 32-bit indexed item (repeated every _step bytes) plus its
 * index-taking _get()/_set() accessors.
 */
#define MLXSW_ITEM32_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits,	\
			     _step, _instepoffset, _norealshift)		\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
	.offset = _offset,							\
	.step = _step,								\
	.in_step_offset = _instepoffset,					\
	.shift = _shift,							\
	.no_real_shift = _norealshift,						\
	.size = {.bits = _sizebits,},						\
	.name = #_type "_" #_cname "_" #_iname,					\
};										\
static inline u32 __maybe_unused						\
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
{										\
	return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname),	\
				  index);					\
}										\
static inline void __maybe_unused						\
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index,	\
					  u32 val)				\
{										\
	__mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname),		\
			   index, val);						\
}
394
/* Define a 64-bit non-indexed item plus its _get()/_set() accessors. */
#define MLXSW_ITEM64(_type, _cname, _iname, _offset, _shift, _sizebits)		\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
	.offset = _offset,							\
	.shift = _shift,							\
	.size = {.bits = _sizebits,},						\
	.name = #_type "_" #_cname "_" #_iname,					\
};										\
static inline u64 __maybe_unused						\
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf)			\
{										\
	return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), 0);	\
}										\
static inline void __maybe_unused						\
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u64 val)			\
{										\
	__mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val);	\
}
412
/* Define a 64-bit indexed item (repeated every _step bytes) plus its
 * index-taking _get()/_set() accessors.
 */
#define MLXSW_ITEM64_INDEXED(_type, _cname, _iname, _offset, _shift,		\
			     _sizebits, _step, _instepoffset, _norealshift)	\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
	.offset = _offset,							\
	.step = _step,								\
	.in_step_offset = _instepoffset,					\
	.shift = _shift,							\
	.no_real_shift = _norealshift,						\
	.size = {.bits = _sizebits,},						\
	.name = #_type "_" #_cname "_" #_iname,					\
};										\
static inline u64 __maybe_unused						\
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
{										\
	return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname),	\
				  index);					\
}										\
static inline void __maybe_unused						\
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index,	\
					  u64 val)				\
{										\
	__mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname),		\
			   index, val);						\
}
437
/* Define a fixed-size buffer item plus _memcpy_from()/_memcpy_to()
 * copy accessors and a _data() in-place pointer accessor.
 */
#define MLXSW_ITEM_BUF(_type, _cname, _iname, _offset, _sizebytes)		\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
	.offset = _offset,							\
	.size = {.bytes = _sizebytes,},						\
	.name = #_type "_" #_cname "_" #_iname,					\
};										\
static inline void __maybe_unused						\
mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(const char *buf, char *dst)	\
{										\
	__mlxsw_item_memcpy_from(buf, dst,					\
				 &__ITEM_NAME(_type, _cname, _iname), 0);	\
}										\
static inline void __maybe_unused						\
mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, const char *src)	\
{										\
	__mlxsw_item_memcpy_to(buf, src,					\
			       &__ITEM_NAME(_type, _cname, _iname), 0);		\
}										\
static inline char * __maybe_unused						\
mlxsw_##_type##_##_cname##_##_iname##_data(char *buf)				\
{										\
	return __mlxsw_item_data(buf, &__ITEM_NAME(_type, _cname, _iname), 0);	\
}
461
/* Define an indexed buffer item (repeated every _step bytes) plus
 * index-taking copy accessors and an in-place _data() accessor.
 */
#define MLXSW_ITEM_BUF_INDEXED(_type, _cname, _iname, _offset, _sizebytes,	\
			       _step, _instepoffset)				\
static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
	.offset = _offset,							\
	.step = _step,								\
	.in_step_offset = _instepoffset,					\
	.size = {.bytes = _sizebytes,},						\
	.name = #_type "_" #_cname "_" #_iname,					\
};										\
static inline void __maybe_unused						\
mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(const char *buf,		\
						  unsigned short index,		\
						  char *dst)			\
{										\
	__mlxsw_item_memcpy_from(buf, dst,					\
				 &__ITEM_NAME(_type, _cname, _iname), index);	\
}										\
static inline void __maybe_unused						\
mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf,			\
						unsigned short index,		\
						const char *src)		\
{										\
	__mlxsw_item_memcpy_to(buf, src,					\
			       &__ITEM_NAME(_type, _cname, _iname), index);	\
}										\
static inline char * __maybe_unused						\
mlxsw_##_type##_##_cname##_##_iname##_data(char *buf, unsigned short index)	\
{										\
	return __mlxsw_item_data(buf,						\
				 &__ITEM_NAME(_type, _cname, _iname), index);	\
}
493
494 #define MLXSW_ITEM_BIT_ARRAY(_type, _cname, _iname, _offset, _sizebytes, \
495 _element_size) \
496 static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
497 .offset = _offset, \
498 .element_size = _element_size, \
499 .size = {.bytes = _sizebytes,}, \
500 .name = #_type "_" #_cname "_" #_iname, \
501 }; \
502 static inline u8 __maybe_unused \
503 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, u16 index) \
504 { \
505 return __mlxsw_item_bit_array_get(buf, \
506 &__ITEM_NAME(_type, _cname, _iname), \
507 index); \
508 } \
509 static inline void __maybe_unused \
510 mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 index, u8 val) \
511 { \
512 return __mlxsw_item_bit_array_set(buf, \
513 &__ITEM_NAME(_type, _cname, _iname), \
514 index, val); \
515 } \
516
517 #endif
518