/* SPDX-License-Identifier: GPL-2.0
 *
 * Copyright (C) 2018 Marvell International Ltd.
 */

#ifndef __OCTEONTX_BCH_REGS_H__
#define __OCTEONTX_BCH_REGS_H__

#define BCH_NR_VF	1	/* number of BCH virtual functions */

union bch_cmd {
	u64 u[4];
	struct fields {
		struct {
			u64 size:12;
			u64 reserved_12_31:20;
			u64 ecc_level:4;
			u64 reserved_36_61:26;
			u64 ecc_gen:2;
		} cword;
		struct {
			u64 ptr:49;
			u64 reserved_49_55:7;
			u64 nc:1;
			u64 fw:1;
			u64 reserved_58_63:6;
		} oword;
		struct {
			u64 ptr:49;
			u64 reserved_49_55:7;
			u64 nc:1;
			u64 reserved_57_63:7;
		} iword;
		struct {
			u64 ptr:49;
			u64 reserved_49_63:15;
		} rword;
	} s;
};
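
/*
 * Illustrative sketch (not part of the driver): assembling one
 * correction command from the bitfields above. The DMA addresses and
 * the 512-byte / 8-bit geometry are made-up example values.
 *
 *	union bch_cmd cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.s.cword.ecc_gen = eg_correct;
 *	cmd.s.cword.ecc_level = 8;	(correct up to 8 flipped bits)
 *	cmd.s.cword.size = 512;		(data block size in bytes)
 *	cmd.s.iword.ptr = input_dma;	(data + parity to check)
 *	cmd.s.oword.ptr = output_dma;	(corrected data destination)
 *	cmd.s.rword.ptr = resp_dma;	(union bch_resp is written here)
 */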

/* Values for the 2-bit cword.ecc_gen field in union bch_cmd */
enum ecc_gen {
	eg_correct,
	eg_copy,
	eg_gen,
	eg_copy3,
};

/** Response from BCH instruction */
union bch_resp {
	u16 u16;
	struct {
		u16 num_errors:7;	/** Number of errors in block */
		u16 zero:6;		/** Always zero, ignore */
		u16 erased:1;		/** Block is erased */
		u16 uncorrectable:1;	/** Too many bits flipped */
		u16 done:1;		/** Block is done */
	} s;
};
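
/*
 * Illustrative sketch: waiting on the response word the engine writes
 * through cmd.s.rword.ptr. The resp pointer and the bare busy-wait are
 * assumptions for the example, not the driver's own polling logic.
 *
 *	union bch_resp *resp = resp_vaddr;	(CPU view of resp_dma)
 *
 *	while (!resp->s.done)
 *		;
 *	if (resp->s.uncorrectable)
 *		return -EIO;
 *	corrected = resp->s.erased ? 0 : resp->s.num_errors;
 */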

union bch_vqx_ctl {
	u64 u;
	struct {
		u64 reserved_0:1;
		u64 cmd_be:1;
		u64 max_read:4;
		u64 reserved_6_15:10;
		u64 erase_disable:1;
		u64 one_cmd:1;
		u64 early_term:4;
		u64 reserved_22_63:42;
	} s;
};
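
/*
 * Illustrative sketch: a read-modify-write of the queue control CSR.
 * The vf_base pointer, the BCH_VQX_CTL_OFFSET name, and the choice of
 * field value are all assumptions, not taken from a hardware manual.
 *
 *	union bch_vqx_ctl ctl;
 *
 *	ctl.u = readq(vf_base + BCH_VQX_CTL_OFFSET);
 *	ctl.s.erase_disable = 0;	(keep erased-block detection on)
 *	writeq(ctl.u, vf_base + BCH_VQX_CTL_OFFSET);
 */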

union bch_vqx_cmd_buf {
	u64 u;
	struct {
		u64 reserved_0_32:33;
		u64 size:13;
		u64 dfb:1;
		u64 ldwb:1;
		u64 reserved_48_63:16;
	} s;
};
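
/*
 * Illustrative sketch: advertising the command-queue chunk size to the
 * engine, using QDEPTH defined below. The register offset name and the
 * assumption that size is counted in u64 words are hypothetical.
 *
 *	union bch_vqx_cmd_buf cbuf;
 *
 *	cbuf.u = 0;
 *	cbuf.s.size = QDEPTH;	(u64 words per chunk, incl next-pointer)
 *	writeq(cbuf.u, vf_base + BCH_VQX_CMD_BUF_OFFSET);
 */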

/* Keep queue state indexed, even though only one queue is supported here,
 * to ease later generalization to similarly-shaped queues on other Cavium
 * devices.
 */
enum {
	QID_BCH,
	QID_MAX
};

struct bch_q {
	struct udevice *dev;	/* BCH device that owns this queue */
	int index;		/* producer index into the command queue */
	u16 max_depth;		/* maximum queue depth */
	u16 pool_size_m1;	/* command buffer pool size minus one */
	u64 *base_vaddr;	/* CPU (virtual) address of queue memory */
	dma_addr_t base_paddr;	/* DMA (physical) address of queue memory */
};

extern struct bch_q octeontx_bch_q[QID_MAX];

/* With a single DMA-mapped area per queue, virt<->phys conversion is just
 * adding or subtracting the fixed (base_vaddr, base_paddr) offset.
 */
static inline dma_addr_t qphys(int qid, void *v)
{
	struct bch_q *q = &octeontx_bch_q[qid];
	int off = (u8 *)v - (u8 *)q->base_vaddr;

	return q->base_paddr + off;
}

#define octeontx_ptr_to_phys(v) qphys(QID_BCH, (v))

static inline void *qvirt(int qid, dma_addr_t p)
{
	struct bch_q *q = &octeontx_bch_q[qid];
	int off = p - q->base_paddr;

	/* off is a byte offset; cast so it isn't scaled by sizeof(u64) */
	return (u8 *)q->base_vaddr + off;
}

#define octeontx_phys_to_ptr(p) qvirt(QID_BCH, (p))
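
/*
 * Illustrative sketch: round-tripping a queue slot address. Assumes
 * the driver has already populated octeontx_bch_q[QID_BCH].
 *
 *	u64 *slot = octeontx_bch_q[QID_BCH].base_vaddr + 4;
 *	dma_addr_t pa = octeontx_ptr_to_phys(slot);
 *
 * pa is what the engine sees; octeontx_phys_to_ptr(pa) recovers slot,
 * since both helpers measure the same byte offset from the base pair.
 */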

/* Plenty for interleaved r/w on two planes with a 16k page and 1k ecc_size */
/* QDEPTH must be >= 16, as successive chunks must align on 128-byte boundaries */
#define QDEPTH	256	/* u64s in a command queue chunk, incl next-pointer */
#define NQS	1	/* number of linked chunks in the chain */
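
/*
 * Worked numbers for the defaults above: one chunk spans
 * QDEPTH * sizeof(u64) = 256 * 8 = 2048 bytes, a clean multiple of the
 * 128-byte (16-u64) alignment unit; its last slot is reserved for the
 * next-chunk pointer, leaving 255 usable command words per chunk.
 */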

/**
 * Write an arbitrary number of command words to a command queue.
 * This is a generic function; the fixed-command-count variants yield
 * higher performance.
 *
 * Could merge with the crypto version for FPA use on cn83xx.
 */
static inline int octeontx_cmd_queue_write(int queue_id, bool use_locking,
					   int cmd_count, const u64 *cmds)
{
	int ret = 0;
	u64 *cmd_ptr;
	struct bch_q *qptr = &octeontx_bch_q[queue_id];

	if (unlikely(cmd_count < 1 || cmd_count > 32))
		return -EINVAL;
	if (unlikely(!cmds))
		return -EINVAL;

	cmd_ptr = qptr->base_vaddr;

	while (cmd_count > 0) {
		int slot = qptr->index % (QDEPTH * NQS);

		/* the last slot of each chunk holds the next-chunk
		 * pointer, so command words must skip over it
		 */
		if (slot % QDEPTH != QDEPTH - 1) {
			cmd_ptr[slot] = *cmds++;
			cmd_count--;
		}

		qptr->index++;
	}

	__iowmb(); /* flush commands before ringing the doorbell */

	return ret;
}
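
/*
 * Illustrative sketch: pushing the 4-word command assembled earlier,
 * then ringing the doorbell. The vf_base pointer, the doorbell offset
 * name, and the written value are hypothetical.
 *
 *	rc = octeontx_cmd_queue_write(QID_BCH, false, 4, cmd.u);
 *	if (rc)
 *		return rc;
 *	writeq(1, vf_base + BCH_VQX_DOORBELL_OFFSET);	(one new command)
 */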

#endif /* __OCTEONTX_BCH_REGS_H__ */