// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2020 Intel Corporation */
#include "adf_gen2_hw_data.h"
#include "icp_qat_hw.h"
#include <linux/pci.h>

#define ADF_GEN2_PF2VF_OFFSET(i)	(0x3A000 + 0x280 + ((i) * 0x04))

u32 adf_gen2_get_pf2vf_offset(u32 i)
{
	return ADF_GEN2_PF2VF_OFFSET(i);
}
EXPORT_SYMBOL_GPL(adf_gen2_get_pf2vf_offset);
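
/*
 * Worked example (illustrative): the PF2VF message CSR for a given VF sits
 * at 0x3A280 plus 4 bytes per VF index, so VF 0 maps to 0x3A280 and VF 3 to
 * ADF_GEN2_PF2VF_OFFSET(3) == 0x3A280 + 0x0C == 0x3A28C.
 */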

u32 adf_gen2_get_vf2pf_sources(void __iomem *pmisc_addr)
{
	u32 errsou3, errmsk3, vf_int_mask;

	/* Get the interrupt sources triggered by VFs */
	errsou3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU3);
	vf_int_mask = ADF_GEN2_ERR_REG_VF2PF(errsou3);

	/* To avoid adding duplicate entries to the work queue, clear
	 * vf_int_mask bits that are already masked in the ERRMSK register.
	 */
	errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3);
	vf_int_mask &= ~ADF_GEN2_ERR_REG_VF2PF(errmsk3);

	return vf_int_mask;
}
EXPORT_SYMBOL_GPL(adf_gen2_get_vf2pf_sources);

void adf_gen2_enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
{
	/* Enable VF2PF Messaging Ints - VFs 0 through 15 per vf_mask[15:0] */
	if (vf_mask & 0xFFFF) {
		u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
			  & ~ADF_GEN2_ERR_MSK_VF2PF(vf_mask);
		ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
	}
}
EXPORT_SYMBOL_GPL(adf_gen2_enable_vf2pf_interrupts);

void adf_gen2_disable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
{
	/* Disable VF2PF interrupts for VFs 0 through 15 per vf_mask[15:0] */
	if (vf_mask & 0xFFFF) {
		u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
			  | ADF_GEN2_ERR_MSK_VF2PF(vf_mask);
		ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
	}
}
EXPORT_SYMBOL_GPL(adf_gen2_disable_vf2pf_interrupts);
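
/*
 * Sketch of how the three helpers above fit together in a PF interrupt
 * handler (illustrative only; the real flow lives in the common ISR code
 * and the names below are placeholders):
 *
 *	u32 pending = adf_gen2_get_vf2pf_sources(pmisc_addr);
 *
 *	if (pending) {
 *		// Mask the sources about to be serviced so they are not
 *		// re-queued, handle the VF2PF messages, then unmask.
 *		adf_gen2_disable_vf2pf_interrupts(pmisc_addr, pending);
 *		handle_vf2pf_messages(pending);
 *		adf_gen2_enable_vf2pf_interrupts(pmisc_addr, pending);
 *	}
 */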

u32 adf_gen2_get_num_accels(struct adf_hw_device_data *self)
{
	if (!self || !self->accel_mask)
		return 0;

	return hweight16(self->accel_mask);
}
EXPORT_SYMBOL_GPL(adf_gen2_get_num_accels);

u32 adf_gen2_get_num_aes(struct adf_hw_device_data *self)
{
	if (!self || !self->ae_mask)
		return 0;

	return hweight32(self->ae_mask);
}
EXPORT_SYMBOL_GPL(adf_gen2_get_num_aes);

void adf_gen2_enable_error_correction(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *misc_bar = &GET_BARS(accel_dev)
					[hw_data->get_misc_bar_id(hw_data)];
	unsigned long accel_mask = hw_data->accel_mask;
	unsigned long ae_mask = hw_data->ae_mask;
	void __iomem *csr = misc_bar->virt_addr;
	unsigned int val, i;

	/* Enable Accel Engine error detection & correction */
	for_each_set_bit(i, &ae_mask, hw_data->num_engines) {
		val = ADF_CSR_RD(csr, ADF_GEN2_AE_CTX_ENABLES(i));
		val |= ADF_GEN2_ENABLE_AE_ECC_ERR;
		ADF_CSR_WR(csr, ADF_GEN2_AE_CTX_ENABLES(i), val);
		val = ADF_CSR_RD(csr, ADF_GEN2_AE_MISC_CONTROL(i));
		val |= ADF_GEN2_ENABLE_AE_ECC_PARITY_CORR;
		ADF_CSR_WR(csr, ADF_GEN2_AE_MISC_CONTROL(i), val);
	}

	/* Enable shared memory error detection & correction */
	for_each_set_bit(i, &accel_mask, hw_data->num_accel) {
		val = ADF_CSR_RD(csr, ADF_GEN2_UERRSSMSH(i));
		val |= ADF_GEN2_ERRSSMSH_EN;
		ADF_CSR_WR(csr, ADF_GEN2_UERRSSMSH(i), val);
		val = ADF_CSR_RD(csr, ADF_GEN2_CERRSSMSH(i));
		val |= ADF_GEN2_ERRSSMSH_EN;
		ADF_CSR_WR(csr, ADF_GEN2_CERRSSMSH(i), val);
	}
}
EXPORT_SYMBOL_GPL(adf_gen2_enable_error_correction);
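
/*
 * These GEN2 helpers are meant to be plugged into a device's hw_data ops
 * during probe. A minimal wiring sketch, assuming the usual field names in
 * struct adf_hw_device_data:
 *
 *	hw_data->get_num_accels = adf_gen2_get_num_accels;
 *	hw_data->get_num_aes = adf_gen2_get_num_aes;
 *	hw_data->enable_error_correction = adf_gen2_enable_error_correction;
 */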

void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable,
			   int num_a_regs, int num_b_regs)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	void __iomem *pmisc_addr;
	struct adf_bar *pmisc;
	int pmisc_id, i;
	u32 reg;

	pmisc_id = hw_data->get_misc_bar_id(hw_data);
	pmisc = &GET_BARS(accel_dev)[pmisc_id];
	pmisc_addr = pmisc->virt_addr;

	/* Set/Unset Valid bit in AE Thread to PCIe Function Mapping Group A */
	for (i = 0; i < num_a_regs; i++) {
		reg = READ_CSR_AE2FUNCTION_MAP_A(pmisc_addr, i);
		if (enable)
			reg |= AE2FUNCTION_MAP_VALID;
		else
			reg &= ~AE2FUNCTION_MAP_VALID;
		WRITE_CSR_AE2FUNCTION_MAP_A(pmisc_addr, i, reg);
	}

	/* Set/Unset Valid bit in AE Thread to PCIe Function Mapping Group B */
	for (i = 0; i < num_b_regs; i++) {
		reg = READ_CSR_AE2FUNCTION_MAP_B(pmisc_addr, i);
		if (enable)
			reg |= AE2FUNCTION_MAP_VALID;
		else
			reg &= ~AE2FUNCTION_MAP_VALID;
		WRITE_CSR_AE2FUNCTION_MAP_B(pmisc_addr, i, reg);
	}
}
EXPORT_SYMBOL_GPL(adf_gen2_cfg_iov_thds);
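
/*
 * Usage sketch (illustrative): a GEN2 driver typically exposes this through
 * its configure_iov_threads hook and calls it with the device-specific
 * number of Group A/B mapping registers when SR-IOV is enabled or disabled.
 * The register-count macros below are placeholders for per-device values:
 *
 *	adf_gen2_cfg_iov_thds(accel_dev, true,
 *			      MY_DEV_AE2FUNC_MAP_GRP_A_NUM_REGS,
 *			      MY_DEV_AE2FUNC_MAP_GRP_B_NUM_REGS);
 */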

void adf_gen2_get_admin_info(struct admin_info *admin_csrs_info)
{
	admin_csrs_info->mailbox_offset = ADF_MAILBOX_BASE_OFFSET;
	admin_csrs_info->admin_msg_ur = ADF_ADMINMSGUR_OFFSET;
	admin_csrs_info->admin_msg_lr = ADF_ADMINMSGLR_OFFSET;
}
EXPORT_SYMBOL_GPL(adf_gen2_get_admin_info);

void adf_gen2_get_arb_info(struct arb_info *arb_info)
{
	arb_info->arb_cfg = ADF_ARB_CONFIG;
	arb_info->arb_offset = ADF_ARB_OFFSET;
	arb_info->wt2sam_offset = ADF_ARB_WRK_2_SER_MAP_OFFSET;
}
EXPORT_SYMBOL_GPL(adf_gen2_get_arb_info);
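
/*
 * The static helpers below are thin wrappers around the GEN2 ring CSR access
 * macros. They exist only to populate struct adf_hw_csr_ops, so that the
 * generation-agnostic transport code can program rings without knowing the
 * GEN2 register layout.
 */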
static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size)
{
	return BUILD_RING_BASE_ADDR(addr, size);
}

static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring)
{
	return READ_CSR_RING_HEAD(csr_base_addr, bank, ring);
}

static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring,
				u32 value)
{
	WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value);
}

static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring)
{
	return READ_CSR_RING_TAIL(csr_base_addr, bank, ring);
}

static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring,
				u32 value)
{
	WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value);
}

static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank)
{
	return READ_CSR_E_STAT(csr_base_addr, bank);
}

static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank,
				  u32 ring, u32 value)
{
	WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value);
}

static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring,
				dma_addr_t addr)
{
	WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr);
}

static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank, u32 value)
{
	WRITE_CSR_INT_FLAG(csr_base_addr, bank, value);
}

static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank)
{
	WRITE_CSR_INT_SRCSEL(csr_base_addr, bank);
}

static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank,
				 u32 value)
{
	WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value);
}

static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank,
				  u32 value)
{
	WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value);
}

static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank,
				       u32 value)
{
	WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value);
}

static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank,
				      u32 value)
{
	WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value);
}

void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
{
	csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr;
	csr_ops->read_csr_ring_head = read_csr_ring_head;
	csr_ops->write_csr_ring_head = write_csr_ring_head;
	csr_ops->read_csr_ring_tail = read_csr_ring_tail;
	csr_ops->write_csr_ring_tail = write_csr_ring_tail;
	csr_ops->read_csr_e_stat = read_csr_e_stat;
	csr_ops->write_csr_ring_config = write_csr_ring_config;
	csr_ops->write_csr_ring_base = write_csr_ring_base;
	csr_ops->write_csr_int_flag = write_csr_int_flag;
	csr_ops->write_csr_int_srcsel = write_csr_int_srcsel;
	csr_ops->write_csr_int_col_en = write_csr_int_col_en;
	csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl;
	csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col;
	csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
}
EXPORT_SYMBOL_GPL(adf_gen2_init_hw_csr_ops);
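
/*
 * Illustrative use (a sketch, not the exact transport code): once a GEN2
 * driver has called adf_gen2_init_hw_csr_ops() on its hw_data csr_ops,
 * generic ring code can program a ring bank without knowing the GEN2 CSR
 * layout. The variable names below are placeholders:
 *
 *	struct adf_hw_csr_ops *ops = &hw_data->csr_ops;
 *	u64 ring_base = ops->build_csr_ring_base_addr(ring_dma, ring_size);
 *
 *	ops->write_csr_ring_base(csr_addr, bank_nr, ring_nr, ring_base);
 *	ops->write_csr_ring_config(csr_addr, bank_nr, ring_nr, ring_config);
 *	ops->write_csr_ring_head(csr_addr, bank_nr, ring_nr, 0);
 *	ops->write_csr_ring_tail(csr_addr, bank_nr, ring_nr, 0);
 */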

u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
	u32 straps = hw_data->straps;
	u32 fuses = hw_data->fuses;
	u32 legfuses;
	u32 capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
			   ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
			   ICP_ACCEL_CAPABILITIES_AUTHENTICATION;

	/* Read accelerator capabilities mask */
	pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET, &legfuses);

	if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE)
		capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
	if (legfuses & ICP_ACCEL_MASK_PKE_SLICE)
		capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
	if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE)
		capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;

	if ((straps | fuses) & ADF_POWERGATE_PKE)
		capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;

	return capabilities;
}
EXPORT_SYMBOL_GPL(adf_gen2_get_accel_cap);
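
/*
 * Callers typically cache the result at init time (sketch; the exact hook
 * and field names are assumed from the common driver layout):
 *
 *	hw_data->get_accel_cap = adf_gen2_get_accel_cap;
 *	...
 *	hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
 */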

void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
	u32 timer_val = ADF_SSM_WDT_DEFAULT_VALUE;
	unsigned long accel_mask = hw_data->accel_mask;
	void __iomem *pmisc_addr;
	struct adf_bar *pmisc;
	int pmisc_id;
	u32 i = 0;

	pmisc_id = hw_data->get_misc_bar_id(hw_data);
	pmisc = &GET_BARS(accel_dev)[pmisc_id];
	pmisc_addr = pmisc->virt_addr;

	/* Configure watchdog timers for each enabled accelerator */
	for_each_set_bit(i, &accel_mask, hw_data->num_accel) {
		/* Enable WDT for sym and dc */
		ADF_CSR_WR(pmisc_addr, ADF_SSMWDT(i), timer_val);
		/* Enable WDT for pke */
		ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKE(i), timer_val_pke);
	}
}
EXPORT_SYMBOL_GPL(adf_gen2_set_ssm_wdtimer);
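
/*
 * This is normally exposed through the set_ssm_wdtimer hook in hw_data and
 * invoked by the common driver code when the device is brought up (hook name
 * assumed from the common adf_hw_device_data layout):
 *
 *	hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
 */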