1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <linux/aer.h>
3 #include <linux/delay.h>
4 #include <linux/firmware.h>
5 #include <linux/list.h>
6 #include <linux/module.h>
7 #include <linux/mutex.h>
8 #include <linux/pci.h>
9 #include <linux/pci_ids.h>
10
11 #include "nitrox_dev.h"
12 #include "nitrox_common.h"
13 #include "nitrox_csr.h"
14 #include "nitrox_hal.h"
15 #include "nitrox_isr.h"
16 #include "nitrox_debugfs.h"
17
18 #define CNN55XX_DEV_ID 0x12
19 #define UCODE_HLEN 48
20 #define DEFAULT_SE_GROUP 0
21 #define DEFAULT_AE_GROUP 0
22
23 #define DRIVER_VERSION "1.2"
24 #define CNN55XX_UCD_BLOCK_SIZE 32768
25 #define CNN55XX_MAX_UCODE_SIZE (CNN55XX_UCD_BLOCK_SIZE * 2)
26 #define FW_DIR "cavium/"
27 /* SE microcode */
28 #define SE_FW FW_DIR "cnn55xx_se.fw"
29 /* AE microcode */
30 #define AE_FW FW_DIR "cnn55xx_ae.fw"
31
/* Driver name reported to the PCI core and used to label resources. */
static const char nitrox_driver_name[] = "CNN55XX";

/* Global list of probed NITROX devices; guarded by devlist_lock. */
static LIST_HEAD(ndevlist);
static DEFINE_MUTEX(devlist_lock);
/* Count of devices on ndevlist; also supplies the next device index. */
static unsigned int num_devices;

/*
 * nitrox_pci_tbl - PCI Device ID Table
 */
static const struct pci_device_id nitrox_pci_tbl[] = {
	{PCI_VDEVICE(CAVIUM, CNN55XX_DEV_ID), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, nitrox_pci_tbl);

/* Command queue length; tunable at module load (see MODULE_PARM_DESC). */
static unsigned int qlen = DEFAULT_CMD_QLEN;
module_param(qlen, uint, 0644);
MODULE_PARM_DESC(qlen, "Command queue length - default 2048");
51
/**
 * struct ucode - Firmware Header
 * @id: microcode ID
 * @version: firmware version string (VERSION_LEN - 1 bytes; the driver
 *           copies VERSION_LEN - 2 bytes and NUL-terminates separately)
 * @code_size: code section size, big-endian; nitrox_load_fw() doubles
 *             this value to get bytes — presumably the unit is 2-byte
 *             words, TODO confirm against the firmware image format
 * @raz: alignment
 * @code: code section, streamed to the UCD unit as 64-bit words
 */
struct ucode {
	u8 id;
	char version[VERSION_LEN - 1];
	__be32 code_size;
	u8 raz[12];
	u64 code[];
};
67
68 /*
69 * write_to_ucd_unit - Write Firmware to NITROX UCD unit
70 */
write_to_ucd_unit(struct nitrox_device * ndev,u32 ucode_size,u64 * ucode_data,int block_num)71 static void write_to_ucd_unit(struct nitrox_device *ndev, u32 ucode_size,
72 u64 *ucode_data, int block_num)
73 {
74 u32 code_size;
75 u64 offset, data;
76 int i = 0;
77
78 /*
79 * UCD structure
80 *
81 * -------------
82 * | BLK 7 |
83 * -------------
84 * | BLK 6 |
85 * -------------
86 * | ... |
87 * -------------
88 * | BLK 0 |
89 * -------------
90 * Total of 8 blocks, each size 32KB
91 */
92
93 /* set the block number */
94 offset = UCD_UCODE_LOAD_BLOCK_NUM;
95 nitrox_write_csr(ndev, offset, block_num);
96
97 code_size = roundup(ucode_size, 16);
98 while (code_size) {
99 data = ucode_data[i];
100 /* write 8 bytes at a time */
101 offset = UCD_UCODE_LOAD_IDX_DATAX(i);
102 nitrox_write_csr(ndev, offset, data);
103 code_size -= 8;
104 i++;
105 }
106
107 usleep_range(300, 400);
108 }
109
/**
 * nitrox_load_fw - load SE and AE microcode into the UCD unit
 * @ndev: NITROX device
 *
 * Fetches the SE image and loads it into UCD block 0, then fetches the
 * AE image and loads it into UCD block 2. All SE/AE cores are placed in
 * their default groups and pointed at the block holding their microcode.
 *
 * Return: 0 on success, negative errno if a firmware image cannot be
 * fetched or has an invalid size.
 */
static int nitrox_load_fw(struct nitrox_device *ndev)
{
	const struct firmware *fw;
	const char *fw_name;
	struct ucode *ucode;
	u64 *ucode_data;
	u64 offset;
	union ucd_core_eid_ucode_block_num core_2_eid_val;
	union aqm_grp_execmsk_lo aqm_grp_execmask_lo;
	union aqm_grp_execmsk_hi aqm_grp_execmask_hi;
	u32 ucode_size;
	int ret, i = 0;

	fw_name = SE_FW;
	dev_info(DEV(ndev), "Loading firmware \"%s\"\n", fw_name);

	ret = request_firmware(&fw, fw_name, DEV(ndev));
	if (ret < 0) {
		dev_err(DEV(ndev), "failed to get firmware %s\n", fw_name);
		return ret;
	}

	ucode = (struct ucode *)fw->data;

	/* header value is doubled to get bytes -- NOTE(review): code_size
	 * is presumably in 2-byte units; confirm against the image format
	 */
	ucode_size = be32_to_cpu(ucode->code_size) * 2;
	if (!ucode_size || ucode_size > CNN55XX_MAX_UCODE_SIZE) {
		dev_err(DEV(ndev), "Invalid ucode size: %u for firmware %s\n",
			ucode_size, fw_name);
		release_firmware(fw);
		return -EINVAL;
	}
	ucode_data = ucode->code;

	/* copy the firmware version */
	memcpy(&ndev->hw.fw_name[0][0], ucode->version, (VERSION_LEN - 2));
	ndev->hw.fw_name[0][VERSION_LEN - 1] = '\0';

	/* Load SE Firmware on UCD Block 0 */
	write_to_ucd_unit(ndev, ucode_size, ucode_data, 0);

	release_firmware(fw);

	/* put all SE cores in DEFAULT_SE_GROUP */
	offset = POM_GRP_EXECMASKX(DEFAULT_SE_GROUP);
	nitrox_write_csr(ndev, offset, (~0ULL));

	/* write block number and firmware length
	 * bit:<2:0> block number
	 * bit:3 is set SE uses 32KB microcode
	 * bit:3 is clear SE uses 64KB microcode
	 */
	core_2_eid_val.value = 0ULL;
	core_2_eid_val.ucode_blk = 0;
	if (ucode_size <= CNN55XX_UCD_BLOCK_SIZE)
		core_2_eid_val.ucode_len = 1;
	else
		core_2_eid_val.ucode_len = 0;

	/* point every SE core at the block just loaded */
	for (i = 0; i < ndev->hw.se_cores; i++) {
		offset = UCD_SE_EID_UCODE_BLOCK_NUMX(i);
		nitrox_write_csr(ndev, offset, core_2_eid_val.value);
	}


	fw_name = AE_FW;
	dev_info(DEV(ndev), "Loading firmware \"%s\"\n", fw_name);

	ret = request_firmware(&fw, fw_name, DEV(ndev));
	if (ret < 0) {
		dev_err(DEV(ndev), "failed to get firmware %s\n", fw_name);
		return ret;
	}

	ucode = (struct ucode *)fw->data;

	ucode_size = be32_to_cpu(ucode->code_size) * 2;
	if (!ucode_size || ucode_size > CNN55XX_MAX_UCODE_SIZE) {
		dev_err(DEV(ndev), "Invalid ucode size: %u for firmware %s\n",
			ucode_size, fw_name);
		release_firmware(fw);
		return -EINVAL;
	}
	ucode_data = ucode->code;

	/* copy the firmware version */
	memcpy(&ndev->hw.fw_name[1][0], ucode->version, (VERSION_LEN - 2));
	ndev->hw.fw_name[1][VERSION_LEN - 1] = '\0';

	/* Load AE Firmware on UCD Block 2 */
	write_to_ucd_unit(ndev, ucode_size, ucode_data, 2);

	release_firmware(fw);

	/* put all AE cores in DEFAULT_AE_GROUP */
	offset = AQM_GRP_EXECMSK_LOX(DEFAULT_AE_GROUP);
	aqm_grp_execmask_lo.exec_0_to_39 = 0xFFFFFFFFFFULL;
	nitrox_write_csr(ndev, offset, aqm_grp_execmask_lo.value);
	offset = AQM_GRP_EXECMSK_HIX(DEFAULT_AE_GROUP);
	aqm_grp_execmask_hi.exec_40_to_79 = 0xFFFFFFFFFFULL;
	nitrox_write_csr(ndev, offset, aqm_grp_execmask_hi.value);

	/* write block number and firmware length
	 * bit:<2:0> block number
	 * bit:3 is set AE uses 32KB microcode
	 * bit:3 is clear AE uses 64KB microcode
	 */
	core_2_eid_val.value = 0ULL;
	core_2_eid_val.ucode_blk = 2;
	if (ucode_size <= CNN55XX_UCD_BLOCK_SIZE)
		core_2_eid_val.ucode_len = 1;
	else
		core_2_eid_val.ucode_len = 0;

	/* point every AE core at the block just loaded */
	for (i = 0; i < ndev->hw.ae_cores; i++) {
		offset = UCD_AE_EID_UCODE_BLOCK_NUMX(i);
		nitrox_write_csr(ndev, offset, core_2_eid_val.value);
	}

	return 0;
}
230
231 /**
232 * nitrox_add_to_devlist - add NITROX device to global device list
233 * @ndev: NITROX device
234 */
nitrox_add_to_devlist(struct nitrox_device * ndev)235 static int nitrox_add_to_devlist(struct nitrox_device *ndev)
236 {
237 struct nitrox_device *dev;
238 int ret = 0;
239
240 INIT_LIST_HEAD(&ndev->list);
241 refcount_set(&ndev->refcnt, 1);
242
243 mutex_lock(&devlist_lock);
244 list_for_each_entry(dev, &ndevlist, list) {
245 if (dev == ndev) {
246 ret = -EEXIST;
247 goto unlock;
248 }
249 }
250 ndev->idx = num_devices++;
251 list_add_tail(&ndev->list, &ndevlist);
252 unlock:
253 mutex_unlock(&devlist_lock);
254 return ret;
255 }
256
257 /**
258 * nitrox_remove_from_devlist - remove NITROX device from
259 * global device list
260 * @ndev: NITROX device
261 */
nitrox_remove_from_devlist(struct nitrox_device * ndev)262 static void nitrox_remove_from_devlist(struct nitrox_device *ndev)
263 {
264 mutex_lock(&devlist_lock);
265 list_del(&ndev->list);
266 num_devices--;
267 mutex_unlock(&devlist_lock);
268 }
269
nitrox_get_first_device(void)270 struct nitrox_device *nitrox_get_first_device(void)
271 {
272 struct nitrox_device *ndev;
273
274 mutex_lock(&devlist_lock);
275 list_for_each_entry(ndev, &ndevlist, list) {
276 if (nitrox_ready(ndev))
277 break;
278 }
279 mutex_unlock(&devlist_lock);
280 if (&ndev->list == &ndevlist)
281 return NULL;
282
283 refcount_inc(&ndev->refcnt);
284 /* barrier to sync with other cpus */
285 smp_mb__after_atomic();
286 return ndev;
287 }
288
nitrox_put_device(struct nitrox_device * ndev)289 void nitrox_put_device(struct nitrox_device *ndev)
290 {
291 if (!ndev)
292 return;
293
294 refcount_dec(&ndev->refcnt);
295 /* barrier to sync with other cpus */
296 smp_mb__after_atomic();
297 }
298
nitrox_device_flr(struct pci_dev * pdev)299 static int nitrox_device_flr(struct pci_dev *pdev)
300 {
301 int pos = 0;
302
303 pos = pci_save_state(pdev);
304 if (pos) {
305 dev_err(&pdev->dev, "Failed to save pci state\n");
306 return -ENOMEM;
307 }
308
309 pcie_reset_flr(pdev, PCI_RESET_DO_RESET);
310
311 pci_restore_state(pdev);
312
313 return 0;
314 }
315
/*
 * nitrox_pf_sw_init - set up PF software state and interrupts.
 *
 * Return: 0 on success, negative errno on failure (common state is
 * cleaned up if interrupt registration fails).
 */
static int nitrox_pf_sw_init(struct nitrox_device *ndev)
{
	int ret;

	ret = nitrox_common_sw_init(ndev);
	if (ret)
		return ret;

	ret = nitrox_register_interrupts(ndev);
	if (ret) {
		/* undo the common software setup */
		nitrox_common_sw_cleanup(ndev);
		return ret;
	}

	return 0;
}
330
/* Tear down PF software state: interrupts first, then common state. */
static void nitrox_pf_sw_cleanup(struct nitrox_device *ndev)
{
	nitrox_unregister_interrupts(ndev);
	nitrox_common_sw_cleanup(ndev);
}
336
337 /**
338 * nitrox_bist_check - Check NITROX BIST registers status
339 * @ndev: NITROX device
340 */
nitrox_bist_check(struct nitrox_device * ndev)341 static int nitrox_bist_check(struct nitrox_device *ndev)
342 {
343 u64 value = 0;
344 int i;
345
346 for (i = 0; i < NR_CLUSTERS; i++) {
347 value += nitrox_read_csr(ndev, EMU_BIST_STATUSX(i));
348 value += nitrox_read_csr(ndev, EFL_CORE_BIST_REGX(i));
349 }
350 value += nitrox_read_csr(ndev, UCD_BIST_STATUS);
351 value += nitrox_read_csr(ndev, NPS_CORE_BIST_REG);
352 value += nitrox_read_csr(ndev, NPS_CORE_NPC_BIST_REG);
353 value += nitrox_read_csr(ndev, NPS_PKT_SLC_BIST_REG);
354 value += nitrox_read_csr(ndev, NPS_PKT_IN_BIST_REG);
355 value += nitrox_read_csr(ndev, POM_BIST_REG);
356 value += nitrox_read_csr(ndev, BMI_BIST_REG);
357 value += nitrox_read_csr(ndev, EFL_TOP_BIST_STAT);
358 value += nitrox_read_csr(ndev, BMO_BIST_REG);
359 value += nitrox_read_csr(ndev, LBC_BIST_STATUS);
360 value += nitrox_read_csr(ndev, PEM_BIST_STATUSX(0));
361 if (value)
362 return -EIO;
363 return 0;
364 }
365
/*
 * nitrox_pf_hw_init - bring up NITROX hardware: BIST check, unit
 * configuration and firmware load. Ordering follows the hardware init
 * sequence and must not be rearranged.
 *
 * Return: 0 on success, negative errno on BIST or firmware failure.
 */
static int nitrox_pf_hw_init(struct nitrox_device *ndev)
{
	int err;

	err = nitrox_bist_check(ndev);
	if (err) {
		dev_err(&ndev->pdev->dev, "BIST check failed\n");
		return err;
	}
	/* get cores information */
	nitrox_get_hwinfo(ndev);

	nitrox_config_nps_core_unit(ndev);
	nitrox_config_aqm_unit(ndev);
	nitrox_config_nps_pkt_unit(ndev);
	nitrox_config_pom_unit(ndev);
	nitrox_config_efl_unit(ndev);
	/* configure IO units */
	nitrox_config_bmi_unit(ndev);
	nitrox_config_bmo_unit(ndev);
	/* configure Local Buffer Cache */
	nitrox_config_lbc_unit(ndev);
	nitrox_config_rand_unit(ndev);

	/* load firmware on cores */
	err = nitrox_load_fw(ndev);
	if (err)
		return err;

	/* EMU is configured last, after the cores have their microcode */
	nitrox_config_emu_unit(ndev);

	return 0;
}
399
/**
 * nitrox_probe - NITROX Initialization function.
 * @pdev: PCI device information struct
 * @id: entry in nitrox_pci_tbl
 *
 * Return: 0, if the driver is bound to the device, or
 *         a negative error if there is failure.
 */
static int nitrox_probe(struct pci_dev *pdev,
			const struct pci_device_id *id)
{
	struct nitrox_device *ndev;
	int err;

	dev_info_once(&pdev->dev, "%s driver version %s\n",
		      nitrox_driver_name, DRIVER_VERSION);

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* do FLR */
	err = nitrox_device_flr(pdev);
	if (err) {
		dev_err(&pdev->dev, "FLR failed\n");
		goto flr_fail;
	}

	/* prefer 64-bit DMA, fall back to a 32-bit mask */
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		dev_dbg(&pdev->dev, "DMA to 64-BIT address\n");
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "DMA configuration failed\n");
			goto flr_fail;
		}
	}

	err = pci_request_mem_regions(pdev, nitrox_driver_name);
	if (err)
		goto flr_fail;
	pci_set_master(pdev);

	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
	if (!ndev) {
		err = -ENOMEM;
		goto ndev_fail;
	}

	pci_set_drvdata(pdev, ndev);
	ndev->pdev = pdev;

	/* add to device list */
	nitrox_add_to_devlist(ndev);

	ndev->hw.vendor_id = pdev->vendor;
	ndev->hw.device_id = pdev->device;
	ndev->hw.revision_id = pdev->revision;
	/* command timeout in jiffies */
	ndev->timeout = msecs_to_jiffies(CMD_TIMEOUT);
	ndev->node = dev_to_node(&pdev->dev);
	if (ndev->node == NUMA_NO_NODE)
		ndev->node = 0;

	/* map BAR 0, which holds the device CSRs */
	ndev->bar_addr = ioremap(pci_resource_start(pdev, 0),
				 pci_resource_len(pdev, 0));
	if (!ndev->bar_addr) {
		err = -EIO;
		goto ioremap_err;
	}
	/* allocate command queues based on cpus, max queues are 64 */
	ndev->nr_queues = min_t(u32, MAX_PF_QUEUES, num_online_cpus());
	ndev->qlen = qlen;

	err = nitrox_pf_sw_init(ndev);
	if (err)
		goto pf_sw_fail;

	err = nitrox_pf_hw_init(ndev);
	if (err)
		goto pf_hw_fail;

	nitrox_debugfs_init(ndev);

	/* clear the statistics */
	atomic64_set(&ndev->stats.posted, 0);
	atomic64_set(&ndev->stats.completed, 0);
	atomic64_set(&ndev->stats.dropped, 0);

	/* publish readiness before registering crypto algorithms */
	atomic_set(&ndev->state, __NDEV_READY);
	/* barrier to sync with other cpus */
	smp_mb__after_atomic();

	err = nitrox_crypto_register();
	if (err)
		goto crypto_fail;

	return 0;

	/* error unwinding mirrors the construction order above */
crypto_fail:
	nitrox_debugfs_exit(ndev);
	atomic_set(&ndev->state, __NDEV_NOT_READY);
	/* barrier to sync with other cpus */
	smp_mb__after_atomic();
pf_hw_fail:
	nitrox_pf_sw_cleanup(ndev);
pf_sw_fail:
	iounmap(ndev->bar_addr);
ioremap_err:
	nitrox_remove_from_devlist(ndev);
	kfree(ndev);
	pci_set_drvdata(pdev, NULL);
ndev_fail:
	pci_release_mem_regions(pdev);
flr_fail:
	pci_disable_device(pdev);
	return err;
}
518
/**
 * nitrox_remove - Unbind the driver from the device.
 * @pdev: PCI device information struct
 */
static void nitrox_remove(struct pci_dev *pdev)
{
	struct nitrox_device *ndev = pci_get_drvdata(pdev);

	if (!ndev)
		return;

	/* drop the probe-time reference; refuse to tear down while users
	 * (e.g. in-flight crypto contexts) still hold references
	 */
	if (!refcount_dec_and_test(&ndev->refcnt)) {
		dev_err(DEV(ndev), "Device refcnt not zero (%d)\n",
			refcount_read(&ndev->refcnt));
		return;
	}

	dev_info(DEV(ndev), "Removing Device %x:%x\n",
		 ndev->hw.vendor_id, ndev->hw.device_id);

	/* mark not-ready so no new user picks this device up */
	atomic_set(&ndev->state, __NDEV_NOT_READY);
	/* barrier to sync with other cpus */
	smp_mb__after_atomic();

	nitrox_remove_from_devlist(ndev);

	/* disable SR-IOV */
	nitrox_sriov_configure(pdev, 0);
	nitrox_crypto_unregister();
	nitrox_debugfs_exit(ndev);
	nitrox_pf_sw_cleanup(ndev);

	iounmap(ndev->bar_addr);
	kfree(ndev);

	pci_set_drvdata(pdev, NULL);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
558
/* Release PCI resources on system shutdown; no device-state teardown. */
static void nitrox_shutdown(struct pci_dev *pdev)
{
	pci_set_drvdata(pdev, NULL);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
565
/* PCI driver registration for the CNN55XX physical function. */
static struct pci_driver nitrox_driver = {
	.name = nitrox_driver_name,
	.id_table = nitrox_pci_tbl,
	.probe = nitrox_probe,
	.remove = nitrox_remove,
	.shutdown = nitrox_shutdown,
	.sriov_configure = nitrox_sriov_configure,
};
574
575 module_pci_driver(nitrox_driver);
576
577 MODULE_AUTHOR("Srikanth Jampala <Jampala.Srikanth@cavium.com>");
578 MODULE_DESCRIPTION("Cavium CNN55XX PF Driver" DRIVER_VERSION " ");
579 MODULE_LICENSE("GPL");
580 MODULE_VERSION(DRIVER_VERSION);
581 MODULE_FIRMWARE(SE_FW);
582