/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#ifndef _QED_IF_H
#define _QED_IF_H

#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/qed/common_hsi.h>
#include <linux/qed/qed_chain.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <net/devlink.h>

#define QED_TX_SWS_TIMER_DFLT 500
#define QED_TWO_MSL_TIMER_DFLT 4000

enum dcbx_protocol_type {
	DCBX_PROTOCOL_ISCSI,
	DCBX_PROTOCOL_FCOE,
	DCBX_PROTOCOL_ROCE,
	DCBX_PROTOCOL_ROCE_V2,
	DCBX_PROTOCOL_ETH,
	DCBX_MAX_PROTOCOL_TYPE
};

#define QED_ROCE_PROTOCOL_INDEX (3)

#define QED_LLDP_CHASSIS_ID_STAT_LEN 4
#define QED_LLDP_PORT_ID_STAT_LEN 4
#define QED_DCBX_MAX_APP_PROTOCOL 32
#define QED_MAX_PFC_PRIORITIES 8
#define QED_DCBX_DSCP_SIZE 64

struct qed_dcbx_lldp_remote {
	u32 peer_chassis_id[QED_LLDP_CHASSIS_ID_STAT_LEN];
	u32 peer_port_id[QED_LLDP_PORT_ID_STAT_LEN];
	bool enable_rx;
	bool enable_tx;
	u32 tx_interval;
	u32 max_credit;
};

struct qed_dcbx_lldp_local {
	u32 local_chassis_id[QED_LLDP_CHASSIS_ID_STAT_LEN];
	u32 local_port_id[QED_LLDP_PORT_ID_STAT_LEN];
};

struct qed_dcbx_app_prio {
	u8 roce;
	u8 roce_v2;
	u8 fcoe;
	u8 iscsi;
	u8 eth;
};

struct qed_dbcx_pfc_params {
	bool willing;
	bool enabled;
	u8 prio[QED_MAX_PFC_PRIORITIES];
	u8 max_tc;
};

enum qed_dcbx_sf_ieee_type {
	QED_DCBX_SF_IEEE_ETHTYPE,
	QED_DCBX_SF_IEEE_TCP_PORT,
	QED_DCBX_SF_IEEE_UDP_PORT,
	QED_DCBX_SF_IEEE_TCP_UDP_PORT
};

struct qed_app_entry {
	bool ethtype;
	enum qed_dcbx_sf_ieee_type sf_ieee;
	bool enabled;
	u8 prio;
	u16 proto_id;
	enum dcbx_protocol_type proto_type;
};

struct qed_dcbx_params {
	struct qed_app_entry app_entry[QED_DCBX_MAX_APP_PROTOCOL];
	u16 num_app_entries;
	bool app_willing;
	bool app_valid;
	bool app_error;
	bool ets_willing;
	bool ets_enabled;
	bool ets_cbs;
	bool valid;
	u8 ets_pri_tc_tbl[QED_MAX_PFC_PRIORITIES];
	u8 ets_tc_bw_tbl[QED_MAX_PFC_PRIORITIES];
	u8 ets_tc_tsa_tbl[QED_MAX_PFC_PRIORITIES];
	struct qed_dbcx_pfc_params pfc;
	u8 max_ets_tc;
};

struct qed_dcbx_admin_params {
	struct qed_dcbx_params params;
	bool valid;
};

struct qed_dcbx_remote_params {
	struct qed_dcbx_params params;
	bool valid;
};

struct qed_dcbx_operational_params {
	struct qed_dcbx_app_prio app_prio;
	struct qed_dcbx_params params;
	bool valid;
	bool enabled;
	bool ieee;
	bool cee;
	bool local;
	u32 err;
};

struct qed_dcbx_get {
	struct qed_dcbx_operational_params operational;
	struct qed_dcbx_lldp_remote lldp_remote;
	struct qed_dcbx_lldp_local lldp_local;
	struct qed_dcbx_remote_params remote;
	struct qed_dcbx_admin_params local;
};

enum qed_nvm_images {
	QED_NVM_IMAGE_ISCSI_CFG,
	QED_NVM_IMAGE_FCOE_CFG,
	QED_NVM_IMAGE_MDUMP,
	QED_NVM_IMAGE_NVM_CFG1,
	QED_NVM_IMAGE_DEFAULT_CFG,
	QED_NVM_IMAGE_NVM_META,
};

struct qed_link_eee_params {
	u32 tx_lpi_timer;
#define QED_EEE_1G_ADV BIT(0)
#define QED_EEE_10G_ADV BIT(1)

	/* Capabilities are represented using QED_EEE_*_ADV values */
	u8 adv_caps;
	u8 lp_adv_caps;
	bool enable;
	bool tx_lpi_enable;
};
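
/* Illustrative helper (hypothetical, not part of the qed API): test whether
 * a given EEE speed is advertised, using the QED_EEE_*_ADV bits that encode
 * the adv_caps/lp_adv_caps fields above.
 */
static inline bool qed_eee_adv_example(const struct qed_link_eee_params *eee,
				       u8 adv_bit)
{
	/* adv_bit is expected to be QED_EEE_1G_ADV or QED_EEE_10G_ADV */
	return !!(eee->adv_caps & adv_bit);
}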

enum qed_led_mode {
	QED_LED_MODE_OFF,
	QED_LED_MODE_ON,
	QED_LED_MODE_RESTORE
};

struct qed_mfw_tlv_eth {
	u16 lso_maxoff_size;
	bool lso_maxoff_size_set;
	u16 lso_minseg_size;
	bool lso_minseg_size_set;
	u8 prom_mode;
	bool prom_mode_set;
	u16 tx_descr_size;
	bool tx_descr_size_set;
	u16 rx_descr_size;
	bool rx_descr_size_set;
	u16 netq_count;
	bool netq_count_set;
	u32 tcp4_offloads;
	bool tcp4_offloads_set;
	u32 tcp6_offloads;
	bool tcp6_offloads_set;
	u16 tx_descr_qdepth;
	bool tx_descr_qdepth_set;
	u16 rx_descr_qdepth;
	bool rx_descr_qdepth_set;
	u8 iov_offload;
#define QED_MFW_TLV_IOV_OFFLOAD_NONE (0)
#define QED_MFW_TLV_IOV_OFFLOAD_MULTIQUEUE (1)
#define QED_MFW_TLV_IOV_OFFLOAD_VEB (2)
#define QED_MFW_TLV_IOV_OFFLOAD_VEPA (3)
	bool iov_offload_set;
	u8 txqs_empty;
	bool txqs_empty_set;
	u8 rxqs_empty;
	bool rxqs_empty_set;
	u8 num_txqs_full;
	bool num_txqs_full_set;
	u8 num_rxqs_full;
	bool num_rxqs_full_set;
};

#define QED_MFW_TLV_TIME_SIZE 14
struct qed_mfw_tlv_time {
	bool b_set;
	u8 month;
	u8 day;
	u8 hour;
	u8 min;
	u16 msec;
	u16 usec;
};

struct qed_mfw_tlv_fcoe {
	u8 scsi_timeout;
	bool scsi_timeout_set;
	u32 rt_tov;
	bool rt_tov_set;
	u32 ra_tov;
	bool ra_tov_set;
	u32 ed_tov;
	bool ed_tov_set;
	u32 cr_tov;
	bool cr_tov_set;
	u8 boot_type;
	bool boot_type_set;
	u8 npiv_state;
	bool npiv_state_set;
	u32 num_npiv_ids;
	bool num_npiv_ids_set;
	u8 switch_name[8];
	bool switch_name_set;
	u16 switch_portnum;
	bool switch_portnum_set;
	u8 switch_portid[3];
	bool switch_portid_set;
	u8 vendor_name[8];
	bool vendor_name_set;
	u8 switch_model[8];
	bool switch_model_set;
	u8 switch_fw_version[8];
	bool switch_fw_version_set;
	u8 qos_pri;
	bool qos_pri_set;
	u8 port_alias[3];
	bool port_alias_set;
	u8 port_state;
#define QED_MFW_TLV_PORT_STATE_OFFLINE (0)
#define QED_MFW_TLV_PORT_STATE_LOOP (1)
#define QED_MFW_TLV_PORT_STATE_P2P (2)
#define QED_MFW_TLV_PORT_STATE_FABRIC (3)
	bool port_state_set;
	u16 fip_tx_descr_size;
	bool fip_tx_descr_size_set;
	u16 fip_rx_descr_size;
	bool fip_rx_descr_size_set;
	u16 link_failures;
	bool link_failures_set;
	u8 fcoe_boot_progress;
	bool fcoe_boot_progress_set;
	u64 rx_bcast;
	bool rx_bcast_set;
	u64 tx_bcast;
	bool tx_bcast_set;
	u16 fcoe_txq_depth;
	bool fcoe_txq_depth_set;
	u16 fcoe_rxq_depth;
	bool fcoe_rxq_depth_set;
	u64 fcoe_rx_frames;
	bool fcoe_rx_frames_set;
	u64 fcoe_rx_bytes;
	bool fcoe_rx_bytes_set;
	u64 fcoe_tx_frames;
	bool fcoe_tx_frames_set;
	u64 fcoe_tx_bytes;
	bool fcoe_tx_bytes_set;
	u16 crc_count;
	bool crc_count_set;
	u32 crc_err_src_fcid[5];
	bool crc_err_src_fcid_set[5];
	struct qed_mfw_tlv_time crc_err[5];
	u16 losync_err;
	bool losync_err_set;
	u16 losig_err;
	bool losig_err_set;
	u16 primtive_err;
	bool primtive_err_set;
	u16 disparity_err;
	bool disparity_err_set;
	u16 code_violation_err;
	bool code_violation_err_set;
	u32 flogi_param[4];
	bool flogi_param_set[4];
	struct qed_mfw_tlv_time flogi_tstamp;
	u32 flogi_acc_param[4];
	bool flogi_acc_param_set[4];
	struct qed_mfw_tlv_time flogi_acc_tstamp;
	u32 flogi_rjt;
	bool flogi_rjt_set;
	struct qed_mfw_tlv_time flogi_rjt_tstamp;
	u32 fdiscs;
	bool fdiscs_set;
	u8 fdisc_acc;
	bool fdisc_acc_set;
	u8 fdisc_rjt;
	bool fdisc_rjt_set;
	u8 plogi;
	bool plogi_set;
	u8 plogi_acc;
	bool plogi_acc_set;
	u8 plogi_rjt;
	bool plogi_rjt_set;
	u32 plogi_dst_fcid[5];
	bool plogi_dst_fcid_set[5];
	struct qed_mfw_tlv_time plogi_tstamp[5];
	u32 plogi_acc_src_fcid[5];
	bool plogi_acc_src_fcid_set[5];
	struct qed_mfw_tlv_time plogi_acc_tstamp[5];
	u8 tx_plogos;
	bool tx_plogos_set;
	u8 plogo_acc;
	bool plogo_acc_set;
	u8 plogo_rjt;
	bool plogo_rjt_set;
	u32 plogo_src_fcid[5];
	bool plogo_src_fcid_set[5];
	struct qed_mfw_tlv_time plogo_tstamp[5];
	u8 rx_logos;
	bool rx_logos_set;
	u8 tx_accs;
	bool tx_accs_set;
	u8 tx_prlis;
	bool tx_prlis_set;
	u8 rx_accs;
	bool rx_accs_set;
	u8 tx_abts;
	bool tx_abts_set;
	u8 rx_abts_acc;
	bool rx_abts_acc_set;
	u8 rx_abts_rjt;
	bool rx_abts_rjt_set;
	u32 abts_dst_fcid[5];
	bool abts_dst_fcid_set[5];
	struct qed_mfw_tlv_time abts_tstamp[5];
	u8 rx_rscn;
	bool rx_rscn_set;
	u32 rx_rscn_nport[4];
	bool rx_rscn_nport_set[4];
	u8 tx_lun_rst;
	bool tx_lun_rst_set;
	u8 abort_task_sets;
	bool abort_task_sets_set;
	u8 tx_tprlos;
	bool tx_tprlos_set;
	u8 tx_nos;
	bool tx_nos_set;
	u8 rx_nos;
	bool rx_nos_set;
	u8 ols;
	bool ols_set;
	u8 lr;
	bool lr_set;
	u8 lrr;
	bool lrr_set;
	u8 tx_lip;
	bool tx_lip_set;
	u8 rx_lip;
	bool rx_lip_set;
	u8 eofa;
	bool eofa_set;
	u8 eofni;
	bool eofni_set;
	u8 scsi_chks;
	bool scsi_chks_set;
	u8 scsi_cond_met;
	bool scsi_cond_met_set;
	u8 scsi_busy;
	bool scsi_busy_set;
	u8 scsi_inter;
	bool scsi_inter_set;
	u8 scsi_inter_cond_met;
	bool scsi_inter_cond_met_set;
	u8 scsi_rsv_conflicts;
	bool scsi_rsv_conflicts_set;
	u8 scsi_tsk_full;
	bool scsi_tsk_full_set;
	u8 scsi_aca_active;
	bool scsi_aca_active_set;
	u8 scsi_tsk_abort;
	bool scsi_tsk_abort_set;
	u32 scsi_rx_chk[5];
	bool scsi_rx_chk_set[5];
	struct qed_mfw_tlv_time scsi_chk_tstamp[5];
};

struct qed_mfw_tlv_iscsi {
	u8 target_llmnr;
	bool target_llmnr_set;
	u8 header_digest;
	bool header_digest_set;
	u8 data_digest;
	bool data_digest_set;
	u8 auth_method;
#define QED_MFW_TLV_AUTH_METHOD_NONE (1)
#define QED_MFW_TLV_AUTH_METHOD_CHAP (2)
#define QED_MFW_TLV_AUTH_METHOD_MUTUAL_CHAP (3)
	bool auth_method_set;
	u16 boot_taget_portal;
	bool boot_taget_portal_set;
	u16 frame_size;
	bool frame_size_set;
	u16 tx_desc_size;
	bool tx_desc_size_set;
	u16 rx_desc_size;
	bool rx_desc_size_set;
	u8 boot_progress;
	bool boot_progress_set;
	u16 tx_desc_qdepth;
	bool tx_desc_qdepth_set;
	u16 rx_desc_qdepth;
	bool rx_desc_qdepth_set;
	u64 rx_frames;
	bool rx_frames_set;
	u64 rx_bytes;
	bool rx_bytes_set;
	u64 tx_frames;
	bool tx_frames_set;
	u64 tx_bytes;
	bool tx_bytes_set;
};

enum qed_db_rec_width {
	DB_REC_WIDTH_32B,
	DB_REC_WIDTH_64B,
};

enum qed_db_rec_space {
	DB_REC_KERNEL,
	DB_REC_USER,
};

#define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \
					    (void __iomem *)(reg_addr))

#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))

#define DIRECT_REG_WR64(reg_addr, val) writeq((u64)val, \
					      (void __iomem *)(reg_addr))

#define QED_COALESCE_MAX 0x1FF
#define QED_DEFAULT_RX_USECS 12
#define QED_DEFAULT_TX_USECS 48

/* forward */
struct qed_dev;

struct qed_eth_pf_params {
	/* The following parameters are used during HW-init and need to be
	 * passed as arguments to the update_pf_params routine invoked
	 * before slowpath start.
	 */
	u16 num_cons;

	/* per-VF number of CIDs */
	u8 num_vf_cons;
#define ETH_PF_PARAMS_VF_CONS_DEFAULT (32)

	/* To enable aRFS, a positive number needs to be set prior to HW-init
	 * [as filters require allocated searcher ILT memory].
	 * This will set the maximal number of configured steering-filters.
	 */
	u32 num_arfs_filters;
};

struct qed_fcoe_pf_params {
	/* The following parameters are used during protocol-init */
	u64 glbl_q_params_addr;
	u64 bdq_pbl_base_addr[2];

	/* The following parameters are used during HW-init and need to be
	 * passed as arguments to the update_pf_params routine invoked
	 * before slowpath start.
	 */
	u16 num_cons;
	u16 num_tasks;

	/* The following parameters are used during protocol-init */
	u16 sq_num_pbl_pages;

	u16 cq_num_entries;
	u16 cmdq_num_entries;
	u16 rq_buffer_log_size;
	u16 mtu;
	u16 dummy_icid;
	u16 bdq_xoff_threshold[2];
	u16 bdq_xon_threshold[2];
	u16 rq_buffer_size;
	u8 num_cqs; /* num of global CQs */
	u8 log_page_size;
	u8 gl_rq_pi;
	u8 gl_cmd_pi;
	u8 debug_mode;
	u8 is_target;
	u8 bdq_pbl_num_entries[2];
};

/* Most of the parameters below are described in the FW iSCSI / TCP HSI */
struct qed_iscsi_pf_params {
	u64 glbl_q_params_addr;
	u64 bdq_pbl_base_addr[3];
	u16 cq_num_entries;
	u16 cmdq_num_entries;
	u32 two_msl_timer;
	u16 tx_sws_timer;

	/* The following parameters are used during HW-init and need to be
	 * passed as arguments to the update_pf_params routine invoked
	 * before slowpath start.
	 */
	u16 num_cons;
	u16 num_tasks;

	/* The following parameters are used during protocol-init */
	u16 half_way_close_timeout;
	u16 bdq_xoff_threshold[3];
	u16 bdq_xon_threshold[3];
	u16 cmdq_xoff_threshold;
	u16 cmdq_xon_threshold;
	u16 rq_buffer_size;

	u8 num_sq_pages_in_ring;
	u8 num_r2tq_pages_in_ring;
	u8 num_uhq_pages_in_ring;
	u8 num_queues;
	u8 log_page_size;
	u8 rqe_log_size;
	u8 max_fin_rt;
	u8 gl_rq_pi;
	u8 gl_cmd_pi;
	u8 debug_mode;
	u8 ll2_ooo_queue_id;

	u8 is_target;
	u8 is_soc_en;
	u8 soc_num_of_blocks_log;
	u8 bdq_pbl_num_entries[3];
};

struct qed_nvmetcp_pf_params {
	u64 glbl_q_params_addr;
	u16 cq_num_entries;
	u16 num_cons;
	u16 num_tasks;
	u8 num_sq_pages_in_ring;
	u8 num_r2tq_pages_in_ring;
	u8 num_uhq_pages_in_ring;
	u8 num_queues;
	u8 gl_rq_pi;
	u8 gl_cmd_pi;
	u8 debug_mode;
	u8 ll2_ooo_queue_id;
	u16 min_rto;
};

struct qed_rdma_pf_params {
	/* Supplied to QED during resource allocation (may affect the ILT and
	 * the doorbell BAR).
	 */
	u32 min_dpis;		/* number of requested DPIs */
	u32 num_qps;		/* number of requested Queue Pairs */
	u32 num_srqs;		/* number of requested SRQ */
	u8 roce_edpm_mode;	/* see QED_ROCE_EDPM_MODE_ENABLE */
	u8 gl_pi;		/* protocol index */

	/* Will allocate rate limiters to be used with QPs */
	u8 enable_dcqcn;
};

struct qed_pf_params {
	struct qed_eth_pf_params eth_pf_params;
	struct qed_fcoe_pf_params fcoe_pf_params;
	struct qed_iscsi_pf_params iscsi_pf_params;
	struct qed_nvmetcp_pf_params nvmetcp_pf_params;
	struct qed_rdma_pf_params rdma_pf_params;
};
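
/* A minimal usage sketch (hypothetical local names; `ops' stands for the
 * struct qed_common_ops instance exported by the qed module): PF params are
 * pushed via update_pf_params() before starting the slowpath, e.g.
 *
 *	struct qed_pf_params pf_params = {};
 *
 *	pf_params.eth_pf_params.num_cons = 64;
 *	pf_params.eth_pf_params.num_arfs_filters = 32;
 *	ops->update_pf_params(cdev, &pf_params);
 *	rc = ops->slowpath_start(cdev, &sp_params);
 */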

enum qed_int_mode {
	QED_INT_MODE_INTA,
	QED_INT_MODE_MSIX,
	QED_INT_MODE_MSI,
	QED_INT_MODE_POLL,
};

struct qed_sb_info {
	struct status_block *sb_virt;
	dma_addr_t sb_phys;
	u32 sb_ack; /* Last given ack */
	u16 igu_sb_id;
	void __iomem *igu_addr;
	u8 flags;
#define QED_SB_INFO_INIT 0x1
#define QED_SB_INFO_SETUP 0x2

	struct qed_dev *cdev;
};

enum qed_hw_err_type {
	QED_HW_ERR_FAN_FAIL,
	QED_HW_ERR_MFW_RESP_FAIL,
	QED_HW_ERR_HW_ATTN,
	QED_HW_ERR_DMAE_FAIL,
	QED_HW_ERR_RAMROD_FAIL,
	QED_HW_ERR_FW_ASSERT,
	QED_HW_ERR_LAST,
};

enum qed_dev_type {
	QED_DEV_TYPE_BB,
	QED_DEV_TYPE_AH,
};

struct qed_dev_info {
	unsigned long pci_mem_start;
	unsigned long pci_mem_end;
	unsigned int pci_irq;
	u8 num_hwfns;

	u8 hw_mac[ETH_ALEN];

	/* FW version */
	u16 fw_major;
	u16 fw_minor;
	u16 fw_rev;
	u16 fw_eng;

	/* MFW version */
	u32 mfw_rev;
#define QED_MFW_VERSION_0_MASK 0x000000FF
#define QED_MFW_VERSION_0_OFFSET 0
#define QED_MFW_VERSION_1_MASK 0x0000FF00
#define QED_MFW_VERSION_1_OFFSET 8
#define QED_MFW_VERSION_2_MASK 0x00FF0000
#define QED_MFW_VERSION_2_OFFSET 16
#define QED_MFW_VERSION_3_MASK 0xFF000000
#define QED_MFW_VERSION_3_OFFSET 24

	u32 flash_size;
	bool b_arfs_capable;
	bool b_inter_pf_switch;
	bool tx_switching;
	bool rdma_supported;
	u16 mtu;

	bool wol_support;
	bool smart_an;

	/* MBI version */
	u32 mbi_version;
#define QED_MBI_VERSION_0_MASK 0x000000FF
#define QED_MBI_VERSION_0_OFFSET 0
#define QED_MBI_VERSION_1_MASK 0x0000FF00
#define QED_MBI_VERSION_1_OFFSET 8
#define QED_MBI_VERSION_2_MASK 0x00FF0000
#define QED_MBI_VERSION_2_OFFSET 16

	enum qed_dev_type dev_type;

	/* Output parameters for qede */
	bool vxlan_enable;
	bool gre_enable;
	bool geneve_enable;

	u8 abs_pf_id;
};

enum qed_sb_type {
	QED_SB_TYPE_L2_QUEUE,
	QED_SB_TYPE_CNQ,
	QED_SB_TYPE_STORAGE,
};

enum qed_protocol {
	QED_PROTOCOL_ETH,
	QED_PROTOCOL_ISCSI,
	QED_PROTOCOL_NVMETCP = QED_PROTOCOL_ISCSI,
	QED_PROTOCOL_FCOE,
};

enum qed_fec_mode {
	QED_FEC_MODE_NONE = BIT(0),
	QED_FEC_MODE_FIRECODE = BIT(1),
	QED_FEC_MODE_RS = BIT(2),
	QED_FEC_MODE_AUTO = BIT(3),
	QED_FEC_MODE_UNSUPPORTED = BIT(4),
};

struct qed_link_params {
	bool link_up;

	u32 override_flags;
#define QED_LINK_OVERRIDE_SPEED_AUTONEG BIT(0)
#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS BIT(1)
#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED BIT(2)
#define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3)
#define QED_LINK_OVERRIDE_LOOPBACK_MODE BIT(4)
#define QED_LINK_OVERRIDE_EEE_CONFIG BIT(5)
#define QED_LINK_OVERRIDE_FEC_CONFIG BIT(6)

	bool autoneg;
	__ETHTOOL_DECLARE_LINK_MODE_MASK(adv_speeds);
	u32 forced_speed;

	u32 pause_config;
#define QED_LINK_PAUSE_AUTONEG_ENABLE BIT(0)
#define QED_LINK_PAUSE_RX_ENABLE BIT(1)
#define QED_LINK_PAUSE_TX_ENABLE BIT(2)

	u32 loopback_mode;
#define QED_LINK_LOOPBACK_NONE BIT(0)
#define QED_LINK_LOOPBACK_INT_PHY BIT(1)
#define QED_LINK_LOOPBACK_EXT_PHY BIT(2)
#define QED_LINK_LOOPBACK_EXT BIT(3)
#define QED_LINK_LOOPBACK_MAC BIT(4)
#define QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123 BIT(5)
#define QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301 BIT(6)
#define QED_LINK_LOOPBACK_PCS_AH_ONLY BIT(7)
#define QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY BIT(8)
#define QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY BIT(9)

	struct qed_link_eee_params eee;
	u32 fec;
};
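
/* Sketch of a link override (hypothetical locals; set_link() is part of
 * struct qed_common_ops below). Forcing a speed with autoneg disabled could
 * look roughly like:
 *
 *	struct qed_link_params params = {};
 *
 *	params.override_flags = QED_LINK_OVERRIDE_SPEED_AUTONEG |
 *				QED_LINK_OVERRIDE_SPEED_FORCED_SPEED;
 *	params.autoneg = false;
 *	params.forced_speed = 25000;
 *	rc = ops->set_link(cdev, &params);
 */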

struct qed_link_output {
	bool link_up;

	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported_caps);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertised_caps);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(lp_caps);

	u32 speed;	/* In Mb/s */
	u8 duplex;	/* In DUPLEX defs */
	u8 port;	/* In PORT defs */
	bool autoneg;
	u32 pause_config;

	/* EEE - capability & param */
	bool eee_supported;
	bool eee_active;
	u8 sup_caps;
	struct qed_link_eee_params eee;

	u32 sup_fec;
	u32 active_fec;
};

struct qed_probe_params {
	enum qed_protocol protocol;
	u32 dp_module;
	u8 dp_level;
	bool is_vf;
	bool recov_in_prog;
};

#define QED_DRV_VER_STR_SIZE 12
struct qed_slowpath_params {
	u32 int_mode;
	u8 drv_major;
	u8 drv_minor;
	u8 drv_rev;
	u8 drv_eng;
	u8 name[QED_DRV_VER_STR_SIZE];
};

#define ILT_PAGE_SIZE_TCFC 0x8000 /* 32KB */

struct qed_int_info {
	struct msix_entry *msix;
	u8 msix_cnt;

	/* This should be updated by the protocol driver */
	u8 used_cnt;
};

struct qed_generic_tlvs {
#define QED_TLV_IP_CSUM BIT(0)
#define QED_TLV_LSO BIT(1)
	u16 feat_flags;
#define QED_TLV_MAC_COUNT 3
	u8 mac[QED_TLV_MAC_COUNT][ETH_ALEN];
};

#define QED_I2C_DEV_ADDR_A0 0xA0
#define QED_I2C_DEV_ADDR_A2 0xA2

#define QED_NVM_SIGNATURE 0x12435687

enum qed_nvm_flash_cmd {
	QED_NVM_FLASH_CMD_FILE_DATA = 0x2,
	QED_NVM_FLASH_CMD_FILE_START = 0x3,
	QED_NVM_FLASH_CMD_NVM_CHANGE = 0x4,
	QED_NVM_FLASH_CMD_NVM_CFG_ID = 0x5,
	QED_NVM_FLASH_CMD_NVM_MAX,
};

struct qed_devlink {
	struct qed_dev *cdev;
	struct devlink_health_reporter *fw_reporter;
};

struct qed_common_cb_ops {
	void (*arfs_filter_op)(void *dev, void *fltr, u8 fw_rc);
	void (*link_update)(void *dev, struct qed_link_output *link);
	void (*schedule_recovery_handler)(void *dev);
	void (*schedule_hw_err_handler)(void *dev,
					enum qed_hw_err_type err_type);
	void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type);
	void (*get_generic_tlv_data)(void *dev, struct qed_generic_tlvs *data);
	void (*get_protocol_tlv_data)(void *dev, void *data);
	void (*bw_update)(void *dev);
};

struct qed_selftest_ops {
	/**
	 * selftest_interrupt(): Perform interrupt test.
	 *
	 * @cdev: Qed dev pointer.
	 *
	 * Return: 0 on success, error otherwise.
	 */
	int (*selftest_interrupt)(struct qed_dev *cdev);

	/**
	 * selftest_memory(): Perform memory test.
	 *
	 * @cdev: Qed dev pointer.
	 *
	 * Return: 0 on success, error otherwise.
	 */
	int (*selftest_memory)(struct qed_dev *cdev);

	/**
	 * selftest_register(): Perform register test.
	 *
	 * @cdev: Qed dev pointer.
	 *
	 * Return: 0 on success, error otherwise.
	 */
	int (*selftest_register)(struct qed_dev *cdev);

	/**
	 * selftest_clock(): Perform clock test.
	 *
	 * @cdev: Qed dev pointer.
	 *
	 * Return: 0 on success, error otherwise.
	 */
	int (*selftest_clock)(struct qed_dev *cdev);

	/**
	 * selftest_nvram(): Perform nvram test.
	 *
	 * @cdev: Qed dev pointer.
	 *
	 * Return: 0 on success, error otherwise.
	 */
	int (*selftest_nvram)(struct qed_dev *cdev);
};

struct qed_common_ops {
	struct qed_selftest_ops *selftest;

	struct qed_dev *(*probe)(struct pci_dev *dev,
				 struct qed_probe_params *params);

	void (*remove)(struct qed_dev *cdev);

	int (*set_power_state)(struct qed_dev *cdev, pci_power_t state);

	void (*set_name)(struct qed_dev *cdev, char name[]);

	/* Client drivers need to make this call before slowpath_start.
	 * The PF params required for the call before slowpath_start are
	 * documented within the qed_pf_params structure definition.
	 */
	void (*update_pf_params)(struct qed_dev *cdev,
				 struct qed_pf_params *params);

	int (*slowpath_start)(struct qed_dev *cdev,
			      struct qed_slowpath_params *params);

	int (*slowpath_stop)(struct qed_dev *cdev);

	/* Requests to use `cnt' interrupts for fastpath.
	 * Upon success, returns the number of interrupts allocated for
	 * fastpath.
	 */
	int (*set_fp_int)(struct qed_dev *cdev, u16 cnt);

	/* Fills `info' with pointers required for utilizing interrupts */
	int (*get_fp_int)(struct qed_dev *cdev, struct qed_int_info *info);
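
	/* Example flow (a sketch; local names hypothetical): a protocol
	 * driver typically requests fastpath interrupts and then retrieves
	 * the allocated vectors:
	 *
	 *	rc = ops->set_fp_int(cdev, num_queues);
	 *	rc = ops->get_fp_int(cdev, &int_info);
	 */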

	u32 (*sb_init)(struct qed_dev *cdev,
		       struct qed_sb_info *sb_info,
		       void *sb_virt_addr,
		       dma_addr_t sb_phy_addr,
		       u16 sb_id,
		       enum qed_sb_type type);

	u32 (*sb_release)(struct qed_dev *cdev,
			  struct qed_sb_info *sb_info,
			  u16 sb_id,
			  enum qed_sb_type type);

	void (*simd_handler_config)(struct qed_dev *cdev,
				    void *token,
				    int index,
				    void (*handler)(void *));

	void (*simd_handler_clean)(struct qed_dev *cdev, int index);

	int (*dbg_grc)(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes);

	int (*dbg_grc_size)(struct qed_dev *cdev);

	int (*dbg_all_data)(struct qed_dev *cdev, void *buffer);

	int (*dbg_all_data_size)(struct qed_dev *cdev);

	int (*report_fatal_error)(struct devlink *devlink,
				  enum qed_hw_err_type err_type);

	/**
	 * can_link_change(): Report whether the instance is allowed to
	 * change the link.
	 *
	 * @cdev: Qed dev pointer.
	 *
	 * Return: true if link-change is allowed, false otherwise.
	 */
	bool (*can_link_change)(struct qed_dev *cdev);

	/**
	 * set_link(): Set links according to params.
	 *
	 * @cdev: Qed dev pointer.
	 * @params: Values used to override the default link configuration.
	 *
	 * Return: 0 on success, error otherwise.
	 */
	int (*set_link)(struct qed_dev *cdev,
			struct qed_link_params *params);

	/**
	 * get_link(): Returns the current link state.
	 *
	 * @cdev: Qed dev pointer.
	 * @if_link: Structure to be filled with current link configuration.
	 *
	 * Return: Void.
	 */
	void (*get_link)(struct qed_dev *cdev,
			 struct qed_link_output *if_link);

	/**
	 * drain(): Drains chip in case Tx completions fail to arrive due to pause.
	 *
	 * @cdev: Qed dev pointer.
	 *
	 * Return: Int.
	 */
	int (*drain)(struct qed_dev *cdev);

	/**
	 * update_msglvl(): Update module debug level.
	 *
	 * @cdev: Qed dev pointer.
	 * @dp_module: Debug module.
	 * @dp_level: Debug level.
	 *
	 * Return: Void.
	 */
	void (*update_msglvl)(struct qed_dev *cdev,
			      u32 dp_module,
			      u8 dp_level);

	int (*chain_alloc)(struct qed_dev *cdev,
			   struct qed_chain *chain,
			   struct qed_chain_init_params *params);

	void (*chain_free)(struct qed_dev *cdev,
			   struct qed_chain *p_chain);

	/**
	 * nvm_flash(): Flash nvm data.
	 *
	 * @cdev: Qed dev pointer.
	 * @name: File containing the data.
	 *
	 * Return: 0 on success, error otherwise.
	 */
	int (*nvm_flash)(struct qed_dev *cdev, const char *name);

	/**
	 * nvm_get_image(): Read an entire image from nvram.
	 *
	 * @cdev: Qed dev pointer.
	 * @type: Type of the requested nvram image.
	 * @buf: Preallocated buffer to fill with the image.
	 * @len: Length of the allocated buffer.
	 *
	 * Return: 0 on success, error otherwise.
	 */
	int (*nvm_get_image)(struct qed_dev *cdev,
			     enum qed_nvm_images type, u8 *buf, u16 len);

	/**
	 * set_coalesce(): Configure Rx and Tx coalesce values in usec.
	 *
	 * @cdev: Qed dev pointer.
	 * @rx_coal: Rx coalesce value in usec.
	 * @tx_coal: Tx coalesce value in usec.
	 * @handle: Handle.
	 *
	 * Return: 0 on success, error otherwise.
	 */
	int (*set_coalesce)(struct qed_dev *cdev,
			    u16 rx_coal, u16 tx_coal, void *handle);

	/**
	 * set_led(): Configure LED mode.
	 *
	 * @cdev: Qed dev pointer.
	 * @mode: LED mode.
	 *
	 * Return: 0 on success, error otherwise.
	 */
	int (*set_led)(struct qed_dev *cdev,
		       enum qed_led_mode mode);

	/**
	 * attn_clr_enable(): Prevent attentions from being reasserted.
	 *
	 * @cdev: Qed dev pointer.
	 * @clr_enable: Clear enable.
	 *
	 * Return: Void.
	 */
	void (*attn_clr_enable)(struct qed_dev *cdev, bool clr_enable);

	/**
	 * db_recovery_add(): Add doorbell information to the doorbell
	 * recovery mechanism.
	 *
	 * @cdev: Qed dev pointer.
	 * @db_addr: Doorbell address.
	 * @db_data: Address of where db_data is stored.
	 * @db_width: Doorbell is 32b or 64b.
	 * @db_space: Doorbell recovery addresses are user or kernel space.
	 *
	 * Return: Int.
	 */
	int (*db_recovery_add)(struct qed_dev *cdev,
			       void __iomem *db_addr,
			       void *db_data,
			       enum qed_db_rec_width db_width,
			       enum qed_db_rec_space db_space);

	/**
	 * db_recovery_del(): Remove doorbell information from the doorbell
	 * recovery mechanism. db_data serves as key (db_addr is not unique).
	 *
	 * @cdev: Qed dev pointer.
	 * @db_addr: Doorbell address.
	 * @db_data: Address where db_data is stored. Serves as key for the
	 *           entry to delete.
	 *
	 * Return: Int.
	 */
	int (*db_recovery_del)(struct qed_dev *cdev,
			       void __iomem *db_addr, void *db_data);

	/**
	 * recovery_process(): Trigger a recovery process.
	 *
	 * @cdev: Qed dev pointer.
	 *
	 * Return: 0 on success, error otherwise.
	 */
	int (*recovery_process)(struct qed_dev *cdev);

	/**
	 * recovery_prolog(): Execute the prolog operations of a recovery process.
	 *
	 * @cdev: Qed dev pointer.
	 *
	 * Return: 0 on success, error otherwise.
	 */
	int (*recovery_prolog)(struct qed_dev *cdev);

	/**
	 * update_drv_state(): API to inform qed of a change in the driver
	 * state.
	 *
	 * @cdev: Qed dev pointer.
	 * @active: Whether the driver is active.
	 *
	 * Return: Int.
	 */
	int (*update_drv_state)(struct qed_dev *cdev, bool active);

	/**
	 * update_mac(): API to inform qed of a change in the mac address.
	 *
	 * @cdev: Qed dev pointer.
	 * @mac: MAC.
	 *
	 * Return: Int.
	 */
	int (*update_mac)(struct qed_dev *cdev, const u8 *mac);

	/**
	 * update_mtu(): API to inform qed of a change in the mtu.
	 *
	 * @cdev: Qed dev pointer.
	 * @mtu: MTU.
	 *
	 * Return: Int.
	 */
	int (*update_mtu)(struct qed_dev *cdev, u16 mtu);

	/**
	 * update_wol(): Inform qed of a change in the WoL configuration.
	 *
	 * @cdev: Qed dev pointer.
	 * @enabled: True iff WoL should be enabled.
	 *
	 * Return: Int.
	 */
	int (*update_wol)(struct qed_dev *cdev, bool enabled);

	/**
	 * read_module_eeprom(): Read EEPROM.
	 *
	 * @cdev: Qed dev pointer.
	 * @buf: Buffer.
	 * @dev_addr: PHY device memory region.
	 * @offset: Offset into eeprom contents to be read.
	 * @len: Buffer length, i.e., max bytes to be read.
	 *
	 * Return: Int.
	 */
	int (*read_module_eeprom)(struct qed_dev *cdev,
				  char *buf, u8 dev_addr, u32 offset, u32 len);

	/**
	 * get_affin_hwfn_idx(): Get affine HW function.
	 *
	 * @cdev: Qed dev pointer.
	 *
	 * Return: u8.
	 */
	u8 (*get_affin_hwfn_idx)(struct qed_dev *cdev);

	/**
	 * read_nvm_cfg(): Read NVM config attribute value.
	 *
	 * @cdev: Qed dev pointer.
	 * @buf: Buffer.
	 * @cmd: NVM CFG command id.
	 * @entity_id: Entity id.
	 *
	 * Return: Int.
	 */
	int (*read_nvm_cfg)(struct qed_dev *cdev, u8 **buf, u32 cmd,
			    u32 entity_id);
	/**
	 * read_nvm_cfg_len(): Read the length of an NVM config attribute
	 * value.
	 *
	 * @cdev: Qed dev pointer.
	 * @cmd: NVM CFG command id.
	 *
	 * Return: config id length, 0 on error.
	 */
	int (*read_nvm_cfg_len)(struct qed_dev *cdev, u32 cmd);

	/**
	 * set_grc_config(): Configure value for grc config id.
	 *
	 * @cdev: Qed dev pointer.
	 * @cfg_id: grc config id.
	 * @val: grc config value.
	 *
	 * Return: Int.
	 */
	int (*set_grc_config)(struct qed_dev *cdev, u32 cfg_id, u32 val);

	struct devlink *(*devlink_register)(struct qed_dev *cdev);

	void (*devlink_unregister)(struct devlink *devlink);
};

#define MASK_FIELD(_name, _value) \
	((_value) &= (_name ## _MASK))

#define FIELD_VALUE(_name, _value) \
	(((_value) & _name ## _MASK) << _name ## _SHIFT)

#define SET_FIELD(value, name, flag) \
	do { \
		(value) &= ~(name ## _MASK << name ## _SHIFT); \
		(value) |= (((u64)(flag)) << (name ## _SHIFT)); \
	} while (0)

#define GET_FIELD(value, name) \
	(((value) >> (name ## _SHIFT)) & name ## _MASK)

#define GET_MFW_FIELD(name, field) \
	(((name) & (field ## _MASK)) >> (field ## _OFFSET))

#define SET_MFW_FIELD(name, field, value) \
	do { \
		(name) &= ~(field ## _MASK); \
		(name) |= (((value) << (field ## _OFFSET)) & (field ## _MASK));\
	} while (0)
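
/* Illustrative helper (hypothetical, not part of the qed API): decompose the
 * mfw_rev word of struct qed_dev_info into its four version fields with
 * GET_MFW_FIELD() and the QED_MFW_VERSION_* masks defined above, assuming
 * the conventional major.minor.rev.eng byte ordering.
 */
static inline void qed_mfw_ver_example(u32 mfw_rev,
				       u8 *major, u8 *minor, u8 *rev, u8 *eng)
{
	*major = GET_MFW_FIELD(mfw_rev, QED_MFW_VERSION_3);
	*minor = GET_MFW_FIELD(mfw_rev, QED_MFW_VERSION_2);
	*rev = GET_MFW_FIELD(mfw_rev, QED_MFW_VERSION_1);
	*eng = GET_MFW_FIELD(mfw_rev, QED_MFW_VERSION_0);
}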

#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)

/* Debug print definitions */
#define DP_ERR(cdev, fmt, ...) \
	do { \
		pr_err("[%s:%d(%s)]" fmt, \
		       __func__, __LINE__, \
		       DP_NAME(cdev) ? DP_NAME(cdev) : "", \
		       ## __VA_ARGS__); \
	} while (0)

#define DP_NOTICE(cdev, fmt, ...) \
	do { \
		if (unlikely((cdev)->dp_level <= QED_LEVEL_NOTICE)) { \
			pr_notice("[%s:%d(%s)]" fmt, \
				  __func__, __LINE__, \
				  DP_NAME(cdev) ? DP_NAME(cdev) : "", \
				  ## __VA_ARGS__); \
		} \
	} while (0)

#define DP_INFO(cdev, fmt, ...) \
	do { \
		if (unlikely((cdev)->dp_level <= QED_LEVEL_INFO)) { \
			pr_notice("[%s:%d(%s)]" fmt, \
				  __func__, __LINE__, \
				  DP_NAME(cdev) ? DP_NAME(cdev) : "", \
				  ## __VA_ARGS__); \
		} \
	} while (0)
#define DP_VERBOSE(cdev, module, fmt, ...) \
	do { \
		if (unlikely(((cdev)->dp_level <= QED_LEVEL_VERBOSE) && \
			     ((cdev)->dp_module & (module)))) { \
			pr_notice("[%s:%d(%s)]" fmt, \
				  __func__, __LINE__, \
				  DP_NAME(cdev) ? DP_NAME(cdev) : "", \
				  ## __VA_ARGS__); \
		} \
	} while (0)

enum DP_LEVEL {
	QED_LEVEL_VERBOSE = 0x0,
	QED_LEVEL_INFO = 0x1,
	QED_LEVEL_NOTICE = 0x2,
	QED_LEVEL_ERR = 0x3,
};

#define QED_LOG_LEVEL_SHIFT (30)
#define QED_LOG_VERBOSE_MASK (0x3fffffff)
#define QED_LOG_INFO_MASK (0x40000000)
#define QED_LOG_NOTICE_MASK (0x80000000)

enum DP_MODULE {
	QED_MSG_SPQ = 0x10000,
	QED_MSG_STATS = 0x20000,
	QED_MSG_DCB = 0x40000,
	QED_MSG_IOV = 0x80000,
	QED_MSG_SP = 0x100000,
	QED_MSG_STORAGE = 0x200000,
	QED_MSG_CXT = 0x800000,
	QED_MSG_LL2 = 0x1000000,
	QED_MSG_ILT = 0x2000000,
	QED_MSG_RDMA = 0x4000000,
	QED_MSG_DEBUG = 0x8000000,
	/* to be added...up to 0x8000000 */
};
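
/* Usage sketch (hypothetical values): dp_level selects the minimal severity
 * that gets printed, while dp_module is a bitmask gating DP_VERBOSE() per
 * subsystem. A driver could enable verbose IOV traces via the common ops:
 *
 *	ops->update_msglvl(cdev, QED_MSG_IOV, QED_LEVEL_VERBOSE);
 *	...
 *	DP_VERBOSE(cdev, QED_MSG_IOV, "VF %d configured\n", vf_id);
 */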

enum qed_mf_mode {
	QED_MF_DEFAULT,
	QED_MF_OVLAN,
	QED_MF_NPAR,
};

struct qed_eth_stats_common {
	u64 no_buff_discards;
	u64 packet_too_big_discard;
	u64 ttl0_discard;
	u64 rx_ucast_bytes;
	u64 rx_mcast_bytes;
	u64 rx_bcast_bytes;
	u64 rx_ucast_pkts;
	u64 rx_mcast_pkts;
	u64 rx_bcast_pkts;
	u64 mftag_filter_discards;
	u64 mac_filter_discards;
	u64 gft_filter_drop;
	u64 tx_ucast_bytes;
	u64 tx_mcast_bytes;
	u64 tx_bcast_bytes;
	u64 tx_ucast_pkts;
	u64 tx_mcast_pkts;
	u64 tx_bcast_pkts;
	u64 tx_err_drop_pkts;
	u64 tpa_coalesced_pkts;
	u64 tpa_coalesced_events;
	u64 tpa_aborts_num;
	u64 tpa_not_coalesced_pkts;
	u64 tpa_coalesced_bytes;

	/* port */
	u64 rx_64_byte_packets;
	u64 rx_65_to_127_byte_packets;
	u64 rx_128_to_255_byte_packets;
	u64 rx_256_to_511_byte_packets;
	u64 rx_512_to_1023_byte_packets;
	u64 rx_1024_to_1518_byte_packets;
	u64 rx_crc_errors;
	u64 rx_mac_crtl_frames;
	u64 rx_pause_frames;
	u64 rx_pfc_frames;
	u64 rx_align_errors;
	u64 rx_carrier_errors;
	u64 rx_oversize_packets;
	u64 rx_jabbers;
	u64 rx_undersize_packets;
	u64 rx_fragments;
	u64 tx_64_byte_packets;
	u64 tx_65_to_127_byte_packets;
	u64 tx_128_to_255_byte_packets;
	u64 tx_256_to_511_byte_packets;
	u64 tx_512_to_1023_byte_packets;
	u64 tx_1024_to_1518_byte_packets;
	u64 tx_pause_frames;
	u64 tx_pfc_frames;
	u64 brb_truncates;
	u64 brb_discards;
	u64 rx_mac_bytes;
	u64 rx_mac_uc_packets;
	u64 rx_mac_mc_packets;
	u64 rx_mac_bc_packets;
	u64 rx_mac_frames_ok;
	u64 tx_mac_bytes;
	u64 tx_mac_uc_packets;
	u64 tx_mac_mc_packets;
	u64 tx_mac_bc_packets;
	u64 tx_mac_ctrl_frames;
	u64 link_change_count;
};

struct qed_eth_stats_bb {
	u64 rx_1519_to_1522_byte_packets;
	u64 rx_1519_to_2047_byte_packets;
	u64 rx_2048_to_4095_byte_packets;
	u64 rx_4096_to_9216_byte_packets;
	u64 rx_9217_to_16383_byte_packets;
	u64 tx_1519_to_2047_byte_packets;
	u64 tx_2048_to_4095_byte_packets;
	u64 tx_4096_to_9216_byte_packets;
	u64 tx_9217_to_16383_byte_packets;
	u64 tx_lpi_entry_count;
	u64 tx_total_collisions;
};

struct qed_eth_stats_ah {
	u64 rx_1519_to_max_byte_packets;
	u64 tx_1519_to_max_byte_packets;
};

struct qed_eth_stats {
	struct qed_eth_stats_common common;

	union {
		struct qed_eth_stats_bb bb;
		struct qed_eth_stats_ah ah;
	};
};

#define QED_SB_IDX 0x0002

#define RX_PI 0
#define TX_PI(tc) (RX_PI + 1 + tc)

struct qed_sb_cnt_info {
	/* Original, current, and free SBs for PF */
	int orig;
	int cnt;
	int free_cnt;

	/* Original, current, and free SBs for child VFs */
	int iov_orig;
	int iov_cnt;
	int free_cnt_iov;
};

static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
{
	u32 prod = 0;
	u16 rc = 0;

	prod = le32_to_cpu(sb_info->sb_virt->prod_index) &
	       STATUS_BLOCK_PROD_INDEX_MASK;
	if (sb_info->sb_ack != prod) {
		sb_info->sb_ack = prod;
		rc |= QED_SB_IDX;
	}

	/* Let SB update */
	return rc;
}

/**
 * qed_sb_ack(): This function creates an update command for interrupts
 *               that is written to the IGU.
 *
 * @sb_info: This is the structure allocated and
 *           initialized per status block. Assumption is
 *           that it was initialized using qed_sb_init.
 * @int_cmd: Enable/Disable/Nop.
 * @upd_flg: Whether the igu consumer should be updated.
 *
 * Return: Void.
 */
static inline void qed_sb_ack(struct qed_sb_info *sb_info,
			      enum igu_int_cmd int_cmd,
			      u8 upd_flg)
{
	u32 igu_ack;

	igu_ack = ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
		   (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
		   (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
		   (IGU_SEG_ACCESS_REG <<
		    IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));

	DIRECT_REG_WR(sb_info->igu_addr, igu_ack);

	/* Both segments (interrupts & acks) are written to the same address;
	 * we need to guarantee that all commands are received by the HW
	 * in order.
	 */
	barrier();
}
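
/* Typical fastpath pattern (a sketch, not mandated by this header): poll the
 * status block for new work and re-enable interrupts when done, e.g.
 *
 *	if (qed_sb_update_sb_idx(sb_info) & QED_SB_IDX)
 *		... handle the new fastpath work ...
 *	qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
 */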

static inline void __internal_ram_wr(void *p_hwfn,
				     void __iomem *addr,
				     int size,
				     u32 *data)
{
	unsigned int i;

	for (i = 0; i < size / sizeof(*data); i++)
		DIRECT_REG_WR(&((u32 __iomem *)addr)[i], data[i]);
}

static inline void internal_ram_wr(void __iomem *addr,
				   int size,
				   u32 *data)
{
	__internal_ram_wr(NULL, addr, size, data);
}

enum qed_rss_caps {
	QED_RSS_IPV4 = 0x1,
	QED_RSS_IPV6 = 0x2,
	QED_RSS_IPV4_TCP = 0x4,
	QED_RSS_IPV6_TCP = 0x8,
	QED_RSS_IPV4_UDP = 0x10,
	QED_RSS_IPV6_UDP = 0x20,
};

#define QED_RSS_IND_TABLE_SIZE 128
#define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */
#endif