1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /* Atlantic Network Driver
3  *
4  * Copyright (C) 2014-2019 aQuantia Corporation
5  * Copyright (C) 2019-2020 Marvell International Ltd.
6  */
7 
8 /* File aq_ring.h: Declaration of functions for Rx/Tx rings. */
9 
10 #ifndef AQ_RING_H
11 #define AQ_RING_H
12 
13 #include "aq_common.h"
14 
15 struct page;
16 struct aq_nic_cfg_s;
17 
/* Descriptor of one RX buffer carved out of a DMA-mapped page allocation. */
struct aq_rxpage {
	struct page *page;	/* backing page(s) */
	dma_addr_t daddr;	/* DMA address of the start of @page */
	unsigned int order;	/* allocation order of @page — see aq_ring_s::page_order */
	unsigned int pg_off;	/* byte offset of this buffer within @page */
};
24 
/*           TxC       SOP        DX         EOP
 *         +----------+----------+----------+-----------
 *   8bytes|len l3,l4 | pa       | pa       | pa
 *         +----------+----------+----------+-----------
 * 4/8bytes|len pkt   |len pkt   |          | skb
 *         +----------+----------+----------+-----------
 * 4/8bytes|is_gso    |len,flags |len       |len,is_eop
 *         +----------+----------+----------+-----------
 *
 *  This aq_ring_buff_s doesn't have endianness dependency.
 *  It is __packed for cache line optimizations.
 */
struct __packed aq_ring_buff_s {
	/* First word: role-dependent payload (see diagram above). */
	union {
		/* RX/TX */
		dma_addr_t pa;		/* DMA address of the data buffer */
		/* RX */
		struct {
			u32 rss_hash;	/* RSS hash value */
			u16 next;	/* index of the next buff of this frame */
			u8 is_hash_l4;	/* nonzero when @rss_hash is an L4 hash */
			u8 rsvd1;
			struct aq_rxpage rxdata;	/* backing RX page buffer */
			u16 vlan_rx_tag;	/* VLAN tag stripped on receive */
		};
		/* EOP */
		struct {
			dma_addr_t pa_eop;
			struct sk_buff *skb;	/* skb to complete at end-of-packet */
		};
		/* TxC */
		struct {
			u32 mss;	/* segment size for GSO */
			u8 len_l2;	/* L2 header length, bytes */
			u8 len_l3;	/* L3 header length, bytes */
			u8 len_l4;	/* L4 header length, bytes */
			u8 is_ipv6:1;
			u8 rsvd2:7;
			u32 len_pkt;	/* full packet length */
			u16 vlan_tx_tag;	/* VLAN tag to insert on transmit */
		};
	};
	/* Second word: per-descriptor state, also accessible as one
	 * 64-bit @flags word for bulk clear/copy.
	 */
	union {
		struct {
			u32 len:16;	/* data length of this buffer */
			u32 is_ip_cso:1;
			u32 is_udp_cso:1;
			u32 is_tcp_cso:1;
			u32 is_cso_err:1;
			u32 is_sop:1;	/* start of packet */
			u32 is_eop:1;	/* end of packet */
			u32 is_gso_tcp:1;
			u32 is_gso_udp:1;
			u32 is_mapped:1;
			u32 is_cleaned:1;
			u32 is_error:1;
			u32 is_vlan:1;
			u32 is_lro:1;
			u32 rsvd3:3;
			u16 eop_index;	/* ring index of this packet's EOP buff */
			u16 rsvd4;
		};
		u64 flags;	/* the whole bitfield set as a single word */
	};
};
90 
/* Per-ring RX statistics, protected by @syncp for 64-bit reads on 32-bit. */
struct aq_ring_stats_rx_s {
	struct u64_stats_sync syncp;	/* must be first */
	u64 errors;
	u64 packets;
	u64 bytes;
	u64 lro_packets;
	u64 jumbo_packets;
	u64 alloc_fails;	/* page/buffer allocation failures */
	u64 skb_alloc_fails;	/* skb allocation failures */
	u64 polls;
	u64 pg_losts;
	u64 pg_flips;
	u64 pg_reuses;
};
105 
/* Per-ring TX statistics, protected by @syncp for 64-bit reads on 32-bit. */
struct aq_ring_stats_tx_s {
	struct u64_stats_sync syncp;	/* must be first */
	u64 errors;
	u64 packets;
	u64 bytes;
	u64 queue_restarts;
};
113 
/* A ring holds either RX or TX stats depending on its direction. */
union aq_ring_stats_s {
	struct aq_ring_stats_rx_s rx;
	struct aq_ring_stats_tx_s tx;
};
118 
/* Direction of a ring; selects which member of aq_ring_stats_s is used. */
enum atl_ring_type {
	ATL_RING_TX,
	ATL_RING_RX,
};
123 
/* One TX or RX descriptor ring and its software bookkeeping state. */
struct aq_ring_s {
	struct aq_ring_buff_s *buff_ring;	/* per-descriptor SW context */
	u8 *dx_ring;		/* descriptors ring, dma shared mem */
	struct aq_nic_s *aq_nic;	/* owning NIC */
	unsigned int idx;	/* for HW layer registers operations */
	unsigned int hw_head;	/* last head position reported by HW */
	unsigned int sw_head;	/* next descriptor to be cleaned by SW */
	unsigned int sw_tail;	/* next descriptor to be filled by SW */
	unsigned int size;	/* descriptors number */
	unsigned int dx_size;	/* TX or RX descriptor size,  */
				/* stored here for faster math */
	unsigned int page_order;	/* allocation order for RX pages */
	union aq_ring_stats_s stats;
	dma_addr_t dx_ring_pa;	/* DMA address of @dx_ring */
	enum atl_ring_type ring_type;
};
140 
/* Interrupt-vector/CPU placement parameters for a ring. */
struct aq_ring_param_s {
	unsigned int vec_idx;	/* interrupt vector index */
	unsigned int cpu;	/* CPU this vector is steered to */
	cpumask_t affinity_mask;	/* IRQ affinity for the vector */
};
146 
aq_buf_vaddr(struct aq_rxpage * rxpage)147 static inline void *aq_buf_vaddr(struct aq_rxpage *rxpage)
148 {
149 	return page_to_virt(rxpage->page) + rxpage->pg_off;
150 }
151 
aq_buf_daddr(struct aq_rxpage * rxpage)152 static inline dma_addr_t aq_buf_daddr(struct aq_rxpage *rxpage)
153 {
154 	return rxpage->daddr + rxpage->pg_off;
155 }
156 
aq_ring_next_dx(struct aq_ring_s * self,unsigned int dx)157 static inline unsigned int aq_ring_next_dx(struct aq_ring_s *self,
158 					   unsigned int dx)
159 {
160 	return (++dx >= self->size) ? 0U : dx;
161 }
162 
aq_ring_avail_dx(struct aq_ring_s * self)163 static inline unsigned int aq_ring_avail_dx(struct aq_ring_s *self)
164 {
165 	return (((self->sw_tail >= self->sw_head)) ?
166 		(self->size - 1) - self->sw_tail + self->sw_head :
167 		self->sw_head - self->sw_tail - 1);
168 }
169 
/* Allocate a TX descriptor ring for queue @idx. */
struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self,
				   struct aq_nic_s *aq_nic,
				   unsigned int idx,
				   struct aq_nic_cfg_s *aq_nic_cfg);
/* Allocate an RX descriptor ring for queue @idx. */
struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
				   struct aq_nic_s *aq_nic,
				   unsigned int idx,
				   struct aq_nic_cfg_s *aq_nic_cfg);
/* Reset ring software state and record its direction (@ring_type). */
int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type);
/* Release buffers still held by an RX ring. */
void aq_ring_rx_deinit(struct aq_ring_s *self);
/* Free the ring's descriptor and buffer memory. */
void aq_ring_free(struct aq_ring_s *self);
void aq_ring_update_queue_state(struct aq_ring_s *ring);
void aq_ring_queue_wake(struct aq_ring_s *ring);
void aq_ring_queue_stop(struct aq_ring_s *ring);
/* Reclaim completed TX descriptors; returns whether the budget sufficed. */
bool aq_ring_tx_clean(struct aq_ring_s *self);
/* Process received packets up to @budget, reporting count via @work_done. */
int aq_ring_rx_clean(struct aq_ring_s *self,
		     struct napi_struct *napi,
		     int *work_done,
		     int budget);
/* Refill the RX ring with fresh buffers. */
int aq_ring_rx_fill(struct aq_ring_s *self);

/* Allocate an RX ring used for hardware timestamping. */
struct aq_ring_s *aq_ring_hwts_rx_alloc(struct aq_ring_s *self,
		struct aq_nic_s *aq_nic, unsigned int idx,
		unsigned int size, unsigned int dx_size);
void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic);

/* Copy this ring's statistics into @data; returns the number of u64s written. */
unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data);
197 
198 #endif /* AQ_RING_H */
199