/* SPDX-License-Identifier: ISC */
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#ifndef __MT76_H
#define __MT76_H

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/leds.h>
#include <linux/usb.h>
#include <linux/average.h>
#include <net/mac80211.h>
#include "util.h"
#include "testmode.h"

#define MT_MCU_RING_SIZE	32
#define MT_RX_BUF_SIZE		2048
#define MT_SKB_HEAD_LEN		128

#define MT_MAX_NON_AQL_PKT	16
#define MT_TXQ_FREE_THR		32

#define MT76_TOKEN_FREE_THR	64

struct mt76_dev;
struct mt76_phy;
struct mt76_wcid;
struct mt76s_intr;

struct mt76_reg_pair {
	u32 reg;
	u32 value;
};

enum mt76_bus_type {
	MT76_BUS_MMIO,
	MT76_BUS_USB,
	MT76_BUS_SDIO,
};

struct mt76_bus_ops {
	u32 (*rr)(struct mt76_dev *dev, u32 offset);
	void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
	u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
	void (*write_copy)(struct mt76_dev *dev, u32 offset, const void *data,
			   int len);
	void (*read_copy)(struct mt76_dev *dev, u32 offset, void *data,
			  int len);
	int (*wr_rp)(struct mt76_dev *dev, u32 base,
		     const struct mt76_reg_pair *rp, int len);
	int (*rd_rp)(struct mt76_dev *dev, u32 base,
		     struct mt76_reg_pair *rp, int len);
	enum mt76_bus_type type;
};

#define mt76_is_usb(dev) ((dev)->bus->type == MT76_BUS_USB)
#define mt76_is_mmio(dev) ((dev)->bus->type == MT76_BUS_MMIO)
#define mt76_is_sdio(dev) ((dev)->bus->type == MT76_BUS_SDIO)
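
/*
 * Register access is routed through the per-bus mt76_bus_ops hooks above,
 * so the same core code can drive MMIO, USB and SDIO parts.  A minimal
 * sketch of how a backend might wire this up (the callback names are made
 * up for illustration):
 *
 *	static const struct mt76_bus_ops foo_mmio_ops = {
 *		.rr = foo_rr,
 *		.wr = foo_wr,
 *		.rmw = foo_rmw,
 *		.type = MT76_BUS_MMIO,
 *	};
 *	dev->bus = &foo_mmio_ops;
 */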

enum mt76_txq_id {
	MT_TXQ_VO = IEEE80211_AC_VO,
	MT_TXQ_VI = IEEE80211_AC_VI,
	MT_TXQ_BE = IEEE80211_AC_BE,
	MT_TXQ_BK = IEEE80211_AC_BK,
	MT_TXQ_PSD,
	MT_TXQ_BEACON,
	MT_TXQ_CAB,
	__MT_TXQ_MAX
};

enum mt76_mcuq_id {
	MT_MCUQ_WM,
	MT_MCUQ_WA,
	MT_MCUQ_FWDL,
	__MT_MCUQ_MAX
};

enum mt76_rxq_id {
	MT_RXQ_MAIN,
	MT_RXQ_MCU,
	MT_RXQ_MCU_WA,
	MT_RXQ_EXT,
	MT_RXQ_EXT_WA,
	__MT_RXQ_MAX
};

enum mt76_cipher_type {
	MT_CIPHER_NONE,
	MT_CIPHER_WEP40,
	MT_CIPHER_TKIP,
	MT_CIPHER_TKIP_NO_MIC,
	MT_CIPHER_AES_CCMP,
	MT_CIPHER_WEP104,
	MT_CIPHER_BIP_CMAC_128,
	MT_CIPHER_WEP128,
	MT_CIPHER_WAPI,
	MT_CIPHER_CCMP_CCX,
	MT_CIPHER_CCMP_256,
	MT_CIPHER_GCMP,
	MT_CIPHER_GCMP_256,
};

struct mt76_queue_buf {
	dma_addr_t addr;
	u16 len;
	bool skip_unmap;
};

struct mt76_tx_info {
	struct mt76_queue_buf buf[32];
	struct sk_buff *skb;
	int nbuf;
	u32 info;
};

struct mt76_queue_entry {
	union {
		void *buf;
		struct sk_buff *skb;
	};
	union {
		struct mt76_txwi_cache *txwi;
		struct urb *urb;
		int buf_sz;
	};
	u32 dma_addr[2];
	u16 dma_len[2];
	u16 wcid;
	bool skip_buf0:1;
	bool skip_buf1:1;
	bool done:1;
};

struct mt76_queue_regs {
	u32 desc_base;
	u32 ring_size;
	u32 cpu_idx;
	u32 dma_idx;
} __packed __aligned(4);
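
/*
 * mt76_queue_regs above is presumably a direct image of the hardware's
 * per-ring DMA registers (descriptor base, ring size, CPU and DMA ring
 * indices); __packed __aligned(4) keeps the layout exact so it can be
 * accessed through an __iomem pointer rather than copied.
 */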

struct mt76_queue {
	struct mt76_queue_regs __iomem *regs;

	spinlock_t lock;
	spinlock_t cleanup_lock;
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;

	u16 first;
	u16 head;
	u16 tail;
	int ndesc;
	int queued;
	int buf_size;
	bool stopped;
	bool blocked;

	u8 buf_offset;
	u8 hw_idx;
	u8 qid;

	dma_addr_t desc_dma;
	struct sk_buff *rx_head;
	struct page_frag_cache rx_page;
};
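
/*
 * Informal ring invariants, as the core appears to use struct mt76_queue:
 * head is the producer index and tail the consumer index, both wrapping
 * at ndesc, while queued tracks the current occupancy; lock and
 * cleanup_lock serialize producers and the cleanup path respectively.
 */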

struct mt76_mcu_ops {
	u32 headroom;
	u32 tailroom;

	int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
			    int len, bool wait_resp);
	int (*mcu_skb_send_msg)(struct mt76_dev *dev, struct sk_buff *skb,
				int cmd, int *seq);
	int (*mcu_parse_response)(struct mt76_dev *dev, int cmd,
				  struct sk_buff *skb, int seq);
	u32 (*mcu_rr)(struct mt76_dev *dev, u32 offset);
	void (*mcu_wr)(struct mt76_dev *dev, u32 offset, u32 val);
	int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base,
			 const struct mt76_reg_pair *rp, int len);
	int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
			 struct mt76_reg_pair *rp, int len);
	int (*mcu_restart)(struct mt76_dev *dev);
};

struct mt76_queue_ops {
	int (*init)(struct mt76_dev *dev,
		    int (*poll)(struct napi_struct *napi, int budget));

	int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base);

	int (*tx_queue_skb)(struct mt76_dev *dev, struct mt76_queue *q,
			    struct sk_buff *skb, struct mt76_wcid *wcid,
			    struct ieee80211_sta *sta);

	int (*tx_queue_skb_raw)(struct mt76_dev *dev, struct mt76_queue *q,
				struct sk_buff *skb, u32 tx_info);

	void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
			 int *len, u32 *info, bool *more);

	void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);

	void (*tx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q,
			   bool flush);

	void (*rx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q);

	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);

	void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q);
};

enum mt76_wcid_flags {
	MT_WCID_FLAG_CHECK_PS,
	MT_WCID_FLAG_PS,
	MT_WCID_FLAG_4ADDR,
	MT_WCID_FLAG_HDR_TRANS,
};

#define MT76_N_WCIDS 288

/* stored in ieee80211_tx_info::hw_queue */
#define MT_TX_HW_QUEUE_EXT_PHY		BIT(3)

DECLARE_EWMA(signal, 10, 8);

#define MT_WCID_TX_INFO_RATE		GENMASK(15, 0)
#define MT_WCID_TX_INFO_NSS		GENMASK(17, 16)
#define MT_WCID_TX_INFO_TXPWR_ADJ	GENMASK(25, 18)
#define MT_WCID_TX_INFO_SET		BIT(31)
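
/*
 * Sketch of how the MT_WCID_TX_INFO_* fields above combine into
 * mt76_wcid::tx_info (the rate/nss values are illustrative):
 *
 *	wcid->tx_info = FIELD_PREP(MT_WCID_TX_INFO_RATE, rate) |
 *			FIELD_PREP(MT_WCID_TX_INFO_NSS, nss) |
 *			MT_WCID_TX_INFO_SET;
 */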

struct mt76_wcid {
	struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];

	atomic_t non_aql_packets;
	unsigned long flags;

	struct ewma_signal rssi;
	int inactive_count;

	struct rate_info rate;

	u16 idx;
	u8 hw_key_idx;
	u8 hw_key_idx2;

	u8 sta:1;
	u8 ext_phy:1;
	u8 amsdu:1;

	u8 rx_check_pn;
	u8 rx_key_pn[IEEE80211_NUM_TIDS + 1][6];
	u16 cipher;

	u32 tx_info;
	bool sw_iv;

	struct list_head list;
	struct idr pktid;
};

struct mt76_txq {
	struct mt76_wcid *wcid;

	u16 agg_ssn;
	bool send_bar;
	bool aggr;
};

struct mt76_txwi_cache {
	struct list_head list;
	dma_addr_t dma_addr;

	struct sk_buff *skb;
};

struct mt76_rx_tid {
	struct rcu_head rcu_head;

	struct mt76_dev *dev;

	spinlock_t lock;
	struct delayed_work reorder_work;

	u16 head;
	u16 size;
	u16 nframes;

	u8 num;

	u8 started:1, stopped:1, timer_pending:1;

	struct sk_buff *reorder_buf[];
};

#define MT_TX_CB_DMA_DONE		BIT(0)
#define MT_TX_CB_TXS_DONE		BIT(1)
#define MT_TX_CB_TXS_FAILED		BIT(2)

#define MT_PACKET_ID_MASK		GENMASK(6, 0)
#define MT_PACKET_ID_NO_ACK		0
#define MT_PACKET_ID_NO_SKB		1
#define MT_PACKET_ID_FIRST		2
#define MT_PACKET_ID_HAS_RATE		BIT(7)
/* Timeout for giving up on a TXS callback, measured from the time the
 * DMA_DONE callback was seen. Once the packet has been processed by the
 * hardware, the firmware should not take long to send the TXS callback
 * if it is going to send one at all.
 */
#define MT_TX_STATUS_SKB_TIMEOUT	(HZ / 4)
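
/*
 * Packet IDs are thus 7-bit values: 0 and 1 are reserved for frames
 * without status tracking, tracked frames get IDs starting at
 * MT_PACKET_ID_FIRST, and BIT(7) flags IDs that carry rate information
 * instead of referring to an skb (see mt76_is_skb_pktid() below), e.g.:
 *
 *	if (!(pktid & MT_PACKET_ID_HAS_RATE) && pktid >= MT_PACKET_ID_FIRST)
 *		... pktid refers to an skb awaiting TX status ...
 */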

struct mt76_tx_cb {
	unsigned long jiffies;
	u16 wcid;
	u8 pktid;
	u8 flags;
};

enum {
	MT76_STATE_INITIALIZED,
	MT76_STATE_RUNNING,
	MT76_STATE_MCU_RUNNING,
	MT76_SCANNING,
	MT76_HW_SCANNING,
	MT76_HW_SCHED_SCANNING,
	MT76_RESTART,
	MT76_RESET,
	MT76_MCU_RESET,
	MT76_REMOVED,
	MT76_READING_STATS,
	MT76_STATE_POWER_OFF,
	MT76_STATE_SUSPEND,
	MT76_STATE_ROC,
	MT76_STATE_PM,
};

struct mt76_hw_cap {
	bool has_2ghz;
	bool has_5ghz;
	bool has_6ghz;
};

#define MT_DRV_TXWI_NO_FREE		BIT(0)
#define MT_DRV_TX_ALIGNED4_SKBS		BIT(1)
#define MT_DRV_SW_RX_AIRTIME		BIT(2)
#define MT_DRV_RX_DMA_HDR		BIT(3)
#define MT_DRV_HW_MGMT_TXQ		BIT(4)

struct mt76_driver_ops {
	u32 drv_flags;
	u32 survey_flags;
	u16 txwi_size;
	u16 token_size;
	u8 mcs_rates;

	void (*update_survey)(struct mt76_phy *phy);

	int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
			      enum mt76_txq_id qid, struct mt76_wcid *wcid,
			      struct ieee80211_sta *sta,
			      struct mt76_tx_info *tx_info);

	void (*tx_complete_skb)(struct mt76_dev *dev,
				struct mt76_queue_entry *e);

	bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);

	void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
		       struct sk_buff *skb);

	void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);

	void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
		       bool ps);

	int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);

	void (*sta_assoc)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			  struct ieee80211_sta *sta);

	void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta);
};

struct mt76_channel_state {
	u64 cc_active;
	u64 cc_busy;
	u64 cc_rx;
	u64 cc_bss_rx;
	u64 cc_tx;

	s8 noise;
};

struct mt76_sband {
	struct ieee80211_supported_band sband;
	struct mt76_channel_state *chan;
};

struct mt76_rate_power {
	union {
		struct {
			s8 cck[4];
			s8 ofdm[8];
			s8 stbc[10];
			s8 ht[16];
			s8 vht[10];
		};
		s8 all[48];
	};
};

/* addr req mask */
#define MT_VEND_TYPE_EEPROM	BIT(31)
#define MT_VEND_TYPE_CFG	BIT(30)
#define MT_VEND_TYPE_MASK	(MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)

#define MT_VEND_ADDR(type, n)	(MT_VEND_TYPE_##type | (n))
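/* e.g. MT_VEND_ADDR(CFG, 0x14) expands to (MT_VEND_TYPE_CFG | (0x14)) */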
enum mt_vendor_req {
	MT_VEND_DEV_MODE =	0x1,
	MT_VEND_WRITE =		0x2,
	MT_VEND_POWER_ON =	0x4,
	MT_VEND_MULTI_WRITE =	0x6,
	MT_VEND_MULTI_READ =	0x7,
	MT_VEND_READ_EEPROM =	0x9,
	MT_VEND_WRITE_FCE =	0x42,
	MT_VEND_WRITE_CFG =	0x46,
	MT_VEND_READ_CFG =	0x47,
	MT_VEND_READ_EXT =	0x63,
	MT_VEND_WRITE_EXT =	0x66,
	MT_VEND_FEATURE_SET =	0x91,
};

enum mt76u_in_ep {
	MT_EP_IN_PKT_RX,
	MT_EP_IN_CMD_RESP,
	__MT_EP_IN_MAX,
};

enum mt76u_out_ep {
	MT_EP_OUT_INBAND_CMD,
	MT_EP_OUT_AC_BE,
	MT_EP_OUT_AC_BK,
	MT_EP_OUT_AC_VI,
	MT_EP_OUT_AC_VO,
	MT_EP_OUT_HCCA,
	__MT_EP_OUT_MAX,
};

struct mt76_mcu {
	struct mutex mutex;
	u32 msg_seq;
	int timeout;

	struct sk_buff_head res_q;
	wait_queue_head_t wait;
};

#define MT_TX_SG_MAX_SIZE	8
#define MT_RX_SG_MAX_SIZE	4
#define MT_NUM_TX_ENTRIES	256
#define MT_NUM_RX_ENTRIES	128
#define MCU_RESP_URB_SIZE	1024
struct mt76_usb {
	struct mutex usb_ctrl_mtx;
	u8 *data;
	u16 data_len;

	struct mt76_worker status_worker;
	struct mt76_worker rx_worker;

	struct work_struct stat_work;

	u8 out_ep[__MT_EP_OUT_MAX];
	u8 in_ep[__MT_EP_IN_MAX];
	bool sg_en;

	struct mt76u_mcu {
		u8 *data;
		/* multiple reads */
		struct mt76_reg_pair *rp;
		int rp_len;
		u32 base;
		bool burst;
	} mcu;
};

#define MT76S_XMIT_BUF_SZ	(16 * PAGE_SIZE)
struct mt76_sdio {
	struct mt76_worker txrx_worker;
	struct mt76_worker status_worker;
	struct mt76_worker net_worker;

	struct work_struct stat_work;

	u8 *xmit_buf[IEEE80211_NUM_ACS + 2];

	struct sdio_func *func;
	void *intr_data;
	u8 hw_ver;
	wait_queue_head_t wait;

	struct {
		int pse_data_quota;
		int ple_data_quota;
		int pse_mcu_quota;
		int pse_page_size;
		int deficit;
	} sched;

	int (*parse_irq)(struct mt76_dev *dev, struct mt76s_intr *intr);
};

struct mt76_mmio {
	void __iomem *regs;
	spinlock_t irq_lock;
	u32 irqmask;
};

struct mt76_rx_status {
	union {
		struct mt76_wcid *wcid;
		u16 wcid_idx;
	};

	u32 reorder_time;

	u32 ampdu_ref;
	u32 timestamp;

	u8 iv[6];

	u8 ext_phy:1;
	u8 aggr:1;
	u8 qos_ctl;
	u16 seqno;

	u16 freq;
	u32 flag;
	u8 enc_flags;
	u8 encoding:2, bw:3, he_ru:3;
	u8 he_gi:2, he_dcm:1;
	u8 amsdu:1, first_amsdu:1, last_amsdu:1;
	u8 rate_idx;
	u8 nss;
	u8 band;
	s8 signal;
	u8 chains;
	s8 chain_signal[IEEE80211_MAX_CHAINS];
};

struct mt76_freq_range_power {
	const struct cfg80211_sar_freq_ranges *range;
	s8 power;
};

struct mt76_testmode_ops {
	int (*set_state)(struct mt76_phy *phy, enum mt76_testmode_state state);
	int (*set_params)(struct mt76_phy *phy, struct nlattr **tb,
			  enum mt76_testmode_state new_state);
	int (*dump_stats)(struct mt76_phy *phy, struct sk_buff *msg);
};

struct mt76_testmode_data {
	enum mt76_testmode_state state;

	u32 param_set[DIV_ROUND_UP(NUM_MT76_TM_ATTRS, 32)];
	struct sk_buff *tx_skb;

	u32 tx_count;
	u16 tx_mpdu_len;

	u8 tx_rate_mode;
	u8 tx_rate_idx;
	u8 tx_rate_nss;
	u8 tx_rate_sgi;
	u8 tx_rate_ldpc;
	u8 tx_rate_stbc;
	u8 tx_ltf;

	u8 tx_antenna_mask;
	u8 tx_spe_idx;

	u8 tx_duty_cycle;
	u32 tx_time;
	u32 tx_ipg;

	u32 freq_offset;

	u8 tx_power[4];
	u8 tx_power_control;

	u32 tx_pending;
	u32 tx_queued;
	u16 tx_queued_limit;
	u32 tx_done;
	struct {
		u64 packets[__MT_RXQ_MAX];
		u64 fcs_error[__MT_RXQ_MAX];
	} rx_stats;
};

struct mt76_vif {
	u8 idx;
	u8 omac_idx;
	u8 band_idx;
	u8 wmm_idx;
	u8 scan_seq_num;
};

struct mt76_phy {
	struct ieee80211_hw *hw;
	struct mt76_dev *dev;
	void *priv;

	unsigned long state;

	struct mt76_queue *q_tx[__MT_TXQ_MAX];

	struct cfg80211_chan_def chandef;
	struct ieee80211_channel *main_chan;

	struct mt76_channel_state *chan_state;
	ktime_t survey_time;

	struct mt76_hw_cap cap;
	struct mt76_sband sband_2g;
	struct mt76_sband sband_5g;
	struct mt76_sband sband_6g;

	u8 macaddr[ETH_ALEN];

	int txpower_cur;
	u8 antenna_mask;
	u16 chainmask;

#ifdef CONFIG_NL80211_TESTMODE
	struct mt76_testmode_data test;
#endif

	struct delayed_work mac_work;
	u8 mac_work_count;

	struct {
		struct sk_buff *head;
		struct sk_buff **tail;
		u16 seqno;
	} rx_amsdu[__MT_RXQ_MAX];

	struct mt76_freq_range_power *frp;
};

struct mt76_dev {
	struct mt76_phy phy; /* must be first */

	struct mt76_phy *phy2;

	struct ieee80211_hw *hw;

	spinlock_t lock;
	spinlock_t cc_lock;

	u32 cur_cc_bss_rx;

	struct mt76_rx_status rx_ampdu_status;
	u32 rx_ampdu_len;
	u32 rx_ampdu_ref;

	struct mutex mutex;

	const struct mt76_bus_ops *bus;
	const struct mt76_driver_ops *drv;
	const struct mt76_mcu_ops *mcu_ops;
	struct device *dev;

	struct mt76_mcu mcu;

	struct net_device napi_dev;
	struct net_device tx_napi_dev;
	spinlock_t rx_lock;
	struct napi_struct napi[__MT_RXQ_MAX];
	struct sk_buff_head rx_skb[__MT_RXQ_MAX];

	struct list_head txwi_cache;
	struct mt76_queue *q_mcu[__MT_MCUQ_MAX];
	struct mt76_queue q_rx[__MT_RXQ_MAX];
	const struct mt76_queue_ops *queue_ops;
	int tx_dma_idx[4];

	struct mt76_worker tx_worker;
	struct napi_struct tx_napi;

	spinlock_t token_lock;
	struct idr token;
	int token_count;

	wait_queue_head_t tx_wait;
	/* spinlock used to protect wcid pktid linked list */
	spinlock_t status_lock;

	u32 wcid_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];
	u32 wcid_phy_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];

	u32 vif_mask;

	struct mt76_wcid global_wcid;
	struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];
	struct list_head wcid_list;

	u32 rev;

	u32 aggr_stats[32];

	struct tasklet_struct pre_tbtt_tasklet;
	int beacon_int;
	u8 beacon_mask;

	struct debugfs_blob_wrapper eeprom;
	struct debugfs_blob_wrapper otp;

	struct mt76_rate_power rate_power;

	char alpha2[3];
	enum nl80211_dfs_regions region;

	u32 debugfs_reg;

	struct led_classdev led_cdev;
	char led_name[32];
	bool led_al;
	u8 led_pin;

	u8 csa_complete;

	u32 rxfilter;

#ifdef CONFIG_NL80211_TESTMODE
	const struct mt76_testmode_ops *test_ops;
	struct {
		const char *name;
		u32 offset;
	} test_mtd;
#endif
	struct workqueue_struct *wq;

	union {
		struct mt76_mmio mmio;
		struct mt76_usb usb;
		struct mt76_sdio sdio;
	};
};

struct mt76_power_limits {
	s8 cck[4];
	s8 ofdm[8];
	s8 mcs[4][10];
	s8 ru[7][12];
};

enum mt76_phy_type {
	MT_PHY_TYPE_CCK,
	MT_PHY_TYPE_OFDM,
	MT_PHY_TYPE_HT,
	MT_PHY_TYPE_HT_GF,
	MT_PHY_TYPE_VHT,
	MT_PHY_TYPE_HE_SU = 8,
	MT_PHY_TYPE_HE_EXT_SU,
	MT_PHY_TYPE_HE_TB,
	MT_PHY_TYPE_HE_MU,
	__MT_PHY_TYPE_HE_MAX,
};

struct mt76_sta_stats {
	u64 tx_mode[__MT_PHY_TYPE_HE_MAX];
	u64 tx_bw[4];		/* 20, 40, 80, 160 */
	u64 tx_nss[4];		/* 1, 2, 3, 4 */
	u64 tx_mcs[16];		/* mcs idx */
};

struct mt76_ethtool_worker_info {
	u64 *data;
	int idx;
	int initial_stat_idx;
	int worker_stat_count;
	int sta_count;
};

#define CCK_RATE(_idx, _rate) {					\
	.bitrate = _rate,					\
	.flags = IEEE80211_RATE_SHORT_PREAMBLE,			\
	.hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx),		\
	.hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + _idx),	\
}

#define OFDM_RATE(_idx, _rate) {				\
	.bitrate = _rate,					\
	.hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx),		\
	.hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx),	\
}
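
/*
 * Illustrative expansion: ieee80211_rate bitrates are in units of
 * 100 kbit/s and hw_value packs the PHY type into the high byte, so
 * CCK_RATE(0, 10) describes 1 Mbit/s CCK with
 * hw_value == (MT_PHY_TYPE_CCK << 8) | 0 and a short-preamble
 * hw_value_short of (MT_PHY_TYPE_CCK << 8) | 4.
 */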

extern struct ieee80211_rate mt76_rates[12];
extern const struct cfg80211_sar_capa mt76_sar_capa;

#define __mt76_rr(dev, ...)	(dev)->bus->rr((dev), __VA_ARGS__)
#define __mt76_wr(dev, ...)	(dev)->bus->wr((dev), __VA_ARGS__)
#define __mt76_rmw(dev, ...)	(dev)->bus->rmw((dev), __VA_ARGS__)
#define __mt76_wr_copy(dev, ...)	(dev)->bus->write_copy((dev), __VA_ARGS__)
#define __mt76_rr_copy(dev, ...)	(dev)->bus->read_copy((dev), __VA_ARGS__)

#define __mt76_set(dev, offset, val)	__mt76_rmw(dev, offset, 0, val)
#define __mt76_clear(dev, offset, val)	__mt76_rmw(dev, offset, val, 0)

#define mt76_rr(dev, ...)	(dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr(dev, ...)	(dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
#define mt76_rmw(dev, ...)	(dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_copy(dev, ...)	(dev)->mt76.bus->write_copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_rr_copy(dev, ...)	(dev)->mt76.bus->read_copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_rp(dev, ...)	(dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__)
#define mt76_rd_rp(dev, ...)	(dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__)

#define mt76_mcu_restart(dev, ...)	(dev)->mt76.mcu_ops->mcu_restart(&((dev)->mt76))
#define __mt76_mcu_restart(dev, ...)	(dev)->mcu_ops->mcu_restart((dev))

#define mt76_set(dev, offset, val)	mt76_rmw(dev, offset, 0, val)
#define mt76_clear(dev, offset, val)	mt76_rmw(dev, offset, val, 0)
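
/*
 * Example: mt76_set(dev, addr, BIT(0)) expands to
 * mt76_rmw(dev, addr, 0, BIT(0)), a read-modify-write that only sets
 * bits, while mt76_clear() passes the value as the clear mask instead.
 */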

#define mt76_get_field(_dev, _reg, _field)		\
	FIELD_GET(_field, mt76_rr(_dev, _reg))

#define mt76_rmw_field(_dev, _reg, _field, _val)	\
	mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

#define __mt76_rmw_field(_dev, _reg, _field, _val)	\
	__mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

#define mt76_hw(dev) (dev)->mphy.hw

static inline struct ieee80211_hw *
mt76_wcid_hw(struct mt76_dev *dev, u16 wcid)
{
	if (wcid < MT76_N_WCIDS &&
	    mt76_wcid_mask_test(dev->wcid_phy_mask, wcid))
		return dev->phy2->hw;

	return dev->phy.hw;
}

bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		 int timeout);

#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)

bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		      int timeout);

#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)

void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
void mt76_pci_disable_aspm(struct pci_dev *pdev);

static inline u16 mt76_chip(struct mt76_dev *dev)
{
	return dev->rev >> 16;
}

static inline u16 mt76_rev(struct mt76_dev *dev)
{
	return dev->rev & 0xffff;
}

#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))

#define mt76_init_queues(dev, ...)		(dev)->mt76.queue_ops->init(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_alloc(dev, ...)	(dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb_raw(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...)	(dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...)	(dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_cleanup(dev, ...)	(dev)->mt76.queue_ops->rx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...)	(dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_reset(dev, ...)	(dev)->mt76.queue_ops->reset_q(&((dev)->mt76), __VA_ARGS__)
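
/*
 * Rough shape of the TX datapath behind these wrappers (a sketch, not a
 * verbatim excerpt from any driver): rings are allocated once, then
 * frames are queued and the ring is kicked so the hardware fetches them:
 *
 *	mt76_queue_alloc(dev, q, idx, n_desc, bufsize, ring_base);
 *	...
 *	mt76_tx_queue_skb(dev, q, skb, wcid, sta);
 *	mt76_queue_kick(dev, q);
 */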

#define mt76_for_each_q_rx(dev, i)	\
	for (i = 0; i < ARRAY_SIZE((dev)->q_rx) && \
		    (dev)->q_rx[i].ndesc; i++)

struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
				   const struct ieee80211_ops *ops,
				   const struct mt76_driver_ops *drv_ops);
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);
void mt76_free_device(struct mt76_dev *dev);
void mt76_unregister_phy(struct mt76_phy *phy);

struct mt76_phy *mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
				const struct ieee80211_ops *ops);
int mt76_register_phy(struct mt76_phy *phy, bool vht,
		      struct ieee80211_rate *rates, int n_rates);

struct dentry *mt76_register_debugfs_fops(struct mt76_phy *phy,
					  const struct file_operations *ops);
static inline struct dentry *mt76_register_debugfs(struct mt76_dev *dev)
{
	return mt76_register_debugfs_fops(&dev->phy, NULL);
}

int mt76_queues_read(struct seq_file *s, void *data);
void mt76_seq_puts_array(struct seq_file *file, const char *str,
			 s8 *val, int len);

int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_phy *phy);
int mt76_get_of_eeprom(struct mt76_dev *dev, void *data, int offset, int len);

struct mt76_queue *
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
		int ring_base);
u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx);
static inline int mt76_init_tx_queue(struct mt76_phy *phy, int qid, int idx,
				     int n_desc, int ring_base)
{
	struct mt76_queue *q;

	q = mt76_init_queue(phy->dev, qid, idx, n_desc, ring_base);
	if (IS_ERR(q))
		return PTR_ERR(q);

	q->qid = qid;
	phy->q_tx[qid] = q;

	return 0;
}

static inline int mt76_init_mcu_queue(struct mt76_dev *dev, int qid, int idx,
				      int n_desc, int ring_base)
{
	struct mt76_queue *q;

	q = mt76_init_queue(dev, qid, idx, n_desc, ring_base);
	if (IS_ERR(q))
		return PTR_ERR(q);

	q->qid = __MT_TXQ_MAX + qid;
	dev->q_mcu[qid] = q;

	return 0;
}

static inline struct mt76_phy *
mt76_dev_phy(struct mt76_dev *dev, bool phy_ext)
{
	if (phy_ext && dev->phy2)
		return dev->phy2;
	return &dev->phy;
}

static inline struct ieee80211_hw *
mt76_phy_hw(struct mt76_dev *dev, bool phy_ext)
{
	return mt76_dev_phy(dev, phy_ext)->hw;
}

static inline u8 *
mt76_get_txwi_ptr(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	return (u8 *)t - dev->drv->txwi_size;
}

/* increment with wrap-around */
static inline int mt76_incr(int val, int size)
{
	return (val + 1) & (size - 1);
}

/* decrement with wrap-around */
static inline int mt76_decr(int val, int size)
{
	return (val - 1) & (size - 1);
}
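
/*
 * Both helpers above rely on size being a power of two: masking with
 * (size - 1) is what makes the wrap-around work, e.g.
 * mt76_incr(31, 32) == 0 and mt76_decr(0, 32) == 31.
 */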

u8 mt76_ac_to_hwq(u8 ac);

static inline struct ieee80211_txq *
mtxq_to_txq(struct mt76_txq *mtxq)
{
	void *ptr = mtxq;

	return container_of(ptr, struct ieee80211_txq, drv_priv);
}

static inline struct ieee80211_sta *
wcid_to_sta(struct mt76_wcid *wcid)
{
	void *ptr = wcid;

	if (!wcid || !wcid->sta)
		return NULL;

	return container_of(ptr, struct ieee80211_sta, drv_priv);
}

static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct mt76_tx_cb) >
		     sizeof(IEEE80211_SKB_CB(skb)->status.status_driver_data));
	return ((void *)IEEE80211_SKB_CB(skb)->status.status_driver_data);
}

static inline void *mt76_skb_get_hdr(struct sk_buff *skb)
{
	struct mt76_rx_status mstat;
	u8 *data = skb->data;

	/* Alignment concerns */
	BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) % 4);
	BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) % 4);

	mstat = *((struct mt76_rx_status *)skb->cb);

	if (mstat.flag & RX_FLAG_RADIOTAP_HE)
		data += sizeof(struct ieee80211_radiotap_he);
	if (mstat.flag & RX_FLAG_RADIOTAP_HE_MU)
		data += sizeof(struct ieee80211_radiotap_he_mu);

	return data;
}

static inline void mt76_insert_hdr_pad(struct sk_buff *skb)
{
	int len = ieee80211_get_hdrlen_from_skb(skb);

	if (len % 4 == 0)
		return;

	skb_push(skb, 2);
	memmove(skb->data, skb->data + 2, len);

	skb->data[len] = 0;
	skb->data[len + 1] = 0;
}

static inline bool mt76_is_skb_pktid(u8 pktid)
{
	if (pktid & MT_PACKET_ID_HAS_RATE)
		return false;

	return pktid >= MT_PACKET_ID_FIRST;
}

static inline u8 mt76_tx_power_nss_delta(u8 nss)
{
	static const u8 nss_delta[4] = { 0, 6, 9, 12 };

	return nss_delta[nss - 1];
}

static inline bool mt76_testmode_enabled(struct mt76_phy *phy)
{
#ifdef CONFIG_NL80211_TESTMODE
	return phy->test.state != MT76_TM_STATE_OFF;
#else
	return false;
#endif
}

static inline bool mt76_is_testmode_skb(struct mt76_dev *dev,
					struct sk_buff *skb,
					struct ieee80211_hw **hw)
{
#ifdef CONFIG_NL80211_TESTMODE
	if (skb == dev->phy.test.tx_skb)
		*hw = dev->phy.hw;
	else if (dev->phy2 && skb == dev->phy2->test.tx_skb)
		*hw = dev->phy2->hw;
	else
		return false;
	return true;
#else
	return false;
#endif
}

void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_phy *dev, struct ieee80211_sta *sta,
	     struct mt76_wcid *wcid, struct sk_buff *skb);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
			 bool send_bar);
void mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb);
void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid);
void mt76_txq_schedule_all(struct mt76_phy *phy);
void mt76_tx_worker_run(struct mt76_dev *dev);
void mt76_tx_worker(struct mt76_worker *w);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
				  struct ieee80211_sta *sta,
				  u16 tids, int nframes,
				  enum ieee80211_frame_release_type reason,
				  bool more_data);
bool mt76_has_tx_pending(struct mt76_phy *phy);
void mt76_set_channel(struct mt76_phy *phy);
void mt76_update_survey(struct mt76_phy *phy);
void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey);
void mt76_set_stream_caps(struct mt76_phy *phy, bool vht);

int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
		       u16 ssn, u16 size);
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);

void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key);

void mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
			 __acquires(&dev->status_lock);
void mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
			   __releases(&dev->status_lock);

int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
			   struct sk_buff *skb);
struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
				       struct mt76_wcid *wcid, int pktid,
				       struct sk_buff_head *list);
void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			     struct sk_buff_head *list);
void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb,
			    struct list_head *free_list);
static inline void
mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb)
{
	__mt76_tx_complete_skb(dev, wcid, skb, NULL);
}

void mt76_tx_status_check(struct mt76_dev *dev, bool flush);
int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state);
void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);
void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta);

int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy);

int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		     int *dbm);

void mt76_csa_check(struct mt76_dev *dev);
void mt76_csa_finish(struct mt76_dev *dev);

int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
int mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set);
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id);
int mt76_get_rate(struct mt76_dev *dev,
		  struct ieee80211_supported_band *sband,
		  int idx, bool cck);
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac);
void mt76_sw_scan_complete(struct ieee80211_hw *hw,
			   struct ieee80211_vif *vif);
int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		      void *data, int len);
int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
		       struct netlink_callback *cb, void *data, int len);
int mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state);
int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len);

static inline void mt76_testmode_reset(struct mt76_phy *phy, bool disable)
{
#ifdef CONFIG_NL80211_TESTMODE
	enum mt76_testmode_state state = MT76_TM_STATE_IDLE;

	if (disable || phy->test.state == MT76_TM_STATE_OFF)
		state = MT76_TM_STATE_OFF;

	mt76_testmode_set_state(phy, state);
#endif
}

/* internal */
static inline struct ieee80211_hw *
mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = dev->phy.hw;

	if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && dev->phy2)
		hw = dev->phy2->hw;

	info->hw_queue &= ~MT_TX_HW_QUEUE_EXT_PHY;

	return hw;
}

void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi);
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
			   struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
void mt76_testmode_tx_pending(struct mt76_phy *phy);
void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
			    struct mt76_queue_entry *e);

/* usb */
static inline bool mt76u_urb_error(struct urb *urb)
{
	return urb->status &&
	       urb->status != -ECONNRESET &&
	       urb->status != -ESHUTDOWN &&
	       urb->status != -ENOENT;
}

/* Map hardware queues to usb endpoints */
static inline u8 q2ep(u8 qid)
{
	/* TODO: take management packets to queue 5 */
	return qid + 1;
}

static inline int
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
	       int timeout, int ep)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	struct mt76_usb *usb = &dev->usb;
	unsigned int pipe;

	if (actual_len)
		pipe = usb_rcvbulkpipe(udev, usb->in_ep[ep]);
	else
		pipe = usb_sndbulkpipe(udev, usb->out_ep[ep]);

	return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
}

void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
			 struct mt76_sta_stats *stats);
int mt76_skb_adjust_pad(struct sk_buff *skb, int pad);
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len);
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val);
int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
	       bool ext);
int mt76u_alloc_mcu_queue(struct mt76_dev *dev);
int mt76u_alloc_queues(struct mt76_dev *dev);
void mt76u_stop_tx(struct mt76_dev *dev);
void mt76u_stop_rx(struct mt76_dev *dev);
int mt76u_resume_rx(struct mt76_dev *dev);
void mt76u_queues_deinit(struct mt76_dev *dev);

int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
	       const struct mt76_bus_ops *bus_ops);
int mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid);
int mt76s_alloc_tx(struct mt76_dev *dev);
void mt76s_deinit(struct mt76_dev *dev);
void mt76s_sdio_irq(struct sdio_func *func);
void mt76s_txrx_worker(struct mt76_sdio *sdio);
bool mt76s_txqs_empty(struct mt76_dev *dev);
int mt76s_hw_init(struct mt76_dev *dev, struct sdio_func *func,
		  int hw_ver);
u32 mt76s_rr(struct mt76_dev *dev, u32 offset);
void mt76s_wr(struct mt76_dev *dev, u32 offset, u32 val);
u32 mt76s_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
u32 mt76s_read_pcr(struct mt76_dev *dev);
void mt76s_write_copy(struct mt76_dev *dev, u32 offset,
		      const void *data, int len);
void mt76s_read_copy(struct mt76_dev *dev, u32 offset,
		     void *data, int len);
int mt76s_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data,
		int len);
int mt76s_rd_rp(struct mt76_dev *dev, u32 base,
		struct mt76_reg_pair *data, int len);

struct sk_buff *
mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
		   int data_len);
void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
				      unsigned long expires);
int mt76_mcu_send_and_get_msg(struct mt76_dev *dev, int cmd, const void *data,
			      int len, bool wait_resp, struct sk_buff **ret);
int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb,
				  int cmd, bool wait_resp, struct sk_buff **ret);
int __mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data,
			     int len, int max_len);
static inline int
mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data,
		       int len)
{
	int max_len = 4096 - dev->mcu_ops->headroom;

	return __mt76_mcu_send_firmware(dev, cmd, data, len, max_len);
}
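
/*
 * __mt76_mcu_send_firmware() is expected to split the image into chunks
 * of at most max_len bytes and send each one as a separate MCU message;
 * the inline wrapper above merely derives a safe chunk size from the
 * per-bus MCU headroom.
 */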

static inline int
mt76_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data, int len,
		  bool wait_resp)
{
	return mt76_mcu_send_and_get_msg(dev, cmd, data, len, wait_resp, NULL);
}

static inline int
mt76_mcu_skb_send_msg(struct mt76_dev *dev, struct sk_buff *skb, int cmd,
		      bool wait_resp)
{
	return mt76_mcu_skb_send_and_get_msg(dev, skb, cmd, wait_resp, NULL);
}

void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, u32 clear, u32 set);

s8 mt76_get_rate_power_limits(struct mt76_phy *phy,
			      struct ieee80211_channel *chan,
			      struct mt76_power_limits *dest,
			      s8 target_power);

struct mt76_txwi_cache *
mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);

static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
{
	spin_lock_bh(&dev->token_lock);
	__mt76_set_tx_blocked(dev, blocked);
	spin_unlock_bh(&dev->token_lock);
}

static inline int
mt76_token_get(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
{
	int token;

	spin_lock_bh(&dev->token_lock);
	token = idr_alloc(&dev->token, *ptxwi, 0, dev->drv->token_size,
			  GFP_ATOMIC);
	spin_unlock_bh(&dev->token_lock);

	return token;
}
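
/*
 * Typical token lifecycle (an illustrative sketch, not lifted from a
 * driver): a txwi is registered before the frame is handed to the
 * hardware and looked up again on TX completion:
 *
 *	token = mt76_token_get(dev, &txwi);	// before DMA mapping
 *	...
 *	txwi = mt76_token_put(dev, token);	// in the completion path
 */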

static inline struct mt76_txwi_cache *
mt76_token_put(struct mt76_dev *dev, int token)
{
	struct mt76_txwi_cache *txwi;

	spin_lock_bh(&dev->token_lock);
	txwi = idr_remove(&dev->token, token);
	spin_unlock_bh(&dev->token_lock);

	return txwi;
}

static inline void mt76_packet_id_init(struct mt76_wcid *wcid)
{
	INIT_LIST_HEAD(&wcid->list);
	idr_init(&wcid->pktid);
}

static inline void
mt76_packet_id_flush(struct mt76_dev *dev, struct mt76_wcid *wcid)
{
	struct sk_buff_head list;

	mt76_tx_status_lock(dev, &list);
	mt76_tx_status_skb_get(dev, wcid, -1, &list);
	mt76_tx_status_unlock(dev, &list);

	idr_destroy(&wcid->pktid);
}

#endif