/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
 */

#ifndef __MT76_UTIL_H
#define __MT76_UTIL_H

#include <linux/skbuff.h>
#include <linux/bitops.h>
#include <linux/bitfield.h>
#include <net/mac80211.h>

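/*
 * Minimal kthread-backed worker: task is the backing kthread, fn the
 * callback invoked when the worker is scheduled, and state holds the
 * MT76_WORKER_* bits.
 */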
struct mt76_worker {
	struct task_struct *task;
	void (*fn)(struct mt76_worker *);
	unsigned long state;
};

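/* Bits tracked in mt76_worker::state */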
enum {
	MT76_WORKER_SCHEDULED,
	MT76_WORKER_RUNNING,
};

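/* Advance a ring-buffer style index by one, wrapping around at _size */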
#define MT76_INCR(_var, _size) \
	(_var = (((_var) + 1) % (_size)))

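/*
 * Allocate a free wireless client ID (WCID) slot from the bitmap.
 * Returns the allocated index, or a negative value if the bitmap is full.
 */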
int mt76_wcid_alloc(u32 *mask, int size);

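/* Test whether the WCID at index idx is marked as allocated in the bitmap */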
static inline bool
mt76_wcid_mask_test(u32 *mask, int idx)
{
	return mask[idx / 32] & BIT(idx % 32);
}

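/* Mark the WCID at index idx as allocated */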
static inline void
mt76_wcid_mask_set(u32 *mask, int idx)
{
	mask[idx / 32] |= BIT(idx % 32);
}

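/* Mark the WCID at index idx as free again */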
static inline void
mt76_wcid_mask_clear(u32 *mask, int idx)
{
	mask[idx / 32] &= ~BIT(idx % 32);
}

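/* Set or clear the More Data bit in the 802.11 frame control field */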
static inline void
mt76_skb_set_moredata(struct sk_buff *skb, bool enable)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (enable)
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	else
		hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA);
}

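/* Common kthread body; runs the worker's fn each time it is scheduled */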
int __mt76_worker_fn(void *ptr);

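/*
 * Spawn the worker kthread, named "mt76-<name> <wiphy name>".
 * Returns 0 on success or the kthread_run() error code.
 */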
static inline int
mt76_worker_setup(struct ieee80211_hw *hw, struct mt76_worker *w,
		  void (*fn)(struct mt76_worker *),
		  const char *name)
{
	const char *dev_name = wiphy_name(hw->wiphy);
	int ret;

	if (fn)
		w->fn = fn;
	w->task = kthread_run(__mt76_worker_fn, w,
			      "mt76-%s %s", name, dev_name);

	if (IS_ERR(w->task)) {
		ret = PTR_ERR(w->task);
		w->task = NULL;
		return ret;
	}

	return 0;
}

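/*
 * Mark the worker as scheduled and wake its kthread unless it is
 * already running.
 */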
static inline void mt76_worker_schedule(struct mt76_worker *w)
{
	if (!w->task)
		return;

	if (!test_and_set_bit(MT76_WORKER_SCHEDULED, &w->state) &&
	    !test_bit(MT76_WORKER_RUNNING, &w->state))
		wake_up_process(w->task);
}

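/* Park the worker kthread and reset its state bits */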
static inline void mt76_worker_disable(struct mt76_worker *w)
{
	if (!w->task)
		return;

	kthread_park(w->task);
	WRITE_ONCE(w->state, 0);
}

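/*
 * Unpark the worker kthread and schedule it once to catch up on work
 * requested while it was disabled.
 */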
static inline void mt76_worker_enable(struct mt76_worker *w)
{
	if (!w->task)
		return;

	kthread_unpark(w->task);
	mt76_worker_schedule(w);
}

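/* Stop the worker kthread; subsequent worker calls become no-ops */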
static inline void mt76_worker_teardown(struct mt76_worker *w)
{
	if (!w->task)
		return;

	kthread_stop(w->task);
	w->task = NULL;
}

#endif