// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */

#include "mt76_connac.h"

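/*
 * Wake the device from runtime power save. This is a no-op when runtime PM
 * is disabled, the bus is USB, the PHY is not in the PM state, or the device
 * is suspended. Otherwise schedule the wake worker and wait up to three
 * seconds for MT76_STATE_PM to clear; on timeout, wake the mac80211 queues
 * and return -ETIMEDOUT.
 */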
int mt76_connac_pm_wake(struct mt76_phy *phy, struct mt76_connac_pm *pm)
{
	struct mt76_dev *dev = phy->dev;

	if (!pm->enable)
		return 0;

	if (mt76_is_usb(dev))
		return 0;

	cancel_delayed_work_sync(&pm->ps_work);
	if (!test_bit(MT76_STATE_PM, &phy->state))
		return 0;

	if (pm->suspended)
		return 0;

	queue_work(dev->wq, &pm->wake_work);
	if (!wait_event_timeout(pm->wait,
				!test_bit(MT76_STATE_PM, &phy->state),
				3 * HZ)) {
		ieee80211_wake_queues(phy->hw);
		return -ETIMEDOUT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_connac_pm_wake);

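/*
 * Record activity and (re)arm the power-save timer. Skipped for USB devices
 * and when runtime PM is disabled or the device is suspended. If the device
 * is currently awake, cancel the periodic MAC work and schedule ps_work
 * after the configured idle timeout.
 */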
void mt76_connac_power_save_sched(struct mt76_phy *phy,
				  struct mt76_connac_pm *pm)
{
	struct mt76_dev *dev = phy->dev;

	if (mt76_is_usb(dev))
		return;

	if (!pm->enable)
		return;

	if (pm->suspended)
		return;

	pm->last_activity = jiffies;

	if (!test_bit(MT76_STATE_PM, &phy->state)) {
		cancel_delayed_work(&phy->mac_work);
		queue_delayed_work(dev->wq, &pm->ps_work, pm->idle_timeout);
	}
}
EXPORT_SYMBOL_GPL(mt76_connac_power_save_sched);

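/*
 * Drop frames parked in the per-AC power-management tx queues. If @wcid is
 * non-NULL, only frames queued for that station entry are freed; otherwise
 * all pending frames are dropped.
 */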
void mt76_connac_free_pending_tx_skbs(struct mt76_connac_pm *pm,
				      struct mt76_wcid *wcid)
{
	int i;

	spin_lock_bh(&pm->txq_lock);
	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		if (wcid && pm->tx_q[i].wcid != wcid)
			continue;

		dev_kfree_skb(pm->tx_q[i].skb);
		pm->tx_q[i].skb = NULL;
	}
	spin_unlock_bh(&pm->txq_lock);
}
EXPORT_SYMBOL_GPL(mt76_connac_free_pending_tx_skbs);

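/*
 * Park a frame while the device is asleep. The first frame queued for a
 * given AC stops the mac80211 queues and kicks the wake worker; a frame
 * arriving while that slot is still occupied is dropped.
 */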
void mt76_connac_pm_queue_skb(struct ieee80211_hw *hw,
			      struct mt76_connac_pm *pm,
			      struct mt76_wcid *wcid,
			      struct sk_buff *skb)
{
	int qid = skb_get_queue_mapping(skb);
	struct mt76_phy *phy = hw->priv;

	spin_lock_bh(&pm->txq_lock);
	if (!pm->tx_q[qid].skb) {
		ieee80211_stop_queues(hw);
		pm->tx_q[qid].wcid = wcid;
		pm->tx_q[qid].skb = skb;
		queue_work(phy->dev->wq, &pm->wake_work);
	} else {
		dev_kfree_skb(skb);
	}
	spin_unlock_bh(&pm->txq_lock);
}
EXPORT_SYMBOL_GPL(mt76_connac_pm_queue_skb);

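/*
 * Flush the per-AC power-management tx queues: hand each parked frame back
 * to mt76_tx(), resolving the station from the wcid when present, then kick
 * the tx worker.
 */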
void mt76_connac_pm_dequeue_skbs(struct mt76_phy *phy,
				 struct mt76_connac_pm *pm)
{
	int i;

	spin_lock_bh(&pm->txq_lock);
	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		struct mt76_wcid *wcid = pm->tx_q[i].wcid;
		struct ieee80211_sta *sta = NULL;

		if (!pm->tx_q[i].skb)
			continue;

		if (wcid && wcid->sta)
			sta = container_of((void *)wcid, struct ieee80211_sta,
					   drv_priv);

		mt76_tx(phy, sta, wcid, pm->tx_q[i].skb);
		pm->tx_q[i].skb = NULL;
	}
	spin_unlock_bh(&pm->txq_lock);

	mt76_worker_schedule(&phy->dev->tx_worker);
}
EXPORT_SYMBOL_GPL(mt76_connac_pm_dequeue_skbs);