// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"
#include "registers.h"

enum irq_work_type {
	IRQ_WORK_NORMAL = 0,
	IRQ_WORK_PROCESS_FAULT,
};

struct idxd_fault {
	struct work_struct work;
	u64 addr;
	struct idxd_device *idxd;
};

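/*
 * Reinitialization work run after a halt that permits a software reset:
 * reset the device, reapply the saved configuration, re-enable the
 * device, and then re-enable each workqueue that was enabled before the
 * halt. If any step fails, the device state is simply cleared.
 */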
static void idxd_device_reinit(struct work_struct *work)
{
	struct idxd_device *idxd = container_of(work, struct idxd_device, work);
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	idxd_device_reset(idxd);
	rc = idxd_device_config(idxd);
	if (rc < 0)
		goto out;

	rc = idxd_device_enable(idxd);
	if (rc < 0)
		goto out;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		if (wq->state == IDXD_WQ_ENABLED) {
			rc = idxd_wq_enable(wq);
			if (rc < 0) {
				dev_warn(dev, "Unable to re-enable wq %s\n",
					 dev_name(wq_confdev(wq)));
			}
		}
	}

	return;

out:
	idxd_device_clear_state(idxd);
}

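/*
 * Handle the non-completion interrupt causes reported in cause: latch and
 * acknowledge the software error registers (waking any user workqueues
 * waiting on errors), complete outstanding device commands, and
 * acknowledge occupancy and perfmon overflow events. If the device
 * reports a halt, either schedule idxd_device_reinit() for a software
 * reset or tear down the device state and return -ENXIO when an FLR or
 * system reset is required.
 */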
static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
{
	struct device *dev = &idxd->pdev->dev;
	union gensts_reg gensts;
	u32 val = 0;
	int i;
	bool err = false;

	if (cause & IDXD_INTC_HALT_STATE)
		goto halt;

	if (cause & IDXD_INTC_ERR) {
		spin_lock(&idxd->dev_lock);
		for (i = 0; i < 4; i++)
			idxd->sw_err.bits[i] = ioread64(idxd->reg_base +
					IDXD_SWERR_OFFSET + i * sizeof(u64));

		iowrite64(idxd->sw_err.bits[0] & IDXD_SWERR_ACK,
			  idxd->reg_base + IDXD_SWERR_OFFSET);

		if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
			int id = idxd->sw_err.wq_idx;
			struct idxd_wq *wq = idxd->wqs[id];

			if (wq->type == IDXD_WQT_USER)
				wake_up_interruptible(&wq->err_queue);
		} else {
			int i;

			for (i = 0; i < idxd->max_wqs; i++) {
				struct idxd_wq *wq = idxd->wqs[i];

				if (wq->type == IDXD_WQT_USER)
					wake_up_interruptible(&wq->err_queue);
			}
		}

		spin_unlock(&idxd->dev_lock);
		val |= IDXD_INTC_ERR;

		for (i = 0; i < 4; i++)
			dev_warn(dev, "err[%d]: %#16.16llx\n",
				 i, idxd->sw_err.bits[i]);
		err = true;
	}

	if (cause & IDXD_INTC_CMD) {
		val |= IDXD_INTC_CMD;
		complete(idxd->cmd_done);
	}

	if (cause & IDXD_INTC_OCCUPY) {
		/* Driver does not utilize occupancy interrupt */
		val |= IDXD_INTC_OCCUPY;
	}

	if (cause & IDXD_INTC_PERFMON_OVFL) {
		val |= IDXD_INTC_PERFMON_OVFL;
		perfmon_counter_overflow(idxd);
	}

	val ^= cause;
	if (val)
		dev_warn_once(dev, "Unexpected interrupt cause bits set: %#x\n",
			      val);

	if (!err)
		return 0;

halt:
	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
	if (gensts.state == IDXD_DEVICE_STATE_HALT) {
		idxd->state = IDXD_DEV_HALTED;
		if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) {
			/*
			 * If we need a software reset, we will throw the work
			 * on the device's workqueue so that interrupts for the
			 * device command completions can still be serviced.
			 */
			INIT_WORK(&idxd->work, idxd_device_reinit);
			queue_work(idxd->wq, &idxd->work);
		} else {
			idxd->state = IDXD_DEV_HALTED;
			idxd_wqs_quiesce(idxd);
			idxd_wqs_unmap_portal(idxd);
			spin_lock(&idxd->dev_lock);
			idxd_device_clear_state(idxd);
			dev_err(&idxd->pdev->dev,
				"idxd halted, need %s.\n",
				gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
				"FLR" : "system reset");
			spin_unlock(&idxd->dev_lock);
			return -ENXIO;
		}
	}

	return 0;
}

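/*
 * Threaded handler for the device's miscellaneous (non I/O completion)
 * interrupt vector. Reads and acknowledges the interrupt cause register,
 * then loops until no cause bits remain or a fatal halt is reported.
 */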
irqreturn_t idxd_misc_thread(int vec, void *data)
{
	struct idxd_irq_entry *irq_entry = data;
	struct idxd_device *idxd = irq_entry->idxd;
	int rc;
	u32 cause;

	cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
	if (cause)
		iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);

	while (cause) {
		rc = process_misc_interrupts(idxd, cause);
		if (rc < 0)
			break;
		cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
		if (cause)
			iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
	}

	return IRQ_HANDLED;
}

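/*
 * Drain the lockless pending_llist. Descriptors that have completed
 * (including software-aborted ones) are completed here; descriptors
 * still owned by hardware are moved onto the locked work_list to be
 * checked on a later pass.
 */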
static void irq_process_pending_llist(struct idxd_irq_entry *irq_entry)
{
	struct idxd_desc *desc, *t;
	struct llist_node *head;

	head = llist_del_all(&irq_entry->pending_llist);
	if (!head)
		return;

	llist_for_each_entry_safe(desc, t, head, llnode) {
		u8 status = desc->completion->status & DSA_COMP_STATUS_MASK;

		if (status) {
			/*
			 * Check against the original status as ABORT is software defined
			 * and 0xff, which DSA_COMP_STATUS_MASK can mask out.
			 */
			if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
				complete_desc(desc, IDXD_COMPLETE_ABORT);
				continue;
			}

			complete_desc(desc, IDXD_COMPLETE_NORMAL);
		} else {
			spin_lock(&irq_entry->list_lock);
			list_add_tail(&desc->list,
				      &irq_entry->work_list);
			spin_unlock(&irq_entry->list_lock);
		}
	}
}

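/*
 * Walk the work_list under the list lock and move completed descriptors
 * to a private list, then run the completions outside the lock so the
 * callbacks never execute with list_lock held.
 */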
static void irq_process_work_list(struct idxd_irq_entry *irq_entry)
{
	LIST_HEAD(flist);
	struct idxd_desc *desc, *n;

	/*
	 * This lock protects the work_list from concurrent access by code
	 * running outside of the irq handler thread.
	 */
	spin_lock(&irq_entry->list_lock);
	if (list_empty(&irq_entry->work_list)) {
		spin_unlock(&irq_entry->list_lock);
		return;
	}

	list_for_each_entry_safe(desc, n, &irq_entry->work_list, list) {
		if (desc->completion->status)
			list_move_tail(&desc->list, &flist);
	}

	spin_unlock(&irq_entry->list_lock);

	list_for_each_entry(desc, &flist, list) {
		/*
		 * Check against the original status as ABORT is software defined
		 * and 0xff, which DSA_COMP_STATUS_MASK can mask out.
		 */
		if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
			complete_desc(desc, IDXD_COMPLETE_ABORT);
			continue;
		}

		complete_desc(desc, IDXD_COMPLETE_NORMAL);
	}
}

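/*
 * Threaded handler for a workqueue's completion interrupt vector. The
 * ordering of the two list-processing passes is explained in the comment
 * below.
 */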
irqreturn_t idxd_wq_thread(int irq, void *data)
{
	struct idxd_irq_entry *irq_entry = data;

	/*
	 * There are two lists we are processing. The pending_llist is where
	 * the submitter adds all the submitted descriptors after sending them
	 * to the workqueue. It's a lockless singly linked list. The work_list
	 * is the common Linux doubly linked list. We are in a scenario of
	 * multiple producers and a single consumer. The producers are all
	 * the kernel submitters of descriptors, and the consumer is the
	 * kernel irq handler thread for the msix vector when using threaded
	 * irq. To work with the restrictions of llist to remain lockless,
	 * we are doing the following steps:
	 * 1. Iterate through the work_list and process any completed
	 *    descriptors. Delete the completed entries during iteration.
	 * 2. llist_del_all() from the pending list.
	 * 3. Iterate through the llist that was deleted from the pending list
	 *    and process the completed entries.
	 * 4. If the entry is still waiting on hardware, list_add_tail() to
	 *    the work_list.
	 */
	irq_process_work_list(irq_entry);
	irq_process_pending_llist(irq_entry);

	return IRQ_HANDLED;
}