// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/iommu.h>
#include <linux/sched/mm.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/pnv-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/firmware.h>
#include <asm/eeh_event.h>
#include <asm/eeh.h>

#include "powernv.h"
#include "pci.h"

static DEFINE_MUTEX(tunnel_mutex);

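/**
 * pnv_pci_get_slot_id - Resolve the OPAL slot ID for a PCI device node
 * @np: device tree node of the PCI device
 * @id: output, the OPAL slot ID
 *
 * Walks up the device tree from @np looking for a PHB node and combines
 * the PHB's "ibm,opal-phbid" with the device's bus/dev/fn to form the
 * slot ID that the OPAL slot calls below expect.
 *
 * Return: 0 on success, -ENXIO if a required property is missing,
 * -ENODEV if no suitable PHB node is found.
 */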
int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id)
{
	struct device_node *node = np;
	u32 bdfn;
	u64 phbid;
	int ret;

	ret = of_property_read_u32(np, "reg", &bdfn);
	if (ret)
		return -ENXIO;

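	/* The first "reg" cell is the config address; bits 8-23 hold bus/dev/fn */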
	bdfn = ((bdfn & 0x00ffff00) >> 8);
	for (node = np; node; node = of_get_parent(node)) {
		if (!PCI_DN(node)) {
			of_node_put(node);
			break;
		}

		if (!of_device_is_compatible(node, "ibm,ioda2-phb") &&
		    !of_device_is_compatible(node, "ibm,ioda3-phb") &&
		    !of_device_is_compatible(node, "ibm,ioda2-npu2-opencapi-phb")) {
			of_node_put(node);
			continue;
		}

		ret = of_property_read_u64(node, "ibm,opal-phbid", &phbid);
		if (ret) {
			of_node_put(node);
			return -ENXIO;
		}

		if (of_device_is_compatible(node, "ibm,ioda2-npu2-opencapi-phb"))
			*id = PCI_PHB_SLOT_ID(phbid);
		else
			*id = PCI_SLOT_ID(phbid, bdfn);
		return 0;
	}

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_slot_id);

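/**
 * pnv_pci_get_device_tree - Fetch a flattened device subtree from OPAL
 * @phandle: phandle of the subtree root
 * @buf: buffer to receive the flattened subtree
 * @len: size of @buf in bytes
 *
 * Return: the raw (non-negative) OPAL return value on success, -ENXIO if
 * firmware lacks OPAL_GET_DEVICE_TREE, -EIO on firmware failure.
 */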
int pnv_pci_get_device_tree(uint32_t phandle, void *buf, uint64_t len)
{
	int64_t rc;

	if (!opal_check_token(OPAL_GET_DEVICE_TREE))
		return -ENXIO;

	rc = opal_get_device_tree(phandle, (uint64_t)buf, len);
	if (rc < OPAL_SUCCESS)
		return -EIO;

	return rc;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_device_tree);

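/**
 * pnv_pci_get_presence_state - Query slot presence-detect state from OPAL
 * @id: OPAL slot ID (see pnv_pci_get_slot_id())
 * @state: output, OPAL_PCI_SLOT_EMPTY or OPAL_PCI_SLOT_PRESENT
 *
 * Return: 0 on success, -ENXIO if the firmware call is unavailable,
 * -EIO on firmware failure.
 */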
int pnv_pci_get_presence_state(uint64_t id, uint8_t *state)
{
	int64_t rc;

	if (!opal_check_token(OPAL_PCI_GET_PRESENCE_STATE))
		return -ENXIO;

	rc = opal_pci_get_presence_state(id, (uint64_t)state);
	if (rc != OPAL_SUCCESS)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_presence_state);

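/**
 * pnv_pci_get_power_state - Query slot power state from OPAL
 * @id: OPAL slot ID
 * @state: output, OPAL_PCI_SLOT_POWER_OFF or OPAL_PCI_SLOT_POWER_ON
 *
 * Return: 0 on success, -ENXIO if the firmware call is unavailable,
 * -EIO on firmware failure.
 */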
int pnv_pci_get_power_state(uint64_t id, uint8_t *state)
{
	int64_t rc;

	if (!opal_check_token(OPAL_PCI_GET_POWER_STATE))
		return -ENXIO;

	rc = opal_pci_get_power_state(id, (uint64_t)state);
	if (rc != OPAL_SUCCESS)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_power_state);

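/**
 * pnv_pci_set_power_state - Set slot power state via OPAL
 * @id: OPAL slot ID
 * @state: requested power state
 * @msg: optional, receives the OPAL completion message for async calls
 *
 * The firmware call may complete synchronously or asynchronously; in the
 * latter case we block on the async token until the completion message
 * arrives.
 *
 * Return: 0 if the state was set synchronously, 1 if it completed
 * asynchronously (with the completion message copied to @msg, if
 * provided), negative errno on failure.
 */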
int pnv_pci_set_power_state(uint64_t id, uint8_t state, struct opal_msg *msg)
{
	struct opal_msg m;
	int token, ret;
	int64_t rc;

	if (!opal_check_token(OPAL_PCI_SET_POWER_STATE))
		return -ENXIO;

	token = opal_async_get_token_interruptible();
	if (unlikely(token < 0))
		return token;

	rc = opal_pci_set_power_state(token, id, (uint64_t)&state);
	if (rc == OPAL_SUCCESS) {
		ret = 0;
		goto exit;
	} else if (rc != OPAL_ASYNC_COMPLETION) {
		ret = -EIO;
		goto exit;
	}

	ret = opal_async_wait_response(token, &m);
	if (ret < 0)
		goto exit;

	if (msg) {
		ret = 1;
		memcpy(msg, &m, sizeof(m));
	}

exit:
	opal_async_release_token(token);
	return ret;
}
EXPORT_SYMBOL_GPL(pnv_pci_set_power_state);
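
/*
 * Hypothetical usage sketch (not part of this file): a slot driver,
 * loosely modelled on what the pnv_php hotplug driver does, would check
 * presence and then power a slot on roughly like this, treating a return
 * of 1 as "the async completion message in msg holds the details":
 *
 *	struct opal_msg msg;
 *	uint8_t presence;
 *	int ret;
 *
 *	if (pnv_pci_get_presence_state(slot_id, &presence) == 0 &&
 *	    presence == OPAL_PCI_SLOT_PRESENT) {
 *		ret = pnv_pci_set_power_state(slot_id,
 *					      OPAL_PCI_SLOT_POWER_ON, &msg);
 *		if (ret == 1)
 *			ret = 0;
 *	}
 */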

/* Nicely print the contents of the PE State Tables (PEST). */
static void pnv_pci_dump_pest(__be64 pestA[], __be64 pestB[], int pest_size)
{
	u64 prevA = ULONG_MAX, prevB = ULONG_MAX;
	bool dup = false;
	int i;

	for (i = 0; i < pest_size; i++) {
		u64 peA = be64_to_cpu(pestA[i]);
		u64 peB = be64_to_cpu(pestB[i]);

		if (peA != prevA || peB != prevB) {
			if (dup) {
				pr_info("PE[..%03x] A/B: as above\n", i - 1);
				dup = false;
			}
			prevA = peA;
			prevB = peB;
			if (peA & PNV_IODA_STOPPED_STATE ||
			    peB & PNV_IODA_STOPPED_STATE)
				pr_info("PE[%03x] A/B: %016llx %016llx\n",
					i, peA, peB);
		} else if (!dup && (peA & PNV_IODA_STOPPED_STATE ||
				    peB & PNV_IODA_STOPPED_STATE)) {
			dup = true;
		}
	}
}

static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
					 struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoP7IOCPhbErrorData *data;

	data = (struct OpalIoP7IOCPhbErrorData *)common;
	pr_info("P7IOC PHB#%x Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));

	if (data->brdgCtl)
		pr_info("brdgCtl: %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts: %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));
	if (data->deviceStatus || data->slotStatus ||
	    data->linkStatus || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts: %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts: %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog: %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));
	if (data->p7iocPlssr || data->p7iocCsr)
		pr_info("PhbSts: %016llx %016llx\n",
			be64_to_cpu(data->p7iocPlssr),
			be64_to_cpu(data->p7iocCsr));
	if (data->lemFir)
		pr_info("Lem: %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_P7IOC_NUM_PEST_REGS);
}

static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
					struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoPhb3ErrorData *data;

	data = (struct OpalIoPhb3ErrorData *)common;
	pr_info("PHB3 PHB#%x Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));
	if (data->brdgCtl)
		pr_info("brdgCtl: %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts: %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));
	if (data->deviceStatus || data->slotStatus ||
	    data->linkStatus || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts: %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts: %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog: %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));
	if (data->nFir)
		pr_info("nFir: %016llx %016llx %016llx\n",
			be64_to_cpu(data->nFir),
			be64_to_cpu(data->nFirMask),
			be64_to_cpu(data->nFirWOF));
	if (data->phbPlssr || data->phbCsr)
		pr_info("PhbSts: %016llx %016llx\n",
			be64_to_cpu(data->phbPlssr),
			be64_to_cpu(data->phbCsr));
	if (data->lemFir)
		pr_info("Lem: %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_PHB3_NUM_PEST_REGS);
}

static void pnv_pci_dump_phb4_diag_data(struct pci_controller *hose,
					struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoPhb4ErrorData *data;

	data = (struct OpalIoPhb4ErrorData *)common;
	pr_info("PHB4 PHB#%x Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));
	if (data->brdgCtl)
		pr_info("brdgCtl: %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->deviceStatus || data->slotStatus ||
	    data->linkStatus || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts: %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts: %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog: %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId)
		pr_info("sourceId: %08x\n", be32_to_cpu(data->sourceId));
	if (data->nFir)
		pr_info("nFir: %016llx %016llx %016llx\n",
			be64_to_cpu(data->nFir),
			be64_to_cpu(data->nFirMask),
			be64_to_cpu(data->nFirWOF));
	if (data->phbPlssr || data->phbCsr)
		pr_info("PhbSts: %016llx %016llx\n",
			be64_to_cpu(data->phbPlssr),
			be64_to_cpu(data->phbCsr));
	if (data->lemFir)
		pr_info("Lem: %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->phbTxeErrorStatus)
		pr_info("PhbTxeErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbTxeErrorStatus),
			be64_to_cpu(data->phbTxeFirstErrorStatus),
			be64_to_cpu(data->phbTxeErrorLog0),
			be64_to_cpu(data->phbTxeErrorLog1));
	if (data->phbRxeArbErrorStatus)
		pr_info("RxeArbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRxeArbErrorStatus),
			be64_to_cpu(data->phbRxeArbFirstErrorStatus),
			be64_to_cpu(data->phbRxeArbErrorLog0),
			be64_to_cpu(data->phbRxeArbErrorLog1));
	if (data->phbRxeMrgErrorStatus)
		pr_info("RxeMrgErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRxeMrgErrorStatus),
			be64_to_cpu(data->phbRxeMrgFirstErrorStatus),
			be64_to_cpu(data->phbRxeMrgErrorLog0),
			be64_to_cpu(data->phbRxeMrgErrorLog1));
	if (data->phbRxeTceErrorStatus)
		pr_info("RxeTceErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRxeTceErrorStatus),
			be64_to_cpu(data->phbRxeTceFirstErrorStatus),
			be64_to_cpu(data->phbRxeTceErrorLog0),
			be64_to_cpu(data->phbRxeTceErrorLog1));

	if (data->phbPblErrorStatus)
		pr_info("PblErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbPblErrorStatus),
			be64_to_cpu(data->phbPblFirstErrorStatus),
			be64_to_cpu(data->phbPblErrorLog0),
			be64_to_cpu(data->phbPblErrorLog1));
	if (data->phbPcieDlpErrorStatus)
		pr_info("PcieDlp: %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbPcieDlpErrorLog1),
			be64_to_cpu(data->phbPcieDlpErrorLog2),
			be64_to_cpu(data->phbPcieDlpErrorStatus));
	if (data->phbRegbErrorStatus)
		pr_info("RegbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRegbErrorStatus),
			be64_to_cpu(data->phbRegbFirstErrorStatus),
			be64_to_cpu(data->phbRegbErrorLog0),
			be64_to_cpu(data->phbRegbErrorLog1));

	pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_PHB4_NUM_PEST_REGS);
}

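/**
 * pnv_pci_dump_phb_diag_data - Dump PHB diagnostic data in readable form
 * @hose: PCI controller the diag-data was collected from
 * @log_buff: raw diag-data buffer returned by OPAL
 *
 * Dispatches on the ioType field of the common header to the matching
 * per-chip dump routine above.
 */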
void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
				unsigned char *log_buff)
{
	struct OpalIoPhbErrorCommon *common;

	if (!hose || !log_buff)
		return;

	common = (struct OpalIoPhbErrorCommon *)log_buff;
	switch (be32_to_cpu(common->ioType)) {
	case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
		pnv_pci_dump_p7ioc_diag_data(hose, common);
		break;
	case OPAL_PHB_ERROR_DATA_TYPE_PHB3:
		pnv_pci_dump_phb3_diag_data(hose, common);
		break;
	case OPAL_PHB_ERROR_DATA_TYPE_PHB4:
		pnv_pci_dump_phb4_diag_data(hose, common);
		break;
	default:
		pr_warn("%s: Unrecognized ioType %d\n",
			__func__, be32_to_cpu(common->ioType));
	}
}

static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
{
	unsigned long flags, rc;
	int has_diag, ret = 0;

	spin_lock_irqsave(&phb->lock, flags);

	/* Fetch PHB diag-data */
	rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag_data,
					 phb->diag_data_size);
	has_diag = (rc == OPAL_SUCCESS);

	/* If the PHB supports compound PEs, let it handle the unfreeze */
	if (phb->unfreeze_pe) {
		ret = phb->unfreeze_pe(phb,
				       pe_no,
				       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
	} else {
		rc = opal_pci_eeh_freeze_clear(phb->opal_id,
					       pe_no,
					       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		if (rc) {
			pr_warn("%s: Failure %ld clearing frozen PHB#%x-PE#%x\n",
				__func__, rc, phb->hose->global_number,
				pe_no);
			ret = -EIO;
		}
	}

	/*
	 * For now, let's only display the diag buffer when we fail to clear
	 * the EEH status. We'll do more sensible things later when we have
	 * proper EEH support. We need to make sure we don't pollute ourselves
	 * with the normal errors generated when probing empty slots.
	 */
	if (has_diag && ret)
		pnv_pci_dump_phb_diag_data(phb->hose, phb->diag_data);

	spin_unlock_irqrestore(&phb->lock, flags);
}

static void pnv_pci_config_check_eeh(struct pci_dn *pdn)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u8 fstate = 0;
	__be16 pcierr = 0;
	unsigned int pe_no;
	s64 rc;

	/*
	 * Get the PE#. During the PCI probe stage, we might not have
	 * set that up yet, so map all ER errors to the reserved PE.
	 */
	pe_no = pdn->pe_number;
	if (pe_no == IODA_INVALID_PE)
		pe_no = phb->ioda.reserved_pe_idx;

	/*
	 * Fetch the frozen state. If the PHB supports compound PEs,
	 * we need to handle that case.
	 */
	if (phb->get_pe_state) {
		fstate = phb->get_pe_state(phb, pe_no);
	} else {
		rc = opal_pci_eeh_freeze_status(phb->opal_id,
						pe_no,
						&fstate,
						&pcierr,
						NULL);
		if (rc) {
			pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
				__func__, rc, phb->hose->global_number, pe_no);
			return;
		}
	}

	pr_devel(" -> EEH check, bdfn=%04x PE#%x fstate=%x\n",
		 (pdn->busno << 8) | (pdn->devfn), pe_no, fstate);

	/* Clear the frozen state if applicable */
	if (fstate == OPAL_EEH_STOPPED_MMIO_FREEZE ||
	    fstate == OPAL_EEH_STOPPED_DMA_FREEZE ||
	    fstate == OPAL_EEH_STOPPED_MMIO_DMA_FREEZE) {
		/*
		 * If the PHB supports compound PEs, freeze it for
		 * consistency.
		 */
		if (phb->freeze_pe)
			phb->freeze_pe(phb, pe_no);

		pnv_pci_handle_eeh_config(phb, pe_no);
	}
}

int pnv_pci_cfg_read(struct pci_dn *pdn,
		     int where, int size, u32 *val)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u32 bdfn = (pdn->busno << 8) | pdn->devfn;
	s64 rc;

	switch (size) {
	case 1: {
		u8 v8;

		rc = opal_pci_config_read_byte(phb->opal_id, bdfn, where, &v8);
		*val = (rc == OPAL_SUCCESS) ? v8 : 0xff;
		break;
	}
	case 2: {
		__be16 v16;

		rc = opal_pci_config_read_half_word(phb->opal_id, bdfn, where,
						    &v16);
		*val = (rc == OPAL_SUCCESS) ? be16_to_cpu(v16) : 0xffff;
		break;
	}
	case 4: {
		__be32 v32;

		rc = opal_pci_config_read_word(phb->opal_id, bdfn, where, &v32);
		*val = (rc == OPAL_SUCCESS) ? be32_to_cpu(v32) : 0xffffffff;
		break;
	}
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
		 __func__, pdn->busno, pdn->devfn, where, size, *val);
	return PCIBIOS_SUCCESSFUL;
}

int pnv_pci_cfg_write(struct pci_dn *pdn,
		      int where, int size, u32 val)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u32 bdfn = (pdn->busno << 8) | pdn->devfn;

	pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
		 __func__, pdn->busno, pdn->devfn, where, size, val);
	switch (size) {
	case 1:
		opal_pci_config_write_byte(phb->opal_id, bdfn, where, val);
		break;
	case 2:
		opal_pci_config_write_half_word(phb->opal_id, bdfn, where, val);
		break;
	case 4:
		opal_pci_config_write_word(phb->opal_id, bdfn, where, val);
		break;
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	return PCIBIOS_SUCCESSFUL;
}

#ifdef CONFIG_EEH
static bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
	struct eeh_dev *edev = NULL;
	struct pnv_phb *phb = pdn->phb->private_data;

	/* EEH not enabled? */
	if (!(phb->flags & PNV_PHB_FLAG_EEH))
		return true;

	/* PE reset or device removed? */
	edev = pdn->edev;
	if (edev) {
		if (edev->pe &&
		    (edev->pe->state & EEH_PE_CFG_BLOCKED))
			return false;

		if (edev->mode & EEH_DEV_REMOVED)
			return false;
	}

	return true;
}
#else
static inline bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
	return true;
}
#endif /* CONFIG_EEH */

static int pnv_pci_read_config(struct pci_bus *bus,
			       unsigned int devfn,
			       int where, int size, u32 *val)
{
	struct pci_dn *pdn;
	struct pnv_phb *phb;
	int ret;

	*val = 0xFFFFFFFF;
	pdn = pci_get_pdn_by_devfn(bus, devfn);
	if (!pdn)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (!pnv_pci_cfg_check(pdn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	ret = pnv_pci_cfg_read(pdn, where, size, val);
	phb = pdn->phb->private_data;
	if (phb->flags & PNV_PHB_FLAG_EEH && pdn->edev) {
		if (*val == EEH_IO_ERROR_VALUE(size) &&
		    eeh_dev_check_failure(pdn->edev))
			return PCIBIOS_DEVICE_NOT_FOUND;
	} else {
		pnv_pci_config_check_eeh(pdn);
	}

	return ret;
}

static int pnv_pci_write_config(struct pci_bus *bus,
				unsigned int devfn,
				int where, int size, u32 val)
{
	struct pci_dn *pdn;
	struct pnv_phb *phb;
	int ret;

	pdn = pci_get_pdn_by_devfn(bus, devfn);
	if (!pdn)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (!pnv_pci_cfg_check(pdn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	ret = pnv_pci_cfg_write(pdn, where, size, val);
	phb = pdn->phb->private_data;
	if (!(phb->flags & PNV_PHB_FLAG_EEH))
		pnv_pci_config_check_eeh(pdn);

	return ret;
}

struct pci_ops pnv_pci_ops = {
	.read  = pnv_pci_read_config,
	.write = pnv_pci_write_config,
};

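/* Allocate an iommu_table on @nid with its kref and group list initialised */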
struct iommu_table *pnv_pci_table_alloc(int nid)
{
	struct iommu_table *tbl;

	tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, nid);
	if (!tbl)
		return NULL;

	INIT_LIST_HEAD_RCU(&tbl->it_group_list);
	kref_init(&tbl->it_kref);

	return tbl;
}

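/**
 * pnv_pci_get_phb_node - Return the device node of a device's PHB
 * @dev: PCI device
 *
 * The caller is responsible for dropping the reference taken here
 * with of_node_put().
 */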
struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);

	return of_node_get(hose->dn);
}
EXPORT_SYMBOL(pnv_pci_get_phb_node);

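/**
 * pnv_pci_set_tunnel_bar - Set or release the PBCQ tunnel BAR for a PHB
 * @dev: PCI device issuing tunneled operations
 * @addr: address to program into the tunnel BAR
 * @enable: non-zero to claim the BAR, zero to release it
 *
 * Only one device per PHB may own the tunnel BAR at a time; ownership is
 * first-come, first-served, and the owner must pass the same @addr back
 * to release it.
 *
 * Return: 0 on success, -ENXIO if the firmware calls are unavailable,
 * -EBUSY if another device owns the BAR, -EPERM on a mismatched release,
 * or another errno translated from the OPAL return code.
 */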
int pnv_pci_set_tunnel_bar(struct pci_dev *dev, u64 addr, int enable)
{
	struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus);
	u64 tunnel_bar;
	__be64 val;
	int rc;

	if (!opal_check_token(OPAL_PCI_GET_PBCQ_TUNNEL_BAR))
		return -ENXIO;
	if (!opal_check_token(OPAL_PCI_SET_PBCQ_TUNNEL_BAR))
		return -ENXIO;

	mutex_lock(&tunnel_mutex);
	rc = opal_pci_get_pbcq_tunnel_bar(phb->opal_id, &val);
	if (rc != OPAL_SUCCESS) {
		rc = -EIO;
		goto out;
	}
	tunnel_bar = be64_to_cpu(val);
	if (enable) {
		/*
		 * Only one device per PHB can use atomics.
		 * Our policy is first-come, first-served.
		 */
		if (tunnel_bar) {
			if (tunnel_bar != addr)
				rc = -EBUSY;
			else
				rc = 0;	/* Setting the same address twice is OK */
			goto out;
		}
	} else {
		/*
		 * The device that owns atomics and wants to release
		 * them must pass the same address with enable == 0.
		 */
		if (tunnel_bar != addr) {
			rc = -EPERM;
			goto out;
		}
		addr = 0x0ULL;
	}
	rc = opal_pci_set_pbcq_tunnel_bar(phb->opal_id, addr);
	rc = opal_error_code(rc);
out:
	mutex_unlock(&tunnel_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(pnv_pci_set_tunnel_bar);

void pnv_pci_shutdown(void)
{
	struct pci_controller *hose;

	list_for_each_entry(hose, &hose_list, list_node)
		if (hose->controller_ops.shutdown)
			hose->controller_ops.shutdown(hose);
}

/* Fix up the wrong class code reported by the p7ioc and p8 root complexes */
static void pnv_p7ioc_rc_quirk(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk);

void __init pnv_pci_init(void)
{
	struct device_node *np;

	pci_add_flags(PCI_CAN_SKIP_ISA_ALIGN);

	/* If we don't have OPAL, e.g. in a simulator, just skip PCI probing */
	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return;

#ifdef CONFIG_PCIEPORTBUS
	/*
	 * On PowerNV PCIe devices are (currently) managed in cooperation
	 * with firmware. This isn't *strictly* required, but there are enough
	 * assumptions baked into both firmware and the platform code that
	 * it's unwise to allow the portbus services to be used.
	 *
	 * We need to fix this eventually, but for now set this flag to disable
	 * the portbus driver. The AER service isn't required since AER
	 * events are handled via EEH. The pciehp hotplug driver can't work
	 * without kernel changes (and portbus binding breaks pnv_php). The
	 * other services also require some thinking about how we're going
	 * to integrate them.
	 */
	pcie_ports_disabled = true;
#endif

	/* Look for IODA IO-Hubs. */
	for_each_compatible_node(np, NULL, "ibm,ioda-hub") {
		pnv_pci_init_ioda_hub(np);
	}

	/* Look for ioda2 built-in PHB3's */
	for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
		pnv_pci_init_ioda2_phb(np);

	/* Look for ioda3 built-in PHB4's, which we treat as IODA2 */
	for_each_compatible_node(np, NULL, "ibm,ioda3-phb")
		pnv_pci_init_ioda2_phb(np);

	/* Look for NPU2 OpenCAPI PHBs */
	for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-opencapi-phb")
		pnv_pci_init_npu2_opencapi_phb(np);

	/* Configure IOMMU DMA hooks */
	set_pci_dma_ops(&dma_iommu_ops);
}

static int pnv_tce_iommu_bus_notifier(struct notifier_block *nb,
				      unsigned long action, void *data)
{
	struct device *dev = data;

	switch (action) {
	case BUS_NOTIFY_DEL_DEVICE:
		iommu_del_device(dev);
		return 0;
	default:
		return 0;
	}
}

static struct notifier_block pnv_tce_iommu_bus_nb = {
	.notifier_call = pnv_tce_iommu_bus_notifier,
};

static int __init pnv_tce_iommu_bus_notifier_init(void)
{
	bus_register_notifier(&pci_bus_type, &pnv_tce_iommu_bus_nb);
	return 0;
}
machine_subsys_initcall_sync(powernv, pnv_tce_iommu_bus_notifier_init);