// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 * David Mosberger-Tang
 *
 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/logic_pio.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
#include <linux/vmalloc.h>
#include <asm/dma.h>
#include <linux/aer.h>
#include <linux/bitfield.h>
#include "pci.h"

DEFINE_MUTEX(pci_slot_mutex);

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3hot_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000 /* How long between PME checks */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay = dev->d3hot_delay;

	if (delay < pci_pm_d3hot_delay)
		delay = pci_pm_d3hot_delay;

	if (delay)
		msleep(delay);
}

bool pci_reset_supported(struct pci_dev *dev)
{
	return dev->reset_methods[0] != 0;
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MMIO_SIZE	(2*1024*1024)
#define DEFAULT_HOTPLUG_MMIO_PREF_SIZE	(2*1024*1024)
/* hpiosize=nn can override this */
unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
/*
 * pci=hpmmiosize=nnM overrides non-prefetchable MMIO size,
 * pci=hpmmioprefsize=nnM overrides prefetchable MMIO size;
 * pci=hpmemsize=nnM overrides both
 */
unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE;
unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE;

#define DEFAULT_HOTPLUG_BUS_SIZE	1
unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;


/* PCIe MPS/MRRS strategy; can be overridden by kernel command-line param */
#ifdef CONFIG_PCIE_BUS_TUNE_OFF
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
#elif defined CONFIG_PCIE_BUS_SAFE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
#elif defined CONFIG_PCIE_BUS_PERFORMANCE
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE;
#elif defined CONFIG_PCIE_BUS_PEER2PEER
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PEER2PEER;
#else
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
#endif

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all PCI devices agree on the same value. Arch can override either
 * the dfl or actual value as it sees fit. Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/* If set, the PCIe ATS capability will not be used. */
static bool pcie_ats_disabled;

/* If set, the PCI config space of each device is printed during boot. */
bool pci_early_dump;

bool pci_ats_disabled(void)
{
	return pcie_ats_disabled;
}
EXPORT_SYMBOL_GPL(pci_ats_disabled);

/* Disable bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_disable;
/* Force bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_force;

static int __init pcie_port_pm_setup(char *str)
{
	if (!strcmp(str, "off"))
		pci_bridge_d3_disable = true;
	else if (!strcmp(str, "force"))
		pci_bridge_d3_force = true;
	return 1;
}
__setup("pcie_port_pm=", pcie_port_pm_setup);

/* Time to wait after a reset for device to become responsive */
#define PCIE_RESET_READY_POLL_MS 60000

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct pci_bus *tmp;
	unsigned char max, n;

	max = bus->busn_res.end;
	list_for_each_entry(tmp, &bus->children, node) {
		n = pci_bus_max_busnr(tmp);
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

/**
 * pci_status_get_and_clear_errors - return and clear error bits in PCI_STATUS
 * @pdev: the PCI device
 *
 * Returns error bits set in PCI_STATUS and clears them.
 */
int pci_status_get_and_clear_errors(struct pci_dev *pdev)
{
	u16 status;
	int ret;

	ret = pci_read_config_word(pdev, PCI_STATUS, &status);
	if (ret != PCIBIOS_SUCCESSFUL)
		return -EIO;

	status &= PCI_STATUS_ERROR_BITS;
	if (status)
		pci_write_config_word(pdev, PCI_STATUS, status);

	return status;
}
EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors);
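
/*
 * Illustrative usage sketch (not part of the original file): a driver's
 * error handler might read-and-clear the status bits and then act on a
 * specific error, e.g. detected parity. "pdev" is assumed to be the
 * driver's bound device.
 *
 *	int status = pci_status_get_and_clear_errors(pdev);
 *
 *	if (status < 0)
 *		return status;	// config read failed
 *	if (status & PCI_STATUS_DETECTED_PARITY)
 *		pci_warn(pdev, "parity error detected\n");
 */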

#ifdef CONFIG_HAS_IOMEM
static void __iomem *__pci_ioremap_resource(struct pci_dev *pdev, int bar,
					    bool write_combine)
{
	struct resource *res = &pdev->resource[bar];
	resource_size_t start = res->start;
	resource_size_t size = resource_size(res);

	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
		pci_err(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
		return NULL;
	}

	if (write_combine)
		return ioremap_wc(start, size);

	return ioremap(start, size);
}

void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	return __pci_ioremap_resource(pdev, bar, false);
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);

void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
{
	return __pci_ioremap_resource(pdev, bar, true);
}
EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
#endif
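
/*
 * Illustrative usage sketch (not part of the original file): a typical
 * probe path maps BAR 0 and unmaps it on teardown. "pdev" is assumed to
 * be the device passed to the driver's probe routine.
 *
 *	void __iomem *regs = pci_ioremap_bar(pdev, 0);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	... access device registers via readl(regs + offset) ...
 *	iounmap(regs);
 */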

/**
 * pci_dev_str_match_path - test if a path string matches a device
 * @dev: the PCI device to test
 * @path: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) formatted as a
 * path of device/function addresses matches a PCI device. The string must
 * be of the form:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *
 * A path for a device can be obtained using 'lspci -t'. Using a path
 * is more robust against bus renumbering than using only a single bus,
 * device and function address.
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if it fails to parse the string.
 */
static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
				  const char **endptr)
{
	int ret;
	unsigned int seg, bus, slot, func;
	char *wpath, *p;
	char end;

	*endptr = strchrnul(path, ';');

	wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC);
	if (!wpath)
		return -ENOMEM;

	while (1) {
		p = strrchr(wpath, '/');
		if (!p)
			break;
		ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
		if (ret != 2) {
			ret = -EINVAL;
			goto free_and_exit;
		}

		if (dev->devfn != PCI_DEVFN(slot, func)) {
			ret = 0;
			goto free_and_exit;
		}

		/*
		 * Note: we don't need to get a reference to the upstream
		 * bridge because we hold a reference to the top level
		 * device which should hold a reference to the bridge,
		 * and so on.
		 */
		dev = pci_upstream_bridge(dev);
		if (!dev) {
			ret = 0;
			goto free_and_exit;
		}

		*p = 0;
	}

	ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
		     &func, &end);
	if (ret != 4) {
		seg = 0;
		ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
		if (ret != 3) {
			ret = -EINVAL;
			goto free_and_exit;
		}
	}

	ret = (seg == pci_domain_nr(dev->bus) &&
	       bus == dev->bus->number &&
	       dev->devfn == PCI_DEVFN(slot, func));

free_and_exit:
	kfree(wpath);
	return ret;
}

/**
 * pci_dev_str_match - test if a string matches a device
 * @dev: the PCI device to test
 * @p: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) matches a specified
 * PCI device. The string may be of one of the following formats:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *   pci:<vendor>:<device>[:<subvendor>:<subdevice>]
 *
 * The first format specifies a PCI bus/device/function address which
 * may change if new hardware is inserted, if motherboard firmware changes,
 * or due to changes caused by kernel parameters. If the domain is
 * left unspecified, it is taken to be 0. In order to be robust against
 * bus renumbering issues, a path of PCI device/function numbers may be used
 * to address the specific device. The path for a device can be determined
 * through the use of 'lspci -t'.
 *
 * The second format matches devices using IDs in the configuration
 * space which may match multiple devices in the system. A value of 0
 * for any field will match all devices. (Note: this differs from
 * in-kernel code that uses PCI_ANY_ID which is ~0; this is for
 * legacy reasons and convenience so users don't have to specify
 * FFFFFFFFs on the command line.)
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if the string cannot be parsed.
 */
static int pci_dev_str_match(struct pci_dev *dev, const char *p,
			     const char **endptr)
{
	int ret;
	int count;
	unsigned short vendor, device, subsystem_vendor, subsystem_device;

	if (strncmp(p, "pci:", 4) == 0) {
		/* PCI vendor/device (subvendor/subdevice) IDs are specified */
		p += 4;
		ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
			     &subsystem_vendor, &subsystem_device, &count);
		if (ret != 4) {
			ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
			if (ret != 2)
				return -EINVAL;

			subsystem_vendor = 0;
			subsystem_device = 0;
		}

		p += count;

		if ((!vendor || vendor == dev->vendor) &&
		    (!device || device == dev->device) &&
		    (!subsystem_vendor ||
		     subsystem_vendor == dev->subsystem_vendor) &&
		    (!subsystem_device ||
		     subsystem_device == dev->subsystem_device))
			goto found;
	} else {
		/*
		 * PCI Bus, Device, Function IDs are specified
		 * (optionally, may include a path of devfns following it)
		 */
		ret = pci_dev_str_match_path(dev, p, &p);
		if (ret < 0)
			return ret;
		else if (ret)
			goto found;
	}

	*endptr = p;
	return 0;

found:
	*endptr = p;
	return 1;
}

static u8 __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				  u8 pos, int cap, int *ttl)
{
	u8 id;
	u16 ent;

	pci_bus_read_config_byte(bus, devfn, pos, &pos);

	while ((*ttl)--) {
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_word(bus, devfn, pos, &ent);

		id = ent & 0xff;
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos = (ent >> 8);
	}
	return 0;
}

static u8 __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			      u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static u8 __pci_bus_find_cap_start(struct pci_bus *bus,
				   unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	}

	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it. Possible values for @cap include:
 *
 *  %PCI_CAP_ID_PM      Power Management
 *  %PCI_CAP_ID_AGP     Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD     Vital Product Data
 *  %PCI_CAP_ID_SLOTID  Slot Identification
 *  %PCI_CAP_ID_MSI     Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP   CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX    PCI-X
 *  %PCI_CAP_ID_EXP     PCI Express
 */
u8 pci_find_capability(struct pci_dev *dev, int cap)
{
	u8 pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_find_capability);
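
/*
 * Illustrative usage sketch (not part of the original file): locate the
 * Power Management capability and read its control/status register.
 *
 *	u8 pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
 *	u16 pmcsr;
 *
 *	if (pm)
 *		pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pmcsr);
 */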

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for PCI devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	u8 hdr_type, pos;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_bus_find_capability);

/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it. Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 start, int cap)
{
	u32 header;
	int ttl;
	u16 pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (start)
		pos = start;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it. Possible values for @cap include:
 *
 *  %PCI_EXT_CAP_ID_ERR  Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC   Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN  Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR  Power Budgeting
 */
u16 pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);
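
/*
 * Illustrative usage sketch (not part of the original file): locate the
 * AER extended capability and read its uncorrectable error status.
 *
 *	u16 aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
 *	u32 uncor;
 *
 *	if (aer)
 *		pci_read_config_dword(pdev, aer + PCI_ERR_UNCOR_STATUS, &uncor);
 */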

/**
 * pci_get_dsn - Read and return the 8-byte Device Serial Number
 * @dev: PCI device to query
 *
 * Looks up the PCI_EXT_CAP_ID_DSN and reads the 8 bytes of the Device Serial
 * Number.
 *
 * Returns the DSN, or zero if the capability does not exist.
 */
u64 pci_get_dsn(struct pci_dev *dev)
{
	u32 dword;
	u64 dsn;
	int pos;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
	if (!pos)
		return 0;

	/*
	 * The Device Serial Number is two dwords offset 4 bytes from the
	 * capability position. The specification says that the first dword is
	 * the lower half, and the second dword is the upper half.
	 */
	pos += 4;
	pci_read_config_dword(dev, pos, &dword);
	dsn = (u64)dword;
	pci_read_config_dword(dev, pos + 4, &dword);
	dsn |= ((u64)dword) << 32;

	return dsn;
}
EXPORT_SYMBOL_GPL(pci_get_dsn);
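
/*
 * Illustrative usage sketch (not part of the original file): the DSN can
 * serve as a stable device identifier that survives bus renumbering.
 *
 *	u64 dsn = pci_get_dsn(pdev);
 *
 *	if (dsn)
 *		pci_info(pdev, "serial number %016llx\n", dsn);
 */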

static u8 __pci_find_next_ht_cap(struct pci_dev *dev, u8 pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}

/**
 * pci_find_next_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: HyperTransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @ht_cap: HyperTransport capability code
 *
 * Tell if a device supports a given HyperTransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a HyperTransport capability matching @ht_cap.
 */
u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	u8 pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_vsec_capability - Find a vendor-specific extended capability
 * @dev: PCI device to query
 * @vendor: Vendor ID for which capability is defined
 * @cap: Vendor-specific capability ID
 *
 * If @dev has Vendor ID @vendor, search for a VSEC capability with
 * VSEC ID @cap. If found, return the capability offset in
 * config space; otherwise return 0.
 */
u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap)
{
	u16 vsec = 0;
	u32 header;

	if (vendor != dev->vendor)
		return 0;

	while ((vsec = pci_find_next_ext_capability(dev, vsec,
						    PCI_EXT_CAP_ID_VNDR))) {
		if (pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER,
					  &header) == PCIBIOS_SUCCESSFUL &&
		    PCI_VNDR_HEADER_ID(header) == cap)
			return vsec;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_vsec_capability);
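
/*
 * Illustrative usage sketch (not part of the original file): MY_VSEC_ID
 * below is a hypothetical vendor-defined ID, not a real constant.
 *
 *	u16 vsec = pci_find_vsec_capability(pdev, PCI_VENDOR_ID_INTEL,
 *					    MY_VSEC_ID);
 *
 *	if (vsec)
 *		... parse the vendor-specific registers starting at "vsec" ...
 */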

/**
 * pci_find_dvsec_capability - Find DVSEC for vendor
 * @dev: PCI device to query
 * @vendor: Vendor ID to match for the DVSEC
 * @dvsec: Designated Vendor-specific capability ID
 *
 * If DVSEC has Vendor ID @vendor and DVSEC ID @dvsec return the capability
 * offset in config space; otherwise return 0.
 */
u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec)
{
	int pos;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DVSEC);
	if (!pos)
		return 0;

	while (pos) {
		u16 v, id;

		pci_read_config_word(dev, pos + PCI_DVSEC_HEADER1, &v);
		pci_read_config_word(dev, pos + PCI_DVSEC_HEADER2, &id);
		if (vendor == v && dvsec == id)
			return pos;

		pos = pci_find_next_ext_capability(dev, pos, PCI_EXT_CAP_ID_DVSEC);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_dvsec_capability);

/**
 * pci_find_parent_resource - return resource region of parent bus of given
 *			      region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource region of
 * parent bus the given region is contained in.
 */
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
					  struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	struct resource *r;
	int i;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		if (resource_contains(r, res)) {

			/*
			 * If the window is prefetchable but the BAR is
			 * not, the allocator made a mistake.
			 */
			if (r->flags & IORESOURCE_PREFETCH &&
			    !(res->flags & IORESOURCE_PREFETCH))
				return NULL;

			/*
			 * If we're below a transparent bridge, there may
			 * be both a positively-decoded aperture and a
			 * subtractively-decoded region that contain the BAR.
			 * We want the positively-decoded one, so this depends
			 * on pci_bus_for_each_resource() giving us those
			 * first.
			 */
			return r;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(pci_find_parent_resource);

/**
 * pci_find_resource - Return matching PCI device resource
 * @dev: PCI device to query
 * @res: Resource to look for
 *
 * Goes over standard PCI resources (BARs) and checks if the given resource
 * is partially or fully contained in any of them. In that case the
 * matching resource is returned, %NULL otherwise.
 */
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		struct resource *r = &dev->resource[i];

		if (r->start && resource_contains(r, res))
			return r;
	}

	return NULL;
}
EXPORT_SYMBOL(pci_find_resource);

/**
 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 * @dev: the PCI device to operate on
 * @pos: config space offset of status word
 * @mask: mask of bit(s) to care about in status word
 *
 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
 */
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
{
	int i;

	/* Wait for Transaction Pending bit to clear */
	for (i = 0; i < 4; i++) {
		u16 status;
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos, &status);
		if (!(status & mask))
			return 1;
	}

	return 0;
}
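
/*
 * Illustrative usage sketch (not part of the original file): before
 * resetting a PCIe function, wait for the Transaction Pending bit in the
 * Device Status register to clear, in the spirit of
 * pci_wait_for_pending_transaction() elsewhere in the kernel.
 *
 *	if (pci_is_pcie(dev) &&
 *	    !pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
 *				  PCI_EXP_DEVSTA_TRPND))
 *		pci_err(dev, "transactions still pending before reset\n");
 */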

static int pci_acs_enable;

/**
 * pci_request_acs - ask for ACS to be enabled if supported
 */
void pci_request_acs(void)
{
	pci_acs_enable = 1;
}

static const char *disable_acs_redir_param;

/**
 * pci_disable_acs_redir - disable ACS redirect capabilities
 * @dev: the PCI device
 *
 * For only devices specified in the disable_acs_redir parameter.
 */
static void pci_disable_acs_redir(struct pci_dev *dev)
{
	int ret = 0;
	const char *p;
	int pos;
	u16 ctrl;

	if (!disable_acs_redir_param)
		return;

	p = disable_acs_redir_param;
	while (*p) {
		ret = pci_dev_str_match(dev, p, &p);
		if (ret < 0) {
			pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n",
				     disable_acs_redir_param);

			break;
		} else if (ret == 1) {
			/* Found a match */
			break;
		}

		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}

	if (ret != 1)
		return;

	if (!pci_dev_specific_disable_acs_redir(dev))
		return;

	pos = dev->acs_cap;
	if (!pos) {
		pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n");
		return;
	}

	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

	/* P2P Request & Completion Redirect */
	ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);

	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);

	pci_info(dev, "disabled ACS redirect\n");
}

/**
 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
 * @dev: the PCI device
 */
static void pci_std_enable_acs(struct pci_dev *dev)
{
	int pos;
	u16 cap;
	u16 ctrl;

	pos = dev->acs_cap;
	if (!pos)
		return;

	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

	/* Source Validation */
	ctrl |= (cap & PCI_ACS_SV);

	/* P2P Request Redirect */
	ctrl |= (cap & PCI_ACS_RR);

	/* P2P Completion Redirect */
	ctrl |= (cap & PCI_ACS_CR);

	/* Upstream Forwarding */
	ctrl |= (cap & PCI_ACS_UF);

	/* Enable Translation Blocking for external devices and noats */
	if (pci_ats_disabled() || dev->external_facing || dev->untrusted)
		ctrl |= (cap & PCI_ACS_TB);

	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
}

/**
 * pci_enable_acs - enable ACS if hardware supports it
 * @dev: the PCI device
 */
static void pci_enable_acs(struct pci_dev *dev)
{
	if (!pci_acs_enable)
		goto disable_acs_redir;

	if (!pci_dev_specific_enable_acs(dev))
		goto disable_acs_redir;

	pci_std_enable_acs(dev);

disable_acs_redir:
	/*
	 * Note: pci_disable_acs_redir() must be called even if ACS was not
	 * enabled by the kernel because it may have been enabled by
	 * platform firmware. So if we are told to disable it, we should
	 * always disable it after setting the kernel's default
	 * preferences.
	 */
	pci_disable_acs_redir(dev);
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return true;

	return acpi_pci_power_manageable(dev);
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	if (pci_use_mid_pm())
		return mid_pci_set_power_state(dev, t);

	return acpi_pci_set_power_state(dev, t);
}

static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return mid_pci_get_power_state(dev);

	return acpi_pci_get_power_state(dev);
}

static inline void platform_pci_refresh_power_state(struct pci_dev *dev)
{
	if (!pci_use_mid_pm())
		acpi_pci_refresh_power_state(dev);
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return PCI_POWER_ERROR;

	return acpi_pci_choose_state(dev);
}

static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
{
	if (pci_use_mid_pm())
		return PCI_POWER_ERROR;

	return acpi_pci_wakeup(dev, enable);
}

static inline bool platform_pci_need_resume(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return false;

	return acpi_pci_need_resume(dev);
}

static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
{
	if (pci_use_mid_pm())
		return false;

	return acpi_pci_bridge_d3(dev);
}

/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *			     given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/*
	 * Validate transition: We can enter D0 from any state, but if
	 * we're already in a low-power state, we can only go deeper. E.g.,
	 * we can go from D1 to D3, but we can't go directly from D3 to D1;
	 * we'd have to go from D3 to D0, then to D1.
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		pci_err(dev, "invalid power transition (from %s to %s)\n",
			pci_power_name(dev->current_state),
			pci_power_name(state));
		return -EINVAL;
	}

	/* Check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	    || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (pmcsr == (u16) ~0) {
		pci_err(dev, "can't change power state from %s to %s (config space inaccessible)\n",
			pci_power_name(dev->current_state),
			pci_power_name(state));
		return -EIO;
	}

	/*
	 * If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		    && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		fallthrough;	/* force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* Enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/*
	 * Mandatory power management transition delays; see PCI PM 1.1
	 * 5.6.1 table 18
	 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state)
		pci_info_ratelimited(dev, "refused to change power state from %s to %s\n",
				     pci_power_name(dev->current_state),
				     pci_power_name(state));

	/*
	 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot. Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}

/**
 * pci_update_current_state - Read power state of given device and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 *
 * The power state is read from the PMCSR register, which however is
 * inaccessible in D3cold. The platform firmware is therefore queried first
 * to detect accessibility of the register. In case the platform firmware
 * reports an incorrect state or the device isn't power manageable by the
 * platform at all, we try to detect D3cold by testing accessibility of the
 * vendor ID in config space.
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (platform_pci_get_power_state(dev) == PCI_D3cold ||
	    !pci_device_is_present(dev)) {
		dev->current_state = PCI_D3cold;
	} else if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_refresh_power_state - Refresh the given device's power state data
 * @dev: Target PCI device.
 *
 * Ask the platform to refresh the device's power state information and invoke
 * pci_update_current_state() to update its current PCI power state.
 */
void pci_refresh_power_state(struct pci_dev *dev)
{
	platform_pci_refresh_power_state(dev);
	pci_update_current_state(dev, dev->current_state);
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	error = platform_pci_set_power_state(dev, state);
	if (!error)
		pci_update_current_state(dev, state);
	else if (!dev->pm_cap) /* Fall back to PCI_D0 */
		dev->current_state = PCI_D0;

	return error;
}
EXPORT_SYMBOL_GPL(pci_platform_power_transition);

static int pci_resume_one(struct pci_dev *pci_dev, void *ign)
{
	pm_request_resume(&pci_dev->dev);
	return 0;
}

/**
 * pci_resume_bus - Walk given bus and runtime resume devices on it
 * @bus: Top bus of the subtree to walk.
 */
void pci_resume_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_resume_one, NULL);
}

static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
{
	int delay = 1;
	u32 id;

	/*
	 * After reset, the device should not silently discard config
	 * requests, but it may still indicate that it needs more time by
	 * responding to them with CRS completions. The Root Port will
	 * generally synthesize ~0 data to complete the read (except when
	 * CRS SV is enabled and the read was for the Vendor ID; in that
	 * case it synthesizes 0x0001 data).
	 *
	 * Wait for the device to return a non-CRS completion. Read the
	 * Command register instead of Vendor ID so we don't have to
	 * contend with the CRS SV value.
	 */
	pci_read_config_dword(dev, PCI_COMMAND, &id);
	while (id == ~0) {
		if (delay > timeout) {
			pci_warn(dev, "not ready %dms after %s; giving up\n",
				 delay - 1, reset_type);
			return -ENOTTY;
		}

		if (delay > 1000)
			pci_info(dev, "not ready %dms after %s; waiting\n",
				 delay - 1, reset_type);

		msleep(delay);
		delay *= 2;
		pci_read_config_dword(dev, PCI_COMMAND, &id);
	}

	if (delay > 1000)
		pci_info(dev, "ready %dms after %s\n", delay - 1,
			 reset_type);

	return 0;
}

/**
 * pci_power_up - Put the given device into D0
 * @dev: PCI device to power up
 */
int pci_power_up(struct pci_dev *dev)
{
	pci_platform_power_transition(dev, PCI_D0);

	/*
	 * Mandatory power management transition delays are handled in
	 * pci_pm_resume_noirq() and pci_pm_runtime_resume() of the
	 * corresponding bridge.
	 */
	if (dev->runtime_d3cold) {
		/*
		 * When powering on a bridge from D3cold, the whole hierarchy
		 * may be powered on into D0uninitialized state; resume them to
		 * give them a chance to suspend again.
		 */
		pci_resume_bus(dev->subordinate);
	}

	return pci_raw_set_power_state(dev, PCI_D0);
}

/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 */
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
	pci_power_t state = *(pci_power_t *)data;

	dev->current_state = state;
	return 0;
}

/**
 * pci_bus_set_current_state - Walk given bus and set current state of devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
	if (bus)
		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
 * 0 if device already is in the requested state.
 * 0 if the transition is to D3 but D3 is not supported.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* Bound the state we're entering */
	if (state > PCI_D3cold)
		state = PCI_D3cold;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))

		/*
		 * If the device or the parent bridge do not support PCI
		 * PM, ignore the request if we're doing anything other
		 * than putting it into D0 (which would only happen on
		 * boot).
		 */
		return 0;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (state == PCI_D0)
		return pci_power_up(dev);

	/*
	 * This device is quirked not to be put into D3, so don't put it in
	 * D3
	 */
	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	/*
	 * To put the device in D3cold, we put the device into D3hot in the
	 * native way, then put the device into D3cold with platform ops.
	 */
	error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
					PCI_D3hot : state);

	if (pci_platform_power_transition(dev, state))
		return error;

	/* Powering off a bridge may power off the whole hierarchy */
	if (state == PCI_D3cold)
		pci_bus_set_current_state(dev->subordinate, PCI_D3cold);

	return 0;
}
EXPORT_SYMBOL(pci_set_power_state);
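
/*
 * Illustrative usage sketch (not part of the original file): a driver's
 * suspend and resume callbacks typically bracket their work with power
 * state transitions.
 *
 *	// suspend path
 *	pci_set_power_state(pdev, PCI_D3hot);
 *
 *	// resume path
 *	pci_set_power_state(pdev, PCI_D0);
 */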

#define PCI_EXP_SAVE_REGS	7

static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
						       u16 cap, bool extended)
{
	struct pci_cap_saved_state *tmp;

	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
			return tmp;
	}
	return NULL;
}

struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
{
	return _pci_find_saved_cap(dev, cap, false);
}

struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
{
	return _pci_find_saved_cap(dev, cap, true);
}

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_RTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}

void pci_bridge_reconfigure_ltr(struct pci_dev *dev)
{
#ifdef CONFIG_PCIEASPM
	struct pci_dev *bridge;
	u32 ctl;

	bridge = pci_upstream_bridge(dev);
	if (bridge && bridge->ltr_path) {
		pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl);
		if (!(ctl & PCI_EXP_DEVCTL2_LTR_EN)) {
			pci_dbg(bridge, "re-enabling LTR\n");
			pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
						 PCI_EXP_DEVCTL2_LTR_EN);
		}
	}
#endif
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state)
		return;

	/*
	 * Downstream ports reset the LTR enable bit when link goes down.
	 * Check and re-configure the bit here before restoring device.
	 * PCIe r5.0, sec 7.5.3.16.
	 */
	pci_bridge_reconfigure_ltr(dev);

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}

static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || !pos)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}

static void pci_save_ltr_state(struct pci_dev *dev)
{
	int ltr;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return;

	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!ltr)
		return;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
	if (!save_state) {
		pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
		return;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pci_read_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap++);
	pci_read_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, cap++);
}

static void pci_restore_ltr_state(struct pci_dev *dev)
{
	struct pci_cap_saved_state *save_state;
	int ltr;
	u16 *cap;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!save_state || !ltr)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pci_write_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap++);
	pci_write_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, *cap++);
}

/**
 * pci_save_state - save the PCI configuration space of a device before
 *		    suspending
 * @dev: PCI device that we're dealing with
 */
int pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++) {
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
		pci_dbg(dev, "saving config space at offset %#x (reading %#x)\n",
			i * 4, dev->saved_config_space[i]);
	}
	dev->state_saved = true;

	i = pci_save_pcie_state(dev);
	if (i != 0)
		return i;

	i = pci_save_pcix_state(dev);
	if (i != 0)
		return i;

	pci_save_ltr_state(dev);
	pci_save_dpc_state(dev);
	pci_save_aer_state(dev);
	pci_save_ptm_state(dev);
	return pci_save_vc_state(dev);
}
EXPORT_SYMBOL(pci_save_state);
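
/*
 * Illustrative usage sketch (not part of the original file): pairing
 * pci_save_state() with pci_restore_state() across a low-power interval.
 *
 *	// suspend path
 *	pci_save_state(pdev);
 *	pci_set_power_state(pdev, PCI_D3hot);
 *
 *	// resume path
 *	pci_set_power_state(pdev, PCI_D0);
 *	pci_restore_state(pdev);
 */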
1626
pci_restore_config_dword(struct pci_dev * pdev,int offset,u32 saved_val,int retry,bool force)1627 static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
1628 u32 saved_val, int retry, bool force)
1629 {
1630 u32 val;
1631
1632 pci_read_config_dword(pdev, offset, &val);
1633 if (!force && val == saved_val)
1634 return;
1635
1636 for (;;) {
1637 pci_dbg(pdev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
1638 offset, val, saved_val);
1639 pci_write_config_dword(pdev, offset, saved_val);
1640 if (retry-- <= 0)
1641 return;
1642
1643 pci_read_config_dword(pdev, offset, &val);
1644 if (val == saved_val)
1645 return;
1646
1647 mdelay(1);
1648 }
1649 }
1650
pci_restore_config_space_range(struct pci_dev * pdev,int start,int end,int retry,bool force)1651 static void pci_restore_config_space_range(struct pci_dev *pdev,
1652 int start, int end, int retry,
1653 bool force)
1654 {
1655 int index;
1656
1657 for (index = end; index >= start; index--)
1658 pci_restore_config_dword(pdev, 4 * index,
1659 pdev->saved_config_space[index],
1660 retry, force);
1661 }
1662
pci_restore_config_space(struct pci_dev * pdev)1663 static void pci_restore_config_space(struct pci_dev *pdev)
1664 {
1665 if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1666 pci_restore_config_space_range(pdev, 10, 15, 0, false);
1667 /* Restore BARs before the command register. */
1668 pci_restore_config_space_range(pdev, 4, 9, 10, false);
1669 pci_restore_config_space_range(pdev, 0, 3, 0, false);
1670 } else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
1671 pci_restore_config_space_range(pdev, 12, 15, 0, false);
1672
1673 /*
1674 * Force rewriting of prefetch registers to avoid S3 resume
1675 * issues on Intel PCI bridges that occur when these
1676 * registers are not explicitly written.
1677 */
1678 pci_restore_config_space_range(pdev, 9, 11, 0, true);
1679 pci_restore_config_space_range(pdev, 0, 8, 0, false);
1680 } else {
1681 pci_restore_config_space_range(pdev, 0, 15, 0, false);
1682 }
1683 }
1684
pci_restore_rebar_state(struct pci_dev * pdev)1685 static void pci_restore_rebar_state(struct pci_dev *pdev)
1686 {
1687 unsigned int pos, nbars, i;
1688 u32 ctrl;
1689
1690 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
1691 if (!pos)
1692 return;
1693
1694 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1695 nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
1696 PCI_REBAR_CTRL_NBAR_SHIFT;
1697
1698 for (i = 0; i < nbars; i++, pos += 8) {
1699 struct resource *res;
1700 int bar_idx, size;
1701
1702 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1703 bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
1704 res = pdev->resource + bar_idx;
1705 size = pci_rebar_bytes_to_size(resource_size(res));
1706 ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
1707 ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
1708 pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
1709 }
1710 }
1711
1712 /**
1713 * pci_restore_state - Restore the saved state of a PCI device
1714 * @dev: PCI device that we're dealing with
1715 */
pci_restore_state(struct pci_dev * dev)1716 void pci_restore_state(struct pci_dev *dev)
1717 {
1718 if (!dev->state_saved)
1719 return;
1720
1721 /*
1722 * Restore max latencies (in the LTR capability) before enabling
1723 * LTR itself (in the PCIe capability).
1724 */
1725 pci_restore_ltr_state(dev);
1726
1727 pci_restore_pcie_state(dev);
1728 pci_restore_pasid_state(dev);
1729 pci_restore_pri_state(dev);
1730 pci_restore_ats_state(dev);
1731 pci_restore_vc_state(dev);
1732 pci_restore_rebar_state(dev);
1733 pci_restore_dpc_state(dev);
1734 pci_restore_ptm_state(dev);
1735
1736 pci_aer_clear_status(dev);
1737 pci_restore_aer_state(dev);
1738
1739 pci_restore_config_space(dev);
1740
1741 pci_restore_pcix_state(dev);
1742 pci_restore_msi_state(dev);
1743
1744 /* Restore ACS and IOV configuration state */
1745 pci_enable_acs(dev);
1746 pci_restore_iov_state(dev);
1747
1748 dev->state_saved = false;
1749 }
1750 EXPORT_SYMBOL(pci_restore_state);
1751
1752 struct pci_saved_state {
1753 u32 config_space[16];
1754 struct pci_cap_saved_data cap[];
1755 };
1756
1757 /**
1758 * pci_store_saved_state - Allocate and return an opaque struct containing
1759 * the device saved state.
1760 * @dev: PCI device that we're dealing with
1761 *
1762 * Return NULL if no state or error.
1763 */
pci_store_saved_state(struct pci_dev * dev)1764 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
1765 {
1766 struct pci_saved_state *state;
1767 struct pci_cap_saved_state *tmp;
1768 struct pci_cap_saved_data *cap;
1769 size_t size;
1770
1771 if (!dev->state_saved)
1772 return NULL;
1773
1774 size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1775
1776 hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
1777 size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1778
1779 state = kzalloc(size, GFP_KERNEL);
1780 if (!state)
1781 return NULL;
1782
1783 memcpy(state->config_space, dev->saved_config_space,
1784 sizeof(state->config_space));
1785
1786 cap = state->cap;
1787 hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
1788 size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1789 memcpy(cap, &tmp->cap, len);
1790 cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
1791 }
1792 /* Empty cap_save terminates list */
1793
1794 return state;
1795 }
1796 EXPORT_SYMBOL_GPL(pci_store_saved_state);
1797
1798 /**
1799 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
1800 * @dev: PCI device that we're dealing with
1801 * @state: Saved state returned from pci_store_saved_state()
1802 */
1803 int pci_load_saved_state(struct pci_dev *dev,
1804 struct pci_saved_state *state)
1805 {
1806 struct pci_cap_saved_data *cap;
1807
1808 dev->state_saved = false;
1809
1810 if (!state)
1811 return 0;
1812
1813 memcpy(dev->saved_config_space, state->config_space,
1814 sizeof(state->config_space));
1815
1816 cap = state->cap;
1817 while (cap->size) {
1818 struct pci_cap_saved_state *tmp;
1819
1820 tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
1821 if (!tmp || tmp->cap.size != cap->size)
1822 return -EINVAL;
1823
1824 memcpy(tmp->cap.data, cap->data, tmp->cap.size);
1825 cap = (struct pci_cap_saved_data *)((u8 *)cap +
1826 sizeof(struct pci_cap_saved_data) + cap->size);
1827 }
1828
1829 dev->state_saved = true;
1830 return 0;
1831 }
1832 EXPORT_SYMBOL_GPL(pci_load_saved_state);
1833
1834 /**
1835 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
1836 * and free the memory allocated for it.
1837 * @dev: PCI device that we're dealing with
1838 * @state: Pointer to saved state returned from pci_store_saved_state()
1839 */
1840 int pci_load_and_free_saved_state(struct pci_dev *dev,
1841 struct pci_saved_state **state)
1842 {
1843 int ret = pci_load_saved_state(dev, *state);
1844 kfree(*state);
1845 *state = NULL;
1846 return ret;
1847 }
1848 EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
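
/*
 * Usage sketch (hypothetical, not from this file): keep an opaque snapshot of
 * the saved state across an operation that clobbers it (such as a reset),
 * then load it back and restore. Error handling is omitted for brevity.
 *
 *	struct pci_saved_state *state;
 *
 *	pci_save_state(pdev);
 *	state = pci_store_saved_state(pdev);
 *	... reset or otherwise disturb the device ...
 *	pci_load_and_free_saved_state(pdev, &state);
 *	pci_restore_state(pdev);
 */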
1849
1850 int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
1851 {
1852 return pci_enable_resources(dev, bars);
1853 }
1854
1855 static int do_pci_enable_device(struct pci_dev *dev, int bars)
1856 {
1857 int err;
1858 struct pci_dev *bridge;
1859 u16 cmd;
1860 u8 pin;
1861
1862 err = pci_set_power_state(dev, PCI_D0);
1863 if (err < 0 && err != -EIO)
1864 return err;
1865
1866 bridge = pci_upstream_bridge(dev);
1867 if (bridge)
1868 pcie_aspm_powersave_config_link(bridge);
1869
1870 err = pcibios_enable_device(dev, bars);
1871 if (err < 0)
1872 return err;
1873 pci_fixup_device(pci_fixup_enable, dev);
1874
1875 if (dev->msi_enabled || dev->msix_enabled)
1876 return 0;
1877
1878 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
1879 if (pin) {
1880 pci_read_config_word(dev, PCI_COMMAND, &cmd);
1881 if (cmd & PCI_COMMAND_INTX_DISABLE)
1882 pci_write_config_word(dev, PCI_COMMAND,
1883 cmd & ~PCI_COMMAND_INTX_DISABLE);
1884 }
1885
1886 return 0;
1887 }
1888
1889 /**
1890 * pci_reenable_device - Resume abandoned device
1891 * @dev: PCI device to be resumed
1892 *
1893 * NOTE: This function is a backend of pci_default_resume() and is not supposed
1894 * to be called by normal code; write a proper resume handler and use it instead.
1895 */
1896 int pci_reenable_device(struct pci_dev *dev)
1897 {
1898 if (pci_is_enabled(dev))
1899 return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
1900 return 0;
1901 }
1902 EXPORT_SYMBOL(pci_reenable_device);
1903
1904 static void pci_enable_bridge(struct pci_dev *dev)
1905 {
1906 struct pci_dev *bridge;
1907 int retval;
1908
1909 bridge = pci_upstream_bridge(dev);
1910 if (bridge)
1911 pci_enable_bridge(bridge);
1912
1913 if (pci_is_enabled(dev)) {
1914 if (!dev->is_busmaster)
1915 pci_set_master(dev);
1916 return;
1917 }
1918
1919 retval = pci_enable_device(dev);
1920 if (retval)
1921 pci_err(dev, "Error enabling bridge (%d), continuing\n",
1922 retval);
1923 pci_set_master(dev);
1924 }
1925
1926 static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
1927 {
1928 struct pci_dev *bridge;
1929 int err;
1930 int i, bars = 0;
1931
1932 /*
1933 * Power state could be unknown at this point, either due to a fresh
1934 * boot or a device removal call. So get the current power state
1935 * so that things like MSI message writing will behave as expected
1936 * (e.g. if the device really is in D0 at enable time).
1937 */
1938 pci_update_current_state(dev, dev->current_state);
1939
1940 if (atomic_inc_return(&dev->enable_cnt) > 1)
1941 return 0; /* already enabled */
1942
1943 bridge = pci_upstream_bridge(dev);
1944 if (bridge)
1945 pci_enable_bridge(bridge);
1946
1947 /* Skip only the SR-IOV resources; collect all others matching @flags */
1948 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
1949 if (dev->resource[i].flags & flags)
1950 bars |= (1 << i);
1951 for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
1952 if (dev->resource[i].flags & flags)
1953 bars |= (1 << i);
1954
1955 err = do_pci_enable_device(dev, bars);
1956 if (err < 0)
1957 atomic_dec(&dev->enable_cnt);
1958 return err;
1959 }
1960
1961 /**
1962 * pci_enable_device_io - Initialize a device for use with IO space
1963 * @dev: PCI device to be initialized
1964 *
1965 * Initialize device before it's used by a driver. Ask low-level code
1966 * to enable I/O resources. Wake up the device if it was suspended.
1967 * Beware, this function can fail.
1968 */
1969 int pci_enable_device_io(struct pci_dev *dev)
1970 {
1971 return pci_enable_device_flags(dev, IORESOURCE_IO);
1972 }
1973 EXPORT_SYMBOL(pci_enable_device_io);
1974
1975 /**
1976 * pci_enable_device_mem - Initialize a device for use with Memory space
1977 * @dev: PCI device to be initialized
1978 *
1979 * Initialize device before it's used by a driver. Ask low-level code
1980 * to enable Memory resources. Wake up the device if it was suspended.
1981 * Beware, this function can fail.
1982 */
1983 int pci_enable_device_mem(struct pci_dev *dev)
1984 {
1985 return pci_enable_device_flags(dev, IORESOURCE_MEM);
1986 }
1987 EXPORT_SYMBOL(pci_enable_device_mem);
1988
1989 /**
1990 * pci_enable_device - Initialize device before it's used by a driver.
1991 * @dev: PCI device to be initialized
1992 *
1993 * Initialize device before it's used by a driver. Ask low-level code
1994 * to enable I/O and memory. Wake up the device if it was suspended.
1995 * Beware, this function can fail.
1996 *
1997 * Note we don't actually enable the device many times if we call
1998 * this function repeatedly (we just increment the count).
1999 */
2000 int pci_enable_device(struct pci_dev *dev)
2001 {
2002 return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
2003 }
2004 EXPORT_SYMBOL(pci_enable_device);
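
/*
 * Usage sketch (hypothetical "foo" driver): the typical probe-time sequence
 * built on pci_enable_device(). pci_request_regions() and pci_set_master()
 * are shown as the usual companions, not as requirements of this function.
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int rc;
 *
 *		rc = pci_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *
 *		rc = pci_request_regions(pdev, "foo");
 *		if (rc) {
 *			pci_disable_device(pdev);
 *			return rc;
 *		}
 *
 *		pci_set_master(pdev);
 *		return 0;
 *	}
 */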
2005
2006 /*
2007 * Managed PCI resources. This manages device on/off, INTx/MSI/MSI-X
2008 * on/off and BAR regions. pci_dev itself records MSI/MSI-X status, so
2009 * there's no need to track it separately. pci_devres is initialized
2010 * when a device is enabled using the managed PCI device enable interface.
2011 */
2012 struct pci_devres {
2013 unsigned int enabled:1;
2014 unsigned int pinned:1;
2015 unsigned int orig_intx:1;
2016 unsigned int restore_intx:1;
2017 unsigned int mwi:1;
2018 u32 region_mask;
2019 };
2020
2021 static void pcim_release(struct device *gendev, void *res)
2022 {
2023 struct pci_dev *dev = to_pci_dev(gendev);
2024 struct pci_devres *this = res;
2025 int i;
2026
2027 if (dev->msi_enabled)
2028 pci_disable_msi(dev);
2029 if (dev->msix_enabled)
2030 pci_disable_msix(dev);
2031
2032 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
2033 if (this->region_mask & (1 << i))
2034 pci_release_region(dev, i);
2035
2036 if (this->mwi)
2037 pci_clear_mwi(dev);
2038
2039 if (this->restore_intx)
2040 pci_intx(dev, this->orig_intx);
2041
2042 if (this->enabled && !this->pinned)
2043 pci_disable_device(dev);
2044 }
2045
2046 static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
2047 {
2048 struct pci_devres *dr, *new_dr;
2049
2050 dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
2051 if (dr)
2052 return dr;
2053
2054 new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
2055 if (!new_dr)
2056 return NULL;
2057 return devres_get(&pdev->dev, new_dr, NULL, NULL);
2058 }
2059
2060 static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
2061 {
2062 if (pci_is_managed(pdev))
2063 return devres_find(&pdev->dev, pcim_release, NULL, NULL);
2064 return NULL;
2065 }
2066
2067 /**
2068 * pcim_enable_device - Managed pci_enable_device()
2069 * @pdev: PCI device to be initialized
2070 *
2071 * Managed pci_enable_device(). The device is disabled automatically on driver detach unless pinned via pcim_pin_device().
2072 */
2073 int pcim_enable_device(struct pci_dev *pdev)
2074 {
2075 struct pci_devres *dr;
2076 int rc;
2077
2078 dr = get_pci_dr(pdev);
2079 if (unlikely(!dr))
2080 return -ENOMEM;
2081 if (dr->enabled)
2082 return 0;
2083
2084 rc = pci_enable_device(pdev);
2085 if (!rc) {
2086 pdev->is_managed = 1;
2087 dr->enabled = 1;
2088 }
2089 return rc;
2090 }
2091 EXPORT_SYMBOL(pcim_enable_device);
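
/*
 * Usage sketch (hypothetical "foo" driver): the managed variant. Because the
 * enable is recorded in devres, no explicit pci_disable_device() is needed in
 * the error paths or in the driver's remove callback.
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int rc = pcim_enable_device(pdev);
 *
 *		if (rc)
 *			return rc;
 *		pci_set_master(pdev);
 *		return 0;
 *	}
 */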
2092
2093 /**
2094 * pcim_pin_device - Pin managed PCI device
2095 * @pdev: PCI device to pin
2096 *
2097 * Pin managed PCI device @pdev. A pinned device won't be disabled on
2098 * driver detach. @pdev must have been enabled with
2099 * pcim_enable_device().
2100 */
2101 void pcim_pin_device(struct pci_dev *pdev)
2102 {
2103 struct pci_devres *dr;
2104
2105 dr = find_pci_dr(pdev);
2106 WARN_ON(!dr || !dr->enabled);
2107 if (dr)
2108 dr->pinned = 1;
2109 }
2110 EXPORT_SYMBOL(pcim_pin_device);
2111
2112 /*
2113 * pcibios_device_add - provide arch specific hooks when adding device dev
2114 * @dev: the PCI device being added
2115 *
2116 * Permits the platform to provide architecture specific functionality when
2117 * devices are added. This is the default implementation. Architecture
2118 * implementations can override this.
2119 */
2120 int __weak pcibios_device_add(struct pci_dev *dev)
2121 {
2122 return 0;
2123 }
2124
2125 /**
2126 * pcibios_release_device - provide arch specific hooks when releasing
2127 * device dev
2128 * @dev: the PCI device being released
2129 *
2130 * Permits the platform to provide architecture specific functionality when
2131 * devices are released. This is the default implementation. Architecture
2132 * implementations can override this.
2133 */
2134 void __weak pcibios_release_device(struct pci_dev *dev) {}
2135
2136 /**
2137 * pcibios_disable_device - disable arch specific PCI resources for device dev
2138 * @dev: the PCI device to disable
2139 *
2140 * Disables architecture specific PCI resources for the device. This
2141 * is the default implementation. Architecture implementations can
2142 * override this.
2143 */
2144 void __weak pcibios_disable_device(struct pci_dev *dev) {}
2145
2146 /**
2147 * pcibios_penalize_isa_irq - penalize an ISA IRQ
2148 * @irq: ISA IRQ to penalize
2149 * @active: IRQ active or not
2150 *
2151 * Permits the platform to provide architecture-specific functionality when
2152 * penalizing ISA IRQs. This is the default implementation. Architecture
2153 * implementations can override this.
2154 */
2155 void __weak pcibios_penalize_isa_irq(int irq, int active) {}
2156
2157 static void do_pci_disable_device(struct pci_dev *dev)
2158 {
2159 u16 pci_command;
2160
2161 pci_read_config_word(dev, PCI_COMMAND, &pci_command);
2162 if (pci_command & PCI_COMMAND_MASTER) {
2163 pci_command &= ~PCI_COMMAND_MASTER;
2164 pci_write_config_word(dev, PCI_COMMAND, pci_command);
2165 }
2166
2167 pcibios_disable_device(dev);
2168 }
2169
2170 /**
2171 * pci_disable_enabled_device - Disable device without updating enable_cnt
2172 * @dev: PCI device to disable
2173 *
2174 * NOTE: This function is a backend of PCI power management routines and is
2175 * not supposed to be called by drivers.
2176 */
2177 void pci_disable_enabled_device(struct pci_dev *dev)
2178 {
2179 if (pci_is_enabled(dev))
2180 do_pci_disable_device(dev);
2181 }
2182
2183 /**
2184 * pci_disable_device - Disable PCI device after use
2185 * @dev: PCI device to be disabled
2186 *
2187 * Signal to the system that the PCI device is not in use by the system
2188 * anymore. This only involves disabling PCI bus-mastering, if active.
2189 *
2190 * Note we don't actually disable the device until all callers of
2191 * pci_enable_device() have called pci_disable_device().
2192 */
2193 void pci_disable_device(struct pci_dev *dev)
2194 {
2195 struct pci_devres *dr;
2196
2197 dr = find_pci_dr(dev);
2198 if (dr)
2199 dr->enabled = 0;
2200
2201 dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
2202 "disabling already-disabled device");
2203
2204 if (atomic_dec_return(&dev->enable_cnt) != 0)
2205 return;
2206
2207 do_pci_disable_device(dev);
2208
2209 dev->is_busmaster = 0;
2210 }
2211 EXPORT_SYMBOL(pci_disable_device);
2212
2213 /**
2214 * pcibios_set_pcie_reset_state - set reset state for device dev
2215 * @dev: the PCIe device to reset
2216 * @state: Reset state to enter into
2217 *
2218 * Set the PCIe reset state for the device. This is the default
2219 * implementation. Architecture implementations can override this.
2220 */
2221 int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
2222 enum pcie_reset_state state)
2223 {
2224 return -EINVAL;
2225 }
2226
2227 /**
2228 * pci_set_pcie_reset_state - set reset state for device dev
2229 * @dev: the PCIe device to reset
2230 * @state: Reset state to enter into
2231 *
2232 * Sets the PCI reset state for the device.
2233 */
2234 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
2235 {
2236 return pcibios_set_pcie_reset_state(dev, state);
2237 }
2238 EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
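
/*
 * Usage sketch (assuming an architecture that actually implements
 * pcibios_set_pcie_reset_state(); the default above returns -EINVAL):
 * asserting and releasing a warm reset around some recovery action.
 *
 *	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
 *	... device held in reset; perform recovery ...
 *	pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
 */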
2239
2240 #ifdef CONFIG_PCIEAER
2241 void pcie_clear_device_status(struct pci_dev *dev)
2242 {
2243 u16 sta;
2244
2245 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
2246 pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
2247 }
2248 #endif
2249
2250 /**
2251 * pcie_clear_root_pme_status - Clear root port PME interrupt status.
2252 * @dev: PCIe root port or event collector.
2253 */
2254 void pcie_clear_root_pme_status(struct pci_dev *dev)
2255 {
2256 pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
2257 }
2258
2259 /**
2260 * pci_check_pme_status - Check if given device has generated PME.
2261 * @dev: Device to check.
2262 *
2263 * Check the PME status of the device and if set, clear it and clear PME enable
2264 * (if set). Return 'true' if PME status and PME enable were both set or
2265 * 'false' otherwise.
2266 */
2267 bool pci_check_pme_status(struct pci_dev *dev)
2268 {
2269 int pmcsr_pos;
2270 u16 pmcsr;
2271 bool ret = false;
2272
2273 if (!dev->pm_cap)
2274 return false;
2275
2276 pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
2277 pci_read_config_word(dev, pmcsr_pos, &pmcsr);
2278 if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
2279 return false;
2280
2281 /* Clear PME status. */
2282 pmcsr |= PCI_PM_CTRL_PME_STATUS;
2283 if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
2284 /* Disable PME to avoid interrupt flood. */
2285 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2286 ret = true;
2287 }
2288
2289 pci_write_config_word(dev, pmcsr_pos, pmcsr);
2290
2291 return ret;
2292 }
2293
2294 /**
2295 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
2296 * @dev: Device to handle.
2297 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
2298 *
2299 * Check if @dev has generated PME and queue a resume request for it in that
2300 * case.
2301 */
2302 static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
2303 {
2304 if (pme_poll_reset && dev->pme_poll)
2305 dev->pme_poll = false;
2306
2307 if (pci_check_pme_status(dev)) {
2308 pci_wakeup_event(dev);
2309 pm_request_resume(&dev->dev);
2310 }
2311 return 0;
2312 }
2313
2314 /**
2315 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
2316 * @bus: Top bus of the subtree to walk.
2317 */
2318 void pci_pme_wakeup_bus(struct pci_bus *bus)
2319 {
2320 if (bus)
2321 pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
2322 }
2323
2324
2325 /**
2326 * pci_pme_capable - check the capability of PCI device to generate PME#
2327 * @dev: PCI device to handle.
2328 * @state: PCI state from which device will issue PME#.
2329 */
2330 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
2331 {
2332 if (!dev->pm_cap)
2333 return false;
2334
2335 return !!(dev->pme_support & (1 << state));
2336 }
2337 EXPORT_SYMBOL(pci_pme_capable);
2338
2339 static void pci_pme_list_scan(struct work_struct *work)
2340 {
2341 struct pci_pme_device *pme_dev, *n;
2342
2343 mutex_lock(&pci_pme_list_mutex);
2344 list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
2345 if (pme_dev->dev->pme_poll) {
2346 struct pci_dev *bridge;
2347
2348 bridge = pme_dev->dev->bus->self;
2349 /*
2350 * If the bridge is in a low-power state, the
2351 * configuration space of subordinate devices
2352 * may not be accessible.
2353 */
2354 if (bridge && bridge->current_state != PCI_D0)
2355 continue;
2356 /*
2357 * If the device is in D3cold it should not be
2358 * polled either.
2359 */
2360 if (pme_dev->dev->current_state == PCI_D3cold)
2361 continue;
2362
2363 pci_pme_wakeup(pme_dev->dev, NULL);
2364 } else {
2365 list_del(&pme_dev->list);
2366 kfree(pme_dev);
2367 }
2368 }
2369 if (!list_empty(&pci_pme_list))
2370 queue_delayed_work(system_freezable_wq, &pci_pme_work,
2371 msecs_to_jiffies(PME_TIMEOUT));
2372 mutex_unlock(&pci_pme_list_mutex);
2373 }
2374
2375 static void __pci_pme_active(struct pci_dev *dev, bool enable)
2376 {
2377 u16 pmcsr;
2378
2379 if (!dev->pme_support)
2380 return;
2381
2382 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2383 /* Clear PME_Status by writing 1 to it and enable PME# */
2384 pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
2385 if (!enable)
2386 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2387
2388 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2389 }
2390
2391 /**
2392 * pci_pme_restore - Restore PME configuration after config space restore.
2393 * @dev: PCI device to update.
2394 */
2395 void pci_pme_restore(struct pci_dev *dev)
2396 {
2397 u16 pmcsr;
2398
2399 if (!dev->pme_support)
2400 return;
2401
2402 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2403 if (dev->wakeup_prepared) {
2404 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2405 pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
2406 } else {
2407 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2408 pmcsr |= PCI_PM_CTRL_PME_STATUS;
2409 }
2410 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2411 }
2412
2413 /**
2414 * pci_pme_active - enable or disable PCI device's PME# function
2415 * @dev: PCI device to handle.
2416 * @enable: 'true' to enable PME# generation; 'false' to disable it.
2417 *
2418 * The caller must verify that the device is capable of generating PME# before
2419 * calling this function with @enable equal to 'true'.
2420 */
2421 void pci_pme_active(struct pci_dev *dev, bool enable)
2422 {
2423 __pci_pme_active(dev, enable);
2424
2425 /*
2426 * PCI (as opposed to PCIe) PME requires that the device have
2427 * its PME# line hooked up correctly. Not all hardware vendors
2428 * do this, so the PME never gets delivered and the device
2429 * remains asleep. The easiest way around this is to
2430 * periodically walk the list of suspended devices and check
2431 * whether any have their PME flag set. The assumption is that
2432 * we'll wake up often enough anyway that this won't be a huge
2433 * hit, and the power savings from the devices will still be a
2434 * win.
2435 *
2436 * Although PCIe uses an in-band PME message instead of the PME# line
2437 * to report PME, PME does not work for some PCIe devices in
2438 * practice. For example, there are devices that set their PME
2439 * status bits, but don't really bother to send a PME message;
2440 * there are PCI Express Root Ports that don't bother to
2441 * trigger interrupts when they receive PME messages from the
2442 * devices below. So PME poll is used for PCIe devices too.
2443 */
2444
2445 if (dev->pme_poll) {
2446 struct pci_pme_device *pme_dev;
2447 if (enable) {
2448 pme_dev = kmalloc(sizeof(struct pci_pme_device),
2449 GFP_KERNEL);
2450 if (!pme_dev) {
2451 pci_warn(dev, "can't enable PME#\n");
2452 return;
2453 }
2454 pme_dev->dev = dev;
2455 mutex_lock(&pci_pme_list_mutex);
2456 list_add(&pme_dev->list, &pci_pme_list);
2457 if (list_is_singular(&pci_pme_list))
2458 queue_delayed_work(system_freezable_wq,
2459 &pci_pme_work,
2460 msecs_to_jiffies(PME_TIMEOUT));
2461 mutex_unlock(&pci_pme_list_mutex);
2462 } else {
2463 mutex_lock(&pci_pme_list_mutex);
2464 list_for_each_entry(pme_dev, &pci_pme_list, list) {
2465 if (pme_dev->dev == dev) {
2466 list_del(&pme_dev->list);
2467 kfree(pme_dev);
2468 break;
2469 }
2470 }
2471 mutex_unlock(&pci_pme_list_mutex);
2472 }
2473 }
2474
2475 pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
2476 }
2477 EXPORT_SYMBOL(pci_pme_active);
2478
2479 /**
2480 * __pci_enable_wake - enable PCI device as wakeup event source
2481 * @dev: PCI device affected
2482 * @state: PCI state from which device will issue wakeup events
2483 * @enable: True to enable event generation; false to disable
2484 *
2485 * This enables the device as a wakeup event source, or disables it.
2486 * When such events involve platform-specific hooks, those hooks are
2487 * called automatically by this routine.
2488 *
2489 * Devices with legacy power management (no standard PCI PM capabilities)
2490 * always require such platform hooks.
2491 *
2492 * RETURN VALUE:
2493 * 0 is returned on success
2494 * -EINVAL is returned if device is not supposed to wake up the system
2495 * Error code depending on the platform is returned if both the platform and
2496 * the native mechanism fail to enable the generation of wake-up events
2497 */
2498 static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
2499 {
2500 int ret = 0;
2501
2502 /*
2503 * Bridges that are not power-manageable directly only signal
2504 * wakeup on behalf of subordinate devices, which is set up
2505 * elsewhere, so skip them. However, bridges that are
2506 * power-manageable may signal wakeup for themselves (for example,
2507 * on a hotplug event) and they need to be covered here.
2508 */
2509 if (!pci_power_manageable(dev))
2510 return 0;
2511
2512 /* Don't do the same thing twice in a row for one device. */
2513 if (!!enable == !!dev->wakeup_prepared)
2514 return 0;
2515
2516 /*
2517 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
2518 * Anderson we should be doing PME# wake enable followed by ACPI wake
2519 * enable. To disable wake-up we call the platform first, for symmetry.
2520 */
2521
2522 if (enable) {
2523 int error;
2524
2525 /*
2526 * Enable PME signaling if the device can signal PME from
2527 * D3cold regardless of whether or not it can signal PME from
2528 * the current target state, because that will allow it to
2529 * signal PME when the hierarchy above it goes into D3cold and
2530 * the device itself ends up in D3cold as a result of that.
2531 */
2532 if (pci_pme_capable(dev, state) || pci_pme_capable(dev, PCI_D3cold))
2533 pci_pme_active(dev, true);
2534 else
2535 ret = 1;
2536 error = platform_pci_set_wakeup(dev, true);
2537 if (ret)
2538 ret = error;
2539 if (!ret)
2540 dev->wakeup_prepared = true;
2541 } else {
2542 platform_pci_set_wakeup(dev, false);
2543 pci_pme_active(dev, false);
2544 dev->wakeup_prepared = false;
2545 }
2546
2547 return ret;
2548 }
2549
2550 /**
2551 * pci_enable_wake - change wakeup settings for a PCI device
2552 * @pci_dev: Target device
2553 * @state: PCI state from which device will issue wakeup events
2554 * @enable: Whether or not to enable event generation
2555 *
2556 * If @enable is set, check device_may_wakeup() for the device before calling
2557 * __pci_enable_wake() for it.
2558 */
2559 int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
2560 {
2561 if (enable && !device_may_wakeup(&pci_dev->dev))
2562 return -EINVAL;
2563
2564 return __pci_enable_wake(pci_dev, state, enable);
2565 }
2566 EXPORT_SYMBOL(pci_enable_wake);
2567
2568 /**
2569 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
2570 * @dev: PCI device to prepare
2571 * @enable: True to enable wake-up event generation; false to disable
2572 *
2573 * Many drivers want the device to wake up the system from D3_hot or D3_cold
2574 * and this function allows them to set that up cleanly - pci_enable_wake()
2575 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
2576 * ordering constraints.
2577 *
2578 * This function only returns error code if the device is not allowed to wake
2579 * up the system from sleep or it is not capable of generating PME# from both
2580 * D3_hot and D3_cold and the platform is unable to enable wake-up power for it.
2581 */
2582 int pci_wake_from_d3(struct pci_dev *dev, bool enable)
2583 {
2584 return pci_pme_capable(dev, PCI_D3cold) ?
2585 pci_enable_wake(dev, PCI_D3cold, enable) :
2586 pci_enable_wake(dev, PCI_D3hot, enable);
2587 }
2588 EXPORT_SYMBOL(pci_wake_from_d3);
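
/*
 * Usage sketch (hypothetical driver suspend callback): arm wake-up from D3
 * only if user space allowed it, then enter D3hot.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct pci_dev *pdev = to_pci_dev(dev);
 *
 *		pci_wake_from_d3(pdev, device_may_wakeup(dev));
 *		return pci_set_power_state(pdev, PCI_D3hot);
 *	}
 */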
2589
2590 /**
2591 * pci_target_state - find an appropriate low power state for a given PCI dev
2592 * @dev: PCI device
2593 * @wakeup: Whether or not wakeup functionality will be enabled for the device.
2594 *
2595 * Use underlying platform code to find a supported low power state for @dev.
2596 * If the platform can't manage @dev, return the deepest state from which it
2597 * can generate wake events, based on any available PME info.
2598 */
2599 static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
2600 {
2601 if (platform_pci_power_manageable(dev)) {
2602 /*
2603 * Call the platform to find the target state for the device.
2604 */
2605 pci_power_t state = platform_pci_choose_state(dev);
2606
2607 switch (state) {
2608 case PCI_POWER_ERROR:
2609 case PCI_UNKNOWN:
2610 return PCI_D3hot;
2611
2612 case PCI_D1:
2613 case PCI_D2:
2614 if (pci_no_d1d2(dev))
2615 return PCI_D3hot;
2616 }
2617
2618 return state;
2619 }
2620
2621 /*
2622 * If the device is in D3cold even though it's not power-manageable by
2623 * the platform, it may have been powered down by non-standard means.
2624 * Best to let it slumber.
2625 */
2626 if (dev->current_state == PCI_D3cold)
2627 return PCI_D3cold;
2628 else if (!dev->pm_cap)
2629 return PCI_D0;
2630
2631 if (wakeup && dev->pme_support) {
2632 pci_power_t state = PCI_D3hot;
2633
2634 /*
2635 * Find the deepest state from which the device can generate
2636 * PME#.
2637 */
2638 while (state && !(dev->pme_support & (1 << state)))
2639 state--;
2640
2641 if (state)
2642 return state;
2643 else if (dev->pme_support & 1)
2644 return PCI_D0;
2645 }
2646
2647 return PCI_D3hot;
2648 }
2649
2650 /**
2651 * pci_prepare_to_sleep - prepare PCI device for system-wide transition
2652 * into a sleep state
2653 * @dev: Device to handle.
2654 *
2655 * Choose the power state appropriate for the device depending on whether
2656 * it can wake up the system and/or is power manageable by the platform
2657 * (PCI_D3hot is the default) and put the device into that state.
2658 */
2659 int pci_prepare_to_sleep(struct pci_dev *dev)
2660 {
2661 bool wakeup = device_may_wakeup(&dev->dev);
2662 pci_power_t target_state = pci_target_state(dev, wakeup);
2663 int error;
2664
2665 if (target_state == PCI_POWER_ERROR)
2666 return -EIO;
2667
2668 /*
2669 * There are systems (for example, Intel mobile chips since Coffee
2670 * Lake) where the power drawn while suspended can be significantly
2671 * reduced by disabling PTM on PCIe root ports as this allows the
2672 * port to enter a lower-power PM state and the SoC to reach a
2673 * lower-power idle state as a whole.
2674 */
2675 if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
2676 pci_disable_ptm(dev);
2677
2678 pci_enable_wake(dev, target_state, wakeup);
2679
2680 error = pci_set_power_state(dev, target_state);
2681
2682 if (error) {
2683 pci_enable_wake(dev, target_state, false);
2684 pci_restore_ptm_state(dev);
2685 }
2686
2687 return error;
2688 }
2689 EXPORT_SYMBOL(pci_prepare_to_sleep);
2690
2691 /**
2692 * pci_back_from_sleep - turn PCI device on during system-wide transition
2693 * into working state
2694 * @dev: Device to handle.
2695 *
2696 * Disable device's system wake-up capability and put it into D0.
2697 */
2698 int pci_back_from_sleep(struct pci_dev *dev)
2699 {
2700 int ret = pci_set_power_state(dev, PCI_D0);
2701
2702 if (ret)
2703 return ret;
2704
2705 pci_enable_wake(dev, PCI_D0, false);
2706 return 0;
2707 }
2708 EXPORT_SYMBOL(pci_back_from_sleep);
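
/*
 * Usage sketch (hypothetical driver, system sleep callbacks): letting these
 * exported helpers pick the target state and configure wake-up in one call,
 * and undoing that on resume. Most drivers rely on the PCI core doing this
 * for them; calling the helpers directly is the exception.
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		pci_save_state(pdev);
 *		return pci_prepare_to_sleep(pdev);
 *	}
 *
 *	static int foo_resume(struct pci_dev *pdev)
 *	{
 *		int rc = pci_back_from_sleep(pdev);
 *
 *		if (rc)
 *			return rc;
 *		pci_restore_state(pdev);
 *		return 0;
 *	}
 */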
2709
2710 /**
2711 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
2712 * @dev: PCI device being suspended.
2713 *
2714 * Prepare @dev to generate wake-up events at run time and put it into a low
2715 * power state.
2716 */
2717 int pci_finish_runtime_suspend(struct pci_dev *dev)
2718 {
2719 pci_power_t target_state;
2720 int error;
2721
2722 target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
2723 if (target_state == PCI_POWER_ERROR)
2724 return -EIO;
2725
2726 dev->runtime_d3cold = target_state == PCI_D3cold;
2727
2728 /*
2729 * There are systems (for example, Intel mobile chips since Coffee
2730 * Lake) where the power drawn while suspended can be significantly
2731 * reduced by disabling PTM on PCIe root ports as this allows the
2732 * port to enter a lower-power PM state and the SoC to reach a
2733 * lower-power idle state as a whole.
2734 */
2735 if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
2736 pci_disable_ptm(dev);
2737
2738 __pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
2739
2740 error = pci_set_power_state(dev, target_state);
2741
2742 if (error) {
2743 pci_enable_wake(dev, target_state, false);
2744 pci_restore_ptm_state(dev);
2745 dev->runtime_d3cold = false;
2746 }
2747
2748 return error;
2749 }
2750
2751 /**
2752 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
2753 * @dev: Device to check.
2754 *
2755 * Return true if the device itself is capable of generating wake-up events
2756 * (through the platform or using the native PCIe PME) or if the device supports
2757 * PME and one of its upstream bridges can generate wake-up events.
2758 */
2759 bool pci_dev_run_wake(struct pci_dev *dev)
2760 {
2761 struct pci_bus *bus = dev->bus;
2762
2763 if (!dev->pme_support)
2764 return false;
2765
2766 /* PME-capable in principle, but not from the target power state */
2767 if (!pci_pme_capable(dev, pci_target_state(dev, true)))
2768 return false;
2769
2770 if (device_can_wakeup(&dev->dev))
2771 return true;
2772
2773 while (bus->parent) {
2774 struct pci_dev *bridge = bus->self;
2775
2776 if (device_can_wakeup(&bridge->dev))
2777 return true;
2778
2779 bus = bus->parent;
2780 }
2781
2782 /* We have reached the root bus. */
2783 if (bus->bridge)
2784 return device_can_wakeup(bus->bridge);
2785
2786 return false;
2787 }
2788 EXPORT_SYMBOL_GPL(pci_dev_run_wake);
2789
2790 /**
2791 * pci_dev_need_resume - Check if it is necessary to resume the device.
2792 * @pci_dev: Device to check.
2793 *
2794 * Return 'true' if the device is not runtime-suspended, if it has to be
2795 * reconfigured due to a difference in wakeup settings between system and
2796 * runtime suspend, or if its current power state is not suitable for the upcoming
2797 * (system-wide) transition.
2798 */
2799 bool pci_dev_need_resume(struct pci_dev *pci_dev)
2800 {
2801 struct device *dev = &pci_dev->dev;
2802 pci_power_t target_state;
2803
2804 if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev))
2805 return true;
2806
2807 target_state = pci_target_state(pci_dev, device_may_wakeup(dev));
2808
2809 /*
2810 * If the earlier platform check has not triggered, D3cold is just power
2811 * removal on top of D3hot, so no need to resume the device in that
2812 * case.
2813 */
2814 return target_state != pci_dev->current_state &&
2815 target_state != PCI_D3cold &&
2816 pci_dev->current_state != PCI_D3hot;
2817 }
2818
2819 /**
2820 * pci_dev_adjust_pme - Adjust PME setting for a suspended device.
2821 * @pci_dev: Device to check.
2822 *
2823 * If the device is suspended and it is not configured for system wakeup,
2824 * disable PME for it to prevent it from waking up the system unnecessarily.
2825 *
2826 * Note that if the device's power state is D3cold and the platform check in
2827 * pci_dev_need_resume() has not triggered, the device's configuration need not
2828 * be changed.
2829 */
2830 void pci_dev_adjust_pme(struct pci_dev *pci_dev)
2831 {
2832 struct device *dev = &pci_dev->dev;
2833
2834 spin_lock_irq(&dev->power.lock);
2835
2836 if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) &&
2837 pci_dev->current_state < PCI_D3cold)
2838 __pci_pme_active(pci_dev, false);
2839
2840 spin_unlock_irq(&dev->power.lock);
2841 }
2842
2843 /**
2844 * pci_dev_complete_resume - Finalize resume from system sleep for a device.
2845 * @pci_dev: Device to handle.
2846 *
2847 * If the device is runtime suspended and wakeup-capable, enable PME for it as
2848 * it might have been disabled during the prepare phase of system suspend if
2849 * the device was not configured for system wakeup.
2850 */
2851 void pci_dev_complete_resume(struct pci_dev *pci_dev)
2852 {
2853 struct device *dev = &pci_dev->dev;
2854
2855 if (!pci_dev_run_wake(pci_dev))
2856 return;
2857
2858 spin_lock_irq(&dev->power.lock);
2859
2860 if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
2861 __pci_pme_active(pci_dev, true);
2862
2863 spin_unlock_irq(&dev->power.lock);
2864 }
2865
2866 /**
2867 * pci_choose_state - Choose the power state of a PCI device.
2868 * @dev: Target PCI device.
2869 * @state: Target state for the whole system.
2870 *
2871 * Returns PCI power state suitable for @dev and @state.
2872 */
2873 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
2874 {
2875 if (state.event == PM_EVENT_ON)
2876 return PCI_D0;
2877
2878 return pci_target_state(dev, false);
2879 }
2880 EXPORT_SYMBOL(pci_choose_state);
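
/*
 * Usage sketch (legacy PM, hypothetical driver): translating the system-wide
 * pm_message_t into a device power state.
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		pci_save_state(pdev);
 *		return pci_set_power_state(pdev, pci_choose_state(pdev, state));
 *	}
 */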
2881
2882 void pci_config_pm_runtime_get(struct pci_dev *pdev)
2883 {
2884 struct device *dev = &pdev->dev;
2885 struct device *parent = dev->parent;
2886
2887 if (parent)
2888 pm_runtime_get_sync(parent);
2889 pm_runtime_get_noresume(dev);
2890 /*
2891 * pdev->current_state is set to PCI_D3cold during suspending,
2892 * so wait until the suspend completes.
2893 */
2894 pm_runtime_barrier(dev);
2895 /*
2896 * Only need to resume devices in D3cold, because config
2897 * registers are still accessible for devices suspended but
2898 * not in D3cold.
2899 */
2900 if (pdev->current_state == PCI_D3cold)
2901 pm_runtime_resume(dev);
2902 }
2903
2904 void pci_config_pm_runtime_put(struct pci_dev *pdev)
2905 {
2906 struct device *dev = &pdev->dev;
2907 struct device *parent = dev->parent;
2908
2909 pm_runtime_put(dev);
2910 if (parent)
2911 pm_runtime_put_sync(parent);
2912 }
2913
2914 static const struct dmi_system_id bridge_d3_blacklist[] = {
2915 #ifdef CONFIG_X86
2916 {
2917 /*
2918 * The Gigabyte X299 root port is not marked as hotplug capable,
2919 * which allows Linux to power manage it. However, this
2920 * confuses the BIOS SMI handler so don't power manage root
2921 * ports on that system.
2922 */
2923 .ident = "X299 DESIGNARE EX-CF",
2924 .matches = {
2925 DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
2926 DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
2927 },
2928 },
2929 #endif
2930 { }
2931 };
2932
2933 /**
2934 * pci_bridge_d3_possible - Is it possible to put the bridge into D3
2935 * @bridge: Bridge to check
2936 *
2937 * This function checks if it is possible to move the bridge to D3.
2938 * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt.
2939 */
2940 bool pci_bridge_d3_possible(struct pci_dev *bridge)
2941 {
2942 if (!pci_is_pcie(bridge))
2943 return false;
2944
2945 switch (pci_pcie_type(bridge)) {
2946 case PCI_EXP_TYPE_ROOT_PORT:
2947 case PCI_EXP_TYPE_UPSTREAM:
2948 case PCI_EXP_TYPE_DOWNSTREAM:
2949 if (pci_bridge_d3_disable)
2950 return false;
2951
2952 /*
2953 * Hotplug ports handled by firmware in System Management Mode
2954 * may not be put into D3 by the OS (Thunderbolt on non-Macs).
2955 */
2956 if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
2957 return false;
2958
2959 if (pci_bridge_d3_force)
2960 return true;
2961
2962 /* Even the oldest 2010 Thunderbolt controller supports D3. */
2963 if (bridge->is_thunderbolt)
2964 return true;
2965
2966 /* Platform might know better if the bridge supports D3 */
2967 if (platform_pci_bridge_d3(bridge))
2968 return true;
2969
2970 /*
2971 * Hotplug ports handled natively by the OS were not validated
2972 * by vendors for runtime D3 at least until 2018 because there
2973 * was no OS support.
2974 */
2975 if (bridge->is_hotplug_bridge)
2976 return false;
2977
2978 if (dmi_check_system(bridge_d3_blacklist))
2979 return false;
2980
2981 /*
2982 * It should be safe to put PCIe ports from 2015 or newer
2983 * to D3.
2984 */
2985 if (dmi_get_bios_year() >= 2015)
2986 return true;
2987 break;
2988 }
2989
2990 return false;
2991 }
2992
2993 static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
2994 {
2995 bool *d3cold_ok = data;
2996
2997 if (/* The device needs to be allowed to go D3cold ... */
2998 dev->no_d3cold || !dev->d3cold_allowed ||
2999
3000 /* ... and if it is wakeup capable to do so from D3cold. */
3001 (device_may_wakeup(&dev->dev) &&
3002 !pci_pme_capable(dev, PCI_D3cold)) ||
3003
3004 /* If it is a bridge it must be allowed to go to D3. */
3005 !pci_power_manageable(dev))
3006
3007 *d3cold_ok = false;
3008
3009 return !*d3cold_ok;
3010 }
3011
3012 /*
3013 * pci_bridge_d3_update - Update bridge D3 capabilities
3014 * @dev: PCI device which is changed
3015 *
3016 * Update upstream bridge PM capabilities depending on whether the
3017 * device's PM configuration was changed or the device is being removed. The
3018 * change is also propagated upstream.
3019 */
3020 void pci_bridge_d3_update(struct pci_dev *dev)
3021 {
3022 bool remove = !device_is_registered(&dev->dev);
3023 struct pci_dev *bridge;
3024 bool d3cold_ok = true;
3025
3026 bridge = pci_upstream_bridge(dev);
3027 if (!bridge || !pci_bridge_d3_possible(bridge))
3028 return;
3029
3030 /*
3031 * If D3 is currently allowed for the bridge, removing one of its
3032 * children won't change that.
3033 */
3034 if (remove && bridge->bridge_d3)
3035 return;
3036
3037 /*
3038 * If D3 is currently allowed for the bridge and a child is added or
3039 * changed, disallowance of D3 can only be caused by that child, so
3040 * we only need to check that single device, not any of its siblings.
3041 *
3042 * If D3 is currently not allowed for the bridge, checking the device
3043 * first may allow us to skip checking its siblings.
3044 */
3045 if (!remove)
3046 pci_dev_check_d3cold(dev, &d3cold_ok);
3047
3048 /*
3049 * If D3 is currently not allowed for the bridge, this may be caused
3050 * either by the device being changed/removed or any of its siblings,
3051 * so we need to go through all children to find out if one of them
3052 * continues to block D3.
3053 */
3054 if (d3cold_ok && !bridge->bridge_d3)
3055 pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
3056 &d3cold_ok);
3057
3058 if (bridge->bridge_d3 != d3cold_ok) {
3059 bridge->bridge_d3 = d3cold_ok;
3060 /* Propagate change to upstream bridges */
3061 pci_bridge_d3_update(bridge);
3062 }
3063 }
3064
3065 /**
3066 * pci_d3cold_enable - Enable D3cold for device
3067 * @dev: PCI device to handle
3068 *
3069 * This function can be used in drivers to enable D3cold from the device
3070 * they handle. It also updates upstream PCI bridge PM capabilities
3071 * accordingly.
3072 */
3073 void pci_d3cold_enable(struct pci_dev *dev)
3074 {
3075 if (dev->no_d3cold) {
3076 dev->no_d3cold = false;
3077 pci_bridge_d3_update(dev);
3078 }
3079 }
3080 EXPORT_SYMBOL_GPL(pci_d3cold_enable);
3081
3082 /**
3083 * pci_d3cold_disable - Disable D3cold for device
3084 * @dev: PCI device to handle
3085 *
3086 * This function can be used in drivers to disable D3cold from the device
3087 * they handle. It also updates upstream PCI bridge PM capabilities
3088 * accordingly.
3089 */
3090 void pci_d3cold_disable(struct pci_dev *dev)
3091 {
3092 if (!dev->no_d3cold) {
3093 dev->no_d3cold = true;
3094 pci_bridge_d3_update(dev);
3095 }
3096 }
3097 EXPORT_SYMBOL_GPL(pci_d3cold_disable);
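
/*
 * Usage sketch (hypothetical quirk in a driver's probe path): a device that
 * loses context it cannot recover from D3cold can opt out at runtime. The
 * condition name below is a placeholder, not a real flag.
 *
 *	if (device_needs_d3cold_workaround)
 *		pci_d3cold_disable(pdev);
 */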
3098
3099 /**
3100 * pci_pm_init - Initialize PM functions of given PCI device
3101 * @dev: PCI device to handle.
3102 */
3103 void pci_pm_init(struct pci_dev *dev)
3104 {
3105 int pm;
3106 u16 status;
3107 u16 pmc;
3108
3109 pm_runtime_forbid(&dev->dev);
3110 pm_runtime_set_active(&dev->dev);
3111 pm_runtime_enable(&dev->dev);
3112 device_enable_async_suspend(&dev->dev);
3113 dev->wakeup_prepared = false;
3114
3115 dev->pm_cap = 0;
3116 dev->pme_support = 0;
3117
3118 /* find PCI PM capability in list */
3119 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
3120 if (!pm)
3121 return;
3122 /* Check device's ability to generate PME# */
3123 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
3124
3125 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
3126 pci_err(dev, "unsupported PM cap regs version (%u)\n",
3127 pmc & PCI_PM_CAP_VER_MASK);
3128 return;
3129 }
3130
3131 dev->pm_cap = pm;
3132 dev->d3hot_delay = PCI_PM_D3HOT_WAIT;
3133 dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
3134 dev->bridge_d3 = pci_bridge_d3_possible(dev);
3135 dev->d3cold_allowed = true;
3136
3137 dev->d1_support = false;
3138 dev->d2_support = false;
3139 if (!pci_no_d1d2(dev)) {
3140 if (pmc & PCI_PM_CAP_D1)
3141 dev->d1_support = true;
3142 if (pmc & PCI_PM_CAP_D2)
3143 dev->d2_support = true;
3144
3145 if (dev->d1_support || dev->d2_support)
3146 pci_info(dev, "supports%s%s\n",
3147 dev->d1_support ? " D1" : "",
3148 dev->d2_support ? " D2" : "");
3149 }
3150
3151 pmc &= PCI_PM_CAP_PME_MASK;
3152 if (pmc) {
3153 pci_info(dev, "PME# supported from%s%s%s%s%s\n",
3154 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
3155 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
3156 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
3157 (pmc & PCI_PM_CAP_PME_D3hot) ? " D3hot" : "",
3158 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
3159 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
3160 dev->pme_poll = true;
3161 /*
3162 * Make the device's PM flags reflect its wake-up capability, but
3163 * let user space enable it to wake up the system as needed.
3164 */
3165 device_set_wakeup_capable(&dev->dev, true);
3166 /* Disable the PME# generation functionality */
3167 pci_pme_active(dev, false);
3168 }
3169
3170 pci_read_config_word(dev, PCI_STATUS, &status);
3171 if (status & PCI_STATUS_IMM_READY)
3172 dev->imm_ready = 1;
3173 }
3174
3175 static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
3176 {
3177 unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;
3178
3179 switch (prop) {
3180 case PCI_EA_P_MEM:
3181 case PCI_EA_P_VF_MEM:
3182 flags |= IORESOURCE_MEM;
3183 break;
3184 case PCI_EA_P_MEM_PREFETCH:
3185 case PCI_EA_P_VF_MEM_PREFETCH:
3186 flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
3187 break;
3188 case PCI_EA_P_IO:
3189 flags |= IORESOURCE_IO;
3190 break;
3191 default:
3192 return 0;
3193 }
3194
3195 return flags;
3196 }
3197
3198 static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
3199 u8 prop)
3200 {
3201 if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
3202 return &dev->resource[bei];
3203 #ifdef CONFIG_PCI_IOV
3204 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
3205 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
3206 return &dev->resource[PCI_IOV_RESOURCES +
3207 bei - PCI_EA_BEI_VF_BAR0];
3208 #endif
3209 else if (bei == PCI_EA_BEI_ROM)
3210 return &dev->resource[PCI_ROM_RESOURCE];
3211 else
3212 return NULL;
3213 }
3214
3215 /* Read an Enhanced Allocation (EA) entry */
3216 static int pci_ea_read(struct pci_dev *dev, int offset)
3217 {
3218 struct resource *res;
3219 int ent_size, ent_offset = offset;
3220 resource_size_t start, end;
3221 unsigned long flags;
3222 u32 dw0, bei, base, max_offset;
3223 u8 prop;
3224 bool support_64 = (sizeof(resource_size_t) >= 8);
3225
3226 pci_read_config_dword(dev, ent_offset, &dw0);
3227 ent_offset += 4;
3228
3229 /* Entry size field indicates DWORDs after 1st */
3230 ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;
3231
3232 if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */
3233 goto out;
3234
3235 bei = (dw0 & PCI_EA_BEI) >> 4;
3236 prop = (dw0 & PCI_EA_PP) >> 8;
3237
3238 /*
3239 * If the Property is in the reserved range, try the Secondary
3240 * Property instead.
3241 */
3242 if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
3243 prop = (dw0 & PCI_EA_SP) >> 16;
3244 if (prop > PCI_EA_P_BRIDGE_IO)
3245 goto out;
3246
3247 res = pci_ea_get_resource(dev, bei, prop);
3248 if (!res) {
3249 pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
3250 goto out;
3251 }
3252
3253 flags = pci_ea_flags(dev, prop);
3254 if (!flags) {
3255 pci_err(dev, "Unsupported EA properties: %#x\n", prop);
3256 goto out;
3257 }
3258
3259 /* Read Base */
3260 pci_read_config_dword(dev, ent_offset, &base);
3261 start = (base & PCI_EA_FIELD_MASK);
3262 ent_offset += 4;
3263
3264 /* Read MaxOffset */
3265 pci_read_config_dword(dev, ent_offset, &max_offset);
3266 ent_offset += 4;
3267
3268 /* Read Base MSBs (if 64-bit entry) */
3269 if (base & PCI_EA_IS_64) {
3270 u32 base_upper;
3271
3272 pci_read_config_dword(dev, ent_offset, &base_upper);
3273 ent_offset += 4;
3274
3275 flags |= IORESOURCE_MEM_64;
3276
3277 /* entry starts above 32-bit boundary, can't use */
3278 if (!support_64 && base_upper)
3279 goto out;
3280
3281 if (support_64)
3282 start |= ((u64)base_upper << 32);
3283 }
3284
3285 end = start + (max_offset | 0x03);
3286
3287 /* Read MaxOffset MSBs (if 64-bit entry) */
3288 if (max_offset & PCI_EA_IS_64) {
3289 u32 max_offset_upper;
3290
3291 pci_read_config_dword(dev, ent_offset, &max_offset_upper);
3292 ent_offset += 4;
3293
3294 flags |= IORESOURCE_MEM_64;
3295
3296 /* entry too big, can't use */
3297 if (!support_64 && max_offset_upper)
3298 goto out;
3299
3300 if (support_64)
3301 end += ((u64)max_offset_upper << 32);
3302 }
3303
3304 if (end < start) {
3305 pci_err(dev, "EA Entry crosses address boundary\n");
3306 goto out;
3307 }
3308
3309 if (ent_size != ent_offset - offset) {
3310 pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
3311 ent_size, ent_offset - offset);
3312 goto out;
3313 }
3314
3315 res->name = pci_name(dev);
3316 res->start = start;
3317 res->end = end;
3318 res->flags = flags;
3319
3320 if (bei <= PCI_EA_BEI_BAR5)
3321 pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3322 bei, res, prop);
3323 else if (bei == PCI_EA_BEI_ROM)
3324 pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
3325 res, prop);
3326 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
3327 pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3328 bei - PCI_EA_BEI_VF_BAR0, res, prop);
3329 else
3330 pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
3331 bei, res, prop);
3332
3333 out:
3334 return offset + ent_size;
3335 }
3336
3337 /* Enhanced Allocation Initialization */
3338 void pci_ea_init(struct pci_dev *dev)
3339 {
3340 int ea;
3341 u8 num_ent;
3342 int offset;
3343 int i;
3344
3345 /* find PCI EA capability in list */
3346 ea = pci_find_capability(dev, PCI_CAP_ID_EA);
3347 if (!ea)
3348 return;
3349
3350 /* determine the number of entries */
3351 pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
3352 &num_ent);
3353 num_ent &= PCI_EA_NUM_ENT_MASK;
3354
3355 offset = ea + PCI_EA_FIRST_ENT;
3356
3357 /* Skip DWORD 2 for type 1 functions */
3358 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
3359 offset += 4;
3360
3361 /* parse each EA entry */
3362 for (i = 0; i < num_ent; ++i)
3363 offset = pci_ea_read(dev, offset);
3364 }
3365
3366 static void pci_add_saved_cap(struct pci_dev *pci_dev,
3367 struct pci_cap_saved_state *new_cap)
3368 {
3369 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
3370 }
3371
3372 /**
3373 * _pci_add_cap_save_buffer - allocate buffer for saving given
3374 * capability registers
3375 * @dev: the PCI device
3376 * @cap: the capability to allocate the buffer for
3377 * @extended: Standard or Extended capability ID
3378 * @size: requested size of the buffer
3379 */
3380 static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
3381 bool extended, unsigned int size)
3382 {
3383 int pos;
3384 struct pci_cap_saved_state *save_state;
3385
3386 if (extended)
3387 pos = pci_find_ext_capability(dev, cap);
3388 else
3389 pos = pci_find_capability(dev, cap);
3390
3391 if (!pos)
3392 return 0;
3393
3394 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
3395 if (!save_state)
3396 return -ENOMEM;
3397
3398 save_state->cap.cap_nr = cap;
3399 save_state->cap.cap_extended = extended;
3400 save_state->cap.size = size;
3401 pci_add_saved_cap(dev, save_state);
3402
3403 return 0;
3404 }
3405
3406 int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
3407 {
3408 return _pci_add_cap_save_buffer(dev, cap, false, size);
3409 }
3410
3411 int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
3412 {
3413 return _pci_add_cap_save_buffer(dev, cap, true, size);
3414 }
3415
3416 /**
3417 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
3418 * @dev: the PCI device
3419 */
3420 void pci_allocate_cap_save_buffers(struct pci_dev *dev)
3421 {
3422 int error;
3423
3424 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
3425 PCI_EXP_SAVE_REGS * sizeof(u16));
3426 if (error)
3427 pci_err(dev, "unable to preallocate PCI Express save buffer\n");
3428
3429 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
3430 if (error)
3431 pci_err(dev, "unable to preallocate PCI-X save buffer\n");
3432
3433 error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
3434 2 * sizeof(u16));
3435 if (error)
3436 pci_err(dev, "unable to allocate suspend buffer for LTR\n");
3437
3438 pci_allocate_vc_save_buffers(dev);
3439 }
3440
3441 void pci_free_cap_save_buffers(struct pci_dev *dev)
3442 {
3443 struct pci_cap_saved_state *tmp;
3444 struct hlist_node *n;
3445
3446 hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
3447 kfree(tmp);
3448 }
3449
3450 /**
3451 * pci_configure_ari - enable or disable ARI forwarding
3452 * @dev: the PCI device
3453 *
3454 * If @dev and its upstream bridge both support ARI, enable ARI in the
3455 * bridge. Otherwise, disable ARI in the bridge.
3456 */
3457 void pci_configure_ari(struct pci_dev *dev)
3458 {
3459 u32 cap;
3460 struct pci_dev *bridge;
3461
3462 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
3463 return;
3464
3465 bridge = dev->bus->self;
3466 if (!bridge)
3467 return;
3468
3469 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3470 if (!(cap & PCI_EXP_DEVCAP2_ARI))
3471 return;
3472
3473 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
3474 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
3475 PCI_EXP_DEVCTL2_ARI);
3476 bridge->ari_enabled = 1;
3477 } else {
3478 pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
3479 PCI_EXP_DEVCTL2_ARI);
3480 bridge->ari_enabled = 0;
3481 }
3482 }
3483
3484 static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
3485 {
3486 int pos;
3487 u16 cap, ctrl;
3488
3489 pos = pdev->acs_cap;
3490 if (!pos)
3491 return false;
3492
3493 /*
3494 * Except for egress control, capabilities are either required
3495 * or only required if controllable. Features missing from the
3496 * capability field can therefore be assumed to be hard-wired enabled.
3497 */
3498 pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
3499 acs_flags &= (cap | PCI_ACS_EC);
3500
3501 pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
3502 return (ctrl & acs_flags) == acs_flags;
3503 }
3504
3505 /**
3506 * pci_acs_enabled - test ACS against required flags for a given device
3507 * @pdev: device to test
3508 * @acs_flags: required PCI ACS flags
3509 *
3510 * Return true if the device supports the provided flags. Automatically
3511 * filters out flags that are not implemented on multifunction devices.
3512 *
3513 * Note that this interface checks the effective ACS capabilities of the
3514 * device rather than the actual capabilities. For instance, most single
3515 * function endpoints are not required to support ACS because they have no
3516 * opportunity for peer-to-peer access. We therefore return 'true'
3517 * regardless of whether the device exposes an ACS capability. This makes
3518 * it much easier for callers of this function to ignore the actual type
3519 * or topology of the device when testing ACS support.
3520 */
3521 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
3522 {
3523 int ret;
3524
3525 ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
3526 if (ret >= 0)
3527 return ret > 0;
3528
3529 /*
3530 * Conventional PCI and PCI-X devices never support ACS, either
3531 * effectively or actually. The shared bus topology implies that
3532 * any device on the bus can receive or snoop DMA.
3533 */
3534 if (!pci_is_pcie(pdev))
3535 return false;
3536
3537 switch (pci_pcie_type(pdev)) {
3538 /*
3539 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
3540 * but since their primary interface is PCI/X, we conservatively
3541 * handle them as we would a non-PCIe device.
3542 */
3543 case PCI_EXP_TYPE_PCIE_BRIDGE:
3544 /*
3545 * PCIe 3.0, 6.12.1 excludes ACS on these devices. "ACS is never
3546 * applicable... must never implement an ACS Extended Capability...".
3547 * This seems arbitrary, but we take a conservative interpretation
3548 * of this statement.
3549 */
3550 case PCI_EXP_TYPE_PCI_BRIDGE:
3551 case PCI_EXP_TYPE_RC_EC:
3552 return false;
3553 /*
3554 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
3555 * implement ACS in order to indicate their peer-to-peer capabilities,
3556 * regardless of whether they are single- or multi-function devices.
3557 */
3558 case PCI_EXP_TYPE_DOWNSTREAM:
3559 case PCI_EXP_TYPE_ROOT_PORT:
3560 return pci_acs_flags_enabled(pdev, acs_flags);
3561 /*
3562 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
3563 * implemented by the remaining PCIe types to indicate peer-to-peer
3564 * capabilities, but only when they are part of a multifunction
3565 * device. The footnote for section 6.12 indicates the specific
3566 * PCIe types included here.
3567 */
3568 case PCI_EXP_TYPE_ENDPOINT:
3569 case PCI_EXP_TYPE_UPSTREAM:
3570 case PCI_EXP_TYPE_LEG_END:
3571 case PCI_EXP_TYPE_RC_END:
3572 if (!pdev->multifunction)
3573 break;
3574
3575 return pci_acs_flags_enabled(pdev, acs_flags);
3576 }
3577
3578 /*
3579 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
3580 * to single function devices with the exception of downstream ports.
3581 */
3582 return true;
3583 }
3584
3585 /**
3586 * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
3587 * @start: starting downstream device
3588 * @end: ending upstream device or NULL to search to the root bus
3589 * @acs_flags: required flags
3590 *
3591 * Walk up a device tree from start to end testing PCI ACS support. If
3592 * any step along the way does not support the required flags, return false.
3593 */
3594 bool pci_acs_path_enabled(struct pci_dev *start,
3595 struct pci_dev *end, u16 acs_flags)
3596 {
3597 struct pci_dev *pdev, *parent = start;
3598
3599 do {
3600 pdev = parent;
3601
3602 if (!pci_acs_enabled(pdev, acs_flags))
3603 return false;
3604
3605 if (pci_is_root_bus(pdev->bus))
3606 return (end == NULL);
3607
3608 parent = pdev->bus->self;
3609 } while (pdev != end);
3610
3611 return true;
3612 }
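
/*
 * Usage sketch (illustrative, not part of this file): a caller such as an
 * IOMMU grouping routine can verify that peer-to-peer isolation holds on
 * the whole path from a device to the root bus. The flag set below is an
 * assumption chosen for the example.
 *
 *	u16 flags = PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF;
 *
 *	if (!pci_acs_path_enabled(pdev, NULL, flags))
 *		pci_info(pdev, "path to root lacks ACS isolation\n");
 */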
3613
3614 /**
3615 * pci_acs_init - Initialize ACS if hardware supports it
3616 * @dev: the PCI device
3617 */
3618 void pci_acs_init(struct pci_dev *dev)
3619 {
3620 dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3621
3622 /*
3623 * Attempt to enable ACS regardless of capability because some Root
3624 * Ports (e.g. those quirked with *_intel_pch_acs_*) do not have
3625 * the standard ACS capability but still support ACS via those
3626 * quirks.
3627 */
3628 pci_enable_acs(dev);
3629 }
3630
3631 /**
3632 * pci_rebar_find_pos - find position of resize ctrl reg for BAR
3633 * @pdev: PCI device
3634 * @bar: BAR to find
3635 *
3636 * Helper to find the position of the ctrl register for a BAR.
3637 * Returns -ENOTSUPP if resizable BARs are not supported at all.
3638 * Returns -ENOENT if no ctrl register for the BAR could be found.
3639 */
3640 static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
3641 {
3642 unsigned int pos, nbars, i;
3643 u32 ctrl;
3644
3645 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
3646 if (!pos)
3647 return -ENOTSUPP;
3648
3649 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3650 nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
3651 PCI_REBAR_CTRL_NBAR_SHIFT;
3652
3653 for (i = 0; i < nbars; i++, pos += 8) {
3654 int bar_idx;
3655
3656 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3657 bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
3658 if (bar_idx == bar)
3659 return pos;
3660 }
3661
3662 return -ENOENT;
3663 }
3664
3665 /**
3666 * pci_rebar_get_possible_sizes - get possible sizes for BAR
3667 * @pdev: PCI device
3668 * @bar: BAR to query
3669 *
3670 * Get the possible sizes of a resizable BAR as bitmask defined in the spec
3671 * (bit 0=1MB, bit 19=512GB). Returns 0 if BAR isn't resizable.
3672 */
3673 u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
3674 {
3675 int pos;
3676 u32 cap;
3677
3678 pos = pci_rebar_find_pos(pdev, bar);
3679 if (pos < 0)
3680 return 0;
3681
3682 pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
3683 cap &= PCI_REBAR_CAP_SIZES;
3684
3685 /* Sapphire RX 5600 XT Pulse has an invalid cap dword for BAR 0 */
3686 if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f &&
3687 bar == 0 && cap == 0x7000)
3688 cap = 0x3f000;
3689
3690 return cap >> 4;
3691 }
3692 EXPORT_SYMBOL(pci_rebar_get_possible_sizes);
3693
3694 /**
3695 * pci_rebar_get_current_size - get the current size of a BAR
3696 * @pdev: PCI device
3697 * @bar: BAR to get the size of
3698 *
3699 * Read the size of a BAR from the resizable BAR config.
3700 * Returns size if found or negative error code.
3701 */
3702 int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
3703 {
3704 int pos;
3705 u32 ctrl;
3706
3707 pos = pci_rebar_find_pos(pdev, bar);
3708 if (pos < 0)
3709 return pos;
3710
3711 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3712 return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> PCI_REBAR_CTRL_BAR_SHIFT;
3713 }
3714
3715 /**
3716 * pci_rebar_set_size - set a new size for a BAR
3717 * @pdev: PCI device
3718 * @bar: BAR to set size to
3719 * @size: new size as defined in the spec (0=1MB, 19=512GB)
3720 *
3721 * Set the new size of a BAR as defined in the spec.
3722 * Returns zero if resizing was successful, error code otherwise.
3723 */
3724 int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
3725 {
3726 int pos;
3727 u32 ctrl;
3728
3729 pos = pci_rebar_find_pos(pdev, bar);
3730 if (pos < 0)
3731 return pos;
3732
3733 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3734 ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
3735 ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
3736 pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
3737 return 0;
3738 }
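
/*
 * Usage sketch (illustrative, not part of this file): choose the largest
 * size a device advertises for a BAR and program it. A real caller must
 * also release and reassign the BAR's resource, which is omitted here;
 * "bar" is a hypothetical index.
 *
 *	u32 sizes = pci_rebar_get_possible_sizes(pdev, bar);
 *
 *	if (sizes) {
 *		int size = __fls(sizes);	// highest supported size bit
 *
 *		if (!pci_rebar_set_size(pdev, bar, size))
 *			pci_info(pdev, "BAR %d resized to 2^(%d+20) bytes\n",
 *				 bar, size);
 *	}
 */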
3739
3740 /**
3741 * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
3742 * @dev: the PCI device
3743 * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
3744 * PCI_EXP_DEVCAP2_ATOMIC_COMP32
3745 * PCI_EXP_DEVCAP2_ATOMIC_COMP64
3746 * PCI_EXP_DEVCAP2_ATOMIC_COMP128
3747 *
3748 * Return 0 if all upstream bridges support AtomicOp routing, egress
3749 * blocking is disabled on all upstream ports, and the root port supports
3750 * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
3751 * AtomicOp completion), or negative otherwise.
3752 */
3753 int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
3754 {
3755 struct pci_bus *bus = dev->bus;
3756 struct pci_dev *bridge;
3757 u32 cap, ctl2;
3758
3759 /*
3760 * Per PCIe r5.0, sec 9.3.5.10, the AtomicOp Requester Enable bit
3761 * in Device Control 2 is reserved in VFs and the PF value applies
3762 * to all associated VFs.
3763 */
3764 if (dev->is_virtfn)
3765 return -EINVAL;
3766
3767 if (!pci_is_pcie(dev))
3768 return -EINVAL;
3769
3770 /*
3771 * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
3772 * AtomicOp requesters. For now, we only support endpoints as
3773 * requesters and root ports as completers. No endpoints as
3774 * completers, and no peer-to-peer.
3775 */
3776
3777 switch (pci_pcie_type(dev)) {
3778 case PCI_EXP_TYPE_ENDPOINT:
3779 case PCI_EXP_TYPE_LEG_END:
3780 case PCI_EXP_TYPE_RC_END:
3781 break;
3782 default:
3783 return -EINVAL;
3784 }
3785
3786 while (bus->parent) {
3787 bridge = bus->self;
3788
3789 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3790
3791 switch (pci_pcie_type(bridge)) {
3792 /* Ensure switch ports support AtomicOp routing */
3793 case PCI_EXP_TYPE_UPSTREAM:
3794 case PCI_EXP_TYPE_DOWNSTREAM:
3795 if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
3796 return -EINVAL;
3797 break;
3798
3799 /* Ensure root port supports all the sizes we care about */
3800 case PCI_EXP_TYPE_ROOT_PORT:
3801 if ((cap & cap_mask) != cap_mask)
3802 return -EINVAL;
3803 break;
3804 }
3805
3806 /* Ensure upstream ports don't block AtomicOps on egress */
3807 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
3808 pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
3809 &ctl2);
3810 if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
3811 return -EINVAL;
3812 }
3813
3814 bus = bus->parent;
3815 }
3816
3817 pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
3818 PCI_EXP_DEVCTL2_ATOMIC_REQ);
3819 return 0;
3820 }
3821 EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
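
/*
 * Usage sketch (illustrative, not part of this file): a driver that issues
 * 64-bit AtomicOps to host memory would typically call this once at probe
 * time and fall back to a non-atomic path on failure.
 *
 *	if (pci_enable_atomic_ops_to_root(pdev,
 *					  PCI_EXP_DEVCAP2_ATOMIC_COMP64))
 *		pci_warn(pdev, "AtomicOps not routed; using fallback\n");
 */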
3822
3823 /**
3824 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
3825 * @dev: the PCI device
3826 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
3827 *
3828 * Perform INTx swizzling for a device behind one level of bridge. This is
3829 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
3830 * behind bridges on add-in cards. For devices with ARI enabled, the slot
3831 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
3832 * the PCI Express Base Specification, Revision 2.1)
3833 */
3834 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
3835 {
3836 int slot;
3837
3838 if (pci_ari_enabled(dev->bus))
3839 slot = 0;
3840 else
3841 slot = PCI_SLOT(dev->devfn);
3842
3843 return (((pin - 1) + slot) % 4) + 1;
3844 }
3845
3846 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
3847 {
3848 u8 pin;
3849
3850 pin = dev->pin;
3851 if (!pin)
3852 return -1;
3853
3854 while (!pci_is_root_bus(dev->bus)) {
3855 pin = pci_swizzle_interrupt_pin(dev, pin);
3856 dev = dev->bus->self;
3857 }
3858 *bridge = dev;
3859 return pin;
3860 }
3861
3862 /**
3863 * pci_common_swizzle - swizzle INTx all the way to root bridge
3864 * @dev: the PCI device
3865 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
3866 *
3867 * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
3868 * bridges all the way up to a PCI root bus.
3869 */
3870 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
3871 {
3872 u8 pin = *pinp;
3873
3874 while (!pci_is_root_bus(dev->bus)) {
3875 pin = pci_swizzle_interrupt_pin(dev, pin);
3876 dev = dev->bus->self;
3877 }
3878 *pinp = pin;
3879 return PCI_SLOT(dev->devfn);
3880 }
3881 EXPORT_SYMBOL_GPL(pci_common_swizzle);
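
/*
 * Worked example (illustrative): a device in slot 3 behind one bridge that
 * asserts INTB (pin = 2) swizzles to ((2 - 1) + 3) % 4 + 1 = 1, i.e. the
 * bridge sees INTA. pci_common_swizzle() repeats this at each bridge level
 * and returns the slot number of the device it reaches on the root bus.
 */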
3882
3883 /**
3884 * pci_release_region - Release a PCI bar
3885 * @pdev: PCI device whose resources were previously reserved by
3886 * pci_request_region()
3887 * @bar: BAR to release
3888 *
3889 * Releases the PCI I/O and memory resources previously reserved by a
3890 * successful call to pci_request_region(). Call this function only
3891 * after all use of the PCI regions has ceased.
3892 */
3893 void pci_release_region(struct pci_dev *pdev, int bar)
3894 {
3895 struct pci_devres *dr;
3896
3897 if (pci_resource_len(pdev, bar) == 0)
3898 return;
3899 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
3900 release_region(pci_resource_start(pdev, bar),
3901 pci_resource_len(pdev, bar));
3902 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
3903 release_mem_region(pci_resource_start(pdev, bar),
3904 pci_resource_len(pdev, bar));
3905
3906 dr = find_pci_dr(pdev);
3907 if (dr)
3908 dr->region_mask &= ~(1 << bar);
3909 }
3910 EXPORT_SYMBOL(pci_release_region);
3911
3912 /**
3913 * __pci_request_region - Reserve PCI I/O and memory resource
3914 * @pdev: PCI device whose resources are to be reserved
3915 * @bar: BAR to be reserved
3916 * @res_name: Name to be associated with resource.
3917 * @exclusive: whether the region access is exclusive or not
3918 *
3919 * Mark the PCI region associated with PCI device @pdev BAR @bar as
3920 * being reserved by owner @res_name. Do not access any
3921 * address inside the PCI regions unless this call returns
3922 * successfully.
3923 *
3924 * If @exclusive is set, then the region is marked so that userspace
3925 * is explicitly not allowed to map the resource via /dev/mem or
3926 * sysfs MMIO access.
3927 *
3928 * Returns 0 on success, or %EBUSY on error. A warning
3929 * message is also printed on failure.
3930 */
3931 static int __pci_request_region(struct pci_dev *pdev, int bar,
3932 const char *res_name, int exclusive)
3933 {
3934 struct pci_devres *dr;
3935
3936 if (pci_resource_len(pdev, bar) == 0)
3937 return 0;
3938
3939 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
3940 if (!request_region(pci_resource_start(pdev, bar),
3941 pci_resource_len(pdev, bar), res_name))
3942 goto err_out;
3943 } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
3944 if (!__request_mem_region(pci_resource_start(pdev, bar),
3945 pci_resource_len(pdev, bar), res_name,
3946 exclusive))
3947 goto err_out;
3948 }
3949
3950 dr = find_pci_dr(pdev);
3951 if (dr)
3952 dr->region_mask |= 1 << bar;
3953
3954 return 0;
3955
3956 err_out:
3957 pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
3958 &pdev->resource[bar]);
3959 return -EBUSY;
3960 }
3961
3962 /**
3963 * pci_request_region - Reserve PCI I/O and memory resource
3964 * @pdev: PCI device whose resources are to be reserved
3965 * @bar: BAR to be reserved
3966 * @res_name: Name to be associated with resource
3967 *
3968 * Mark the PCI region associated with PCI device @pdev BAR @bar as
3969 * being reserved by owner @res_name. Do not access any
3970 * address inside the PCI regions unless this call returns
3971 * successfully.
3972 *
3973 * Returns 0 on success, or %EBUSY on error. A warning
3974 * message is also printed on failure.
3975 */
3976 int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
3977 {
3978 return __pci_request_region(pdev, bar, res_name, 0);
3979 }
3980 EXPORT_SYMBOL(pci_request_region);
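
/*
 * Usage sketch (illustrative, not part of this file): the usual pairing in
 * a driver's probe and remove paths. "mydrv" is a hypothetical owner name
 * and BAR 0 is assumed to hold the device registers.
 *
 *	err = pci_request_region(pdev, 0, "mydrv");
 *	if (err)
 *		return err;
 *	...
 *	pci_release_region(pdev, 0);	// error and remove paths
 */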
3981
3982 /**
3983 * pci_release_selected_regions - Release selected PCI I/O and memory resources
3984 * @pdev: PCI device whose resources were previously reserved
3985 * @bars: Bitmask of BARs to be released
3986 *
3987 * Release selected PCI I/O and memory resources previously reserved.
3988 * Call this function only after all use of the PCI regions has ceased.
3989 */
3990 void pci_release_selected_regions(struct pci_dev *pdev, int bars)
3991 {
3992 int i;
3993
3994 for (i = 0; i < PCI_STD_NUM_BARS; i++)
3995 if (bars & (1 << i))
3996 pci_release_region(pdev, i);
3997 }
3998 EXPORT_SYMBOL(pci_release_selected_regions);
3999
4000 static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
4001 const char *res_name, int excl)
4002 {
4003 int i;
4004
4005 for (i = 0; i < PCI_STD_NUM_BARS; i++)
4006 if (bars & (1 << i))
4007 if (__pci_request_region(pdev, i, res_name, excl))
4008 goto err_out;
4009 return 0;
4010
4011 err_out:
4012 while (--i >= 0)
4013 if (bars & (1 << i))
4014 pci_release_region(pdev, i);
4015
4016 return -EBUSY;
4017 }
4018
4019
4020 /**
4021 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
4022 * @pdev: PCI device whose resources are to be reserved
4023 * @bars: Bitmask of BARs to be requested
4024 * @res_name: Name to be associated with resource
4025 */
4026 int pci_request_selected_regions(struct pci_dev *pdev, int bars,
4027 const char *res_name)
4028 {
4029 return __pci_request_selected_regions(pdev, bars, res_name, 0);
4030 }
4031 EXPORT_SYMBOL(pci_request_selected_regions);
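
/*
 * Usage sketch (illustrative, not part of this file): request only the
 * memory BARs and skip any I/O port BARs. The bitmask is commonly built
 * with pci_select_bars(); "mydrv" is hypothetical.
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *
 *	err = pci_request_selected_regions(pdev, bars, "mydrv");
 *	if (err)
 *		return err;
 */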
4032
4033 int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
4034 const char *res_name)
4035 {
4036 return __pci_request_selected_regions(pdev, bars, res_name,
4037 IORESOURCE_EXCLUSIVE);
4038 }
4039 EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
4040
4041 /**
4042 * pci_release_regions - Release reserved PCI I/O and memory resources
4043 * @pdev: PCI device whose resources were previously reserved by
4044 * pci_request_regions()
4045 *
4046 * Releases all PCI I/O and memory resources previously reserved by a
4047 * successful call to pci_request_regions(). Call this function only
4048 * after all use of the PCI regions has ceased.
4049 */
4050
4051 void pci_release_regions(struct pci_dev *pdev)
4052 {
4053 pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
4054 }
4055 EXPORT_SYMBOL(pci_release_regions);
4056
4057 /**
4058 * pci_request_regions - Reserve PCI I/O and memory resources
4059 * @pdev: PCI device whose resources are to be reserved
4060 * @res_name: Name to be associated with resource.
4061 *
4062 * Mark all PCI regions associated with PCI device @pdev as
4063 * being reserved by owner @res_name. Do not access any
4064 * address inside the PCI regions unless this call returns
4065 * successfully.
4066 *
4067 * Returns 0 on success, or %EBUSY on error. A warning
4068 * message is also printed on failure.
4069 */
4070 int pci_request_regions(struct pci_dev *pdev, const char *res_name)
4071 {
4072 return pci_request_selected_regions(pdev,
4073 ((1 << PCI_STD_NUM_BARS) - 1), res_name);
4074 }
4075 EXPORT_SYMBOL(pci_request_regions);
4076
4077 /**
4078 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
4079 * @pdev: PCI device whose resources are to be reserved
4080 * @res_name: Name to be associated with resource.
4081 *
4082 * Mark all PCI regions associated with PCI device @pdev as being reserved
4083 * by owner @res_name. Do not access any address inside the PCI regions
4084 * unless this call returns successfully.
4085 *
4086 * pci_request_regions_exclusive() will mark the region so that /dev/mem
4087 * and the sysfs MMIO access will not be allowed.
4088 *
4089 * Returns 0 on success, or %EBUSY on error. A warning message is also
4090 * printed on failure.
4091 */
4092 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
4093 {
4094 return pci_request_selected_regions_exclusive(pdev,
4095 ((1 << PCI_STD_NUM_BARS) - 1), res_name);
4096 }
4097 EXPORT_SYMBOL(pci_request_regions_exclusive);
4098
4099 /*
4100 * Record the PCI IO range (expressed as CPU physical address + size).
4101 * Return a negative value if an error has occurred, zero otherwise.
4102 */
4103 int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
4104 resource_size_t size)
4105 {
4106 int ret = 0;
4107 #ifdef PCI_IOBASE
4108 struct logic_pio_hwaddr *range;
4109
4110 if (!size || addr + size < addr)
4111 return -EINVAL;
4112
4113 range = kzalloc(sizeof(*range), GFP_ATOMIC);
4114 if (!range)
4115 return -ENOMEM;
4116
4117 range->fwnode = fwnode;
4118 range->size = size;
4119 range->hw_start = addr;
4120 range->flags = LOGIC_PIO_CPU_MMIO;
4121
4122 ret = logic_pio_register_range(range);
4123 if (ret)
4124 kfree(range);
4125
4126 /* Ignore duplicates due to deferred probing */
4127 if (ret == -EEXIST)
4128 ret = 0;
4129 #endif
4130
4131 return ret;
4132 }
4133
4134 phys_addr_t pci_pio_to_address(unsigned long pio)
4135 {
4136 phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
4137
4138 #ifdef PCI_IOBASE
4139 if (pio >= MMIO_UPPER_LIMIT)
4140 return address;
4141
4142 address = logic_pio_to_hwaddr(pio);
4143 #endif
4144
4145 return address;
4146 }
4147 EXPORT_SYMBOL_GPL(pci_pio_to_address);
4148
4149 unsigned long __weak pci_address_to_pio(phys_addr_t address)
4150 {
4151 #ifdef PCI_IOBASE
4152 return logic_pio_trans_cpuaddr(address);
4153 #else
4154 if (address > IO_SPACE_LIMIT)
4155 return (unsigned long)-1;
4156
4157 return (unsigned long) address;
4158 #endif
4159 }
4160
4161 /**
4162 * pci_remap_iospace - Remap the memory mapped I/O space
4163 * @res: Resource describing the I/O space
4164 * @phys_addr: physical address of range to be mapped
4165 *
4166 * Remap the memory mapped I/O space described by the @res and the CPU
4167 * physical address @phys_addr into virtual address space. Only
4168 * architectures that have memory mapped IO functions defined (and the
4169 * PCI_IOBASE value defined) should call this function.
4170 */
4171 #ifndef pci_remap_iospace
4172 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
4173 {
4174 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4175 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4176
4177 if (!(res->flags & IORESOURCE_IO))
4178 return -EINVAL;
4179
4180 if (res->end > IO_SPACE_LIMIT)
4181 return -EINVAL;
4182
4183 return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
4184 pgprot_device(PAGE_KERNEL));
4185 #else
4186 /*
4187 * This architecture does not have memory mapped I/O space,
4188 * so this function should never be called
4189 */
4190 WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
4191 return -ENODEV;
4192 #endif
4193 }
4194 EXPORT_SYMBOL(pci_remap_iospace);
4195 #endif
4196
4197 /**
4198 * pci_unmap_iospace - Unmap the memory mapped I/O space
4199 * @res: resource to be unmapped
4200 *
4201 * Unmap the CPU virtual address @res from virtual address space. Only
4202 * architectures that have memory mapped IO functions defined (and the
4203 * PCI_IOBASE value defined) should call this function.
4204 */
4205 void pci_unmap_iospace(struct resource *res)
4206 {
4207 #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4208 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4209
4210 vunmap_range(vaddr, vaddr + resource_size(res));
4211 #endif
4212 }
4213 EXPORT_SYMBOL(pci_unmap_iospace);
4214
4215 static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
4216 {
4217 struct resource **res = ptr;
4218
4219 pci_unmap_iospace(*res);
4220 }
4221
4222 /**
4223 * devm_pci_remap_iospace - Managed pci_remap_iospace()
4224 * @dev: Generic device to remap IO address for
4225 * @res: Resource describing the I/O space
4226 * @phys_addr: physical address of range to be mapped
4227 *
4228 * Managed pci_remap_iospace(). Map is automatically unmapped on driver
4229 * detach.
4230 */
4231 int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
4232 phys_addr_t phys_addr)
4233 {
4234 const struct resource **ptr;
4235 int error;
4236
4237 ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
4238 if (!ptr)
4239 return -ENOMEM;
4240
4241 error = pci_remap_iospace(res, phys_addr);
4242 if (error) {
4243 devres_free(ptr);
4244 } else {
4245 *ptr = res;
4246 devres_add(dev, ptr);
4247 }
4248
4249 return error;
4250 }
4251 EXPORT_SYMBOL(devm_pci_remap_iospace);
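
/*
 * Usage sketch (illustrative, not part of this file): a host bridge driver
 * maps its I/O window once at probe time. "pio_res" and "io_phys_base" are
 * hypothetical values taken from the controller's bus ranges.
 *
 *	err = devm_pci_remap_iospace(dev, pio_res, io_phys_base);
 *	if (err)
 *		return err;
 */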
4252
4253 /**
4254 * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
4255 * @dev: Generic device to remap IO address for
4256 * @offset: Resource address to map
4257 * @size: Size of map
4258 *
4259 * Managed pci_remap_cfgspace(). Map is automatically unmapped on driver
4260 * detach.
4261 */
4262 void __iomem *devm_pci_remap_cfgspace(struct device *dev,
4263 resource_size_t offset,
4264 resource_size_t size)
4265 {
4266 void __iomem **ptr, *addr;
4267
4268 ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
4269 if (!ptr)
4270 return NULL;
4271
4272 addr = pci_remap_cfgspace(offset, size);
4273 if (addr) {
4274 *ptr = addr;
4275 devres_add(dev, ptr);
4276 } else
4277 devres_free(ptr);
4278
4279 return addr;
4280 }
4281 EXPORT_SYMBOL(devm_pci_remap_cfgspace);
4282
4283 /**
4284 * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
4285 * @dev: generic device to handle the resource for
4286 * @res: configuration space resource to be handled
4287 *
4288 * Checks that a resource is a valid memory region, requests the memory
4289 * region and ioremaps with pci_remap_cfgspace() API that ensures the
4290 * proper PCI configuration space memory attributes are guaranteed.
4291 *
4292 * All operations are managed and will be undone on driver detach.
4293 *
4294 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
4295 * on failure. Usage example::
4296 *
4297 * res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4298 * base = devm_pci_remap_cfg_resource(&pdev->dev, res);
4299 * if (IS_ERR(base))
4300 * return PTR_ERR(base);
4301 */
4302 void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
4303 struct resource *res)
4304 {
4305 resource_size_t size;
4306 const char *name;
4307 void __iomem *dest_ptr;
4308
4309 BUG_ON(!dev);
4310
4311 if (!res || resource_type(res) != IORESOURCE_MEM) {
4312 dev_err(dev, "invalid resource\n");
4313 return IOMEM_ERR_PTR(-EINVAL);
4314 }
4315
4316 size = resource_size(res);
4317
4318 if (res->name)
4319 name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", dev_name(dev),
4320 res->name);
4321 else
4322 name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
4323 if (!name)
4324 return IOMEM_ERR_PTR(-ENOMEM);
4325
4326 if (!devm_request_mem_region(dev, res->start, size, name)) {
4327 dev_err(dev, "can't request region for resource %pR\n", res);
4328 return IOMEM_ERR_PTR(-EBUSY);
4329 }
4330
4331 dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
4332 if (!dest_ptr) {
4333 dev_err(dev, "ioremap failed for resource %pR\n", res);
4334 devm_release_mem_region(dev, res->start, size);
4335 dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
4336 }
4337
4338 return dest_ptr;
4339 }
4340 EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
4341
4342 static void __pci_set_master(struct pci_dev *dev, bool enable)
4343 {
4344 u16 old_cmd, cmd;
4345
4346 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
4347 if (enable)
4348 cmd = old_cmd | PCI_COMMAND_MASTER;
4349 else
4350 cmd = old_cmd & ~PCI_COMMAND_MASTER;
4351 if (cmd != old_cmd) {
4352 pci_dbg(dev, "%s bus mastering\n",
4353 enable ? "enabling" : "disabling");
4354 pci_write_config_word(dev, PCI_COMMAND, cmd);
4355 }
4356 dev->is_busmaster = enable;
4357 }
4358
4359 /**
4360 * pcibios_setup - process "pci=" kernel boot arguments
4361 * @str: string used to pass in "pci=" kernel boot arguments
4362 *
4363 * Process kernel boot arguments. This is the default implementation.
4364 * Architecture specific implementations can override this as necessary.
4365 */
4366 char * __weak __init pcibios_setup(char *str)
4367 {
4368 return str;
4369 }
4370
4371 /**
4372 * pcibios_set_master - enable PCI bus-mastering for device dev
4373 * @dev: the PCI device to enable
4374 *
4375 * Enables PCI bus-mastering for the device. This is the default
4376 * implementation. Architecture specific implementations can override
4377 * this if necessary.
4378 */
4379 void __weak pcibios_set_master(struct pci_dev *dev)
4380 {
4381 u8 lat;
4382
4383 /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
4384 if (pci_is_pcie(dev))
4385 return;
4386
4387 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
4388 if (lat < 16)
4389 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
4390 else if (lat > pcibios_max_latency)
4391 lat = pcibios_max_latency;
4392 else
4393 return;
4394
4395 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
4396 }
4397
4398 /**
4399 * pci_set_master - enables bus-mastering for device dev
4400 * @dev: the PCI device to enable
4401 *
4402 * Enables bus-mastering on the device and calls pcibios_set_master()
4403 * to do the needed arch specific settings.
4404 */
4405 void pci_set_master(struct pci_dev *dev)
4406 {
4407 __pci_set_master(dev, true);
4408 pcibios_set_master(dev);
4409 }
4410 EXPORT_SYMBOL(pci_set_master);
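
/*
 * Usage sketch (illustrative, not part of this file): bus mastering is
 * typically enabled right after the device itself, since both DMA and
 * MSI/MSI-X delivery require the Bus Master bit.
 *
 *	err = pcim_enable_device(pdev);
 *	if (err)
 *		return err;
 *	pci_set_master(pdev);
 */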
4411
4412 /**
4413 * pci_clear_master - disables bus-mastering for device dev
4414 * @dev: the PCI device to disable
4415 */
4416 void pci_clear_master(struct pci_dev *dev)
4417 {
4418 __pci_set_master(dev, false);
4419 }
4420 EXPORT_SYMBOL(pci_clear_master);
4421
4422 /**
4423 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
4424 * @dev: the PCI device for which MWI is to be enabled
4425 *
4426 * Helper function for pci_set_mwi.
4427 * Originally copied from drivers/net/acenic.c.
4428 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
4429 *
4430 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4431 */
4432 int pci_set_cacheline_size(struct pci_dev *dev)
4433 {
4434 u8 cacheline_size;
4435
4436 if (!pci_cache_line_size)
4437 return -EINVAL;
4438
4439 /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
4440 equal to or a multiple of the right value. */
4441 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4442 if (cacheline_size >= pci_cache_line_size &&
4443 (cacheline_size % pci_cache_line_size) == 0)
4444 return 0;
4445
4446 /* Write the correct value. */
4447 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
4448 /* Read it back. */
4449 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4450 if (cacheline_size == pci_cache_line_size)
4451 return 0;
4452
4453 pci_dbg(dev, "cache line size of %d is not supported\n",
4454 pci_cache_line_size << 2);
4455
4456 return -EINVAL;
4457 }
4458 EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
4459
4460 /**
4461 * pci_set_mwi - enables memory-write-invalidate PCI transaction
4462 * @dev: the PCI device for which MWI is enabled
4463 *
4464 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4465 *
4466 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4467 */
4468 int pci_set_mwi(struct pci_dev *dev)
4469 {
4470 #ifdef PCI_DISABLE_MWI
4471 return 0;
4472 #else
4473 int rc;
4474 u16 cmd;
4475
4476 rc = pci_set_cacheline_size(dev);
4477 if (rc)
4478 return rc;
4479
4480 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4481 if (!(cmd & PCI_COMMAND_INVALIDATE)) {
4482 pci_dbg(dev, "enabling Mem-Wr-Inval\n");
4483 cmd |= PCI_COMMAND_INVALIDATE;
4484 pci_write_config_word(dev, PCI_COMMAND, cmd);
4485 }
4486 return 0;
4487 #endif
4488 }
4489 EXPORT_SYMBOL(pci_set_mwi);
4490
4491 /**
4492 * pcim_set_mwi - a device-managed pci_set_mwi()
4493 * @dev: the PCI device for which MWI is enabled
4494 *
4495 * Managed pci_set_mwi().
4496 *
4497 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4498 */
4499 int pcim_set_mwi(struct pci_dev *dev)
4500 {
4501 struct pci_devres *dr;
4502
4503 dr = find_pci_dr(dev);
4504 if (!dr)
4505 return -ENOMEM;
4506
4507 dr->mwi = 1;
4508 return pci_set_mwi(dev);
4509 }
4510 EXPORT_SYMBOL(pcim_set_mwi);
4511
4512 /**
4513 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
4514 * @dev: the PCI device for which MWI is enabled
4515 *
4516 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4517 * Callers are not required to check the return value.
4518 *
4519 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4520 */
4521 int pci_try_set_mwi(struct pci_dev *dev)
4522 {
4523 #ifdef PCI_DISABLE_MWI
4524 return 0;
4525 #else
4526 return pci_set_mwi(dev);
4527 #endif
4528 }
4529 EXPORT_SYMBOL(pci_try_set_mwi);
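
/*
 * Usage sketch (illustrative, not part of this file): MWI is purely an
 * optimization, so most drivers use the "try" variant and ignore the
 * result rather than failing the probe.
 *
 *	pci_try_set_mwi(pdev);		// best effort
 */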
4530
4531 /**
4532 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
4533 * @dev: the PCI device to disable
4534 *
4535 * Disables PCI Memory-Write-Invalidate transaction on the device
4536 */
4537 void pci_clear_mwi(struct pci_dev *dev)
4538 {
4539 #ifndef PCI_DISABLE_MWI
4540 u16 cmd;
4541
4542 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4543 if (cmd & PCI_COMMAND_INVALIDATE) {
4544 cmd &= ~PCI_COMMAND_INVALIDATE;
4545 pci_write_config_word(dev, PCI_COMMAND, cmd);
4546 }
4547 #endif
4548 }
4549 EXPORT_SYMBOL(pci_clear_mwi);
4550
4551 /**
4552 * pci_disable_parity - disable parity checking for device
4553 * @dev: the PCI device to operate on
4554 *
4555 * Disable parity checking for device @dev
4556 */
4557 void pci_disable_parity(struct pci_dev *dev)
4558 {
4559 u16 cmd;
4560
4561 pci_read_config_word(dev, PCI_COMMAND, &cmd);
4562 if (cmd & PCI_COMMAND_PARITY) {
4563 cmd &= ~PCI_COMMAND_PARITY;
4564 pci_write_config_word(dev, PCI_COMMAND, cmd);
4565 }
4566 }
4567
4568 /**
4569 * pci_intx - enables/disables PCI INTx for device dev
4570 * @pdev: the PCI device to operate on
4571 * @enable: boolean: whether to enable or disable PCI INTx
4572 *
4573 * Enables/disables PCI INTx for device @pdev
4574 */
4575 void pci_intx(struct pci_dev *pdev, int enable)
4576 {
4577 u16 pci_command, new;
4578
4579 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
4580
4581 if (enable)
4582 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
4583 else
4584 new = pci_command | PCI_COMMAND_INTX_DISABLE;
4585
4586 if (new != pci_command) {
4587 struct pci_devres *dr;
4588
4589 pci_write_config_word(pdev, PCI_COMMAND, new);
4590
4591 dr = find_pci_dr(pdev);
4592 if (dr && !dr->restore_intx) {
4593 dr->restore_intx = 1;
4594 dr->orig_intx = !enable;
4595 }
4596 }
4597 }
4598 EXPORT_SYMBOL_GPL(pci_intx);
4599
4600 static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
4601 {
4602 struct pci_bus *bus = dev->bus;
4603 bool mask_updated = true;
4604 u32 cmd_status_dword;
4605 u16 origcmd, newcmd;
4606 unsigned long flags;
4607 bool irq_pending;
4608
4609 /*
4610 * We do a single dword read to retrieve both command and status.
4611 * Document assumptions that make this possible.
4612 */
4613 BUILD_BUG_ON(PCI_COMMAND % 4);
4614 BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
4615
4616 raw_spin_lock_irqsave(&pci_lock, flags);
4617
4618 bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
4619
4620 irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
4621
4622 /*
4623 * Check interrupt status register to see whether our device
4624 * triggered the interrupt (when masking) or the next IRQ is
4625 * already pending (when unmasking).
4626 */
4627 if (mask != irq_pending) {
4628 mask_updated = false;
4629 goto done;
4630 }
4631
4632 origcmd = cmd_status_dword;
4633 newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
4634 if (mask)
4635 newcmd |= PCI_COMMAND_INTX_DISABLE;
4636 if (newcmd != origcmd)
4637 bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
4638
4639 done:
4640 raw_spin_unlock_irqrestore(&pci_lock, flags);
4641
4642 return mask_updated;
4643 }
4644
4645 /**
4646 * pci_check_and_mask_intx - mask INTx on pending interrupt
4647 * @dev: the PCI device to operate on
4648 *
4649 * Check if the device dev has its INTx line asserted, mask it and return
4650 * true in that case. False is returned if no interrupt was pending.
4651 */
4652 bool pci_check_and_mask_intx(struct pci_dev *dev)
4653 {
4654 return pci_check_and_set_intx_mask(dev, true);
4655 }
4656 EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
4657
4658 /**
4659 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
4660 * @dev: the PCI device to operate on
4661 *
4662 * Check if the device dev has its INTx line asserted, unmask it if not and
4663 * return true. False is returned and the mask remains active if there was
4664 * still an interrupt pending.
4665 */
4666 bool pci_check_and_unmask_intx(struct pci_dev *dev)
4667 {
4668 return pci_check_and_set_intx_mask(dev, false);
4669 }
4670 EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
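
/*
 * Usage sketch (illustrative, not part of this file): a driver with no
 * device-specific interrupt disable bit can use the mask/unmask pair to
 * share an INTx line, masking in the hard handler and unmasking once the
 * threaded handler has serviced the device. "mydrv" is hypothetical.
 *
 *	static irqreturn_t mydrv_irq(int irq, void *data)
 *	{
 *		struct mydrv *priv = data;
 *
 *		if (!pci_check_and_mask_intx(priv->pdev))
 *			return IRQ_NONE;	// not our interrupt
 *		return IRQ_WAKE_THREAD;		// INTx stays masked meanwhile
 *	}
 */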
4671
4672 /**
4673 * pci_wait_for_pending_transaction - wait for pending transaction
4674 * @dev: the PCI device to operate on
4675 *
4676 * Return 0 if a transaction is still pending, 1 otherwise.
4677 */
4678 int pci_wait_for_pending_transaction(struct pci_dev *dev)
4679 {
4680 if (!pci_is_pcie(dev))
4681 return 1;
4682
4683 return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
4684 PCI_EXP_DEVSTA_TRPND);
4685 }
4686 EXPORT_SYMBOL(pci_wait_for_pending_transaction);
4687
4688 /**
4689 * pcie_flr - initiate a PCIe function level reset
4690 * @dev: device to reset
4691 *
4692 * Initiate a function level reset unconditionally on @dev without
4693 * checking any flags or DEVCAP.
4694 */
4695 int pcie_flr(struct pci_dev *dev)
4696 {
4697 if (!pci_wait_for_pending_transaction(dev))
4698 pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
4699
4700 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
4701
4702 if (dev->imm_ready)
4703 return 0;
4704
4705 /*
4706 * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
4707 * 100ms, but may silently discard requests while the FLR is in
4708 * progress. Wait 100ms before trying to access the device.
4709 */
4710 msleep(100);
4711
4712 return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS);
4713 }
4714 EXPORT_SYMBOL_GPL(pcie_flr);
4715
4716 /**
4717 * pcie_reset_flr - initiate a PCIe function level reset
4718 * @dev: device to reset
4719 * @probe: if true, return 0 if device can be reset this way
4720 *
4721 * Initiate a function level reset on @dev.
4722 */
4723 int pcie_reset_flr(struct pci_dev *dev, bool probe)
4724 {
4725 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4726 return -ENOTTY;
4727
4728 if (!(dev->devcap & PCI_EXP_DEVCAP_FLR))
4729 return -ENOTTY;
4730
4731 if (probe)
4732 return 0;
4733
4734 return pcie_flr(dev);
4735 }
4736 EXPORT_SYMBOL_GPL(pcie_reset_flr);
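
/*
 * Usage sketch (illustrative, not part of this file): probe first, then
 * reset with the device quiesced and its config space saved.
 *
 *	if (pcie_reset_flr(pdev, PCI_RESET_PROBE) == 0) {
 *		pci_save_state(pdev);
 *		pcie_reset_flr(pdev, PCI_RESET_DO_RESET);
 *		pci_restore_state(pdev);
 *	}
 */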
4737
4738 static int pci_af_flr(struct pci_dev *dev, bool probe)
4739 {
4740 int pos;
4741 u8 cap;
4742
4743 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
4744 if (!pos)
4745 return -ENOTTY;
4746
4747 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4748 return -ENOTTY;
4749
4750 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
4751 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
4752 return -ENOTTY;
4753
4754 if (probe)
4755 return 0;
4756
4757 /*
4758 * Wait for Transaction Pending bit to clear. A word-aligned test
4759 * is used, so we use the control offset rather than status and shift
4760 * the test bit to match.
4761 */
4762 if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
4763 PCI_AF_STATUS_TP << 8))
4764 pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
4765
4766 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
4767
4768 if (dev->imm_ready)
4769 return 0;
4770
4771 /*
4772 * Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006,
4773 * updated 27 July 2006; a device must complete an FLR within
4774 * 100ms, but may silently discard requests while the FLR is in
4775 * progress. Wait 100ms before trying to access the device.
4776 */
4777 msleep(100);
4778
4779 return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS);
4780 }
4781
4782 /**
4783 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
4784 * @dev: Device to reset.
4785 * @probe: if true, return 0 if the device can be reset this way.
4786 *
4787 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
4788 * unset, it will be reinitialized internally when going from PCI_D3hot to
4789 * PCI_D0. If that's the case and the device is not in a low-power state
4790 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
4791 *
4792 * NOTE: This causes the caller to sleep for twice the device power transition
4793 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
4794 * by default (i.e. unless the @dev's d3hot_delay field has a different value).
4795 * Moreover, only devices in D0 can be reset by this function.
4796 */
4797 static int pci_pm_reset(struct pci_dev *dev, bool probe)
4798 {
4799 u16 csr;
4800
4801 if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
4802 return -ENOTTY;
4803
4804 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
4805 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
4806 return -ENOTTY;
4807
4808 if (probe)
4809 return 0;
4810
4811 if (dev->current_state != PCI_D0)
4812 return -EINVAL;
4813
4814 csr &= ~PCI_PM_CTRL_STATE_MASK;
4815 csr |= PCI_D3hot;
4816 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4817 pci_dev_d3_sleep(dev);
4818
4819 csr &= ~PCI_PM_CTRL_STATE_MASK;
4820 csr |= PCI_D0;
4821 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4822 pci_dev_d3_sleep(dev);
4823
4824 return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS);
4825 }
4826
4827 /**
4828 * pcie_wait_for_link_delay - Wait until link is active or inactive
4829 * @pdev: Bridge device
4830 * @active: waiting for active or inactive?
4831 * @delay: Delay to wait after link has become active (in ms)
4832 *
4833 * Use this to wait until the link becomes active or inactive.
4834 */
4835 static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
4836 int delay)
4837 {
4838 int timeout = 1000;
4839 bool ret;
4840 u16 lnk_status;
4841
4842 /*
4843 * Some controllers might not implement link active reporting. In this
4844 * case, we wait for 1000 ms + any delay requested by the caller.
4845 */
4846 if (!pdev->link_active_reporting) {
4847 msleep(timeout + delay);
4848 return true;
4849 }
4850
4851 /*
4852 * Per PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within
4853 * 20ms, after which we should expect the link to become active if the
4854 * reset was successful. If so, software must wait a minimum of 100 ms
4855 * before sending configuration requests to devices downstream of this port.
4856 *
4857 * If the link fails to activate, either the device was physically
4858 * removed or the link is permanently failed.
4859 */
4860 if (active)
4861 msleep(20);
4862 for (;;) {
4863 pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
4864 ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
4865 if (ret == active)
4866 break;
4867 if (timeout <= 0)
4868 break;
4869 msleep(10);
4870 timeout -= 10;
4871 }
4872 if (active && ret)
4873 msleep(delay);
4874
4875 return ret == active;
4876 }
4877
4878 /**
4879 * pcie_wait_for_link - Wait until link is active or inactive
4880 * @pdev: Bridge device
4881 * @active: waiting for active or inactive?
4882 *
4883 * Use this to wait until the link becomes active or inactive.
4884 */
4885 bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
4886 {
4887 return pcie_wait_for_link_delay(pdev, active, 100);
4888 }
4889
4890 /*
4891 * Find maximum D3cold delay required by all the devices on the bus. The
4892 * spec says 100 ms, but firmware can lower it and we allow drivers to
4893 * increase it as well.
4894 *
4895 * Called with @pci_bus_sem locked for reading.
4896 */
4897 static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
4898 {
4899 const struct pci_dev *pdev;
4900 int min_delay = 100;
4901 int max_delay = 0;
4902
4903 list_for_each_entry(pdev, &bus->devices, bus_list) {
4904 if (pdev->d3cold_delay < min_delay)
4905 min_delay = pdev->d3cold_delay;
4906 if (pdev->d3cold_delay > max_delay)
4907 max_delay = pdev->d3cold_delay;
4908 }
4909
4910 return max(min_delay, max_delay);
4911 }
4912
4913 /**
4914 * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
4915 * @dev: PCI bridge
4916 *
4917 * Handle necessary delays before access to the devices on the secondary
4918 * side of the bridge are permitted after D3cold to D0 transition.
4919 *
4920 * For PCIe this means the delays in PCIe 5.0 section 6.6.1. For
4921 * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section
4922 * 4.3.2.
4923 */
4924 void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
4925 {
4926 struct pci_dev *child;
4927 int delay;
4928
4929 if (pci_dev_is_disconnected(dev))
4930 return;
4931
4932 if (!pci_is_bridge(dev) || !dev->bridge_d3)
4933 return;
4934
4935 down_read(&pci_bus_sem);
4936
4937 /*
4938 * We only deal with devices that are present currently on the bus.
4939 * For any hot-added devices the access delay is handled in pciehp
4940 * board_added(). In case of ACPI hotplug the firmware is expected
4941 * to configure the devices before OS is notified.
4942 */
4943 if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
4944 up_read(&pci_bus_sem);
4945 return;
4946 }
4947
4948 /* Take d3cold_delay requirements into account */
4949 delay = pci_bus_max_d3cold_delay(dev->subordinate);
4950 if (!delay) {
4951 up_read(&pci_bus_sem);
4952 return;
4953 }
4954
4955 child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
4956 bus_list);
4957 up_read(&pci_bus_sem);
4958
4959 /*
4960 * For conventional PCI and PCI-X we need to wait Tpvrh + Trhfa before
4961 * accessing the device after reset (that is, 1000 ms + 100 ms). In
4962 * practice this should not be needed because we don't do power
4963 * management for them (see pci_bridge_d3_possible()).
4964 */
4965 if (!pci_is_pcie(dev)) {
4966 pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
4967 msleep(1000 + delay);
4968 return;
4969 }
4970
4971 /*
4972 * PCIe downstream and root ports that do not support speeds greater
4973 * than 5 GT/s need to wait a minimum of 100 ms. For higher speeds
4974 * (gen3) we first need to wait for the data link layer to become
4975 * active.
4976 *
4977 * However, 100 ms is the minimum and the PCIe spec says the
4978 * software must allow at least 1s before it can determine that the
4979 * device that did not respond is a broken device. There is
4980 * evidence that 100 ms is not always enough, for example certain
4981 * Titan Ridge xHCI controller does not always respond to
4982 * configuration requests if we only wait for 100 ms (see
4983 * https://bugzilla.kernel.org/show_bug.cgi?id=203885).
4984 *
4985 * Therefore we wait for 100 ms and check for the device presence.
4986 * If it is still not present give it an additional 100 ms.
4987 */
4988 if (!pcie_downstream_port(dev))
4989 return;
4990
4991 if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
4992 pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
4993 msleep(delay);
4994 } else {
4995 pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
4996 delay);
4997 if (!pcie_wait_for_link_delay(dev, true, delay)) {
4998 /* Did not train, no need to wait any further */
4999 pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n");
5000 return;
5001 }
5002 }
5003
5004 if (!pci_device_is_present(child)) {
5005 pci_dbg(child, "waiting additional %d ms to become accessible\n", delay);
5006 msleep(delay);
5007 }
5008 }
5009
5010 void pci_reset_secondary_bus(struct pci_dev *dev)
5011 {
5012 u16 ctrl;
5013
5014 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
5015 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
5016 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
5017
5018 /*
5019 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms. Double
5020 * this to 2ms to ensure that we meet the minimum requirement.
5021 */
5022 msleep(2);
5023
5024 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
5025 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
5026
5027 /*
5028 * Trhfa for conventional PCI is 2^25 clock cycles.
5029 * Assuming a minimum 33MHz clock this results in a 1s
5030 * delay before we can consider subordinate devices to
5031 * be re-initialized. PCIe has some ways to shorten this,
5032 * but we don't make use of them yet.
5033 */
5034 ssleep(1);
5035 }
5036
5037 void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
5038 {
5039 pci_reset_secondary_bus(dev);
5040 }
5041
5042 /**
5043 * pci_bridge_secondary_bus_reset - Reset the secondary bus on a PCI bridge.
5044 * @dev: Bridge device
5045 *
5046 * Use the bridge control register to assert reset on the secondary bus.
5047 * Devices on the secondary bus are left in power-on state.
5048 */
5049 int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
5050 {
5051 pcibios_reset_secondary_bus(dev);
5052
5053 return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
5054 }
5055 EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
5056
5057 static int pci_parent_bus_reset(struct pci_dev *dev, bool probe)
5058 {
5059 struct pci_dev *pdev;
5060
5061 if (pci_is_root_bus(dev->bus) || dev->subordinate ||
5062 !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
5063 return -ENOTTY;
5064
5065 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
5066 if (pdev != dev)
5067 return -ENOTTY;
5068
5069 if (probe)
5070 return 0;
5071
5072 return pci_bridge_secondary_bus_reset(dev->bus->self);
5073 }
5074
5075 static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, bool probe)
5076 {
5077 int rc = -ENOTTY;
5078
5079 if (!hotplug || !try_module_get(hotplug->owner))
5080 return rc;
5081
5082 if (hotplug->ops->reset_slot)
5083 rc = hotplug->ops->reset_slot(hotplug, probe);
5084
5085 module_put(hotplug->owner);
5086
5087 return rc;
5088 }
5089
5090 static int pci_dev_reset_slot_function(struct pci_dev *dev, bool probe)
5091 {
5092 if (dev->multifunction || dev->subordinate || !dev->slot ||
5093 dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
5094 return -ENOTTY;
5095
5096 return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
5097 }
5098
5099 static int pci_reset_bus_function(struct pci_dev *dev, bool probe)
5100 {
5101 int rc;
5102
5103 rc = pci_dev_reset_slot_function(dev, probe);
5104 if (rc != -ENOTTY)
5105 return rc;
5106 return pci_parent_bus_reset(dev, probe);
5107 }
5108
5109 void pci_dev_lock(struct pci_dev *dev)
5110 {
5111 pci_cfg_access_lock(dev);
5112 /* block PM suspend, driver probe, etc. */
5113 device_lock(&dev->dev);
5114 }
5115 EXPORT_SYMBOL_GPL(pci_dev_lock);
5116
5117 /* Return 1 on successful lock, 0 on contention */
5118 int pci_dev_trylock(struct pci_dev *dev)
5119 {
5120 if (pci_cfg_access_trylock(dev)) {
5121 if (device_trylock(&dev->dev))
5122 return 1;
5123 pci_cfg_access_unlock(dev);
5124 }
5125
5126 return 0;
5127 }
5128 EXPORT_SYMBOL_GPL(pci_dev_trylock);
5129
5130 void pci_dev_unlock(struct pci_dev *dev)
5131 {
5132 device_unlock(&dev->dev);
5133 pci_cfg_access_unlock(dev);
5134 }
5135 EXPORT_SYMBOL_GPL(pci_dev_unlock);
5136
5137 static void pci_dev_save_and_disable(struct pci_dev *dev)
5138 {
5139 const struct pci_error_handlers *err_handler =
5140 dev->driver ? dev->driver->err_handler : NULL;
5141
5142 /*
5143 * dev->driver->err_handler->reset_prepare() is protected against
5144 * races with ->remove() by the device lock, which must be held by
5145 * the caller.
5146 */
5147 if (err_handler && err_handler->reset_prepare)
5148 err_handler->reset_prepare(dev);
5149
5150 /*
5151 * Wake-up device prior to save. PM registers default to D0 after
5152 * reset and a simple register restore doesn't reliably return
5153 * to a non-D0 state anyway.
5154 */
5155 pci_set_power_state(dev, PCI_D0);
5156
5157 pci_save_state(dev);
5158 /*
5159 * Disable the device by clearing the Command register, except for
5160 * INTx-disable which is set. This not only disables MMIO and I/O port
5161 * BARs, but also prevents the device from being Bus Master, preventing
5162 * DMA from the device including MSI/MSI-X interrupts. For PCI 2.3
5163 * compliant devices, INTx-disable prevents legacy interrupts.
5164 */
5165 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
5166 }
5167
5168 static void pci_dev_restore(struct pci_dev *dev)
5169 {
5170 const struct pci_error_handlers *err_handler =
5171 dev->driver ? dev->driver->err_handler : NULL;
5172
5173 pci_restore_state(dev);
5174
5175 /*
5176 * dev->driver->err_handler->reset_done() is protected against
5177 * races with ->remove() by the device lock, which must be held by
5178 * the caller.
5179 */
5180 if (err_handler && err_handler->reset_done)
5181 err_handler->reset_done(dev);
5182 }
5183
5184 /* dev->reset_methods[] is a 0-terminated list of indices into this array */
5185 static const struct pci_reset_fn_method pci_reset_fn_methods[] = {
5186 { },
5187 { pci_dev_specific_reset, .name = "device_specific" },
5188 { pci_dev_acpi_reset, .name = "acpi" },
5189 { pcie_reset_flr, .name = "flr" },
5190 { pci_af_flr, .name = "af_flr" },
5191 { pci_pm_reset, .name = "pm" },
5192 { pci_reset_bus_function, .name = "bus" },
5193 };
5194
5195 static ssize_t reset_method_show(struct device *dev,
5196 struct device_attribute *attr, char *buf)
5197 {
5198 struct pci_dev *pdev = to_pci_dev(dev);
5199 ssize_t len = 0;
5200 int i, m;
5201
5202 for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
5203 m = pdev->reset_methods[i];
5204 if (!m)
5205 break;
5206
5207 len += sysfs_emit_at(buf, len, "%s%s", len ? " " : "",
5208 pci_reset_fn_methods[m].name);
5209 }
5210
5211 if (len)
5212 len += sysfs_emit_at(buf, len, "\n");
5213
5214 return len;
5215 }
5216
5217 static int reset_method_lookup(const char *name)
5218 {
5219 int m;
5220
5221 for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
5222 if (sysfs_streq(name, pci_reset_fn_methods[m].name))
5223 return m;
5224 }
5225
5226 return 0; /* not found */
5227 }
5228
5229 static ssize_t reset_method_store(struct device *dev,
5230 struct device_attribute *attr,
5231 const char *buf, size_t count)
5232 {
5233 struct pci_dev *pdev = to_pci_dev(dev);
5234 char *options, *name;
5235 int m, n;
5236 u8 reset_methods[PCI_NUM_RESET_METHODS] = { 0 };
5237
5238 if (sysfs_streq(buf, "")) {
5239 pdev->reset_methods[0] = 0;
5240 pci_warn(pdev, "All device reset methods disabled by user");
5241 return count;
5242 }
5243
5244 if (sysfs_streq(buf, "default")) {
5245 pci_init_reset_methods(pdev);
5246 return count;
5247 }
5248
5249 options = kstrndup(buf, count, GFP_KERNEL);
5250 if (!options)
5251 return -ENOMEM;
5252
5253 n = 0;
5254 while ((name = strsep(&options, " ")) != NULL) {
5255 if (sysfs_streq(name, ""))
5256 continue;
5257
5258 name = strim(name);
5259
5260 m = reset_method_lookup(name);
5261 if (!m) {
5262 pci_err(pdev, "Invalid reset method '%s'", name);
5263 goto error;
5264 }
5265
5266 if (pci_reset_fn_methods[m].reset_fn(pdev, PCI_RESET_PROBE)) {
5267 pci_err(pdev, "Unsupported reset method '%s'", name);
5268 goto error;
5269 }
5270
5271 if (n == PCI_NUM_RESET_METHODS - 1) {
5272 pci_err(pdev, "Too many reset methods\n");
5273 goto error;
5274 }
5275
5276 reset_methods[n++] = m;
5277 }
5278
5279 reset_methods[n] = 0;
5280
5281 /* Warn if dev-specific supported but not highest priority */
5282 if (pci_reset_fn_methods[1].reset_fn(pdev, PCI_RESET_PROBE) == 0 &&
5283 reset_methods[0] != 1)
5284 pci_warn(pdev, "Device-specific reset disabled/de-prioritized by user");
5285 memcpy(pdev->reset_methods, reset_methods, sizeof(pdev->reset_methods));
5286 kfree(options);
5287 return count;
5288
5289 error:
5290 /* Leave previous methods unchanged */
5291 kfree(options);
5292 return -EINVAL;
5293 }
5294 static DEVICE_ATTR_RW(reset_method);
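/*
 * The attribute above backs the per-device "reset_method" sysfs file.
 * Illustrative interaction (a sketch only; the device address below is
 * hypothetical):
 *
 *	# cat /sys/bus/pci/devices/0000:01:00.0/reset_method
 *	flr bus
 *	# echo "bus flr" > /sys/bus/pci/devices/0000:01:00.0/reset_method
 *	# echo "default" > /sys/bus/pci/devices/0000:01:00.0/reset_method
 *
 * Writing an empty string disables all reset methods for the device;
 * writing "default" re-probes the full set via pci_init_reset_methods().
 */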
5295
5296 static struct attribute *pci_dev_reset_method_attrs[] = {
5297 &dev_attr_reset_method.attr,
5298 NULL,
5299 };
5300
5301 static umode_t pci_dev_reset_method_attr_is_visible(struct kobject *kobj,
5302 struct attribute *a, int n)
5303 {
5304 struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
5305
5306 if (!pci_reset_supported(pdev))
5307 return 0;
5308
5309 return a->mode;
5310 }
5311
5312 const struct attribute_group pci_dev_reset_method_attr_group = {
5313 .attrs = pci_dev_reset_method_attrs,
5314 .is_visible = pci_dev_reset_method_attr_is_visible,
5315 };
5316
5317 /**
5318 * __pci_reset_function_locked - reset a PCI device function while holding
5319 * the @dev mutex lock.
5320 * @dev: PCI device to reset
5321 *
5322 * Some devices allow an individual function to be reset without affecting
5323 * other functions in the same device. The PCI device must be responsive
5324 * to PCI config space in order to use this function.
5325 *
5326 * The device function is presumed to be unused and the caller is holding
5327 * the device mutex lock when this function is called.
5328 *
5329 * Resetting the device will make the contents of PCI configuration space
5330 * random, so any caller of this must be prepared to reinitialise the
5331 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
5332 * etc.
5333 *
5334 * Returns 0 if the device function was successfully reset or negative if the
5335 * device doesn't support resetting a single function.
5336 */
5337 int __pci_reset_function_locked(struct pci_dev *dev)
5338 {
5339 int i, m, rc;
5340
5341 might_sleep();
5342
5343 /*
5344 * A reset method returns -ENOTTY if it doesn't support this device and
5345 * we should try the next method.
5346 *
5347 * If it returns 0 (success), we're finished. If it returns any other
5348 * error, we're also finished: this indicates that further reset
5349 * mechanisms might be broken on the device.
5350 */
5351 for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
5352 m = dev->reset_methods[i];
5353 if (!m)
5354 return -ENOTTY;
5355
5356 rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_DO_RESET);
5357 if (!rc)
5358 return 0;
5359 if (rc != -ENOTTY)
5360 return rc;
5361 }
5362
5363 return -ENOTTY;
5364 }
5365 EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
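/*
 * Illustrative sketch of the reset_fn convention described above (not a
 * real method in this file; EXAMPLE_VENDOR_ID is hypothetical). A method
 * returns -ENOTTY when it does not apply to the device, so the loop in
 * __pci_reset_function_locked() falls through to the next entry in
 * dev->reset_methods[]:
 *
 *	static int example_reset(struct pci_dev *dev, bool probe)
 *	{
 *		if (dev->vendor != EXAMPLE_VENDOR_ID)
 *			return -ENOTTY;	// not ours; try the next method
 *		if (probe)
 *			return 0;	// PCI_RESET_PROBE: reset is supported
 *		// ... perform the actual reset here ...
 *		return 0;
 *	}
 */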
5366
5367 /**
5368 * pci_init_reset_methods - check whether device can be safely reset
5369 * and store supported reset mechanisms.
5370 * @dev: PCI device to check for reset mechanisms
5371 *
5372 * Some devices allow an individual function to be reset without affecting
5373 * other functions in the same device. The PCI device must be in D0-D3hot
5374 * state.
5375 *
5376 * Stores reset mechanisms supported by device in reset_methods byte array
5377 * which is a member of struct pci_dev.
5378 */
5379 void pci_init_reset_methods(struct pci_dev *dev)
5380 {
5381 int m, i, rc;
5382
5383 BUILD_BUG_ON(ARRAY_SIZE(pci_reset_fn_methods) != PCI_NUM_RESET_METHODS);
5384
5385 might_sleep();
5386
5387 i = 0;
5388 for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
5389 rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_PROBE);
5390 if (!rc)
5391 dev->reset_methods[i++] = m;
5392 else if (rc != -ENOTTY)
5393 break;
5394 }
5395
5396 dev->reset_methods[i] = 0;
5397 }
5398
5399 /**
5400 * pci_reset_function - quiesce and reset a PCI device function
5401 * @dev: PCI device to reset
5402 *
5403 * Some devices allow an individual function to be reset without affecting
5404 * other functions in the same device. The PCI device must be responsive
5405 * to PCI config space in order to use this function.
5406 *
5407 * This function does not just reset the PCI portion of a device, but
5408 * clears all the state associated with the device. This function differs
5409 * from __pci_reset_function_locked() in that it saves and restores device state
5410 * over the reset and takes the PCI device lock.
5411 *
5412 * Returns 0 if the device function was successfully reset or negative if the
5413 * device doesn't support resetting a single function.
5414 */
5415 int pci_reset_function(struct pci_dev *dev)
5416 {
5417 int rc;
5418
5419 if (!pci_reset_supported(dev))
5420 return -ENOTTY;
5421
5422 pci_dev_lock(dev);
5423 pci_dev_save_and_disable(dev);
5424
5425 rc = __pci_reset_function_locked(dev);
5426
5427 pci_dev_restore(dev);
5428 pci_dev_unlock(dev);
5429
5430 return rc;
5431 }
5432 EXPORT_SYMBOL_GPL(pci_reset_function);
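/*
 * Illustrative usage sketch (not part of the original file): a driver
 * recovering a wedged device. pci_reset_function() saves and restores
 * config space itself; "my_drv_hw_init", a hypothetical helper, redoes
 * only device-internal setup.
 *
 *	static int my_drv_recover(struct pci_dev *pdev)
 *	{
 *		int rc = pci_reset_function(pdev);
 *
 *		if (rc)
 *			return rc;	// e.g. -ENOTTY: no usable method
 *		return my_drv_hw_init(pdev);
 *	}
 */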
5433
5434 /**
5435 * pci_reset_function_locked - quiesce and reset a PCI device function
5436 * @dev: PCI device to reset
5437 *
5438 * Some devices allow an individual function to be reset without affecting
5439 * other functions in the same device. The PCI device must be responsive
5440 * to PCI config space in order to use this function.
5441 *
5442 * This function does not just reset the PCI portion of a device, but
5443 * clears all the state associated with the device. This function differs
5444 * from __pci_reset_function_locked() in that it saves and restores device state
5445 * over the reset. It also differs from pci_reset_function() in that it
5446 * requires the PCI device lock to be held.
5447 *
5448 * Returns 0 if the device function was successfully reset or negative if the
5449 * device doesn't support resetting a single function.
5450 */
5451 int pci_reset_function_locked(struct pci_dev *dev)
5452 {
5453 int rc;
5454
5455 if (!pci_reset_supported(dev))
5456 return -ENOTTY;
5457
5458 pci_dev_save_and_disable(dev);
5459
5460 rc = __pci_reset_function_locked(dev);
5461
5462 pci_dev_restore(dev);
5463
5464 return rc;
5465 }
5466 EXPORT_SYMBOL_GPL(pci_reset_function_locked);
5467
5468 /**
5469 * pci_try_reset_function - quiesce and reset a PCI device function
5470 * @dev: PCI device to reset
5471 *
5472 * Same as above, except return -EAGAIN if unable to lock device.
5473 */
5474 int pci_try_reset_function(struct pci_dev *dev)
5475 {
5476 int rc;
5477
5478 if (!pci_reset_supported(dev))
5479 return -ENOTTY;
5480
5481 if (!pci_dev_trylock(dev))
5482 return -EAGAIN;
5483
5484 pci_dev_save_and_disable(dev);
5485 rc = __pci_reset_function_locked(dev);
5486 pci_dev_restore(dev);
5487 pci_dev_unlock(dev);
5488
5489 return rc;
5490 }
5491 EXPORT_SYMBOL_GPL(pci_try_reset_function);
5492
5493 /* Do any devices on or below this bus prevent a bus reset? */
5494 static bool pci_bus_resetable(struct pci_bus *bus)
5495 {
5496 struct pci_dev *dev;
5497
5499 if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5500 return false;
5501
5502 list_for_each_entry(dev, &bus->devices, bus_list) {
5503 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5504 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5505 return false;
5506 }
5507
5508 return true;
5509 }
5510
5511 /* Lock devices from the top of the tree down */
5512 static void pci_bus_lock(struct pci_bus *bus)
5513 {
5514 struct pci_dev *dev;
5515
5516 list_for_each_entry(dev, &bus->devices, bus_list) {
5517 pci_dev_lock(dev);
5518 if (dev->subordinate)
5519 pci_bus_lock(dev->subordinate);
5520 }
5521 }
5522
5523 /* Unlock devices from the bottom of the tree up */
5524 static void pci_bus_unlock(struct pci_bus *bus)
5525 {
5526 struct pci_dev *dev;
5527
5528 list_for_each_entry(dev, &bus->devices, bus_list) {
5529 if (dev->subordinate)
5530 pci_bus_unlock(dev->subordinate);
5531 pci_dev_unlock(dev);
5532 }
5533 }
5534
5535 /* Return 1 on successful lock, 0 on contention */
5536 static int pci_bus_trylock(struct pci_bus *bus)
5537 {
5538 struct pci_dev *dev;
5539
5540 list_for_each_entry(dev, &bus->devices, bus_list) {
5541 if (!pci_dev_trylock(dev))
5542 goto unlock;
5543 if (dev->subordinate) {
5544 if (!pci_bus_trylock(dev->subordinate)) {
5545 pci_dev_unlock(dev);
5546 goto unlock;
5547 }
5548 }
5549 }
5550 return 1;
5551
5552 unlock:
5553 list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
5554 if (dev->subordinate)
5555 pci_bus_unlock(dev->subordinate);
5556 pci_dev_unlock(dev);
5557 }
5558 return 0;
5559 }
5560
5561 /* Do any devices on or below this slot prevent a bus reset? */
5562 static bool pci_slot_resetable(struct pci_slot *slot)
5563 {
5564 struct pci_dev *dev;
5565
5566 if (slot->bus->self &&
5567 (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5568 return false;
5569
5570 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5571 if (!dev->slot || dev->slot != slot)
5572 continue;
5573 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5574 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5575 return false;
5576 }
5577
5578 return true;
5579 }
5580
5581 /* Lock devices from the top of the tree down */
5582 static void pci_slot_lock(struct pci_slot *slot)
5583 {
5584 struct pci_dev *dev;
5585
5586 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5587 if (!dev->slot || dev->slot != slot)
5588 continue;
5589 pci_dev_lock(dev);
5590 if (dev->subordinate)
5591 pci_bus_lock(dev->subordinate);
5592 }
5593 }
5594
5595 /* Unlock devices from the bottom of the tree up */
5596 static void pci_slot_unlock(struct pci_slot *slot)
5597 {
5598 struct pci_dev *dev;
5599
5600 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5601 if (!dev->slot || dev->slot != slot)
5602 continue;
5603 if (dev->subordinate)
5604 pci_bus_unlock(dev->subordinate);
5605 pci_dev_unlock(dev);
5606 }
5607 }
5608
5609 /* Return 1 on successful lock, 0 on contention */
5610 static int pci_slot_trylock(struct pci_slot *slot)
5611 {
5612 struct pci_dev *dev;
5613
5614 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5615 if (!dev->slot || dev->slot != slot)
5616 continue;
5617 if (!pci_dev_trylock(dev))
5618 goto unlock;
5619 if (dev->subordinate) {
5620 if (!pci_bus_trylock(dev->subordinate)) {
5621 pci_dev_unlock(dev);
5622 goto unlock;
5623 }
5624 }
5625 }
5626 return 1;
5627
5628 unlock:
5629 list_for_each_entry_continue_reverse(dev,
5630 &slot->bus->devices, bus_list) {
5631 if (!dev->slot || dev->slot != slot)
5632 continue;
5633 if (dev->subordinate)
5634 pci_bus_unlock(dev->subordinate);
5635 pci_dev_unlock(dev);
5636 }
5637 return 0;
5638 }
5639
5640 /*
5641 * Save and disable devices from the top of the tree down while holding
5642 * the @dev mutex lock for the entire tree.
5643 */
5644 static void pci_bus_save_and_disable_locked(struct pci_bus *bus)
5645 {
5646 struct pci_dev *dev;
5647
5648 list_for_each_entry(dev, &bus->devices, bus_list) {
5649 pci_dev_save_and_disable(dev);
5650 if (dev->subordinate)
5651 pci_bus_save_and_disable_locked(dev->subordinate);
5652 }
5653 }
5654
5655 /*
5656 * Restore devices from top of the tree down while holding @dev mutex lock
5657 * for the entire tree. Parent bridges need to be restored before we can
5658 * get to subordinate devices.
5659 */
5660 static void pci_bus_restore_locked(struct pci_bus *bus)
5661 {
5662 struct pci_dev *dev;
5663
5664 list_for_each_entry(dev, &bus->devices, bus_list) {
5665 pci_dev_restore(dev);
5666 if (dev->subordinate)
5667 pci_bus_restore_locked(dev->subordinate);
5668 }
5669 }
5670
5671 /*
5672 * Save and disable devices from the top of the tree down while holding
5673 * the @dev mutex lock for the entire tree.
5674 */
5675 static void pci_slot_save_and_disable_locked(struct pci_slot *slot)
5676 {
5677 struct pci_dev *dev;
5678
5679 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5680 if (!dev->slot || dev->slot != slot)
5681 continue;
5682 pci_dev_save_and_disable(dev);
5683 if (dev->subordinate)
5684 pci_bus_save_and_disable_locked(dev->subordinate);
5685 }
5686 }
5687
5688 /*
5689 * Restore devices from top of the tree down while holding @dev mutex lock
5690 * for the entire tree. Parent bridges need to be restored before we can
5691 * get to subordinate devices.
5692 */
5693 static void pci_slot_restore_locked(struct pci_slot *slot)
5694 {
5695 struct pci_dev *dev;
5696
5697 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5698 if (!dev->slot || dev->slot != slot)
5699 continue;
5700 pci_dev_restore(dev);
5701 if (dev->subordinate)
5702 pci_bus_restore_locked(dev->subordinate);
5703 }
5704 }
5705
5706 static int pci_slot_reset(struct pci_slot *slot, bool probe)
5707 {
5708 int rc;
5709
5710 if (!slot || !pci_slot_resetable(slot))
5711 return -ENOTTY;
5712
5713 if (!probe)
5714 pci_slot_lock(slot);
5715
5716 might_sleep();
5717
5718 rc = pci_reset_hotplug_slot(slot->hotplug, probe);
5719
5720 if (!probe)
5721 pci_slot_unlock(slot);
5722
5723 return rc;
5724 }
5725
5726 /**
5727 * pci_probe_reset_slot - probe whether a PCI slot can be reset
5728 * @slot: PCI slot to probe
5729 *
5730 * Return 0 if slot can be reset, negative if a slot reset is not supported.
5731 */
5732 int pci_probe_reset_slot(struct pci_slot *slot)
5733 {
5734 return pci_slot_reset(slot, PCI_RESET_PROBE);
5735 }
5736 EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
5737
5738 /**
5739 * __pci_reset_slot - Try to reset a PCI slot
5740 * @slot: PCI slot to reset
5741 *
5742 * A PCI bus may host multiple slots; each slot may support a reset mechanism
5743 * independent of other slots. For instance, some slots may support slot power
5744 * control. In the case of a 1:1 bus-to-slot architecture, this function may
5745 * wrap the bus reset to avoid spurious slot-related events such as hotplug.
5746 * Generally a slot reset should be attempted before a bus reset. All of the
5747 * functions of the slot and any subordinate buses behind the slot are reset
5748 * through this function. PCI config space of all devices in the slot and
5749 * behind the slot is saved before and restored after reset.
5750 *
5751 * Same as above except return -EAGAIN if the slot cannot be locked
5752 */
5753 static int __pci_reset_slot(struct pci_slot *slot)
5754 {
5755 int rc;
5756
5757 rc = pci_slot_reset(slot, PCI_RESET_PROBE);
5758 if (rc)
5759 return rc;
5760
5761 if (pci_slot_trylock(slot)) {
5762 pci_slot_save_and_disable_locked(slot);
5763 might_sleep();
5764 rc = pci_reset_hotplug_slot(slot->hotplug, PCI_RESET_DO_RESET);
5765 pci_slot_restore_locked(slot);
5766 pci_slot_unlock(slot);
5767 } else
5768 rc = -EAGAIN;
5769
5770 return rc;
5771 }
5772
5773 static int pci_bus_reset(struct pci_bus *bus, bool probe)
5774 {
5775 int ret;
5776
5777 if (!bus->self || !pci_bus_resetable(bus))
5778 return -ENOTTY;
5779
5780 if (probe)
5781 return 0;
5782
5783 pci_bus_lock(bus);
5784
5785 might_sleep();
5786
5787 ret = pci_bridge_secondary_bus_reset(bus->self);
5788
5789 pci_bus_unlock(bus);
5790
5791 return ret;
5792 }
5793
5794 /**
5795 * pci_bus_error_reset - reset the bridge's subordinate bus
5796 * @bridge: The parent device that connects to the bus to reset
5797 *
5798 * This function will first try to reset the slots on this bus if the method is
5799 * available. If slot reset fails or is not available, this will fall back to a
5800 * secondary bus reset.
5801 */
5802 int pci_bus_error_reset(struct pci_dev *bridge)
5803 {
5804 struct pci_bus *bus = bridge->subordinate;
5805 struct pci_slot *slot;
5806
5807 if (!bus)
5808 return -ENOTTY;
5809
5810 mutex_lock(&pci_slot_mutex);
5811 if (list_empty(&bus->slots))
5812 goto bus_reset;
5813
5814 list_for_each_entry(slot, &bus->slots, list)
5815 if (pci_probe_reset_slot(slot))
5816 goto bus_reset;
5817
5818 list_for_each_entry(slot, &bus->slots, list)
5819 if (pci_slot_reset(slot, PCI_RESET_DO_RESET))
5820 goto bus_reset;
5821
5822 mutex_unlock(&pci_slot_mutex);
5823 return 0;
5824 bus_reset:
5825 mutex_unlock(&pci_slot_mutex);
5826 return pci_bus_reset(bridge->subordinate, PCI_RESET_DO_RESET);
5827 }
5828
5829 /**
5830 * pci_probe_reset_bus - probe whether a PCI bus can be reset
5831 * @bus: PCI bus to probe
5832 *
5833 * Return 0 if bus can be reset, negative if a bus reset is not supported.
5834 */
5835 int pci_probe_reset_bus(struct pci_bus *bus)
5836 {
5837 return pci_bus_reset(bus, PCI_RESET_PROBE);
5838 }
5839 EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
5840
5841 /**
5842 * __pci_reset_bus - Try to reset a PCI bus
5843 * @bus: top level PCI bus to reset
5844 *
5845 * Same as above except return -EAGAIN if the bus cannot be locked
5846 */
5847 static int __pci_reset_bus(struct pci_bus *bus)
5848 {
5849 int rc;
5850
5851 rc = pci_bus_reset(bus, PCI_RESET_PROBE);
5852 if (rc)
5853 return rc;
5854
5855 if (pci_bus_trylock(bus)) {
5856 pci_bus_save_and_disable_locked(bus);
5857 might_sleep();
5858 rc = pci_bridge_secondary_bus_reset(bus->self);
5859 pci_bus_restore_locked(bus);
5860 pci_bus_unlock(bus);
5861 } else
5862 rc = -EAGAIN;
5863
5864 return rc;
5865 }
5866
5867 /**
5868 * pci_reset_bus - Try to reset a PCI bus
5869 * @pdev: top level PCI device to reset via slot/bus
5870 *
5871 * Same as above except return -EAGAIN if the bus cannot be locked
5872 */
5873 int pci_reset_bus(struct pci_dev *pdev)
5874 {
5875 return (!pci_probe_reset_slot(pdev->slot)) ?
5876 __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
5877 }
5878 EXPORT_SYMBOL_GPL(pci_reset_bus);
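/*
 * Illustrative usage sketch (not part of the original file): reset the
 * slot or bus hosting a device, tolerating lock contention. A single
 * retry is shown; real callers may want to back off and retry later.
 *
 *	int rc = pci_reset_bus(pdev);
 *
 *	if (rc == -EAGAIN)			// tree could not be locked
 *		rc = pci_reset_bus(pdev);	// one retry, as a sketch
 */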
5879
5880 /**
5881 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
5882 * @dev: PCI device to query
5883 *
5884 * Returns mmrbc: maximum designed memory read count in bytes or
5885 * appropriate error value.
5886 */
5887 int pcix_get_max_mmrbc(struct pci_dev *dev)
5888 {
5889 int cap;
5890 u32 stat;
5891
5892 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5893 if (!cap)
5894 return -EINVAL;
5895
5896 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5897 return -EINVAL;
5898
5899 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
5900 }
5901 EXPORT_SYMBOL(pcix_get_max_mmrbc);
5902
5903 /**
5904 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
5905 * @dev: PCI device to query
5906 *
5907 * Returns mmrbc: maximum memory read count in bytes or appropriate error
5908 * value.
5909 */
5910 int pcix_get_mmrbc(struct pci_dev *dev)
5911 {
5912 int cap;
5913 u16 cmd;
5914
5915 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5916 if (!cap)
5917 return -EINVAL;
5918
5919 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5920 return -EINVAL;
5921
5922 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
5923 }
5924 EXPORT_SYMBOL(pcix_get_mmrbc);
5925
5926 /**
5927 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
5928 * @dev: PCI device to configure
5929 * @mmrbc: maximum memory read count in bytes
5930 * valid values are 512, 1024, 2048, 4096
5931 *
5932 * If possible, sets the maximum memory read byte count; some bridges have
5933 * errata that prevent this.
5934 */
5935 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
5936 {
5937 int cap;
5938 u32 stat, v, o;
5939 u16 cmd;
5940
5941 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
5942 return -EINVAL;
5943
5944 v = ffs(mmrbc) - 10;
5945
5946 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5947 if (!cap)
5948 return -EINVAL;
5949
5950 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5951 return -EINVAL;
5952
5953 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
5954 return -E2BIG;
5955
5956 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5957 return -EINVAL;
5958
5959 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
5960 if (o != v) {
5961 if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
5962 return -EIO;
5963
5964 cmd &= ~PCI_X_CMD_MAX_READ;
5965 cmd |= v << 2;
5966 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
5967 return -EIO;
5968 }
5969 return 0;
5970 }
5971 EXPORT_SYMBOL(pcix_set_mmrbc);
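/*
 * Worked example of the PCI_X_CMD_MAX_READ encoding used above (a
 * sketch, not from the original file): byte counts map to field values
 * as 512 -> 0, 1024 -> 1, 2048 -> 2, 4096 -> 3, i.e. v = ffs(mmrbc) - 10;
 * for mmrbc = 2048, ffs(2048) = 12, so v = 2.
 *
 *	if (pcix_get_max_mmrbc(pdev) >= 2048)	// designed maximum allows it
 *		pcix_set_mmrbc(pdev, 2048);	// writes field value 2
 */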
5972
5973 /**
5974 * pcie_get_readrq - get PCI Express read request size
5975 * @dev: PCI device to query
5976 *
5977 * Returns maximum memory read request in bytes or appropriate error value.
5978 */
5979 int pcie_get_readrq(struct pci_dev *dev)
5980 {
5981 u16 ctl;
5982
5983 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5984
5985 return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5986 }
5987 EXPORT_SYMBOL(pcie_get_readrq);
5988
5989 /**
5990 * pcie_set_readrq - set PCI Express maximum memory read request
5991 * @dev: PCI device to configure
5992 * @rq: maximum memory read count in bytes
5993 * valid values are 128, 256, 512, 1024, 2048, 4096
5994 *
5995 * If possible, sets the maximum memory read request size in bytes.
5996 */
5997 int pcie_set_readrq(struct pci_dev *dev, int rq)
5998 {
5999 u16 v;
6000 int ret;
6001
6002 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
6003 return -EINVAL;
6004
6005 /*
6006 * If using the "performance" PCIe config, we clamp the read rq
6007 * size to the max packet size to keep the host bridge from
6008 * generating requests larger than we can cope with.
6009 */
6010 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
6011 int mps = pcie_get_mps(dev);
6012
6013 if (mps < rq)
6014 rq = mps;
6015 }
6016
6017 v = (ffs(rq) - 8) << 12;
6018
6019 ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
6020 PCI_EXP_DEVCTL_READRQ, v);
6021
6022 return pcibios_err_to_errno(ret);
6023 }
6024 EXPORT_SYMBOL(pcie_set_readrq);
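/*
 * Worked example of the PCI_EXP_DEVCTL_READRQ encoding used above (a
 * sketch): bits 14:12 hold log2(rq) - 7, i.e. 128 -> 0, 256 -> 1, ...,
 * 4096 -> 5. For rq = 512, ffs(512) - 8 = 2, shifted into place as
 * 2 << 12.
 *
 *	int rc = pcie_set_readrq(pdev, 512);	// MRRS = 512 bytes
 *
 *	if (rc)		// -EINVAL for sizes outside 128..4096 or non-power-of-2
 *		return rc;
 */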
6025
6026 /**
6027 * pcie_get_mps - get PCI Express maximum payload size
6028 * @dev: PCI device to query
6029 *
6030 * Returns maximum payload size in bytes
6031 */
6032 int pcie_get_mps(struct pci_dev *dev)
6033 {
6034 u16 ctl;
6035
6036 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
6037
6038 return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6039 }
6040 EXPORT_SYMBOL(pcie_get_mps);
6041
6042 /**
6043 * pcie_set_mps - set PCI Express maximum payload size
6044 * @dev: PCI device to configure
6045 * @mps: maximum payload size in bytes
6046 * valid values are 128, 256, 512, 1024, 2048, 4096
6047 *
6048 * If possible, sets the maximum payload size.
6049 */
6050 int pcie_set_mps(struct pci_dev *dev, int mps)
6051 {
6052 u16 v;
6053 int ret;
6054
6055 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
6056 return -EINVAL;
6057
6058 v = ffs(mps) - 8;
6059 if (v > dev->pcie_mpss)
6060 return -EINVAL;
6061 v <<= 5;
6062
6063 ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
6064 PCI_EXP_DEVCTL_PAYLOAD, v);
6065
6066 return pcibios_err_to_errno(ret);
6067 }
6068 EXPORT_SYMBOL(pcie_set_mps);
6069
6070 /**
6071 * pcie_bandwidth_available - determine minimum link settings of a PCIe
6072 * device and its bandwidth limitation
6073 * @dev: PCI device to query
6074 * @limiting_dev: storage for device causing the bandwidth limitation
6075 * @speed: storage for speed of limiting device
6076 * @width: storage for width of limiting device
6077 *
6078 * Walk up the PCI device chain and find the point where the minimum
6079 * bandwidth is available. Return the bandwidth available there and (if
6080 * limiting_dev, speed, and width pointers are supplied) information about
6081 * that point. The bandwidth returned is in Mb/s, i.e., megabits/second of
6082 * raw bandwidth.
6083 */
6084 u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
6085 enum pci_bus_speed *speed,
6086 enum pcie_link_width *width)
6087 {
6088 u16 lnksta;
6089 enum pci_bus_speed next_speed;
6090 enum pcie_link_width next_width;
6091 u32 bw, next_bw;
6092
6093 if (speed)
6094 *speed = PCI_SPEED_UNKNOWN;
6095 if (width)
6096 *width = PCIE_LNK_WIDTH_UNKNOWN;
6097
6098 bw = 0;
6099
6100 while (dev) {
6101 pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
6102
6103 next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
6104 next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
6105 PCI_EXP_LNKSTA_NLW_SHIFT;
6106
6107 next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
6108
6109 /* Check if current device limits the total bandwidth */
6110 if (!bw || next_bw <= bw) {
6111 bw = next_bw;
6112
6113 if (limiting_dev)
6114 *limiting_dev = dev;
6115 if (speed)
6116 *speed = next_speed;
6117 if (width)
6118 *width = next_width;
6119 }
6120
6121 dev = pci_upstream_bridge(dev);
6122 }
6123
6124 return bw;
6125 }
6126 EXPORT_SYMBOL(pcie_bandwidth_available);
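/*
 * Illustrative usage sketch (not part of the original file): find the
 * weakest link between a device and the root complex. With an 8 GT/s x4
 * link, PCIE_SPEED2MBS_ENC() accounts for 128b/130b encoding, so the
 * returned figure is roughly 4 * 7877 Mb/s.
 *
 *	struct pci_dev *limit = NULL;
 *	enum pci_bus_speed speed;
 *	enum pcie_link_width width;
 *	u32 bw = pcie_bandwidth_available(pdev, &limit, &speed, &width);
 *
 *	pci_info(pdev, "min %u Mb/s at %s\n", bw,
 *		 limit ? pci_name(limit) : "<unknown>");
 */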
6127
6128 /**
6129 * pcie_get_speed_cap - query for the PCI device's link speed capability
6130 * @dev: PCI device to query
6131 *
6132 * Query the PCI device speed capability. Return the maximum link speed
6133 * supported by the device.
6134 */
6135 enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
6136 {
6137 u32 lnkcap2, lnkcap;
6138
6139 /*
6140 * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18. The
6141 * implementation note there recommends using the Supported Link
6142 * Speeds Vector in Link Capabilities 2 when supported.
6143 *
6144 * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software
6145 * should use the Supported Link Speeds field in Link Capabilities,
6146 * where only 2.5 GT/s and 5.0 GT/s speeds were defined.
6147 */
6148 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
6149
6150 /* PCIe r3.0-compliant */
6151 if (lnkcap2)
6152 return PCIE_LNKCAP2_SLS2SPEED(lnkcap2);
6153
6154 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
6155 if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
6156 return PCIE_SPEED_5_0GT;
6157 else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
6158 return PCIE_SPEED_2_5GT;
6159
6160 return PCI_SPEED_UNKNOWN;
6161 }
6162 EXPORT_SYMBOL(pcie_get_speed_cap);
6163
6164 /**
6165 * pcie_get_width_cap - query for the PCI device's link width capability
6166 * @dev: PCI device to query
6167 *
6168 * Query the PCI device width capability. Return the maximum link width
6169 * supported by the device.
6170 */
6171 enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
6172 {
6173 u32 lnkcap;
6174
6175 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
6176 if (lnkcap)
6177 return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
6178
6179 return PCIE_LNK_WIDTH_UNKNOWN;
6180 }
6181 EXPORT_SYMBOL(pcie_get_width_cap);
6182
6183 /**
6184 * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
6185 * @dev: PCI device
6186 * @speed: storage for link speed
6187 * @width: storage for link width
6188 *
6189 * Calculate a PCI device's link bandwidth by querying for its link speed
6190 * and width, multiplying them, and applying encoding overhead. The result
6191 * is in Mb/s, i.e., megabits/second of raw bandwidth.
6192 */
6193 u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
6194 enum pcie_link_width *width)
6195 {
6196 *speed = pcie_get_speed_cap(dev);
6197 *width = pcie_get_width_cap(dev);
6198
6199 if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
6200 return 0;
6201
6202 return *width * PCIE_SPEED2MBS_ENC(*speed);
6203 }
6204
6205 /**
6206 * __pcie_print_link_status - Report the PCI device's link speed and width
6207 * @dev: PCI device to query
6208 * @verbose: Print info even when enough bandwidth is available
6209 *
6210 * If the available bandwidth at the device is less than the device is
6211 * capable of, report the device's maximum possible bandwidth and the
6212 * upstream link that limits its performance. If @verbose, always print
6213 * the available bandwidth, even if the device isn't constrained.
6214 */
6215 void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
6216 {
6217 enum pcie_link_width width, width_cap;
6218 enum pci_bus_speed speed, speed_cap;
6219 struct pci_dev *limiting_dev = NULL;
6220 u32 bw_avail, bw_cap;
6221
6222 bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
6223 bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
6224
6225 if (bw_avail >= bw_cap && verbose)
6226 pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
6227 bw_cap / 1000, bw_cap % 1000,
6228 pci_speed_string(speed_cap), width_cap);
6229 else if (bw_avail < bw_cap)
6230 pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
6231 bw_avail / 1000, bw_avail % 1000,
6232 pci_speed_string(speed), width,
6233 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
6234 bw_cap / 1000, bw_cap % 1000,
6235 pci_speed_string(speed_cap), width_cap);
6236 }
6237
6238 /**
6239 * pcie_print_link_status - Report the PCI device's link speed and width
6240 * @dev: PCI device to query
6241 *
6242 * Report the available bandwidth at the device.
6243 */
6244 void pcie_print_link_status(struct pci_dev *dev)
6245 {
6246 __pcie_print_link_status(dev, true);
6247 }
6248 EXPORT_SYMBOL(pcie_print_link_status);
6249
6250 /**
6251 * pci_select_bars - Make BAR mask from the type of resource
6252 * @dev: the PCI device for which BAR mask is made
6253 * @flags: resource type mask to be selected
6254 *
6255 * This helper routine makes a BAR mask from the given resource type.
6256 */
6257 int pci_select_bars(struct pci_dev *dev, unsigned long flags)
6258 {
6259 int i, bars = 0;
6260 for (i = 0; i < PCI_NUM_RESOURCES; i++)
6261 if (pci_resource_flags(dev, i) & flags)
6262 bars |= (1 << i);
6263 return bars;
6264 }
6265 EXPORT_SYMBOL(pci_select_bars);
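/*
 * Illustrative usage sketch (not part of the original file): claim all
 * memory BARs of a device in one call. "my_drv" is a hypothetical name.
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *	int rc = pci_request_selected_regions(pdev, bars, "my_drv");
 *
 *	if (rc)
 *		return rc;	// one of the BARs is already claimed
 */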
6266
6267 /* Some architectures require additional programming to enable VGA */
6268 static arch_set_vga_state_t arch_set_vga_state;
6269
6270 void __init pci_register_set_vga_state(arch_set_vga_state_t func)
6271 {
6272 arch_set_vga_state = func; /* NULL disables */
6273 }
6274
6275 static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
6276 unsigned int command_bits, u32 flags)
6277 {
6278 if (arch_set_vga_state)
6279 return arch_set_vga_state(dev, decode, command_bits,
6280 flags);
6281 return 0;
6282 }
6283
6284 /**
6285 * pci_set_vga_state - set VGA decode state on device and parents if requested
6286 * @dev: the PCI device
6287 * @decode: true = enable decoding, false = disable decoding
6288 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
6289 * @flags: PCI_VGA_STATE_CHANGE_DECODES and/or PCI_VGA_STATE_CHANGE_BRIDGE;
6290 * the latter also updates VGA forwarding on ancestor bridges
6291 */
6292 int pci_set_vga_state(struct pci_dev *dev, bool decode,
6293 unsigned int command_bits, u32 flags)
6294 {
6295 struct pci_bus *bus;
6296 struct pci_dev *bridge;
6297 u16 cmd;
6298 int rc;
6299
6300 WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
6301
6302 /* ARCH specific VGA enables */
6303 rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
6304 if (rc)
6305 return rc;
6306
6307 if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
6308 pci_read_config_word(dev, PCI_COMMAND, &cmd);
6309 if (decode)
6310 cmd |= command_bits;
6311 else
6312 cmd &= ~command_bits;
6313 pci_write_config_word(dev, PCI_COMMAND, cmd);
6314 }
6315
6316 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
6317 return 0;
6318
6319 bus = dev->bus;
6320 while (bus) {
6321 bridge = bus->self;
6322 if (bridge) {
6323 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
6324 &cmd);
6325 if (decode)
6326 cmd |= PCI_BRIDGE_CTL_VGA;
6327 else
6328 cmd &= ~PCI_BRIDGE_CTL_VGA;
6329 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
6330 cmd);
6331 }
6332 bus = bus->parent;
6333 }
6334 return 0;
6335 }
6336
6337 #ifdef CONFIG_ACPI
6338 bool pci_pr3_present(struct pci_dev *pdev)
6339 {
6340 struct acpi_device *adev;
6341
6342 if (acpi_disabled)
6343 return false;
6344
6345 adev = ACPI_COMPANION(&pdev->dev);
6346 if (!adev)
6347 return false;
6348
6349 return adev->power.flags.power_resources &&
6350 acpi_has_method(adev->handle, "_PR3");
6351 }
6352 EXPORT_SYMBOL_GPL(pci_pr3_present);
6353 #endif
6354
6355 /**
6356 * pci_add_dma_alias - Add a DMA devfn alias for a device
6357 * @dev: the PCI device for which alias is added
6358 * @devfn_from: alias slot and function
6359 * @nr_devfns: number of subsequent devfns to alias
6360 *
6361 * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
6362 * which is used to program permissible bus-devfn source addresses for DMA
6363 * requests in an IOMMU. These aliases factor into IOMMU group creation
6364 * and are useful for devices generating DMA requests beyond or different
6365 * from their logical bus-devfn. Examples include device quirks where the
6366 * device simply uses the wrong devfn, as well as non-transparent bridges
6367 * where the alias may be a proxy for devices in another domain.
6368 *
6369 * IOMMU group creation is performed during device discovery or addition,
6370 * prior to any potential DMA mapping and therefore prior to driver probing
6371 * (especially for userspace assigned devices where IOMMU group definition
6372 * cannot be left as a userspace activity). DMA aliases should therefore
6373 * be configured via quirks, such as the PCI fixup header quirk.
6374 */
6375 void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from,
6376 unsigned int nr_devfns)
6377 {
6378 int devfn_to;
6379
6380 nr_devfns = min(nr_devfns, (unsigned int)MAX_NR_DEVFNS - devfn_from);
6381 devfn_to = devfn_from + nr_devfns - 1;
6382
6383 if (!dev->dma_alias_mask)
6384 dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL);
6385 if (!dev->dma_alias_mask) {
6386 pci_warn(dev, "Unable to allocate DMA alias mask\n");
6387 return;
6388 }
6389
6390 bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns);
6391
6392 if (nr_devfns == 1)
6393 pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
6394 PCI_SLOT(devfn_from), PCI_FUNC(devfn_from));
6395 else if (nr_devfns > 1)
6396 pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n",
6397 PCI_SLOT(devfn_from), PCI_FUNC(devfn_from),
6398 PCI_SLOT(devfn_to), PCI_FUNC(devfn_to));
6399 }
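/*
 * Illustrative quirk-style usage (a sketch; the vendor/device IDs are
 * hypothetical): a device that emits DMA as function 1 while drivers
 * bind to function 0 can alias devfn x.1 during header fixup:
 *
 *	static void quirk_example_dma_alias(struct pci_dev *dev)
 *	{
 *		pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1), 1);
 *	}
 *	DECLARE_PCI_FIXUP_HEADER(EXAMPLE_VENDOR_ID, EXAMPLE_DEVICE_ID,
 *				 quirk_example_dma_alias);
 */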
6400
6401 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
6402 {
6403 return (dev1->dma_alias_mask &&
6404 test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
6405 (dev2->dma_alias_mask &&
6406 test_bit(dev1->devfn, dev2->dma_alias_mask)) ||
6407 pci_real_dma_dev(dev1) == dev2 ||
6408 pci_real_dma_dev(dev2) == dev1;
6409 }
6410
6411 bool pci_device_is_present(struct pci_dev *pdev)
6412 {
6413 u32 v;
6414
6415 if (pci_dev_is_disconnected(pdev))
6416 return false;
6417 return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
6418 }
6419 EXPORT_SYMBOL_GPL(pci_device_is_present);
6420
6421 void pci_ignore_hotplug(struct pci_dev *dev)
6422 {
6423 struct pci_dev *bridge = dev->bus->self;
6424
6425 dev->ignore_hotplug = 1;
6426 /* Propagate the "ignore hotplug" setting to the parent bridge. */
6427 if (bridge)
6428 bridge->ignore_hotplug = 1;
6429 }
6430 EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
6431
6432 /**
6433 * pci_real_dma_dev - Get PCI DMA device for PCI device
6434 * @dev: the PCI device that may have a PCI DMA alias
6435 *
6436 * Permits the platform to provide architecture-specific functionality to
6437 * devices needing to alias DMA to another PCI device on another PCI bus. If
6438 * the PCI device is on the same bus, it is recommended to use
6439 * pci_add_dma_alias(). This is the default implementation. Architecture
6440 * implementations can override this.
6441 */
6442 struct pci_dev __weak *pci_real_dma_dev(struct pci_dev *dev)
6443 {
6444 return dev;
6445 }
6446
6447 resource_size_t __weak pcibios_default_alignment(void)
6448 {
6449 return 0;
6450 }
6451
6452 /*
6453 * Arches that don't want to expose struct resource to userland as-is in
6454 * sysfs and /proc can implement their own pci_resource_to_user().
6455 */
6456 void __weak pci_resource_to_user(const struct pci_dev *dev, int bar,
6457 const struct resource *rsrc,
6458 resource_size_t *start, resource_size_t *end)
6459 {
6460 *start = rsrc->start;
6461 *end = rsrc->end;
6462 }
6463
6464 static char *resource_alignment_param;
6465 static DEFINE_SPINLOCK(resource_alignment_lock);
6466
6467 /**
6468 * pci_specified_resource_alignment - get resource alignment specified by user.
6469 * @dev: the PCI device to check
6470 * @resize: whether or not to change resources' size when reassigning alignment
6471 *
6472 * RETURNS: Resource alignment if it is specified.
6473 * Zero if it is not specified.
6474 */
6475 static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
6476 bool *resize)
6477 {
6478 int align_order, count;
6479 resource_size_t align = pcibios_default_alignment();
6480 const char *p;
6481 int ret;
6482
6483 spin_lock(&resource_alignment_lock);
6484 p = resource_alignment_param;
6485 if (!p || !*p)
6486 goto out;
6487 if (pci_has_flag(PCI_PROBE_ONLY)) {
6488 align = 0;
6489 pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
6490 goto out;
6491 }
6492
6493 while (*p) {
6494 count = 0;
6495 if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
6496 p[count] == '@') {
6497 p += count + 1;
6498 if (align_order > 63) {
6499 pr_err("PCI: Invalid requested alignment (order %d)\n",
6500 align_order);
6501 align_order = PAGE_SHIFT;
6502 }
6503 } else {
6504 align_order = PAGE_SHIFT;
6505 }
6506
6507 ret = pci_dev_str_match(dev, p, &p);
6508 if (ret == 1) {
6509 *resize = true;
6510 align = 1ULL << align_order;
6511 break;
6512 } else if (ret < 0) {
6513 pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
6514 p);
6515 break;
6516 }
6517
6518 if (*p != ';' && *p != ',') {
6519 /* End of param or invalid format */
6520 break;
6521 }
6522 p++;
6523 }
6524 out:
6525 spin_unlock(&resource_alignment_lock);
6526 return align;
6527 }
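/*
 * Example parameter forms accepted by the parser above (illustrative;
 * the device address and IDs are arbitrary):
 *
 *	pci=resource_alignment=12@pci:8086:9c22		4 KB (order 12) for a
 *							vendor:device match
 *	pci=resource_alignment=0000:01:00.0		PAGE_SIZE alignment for
 *							a single device
 *
 * Multiple entries may be separated by ';' or ','.
 */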
6528
6529 static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
6530 resource_size_t align, bool resize)
6531 {
6532 struct resource *r = &dev->resource[bar];
6533 resource_size_t size;
6534
6535 if (!(r->flags & IORESOURCE_MEM))
6536 return;
6537
6538 if (r->flags & IORESOURCE_PCI_FIXED) {
6539 pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
6540 bar, r, (unsigned long long)align);
6541 return;
6542 }
6543
6544 size = resource_size(r);
6545 if (size >= align)
6546 return;
6547
6548 /*
6549 * Increase the alignment of the resource. There are two ways we
6550 * can do this:
6551 *
6552 * 1) Increase the size of the resource. BARs are aligned on their
6553 * size, so when we reallocate space for this resource, we'll
6554 * allocate it with the larger alignment. This also prevents
6555 * assignment of any other BARs inside the alignment region, so
6556 * if we're requesting page alignment, this means no other BARs
6557 * will share the page.
6558 *
6559 * The disadvantage is that this makes the resource larger than
6560 * the hardware BAR, which may break drivers that compute things
6561 * based on the resource size, e.g., to find registers at a
6562 * fixed offset before the end of the BAR.
6563 *
6564 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
6565 * set r->start to the desired alignment. By itself this
6566 * doesn't prevent other BARs being put inside the alignment
6567 * region, but if we realign *every* resource of every device in
6568 * the system, none of them will share an alignment region.
6569 *
6570 * When the user has requested alignment for only some devices via
6571 * the "pci=resource_alignment" argument, "resize" is true and we
6572 * use the first method. Otherwise we assume we're aligning all
6573 * devices and we use the second.
6574 */
6575
6576 pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n",
6577 bar, r, (unsigned long long)align);
6578
6579 if (resize) {
6580 r->start = 0;
6581 r->end = align - 1;
6582 } else {
6583 r->flags &= ~IORESOURCE_SIZEALIGN;
6584 r->flags |= IORESOURCE_STARTALIGN;
6585 r->start = align;
6586 r->end = r->start + size - 1;
6587 }
6588 r->flags |= IORESOURCE_UNSET;
6589 }
6590
6591 /*
6592 * This function disables memory decoding and releases memory resources
6593 * of the device specified by the kernel's boot parameter 'pci=resource_alignment='.
6594 * It also rounds up size to specified alignment.
6595 * Later on, the kernel will assign page-aligned memory resource back
6596 * to the device.
6597 */
6598 void pci_reassigndev_resource_alignment(struct pci_dev *dev)
6599 {
6600 int i;
6601 struct resource *r;
6602 resource_size_t align;
6603 u16 command;
6604 bool resize = false;
6605
6606 /*
6607 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
6608 * 3.4.1.11. Their resources are allocated from the space
6609 * described by the VF BARx register in the PF's SR-IOV capability.
6610 * We can't influence their alignment here.
6611 */
6612 if (dev->is_virtfn)
6613 return;
6614
6615 /* Check whether this device is a specified target for reassignment */
6616 align = pci_specified_resource_alignment(dev, &resize);
6617 if (!align)
6618 return;
6619
6620 if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
6621 (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
6622 pci_warn(dev, "Can't reassign resources to host bridge\n");
6623 return;
6624 }
6625
6626 pci_read_config_word(dev, PCI_COMMAND, &command);
6627 command &= ~PCI_COMMAND_MEMORY;
6628 pci_write_config_word(dev, PCI_COMMAND, command);
6629
6630 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
6631 pci_request_resource_alignment(dev, i, align, resize);
6632
6633 /*
6634 * Need to disable the bridge's resource windows so the kernel
6635 * can reassign new resource windows later on.
6637 */
6638 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
6639 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
6640 r = &dev->resource[i];
6641 if (!(r->flags & IORESOURCE_MEM))
6642 continue;
6643 r->flags |= IORESOURCE_UNSET;
6644 r->end = resource_size(r) - 1;
6645 r->start = 0;
6646 }
6647 pci_disable_bridge_window(dev);
6648 }
6649 }
6650
6651 static ssize_t resource_alignment_show(struct bus_type *bus, char *buf)
6652 {
6653 size_t count = 0;
6654
6655 spin_lock(&resource_alignment_lock);
6656 if (resource_alignment_param)
6657 count = sysfs_emit(buf, "%s\n", resource_alignment_param);
6658 spin_unlock(&resource_alignment_lock);
6659
6660 return count;
6661 }
6662
6663 static ssize_t resource_alignment_store(struct bus_type *bus,
6664 const char *buf, size_t count)
6665 {
6666 char *param, *old, *end;
6667
6668 if (count >= (PAGE_SIZE - 1))
6669 return -EINVAL;
6670
6671 param = kstrndup(buf, count, GFP_KERNEL);
6672 if (!param)
6673 return -ENOMEM;
6674
6675 end = strchr(param, '\n');
6676 if (end)
6677 *end = '\0';
6678
6679 spin_lock(&resource_alignment_lock);
6680 old = resource_alignment_param;
6681 if (strlen(param)) {
6682 resource_alignment_param = param;
6683 } else {
6684 kfree(param);
6685 resource_alignment_param = NULL;
6686 }
6687 spin_unlock(&resource_alignment_lock);
6688
6689 kfree(old);
6690
6691 return count;
6692 }
6693
6694 static BUS_ATTR_RW(resource_alignment);
6695
6696 static int __init pci_resource_alignment_sysfs_init(void)
6697 {
6698 return bus_create_file(&pci_bus_type,
6699 &bus_attr_resource_alignment);
6700 }
6701 late_initcall(pci_resource_alignment_sysfs_init);
6702
6703 static void pci_no_domains(void)
6704 {
6705 #ifdef CONFIG_PCI_DOMAINS
6706 pci_domains_supported = 0;
6707 #endif
6708 }
6709
6710 #ifdef CONFIG_PCI_DOMAINS_GENERIC
6711 static atomic_t __domain_nr = ATOMIC_INIT(-1);
6712
6713 static int pci_get_new_domain_nr(void)
6714 {
6715 return atomic_inc_return(&__domain_nr);
6716 }
6717
6718 static int of_pci_bus_find_domain_nr(struct device *parent)
6719 {
6720 static int use_dt_domains = -1;
6721 int domain = -1;
6722
6723 if (parent)
6724 domain = of_get_pci_domain_nr(parent->of_node);
6725
6726 /*
6727 * Check DT domain and use_dt_domains values.
6728 *
6729 * If DT domain property is valid (domain >= 0) and
6730 * use_dt_domains != 0, the DT assignment is valid since this means
6731 * we have not previously allocated a domain number by using
6732 * pci_get_new_domain_nr(); we should also update use_dt_domains to
6733 * 1, to indicate that we have just assigned a domain number from
6734 * DT.
6735 *
6736 * If the DT domain property value is not valid (i.e., domain < 0) and we
6737 * have not previously assigned a domain number from DT
6738 * (use_dt_domains != 1) we should assign a domain number by
6739 * using the:
6740 *
6741 * pci_get_new_domain_nr()
6742 *
6743 * API and update the use_dt_domains value to keep track of method we
6744 * are using to assign domain numbers (use_dt_domains = 0).
6745 *
6746 * All other combinations imply we have a platform that is trying
6747 * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
6748 * which is a recipe for domain mishandling and it is prevented by
6749 * invalidating the domain value (domain = -1) and printing a
6750 * corresponding error.
6751 */
6752 if (domain >= 0 && use_dt_domains) {
6753 use_dt_domains = 1;
6754 } else if (domain < 0 && use_dt_domains != 1) {
6755 use_dt_domains = 0;
6756 domain = pci_get_new_domain_nr();
6757 } else {
6758 if (parent)
6759 pr_err("Node %pOF has ", parent->of_node);
6760 pr_err("Inconsistent \"linux,pci-domain\" property in DT\n");
6761 domain = -1;
6762 }
6763
6764 return domain;
6765 }
6766
6767 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
6768 {
6769 return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
6770 acpi_pci_bus_find_domain_nr(bus);
6771 }
6772 #endif
6773
6774 /**
6775 * pci_ext_cfg_avail - can we access extended PCI config space?
6776 *
6777 * Returns 1 if we can access PCI extended config space (offsets
6778 * greater than 0xff). This is the default implementation. Architecture
6779 * implementations can override this.
6780 */
6781 int __weak pci_ext_cfg_avail(void)
6782 {
6783 return 1;
6784 }
6785
6786 void __weak pci_fixup_cardbus(struct pci_bus *bus)
6787 {
6788 }
6789 EXPORT_SYMBOL(pci_fixup_cardbus);
6790
6791 static int __init pci_setup(char *str)
6792 {
6793 while (str) {
6794 char *k = strchr(str, ',');
6795 if (k)
6796 *k++ = 0;
6797 if (*str && (str = pcibios_setup(str)) && *str) {
6798 if (!strcmp(str, "nomsi")) {
6799 pci_no_msi();
6800 } else if (!strncmp(str, "noats", 5)) {
6801 pr_info("PCIe: ATS is disabled\n");
6802 pcie_ats_disabled = true;
6803 } else if (!strcmp(str, "noaer")) {
6804 pci_no_aer();
6805 } else if (!strcmp(str, "earlydump")) {
6806 pci_early_dump = true;
6807 } else if (!strncmp(str, "realloc=", 8)) {
6808 pci_realloc_get_opt(str + 8);
6809 } else if (!strncmp(str, "realloc", 7)) {
6810 pci_realloc_get_opt("on");
6811 } else if (!strcmp(str, "nodomains")) {
6812 pci_no_domains();
6813 } else if (!strncmp(str, "noari", 5)) {
6814 pcie_ari_disabled = true;
6815 } else if (!strncmp(str, "cbiosize=", 9)) {
6816 pci_cardbus_io_size = memparse(str + 9, &str);
6817 } else if (!strncmp(str, "cbmemsize=", 10)) {
6818 pci_cardbus_mem_size = memparse(str + 10, &str);
6819 } else if (!strncmp(str, "resource_alignment=", 19)) {
6820 resource_alignment_param = str + 19;
6821 } else if (!strncmp(str, "ecrc=", 5)) {
6822 pcie_ecrc_get_policy(str + 5);
6823 } else if (!strncmp(str, "hpiosize=", 9)) {
6824 pci_hotplug_io_size = memparse(str + 9, &str);
6825 } else if (!strncmp(str, "hpmmiosize=", 11)) {
6826 pci_hotplug_mmio_size = memparse(str + 11, &str);
6827 } else if (!strncmp(str, "hpmmioprefsize=", 15)) {
6828 pci_hotplug_mmio_pref_size = memparse(str + 15, &str);
6829 } else if (!strncmp(str, "hpmemsize=", 10)) {
6830 pci_hotplug_mmio_size = memparse(str + 10, &str);
6831 pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size;
6832 } else if (!strncmp(str, "hpbussize=", 10)) {
6833 pci_hotplug_bus_size =
6834 simple_strtoul(str + 10, &str, 0);
6835 if (pci_hotplug_bus_size > 0xff)
6836 pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
6837 } else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
6838 pcie_bus_config = PCIE_BUS_TUNE_OFF;
6839 } else if (!strncmp(str, "pcie_bus_safe", 13)) {
6840 pcie_bus_config = PCIE_BUS_SAFE;
6841 } else if (!strncmp(str, "pcie_bus_perf", 13)) {
6842 pcie_bus_config = PCIE_BUS_PERFORMANCE;
6843 } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
6844 pcie_bus_config = PCIE_BUS_PEER2PEER;
6845 } else if (!strncmp(str, "pcie_scan_all", 13)) {
6846 pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
6847 } else if (!strncmp(str, "disable_acs_redir=", 18)) {
6848 disable_acs_redir_param = str + 18;
6849 } else {
6850 pr_err("PCI: Unknown option `%s'\n", str);
6851 }
6852 }
6853 str = k;
6854 }
6855 return 0;
6856 }
6857 early_param("pci", pci_setup);
6858
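/*
 * Illustrative command line combining several of the options parsed
 * above (a sketch; the values are arbitrary):
 *
 *	pci=pcie_bus_safe,hpmemsize=128M,hpbussize=8,realloc
 *
 * Options are comma-separated; unrecognized ones are reported with
 * "PCI: Unknown option" via pr_err().
 */
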
6859 /*
6860 * 'resource_alignment_param' and 'disable_acs_redir_param' are initialized
6861 * in pci_setup(), above, to point to data in the __initdata section which
6862 * will be freed after the init sequence is complete. We can't allocate memory
6863 * in pci_setup() because some architectures do not have any memory allocation
6864 * service available during an early_param() call. So we allocate memory and
6865 * copy the variable here before the init section is freed.
6867 */
6868 static int __init pci_realloc_setup_params(void)
6869 {
6870 resource_alignment_param = kstrdup(resource_alignment_param,
6871 GFP_KERNEL);
6872 disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
6873
6874 return 0;
6875 }
6876 pure_initcall(pci_realloc_setup_params);
6877