1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * (C) Copyright 2002-2004 Greg Kroah-Hartman <greg@kroah.com>
4 * (C) Copyright 2002-2004 IBM Corp.
5 * (C) Copyright 2003 Matthew Wilcox
6 * (C) Copyright 2003 Hewlett-Packard
7 * (C) Copyright 2004 Jon Smirl <jonsmirl@yahoo.com>
8 * (C) Copyright 2004 Silicon Graphics, Inc. Jesse Barnes <jbarnes@sgi.com>
9 *
10 * File attributes for PCI devices
11 *
12 * Modeled after usb's driverfs.c
13 */
14
15
16 #include <linux/kernel.h>
17 #include <linux/sched.h>
18 #include <linux/pci.h>
19 #include <linux/stat.h>
20 #include <linux/export.h>
21 #include <linux/topology.h>
22 #include <linux/mm.h>
23 #include <linux/fs.h>
24 #include <linux/capability.h>
25 #include <linux/security.h>
26 #include <linux/slab.h>
27 #include <linux/vgaarb.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/msi.h>
30 #include <linux/of.h>
31 #include "pci.h"
32
33 static int sysfs_initialized; /* = 0 */
34
35 /* show configuration fields */
36 #define pci_config_attr(field, format_string) \
37 static ssize_t \
38 field##_show(struct device *dev, struct device_attribute *attr, char *buf) \
39 { \
40 struct pci_dev *pdev; \
41 \
42 pdev = to_pci_dev(dev); \
43 return sysfs_emit(buf, format_string, pdev->field); \
44 } \
45 static DEVICE_ATTR_RO(field)
46
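/*
 * For example, pci_config_attr(vendor, "0x%04x\n") expands to a
 * vendor_show() function plus dev_attr_vendor, backing the read-only
 * "vendor" sysfs file that prints pdev->vendor in hex.
 */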
47 pci_config_attr(vendor, "0x%04x\n");
48 pci_config_attr(device, "0x%04x\n");
49 pci_config_attr(subsystem_vendor, "0x%04x\n");
50 pci_config_attr(subsystem_device, "0x%04x\n");
51 pci_config_attr(revision, "0x%02x\n");
52 pci_config_attr(class, "0x%06x\n");
53
54 static ssize_t irq_show(struct device *dev,
55 struct device_attribute *attr,
56 char *buf)
57 {
58 struct pci_dev *pdev = to_pci_dev(dev);
59
60 #ifdef CONFIG_PCI_MSI
61 /*
62 * For MSI, show the first MSI IRQ; for all other cases including
63 * MSI-X, show the legacy INTx IRQ.
64 */
65 if (pdev->msi_enabled) {
66 struct msi_desc *desc = first_pci_msi_entry(pdev);
67
68 return sysfs_emit(buf, "%u\n", desc->irq);
69 }
70 #endif
71
72 return sysfs_emit(buf, "%u\n", pdev->irq);
73 }
74 static DEVICE_ATTR_RO(irq);
75
76 static ssize_t broken_parity_status_show(struct device *dev,
77 struct device_attribute *attr,
78 char *buf)
79 {
80 struct pci_dev *pdev = to_pci_dev(dev);
81 return sysfs_emit(buf, "%u\n", pdev->broken_parity_status);
82 }
83
84 static ssize_t broken_parity_status_store(struct device *dev,
85 struct device_attribute *attr,
86 const char *buf, size_t count)
87 {
88 struct pci_dev *pdev = to_pci_dev(dev);
89 unsigned long val;
90
91 if (kstrtoul(buf, 0, &val) < 0)
92 return -EINVAL;
93
94 pdev->broken_parity_status = !!val;
95
96 return count;
97 }
98 static DEVICE_ATTR_RW(broken_parity_status);
99
100 static ssize_t pci_dev_show_local_cpu(struct device *dev, bool list,
101 struct device_attribute *attr, char *buf)
102 {
103 const struct cpumask *mask;
104
105 #ifdef CONFIG_NUMA
106 if (dev_to_node(dev) == NUMA_NO_NODE)
107 mask = cpu_online_mask;
108 else
109 mask = cpumask_of_node(dev_to_node(dev));
110 #else
111 mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
112 #endif
113 return cpumap_print_to_pagebuf(list, buf, mask);
114 }
115
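/*
 * local_cpus prints the mask as hex words (cpumask format), while
 * local_cpulist prints a human-readable range list such as "0-3,8-11";
 * both come from cpumap_print_to_pagebuf() with list=false/true.
 */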
116 static ssize_t local_cpus_show(struct device *dev,
117 struct device_attribute *attr, char *buf)
118 {
119 return pci_dev_show_local_cpu(dev, false, attr, buf);
120 }
121 static DEVICE_ATTR_RO(local_cpus);
122
123 static ssize_t local_cpulist_show(struct device *dev,
124 struct device_attribute *attr, char *buf)
125 {
126 return pci_dev_show_local_cpu(dev, true, attr, buf);
127 }
128 static DEVICE_ATTR_RO(local_cpulist);
129
130 /*
131 * PCI Bus Class Devices
132 */
133 static ssize_t cpuaffinity_show(struct device *dev,
134 struct device_attribute *attr, char *buf)
135 {
136 const struct cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev));
137
138 return cpumap_print_to_pagebuf(false, buf, cpumask);
139 }
140 static DEVICE_ATTR_RO(cpuaffinity);
141
142 static ssize_t cpulistaffinity_show(struct device *dev,
143 struct device_attribute *attr, char *buf)
144 {
145 const struct cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev));
146
147 return cpumap_print_to_pagebuf(true, buf, cpumask);
148 }
149 static DEVICE_ATTR_RO(cpulistaffinity);
150
151 static ssize_t power_state_show(struct device *dev,
152 struct device_attribute *attr, char *buf)
153 {
154 struct pci_dev *pdev = to_pci_dev(dev);
155
156 return sysfs_emit(buf, "%s\n", pci_power_name(pdev->current_state));
157 }
158 static DEVICE_ATTR_RO(power_state);
159
160 /* show resources */
161 static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
162 char *buf)
163 {
164 struct pci_dev *pci_dev = to_pci_dev(dev);
165 int i;
166 int max;
167 resource_size_t start, end;
168 size_t len = 0;
169
170 if (pci_dev->subordinate)
171 max = DEVICE_COUNT_RESOURCE;
172 else
173 max = PCI_BRIDGE_RESOURCES;
174
175 for (i = 0; i < max; i++) {
176 struct resource *res = &pci_dev->resource[i];
177 pci_resource_to_user(pci_dev, i, res, &start, &end);
178 len += sysfs_emit_at(buf, len, "0x%016llx 0x%016llx 0x%016llx\n",
179 (unsigned long long)start,
180 (unsigned long long)end,
181 (unsigned long long)res->flags);
182 }
183 return len;
184 }
185 static DEVICE_ATTR_RO(resource);
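/*
 * Each line of the "resource" file is "<start> <end> <flags>" in 64-bit
 * hex for one entry of pdev->resource[], in BAR order, with addresses
 * translated to their user-visible form by pci_resource_to_user().
 */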
186
187 static ssize_t max_link_speed_show(struct device *dev,
188 struct device_attribute *attr, char *buf)
189 {
190 struct pci_dev *pdev = to_pci_dev(dev);
191
192 return sysfs_emit(buf, "%s\n",
193 pci_speed_string(pcie_get_speed_cap(pdev)));
194 }
195 static DEVICE_ATTR_RO(max_link_speed);
196
197 static ssize_t max_link_width_show(struct device *dev,
198 struct device_attribute *attr, char *buf)
199 {
200 struct pci_dev *pdev = to_pci_dev(dev);
201
202 return sysfs_emit(buf, "%u\n", pcie_get_width_cap(pdev));
203 }
204 static DEVICE_ATTR_RO(max_link_width);
205
206 static ssize_t current_link_speed_show(struct device *dev,
207 struct device_attribute *attr, char *buf)
208 {
209 struct pci_dev *pci_dev = to_pci_dev(dev);
210 u16 linkstat;
211 int err;
212 enum pci_bus_speed speed;
213
214 err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
215 if (err)
216 return -EINVAL;
217
218 speed = pcie_link_speed[linkstat & PCI_EXP_LNKSTA_CLS];
219
220 return sysfs_emit(buf, "%s\n", pci_speed_string(speed));
221 }
222 static DEVICE_ATTR_RO(current_link_speed);
223
224 static ssize_t current_link_width_show(struct device *dev,
225 struct device_attribute *attr, char *buf)
226 {
227 struct pci_dev *pci_dev = to_pci_dev(dev);
228 u16 linkstat;
229 int err;
230
231 err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
232 if (err)
233 return -EINVAL;
234
235 return sysfs_emit(buf, "%u\n",
236 (linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT);
237 }
238 static DEVICE_ATTR_RO(current_link_width);
239
240 static ssize_t secondary_bus_number_show(struct device *dev,
241 struct device_attribute *attr,
242 char *buf)
243 {
244 struct pci_dev *pci_dev = to_pci_dev(dev);
245 u8 sec_bus;
246 int err;
247
248 err = pci_read_config_byte(pci_dev, PCI_SECONDARY_BUS, &sec_bus);
249 if (err)
250 return -EINVAL;
251
252 return sysfs_emit(buf, "%u\n", sec_bus);
253 }
254 static DEVICE_ATTR_RO(secondary_bus_number);
255
256 static ssize_t subordinate_bus_number_show(struct device *dev,
257 struct device_attribute *attr,
258 char *buf)
259 {
260 struct pci_dev *pci_dev = to_pci_dev(dev);
261 u8 sub_bus;
262 int err;
263
264 err = pci_read_config_byte(pci_dev, PCI_SUBORDINATE_BUS, &sub_bus);
265 if (err)
266 return -EINVAL;
267
268 return sysfs_emit(buf, "%u\n", sub_bus);
269 }
270 static DEVICE_ATTR_RO(subordinate_bus_number);
271
272 static ssize_t ari_enabled_show(struct device *dev,
273 struct device_attribute *attr,
274 char *buf)
275 {
276 struct pci_dev *pci_dev = to_pci_dev(dev);
277
278 return sysfs_emit(buf, "%u\n", pci_ari_enabled(pci_dev->bus));
279 }
280 static DEVICE_ATTR_RO(ari_enabled);
281
282 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
283 char *buf)
284 {
285 struct pci_dev *pci_dev = to_pci_dev(dev);
286
287 return sysfs_emit(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X\n",
288 pci_dev->vendor, pci_dev->device,
289 pci_dev->subsystem_vendor, pci_dev->subsystem_device,
290 (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
291 (u8)(pci_dev->class));
292 }
293 static DEVICE_ATTR_RO(modalias);
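/*
 * Illustrative output (IDs made up for the example):
 *   pci:v00008086d00001533sv000015D9sd00001533bc02sc00i00
 */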
294
295 static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
296 const char *buf, size_t count)
297 {
298 struct pci_dev *pdev = to_pci_dev(dev);
299 unsigned long val;
300 ssize_t result = 0;
301
302 /* this can crash the machine when done on the "wrong" device */
303 if (!capable(CAP_SYS_ADMIN))
304 return -EPERM;
305
306 if (kstrtoul(buf, 0, &val) < 0)
307 return -EINVAL;
308
309 device_lock(dev);
310 if (dev->driver)
311 result = -EBUSY;
312 else if (val)
313 result = pci_enable_device(pdev);
314 else if (pci_is_enabled(pdev))
315 pci_disable_device(pdev);
316 else
317 result = -EIO;
318 device_unlock(dev);
319
320 return result < 0 ? result : count;
321 }
322
323 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
324 char *buf)
325 {
326 struct pci_dev *pdev;
327
328 pdev = to_pci_dev(dev);
329 return sysfs_emit(buf, "%u\n", atomic_read(&pdev->enable_cnt));
330 }
331 static DEVICE_ATTR_RW(enable);
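/*
 * Typical use from a root shell (path is illustrative):
 *   echo 1 > /sys/bus/pci/devices/0000:00:1f.2/enable
 * Writes fail with -EBUSY while a driver is bound; reads report the
 * current enable count.
 */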
332
333 #ifdef CONFIG_NUMA
334 static ssize_t numa_node_store(struct device *dev,
335 struct device_attribute *attr, const char *buf,
336 size_t count)
337 {
338 struct pci_dev *pdev = to_pci_dev(dev);
339 int node;
340
341 if (!capable(CAP_SYS_ADMIN))
342 return -EPERM;
343
344 if (kstrtoint(buf, 0, &node) < 0)
345 return -EINVAL;
346
347 if ((node < 0 && node != NUMA_NO_NODE) || node >= MAX_NUMNODES)
348 return -EINVAL;
349
350 if (node != NUMA_NO_NODE && !node_online(node))
351 return -EINVAL;
352
353 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
354 pci_alert(pdev, FW_BUG "Overriding NUMA node to %d. Contact your vendor for updates.",
355 node);
356
357 dev->numa_node = node;
358 return count;
359 }
360
361 static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
362 char *buf)
363 {
364 return sysfs_emit(buf, "%d\n", dev->numa_node);
365 }
366 static DEVICE_ATTR_RW(numa_node);
367 #endif
368
369 static ssize_t dma_mask_bits_show(struct device *dev,
370 struct device_attribute *attr, char *buf)
371 {
372 struct pci_dev *pdev = to_pci_dev(dev);
373
374 return sysfs_emit(buf, "%d\n", fls64(pdev->dma_mask));
375 }
376 static DEVICE_ATTR_RO(dma_mask_bits);
377
378 static ssize_t consistent_dma_mask_bits_show(struct device *dev,
379 struct device_attribute *attr,
380 char *buf)
381 {
382 return sysfs_emit(buf, "%d\n", fls64(dev->coherent_dma_mask));
383 }
384 static DEVICE_ATTR_RO(consistent_dma_mask_bits);
385
386 static ssize_t msi_bus_show(struct device *dev, struct device_attribute *attr,
387 char *buf)
388 {
389 struct pci_dev *pdev = to_pci_dev(dev);
390 struct pci_bus *subordinate = pdev->subordinate;
391
392 return sysfs_emit(buf, "%u\n", subordinate ?
393 !(subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI)
394 : !pdev->no_msi);
395 }
396
397 static ssize_t msi_bus_store(struct device *dev, struct device_attribute *attr,
398 const char *buf, size_t count)
399 {
400 struct pci_dev *pdev = to_pci_dev(dev);
401 struct pci_bus *subordinate = pdev->subordinate;
402 unsigned long val;
403
404 if (!capable(CAP_SYS_ADMIN))
405 return -EPERM;
406
407 if (kstrtoul(buf, 0, &val) < 0)
408 return -EINVAL;
409
410 /*
411 * "no_msi" and "bus_flags" only affect what happens when a driver
412 * requests MSI or MSI-X. They don't affect any drivers that have
413 * already requested MSI or MSI-X.
414 */
415 if (!subordinate) {
416 pdev->no_msi = !val;
417 pci_info(pdev, "MSI/MSI-X %s for future drivers\n",
418 val ? "allowed" : "disallowed");
419 return count;
420 }
421
422 if (val)
423 subordinate->bus_flags &= ~PCI_BUS_FLAGS_NO_MSI;
424 else
425 subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
426
427 dev_info(&subordinate->dev, "MSI/MSI-X %s for future drivers of devices on this bus\n",
428 val ? "allowed" : "disallowed");
429 return count;
430 }
431 static DEVICE_ATTR_RW(msi_bus);
432
433 static ssize_t rescan_store(struct bus_type *bus, const char *buf, size_t count)
434 {
435 unsigned long val;
436 struct pci_bus *b = NULL;
437
438 if (kstrtoul(buf, 0, &val) < 0)
439 return -EINVAL;
440
441 if (val) {
442 pci_lock_rescan_remove();
443 while ((b = pci_find_next_bus(b)) != NULL)
444 pci_rescan_bus(b);
445 pci_unlock_rescan_remove();
446 }
447 return count;
448 }
449 static BUS_ATTR_WO(rescan);
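/*
 * This is the bus-type attribute /sys/bus/pci/rescan; writing a
 * non-zero value rescans every known root bus.
 */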
450
451 static struct attribute *pci_bus_attrs[] = {
452 &bus_attr_rescan.attr,
453 NULL,
454 };
455
456 static const struct attribute_group pci_bus_group = {
457 .attrs = pci_bus_attrs,
458 };
459
460 const struct attribute_group *pci_bus_groups[] = {
461 &pci_bus_group,
462 NULL,
463 };
464
465 static ssize_t dev_rescan_store(struct device *dev,
466 struct device_attribute *attr, const char *buf,
467 size_t count)
468 {
469 unsigned long val;
470 struct pci_dev *pdev = to_pci_dev(dev);
471
472 if (kstrtoul(buf, 0, &val) < 0)
473 return -EINVAL;
474
475 if (val) {
476 pci_lock_rescan_remove();
477 pci_rescan_bus(pdev->bus);
478 pci_unlock_rescan_remove();
479 }
480 return count;
481 }
482 static struct device_attribute dev_attr_dev_rescan = __ATTR(rescan, 0200, NULL,
483 dev_rescan_store);
484
485 static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
486 const char *buf, size_t count)
487 {
488 unsigned long val;
489
490 if (kstrtoul(buf, 0, &val) < 0)
491 return -EINVAL;
492
493 if (val && device_remove_file_self(dev, attr))
494 pci_stop_and_remove_bus_device_locked(to_pci_dev(dev));
495 return count;
496 }
497 static DEVICE_ATTR_IGNORE_LOCKDEP(remove, 0220, NULL,
498 remove_store);
499
500 static ssize_t bus_rescan_store(struct device *dev,
501 struct device_attribute *attr,
502 const char *buf, size_t count)
503 {
504 unsigned long val;
505 struct pci_bus *bus = to_pci_bus(dev);
506
507 if (kstrtoul(buf, 0, &val) < 0)
508 return -EINVAL;
509
510 if (val) {
511 pci_lock_rescan_remove();
512 if (!pci_is_root_bus(bus) && list_empty(&bus->devices))
513 pci_rescan_bus_bridge_resize(bus->self);
514 else
515 pci_rescan_bus(bus);
516 pci_unlock_rescan_remove();
517 }
518 return count;
519 }
520 static struct device_attribute dev_attr_bus_rescan = __ATTR(rescan, 0200, NULL,
521 bus_rescan_store);
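/*
 * Note: both attributes above are named "rescan" in sysfs;
 * dev_attr_dev_rescan is exposed under a PCI device, while
 * dev_attr_bus_rescan is exposed under a PCI bus object.
 */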
522
523 #if defined(CONFIG_PM) && defined(CONFIG_ACPI)
524 static ssize_t d3cold_allowed_store(struct device *dev,
525 struct device_attribute *attr,
526 const char *buf, size_t count)
527 {
528 struct pci_dev *pdev = to_pci_dev(dev);
529 unsigned long val;
530
531 if (kstrtoul(buf, 0, &val) < 0)
532 return -EINVAL;
533
534 pdev->d3cold_allowed = !!val;
535 if (pdev->d3cold_allowed)
536 pci_d3cold_enable(pdev);
537 else
538 pci_d3cold_disable(pdev);
539
540 pm_runtime_resume(dev);
541
542 return count;
543 }
544
545 static ssize_t d3cold_allowed_show(struct device *dev,
546 struct device_attribute *attr, char *buf)
547 {
548 struct pci_dev *pdev = to_pci_dev(dev);
549 return sysfs_emit(buf, "%u\n", pdev->d3cold_allowed);
550 }
551 static DEVICE_ATTR_RW(d3cold_allowed);
552 #endif
553
554 #ifdef CONFIG_OF
555 static ssize_t devspec_show(struct device *dev,
556 struct device_attribute *attr, char *buf)
557 {
558 struct pci_dev *pdev = to_pci_dev(dev);
559 struct device_node *np = pci_device_to_OF_node(pdev);
560
561 if (np == NULL)
562 return 0;
563 return sysfs_emit(buf, "%pOF\n", np);
564 }
565 static DEVICE_ATTR_RO(devspec);
566 #endif
567
568 static ssize_t driver_override_store(struct device *dev,
569 struct device_attribute *attr,
570 const char *buf, size_t count)
571 {
572 struct pci_dev *pdev = to_pci_dev(dev);
573 char *driver_override, *old, *cp;
574
575 /* We need to keep extra room for a newline */
576 if (count >= (PAGE_SIZE - 1))
577 return -EINVAL;
578
579 driver_override = kstrndup(buf, count, GFP_KERNEL);
580 if (!driver_override)
581 return -ENOMEM;
582
583 cp = strchr(driver_override, '\n');
584 if (cp)
585 *cp = '\0';
586
587 device_lock(dev);
588 old = pdev->driver_override;
589 if (strlen(driver_override)) {
590 pdev->driver_override = driver_override;
591 } else {
592 kfree(driver_override);
593 pdev->driver_override = NULL;
594 }
595 device_unlock(dev);
596
597 kfree(old);
598
599 return count;
600 }
601
602 static ssize_t driver_override_show(struct device *dev,
603 struct device_attribute *attr, char *buf)
604 {
605 struct pci_dev *pdev = to_pci_dev(dev);
606 ssize_t len;
607
608 device_lock(dev);
609 len = sysfs_emit(buf, "%s\n", pdev->driver_override);
610 device_unlock(dev);
611 return len;
612 }
613 static DEVICE_ATTR_RW(driver_override);
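/*
 * Typical use (illustrative path), e.g. to steer a device to vfio-pci:
 *   echo vfio-pci > /sys/bus/pci/devices/0000:00:1f.2/driver_override
 * typically followed by unbinding and re-probing the device. Writing an
 * empty string clears the override.
 */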
614
615 static struct attribute *pci_dev_attrs[] = {
616 &dev_attr_power_state.attr,
617 &dev_attr_resource.attr,
618 &dev_attr_vendor.attr,
619 &dev_attr_device.attr,
620 &dev_attr_subsystem_vendor.attr,
621 &dev_attr_subsystem_device.attr,
622 &dev_attr_revision.attr,
623 &dev_attr_class.attr,
624 &dev_attr_irq.attr,
625 &dev_attr_local_cpus.attr,
626 &dev_attr_local_cpulist.attr,
627 &dev_attr_modalias.attr,
628 #ifdef CONFIG_NUMA
629 &dev_attr_numa_node.attr,
630 #endif
631 &dev_attr_dma_mask_bits.attr,
632 &dev_attr_consistent_dma_mask_bits.attr,
633 &dev_attr_enable.attr,
634 &dev_attr_broken_parity_status.attr,
635 &dev_attr_msi_bus.attr,
636 #if defined(CONFIG_PM) && defined(CONFIG_ACPI)
637 &dev_attr_d3cold_allowed.attr,
638 #endif
639 #ifdef CONFIG_OF
640 &dev_attr_devspec.attr,
641 #endif
642 &dev_attr_driver_override.attr,
643 &dev_attr_ari_enabled.attr,
644 NULL,
645 };
646
647 static struct attribute *pci_bridge_attrs[] = {
648 &dev_attr_subordinate_bus_number.attr,
649 &dev_attr_secondary_bus_number.attr,
650 NULL,
651 };
652
653 static struct attribute *pcie_dev_attrs[] = {
654 &dev_attr_current_link_speed.attr,
655 &dev_attr_current_link_width.attr,
656 &dev_attr_max_link_width.attr,
657 &dev_attr_max_link_speed.attr,
658 NULL,
659 };
660
661 static struct attribute *pcibus_attrs[] = {
662 &dev_attr_bus_rescan.attr,
663 &dev_attr_cpuaffinity.attr,
664 &dev_attr_cpulistaffinity.attr,
665 NULL,
666 };
667
668 static const struct attribute_group pcibus_group = {
669 .attrs = pcibus_attrs,
670 };
671
672 const struct attribute_group *pcibus_groups[] = {
673 &pcibus_group,
674 NULL,
675 };
676
677 static ssize_t boot_vga_show(struct device *dev, struct device_attribute *attr,
678 char *buf)
679 {
680 struct pci_dev *pdev = to_pci_dev(dev);
681 struct pci_dev *vga_dev = vga_default_device();
682
683 if (vga_dev)
684 return sysfs_emit(buf, "%u\n", (pdev == vga_dev));
685
686 return sysfs_emit(buf, "%u\n",
687 !!(pdev->resource[PCI_ROM_RESOURCE].flags &
688 IORESOURCE_ROM_SHADOW));
689 }
690 static DEVICE_ATTR_RO(boot_vga);
691
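/*
 * Config space reads below are split into byte/word/dword accesses so
 * that each access stays naturally aligned. Readers without
 * CAP_SYS_ADMIN only see the first 64 bytes (128 for CardBus), since
 * some devices misbehave when undefined config space is read.
 */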
692 static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
693 struct bin_attribute *bin_attr, char *buf,
694 loff_t off, size_t count)
695 {
696 struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
697 unsigned int size = 64;
698 loff_t init_off = off;
699 u8 *data = (u8 *) buf;
700
701 /* Several chips lock up trying to read undefined config space */
702 if (file_ns_capable(filp, &init_user_ns, CAP_SYS_ADMIN))
703 size = dev->cfg_size;
704 else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
705 size = 128;
706
707 if (off > size)
708 return 0;
709 if (off + count > size) {
710 size -= off;
711 count = size;
712 } else {
713 size = count;
714 }
715
716 pci_config_pm_runtime_get(dev);
717
718 if ((off & 1) && size) {
719 u8 val;
720 pci_user_read_config_byte(dev, off, &val);
721 data[off - init_off] = val;
722 off++;
723 size--;
724 }
725
726 if ((off & 3) && size > 2) {
727 u16 val;
728 pci_user_read_config_word(dev, off, &val);
729 data[off - init_off] = val & 0xff;
730 data[off - init_off + 1] = (val >> 8) & 0xff;
731 off += 2;
732 size -= 2;
733 }
734
735 while (size > 3) {
736 u32 val;
737 pci_user_read_config_dword(dev, off, &val);
738 data[off - init_off] = val & 0xff;
739 data[off - init_off + 1] = (val >> 8) & 0xff;
740 data[off - init_off + 2] = (val >> 16) & 0xff;
741 data[off - init_off + 3] = (val >> 24) & 0xff;
742 off += 4;
743 size -= 4;
744 cond_resched();
745 }
746
747 if (size >= 2) {
748 u16 val;
749 pci_user_read_config_word(dev, off, &val);
750 data[off - init_off] = val & 0xff;
751 data[off - init_off + 1] = (val >> 8) & 0xff;
752 off += 2;
753 size -= 2;
754 }
755
756 if (size > 0) {
757 u8 val;
758 pci_user_read_config_byte(dev, off, &val);
759 data[off - init_off] = val;
760 off++;
761 --size;
762 }
763
764 pci_config_pm_runtime_put(dev);
765
766 return count;
767 }
768
769 static ssize_t pci_write_config(struct file *filp, struct kobject *kobj,
770 struct bin_attribute *bin_attr, char *buf,
771 loff_t off, size_t count)
772 {
773 struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
774 unsigned int size = count;
775 loff_t init_off = off;
776 u8 *data = (u8 *) buf;
777 int ret;
778
779 ret = security_locked_down(LOCKDOWN_PCI_ACCESS);
780 if (ret)
781 return ret;
782
783 if (off > dev->cfg_size)
784 return 0;
785 if (off + count > dev->cfg_size) {
786 size = dev->cfg_size - off;
787 count = size;
788 }
789
790 pci_config_pm_runtime_get(dev);
791
792 if ((off & 1) && size) {
793 pci_user_write_config_byte(dev, off, data[off - init_off]);
794 off++;
795 size--;
796 }
797
798 if ((off & 3) && size > 2) {
799 u16 val = data[off - init_off];
800 val |= (u16) data[off - init_off + 1] << 8;
801 pci_user_write_config_word(dev, off, val);
802 off += 2;
803 size -= 2;
804 }
805
806 while (size > 3) {
807 u32 val = data[off - init_off];
808 val |= (u32) data[off - init_off + 1] << 8;
809 val |= (u32) data[off - init_off + 2] << 16;
810 val |= (u32) data[off - init_off + 3] << 24;
811 pci_user_write_config_dword(dev, off, val);
812 off += 4;
813 size -= 4;
814 }
815
816 if (size >= 2) {
817 u16 val = data[off - init_off];
818 val |= (u16) data[off - init_off + 1] << 8;
819 pci_user_write_config_word(dev, off, val);
820 off += 2;
821 size -= 2;
822 }
823
824 if (size) {
825 pci_user_write_config_byte(dev, off, data[off - init_off]);
826 off++;
827 --size;
828 }
829
830 pci_config_pm_runtime_put(dev);
831
832 return count;
833 }
834 static BIN_ATTR(config, 0644, pci_read_config, pci_write_config, 0);
835
836 static struct bin_attribute *pci_dev_config_attrs[] = {
837 &bin_attr_config,
838 NULL,
839 };
840
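/*
 * Size the "config" file to the standard 256-byte config space, or to
 * the 4096-byte extended space when the device exposes it.
 */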
841 static umode_t pci_dev_config_attr_is_visible(struct kobject *kobj,
842 struct bin_attribute *a, int n)
843 {
844 struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
845
846 a->size = PCI_CFG_SPACE_SIZE;
847 if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
848 a->size = PCI_CFG_SPACE_EXP_SIZE;
849
850 return a->attr.mode;
851 }
852
853 static const struct attribute_group pci_dev_config_attr_group = {
854 .bin_attrs = pci_dev_config_attrs,
855 .is_bin_visible = pci_dev_config_attr_is_visible,
856 };
857
858 #ifdef HAVE_PCI_LEGACY
859 /**
860 * pci_read_legacy_io - read byte(s) from legacy I/O port space
861 * @filp: open sysfs file
862 * @kobj: kobject corresponding to file to read from
863 * @bin_attr: struct bin_attribute for this file
864 * @buf: buffer to store results
865 * @off: offset into legacy I/O port space
866 * @count: number of bytes to read
867 *
868 * Reads 1, 2, or 4 bytes from legacy I/O port space using an arch specific
869 * callback routine (pci_legacy_read).
870 */
871 static ssize_t pci_read_legacy_io(struct file *filp, struct kobject *kobj,
872 struct bin_attribute *bin_attr, char *buf,
873 loff_t off, size_t count)
874 {
875 struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
876
877 /* Only support 1, 2 or 4 byte accesses */
878 if (count != 1 && count != 2 && count != 4)
879 return -EINVAL;
880
881 return pci_legacy_read(bus, off, (u32 *)buf, count);
882 }
883
884 /**
885 * pci_write_legacy_io - write byte(s) to legacy I/O port space
886 * @filp: open sysfs file
887 * @kobj: kobject corresponding to file to write to
888 * @bin_attr: struct bin_attribute for this file
889 * @buf: buffer containing value to be written
890 * @off: offset into legacy I/O port space
891 * @count: number of bytes to write
892 *
893 * Writes 1, 2, or 4 bytes to legacy I/O port space using an arch specific
894 * callback routine (pci_legacy_write).
895 */
896 static ssize_t pci_write_legacy_io(struct file *filp, struct kobject *kobj,
897 struct bin_attribute *bin_attr, char *buf,
898 loff_t off, size_t count)
899 {
900 struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
901
902 /* Only support 1, 2 or 4 byte accesses */
903 if (count != 1 && count != 2 && count != 4)
904 return -EINVAL;
905
906 return pci_legacy_write(bus, off, *(u32 *)buf, count);
907 }
908
909 /**
910 * pci_mmap_legacy_mem - map legacy PCI memory into user memory space
911 * @filp: open sysfs file
912 * @kobj: kobject corresponding to device to be mapped
913 * @attr: struct bin_attribute for this file
914 * @vma: struct vm_area_struct passed to mmap
915 *
916 * Uses an arch specific callback, pci_mmap_legacy_mem_page_range, to mmap
917 * legacy memory space (first meg of bus space) into application virtual
918 * memory space.
919 */
920 static int pci_mmap_legacy_mem(struct file *filp, struct kobject *kobj,
921 struct bin_attribute *attr,
922 struct vm_area_struct *vma)
923 {
924 struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
925
926 return pci_mmap_legacy_page_range(bus, vma, pci_mmap_mem);
927 }
928
929 /**
930 * pci_mmap_legacy_io - map legacy PCI IO into user memory space
931 * @filp: open sysfs file
932 * @kobj: kobject corresponding to device to be mapped
933 * @attr: struct bin_attribute for this file
934 * @vma: struct vm_area_struct passed to mmap
935 *
936 * Uses an arch specific callback, pci_mmap_legacy_io_page_range, to mmap
937 * legacy IO space (first meg of bus space) into application virtual
938 * memory space. Returns -ENOSYS if the operation isn't supported
939 */
940 static int pci_mmap_legacy_io(struct file *filp, struct kobject *kobj,
941 struct bin_attribute *attr,
942 struct vm_area_struct *vma)
943 {
944 struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
945
946 return pci_mmap_legacy_page_range(bus, vma, pci_mmap_io);
947 }
948
949 /**
950 * pci_adjust_legacy_attr - adjustment of legacy file attributes
951 * @b: bus to create files under
952 * @mmap_type: I/O port or memory
953 *
954 * Stub implementation. Can be overridden by arch if necessary.
955 */
956 void __weak pci_adjust_legacy_attr(struct pci_bus *b,
957 enum pci_mmap_state mmap_type)
958 {
959 }
960
961 /**
962 * pci_create_legacy_files - create legacy I/O port and memory files
963 * @b: bus to create files under
964 *
965 * Some platforms allow access to legacy I/O port and ISA memory space on
966 * a per-bus basis. This routine creates the files and ties them into
967 * their associated read, write and mmap files from pci-sysfs.c
968 *
969 * On error unwind, but don't propagate the error to the caller
970 * as it is ok to set up the PCI bus without these files.
971 */
972 void pci_create_legacy_files(struct pci_bus *b)
973 {
974 int error;
975
976 if (!sysfs_initialized)
977 return;
978
979 b->legacy_io = kcalloc(2, sizeof(struct bin_attribute),
980 GFP_ATOMIC);
981 if (!b->legacy_io)
982 goto kzalloc_err;
983
984 sysfs_bin_attr_init(b->legacy_io);
985 b->legacy_io->attr.name = "legacy_io";
986 b->legacy_io->size = 0xffff;
987 b->legacy_io->attr.mode = 0600;
988 b->legacy_io->read = pci_read_legacy_io;
989 b->legacy_io->write = pci_write_legacy_io;
990 b->legacy_io->mmap = pci_mmap_legacy_io;
991 b->legacy_io->f_mapping = iomem_get_mapping;
992 pci_adjust_legacy_attr(b, pci_mmap_io);
993 error = device_create_bin_file(&b->dev, b->legacy_io);
994 if (error)
995 goto legacy_io_err;
996
997 /* Allocated above after the legacy_io struct */
998 b->legacy_mem = b->legacy_io + 1;
999 sysfs_bin_attr_init(b->legacy_mem);
1000 b->legacy_mem->attr.name = "legacy_mem";
1001 b->legacy_mem->size = 1024*1024;
1002 b->legacy_mem->attr.mode = 0600;
1003 b->legacy_mem->mmap = pci_mmap_legacy_mem;
1004 b->legacy_mem->f_mapping = iomem_get_mapping;
1005 pci_adjust_legacy_attr(b, pci_mmap_mem);
1006 error = device_create_bin_file(&b->dev, b->legacy_mem);
1007 if (error)
1008 goto legacy_mem_err;
1009
1010 return;
1011
1012 legacy_mem_err:
1013 device_remove_bin_file(&b->dev, b->legacy_io);
1014 legacy_io_err:
1015 kfree(b->legacy_io);
1016 b->legacy_io = NULL;
1017 kzalloc_err:
1018 dev_warn(&b->dev, "could not create legacy I/O port and ISA memory resources in sysfs\n");
1019 }
1020
1021 void pci_remove_legacy_files(struct pci_bus *b)
1022 {
1023 if (b->legacy_io) {
1024 device_remove_bin_file(&b->dev, b->legacy_io);
1025 device_remove_bin_file(&b->dev, b->legacy_mem);
1026 kfree(b->legacy_io); /* both are allocated here */
1027 }
1028 }
1029 #endif /* HAVE_PCI_LEGACY */
1030
1031 #if defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)
1032
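/*
 * pci_mmap_fits() returns 1 when the requested mapping (vma->vm_pgoff
 * for vma_pages(vma) pages) lies entirely within the BAR. For the
 * procfs mmap API the offset is expressed in user-visible resource
 * addresses, hence the pci_resource_to_user() adjustment.
 */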
1033 int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
1034 enum pci_mmap_api mmap_api)
1035 {
1036 unsigned long nr, start, size;
1037 resource_size_t pci_start = 0, pci_end;
1038
1039 if (pci_resource_len(pdev, resno) == 0)
1040 return 0;
1041 nr = vma_pages(vma);
1042 start = vma->vm_pgoff;
1043 size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
1044 if (mmap_api == PCI_MMAP_PROCFS) {
1045 pci_resource_to_user(pdev, resno, &pdev->resource[resno],
1046 &pci_start, &pci_end);
1047 pci_start >>= PAGE_SHIFT;
1048 }
1049 if (start >= pci_start && start < pci_start + size &&
1050 start + nr <= pci_start + size)
1051 return 1;
1052 return 0;
1053 }
1054
1055 /**
1056 * pci_mmap_resource - map a PCI resource into user memory space
1057 * @kobj: kobject for mapping
1058 * @attr: struct bin_attribute for the file being mapped
1059 * @vma: struct vm_area_struct passed into the mmap
1060 * @write_combine: 1 for write_combine mapping
1061 *
1062 * Use the regular PCI mapping routines to map a PCI resource into userspace.
1063 */
1064 static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
1065 struct vm_area_struct *vma, int write_combine)
1066 {
1067 struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
1068 int bar = (unsigned long)attr->private;
1069 enum pci_mmap_state mmap_type;
1070 struct resource *res = &pdev->resource[bar];
1071 int ret;
1072
1073 ret = security_locked_down(LOCKDOWN_PCI_ACCESS);
1074 if (ret)
1075 return ret;
1076
1077 if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start))
1078 return -EINVAL;
1079
1080 if (!pci_mmap_fits(pdev, bar, vma, PCI_MMAP_SYSFS))
1081 return -EINVAL;
1082
1083 mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io;
1084
1085 return pci_mmap_resource_range(pdev, bar, vma, mmap_type, write_combine);
1086 }
1087
1088 static int pci_mmap_resource_uc(struct file *filp, struct kobject *kobj,
1089 struct bin_attribute *attr,
1090 struct vm_area_struct *vma)
1091 {
1092 return pci_mmap_resource(kobj, attr, vma, 0);
1093 }
1094
1095 static int pci_mmap_resource_wc(struct file *filp, struct kobject *kobj,
1096 struct bin_attribute *attr,
1097 struct vm_area_struct *vma)
1098 {
1099 return pci_mmap_resource(kobj, attr, vma, 1);
1100 }
1101
1102 static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj,
1103 struct bin_attribute *attr, char *buf,
1104 loff_t off, size_t count, bool write)
1105 {
1106 struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
1107 int bar = (unsigned long)attr->private;
1108 unsigned long port = off;
1109
1110 port += pci_resource_start(pdev, bar);
1111
1112 if (port > pci_resource_end(pdev, bar))
1113 return 0;
1114
1115 if (port + count - 1 > pci_resource_end(pdev, bar))
1116 return -EINVAL;
1117
1118 switch (count) {
1119 case 1:
1120 if (write)
1121 outb(*(u8 *)buf, port);
1122 else
1123 *(u8 *)buf = inb(port);
1124 return 1;
1125 case 2:
1126 if (write)
1127 outw(*(u16 *)buf, port);
1128 else
1129 *(u16 *)buf = inw(port);
1130 return 2;
1131 case 4:
1132 if (write)
1133 outl(*(u32 *)buf, port);
1134 else
1135 *(u32 *)buf = inl(port);
1136 return 4;
1137 }
1138 return -EINVAL;
1139 }
1140
1141 static ssize_t pci_read_resource_io(struct file *filp, struct kobject *kobj,
1142 struct bin_attribute *attr, char *buf,
1143 loff_t off, size_t count)
1144 {
1145 return pci_resource_io(filp, kobj, attr, buf, off, count, false);
1146 }
1147
1148 static ssize_t pci_write_resource_io(struct file *filp, struct kobject *kobj,
1149 struct bin_attribute *attr, char *buf,
1150 loff_t off, size_t count)
1151 {
1152 int ret;
1153
1154 ret = security_locked_down(LOCKDOWN_PCI_ACCESS);
1155 if (ret)
1156 return ret;
1157
1158 return pci_resource_io(filp, kobj, attr, buf, off, count, true);
1159 }
1160
1161 /**
1162 * pci_remove_resource_files - cleanup resource files
1163 * @pdev: dev to cleanup
1164 *
1165 * If we created resource files for @pdev, remove them from sysfs and
1166 * free their resources.
1167 */
1168 static void pci_remove_resource_files(struct pci_dev *pdev)
1169 {
1170 int i;
1171
1172 for (i = 0; i < PCI_STD_NUM_BARS; i++) {
1173 struct bin_attribute *res_attr;
1174
1175 res_attr = pdev->res_attr[i];
1176 if (res_attr) {
1177 sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
1178 kfree(res_attr);
1179 }
1180
1181 res_attr = pdev->res_attr_wc[i];
1182 if (res_attr) {
1183 sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
1184 kfree(res_attr);
1185 }
1186 }
1187 }
1188
1189 static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
1190 {
1191 /* allocate attribute structure, piggyback attribute name */
1192 int name_len = write_combine ? 13 : 10;
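/* "resourceN_wc" plus NUL needs 13 bytes, "resourceN" needs 10 (N is a single digit for the standard BARs) */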
1193 struct bin_attribute *res_attr;
1194 char *res_attr_name;
1195 int retval;
1196
1197 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
1198 if (!res_attr)
1199 return -ENOMEM;
1200
1201 res_attr_name = (char *)(res_attr + 1);
1202
1203 sysfs_bin_attr_init(res_attr);
1204 if (write_combine) {
1205 pdev->res_attr_wc[num] = res_attr;
1206 sprintf(res_attr_name, "resource%d_wc", num);
1207 res_attr->mmap = pci_mmap_resource_wc;
1208 } else {
1209 pdev->res_attr[num] = res_attr;
1210 sprintf(res_attr_name, "resource%d", num);
1211 if (pci_resource_flags(pdev, num) & IORESOURCE_IO) {
1212 res_attr->read = pci_read_resource_io;
1213 res_attr->write = pci_write_resource_io;
1214 if (arch_can_pci_mmap_io())
1215 res_attr->mmap = pci_mmap_resource_uc;
1216 } else {
1217 res_attr->mmap = pci_mmap_resource_uc;
1218 }
1219 }
1220 if (res_attr->mmap)
1221 res_attr->f_mapping = iomem_get_mapping;
1222 res_attr->attr.name = res_attr_name;
1223 res_attr->attr.mode = 0600;
1224 res_attr->size = pci_resource_len(pdev, num);
1225 res_attr->private = (void *)(unsigned long)num;
1226 retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr);
1227 if (retval)
1228 kfree(res_attr);
1229
1230 return retval;
1231 }
1232
1233 /**
1234 * pci_create_resource_files - create resource files in sysfs for @dev
1235 * @pdev: dev in question
1236 *
1237 * Walk the resources in @pdev creating files for each resource available.
1238 */
1239 static int pci_create_resource_files(struct pci_dev *pdev)
1240 {
1241 int i;
1242 int retval;
1243
1244 /* Expose the PCI resources from this device as files */
1245 for (i = 0; i < PCI_STD_NUM_BARS; i++) {
1246
1247 /* skip empty resources */
1248 if (!pci_resource_len(pdev, i))
1249 continue;
1250
1251 retval = pci_create_attr(pdev, i, 0);
1252 /* for prefetchable resources, create a WC mappable file */
1253 if (!retval && arch_can_pci_mmap_wc() &&
1254 pdev->resource[i].flags & IORESOURCE_PREFETCH)
1255 retval = pci_create_attr(pdev, i, 1);
1256 if (retval) {
1257 pci_remove_resource_files(pdev);
1258 return retval;
1259 }
1260 }
1261 return 0;
1262 }
1263 #else /* !(defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)) */
1264 int __weak pci_create_resource_files(struct pci_dev *dev) { return 0; }
1265 void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
1266 #endif
1267
1268 /**
1269 * pci_write_rom - used to enable access to the PCI ROM
1270 * @filp: sysfs file
1271 * @kobj: kernel object handle
1272 * @bin_attr: struct bin_attribute for this file
1273 * @buf: user input
1274 * @off: file offset
1275 * @count: number of bytes in input
1276 *
1277 * Writing anything except "0" enables reads of the ROM; writing "0" disables them.
1278 */
1279 static ssize_t pci_write_rom(struct file *filp, struct kobject *kobj,
1280 struct bin_attribute *bin_attr, char *buf,
1281 loff_t off, size_t count)
1282 {
1283 struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
1284
1285 if ((off == 0) && (*buf == '0') && (count == 2))
1286 pdev->rom_attr_enabled = 0;
1287 else
1288 pdev->rom_attr_enabled = 1;
1289
1290 return count;
1291 }
1292
1293 /**
1294 * pci_read_rom - read a PCI ROM
1295 * @filp: sysfs file
1296 * @kobj: kernel object handle
1297 * @bin_attr: struct bin_attribute for this file
1298 * @buf: where to put the data we read from the ROM
1299 * @off: file offset
1300 * @count: number of bytes to read
1301 *
1302 * Put @count bytes starting at @off into @buf from the ROM in the PCI
1303 * device corresponding to @kobj.
1304 */
1305 static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj,
1306 struct bin_attribute *bin_attr, char *buf,
1307 loff_t off, size_t count)
1308 {
1309 struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
1310 void __iomem *rom;
1311 size_t size;
1312
1313 if (!pdev->rom_attr_enabled)
1314 return -EINVAL;
1315
1316 rom = pci_map_rom(pdev, &size); /* size starts out as PCI window size */
1317 if (!rom || !size)
1318 return -EIO;
1319
1320 if (off >= size)
1321 count = 0;
1322 else {
1323 if (off + count > size)
1324 count = size - off;
1325
1326 memcpy_fromio(buf, rom + off, count);
1327 }
1328 pci_unmap_rom(pdev, rom);
1329
1330 return count;
1331 }
1332 static BIN_ATTR(rom, 0600, pci_read_rom, pci_write_rom, 0);
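/*
 * The ROM image is only readable after it has been enabled, e.g.
 * (illustrative path):
 *   echo 1 > /sys/bus/pci/devices/0000:01:00.0/rom
 *   cat /sys/bus/pci/devices/0000:01:00.0/rom > image.rom
 */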
1333
1334 static struct bin_attribute *pci_dev_rom_attrs[] = {
1335 &bin_attr_rom,
1336 NULL,
1337 };
1338
1339 static umode_t pci_dev_rom_attr_is_visible(struct kobject *kobj,
1340 struct bin_attribute *a, int n)
1341 {
1342 struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
1343 size_t rom_size;
1344
1345 /* If the device has a ROM, try to expose it in sysfs. */
1346 rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
1347 if (!rom_size)
1348 return 0;
1349
1350 a->size = rom_size;
1351
1352 return a->attr.mode;
1353 }
1354
1355 static const struct attribute_group pci_dev_rom_attr_group = {
1356 .bin_attrs = pci_dev_rom_attrs,
1357 .is_bin_visible = pci_dev_rom_attr_is_visible,
1358 };
1359
1360 static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
1361 const char *buf, size_t count)
1362 {
1363 struct pci_dev *pdev = to_pci_dev(dev);
1364 unsigned long val;
1365 ssize_t result;
1366
1367 if (kstrtoul(buf, 0, &val) < 0)
1368 return -EINVAL;
1369
1370 if (val != 1)
1371 return -EINVAL;
1372
1373 pm_runtime_get_sync(dev);
1374 result = pci_reset_function(pdev);
1375 pm_runtime_put(dev);
1376 if (result < 0)
1377 return result;
1378
1379 return count;
1380 }
1381 static DEVICE_ATTR_WO(reset);
1382
1383 static struct attribute *pci_dev_reset_attrs[] = {
1384 &dev_attr_reset.attr,
1385 NULL,
1386 };
1387
1388 static umode_t pci_dev_reset_attr_is_visible(struct kobject *kobj,
1389 struct attribute *a, int n)
1390 {
1391 struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
1392
1393 if (!pci_reset_supported(pdev))
1394 return 0;
1395
1396 return a->mode;
1397 }
1398
1399 static const struct attribute_group pci_dev_reset_attr_group = {
1400 .attrs = pci_dev_reset_attrs,
1401 .is_visible = pci_dev_reset_attr_is_visible,
1402 };
1403
1404 int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
1405 {
1406 if (!sysfs_initialized)
1407 return -EACCES;
1408
1409 return pci_create_resource_files(pdev);
1410 }
1411
1412 /**
1413 * pci_remove_sysfs_dev_files - cleanup PCI specific sysfs files
1414 * @pdev: device whose entries we should free
1415 *
1416 * Cleanup when @pdev is removed from sysfs.
1417 */
1418 void pci_remove_sysfs_dev_files(struct pci_dev *pdev)
1419 {
1420 if (!sysfs_initialized)
1421 return;
1422
1423 pci_remove_resource_files(pdev);
1424 }
1425
1426 static int __init pci_sysfs_init(void)
1427 {
1428 struct pci_dev *pdev = NULL;
1429 struct pci_bus *pbus = NULL;
1430 int retval;
1431
1432 sysfs_initialized = 1;
1433 for_each_pci_dev(pdev) {
1434 retval = pci_create_sysfs_dev_files(pdev);
1435 if (retval) {
1436 pci_dev_put(pdev);
1437 return retval;
1438 }
1439 }
1440
1441 while ((pbus = pci_find_next_bus(pbus)))
1442 pci_create_legacy_files(pbus);
1443
1444 return 0;
1445 }
1446 late_initcall(pci_sysfs_init);
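/*
 * pci_sysfs_init() runs as a late initcall: it sets sysfs_initialized
 * and then creates the resource and legacy files for every device and
 * bus discovered during early boot; devices added later typically get
 * theirs via pci_create_sysfs_dev_files() from the device-add path.
 */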
1447
1448 static struct attribute *pci_dev_dev_attrs[] = {
1449 &dev_attr_boot_vga.attr,
1450 NULL,
1451 };
1452
1453 static umode_t pci_dev_attrs_are_visible(struct kobject *kobj,
1454 struct attribute *a, int n)
1455 {
1456 struct device *dev = kobj_to_dev(kobj);
1457 struct pci_dev *pdev = to_pci_dev(dev);
1458
1459 if (a == &dev_attr_boot_vga.attr)
1460 if ((pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
1461 return 0;
1462
1463 return a->mode;
1464 }
1465
1466 static struct attribute *pci_dev_hp_attrs[] = {
1467 &dev_attr_remove.attr,
1468 &dev_attr_dev_rescan.attr,
1469 NULL,
1470 };
1471
1472 static umode_t pci_dev_hp_attrs_are_visible(struct kobject *kobj,
1473 struct attribute *a, int n)
1474 {
1475 struct device *dev = kobj_to_dev(kobj);
1476 struct pci_dev *pdev = to_pci_dev(dev);
1477
1478 if (pdev->is_virtfn)
1479 return 0;
1480
1481 return a->mode;
1482 }
1483
1484 static umode_t pci_bridge_attrs_are_visible(struct kobject *kobj,
1485 struct attribute *a, int n)
1486 {
1487 struct device *dev = kobj_to_dev(kobj);
1488 struct pci_dev *pdev = to_pci_dev(dev);
1489
1490 if (pci_is_bridge(pdev))
1491 return a->mode;
1492
1493 return 0;
1494 }
1495
1496 static umode_t pcie_dev_attrs_are_visible(struct kobject *kobj,
1497 struct attribute *a, int n)
1498 {
1499 struct device *dev = kobj_to_dev(kobj);
1500 struct pci_dev *pdev = to_pci_dev(dev);
1501
1502 if (pci_is_pcie(pdev))
1503 return a->mode;
1504
1505 return 0;
1506 }
1507
1508 static const struct attribute_group pci_dev_group = {
1509 .attrs = pci_dev_attrs,
1510 };
1511
1512 const struct attribute_group *pci_dev_groups[] = {
1513 &pci_dev_group,
1514 &pci_dev_config_attr_group,
1515 &pci_dev_rom_attr_group,
1516 &pci_dev_reset_attr_group,
1517 &pci_dev_reset_method_attr_group,
1518 &pci_dev_vpd_attr_group,
1519 #ifdef CONFIG_DMI
1520 &pci_dev_smbios_attr_group,
1521 #endif
1522 #ifdef CONFIG_ACPI
1523 &pci_dev_acpi_attr_group,
1524 #endif
1525 NULL,
1526 };
1527
1528 static const struct attribute_group pci_dev_hp_attr_group = {
1529 .attrs = pci_dev_hp_attrs,
1530 .is_visible = pci_dev_hp_attrs_are_visible,
1531 };
1532
1533 static const struct attribute_group pci_dev_attr_group = {
1534 .attrs = pci_dev_dev_attrs,
1535 .is_visible = pci_dev_attrs_are_visible,
1536 };
1537
1538 static const struct attribute_group pci_bridge_attr_group = {
1539 .attrs = pci_bridge_attrs,
1540 .is_visible = pci_bridge_attrs_are_visible,
1541 };
1542
1543 static const struct attribute_group pcie_dev_attr_group = {
1544 .attrs = pcie_dev_attrs,
1545 .is_visible = pcie_dev_attrs_are_visible,
1546 };
1547
1548 static const struct attribute_group *pci_dev_attr_groups[] = {
1549 &pci_dev_attr_group,
1550 &pci_dev_hp_attr_group,
1551 #ifdef CONFIG_PCI_IOV
1552 &sriov_pf_dev_attr_group,
1553 &sriov_vf_dev_attr_group,
1554 #endif
1555 &pci_bridge_attr_group,
1556 &pcie_dev_attr_group,
1557 #ifdef CONFIG_PCIEAER
1558 &aer_stats_attr_group,
1559 #endif
1560 #ifdef CONFIG_PCIEASPM
1561 &aspm_ctrl_attr_group,
1562 #endif
1563 NULL,
1564 };
1565
1566 const struct device_type pci_dev_type = {
1567 .groups = pci_dev_attr_groups,
1568 };
1569