1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Contains common pci routines for ALL ppc platforms
4 * (based on pci_32.c and pci_64.c)
5 *
6 * Port for PPC64 David Engebretsen, IBM Corp.
7 * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
8 *
9 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
10 * Rework, based on alpha PCI code.
11 *
12 * Common pmac/prep/chrp pci routines. -- Cort
13 */
14
15 #include <linux/kernel.h>
16 #include <linux/pci.h>
17 #include <linux/string.h>
18 #include <linux/init.h>
19 #include <linux/delay.h>
20 #include <linux/export.h>
21 #include <linux/of_address.h>
22 #include <linux/of_pci.h>
23 #include <linux/mm.h>
24 #include <linux/shmem_fs.h>
25 #include <linux/list.h>
26 #include <linux/syscalls.h>
27 #include <linux/irq.h>
28 #include <linux/vmalloc.h>
29 #include <linux/slab.h>
30 #include <linux/vgaarb.h>
31 #include <linux/numa.h>
32 #include <linux/msi.h>
33
34 #include <asm/processor.h>
35 #include <asm/io.h>
36 #include <asm/prom.h>
37 #include <asm/pci-bridge.h>
38 #include <asm/byteorder.h>
39 #include <asm/machdep.h>
40 #include <asm/ppc-pci.h>
41 #include <asm/eeh.h>
42
43 #include "../../../drivers/pci/pci.h"
44
45 /* hose_spinlock protects accesses to the phb_bitmap. */
46 static DEFINE_SPINLOCK(hose_spinlock);
47 LIST_HEAD(hose_list);
48
49 /* For dynamic PHB numbering on get_phb_number(): max number of PHBs. */
50 #define MAX_PHBS 0x10000
51
52 /*
53 * For dynamic PHB numbering: used/free PHBs tracking bitmap.
54 * Accesses to this bitmap should be protected by hose_spinlock.
55 */
56 static DECLARE_BITMAP(phb_bitmap, MAX_PHBS);
57
58 /* ISA Memory physical address */
59 resource_size_t isa_mem_base;
60 EXPORT_SYMBOL(isa_mem_base);
61
62
63 static const struct dma_map_ops *pci_dma_ops;
64
65 void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
66 {
67 pci_dma_ops = dma_ops;
68 }
69
70 /*
71 * This function should run under locking protection, specifically
72 * hose_spinlock.
73 */
74 static int get_phb_number(struct device_node *dn)
75 {
76 int ret, phb_id = -1;
77 u32 prop_32;
78 u64 prop;
79
80 /*
81 * Try fixed PHB numbering first, by checking archs and reading
82 * the respective device-tree properties. Firstly, try powernv by
83 * reading "ibm,opal-phbid", only present in OPAL environment.
84 */
85 ret = of_property_read_u64(dn, "ibm,opal-phbid", &prop);
86 if (ret) {
87 ret = of_property_read_u32_index(dn, "reg", 1, &prop_32);
88 prop = prop_32;
89 }
90
91 if (!ret)
92 phb_id = (int)(prop & (MAX_PHBS - 1));
93
94 /* We need to be sure to not use the same PHB number twice. */
95 if ((phb_id >= 0) && !test_and_set_bit(phb_id, phb_bitmap))
96 return phb_id;
97
98 /*
99 * If not pseries nor powernv, or if fixed PHB numbering tried to add
100 * the same PHB number twice, then fallback to dynamic PHB numbering.
101 */
102 phb_id = find_first_zero_bit(phb_bitmap, MAX_PHBS);
103 BUG_ON(phb_id >= MAX_PHBS);
104 set_bit(phb_id, phb_bitmap);
105
106 return phb_id;
107 }
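/*
 * Worked example (hypothetical values): a PHB whose "ibm,opal-phbid"
 * property reads 0x8 gets global_number 8, so its devices appear to user
 * space under PCI domain 0008 (e.g. 0008:00:00.0).  If a second node ever
 * reported the same fixed number, test_and_set_bit() above would fail for
 * it and it would fall back to the first free bit in phb_bitmap instead.
 */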
108
109 struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
110 {
111 struct pci_controller *phb;
112
113 phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
114 if (phb == NULL)
115 return NULL;
116 spin_lock(&hose_spinlock);
117 phb->global_number = get_phb_number(dev);
118 list_add_tail(&phb->list_node, &hose_list);
119 spin_unlock(&hose_spinlock);
120 phb->dn = dev;
121 phb->is_dynamic = slab_is_available();
122 #ifdef CONFIG_PPC64
123 if (dev) {
124 int nid = of_node_to_nid(dev);
125
126 if (nid < 0 || !node_online(nid))
127 nid = NUMA_NO_NODE;
128
129 PHB_SET_NODE(phb, nid);
130 }
131 #endif
132 return phb;
133 }
134 EXPORT_SYMBOL_GPL(pcibios_alloc_controller);
135
136 void pcibios_free_controller(struct pci_controller *phb)
137 {
138 spin_lock(&hose_spinlock);
139
140 /* Clear bit of phb_bitmap to allow reuse of this PHB number. */
141 if (phb->global_number < MAX_PHBS)
142 clear_bit(phb->global_number, phb_bitmap);
143
144 list_del(&phb->list_node);
145 spin_unlock(&hose_spinlock);
146
147 if (phb->is_dynamic)
148 kfree(phb);
149 }
150 EXPORT_SYMBOL_GPL(pcibios_free_controller);
151
152 /*
153 * This function is used to call pcibios_free_controller()
154 * in a deferred manner: a callback from the PCI subsystem.
155 *
156 * _*DO NOT*_ call pcibios_free_controller() explicitly if
157 * this is used (or it may access an invalid *phb pointer).
158 *
159 * The callback occurs when all references to the root bus
160 * are dropped (e.g., child buses/devices and their users).
161 *
162 * It's called as .release_fn() of 'struct pci_host_bridge'
163 * which is associated with the 'struct pci_controller.bus'
164 * (root bus) - it expects .release_data to hold a pointer
165 * to 'struct pci_controller'.
166 *
167 * In order to use it, register .release_fn()/release_data
168 * like this:
169 *
170 * pci_set_host_bridge_release(bridge,
171 * pcibios_free_controller_deferred,
172 * (void *) phb);
173 *
174 * e.g. in the pcibios_root_bridge_prepare() callback from
175 * pci_create_root_bus().
176 */
177 void pcibios_free_controller_deferred(struct pci_host_bridge *bridge)
178 {
179 struct pci_controller *phb = (struct pci_controller *)
180 bridge->release_data;
181
182 pr_debug("domain %d, dynamic %d\n", phb->global_number, phb->is_dynamic);
183
184 pcibios_free_controller(phb);
185 }
186 EXPORT_SYMBOL_GPL(pcibios_free_controller_deferred);
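/*
 * Minimal usage sketch (hypothetical platform code; my_root_bridge_prepare
 * is a placeholder name), wiring the deferred release up as described in
 * the comment above:
 *
 *	static int my_root_bridge_prepare(struct pci_host_bridge *bridge)
 *	{
 *		struct pci_controller *phb = pci_bus_to_host(bridge->bus);
 *
 *		pci_set_host_bridge_release(bridge,
 *					    pcibios_free_controller_deferred,
 *					    (void *) phb);
 *		return 0;
 *	}
 *
 * Once registered, the PHB is freed when the last reference to the root
 * bus is dropped, so the platform must not also call
 * pcibios_free_controller() on it.
 */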
187
188 /*
189 * The function is used to return the minimal alignment
190 * for memory or I/O windows of the associated P2P bridge.
191 * By default, 4KiB alignment for I/O windows and 1MiB for
192 * memory windows.
193 */
194 resource_size_t pcibios_window_alignment(struct pci_bus *bus,
195 unsigned long type)
196 {
197 struct pci_controller *phb = pci_bus_to_host(bus);
198
199 if (phb->controller_ops.window_alignment)
200 return phb->controller_ops.window_alignment(bus, type);
201
202 /*
203 * PCI core will figure out the default
204 * alignment: 4KiB for I/O and 1MiB for
205 * memory window.
206 */
207 return 1;
208 }
209
210 void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type)
211 {
212 struct pci_controller *hose = pci_bus_to_host(bus);
213
214 if (hose->controller_ops.setup_bridge)
215 hose->controller_ops.setup_bridge(bus, type);
216 }
217
218 void pcibios_reset_secondary_bus(struct pci_dev *dev)
219 {
220 struct pci_controller *phb = pci_bus_to_host(dev->bus);
221
222 if (phb->controller_ops.reset_secondary_bus) {
223 phb->controller_ops.reset_secondary_bus(dev);
224 return;
225 }
226
227 pci_reset_secondary_bus(dev);
228 }
229
230 resource_size_t pcibios_default_alignment(void)
231 {
232 if (ppc_md.pcibios_default_alignment)
233 return ppc_md.pcibios_default_alignment();
234
235 return 0;
236 }
237
238 #ifdef CONFIG_PCI_IOV
239 resource_size_t pcibios_iov_resource_alignment(struct pci_dev *pdev, int resno)
240 {
241 if (ppc_md.pcibios_iov_resource_alignment)
242 return ppc_md.pcibios_iov_resource_alignment(pdev, resno);
243
244 return pci_iov_resource_size(pdev, resno);
245 }
246
247 int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
248 {
249 if (ppc_md.pcibios_sriov_enable)
250 return ppc_md.pcibios_sriov_enable(pdev, num_vfs);
251
252 return 0;
253 }
254
255 int pcibios_sriov_disable(struct pci_dev *pdev)
256 {
257 if (ppc_md.pcibios_sriov_disable)
258 return ppc_md.pcibios_sriov_disable(pdev);
259
260 return 0;
261 }
262
263 #endif /* CONFIG_PCI_IOV */
264
265 static resource_size_t pcibios_io_size(const struct pci_controller *hose)
266 {
267 #ifdef CONFIG_PPC64
268 return hose->pci_io_size;
269 #else
270 return resource_size(&hose->io_resource);
271 #endif
272 }
273
274 int pcibios_vaddr_is_ioport(void __iomem *address)
275 {
276 int ret = 0;
277 struct pci_controller *hose;
278 resource_size_t size;
279
280 spin_lock(&hose_spinlock);
281 list_for_each_entry(hose, &hose_list, list_node) {
282 size = pcibios_io_size(hose);
283 if (address >= hose->io_base_virt &&
284 address < (hose->io_base_virt + size)) {
285 ret = 1;
286 break;
287 }
288 }
289 spin_unlock(&hose_spinlock);
290 return ret;
291 }
292
293 unsigned long pci_address_to_pio(phys_addr_t address)
294 {
295 struct pci_controller *hose;
296 resource_size_t size;
297 unsigned long ret = ~0;
298
299 spin_lock(&hose_spinlock);
300 list_for_each_entry(hose, &hose_list, list_node) {
301 size = pcibios_io_size(hose);
302 if (address >= hose->io_base_phys &&
303 address < (hose->io_base_phys + size)) {
304 unsigned long base =
305 (unsigned long)hose->io_base_virt - _IO_BASE;
306 ret = base + (address - hose->io_base_phys);
307 break;
308 }
309 }
310 spin_unlock(&hose_spinlock);
311
312 return ret;
313 }
314 EXPORT_SYMBOL_GPL(pci_address_to_pio);
315
316 /*
317 * Return the domain number for this bus.
318 */
319 int pci_domain_nr(struct pci_bus *bus)
320 {
321 struct pci_controller *hose = pci_bus_to_host(bus);
322
323 return hose->global_number;
324 }
325 EXPORT_SYMBOL(pci_domain_nr);
326
327 /* This routine is meant to be used early during boot, when the
328 * PCI bus numbers have not yet been assigned, and you need to
329 * issue PCI config cycles to an OF device.
330 * It could also be used to "fix" RTAS config cycles if you want
331 * to set pci_assign_all_buses to 1 and still use RTAS for PCI
332 * config cycles.
333 */
334 struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
335 {
336 while(node) {
337 struct pci_controller *hose, *tmp;
338 list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
339 if (hose->dn == node)
340 return hose;
341 node = node->parent;
342 }
343 return NULL;
344 }
345
346 struct pci_controller *pci_find_controller_for_domain(int domain_nr)
347 {
348 struct pci_controller *hose;
349
350 list_for_each_entry(hose, &hose_list, list_node)
351 if (hose->global_number == domain_nr)
352 return hose;
353
354 return NULL;
355 }
356
357 struct pci_intx_virq {
358 int virq;
359 struct kref kref;
360 struct list_head list_node;
361 };
362
363 static LIST_HEAD(intx_list);
364 static DEFINE_MUTEX(intx_mutex);
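/*
 * Several devices (or several functions of one device) may share a single
 * INTx line and hence map to the same virq.  Each distinct virq is tracked
 * once on intx_list with a kref counting how many pci_devs use it, so the
 * mapping is only disposed of when the last such device is removed (see
 * the bus notifier below).
 */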
365
366 static void ppc_pci_intx_release(struct kref *kref)
367 {
368 struct pci_intx_virq *vi = container_of(kref, struct pci_intx_virq, kref);
369
370 list_del(&vi->list_node);
371 irq_dispose_mapping(vi->virq);
372 kfree(vi);
373 }
374
375 static int ppc_pci_unmap_irq_line(struct notifier_block *nb,
376 unsigned long action, void *data)
377 {
378 struct pci_dev *pdev = to_pci_dev(data);
379
380 if (action == BUS_NOTIFY_DEL_DEVICE) {
381 struct pci_intx_virq *vi;
382
383 mutex_lock(&intx_mutex);
384 list_for_each_entry(vi, &intx_list, list_node) {
385 if (vi->virq == pdev->irq) {
386 kref_put(&vi->kref, ppc_pci_intx_release);
387 break;
388 }
389 }
390 mutex_unlock(&intx_mutex);
391 }
392
393 return NOTIFY_DONE;
394 }
395
396 static struct notifier_block ppc_pci_unmap_irq_notifier = {
397 .notifier_call = ppc_pci_unmap_irq_line,
398 };
399
400 static int ppc_pci_register_irq_notifier(void)
401 {
402 return bus_register_notifier(&pci_bus_type, &ppc_pci_unmap_irq_notifier);
403 }
404 arch_initcall(ppc_pci_register_irq_notifier);
405
406 /*
407 * Reads the interrupt pin to determine if the interrupt is used by the card.
408 * If the interrupt is used, then it gets the interrupt line from the
409 * Open Firmware device tree and sets it in the pci_dev and PCI config line.
410 */
411 static int pci_read_irq_line(struct pci_dev *pci_dev)
412 {
413 int virq;
414 struct pci_intx_virq *vi, *vitmp;
415
416 /* Preallocate vi as rewind is complex if this fails after mapping */
417 vi = kzalloc(sizeof(struct pci_intx_virq), GFP_KERNEL);
418 if (!vi)
419 return -1;
420
421 pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));
422
423 /* Try to get a mapping from the device-tree */
424 virq = of_irq_parse_and_map_pci(pci_dev, 0, 0);
425 if (virq <= 0) {
426 u8 line, pin;
427
428 /* If that fails, lets fallback to what is in the config
429 * space and map that through the default controller. We
430 * also set the type to level low since that's what PCI
431 * interrupts are. If your platform does differently, then
432 * either provide a proper interrupt tree or don't use this
433 * function.
434 */
435 if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
436 goto error_exit;
437 if (pin == 0)
438 goto error_exit;
439 if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
440 line == 0xff || line == 0) {
441 goto error_exit;
442 }
443 pr_debug(" No map ! Using line %d (pin %d) from PCI config\n",
444 line, pin);
445
446 virq = irq_create_mapping(NULL, line);
447 if (virq)
448 irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
449 }
450
451 if (!virq) {
452 pr_debug(" Failed to map !\n");
453 goto error_exit;
454 }
455
456 pr_debug(" Mapped to linux irq %d\n", virq);
457
458 pci_dev->irq = virq;
459
460 mutex_lock(&intx_mutex);
461 list_for_each_entry(vitmp, &intx_list, list_node) {
462 if (vitmp->virq == virq) {
463 kref_get(&vitmp->kref);
464 kfree(vi);
465 vi = NULL;
466 break;
467 }
468 }
469 if (vi) {
470 vi->virq = virq;
471 kref_init(&vi->kref);
472 list_add_tail(&vi->list_node, &intx_list);
473 }
474 mutex_unlock(&intx_mutex);
475
476 return 0;
477 error_exit:
478 kfree(vi);
479 return -1;
480 }
481
482 /*
483 * Platform support for /proc/bus/pci/X/Y mmap()s.
484 * -- paulus.
485 */
486 int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma)
487 {
488 struct pci_controller *hose = pci_bus_to_host(pdev->bus);
489 resource_size_t ioaddr = pci_resource_start(pdev, bar);
490
491 if (!hose)
492 return -EINVAL;
493
494 /* Convert to an offset within this PCI controller */
495 ioaddr -= (unsigned long)hose->io_base_virt - _IO_BASE;
496
497 vma->vm_pgoff += (ioaddr + hose->io_base_phys) >> PAGE_SHIFT;
498 return 0;
499 }
500
501 /*
502 * This one is used by /dev/mem and fbdev, which have no clue about the
503 * PCI device; it tries to find the PCI device first and calls the
504 * above routine.
505 */
506 pgprot_t pci_phys_mem_access_prot(struct file *file,
507 unsigned long pfn,
508 unsigned long size,
509 pgprot_t prot)
510 {
511 struct pci_dev *pdev = NULL;
512 struct resource *found = NULL;
513 resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
514 int i;
515
516 if (page_is_ram(pfn))
517 return prot;
518
519 prot = pgprot_noncached(prot);
520 for_each_pci_dev(pdev) {
521 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
522 struct resource *rp = &pdev->resource[i];
523 int flags = rp->flags;
524
525 /* Active and same type? */
526 if ((flags & IORESOURCE_MEM) == 0)
527 continue;
528 /* In the range of this resource? */
529 if (offset < (rp->start & PAGE_MASK) ||
530 offset > rp->end)
531 continue;
532 found = rp;
533 break;
534 }
535 if (found)
536 break;
537 }
538 if (found) {
539 if (found->flags & IORESOURCE_PREFETCH)
540 prot = pgprot_noncached_wc(prot);
541 pci_dev_put(pdev);
542 }
543
544 pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n",
545 (unsigned long long)offset, pgprot_val(prot));
546
547 return prot;
548 }
549
550 /* This provides legacy IO read access on a bus */
551 int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
552 {
553 unsigned long offset;
554 struct pci_controller *hose = pci_bus_to_host(bus);
555 struct resource *rp = &hose->io_resource;
556 void __iomem *addr;
557
558 /* Check if port can be supported by that bus. We only check
559 * the ranges of the PHB though, not the bus itself as the rules
560 * for forwarding legacy cycles down bridges are not our problem
561 * here. So if the host bridge supports it, we do it.
562 */
563 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
564 offset += port;
565
566 if (!(rp->flags & IORESOURCE_IO))
567 return -ENXIO;
568 if (offset < rp->start || (offset + size) > rp->end)
569 return -ENXIO;
570 addr = hose->io_base_virt + port;
571
572 switch(size) {
573 case 1:
574 *((u8 *)val) = in_8(addr);
575 return 1;
576 case 2:
577 if (port & 1)
578 return -EINVAL;
579 *((u16 *)val) = in_le16(addr);
580 return 2;
581 case 4:
582 if (port & 3)
583 return -EINVAL;
584 *((u32 *)val) = in_le32(addr);
585 return 4;
586 }
587 return -EINVAL;
588 }
589
590 /* This provides legacy IO write access on a bus */
591 int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
592 {
593 unsigned long offset;
594 struct pci_controller *hose = pci_bus_to_host(bus);
595 struct resource *rp = &hose->io_resource;
596 void __iomem *addr;
597
598 /* Check if port can be supported by that bus. We only check
599 * the ranges of the PHB though, not the bus itself as the rules
600 * for forwarding legacy cycles down bridges are not our problem
601 * here. So if the host bridge supports it, we do it.
602 */
603 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
604 offset += port;
605
606 if (!(rp->flags & IORESOURCE_IO))
607 return -ENXIO;
608 if (offset < rp->start || (offset + size) > rp->end)
609 return -ENXIO;
610 addr = hose->io_base_virt + port;
611
612 /* WARNING: The generic code is idiotic. It gets passed a pointer
613 * to what can be a 1, 2 or 4 byte quantity and always reads that
614 * as a u32, which means that we have to correct the location of
615 * the data read within those 32 bits for size 1 and 2
616 */
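/* Concretely: on this big-endian read a 1-byte quantity ends up in the
 * most significant byte of 'val', hence the "val >> 24" below, and a
 * 2-byte quantity ends up in the top half-word, hence "val >> 16";
 * 4-byte writes need no correction.
 */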
617 switch(size) {
618 case 1:
619 out_8(addr, val >> 24);
620 return 1;
621 case 2:
622 if (port & 1)
623 return -EINVAL;
624 out_le16(addr, val >> 16);
625 return 2;
626 case 4:
627 if (port & 3)
628 return -EINVAL;
629 out_le32(addr, val);
630 return 4;
631 }
632 return -EINVAL;
633 }
634
635 /* This provides legacy IO or memory mmap access on a bus */
636 int pci_mmap_legacy_page_range(struct pci_bus *bus,
637 struct vm_area_struct *vma,
638 enum pci_mmap_state mmap_state)
639 {
640 struct pci_controller *hose = pci_bus_to_host(bus);
641 resource_size_t offset =
642 ((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
643 resource_size_t size = vma->vm_end - vma->vm_start;
644 struct resource *rp;
645
646 pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n",
647 pci_domain_nr(bus), bus->number,
648 mmap_state == pci_mmap_mem ? "MEM" : "IO",
649 (unsigned long long)offset,
650 (unsigned long long)(offset + size - 1));
651
652 if (mmap_state == pci_mmap_mem) {
653 /* Hack alert !
654 *
655 * Because X is lame and can fail starting if it gets an error trying
656 * to mmap legacy_mem (instead of just moving on without legacy memory
657 * access) we fake it here by giving it anonymous memory, effectively
658 * behaving just like /dev/zero
659 */
660 if ((offset + size) > hose->isa_mem_size) {
661 printk(KERN_DEBUG
662 "Process %s (pid:%d) mapped non-existing PCI legacy memory for 0%04x:%02x\n",
663 current->comm, current->pid, pci_domain_nr(bus), bus->number);
664 if (vma->vm_flags & VM_SHARED)
665 return shmem_zero_setup(vma);
666 return 0;
667 }
668 offset += hose->isa_mem_phys;
669 } else {
670 unsigned long io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
671 unsigned long roffset = offset + io_offset;
672 rp = &hose->io_resource;
673 if (!(rp->flags & IORESOURCE_IO))
674 return -ENXIO;
675 if (roffset < rp->start || (roffset + size) > rp->end)
676 return -ENXIO;
677 offset += hose->io_base_phys;
678 }
679 pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);
680
681 vma->vm_pgoff = offset >> PAGE_SHIFT;
682 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
683 return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
684 vma->vm_end - vma->vm_start,
685 vma->vm_page_prot);
686 }
687
688 void pci_resource_to_user(const struct pci_dev *dev, int bar,
689 const struct resource *rsrc,
690 resource_size_t *start, resource_size_t *end)
691 {
692 struct pci_bus_region region;
693
694 if (rsrc->flags & IORESOURCE_IO) {
695 pcibios_resource_to_bus(dev->bus, &region,
696 (struct resource *) rsrc);
697 *start = region.start;
698 *end = region.end;
699 return;
700 }
701
702 /* We pass a CPU physical address to userland for MMIO instead of a
703 * BAR value because X is lame and expects to be able to use that
704 * to pass to /dev/mem!
705 *
706 * That means we may have 64-bit values where some apps only expect
707 * 32 (like X itself since it thinks only Sparc has 64-bit MMIO).
708 */
709 *start = rsrc->start;
710 *end = rsrc->end;
711 }
712
713 /**
714 * pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree
715 * @hose: newly allocated pci_controller to be setup
716 * @dev: device node of the host bridge
717 * @primary: set if primary bus (32 bits only, soon to be deprecated)
718 *
719 * This function will parse the "ranges" property of a PCI host bridge device
720 * node and setup the resource mapping of a pci controller based on its
721 * content.
722 *
723 * Life would be boring if it wasn't for a few issues that we have to deal
724 * with here:
725 *
726 * - We can only cope with one IO space range and up to 3 Memory space
727 * ranges. However, some machines (thanks Apple !) tend to split their
728 * space into lots of small contiguous ranges. So we have to coalesce.
729 *
730 * - Some busses have IO space not starting at 0, which causes trouble with
731 * the way we do our IO resource renumbering. The code somewhat deals with
732 * it for 64 bits but I would expect problems on 32 bits.
733 *
734 * - Some 32 bits platforms such as 4xx can have physical space larger than
735 * 32 bits so we need to use 64 bits values for the parsing
736 */
737 void pci_process_bridge_OF_ranges(struct pci_controller *hose,
738 struct device_node *dev, int primary)
739 {
740 int memno = 0;
741 struct resource *res;
742 struct of_pci_range range;
743 struct of_pci_range_parser parser;
744
745 printk(KERN_INFO "PCI host bridge %pOF %s ranges:\n",
746 dev, primary ? "(primary)" : "");
747
748 /* Check for ranges property */
749 if (of_pci_range_parser_init(&parser, dev))
750 return;
751
752 /* Parse it */
753 for_each_of_pci_range(&parser, &range) {
754 /* If we failed translation or got a zero-sized region
755 * (some FW try to feed us with nonsensical zero-sized regions,
756 * such as power3, which look like some kind of attempt at exposing
757 * the VGA memory hole), skip it
758 */
759 if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
760 continue;
761
762 /* Act based on address space type */
763 res = NULL;
764 switch (range.flags & IORESOURCE_TYPE_BITS) {
765 case IORESOURCE_IO:
766 printk(KERN_INFO
767 " IO 0x%016llx..0x%016llx -> 0x%016llx\n",
768 range.cpu_addr, range.cpu_addr + range.size - 1,
769 range.pci_addr);
770
771 /* We support only one IO range */
772 if (hose->pci_io_size) {
773 printk(KERN_INFO
774 " \\--> Skipped (too many) !\n");
775 continue;
776 }
777 #ifdef CONFIG_PPC32
778 /* On 32 bits, limit I/O space to 16MB */
779 if (range.size > 0x01000000)
780 range.size = 0x01000000;
781
782 /* 32 bits needs to map IOs here */
783 hose->io_base_virt = ioremap(range.cpu_addr,
784 range.size);
785
786 /* Expect trouble if pci_addr is not 0 */
787 if (primary)
788 isa_io_base =
789 (unsigned long)hose->io_base_virt;
790 #endif /* CONFIG_PPC32 */
791 /* pci_io_size and io_base_phys always represent IO
792 * space starting at 0 so we factor in pci_addr
793 */
794 hose->pci_io_size = range.pci_addr + range.size;
795 hose->io_base_phys = range.cpu_addr - range.pci_addr;
796
797 /* Build resource */
798 res = &hose->io_resource;
799 range.cpu_addr = range.pci_addr;
800 break;
801 case IORESOURCE_MEM:
802 printk(KERN_INFO
803 " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
804 range.cpu_addr, range.cpu_addr + range.size - 1,
805 range.pci_addr,
806 (range.flags & IORESOURCE_PREFETCH) ?
807 "Prefetch" : "");
808
809 /* We support only 3 memory ranges */
810 if (memno >= 3) {
811 printk(KERN_INFO
812 " \\--> Skipped (too many) !\n");
813 continue;
814 }
815 /* Handles ISA memory hole space here */
816 if (range.pci_addr == 0) {
817 if (primary || isa_mem_base == 0)
818 isa_mem_base = range.cpu_addr;
819 hose->isa_mem_phys = range.cpu_addr;
820 hose->isa_mem_size = range.size;
821 }
822
823 /* Build resource */
824 hose->mem_offset[memno] = range.cpu_addr -
825 range.pci_addr;
826 res = &hose->mem_resources[memno++];
827 break;
828 }
829 if (res != NULL) {
830 res->name = dev->full_name;
831 res->flags = range.flags;
832 res->start = range.cpu_addr;
833 res->end = range.cpu_addr + range.size - 1;
834 res->parent = res->child = res->sibling = NULL;
835 }
836 }
837 }
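/*
 * Worked example with made-up numbers: an IO range that maps PCI address
 * 0x0 to CPU address 0xd0000000 with size 0x10000 produces io_base_phys =
 * 0xd0000000 and pci_io_size = 0x10000, with io_resource built on the
 * PCI-side addresses (cpu_addr is rewritten to pci_addr above).  A MEM
 * range mapping PCI 0x80000000 to CPU 0xc0000000 produces mem_offset[n] =
 * 0x40000000, with the resource covering the CPU addresses
 * 0xc0000000 .. 0xc0000000 + size - 1.
 */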
838
839 /* Decide whether to display the domain number in /proc */
840 int pci_proc_domain(struct pci_bus *bus)
841 {
842 struct pci_controller *hose = pci_bus_to_host(bus);
843
844 if (!pci_has_flag(PCI_ENABLE_PROC_DOMAINS))
845 return 0;
846 if (pci_has_flag(PCI_COMPAT_DOMAIN_0))
847 return hose->global_number != 0;
848 return 1;
849 }
850
851 int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
852 {
853 if (ppc_md.pcibios_root_bridge_prepare)
854 return ppc_md.pcibios_root_bridge_prepare(bridge);
855
856 return 0;
857 }
858
859 /* This header fixup will do the resource fixup for all devices as they are
860 * probed, but not for bridge ranges
861 */
862 static void pcibios_fixup_resources(struct pci_dev *dev)
863 {
864 struct pci_controller *hose = pci_bus_to_host(dev->bus);
865 int i;
866
867 if (!hose) {
868 printk(KERN_ERR "No host bridge for PCI dev %s !\n",
869 pci_name(dev));
870 return;
871 }
872
873 if (dev->is_virtfn)
874 return;
875
876 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
877 struct resource *res = dev->resource + i;
878 struct pci_bus_region reg;
879 if (!res->flags)
880 continue;
881
882 /* If we're going to re-assign everything, we mark all resources
883 * as unset (and 0-base them). In addition, we mark BARs starting
884 * at 0 as unset as well, except if PCI_PROBE_ONLY is also set
885 * since in that case, we don't want to re-assign anything
886 */
887 pcibios_resource_to_bus(dev->bus, &reg, res);
888 if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) ||
889 (reg.start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {
890 /* Only print message if not re-assigning */
891 if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC))
892 pr_debug("PCI:%s Resource %d %pR is unassigned\n",
893 pci_name(dev), i, res);
894 res->end -= res->start;
895 res->start = 0;
896 res->flags |= IORESOURCE_UNSET;
897 continue;
898 }
899
900 pr_debug("PCI:%s Resource %d %pR\n", pci_name(dev), i, res);
901 }
902
903 /* Call machine specific resource fixup */
904 if (ppc_md.pcibios_fixup_resources)
905 ppc_md.pcibios_fixup_resources(dev);
906 }
907 DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
908
909 /* This function tries to figure out if a bridge resource has been initialized
910 * by the firmware or not. It doesn't have to be absolutely bullet proof, but
911 * things go more smoothly when it gets it right. It should cover cases such
912 * as Apple "closed" bridge resources and bare-metal pSeries unassigned bridges
913 */
914 static int pcibios_uninitialized_bridge_resource(struct pci_bus *bus,
915 struct resource *res)
916 {
917 struct pci_controller *hose = pci_bus_to_host(bus);
918 struct pci_dev *dev = bus->self;
919 resource_size_t offset;
920 struct pci_bus_region region;
921 u16 command;
922 int i;
923
924 /* We don't do anything if PCI_PROBE_ONLY is set */
925 if (pci_has_flag(PCI_PROBE_ONLY))
926 return 0;
927
928 /* Job is a bit different between memory and IO */
929 if (res->flags & IORESOURCE_MEM) {
930 pcibios_resource_to_bus(dev->bus, &region, res);
931
932 /* If the BAR is non-0 then it's probably been initialized */
933 if (region.start != 0)
934 return 0;
935
936 /* The BAR is 0, let's check if memory decoding is enabled on
937 * the bridge. If not, we consider it unassigned
938 */
939 pci_read_config_word(dev, PCI_COMMAND, &command);
940 if ((command & PCI_COMMAND_MEMORY) == 0)
941 return 1;
942
943 /* Memory decoding is enabled and the BAR is 0. If any of the bridge
944 * resources covers that starting address (0), then it's good enough
945 * for us for memory space
946 */
947 for (i = 0; i < 3; i++) {
948 if ((hose->mem_resources[i].flags & IORESOURCE_MEM) &&
949 hose->mem_resources[i].start == hose->mem_offset[i])
950 return 0;
951 }
952
953 /* Well, it starts at 0 and we know it will collide so we may as
954 * well consider it as unassigned. That covers the Apple case.
955 */
956 return 1;
957 } else {
958 /* If the BAR is non-0, then we consider it assigned */
959 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
960 if (((res->start - offset) & 0xfffffffful) != 0)
961 return 0;
962
963 /* Here, we are a bit different than memory as typically IO space
964 * starting at low addresses -is- valid. What we do instead is that
965 * we consider as unassigned anything that doesn't have IO enabled
966 * in the PCI command register, and that's it.
967 */
968 pci_read_config_word(dev, PCI_COMMAND, &command);
969 if (command & PCI_COMMAND_IO)
970 return 0;
971
972 /* It's starting at 0 and IO is disabled in the bridge, consider
973 * it unassigned
974 */
975 return 1;
976 }
977 }
978
979 /* Fixup resources of a PCI<->PCI bridge */
980 static void pcibios_fixup_bridge(struct pci_bus *bus)
981 {
982 struct resource *res;
983 int i;
984
985 struct pci_dev *dev = bus->self;
986
987 pci_bus_for_each_resource(bus, res, i) {
988 if (!res || !res->flags)
989 continue;
990 if (i >= 3 && bus->self->transparent)
991 continue;
992
993 /* If we're going to reassign everything, we can
994 * shrink the P2P resource to have a size of 0
995 * in order to save space.
996 */
997 if (pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
998 res->flags |= IORESOURCE_UNSET;
999 res->start = 0;
1000 res->end = -1;
1001 continue;
1002 }
1003
1004 pr_debug("PCI:%s Bus rsrc %d %pR\n", pci_name(dev), i, res);
1005
1006 /* Try to detect uninitialized P2P bridge resources,
1007 * and clear them out so they get re-assigned later
1008 */
1009 if (pcibios_uninitialized_bridge_resource(bus, res)) {
1010 res->flags = 0;
1011 pr_debug("PCI:%s (unassigned)\n", pci_name(dev));
1012 }
1013 }
1014 }
1015
1016 void pcibios_setup_bus_self(struct pci_bus *bus)
1017 {
1018 struct pci_controller *phb;
1019
1020 /* Fix up the bus resources for P2P bridges */
1021 if (bus->self != NULL)
1022 pcibios_fixup_bridge(bus);
1023
1024 /* Platform specific bus fixups. This is currently only used
1025 * by fsl_pci and I'm hoping to get rid of it at some point
1026 */
1027 if (ppc_md.pcibios_fixup_bus)
1028 ppc_md.pcibios_fixup_bus(bus);
1029
1030 /* Setup bus DMA mappings */
1031 phb = pci_bus_to_host(bus);
1032 if (phb->controller_ops.dma_bus_setup)
1033 phb->controller_ops.dma_bus_setup(bus);
1034 }
1035
1036 void pcibios_bus_add_device(struct pci_dev *dev)
1037 {
1038 struct pci_controller *phb;
1039 /* Fixup NUMA node as it may not be set up yet by the generic
1040 * code and is needed by the DMA init
1041 */
1042 set_dev_node(&dev->dev, pcibus_to_node(dev->bus));
1043
1044 /* Hook up default DMA ops */
1045 set_dma_ops(&dev->dev, pci_dma_ops);
1046 dev->dev.archdata.dma_offset = PCI_DRAM_OFFSET;
1047
1048 /* Additional platform DMA/iommu setup */
1049 phb = pci_bus_to_host(dev->bus);
1050 if (phb->controller_ops.dma_dev_setup)
1051 phb->controller_ops.dma_dev_setup(dev);
1052
1053 /* Read default IRQs and fixup if necessary */
1054 pci_read_irq_line(dev);
1055 if (ppc_md.pci_irq_fixup)
1056 ppc_md.pci_irq_fixup(dev);
1057
1058 if (ppc_md.pcibios_bus_add_device)
1059 ppc_md.pcibios_bus_add_device(dev);
1060 }
1061
1062 int pcibios_device_add(struct pci_dev *dev)
1063 {
1064 struct irq_domain *d;
1065
1066 #ifdef CONFIG_PCI_IOV
1067 if (ppc_md.pcibios_fixup_sriov)
1068 ppc_md.pcibios_fixup_sriov(dev);
1069 #endif /* CONFIG_PCI_IOV */
1070
1071 d = dev_get_msi_domain(&dev->bus->dev);
1072 if (d)
1073 dev_set_msi_domain(&dev->dev, d);
1074 return 0;
1075 }
1076
1077 void pcibios_set_master(struct pci_dev *dev)
1078 {
1079 /* No special bus mastering setup handling */
1080 }
1081
1082 void pcibios_fixup_bus(struct pci_bus *bus)
1083 {
1084 /* When called from the generic PCI probe, read PCI<->PCI bridge
1085 * bases. This is -not- called when generating the PCI tree from
1086 * the OF device-tree.
1087 */
1088 pci_read_bridge_bases(bus);
1089
1090 /* Now fixup the bus itself */
1091 pcibios_setup_bus_self(bus);
1092 }
1093 EXPORT_SYMBOL(pcibios_fixup_bus);
1094
1095 static int skip_isa_ioresource_align(struct pci_dev *dev)
1096 {
1097 if (pci_has_flag(PCI_CAN_SKIP_ISA_ALIGN) &&
1098 !(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA))
1099 return 1;
1100 return 0;
1101 }
1102
1103 /*
1104 * We need to avoid collisions with `mirrored' VGA ports
1105 * and other strange ISA hardware, so we always want the
1106 * addresses to be allocated in the 0x000-0x0ff region
1107 * modulo 0x400.
1108 *
1109 * Why? Because some silly external IO cards only decode
1110 * the low 10 bits of the IO address. The 0x00-0xff region
1111 * is reserved for motherboard devices that decode all 16
1112 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
1113 * but we want to try to avoid allocating at 0x2900-0x2bff
1114 * which might be mirrored at 0x0100-0x03ff.
1115 */
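/*
 * For example (hypothetical request): an IO allocation proposed at 0x2910
 * lands in a mirrored window (0x2910 & 0x300 != 0) and is pushed up to
 * 0x2c00 below, while one proposed at 0x2800 already falls in the
 * 0x000-0x0ff region modulo 0x400 and is left alone.
 */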
1116 resource_size_t pcibios_align_resource(void *data, const struct resource *res,
1117 resource_size_t size, resource_size_t align)
1118 {
1119 struct pci_dev *dev = data;
1120 resource_size_t start = res->start;
1121
1122 if (res->flags & IORESOURCE_IO) {
1123 if (skip_isa_ioresource_align(dev))
1124 return start;
1125 if (start & 0x300)
1126 start = (start + 0x3ff) & ~0x3ff;
1127 }
1128
1129 return start;
1130 }
1131 EXPORT_SYMBOL(pcibios_align_resource);
1132
1133 /*
1134 * Reparent resource children of pr that conflict with res
1135 * under res, and make res replace those children.
1136 */
1137 static int reparent_resources(struct resource *parent,
1138 struct resource *res)
1139 {
1140 struct resource *p, **pp;
1141 struct resource **firstpp = NULL;
1142
1143 for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
1144 if (p->end < res->start)
1145 continue;
1146 if (res->end < p->start)
1147 break;
1148 if (p->start < res->start || p->end > res->end)
1149 return -1; /* not completely contained */
1150 if (firstpp == NULL)
1151 firstpp = pp;
1152 }
1153 if (firstpp == NULL)
1154 return -1; /* didn't find any conflicting entries? */
1155 res->parent = parent;
1156 res->child = *firstpp;
1157 res->sibling = *pp;
1158 *firstpp = res;
1159 *pp = NULL;
1160 for (p = res->child; p != NULL; p = p->sibling) {
1161 p->parent = res;
1162 pr_debug("PCI: Reparented %s %pR under %s\n",
1163 p->name, p, res->name);
1164 }
1165 return 0;
1166 }
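/*
 * Hypothetical example: if firmware already claimed a device BAR directly
 * under iomem_resource and we then fail to request the bridge window that
 * should contain it, reparent_resources() splices the window into the tree
 * at that spot and moves the conflicting BAR (and any neighbours fully
 * contained in the window) underneath it, which is how it is used from
 * pcibios_allocate_bus_resources() below.
 */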
1167
1168 /*
1169 * Handle resources of PCI devices. If the world were perfect, we could
1170 * just allocate all the resource regions and do nothing more. It isn't.
1171 * On the other hand, we cannot just re-allocate all devices, as it would
1172 * require us to know lots of host bridge internals. So we attempt to
1173 * keep as much of the original configuration as possible, but tweak it
1174 * when it's found to be wrong.
1175 *
1176 * Known BIOS problems we have to work around:
1177 * - I/O or memory regions not configured
1178 * - regions configured, but not enabled in the command register
1179 * - bogus I/O addresses above 64K used
1180 * - expansion ROMs left enabled (this may sound harmless, but given
1181 * the fact the PCI specs explicitly allow address decoders to be
1182 * shared between expansion ROMs and other resource regions, it's
1183 * at least dangerous)
1184 *
1185 * Our solution:
1186 * (1) Allocate resources for all buses behind PCI-to-PCI bridges.
1187 * This gives us fixed barriers on where we can allocate.
1188 * (2) Allocate resources for all enabled devices. If there is
1189 * a collision, just mark the resource as unallocated. Also
1190 * disable expansion ROMs during this step.
1191 * (3) Try to allocate resources for disabled devices. If the
1192 * resources were assigned correctly, everything goes well,
1193 * if they weren't, they won't disturb allocation of other
1194 * resources.
1195 * (4) Assign new addresses to resources which were either
1196 * not configured at all or misconfigured. If explicitly
1197 * requested by the user, configure expansion ROM address
1198 * as well.
1199 */
1200
1201 static void pcibios_allocate_bus_resources(struct pci_bus *bus)
1202 {
1203 struct pci_bus *b;
1204 int i;
1205 struct resource *res, *pr;
1206
1207 pr_debug("PCI: Allocating bus resources for %04x:%02x...\n",
1208 pci_domain_nr(bus), bus->number);
1209
1210 pci_bus_for_each_resource(bus, res, i) {
1211 if (!res || !res->flags || res->start > res->end || res->parent)
1212 continue;
1213
1214 /* If the resource was left unset at this point, we clear it */
1215 if (res->flags & IORESOURCE_UNSET)
1216 goto clear_resource;
1217
1218 if (bus->parent == NULL)
1219 pr = (res->flags & IORESOURCE_IO) ?
1220 &ioport_resource : &iomem_resource;
1221 else {
1222 pr = pci_find_parent_resource(bus->self, res);
1223 if (pr == res) {
1224 /* this happens when the generic PCI
1225 * code (wrongly) decides that this
1226 * bridge is transparent -- paulus
1227 */
1228 continue;
1229 }
1230 }
1231
1232 pr_debug("PCI: %s (bus %d) bridge rsrc %d: %pR, parent %p (%s)\n",
1233 bus->self ? pci_name(bus->self) : "PHB", bus->number,
1234 i, res, pr, (pr && pr->name) ? pr->name : "nil");
1235
1236 if (pr && !(pr->flags & IORESOURCE_UNSET)) {
1237 struct pci_dev *dev = bus->self;
1238
1239 if (request_resource(pr, res) == 0)
1240 continue;
1241 /*
1242 * Must be a conflict with an existing entry.
1243 * Move that entry (or entries) under the
1244 * bridge resource and try again.
1245 */
1246 if (reparent_resources(pr, res) == 0)
1247 continue;
1248
1249 if (dev && i < PCI_BRIDGE_RESOURCE_NUM &&
1250 pci_claim_bridge_resource(dev,
1251 i + PCI_BRIDGE_RESOURCES) == 0)
1252 continue;
1253 }
1254 pr_warn("PCI: Cannot allocate resource region %d of PCI bridge %d, will remap\n",
1255 i, bus->number);
1256 clear_resource:
1257 /* The resource might be figured out when doing
1258 * reassignment based on the resources required
1259 * by the downstream PCI devices. Here we set
1260 * the size of the resource to be 0 in order to
1261 * save more space.
1262 */
1263 res->start = 0;
1264 res->end = -1;
1265 res->flags = 0;
1266 }
1267
1268 list_for_each_entry(b, &bus->children, node)
1269 pcibios_allocate_bus_resources(b);
1270 }
1271
1272 static inline void alloc_resource(struct pci_dev *dev, int idx)
1273 {
1274 struct resource *pr, *r = &dev->resource[idx];
1275
1276 pr_debug("PCI: Allocating %s: Resource %d: %pR\n",
1277 pci_name(dev), idx, r);
1278
1279 pr = pci_find_parent_resource(dev, r);
1280 if (!pr || (pr->flags & IORESOURCE_UNSET) ||
1281 request_resource(pr, r) < 0) {
1282 printk(KERN_WARNING "PCI: Cannot allocate resource region %d"
1283 " of device %s, will remap\n", idx, pci_name(dev));
1284 if (pr)
1285 pr_debug("PCI: parent is %p: %pR\n", pr, pr);
1286 /* We'll assign a new address later */
1287 r->flags |= IORESOURCE_UNSET;
1288 r->end -= r->start;
1289 r->start = 0;
1290 }
1291 }
1292
1293 static void __init pcibios_allocate_resources(int pass)
1294 {
1295 struct pci_dev *dev = NULL;
1296 int idx, disabled;
1297 u16 command;
1298 struct resource *r;
1299
1300 for_each_pci_dev(dev) {
1301 pci_read_config_word(dev, PCI_COMMAND, &command);
1302 for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
1303 r = &dev->resource[idx];
1304 if (r->parent) /* Already allocated */
1305 continue;
1306 if (!r->flags || (r->flags & IORESOURCE_UNSET))
1307 continue; /* Not assigned at all */
1308 /* We only allocate ROMs on pass 1 just in case they
1309 * have been screwed up by firmware
1310 */
1311 if (idx == PCI_ROM_RESOURCE)
1312 disabled = 1;
1313 if (r->flags & IORESOURCE_IO)
1314 disabled = !(command & PCI_COMMAND_IO);
1315 else
1316 disabled = !(command & PCI_COMMAND_MEMORY);
1317 if (pass == disabled)
1318 alloc_resource(dev, idx);
1319 }
1320 if (pass)
1321 continue;
1322 r = &dev->resource[PCI_ROM_RESOURCE];
1323 if (r->flags) {
1324 /* Turn the ROM off, leave the resource region,
1325 * but keep it unregistered.
1326 */
1327 u32 reg;
1328 pci_read_config_dword(dev, dev->rom_base_reg, &reg);
1329 if (reg & PCI_ROM_ADDRESS_ENABLE) {
1330 pr_debug("PCI: Switching off ROM of %s\n",
1331 pci_name(dev));
1332 r->flags &= ~IORESOURCE_ROM_ENABLE;
1333 pci_write_config_dword(dev, dev->rom_base_reg,
1334 reg & ~PCI_ROM_ADDRESS_ENABLE);
1335 }
1336 }
1337 }
1338 }
1339
1340 static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
1341 {
1342 struct pci_controller *hose = pci_bus_to_host(bus);
1343 resource_size_t offset;
1344 struct resource *res, *pres;
1345 int i;
1346
1347 pr_debug("Reserving legacy ranges for domain %04x\n", pci_domain_nr(bus));
1348
1349 /* Check for IO */
1350 if (!(hose->io_resource.flags & IORESOURCE_IO))
1351 goto no_io;
1352 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
1353 res = kzalloc(sizeof(struct resource), GFP_KERNEL);
1354 BUG_ON(res == NULL);
1355 res->name = "Legacy IO";
1356 res->flags = IORESOURCE_IO;
1357 res->start = offset;
1358 res->end = (offset + 0xfff) & 0xfffffffful;
1359 pr_debug("Candidate legacy IO: %pR\n", res);
1360 if (request_resource(&hose->io_resource, res)) {
1361 printk(KERN_DEBUG
1362 "PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
1363 pci_domain_nr(bus), bus->number, res);
1364 kfree(res);
1365 }
1366
1367 no_io:
1368 /* Check for memory */
1369 for (i = 0; i < 3; i++) {
1370 pres = &hose->mem_resources[i];
1371 offset = hose->mem_offset[i];
1372 if (!(pres->flags & IORESOURCE_MEM))
1373 continue;
1374 pr_debug("hose mem res: %pR\n", pres);
1375 if ((pres->start - offset) <= 0xa0000 &&
1376 (pres->end - offset) >= 0xbffff)
1377 break;
1378 }
1379 if (i >= 3)
1380 return;
1381 res = kzalloc(sizeof(struct resource), GFP_KERNEL);
1382 BUG_ON(res == NULL);
1383 res->name = "Legacy VGA memory";
1384 res->flags = IORESOURCE_MEM;
1385 res->start = 0xa0000 + offset;
1386 res->end = 0xbffff + offset;
1387 pr_debug("Candidate VGA memory: %pR\n", res);
1388 if (request_resource(pres, res)) {
1389 printk(KERN_DEBUG
1390 "PCI %04x:%02x Cannot reserve VGA memory %pR\n",
1391 pci_domain_nr(bus), bus->number, res);
1392 kfree(res);
1393 }
1394 }
1395
1396 void __init pcibios_resource_survey(void)
1397 {
1398 struct pci_bus *b;
1399
1400 /* Allocate and assign resources */
1401 list_for_each_entry(b, &pci_root_buses, node)
1402 pcibios_allocate_bus_resources(b);
1403 if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
1404 pcibios_allocate_resources(0);
1405 pcibios_allocate_resources(1);
1406 }
1407
1408 /* Before we start assigning unassigned resource, we try to reserve
1409 * the low IO area and the VGA memory area if they intersect the
1410 * bus available resources to avoid allocating things on top of them
1411 */
1412 if (!pci_has_flag(PCI_PROBE_ONLY)) {
1413 list_for_each_entry(b, &pci_root_buses, node)
1414 pcibios_reserve_legacy_regions(b);
1415 }
1416
1417 /* Now, if the platform didn't decide to blindly trust the firmware,
1418 * we proceed to assigning things that were left unassigned
1419 */
1420 if (!pci_has_flag(PCI_PROBE_ONLY)) {
1421 pr_debug("PCI: Assigning unassigned resources...\n");
1422 pci_assign_unassigned_resources();
1423 }
1424 }
1425
1426 /* This is used by the PCI hotplug driver to allocate resources
1427 * of newly plugged busses. We can try to consolidate with the
1428 * rest of the code later, for now, keep it as-is as our main
1429 * resource allocation function doesn't deal with sub-trees yet.
1430 */
1431 void pcibios_claim_one_bus(struct pci_bus *bus)
1432 {
1433 struct pci_dev *dev;
1434 struct pci_bus *child_bus;
1435
1436 list_for_each_entry(dev, &bus->devices, bus_list) {
1437 int i;
1438
1439 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1440 struct resource *r = &dev->resource[i];
1441
1442 if (r->parent || !r->start || !r->flags)
1443 continue;
1444
1445 pr_debug("PCI: Claiming %s: Resource %d: %pR\n",
1446 pci_name(dev), i, r);
1447
1448 if (pci_claim_resource(dev, i) == 0)
1449 continue;
1450
1451 pci_claim_bridge_resource(dev, i);
1452 }
1453 }
1454
1455 list_for_each_entry(child_bus, &bus->children, node)
1456 pcibios_claim_one_bus(child_bus);
1457 }
1458 EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
1459
1460
1461 /* pcibios_finish_adding_to_bus
1462 *
1463 * This is to be called by the hotplug code after devices have been
1464 * added to a bus; this includes calling it for a PHB that is just
1465 * being added
1466 */
1467 void pcibios_finish_adding_to_bus(struct pci_bus *bus)
1468 {
1469 pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n",
1470 pci_domain_nr(bus), bus->number);
1471
1472 /* Allocate bus and devices resources */
1473 pcibios_allocate_bus_resources(bus);
1474 pcibios_claim_one_bus(bus);
1475 if (!pci_has_flag(PCI_PROBE_ONLY)) {
1476 if (bus->self)
1477 pci_assign_unassigned_bridge_resources(bus->self);
1478 else
1479 pci_assign_unassigned_bus_resources(bus);
1480 }
1481
1482 /* Add new devices to global lists. Register in proc, sysfs. */
1483 pci_bus_add_devices(bus);
1484 }
1485 EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
1486
1487 int pcibios_enable_device(struct pci_dev *dev, int mask)
1488 {
1489 struct pci_controller *phb = pci_bus_to_host(dev->bus);
1490
1491 if (phb->controller_ops.enable_device_hook)
1492 if (!phb->controller_ops.enable_device_hook(dev))
1493 return -EINVAL;
1494
1495 return pci_enable_resources(dev, mask);
1496 }
1497
1498 void pcibios_disable_device(struct pci_dev *dev)
1499 {
1500 struct pci_controller *phb = pci_bus_to_host(dev->bus);
1501
1502 if (phb->controller_ops.disable_device)
1503 phb->controller_ops.disable_device(dev);
1504 }
1505
1506 resource_size_t pcibios_io_space_offset(struct pci_controller *hose)
1507 {
1508 return (unsigned long) hose->io_base_virt - _IO_BASE;
1509 }
1510
1511 static void pcibios_setup_phb_resources(struct pci_controller *hose,
1512 struct list_head *resources)
1513 {
1514 struct resource *res;
1515 resource_size_t offset;
1516 int i;
1517
1518 /* Hookup PHB IO resource */
1519 res = &hose->io_resource;
1520
1521 if (!res->flags) {
1522 pr_debug("PCI: I/O resource not set for host"
1523 " bridge %pOF (domain %d)\n",
1524 hose->dn, hose->global_number);
1525 } else {
1526 offset = pcibios_io_space_offset(hose);
1527
1528 pr_debug("PCI: PHB IO resource = %pR off 0x%08llx\n",
1529 res, (unsigned long long)offset);
1530 pci_add_resource_offset(resources, res, offset);
1531 }
1532
1533 /* Hookup PHB Memory resources */
1534 for (i = 0; i < 3; ++i) {
1535 res = &hose->mem_resources[i];
1536 if (!res->flags)
1537 continue;
1538
1539 offset = hose->mem_offset[i];
1540 pr_debug("PCI: PHB MEM resource %d = %pR off 0x%08llx\n", i,
1541 res, (unsigned long long)offset);
1542
1543 pci_add_resource_offset(resources, res, offset);
1544 }
1545 }
1546
1547 /*
1548 * Null PCI config access functions, for the case when we can't
1549 * find a hose.
1550 */
1551 #define NULL_PCI_OP(rw, size, type) \
1552 static int \
1553 null_##rw##_config_##size(struct pci_dev *dev, int offset, type val) \
1554 { \
1555 return PCIBIOS_DEVICE_NOT_FOUND; \
1556 }
1557
1558 static int
1559 null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
1560 int len, u32 *val)
1561 {
1562 return PCIBIOS_DEVICE_NOT_FOUND;
1563 }
1564
1565 static int
1566 null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
1567 int len, u32 val)
1568 {
1569 return PCIBIOS_DEVICE_NOT_FOUND;
1570 }
1571
1572 static struct pci_ops null_pci_ops =
1573 {
1574 .read = null_read_config,
1575 .write = null_write_config,
1576 };
1577
1578 /*
1579 * These functions are used early on before PCI scanning is done
1580 * and all of the pci_dev and pci_bus structures have been created.
1581 */
1582 static struct pci_bus *
1583 fake_pci_bus(struct pci_controller *hose, int busnr)
1584 {
1585 static struct pci_bus bus;
1586
1587 if (hose == NULL) {
1588 printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr);
1589 }
1590 bus.number = busnr;
1591 bus.sysdata = hose;
1592 bus.ops = hose? hose->ops: &null_pci_ops;
1593 return &bus;
1594 }
1595
1596 #define EARLY_PCI_OP(rw, size, type) \
1597 int early_##rw##_config_##size(struct pci_controller *hose, int bus, \
1598 int devfn, int offset, type value) \
1599 { \
1600 return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus), \
1601 devfn, offset, value); \
1602 }
1603
1604 EARLY_PCI_OP(read, byte, u8 *)
1605 EARLY_PCI_OP(read, word, u16 *)
1606 EARLY_PCI_OP(read, dword, u32 *)
1607 EARLY_PCI_OP(write, byte, u8)
1608 EARLY_PCI_OP(write, word, u16)
1609 EARLY_PCI_OP(write, dword, u32)
1610
1611 int early_find_capability(struct pci_controller *hose, int bus, int devfn,
1612 int cap)
1613 {
1614 return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
1615 }
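/*
 * Typical (hypothetical) use from platform setup code, before any pci_bus
 * or pci_dev structures exist; setup_my_bridge is a placeholder for board
 * code:
 *
 *	u16 vendor;
 *
 *	early_read_config_word(hose, 0, PCI_DEVFN(0, 0),
 *			       PCI_VENDOR_ID, &vendor);
 *	if (vendor != 0xffff)
 *		setup_my_bridge(hose);
 *
 * fake_pci_bus() only fills in the bus number and config ops, so these
 * helpers are strictly for config space access and nothing else.
 */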
1616
1617 struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
1618 {
1619 struct pci_controller *hose = bus->sysdata;
1620
1621 return of_node_get(hose->dn);
1622 }
1623
1624 /**
1625 * pcibios_scan_phb - Given a pci_controller, setup and scan the PCI bus
1626 * @hose: Pointer to the PCI host controller instance structure
1627 */
1628 void pcibios_scan_phb(struct pci_controller *hose)
1629 {
1630 LIST_HEAD(resources);
1631 struct pci_bus *bus;
1632 struct device_node *node = hose->dn;
1633 int mode;
1634
1635 pr_debug("PCI: Scanning PHB %pOF\n", node);
1636
1637 /* Get some IO space for the new PHB */
1638 pcibios_setup_phb_io_space(hose);
1639
1640 /* Wire up PHB bus resources */
1641 pcibios_setup_phb_resources(hose, &resources);
1642
1643 hose->busn.start = hose->first_busno;
1644 hose->busn.end = hose->last_busno;
1645 hose->busn.flags = IORESOURCE_BUS;
1646 pci_add_resource(&resources, &hose->busn);
1647
1648 /* Create an empty bus for the toplevel */
1649 bus = pci_create_root_bus(hose->parent, hose->first_busno,
1650 hose->ops, hose, &resources);
1651 if (bus == NULL) {
1652 pr_err("Failed to create bus for PCI domain %04x\n",
1653 hose->global_number);
1654 pci_free_resource_list(&resources);
1655 return;
1656 }
1657 hose->bus = bus;
1658
1659 /* Get probe mode and perform scan */
1660 mode = PCI_PROBE_NORMAL;
1661 if (node && hose->controller_ops.probe_mode)
1662 mode = hose->controller_ops.probe_mode(bus);
1663 pr_debug(" probe mode: %d\n", mode);
1664 if (mode == PCI_PROBE_DEVTREE)
1665 of_scan_bus(node, bus);
1666
1667 if (mode == PCI_PROBE_NORMAL) {
1668 pci_bus_update_busn_res_end(bus, 255);
1669 hose->last_busno = pci_scan_child_bus(bus);
1670 pci_bus_update_busn_res_end(bus, hose->last_busno);
1671 }
1672
1673 /* Platform gets a chance to do some global fixups before
1674 * we proceed to resource allocation
1675 */
1676 if (ppc_md.pcibios_fixup_phb)
1677 ppc_md.pcibios_fixup_phb(hose);
1678
1679 /* Configure PCI Express settings */
1680 if (bus && !pci_has_flag(PCI_PROBE_ONLY)) {
1681 struct pci_bus *child;
1682 list_for_each_entry(child, &bus->children, node)
1683 pcie_bus_configure_settings(child);
1684 }
1685 }
1686 EXPORT_SYMBOL_GPL(pcibios_scan_phb);
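/*
 * Rough bring-up sketch (hypothetical board code; my_pci_ops is a
 * placeholder): a platform typically allocates and describes the PHB from
 * its device tree node,
 *
 *	struct pci_controller *hose = pcibios_alloc_controller(np);
 *
 *	if (!hose)
 *		return -ENOMEM;
 *	hose->first_busno = 0;
 *	hose->last_busno = 0xff;
 *	hose->ops = &my_pci_ops;
 *	pci_process_bridge_OF_ranges(hose, np, 1);
 *
 * and the common boot code later walks hose_list, calling
 * pcibios_scan_phb() and then pcibios_resource_survey() to scan the bus
 * and claim or assign the resources.
 */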
1687
1688 static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
1689 {
1690 int i, class = dev->class >> 8;
1691 /* When configured as agent, programming interface = 1 */
1692 int prog_if = dev->class & 0xf;
1693
1694 if ((class == PCI_CLASS_PROCESSOR_POWERPC ||
1695 class == PCI_CLASS_BRIDGE_OTHER) &&
1696 (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) &&
1697 (prog_if == 0) &&
1698 (dev->bus->parent == NULL)) {
1699 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1700 dev->resource[i].start = 0;
1701 dev->resource[i].end = 0;
1702 dev->resource[i].flags = 0;
1703 }
1704 }
1705 }
1706 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
1707 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);
1708
1709
1710 static int __init discover_phbs(void)
1711 {
1712 if (ppc_md.discover_phbs)
1713 ppc_md.discover_phbs();
1714
1715 return 0;
1716 }
1717 core_initcall(discover_phbs);
1718