// SPDX-License-Identifier: GPL-2.0
/* pci.c: UltraSparc PCI controller support.
 *
 * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com)
 * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
 *
 * OF tree based PCI bus probing taken from the PowerPC port
 * with minor modifications, see there for credits.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pgtable.h>

#include <linux/uaccess.h>
#include <asm/irq.h>
#include <asm/prom.h>
#include <asm/apb.h>

#include "pci_impl.h"
#include "kernel.h"
/* List of all PCI controllers found in the system. */
struct pci_pbm_info *pci_pbm_root = NULL;

/* Each PBM found gets a unique index. */
int pci_num_pbms;

volatile int pci_poke_in_progress;
volatile int pci_poke_cpu = -1;
volatile int pci_poke_faulted;

static DEFINE_SPINLOCK(pci_poke_lock);
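
/* The pci_config_*() accessors below run with pci_poke_lock held and
 * pci_poke_in_progress/pci_poke_cpu set, so that the trap handlers can
 * recognize a fault taken during a config space access (e.g. a master
 * abort) and record it in pci_poke_faulted instead of treating it as a
 * fatal error.
 */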
void pci_config_read8(u8 *addr, u8 *ret)
{
        unsigned long flags;
        u8 byte;

        spin_lock_irqsave(&pci_poke_lock, flags);
        pci_poke_cpu = smp_processor_id();
        pci_poke_in_progress = 1;
        pci_poke_faulted = 0;
        __asm__ __volatile__("membar #Sync\n\t"
                             "lduba [%1] %2, %0\n\t"
                             "membar #Sync"
                             : "=r" (byte)
                             : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
                             : "memory");
        pci_poke_in_progress = 0;
        pci_poke_cpu = -1;
        if (!pci_poke_faulted)
                *ret = byte;
        spin_unlock_irqrestore(&pci_poke_lock, flags);
}

void pci_config_read16(u16 *addr, u16 *ret)
{
        unsigned long flags;
        u16 word;

        spin_lock_irqsave(&pci_poke_lock, flags);
        pci_poke_cpu = smp_processor_id();
        pci_poke_in_progress = 1;
        pci_poke_faulted = 0;
        __asm__ __volatile__("membar #Sync\n\t"
                             "lduha [%1] %2, %0\n\t"
                             "membar #Sync"
                             : "=r" (word)
                             : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
                             : "memory");
        pci_poke_in_progress = 0;
        pci_poke_cpu = -1;
        if (!pci_poke_faulted)
                *ret = word;
        spin_unlock_irqrestore(&pci_poke_lock, flags);
}

void pci_config_read32(u32 *addr, u32 *ret)
{
        unsigned long flags;
        u32 dword;

        spin_lock_irqsave(&pci_poke_lock, flags);
        pci_poke_cpu = smp_processor_id();
        pci_poke_in_progress = 1;
        pci_poke_faulted = 0;
        __asm__ __volatile__("membar #Sync\n\t"
                             "lduwa [%1] %2, %0\n\t"
                             "membar #Sync"
                             : "=r" (dword)
                             : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
                             : "memory");
        pci_poke_in_progress = 0;
        pci_poke_cpu = -1;
        if (!pci_poke_faulted)
                *ret = dword;
        spin_unlock_irqrestore(&pci_poke_lock, flags);
}

void pci_config_write8(u8 *addr, u8 val)
{
        unsigned long flags;

        spin_lock_irqsave(&pci_poke_lock, flags);
        pci_poke_cpu = smp_processor_id();
        pci_poke_in_progress = 1;
        pci_poke_faulted = 0;
        __asm__ __volatile__("membar #Sync\n\t"
                             "stba %0, [%1] %2\n\t"
                             "membar #Sync"
                             : /* no outputs */
                             : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
                             : "memory");
        pci_poke_in_progress = 0;
        pci_poke_cpu = -1;
        spin_unlock_irqrestore(&pci_poke_lock, flags);
}

void pci_config_write16(u16 *addr, u16 val)
{
        unsigned long flags;

        spin_lock_irqsave(&pci_poke_lock, flags);
        pci_poke_cpu = smp_processor_id();
        pci_poke_in_progress = 1;
        pci_poke_faulted = 0;
        __asm__ __volatile__("membar #Sync\n\t"
                             "stha %0, [%1] %2\n\t"
                             "membar #Sync"
                             : /* no outputs */
                             : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
                             : "memory");
        pci_poke_in_progress = 0;
        pci_poke_cpu = -1;
        spin_unlock_irqrestore(&pci_poke_lock, flags);
}

void pci_config_write32(u32 *addr, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&pci_poke_lock, flags);
        pci_poke_cpu = smp_processor_id();
        pci_poke_in_progress = 1;
        pci_poke_faulted = 0;
        __asm__ __volatile__("membar #Sync\n\t"
                             "stwa %0, [%1] %2\n\t"
                             "membar #Sync"
                             : /* no outputs */
                             : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
                             : "memory");
        pci_poke_in_progress = 0;
        pci_poke_cpu = -1;
        spin_unlock_irqrestore(&pci_poke_lock, flags);
}
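
/* Illustrative use (a sketch, not code from this file): a controller's
 * config space ops compute the physical address of the config register
 * for (bus, devfn, where) inside its config aperture and then do e.g.
 *
 *      u8 byte;
 *      pci_config_read8((u8 *)addr, &byte);
 *
 * If the access faults, *ret is left untouched, so the caller's
 * preloaded default (typically all ones) is what gets reported.
 */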
static int ofpci_verbose;

static int __init ofpci_debug(char *str)
{
        int val = 0;

        get_option(&str, &val);
        if (val)
                ofpci_verbose = 1;
        return 1;
}

__setup("ofpci_debug=", ofpci_debug);
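
/* The first (phys.hi) cell of an OF PCI address is laid out, per the
 * IEEE 1275 PCI binding, as npt000ss bbbbbbbb dddddfff rrrrrrrr:
 * ss = 01 selects I/O space, 10 32-bit memory and 11 64-bit memory,
 * while p (bit 30) marks a prefetchable range.  That is why the code
 * below tests 0x02000000 for memory, 0x01000000 for 64-bit memory
 * (or, on its own, for I/O space) and 0x40000000 for prefetchable.
 */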
static unsigned long pci_parse_of_flags(u32 addr0)
{
        unsigned long flags = 0;

        if (addr0 & 0x02000000) {
                flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
                flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
                if (addr0 & 0x01000000)
                        flags |= IORESOURCE_MEM_64
                                 | PCI_BASE_ADDRESS_MEM_TYPE_64;
                if (addr0 & 0x40000000)
                        flags |= IORESOURCE_PREFETCH
                                 | PCI_BASE_ADDRESS_MEM_PREFETCH;
        } else if (addr0 & 0x01000000)
                flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO;
        return flags;
}
/* The of_device layer has translated all of the assigned-address properties
 * into physical address resources, we only have to figure out the register
 * mapping.
 */
static void pci_parse_of_addrs(struct platform_device *op,
                               struct device_node *node,
                               struct pci_dev *dev)
{
        struct resource *op_res;
        const u32 *addrs;
        int proplen;

        addrs = of_get_property(node, "assigned-addresses", &proplen);
        if (!addrs)
                return;
        if (ofpci_verbose)
                pci_info(dev, " parse addresses (%d bytes) @ %p\n",
                         proplen, addrs);
        op_res = &op->resource[0];
        for (; proplen >= 20; proplen -= 20, addrs += 5, op_res++) {
                struct resource *res;
                unsigned long flags;
                int i;

                flags = pci_parse_of_flags(addrs[0]);
                if (!flags)
                        continue;
                i = addrs[0] & 0xff;
                if (ofpci_verbose)
                        pci_info(dev, " start: %llx, end: %llx, i: %x\n",
                                 op_res->start, op_res->end, i);

                if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
                        res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
                } else if (i == dev->rom_base_reg) {
                        res = &dev->resource[PCI_ROM_RESOURCE];
                        flags |= IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
                } else {
                        pci_err(dev, "bad cfg reg num 0x%x\n", i);
                        continue;
                }
                res->start = op_res->start;
                res->end = op_res->end;
                res->flags = flags;
                res->name = pci_name(dev);

                pci_info(dev, "reg 0x%x: %pR\n", i, res);
        }
}
static void pci_init_dev_archdata(struct dev_archdata *sd, void *iommu,
                                  void *stc, void *host_controller,
                                  struct platform_device *op,
                                  int numa_node)
{
        sd->iommu = iommu;
        sd->stc = stc;
        sd->host_controller = host_controller;
        sd->op = op;
        sd->numa_node = numa_node;
}
static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
                                         struct device_node *node,
                                         struct pci_bus *bus, int devfn)
{
        struct dev_archdata *sd;
        struct platform_device *op;
        struct pci_dev *dev;
        u32 class;

        dev = pci_alloc_dev(bus);
        if (!dev)
                return NULL;

        op = of_find_device_by_node(node);
        sd = &dev->dev.archdata;
        pci_init_dev_archdata(sd, pbm->iommu, &pbm->stc, pbm, op,
                              pbm->numa_node);
        sd = &op->dev.archdata;
        sd->iommu = pbm->iommu;
        sd->stc = &pbm->stc;
        sd->numa_node = pbm->numa_node;

        if (of_node_name_eq(node, "ebus"))
                of_propagate_archdata(op);

        if (ofpci_verbose)
                pci_info(bus, " create device, devfn: %x, type: %s\n",
                         devfn, of_node_get_device_type(node));

        dev->dev.parent = bus->bridge;
        dev->dev.bus = &pci_bus_type;
        dev->dev.of_node = of_node_get(node);
        dev->devfn = devfn;
        dev->multifunction = 0;         /* maybe a lie? */
        set_pcie_port_type(dev);

        pci_dev_assign_slot(dev);
        dev->vendor = of_getintprop_default(node, "vendor-id", 0xffff);
        dev->device = of_getintprop_default(node, "device-id", 0xffff);
        dev->subsystem_vendor =
                of_getintprop_default(node, "subsystem-vendor-id", 0);
        dev->subsystem_device =
                of_getintprop_default(node, "subsystem-id", 0);

        dev->cfg_size = pci_cfg_space_size(dev);

        /* We can't actually use the firmware value, we have
         * to read what is in the register right now.  One
         * reason is that in the case of IDE interfaces the
         * firmware can sample the value before the IDE
         * interface is programmed into native mode.
         */
        pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
        dev->class = class >> 8;
        dev->revision = class & 0xff;

        dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(bus),
                     dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));

        /* I have seen IDE devices which will not respond to
         * the bmdma simplex check reads if bus mastering is
         * disabled.
         */
        if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE)
                pci_set_master(dev);

        dev->current_state = PCI_UNKNOWN;       /* unknown power state */
        dev->error_state = pci_channel_io_normal;
        dev->dma_mask = 0xffffffff;

        if (of_node_name_eq(node, "pci")) {
                /* a PCI-PCI bridge */
                dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
                dev->rom_base_reg = PCI_ROM_ADDRESS1;
        } else if (of_node_is_type(node, "cardbus")) {
                dev->hdr_type = PCI_HEADER_TYPE_CARDBUS;
        } else {
                dev->hdr_type = PCI_HEADER_TYPE_NORMAL;
                dev->rom_base_reg = PCI_ROM_ADDRESS;

                dev->irq = sd->op->archdata.irqs[0];
                if (dev->irq == 0xffffffff)
                        dev->irq = PCI_IRQ_NONE;
        }

        pci_info(dev, "[%04x:%04x] type %02x class %#08x\n",
                 dev->vendor, dev->device, dev->hdr_type, dev->class);

        pci_parse_of_addrs(sd->op, node, dev);

        if (ofpci_verbose)
                pci_info(dev, " adding to system ...\n");

        pci_device_add(dev, bus);

        return dev;
}
static void apb_calc_first_last(u8 map, u32 *first_p, u32 *last_p)
{
        u32 idx, first, last;

        first = 8;
        last = 0;
        for (idx = 0; idx < 8; idx++) {
                if ((map & (1 << idx)) != 0) {
                        if (first > idx)
                                first = idx;
                        if (last < idx)
                                last = idx;
                }
        }

        *first_p = first;
        *last_p = last;
}
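
/* On the APB (SUNW,simba) bridge the I/O and memory "address map"
 * registers are bitmaps: each set bit enables one fixed-size window
 * behind the bridge (2MB for I/O, 512MB for memory).  first/last are
 * the lowest and highest set bit indices, which apb_fake_ranges()
 * below turns into a single contiguous resource.
 */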
/* Cook up fake bus resources for SUNW,simba PCI bridges which lack
 * a proper 'ranges' property.
 */
static void apb_fake_ranges(struct pci_dev *dev,
                            struct pci_bus *bus,
                            struct pci_pbm_info *pbm)
{
        struct pci_bus_region region;
        struct resource *res;
        u32 first, last;
        u8 map;

        pci_read_config_byte(dev, APB_IO_ADDRESS_MAP, &map);
        apb_calc_first_last(map, &first, &last);
        res = bus->resource[0];
        res->flags = IORESOURCE_IO;
        region.start = (first << 21);
        region.end = (last << 21) + ((1 << 21) - 1);
        pcibios_bus_to_resource(dev->bus, res, &region);

        pci_read_config_byte(dev, APB_MEM_ADDRESS_MAP, &map);
        apb_calc_first_last(map, &first, &last);
        res = bus->resource[1];
        res->flags = IORESOURCE_MEM;
        region.start = (first << 29);
        region.end = (last << 29) + ((1 << 29) - 1);
        pcibios_bus_to_resource(dev->bus, res, &region);
}
static void pci_of_scan_bus(struct pci_pbm_info *pbm,
                            struct device_node *node,
                            struct pci_bus *bus);

#define GET_64BIT(prop, i)      ((((u64) (prop)[(i)]) << 32) | (prop)[(i)+1])
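
/* e.g. GET_64BIT(ranges, 1) combines cells 1 and 2 (the 64-bit PCI
 * child address) and GET_64BIT(ranges, 6) cells 6 and 7 (the 64-bit
 * size) of an 8-cell bridge 'ranges' entry.
 */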
static void of_scan_pci_bridge(struct pci_pbm_info *pbm,
                               struct device_node *node,
                               struct pci_dev *dev)
{
        struct pci_bus *bus;
        const u32 *busrange, *ranges;
        int len, i, simba;
        struct pci_bus_region region;
        struct resource *res;
        unsigned int flags;
        u64 size;

        if (ofpci_verbose)
                pci_info(dev, "of_scan_pci_bridge(%pOF)\n", node);

        /* parse bus-range property */
        busrange = of_get_property(node, "bus-range", &len);
        if (busrange == NULL || len != 8) {
                pci_info(dev, "Can't get bus-range for PCI-PCI bridge %pOF\n",
                         node);
                return;
        }

        if (ofpci_verbose)
                pci_info(dev, " Bridge bus range [%u --> %u]\n",
                         busrange[0], busrange[1]);

        ranges = of_get_property(node, "ranges", &len);
        simba = 0;
        if (ranges == NULL) {
                const char *model = of_get_property(node, "model", NULL);
                if (model && !strcmp(model, "SUNW,simba"))
                        simba = 1;
        }

        bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
        if (!bus) {
                pci_err(dev, "Failed to create pci bus for %pOF\n",
                        node);
                return;
        }

        bus->primary = dev->bus->number;
        pci_bus_insert_busn_res(bus, busrange[0], busrange[1]);

        if (ofpci_verbose)
                pci_info(dev, " Bridge ranges[%p] simba[%d]\n",
                         ranges, simba);

        /* parse ranges property, or cook one up by hand for Simba */
        /* PCI #address-cells == 3 and #size-cells == 2 always */
        res = &dev->resource[PCI_BRIDGE_RESOURCES];
        for (i = 0; i < PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES; ++i) {
                res->flags = 0;
                bus->resource[i] = res;
                ++res;
        }
        if (simba) {
                apb_fake_ranges(dev, bus, pbm);
                goto after_ranges;
        } else if (ranges == NULL) {
                pci_read_bridge_bases(bus);
                goto after_ranges;
        }
        i = 1;
        for (; len >= 32; len -= 32, ranges += 8) {
                u64 start;

                if (ofpci_verbose)
                        pci_info(dev, " RAW Range[%08x:%08x:%08x:%08x:%08x:%08x:"
                                 "%08x:%08x]\n",
                                 ranges[0], ranges[1], ranges[2], ranges[3],
                                 ranges[4], ranges[5], ranges[6], ranges[7]);

                flags = pci_parse_of_flags(ranges[0]);
                size = GET_64BIT(ranges, 6);
                if (flags == 0 || size == 0)
                        continue;

                /* On PCI-Express systems, PCI bridges that have no devices downstream
                 * have a bogus size value where the first 32-bit cell is 0xffffffff.
                 * This results in a bogus range where start + size overflows.
                 *
                 * Just skip these otherwise the kernel will complain when the resource
                 * tries to be claimed.
                 */
                if (size >> 32 == 0xffffffff)
                        continue;

                if (flags & IORESOURCE_IO) {
                        res = bus->resource[0];
                        if (res->flags) {
                                pci_err(dev, "ignoring extra I/O range"
                                        " for bridge %pOF\n", node);
                                continue;
                        }
                } else {
                        if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) {
                                pci_err(dev, "too many memory ranges"
                                        " for bridge %pOF\n", node);
                                continue;
                        }
                        res = bus->resource[i];
                        ++i;
                }

                res->flags = flags;
                region.start = start = GET_64BIT(ranges, 1);
                region.end = region.start + size - 1;

                if (ofpci_verbose)
                        pci_info(dev, " Using flags[%08x] start[%016llx] size[%016llx]\n",
                                 flags, start, size);

                pcibios_bus_to_resource(dev->bus, res, &region);
        }
after_ranges:
        sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
                bus->number);
        if (ofpci_verbose)
                pci_info(dev, " bus name: %s\n", bus->name);

        pci_of_scan_bus(pbm, node, bus);
}
static void pci_of_scan_bus(struct pci_pbm_info *pbm,
                            struct device_node *node,
                            struct pci_bus *bus)
{
        struct device_node *child;
        const u32 *reg;
        int reglen, devfn, prev_devfn;
        struct pci_dev *dev;

        if (ofpci_verbose)
                pci_info(bus, "scan_bus[%pOF] bus no %d\n",
                         node, bus->number);

        child = NULL;
        prev_devfn = -1;
        while ((child = of_get_next_child(node, child)) != NULL) {
                if (ofpci_verbose)
                        pci_info(bus, " * %pOF\n", child);
                reg = of_get_property(child, "reg", &reglen);
                if (reg == NULL || reglen < 20)
                        continue;

                devfn = (reg[0] >> 8) & 0xff;

                /* This is a workaround for some device trees
                 * which list PCI devices twice.  On the V100
                 * for example, device number 3 is listed twice.
                 * Once as "pm" and once again as "lomp".
                 */
                if (devfn == prev_devfn)
                        continue;
                prev_devfn = devfn;

                /* create a new pci_dev for this device */
                dev = of_create_pci_dev(pbm, child, bus, devfn);
                if (!dev)
                        continue;
                if (ofpci_verbose)
                        pci_info(dev, "dev header type: %x\n", dev->hdr_type);

                if (pci_is_bridge(dev))
                        of_scan_pci_bridge(pbm, child, dev);
        }
}
static ssize_t
show_pciobppath_attr(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct pci_dev *pdev;
        struct device_node *dp;

        pdev = to_pci_dev(dev);
        dp = pdev->dev.of_node;

        return scnprintf(buf, PAGE_SIZE, "%pOF\n", dp);
}

static DEVICE_ATTR(obppath, S_IRUSR | S_IRGRP | S_IROTH, show_pciobppath_attr, NULL);
static void pci_bus_register_of_sysfs(struct pci_bus *bus)
{
        struct pci_dev *dev;
        struct pci_bus *child_bus;
        int err;

        list_for_each_entry(dev, &bus->devices, bus_list) {
                /* we don't really care if we can create this file or
                 * not, but we need to assign the result of the call
                 * or the world will fall under alien invasion and
                 * everybody will be frozen on a spaceship ready to be
                 * eaten on alpha centauri by some green and jelly
                 * humanoid.
                 */
                err = sysfs_create_file(&dev->dev.kobj, &dev_attr_obppath.attr);
                (void) err;
        }
        list_for_each_entry(child_bus, &bus->children, node)
                pci_bus_register_of_sysfs(child_bus);
}
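
/* Reserve the fixed legacy VGA framebuffer window (0xa0000-0xbffff in
 * bus space) for VGA class devices so that nothing else is placed on
 * top of it.
 */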
static void pci_claim_legacy_resources(struct pci_dev *dev)
{
        struct pci_bus_region region;
        struct resource *p, *root, *conflict;

        if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
                return;

        p = kzalloc(sizeof(*p), GFP_KERNEL);
        if (!p)
                return;

        p->name = "Video RAM area";
        p->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

        region.start = 0xa0000UL;
        region.end = region.start + 0x1ffffUL;
        pcibios_bus_to_resource(dev->bus, p, &region);

        root = pci_find_parent_resource(dev, p);
        if (!root) {
                pci_info(dev, "can't claim VGA legacy %pR: no compatible bridge window\n", p);
                goto err;
        }

        conflict = request_resource_conflict(root, p);
        if (conflict) {
                pci_info(dev, "can't claim VGA legacy %pR: address conflict with %s %pR\n",
                         p, conflict->name, conflict);
                goto err;
        }

        pci_info(dev, "VGA legacy framebuffer %pR\n", p);
        return;

err:
        kfree(p);
}
static void pci_claim_bus_resources(struct pci_bus *bus)
{
        struct pci_bus *child_bus;
        struct pci_dev *dev;

        list_for_each_entry(dev, &bus->devices, bus_list) {
                int i;

                for (i = 0; i < PCI_NUM_RESOURCES; i++) {
                        struct resource *r = &dev->resource[i];

                        if (r->parent || !r->start || !r->flags)
                                continue;

                        if (ofpci_verbose)
                                pci_info(dev, "Claiming Resource %d: %pR\n",
                                         i, r);

                        pci_claim_resource(dev, i);
                }

                pci_claim_legacy_resources(dev);
        }

        list_for_each_entry(child_bus, &bus->children, node)
                pci_claim_bus_resources(child_bus);
}
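
/* Build and scan the root PCI bus for one PBM: register its I/O, MEM
 * (and optional MEM64) apertures plus the bus number range, create the
 * root bus, scan it from the OF device tree and claim the resources
 * the firmware assigned.
 */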
struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm,
                                 struct device *parent)
{
        LIST_HEAD(resources);
        struct device_node *node = pbm->op->dev.of_node;
        struct pci_bus *bus;

        printk("PCI: Scanning PBM %pOF\n", node);

        pci_add_resource_offset(&resources, &pbm->io_space,
                                pbm->io_offset);
        pci_add_resource_offset(&resources, &pbm->mem_space,
                                pbm->mem_offset);
        if (pbm->mem64_space.flags)
                pci_add_resource_offset(&resources, &pbm->mem64_space,
                                        pbm->mem64_offset);
        pbm->busn.start = pbm->pci_first_busno;
        pbm->busn.end   = pbm->pci_last_busno;
        pbm->busn.flags = IORESOURCE_BUS;
        pci_add_resource(&resources, &pbm->busn);
        bus = pci_create_root_bus(parent, pbm->pci_first_busno, pbm->pci_ops,
                                  pbm, &resources);
        if (!bus) {
                printk(KERN_ERR "Failed to create bus for %pOF\n", node);
                pci_free_resource_list(&resources);
                return NULL;
        }

        pci_of_scan_bus(pbm, node, bus);
        pci_bus_register_of_sysfs(bus);

        pci_claim_bus_resources(bus);

        pci_bus_add_devices(bus);
        return bus;
}
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
        u16 cmd, oldcmd;
        int i;

        pci_read_config_word(dev, PCI_COMMAND, &cmd);
        oldcmd = cmd;

        for (i = 0; i < PCI_NUM_RESOURCES; i++) {
                struct resource *res = &dev->resource[i];

                /* Only set up the requested stuff */
                if (!(mask & (1<<i)))
                        continue;

                if (res->flags & IORESOURCE_IO)
                        cmd |= PCI_COMMAND_IO;
                if (res->flags & IORESOURCE_MEM)
                        cmd |= PCI_COMMAND_MEMORY;
        }

        if (cmd != oldcmd) {
                pci_info(dev, "enabling device (%04x -> %04x)\n", oldcmd, cmd);
                pci_write_config_word(dev, PCI_COMMAND, cmd);
        }
        return 0;
}
/* Platform support for /proc/bus/pci/X/Y mmap()s. */

/* If the user uses a host-bridge as the PCI device, he may use
 * this to perform a raw mmap() of the I/O or MEM space behind
 * that controller.
 *
 * This can be useful for execution of x86 PCI bios initialization code
 * on a PCI card, like the xfree86 int10 stuff does.
 */
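
/* Roughly (an illustrative sketch, not taken from any particular tool),
 * a user-space program would do:
 *
 *      int fd = open("/proc/bus/pci/00/00.0", O_RDWR);
 *      ioctl(fd, PCIIOC_MMAP_IS_MEM);          // or PCIIOC_MMAP_IS_IO
 *      void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                     fd, offset);
 *
 * where 'offset' is a byte offset into the controller's MEM (or I/O)
 * aperture, validated by __pci_mmap_make_offset_bus() below.
 */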
static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struct *vma,
                                      enum pci_mmap_state mmap_state)
{
        struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
        unsigned long space_size, user_offset, user_size;

        if (mmap_state == pci_mmap_io) {
                space_size = resource_size(&pbm->io_space);
        } else {
                space_size = resource_size(&pbm->mem_space);
        }

        /* Make sure the request is in range. */
        user_offset = vma->vm_pgoff << PAGE_SHIFT;
        user_size = vma->vm_end - vma->vm_start;

        if (user_offset >= space_size ||
            (user_offset + user_size) > space_size)
                return -EINVAL;

        if (mmap_state == pci_mmap_io) {
                vma->vm_pgoff = (pbm->io_space.start +
                                 user_offset) >> PAGE_SHIFT;
        } else {
                vma->vm_pgoff = (pbm->mem_space.start +
                                 user_offset) >> PAGE_SHIFT;
        }

        return 0;
}
/* Adjust vm_pgoff of VMA such that it is the physical page offset
 * corresponding to the 32-bit pci bus offset for DEV requested by the user.
 *
 * Basically, the user finds the base address of the device they wish to
 * mmap, reads the 32-bit value from the config space base register, adds
 * whatever PAGE_SIZE multiple offset they wish, and feeds this into the
 * offset parameter of mmap() on /proc/bus/pci/XXX for that device.
 *
 * Returns negative error code on failure, zero on success.
 */
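
/* For example, if a device's BAR reads 0x41000000, mapping the second
 * page of that BAR means passing 0x41000000 + PAGE_SIZE as the mmap()
 * offset; __pci_mmap_make_offset_bus() then rebases that bus offset
 * into the controller's aperture and the loop below checks that the
 * result falls inside one of the device's resources.
 */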
static int __pci_mmap_make_offset(struct pci_dev *pdev,
                                  struct vm_area_struct *vma,
                                  enum pci_mmap_state mmap_state)
{
        unsigned long user_paddr, user_size;
        int i, err;

        /* First compute the physical address in vma->vm_pgoff,
         * making sure the user offset is within range in the
         * appropriate PCI space.
         */
        err = __pci_mmap_make_offset_bus(pdev, vma, mmap_state);
        if (err)
                return err;

        /* If this is a mapping on a host bridge, any address
         * is OK.
         */
        if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
                return err;

        /* Otherwise make sure it's in the range for one of the
         * device's resources.
         */
        user_paddr = vma->vm_pgoff << PAGE_SHIFT;
        user_size = vma->vm_end - vma->vm_start;

        for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
                struct resource *rp = &pdev->resource[i];
                resource_size_t aligned_end;

                /* Active? */
                if (!rp->flags)
                        continue;

                /* Same type? */
                if (i == PCI_ROM_RESOURCE) {
                        if (mmap_state != pci_mmap_mem)
                                continue;
                } else {
                        if ((mmap_state == pci_mmap_io &&
                             (rp->flags & IORESOURCE_IO) == 0) ||
                            (mmap_state == pci_mmap_mem &&
                             (rp->flags & IORESOURCE_MEM) == 0))
                                continue;
                }

                /* Align the resource end to the next page address.
                 * PAGE_SIZE intentionally added instead of (PAGE_SIZE - 1),
                 * because actually we need the address of the next byte
                 * after rp->end.
                 */
                aligned_end = (rp->end + PAGE_SIZE) & PAGE_MASK;

                if ((rp->start <= user_paddr) &&
                    (user_paddr + user_size) <= aligned_end)
                        break;
        }

        if (i > PCI_ROM_RESOURCE)
                return -EINVAL;

        return 0;
}
/* Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
 * device mapping.
 */
static void __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
                                  enum pci_mmap_state mmap_state)
{
        /* Our io_remap_pfn_range takes care of this, do nothing. */
}
/* Perform the actual remap of the pages for a PCI device mapping, as appropriate
 * for this architecture.  The region in the process to map is described by vm_start
 * and vm_end members of VMA, the base physical address is found in vm_pgoff.
 * The pci device structure is provided so that architectures may make mapping
 * decisions on a per-device or per-bus basis.
 *
 * Returns a negative error code on failure, zero on success.
 */
int pci_mmap_page_range(struct pci_dev *dev, int bar,
                        struct vm_area_struct *vma,
                        enum pci_mmap_state mmap_state, int write_combine)
{
        int ret;

        ret = __pci_mmap_make_offset(dev, vma, mmap_state);
        if (ret < 0)
                return ret;

        __pci_mmap_set_pgprot(dev, vma, mmap_state);

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        ret = io_remap_pfn_range(vma, vma->vm_start,
                                 vma->vm_pgoff,
                                 vma->vm_end - vma->vm_start,
                                 vma->vm_page_prot);
        if (ret)
                return ret;

        return 0;
}
#ifdef CONFIG_NUMA
int pcibus_to_node(struct pci_bus *pbus)
{
        struct pci_pbm_info *pbm = pbus->sysdata;

        return pbm->numa_node;
}
EXPORT_SYMBOL(pcibus_to_node);
#endif

/* Return the domain number for this pci bus */

int pci_domain_nr(struct pci_bus *pbus)
{
        struct pci_pbm_info *pbm = pbus->sysdata;
        int ret;

        if (!pbm) {
                ret = -ENXIO;
        } else {
                ret = pbm->index;
        }

        return ret;
}
EXPORT_SYMBOL(pci_domain_nr);
#ifdef CONFIG_PCI_MSI
int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
        struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
        unsigned int irq;

        if (!pbm->setup_msi_irq)
                return -EINVAL;

        return pbm->setup_msi_irq(&irq, pdev, desc);
}

void arch_teardown_msi_irq(unsigned int irq)
{
        struct msi_desc *entry = irq_get_msi_desc(irq);
        struct pci_dev *pdev = msi_desc_to_pci_dev(entry);
        struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;

        if (pbm->teardown_msi_irq)
                pbm->teardown_msi_irq(irq, pdev);
}
#endif /* !(CONFIG_PCI_MSI) */
/* ALI sound chips generate 31-bits of DMA, a special register
 * determines what bit 31 is emitted as.
 */
int ali_sound_dma_hack(struct device *dev, u64 device_mask)
{
        struct iommu *iommu = dev->archdata.iommu;
        struct pci_dev *ali_isa_bridge;
        u8 val;

        if (!dev_is_pci(dev))
                return 0;

        if (to_pci_dev(dev)->vendor != PCI_VENDOR_ID_AL ||
            to_pci_dev(dev)->device != PCI_DEVICE_ID_AL_M5451 ||
            device_mask != 0x7fffffff)
                return 0;

        ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL,
                                        PCI_DEVICE_ID_AL_M1533,
                                        NULL);

        pci_read_config_byte(ali_isa_bridge, 0x7e, &val);
        if (iommu->dma_addr_mask & 0x80000000)
                val |= 0x01;
        else
                val &= ~0x01;
        pci_write_config_byte(ali_isa_bridge, 0x7e, val);
        pci_dev_put(ali_isa_bridge);
        return 1;
}
*pdev
, int bar
,
991 const struct resource
*rp
, resource_size_t
*start
,
992 resource_size_t
*end
)
994 struct pci_bus_region region
;
997 * "User" addresses are shown in /sys/devices/pci.../.../resource
998 * and /proc/bus/pci/devices and used as mmap offsets for
999 * /proc/bus/pci/BB/DD.F files (see proc_bus_pci_mmap()).
1001 * On sparc, these are PCI bus addresses, i.e., raw BAR values.
1003 pcibios_resource_to_bus(pdev
->bus
, ®ion
, (struct resource
*) rp
);
1004 *start
= region
.start
;
1008 void pcibios_set_master(struct pci_dev
*dev
)
1010 /* No special bus mastering setup handling */
#ifdef CONFIG_PCI_IOV
int pcibios_add_device(struct pci_dev *dev)
{
        struct pci_dev *pdev;

        /* Add sriov arch specific initialization here.
         * Copy dev_archdata from PF to VF
         */
        if (dev->is_virtfn) {
                struct dev_archdata *psd;

                pdev = dev->physfn;
                psd = &pdev->dev.archdata;
                pci_init_dev_archdata(&dev->dev.archdata, psd->iommu,
                                      psd->stc, psd->host_controller, NULL,
                                      psd->numa_node);
        }
        return 0;
}
#endif /* CONFIG_PCI_IOV */
static int __init pcibios_init(void)
{
        pci_dfl_cache_line_size = 64 >> 2;
        return 0;
}
subsys_initcall(pcibios_init);
#define SLOT_NAME_SIZE  11  /* Max decimal digits + null in u32 */

static void pcie_bus_slot_names(struct pci_bus *pbus)
{
        struct pci_dev *pdev;
        struct pci_bus *bus;

        list_for_each_entry(pdev, &pbus->devices, bus_list) {
                char name[SLOT_NAME_SIZE];
                struct pci_slot *pci_slot;
                const u32 *slot_num;
                int len;

                slot_num = of_get_property(pdev->dev.of_node,
                                           "physical-slot#", &len);

                if (slot_num == NULL || len != 4)
                        continue;

                snprintf(name, sizeof(name), "%u", slot_num[0]);
                pci_slot = pci_create_slot(pbus, slot_num[0], name, NULL);

                if (IS_ERR(pci_slot))
                        pr_err("PCI: pci_create_slot returned %ld.\n",
                               PTR_ERR(pci_slot));
        }

        list_for_each_entry(bus, &pbus->children, node)
                pcie_bus_slot_names(bus);
}
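
/* The OBP "slot-names" property is a 32-bit mask of populated slots
 * followed by a NUL-separated name string for each bit set in the
 * mask; pci_bus_slot_names() walks the mask and registers one
 * pci_slot per name.
 */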
static void pci_bus_slot_names(struct device_node *node, struct pci_bus *bus)
{
        const struct pci_slot_names {
                u32     slot_mask;
                char    names[0];
        } *prop;
        const char *sp;
        int len, i;
        u32 mask;

        prop = of_get_property(node, "slot-names", &len);
        if (!prop)
                return;

        mask = prop->slot_mask;
        sp = prop->names;

        if (ofpci_verbose)
                pci_info(bus, "Making slots for [%pOF] mask[0x%02x]\n",
                         node, mask);

        i = 0;
        while (mask) {
                struct pci_slot *pci_slot;
                u32 this_bit = 1 << i;

                if (!(mask & this_bit)) {
                        i++;
                        continue;
                }

                if (ofpci_verbose)
                        pci_info(bus, "Making slot [%s]\n", sp);

                pci_slot = pci_create_slot(bus, i, sp, NULL);
                if (IS_ERR(pci_slot))
                        pci_err(bus, "pci_create_slot returned %ld\n",
                                PTR_ERR(pci_slot));

                sp += strlen(sp) + 1;
                mask &= ~this_bit;
                i++;
        }
}
static int __init of_pci_slot_init(void)
{
        struct pci_bus *pbus = NULL;

        while ((pbus = pci_find_next_bus(pbus)) != NULL) {
                struct device_node *node;
                struct pci_dev *pdev;

                pdev = list_first_entry(&pbus->devices, struct pci_dev,
                                        bus_list);

                if (pdev && pci_is_pcie(pdev)) {
                        pcie_bus_slot_names(pbus);
                } else {
                        if (pbus->self) {
                                /* PCI->PCI bridge */
                                node = pbus->self->dev.of_node;
                        } else {
                                struct pci_pbm_info *pbm = pbus->sysdata;

                                /* Host PCI controller */
                                node = pbm->op->dev.of_node;
                        }

                        pci_bus_slot_names(node, pbus);
                }
        }

        return 0;
}
device_initcall(of_pci_slot_init);