/*
 * Common prep/chrp pci routines. -- Cort
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/bootmem.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/sections.h>
#include <asm/pci-bridge.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
#include <asm/machdep.h>
#undef DEBUG

#ifdef DEBUG
#define DBG(x...) printk(x)
#else
#define DBG(x...)
#endif
unsigned long isa_io_base = 0;
unsigned long isa_mem_base = 0;
unsigned long pci_dram_offset = 0;
int pcibios_assign_bus_offset = 1;
void pcibios_make_OF_bus_map(void);

static int pci_relocate_bridge_resource(struct pci_bus *bus, int i);
static int probe_resource(struct pci_bus *parent, struct resource *pr,
			  struct resource *res, struct resource **conflict);
static void update_bridge_base(struct pci_bus *bus, int i);
static void pcibios_fixup_resources(struct pci_dev *dev);
static void fixup_broken_pcnet32(struct pci_dev *dev);
static int reparent_resources(struct resource *parent, struct resource *res);
static void fixup_cpc710_pci64(struct pci_dev *dev);
/* By default, we don't re-assign bus numbers. */
int pci_assign_all_buses;

struct pci_controller* hose_head;
struct pci_controller** hose_tail = &hose_head;

static int pci_bus_count;
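
/*
 * Restore the AMD vendor ID on Ethernet devices that wrongly report the
 * Trident vendor ID (broken pcnet32 cards), so the right driver can bind.
 */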
static void
fixup_broken_pcnet32(struct pci_dev *dev)
{
	if ((dev->class >> 8) == PCI_CLASS_NETWORK_ETHERNET) {
		dev->vendor = PCI_VENDOR_ID_AMD;
		pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32);
static void
fixup_cpc710_pci64(struct pci_dev *dev)
{
	/* Hide the PCI64 BARs from the kernel as their content doesn't
	 * fit well in the resource management.
	 */
	dev->resource[0].start = dev->resource[0].end = 0;
	dev->resource[0].flags = 0;
	dev->resource[1].start = dev->resource[1].end = 0;
	dev->resource[1].flags = 0;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CPC710_PCI64, fixup_cpc710_pci64);
static void
pcibios_fixup_resources(struct pci_dev *dev)
{
	struct pci_controller *hose = (struct pci_controller *)dev->sysdata;
	int i;
	unsigned long offset;

	if (!hose) {
		printk(KERN_ERR "No hose for PCI dev %s!\n", pci_name(dev));
		return;
	}
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		struct resource *res = dev->resource + i;

		if (!res->flags)
			continue;
		if (res->end == 0xffffffff) {
			DBG("PCI:%s Resource %d [%016llx-%016llx] is unassigned\n",
			    pci_name(dev), i,
			    (unsigned long long)res->start,
			    (unsigned long long)res->end);
			res->end -= res->start;
			res->start = 0;
			res->flags |= IORESOURCE_UNSET;
			continue;
		}

		offset = 0;
		if (res->flags & IORESOURCE_MEM) {
			offset = hose->pci_mem_offset;
		} else if (res->flags & IORESOURCE_IO) {
			offset = (unsigned long) hose->io_base_virt
				- isa_io_base;
		}

		res->start += offset;
		res->end += offset;
#ifdef DEBUG
		printk("Fixup res %d (%lx) of dev %s: %lx -> %lx\n",
		       i, res->flags, pci_name(dev),
		       res->start - offset, res->start);
#endif
	}

	/* Call machine specific resource fixup */
	if (ppc_md.pcibios_fixup_resources)
		ppc_md.pcibios_fixup_resources(dev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
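
/*
 * Convert a resource between the CPU's view of the address space and the
 * PCI bus view, using the per-hose I/O and memory offsets.
 */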
void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
			     struct resource *res)
{
	unsigned long offset = 0;
	struct pci_controller *hose = dev->sysdata;

	if (hose && res->flags & IORESOURCE_IO)
		offset = (unsigned long)hose->io_base_virt - isa_io_base;
	else if (hose && res->flags & IORESOURCE_MEM)
		offset = hose->pci_mem_offset;
	region->start = res->start - offset;
	region->end = res->end - offset;
}
EXPORT_SYMBOL(pcibios_resource_to_bus);

void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
			     struct pci_bus_region *region)
{
	unsigned long offset = 0;
	struct pci_controller *hose = dev->sysdata;

	if (hose && res->flags & IORESOURCE_IO)
		offset = (unsigned long)hose->io_base_virt - isa_io_base;
	else if (hose && res->flags & IORESOURCE_MEM)
		offset = hose->pci_mem_offset;
	res->start = region->start + offset;
	res->end = region->end + offset;
}
EXPORT_SYMBOL(pcibios_bus_to_resource);
/*
 * We need to avoid collisions with `mirrored' VGA ports
 * and other strange ISA hardware, so we always want the
 * addresses to be allocated in the 0x000-0x0ff region
 * modulo 0x400.
 *
 * Why? Because some silly external IO cards only decode
 * the low 10 bits of the IO address. The 0x00-0xff region
 * is reserved for motherboard devices that decode all 16
 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
 * but we want to try to avoid allocating at 0x2900-0x2bff
 * which might be mirrored at 0x0100-0x03ff.
 */
void pcibios_align_resource(void *data, struct resource *res,
			    resource_size_t size, resource_size_t align)
{
	struct pci_dev *dev = data;

	if (res->flags & IORESOURCE_IO) {
		resource_size_t start = res->start;

		if (size > 0x100) {
			printk(KERN_ERR "PCI: I/O Region %s/%d too large"
			       " (%lld bytes)\n", pci_name(dev),
			       (int)(res - dev->resource),
			       (unsigned long long)size);
		}

		if (start & 0x300) {
			start = (start + 0x3ff) & ~0x3ff;
			res->start = start;
		}
	}
}
EXPORT_SYMBOL(pcibios_align_resource);
/*
 * Handle resources of PCI devices.  If the world were perfect, we could
 * just allocate all the resource regions and do nothing more.  It isn't.
 * On the other hand, we cannot just re-allocate all devices, as it would
 * require us to know lots of host bridge internals.  So we attempt to
 * keep as much of the original configuration as possible, but tweak it
 * when it's found to be wrong.
 *
 * Known BIOS problems we have to work around:
 *	- I/O or memory regions not configured
 *	- regions configured, but not enabled in the command register
 *	- bogus I/O addresses above 64K used
 *	- expansion ROMs left enabled (this may sound harmless, but given
 *	  the fact the PCI specs explicitly allow address decoders to be
 *	  shared between expansion ROMs and other resource regions, it's
 *	  at least dangerous)
 *
 * Our solution:
 *	(1) Allocate resources for all buses behind PCI-to-PCI bridges.
 *	    This gives us fixed barriers on where we can allocate.
 *	(2) Allocate resources for all enabled devices.  If there is
 *	    a collision, just mark the resource as unallocated.  Also
 *	    disable expansion ROMs during this step.
 *	(3) Try to allocate resources for disabled devices.  If the
 *	    resources were assigned correctly, everything goes well,
 *	    if they weren't, they won't disturb allocation of other
 *	    resources.
 *	(4) Assign new addresses to resources which were either
 *	    not configured at all or misconfigured.  If explicitly
 *	    requested by the user, configure expansion ROM address
 *	    as well.
 */
static void __init
pcibios_allocate_bus_resources(struct list_head *bus_list)
{
	struct pci_bus *bus;
	int i;
	struct resource *res, *pr;

	/* Depth-First Search on bus tree */
	list_for_each_entry(bus, bus_list, node) {
		for (i = 0; i < 4; ++i) {
			if ((res = bus->resource[i]) == NULL || !res->flags
			    || res->start > res->end)
				continue;
			if (bus->parent == NULL)
				pr = (res->flags & IORESOURCE_IO)?
					&ioport_resource: &iomem_resource;
			else {
				pr = pci_find_parent_resource(bus->self, res);
				if (pr == res) {
					/* this happens when the generic PCI
					 * code (wrongly) decides that this
					 * bridge is transparent -- paulus
					 */
					continue;
				}
			}

			DBG("PCI: bridge rsrc %llx..%llx (%lx), parent %p\n",
			    (unsigned long long)res->start,
			    (unsigned long long)res->end, res->flags, pr);
			if (request_resource(pr, res) == 0)
				continue;
			/*
			 * Must be a conflict with an existing entry.
			 * Move that entry (or entries) under the
			 * bridge resource and try again.
			 */
			if (reparent_resources(pr, res) == 0)
				continue;
			printk(KERN_ERR "PCI: Cannot allocate resource region "
			       "%d of PCI bridge %d\n", i, bus->number);
			if (pci_relocate_bridge_resource(bus, i))
				bus->resource[i] = NULL;
		}
		pcibios_allocate_bus_resources(&bus->children);
	}
}
/*
 * Reparent resource children of pr that conflict with res
 * under res, and make res replace those children.
 */
static int __init
reparent_resources(struct resource *parent, struct resource *res)
{
	struct resource *p, **pp;
	struct resource **firstpp = NULL;

	for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
		if (p->end < res->start)
			continue;
		if (res->end < p->start)
			break;
		if (p->start < res->start || p->end > res->end)
			return -1;	/* not completely contained */
		if (firstpp == NULL)
			firstpp = pp;
	}
	if (firstpp == NULL)
		return -1;	/* didn't find any conflicting entries? */
	res->parent = parent;
	res->child = *firstpp;
	res->sibling = *pp;
	*firstpp = res;
	*pp = NULL;
	for (p = res->child; p != NULL; p = p->sibling) {
		p->parent = res;
		DBG(KERN_INFO "PCI: reparented %s [%llx..%llx] under %s\n",
		    p->name, (unsigned long long)p->start,
		    (unsigned long long)p->end, res->name);
	}
	return 0;
}
/*
 * A bridge has been allocated a range which is outside the range
 * of its parent bridge, so it needs to be moved.
 */
static int __init
pci_relocate_bridge_resource(struct pci_bus *bus, int i)
{
	struct resource *res, *pr, *conflict;
	unsigned long try, size;
	int j;
	struct pci_bus *parent = bus->parent;

	if (parent == NULL) {
		/* shouldn't ever happen */
		printk(KERN_ERR "PCI: can't move host bridge resource\n");
		return -1;
	}
	res = bus->resource[i];
	if (res == NULL)
		return -1;
	pr = NULL;
	for (j = 0; j < 4; j++) {
		struct resource *r = parent->resource[j];

		if (!r)
			continue;
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH)) {
			pr = r;
			break;
		}
		if (res->flags & IORESOURCE_PREFETCH)
			pr = r;
	}
	if (pr == NULL)
		return -1;
	size = res->end - res->start;
	if (pr->start > pr->end || size > pr->end - pr->start)
		return -1;
	try = pr->end;
	for (;;) {
		res->start = try - size;
		res->end = try;
		if (probe_resource(bus->parent, pr, res, &conflict) == 0)
			break;
		if (conflict->start <= pr->start + size)
			return -1;
		try = conflict->start - 1;
	}
	if (request_resource(pr, res)) {
		DBG(KERN_ERR "PCI: huh? couldn't move to %llx..%llx\n",
		    (unsigned long long)res->start,
		    (unsigned long long)res->end);
		return -1;		/* "can't happen" */
	}
	update_bridge_base(bus, i);
	printk(KERN_INFO "PCI: bridge %d resource %d moved to %llx..%llx\n",
	       bus->number, i, (unsigned long long)res->start,
	       (unsigned long long)res->end);
	return 0;
}
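
/*
 * Check whether the range in 'res' overlaps anything already living under
 * 'pr': existing children of pr, resources of sibling bridges, or resources
 * of devices on the parent bus.  Returns 1 and sets *conflict if so,
 * 0 otherwise.
 */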
static int __init
probe_resource(struct pci_bus *parent, struct resource *pr,
	       struct resource *res, struct resource **conflict)
{
	struct pci_bus *bus;
	struct pci_dev *dev;
	struct resource *r;
	int i;

	for (r = pr->child; r != NULL; r = r->sibling) {
		if (r->end >= res->start && res->end >= r->start) {
			*conflict = r;
			return 1;
		}
	}
	list_for_each_entry(bus, &parent->children, node) {
		for (i = 0; i < 4; ++i) {
			if ((r = bus->resource[i]) == NULL)
				continue;
			if (!r->flags || r->start > r->end || r == res)
				continue;
			if (pci_find_parent_resource(bus->self, r) != pr)
				continue;
			if (r->end >= res->start && res->end >= r->start) {
				*conflict = r;
				return 1;
			}
		}
	}
	list_for_each_entry(dev, &parent->devices, bus_list) {
		for (i = 0; i < 6; ++i) {
			r = &dev->resource[i];
			if (!r->flags || (r->flags & IORESOURCE_UNSET))
				continue;
			if (pci_find_parent_resource(dev, r) != pr)
				continue;
			if (r->end >= res->start && res->end >= r->start) {
				*conflict = r;
				return 1;
			}
		}
	}
	return 0;
}
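
/*
 * Program the P2P bridge's I/O or memory window registers to match the
 * (possibly relocated) resource, with the decoders temporarily disabled
 * while the base/limit pair is rewritten.
 */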
static void __init
update_bridge_base(struct pci_bus *bus, int i)
{
	struct resource *res = bus->resource[i];
	u8 io_base_lo, io_limit_lo;
	u16 mem_base, mem_limit;
	u16 cmd;
	unsigned long start, end, off;
	struct pci_dev *dev = bus->self;
	struct pci_controller *hose = dev->sysdata;

	if (!hose) {
		printk("update_bridge_base: no hose?\n");
		return;
	}
	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	pci_write_config_word(dev, PCI_COMMAND,
			      cmd & ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY));
	if (res->flags & IORESOURCE_IO) {
		off = (unsigned long) hose->io_base_virt - isa_io_base;
		start = res->start - off;
		end = res->end - off;
		io_base_lo = (start >> 8) & PCI_IO_RANGE_MASK;
		io_limit_lo = (end >> 8) & PCI_IO_RANGE_MASK;
		if (end > 0xffff) {
			pci_write_config_word(dev, PCI_IO_BASE_UPPER16,
					      start >> 16);
			pci_write_config_word(dev, PCI_IO_LIMIT_UPPER16,
					      end >> 16);
			io_base_lo |= PCI_IO_RANGE_TYPE_32;
		} else
			io_base_lo |= PCI_IO_RANGE_TYPE_16;
		pci_write_config_byte(dev, PCI_IO_BASE, io_base_lo);
		pci_write_config_byte(dev, PCI_IO_LIMIT, io_limit_lo);

	} else if ((res->flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH))
		   == IORESOURCE_MEM) {
		off = hose->pci_mem_offset;
		mem_base = ((res->start - off) >> 16) & PCI_MEMORY_RANGE_MASK;
		mem_limit = ((res->end - off) >> 16) & PCI_MEMORY_RANGE_MASK;
		pci_write_config_word(dev, PCI_MEMORY_BASE, mem_base);
		pci_write_config_word(dev, PCI_MEMORY_LIMIT, mem_limit);

	} else if ((res->flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH))
		   == (IORESOURCE_MEM | IORESOURCE_PREFETCH)) {
		off = hose->pci_mem_offset;
		mem_base = ((res->start - off) >> 16) & PCI_PREF_RANGE_MASK;
		mem_limit = ((res->end - off) >> 16) & PCI_PREF_RANGE_MASK;
		pci_write_config_word(dev, PCI_PREF_MEMORY_BASE, mem_base);
		pci_write_config_word(dev, PCI_PREF_MEMORY_LIMIT, mem_limit);

	} else {
		DBG(KERN_ERR "PCI: ugh, bridge %s res %d has flags=%lx\n",
		    pci_name(dev), i, res->flags);
	}
	pci_write_config_word(dev, PCI_COMMAND, cmd);
}
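
/*
 * Try to claim a single device BAR from its parent resource; on failure,
 * mark it IORESOURCE_UNSET so that a new address gets assigned later.
 */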
static inline void alloc_resource(struct pci_dev *dev, int idx)
{
	struct resource *pr, *r = &dev->resource[idx];

	DBG("PCI:%s: Resource %d: %016llx-%016llx (f=%lx)\n",
	    pci_name(dev), idx, (unsigned long long)r->start,
	    (unsigned long long)r->end, r->flags);
	pr = pci_find_parent_resource(dev, r);
	if (!pr || request_resource(pr, r) < 0) {
		printk(KERN_ERR "PCI: Cannot allocate resource region %d"
		       " of device %s\n", idx, pci_name(dev));
		if (pr)
			DBG("PCI: parent is %p: %016llx-%016llx (f=%lx)\n",
			    pr, (unsigned long long)pr->start,
			    (unsigned long long)pr->end, pr->flags);
		/* We'll assign a new address later */
		r->flags |= IORESOURCE_UNSET;
		r->end -= r->start;
		r->start = 0;
	}
}
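
/*
 * Steps (2) and (3) above: pass 0 claims the resources of enabled devices
 * (and turns off any expansion ROMs), pass 1 handles the disabled ones.
 */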
static void __init
pcibios_allocate_resources(int pass)
{
	struct pci_dev *dev = NULL;
	int idx, disabled;
	u16 command;
	struct resource *r;

	for_each_pci_dev(dev) {
		pci_read_config_word(dev, PCI_COMMAND, &command);
		for (idx = 0; idx < 6; idx++) {
			r = &dev->resource[idx];
			if (r->parent)		/* Already allocated */
				continue;
			if (!r->flags || (r->flags & IORESOURCE_UNSET))
				continue;	/* Not assigned at all */
			if (r->flags & IORESOURCE_IO)
				disabled = !(command & PCI_COMMAND_IO);
			else
				disabled = !(command & PCI_COMMAND_MEMORY);
			if (pass == disabled)
				alloc_resource(dev, idx);
		}
		if (pass)
			continue;
		r = &dev->resource[PCI_ROM_RESOURCE];
		if (r->flags & IORESOURCE_ROM_ENABLE) {
			/* Turn the ROM off, leave the resource region, but keep it unregistered. */
			u32 reg;

			DBG("PCI: Switching off ROM of %s\n", pci_name(dev));
			r->flags &= ~IORESOURCE_ROM_ENABLE;
			pci_read_config_dword(dev, dev->rom_base_reg, &reg);
			pci_write_config_dword(dev, dev->rom_base_reg,
					       reg & ~PCI_ROM_ADDRESS_ENABLE);
		}
	}
}
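
/*
 * Step (4) above: give new addresses to resources that were left unset or
 * found to be unusable.
 */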
static void __init
pcibios_assign_resources(void)
{
	struct pci_dev *dev = NULL;
	int idx;
	struct resource *r;

	for_each_pci_dev(dev) {
		int class = dev->class >> 8;

		/* Don't touch classless devices and host bridges */
		if (!class || class == PCI_CLASS_BRIDGE_HOST)
			continue;

		for (idx = 0; idx < 6; idx++) {
			r = &dev->resource[idx];

			/*
			 * We shall assign a new address to this resource,
			 * either because the BIOS (sic) forgot to do so
			 * or because we have decided the old address was
			 * unusable for some reason.
			 */
			if ((r->flags & IORESOURCE_UNSET) && r->end &&
			    (!ppc_md.pcibios_enable_device_hook ||
			     !ppc_md.pcibios_enable_device_hook(dev, 1))) {
				r->flags &= ~IORESOURCE_UNSET;
				pci_assign_resource(dev, idx);
			}
		}

#if 0 /* don't assign ROMs */
		r = &dev->resource[PCI_ROM_RESOURCE];
		r->end -= r->start;
		r->start = 0;
		if (r->end)
			pci_assign_resource(dev, PCI_ROM_RESOURCE);
#endif
	}
}
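
/*
 * Turn on I/O and/or memory decoding in PCI_COMMAND for the resources
 * selected by 'mask', refusing devices that still have unset resources.
 */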
int
pcibios_enable_resources(struct pci_dev *dev, int mask)
{
	u16 cmd, old_cmd;
	int idx;
	struct resource *r;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	old_cmd = cmd;
	for (idx = 0; idx < 6; idx++) {
		/* Only set up the requested stuff */
		if (!(mask & (1 << idx)))
			continue;

		r = &dev->resource[idx];
		if (r->flags & IORESOURCE_UNSET) {
			printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev));
			return -EINVAL;
		}
		if (r->flags & IORESOURCE_IO)
			cmd |= PCI_COMMAND_IO;
		if (r->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}
	if (dev->resource[PCI_ROM_RESOURCE].start)
		cmd |= PCI_COMMAND_MEMORY;
	if (cmd != old_cmd) {
		printk("PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd);
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	return 0;
}
static int next_controller_index;

struct pci_controller * __init
pcibios_alloc_controller(void)
{
	struct pci_controller *hose;

	hose = (struct pci_controller *)alloc_bootmem(sizeof(*hose));
	memset(hose, 0, sizeof(struct pci_controller));

	*hose_tail = hose;
	hose_tail = &hose->next;

	hose->index = next_controller_index++;

	return hose;
}
void pcibios_make_OF_bus_map(void)
{
}
static int __init
pcibios_init(void)
{
	struct pci_controller *hose;
	struct pci_bus *bus;
	int next_busno;

	printk(KERN_INFO "PCI: Probing PCI hardware\n");

	/* Scan all of the recorded PCI controllers.  */
	for (next_busno = 0, hose = hose_head; hose; hose = hose->next) {
		if (pci_assign_all_buses)
			hose->first_busno = next_busno;
		hose->last_busno = 0xff;
		bus = pci_scan_bus(hose->first_busno, hose->ops, hose);
		hose->last_busno = bus->subordinate;
		if (pci_assign_all_buses || next_busno <= hose->last_busno)
			next_busno = hose->last_busno + pcibios_assign_bus_offset;
	}
	pci_bus_count = next_busno;

	/* OpenFirmware based machines need a map of OF bus
	 * numbers vs. kernel bus numbers since we may have to
	 * remap them.
	 */
	if (pci_assign_all_buses && have_of)
		pcibios_make_OF_bus_map();

	/* Do machine dependent PCI interrupt routing */
	if (ppc_md.pci_swizzle && ppc_md.pci_map_irq)
		pci_fixup_irqs(ppc_md.pci_swizzle, ppc_md.pci_map_irq);

	/* Call machine dependent fixup */
	if (ppc_md.pcibios_fixup)
		ppc_md.pcibios_fixup();

	/* Allocate and assign resources */
	pcibios_allocate_bus_resources(&pci_root_buses);
	pcibios_allocate_resources(0);
	pcibios_allocate_resources(1);
	pcibios_assign_resources();

	/* Call machine dependent post-init code */
	if (ppc_md.pcibios_after_init)
		ppc_md.pcibios_after_init();

	return 0;
}

subsys_initcall(pcibios_init);
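
/*
 * Walk up the chain of P2P bridges, applying the standard interrupt pin
 * swizzle at each level, and return the slot number at the host bridge.
 */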
unsigned char __init
common_swizzle(struct pci_dev *dev, unsigned char *pinp)
{
	struct pci_controller *hose = dev->sysdata;

	if (dev->bus->number != hose->first_busno) {
		u8 pin = *pinp;
		do {
			pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
			/* Move up the chain of bridges. */
			dev = dev->bus->self;
		} while (dev->bus->self);
		*pinp = pin;

		/* The slot is the idsel of the last bridge. */
	}
	return PCI_SLOT(dev->devfn);
}
unsigned long resource_fixup(struct pci_dev * dev, struct resource * res,
			     unsigned long start, unsigned long size)
{
	return start;
}
void __init pcibios_fixup_bus(struct pci_bus *bus)
{
	struct pci_controller *hose = (struct pci_controller *) bus->sysdata;
	unsigned long io_offset;
	struct resource *res;
	int i;

	io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
	if (bus->parent == NULL) {
		/* This is a host bridge - fill in its resources */
		hose->bus = bus;

		bus->resource[0] = res = &hose->io_resource;
		if (!res->flags) {
			if (io_offset)
				printk(KERN_ERR "I/O resource not set for host"
				       " bridge %d\n", hose->index);
			res->start = 0;
			res->end = IO_SPACE_LIMIT;
			res->flags = IORESOURCE_IO;
		}
		res->start += io_offset;
		res->end += io_offset;

		for (i = 0; i < 3; ++i) {
			res = &hose->mem_resources[i];
			if (!res->flags) {
				if (i > 0)
					continue;
				printk(KERN_ERR "Memory resource not set for "
				       "host bridge %d\n", hose->index);
				res->start = hose->pci_mem_offset;
				res->end = ~0U;
				res->flags = IORESOURCE_MEM;
			}
			bus->resource[i+1] = res;
		}
	} else {
		/* This is a subordinate bridge */
		pci_read_bridge_bases(bus);

		for (i = 0; i < 4; ++i) {
			if ((res = bus->resource[i]) == NULL)
				continue;
			if (!res->flags)
				continue;
			if (io_offset && (res->flags & IORESOURCE_IO)) {
				res->start += io_offset;
				res->end += io_offset;
			} else if (hose->pci_mem_offset
				   && (res->flags & IORESOURCE_MEM)) {
				res->start += hose->pci_mem_offset;
				res->end += hose->pci_mem_offset;
			}
		}
	}

	if (ppc_md.pcibios_fixup_bus)
		ppc_md.pcibios_fixup_bus(bus);
}
char __init *pcibios_setup(char *str)
{
	return str;
}
/* the next one is stolen from the alpha port... */
void __init
pcibios_update_irq(struct pci_dev *dev, int irq)
{
	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
	/* XXX FIXME - update OF device tree node interrupt property */
}
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	u16 cmd, old_cmd;
	int idx;
	struct resource *r;

	if (ppc_md.pcibios_enable_device_hook)
		if (ppc_md.pcibios_enable_device_hook(dev, 0))
			return -EINVAL;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	old_cmd = cmd;
	for (idx = 0; idx < 6; idx++) {
		r = &dev->resource[idx];
		if (r->flags & IORESOURCE_UNSET) {
			printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev));
			return -EINVAL;
		}
		if (r->flags & IORESOURCE_IO)
			cmd |= PCI_COMMAND_IO;
		if (r->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}
	if (cmd != old_cmd) {
		printk("PCI: Enabling device %s (%04x -> %04x)\n",
		       pci_name(dev), old_cmd, cmd);
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	return 0;
}
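
/* Return the pci_controller whose bus number range contains 'bus', or NULL. */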
struct pci_controller*
pci_bus_to_hose(int bus)
{
	struct pci_controller *hose = hose_head;

	for (; hose; hose = hose->next)
		if (bus >= hose->first_busno && bus <= hose->last_busno)
			return hose;
	return NULL;
}
void __iomem *
pci_bus_io_base(unsigned int bus)
{
	struct pci_controller *hose;

	hose = pci_bus_to_hose(bus);
	if (!hose)
		return NULL;
	return hose->io_base_virt;
}

unsigned long
pci_bus_io_base_phys(unsigned int bus)
{
	struct pci_controller *hose;

	hose = pci_bus_to_hose(bus);
	if (!hose)
		return 0;
	return hose->io_base_phys;
}

unsigned long
pci_bus_mem_base_phys(unsigned int bus)
{
	struct pci_controller *hose;

	hose = pci_bus_to_hose(bus);
	if (!hose)
		return 0;
	return hose->pci_mem_offset;
}
unsigned long
pci_resource_to_bus(struct pci_dev *pdev, struct resource *res)
{
	/* Hack alert again ! See comments in chrp_pci.c
	 */
	struct pci_controller *hose =
		(struct pci_controller *)pdev->sysdata;
	if (hose && res->flags & IORESOURCE_MEM)
		return res->start - hose->pci_mem_offset;
	/* We may want to do something with IOs here... */
	return res->start;
}
static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
					       resource_size_t *offset,
					       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
	unsigned long io_offset = 0;
	int i, res_bit;

	if (hose == 0)
		return NULL;		/* should never happen */

	/* If memory, add on the PCI bridge address offset */
	if (mmap_state == pci_mmap_mem) {
#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
		*offset += hose->pci_mem_offset;
#endif
		res_bit = IORESOURCE_MEM;
	} else {
		io_offset = hose->io_base_virt - ___IO_BASE;
		*offset += io_offset;
		res_bit = IORESOURCE_IO;
	}

	/*
	 * Check that the offset requested corresponds to one of the
	 * resources of the device.
	 */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		struct resource *rp = &dev->resource[i];
		int flags = rp->flags;

		/* treat ROM as memory (should be already) */
		if (i == PCI_ROM_RESOURCE)
			flags |= IORESOURCE_MEM;

		/* Active and same type? */
		if ((flags & res_bit) == 0)
			continue;

		/* In the range of this resource? */
		if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
			continue;

		/* found it! construct the final physical address */
		if (mmap_state == pci_mmap_io)
			*offset += hose->io_base_phys - io_offset;
		return rp;
	}

	return NULL;
}
/*
 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
 * device mapping.
 */
static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
				      pgprot_t protection,
				      enum pci_mmap_state mmap_state,
				      int write_combine)
{
	unsigned long prot = pgprot_val(protection);

	/* Write combine is always 0 on non-memory space mappings. On
	 * memory space, if the user didn't pass 1, we check for a
	 * "prefetchable" resource. This is a bit hackish, but we use
	 * this to workaround the inability of /sysfs to provide a write
	 * combine flag.
	 */
	if (mmap_state != pci_mmap_mem)
		write_combine = 0;
	else if (write_combine == 0) {
		if (rp->flags & IORESOURCE_PREFETCH)
			write_combine = 1;
	}

	/* XXX would be nice to have a way to ask for write-through */
	prot |= _PAGE_NO_CACHE;
	if (write_combine)
		prot &= ~_PAGE_GUARDED;
	else
		prot |= _PAGE_GUARDED;

	printk("PCI map for %s:%llx, prot: %lx\n", pci_name(dev),
	       (unsigned long long)rp->start, prot);

	return __pgprot(prot);
}
/*
 * This one is used by /dev/mem and fbdev who have no clue about the
 * PCI device, it tries to find the PCI device first and calls the
 * above routine.
 */
pgprot_t pci_phys_mem_access_prot(struct file *file,
				  unsigned long pfn,
				  unsigned long size,
				  pgprot_t protection)
{
	struct pci_dev *pdev = NULL;
	struct resource *found = NULL;
	unsigned long prot = pgprot_val(protection);
	unsigned long offset = pfn << PAGE_SHIFT;
	int i;

	if (page_is_ram(pfn))
		return __pgprot(prot);

	prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;

	for_each_pci_dev(pdev) {
		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
			struct resource *rp = &pdev->resource[i];
			int flags = rp->flags;

			/* Active and same type? */
			if ((flags & IORESOURCE_MEM) == 0)
				continue;
			/* In the range of this resource? */
			if (offset < (rp->start & PAGE_MASK) ||
			    offset > rp->end)
				continue;
			found = rp;
			break;
		}
		if (found)
			break;
	}
	if (found) {
		if (found->flags & IORESOURCE_PREFETCH)
			prot &= ~_PAGE_GUARDED;
		pci_dev_put(pdev);
	}

	DBG("non-PCI map for %lx, prot: %lx\n", offset, prot);

	return __pgprot(prot);
}
/*
 * Perform the actual remap of the pages for a PCI device mapping, as
 * appropriate for this architecture.  The region in the process to map
 * is described by vm_start and vm_end members of VMA, the base physical
 * address is found in vm_pgoff.
 * The pci device structure is provided so that architectures may make mapping
 * decisions on a per-device or per-bus basis.
 *
 * Returns a negative error code on failure, zero on success.
 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state,
			int write_combine)
{
	resource_size_t offset = vma->vm_pgoff << PAGE_SHIFT;
	struct resource *rp;
	int ret;

	rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
	if (rp == NULL)
		return -EINVAL;

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
						  vma->vm_page_prot,
						  mmap_state, write_combine);

	ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			      vma->vm_end - vma->vm_start, vma->vm_page_prot);

	return ret;
}
/* Obsolete functions. Should be removed once the symbios driver
 * is fixed.
 */
unsigned long
phys_to_bus(unsigned long pa)
{
	struct pci_controller *hose;
	int i;

	for (hose = hose_head; hose; hose = hose->next) {
		for (i = 0; i < 3; ++i) {
			if (pa >= hose->mem_resources[i].start
			    && pa <= hose->mem_resources[i].end) {
				/*
				 * XXX the hose->pci_mem_offset really
				 * only applies to mem_resources[0].
				 * We need a way to store an offset for
				 * the others.  -- paulus
				 */
				if (i == 0)
					pa -= hose->pci_mem_offset;
				return pa;
			}
		}
	}
	/* hmmm, didn't find it */
	return 0;
}
unsigned long
pci_phys_to_bus(unsigned long pa, int busnr)
{
	struct pci_controller *hose = pci_bus_to_hose(busnr);

	if (!hose)
		return pa;
	return pa - hose->pci_mem_offset;
}

unsigned long
pci_bus_to_phys(unsigned int ba, int busnr)
{
	struct pci_controller *hose = pci_bus_to_hose(busnr);

	if (!hose)
		return ba;
	return ba + hose->pci_mem_offset;
}
/* Provide information on locations of various I/O regions in physical
 * memory.  Do this on a per-card basis so that we choose the right
 * root bridge.
 * Note that the returned IO or memory base is a physical address.
 */
long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
{
	struct pci_controller *hose;
	long result = -EOPNOTSUPP;

	hose = pci_bus_to_hose(bus);
	if (!hose)
		return -ENODEV;

	switch (which) {
	case IOBASE_BRIDGE_NUMBER:
		return (long)hose->first_busno;
	case IOBASE_MEMORY:
		return (long)hose->pci_mem_offset;
	case IOBASE_IO:
		return (long)hose->io_base_phys;
	case IOBASE_ISA_IO:
		return (long)isa_io_base;
	case IOBASE_ISA_MEM:
		return (long)isa_mem_base;
	}

	return result;
}
void pci_resource_to_user(const struct pci_dev *dev, int bar,
			  const struct resource *rsrc,
			  resource_size_t *start, resource_size_t *end)
{
	struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
	resource_size_t offset = 0;

	if (hose == NULL)
		return;

	if (rsrc->flags & IORESOURCE_IO)
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;

	/* We pass a fully fixed up address to userland for MMIO instead of
	 * a BAR value because X is lame and expects to be able to use that
	 * to pass to /dev/mem !
	 *
	 * That means that we'll have potentially 64 bits values where some
	 * userland apps only expect 32 (like X itself since it thinks only
	 * Sparc has 64 bits MMIO) but if we don't do that, we break it on
	 * 32 bits CHRPs :-(
	 *
	 * Hopefully, the sysfs interface is immune to that gunk. Once X
	 * has been fixed (and the fix spread enough), we can re-enable the
	 * 2 lines below and pass down a BAR value to userland. In that case
	 * we'll also have to re-enable the matching code in
	 * __pci_mmap_make_offset().
	 *
	 * BenH.
	 */
#if 0
	else if (rsrc->flags & IORESOURCE_MEM)
		offset = hose->pci_mem_offset;
#endif /* 0 */

	*start = rsrc->start - offset;
	*end = rsrc->end - offset;
}
void __init pci_init_resource(struct resource *res, resource_size_t start,
			      resource_size_t end, int flags, char *name)
{
	res->start = start;
	res->end = end;
	res->flags = flags;
	res->name = name;
	res->parent = NULL;
	res->sibling = NULL;
	res->child = NULL;
}
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
{
	unsigned long start = pci_resource_start(dev, bar);
	unsigned long len = pci_resource_len(dev, bar);
	unsigned long flags = pci_resource_flags(dev, bar);

	if (!len)
		return NULL;
	if (max && len > max)
		len = max;
	if (flags & IORESOURCE_IO)
		return ioport_map(start, len);
	if (flags & IORESOURCE_MEM)
		/* Not checking IORESOURCE_CACHEABLE because PPC does
		 * not currently distinguish between ioremap and
		 * ioremap_nocache.
		 */
		return ioremap(start, len);

	return NULL;
}

void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
{
	/* Nothing to do */
}

EXPORT_SYMBOL(pci_iomap);
EXPORT_SYMBOL(pci_iounmap);
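
/*
 * Typical (illustrative only) driver use of the helpers above: map BAR 0,
 * access it with the generic MMIO/PIO accessors, then unmap it.  The names
 * below (pdev, regs, MY_REG, val) are examples, not part of this file:
 *
 *	void __iomem *regs = pci_iomap(pdev, 0, 0);
 *	if (!regs)
 *		return -ENOMEM;
 *	u32 val = ioread32(regs + MY_REG);
 *	pci_iounmap(pdev, regs);
 */

/*
 * Translate a physical address that falls inside a hose's I/O window into
 * the corresponding PIO port number (offset from _IO_BASE), or -1 if it is
 * not covered by any hose.
 */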
unsigned long pci_address_to_pio(phys_addr_t address)
{
	struct pci_controller *hose = hose_head;

	for (; hose; hose = hose->next) {
		unsigned int size = hose->io_resource.end -
			hose->io_resource.start + 1;
		if (address >= hose->io_base_phys &&
		    address < (hose->io_base_phys + size)) {
			unsigned long base =
				(unsigned long)hose->io_base_virt - _IO_BASE;
			return base + (address - hose->io_base_phys);
		}
	}
	return (unsigned int)-1;
}
EXPORT_SYMBOL(pci_address_to_pio);
/*
 * Null PCI config access functions, for the case when we can't
 * find a hose.
 */
#define NULL_PCI_OP(rw, size, type)					\
static int								\
null_##rw##_config_##size(struct pci_dev *dev, int offset, type val)	\
{									\
	return PCIBIOS_DEVICE_NOT_FOUND;				\
}

static int
null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
		 int len, u32 *val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}

static int
null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
		  int len, u32 val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}

static struct pci_ops null_pci_ops =
{
	null_read_config,
	null_write_config
};
/*
 * These functions are used early on before PCI scanning is done
 * and all of the pci_dev and pci_bus structures have been created.
 */
static struct pci_bus *
fake_pci_bus(struct pci_controller *hose, int busnr)
{
	static struct pci_bus bus;

	if (hose == 0) {
		hose = pci_bus_to_hose(busnr);
		if (hose == 0)
			printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr);
	}
	bus.number = busnr;
	bus.sysdata = hose;
	bus.ops = hose? hose->ops: &null_pci_ops;
	return &bus;
}
#define EARLY_PCI_OP(rw, size, type)					\
int early_##rw##_config_##size(struct pci_controller *hose, int bus,	\
			       int devfn, int offset, type value)	\
{									\
	return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus),	\
					    devfn, offset, value);	\
}

EARLY_PCI_OP(read, byte, u8 *)
EARLY_PCI_OP(read, word, u16 *)
EARLY_PCI_OP(read, dword, u32 *)
EARLY_PCI_OP(write, byte, u8)
EARLY_PCI_OP(write, word, u16)
EARLY_PCI_OP(write, dword, u32)
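
/*
 * Illustrative use from platform setup code, before the PCI tree exists.
 * 'hose' is whatever pcibios_alloc_controller() returned; the devfn of 0
 * is just an example:
 *
 *	u16 vendor;
 *	early_read_config_word(hose, hose->first_busno, 0, PCI_VENDOR_ID,
 *			       &vendor);
 */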