/* arch/powerpc/platforms/iseries/pci.c */
/*
 * Copyright (C) 2001 Allan Trautman, IBM Corporation
 *
 * iSeries specific routines for PCI.
 *
 * Based on code from pci.c and iSeries_pci.c 32bit
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/ide.h>
#include <linux/pci.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>

#include <asm/iseries/hv_call_xm.h>
#include <asm/iseries/mf.h>

#include <asm/ppc-pci.h>

#include "irq.h"
#include "pci.h"
#include "call_pci.h"
#include "iommu.h"
extern unsigned long io_page_mask;
/*
 * Forward declares of prototypes.
 */
static struct device_node *find_Device_Node(int bus, int devfn);
static void scan_PHB_slots(struct pci_controller *Phb);
static void scan_EADS_bridge(HvBusNumber Bus, HvSubBusNumber SubBus, int IdSel);
static int scan_bridge_slot(HvBusNumber Bus, struct HvCallPci_BridgeInfo *Info);
LIST_HEAD(iSeries_Global_Device_List);

static int DeviceCount;
/* Counters and control flags. */
static long Pci_Io_Read_Count;
static long Pci_Io_Write_Count;
#if 0
static long Pci_Cfg_Read_Count;
static long Pci_Cfg_Write_Count;
#endif
static long Pci_Error_Count;

static int Pci_Retry_Max = 3;	/* Only retry 3 times  */
static int Pci_Error_Flag = 1;	/* Set Retry Error on. */
static struct pci_ops iSeries_pci_ops;
/*
 * Table defines
 * Each Entry size is 4 MB * 1024 Entries = 4GB I/O address space.
 */
#define IOMM_TABLE_MAX_ENTRIES	1024
#define IOMM_TABLE_ENTRY_SIZE	0x0000000000400000UL
#define BASE_IO_MEMORY		0xE000000000000000UL

static unsigned long max_io_memory = 0xE000000000000000UL;
static long current_iomm_table_entry;
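
/*
 * Address arithmetic used throughout this file: an ioremapped BAR
 * address maps back to a table slot as
 *
 *	index  = (addr - BASE_IO_MEMORY) / IOMM_TABLE_ENTRY_SIZE;
 *	offset = (addr - BASE_IO_MEMORY) % IOMM_TABLE_ENTRY_SIZE;
 *
 * e.g. BASE_IO_MEMORY + 0x00800000 is entry 2, offset 0.  See
 * xlate_iomm_address() below.
 */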
/*
 * Lookup Tables.
 */
static struct device_node **iomm_table;
static u8 *iobar_table;

/*
 * Static and Global variables
 */
static char *pci_io_text = "iSeries PCI I/O";
static DEFINE_SPINLOCK(iomm_table_lock);
/*
 * iomm_table_initialize
 *
 * Allocates and initializes the Address Translation Table and BAR
 * Tables to get them ready for use.  Must be called before any
 * I/O space is handed out to the device BARs.
 */
static void iomm_table_initialize(void)
{
	spin_lock(&iomm_table_lock);
	iomm_table = kmalloc(sizeof(*iomm_table) * IOMM_TABLE_MAX_ENTRIES,
			GFP_KERNEL);
	iobar_table = kmalloc(sizeof(*iobar_table) * IOMM_TABLE_MAX_ENTRIES,
			GFP_KERNEL);
	spin_unlock(&iomm_table_lock);
	if ((iomm_table == NULL) || (iobar_table == NULL))
		panic("PCI: I/O tables allocation failed.\n");
}
/*
 * iomm_table_allocate_entry
 *
 * Adds a pci_dev entry to the address translation table.
 *
 * - Allocates the number of entries required in the table based on
 *   the BAR size.
 * - Allocates starting at BASE_IO_MEMORY and increases.
 * - The size is rounded up to be a multiple of the entry size.
 * - current_iomm_table_entry is incremented to keep track of the last entry.
 * - Builds the resource entry for the allocated BARs.
 */
static void iomm_table_allocate_entry(struct pci_dev *dev, int bar_num)
{
	struct resource *bar_res = &dev->resource[bar_num];
	long bar_size = pci_resource_len(dev, bar_num);

	/*
	 * No space to allocate, quick exit, skip Allocation.
	 */
	if (bar_size == 0)
		return;
	/*
	 * Set Resource values.
	 */
	spin_lock(&iomm_table_lock);
	bar_res->name = pci_io_text;
	bar_res->start =
		IOMM_TABLE_ENTRY_SIZE * current_iomm_table_entry;
	bar_res->start += BASE_IO_MEMORY;
	bar_res->end = bar_res->start + bar_size - 1;
	/*
	 * Allocate the number of table entries needed for BAR.
	 */
	while (bar_size > 0) {
		iomm_table[current_iomm_table_entry] = dev->sysdata;
		iobar_table[current_iomm_table_entry] = bar_num;
		bar_size -= IOMM_TABLE_ENTRY_SIZE;
		++current_iomm_table_entry;
	}
	max_io_memory = BASE_IO_MEMORY +
		(IOMM_TABLE_ENTRY_SIZE * current_iomm_table_entry);
	spin_unlock(&iomm_table_lock);
}
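
/*
 * Note that a BAR larger than one entry consumes consecutive table
 * slots: a 6 MB BAR, for example, takes two 4 MB entries, and every
 * address inside that window translates back to the same device and
 * BAR number via iomm_table[]/iobar_table[].
 */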
/*
 * allocate_device_bars
 *
 * - Allocates I/O space for ALL of the pci_dev's BARs and updates the
 *   resources with the BAR values.  BARs with zero length are skipped.
 * - Calls iomm_table_allocate_entry to allocate each entry.
 * - Loops through the BAR resources (0 - 5) including the ROM
 *   resource (6).
 */
static void allocate_device_bars(struct pci_dev *dev)
{
	struct resource *bar_res;
	int bar_num;

	for (bar_num = 0; bar_num <= PCI_ROM_RESOURCE; ++bar_num) {
		bar_res = &dev->resource[bar_num];
		iomm_table_allocate_entry(dev, bar_num);
	}
}
/*
 * Log error information to system console.
 * Filter out the device not there errors.
 * PCI: EADs Connect Failed 0x18.58.10 Rc: 0x00xx
 * PCI: Read Vendor Failed 0x18.58.10 Rc: 0x00xx
 * PCI: Connect Bus Unit Failed 0x18.58.10 Rc: 0x00xx
 */
static void pci_Log_Error(char *Error_Text, int Bus, int SubBus,
		int AgentId, int HvRc)
{
	if (HvRc == 0x0302)
		return;
	printk(KERN_ERR "PCI: %s Failed: 0x%02X.%02X.%02X Rc: 0x%04X",
	       Error_Text, Bus, SubBus, AgentId, HvRc);
}
/*
 * build_device_node(u16 Bus, int SubBus, u8 DevFn)
 */
static struct device_node *build_device_node(HvBusNumber Bus,
		HvSubBusNumber SubBus, int AgentId, int Function)
{
	struct device_node *node;
	struct pci_dn *pdn;

	node = kmalloc(sizeof(struct device_node), GFP_KERNEL);
	if (node == NULL)
		return NULL;
	memset(node, 0, sizeof(struct device_node));
	pdn = kzalloc(sizeof(*pdn), GFP_KERNEL);
	if (pdn == NULL) {
		kfree(node);
		return NULL;
	}
	node->data = pdn;
	pdn->node = node;
	list_add_tail(&pdn->Device_List, &iSeries_Global_Device_List);
	pdn->busno = Bus;
	pdn->bussubno = SubBus;
	pdn->devfn = PCI_DEVFN(ISERIES_ENCODE_DEVICE(AgentId), Function);
	return node;
}
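
/*
 * The node built above is a bare device_node whose only payload is the
 * pci_dn hanging off node->data, so PCI_DN() works on it and
 * find_Device_Node() can later match it by (busno, devfn).
 */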
/*
 * unsigned long __init find_and_init_phbs(void)
 *
 * Description:
 *   This function checks for all possible system PCI host bridges that connect
 *   PCI buses.  The system hypervisor is queried as to the guest partition
 *   ownership status.  A pci_controller is built for any bus which is partially
 *   owned or fully owned by this guest partition.
 */
unsigned long __init find_and_init_phbs(void)
{
	struct pci_controller *phb;
	HvBusNumber bus;

	/* Check all possible buses. */
	for (bus = 0; bus < 256; bus++) {
		int ret = HvCallXm_testBus(bus);

		if (ret == 0) {
			printk("bus %d appears to exist\n", bus);

			phb = pcibios_alloc_controller(NULL);
			if (phb == NULL)
				return -ENOMEM;

			phb->pci_mem_offset = phb->local_number = bus;
			phb->first_busno = bus;
			phb->last_busno = bus;
			phb->ops = &iSeries_pci_ops;

			/* Find and connect the devices. */
			scan_PHB_slots(phb);
		}
		/*
		 * Check for Unexpected Return code, a clue that something
		 * has gone wrong.
		 */
		else if (ret != 0x0301)
			printk(KERN_ERR "Unexpected Return on Probe(0x%04X): 0x%04X",
			       bus, ret);
	}
	return 0;
}
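
/*
 * HvCallXm_testBus() returning 0 means this partition sees the bus;
 * the 0x0301 return is silently skipped and anything else is logged
 * as unexpected.  (Reading 0x0301 as the normal "bus not present"
 * case is an inference from the handling above.)
 */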
/*
 * iSeries_pcibios_init
 *
 * Chance to initialize structures or variables before the PCI bus walk.
 */
void iSeries_pcibios_init(void)
{
	iomm_table_initialize();
	find_and_init_phbs();
	io_page_mask = -1;
}
/*
 * iSeries_pci_final_fixup(void)
 */
void __init iSeries_pci_final_fixup(void)
{
	struct pci_dev *pdev = NULL;
	struct device_node *node;
	int DeviceCount = 0;

	/* Fix up the device node and pci_dev relationship */
	mf_display_src(0xC9000100);

	printk("pcibios_final_fixup\n");
	for_each_pci_dev(pdev) {
		node = find_Device_Node(pdev->bus->number, pdev->devfn);
		printk("pci dev %p (%x.%x), node %p\n", pdev,
		       pdev->bus->number, pdev->devfn, node);

		if (node != NULL) {
			++DeviceCount;
			pdev->sysdata = (void *)node;
			PCI_DN(node)->pcidev = pdev;
			allocate_device_bars(pdev);
			iSeries_Device_Information(pdev, DeviceCount);
			iommu_devnode_init_iSeries(node);
			/* Only assign the irq when we have a node to read it from. */
			pdev->irq = PCI_DN(node)->Irq;
		} else
			printk("PCI: Device Tree not found for 0x%016lX\n",
			       (unsigned long)pdev);
	}
	iSeries_activate_IRQs();
	mf_display_src(0xC9000200);
}
void pcibios_fixup_bus(struct pci_bus *PciBus)
{
}

void pcibios_fixup_resources(struct pci_dev *pdev)
{
}
/*
 * Loop through each node function to find usable EADs bridges.
 */
static void scan_PHB_slots(struct pci_controller *Phb)
{
	struct HvCallPci_DeviceInfo *DevInfo;
	HvBusNumber bus = Phb->local_number;	/* System Bus */
	const HvSubBusNumber SubBus = 0;	/* EADs is always 0. */
	int HvRc = 0;
	int IdSel;
	const int MaxAgents = 8;

	DevInfo = (struct HvCallPci_DeviceInfo *)
		kmalloc(sizeof(struct HvCallPci_DeviceInfo), GFP_KERNEL);
	if (DevInfo == NULL)
		return;

	/*
	 * Probe for EADs Bridges
	 */
	for (IdSel = 1; IdSel < MaxAgents; ++IdSel) {
		HvRc = HvCallPci_getDeviceInfo(bus, SubBus, IdSel,
				iseries_hv_addr(DevInfo),
				sizeof(struct HvCallPci_DeviceInfo));
		if (HvRc == 0) {
			if (DevInfo->deviceType == HvCallPci_NodeDevice)
				scan_EADS_bridge(bus, SubBus, IdSel);
			else
				printk("PCI: Invalid System Configuration(0x%02X)"
				       " for bus 0x%02x id 0x%02x.\n",
				       DevInfo->deviceType, bus, IdSel);
		} else
			pci_Log_Error("getDeviceInfo", bus, SubBus, IdSel, HvRc);
	}
	kfree(DevInfo);
}
static void scan_EADS_bridge(HvBusNumber bus, HvSubBusNumber SubBus,
		int IdSel)
{
	struct HvCallPci_BridgeInfo *BridgeInfo;
	HvAgentId AgentId;
	int Function;
	int HvRc;

	BridgeInfo = (struct HvCallPci_BridgeInfo *)
		kmalloc(sizeof(struct HvCallPci_BridgeInfo), GFP_KERNEL);
	if (BridgeInfo == NULL)
		return;

	/* Note: hvSubBus and irq are always 0 at this level! */
	for (Function = 0; Function < 8; ++Function) {
		AgentId = ISERIES_PCI_AGENTID(IdSel, Function);
		HvRc = HvCallXm_connectBusUnit(bus, SubBus, AgentId, 0);
		if (HvRc == 0) {
			printk("found device at bus %d idsel %d func %d (AgentId %x)\n",
			       bus, IdSel, Function, AgentId);
			/* Connect EADs: 0x18.00.12 = 0x00 */
			HvRc = HvCallPci_getBusUnitInfo(bus, SubBus, AgentId,
					iseries_hv_addr(BridgeInfo),
					sizeof(struct HvCallPci_BridgeInfo));
			if (HvRc == 0) {
				printk("bridge info: type %x subbus %x maxAgents %x maxsubbus %x logslot %x\n",
					BridgeInfo->busUnitInfo.deviceType,
					BridgeInfo->subBusNumber,
					BridgeInfo->maxAgents,
					BridgeInfo->maxSubBusNumber,
					BridgeInfo->logicalSlotNumber);
				if (BridgeInfo->busUnitInfo.deviceType ==
						HvCallPci_BridgeDevice) {
					/* Scan_Bridge_Slot...: 0x18.00.12 */
					scan_bridge_slot(bus, BridgeInfo);
				} else
					printk("PCI: Invalid Bridge Configuration(0x%02X)",
						BridgeInfo->busUnitInfo.deviceType);
			}
		} else if (HvRc != 0x000B)
			pci_Log_Error("EADs Connect",
					bus, SubBus, AgentId, HvRc);
	}
	kfree(BridgeInfo);
}
/*
 * This assumes that the node slot is always on the primary bus!
 */
static int scan_bridge_slot(HvBusNumber Bus,
		struct HvCallPci_BridgeInfo *BridgeInfo)
{
	struct device_node *node;
	HvSubBusNumber SubBus = BridgeInfo->subBusNumber;
	u16 VendorId = 0;
	int HvRc = 0;
	u8 Irq = 0;
	int IdSel = ISERIES_GET_DEVICE_FROM_SUBBUS(SubBus);
	int Function = ISERIES_GET_FUNCTION_FROM_SUBBUS(SubBus);
	HvAgentId EADsIdSel = ISERIES_PCI_AGENTID(IdSel, Function);

	/* iSeries_allocate_IRQ.: 0x18.00.12(0xA3) */
	Irq = iSeries_allocate_IRQ(Bus, 0, EADsIdSel);

	/*
	 * Connect all functions of any device found.
	 */
	for (IdSel = 1; IdSel <= BridgeInfo->maxAgents; ++IdSel) {
		for (Function = 0; Function < 8; ++Function) {
			HvAgentId AgentId = ISERIES_PCI_AGENTID(IdSel, Function);

			HvRc = HvCallXm_connectBusUnit(Bus, SubBus,
					AgentId, Irq);
			if (HvRc != 0) {
				pci_Log_Error("Connect Bus Unit",
					      Bus, SubBus, AgentId, HvRc);
				continue;
			}

			HvRc = HvCallPci_configLoad16(Bus, SubBus, AgentId,
						      PCI_VENDOR_ID, &VendorId);
			if (HvRc != 0) {
				pci_Log_Error("Read Vendor",
					      Bus, SubBus, AgentId, HvRc);
				continue;
			}
			printk("read vendor ID: %x\n", VendorId);

			/* FoundDevice: 0x18.28.10 = 0x12AE */
			HvRc = HvCallPci_configStore8(Bus, SubBus, AgentId,
						      PCI_INTERRUPT_LINE, Irq);
			if (HvRc != 0)
				pci_Log_Error("PciCfgStore Irq Failed!",
					      Bus, SubBus, AgentId, HvRc);

			++DeviceCount;
			node = build_device_node(Bus, SubBus, EADsIdSel, Function);
			if (node == NULL)	/* allocation failed, skip this function */
				continue;
			PCI_DN(node)->Irq = Irq;
			PCI_DN(node)->LogicalSlot = BridgeInfo->logicalSlotNumber;

		} /* for (Function = 0; Function < 8; ++Function) */
	} /* for (IdSel = 1; IdSel <= MaxAgents; ++IdSel) */
	return HvRc;
}
/*
 * I/O Memory copy MUST use mmio commands on iSeries.
 * To do: for performance, include the hv call directly.
 */
void iSeries_memset_io(volatile void __iomem *dest, char c, size_t Count)
{
	u8 ByteValue = c;
	long NumberOfBytes = Count;

	while (NumberOfBytes > 0) {
		iSeries_Write_Byte(ByteValue, dest++);
		-- NumberOfBytes;
	}
}
EXPORT_SYMBOL(iSeries_memset_io);

void iSeries_memcpy_toio(volatile void __iomem *dest, void *source, size_t count)
{
	char *src = source;
	long NumberOfBytes = count;

	while (NumberOfBytes > 0) {
		iSeries_Write_Byte(*src++, dest++);
		-- NumberOfBytes;
	}
}
EXPORT_SYMBOL(iSeries_memcpy_toio);

void iSeries_memcpy_fromio(void *dest, const volatile void __iomem *src, size_t count)
{
	char *dst = dest;
	long NumberOfBytes = count;

	while (NumberOfBytes > 0) {
		*dst++ = iSeries_Read_Byte(src++);
		-- NumberOfBytes;
	}
}
EXPORT_SYMBOL(iSeries_memcpy_fromio);
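
/*
 * Each of the loops above issues one hypervisor call per byte through
 * iSeries_Read_Byte()/iSeries_Write_Byte(), which is why the comment
 * above flags calling the hypervisor directly as a performance to-do.
 */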
/*
 * Look down the chain to find the matching device node.
 */
static struct device_node *find_Device_Node(int bus, int devfn)
{
	struct pci_dn *pdn;

	list_for_each_entry(pdn, &iSeries_Global_Device_List, Device_List) {
		if ((bus == pdn->busno) && (devfn == pdn->devfn))
			return pdn->node;
	}
	return NULL;
}
#if 0
/*
 * Returns the device node for the passed pci_dev
 * Sanity Check Node PciDev to passed pci_dev
 * If none is found, returns a NULL which the client must handle.
 */
static struct device_node *get_Device_Node(struct pci_dev *pdev)
{
	struct device_node *node;

	node = pdev->sysdata;
	if (node == NULL || PCI_DN(node)->pcidev != pdev)
		node = find_Device_Node(pdev->bus->number, pdev->devfn);
	return node;
}
#endif
/*
 * Config space read and write functions.
 * For now at least, we look for the device node for the bus and devfn
 * that we are asked to access.  It may be possible to translate the devfn
 * to a subbus and deviceid more directly.
 */
static u64 hv_cfg_read_func[4]  = {
	HvCallPciConfigLoad8, HvCallPciConfigLoad16,
	HvCallPciConfigLoad32, HvCallPciConfigLoad32
};

static u64 hv_cfg_write_func[4] = {
	HvCallPciConfigStore8, HvCallPciConfigStore16,
	HvCallPciConfigStore32, HvCallPciConfigStore32
};
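
/*
 * The access size selects the hypervisor call: index (size - 1) & 3
 * maps size 1 -> Load/Store8, size 2 -> Load/Store16 and size 4 ->
 * Load/Store32.  (A 3-byte access would also land on the 32-bit
 * entry, but the PCI core only issues 1, 2 and 4 byte accesses.)
 */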
/*
 * Read PCI config space
 */
static int iSeries_pci_read_config(struct pci_bus *bus, unsigned int devfn,
		int offset, int size, u32 *val)
{
	struct device_node *node = find_Device_Node(bus->number, devfn);
	u64 fn;
	struct HvCallPci_LoadReturn ret;

	if (node == NULL)
		return PCIBIOS_DEVICE_NOT_FOUND;
	if (offset > 255) {
		*val = ~0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	fn = hv_cfg_read_func[(size - 1) & 3];
	HvCall3Ret16(fn, &ret, iseries_ds_addr(node), offset, 0);

	if (ret.rc != 0) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;	/* or something */
	}

	*val = ret.value;
	return 0;
}
/*
 * Write PCI config space
 */
static int iSeries_pci_write_config(struct pci_bus *bus, unsigned int devfn,
		int offset, int size, u32 val)
{
	struct device_node *node = find_Device_Node(bus->number, devfn);
	u64 fn;
	u64 ret;

	if (node == NULL)
		return PCIBIOS_DEVICE_NOT_FOUND;
	if (offset > 255)
		return PCIBIOS_BAD_REGISTER_NUMBER;

	fn = hv_cfg_write_func[(size - 1) & 3];
	ret = HvCall4(fn, iseries_ds_addr(node), offset, val, 0);

	if (ret != 0)
		return PCIBIOS_DEVICE_NOT_FOUND;

	return 0;
}
static struct pci_ops iSeries_pci_ops = {
	.read = iSeries_pci_read_config,
	.write = iSeries_pci_write_config
};
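
/*
 * These ops are attached to every PHB in find_and_init_phbs(), so all
 * config space traffic on iSeries goes through the hypervisor calls
 * above rather than direct config cycles.
 */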
/*
 * Check Return Code
 * -> On Failure, print and log information.
 *    Increment Retry Count, if it exceeds max, panic partition.
 *
 * PCI: Device 23.90 ReadL I/O Error( 0): 0x1234
 * PCI: Device 23.90 ReadL Retry( 1)
 * PCI: Device 23.90 ReadL Retry Successful(1)
 */
static int CheckReturnCode(char *TextHdr, struct device_node *DevNode,
		int *retry, u64 ret)
{
	if (ret != 0) {
		struct pci_dn *pdn = PCI_DN(DevNode);

		++Pci_Error_Count;
		(*retry)++;
		printk("PCI: %s: Device 0x%04X:%02X  I/O Error(%2d): 0x%04X\n",
				TextHdr, pdn->busno, pdn->devfn,
				*retry, (int)ret);
		/*
		 * Bump the retry and check for retry count exceeded.
		 * If exceeded, panic the system.
		 */
		if (((*retry) > Pci_Retry_Max) &&
				(Pci_Error_Flag > 0)) {
			mf_display_src(0xB6000103);
			panic_timeout = 0;
			panic("PCI: Hardware I/O Error, SRC B6000103, "
					"Automatic Reboot Disabled.\n");
		}
		return -1;	/* Retry */
	}
	return 0;
}
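
/*
 * Callers use this in a retry loop of the form
 *
 *	do {
 *		rc = <hypervisor load/store>;
 *	} while (CheckReturnCode("RDB", DevNode, &retry, rc) != 0);
 *
 * i.e. the access is repeated until it succeeds or, after
 * Pci_Retry_Max failures with Pci_Error_Flag set, the partition
 * panics with SRC B6000103.
 */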
/*
 * Translate the I/O address into a device node, bar, and bar offset.
 * Note: Make sure the passed variables end up on the stack to avoid
 * the exposure of being device global.
 */
static inline struct device_node *xlate_iomm_address(
		const volatile void __iomem *IoAddress,
		u64 *dsaptr, u64 *BarOffsetPtr)
{
	unsigned long OrigIoAddr;
	unsigned long BaseIoAddr;
	unsigned long TableIndex;
	struct device_node *DevNode;

	OrigIoAddr = (unsigned long __force)IoAddress;
	if ((OrigIoAddr < BASE_IO_MEMORY) || (OrigIoAddr >= max_io_memory))
		return NULL;
	BaseIoAddr = OrigIoAddr - BASE_IO_MEMORY;
	TableIndex = BaseIoAddr / IOMM_TABLE_ENTRY_SIZE;
	DevNode = iomm_table[TableIndex];

	if (DevNode != NULL) {
		int barnum = iobar_table[TableIndex];

		*dsaptr = iseries_ds_addr(DevNode) | (barnum << 24);
		*BarOffsetPtr = BaseIoAddr % IOMM_TABLE_ENTRY_SIZE;
	} else
		panic("PCI: Invalid PCI IoAddress detected!\n");
	return DevNode;
}
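
/*
 * The "dsa" handed back to the BarLoad/BarStore hypervisor calls is
 * the device's DSA from iseries_ds_addr() with the BAR number OR'd in
 * at bit 24, and the offset is simply the address modulo the 4 MB
 * table entry size.
 */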
/*
 * Read MM I/O Instructions for the iSeries
 * On MM I/O error, all ones are returned and iSeries_pci_IoError is called,
 * else data is returned in big endian format.
 *
 * iSeries_Read_Byte = Read Byte  ( 8 bit)
 * iSeries_Read_Word = Read Word (16 bit)
 * iSeries_Read_Long = Read Long (32 bit)
 */
u8 iSeries_Read_Byte(const volatile void __iomem *IoAddress)
{
	u64 BarOffset;
	u64 dsa;
	int retry = 0;
	struct HvCallPci_LoadReturn ret;
	struct device_node *DevNode =
		xlate_iomm_address(IoAddress, &dsa, &BarOffset);

	if (DevNode == NULL) {
		static unsigned long last_jiffies;
		static int num_printed;

		if ((jiffies - last_jiffies) > 60 * HZ) {
			last_jiffies = jiffies;
			num_printed = 0;
		}
		if (num_printed++ < 10)
			printk(KERN_ERR "iSeries_Read_Byte: invalid access at IO address %p\n", IoAddress);
		return 0xff;
	}
	do {
		++Pci_Io_Read_Count;
		HvCall3Ret16(HvCallPciBarLoad8, &ret, dsa, BarOffset, 0);
	} while (CheckReturnCode("RDB", DevNode, &retry, ret.rc) != 0);

	return (u8)ret.value;
}
EXPORT_SYMBOL(iSeries_Read_Byte);
u16 iSeries_Read_Word(const volatile void __iomem *IoAddress)
{
	u64 BarOffset;
	u64 dsa;
	int retry = 0;
	struct HvCallPci_LoadReturn ret;
	struct device_node *DevNode =
		xlate_iomm_address(IoAddress, &dsa, &BarOffset);

	if (DevNode == NULL) {
		static unsigned long last_jiffies;
		static int num_printed;

		if ((jiffies - last_jiffies) > 60 * HZ) {
			last_jiffies = jiffies;
			num_printed = 0;
		}
		if (num_printed++ < 10)
			printk(KERN_ERR "iSeries_Read_Word: invalid access at IO address %p\n", IoAddress);
		return 0xffff;
	}
	do {
		++Pci_Io_Read_Count;
		HvCall3Ret16(HvCallPciBarLoad16, &ret, dsa,
				BarOffset, 0);
	} while (CheckReturnCode("RDW", DevNode, &retry, ret.rc) != 0);

	return swab16((u16)ret.value);
}
EXPORT_SYMBOL(iSeries_Read_Word);
u32 iSeries_Read_Long(const volatile void __iomem *IoAddress)
{
	u64 BarOffset;
	u64 dsa;
	int retry = 0;
	struct HvCallPci_LoadReturn ret;
	struct device_node *DevNode =
		xlate_iomm_address(IoAddress, &dsa, &BarOffset);

	if (DevNode == NULL) {
		static unsigned long last_jiffies;
		static int num_printed;

		if ((jiffies - last_jiffies) > 60 * HZ) {
			last_jiffies = jiffies;
			num_printed = 0;
		}
		if (num_printed++ < 10)
			printk(KERN_ERR "iSeries_Read_Long: invalid access at IO address %p\n", IoAddress);
		return 0xffffffff;
	}
	do {
		++Pci_Io_Read_Count;
		HvCall3Ret16(HvCallPciBarLoad32, &ret, dsa,
				BarOffset, 0);
	} while (CheckReturnCode("RDL", DevNode, &retry, ret.rc) != 0);

	return swab32((u32)ret.value);
}
EXPORT_SYMBOL(iSeries_Read_Long);
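
/*
 * As the comment above notes, the hypervisor returns BAR load data in
 * big endian format; the swab16()/swab32() calls byte-swap it into
 * the order callers of these MMIO accessors expect, while byte reads
 * need no swap.  The write paths below swap the data symmetrically
 * before handing it to the BarStore calls.
 */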
/*
 * Write MM I/O Instructions for the iSeries
 *
 * iSeries_Write_Byte = Write Byte  (8 bit)
 * iSeries_Write_Word = Write Word (16 bit)
 * iSeries_Write_Long = Write Long (32 bit)
 */
void iSeries_Write_Byte(u8 data, volatile void __iomem *IoAddress)
{
	u64 BarOffset;
	u64 dsa;
	int retry = 0;
	u64 rc;
	struct device_node *DevNode =
		xlate_iomm_address(IoAddress, &dsa, &BarOffset);

	if (DevNode == NULL) {
		static unsigned long last_jiffies;
		static int num_printed;

		if ((jiffies - last_jiffies) > 60 * HZ) {
			last_jiffies = jiffies;
			num_printed = 0;
		}
		if (num_printed++ < 10)
			printk(KERN_ERR "iSeries_Write_Byte: invalid access at IO address %p\n", IoAddress);
		return;
	}
	do {
		++Pci_Io_Write_Count;
		rc = HvCall4(HvCallPciBarStore8, dsa, BarOffset, data, 0);
	} while (CheckReturnCode("WWB", DevNode, &retry, rc) != 0);
}
EXPORT_SYMBOL(iSeries_Write_Byte);
void iSeries_Write_Word(u16 data, volatile void __iomem *IoAddress)
{
	u64 BarOffset;
	u64 dsa;
	int retry = 0;
	u64 rc;
	struct device_node *DevNode =
		xlate_iomm_address(IoAddress, &dsa, &BarOffset);

	if (DevNode == NULL) {
		static unsigned long last_jiffies;
		static int num_printed;

		if ((jiffies - last_jiffies) > 60 * HZ) {
			last_jiffies = jiffies;
			num_printed = 0;
		}
		if (num_printed++ < 10)
			printk(KERN_ERR "iSeries_Write_Word: invalid access at IO address %p\n", IoAddress);
		return;
	}
	do {
		++Pci_Io_Write_Count;
		rc = HvCall4(HvCallPciBarStore16, dsa, BarOffset, swab16(data), 0);
	} while (CheckReturnCode("WWW", DevNode, &retry, rc) != 0);
}
EXPORT_SYMBOL(iSeries_Write_Word);
void iSeries_Write_Long(u32 data, volatile void __iomem *IoAddress)
{
	u64 BarOffset;
	u64 dsa;
	int retry = 0;
	u64 rc;
	struct device_node *DevNode =
		xlate_iomm_address(IoAddress, &dsa, &BarOffset);

	if (DevNode == NULL) {
		static unsigned long last_jiffies;
		static int num_printed;

		if ((jiffies - last_jiffies) > 60 * HZ) {
			last_jiffies = jiffies;
			num_printed = 0;
		}
		if (num_printed++ < 10)
			printk(KERN_ERR "iSeries_Write_Long: invalid access at IO address %p\n", IoAddress);
		return;
	}
	do {
		++Pci_Io_Write_Count;
		rc = HvCall4(HvCallPciBarStore32, dsa, BarOffset, swab32(data), 0);
	} while (CheckReturnCode("WWL", DevNode, &retry, rc) != 0);
}
EXPORT_SYMBOL(iSeries_Write_Long);