/*
 *  arch/s390/mm/ioremap.c
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/extable.c"
 *    (C) Copyright 1995 1996 Linus Torvalds
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
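
/*
 * The mapping below is built by walking the kernel page tables one level
 * at a time: remap_area_pages() steps through the pgd entries,
 * remap_area_pmd() fills in the pmd level, and remap_area_pte() installs
 * the final pte entries that point at the physical frames.
 */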

static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
        unsigned long phys_addr, unsigned long flags)
{
        unsigned long end;
        unsigned long pfn;

        /* Reduce address to its offset within the pmd-covered region. */
        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        if (address >= end)
                BUG();
        pfn = phys_addr >> PAGE_SHIFT;
        do {
                if (!pte_none(*pte)) {
                        printk("remap_area_pte: page already exists\n");
                        BUG();
                }
                /* Point this pte at the next physical frame. */
                set_pte(pte, pfn_pte(pfn, __pgprot(flags)));
                address += PAGE_SIZE;
                pfn++;
                pte++;
        } while (address && (address < end));
}

static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
        unsigned long phys_addr, unsigned long flags)
{
        unsigned long end;

        /* Reduce address to its offset within the pgd entry. */
        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        /*
         * Bias phys_addr by -address so that "address + phys_addr"
         * stays correct as address advances through the region.
         */
        phys_addr -= address;
        if (address >= end)
                BUG();
        do {
                pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
                if (!pte)
                        return -ENOMEM;
                remap_area_pte(pte, address, end - address, address + phys_addr, flags);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address && (address < end));
        return 0;
}

static int remap_area_pages(unsigned long address, unsigned long phys_addr,
                            unsigned long size, unsigned long flags)
{
        int error;
        pgd_t * dir;
        unsigned long end = address + size;

        phys_addr -= address;
        dir = pgd_offset(&init_mm, address);
        flush_cache_all();
        if (address >= end)
                BUG();
        spin_lock(&init_mm.page_table_lock);
        do {
                pmd_t *pmd;
                pmd = pmd_alloc(&init_mm, dir, address);
                error = -ENOMEM;
                if (!pmd)
                        break;
                if (remap_area_pmd(pmd, address, end - address,
                                   phys_addr + address, flags))
                        break;
                error = 0;
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
        spin_unlock(&init_mm.page_table_lock);
        flush_tlb_all();
        /* Propagate -ENOMEM from a failed pmd/pte allocation. */
        return error;
}

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 */
void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
        void * addr;
        struct vm_struct * area;

        /* Addresses below high_memory are already identity-mapped. */
        if (phys_addr < virt_to_phys(high_memory))
                return phys_to_virt(phys_addr);
        /* The physical address must be page aligned. */
        if (phys_addr & ~PAGE_MASK)
                return NULL;
        size = PAGE_ALIGN(size);
        /* Reject empty ranges and ranges that wrap around. */
        if (!size || size > phys_addr + size)
                return NULL;
        /* Reserve a virtual address range for the mapping. */
        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;
        addr = area->addr;
        if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
                vfree(addr);
                return NULL;
        }
        return addr;
}

void iounmap(void *addr)
{
        /*
         * Only vfree() real vm_area mappings; identity-mapped low
         * memory returned by __ioremap() was never allocated.
         */
        if (addr > high_memory)
                vfree(addr);
}
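
/*
 * Usage sketch (not part of the original file): one way a caller might
 * pair __ioremap() with iounmap().  A minimal sketch only -- the base
 * address 0x10000000UL, the flags value 0 and the function name are
 * made-up illustration values, not a real s390 device or driver.
 */
#if 0
static int ioremap_example(void)
{
        /* Map one page of a hypothetical physical window at 256 MB. */
        void *addr = __ioremap(0x10000000UL, PAGE_SIZE, 0);
        unsigned int val;

        if (!addr)
                return -ENOMEM;
        /* Access the window through the returned virtual address. */
        val = *(volatile unsigned int *) addr;
        printk("ioremap_example: read %08x\n", val);
        /*
         * Drop the mapping again; for identity-mapped low memory
         * iounmap() is a no-op.
         */
        iounmap(addr);
        return 0;
}
#endif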