/*
 * Trapped io support
 *
 * Copyright (C) 2008 Magnus Damm
 *
 * Intercept io operations by trapping.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/io_trapped.h>

#define TRAPPED_PAGES_MAX 16
#ifdef CONFIG_HAS_IOPORT
LIST_HEAD(trapped_io);
EXPORT_SYMBOL_GPL(trapped_io);
#endif
#ifdef CONFIG_HAS_IOMEM
LIST_HEAD(trapped_mem);
EXPORT_SYMBOL_GPL(trapped_mem);
#endif
static DEFINE_SPINLOCK(trapped_lock);
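
/*
 * register_trapped_io() installs a trapped io window for a device.
 * Every page of the window is aliased to the page holding the tiop
 * structure itself and mapped with PAGE_NONE, so each access faults
 * and gets emulated by handle_trapped_io() below.
 *
 * A minimal usage sketch, assuming hypothetical board setup code;
 * the names and resource values below are illustrative only:
 *
 *	static struct resource dummy_resources[] = {
 *		{
 *			.start	= 0xa8000000,
 *			.end	= 0xa8000fff,
 *			.flags	= IORESOURCE_MEM,
 *		},
 *	};
 *
 *	static struct trapped_io dummy_tiop
 *		__attribute__ ((__aligned__(PAGE_SIZE))) = {
 *		.resource		= dummy_resources,
 *		.num_resources		= ARRAY_SIZE(dummy_resources),
 *		.minimum_bus_width	= 16,
 *	};
 *
 *	register_trapped_io(&dummy_tiop);
 */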
int __init register_trapped_io(struct trapped_io *tiop)
{
	struct resource *res;
	unsigned long len = 0, flags = 0;
	struct page *pages[TRAPPED_PAGES_MAX];
	int k, n;

	/* structure must be page aligned */
	if ((unsigned long)tiop & (PAGE_SIZE - 1))
		goto bad;

	for (k = 0; k < tiop->num_resources; k++) {
		res = tiop->resource + k;
		len += roundup((res->end - res->start) + 1, PAGE_SIZE);
		flags |= res->flags;
	}
	/* support IORESOURCE_IO _or_ MEM, not both */
	if (hweight_long(flags) != 1)
		goto bad;

	n = len >> PAGE_SHIFT;

	if (n >= TRAPPED_PAGES_MAX)
		goto bad;

	/* alias every page of the window to the page holding tiop */
	for (k = 0; k < n; k++)
		pages[k] = virt_to_page(tiop);

	/* map with PAGE_NONE so any access faults and can be emulated */
	tiop->virt_base = vmap(pages, n, VM_MAP, PAGE_NONE);
	if (!tiop->virt_base)
		goto bad;

	len = 0;
	for (k = 0; k < tiop->num_resources; k++) {
		res = tiop->resource + k;
		pr_info("trapped io 0x%08lx overrides %s 0x%08lx\n",
			(unsigned long)(tiop->virt_base + len),
			res->flags & IORESOURCE_IO ? "io" : "mmio",
			(unsigned long)res->start);
		len += roundup((res->end - res->start) + 1, PAGE_SIZE);
	}
	tiop->magic = IO_TRAPPED_MAGIC;
	INIT_LIST_HEAD(&tiop->list);
	spin_lock_irq(&trapped_lock);
	if (flags & IORESOURCE_IO)
		list_add(&tiop->list, &trapped_io);
	if (flags & IORESOURCE_MEM)
		list_add(&tiop->list, &trapped_mem);
	spin_unlock_irq(&trapped_lock);

	return 0;
 bad:
	pr_warning("unable to install trapped io filter\n");
	return -1;
}
EXPORT_SYMBOL_GPL(register_trapped_io);
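
/*
 * match_trapped_io_handler() - given a physical resource offset,
 * return the trapped virtual address to use in its place, or NULL
 * if no registered resource starts at that offset.  Likely callers
 * are the ioport/ioremap mapping paths; each resource occupies a
 * page-aligned slot inside the vmapped window.
 */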
void __iomem *match_trapped_io_handler(struct list_head *list,
				       unsigned long offset,
				       unsigned long size)
{
	unsigned long voffs;
	struct trapped_io *tiop;
	struct resource *res;
	int k, len;

	spin_lock_irq(&trapped_lock);
	list_for_each_entry(tiop, list, list) {
		voffs = 0;
		for (k = 0; k < tiop->num_resources; k++) {
			res = tiop->resource + k;
			if (res->start == offset) {
				spin_unlock_irq(&trapped_lock);
				return tiop->virt_base + voffs;
			}

			len = (res->end - res->start) + 1;
			voffs += roundup(len, PAGE_SIZE);
		}
	}
	spin_unlock_irq(&trapped_lock);
	return NULL;
}
EXPORT_SYMBOL_GPL(match_trapped_io_handler);
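
/*
 * lookup_tiop() translates a virtual address inside a trapped window
 * back to its struct trapped_io.  This works because the window pages
 * all alias the page holding the tiop structure, so a walk of the
 * kernel page tables (swapper_pg_dir) lands on that very page.
 */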
static struct trapped_io *lookup_tiop(unsigned long address)
{
	pgd_t *pgd_k;
	pud_t *pud_k;
	pmd_t *pmd_k;
	pte_t *pte_k;
	pte_t entry;

	pgd_k = swapper_pg_dir + pgd_index(address);
	if (!pgd_present(*pgd_k))
		return NULL;

	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	pte_k = pte_offset_kernel(pmd_k, address);
	entry = *pte_k;

	return pfn_to_kaddr(pte_pfn(entry));
}
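
/*
 * lookup_address() converts a virtual address inside a trapped window
 * into the matching physical resource address, stepping over the
 * page-aligned slot that each resource occupies.  Returns 0 when the
 * address is not covered by any resource of this tiop.
 */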
static unsigned long lookup_address(struct trapped_io *tiop,
				    unsigned long address)
{
	struct resource *res;
	unsigned long vaddr = (unsigned long)tiop->virt_base;
	unsigned long len;
	int k;

	for (k = 0; k < tiop->num_resources; k++) {
		res = tiop->resource + k;
		len = roundup((res->end - res->start) + 1, PAGE_SIZE);
		if (address < (vaddr + len))
			return res->start + (address - vaddr);
		vaddr += len;
	}
	return 0;
}
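
/*
 * copy_word() performs one device access: a read of src_len bytes
 * followed by a write of dst_len bytes.  The callers pick the sizes
 * so that a cpu access narrower than the device's minimum bus width
 * is widened to a full bus-width transaction.
 */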
static unsigned long long copy_word(unsigned long src_addr, int src_len,
				    unsigned long dst_addr, int dst_len)
{
	unsigned long long tmp = 0;

	switch (src_len) {
	case 1:
		tmp = ctrl_inb(src_addr);
		break;
	case 2:
		tmp = ctrl_inw(src_addr);
		break;
	case 4:
		tmp = ctrl_inl(src_addr);
		break;
	case 8:
		tmp = ctrl_inq(src_addr);
		break;
	}

	switch (dst_len) {
	case 1:
		ctrl_outb(tmp, dst_addr);
		break;
	case 2:
		ctrl_outw(tmp, dst_addr);
		break;
	case 4:
		ctrl_outl(tmp, dst_addr);
		break;
	case 8:
		ctrl_outq(tmp, dst_addr);
		break;
	}

	return tmp;
}
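
/*
 * from_device() emulates a load from a trapped window: the faulting
 * address is translated back to the device, read with at least the
 * minimum bus width, and the value is stored to the destination of
 * the trapped instruction.
 */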
static unsigned long from_device(void *dst, const void *src, unsigned long cnt)
{
	struct trapped_io *tiop;
	unsigned long src_addr = (unsigned long)src;
	unsigned long long tmp;

	pr_debug("trapped io read 0x%08lx (%ld)\n", src_addr, cnt);
	tiop = lookup_tiop(src_addr);
	WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC));

	src_addr = lookup_address(tiop, src_addr);
	if (!src_addr)
		return cnt;

	tmp = copy_word(src_addr,
			max_t(unsigned long, cnt,
			      (tiop->minimum_bus_width / 8)),
			(unsigned long)dst, cnt);

	pr_debug("trapped io read 0x%08lx -> 0x%08llx\n", src_addr, tmp);
	return 0;
}
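
/*
 * to_device() is the store-side counterpart of from_device(): the
 * value of the trapped instruction is read from memory and written
 * out to the device with at least the minimum bus width.
 */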
static unsigned long to_device(void *dst, const void *src, unsigned long cnt)
{
	struct trapped_io *tiop;
	unsigned long dst_addr = (unsigned long)dst;
	unsigned long long tmp;

	pr_debug("trapped io write 0x%08lx (%ld)\n", dst_addr, cnt);
	tiop = lookup_tiop(dst_addr);
	WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC));

	dst_addr = lookup_address(tiop, dst_addr);
	if (!dst_addr)
		return cnt;

	tmp = copy_word((unsigned long)src, cnt,
			dst_addr, max_t(unsigned long, cnt,
					(tiop->minimum_bus_width / 8)));

	pr_debug("trapped io write 0x%08lx -> 0x%08llx\n", dst_addr, tmp);
	return 0;
}
static struct mem_access trapped_io_access = {
	from_device,
	to_device,
};
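
/*
 * handle_trapped_io() is meant to be called from the fault path.  If
 * the faulting address belongs to a trapped window, the instruction
 * at regs->pc is fetched and emulated via handle_unaligned_access()
 * using the trapped_io_access operations above.  Returns nonzero if
 * the access was handled.
 */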
int handle_trapped_io(struct pt_regs *regs, unsigned long address)
{
	mm_segment_t oldfs;
	opcode_t instruction;
	int tmp;

	if (!lookup_tiop(address))
		return 0;

	WARN_ON(user_mode(regs));

	/* allow copy_from_user() to fetch the kernel-mode instruction */
	oldfs = get_fs();
	set_fs(KERNEL_DS);
	if (copy_from_user(&instruction, (void *)(regs->pc),
			   sizeof(instruction))) {
		set_fs(oldfs);
		return 0;
	}

	tmp = handle_unaligned_access(instruction, regs, &trapped_io_access);
	set_fs(oldfs);
	return tmp == 0;
}