1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (C) 2008 Magnus Damm
7 * Intercept io operations by trapping.
9 #include <linux/kernel.h>
11 #include <linux/bitops.h>
12 #include <linux/vmalloc.h>
13 #include <linux/module.h>
14 #include <linux/init.h>
15 #include <asm/mmu_context.h>
16 #include <linux/uaccess.h>
18 #include <asm/io_trapped.h>
20 #define TRAPPED_PAGES_MAX 16
22 #ifdef CONFIG_HAS_IOPORT_MAP
23 LIST_HEAD(trapped_io
);
24 EXPORT_SYMBOL_GPL(trapped_io
);
26 #ifdef CONFIG_HAS_IOMEM
27 LIST_HEAD(trapped_mem
);
28 EXPORT_SYMBOL_GPL(trapped_mem
);
30 static DEFINE_SPINLOCK(trapped_lock
);
32 static int trapped_io_disable __read_mostly
;
34 static int __init
trapped_io_setup(char *__unused
)
36 trapped_io_disable
= 1;
39 __setup("noiotrap", trapped_io_setup
);
41 int register_trapped_io(struct trapped_io
*tiop
)
44 unsigned long len
= 0, flags
= 0;
45 struct page
*pages
[TRAPPED_PAGES_MAX
];
48 if (unlikely(trapped_io_disable
))
51 /* structure must be page aligned */
52 if ((unsigned long)tiop
& (PAGE_SIZE
- 1))
55 for (k
= 0; k
< tiop
->num_resources
; k
++) {
56 res
= tiop
->resource
+ k
;
57 len
+= roundup(resource_size(res
), PAGE_SIZE
);
61 /* support IORESOURCE_IO _or_ MEM, not both */
62 if (hweight_long(flags
) != 1)
65 n
= len
>> PAGE_SHIFT
;
67 if (n
>= TRAPPED_PAGES_MAX
)
70 for (k
= 0; k
< n
; k
++)
71 pages
[k
] = virt_to_page(tiop
);
73 tiop
->virt_base
= vmap(pages
, n
, VM_MAP
, PAGE_NONE
);
78 for (k
= 0; k
< tiop
->num_resources
; k
++) {
79 res
= tiop
->resource
+ k
;
80 pr_info("trapped io 0x%08lx overrides %s 0x%08lx\n",
81 (unsigned long)(tiop
->virt_base
+ len
),
82 res
->flags
& IORESOURCE_IO
? "io" : "mmio",
83 (unsigned long)res
->start
);
84 len
+= roundup(resource_size(res
), PAGE_SIZE
);
87 tiop
->magic
= IO_TRAPPED_MAGIC
;
88 INIT_LIST_HEAD(&tiop
->list
);
89 spin_lock_irq(&trapped_lock
);
90 #ifdef CONFIG_HAS_IOPORT_MAP
91 if (flags
& IORESOURCE_IO
)
92 list_add(&tiop
->list
, &trapped_io
);
94 #ifdef CONFIG_HAS_IOMEM
95 if (flags
& IORESOURCE_MEM
)
96 list_add(&tiop
->list
, &trapped_mem
);
98 spin_unlock_irq(&trapped_lock
);
102 pr_warn("unable to install trapped io filter\n");
106 void __iomem
*match_trapped_io_handler(struct list_head
*list
,
107 unsigned long offset
,
111 struct trapped_io
*tiop
;
112 struct resource
*res
;
116 spin_lock_irqsave(&trapped_lock
, flags
);
117 list_for_each_entry(tiop
, list
, list
) {
119 for (k
= 0; k
< tiop
->num_resources
; k
++) {
120 res
= tiop
->resource
+ k
;
121 if (res
->start
== offset
) {
122 spin_unlock_irqrestore(&trapped_lock
, flags
);
123 return tiop
->virt_base
+ voffs
;
126 len
= resource_size(res
);
127 voffs
+= roundup(len
, PAGE_SIZE
);
130 spin_unlock_irqrestore(&trapped_lock
, flags
);
134 static struct trapped_io
*lookup_tiop(unsigned long address
)
143 pgd_k
= swapper_pg_dir
+ pgd_index(address
);
144 if (!pgd_present(*pgd_k
))
147 p4d_k
= p4d_offset(pgd_k
, address
);
148 if (!p4d_present(*p4d_k
))
151 pud_k
= pud_offset(p4d_k
, address
);
152 if (!pud_present(*pud_k
))
155 pmd_k
= pmd_offset(pud_k
, address
);
156 if (!pmd_present(*pmd_k
))
159 pte_k
= pte_offset_kernel(pmd_k
, address
);
162 return pfn_to_kaddr(pte_pfn(entry
));
165 static unsigned long lookup_address(struct trapped_io
*tiop
,
166 unsigned long address
)
168 struct resource
*res
;
169 unsigned long vaddr
= (unsigned long)tiop
->virt_base
;
173 for (k
= 0; k
< tiop
->num_resources
; k
++) {
174 res
= tiop
->resource
+ k
;
175 len
= roundup(resource_size(res
), PAGE_SIZE
);
176 if (address
< (vaddr
+ len
))
177 return res
->start
+ (address
- vaddr
);
/*
 * Copy one access unit from @src_addr to @dst_addr, using the raw
 * accessor matching each side's width (@src_len / @dst_len in bytes).
 * This lets a narrow CPU access be widened to the device's minimum bus
 * width on one side of the copy.  Unrecognized widths leave tmp at 0
 * and/or skip the write.  Returns the value transferred.
 */
static unsigned long long copy_word(unsigned long src_addr, int src_len,
				    unsigned long dst_addr, int dst_len)
{
	unsigned long long tmp = 0;

	switch (src_len) {
	case 1:
		tmp = __raw_readb(src_addr);
		break;
	case 2:
		tmp = __raw_readw(src_addr);
		break;
	case 4:
		tmp = __raw_readl(src_addr);
		break;
	case 8:
		tmp = __raw_readq(src_addr);
		break;
	}

	switch (dst_len) {
	case 1:
		__raw_writeb(tmp, dst_addr);
		break;
	case 2:
		__raw_writew(tmp, dst_addr);
		break;
	case 4:
		__raw_writel(tmp, dst_addr);
		break;
	case 8:
		__raw_writeq(tmp, dst_addr);
		break;
	}

	return tmp;
}
221 static unsigned long from_device(void *dst
, const void *src
, unsigned long cnt
)
223 struct trapped_io
*tiop
;
224 unsigned long src_addr
= (unsigned long)src
;
225 unsigned long long tmp
;
227 pr_debug("trapped io read 0x%08lx (%ld)\n", src_addr
, cnt
);
228 tiop
= lookup_tiop(src_addr
);
229 WARN_ON(!tiop
|| (tiop
->magic
!= IO_TRAPPED_MAGIC
));
231 src_addr
= lookup_address(tiop
, src_addr
);
235 tmp
= copy_word(src_addr
,
236 max_t(unsigned long, cnt
,
237 (tiop
->minimum_bus_width
/ 8)),
238 (unsigned long)dst
, cnt
);
240 pr_debug("trapped io read 0x%08lx -> 0x%08llx\n", src_addr
, tmp
);
244 static unsigned long to_device(void *dst
, const void *src
, unsigned long cnt
)
246 struct trapped_io
*tiop
;
247 unsigned long dst_addr
= (unsigned long)dst
;
248 unsigned long long tmp
;
250 pr_debug("trapped io write 0x%08lx (%ld)\n", dst_addr
, cnt
);
251 tiop
= lookup_tiop(dst_addr
);
252 WARN_ON(!tiop
|| (tiop
->magic
!= IO_TRAPPED_MAGIC
));
254 dst_addr
= lookup_address(tiop
, dst_addr
);
258 tmp
= copy_word((unsigned long)src
, cnt
,
259 dst_addr
, max_t(unsigned long, cnt
,
260 (tiop
->minimum_bus_width
/ 8)));
262 pr_debug("trapped io write 0x%08lx -> 0x%08llx\n", dst_addr
, tmp
);
266 static struct mem_access trapped_io_access
= {
271 int handle_trapped_io(struct pt_regs
*regs
, unsigned long address
)
273 insn_size_t instruction
;
276 if (trapped_io_disable
)
278 if (!lookup_tiop(address
))
281 WARN_ON(user_mode(regs
));
283 if (copy_from_kernel_nofault(&instruction
, (void *)(regs
->pc
),
284 sizeof(instruction
))) {
288 tmp
= handle_unaligned_access(instruction
, regs
,
289 &trapped_io_access
, 1, address
);