// SPDX-License-Identifier: GPL-2.0
/*
 * ioport.c:  Simple io mapping allocator.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
 *
 * 1996: sparc_free_io, 1999: ioremap()/iounmap() by Pete Zaitcev.
 *
 * <rth> zait: as long as pci_alloc_consistent produces something addressable,
 *	things are ok.
 * <zaitcev> rth: no, it is relevant, because get_free_pages returns you a
 *	pointer into the big page mapping
 * <rth> zait: so what?
 * <rth> zait: remap_it_my_way(virt_to_phys(get_free_page()))
 * <zaitcev> Suppose I did this remap_it_my_way(virt_to_phys(get_free_page())).
 * <zaitcev> Now, driver calls pci_free_consistent(with result of
 *	remap_it_my_way()).
 * <zaitcev> How do you find the address to pass to free_pages()?
 * <rth> zait: walk the page tables?  It's only two or three level after all.
 * <rth> zait: you have to walk them anyway to remove the mapping.
 * <zaitcev> Sounds reasonable
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/pci.h>		/* struct pci_dev */
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/scatterlist.h>
#include <linux/dma-map-ops.h>

#include <asm/io.h>		/* srmmu_mapiorange() */
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/pgalloc.h>
#include <asm/iommu.h>
#include <asm/io-unit.h>
#include <asm/leon.h>		/* leon_flush_dcache_all() */
static void __iomem *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz);
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
    unsigned long size, char *name);
static void _sparc_free_io(struct resource *res);

static void register_proc_sparc_ioport(void);
/* This resource covers the virtual memory range used for DVMA mappings. */
static struct resource _sparc_dvma = {
        .name = "sparc_dvma", .start = DVMA_VADDR, .end = DVMA_END - 1
};
/* This resource covers the I/O mapping range; it is visible from outside. */
/*ext*/ struct resource sparc_iomap = {
        .name = "sparc_iomap", .start = IOBASE_VADDR, .end = IOBASE_END - 1
};
/*
 * Our mini-allocator...
 * Boy this is gross! We need it because we must map I/O for
 * timers and the interrupt controller before kmalloc is available.
 */

#define XNMLN  15
#define XNRES  10	/* SS-10 uses 8 */

struct xresource {
        struct resource xres;	/* Must be first */
        int xflag;		/* 1 == used */
        char xname[XNMLN+1];
};

static struct xresource xresv[XNRES];
static struct xresource *xres_alloc(void) {
        struct xresource *xrp;
        int n;

        /* Hand out the first unused slot in the static pool. */
        xrp = xresv;
        for (n = 0; n < XNRES; n++) {
                if (xrp->xflag == 0) {
                        xrp->xflag = 1;
                        return xrp;
                }
                xrp++;
        }
        return NULL;
}

static void xres_free(struct xresource *xrp) {
        xrp->xflag = 0;
}
/*
 * These are typically used in PCI drivers
 * which are trying to be cross-platform.
 *
 * Bus type is always zero on IIep.
 */
void __iomem *ioremap(phys_addr_t offset, size_t size)
{
        char name[14];

        sprintf(name, "phys_%08x", (u32)offset);
        return _sparc_alloc_io(0, (unsigned long)offset, size, name);
}
EXPORT_SYMBOL(ioremap);
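/*
 * Example (illustrative only, not part of this file): a driver normally
 * pairs ioremap()/iounmap() like this; the physical address, length and
 * register offset below are made up.
 *
 *	void __iomem *regs = ioremap(0xf0200000, 0x1000);
 *
 *	if (regs) {
 *		u32 status = readl(regs + 0x04);
 *		...
 *		iounmap(regs);
 *	}
 */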
/*
 * Complementary to ioremap().
 */
void iounmap(volatile void __iomem *virtual)
{
        unsigned long vaddr = (unsigned long) virtual & PAGE_MASK;
        struct resource *res;

        /*
         * XXX Too slow. Can have 8192 DVMA pages on sun4m in the worst case.
         * This probably warrants some sort of hashing.
         */
        if ((res = lookup_resource(&sparc_iomap, vaddr)) == NULL) {
                printk("free_io/iounmap: cannot free %lx\n", vaddr);
                return;
        }
        _sparc_free_io(res);

        /* Return static entries to the pool, kfree() the malloc'ed ones. */
        if ((char *)res >= (char*)xresv && (char *)res < (char *)&xresv[XNRES]) {
                xres_free((struct xresource *)res);
        } else {
                kfree(res);
        }
}
EXPORT_SYMBOL(iounmap);
void __iomem *of_ioremap(struct resource *res, unsigned long offset,
                         unsigned long size, char *name)
{
        return _sparc_alloc_io(res->flags & 0xF,
                               res->start + offset,
                               size, name);
}
EXPORT_SYMBOL(of_ioremap);

void of_iounmap(struct resource *res, void __iomem *base, unsigned long size)
{
        iounmap(base);
}
EXPORT_SYMBOL(of_iounmap);
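/*
 * Example (illustrative only): a sparc device driver typically feeds one of
 * its device's resources to of_ioremap(); "op" and the resource index are
 * assumptions, not taken from this file.
 *
 *	struct resource *rp = &op->resource[0];
 *	void __iomem *regs;
 *
 *	regs = of_ioremap(rp, 0, resource_size(rp), "mydev regs");
 *	...
 *	of_iounmap(rp, regs, resource_size(rp));
 */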
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
    unsigned long size, char *name)
{
        static int printed_full;
        struct xresource *xres;
        struct resource *res;
        char *tack;
        int tlen;
        void __iomem *va;	/* P3 diag */

        if (name == NULL) name = "???";

        if ((xres = xres_alloc()) != NULL) {
                tack = xres->xname;
                res = &xres->xres;
        } else {
                if (!printed_full) {
                        printk("ioremap: done with statics, switching to malloc\n");
                        printed_full = 1;
                }
                tlen = strlen(name);
                tack = kmalloc(sizeof (struct resource) + tlen + 1, GFP_KERNEL);
                if (tack == NULL) return NULL;
                memset(tack, 0, sizeof(struct resource));
                res = (struct resource *) tack;
                tack += sizeof (struct resource);
        }

        strscpy(tack, name, XNMLN+1);
        res->name = tack;

        va = _sparc_ioremap(res, busno, phys, size);
        /* printk("ioremap(0x%x:%08lx[0x%lx])=%p\n", busno, phys, size, va); */ /* P3 diag */
        return va;
}
static void __iomem *
_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz)
{
        unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK);

        if (allocate_resource(&sparc_iomap, res,
            (offset + sz + PAGE_SIZE-1) & PAGE_MASK,
            sparc_iomap.start, sparc_iomap.end, PAGE_SIZE, NULL, NULL) != 0) {
                /* Usually we cannot see printks in this case. */
                prom_printf("alloc_io_res(%s): cannot occupy\n",
                    (res->name != NULL) ? res->name : "???");
                prom_halt();
        }

        pa &= PAGE_MASK;
        srmmu_mapiorange(bus, pa, res->start, resource_size(res));

        return (void __iomem *)(unsigned long)(res->start + offset);
}
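/*
 * Worked example of the arithmetic above (numbers are illustrative):
 * for pa = 0x10000404 and sz = 0x100, offset = 0x404, so the resource
 * request is (0x404 + 0x100 + PAGE_SIZE - 1) & PAGE_MASK = one 4K page.
 * srmmu_mapiorange() maps that page and the caller gets back
 * res->start + 0x404, i.e. a virtual address with the same sub-page
 * offset as the physical address.
 */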
/*
 * Complementary to _sparc_ioremap().
 */
static void _sparc_free_io(struct resource *res)
{
        unsigned long plen;

        plen = resource_size(res);
        BUG_ON((plen & (PAGE_SIZE-1)) != 0);
        srmmu_unmapiorange(res->start, plen);
        release_resource(res);
}
unsigned long sparc_dma_alloc_resource(struct device *dev, size_t len)
{
        struct resource *res;

        res = kzalloc(sizeof(*res), GFP_KERNEL);
        if (!res)
                return 0;
        res->name = dev->of_node->full_name;

        if (allocate_resource(&_sparc_dvma, res, len, _sparc_dvma.start,
                              _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
                printk("%s: cannot occupy 0x%zx", __func__, len);
                kfree(res);
                return 0;
        }

        return res->start;
}
bool sparc_dma_free_resource(void *cpu_addr, size_t size)
{
        unsigned long addr = (unsigned long)cpu_addr;
        struct resource *res;

        res = lookup_resource(&_sparc_dvma, addr);
        if (res == NULL) {
                printk("%s: cannot free %p\n", __func__, cpu_addr);
                return false;
        }

        if ((addr & (PAGE_SIZE - 1)) != 0) {
                printk("%s: unaligned va %p\n", __func__, cpu_addr);
                return false;
        }

        size = PAGE_ALIGN(size);
        if (resource_size(res) != size) {
                printk("%s: region 0x%lx asked 0x%zx\n",
                        __func__, (long)resource_size(res), size);
                return false;
        }

        release_resource(res);
        kfree(res);
        return true;
}
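/*
 * Illustrative pairing of the two helpers above (the caller and the length
 * handling are assumptions, not taken from this file):
 *
 *	unsigned long va = sparc_dma_alloc_resource(dev, PAGE_ALIGN(len));
 *
 *	if (va) {
 *		...
 *		sparc_dma_free_resource((void *)va, PAGE_ALIGN(len));
 *	}
 */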
#ifdef CONFIG_SBUS
void sbus_set_sbus64(struct device *dev, int x)
{
        printk("sbus_set_sbus64: unsupported\n");
}
EXPORT_SYMBOL(sbus_set_sbus64);

static int __init sparc_register_ioport(void)
{
        register_proc_sparc_ioport();

        return 0;
}

arch_initcall(sparc_register_ioport);

#endif /* CONFIG_SBUS */
/*
 * IIep is write-through, not flushing on cpu to device transfer.
 *
 * On LEON systems without cache snooping, the entire D-CACHE must be flushed
 * to make DMA to cacheable memory coherent.
 */
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
                enum dma_data_direction dir)
{
        if (dir != DMA_TO_DEVICE &&
            sparc_cpu_model == sparc_leon &&
            !sparc_leon3_snooping_enabled())
                leon_flush_dcache_all();
}
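/*
 * Example of the generic streaming-DMA sequence that reaches this hook via
 * the dma-direct path (illustrative only; dev, buf and len are assumptions):
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	(device writes into the buffer)
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
 *
 * Both the sync and the unmap invoke arch_sync_dma_for_cpu(), which on a
 * non-snooping LEON flushes the whole D-cache before the CPU reads buf.
 */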
#ifdef CONFIG_PROC_FS

static int sparc_io_proc_show(struct seq_file *m, void *v)
{
        struct resource *root = m->private, *r;
        const char *nm;

        for (r = root->child; r != NULL; r = r->sibling) {
                if ((nm = r->name) == NULL) nm = "???";
                seq_printf(m, "%016llx-%016llx: %s\n",
                                (unsigned long long)r->start,
                                (unsigned long long)r->end, nm);
        }

        return 0;
}
#endif /* CONFIG_PROC_FS */
static void register_proc_sparc_ioport(void)
{
#ifdef CONFIG_PROC_FS
        proc_create_single_data("io_map", 0, NULL, sparc_io_proc_show,
                        &sparc_iomap);
        proc_create_single_data("dvma_map", 0, NULL, sparc_io_proc_show,
                        &_sparc_dvma);
#endif
}
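/*
 * Illustrative only: given the format string in sparc_io_proc_show(),
 * /proc/io_map and /proc/dvma_map list one child resource per line,
 * for example (values made up):
 *
 *	00000000fe000000-00000000fe000fff: phys_f0200000
 */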