// SPDX-License-Identifier: GPL-2.0
/*
 * ioport.c: Simple io mapping allocator.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
 *
 * 1996: sparc_free_io, 1999: ioremap()/iounmap() by Pete Zaitcev.
 *
 * <rth> zait: as long as pci_alloc_consistent produces something addressable,
 * <zaitcev> rth: no, it is relevant, because get_free_pages returns you a
 *	pointer into the big page mapping
 * <rth> zait: so what?
 * <rth> zait: remap_it_my_way(virt_to_phys(get_free_page()))
 * <zaitcev> Suppose I did this remap_it_my_way(virt_to_phys(get_free_page())).
 * <zaitcev> Now, driver calls pci_free_consistent(with result of
 * <zaitcev> How do you find the address to pass to free_pages()?
 * <rth> zait: walk the page tables? It's only two or three level after all.
 * <rth> zait: you have to walk them anyway to remove the mapping.
 * <zaitcev> Sounds reasonable
 */
29 #include <linux/module.h>
30 #include <linux/sched.h>
31 #include <linux/kernel.h>
32 #include <linux/errno.h>
33 #include <linux/types.h>
34 #include <linux/ioport.h>
36 #include <linux/slab.h>
37 #include <linux/pci.h> /* struct pci_dev */
38 #include <linux/proc_fs.h>
39 #include <linux/seq_file.h>
40 #include <linux/scatterlist.h>
41 #include <linux/dma-noncoherent.h>
42 #include <linux/of_device.h>
45 #include <asm/vaddrs.h>
46 #include <asm/oplib.h>
49 #include <asm/pgalloc.h>
51 #include <asm/iommu.h>
52 #include <asm/io-unit.h>
55 /* This function must make sure that caches and memory are coherent after DMA
56 * On LEON systems without cache snooping it flushes the entire D-CACHE.
58 static inline void dma_make_coherent(unsigned long pa
, unsigned long len
)
60 if (sparc_cpu_model
== sparc_leon
) {
61 if (!sparc_leon3_snooping_enabled())
62 leon_flush_dcache_all();
66 static void __iomem
*_sparc_ioremap(struct resource
*res
, u32 bus
, u32 pa
, int sz
);
67 static void __iomem
*_sparc_alloc_io(unsigned int busno
, unsigned long phys
,
68 unsigned long size
, char *name
);
69 static void _sparc_free_io(struct resource
*res
);
71 static void register_proc_sparc_ioport(void);
73 /* This points to the next to use virtual memory for DVMA mappings */
74 static struct resource _sparc_dvma
= {
75 .name
= "sparc_dvma", .start
= DVMA_VADDR
, .end
= DVMA_END
- 1
77 /* This points to the start of I/O mappings, cluable from outside. */
78 /*ext*/ struct resource sparc_iomap
= {
79 .name
= "sparc_iomap", .start
= IOBASE_VADDR
, .end
= IOBASE_END
- 1
83 * Our mini-allocator...
84 * Boy this is gross! We need it because we must map I/O for
85 * timers and interrupt controller before the kmalloc is available.
89 #define XNRES 10 /* SS-10 uses 8 */
92 struct resource xres
; /* Must be first */
93 int xflag
; /* 1 == used */
97 static struct xresource xresv
[XNRES
];
99 static struct xresource
*xres_alloc(void) {
100 struct xresource
*xrp
;
104 for (n
= 0; n
< XNRES
; n
++) {
105 if (xrp
->xflag
== 0) {
114 static void xres_free(struct xresource
*xrp
) {
119 * These are typically used in PCI drivers
120 * which are trying to be cross-platform.
122 * Bus type is always zero on IIep.
124 void __iomem
*ioremap(phys_addr_t offset
, size_t size
)
128 sprintf(name
, "phys_%08x", (u32
)offset
);
129 return _sparc_alloc_io(0, (unsigned long)offset
, size
, name
);
131 EXPORT_SYMBOL(ioremap
);
134 * Complementary to ioremap().
136 void iounmap(volatile void __iomem
*virtual)
138 unsigned long vaddr
= (unsigned long) virtual & PAGE_MASK
;
139 struct resource
*res
;
142 * XXX Too slow. Can have 8192 DVMA pages on sun4m in the worst case.
143 * This probably warrants some sort of hashing.
145 if ((res
= lookup_resource(&sparc_iomap
, vaddr
)) == NULL
) {
146 printk("free_io/iounmap: cannot free %lx\n", vaddr
);
151 if ((char *)res
>= (char*)xresv
&& (char *)res
< (char *)&xresv
[XNRES
]) {
152 xres_free((struct xresource
*)res
);
157 EXPORT_SYMBOL(iounmap
);
159 void __iomem
*of_ioremap(struct resource
*res
, unsigned long offset
,
160 unsigned long size
, char *name
)
162 return _sparc_alloc_io(res
->flags
& 0xF,
166 EXPORT_SYMBOL(of_ioremap
);
168 void of_iounmap(struct resource
*res
, void __iomem
*base
, unsigned long size
)
172 EXPORT_SYMBOL(of_iounmap
);
177 static void __iomem
*_sparc_alloc_io(unsigned int busno
, unsigned long phys
,
178 unsigned long size
, char *name
)
180 static int printed_full
;
181 struct xresource
*xres
;
182 struct resource
*res
;
185 void __iomem
*va
; /* P3 diag */
187 if (name
== NULL
) name
= "???";
189 if ((xres
= xres_alloc()) != NULL
) {
194 printk("ioremap: done with statics, switching to malloc\n");
198 tack
= kmalloc(sizeof (struct resource
) + tlen
+ 1, GFP_KERNEL
);
199 if (tack
== NULL
) return NULL
;
200 memset(tack
, 0, sizeof(struct resource
));
201 res
= (struct resource
*) tack
;
202 tack
+= sizeof (struct resource
);
205 strlcpy(tack
, name
, XNMLN
+1);
208 va
= _sparc_ioremap(res
, busno
, phys
, size
);
209 /* printk("ioremap(0x%x:%08lx[0x%lx])=%p\n", busno, phys, size, va); */ /* P3 diag */
215 static void __iomem
*
216 _sparc_ioremap(struct resource
*res
, u32 bus
, u32 pa
, int sz
)
218 unsigned long offset
= ((unsigned long) pa
) & (~PAGE_MASK
);
220 if (allocate_resource(&sparc_iomap
, res
,
221 (offset
+ sz
+ PAGE_SIZE
-1) & PAGE_MASK
,
222 sparc_iomap
.start
, sparc_iomap
.end
, PAGE_SIZE
, NULL
, NULL
) != 0) {
223 /* Usually we cannot see printks in this case. */
224 prom_printf("alloc_io_res(%s): cannot occupy\n",
225 (res
->name
!= NULL
)? res
->name
: "???");
230 srmmu_mapiorange(bus
, pa
, res
->start
, resource_size(res
));
232 return (void __iomem
*)(unsigned long)(res
->start
+ offset
);
236 * Complementary to _sparc_ioremap().
238 static void _sparc_free_io(struct resource
*res
)
242 plen
= resource_size(res
);
243 BUG_ON((plen
& (PAGE_SIZE
-1)) != 0);
244 srmmu_unmapiorange(res
->start
, plen
);
245 release_resource(res
);
248 unsigned long sparc_dma_alloc_resource(struct device
*dev
, size_t len
)
250 struct resource
*res
;
252 res
= kzalloc(sizeof(*res
), GFP_KERNEL
);
255 res
->name
= dev
->of_node
->full_name
;
257 if (allocate_resource(&_sparc_dvma
, res
, len
, _sparc_dvma
.start
,
258 _sparc_dvma
.end
, PAGE_SIZE
, NULL
, NULL
) != 0) {
259 printk("%s: cannot occupy 0x%zx", __func__
, len
);
267 bool sparc_dma_free_resource(void *cpu_addr
, size_t size
)
269 unsigned long addr
= (unsigned long)cpu_addr
;
270 struct resource
*res
;
272 res
= lookup_resource(&_sparc_dvma
, addr
);
274 printk("%s: cannot free %p\n", __func__
, cpu_addr
);
278 if ((addr
& (PAGE_SIZE
- 1)) != 0) {
279 printk("%s: unaligned va %p\n", __func__
, cpu_addr
);
283 size
= PAGE_ALIGN(size
);
284 if (resource_size(res
) != size
) {
285 printk("%s: region 0x%lx asked 0x%zx\n",
286 __func__
, (long)resource_size(res
), size
);
290 release_resource(res
);
297 void sbus_set_sbus64(struct device
*dev
, int x
)
299 printk("sbus_set_sbus64: unsupported\n");
301 EXPORT_SYMBOL(sbus_set_sbus64
);
303 static int __init
sparc_register_ioport(void)
305 register_proc_sparc_ioport();
310 arch_initcall(sparc_register_ioport
);
312 #endif /* CONFIG_SBUS */
315 /* Allocate and map kernel buffer using consistent mode DMA for a device.
316 * hwdev should be valid struct pci_dev pointer for PCI devices.
318 void *arch_dma_alloc(struct device
*dev
, size_t size
, dma_addr_t
*dma_handle
,
319 gfp_t gfp
, unsigned long attrs
)
324 if (!size
|| size
> 256 * 1024) /* __get_free_pages() limit */
327 size
= PAGE_ALIGN(size
);
328 va
= (void *) __get_free_pages(gfp
| __GFP_ZERO
, get_order(size
));
330 printk("%s: no %zd pages\n", __func__
, size
>> PAGE_SHIFT
);
334 addr
= sparc_dma_alloc_resource(dev
, size
);
338 srmmu_mapiorange(0, virt_to_phys(va
), addr
, size
);
340 *dma_handle
= virt_to_phys(va
);
344 free_pages((unsigned long)va
, get_order(size
));
348 /* Free and unmap a consistent DMA buffer.
349 * cpu_addr is what was returned arch_dma_alloc, size must be the same as what
350 * was passed into arch_dma_alloc, and likewise dma_addr must be the same as
351 * what *dma_ndler was set to.
353 * References to the memory and mappings associated with cpu_addr/dma_addr
354 * past this call are illegal.
356 void arch_dma_free(struct device
*dev
, size_t size
, void *cpu_addr
,
357 dma_addr_t dma_addr
, unsigned long attrs
)
359 if (!sparc_dma_free_resource(cpu_addr
, PAGE_ALIGN(size
)))
362 dma_make_coherent(dma_addr
, size
);
363 srmmu_unmapiorange((unsigned long)cpu_addr
, size
);
364 free_pages((unsigned long)phys_to_virt(dma_addr
), get_order(size
));
367 /* IIep is write-through, not flushing on cpu to device transfer. */
369 void arch_sync_dma_for_cpu(phys_addr_t paddr
, size_t size
,
370 enum dma_data_direction dir
)
372 if (dir
!= PCI_DMA_TODEVICE
)
373 dma_make_coherent(paddr
, PAGE_ALIGN(size
));
#ifdef CONFIG_PROC_FS

/* seq_file show handler: dump one level of a resource tree (m->private). */
static int sparc_io_proc_show(struct seq_file *m, void *v)
{
	struct resource *root = m->private, *r;
	const char *nm;

	for (r = root->child; r != NULL; r = r->sibling) {
		if ((nm = r->name) == NULL) nm = "???";
		seq_printf(m, "%016llx-%016llx: %s\n",
				(unsigned long long)r->start,
				(unsigned long long)r->end, nm);
	}

	return 0;
}
#endif /* CONFIG_PROC_FS */
/* Expose the I/O and DVMA maps as /proc/io_map and /proc/dvma_map. */
static void register_proc_sparc_ioport(void)
{
#ifdef CONFIG_PROC_FS
	proc_create_single_data("io_map", 0, NULL, sparc_io_proc_show,
			&sparc_iomap);
	proc_create_single_data("dvma_map", 0, NULL, sparc_io_proc_show,
			&_sparc_dvma);
#endif
}