/*
 * Source: drm/drm-misc.git — include/linux/io-mapping.h
 * (blob 7376c1df9c9017a644f7976d2095114a636c57f4)
 */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright © 2008 Keith Packard <keithp@keithp.com>
 */

#ifndef _LINUX_IO_MAPPING_H
#define _LINUX_IO_MAPPING_H

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/io.h>
#include <linux/pgtable.h>
#include <asm/page.h>
/*
 * The io_mapping mechanism provides an abstraction for mapping
 * individual pages from an io device to the CPU in an efficient fashion.
 *
 * See Documentation/driver-api/io-mapping.rst
 */
23 struct io_mapping {
24 resource_size_t base;
25 unsigned long size;
26 pgprot_t prot;
27 void __iomem *iomem;
#ifdef CONFIG_HAVE_ATOMIC_IOMAP

#include <linux/pfn.h>
#include <asm/iomap.h>
/*
 * For small address space machines, mapping large objects
 * into the kernel virtual space isn't practical. Where
 * available, use fixmap support to dynamically map pages
 * of the object at run time.
 */
41 static inline struct io_mapping *
42 io_mapping_init_wc(struct io_mapping *iomap,
43 resource_size_t base,
44 unsigned long size)
46 pgprot_t prot;
48 if (iomap_create_wc(base, size, &prot))
49 return NULL;
51 iomap->base = base;
52 iomap->size = size;
53 iomap->prot = prot;
54 return iomap;
57 static inline void
58 io_mapping_fini(struct io_mapping *mapping)
60 iomap_free(mapping->base, mapping->size);
/* Atomic map/unmap */
64 static inline void __iomem *
65 io_mapping_map_atomic_wc(struct io_mapping *mapping,
66 unsigned long offset)
68 resource_size_t phys_addr;
70 BUG_ON(offset >= mapping->size);
71 phys_addr = mapping->base + offset;
72 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
73 preempt_disable();
74 else
75 migrate_disable();
76 pagefault_disable();
77 return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
80 static inline void
81 io_mapping_unmap_atomic(void __iomem *vaddr)
83 kunmap_local_indexed((void __force *)vaddr);
84 pagefault_enable();
85 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
86 preempt_enable();
87 else
88 migrate_enable();
91 static inline void __iomem *
92 io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)
94 resource_size_t phys_addr;
96 BUG_ON(offset >= mapping->size);
97 phys_addr = mapping->base + offset;
98 return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
101 static inline void io_mapping_unmap_local(void __iomem *vaddr)
103 kunmap_local_indexed((void __force *)vaddr);
106 static inline void __iomem *
107 io_mapping_map_wc(struct io_mapping *mapping,
108 unsigned long offset,
109 unsigned long size)
111 resource_size_t phys_addr;
113 BUG_ON(offset >= mapping->size);
114 phys_addr = mapping->base + offset;
116 return ioremap_wc(phys_addr, size);
119 static inline void
120 io_mapping_unmap(void __iomem *vaddr)
122 iounmap(vaddr);
#else /* HAVE_ATOMIC_IOMAP */

#include <linux/uaccess.h>

/* Create the io_mapping object */
130 static inline struct io_mapping *
131 io_mapping_init_wc(struct io_mapping *iomap,
132 resource_size_t base,
133 unsigned long size)
135 iomap->iomem = ioremap_wc(base, size);
136 if (!iomap->iomem)
137 return NULL;
139 iomap->base = base;
140 iomap->size = size;
141 iomap->prot = pgprot_writecombine(PAGE_KERNEL);
143 return iomap;
146 static inline void
147 io_mapping_fini(struct io_mapping *mapping)
149 iounmap(mapping->iomem);
/* Non-atomic map/unmap */
153 static inline void __iomem *
154 io_mapping_map_wc(struct io_mapping *mapping,
155 unsigned long offset,
156 unsigned long size)
158 return mapping->iomem + offset;
161 static inline void
162 io_mapping_unmap(void __iomem *vaddr)
/* Atomic map/unmap */
167 static inline void __iomem *
168 io_mapping_map_atomic_wc(struct io_mapping *mapping,
169 unsigned long offset)
171 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
172 preempt_disable();
173 else
174 migrate_disable();
175 pagefault_disable();
176 return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
179 static inline void
180 io_mapping_unmap_atomic(void __iomem *vaddr)
182 io_mapping_unmap(vaddr);
183 pagefault_enable();
184 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
185 preempt_enable();
186 else
187 migrate_enable();
190 static inline void __iomem *
191 io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)
193 return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
196 static inline void io_mapping_unmap_local(void __iomem *vaddr)
198 io_mapping_unmap(vaddr);
#endif /* !HAVE_ATOMIC_IOMAP */
203 static inline struct io_mapping *
204 io_mapping_create_wc(resource_size_t base,
205 unsigned long size)
207 struct io_mapping *iomap;
209 iomap = kmalloc(sizeof(*iomap), GFP_KERNEL);
210 if (!iomap)
211 return NULL;
213 if (!io_mapping_init_wc(iomap, base, size)) {
214 kfree(iomap);
215 return NULL;
218 return iomap;
/* Tear down and free an io_mapping from io_mapping_create_wc(). */
static inline void
io_mapping_free(struct io_mapping *iomap)
{
	io_mapping_fini(iomap);
	kfree(iomap);
}
int io_mapping_map_user(struct io_mapping *iomap, struct vm_area_struct *vma,
		unsigned long addr, unsigned long pfn, unsigned long size);

#endif /* _LINUX_IO_MAPPING_H */