/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright © 2008 Keith Packard <keithp@keithp.com>
 */
6 #ifndef _LINUX_IO_MAPPING_H
7 #define _LINUX_IO_MAPPING_H
9 #include <linux/types.h>
10 #include <linux/slab.h>
11 #include <linux/bug.h>
13 #include <linux/pgtable.h>
/*
 * The io_mapping mechanism provides an abstraction for mapping
 * individual pages from an io device to the CPU in an efficient fashion.
 *
 * See Documentation/driver-api/io-mapping.rst
 */
30 #ifdef CONFIG_HAVE_ATOMIC_IOMAP
32 #include <linux/pfn.h>
33 #include <asm/iomap.h>
/*
 * For small address space machines, mapping large objects
 * into the kernel virtual space isn't practical. Where
 * available, use fixmap support to dynamically map pages
 * of the object at run time.
 */
41 static inline struct io_mapping
*
42 io_mapping_init_wc(struct io_mapping
*iomap
,
48 if (iomap_create_wc(base
, size
, &prot
))
58 io_mapping_fini(struct io_mapping
*mapping
)
60 iomap_free(mapping
->base
, mapping
->size
);
63 /* Atomic map/unmap */
64 static inline void __iomem
*
65 io_mapping_map_atomic_wc(struct io_mapping
*mapping
,
68 resource_size_t phys_addr
;
70 BUG_ON(offset
>= mapping
->size
);
71 phys_addr
= mapping
->base
+ offset
;
72 if (!IS_ENABLED(CONFIG_PREEMPT_RT
))
77 return __iomap_local_pfn_prot(PHYS_PFN(phys_addr
), mapping
->prot
);
81 io_mapping_unmap_atomic(void __iomem
*vaddr
)
83 kunmap_local_indexed((void __force
*)vaddr
);
85 if (!IS_ENABLED(CONFIG_PREEMPT_RT
))
91 static inline void __iomem
*
92 io_mapping_map_local_wc(struct io_mapping
*mapping
, unsigned long offset
)
94 resource_size_t phys_addr
;
96 BUG_ON(offset
>= mapping
->size
);
97 phys_addr
= mapping
->base
+ offset
;
98 return __iomap_local_pfn_prot(PHYS_PFN(phys_addr
), mapping
->prot
);
101 static inline void io_mapping_unmap_local(void __iomem
*vaddr
)
103 kunmap_local_indexed((void __force
*)vaddr
);
106 static inline void __iomem
*
107 io_mapping_map_wc(struct io_mapping
*mapping
,
108 unsigned long offset
,
111 resource_size_t phys_addr
;
113 BUG_ON(offset
>= mapping
->size
);
114 phys_addr
= mapping
->base
+ offset
;
116 return ioremap_wc(phys_addr
, size
);
120 io_mapping_unmap(void __iomem
*vaddr
)
125 #else /* HAVE_ATOMIC_IOMAP */
127 #include <linux/uaccess.h>
129 /* Create the io_mapping object*/
130 static inline struct io_mapping
*
131 io_mapping_init_wc(struct io_mapping
*iomap
,
132 resource_size_t base
,
135 iomap
->iomem
= ioremap_wc(base
, size
);
141 iomap
->prot
= pgprot_writecombine(PAGE_KERNEL
);
147 io_mapping_fini(struct io_mapping
*mapping
)
149 iounmap(mapping
->iomem
);
152 /* Non-atomic map/unmap */
153 static inline void __iomem
*
154 io_mapping_map_wc(struct io_mapping
*mapping
,
155 unsigned long offset
,
158 return mapping
->iomem
+ offset
;
162 io_mapping_unmap(void __iomem
*vaddr
)
166 /* Atomic map/unmap */
167 static inline void __iomem
*
168 io_mapping_map_atomic_wc(struct io_mapping
*mapping
,
169 unsigned long offset
)
171 if (!IS_ENABLED(CONFIG_PREEMPT_RT
))
176 return io_mapping_map_wc(mapping
, offset
, PAGE_SIZE
);
180 io_mapping_unmap_atomic(void __iomem
*vaddr
)
182 io_mapping_unmap(vaddr
);
184 if (!IS_ENABLED(CONFIG_PREEMPT_RT
))
190 static inline void __iomem
*
191 io_mapping_map_local_wc(struct io_mapping
*mapping
, unsigned long offset
)
193 return io_mapping_map_wc(mapping
, offset
, PAGE_SIZE
);
196 static inline void io_mapping_unmap_local(void __iomem
*vaddr
)
198 io_mapping_unmap(vaddr
);
201 #endif /* !HAVE_ATOMIC_IOMAP */
203 static inline struct io_mapping
*
204 io_mapping_create_wc(resource_size_t base
,
207 struct io_mapping
*iomap
;
209 iomap
= kmalloc(sizeof(*iomap
), GFP_KERNEL
);
213 if (!io_mapping_init_wc(iomap
, base
, size
)) {
static inline void
io_mapping_free(struct io_mapping *iomap)
{
	/* Counterpart of io_mapping_create_wc(): fini, then free. */
	io_mapping_fini(iomap);
	kfree(iomap);
}
/*
 * Map @size bytes starting at page frame @pfn into the userspace VMA
 * @vma at address @addr, presumably using @iomap's caching attributes —
 * out-of-line helper; semantics defined by its implementation elsewhere.
 * Returns 0 on success or a negative errno (NOTE(review): inferred from
 * the int return convention — confirm against the definition).
 */
int io_mapping_map_user(struct io_mapping *iomap, struct vm_area_struct *vma,
		unsigned long addr, unsigned long pfn, unsigned long size);
231 #endif /* _LINUX_IO_MAPPING_H */