lib/logic_iomem.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Intel Corporation
 * Author: Johannes Berg <johannes@sipsolutions.net>
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/logic_iomem.h>
#include <asm/io.h>

struct logic_iomem_region {
        const struct resource *res;
        const struct logic_iomem_region_ops *ops;
        struct list_head list;
};

struct logic_iomem_area {
        const struct logic_iomem_ops *ops;
        void *priv;
};

#define AREA_SHIFT      24
#define MAX_AREA_SIZE   (1 << AREA_SHIFT)
#define MAX_AREAS       ((1U << 31) / MAX_AREA_SIZE)
#define AREA_BITS       ((MAX_AREAS - 1) << AREA_SHIFT)
#define AREA_MASK       (MAX_AREA_SIZE - 1)
#ifdef CONFIG_64BIT
#define IOREMAP_BIAS    0xDEAD000000000000UL
#define IOREMAP_MASK    0xFFFFFFFF00000000UL
#else
#define IOREMAP_BIAS    0x80000000UL
#define IOREMAP_MASK    0x80000000UL
#endif
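/*
 * Pointers returned by ioremap() below are not real mappings but synthetic
 * cookies: IOREMAP_BIAS marks them as ours (checked against IOREMAP_MASK in
 * get_area()), the AREA_BITS field selects a slot in mapped_areas[], and the
 * low AREA_MASK bits are the offset within that area, so a single mapping
 * can cover at most MAX_AREA_SIZE bytes.
 */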
static DEFINE_MUTEX(regions_mtx);
static LIST_HEAD(regions_list);
static struct logic_iomem_area mapped_areas[MAX_AREAS];

int logic_iomem_add_region(struct resource *resource,
                           const struct logic_iomem_region_ops *ops)
{
        struct logic_iomem_region *rreg;
        int err;

        if (WARN_ON(!resource || !ops))
                return -EINVAL;

        if (WARN_ON((resource->flags & IORESOURCE_TYPE_BITS) != IORESOURCE_MEM))
                return -EINVAL;

        rreg = kzalloc(sizeof(*rreg), GFP_KERNEL);
        if (!rreg)
                return -ENOMEM;

        err = request_resource(&iomem_resource, resource);
        if (err) {
                kfree(rreg);
                return -ENOMEM;
        }

        mutex_lock(&regions_mtx);
        rreg->res = resource;
        rreg->ops = ops;
        list_add_tail(&rreg->list, &regions_list);
        mutex_unlock(&regions_mtx);

        return 0;
}
EXPORT_SYMBOL(logic_iomem_add_region);
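/*
 * A minimal registration sketch, not taken from any in-tree user: the my_*
 * names and the resource are made up for illustration, and the map()
 * prototype is inferred from the call in ioremap() below (see
 * include/linux/logic_iomem.h for the authoritative definition).
 *
 *      static long my_map(unsigned long offset, size_t size,
 *                         const struct logic_iomem_ops **ops, void **priv)
 *      {
 *              *ops = &my_ops;         // read/write callbacks, e.g. the
 *                                      // sketch at the end of this file
 *              *priv = my_lookup(offset, size);
 *              return 0;               // offset inside the new area,
 *                                      // negative value on error
 *      }
 *
 *      static const struct logic_iomem_region_ops my_region_ops = {
 *              .map = my_map,
 *      };
 *
 *      static struct resource my_res = DEFINE_RES_MEM(0x10000000, SZ_16M);
 *
 *      ret = logic_iomem_add_region(&my_res, &my_region_ops);
 */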
#ifndef CONFIG_INDIRECT_IOMEM_FALLBACK
static void __iomem *real_ioremap(phys_addr_t offset, size_t size)
{
        WARN(1, "invalid ioremap(0x%llx, 0x%zx)\n",
             (unsigned long long)offset, size);
        return NULL;
}

static void real_iounmap(volatile void __iomem *addr)
{
        WARN(1, "invalid iounmap for addr 0x%llx\n",
             (unsigned long long)(uintptr_t __force)addr);
}
#endif /* CONFIG_INDIRECT_IOMEM_FALLBACK */

void __iomem *ioremap(phys_addr_t offset, size_t size)
{
        void __iomem *ret = NULL;
        struct logic_iomem_region *rreg, *found = NULL;
        int i;

        mutex_lock(&regions_mtx);
        list_for_each_entry(rreg, &regions_list, list) {
                if (rreg->res->start > offset)
                        continue;
                if (rreg->res->end < offset + size - 1)
                        continue;
                found = rreg;
                break;
        }

        if (!found)
                goto out;

        for (i = 0; i < MAX_AREAS; i++) {
                long offs;

                if (mapped_areas[i].ops)
                        continue;

                offs = rreg->ops->map(offset - found->res->start,
                                      size, &mapped_areas[i].ops,
                                      &mapped_areas[i].priv);
                if (offs < 0) {
                        mapped_areas[i].ops = NULL;
                        break;
                }

                if (WARN_ON(!mapped_areas[i].ops)) {
                        mapped_areas[i].ops = NULL;
                        break;
                }

                ret = (void __iomem *)(IOREMAP_BIAS + (i << AREA_SHIFT) + offs);
                break;
        }
out:
        mutex_unlock(&regions_mtx);
        if (ret)
                return ret;
        return real_ioremap(offset, size);
}
EXPORT_SYMBOL(ioremap);
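/*
 * ioremap() here never creates a page-table mapping: it looks up a
 * registered region covering [offset, offset + size), asks that region's
 * map() callback to fill in logic_iomem_ops and a priv pointer for a free
 * slot in mapped_areas[], and returns the encoded cookie described above.
 * Addresses outside any registered region fall through to real_ioremap(),
 * which is only the WARN stub above unless CONFIG_INDIRECT_IOMEM_FALLBACK
 * provides a real implementation elsewhere.
 */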
static inline struct logic_iomem_area *
get_area(const volatile void __iomem *addr)
{
        unsigned long a = (unsigned long)addr;
        unsigned int idx;

        if (WARN_ON((a & IOREMAP_MASK) != IOREMAP_BIAS))
                return NULL;

        idx = (a & AREA_BITS) >> AREA_SHIFT;

        if (mapped_areas[idx].ops)
                return &mapped_areas[idx];

        return NULL;
}

void iounmap(volatile void __iomem *addr)
{
        struct logic_iomem_area *area = get_area(addr);

        if (!area) {
                real_iounmap(addr);
                return;
        }

        if (area->ops->unmap)
                area->ops->unmap(area->priv);

        mutex_lock(&regions_mtx);
        area->ops = NULL;
        area->priv = NULL;
        mutex_unlock(&regions_mtx);
}
EXPORT_SYMBOL(iounmap);
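/*
 * iounmap() handles pointers that did not come from the logic ioremap()
 * above: get_area() rejects them (with a WARN_ON) and they are passed to
 * real_iounmap(). For logic mappings the area's optional unmap() callback
 * runs first and the slot is then released under regions_mtx so ioremap()
 * can reuse it.
 */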
#ifndef CONFIG_INDIRECT_IOMEM_FALLBACK
#define MAKE_FALLBACK(op, sz)                                           \
static u##sz real_raw_read ## op(const volatile void __iomem *addr)    \
{                                                                       \
        WARN(1, "Invalid read" #op " at address %llx\n",                \
             (unsigned long long)(uintptr_t __force)addr);              \
        return (u ## sz)~0ULL;                                          \
}                                                                       \
                                                                        \
static void real_raw_write ## op(u ## sz val,                           \
                                 volatile void __iomem *addr)           \
{                                                                       \
        WARN(1, "Invalid write" #op " of 0x%llx at address %llx\n",     \
             (unsigned long long)val,                                   \
             (unsigned long long)(uintptr_t __force)addr);              \
}

MAKE_FALLBACK(b, 8);
MAKE_FALLBACK(w, 16);
MAKE_FALLBACK(l, 32);
#ifdef CONFIG_64BIT
MAKE_FALLBACK(q, 64);
#endif

static void real_memset_io(volatile void __iomem *addr, int value, size_t size)
{
        WARN(1, "Invalid memset_io at address 0x%llx\n",
             (unsigned long long)(uintptr_t __force)addr);
}

static void real_memcpy_fromio(void *buffer, const volatile void __iomem *addr,
                               size_t size)
{
        WARN(1, "Invalid memcpy_fromio at address 0x%llx\n",
             (unsigned long long)(uintptr_t __force)addr);

        memset(buffer, 0xff, size);
}

static void real_memcpy_toio(volatile void __iomem *addr, const void *buffer,
                             size_t size)
{
        WARN(1, "Invalid memcpy_toio at address 0x%llx\n",
             (unsigned long long)(uintptr_t __force)addr);
}
#endif /* CONFIG_INDIRECT_IOMEM_FALLBACK */

#define MAKE_OP(op, sz)                                                 \
u##sz __raw_read ## op(const volatile void __iomem *addr)               \
{                                                                       \
        struct logic_iomem_area *area = get_area(addr);                 \
                                                                        \
        if (!area)                                                      \
                return real_raw_read ## op(addr);                       \
                                                                        \
        return (u ## sz) area->ops->read(area->priv,                    \
                                         (unsigned long)addr & AREA_MASK,\
                                         sz / 8);                       \
}                                                                       \
EXPORT_SYMBOL(__raw_read ## op);                                        \
                                                                        \
void __raw_write ## op(u ## sz val, volatile void __iomem *addr)        \
{                                                                       \
        struct logic_iomem_area *area = get_area(addr);                 \
                                                                        \
        if (!area) {                                                    \
                real_raw_write ## op(val, addr);                        \
                return;                                                 \
        }                                                               \
                                                                        \
        area->ops->write(area->priv,                                    \
                         (unsigned long)addr & AREA_MASK,               \
                         sz / 8, val);                                  \
}                                                                       \
EXPORT_SYMBOL(__raw_write ## op)

MAKE_OP(b, 8);
MAKE_OP(w, 16);
MAKE_OP(l, 32);
#ifdef CONFIG_64BIT
MAKE_OP(q, 64);
#endif
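/*
 * MAKE_OP() above instantiates __raw_readb/w/l(/q) and __raw_writeb/w/l(/q):
 * for cookies that carry IOREMAP_BIAS they dispatch to the registered area's
 * read()/write() callbacks with the offset inside the area and the access
 * width in bytes; anything else goes to the real_raw_* fallbacks.
 */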
void memset_io(volatile void __iomem *addr, int value, size_t size)
{
        struct logic_iomem_area *area = get_area(addr);
        unsigned long offs, start;

        if (!area) {
                real_memset_io(addr, value, size);
                return;
        }

        start = (unsigned long)addr & AREA_MASK;

        if (area->ops->set) {
                area->ops->set(area->priv, start, value, size);
                return;
        }

        for (offs = 0; offs < size; offs++)
                area->ops->write(area->priv, start + offs, 1, value);
}
EXPORT_SYMBOL(memset_io);

void memcpy_fromio(void *buffer, const volatile void __iomem *addr,
                   size_t size)
{
        struct logic_iomem_area *area = get_area(addr);
        u8 *buf = buffer;
        unsigned long offs, start;

        if (!area) {
                real_memcpy_fromio(buffer, addr, size);
                return;
        }

        start = (unsigned long)addr & AREA_MASK;

        if (area->ops->copy_from) {
                area->ops->copy_from(area->priv, buffer, start, size);
                return;
        }

        for (offs = 0; offs < size; offs++)
                buf[offs] = area->ops->read(area->priv, start + offs, 1);
}
EXPORT_SYMBOL(memcpy_fromio);

void memcpy_toio(volatile void __iomem *addr, const void *buffer, size_t size)
{
        struct logic_iomem_area *area = get_area(addr);
        const u8 *buf = buffer;
        unsigned long offs, start;

        if (!area) {
                real_memcpy_toio(addr, buffer, size);
                return;
        }

        start = (unsigned long)addr & AREA_MASK;

        if (area->ops->copy_to) {
                area->ops->copy_to(area->priv, start, buffer, size);
                return;
        }

        for (offs = 0; offs < size; offs++)
                area->ops->write(area->priv, start + offs, 1, buf[offs]);
}
EXPORT_SYMBOL(memcpy_toio);
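/*
 * A minimal logic_iomem_ops sketch backing an area with a plain buffer
 * (little-endian layout assumed for the partial-width accesses). The my_*
 * names are made up and the callback prototypes are inferred from the call
 * sites above; see include/linux/logic_iomem.h for the authoritative
 * definitions.
 *
 *      struct my_area {
 *              u8 buf[MAX_AREA_SIZE];
 *      };
 *
 *      static unsigned long my_read(void *priv, unsigned int offset, int size)
 *      {
 *              struct my_area *a = priv;
 *              unsigned long val = 0;
 *
 *              memcpy(&val, a->buf + offset, size);
 *              return val;
 *      }
 *
 *      static void my_write(void *priv, unsigned int offset, int size,
 *                           unsigned long val)
 *      {
 *              struct my_area *a = priv;
 *
 *              memcpy(a->buf + offset, &val, size);
 *      }
 *
 *      static const struct logic_iomem_ops my_ops = {
 *              .read = my_read,
 *              .write = my_write,
 *      };
 */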