// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
5 #include <linux/memremap.h>
6 #include <linux/rculist.h>
7 #include <linux/export.h>
8 #include <linux/ioport.h>
9 #include <linux/module.h>
10 #include <linux/types.h>
11 #include <linux/pfn_t.h>
12 #include <linux/acpi.h>
15 #include "nfit_test.h"
17 static LIST_HEAD(iomap_head
);
19 static struct iomap_ops
{
20 nfit_test_lookup_fn nfit_test_lookup
;
21 nfit_test_evaluate_dsm_fn evaluate_dsm
;
22 struct list_head list
;
24 .list
= LIST_HEAD_INIT(iomap_ops
.list
),
27 void nfit_test_setup(nfit_test_lookup_fn lookup
,
28 nfit_test_evaluate_dsm_fn evaluate
)
30 iomap_ops
.nfit_test_lookup
= lookup
;
31 iomap_ops
.evaluate_dsm
= evaluate
;
32 list_add_rcu(&iomap_ops
.list
, &iomap_head
);
34 EXPORT_SYMBOL(nfit_test_setup
);
36 void nfit_test_teardown(void)
38 list_del_rcu(&iomap_ops
.list
);
41 EXPORT_SYMBOL(nfit_test_teardown
);
43 static struct nfit_test_resource
*__get_nfit_res(resource_size_t resource
)
45 struct iomap_ops
*ops
;
47 ops
= list_first_or_null_rcu(&iomap_head
, typeof(*ops
), list
);
49 return ops
->nfit_test_lookup(resource
);
53 struct nfit_test_resource
*get_nfit_res(resource_size_t resource
)
55 struct nfit_test_resource
*res
;
58 res
= __get_nfit_res(resource
);
63 EXPORT_SYMBOL(get_nfit_res
);
65 void __iomem
*__nfit_test_ioremap(resource_size_t offset
, unsigned long size
,
66 void __iomem
*(*fallback_fn
)(resource_size_t
, unsigned long))
68 struct nfit_test_resource
*nfit_res
= get_nfit_res(offset
);
71 return (void __iomem
*) nfit_res
->buf
+ offset
72 - nfit_res
->res
.start
;
73 return fallback_fn(offset
, size
);
76 void __iomem
*__wrap_devm_ioremap(struct device
*dev
,
77 resource_size_t offset
, unsigned long size
)
79 struct nfit_test_resource
*nfit_res
= get_nfit_res(offset
);
82 return (void __iomem
*) nfit_res
->buf
+ offset
83 - nfit_res
->res
.start
;
84 return devm_ioremap(dev
, offset
, size
);
86 EXPORT_SYMBOL(__wrap_devm_ioremap
);
88 void *__wrap_devm_memremap(struct device
*dev
, resource_size_t offset
,
89 size_t size
, unsigned long flags
)
91 struct nfit_test_resource
*nfit_res
= get_nfit_res(offset
);
94 return nfit_res
->buf
+ offset
- nfit_res
->res
.start
;
95 return devm_memremap(dev
, offset
, size
, flags
);
97 EXPORT_SYMBOL(__wrap_devm_memremap
);
99 static void nfit_test_kill(void *_pgmap
)
101 struct dev_pagemap
*pgmap
= _pgmap
;
103 WARN_ON(!pgmap
|| !pgmap
->ref
);
105 if (pgmap
->ops
&& pgmap
->ops
->kill
)
106 pgmap
->ops
->kill(pgmap
);
108 percpu_ref_kill(pgmap
->ref
);
110 if (pgmap
->ops
&& pgmap
->ops
->cleanup
) {
111 pgmap
->ops
->cleanup(pgmap
);
113 wait_for_completion(&pgmap
->done
);
114 percpu_ref_exit(pgmap
->ref
);
118 static void dev_pagemap_percpu_release(struct percpu_ref
*ref
)
120 struct dev_pagemap
*pgmap
=
121 container_of(ref
, struct dev_pagemap
, internal_ref
);
123 complete(&pgmap
->done
);
126 void *__wrap_devm_memremap_pages(struct device
*dev
, struct dev_pagemap
*pgmap
)
129 resource_size_t offset
= pgmap
->range
.start
;
130 struct nfit_test_resource
*nfit_res
= get_nfit_res(offset
);
133 return devm_memremap_pages(dev
, pgmap
);
136 if (pgmap
->ops
&& (pgmap
->ops
->kill
|| pgmap
->ops
->cleanup
))
137 return ERR_PTR(-EINVAL
);
139 init_completion(&pgmap
->done
);
140 error
= percpu_ref_init(&pgmap
->internal_ref
,
141 dev_pagemap_percpu_release
, 0, GFP_KERNEL
);
143 return ERR_PTR(error
);
144 pgmap
->ref
= &pgmap
->internal_ref
;
146 if (!pgmap
->ops
|| !pgmap
->ops
->kill
|| !pgmap
->ops
->cleanup
) {
147 WARN(1, "Missing reference count teardown definition\n");
148 return ERR_PTR(-EINVAL
);
152 error
= devm_add_action_or_reset(dev
, nfit_test_kill
, pgmap
);
154 return ERR_PTR(error
);
155 return nfit_res
->buf
+ offset
- nfit_res
->res
.start
;
157 EXPORT_SYMBOL_GPL(__wrap_devm_memremap_pages
);
159 pfn_t
__wrap_phys_to_pfn_t(phys_addr_t addr
, unsigned long flags
)
161 struct nfit_test_resource
*nfit_res
= get_nfit_res(addr
);
165 return phys_to_pfn_t(addr
, flags
);
167 EXPORT_SYMBOL(__wrap_phys_to_pfn_t
);
169 void *__wrap_memremap(resource_size_t offset
, size_t size
,
172 struct nfit_test_resource
*nfit_res
= get_nfit_res(offset
);
175 return nfit_res
->buf
+ offset
- nfit_res
->res
.start
;
176 return memremap(offset
, size
, flags
);
178 EXPORT_SYMBOL(__wrap_memremap
);
/*
 * __wrap_devm_memunmap - interpose devm_memunmap()
 *
 * Test-buffer "mappings" need no unmap; only real mappings are passed on.
 */
void __wrap_devm_memunmap(struct device *dev, void *addr)
{
	struct nfit_test_resource *nfit_res = get_nfit_res((long) addr);

	if (nfit_res)
		return;
	return devm_memunmap(dev, addr);
}
EXPORT_SYMBOL(__wrap_devm_memunmap);
190 void __iomem
*__wrap_ioremap(resource_size_t offset
, unsigned long size
)
192 return __nfit_test_ioremap(offset
, size
, ioremap
);
194 EXPORT_SYMBOL(__wrap_ioremap
);
196 void __iomem
*__wrap_ioremap_wc(resource_size_t offset
, unsigned long size
)
198 return __nfit_test_ioremap(offset
, size
, ioremap_wc
);
200 EXPORT_SYMBOL(__wrap_ioremap_wc
);
202 void __wrap_iounmap(volatile void __iomem
*addr
)
204 struct nfit_test_resource
*nfit_res
= get_nfit_res((long) addr
);
207 return iounmap(addr
);
209 EXPORT_SYMBOL(__wrap_iounmap
);
/*
 * __wrap_memunmap - interpose memunmap()
 *
 * Test-buffer "mappings" need no unmap; only real mappings are passed on.
 */
void __wrap_memunmap(void *addr)
{
	struct nfit_test_resource *nfit_res = get_nfit_res((long) addr);

	if (nfit_res)
		return;
	return memunmap(addr);
}
EXPORT_SYMBOL(__wrap_memunmap);
221 static bool nfit_test_release_region(struct device
*dev
,
222 struct resource
*parent
, resource_size_t start
,
225 static void nfit_devres_release(struct device
*dev
, void *data
)
227 struct resource
*res
= *((struct resource
**) data
);
229 WARN_ON(!nfit_test_release_region(NULL
, &iomem_resource
, res
->start
,
230 resource_size(res
)));
233 static int match(struct device
*dev
, void *__res
, void *match_data
)
235 struct resource
*res
= *((struct resource
**) __res
);
236 resource_size_t start
= *((resource_size_t
*) match_data
);
238 return res
->start
== start
;
241 static bool nfit_test_release_region(struct device
*dev
,
242 struct resource
*parent
, resource_size_t start
,
245 if (parent
== &iomem_resource
) {
246 struct nfit_test_resource
*nfit_res
= get_nfit_res(start
);
249 struct nfit_test_request
*req
;
250 struct resource
*res
= NULL
;
253 devres_release(dev
, nfit_devres_release
, match
,
258 spin_lock(&nfit_res
->lock
);
259 list_for_each_entry(req
, &nfit_res
->requests
, list
)
260 if (req
->res
.start
== start
) {
262 list_del(&req
->list
);
265 spin_unlock(&nfit_res
->lock
);
267 WARN(!res
|| resource_size(res
) != n
,
268 "%s: start: %llx n: %llx mismatch: %pr\n",
269 __func__
, start
, n
, res
);
278 static struct resource
*nfit_test_request_region(struct device
*dev
,
279 struct resource
*parent
, resource_size_t start
,
280 resource_size_t n
, const char *name
, int flags
)
282 struct nfit_test_resource
*nfit_res
;
284 if (parent
== &iomem_resource
) {
285 nfit_res
= get_nfit_res(start
);
287 struct nfit_test_request
*req
;
288 struct resource
*res
= NULL
;
290 if (start
+ n
> nfit_res
->res
.start
291 + resource_size(&nfit_res
->res
)) {
292 pr_debug("%s: start: %llx n: %llx overflow: %pr\n",
298 spin_lock(&nfit_res
->lock
);
299 list_for_each_entry(req
, &nfit_res
->requests
, list
)
300 if (start
== req
->res
.start
) {
304 spin_unlock(&nfit_res
->lock
);
307 WARN(1, "%pr already busy\n", res
);
311 req
= kzalloc(sizeof(*req
), GFP_KERNEL
);
314 INIT_LIST_HEAD(&req
->list
);
318 res
->end
= start
+ n
- 1;
320 res
->flags
= resource_type(parent
);
321 res
->flags
|= IORESOURCE_BUSY
| flags
;
322 spin_lock(&nfit_res
->lock
);
323 list_add(&req
->list
, &nfit_res
->requests
);
324 spin_unlock(&nfit_res
->lock
);
329 d
= devres_alloc(nfit_devres_release
,
330 sizeof(struct resource
*),
338 pr_debug("%s: %pr\n", __func__
, res
);
343 return __devm_request_region(dev
, parent
, start
, n
, name
);
344 return __request_region(parent
, start
, n
, name
, flags
);
347 struct resource
*__wrap___request_region(struct resource
*parent
,
348 resource_size_t start
, resource_size_t n
, const char *name
,
351 return nfit_test_request_region(NULL
, parent
, start
, n
, name
, flags
);
353 EXPORT_SYMBOL(__wrap___request_region
);
355 int __wrap_insert_resource(struct resource
*parent
, struct resource
*res
)
357 if (get_nfit_res(res
->start
))
359 return insert_resource(parent
, res
);
361 EXPORT_SYMBOL(__wrap_insert_resource
);
363 int __wrap_remove_resource(struct resource
*res
)
365 if (get_nfit_res(res
->start
))
367 return remove_resource(res
);
369 EXPORT_SYMBOL(__wrap_remove_resource
);
371 struct resource
*__wrap___devm_request_region(struct device
*dev
,
372 struct resource
*parent
, resource_size_t start
,
373 resource_size_t n
, const char *name
)
377 return nfit_test_request_region(dev
, parent
, start
, n
, name
, 0);
379 EXPORT_SYMBOL(__wrap___devm_request_region
);
381 void __wrap___release_region(struct resource
*parent
, resource_size_t start
,
384 if (!nfit_test_release_region(NULL
, parent
, start
, n
))
385 __release_region(parent
, start
, n
);
387 EXPORT_SYMBOL(__wrap___release_region
);
389 void __wrap___devm_release_region(struct device
*dev
, struct resource
*parent
,
390 resource_size_t start
, resource_size_t n
)
392 if (!nfit_test_release_region(dev
, parent
, start
, n
))
393 __devm_release_region(dev
, parent
, start
, n
);
395 EXPORT_SYMBOL(__wrap___devm_release_region
);
397 acpi_status
__wrap_acpi_evaluate_object(acpi_handle handle
, acpi_string path
,
398 struct acpi_object_list
*p
, struct acpi_buffer
*buf
)
400 struct nfit_test_resource
*nfit_res
= get_nfit_res((long) handle
);
401 union acpi_object
**obj
;
403 if (!nfit_res
|| strcmp(path
, "_FIT") || !buf
)
404 return acpi_evaluate_object(handle
, path
, p
, buf
);
407 buf
->length
= sizeof(union acpi_object
);
411 EXPORT_SYMBOL(__wrap_acpi_evaluate_object
);
413 union acpi_object
* __wrap_acpi_evaluate_dsm(acpi_handle handle
, const guid_t
*guid
,
414 u64 rev
, u64 func
, union acpi_object
*argv4
)
416 union acpi_object
*obj
= ERR_PTR(-ENXIO
);
417 struct iomap_ops
*ops
;
420 ops
= list_first_or_null_rcu(&iomap_head
, typeof(*ops
), list
);
422 obj
= ops
->evaluate_dsm(handle
, guid
, rev
, func
, argv4
);
426 return acpi_evaluate_dsm(handle
, guid
, rev
, func
, argv4
);
429 EXPORT_SYMBOL(__wrap_acpi_evaluate_dsm
);
431 MODULE_LICENSE("GPL v2");