#ifndef _LINUX_MEMREMAP_H_
#define _LINUX_MEMREMAP_H_
#include <linux/mm.h>
#include <linux/ioport.h>
#include <linux/percpu-refcount.h>

struct resource;
struct device;

/**
 * struct vmem_altmap - pre-allocated storage for vmemmap_populate
 * @base_pfn: base of the entire dev_pagemap mapping
 * @reserve: pages mapped, but reserved for driver use (relative to @base_pfn)
 * @free: free pages set aside in the mapping for memmap storage
 * @align: pages reserved to meet allocation alignments
 * @alloc: track pages consumed, private to vmemmap_populate()
 */
struct vmem_altmap {
	const unsigned long base_pfn;
	const unsigned long reserve;
	unsigned long free;
	unsigned long align;
	unsigned long alloc;
};
unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);
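
/*
 * Illustrative sketch, not part of the original header: building a
 * struct vmem_altmap that donates part of a device range for memmap
 * storage. example_build_altmap() is a hypothetical helper; only
 * struct vmem_altmap and its fields come from the definitions above.
 * Note @base_pfn and @reserve are const, so they must be set at
 * initialization time rather than assigned afterwards.
 */
static inline struct vmem_altmap example_build_altmap(unsigned long base_pfn,
		unsigned long reserve, unsigned long free)
{
	struct vmem_altmap altmap = {
		.base_pfn = base_pfn,	/* first pfn of the mapping */
		.reserve = reserve,	/* pages held back for the driver */
		.free = free,		/* pages available for memmap storage */
	};

	return altmap;
}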
#if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_ZONE_DEVICE)
struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start);
#else
static inline struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start)
{
	return NULL;
}
#endif
/**
 * struct dev_pagemap - metadata for ZONE_DEVICE mappings
 * @altmap: pre-allocated/reserved memory for vmemmap allocations
 * @res: physical address range covered by @ref
 * @ref: reference count that pins the devm_memremap_pages() mapping
 * @dev: host device of the mapping for debug
 */
struct dev_pagemap {
	struct vmem_altmap *altmap;
	const struct resource *res;
	struct percpu_ref *ref;
	struct device *dev;
};
#ifdef CONFIG_ZONE_DEVICE
void *devm_memremap_pages(struct device *dev, struct resource *res,
		struct percpu_ref *ref, struct vmem_altmap *altmap);
struct dev_pagemap *find_dev_pagemap(resource_size_t phys);
#else
static inline void *devm_memremap_pages(struct device *dev,
		struct resource *res, struct percpu_ref *ref,
		struct vmem_altmap *altmap)
{
	/*
	 * Fail attempts to call devm_memremap_pages() without
	 * ZONE_DEVICE support enabled; this requires callers to fall
	 * back to plain devm_memremap() based on config.
	 */
	WARN_ON_ONCE(1);
	return ERR_PTR(-ENXIO);
}

static inline struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
{
	return NULL;
}
#endif
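
/*
 * Illustrative sketch, not part of the original header: roughly how a
 * driver probe path might use devm_memremap_pages() and fall back to
 * plain devm_memremap() when ZONE_DEVICE is disabled, as the stub
 * above suggests. example_probe_map() is a hypothetical helper;
 * devm_memremap() and MEMREMAP_WB come from <linux/io.h>, which a
 * real caller would need to include.
 */
static inline void *example_probe_map(struct device *dev,
		struct resource *res, struct percpu_ref *ref)
{
	void *addr = devm_memremap_pages(dev, res, ref, NULL);

	/* the !ZONE_DEVICE stub above returns ERR_PTR(-ENXIO) */
	if (addr == ERR_PTR(-ENXIO))
		addr = devm_memremap(dev, res->start, resource_size(res),
				MEMREMAP_WB);

	return addr;
}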
/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number to look up
 * @pgmap: optional known pgmap that already has a reference
 *
 * @pgmap allows the overhead of a lookup to be bypassed when @pfn lands in the
 * same mapping.
 */
static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{
	const struct resource *res = pgmap ? pgmap->res : NULL;
	resource_size_t phys = PFN_PHYS(pfn);

	/*
	 * In the cached case we're already holding a live reference, so
	 * we can simply do a blind increment.
	 */
	if (res && phys >= res->start && phys <= res->end) {
		percpu_ref_get(pgmap->ref);
		return pgmap;
	}

	/* fall back to slow path lookup */
	rcu_read_lock();
	pgmap = find_dev_pagemap(phys);
	if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
		pgmap = NULL;
	rcu_read_unlock();

	return pgmap;
}
static inline void put_dev_pagemap(struct dev_pagemap *pgmap)
{
	if (pgmap)
		percpu_ref_put(pgmap->ref);
}
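
/*
 * Illustrative sketch, not part of the original header: pairing
 * get_dev_pagemap() with put_dev_pagemap() around a lookup. The
 * helper name example_pfn_is_device() is hypothetical.
 */
static inline bool example_pfn_is_device(unsigned long pfn)
{
	struct dev_pagemap *pgmap = get_dev_pagemap(pfn, NULL);

	if (!pgmap)
		return false;

	/* ... inspect pgmap->dev, pgmap->res, etc. while the ref is held ... */
	put_dev_pagemap(pgmap);

	return true;
}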
#endif /* _LINUX_MEMREMAP_H_ */