/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
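/*
 * With a virtual memory map the pfn <-> page conversions reduce to plain
 * pointer arithmetic against the vmemmap base. Simplified sketch of what
 * the generic SPARSEMEM_VMEMMAP definitions in
 * include/asm-generic/memory_model.h boil down to:
 *
 *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
 */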
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>

#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */
static void * __ref __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
        return memblock_virt_alloc_try_nid(size, align, goal,
                                           BOOTMEM_ALLOC_ACCESSIBLE, node);
}
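/*
 * Optional buffer carved out up front by sparse_mem_maps_populate_node();
 * alloc_block_buf() hands out pieces of it until it runs dry and then falls
 * back to vmemmap_alloc_block().
 */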
static void *vmemmap_buf;
static void *vmemmap_buf_end;
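/*
 * Allocate one block for the memory map: through the page allocator once the
 * slab allocator is up, through the early boot allocator before that.
 */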
void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
        /* If the main allocator is up use that, fallback to bootmem. */
        if (slab_is_available()) {
                struct page *page;

                page = alloc_pages_node(node,
                        GFP_KERNEL | __GFP_ZERO | __GFP_RETRY_MAYFAIL,
                        get_order(size));
                if (page)
                        return page_address(page);
                return NULL;
        } else
                return __earlyonly_bootmem_alloc(node, size, size,
                                __pa(MAX_DMA_ADDRESS));
}
/* Early-stage callers must all request the same size. */
static void * __meminit alloc_block_buf(unsigned long size, int node)
{
        void *ptr;

        if (!vmemmap_buf)
                return vmemmap_alloc_block(size, node);

        /* take the allocation from the buffer */
        ptr = (void *)ALIGN((unsigned long)vmemmap_buf, size);
        if (ptr + size > vmemmap_buf_end)
                return vmemmap_alloc_block(size, node);

        vmemmap_buf = ptr + size;

        return ptr;
}
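/*
 * A struct vmem_altmap describes a pre-reserved range of device pfns from
 * which the memory map itself may be allocated (as used by ZONE_DEVICE style
 * mappings). The helpers below carve page-sized chunks out of that
 * reservation.
 */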
static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
        return altmap->base_pfn + altmap->reserve + altmap->alloc
                + altmap->align;
}
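/* Number of pfns still available in the altmap reservation. */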
static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
        unsigned long allocated = altmap->alloc + altmap->align;

        if (altmap->free > allocated)
                return altmap->free - allocated;
        return 0;
}
/**
 * vmem_altmap_alloc - allocate pages from the vmem_altmap reservation
 * @altmap - reserved page pool for the allocation
 * @nr_pfns - size (in pages) of the allocation
 *
 * Allocations are aligned to the size of the request.
 */
static unsigned long __meminit vmem_altmap_alloc(struct vmem_altmap *altmap,
                unsigned long nr_pfns)
{
        unsigned long pfn = vmem_altmap_next_pfn(altmap);
        unsigned long nr_align;

        nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
        nr_align = ALIGN(pfn, nr_align) - pfn;

        if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
                return ULONG_MAX;
        altmap->alloc += nr_pfns;
        altmap->align += nr_align;
        return pfn + nr_align;
}
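/*
 * Satisfy a page-multiple allocation from the altmap reservation and return
 * the kernel virtual address of the reserved pfns, or NULL when the request
 * is not a multiple of PAGE_SIZE or the reservation is exhausted.
 */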
static void * __meminit altmap_alloc_block_buf(unsigned long size,
                struct vmem_altmap *altmap)
{
        unsigned long pfn, nr_pfns;
        void *ptr;

        if (size & ~PAGE_MASK) {
                pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
                                __func__, size);
                return NULL;
        }

        nr_pfns = size >> PAGE_SHIFT;
        pfn = vmem_altmap_alloc(altmap, nr_pfns);
        if (pfn < ULONG_MAX)
                ptr = __va(__pfn_to_phys(pfn));
        else
                ptr = NULL;
        pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
                        __func__, pfn, altmap->alloc, altmap->align, nr_pfns);

        return ptr;
}
/* Early-stage callers must all request the same size. */
void * __meminit __vmemmap_alloc_block_buf(unsigned long size, int node,
                struct vmem_altmap *altmap)
{
        if (altmap)
                return altmap_alloc_block_buf(size, altmap);
        return alloc_block_buf(size, node);
}
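/*
 * Warn when a populated vmemmap pte ended up on a node that is distant from
 * the node the corresponding memory belongs to.
 */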
void __meminit vmemmap_verify(pte_t *pte, int node,
                                unsigned long start, unsigned long end)
{
        unsigned long pfn = pte_pfn(*pte);
        int actual_node = early_pfn_to_nid(pfn);

        if (node_distance(actual_node, node) > LOCAL_DISTANCE)
                pr_warn("[%lx-%lx] potential offnode page_structs\n",
                        start, end - 1);
}
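/*
 * The vmemmap_*_populate() helpers each handle one level of the kernel page
 * table for a vmemmap address, allocating a backing page when the entry is
 * not yet populated. Each returns NULL if that allocation fails.
 */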
pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
{
        pte_t *pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte)) {
                pte_t entry;
                void *p = alloc_block_buf(PAGE_SIZE, node);
                if (!p)
                        return NULL;
                entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
                set_pte_at(&init_mm, addr, pte, entry);
        }
        return pte;
}
pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
        pmd_t *pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd)) {
                void *p = vmemmap_alloc_block(PAGE_SIZE, node);
                if (!p)
                        return NULL;
                pmd_populate_kernel(&init_mm, pmd, p);
        }
        return pmd;
}
pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
        pud_t *pud = pud_offset(p4d, addr);
        if (pud_none(*pud)) {
                void *p = vmemmap_alloc_block(PAGE_SIZE, node);
                if (!p)
                        return NULL;
                pud_populate(&init_mm, pud, p);
        }
        return pud;
}
p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
        p4d_t *p4d = p4d_offset(pgd, addr);
        if (p4d_none(*p4d)) {
                void *p = vmemmap_alloc_block(PAGE_SIZE, node);
                if (!p)
                        return NULL;
                p4d_populate(&init_mm, p4d, p);
        }
        return p4d;
}
pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
        pgd_t *pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd)) {
                void *p = vmemmap_alloc_block(PAGE_SIZE, node);
                if (!p)
                        return NULL;
                pgd_populate(&init_mm, pgd, p);
        }
        return pgd;
}
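/*
 * Populate the vmemmap range [start, end) with base pages, walking and
 * allocating every page table level for each PAGE_SIZE step.
 */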
int __meminit vmemmap_populate_basepages(unsigned long start,
                                         unsigned long end, int node)
{
        unsigned long addr = start;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        for (; addr < end; addr += PAGE_SIZE) {
                pgd = vmemmap_pgd_populate(addr, node);
                if (!pgd)
                        return -ENOMEM;
                p4d = vmemmap_p4d_populate(pgd, addr, node);
                if (!p4d)
                        return -ENOMEM;
                pud = vmemmap_pud_populate(p4d, addr, node);
                if (!pud)
                        return -ENOMEM;
                pmd = vmemmap_pmd_populate(pud, addr, node);
                if (!pmd)
                        return -ENOMEM;
                pte = vmemmap_pte_populate(pmd, addr, node);
                if (!pte)
                        return -ENOMEM;
                vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
        }

        return 0;
}
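/*
 * Build the memory map for one sparsemem section on the given node, returning
 * the first struct page of the section or NULL if the architecture's
 * vmemmap_populate() fails.
 */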
struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
{
        unsigned long start;
        unsigned long end;
        struct page *map;

        map = pfn_to_page(pnum * PAGES_PER_SECTION);
        start = (unsigned long)map;
        end = (unsigned long)(map + PAGES_PER_SECTION);

        if (vmemmap_populate(start, end, nid))
                return NULL;

        return map;
}
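/*
 * Populate the memory map for every present section in [pnum_begin, pnum_end)
 * on the given node. A single PMD-aligned buffer sized for all maps is
 * reserved up front so the section maps can be carved out of it; whatever is
 * left over is handed back to memblock at the end.
 */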
void __init sparse_mem_maps_populate_node(struct page **map_map,
                                          unsigned long pnum_begin,
                                          unsigned long pnum_end,
                                          unsigned long map_count, int nodeid)
{
        unsigned long pnum;
        unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
        void *vmemmap_buf_start;

        size = ALIGN(size, PMD_SIZE);
        vmemmap_buf_start = __earlyonly_bootmem_alloc(nodeid, size * map_count,
                        PMD_SIZE, __pa(MAX_DMA_ADDRESS));

        if (vmemmap_buf_start) {
                vmemmap_buf = vmemmap_buf_start;
                vmemmap_buf_end = vmemmap_buf_start + size * map_count;
        }

        for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                struct mem_section *ms;

                if (!present_section_nr(pnum))
                        continue;

                map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
                if (map_map[pnum])
                        continue;
                ms = __nr_to_section(pnum);
                pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
                       __func__);
                ms->section_mem_map = 0;
        }

        if (vmemmap_buf_start) {
                /* free whatever is left of the buffer */
                memblock_free_early(__pa(vmemmap_buf),
                                    vmemmap_buf_end - vmemmap_buf);
                vmemmap_buf = NULL;
                vmemmap_buf_end = NULL;
        }
}