/*
 * Copyright (C) 2001-2008 Silicon Graphics, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * A simple uncached page allocator using the generic allocator. This
 * allocator first utilizes the spare (spill) pages found in the EFI
 * memmap and will then start converting cached pages to uncached ones
 * a granule at a time. Node awareness is implemented by having a
 * pool of pages per node.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/efi.h>
#include <linux/genalloc.h>
#include <asm/page.h>
#include <asm/pal.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/atomic.h>
#include <asm/tlbflush.h>
#include <asm/sn/arch.h>


extern void __init efi_memmap_walk_uc(efi_freemem_callback_t, void *);

struct uncached_pool {
	struct gen_pool *pool;
	struct mutex add_chunk_mutex;	/* serialize adding a converted chunk */
	int nchunks_added;		/* #of converted chunks added to pool */
	atomic_t status;		/* smp called function's return status */
};

#define MAX_CONVERTED_CHUNKS_PER_NODE	2

struct uncached_pool uncached_pools[MAX_NUMNODES];


static void uncached_ipi_visibility(void *data)
{
	int status;
	struct uncached_pool *uc_pool = (struct uncached_pool *)data;

	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
	if ((status != PAL_VISIBILITY_OK) &&
	    (status != PAL_VISIBILITY_OK_REMOTE_NEEDED))
		atomic_inc(&uc_pool->status);
}


static void uncached_ipi_mc_drain(void *data)
{
	int status;
	struct uncached_pool *uc_pool = (struct uncached_pool *)data;

	status = ia64_pal_mc_drain();
	if (status != PAL_STATUS_SUCCESS)
		atomic_inc(&uc_pool->status);
}


/*
 * Add a new chunk of uncached memory pages to the specified pool.
 *
 * @pool: pool to add new chunk of uncached memory to
 * @nid: node id of node to allocate memory from, or -1
 *
 * This is accomplished by first allocating a granule of cached memory pages
 * and then converting them to uncached memory pages.
 */
static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
{
	struct page *page;
	int status, i, nchunks_added = uc_pool->nchunks_added;
	unsigned long c_addr, uc_addr;

	if (mutex_lock_interruptible(&uc_pool->add_chunk_mutex) != 0)
		return -1;	/* interrupted by a signal */

	if (uc_pool->nchunks_added > nchunks_added) {
		/* someone added a new chunk while we were waiting */
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return 0;
	}

	if (uc_pool->nchunks_added >= MAX_CONVERTED_CHUNKS_PER_NODE) {
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return -1;
	}

	/* attempt to allocate a granule's worth of cached memory pages */

	page = alloc_pages_exact_node(nid,
				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				IA64_GRANULE_SHIFT-PAGE_SHIFT);
	if (!page) {
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return -1;
	}

	/* convert the memory pages from cached to uncached */

	c_addr = (unsigned long)page_address(page);
	uc_addr = c_addr - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;

	/*
	 * There's a small race here where it's possible for someone to
	 * access the page through /dev/mem halfway through the conversion
	 * to uncached - not sure it's really worth bothering about.
	 */
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		SetPageUncached(&page[i]);

	flush_tlb_kernel_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
	if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
		atomic_set(&uc_pool->status, 0);
		status = smp_call_function(uncached_ipi_visibility, uc_pool, 1);
		if (status || atomic_read(&uc_pool->status))
			goto failed;
	} else if (status != PAL_VISIBILITY_OK)
		goto failed;

	preempt_disable();

	if (ia64_platform_is("sn2"))
		sn_flush_all_caches(uc_addr, IA64_GRANULE_SIZE);
	else
		flush_icache_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

	/* flush the just introduced uncached translation from the TLB */
	local_flush_tlb_all();

	preempt_enable();

	status = ia64_pal_mc_drain();
	if (status != PAL_STATUS_SUCCESS)
		goto failed;
	atomic_set(&uc_pool->status, 0);
	status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
	if (status || atomic_read(&uc_pool->status))
		goto failed;

	/*
	 * The chunk of memory pages has been converted to uncached so now we
	 * can add it to the pool.
	 */
	status = gen_pool_add(uc_pool->pool, uc_addr, IA64_GRANULE_SIZE, nid);
	if (status)
		goto failed;

	uc_pool->nchunks_added++;
	mutex_unlock(&uc_pool->add_chunk_mutex);
	return 0;

	/* failed to convert or add the chunk so give it back to the kernel */
failed:
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		ClearPageUncached(&page[i]);

	free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT);
	mutex_unlock(&uc_pool->add_chunk_mutex);
	return -1;
}


/*
 * uncached_alloc_page
 *
 * @starting_nid: node id of node to start with, or -1
 * @n_pages: number of contiguous pages to allocate
 *
 * Allocate the specified number of contiguous uncached pages on the
 * requested node. If not enough contiguous uncached pages are available
 * on the requested node, roundrobin starting with the next higher node.
 */
unsigned long uncached_alloc_page(int starting_nid, int n_pages)
{
	unsigned long uc_addr;
	struct uncached_pool *uc_pool;
	int nid;

	if (unlikely(starting_nid >= MAX_NUMNODES))
		return 0;

	if (starting_nid < 0)
		starting_nid = numa_node_id();
	nid = starting_nid;

	do {
		if (!node_state(nid, N_HIGH_MEMORY))
			continue;
		uc_pool = &uncached_pools[nid];
		if (uc_pool->pool == NULL)
			continue;
		do {
			uc_addr = gen_pool_alloc(uc_pool->pool,
						 n_pages * PAGE_SIZE);
			if (uc_addr != 0)
				return uc_addr;
		} while (uncached_add_chunk(uc_pool, nid) == 0);

	} while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid);

	return 0;
}
EXPORT_SYMBOL(uncached_alloc_page);


/*
 * uncached_free_page
 *
 * @uc_addr: uncached address of first page to free
 * @n_pages: number of contiguous pages to free
 *
 * Free the specified number of uncached pages.
 */
void uncached_free_page(unsigned long uc_addr, int n_pages)
{
	int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET);
	struct gen_pool *pool = uncached_pools[nid].pool;

	if (unlikely(pool == NULL))
		return;

	if ((uc_addr & (0XFUL << 60)) != __IA64_UNCACHED_OFFSET)
		panic("uncached_free_page invalid address %lx\n", uc_addr);

	gen_pool_free(pool, uc_addr, n_pages * PAGE_SIZE);
}
EXPORT_SYMBOL(uncached_free_page);
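

/*
 * A minimal usage sketch, not part of the original file: a hypothetical
 * driver that needs one page of memory bypassing the processor caches
 * could pair the two exported entry points above like this. The names
 * example_uc_addr, example_uc_init and example_uc_exit are invented for
 * illustration only, so the block is kept under #if 0 and is not built.
 */
#if 0
static unsigned long example_uc_addr;

static int __init example_uc_init(void)
{
	/* -1 starts the round-robin node search at the local node */
	example_uc_addr = uncached_alloc_page(-1, 1);
	if (example_uc_addr == 0)
		return -ENOMEM;	/* no node could supply an uncached page */
	return 0;
}

static void __exit example_uc_exit(void)
{
	/* the length is given in pages, matching the allocation above */
	uncached_free_page(example_uc_addr, 1);
}
#endif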


/*
 * uncached_build_memmap
 *
 * @uc_start: uncached starting address of a chunk of uncached memory
 * @uc_end: uncached ending address of a chunk of uncached memory
 * @arg: ignored (NULL argument passed in on call to efi_memmap_walk_uc())
 *
 * Called at boot time to build a map of pages that can be used for
 * memory special operations.
 */
static int __init uncached_build_memmap(u64 uc_start, u64 uc_end, void *arg)
{
	int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET);
	struct gen_pool *pool = uncached_pools[nid].pool;
	size_t size = uc_end - uc_start;

	touch_softlockup_watchdog();

	if (pool != NULL) {
		memset((char *)uc_start, 0, size);
		(void) gen_pool_add(pool, uc_start, size, nid);
	}
	return 0;
}


static int __init uncached_init(void)
{
	int nid;

	for_each_node_state(nid, N_ONLINE) {
		uncached_pools[nid].pool = gen_pool_create(PAGE_SHIFT, nid);
		mutex_init(&uncached_pools[nid].add_chunk_mutex);
	}

	efi_memmap_walk_uc(uncached_build_memmap, NULL);
	return 0;
}

__initcall(uncached_init);