[pv_ops_mirror.git] / include / asm-ia64 / pgalloc.h
blob 9cb68e9b377e2c831f71187dc1314643b83fc933
#ifndef _ASM_IA64_PGALLOC_H
#define _ASM_IA64_PGALLOC_H

/*
 * This file contains the functions and defines necessary to allocate
 * page tables.
 *
 * This hopefully works with any (fixed) ia-64 page-size, as defined
 * in <asm/page.h> (currently 8192).
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2000, Goutham Rao <goutham.rao@intel.com>
 */

#include <linux/compiler.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/threads.h>

#include <asm/mmu_context.h>

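/*
 * Page-table pages are cached on per-CPU "quicklists" so that they can
 * be reused without going back to the page allocator.  A quicklist is
 * a singly linked LIFO threaded through the first word of each free
 * page; __pgtable_quicklist_size counts the pages on this CPU's list.
 */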
DECLARE_PER_CPU(unsigned long *, __pgtable_quicklist);
#define pgtable_quicklist __ia64_per_cpu_var(__pgtable_quicklist)
DECLARE_PER_CPU(long, __pgtable_quicklist_size);
#define pgtable_quicklist_size __ia64_per_cpu_var(__pgtable_quicklist_size)

static inline long pgtable_quicklist_total_size(void)
{
	long ql_size = 0;
	int cpuid;

	for_each_online_cpu(cpuid) {
		ql_size += per_cpu(__pgtable_quicklist_size, cpuid);
	}

	return ql_size;
}

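/*
 * Pop a page off this CPU's quicklist if one is available; otherwise
 * fall back to a freshly zeroed page from the page allocator.  The
 * link word of a recycled page is cleared before it is handed out.
 */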
static inline void *pgtable_quicklist_alloc(void)
{
	unsigned long *ret = NULL;

	preempt_disable();

	ret = pgtable_quicklist;
	if (likely(ret != NULL)) {
		pgtable_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;
		--pgtable_quicklist_size;
		preempt_enable();
	} else {
		preempt_enable();
		ret = (unsigned long *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	}

	return ret;
}

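/*
 * Push a page-table page back onto the local quicklist.  On NUMA,
 * pages that belong to a remote node go straight back to the page
 * allocator, keeping the quicklist node-local.
 */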
static inline void pgtable_quicklist_free(void *pgtable_entry)
{
#ifdef CONFIG_NUMA
	unsigned long nid = page_to_nid(virt_to_page(pgtable_entry));

	if (unlikely(nid != numa_node_id())) {
		free_page((unsigned long)pgtable_entry);
		return;
	}
#endif

	preempt_disable();
	*(unsigned long *)pgtable_entry = (unsigned long)pgtable_quicklist;
	pgtable_quicklist = (unsigned long *)pgtable_entry;
	++pgtable_quicklist_size;
	preempt_enable();
}

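/*
 * Every page-table level (pgd, pud, pmd, pte) occupies exactly one
 * page on ia64, so all levels allocate from and free to the same
 * quicklist.
 */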
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return pgtable_quicklist_alloc();
}

static inline void pgd_free(pgd_t * pgd)
{
	pgtable_quicklist_free(pgd);
}

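/*
 * The *_populate() helpers wire one level of the tree into the next:
 * an entry simply holds the physical address of the lower-level table.
 */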
#ifdef CONFIG_PGTABLE_4
static inline void
pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
{
	pgd_val(*pgd_entry) = __pa(pud);
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return pgtable_quicklist_alloc();
}

static inline void pud_free(pud_t * pud)
{
	pgtable_quicklist_free(pud);
}
#define __pud_free_tlb(tlb, pud)	pud_free(pud)
#endif /* CONFIG_PGTABLE_4 */

static inline void
pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
{
	pud_val(*pud_entry) = __pa(pmd);
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return pgtable_quicklist_alloc();
}

static inline void pmd_free(pmd_t * pmd)
{
	pgtable_quicklist_free(pmd);
}

#define __pmd_free_tlb(tlb, pmd)	pmd_free(pmd)

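/*
 * User PTE pages are handed around as struct page pointers, kernel PTE
 * pages as kernel virtual addresses; either way the pmd entry ends up
 * holding the physical address of the PTE page.
 */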
static inline void
pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, struct page *pte)
{
	pmd_val(*pmd_entry) = page_to_phys(pte);
}

static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
{
	pmd_val(*pmd_entry) = __pa(pte);
}

static inline struct page *pte_alloc_one(struct mm_struct *mm,
					 unsigned long addr)
{
	return virt_to_page(pgtable_quicklist_alloc());
}

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long addr)
{
	return pgtable_quicklist_alloc();
}

static inline void pte_free(struct page *pte)
{
	pgtable_quicklist_free(page_address(pte));
}

static inline void pte_free_kernel(pte_t * pte)
{
	pgtable_quicklist_free(pte);
}

#define __pte_free_tlb(tlb, pte)	pte_free(pte)

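/*
 * check_pgt_cache(), implemented elsewhere in the ia64 arch code,
 * trims quicklists that have grown too large.
 */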
extern void check_pgt_cache(void);

#endif /* _ASM_IA64_PGALLOC_H */