arch/ia64/mm/hugetlbpage.c
/*
 * IA-64 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002-2004 Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 2003-2004 Ken Chen <kenneth.w.chen@intel.com>
 *
 * Sep, 2003: add numa support
 * Feb, 2004: dynamic hugetlb page size via boot parameter
 */
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/log2.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT;
EXPORT_SYMBOL(hpage_shift);
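
/*
 * Overview (added commentary, not in the original source): on ia64,
 * huge-page mappings live in their own virtual region (RGN_HPAGE) and
 * are backed by ordinary page tables.  A huge-region address is first
 * scaled down by htlbpage_to_page() so the generic walkers operate at
 * normal PAGE_SIZE granularity.  huge_pte_alloc() below walks (and
 * allocates, if needed) the pgd/pud/pmd levels for such a scaled
 * address and returns its pte.
 */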
pte_t *
huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	pud = pud_alloc(mm, pgd, taddr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, taddr);
		if (pmd)
			pte = pte_alloc_map(mm, pmd, taddr);
	}
	return pte;
}
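
/*
 * Read-only counterpart of huge_pte_alloc(): walk the existing page
 * tables for a huge-region address and return the pte, or NULL if any
 * level is not present.
 */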
pte_t *
huge_pte_offset (struct mm_struct *mm, unsigned long addr)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, taddr);
		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, taddr);
			if (pmd_present(*pmd))
				pte = pte_offset_map(pmd, taddr);
		}
	}

	return pte;
}
#define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }
/*
 * Don't actually need to do any preparation, but need to make sure
 * the address is in the right region.
 */
int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return -EINVAL;

	return 0;
}
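
/*
 * Translate a user virtual address in the huge-page region into the
 * struct page backing it.  pte_page() yields the head page of the
 * huge page; the (addr & ~HPAGE_MASK) >> PAGE_SHIFT offset then
 * selects the constituent PAGE_SIZE page within it.
 */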
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int write)
{
	struct page *page;
	pte_t *ptep;

	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, addr);
	if (!ptep || pte_none(*ptep))
		return NULL;
	page = pte_page(*ptep);
	page += ((addr & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}
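
/*
 * Since huge pages are implemented here with a dedicated region and
 * scaled page tables rather than with leaf entries at the pmd or pud
 * level, no pmd or pud ever maps a huge page, so both predicates
 * simply return 0.
 */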
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	/*
	 * This is called to free hugetlb page tables.
	 *
	 * The offset of these addresses from the base of the hugetlb
	 * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that
	 * the standard free_pgd_range will free the right page tables.
	 *
	 * If floor and ceiling are also in the hugetlb region, they
	 * must likewise be scaled down; but if outside, left unchanged.
	 */

	addr = htlbpage_to_page(addr);
	end = htlbpage_to_page(end);
	if (REGION_NUMBER(floor) == RGN_HPAGE)
		floor = htlbpage_to_page(floor);
	if (REGION_NUMBER(ceiling) == RGN_HPAGE)
		ceiling = htlbpage_to_page(ceiling);

	free_pgd_range(tlb, addr, end, floor, ceiling);
}
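
/*
 * Pick an unmapped, HPAGE_SIZE-aligned range for a new huge-page
 * mapping.  The search is confined to the huge-page region: a hint
 * address outside RGN_HPAGE, or one that is not huge-page aligned, is
 * replaced by HPAGE_REGION_BASE, and the upper bound is the region's
 * map limit.
 */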
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct vm_unmapped_area_info info;

	if (len > RGN_MAP_LIMIT)
		return -ENOMEM;
	if (len & ~HPAGE_MASK)
		return -EINVAL;

	/* Handle MAP_FIXED */
	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	/* This code assumes that RGN_HPAGE != 0. */
	if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1)))
		addr = HPAGE_REGION_BASE;

	info.flags = 0;
	info.length = len;
	info.low_limit = addr;
	info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
	info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
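
/*
 * Parse the "hugepagesz=" boot parameter.  The requested size must be
 * a power of two, larger than PAGE_SIZE, smaller than MAX_ORDER pages
 * (the buddy-allocator limit), and one of the TLB insertion page
 * sizes that PAL reports as supported (the tr_pages bitmask).
 */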
static int __init hugetlb_setup_sz(char *str)
{
	u64 tr_pages;
	unsigned long long size;

	if (ia64_pal_vm_page_size(&tr_pages, NULL) != 0)
		/*
		 * shouldn't happen, but just in case.
		 */
		tr_pages = 0x15557000UL;

	size = memparse(str, &str);
	if (*str || !is_power_of_2(size) || !(tr_pages & size) ||
		size <= PAGE_SIZE ||
		size >= (1UL << PAGE_SHIFT << MAX_ORDER)) {
		printk(KERN_WARNING "Invalid huge page size specified\n");
		return 1;
	}

	hpage_shift = __ffs(size);
	/*
	 * The boot CPU has already executed ia64_mmu_init() and programmed
	 * the huge-page region with HPAGE_SHIFT_DEFAULT; override the
	 * region register here with the new page shift.
	 */
	ia64_set_rr(HPAGE_REGION_BASE, hpage_shift << 2);
	return 0;
}
early_param("hugepagesz", hugetlb_setup_sz);
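
/*
 * Usage sketch (values are illustrative; the sizes actually accepted
 * depend on what PAL reports for the running CPU): booting with
 * "hugepagesz=256M" would set hpage_shift to 28, so the huge-page
 * region is remapped with 256MB pages before hugetlbfs is used.
 */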