arch/ia64/mm/hugetlbpage.c
/*
 * IA-64 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002-2004 Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 2003-2004 Ken Chen <kenneth.w.chen@intel.com>
 *
 * Sep, 2003: add numa support
 * Feb, 2004: dynamic hugetlb page size via boot parameter
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/log2.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT;
EXPORT_SYMBOL(hpage_shift);
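
/*
 * Allocate (or find) the page-table entry that maps the huge page at
 * 'addr'.  The address is first rescaled with htlbpage_to_page() so the
 * hugetlb region is walked as if it were mapped with normal-sized
 * pages.  The 'sz' argument is unused here: ia64 has a single,
 * boot-time-selected huge page size (see hugetlb_setup_sz below).
 */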
pte_t *
huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
        unsigned long taddr = htlbpage_to_page(addr);
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, taddr);
        pud = pud_alloc(mm, pgd, taddr);
        if (pud) {
                pmd = pmd_alloc(mm, pud, taddr);
                if (pmd)
                        pte = pte_alloc_map(mm, pmd, taddr);
        }
        return pte;
}
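
/*
 * Non-allocating counterpart of huge_pte_alloc(): each level of the
 * walk is guarded by a *_present() check, and NULL is returned as soon
 * as a level is missing.
 */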
pte_t *
huge_pte_offset (struct mm_struct *mm, unsigned long addr)
{
        unsigned long taddr = htlbpage_to_page(addr);
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, taddr);
        if (pgd_present(*pgd)) {
                pud = pud_offset(pgd, taddr);
                if (pud_present(*pud)) {
                        pmd = pmd_offset(pud, taddr);
                        if (pmd_present(*pmd))
                                pte = pte_offset_map(pmd, taddr);
                }
        }

        return pte;
}
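
/* Huge PMD sharing is not implemented on ia64, so unsharing is a no-op. */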
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
        return 0;
}
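
/* Mark a huge pte present (_PAGE_P is the ia64 "present" bit). */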
#define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }

/*
 * Don't actually need to do any preparation, but need to make sure
 * the address is in the right region.
 */
int prepare_hugepage_range(struct file *file,
                        unsigned long addr, unsigned long len)
{
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (addr & ~HPAGE_MASK)
                return -EINVAL;
        if (REGION_NUMBER(addr) != RGN_HPAGE)
                return -EINVAL;

        return 0;
}
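
/*
 * Translate a virtual address in the hugetlb region into its struct
 * page.  The pte covers the whole huge page, so the index of the
 * constituent normal page, (addr & ~HPAGE_MASK) >> PAGE_SHIFT, is
 * added on top of pte_page().
 */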
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int write)
{
        struct page *page;
        pte_t *ptep;

        if (REGION_NUMBER(addr) != RGN_HPAGE)
                return ERR_PTR(-EINVAL);

        ptep = huge_pte_offset(mm, addr);
        if (!ptep || pte_none(*ptep))
                return NULL;
        page = pte_page(*ptep);
        page += ((addr & ~HPAGE_MASK) >> PAGE_SHIFT);
        return page;
}
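
/*
 * Huge pages on ia64 live in their own address region and are resolved
 * through follow_huge_addr() above, so the pmd/pud-based hooks used by
 * other architectures are left as stubs.
 */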
int pmd_huge(pmd_t pmd)
{
        return 0;
}

int pud_huge(pud_t pud)
{
        return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
{
        return NULL;
}

void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                        unsigned long addr, unsigned long end,
                        unsigned long floor, unsigned long ceiling)
{
        /*
         * This is called to free hugetlb page tables.
         *
         * The offset of these addresses from the base of the hugetlb
         * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that
         * the standard free_pgd_range will free the right page tables.
         *
         * If floor and ceiling are also in the hugetlb region, they
         * must likewise be scaled down; but if outside, left unchanged.
         */

        addr = htlbpage_to_page(addr);
        end  = htlbpage_to_page(end);
        if (REGION_NUMBER(floor) == RGN_HPAGE)
                floor = htlbpage_to_page(floor);
        if (REGION_NUMBER(ceiling) == RGN_HPAGE)
                ceiling = htlbpage_to_page(ceiling);

        free_pgd_range(tlb, addr, end, floor, ceiling);
}
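
/*
 * First-fit search for a free, HPAGE_SIZE-aligned range inside the
 * dedicated hugetlb region: walk the vma list upward from 'addr' until
 * a gap of at least 'len' bytes is found or RGN_MAP_LIMIT is exceeded.
 */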
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct vm_area_struct *vmm;

        if (len > RGN_MAP_LIMIT)
                return -ENOMEM;
        if (len & ~HPAGE_MASK)
                return -EINVAL;

        /* Handle MAP_FIXED */
        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        /* This code assumes that RGN_HPAGE != 0. */
        if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1)))
                addr = HPAGE_REGION_BASE;
        else
                addr = ALIGN(addr, HPAGE_SIZE);
        for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
                /* At this point:  (!vmm || addr < vmm->vm_end). */
                if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
                        return -ENOMEM;
                if (!vmm || (addr + len) <= vmm->vm_start)
                        return addr;
                addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
        }
}
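
/*
 * Parse the "hugepagesz=" boot parameter.  The requested size must be a
 * power of two that the CPU's TLB supports (PAL supplies the bitmask in
 * tr_pages), larger than the base page size, and below the buddy
 * allocator limit of PAGE_SIZE << MAX_ORDER.
 */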
static int __init hugetlb_setup_sz(char *str)
{
        u64 tr_pages;
        unsigned long long size;

        if (ia64_pal_vm_page_size(&tr_pages, NULL) != 0)
                /*
                 * shouldn't happen, but just in case.
                 */
                tr_pages = 0x15557000UL;

        size = memparse(str, &str);
        if (*str || !is_power_of_2(size) || !(tr_pages & size) ||
                size <= PAGE_SIZE ||
                size >= (1UL << PAGE_SHIFT << MAX_ORDER)) {
                printk(KERN_WARNING "Invalid huge page size specified\n");
                return 1;
        }

        hpage_shift = __ffs(size);
        /*
         * The boot cpu already executed ia64_mmu_init() with
         * HPAGE_SHIFT_DEFAULT; override the region register here with
         * the new page shift.
         */
        ia64_set_rr(HPAGE_REGION_BASE, hpage_shift << 2);
        return 0;
}
early_param("hugepagesz", hugetlb_setup_sz);
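
/*
 * Usage sketch (assumes the CPU reports 256M as a supported TLB page
 * size and MAX_ORDER was configured large enough): booting with
 * "hugepagesz=256M" makes memparse() return 1 << 28, hpage_shift
 * becomes 28, and the RGN_HPAGE region register is reprogrammed with
 * that preferred page size (the rr ps field sits at bits 7:2, hence
 * "hpage_shift << 2").
 */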