/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_PGALLOC_H
#define _ASM_TILE_PGALLOC_H

#include <linux/threads.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/fixmap.h>
#include <hv/hypervisor.h>

/* Bits for the size of the second-level page table. */
#define L2_KERNEL_PGTABLE_SHIFT \
  (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL + HV_LOG2_PTE_SIZE)
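
/*
 * Worked example (illustrative numbers only; the real values come
 * from <hv/hypervisor.h>): with 16 MB huge pages (log2 = 24), 4 KB
 * small pages (log2 = 12), and 8-byte PTEs (log2 = 3), the shift is
 * 24 - 12 + 3 = 15, so a kernel L2 page table takes 2^15 = 32 KB.
 */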

/* We currently allocate user L2 page tables by page (unlike kernel L2s). */
#if L2_KERNEL_PGTABLE_SHIFT < HV_LOG2_PAGE_SIZE_SMALL
#define L2_USER_PGTABLE_SHIFT HV_LOG2_PAGE_SIZE_SMALL
#else
#define L2_USER_PGTABLE_SHIFT L2_KERNEL_PGTABLE_SHIFT
#endif

/* How many pages do we need, as an "order", for a user L2 page table? */
#define L2_USER_PGTABLE_ORDER (L2_USER_PGTABLE_SHIFT - HV_LOG2_PAGE_SIZE_SMALL)

/* How big is a kernel L2 page table? */
#define L2_KERNEL_PGTABLE_SIZE (1 << L2_KERNEL_PGTABLE_SHIFT)
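
/*
 * Continuing the illustrative numbers above: 15 is not less than the
 * small-page shift of 12, so L2_USER_PGTABLE_SHIFT is 15 as well,
 * giving L2_USER_PGTABLE_ORDER = 15 - 12 = 3, i.e. a user L2 page
 * table is an order-3 allocation spanning eight small pages.
 */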

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	set_pte(&pmdp->pud.pgd, pmd.pud.pgd);
}

static inline void pmd_populate_kernel(struct mm_struct *mm,
				       pmd_t *pmd, pte_t *ptep)
{
	set_pmd(pmd, ptfn_pmd(__pa(ptep) >> HV_LOG2_PAGE_TABLE_ALIGN,
			      __pgprot(_PAGE_PRESENT)));
}
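
/*
 * Sketch of a hypothetical caller (not from this file): the pmd does
 * not store a byte address but a "page table frame number", i.e. the
 * pte page's physical address shifted down by the hypervisor's page
 * table alignment:
 *
 *	pte_t *ptep = pte_alloc_one_kernel(mm, addr);
 *	pmd_populate_kernel(mm, pmd, ptep);
 */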

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				pgtable_t page)
{
	set_pmd(pmd, ptfn_pmd(HV_PFN_TO_PTFN(page_to_pfn(page)),
			      __pgprot(_PAGE_PRESENT)));
}

/*
 * Allocate and free page tables.
 */

extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address);
extern void pte_free(struct mm_struct *mm, struct page *pte);

#define pmd_pgtable(pmd) pmd_page(pmd)

static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return pfn_to_kaddr(page_to_pfn(pte_alloc_one(mm, address)));
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
	pte_free(mm, virt_to_page(pte));
}
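
/*
 * Illustrative pairing (hypothetical caller): pte_alloc_one_kernel()
 * returns the lowmem virtual address of the new pte page, so it must
 * be released via pte_free_kernel(), which maps back to the struct
 * page with virt_to_page():
 *
 *	pte_t *ptep = pte_alloc_one_kernel(mm, addr);
 *	...
 *	pte_free_kernel(mm, ptep);
 */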

extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
			   unsigned long address);

#define check_pgt_cache()	do { } while (0)

/*
 * Get the small-page pte_t lowmem entry for a given pfn.
 * This may or may not be in use, depending on whether the initial
 * huge-page entry for the page has already been shattered.
 */
pte_t *get_prealloc_pte(unsigned long pfn);

/* During init, we can shatter kernel huge pages if needed. */
void shatter_pmd(pmd_t *pmd);

/* After init, a more complex technique is required. */
void shatter_huge_page(unsigned long addr);
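
/*
 * Sketch (hypothetical, assuming HPAGE_MASK from <asm/page.h>): to
 * remap one small page inside a kernel huge-page mapping after init,
 * first break the huge mapping up so small-page ptes exist for it:
 *
 *	shatter_huge_page(addr & HPAGE_MASK);
 */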

#ifdef __tilegx__
/* We share a single page allocator for both L1 and L2 page tables. */
#if HV_L1_SIZE != HV_L2_SIZE
# error Rework assumption that L1 and L2 page tables are same size.
#endif
#define L1_USER_PGTABLE_ORDER L2_USER_PGTABLE_ORDER
#define pud_populate(mm, pud, pmd) \
  pmd_populate_kernel((mm), (pmd_t *)(pud), (pte_t *)(pmd))
#define pmd_alloc_one(mm, addr) \
  ((pmd_t *)page_to_virt(pte_alloc_one((mm), (addr))))
#define pmd_free(mm, pmdp) \
  pte_free((mm), virt_to_page(pmdp))
#define __pmd_free_tlb(tlb, pmdp, address) \
  __pte_free_tlb((tlb), virt_to_page(pmdp), (address))
#endif
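
/*
 * Illustrative pairing for the tilegx macros above (hypothetical
 * caller): since L1 and L2 tables share one allocator, a pmd page
 * comes from, and returns to, the pte-page pool:
 *
 *	pmd_t *pmd = pmd_alloc_one(mm, addr);
 *	...
 *	pmd_free(mm, pmd);
 */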

#endif /* _ASM_TILE_PGALLOC_H */