/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PARISC_PAGE_H
#define _PARISC_PAGE_H
#include <linux/const.h>

#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA

#ifndef __ASSEMBLY__
#include <asm/types.h>
#include <asm/cache.h>

#define clear_page(page) clear_page_asm((void *)(page))
#define copy_page(to, from) copy_page_asm((void *)(to), (void *)(from))
struct vm_area_struct;

void clear_page_asm(void *page);
void copy_page_asm(void *to, void *from);
#define clear_user_page(vto, vaddr, page) clear_page_asm(vto)
void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr,
		struct vm_area_struct *vma);
#define __HAVE_ARCH_COPY_USER_HIGHPAGE
/*
 * These are used to make use of C type-checking..
 */
#define STRICT_MM_TYPECHECKS
#ifdef STRICT_MM_TYPECHECKS
typedef struct { unsigned long pte; } pte_t; /* either 32 or 64bit */

/* NOTE: even on 64 bits, these entries are __u32 because we allocate
 * the pmd and pgd in ZONE_DMA (i.e. under 4GB) */
typedef struct { __u32 pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
#if CONFIG_PGTABLE_LEVELS == 3
typedef struct { __u32 pmd; } pmd_t;
#define __pmd(x) ((pmd_t) { (x) } )
/* pXd_val() do not work as lvalues, so make sure we don't use them as such. */
#define pmd_val(x) ((x).pmd + 0)
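/* The "+ 0" makes the expansion an rvalue, so an accidental
 * "pmd_val(x) = y" fails to compile. */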
#endif

#define pte_val(x) ((x).pte)
#define pgd_val(x) ((x).pgd + 0)
#define pgprot_val(x) ((x).pgprot)

#define __pte(x) ((pte_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } )
#else
/*
 * .. while these make it easier on the compiler
 */
typedef unsigned long pte_t;
#if CONFIG_PGTABLE_LEVELS == 3
#define pmd_val(x) (x)
#endif
typedef unsigned long pgprot_t;
#define pte_val(x) (x)
#define pgd_val(x) (x)
#define pgprot_val(x) (x)

#define __pgprot(x) (x)
#endif /* STRICT_MM_TYPECHECKS */
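/*
 * With STRICT_MM_TYPECHECKS the struct wrappers above give real type
 * safety: a pte_t cannot silently be mixed up with a pgd_t or a bare
 * unsigned long.  Illustrative sketch:
 *
 *	pte_t pte = __pte(0x1000);
 *	unsigned long raw = pte_val(pte);	-- unwrap explicitly
 *	pgd_t pgd = pte;			-- rejected by the compiler
 */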
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
#if CONFIG_PGTABLE_LEVELS == 3
#define set_pud(pudptr, pudval) (*(pudptr) = (pudval))
#endif
typedef struct page *pgtable_t;
typedef struct __physmem_range {
	unsigned long start_pfn;
	unsigned long pages;	/* PAGE_SIZE pages */
} physmem_range_t;
extern physmem_range_t pmem_ranges[];
extern int npmem_ranges;
#endif /* !__ASSEMBLY__ */
/* WARNING: The definitions below must match exactly to sizeof(pte_t),
 * pmd_t and pgd_t above. */
#ifdef CONFIG_64BIT
#define BITS_PER_PTE_ENTRY	3
#define BITS_PER_PMD_ENTRY	2
#define BITS_PER_PGD_ENTRY	2
#else
#define BITS_PER_PTE_ENTRY	2
#define BITS_PER_PMD_ENTRY	2
#define BITS_PER_PGD_ENTRY	2
#endif
#define PGD_ENTRY_SIZE	(1UL << BITS_PER_PGD_ENTRY)
#define PMD_ENTRY_SIZE	(1UL << BITS_PER_PMD_ENTRY)
#define PTE_ENTRY_SIZE	(1UL << BITS_PER_PTE_ENTRY)
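/*
 * Example: with BITS_PER_PTE_ENTRY = 3 (the 64-bit case above),
 * PTE_ENTRY_SIZE is 1UL << 3 = 8 bytes, i.e. sizeof(unsigned long)
 * there; a value of 2 gives the 4-byte entries used for the __u32
 * pmd/pgd entries.
 */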
#define LINUX_GATEWAY_SPACE	0
/* This governs the relationship between virtual and physical addresses.
 * If you alter it, make sure to take care of our various fixed mapping
 * segments in fixmap.h */
#ifdef CONFIG_64BIT
#define __PAGE_OFFSET_DEFAULT	(0x40000000)	/* 1GB */
#else
#define __PAGE_OFFSET_DEFAULT	(0x10000000)	/* 256MB */
#endif
#if defined(BOOTLOADER)
#define __PAGE_OFFSET	(0)	/* bootloader uses physical addresses */
#else
#define __PAGE_OFFSET	__PAGE_OFFSET_DEFAULT
#endif /* BOOTLOADER */
#define PAGE_OFFSET	((unsigned long)__PAGE_OFFSET)

/* The size of the gateway page (we leave lots of room for expansion) */
#define GATEWAY_PAGE_SIZE	0x4000
/* The start of the actual kernel binary---used in vmlinux.lds.S
 * Leave some space after __PAGE_OFFSET for detecting kernel null
 * pointer dereferences. */
#define KERNEL_BINARY_TEXT_START	(__PAGE_OFFSET + 0x100000)
/* These macros don't work for 64-bit C code -- don't allow in C at all */
#ifdef __ASSEMBLY__
# define PA(x)	((x)-__PAGE_OFFSET)
# define VA(x)	((x)+__PAGE_OFFSET)
#endif
#define __pa(x)	((unsigned long)(x)-PAGE_OFFSET)
#define __va(x)	((void *)((unsigned long)(x)+PAGE_OFFSET))
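/*
 * Example: with the 32-bit default __PAGE_OFFSET of 0x10000000,
 * __pa(0x10100000) yields 0x00100000 and __va(0x00100000) yields a
 * pointer to 0x10100000 -- the kernel mapping is a fixed linear offset.
 */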
#ifdef CONFIG_HUGETLB_PAGE
#define HPAGE_SHIFT	PMD_SHIFT /* fixed for transparent huge pages */
#define HPAGE_SIZE	((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK	(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
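/*
 * A huge page therefore spans 1UL << HUGETLB_PAGE_ORDER base pages,
 * i.e. HPAGE_SIZE / PAGE_SIZE of them, since HPAGE_SHIFT is tied to
 * PMD_SHIFT.
 */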
#if defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB)
# define REAL_HPAGE_SHIFT	20 /* 20 = 1MB */
# define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_1M
#elif !defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB)
# define REAL_HPAGE_SHIFT	22 /* 22 = 4MB */
# define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_4M
#else
# define REAL_HPAGE_SHIFT	24 /* 24 = 16MB */
# define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_16M
#endif
#endif /* CONFIG_HUGETLB_PAGE */
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
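/*
 * Both helpers go through __pa(), so they are only valid for addresses
 * in the kernel's linear mapping, not for vmalloc or user addresses.
 */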
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#define PAGE0	((struct zeropage *)absolute_pointer(__PAGE_OFFSET))
/* DEFINITION OF THE ZERO-PAGE (PAG0) */
/* based on work by Jason Eckhardt (jason@equator.com) */

#endif /* _PARISC_PAGE_H */