/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Page management definitions for the Hexagon architecture
 *
 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_PAGE_H
#define _ASM_PAGE_H

#include <linux/const.h>

/* This is probably not the most graceful way to handle this. */

#ifdef CONFIG_PAGE_SIZE_4KB
#define PAGE_SHIFT 12
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_4KB
#endif

#ifdef CONFIG_PAGE_SIZE_16KB
#define PAGE_SHIFT 14
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_16KB
#endif

#ifdef CONFIG_PAGE_SIZE_64KB
#define PAGE_SHIFT 16
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_64KB
#endif

#ifdef CONFIG_PAGE_SIZE_256KB
#define PAGE_SHIFT 18
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_256KB
#endif

#ifdef CONFIG_PAGE_SIZE_1MB
#define PAGE_SHIFT 20
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_1MB
#endif
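
/*
 * For illustration: each PAGE_SHIFT above is log2 of the configured
 * page size, so e.g. CONFIG_PAGE_SIZE_16KB yields
 * PAGE_SIZE = 1UL << 14 = 16384 bytes.  Exactly one of the
 * CONFIG_PAGE_SIZE_* options must be selected, or PAGE_SHIFT never
 * gets defined at all.
 */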

/*
 * These should be defined in hugetlb.h, but apparently not.
 * "Huge" for us should be 4MB or 16MB, which are both represented
 * in L1 PTE's.  Right now, it's set up for 4MB.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define HPAGE_SHIFT 22
#define HPAGE_SIZE (1UL << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE-1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT-PAGE_SHIFT)
#define HVM_HUGEPAGE_SIZE 0x5
#endif
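
/*
 * Example of the math above: with CONFIG_PAGE_SIZE_4KB (PAGE_SHIFT
 * == 12), HPAGE_SHIFT == 22 gives 4MB huge pages, and
 * HUGETLB_PAGE_ORDER is 22 - 12 = 10, i.e. one huge page spans
 * 2^10 == 1024 base pages.
 */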

#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~((1 << PAGE_SHIFT) - 1))
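
/*
 * Worked example: in a 4KB configuration (PAGE_SHIFT == 12),
 * PAGE_SIZE == 0x1000 and PAGE_MASK == 0xfffff000 on this 32-bit
 * architecture, so (addr & PAGE_MASK) rounds addr down to its page
 * boundary and (addr & ~PAGE_MASK) is the offset within the page.
 */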

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

/*
 * This is for PFN_DOWN, which mm.h needs.  Seems the right place to pull it in.
 */
#include <linux/pfn.h>

/*
 * We implement a two-level architecture-specific page table structure.
 * Null intermediate page table level (pmd, pud) definitions will come from
 * asm-generic/pagetable-nopmd.h and asm-generic/pagetable-nopud.h.
 */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct page *pgtable_t;

#define pte_val(x)     ((x).pte)
#define pgd_val(x)     ((x).pgd)
#define pgprot_val(x)  ((x).pgprot)
#define __pte(x)       ((pte_t) { (x) })
#define __pgd(x)       ((pgd_t) { (x) })
#define __pgprot(x)    ((pgprot_t) { (x) })
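
/*
 * The struct wrappers above exist purely for type safety: a pte_t
 * can't be silently mixed up with a pgd_t or a plain integer, while
 * the accessors compile away entirely.  E.g. the round trip
 *
 *	pte_val(__pte(0x1000)) == 0x1000
 *
 * always holds.
 */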

/*
 * We need a __pa and a __va routine for kernel space.
 * MIPS says they're only used during mem_init.
 * Also, check if we need a PHYS_OFFSET.
 */
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
#define __va(x) ((void *)((unsigned long)(x) - PHYS_OFFSET + PAGE_OFFSET))
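
/*
 * Sketch of the linear-map arithmetic, with illustrative values only
 * (the real PAGE_OFFSET and PHYS_OFFSET come from asm/mem-layout.h):
 * if PAGE_OFFSET were 0xc0000000 and PHYS_OFFSET were 0, then
 *
 *	__pa((void *)0xc0001000) == 0x00001000
 *	__va(0x00001000)         == (void *)0xc0001000
 *
 * i.e. the two are inverses over the kernel's direct mapping.
 */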

/* The "page frame" descriptor is defined in linux/mm.h */
struct page;

/* Returns page frame descriptor for virtual address. */
#define virt_to_page(kaddr) pfn_to_page(PFN_DOWN(__pa(kaddr)))

/* Default vm area behavior is non-executable. */
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC

#define pfn_valid(pfn) ((pfn) < max_mapnr)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

/* Need to not use a define for linesize; may move this to another file. */
static inline void clear_page(void *page)
{
	/* This can only be done on pages with L1 WB cache */
	asm volatile(
		"	loop0(1f,%1);\n"
		"1:	{ dczeroa(%0);\n"
		"	  %0 = add(%0,#32); }:endloop0\n"
		: "+r" (page)
		: "r" (PAGE_SIZE/32)
		: "lc0", "sa0", "memory"
	);
}

#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)

/*
 * Under the assumption that the kernel always "sees" the user map...
 */
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)

/*
 * page_to_phys - convert page to physical address
 * @page - pointer to page entry in mem_map
 */
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)

#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)

#define page_to_virt(page) __va(page_to_phys(page))
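
/*
 * Example of the conversions above, with an illustrative physical
 * address: with 4KB pages, a kernel virtual address kva for which
 * __pa(kva) == 0x12345000 gives virt_to_pfn(kva) == 0x12345, and
 * pfn_to_virt(0x12345) maps back to kva.
 */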

/*
 * For port to Hexagon Virtual Machine, MAYBE we check for attempts
 * to reference reserved HVM space, but in any case, the VM will be
 * protected.
 */
#define kern_addr_valid(addr) (1)

#include <asm/mem-layout.h>
#include <asm-generic/memory_model.h>
/* XXX Todo: implement assembly-optimized version of getorder. */
#include <asm-generic/getorder.h>

#endif /* ifndef __ASSEMBLY__ */
#endif /* ifdef __KERNEL__ */

#endif /* _ASM_PAGE_H */