arch/metag/mm/mmu-meta1.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2005,2006,2007,2008,2009 Imagination Technologies
 *
 * Meta 1 MMU handling code.
 *
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/io.h>

#include <asm/mmu.h>

#define DM3_BASE (LINSYSDIRECT_BASE + (MMCU_DIRECTMAPn_ADDR_SCALE * 3))
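
/*
 * Direct map region 3 is used as a movable window onto physical memory:
 * map_addr() below repoints its base register (MMCU_DIRECTMAP3_ADDR) at
 * whichever MMCU_DIRECTMAPn_ADDR_SCALE-sized area contains the physical
 * address being accessed.
 */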

/*
 * This contains the physical address of the top level 2k pgd table.
 */
static unsigned long mmu_base_phys;

/*
 * Given a physical address, return a mapped virtual address that can be
 * used to access that location.
 * In practice, we use the DirectMap region to make this happen.
 */
static unsigned long map_addr(unsigned long phys)
{
	/* Physical base of the window currently mapped through DIRECTMAP3 */
	static unsigned long dm_base = 0xFFFFFFFF;
	int offset;

	offset = phys - dm_base;

	/* Are we in the current map range? */
	if ((offset < 0) || (offset >= MMCU_DIRECTMAPn_ADDR_SCALE)) {
		/* Calculate new DM area */
		dm_base = phys & ~(MMCU_DIRECTMAPn_ADDR_SCALE - 1);

		/* Actually map it in! */
		metag_out32(dm_base, MMCU_DIRECTMAP3_ADDR);

		/* And calculate how far into that area our reference is */
		offset = phys - dm_base;
	}

	return DM3_BASE + offset;
}

/*
 * Return the physical address of the base of our pgd table.
 */
static inline unsigned long __get_mmu_base(void)
{
	unsigned long base_phys;
	unsigned int stride;

	if (is_global_space(PAGE_OFFSET))
		stride = 4;
	else
		stride = hard_processor_id();	/* [0..3] */

	/*
	 * The per-thread 2k pgd tables sit 0x800 bytes apart; with stride 4
	 * the global table is taken to follow the four per-thread ones.
	 */
	base_phys = metag_in32(MMCU_TABLE_PHYS_ADDR);
	base_phys += (0x800 * stride);

	return base_phys;
}

/* Given a virtual address, return the virtual address of the relevant pgd */
static unsigned long pgd_entry_addr(unsigned long virt)
{
	unsigned long pgd_phys;
	unsigned long pgd_virt;

	if (!mmu_base_phys)
		mmu_base_phys = __get_mmu_base();

	/*
	 * Are we trying to map a global address? If so, then index
	 * the global pgd table instead of our local one.
	 */
	if (is_global_space(virt)) {
		/* Scale into 2gig map */
		virt &= ~0x80000000;
	}

	/* Base of the pgd table plus our 4MB entry, 4 bytes each */
	pgd_phys = mmu_base_phys + ((virt >> PGDIR_SHIFT) * 4);

	pgd_virt = map_addr(pgd_phys);

	return pgd_virt;
}

/* Given a virtual address, return the virtual address of the relevant pte */
static unsigned long pgtable_entry_addr(unsigned long virt)
{
	unsigned long pgtable_phys;
	unsigned long pgtable_virt, pte_virt;

	/* Find the physical address of the 4MB page table */
	pgtable_phys = metag_in32(pgd_entry_addr(virt)) & MMCU_ENTRY_ADDR_BITS;

	/* Map it to a virtual address */
	pgtable_virt = map_addr(pgtable_phys);

	/* And index into it for our pte */
	pte_virt = pgtable_virt + ((virt >> PAGE_SHIFT) & 0x3FF) * 4;

	return pte_virt;
}
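
/*
 * Read the raw first level (pgd) entry for this virtual address straight
 * from the hardware page tables.
 */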
unsigned long mmu_read_first_level_page(unsigned long vaddr)
{
	return metag_in32(pgd_entry_addr(vaddr));
}
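
/*
 * Read the raw second level (pte) entry for this virtual address straight
 * from the hardware page tables.
 */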
unsigned long mmu_read_second_level_page(unsigned long vaddr)
{
	return metag_in32(pgtable_entry_addr(vaddr));
}
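
/*
 * Return the virtual address of the base of the pgd table, computing and
 * caching it on first use.
 */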
unsigned long mmu_get_base(void)
{
	static unsigned long __base;

	/* Find the base of our MMU pgd table */
	if (!__base)
		__base = pgd_entry_addr(0);

	return __base;
}
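
/*
 * Boot-time setup: seed the kernel's own top level table (swapper_pg_dir)
 * from whatever mappings the MMU has already been given.
 */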
void __init mmu_init(unsigned long mem_end)
{
	unsigned long entry, addr;
	pgd_t *p_swapper_pg_dir;

	/*
	 * Copy any MMU pgd entries already in the MMU page tables over to
	 * our root init process (swapper_pg_dir) map. This map is then
	 * inherited by all other processes, which means all processes
	 * inherit a map of the kernel space.
	 */
	addr = PAGE_OFFSET;
	entry = pgd_index(PAGE_OFFSET);
	p_swapper_pg_dir = pgd_offset_k(0) + entry;

	while (addr <= META_MEMORY_LIMIT) {
		unsigned long pgd_entry;
		/* copy over the current MMU value */
		pgd_entry = mmu_read_first_level_page(addr);
		pgd_val(*p_swapper_pg_dir) = pgd_entry;

		p_swapper_pg_dir++;
		addr += PGDIR_SIZE;
	}
}
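
/*
 * Rough usage sketch (not taken from this file): resolving the physical
 * address behind a mapped virtual address with the helpers above, on the
 * assumption that MMCU_ENTRY_ADDR_BITS also masks the page frame address
 * out of a second level entry, and after checking the entry is actually
 * valid:
 *
 *	unsigned long pte  = mmu_read_second_level_page(vaddr);
 *	unsigned long phys = (pte & MMCU_ENTRY_ADDR_BITS) |
 *			     (vaddr & ~PAGE_MASK);
 */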