/*
 * Source: Linux 4.18.10 tree (linux/fpc-iii.git)
 * Path:   arch/xtensa/include/asm/initialize_mmu.h
 * Blob:   42410f253597d5e772da628df4d042ed485cde4f
 */
/*
 * arch/xtensa/include/asm/initialize_mmu.h
 *
 * Initializes MMU:
 *
 *	For the new V3 MMU we remap the TLB from virtual == physical
 *	to the standard Linux mapping used in earlier MMU's.
 *
 *	With the MMU we also support a new configuration register that
 *	specifies how the S32C1I instruction operates with the cache
 *	controller.
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file "COPYING" in the main directory of
 * this archive for more details.
 *
 * Copyright (C) 2008 - 2012 Tensilica, Inc.
 *
 *   Marc Gauthier <marc@tensilica.com>
 *   Pete Delaney <piet@tensilica.com>
 */
#ifndef _XTENSA_INITIALIZE_MMU_H
#define _XTENSA_INITIALIZE_MMU_H

#include <asm/pgtable.h>
#include <asm/vectors.h>

/*
 * Cache-attribute values OR'd into the paddr when building TLB entries
 * below: both map writable + executable, differing only in cacheability
 * (bypass = uncached, writeback = cached).  Without a full page-table
 * MMU only a raw attribute value is available for writeback.
 */
#if XCHAL_HAVE_PTP_MMU
#define CA_BYPASS		(_PAGE_CA_BYPASS | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
#define CA_WRITEBACK		(_PAGE_CA_WB | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
#else
#define CA_WRITEBACK	(0x4)
#endif

/* Cores that don't define a spanning way use way 0. */
#ifndef XCHAL_SPANNING_WAY
#define XCHAL_SPANNING_WAY 0
#endif

#ifdef __ASSEMBLY__

/* Hardware version at which the ATOMCTL register first appeared (RC-2009.0). */
#define XTENSA_HWVERSION_RC_2009_0	230000
.macro	initialize_mmu

#if XCHAL_HAVE_S32C1I && (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
/*
 * We Have Atomic Operation Control (ATOMCTL) Register; Initialize it.
 * For details see Documentation/xtensa/atomctl.txt
 */
#if XCHAL_DCACHE_IS_COHERENT
	movi	a3, 0x25	/* For SMP/MX -- internal for writeback,
				 * RCW otherwise
				 */
#else
	movi	a3, 0x29	/* non-MX -- Most cores use Std Memory
				 * Controllers which usually can't use RCW
				 */
#endif
	wsr	a3, atomctl
#endif	/* XCHAL_HAVE_S32C1I &&
	 * (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
	 */

#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
/*
 * Have MMU v3
 */

#if !XCHAL_HAVE_VECBASE
# error "MMU v3 requires reloc vectors"
#endif

	/* No stack yet; a0 captures the physical PC via the call below. */
	movi	a1, 0
	_call0	1f
	_j	2f

	.align	4
1:	movi	a2, 0x10000000		/* overwritten in Step 1 below */

#if CONFIG_KERNEL_LOAD_ADDRESS < 0x40000000ul
#define TEMP_MAPPING_VADDR 0x40000000
#else
#define TEMP_MAPPING_VADDR 0x00000000
#endif

	/* Step 1: invalidate mapping at 0x40000000..0x5FFFFFFF. */

	movi	a2, TEMP_MAPPING_VADDR | XCHAL_SPANNING_WAY
	idtlb	a2
	iitlb	a2
	isync

	/* Step 2: map 0x40000000..0x47FFFFFF to paddr containing this code
	 * and jump to the new mapping.
	 */

	srli	a3, a0, 27		/* a3 = current paddr rounded down */
	slli	a3, a3, 27		/*      to a 128MB boundary ...    */
	addi	a3, a3, CA_BYPASS	/*      ... with bypass attributes */
	addi	a7, a2, 5 - XCHAL_SPANNING_WAY	/* same vpn, but in way 5 */
	wdtlb	a3, a7
	witlb	a3, a7
	isync

	slli	a4, a0, 5		/* a4 = a0 offset within the 128MB */
	srli	a4, a4, 5		/*      region (mask top 5 bits)   */
	addi	a5, a2, -XCHAL_SPANNING_WAY	/* a5 = TEMP_MAPPING_VADDR */
	add	a4, a4, a5		/* continue at same PC, new vaddr  */
	jx	a4

	/* Step 3: unmap everything other than current area.
	 *	   Start at 0x60000000, wrap around, and end with 0x20000000
	 */
2:	movi	a4, 0x20000000		/* step: one 512MB region at a time */
	add	a5, a2, a4
3:	idtlb	a5
	iitlb	a5
	add	a5, a5, a4
	bne	a5, a2, 3b		/* stop when wrapped back to a2 */

	/* Step 4: Setup MMU with the requested static mappings. */

	movi	a6, 0x01000000		/* way-6 page size select --
					 * NOTE(review): presumably 256MB
					 * pages, confirm vs. ISA manual */
	wsr	a6, ITLBCFG
	wsr	a6, DTLBCFG
	isync

	movi	a5, XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_TLB_WAY
	movi	a4, XCHAL_KSEG_PADDR + CA_WRITEBACK
	wdtlb	a4, a5
	witlb	a4, a5

	movi	a5, XCHAL_KSEG_BYPASS_VADDR + XCHAL_KSEG_TLB_WAY
	movi	a4, XCHAL_KSEG_PADDR + CA_BYPASS
	wdtlb	a4, a5
	witlb	a4, a5

#ifdef CONFIG_XTENSA_KSEG_512M
	/* Second 256MB half of the 512MB kseg mapping. */
	movi	a5, XCHAL_KSEG_CACHED_VADDR + 0x10000000 + XCHAL_KSEG_TLB_WAY
	movi	a4, XCHAL_KSEG_PADDR + 0x10000000 + CA_WRITEBACK
	wdtlb	a4, a5
	witlb	a4, a5

	movi	a5, XCHAL_KSEG_BYPASS_VADDR + 0x10000000 + XCHAL_KSEG_TLB_WAY
	movi	a4, XCHAL_KSEG_PADDR + 0x10000000 + CA_BYPASS
	wdtlb	a4, a5
	witlb	a4, a5
#endif

	movi	a5, XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_TLB_WAY
	movi	a4, XCHAL_KIO_DEFAULT_PADDR + CA_WRITEBACK
	wdtlb	a4, a5
	witlb	a4, a5

	movi	a5, XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_TLB_WAY
	movi	a4, XCHAL_KIO_DEFAULT_PADDR + CA_BYPASS
	wdtlb	a4, a5
	witlb	a4, a5

	isync

	/* Jump to self, using final mappings. */
	movi	a4, 1f
	jx	a4

1:
	/* Step 5: remove temporary mapping. */
	idtlb	a7
	iitlb	a7
	isync

	movi	a0, 0
	wsr	a0, ptevaddr
	rsync

#endif /* defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU &&
	  XCHAL_HAVE_SPANNING_WAY */

#if !defined(CONFIG_MMU) && XCHAL_HAVE_TLBS && \
		(XCHAL_DCACHE_SIZE || XCHAL_ICACHE_SIZE)
	/* Enable data and instruction cache in the DEFAULT_MEMORY region
	 * if the processor has DTLB and ITLB.
	 */

	movi	a5, PLATFORM_DEFAULT_MEM_START | XCHAL_SPANNING_WAY
	movi	a6, ~_PAGE_ATTRIB_MASK	/* mask to strip old attributes */
	movi	a7, CA_WRITEBACK	/* new attribute bits to set */
	movi	a8, 0x20000000		/* step: one 512MB region */
	movi	a9, PLATFORM_DEFAULT_MEM_SIZE

	j	2f			/* don't decrement on first pass */
1:
	sub	a9, a9, a8		/* a9 = bytes still to cover */
2:
#if XCHAL_DCACHE_SIZE
	rdtlb1	a3, a5			/* rewrite DTLB entry with */
	and	a3, a3, a6		/* writeback attributes    */
	or	a3, a3, a7
	wdtlb	a3, a5
#endif
#if XCHAL_ICACHE_SIZE
	ritlb1	a4, a5			/* likewise for the ITLB entry */
	and	a4, a4, a6
	or	a4, a4, a7
	witlb	a4, a5
#endif
	add	a5, a5, a8		/* next 512MB region */
	bltu	a8, a9, 1b

#endif

	.endm

#endif /*__ASSEMBLY__*/

#endif /* _XTENSA_INITIALIZE_MMU_H */