/*
 * arch/xtensa/include/asm/initialize_mmu.h
 *
 * Initializes MMU:
 *
 *	For the new V3 MMU we remap the TLB from virtual == physical
 *	to the standard Linux mapping used in earlier MMUs.
 *
 *	For the MMU we also support a new configuration register that
 *	specifies how the S32C1I instruction operates with the cache
 *	controller.
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file "COPYING" in the main directory of
 * this archive for more details.
 *
 * Copyright (C) 2008 - 2012 Tensilica, Inc.
 *
 *   Marc Gauthier <marc@tensilica.com>
 *   Pete Delaney <piet@tensilica.com>
 */
#ifndef _XTENSA_INITIALIZE_MMU_H
#define _XTENSA_INITIALIZE_MMU_H

#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/vectors.h>
#if XCHAL_HAVE_PTP_MMU
#define CA_BYPASS	(_PAGE_CA_BYPASS | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
#define CA_WRITEBACK	(_PAGE_CA_WB | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
#else
#define CA_WRITEBACK	(0x4)
#endif
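/*
 * Note: with the full (PTP) MMU, CA_BYPASS/CA_WRITEBACK are PTE-style
 * attribute bits ORed into the TLB entries written below; without it,
 * 0x4 is the region-protection cache-attribute code for writeback.
 */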
#ifdef __ASSEMBLY__

#define XTENSA_HWVERSION_RC_2009_0 230000

	.macro	initialize_mmu

#if XCHAL_HAVE_S32C1I && (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
/*
 * We have an Atomic Operation Control (ATOMCTL) register; initialize it.
 * For details see Documentation/xtensa/atomctl.rst
 */
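/*
 * Summary (atomctl.rst has the authoritative table): ATOMCTL holds a
 * 2-bit field per access type (bypass, writethrough, writeback) that
 * selects how S32C1I is carried out there: take an exception, issue an
 * RCW (read-compare-write) bus transaction, or perform the compare
 * internally in the data cache. 0x25 and 0x29 below are the values the
 * documentation recommends for MX and non-MX configurations.
 */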
#if XCHAL_DCACHE_IS_COHERENT
	movi	a3, 0x25	/* For SMP/MX -- internal for writeback,
				 * RCW otherwise
				 */
#else
	movi	a3, 0x29	/* non-MX -- most cores use standard memory
				 * controllers which usually can't use RCW
				 */
#endif
	wsr	a3, atomctl
#endif	/* XCHAL_HAVE_S32C1I &&
	 * (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
	 */
#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
/*
 * Have MMU v3
 */

#if !XCHAL_HAVE_VECBASE
# error "MMU v3 requires reloc vectors"
#endif
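/*
 * Overview: out of reset the spanning way provides a virtual ==
 * physical mapping. Steps 1-5 below move execution into a temporary
 * mapping, tear down the identity mapping, install the final KSEG/KIO
 * mappings, and continue at the kernel's linked virtual address.
 * _call0 leaves the physical address of label 1 in a0; all of the
 * physical-address arithmetic below derives from it.
 */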
	movi	a1, 0
	_call0	1f
	_j	2f

	.align	4
1:	movi	a2, 0x10000000
#if CONFIG_KERNEL_LOAD_ADDRESS < 0x40000000ul
#define TEMP_MAPPING_VADDR 0x40000000
#else
#define TEMP_MAPPING_VADDR 0x00000000
#endif
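/* The temporary window must not overlap the 512MB region the kernel
 * was loaded into, so the mapping created in step 2 cannot collide
 * with the identity-mapped code that is still executing.
 */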
	/* Step 1: invalidate mapping at TEMP_MAPPING_VADDR. */

	movi	a2, TEMP_MAPPING_VADDR | XCHAL_SPANNING_WAY
	idtlb	a2
	iitlb	a2
	isync
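	/* The TLB instructions take the way index in the low bits of
	 * the address operand, so a2 above names the spanning-way entry
	 * covering TEMP_MAPPING_VADDR in each TLB.
	 */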
	/* Step 2: map the 128MB region containing this code at
	 * TEMP_MAPPING_VADDR and jump to the new mapping.
	 */

	srli	a3, a0, 27
	slli	a3, a3, 27
	addi	a3, a3, CA_BYPASS
	addi	a7, a2, 5 - XCHAL_SPANNING_WAY
	wdtlb	a3, a7
	witlb	a3, a7
	isync
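	/* a0 holds the physical return address from _call0. Rounding it
	 * down to a 128MB boundary gives the physical base of this
	 * code; it is entered under TEMP_MAPPING_VADDR in way 5 (a
	 * 128MB-page way on this MMU) with bypass attributes. a7 keeps
	 * the entry's vaddr/way handle so step 5 can remove it.
	 */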
	slli	a4, a0, 5
	srli	a4, a4, 5
	addi	a5, a2, -XCHAL_SPANNING_WAY
	add	a4, a4, a5
	jx	a4
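	/* Clearing the top five bits of a0 leaves its offset within the
	 * 128MB region; adding TEMP_MAPPING_VADDR (a5) rebuilds the
	 * return address inside the temporary mapping, so jx lands on
	 * the _j 2f above, now executing from the new mapping.
	 */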
	/* Step 3: unmap everything other than the current area.
	 *	Start at 0x60000000, wrap around, and end with 0x20000000.
	 */
2:	movi	a4, 0x20000000
	add	a5, a2, a4
3:	idtlb	a5
	iitlb	a5
	add	a5, a5, a4
	bne	a5, a2, 3b
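	/* Each pass invalidates the spanning-way entry of one 512MB
	 * region in both TLBs, stepping by 0x20000000 and wrapping
	 * modulo 4GB until a5 returns to a2, so only the region this
	 * code runs from stays mapped.
	 */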
	/* Step 4: Set up MMU with the requested static mappings. */

	movi	a6, 0x01000000
	wsr	a6, ITLBCFG
	wsr	a6, DTLBCFG
	isync
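	/* The 0x01000000 written to ITLBCFG/DTLBCFG above (bit 24)
	 * appears to select the 256MB page size for way 6, so the
	 * cached and bypass views that land in way 6 each occupy their
	 * own entry.
	 */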
	movi	a5, XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_TLB_WAY
	movi	a4, XCHAL_KSEG_PADDR + CA_WRITEBACK
	wdtlb	a4, a5
	witlb	a4, a5

	movi	a5, XCHAL_KSEG_BYPASS_VADDR + XCHAL_KSEG_TLB_WAY
	movi	a4, XCHAL_KSEG_PADDR + CA_BYPASS
	wdtlb	a4, a5
	witlb	a4, a5
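	/* KSEG (and KIO below) get two views of the same physical
	 * range: a cached, writeback-mapped vaddr and an uncached
	 * bypass vaddr.
	 */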
#ifdef CONFIG_XTENSA_KSEG_512M
	movi	a5, XCHAL_KSEG_CACHED_VADDR + 0x10000000 + XCHAL_KSEG_TLB_WAY
	movi	a4, XCHAL_KSEG_PADDR + 0x10000000 + CA_WRITEBACK
	wdtlb	a4, a5
	witlb	a4, a5

	movi	a5, XCHAL_KSEG_BYPASS_VADDR + 0x10000000 + XCHAL_KSEG_TLB_WAY
	movi	a4, XCHAL_KSEG_PADDR + 0x10000000 + CA_BYPASS
	wdtlb	a4, a5
	witlb	a4, a5
#endif
	movi	a5, XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_TLB_WAY
	movi	a4, XCHAL_KIO_DEFAULT_PADDR + CA_WRITEBACK
	wdtlb	a4, a5
	witlb	a4, a5

	movi	a5, XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_TLB_WAY
	movi	a4, XCHAL_KIO_DEFAULT_PADDR + CA_BYPASS
	wdtlb	a4, a5
	witlb	a4, a5
	isync

	/* Jump to self, using final mappings. */
	movi	a4, 1f
	jx	a4
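	/* 1f resolves to the label's linked virtual address, so the
	 * jump lands in the final mapping installed in step 4.
	 */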
1:
	/* Step 5: remove temporary mapping. */
	idtlb	a7
	iitlb	a7
	isync
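	/* a7 still named the temporary way-5 entry from step 2. With it
	 * gone, give PTEVADDR a known initial value.
	 */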
	movi	a0, 0
	wsr	a0, ptevaddr
	rsync
#endif /* defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU &&
	  XCHAL_HAVE_SPANNING_WAY */

	.endm
	.macro	initialize_cacheattr

#if !defined(CONFIG_MMU) && (XCHAL_HAVE_TLBS || XCHAL_HAVE_MPU)
#if CONFIG_MEMMAP_CACHEATTR == 0x22222222 && XCHAL_HAVE_PTP_MMU
#error Default MEMMAP_CACHEATTR of 0x22222222 does not work with full MMU.
#endif
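/*
 * CONFIG_MEMMAP_CACHEATTR packs eight nibbles, one cache-attribute
 * code per 512MB region of the address space; the least significant
 * nibble describes the lowest region.
 */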
#if XCHAL_HAVE_MPU
	__REFCONST
	.align	4
.Lattribute_table:
	.long 0x000000, 0x1fff00, 0x1ddf00, 0x1eef00
	.long 0x006600, 0x000000, 0x000000, 0x000000
	.long 0x000000, 0x000000, 0x000000, 0x000000
	.long 0x000000, 0x000000, 0x000000, 0x000000
	.previous
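	/* Each word translates one MEMMAP_CACHEATTR nibble value into
	 * an MPU memory-type/access-rights pattern for wptlb; the
	 * remaining nibble values are left zero.
	 */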
	movi	a3, .Lattribute_table
	movi	a4, CONFIG_MEMMAP_CACHEATTR
	movi	a5, 1
	movi	a6, XCHAL_MPU_ENTRIES
	movi	a10, 0x20000000
	movi	a11, -1
1:
	sub	a5, a5, a10
	extui	a8, a4, 28, 4
	beq	a8, a11, 2f
	addi	a6, a6, -1
	mov	a11, a8
2:
	addx4	a9, a8, a3
	l32i	a9, a9, 0
	or	a9, a9, a6
	wptlb	a9, a5
	slli	a4, a4, 4
	bgeu	a5, a10, 1b
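	/* The loop above walks the eight 512MB regions from the highest
	 * vaddr down (a5 wraps from 1 to 0xE0000001; bit 0 is the entry
	 * enable flag), consuming CONFIG_MEMMAP_CACHEATTR from its most
	 * significant nibble. A new MPU entry (index a6) is consumed
	 * only when the attribute differs from the previous region;
	 * rewriting the same entry with a lower start address extends
	 * it downward.
	 */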
#else
	movi	a5, XCHAL_SPANNING_WAY
	movi	a6, ~_PAGE_ATTRIB_MASK
	movi	a4, CONFIG_MEMMAP_CACHEATTR
	movi	a8, 0x20000000
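	/* For every 512MB region, patch the cache-attribute bits of the
	 * spanning-way entry in both TLBs: each xor/and/xor sequence
	 * merges the region's CONFIG_MEMMAP_CACHEATTR nibble into the
	 * entry while preserving all bits outside _PAGE_ATTRIB_MASK.
	 */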
1:
	rdtlb1	a3, a5
	xor	a3, a3, a4
	and	a3, a3, a6
	xor	a3, a3, a4
	wdtlb	a3, a5
	ritlb1	a3, a5
	xor	a3, a3, a4
	and	a3, a3, a6
	xor	a3, a3, a4
	witlb	a3, a5

	add	a5, a5, a8
	srli	a4, a4, 4
	bgeu	a5, a8, 1b
	isync
#endif
#endif

	.endm
#endif /*__ASSEMBLY__*/

#endif /* _XTENSA_INITIALIZE_MMU_H */