 * arch/xtensa/include/asm/initialize_mmu.h
 *
 * For the new V3 MMU we remap the TLB from virtual == physical
 * to the standard Linux mapping used in earlier MMU's.
 *
 * For the MMU we also support a new configuration register that
 * specifies how the S32C1I instruction operates with the cache
 * controller.
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file "COPYING" in the main directory of
 * this archive for more details.
 *
 * Copyright (C) 2008 - 2012 Tensilica, Inc.
 *
 *   Marc Gauthier <marc@tensilica.com>
 *   Pete Delaney <piet@tensilica.com>
 */
#ifndef _XTENSA_INITIALIZE_MMU_H
#define _XTENSA_INITIALIZE_MMU_H

#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/vectors.h>

#if XCHAL_HAVE_PTP_MMU
#define CA_BYPASS	(_PAGE_CA_BYPASS | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
#define CA_WRITEBACK	(_PAGE_CA_WB | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
#else
#define CA_WRITEBACK	(0x4)
#endif

#ifdef __ASSEMBLY__

#define XTENSA_HWVERSION_RC_2009_0 230000
#if XCHAL_HAVE_S32C1I && (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
/*
 * We have an Atomic Operation Control (ATOMCTL) register; initialize it.
 * For details see Documentation/xtensa/atomctl.rst
 */
#if XCHAL_DCACHE_IS_COHERENT
	movi	a3, 0x25	/* For SMP/MX -- internal for writeback,
				 * RCW otherwise
				 */
	movi	a3, 0x29	/* non-MX -- Most cores use Std Memory
				 * Controllers which usually can't use RCW
				 */
#endif	/* XCHAL_HAVE_S32C1I &&
	 * (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
	 */
#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY

#if !XCHAL_HAVE_VECBASE
# error "MMU v3 requires reloc vectors"
#endif
1:	movi	a2, 0x10000000
#if CONFIG_KERNEL_LOAD_ADDRESS < 0x40000000ul
#define TEMP_MAPPING_VADDR 0x40000000
#else
#define TEMP_MAPPING_VADDR 0x00000000
#endif

	/* Step 1: invalidate mapping at 0x40000000..0x5FFFFFFF. */
	movi	a2, TEMP_MAPPING_VADDR | XCHAL_SPANNING_WAY
	/* Step 2: map 0x40000000..0x47FFFFFF to paddr containing this code
	 * and jump to the new mapping.
	 */
	addi	a3, a3, CA_BYPASS
	addi	a7, a2, 5 - XCHAL_SPANNING_WAY
	addi	a5, a2, -XCHAL_SPANNING_WAY
	/* Step 3: unmap everything other than current area.
	 * Start at 0x60000000, wrap around, and end with 0x20000000.
	 */
2:	movi	a4, 0x20000000
	/* Step 4: Setup MMU with the requested static mappings. */
	movi	a5, XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_TLB_WAY
	movi	a4, XCHAL_KSEG_PADDR + CA_WRITEBACK
	movi	a5, XCHAL_KSEG_BYPASS_VADDR + XCHAL_KSEG_TLB_WAY
	movi	a4, XCHAL_KSEG_PADDR + CA_BYPASS
#ifdef CONFIG_XTENSA_KSEG_512M
	movi	a5, XCHAL_KSEG_CACHED_VADDR + 0x10000000 + XCHAL_KSEG_TLB_WAY
	movi	a4, XCHAL_KSEG_PADDR + 0x10000000 + CA_WRITEBACK
	movi	a5, XCHAL_KSEG_BYPASS_VADDR + 0x10000000 + XCHAL_KSEG_TLB_WAY
	movi	a4, XCHAL_KSEG_PADDR + 0x10000000 + CA_BYPASS
#endif
	movi	a5, XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_TLB_WAY
	movi	a4, XCHAL_KIO_DEFAULT_PADDR + CA_WRITEBACK
	movi	a5, XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_TLB_WAY
	movi	a4, XCHAL_KIO_DEFAULT_PADDR + CA_BYPASS
	/* Jump to self, using final mappings. */
	/* Step 5: remove temporary mapping. */
#endif /* defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU &&
	  XCHAL_HAVE_SPANNING_WAY */
	.macro	initialize_cacheattr

#if !defined(CONFIG_MMU) && (XCHAL_HAVE_TLBS || XCHAL_HAVE_MPU)
#if CONFIG_MEMMAP_CACHEATTR == 0x22222222 && XCHAL_HAVE_PTP_MMU
#error Default MEMMAP_CACHEATTR of 0x22222222 does not work with full MMU.
#endif
	.long 0x000000, 0x1fff00, 0x1ddf00, 0x1eef00
	.long 0x006600, 0x000000, 0x000000, 0x000000
	.long 0x000000, 0x000000, 0x000000, 0x000000
	.long 0x000000, 0x000000, 0x000000, 0x000000
	movi	a3, .Lattribute_table
	movi	a4, CONFIG_MEMMAP_CACHEATTR
	movi	a6, XCHAL_MPU_ENTRIES
	movi	a5, XCHAL_SPANNING_WAY
	movi	a6, ~_PAGE_ATTRIB_MASK
	movi	a4, CONFIG_MEMMAP_CACHEATTR
#endif /*__ASSEMBLY__*/

#endif /* _XTENSA_INITIALIZE_MMU_H */