// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007 MIPS Technologies, Inc.
 */
#include <linux/cacheinfo.h>
#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/syscalls.h>

#include <asm/bootinfo.h>
#include <asm/cacheflush.h>
#include <asm/cpu-features.h>
#include <asm/loongarch.h>
#include <asm/processor.h>
#include <asm/setup.h>
28 void cache_error_setup(void)
30 extern char __weak except_vec_cex
;
31 set_merr_handler(0x0, &except_vec_cex
, 0x80);
35 * LoongArch maintains ICache/DCache coherency by hardware,
36 * we just need "ibar" to avoid instruction hazard here.
38 void local_flush_icache_range(unsigned long start
, unsigned long end
)
40 asm volatile ("\tibar 0\n"::);
42 EXPORT_SYMBOL(local_flush_icache_range
);
44 static void flush_cache_leaf(unsigned int leaf
)
47 uint64_t addr
= CSR_DMW0_BASE
;
48 struct cache_desc
*cdesc
= current_cpu_data
.cache_leaves
+ leaf
;
50 nr_nodes
= cache_private(cdesc
) ? 1 : loongson_sysconf
.nr_nodes
;
53 for (i
= 0; i
< cdesc
->sets
; i
++) {
54 for (j
= 0; j
< cdesc
->ways
; j
++) {
55 flush_cache_line(leaf
, addr
);
60 addr
+= cdesc
->linesz
;
62 addr
+= (1ULL << NODE_ADDRSPACE_SHIFT
);
63 } while (--nr_nodes
> 0);
66 asmlinkage __visible
void __flush_cache_all(void)
69 struct cache_desc
*cdesc
= current_cpu_data
.cache_leaves
;
70 unsigned int cache_present
= current_cpu_data
.cache_leaves_present
;
72 leaf
= cache_present
- 1;
73 if (cache_inclusive(cdesc
+ leaf
)) {
74 flush_cache_leaf(leaf
);
78 for (leaf
= 0; leaf
< cache_present
; leaf
++)
79 flush_cache_leaf(leaf
);
/* CPUCFG16 L1 cache presence bits. */
#define L1IUPRE		(1 << 0)	/* L1 I/unified cache present */
#define L1IUUNIFY	(1 << 1)	/* L1 IU cache is unified */
#define L1DPRE		(1 << 2)	/* L1 D cache present */

/* CPUCFG16 per-level (L2+) cache attribute bits, after shifting. */
#define LXIUPRE		(1 << 0)	/* Lx I/unified cache present */
#define LXIUUNIFY	(1 << 1)	/* Lx IU cache is unified */
#define LXIUPRIV	(1 << 2)	/* Lx IU cache is core-private */
#define LXIUINCL	(1 << 3)	/* Lx IU cache is inclusive */
#define LXDPRE		(1 << 4)	/* Lx D cache present */
#define LXDPRIV		(1 << 5)	/* Lx D cache is core-private */
#define LXDINCL		(1 << 6)	/* Lx D cache is inclusive */
/*
 * Fill one cache_desc leaf from the CPUCFG17+leaf geometry word, then
 * advance both the descriptor pointer and the leaf index (callers rely
 * on cdesc/leaf being incremented by this macro).  L1 caches are always
 * private; for L2+ the private/inclusive flags come from cfg0 bits.
 */
#define populate_cache_properties(cfg0, cdesc, level, leaf)				\
do {											\
	unsigned int cfg1;								\
											\
	cfg1 = read_cpucfg(LOONGARCH_CPUCFG17 + leaf);					\
	if (level == 1)	{								\
		cdesc->flags |= CACHE_PRIVATE;						\
	} else {									\
		if (cfg0 & LXIUPRIV)							\
			cdesc->flags |= CACHE_PRIVATE;					\
		if (cfg0 & LXIUINCL)							\
			cdesc->flags |= CACHE_INCLUSIVE;				\
	}										\
	cdesc->level = level;								\
	cdesc->flags |= CACHE_PRESENT;							\
	cdesc->ways = ((cfg1 & CPUCFG_CACHE_WAYS_M) >> CPUCFG_CACHE_WAYS) + 1;		\
	cdesc->sets = 1 << ((cfg1 & CPUCFG_CACHE_SETS_M) >> CPUCFG_CACHE_SETS);		\
	cdesc->linesz = 1 << ((cfg1 & CPUCFG_CACHE_LSIZE_M) >> CPUCFG_CACHE_LSIZE);	\
	cdesc++;									\
	leaf++;										\
} while (0)
115 void cpu_cache_init(void)
117 unsigned int leaf
= 0, level
= 1;
118 unsigned int config
= read_cpucfg(LOONGARCH_CPUCFG16
);
119 struct cache_desc
*cdesc
= current_cpu_data
.cache_leaves
;
121 if (config
& L1IUPRE
) {
122 if (config
& L1IUUNIFY
)
123 cdesc
->type
= CACHE_TYPE_UNIFIED
;
125 cdesc
->type
= CACHE_TYPE_INST
;
126 populate_cache_properties(config
, cdesc
, level
, leaf
);
129 if (config
& L1DPRE
) {
130 cdesc
->type
= CACHE_TYPE_DATA
;
131 populate_cache_properties(config
, cdesc
, level
, leaf
);
134 config
= config
>> 3;
135 for (level
= 2; level
<= CACHE_LEVEL_MAX
; level
++) {
139 if (config
& LXIUPRE
) {
140 if (config
& LXIUUNIFY
)
141 cdesc
->type
= CACHE_TYPE_UNIFIED
;
143 cdesc
->type
= CACHE_TYPE_INST
;
144 populate_cache_properties(config
, cdesc
, level
, leaf
);
147 if (config
& LXDPRE
) {
148 cdesc
->type
= CACHE_TYPE_DATA
;
149 populate_cache_properties(config
, cdesc
, level
, leaf
);
152 config
= config
>> 7;
155 BUG_ON(leaf
> CACHE_LEAVES_MAX
);
157 current_cpu_data
.cache_leaves_present
= leaf
;
158 current_cpu_data
.options
|= LOONGARCH_CPU_PREFETCH
;
161 static const pgprot_t protection_map
[16] = {
162 [VM_NONE
] = __pgprot(_CACHE_CC
| _PAGE_USER
|
163 _PAGE_PROTNONE
| _PAGE_NO_EXEC
|
165 [VM_READ
] = __pgprot(_CACHE_CC
| _PAGE_VALID
|
166 _PAGE_USER
| _PAGE_PRESENT
|
168 [VM_WRITE
] = __pgprot(_CACHE_CC
| _PAGE_VALID
|
169 _PAGE_USER
| _PAGE_PRESENT
|
171 [VM_WRITE
| VM_READ
] = __pgprot(_CACHE_CC
| _PAGE_VALID
|
172 _PAGE_USER
| _PAGE_PRESENT
|
174 [VM_EXEC
] = __pgprot(_CACHE_CC
| _PAGE_VALID
|
175 _PAGE_USER
| _PAGE_PRESENT
),
176 [VM_EXEC
| VM_READ
] = __pgprot(_CACHE_CC
| _PAGE_VALID
|
177 _PAGE_USER
| _PAGE_PRESENT
),
178 [VM_EXEC
| VM_WRITE
] = __pgprot(_CACHE_CC
| _PAGE_VALID
|
179 _PAGE_USER
| _PAGE_PRESENT
),
180 [VM_EXEC
| VM_WRITE
| VM_READ
] = __pgprot(_CACHE_CC
| _PAGE_VALID
|
181 _PAGE_USER
| _PAGE_PRESENT
),
182 [VM_SHARED
] = __pgprot(_CACHE_CC
| _PAGE_USER
|
183 _PAGE_PROTNONE
| _PAGE_NO_EXEC
|
185 [VM_SHARED
| VM_READ
] = __pgprot(_CACHE_CC
| _PAGE_VALID
|
186 _PAGE_USER
| _PAGE_PRESENT
|
188 [VM_SHARED
| VM_WRITE
] = __pgprot(_CACHE_CC
| _PAGE_VALID
|
189 _PAGE_USER
| _PAGE_PRESENT
|
190 _PAGE_NO_EXEC
| _PAGE_WRITE
),
191 [VM_SHARED
| VM_WRITE
| VM_READ
] = __pgprot(_CACHE_CC
| _PAGE_VALID
|
192 _PAGE_USER
| _PAGE_PRESENT
|
193 _PAGE_NO_EXEC
| _PAGE_WRITE
),
194 [VM_SHARED
| VM_EXEC
] = __pgprot(_CACHE_CC
| _PAGE_VALID
|
195 _PAGE_USER
| _PAGE_PRESENT
),
196 [VM_SHARED
| VM_EXEC
| VM_READ
] = __pgprot(_CACHE_CC
| _PAGE_VALID
|
197 _PAGE_USER
| _PAGE_PRESENT
),
198 [VM_SHARED
| VM_EXEC
| VM_WRITE
] = __pgprot(_CACHE_CC
| _PAGE_VALID
|
199 _PAGE_USER
| _PAGE_PRESENT
|
201 [VM_SHARED
| VM_EXEC
| VM_WRITE
| VM_READ
] = __pgprot(_CACHE_CC
| _PAGE_VALID
|
202 _PAGE_USER
| _PAGE_PRESENT
|
205 DECLARE_VM_GET_PAGE_PROT