/*
 * arch/score/mm/cache.c
 *
 * Score Processor version.
 *
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/fs.h>

#include <asm/mmu_context.h>
/*
 * Just flush the entire D-cache for this page!
 * The caller must ensure the page doesn't contain instructions, because
 * this function will not flush the I-cache.
 * addr must be cache-line aligned.
 */
static void flush_data_cache_page(unsigned long addr)
{
	unsigned int i;

	for (i = 0; i < PAGE_SIZE; i += L1_CACHE_BYTES) {
		__asm__ __volatile__(
		"cache 0x0e, [%0, 0]\n"
		"cache 0x1a, [%0, 0]\n"
		"nop\n"
		: : "r" (addr));
		addr += L1_CACHE_BYTES;
	}
}
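/*
 * Note on the deferred flush below: PG_arch_1 is an architecture-private
 * page flag.  In this code it appears to mark pages whose D-cache lines may
 * still be dirty; __update_cache() writes those lines back when the page is
 * faulted into a user mapping and then clears the flag, so the flush is only
 * paid for pages that are actually used.
 */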
/* called by update_mmu_cache. */
void __update_cache(struct vm_area_struct *vma, unsigned long address,
		pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = (vma->vm_flags & VM_EXEC);

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
	if (page_mapping(page) && test_bit(PG_arch_1, &page->flags)) {
		addr = (unsigned long) page_address(page);
		if (exec)
			flush_data_cache_page(addr);
		clear_bit(PG_arch_1, &page->flags);
	}
}
static inline void setup_protection_map(void)
{
	protection_map[0] = PAGE_NONE;
	protection_map[1] = PAGE_READONLY;
	protection_map[2] = PAGE_COPY;
	protection_map[3] = PAGE_COPY;
	protection_map[4] = PAGE_READONLY;
	protection_map[5] = PAGE_READONLY;
	protection_map[6] = PAGE_COPY;
	protection_map[7] = PAGE_COPY;
	protection_map[8] = PAGE_NONE;
	protection_map[9] = PAGE_READONLY;
	protection_map[10] = PAGE_SHARED;
	protection_map[11] = PAGE_SHARED;
	protection_map[12] = PAGE_READONLY;
	protection_map[13] = PAGE_READONLY;
	protection_map[14] = PAGE_SHARED;
	protection_map[15] = PAGE_SHARED;
}
void __devinit cpu_cache_init(void)
{
	setup_protection_map();
}
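/*
 * Whole-cache maintenance.  Each routine below issues "cache" ops on an
 * arbitrary address (its own entry point, loaded into r8), with trailing
 * nops presumably giving the operation time to complete.  The op encodings
 * are an assumption based on how they are used in this file: 0x10
 * invalidates the whole I-cache, 0x1f writes back the whole D-cache, and
 * 0x1a drains the write buffer (see the comment above flush_dcache_range()).
 */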
void flush_icache_all(void)
{
	__asm__ __volatile__(
	"la r8, flush_icache_all\n"
	"cache 0x10, [r8, 0]\n"
	"nop\nnop\nnop\nnop\nnop\nnop\n"
	: : : "r8");
}
void flush_dcache_all(void)
{
	__asm__ __volatile__(
	"la r8, flush_dcache_all\n"
	"cache 0x1f, [r8, 0]\n"
	"nop\nnop\nnop\nnop\nnop\nnop\n"
	"cache 0x1a, [r8, 0]\n"
	"nop\nnop\nnop\nnop\nnop\nnop\n"
	: : : "r8");
}
void flush_cache_all(void)
{
	__asm__ __volatile__(
	"la r8, flush_cache_all\n"
	"cache 0x10, [r8, 0]\n"
	"nop\nnop\nnop\nnop\nnop\nnop\n"
	"cache 0x1f, [r8, 0]\n"
	"nop\nnop\nnop\nnop\nnop\nnop\n"
	"cache 0x1a, [r8, 0]\n"
	"nop\nnop\nnop\nnop\nnop\nnop\n"
	: : : "r8");
}
void flush_cache_mm(struct mm_struct *mm)
{
	if (!(mm->context))
		return;
	flush_cache_all();
}
/*
 * Flushing a range precisely may take a long time: we must check whether
 * each page in the range is present, and if it is, flush the part of the
 * range that lies within that page.  Be careful: the range may span two
 * pages, with one present and the other not.
 */
/*
 * The interface is provided in the hope that the port can find a suitably
 * efficient method for removing multiple page-sized regions from the cache.
 */
void flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int exec = vma->vm_flags & VM_EXEC;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	if (!(mm->context))
		return;

	pgdp = pgd_offset(mm, start);
	pudp = pud_offset(pgdp, start);
	pmdp = pmd_offset(pudp, start);
	ptep = pte_offset(pmdp, start);

	while (start <= end) {
		unsigned long tmpend;

		pgdp = pgd_offset(mm, start);
		pudp = pud_offset(pgdp, start);
		pmdp = pmd_offset(pudp, start);
		ptep = pte_offset(pmdp, start);

		if (!(pte_val(*ptep) & _PAGE_PRESENT)) {
			start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
			continue;
		}
		tmpend = (start | (PAGE_SIZE - 1)) > end ?
			 end : (start | (PAGE_SIZE - 1));

		flush_dcache_range(start, tmpend);
		if (exec)
			flush_icache_range(start, tmpend);
		start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
	}
}
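/*
 * flush_cache_page() operates on a kernel alias of the page frame:
 * 0xa0000000 | (pfn << PAGE_SHIFT) is assumed to be the fixed kernel
 * segment mapping of that physical page (similar to a MIPS kseg address),
 * so the flush needs no page-table walk.
 */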
void flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	int exec = vma->vm_flags & VM_EXEC;
	unsigned long kaddr = 0xa0000000 | (pfn << PAGE_SHIFT);

	flush_dcache_range(kaddr, kaddr + PAGE_SIZE);

	if (exec)
		flush_icache_range(kaddr, kaddr + PAGE_SIZE);
}
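/*
 * flush_cache_sigtramp() makes the two freshly written trampoline
 * instructions at addr visible to instruction fetch: 0x02 invalidates an
 * I-cache line (as in flush_icache_range() below), 0x0d is assumed to write
 * back the matching D-cache line, and 0x1a drains the write buffer.
 */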
void flush_cache_sigtramp(unsigned long addr)
{
	__asm__ __volatile__(
	"cache 0x02, [%0, 0]\n"
	"nop\nnop\nnop\nnop\nnop\n"
	"cache 0x02, [%0, 0x4]\n"
	"nop\nnop\nnop\nnop\nnop\n"

	"cache 0x0d, [%0, 0]\n"
	"nop\nnop\nnop\nnop\nnop\n"
	"cache 0x0d, [%0, 0x4]\n"
	"nop\nnop\nnop\nnop\nnop\n"

	"cache 0x1a, [%0, 0]\n"
	"nop\nnop\nnop\nnop\nnop\n"
	: : "r" (addr));
}
/*
 * 1. Write back and invalidate a cache line of the D-cache.
 * 2. Drain the write buffer.
 * The range must be smaller than PAGE_SIZE.
 */
void flush_dcache_range(unsigned long start, unsigned long end)
{
	int size, i;

	start = start & ~(L1_CACHE_BYTES - 1);
	end = end & ~(L1_CACHE_BYTES - 1);
	size = end - start;
	/* flush dcache to ram, and invalidate dcache lines. */
	for (i = 0; i < size; i += L1_CACHE_BYTES) {
		__asm__ __volatile__(
		"cache 0x0e, [%0, 0]\n"
		"nop\nnop\nnop\nnop\nnop\n"
		"cache 0x1a, [%0, 0]\n"
		"nop\nnop\nnop\nnop\nnop\n"
		: : "r" (start));
		start += L1_CACHE_BYTES;
	}
}
void flush_icache_range(unsigned long start, unsigned long end)
{
	int size, i;

	start = start & ~(L1_CACHE_BYTES - 1);
	end = end & ~(L1_CACHE_BYTES - 1);
	size = end - start;
	/* invalidate icache lines. */
	for (i = 0; i < size; i += L1_CACHE_BYTES) {
		__asm__ __volatile__(
		"cache 0x02, [%0, 0]\n"
		"nop\nnop\nnop\nnop\nnop\n"
		: : "r" (start));
		start += L1_CACHE_BYTES;
	}
}