 * Blackfin CPLB exception handling for when the MPU is on
 *
 * Copyright 2008-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
9 #include <linux/module.h>
12 #include <asm/blackfin.h>
13 #include <asm/cacheflush.h>
15 #include <asm/cplbinit.h>
16 #include <asm/mmu_context.h>
 * This file is compiled with certain -ffixed-reg options.  We have to
 * make sure not to call any functions here that could clobber these
 * registers.
/*
 * Per-CPU pointer to the current process's page-permission bitmaps;
 * written in set_mask_dcplbs() below and consulted by the CPLB miss
 * handlers when deciding user read/write/execute rights.
 */
28 unsigned long *current_rwx_mask
[NR_CPUS
];
/* Per-CPU statistics: data-CPLB and instruction-CPLB miss counts. */
30 int nr_dcplb_miss
[NR_CPUS
], nr_icplb_miss
[NR_CPUS
];
/* Per-CPU statistics: supervisor-mode ICPLB misses and DCPLB protection faults. */
31 int nr_icplb_supv_miss
[NR_CPUS
], nr_dcplb_prot
[NR_CPUS
];
/* Per-CPU count of flush_switched_cplbs() invocations. */
32 int nr_cplb_flush
[NR_CPUS
];
35 * Given the contents of the status register, return the index of the
36 * CPLB that caused the fault.
/*
 * Map a CPLB fault status word to the index of the faulting entry.
 * The low 16 bits of the status register are normalized with the
 * Blackfin norm builtin to locate the set bit.
 * NOTE(review): the function's return statement and braces are missing
 * from this copy -- restore from the original before building.
 */
38 static inline int faulting_cplb_index(int status
)
40 int signbits
= __builtin_bfin_norm_fr1x32(status
& 0xFFFF);
45 * Given the contents of the status register and the DCPLB_DATA contents,
46 * return true if a write access should be permitted.
/*
 * Decide whether a faulting write access may proceed, given the fault
 * status and the CPLB_DATA contents: supervisor-mode faults are checked
 * against CPLB_SUPV_WR, user-mode faults against CPLB_USER_WR.
 * NOTE(review): the enclosing braces appear lost in this copy.
 */
48 static inline int write_permitted(int status
, unsigned long data
)
/* Supervisor access: the supervisor-write bit governs. */
50 if (status
& FAULT_USERSUPV
)
51 return !!(data
& CPLB_SUPV_WR
);
/* User access: the user-write bit governs. */
53 return !!(data
& CPLB_USER_WR
);
56 /* Counters to implement round-robin replacement. */
/* Per-CPU cursors into the switched ICPLB/DCPLB entry ranges, advanced
 * by evict_one_icplb()/evict_one_dcplb() below. */
57 static int icplb_rr_index
[NR_CPUS
], dcplb_rr_index
[NR_CPUS
];
60 * Find an ICPLB entry to be evicted and return its index.
/*
 * Pick an ICPLB entry of @cpu to evict and return its index.
 * First choice is any switched-range entry whose CPLB_VALID bit is
 * clear; otherwise fall back to round-robin replacement driven by
 * icplb_rr_index[cpu].
 * NOTE(review): this copy looks truncated -- the "return i;" statements
 * and the guard around the wrap-around adjustment are missing; restore
 * from the original before building.
 */
62 static int evict_one_icplb(unsigned int cpu
)
/* Prefer an entry that is currently invalid. */
65 for (i
= first_switched_icplb
; i
< MAX_CPLBS
; i
++)
66 if ((icplb_tbl
[cpu
][i
].data
& CPLB_VALID
) == 0)
/* All valid: take the round-robin cursor, wrapping it back into
 * [first_switched_icplb, MAX_CPLBS). */
68 i
= first_switched_icplb
+ icplb_rr_index
[cpu
];
70 i
-= MAX_CPLBS
- first_switched_icplb
;
71 icplb_rr_index
[cpu
] -= MAX_CPLBS
- first_switched_icplb
;
/* Advance the cursor for the next eviction. */
73 icplb_rr_index
[cpu
]++;
/*
 * Pick a DCPLB entry of @cpu to evict and return its index -- the data
 * counterpart of evict_one_icplb(): invalid switched entries first,
 * then round-robin via dcplb_rr_index[cpu].
 * NOTE(review): the "return i;" statements and the wrap-around guard
 * are missing from this copy -- restore from the original.
 */
77 static int evict_one_dcplb(unsigned int cpu
)
/* Prefer an entry that is currently invalid. */
80 for (i
= first_switched_dcplb
; i
< MAX_CPLBS
; i
++)
81 if ((dcplb_tbl
[cpu
][i
].data
& CPLB_VALID
) == 0)
/* All valid: take the round-robin cursor, wrapping it back into
 * [first_switched_dcplb, MAX_CPLBS). */
83 i
= first_switched_dcplb
+ dcplb_rr_index
[cpu
];
85 i
-= MAX_CPLBS
- first_switched_dcplb
;
86 dcplb_rr_index
[cpu
] -= MAX_CPLBS
- first_switched_dcplb
;
/* Advance the cursor for the next eviction. */
88 dcplb_rr_index
[cpu
]++;
/*
 * Handle a DCPLB miss on @cpu: derive protection/cacheability bits for
 * the faulting address (read from DCPLB_FAULT_ADDR/DCPLB_STATUS), pick
 * a victim entry, and install the new mapping in both the shadow table
 * (dcplb_tbl) and the hardware DCPLB registers.  Disallowed accesses
 * return CPLB_PROT_VIOL.
 * NOTE(review): this copy is truncated -- local declarations (mask,
 * idx, d_data), several braces, some branch bodies and the final
 * return are missing; restore from the original before building.
 */
92 static noinline
int dcplb_miss(unsigned int cpu
)
94 unsigned long addr
= bfin_read_DCPLB_FAULT_ADDR();
95 int status
= bfin_read_DCPLB_STATUS();
100 nr_dcplb_miss
[cpu
]++;
/* Default attributes: supervisor-writable, valid, dirty, 4KB page. */
102 d_data
= CPLB_SUPV_WR
| CPLB_VALID
| CPLB_DIRTY
| PAGE_SIZE_4KB
;
103 #ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
/* Cacheable external memory; optionally write-through + allocate-on-write. */
104 if (bfin_addr_dcacheable(addr
)) {
105 d_data
|= CPLB_L1_CHBL
| ANOMALY_05000158_WORKAROUND
;
106 # ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
107 d_data
|= CPLB_L1_AOW
| CPLB_WT
;
/* Faulting address in L2 SRAM (branch body missing in this copy). */
112 if (L2_LENGTH
&& addr
>= L2_START
&& addr
< L2_START
+ L2_LENGTH
) {
/* Addresses beyond physical memory: async banks, boot ROM, or fault. */
115 } else if (addr
>= physical_mem_end
) {
/* Supervisor access to the async memory banks: use a 4MB page.
 * NOTE(review): the 4MB address-alignment statement appears missing here. */
116 if (addr
>= ASYNC_BANK0_BASE
&& addr
< ASYNC_BANK3_BASE
+ ASYNC_BANK3_SIZE
117 && (status
& FAULT_USERSUPV
)) {
119 d_data
&= ~PAGE_SIZE_4KB
;
120 d_data
|= PAGE_SIZE_4MB
;
/* Supervisor data *read* of the boot ROM (FAULT_RW clear): 1MB page. */
121 } else if (addr
>= BOOT_ROM_START
&& addr
< BOOT_ROM_START
+ BOOT_ROM_LENGTH
122 && (status
& (FAULT_RW
| FAULT_USERSUPV
)) == FAULT_USERSUPV
) {
123 addr
&= ~(1 * 1024 * 1024 - 1);
124 d_data
&= ~PAGE_SIZE_4KB
;
125 d_data
|= PAGE_SIZE_1MB
;
/* Anything else out there is a protection violation. */
127 return CPLB_PROT_VIOL
;
/* Reserved memory between _ramend and physical_mem_end: user R/W. */
128 } else if (addr
>= _ramend
) {
129 d_data
|= CPLB_USER_RD
| CPLB_USER_WR
;
/* Normal RAM: consult the process's permission bitmaps -- the read
 * mask first, the write mask page_mask_nelts words further on. */
131 mask
= current_rwx_mask
[cpu
];
133 int page
= addr
>> PAGE_SHIFT
;
135 int bit
= 1 << (page
& 31);
138 d_data
|= CPLB_USER_RD
;
140 mask
+= page_mask_nelts
;
142 d_data
|= CPLB_USER_WR
;
/* Choose a victim entry and record the new mapping in the shadow table. */
145 idx
= evict_one_dcplb(cpu
);
148 dcplb_tbl
[cpu
][idx
].addr
= addr
;
149 dcplb_tbl
[cpu
][idx
].data
= d_data
;
/* Mirror the shadow entry into the hardware DCPLB registers. */
152 bfin_write32(DCPLB_DATA0
+ idx
* 4, d_data
);
153 bfin_write32(DCPLB_ADDR0
+ idx
* 4, addr
);
/*
 * Handle an ICPLB miss on @cpu: reject fetches from the uncached DMA
 * region, account supervisor misses, build protection/cacheability
 * bits for the faulting address, evict a victim and install the entry
 * in icplb_tbl and the hardware ICPLB registers.
 * NOTE(review): this copy is truncated -- declarations (e.g. idx),
 * braces, some branch bodies and the final return are missing; restore
 * from the original before building.
 */
159 static noinline
int icplb_miss(unsigned int cpu
)
161 unsigned long addr
= bfin_read_ICPLB_FAULT_ADDR();
162 int status
= bfin_read_ICPLB_STATUS();
164 unsigned long i_data
;
166 nr_icplb_miss
[cpu
]++;
168 /* If inside the uncached DMA region, fault. */
169 if (addr
>= _ramend
- DMA_UNCACHED_REGION
&& addr
< _ramend
)
170 return CPLB_PROT_VIOL
;
/* Count supervisor-mode instruction misses separately. */
172 if (status
& FAULT_USERSUPV
)
173 nr_icplb_supv_miss
[cpu
]++;
176 * First, try to find a CPLB that matches this address. If we
177 * find one, then the fact that we're in the miss handler means
178 * that the instruction crosses a page boundary.
180 for (idx
= first_switched_icplb
; idx
< MAX_CPLBS
; idx
++) {
181 if (icplb_tbl
[cpu
][idx
].data
& CPLB_VALID
) {
182 unsigned long this_addr
= icplb_tbl
[cpu
][idx
].addr
;
/* Entry already covers the faulting address (body missing in this copy). */
183 if (this_addr
<= addr
&& this_addr
+ PAGE_SIZE
> addr
) {
/* Default attributes: valid, port-priority, 4KB page. */
190 i_data
= CPLB_VALID
| CPLB_PORTPRIO
| PAGE_SIZE_4KB
;
192 #ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
194 * Normal RAM, and possibly the reserved memory area, are
197 if (addr
< _ramend
||
198 (addr
< physical_mem_end
&& reserved_mem_icache_on
))
199 i_data
|= CPLB_L1_CHBL
| ANOMALY_05000158_WORKAROUND
;
/* Faulting address in L2 SRAM (branch body missing in this copy). */
202 if (L2_LENGTH
&& addr
>= L2_START
&& addr
< L2_START
+ L2_LENGTH
) {
/* Addresses beyond physical memory: boot ROM for supervisor, else fault. */
205 } else if (addr
>= physical_mem_end
) {
206 if (addr
>= BOOT_ROM_START
&& addr
< BOOT_ROM_START
+ BOOT_ROM_LENGTH
207 && (status
& FAULT_USERSUPV
)) {
208 addr
&= ~(1 * 1024 * 1024 - 1);
209 i_data
&= ~PAGE_SIZE_4KB
;
210 i_data
|= PAGE_SIZE_1MB
;
212 return CPLB_PROT_VIOL
;
/* Reserved memory above _ramend: user-readable (executable) pages. */
213 } else if (addr
>= _ramend
) {
214 i_data
|= CPLB_USER_RD
;
217 * Two cases to distinguish - a supervisor access must
218 * necessarily be for a module page; we grant it
219 * unconditionally (could do better here in the future).
220 * Otherwise, check the x bitmap of the current process.
222 if (!(status
& FAULT_USERSUPV
)) {
/* The execute bitmap lives 2*page_mask_nelts words past the read mask. */
223 unsigned long *mask
= current_rwx_mask
[cpu
];
226 int page
= addr
>> PAGE_SHIFT
;
228 int bit
= 1 << (page
& 31);
230 mask
+= 2 * page_mask_nelts
;
232 i_data
|= CPLB_USER_RD
;
/* Choose a victim entry and record the new mapping in the shadow table. */
236 idx
= evict_one_icplb(cpu
);
238 icplb_tbl
[cpu
][idx
].addr
= addr
;
239 icplb_tbl
[cpu
][idx
].data
= i_data
;
/* Mirror the shadow entry into the hardware ICPLB registers. */
242 bfin_write32(ICPLB_DATA0
+ idx
* 4, i_data
);
243 bfin_write32(ICPLB_ADDR0
+ idx
* 4, addr
);
/*
 * Handle a DCPLB protection fault on @cpu.  A write fault (FAULT_RW)
 * hitting a clean, non-write-through entry whose permissions pass
 * write_permitted() can be fixed up by marking the entry dirty and
 * rewriting it; everything else is reported as CPLB_PROT_VIOL.
 * NOTE(review): the statement setting CPLB_DIRTY in 'data', the closing
 * braces and the success return appear missing from this copy.
 */
249 static noinline
int dcplb_protection_fault(unsigned int cpu
)
251 int status
= bfin_read_DCPLB_STATUS();
253 nr_dcplb_prot
[cpu
]++;
/* Only write faults are fixable; look up the faulting entry's data. */
255 if (status
& FAULT_RW
) {
256 int idx
= faulting_cplb_index(status
);
257 unsigned long data
= dcplb_tbl
[cpu
][idx
].data
;
258 if (!(data
& CPLB_WT
) && !(data
& CPLB_DIRTY
) &&
259 write_permitted(status
, data
)) {
/* Update both the shadow table and the hardware register. */
261 dcplb_tbl
[cpu
][idx
].data
= data
;
262 bfin_write32(DCPLB_DATA0
+ idx
* 4, data
);
266 return CPLB_PROT_VIOL
;
/*
 * Top-level CPLB exception dispatcher: decode the 6-bit exception
 * cause from @seqstat and route to the protection-fault or miss
 * handler for the executing CPU.  @regs is unused in the visible code.
 * NOTE(review): the switch (or if/else) structure selecting among the
 * three handlers is missing from this copy -- restore from the
 * original before building.
 */
269 int cplb_hdr(int seqstat
, struct pt_regs
*regs
)
271 int cause
= seqstat
& 0x3f;
272 unsigned int cpu
= raw_smp_processor_id();
275 return dcplb_protection_fault(cpu
);
277 return icplb_miss(cpu
);
279 return dcplb_miss(cpu
);
/*
 * Invalidate every switched (per-process) ICPLB and DCPLB entry of
 * @cpu, clearing both the shadow tables and the hardware DATA
 * registers, with hardware interrupts disabled around the update.
 * NOTE(review): declarations of i/flags, the loops' closing braces and
 * any CPLB disable/enable bracketing appear missing from this copy.
 */
285 void flush_switched_cplbs(unsigned int cpu
)
290 nr_cplb_flush
[cpu
]++;
292 local_irq_save_hw(flags
);
/* Clear the switched instruction-CPLB entries. */
294 for (i
= first_switched_icplb
; i
< MAX_CPLBS
; i
++) {
295 icplb_tbl
[cpu
][i
].data
= 0;
296 bfin_write32(ICPLB_DATA0
+ i
* 4, 0);
/* Clear the switched data-CPLB entries. */
301 for (i
= first_switched_dcplb
; i
< MAX_CPLBS
; i
++) {
302 dcplb_tbl
[cpu
][i
].data
= 0;
303 bfin_write32(DCPLB_DATA0
+ i
* 4, 0);
306 local_irq_restore_hw(flags
);
/*
 * Install @masks (the process's page-permission bitmaps) into the data
 * CPLB entries [first_mask_dcplb, first_switched_dcplb) of @cpu, and
 * publish the pointer in current_rwx_mask[cpu] for the miss handlers.
 * NOTE(review): this copy appears truncated -- the guard around the
 * first current_rwx_mask store (presumably an early return for a NULL
 * @masks), declarations of i/flags, and several braces are missing;
 * restore from the original before building.
 */
310 void set_mask_dcplbs(unsigned long *masks
, unsigned int cpu
)
313 unsigned long addr
= (unsigned long)masks
;
314 unsigned long d_data
;
/* First store of the mask pointer (its guarding condition is missing here). */
318 current_rwx_mask
[cpu
] = masks
;
322 local_irq_save_hw(flags
);
323 current_rwx_mask
[cpu
] = masks
;
/* Bitmaps located in L2 SRAM (branch body missing in this copy). */
325 if (L2_LENGTH
&& addr
>= L2_START
&& addr
< L2_START
+ L2_LENGTH
) {
/* Otherwise: supervisor-writable, valid, dirty 4KB pages, cacheable
 * per the external-memory cache configuration. */
329 d_data
= CPLB_SUPV_WR
| CPLB_VALID
| CPLB_DIRTY
| PAGE_SIZE_4KB
;
330 #ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
331 d_data
|= CPLB_L1_CHBL
;
332 # ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
333 d_data
|= CPLB_L1_AOW
| CPLB_WT
;
/* Write the mask pages into the reserved mask-DCPLB entry range. */
339 for (i
= first_mask_dcplb
; i
< first_switched_dcplb
; i
++) {
340 dcplb_tbl
[cpu
][i
].addr
= addr
;
341 dcplb_tbl
[cpu
][i
].data
= d_data
;
342 bfin_write32(DCPLB_DATA0
+ i
* 4, d_data
);
343 bfin_write32(DCPLB_ADDR0
+ i
* 4, addr
);
347 local_irq_restore_hw(flags
);