/*
 * Blackfin CPLB exception handling for when MPU in on
 *
 * Copyright 2008-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */
9 #include <linux/module.h>
12 #include <asm/blackfin.h>
13 #include <asm/cacheflush.h>
15 #include <asm/cplbinit.h>
16 #include <asm/mmu_context.h>
/*
 * WARNING
 *
 * This file is compiled with certain -ffixed-reg options.  We have to
 * make sure not to call any functions here that could clobber these
 * registers.
 */
28 unsigned long *current_rwx_mask
[NR_CPUS
];
30 int nr_dcplb_miss
[NR_CPUS
], nr_icplb_miss
[NR_CPUS
];
31 int nr_icplb_supv_miss
[NR_CPUS
], nr_dcplb_prot
[NR_CPUS
];
32 int nr_cplb_flush
[NR_CPUS
];
/*
 * When exception handling runs from L1 instruction SRAM, place the CPLB
 * managers there as well; otherwise the attribute expands to nothing.
 * (The #else/#endif were missing, leaving the conditional unterminated.)
 */
#ifdef CONFIG_EXCPT_IRQ_SYSC_L1
#define MGR_ATTR __attribute__((l1_text))
#else
#define MGR_ATTR
#endif
/*
 * Given the contents of the status register, return the index of the
 * CPLB that caused the fault.
 */
static inline int faulting_cplb_index(int status)
{
	/* The low 16 status bits are a one-hot mask of the faulting
	 * entry; norm_fr1x32 counts redundant sign bits, so 30 minus
	 * that count recovers the index of the set bit. */
	int signbits = __builtin_bfin_norm_fr1x32(status & 0xFFFF);
	return 30 - signbits;
}
51 * Given the contents of the status register and the DCPLB_DATA contents,
52 * return true if a write access should be permitted.
54 static inline int write_permitted(int status
, unsigned long data
)
56 if (status
& FAULT_USERSUPV
)
57 return !!(data
& CPLB_SUPV_WR
);
59 return !!(data
& CPLB_USER_WR
);
62 /* Counters to implement round-robin replacement. */
63 static int icplb_rr_index
[NR_CPUS
], dcplb_rr_index
[NR_CPUS
];
66 * Find an ICPLB entry to be evicted and return its index.
68 MGR_ATTR
static int evict_one_icplb(unsigned int cpu
)
71 for (i
= first_switched_icplb
; i
< MAX_CPLBS
; i
++)
72 if ((icplb_tbl
[cpu
][i
].data
& CPLB_VALID
) == 0)
74 i
= first_switched_icplb
+ icplb_rr_index
[cpu
];
76 i
-= MAX_CPLBS
- first_switched_icplb
;
77 icplb_rr_index
[cpu
] -= MAX_CPLBS
- first_switched_icplb
;
79 icplb_rr_index
[cpu
]++;
83 MGR_ATTR
static int evict_one_dcplb(unsigned int cpu
)
86 for (i
= first_switched_dcplb
; i
< MAX_CPLBS
; i
++)
87 if ((dcplb_tbl
[cpu
][i
].data
& CPLB_VALID
) == 0)
89 i
= first_switched_dcplb
+ dcplb_rr_index
[cpu
];
91 i
-= MAX_CPLBS
- first_switched_dcplb
;
92 dcplb_rr_index
[cpu
] -= MAX_CPLBS
- first_switched_dcplb
;
94 dcplb_rr_index
[cpu
]++;
98 MGR_ATTR
static noinline
int dcplb_miss(unsigned int cpu
)
100 unsigned long addr
= bfin_read_DCPLB_FAULT_ADDR();
101 int status
= bfin_read_DCPLB_STATUS();
104 unsigned long d_data
;
106 nr_dcplb_miss
[cpu
]++;
108 d_data
= CPLB_SUPV_WR
| CPLB_VALID
| CPLB_DIRTY
| PAGE_SIZE_4KB
;
109 #ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
110 if (bfin_addr_dcacheable(addr
)) {
111 d_data
|= CPLB_L1_CHBL
| ANOMALY_05000158_WORKAROUND
;
112 # ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
113 d_data
|= CPLB_L1_AOW
| CPLB_WT
;
118 if (L2_LENGTH
&& addr
>= L2_START
&& addr
< L2_START
+ L2_LENGTH
) {
121 } else if (addr
>= physical_mem_end
) {
122 if (addr
>= ASYNC_BANK0_BASE
&& addr
< ASYNC_BANK3_BASE
+ ASYNC_BANK3_SIZE
) {
123 mask
= current_rwx_mask
[cpu
];
125 int page
= (addr
- (ASYNC_BANK0_BASE
- _ramend
)) >> PAGE_SHIFT
;
127 int bit
= 1 << (page
& 31);
130 d_data
|= CPLB_USER_RD
;
132 } else if (addr
>= BOOT_ROM_START
&& addr
< BOOT_ROM_START
+ BOOT_ROM_LENGTH
133 && (status
& (FAULT_RW
| FAULT_USERSUPV
)) == FAULT_USERSUPV
) {
134 addr
&= ~(1 * 1024 * 1024 - 1);
135 d_data
&= ~PAGE_SIZE_4KB
;
136 d_data
|= PAGE_SIZE_1MB
;
138 return CPLB_PROT_VIOL
;
139 } else if (addr
>= _ramend
) {
140 d_data
|= CPLB_USER_RD
| CPLB_USER_WR
;
141 if (reserved_mem_dcache_on
)
142 d_data
|= CPLB_L1_CHBL
;
144 mask
= current_rwx_mask
[cpu
];
146 int page
= addr
>> PAGE_SHIFT
;
148 int bit
= 1 << (page
& 31);
151 d_data
|= CPLB_USER_RD
;
153 mask
+= page_mask_nelts
;
155 d_data
|= CPLB_USER_WR
;
158 idx
= evict_one_dcplb(cpu
);
161 dcplb_tbl
[cpu
][idx
].addr
= addr
;
162 dcplb_tbl
[cpu
][idx
].data
= d_data
;
165 bfin_write32(DCPLB_DATA0
+ idx
* 4, d_data
);
166 bfin_write32(DCPLB_ADDR0
+ idx
* 4, addr
);
172 MGR_ATTR
static noinline
int icplb_miss(unsigned int cpu
)
174 unsigned long addr
= bfin_read_ICPLB_FAULT_ADDR();
175 int status
= bfin_read_ICPLB_STATUS();
177 unsigned long i_data
;
179 nr_icplb_miss
[cpu
]++;
181 /* If inside the uncached DMA region, fault. */
182 if (addr
>= _ramend
- DMA_UNCACHED_REGION
&& addr
< _ramend
)
183 return CPLB_PROT_VIOL
;
185 if (status
& FAULT_USERSUPV
)
186 nr_icplb_supv_miss
[cpu
]++;
189 * First, try to find a CPLB that matches this address. If we
190 * find one, then the fact that we're in the miss handler means
191 * that the instruction crosses a page boundary.
193 for (idx
= first_switched_icplb
; idx
< MAX_CPLBS
; idx
++) {
194 if (icplb_tbl
[cpu
][idx
].data
& CPLB_VALID
) {
195 unsigned long this_addr
= icplb_tbl
[cpu
][idx
].addr
;
196 if (this_addr
<= addr
&& this_addr
+ PAGE_SIZE
> addr
) {
203 i_data
= CPLB_VALID
| CPLB_PORTPRIO
| PAGE_SIZE_4KB
;
205 #ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
207 * Normal RAM, and possibly the reserved memory area, are
210 if (addr
< _ramend
||
211 (addr
< physical_mem_end
&& reserved_mem_icache_on
))
212 i_data
|= CPLB_L1_CHBL
| ANOMALY_05000158_WORKAROUND
;
215 if (L2_LENGTH
&& addr
>= L2_START
&& addr
< L2_START
+ L2_LENGTH
) {
218 } else if (addr
>= physical_mem_end
) {
219 if (addr
>= ASYNC_BANK0_BASE
&& addr
< ASYNC_BANK3_BASE
+ ASYNC_BANK3_SIZE
) {
220 if (!(status
& FAULT_USERSUPV
)) {
221 unsigned long *mask
= current_rwx_mask
[cpu
];
224 int page
= (addr
- (ASYNC_BANK0_BASE
- _ramend
)) >> PAGE_SHIFT
;
226 int bit
= 1 << (page
& 31);
228 mask
+= 2 * page_mask_nelts
;
230 i_data
|= CPLB_USER_RD
;
233 } else if (addr
>= BOOT_ROM_START
&& addr
< BOOT_ROM_START
+ BOOT_ROM_LENGTH
234 && (status
& FAULT_USERSUPV
)) {
235 addr
&= ~(1 * 1024 * 1024 - 1);
236 i_data
&= ~PAGE_SIZE_4KB
;
237 i_data
|= PAGE_SIZE_1MB
;
239 return CPLB_PROT_VIOL
;
240 } else if (addr
>= _ramend
) {
241 i_data
|= CPLB_USER_RD
;
242 if (reserved_mem_icache_on
)
243 i_data
|= CPLB_L1_CHBL
;
246 * Two cases to distinguish - a supervisor access must
247 * necessarily be for a module page; we grant it
248 * unconditionally (could do better here in the future).
249 * Otherwise, check the x bitmap of the current process.
251 if (!(status
& FAULT_USERSUPV
)) {
252 unsigned long *mask
= current_rwx_mask
[cpu
];
255 int page
= addr
>> PAGE_SHIFT
;
257 int bit
= 1 << (page
& 31);
259 mask
+= 2 * page_mask_nelts
;
261 i_data
|= CPLB_USER_RD
;
265 idx
= evict_one_icplb(cpu
);
267 icplb_tbl
[cpu
][idx
].addr
= addr
;
268 icplb_tbl
[cpu
][idx
].data
= i_data
;
271 bfin_write32(ICPLB_DATA0
+ idx
* 4, i_data
);
272 bfin_write32(ICPLB_ADDR0
+ idx
* 4, addr
);
278 MGR_ATTR
static noinline
int dcplb_protection_fault(unsigned int cpu
)
280 int status
= bfin_read_DCPLB_STATUS();
282 nr_dcplb_prot
[cpu
]++;
284 if (status
& FAULT_RW
) {
285 int idx
= faulting_cplb_index(status
);
286 unsigned long data
= dcplb_tbl
[cpu
][idx
].data
;
287 if (!(data
& CPLB_WT
) && !(data
& CPLB_DIRTY
) &&
288 write_permitted(status
, data
)) {
290 dcplb_tbl
[cpu
][idx
].data
= data
;
291 bfin_write32(DCPLB_DATA0
+ idx
* 4, data
);
295 return CPLB_PROT_VIOL
;
298 MGR_ATTR
int cplb_hdr(int seqstat
, struct pt_regs
*regs
)
300 int cause
= seqstat
& 0x3f;
301 unsigned int cpu
= raw_smp_processor_id();
304 return dcplb_protection_fault(cpu
);
306 return icplb_miss(cpu
);
308 return dcplb_miss(cpu
);
314 void flush_switched_cplbs(unsigned int cpu
)
319 nr_cplb_flush
[cpu
]++;
321 flags
= hard_local_irq_save();
323 for (i
= first_switched_icplb
; i
< MAX_CPLBS
; i
++) {
324 icplb_tbl
[cpu
][i
].data
= 0;
325 bfin_write32(ICPLB_DATA0
+ i
* 4, 0);
330 for (i
= first_switched_dcplb
; i
< MAX_CPLBS
; i
++) {
331 dcplb_tbl
[cpu
][i
].data
= 0;
332 bfin_write32(DCPLB_DATA0
+ i
* 4, 0);
335 hard_local_irq_restore(flags
);
339 void set_mask_dcplbs(unsigned long *masks
, unsigned int cpu
)
342 unsigned long addr
= (unsigned long)masks
;
343 unsigned long d_data
;
347 current_rwx_mask
[cpu
] = masks
;
351 flags
= hard_local_irq_save();
352 current_rwx_mask
[cpu
] = masks
;
354 if (L2_LENGTH
&& addr
>= L2_START
&& addr
< L2_START
+ L2_LENGTH
) {
358 d_data
= CPLB_SUPV_WR
| CPLB_VALID
| CPLB_DIRTY
| PAGE_SIZE_4KB
;
359 #ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
360 d_data
|= CPLB_L1_CHBL
;
361 # ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
362 d_data
|= CPLB_L1_AOW
| CPLB_WT
;
368 for (i
= first_mask_dcplb
; i
< first_switched_dcplb
; i
++) {
369 dcplb_tbl
[cpu
][i
].addr
= addr
;
370 dcplb_tbl
[cpu
][i
].data
= d_data
;
371 bfin_write32(DCPLB_DATA0
+ i
* 4, d_data
);
372 bfin_write32(DCPLB_ADDR0
+ i
* 4, addr
);
376 hard_local_irq_restore(flags
);