/*
 * Blackfin CPLB exception handling for when the MPU is on
 *
 * Copyright 2008-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/module.h>
#include <linux/mm.h>

#include <asm/blackfin.h>
#include <asm/cacheflush.h>
#include <asm/cplb.h>
#include <asm/cplbinit.h>
#include <asm/mmu_context.h>

/*
 * WARNING
 *
 * This file is compiled with certain -ffixed-reg options.  We have to
 * make sure not to call any functions here that could clobber these
 * registers.
 */
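
/*
 * Layout note (inferred from the bitmap arithmetic below): each process
 * carries three consecutive page bitmaps - read, write and execute -
 * each page_mask_nelts words long.  current_rwx_mask points at the
 * bitmap set of the task currently running on each CPU.
 */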
int page_mask_nelts;
int page_mask_order;
unsigned long *current_rwx_mask[NR_CPUS];
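
/* Per-CPU fault statistics, bumped by the handlers below. */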
int nr_dcplb_miss[NR_CPUS], nr_icplb_miss[NR_CPUS];
int nr_icplb_supv_miss[NR_CPUS], nr_dcplb_prot[NR_CPUS];
int nr_cplb_flush[NR_CPUS];

/*
 * Given the contents of the status register, return the index of the
 * CPLB that caused the fault.
 */
static inline int faulting_cplb_index(int status)
{
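	/*
	 * The low 16 status bits are a one-hot mask of the entry that
	 * faulted.  norm_fr1x32 returns the number of redundant sign
	 * bits, i.e. 30 - k for a value whose highest set bit is bit k,
	 * so subtracting it from 30 recovers the entry index.
	 */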
	int signbits = __builtin_bfin_norm_fr1x32(status & 0xFFFF);
	return 30 - signbits;
}

/*
 * Given the contents of the status register and the DCPLB_DATA contents,
 * return true if a write access should be permitted.
 */
static inline int write_permitted(int status, unsigned long data)
{
	if (status & FAULT_USERSUPV)
		return !!(data & CPLB_SUPV_WR);
	else
		return !!(data & CPLB_USER_WR);
}

/* Counters to implement round-robin replacement.  */
static int icplb_rr_index[NR_CPUS], dcplb_rr_index[NR_CPUS];

/*
 * Find an ICPLB entry to be evicted and return its index.
 */
static int evict_one_icplb(unsigned int cpu)
{
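	/* Prefer an entry that is no longer valid; only evict a live
	   one if every switched slot is in use. */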
	int i;
	for (i = first_switched_icplb; i < MAX_CPLBS; i++)
		if ((icplb_tbl[cpu][i].data & CPLB_VALID) == 0)
			return i;
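
	/* All slots valid: take the round-robin victim, wrapping the
	   index back into [first_switched_icplb, MAX_CPLBS). */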
	i = first_switched_icplb + icplb_rr_index[cpu];
	if (i >= MAX_CPLBS) {
		i -= MAX_CPLBS - first_switched_icplb;
		icplb_rr_index[cpu] -= MAX_CPLBS - first_switched_icplb;
	}
	icplb_rr_index[cpu]++;
	return i;
}

static int evict_one_dcplb(unsigned int cpu)
{
	int i;
	for (i = first_switched_dcplb; i < MAX_CPLBS; i++)
		if ((dcplb_tbl[cpu][i].data & CPLB_VALID) == 0)
			return i;
	i = first_switched_dcplb + dcplb_rr_index[cpu];
	if (i >= MAX_CPLBS) {
		i -= MAX_CPLBS - first_switched_dcplb;
		dcplb_rr_index[cpu] -= MAX_CPLBS - first_switched_dcplb;
	}
	dcplb_rr_index[cpu]++;
	return i;
}

static noinline int dcplb_miss(unsigned int cpu)
{
	unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
	int status = bfin_read_DCPLB_STATUS();
	unsigned long *mask;
	int idx;
	unsigned long d_data;

	nr_dcplb_miss[cpu]++;

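	/* Start from a supervisor-writable, dirty 4K page descriptor
	   and refine it according to where the fault address falls. */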
	d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
	if (bfin_addr_dcacheable(addr)) {
		d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
		d_data |= CPLB_L1_AOW | CPLB_WT;
# endif
	}
#endif

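	/*
	 * Classify the fault address: L2 SRAM gets its fixed mapping;
	 * above physical memory only the async banks and the boot ROM
	 * may be mapped, and only for supervisor accesses; between
	 * _ramend and physical_mem_end lies the reserved memory area,
	 * which userspace may read and write; everything else is paged
	 * RAM, checked against the current process's R/W bitmaps.
	 */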
	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
		addr = L2_START;
		d_data = L2_DMEMORY;
	} else if (addr >= physical_mem_end) {
		if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE
		    && (status & FAULT_USERSUPV)) {
			addr &= ~0x3fffff;
			d_data &= ~PAGE_SIZE_4KB;
			d_data |= PAGE_SIZE_4MB;
		} else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
		    && (status & (FAULT_RW | FAULT_USERSUPV)) == FAULT_USERSUPV) {
			addr &= ~(1 * 1024 * 1024 - 1);
			d_data &= ~PAGE_SIZE_4KB;
			d_data |= PAGE_SIZE_1MB;
		} else
			return CPLB_PROT_VIOL;
	} else if (addr >= _ramend) {
		d_data |= CPLB_USER_RD | CPLB_USER_WR;
	} else {
		mask = current_rwx_mask[cpu];
		if (mask) {
			int page = addr >> PAGE_SHIFT;
			int idx = page >> 5;
			int bit = 1 << (page & 31);

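			/* The read bitmap comes first; the write bitmap
			   starts page_mask_nelts words further on. */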
			if (mask[idx] & bit)
				d_data |= CPLB_USER_RD;

			mask += page_mask_nelts;
			if (mask[idx] & bit)
				d_data |= CPLB_USER_WR;
		}
	}

	idx = evict_one_dcplb(cpu);

	addr &= PAGE_MASK;
	dcplb_tbl[cpu][idx].addr = addr;
	dcplb_tbl[cpu][idx].data = d_data;

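	/* The hardware entry may only be rewritten while the DCPLBs
	   are disabled. */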
	_disable_dcplb();
	bfin_write32(DCPLB_DATA0 + idx * 4, d_data);
	bfin_write32(DCPLB_ADDR0 + idx * 4, addr);
	_enable_dcplb();

	return 0;
}

static noinline int icplb_miss(unsigned int cpu)
{
	unsigned long addr = bfin_read_ICPLB_FAULT_ADDR();
	int status = bfin_read_ICPLB_STATUS();
	int idx;
	unsigned long i_data;

	nr_icplb_miss[cpu]++;

	/* If inside the uncached DMA region, fault.  */
	if (addr >= _ramend - DMA_UNCACHED_REGION && addr < _ramend)
		return CPLB_PROT_VIOL;

	if (status & FAULT_USERSUPV)
		nr_icplb_supv_miss[cpu]++;

	/*
	 * First, try to find a CPLB that matches this address.  If we
	 * find one, then the fact that we're in the miss handler means
	 * that the instruction crosses a page boundary, so map the
	 * following page instead.
	 */
	for (idx = first_switched_icplb; idx < MAX_CPLBS; idx++) {
		if (icplb_tbl[cpu][idx].data & CPLB_VALID) {
			unsigned long this_addr = icplb_tbl[cpu][idx].addr;
			if (this_addr <= addr && this_addr + PAGE_SIZE > addr) {
				addr += PAGE_SIZE;
				break;
			}
		}
	}
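
	/* Start from a plain valid 4K page descriptor; cacheability
	   and user access rights are or'ed in below. */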
	i_data = CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4KB;

#ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
	/*
	 * Normal RAM, and possibly the reserved memory area, are
	 * cacheable.
	 */
	if (addr < _ramend ||
	    (addr < physical_mem_end && reserved_mem_icache_on))
		i_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
#endif

	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
		addr = L2_START;
		i_data = L2_IMEMORY;
	} else if (addr >= physical_mem_end) {
		if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
		    && (status & FAULT_USERSUPV)) {
			addr &= ~(1 * 1024 * 1024 - 1);
			i_data &= ~PAGE_SIZE_4KB;
			i_data |= PAGE_SIZE_1MB;
		} else
			return CPLB_PROT_VIOL;
	} else if (addr >= _ramend) {
		i_data |= CPLB_USER_RD;
	} else {
		/*
		 * Two cases to distinguish - a supervisor access must
		 * necessarily be for a module page; we grant it
		 * unconditionally (could do better here in the future).
		 * Otherwise, check the x bitmap of the current process.
		 */
		if (!(status & FAULT_USERSUPV)) {
			unsigned long *mask = current_rwx_mask[cpu];

			if (mask) {
				int page = addr >> PAGE_SHIFT;
				int idx = page >> 5;
				int bit = 1 << (page & 31);

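				/* Skip past the read and write bitmaps
				   to reach the execute bitmap. */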
				mask += 2 * page_mask_nelts;
				if (mask[idx] & bit)
					i_data |= CPLB_USER_RD;
			}
		}
	}

	idx = evict_one_icplb(cpu);
	addr &= PAGE_MASK;
	icplb_tbl[cpu][idx].addr = addr;
	icplb_tbl[cpu][idx].data = i_data;

	_disable_icplb();
	bfin_write32(ICPLB_DATA0 + idx * 4, i_data);
	bfin_write32(ICPLB_ADDR0 + idx * 4, addr);
	_enable_icplb();

	return 0;
}

static noinline int dcplb_protection_fault(unsigned int cpu)
{
	int status = bfin_read_DCPLB_STATUS();

	nr_dcplb_prot[cpu]++;

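	/*
	 * The only fault we can fix up here is a write to a clean,
	 * write-back page that is in fact permitted: it merely needs
	 * the dirty bit set.  Anything else is a real violation.
	 */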
	if (status & FAULT_RW) {
		int idx = faulting_cplb_index(status);
		unsigned long data = dcplb_tbl[cpu][idx].data;
		if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) &&
		    write_permitted(status, data)) {
			data |= CPLB_DIRTY;
			dcplb_tbl[cpu][idx].data = data;
			bfin_write32(DCPLB_DATA0 + idx * 4, data);
			return 0;
		}
	}
	return CPLB_PROT_VIOL;
}

int cplb_hdr(int seqstat, struct pt_regs *regs)
{
	int cause = seqstat & 0x3f;
	unsigned int cpu = raw_smp_processor_id();
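	/*
	 * EXCAUSE codes, per the Blackfin Processor Programming
	 * Reference: 0x23 data access CPLB protection violation,
	 * 0x26 data access CPLB miss, 0x2C instruction fetch CPLB miss.
	 */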
	switch (cause) {
	case 0x23:
		return dcplb_protection_fault(cpu);
	case 0x2C:
		return icplb_miss(cpu);
	case 0x26:
		return dcplb_miss(cpu);
	default:
		return 1;
	}
}

void flush_switched_cplbs(unsigned int cpu)
{
	int i;
	unsigned long flags;

	nr_cplb_flush[cpu]++;

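	/* Invalidate every switched entry, instruction and data alike,
	   with hardware interrupts off so a miss cannot race with us. */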
	local_irq_save_hw(flags);
	_disable_icplb();
	for (i = first_switched_icplb; i < MAX_CPLBS; i++) {
		icplb_tbl[cpu][i].data = 0;
		bfin_write32(ICPLB_DATA0 + i * 4, 0);
	}
	_enable_icplb();

	_disable_dcplb();
	for (i = first_switched_dcplb; i < MAX_CPLBS; i++) {
		dcplb_tbl[cpu][i].data = 0;
		bfin_write32(DCPLB_DATA0 + i * 4, 0);
	}
	_enable_dcplb();
	local_irq_restore_hw(flags);
}

void set_mask_dcplbs(unsigned long *masks, unsigned int cpu)
{
	int i;
	unsigned long addr = (unsigned long)masks;
	unsigned long d_data;
	unsigned long flags;

	if (!masks) {
		current_rwx_mask[cpu] = masks;
		return;
	}

	local_irq_save_hw(flags);
	current_rwx_mask[cpu] = masks;
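
	/* Cover the mask pages themselves with the dedicated mask
	   DCPLBs so the miss handler can read the bitmaps without
	   itself taking a CPLB miss. */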

	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
		addr = L2_START;
		d_data = L2_DMEMORY;
	} else {
		d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
		d_data |= CPLB_L1_CHBL;
# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
		d_data |= CPLB_L1_AOW | CPLB_WT;
# endif
#endif
	}

	_disable_dcplb();
	for (i = first_mask_dcplb; i < first_switched_dcplb; i++) {
		dcplb_tbl[cpu][i].addr = addr;
		dcplb_tbl[cpu][i].data = d_data;
		bfin_write32(DCPLB_DATA0 + i * 4, d_data);
		bfin_write32(DCPLB_ADDR0 + i * 4, addr);
		addr += PAGE_SIZE;
	}
	_enable_dcplb();
	local_irq_restore_hw(flags);
}