/*
 *  linux/arch/m68k/mm/cache.c
 *
 *  Instruction cache handling
 *
 *  Copyright (C) 1995  Hamish Macdonald
 */

#include <linux/module.h>
#include <asm/pgalloc.h>
#include <asm/traps.h>

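/*
 * Resolve a kernel virtual address to a physical address by asking the
 * MMU, returning 0 if no valid translation exists.  The lookup differs
 * per CPU: PLPAR on the '060, PTESTR/MMUSR on the '040, and a PTEST
 * descriptor walk on the '020/'030.
 */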
static unsigned long virt_to_phys_slow(unsigned long vaddr)
{
	if (CPU_IS_060) {
		unsigned long paddr;

		/* The PLPAR instruction causes an access error if the translation
		 * is not possible. To catch this we use the same exception mechanism
		 * as for user space accesses in <asm/uaccess.h>. */
		asm volatile (".chip 68060\n"
			      "1: plpar (%0)\n"
			      ".chip 68k\n"
			      "2:\n"
			      ".section .fixup,\"ax\"\n"
			      "   .even\n"
			      "3: sub.l %0,%0\n"
			      "   jra 2b\n"
			      ".previous\n"
			      ".section __ex_table,\"a\"\n"
			      "   .align 4\n"
			      "   .long 1b,3b\n"
			      ".previous"
			      : "=a" (paddr)
			      : "0" (vaddr));
		return paddr;
	} else if (CPU_IS_040) {
		unsigned long mmusr;

		asm volatile (".chip 68040\n\t"
			      "ptestr (%1)\n\t"
			      "movec %%mmusr, %0\n\t"
			      ".chip 68k"
			      : "=r" (mmusr)
			      : "a" (vaddr));
		if (mmusr & MMU_R_040)
			return (mmusr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
	} else {
		unsigned short mmusr;
		unsigned long *descaddr;

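		/* PTEST through all table levels (#7): the physical address of
		 * the last descriptor fetched ends up in descaddr and the MMU
		 * status in mmusr. */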
		asm volatile ("ptestr %3,%2@,#7,%0\n\t"
			      "pmove %%psr,%1"
			      : "=a&" (descaddr), "=m" (mmusr)
			      : "a" (vaddr), "d" (get_fs().seg));
		if (mmusr & (MMU_I|MMU_B|MMU_L))
			return 0;
		descaddr = phys_to_virt((unsigned long)descaddr);
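		/* MMU_NUM is the number of table levels that were used: an
		 * early-terminating descriptor at level 1 maps 32MB, one at
		 * level 2 maps 256KB, and level 3 is an ordinary page. */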
		switch (mmusr & MMU_NUM) {
		case 1:
			return (*descaddr & 0xfe000000) | (vaddr & 0x01ffffff);
		case 2:
			return (*descaddr & 0xfffc0000) | (vaddr & 0x0003ffff);
		case 3:
			return (*descaddr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
		}
	}
	return 0;
}

/* Push n pages at kernel virtual address and clear the icache */
/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
void flush_icache_range(unsigned long address, unsigned long endaddr)
{
	if (CPU_IS_COLDFIRE) {
		unsigned long start, end;
		start = address & ICACHE_SET_MASK;
		end = endaddr & ICACHE_SET_MASK;
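		/* The set-index range can wrap past the last cache set;
		 * flush the wrapped-around tail first, then the rest. */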
		if (start > end) {
			flush_cf_icache(0, end);
			end = ICACHE_MAX_ADDR;
		}
		flush_cf_icache(start, end);
	} else if (CPU_IS_040_OR_060) {
		address &= PAGE_MASK;

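		/* cpushp takes a physical page address: push dirty data-cache
		 * lines and invalidate the icache lines for every page in the
		 * range. */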
		do {
			asm volatile ("nop\n\t"
				      ".chip 68040\n\t"
				      "cpushp %%bc,(%0)\n\t"
				      ".chip 68k"
				      : : "a" (virt_to_phys_slow(address)));
			address += PAGE_SIZE;
		} while (address < endaddr);
	} else {
		unsigned long tmp;
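		/* '020/'030 have no per-page push: setting FLUSH_I in the
		 * CACR invalidates the whole instruction cache. */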
		asm volatile ("movec %%cacr,%0\n\t"
			      "orw %1,%0\n\t"
			      "movec %0,%%cacr"
			      : "=&d" (tmp)
			      : "di" (FLUSH_I));
	}
}
EXPORT_SYMBOL(flush_icache_range);

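/*
 * Flush the icache lines covering a single page that has just been
 * written through a kernel mapping, e.g. after copy_to_user_page() has
 * stored instructions into a user page.
 */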
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	if (CPU_IS_COLDFIRE) {
		unsigned long start, end;
		start = addr & ICACHE_SET_MASK;
		end = (addr + len) & ICACHE_SET_MASK;
		if (start > end) {
			flush_cf_icache(0, end);
			end = ICACHE_MAX_ADDR;
		}
		flush_cf_icache(start, end);

	} else if (CPU_IS_040_OR_060) {
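		/* Only one page is involved here: push and invalidate the
		 * cache lines for the whole page via its physical address. */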
		asm volatile ("nop\n\t"
			      ".chip 68040\n\t"
			      "cpushp %%bc,(%0)\n\t"
			      ".chip 68k"
			      : : "a" (page_to_phys(page)));
	} else {
		unsigned long tmp;
		asm volatile ("movec %%cacr,%0\n\t"
			      "orw %1,%0\n\t"
			      "movec %0,%%cacr"
			      : "=&d" (tmp)
			      : "di" (FLUSH_I));
	}
}