#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>

#include <asm/memory.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
/*
 * Cache function table for the block-operation variants.
 * NOTE(review): defined elsewhere (not in this chunk); it is not
 * referenced by the visible code here -- confirm it is still needed.
 */
extern struct cpu_cache_fns blk_cache_fns;
16 * blk_flush_kern_dcache_page(kaddr)
18 * Ensure that the data held in the page kaddr is written back
19 * to the page in question.
21 * - kaddr - kernel address (guaranteed to be page aligned)
23 static void __attribute__((naked
))
24 blk_flush_kern_dcache_page(void *kaddr
)
28 1: .word 0xec401f0e @ mcrr p15, 0, r0, r1, c14, 0 @ blocking \n\
30 mcr p15, 0, r0, c7, c5, 0 \n\
31 mcr p15, 0, r0, c7, c10, 4 \n\
38 * blk_dma_inv_range(start,end)
40 * Invalidate the data cache within the specified region; we will
41 * be performing a DMA operation in this region and we want to
42 * purge old data in the cache.
44 * - start - virtual start address of region
45 * - end - virtual end address of region
47 static void __attribute__((naked
))
48 blk_dma_inv_range_unified(unsigned long start
, unsigned long end
)
52 mcrne p15, 0, r0, c7, c11, 1 @ clean unified line \n\
54 mcrne p15, 0, r1, c7, c15, 1 @ clean & invalidate unified line\n\
55 .word 0xec401f06 @ mcrr p15, 0, r1, r0, c6, 0 @ blocking \n\
57 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer \n\
60 : "I" (L1_CACHE_BYTES
- 1));
63 static void __attribute__((naked
))
64 blk_dma_inv_range_harvard(unsigned long start
, unsigned long end
)
68 mcrne p15, 0, r0, c7, c10, 1 @ clean D line \n\
70 mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D line \n\
71 .word 0xec401f06 @ mcrr p15, 0, r1, r0, c6, 0 @ blocking \n\
73 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer \n\
76 : "I" (L1_CACHE_BYTES
- 1));
/*
 *	blk_dma_clean_range(start,end)
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 *
 * Clean (write back) the region via a single blocking block operation;
 * no boundary fixup is needed since cleaning never discards data.
 * Naked function: start/end arrive in r0/r1.
 */
static void __attribute__((naked))
blk_dma_clean_range(unsigned long start, unsigned long end)
{
	asm(
	".word	0xec401f0c	@ mcrr	p15, 0, r1, r0, c12, 0	@ blocking	\n\
	mov	r0, #0							\n\
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer	\n\
	mov	pc, lr");
}
/*
 *	blk_dma_flush_range(start,end)
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 *
 * Clean + invalidate the region with one blocking block operation.
 * Naked function: start/end arrive in r0/r1.
 */
static void __attribute__((naked))
blk_dma_flush_range(unsigned long start, unsigned long end)
{
	asm(
	".word	0xec401f0e	@ mcrr	p15, 0, r1, r0, c14, 0	@ blocking	\n\
	mov	pc, lr");
}
107 static int blockops_trap(struct pt_regs
*regs
, unsigned int instr
)
109 regs
->ARM_r4
|= regs
->ARM_r2
;
/*
 * Human-readable names for the five probed block operations, indexed by
 * err bit position (r2 = 1,2,4,8,16 in blockops_check()'s probe asm).
 * "Clean data range" was missing from this chunk and restored to match
 * the five probes.
 */
static char *func[] = {
	"Prefetch data range",
	"Clean+Invalidate data range",
	"Clean data range",
	"Invalidate data range",
	"Invalidate instr range"
};
122 static struct undef_hook blockops_hook __initdata
= {
123 .instr_mask
= 0x0fffffd0,
124 .instr_val
= 0x0c401f00,
125 .cpsr_mask
= PSR_T_BIT
,
130 static int __init
blockops_check(void)
132 register unsigned int err
asm("r4") = 0;
133 unsigned int err_pos
= 1;
134 unsigned int cache_type
;
137 asm("mrc p15, 0, %0, c0, c0, 1" : "=r" (cache_type
));
139 printk("Checking V6 block cache operations:\n");
140 register_undef_hook(&blockops_hook
);
142 __asm__ ("mov r0, %0\n\t"
145 ".word 0xec401f2c @ mcrr p15, 0, r1, r0, c12, 2\n\t"
147 ".word 0xec401f0e @ mcrr p15, 0, r1, r0, c14, 0\n\t"
149 ".word 0xec401f0c @ mcrr p15, 0, r1, r0, c12, 0\n\t"
151 ".word 0xec401f06 @ mcrr p15, 0, r1, r0, c6, 0\n\t"
153 ".word 0xec401f05 @ mcrr p15, 0, r1, r0, c5, 0\n\t"
155 : "r" (PAGE_OFFSET
), "r" (PAGE_OFFSET
+ 128)
158 unregister_undef_hook(&blockops_hook
);
160 for (i
= 0; i
< ARRAY_SIZE(func
); i
++, err_pos
<<= 1)
161 printk("%30s: %ssupported\n", func
[i
], err
& err_pos
? "not " : "");
163 if ((err
& 8) == 0) {
164 printk(" --> Using %s block cache invalidate\n",
165 cache_type
& (1 << 24) ? "harvard" : "unified");
166 if (cache_type
& (1 << 24))
167 cpu_cache
.dma_inv_range
= blk_dma_inv_range_harvard
;
169 cpu_cache
.dma_inv_range
= blk_dma_inv_range_unified
;
171 if ((err
& 4) == 0) {
172 printk(" --> Using block cache clean\n");
173 cpu_cache
.dma_clean_range
= blk_dma_clean_range
;
175 if ((err
& 2) == 0) {
176 printk(" --> Using block cache clean+invalidate\n");
177 cpu_cache
.dma_flush_range
= blk_dma_flush_range
;
178 cpu_cache
.flush_kern_dcache_page
= blk_flush_kern_dcache_page
;
/* Run the probe once at boot, at default initcall level. */
__initcall(blockops_check);