/*
 *  linux/arch/arm/mm/cache-v0.S
 *
 *  Copyright (C) 2004 Tobias Lorenz
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/hardware.h>

#include "proc-macros.S"
/*
 * flush_user_cache_all()
 *
 * Invalidate all cache entries in a particular address space.
 *
 * - mm - mm_struct describing address space
 */
ENTRY(v0_flush_user_cache_all)
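@ Reconstructed body (assumption): the extracted file carries no instructions
@ for these entries. On a cacheless "v0" class core every cache operation is
@ a no-op, so each entry below is sketched as a plain return.
	mov	pc, lr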
/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(v0_flush_kern_cache_all)
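@ nothing to clean or invalidate on a cacheless core (assumed no-op)
	mov	pc, lr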
/*
 * flush_user_cache_range(start, end, flags)
 *
 * Invalidate a range of cache entries in the specified
 * address space.
 *
 * - start - start address (may not be aligned)
 * - end   - end address (exclusive, may not be aligned)
 * - flags - vm_area_struct flags describing address space
 */
ENTRY(v0_flush_user_cache_range)
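@ no cache lines to invalidate (assumed no-op)
	mov	pc, lr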
/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(v0_coherent_kern_range)
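@ I/D coherency is trivial without caches (assumed no-op)
	mov	pc, lr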
/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(v0_coherent_user_range)
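@ as above: no caches, so nothing to do (assumed no-op)
	mov	pc, lr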
/*
 * flush_kern_dcache_page(void *page)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache.
 *
 * - addr - page aligned address
 */
ENTRY(v0_flush_kern_dcache_page)
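@ no D cache, so no aliasing to resolve (assumed no-op)
	mov	pc, lr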
/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(v0_dma_inv_range)
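@ DMA sees memory directly when there is no cache (assumed no-op)
	mov	pc, lr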
/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(v0_dma_flush_range)
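@ nothing to clean or invalidate (assumed no-op)
	mov	pc, lr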
/*
 * dma_clean_range(start, end)
 *
 * Clean (write back) the specified virtual address range.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(v0_dma_clean_range)
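@ nothing to write back (assumed no-op)
	mov	pc, lr

@ Assumption: the function pointer table below lives in init data, as in the
@ other arch/arm/mm/cache-*.S implementations.
	__INITDATA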
	.type	v0_cache_fns, #object
ENTRY(v0_cache_fns)
	.long	v0_flush_kern_cache_all
	.long	v0_flush_user_cache_all
	.long	v0_flush_user_cache_range
	.long	v0_coherent_kern_range
	.long	v0_flush_kern_dcache_page
	.long	v0_dma_inv_range
	.long	v0_dma_clean_range
	.long	v0_dma_flush_range
	.size	v0_cache_fns, . - v0_cache_fns