/*
 * include/asm-powerpc/cache.h
 * Extracted from wrt350n-kernel.git, blob 53507046a1b101adcbda66dad89fc715ad9d0908.
 * (The scraped page also carried an unrelated commit subject:
 *  "x86: introduce native_set_pte_atomic() on 64-bit too".)
 */
#ifndef _ASM_POWERPC_CACHE_H
#define _ASM_POWERPC_CACHE_H

#ifdef __KERNEL__

/* bytes per L1 cache line */
#if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
/* 8xx / 403GCX cores have 16-byte cache lines */
#define L1_CACHE_SHIFT		4
#define MAX_COPY_PREFETCH	1
#elif defined(CONFIG_PPC32)
/* other 32-bit PPC cores have 32-byte cache lines */
#define L1_CACHE_SHIFT		5
#define MAX_COPY_PREFETCH	4
#else /* CONFIG_PPC64 */
/* 64-bit PPC uses 128-byte cache lines */
#define L1_CACHE_SHIFT		7
#endif

#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)

#define SMP_CACHE_BYTES		L1_CACHE_BYTES

#if defined(__powerpc64__) && !defined(__ASSEMBLY__)

/*
 * Run-time cache geometry, filled in from the device tree at boot.
 * NOTE(review): field types rely on the kernel's u32 typedef from
 * <asm/types.h>, which this header expects to be available via its
 * includers.
 */
struct ppc64_caches {
	u32	dsize;			/* L1 d-cache size */
	u32	dline_size;		/* L1 d-cache line size */
	u32	log_dline_size;
	u32	dlines_per_page;
	u32	isize;			/* L1 i-cache size */
	u32	iline_size;		/* L1 i-cache line size */
	u32	log_iline_size;
	u32	ilines_per_page;
};

extern struct ppc64_caches ppc64_caches;
#endif /* __powerpc64__ && ! __ASSEMBLY__ */

#if !defined(__ASSEMBLY__)
/* place rarely-written data in its own section to avoid false sharing */
#define __read_mostly __attribute__((__section__(".data.read_mostly")))
#endif

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CACHE_H */