pv_ops_mirror.git: include/asm-ia64/cache.h

#ifndef _ASM_IA64_CACHE_H
#define _ASM_IA64_CACHE_H

/*
 * Copyright (C) 1998-2000 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

/* Bytes per L1 (data) cache line. */
#define L1_CACHE_SHIFT		CONFIG_IA64_L1_CACHE_SHIFT
#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
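
/*
 * Illustrative example: assuming CONFIG_IA64_L1_CACHE_SHIFT is
 * configured as 7, as is typical for McKinley-class Itanium CPUs,
 * L1_CACHE_BYTES evaluates to (1 << 7) = 128 bytes per cache line.
 */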

#ifdef CONFIG_SMP
# define SMP_CACHE_SHIFT	L1_CACHE_SHIFT
# define SMP_CACHE_BYTES	L1_CACHE_BYTES
#else
  /*
   * The "aligned" directive can only _increase_ alignment, so this is
   * safe and provides an easy way to avoid wasting space on a
   * uni-processor:
   */
# define SMP_CACHE_SHIFT	3
# define SMP_CACHE_BYTES	(1 << 3)
#endif
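
/*
 * A minimal sketch of how SMP_CACHE_BYTES is meant to be used (the
 * struct name is hypothetical): because "aligned" can only increase
 * alignment, the same annotation pads the structure out to a full
 * cache line on an SMP kernel, avoiding false sharing, while costing
 * only 8-byte alignment on a uni-processor build.
 *
 *	struct example_percpu_stat {
 *		unsigned long hits;
 *	} __attribute__((__aligned__(SMP_CACHE_BYTES)));
 */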

#define __read_mostly __attribute__((__section__(".data.read_mostly")))
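
/*
 * A minimal usage sketch for __read_mostly (the variable name is
 * hypothetical): data that is written rarely but read on hot paths
 * can be grouped into the .data.read_mostly section so it does not
 * share cache lines with frequently written variables.
 *
 *	static int example_debug_level __read_mostly;
 */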

#endif /* _ASM_IA64_CACHE_H */