arch/tile/include/asm/percpu.h
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */
#ifndef _ASM_TILE_PERCPU_H
#define _ASM_TILE_PERCPU_H

register unsigned long my_cpu_offset_reg asm("tp");

#ifdef CONFIG_PREEMPT
/*
 * For full preemption, we can't just use the register variable
 * directly, since we need barrier() to hazard against it, causing the
 * compiler to reload anything computed from a previous "tp" value.
 * But we also don't want to use volatile asm, since we'd like the
 * compiler to be able to cache the value across multiple percpu reads.
 * So we use a fake stack read as a hazard against barrier().
 * The 'U' constraint is like 'm' but disallows postincrement.
 */
static inline unsigned long __my_cpu_offset(void)
{
	unsigned long tp;
	register unsigned long *sp asm("sp");
	asm("move %0, tp" : "=r" (tp) : "U" (*sp));
	return tp;
}
#define __my_cpu_offset __my_cpu_offset()
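
/*
 * Illustrative sketch (not part of the original header): the "U" (*sp)
 * input operand is what ties this asm to barrier().  Without it, the
 * compiler could legally keep a cached copy of the offset across a
 * preemption point, roughly:
 *
 *	unsigned long off = __my_cpu_offset();	// reads "tp"
 *	barrier();				// task may migrate here
 *	use(off);				// stale offset still used
 *
 * Because barrier() clobbers memory and the asm takes a (fake) memory
 * input, any value derived from "tp" must be recomputed after the
 * barrier.  ("use()" above is a hypothetical consumer, shown only for
 * illustration.)
 */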
#else
/*
 * We don't need to hazard against barrier() since "tp" doesn't ever
 * change with PREEMPT_NONE, and with PREEMPT_VOLUNTARY it only
 * changes at function call points, at which we are already re-reading
 * the value of "tp" due to "my_cpu_offset_reg" being a global variable.
 */
#define __my_cpu_offset my_cpu_offset_reg
#endif

#define set_my_cpu_offset(tp) (my_cpu_offset_reg = (tp))
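
/*
 * Usage sketch (an illustrative assumption, not code from this file):
 * early per-cpu initialization is expected to point "tp" at this cpu's
 * per-cpu area, along the lines of
 *
 *	set_my_cpu_offset(__per_cpu_offset[smp_processor_id()]);
 *
 * after which the generic accessors pulled in below resolve through
 * __my_cpu_offset.
 */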

#include <asm-generic/percpu.h>

#endif /* _ASM_TILE_PERCPU_H */