arch/tile/include/asm/barrier.h
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */
#ifndef _ASM_TILE_BARRIER_H
#define _ASM_TILE_BARRIER_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <arch/chip.h>
#include <arch/spr_def.h>
#include <asm/timex.h>

#define __sync()	__insn_mf()
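/*
 * __insn_mf() is the compiler builtin for the tile "mf" (memory fence)
 * instruction; this single full fence backs mb(), rmb() and wmb() below.
 */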
#include <hv/syscall_public.h>

/*
 * Issue an uncacheable load to each memory controller, then
 * wait until those loads have completed.
 */
static inline void __mb_incoherent(void)
{
	long clobber_r10;

	/*
	 * Trap to the hypervisor with the HV_SYS_fence_incoherent syscall
	 * number in r10; all other caller-save registers are clobbered.
	 */
	asm volatile("swint2"
		     : "=R10" (clobber_r10)
		     : "R10" (HV_SYS_fence_incoherent)
		     : "r0", "r1", "r2", "r3", "r4",
		       "r5", "r6", "r7", "r8", "r9",
		       "r11", "r12", "r13", "r14",
		       "r15", "r16", "r17", "r18", "r19",
		       "r20", "r21", "r22", "r23", "r24",
		       "r25", "r26", "r27", "r28", "r29");
}
/* Fence to guarantee visibility of stores to incoherent memory. */
static inline void
mb_incoherent(void)
{
	__insn_mf();

	{
#if CHIP_HAS_TILE_WRITE_PENDING()
		/*
		 * Spin until outstanding writes have drained, but give up
		 * after WRITE_TIMEOUT_CYCLES and fall back to the
		 * hypervisor fence below.
		 */
		const unsigned long WRITE_TIMEOUT_CYCLES = 400;
		unsigned long start = get_cycles_low();
		do {
			if (__insn_mfspr(SPR_TILE_WRITE_PENDING) == 0)
				return;
		} while ((get_cycles_low() - start) < WRITE_TIMEOUT_CYCLES);
#endif /* CHIP_HAS_TILE_WRITE_PENDING() */
		(void) __mb_incoherent();
	}
}
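/*
 * mb(), rmb() and wmb() all map to the same __sync() fence below; iob()
 * additionally waits for stores to incoherent (uncached) memory to become
 * visible, via mb_incoherent().
 */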
#define fast_wmb()	__sync()
#define fast_rmb()	__sync()
#define fast_mb()	__sync()
#define fast_iob()	mb_incoherent()

#define wmb()		fast_wmb()
#define rmb()		fast_rmb()
#define mb()		fast_mb()
#define iob()		fast_iob()
#ifndef __tilegx__ /* 32 bit */
/*
 * We need a barrier before modifying the word, since the _atomic_xxx()
 * routines just tns the lock and then read/modify/write the word.
 * But after the word is updated, the routine issues an "mf" before
 * returning, and since it's a function call, we don't even need a
 * compiler barrier.  (A usage sketch follows the #endif below.)
 */
#define __smp_mb__before_atomic()	__smp_mb()
#define __smp_mb__after_atomic()	do { } while (0)
#define smp_mb__after_atomic()		__smp_mb__after_atomic()
#else /* 64 bit */
#define __smp_mb__before_atomic()	__smp_mb()
#define __smp_mb__after_atomic()	__smp_mb()
#endif
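/*
 * Illustrative sketch only (generic caller, hypothetical "refs" field):
 * non-value-returning atomics such as atomic_inc() are not ordered by
 * themselves, so a caller needing full ordering brackets them like this:
 *
 *	smp_mb__before_atomic();
 *	atomic_inc(&obj->refs);
 *	smp_mb__after_atomic();
 *
 * On 32-bit tile the _atomic_xxx() routine already ends with an "mf",
 * which is why __smp_mb__after_atomic() can be a no-op there.
 */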
/*
 * The TILE architecture does not do speculative reads; this ensures
 * that a control dependency also orders against loads and already provides
 * a LOAD->{LOAD,STORE} order and can forgo the additional RMB.
 */
#define smp_acquire__after_ctrl_dep()	barrier()
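/*
 * Illustrative sketch only (hypothetical "flag" and "data"): a
 * control-dependency wait loop relying on the guarantee above might be
 *
 *	while (!READ_ONCE(flag))
 *		cpu_relax();
 *	smp_acquire__after_ctrl_dep();
 *	val = READ_ONCE(data);
 *
 * where the later load of "data" is ordered after the load of "flag".
 * On tile this costs only a compiler barrier(), since the hardware does
 * not speculate the dependent loads.
 */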
#include <asm-generic/barrier.h>

#endif /* !__ASSEMBLY__ */
#endif /* _ASM_TILE_BARRIER_H */