arch/sparc/include/asm/tsb.h

#ifndef _SPARC64_TSB_H
#define _SPARC64_TSB_H

/* The sparc64 TSB is similar to the powerpc hashtables.  It's a
 * power-of-2 sized table of TAG/PTE pairs.  The cpu precomputes
 * pointers into this table for 8K and 64K page sizes, and also a
 * comparison TAG based upon the virtual address and context which
 * faults.
 *
 * TLB miss trap handler software does the actual lookup via something
 * of the form:
 *
 *	ldxa		[%g0] ASI_{D,I}MMU_TSB_8KB_PTR, %g1
 *	ldxa		[%g0] ASI_{D,I}MMU, %g6
 *	sllx		%g6, 22, %g6
 *	srlx		%g6, 22, %g6
 *	ldda		[%g1] ASI_NUCLEUS_QUAD_LDD, %g4
 *	cmp		%g4, %g6
 *	bne,pn		%xcc, tsb_miss_{d,i}tlb
 *	 mov		FAULT_CODE_{D,I}TLB, %g3
 *	stxa		%g5, [%g0] ASI_{D,I}TLB_DATA_IN
 *	retry
 *
 * Each 16-byte slot of the TSB is the 8-byte tag and then the 8-byte
 * PTE.  The TAG is of the same layout as the TLB TAG TARGET mmu
 * register which is:
 *
 *	-------------------------------------------------
 *	| -  |  CONTEXT |  -  |    VADDR bits 63:22    |
 *	-------------------------------------------------
 *	 63 61 60      48 47 42 41                    0
 *
 * But actually, since we use per-mm TSB's, we zero out the CONTEXT
 * field.
 *
 * Like the powerpc hashtables we need to use locking in order to
 * synchronize while we update the entries.  PTE updates need locking
 * as well.
 *
 * We need to carefully choose a lock bit for the TSB entry.  We
 * choose to use bit 47 in the tag.  Also, since we never map anything
 * at page zero in context zero, we use zero as an invalid tag entry.
 * When the lock bit is set, this forces a tag comparison failure.
 */

#define TSB_TAG_LOCK_BIT	47
#define TSB_TAG_LOCK_HIGH	(1 << (TSB_TAG_LOCK_BIT - 32))

#define TSB_TAG_INVALID_BIT	46
#define TSB_TAG_INVALID_HIGH	(1 << (TSB_TAG_INVALID_BIT - 32))

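/* Illustrative sketch (not part of the original header): how a tag is
 * formed and how the lock bit is tested from C.  Since per-mm TSBs zero
 * the CONTEXT field, the tag is simply VADDR bits 63:22.  The helper
 * names are hypothetical; the real lookups and updates are done by the
 * assembly macros below.
 */
#ifndef __ASSEMBLY__
static inline unsigned long example_tsb_tag(unsigned long vaddr)
{
	return vaddr >> 22;
}

static inline int example_tsb_tag_locked(unsigned long tag)
{
	/* Bit 47 of the tag sits at bit 15 of its high 32-bit word. */
	return ((tag >> 32) & TSB_TAG_LOCK_HIGH) != 0;
}
#endif
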
/* Some cpus support physical address quad loads.  We want to use
 * those if possible so we don't need to hard-lock the TSB mapping
 * into the TLB.  We encode some instruction patching in order to
 * support this.
 *
 * The kernel TSB is locked into the TLB by virtue of being in the
 * kernel image, so we don't play these games for swapper_tsb access.
 */
#ifndef __ASSEMBLY__
struct tsb_ldquad_phys_patch_entry {
	unsigned int	addr;
	unsigned int	sun4u_insn;
	unsigned int	sun4v_insn;
};
extern struct tsb_ldquad_phys_patch_entry __tsb_ldquad_phys_patch,
	__tsb_ldquad_phys_patch_end;

struct tsb_phys_patch_entry {
	unsigned int	addr;
	unsigned int	insn;
};
extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
#endif

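/* Illustrative sketch (an assumption, modeled on the boot-time patching
 * done in arch/sparc/mm/init_64.c): each patch entry records the address
 * of a default instruction; at boot the kernel overwrites it with the
 * physical-ASI variant, picking the sun4u or sun4v encoding.  The
 * function name and 'is_sun4v' flag here are hypothetical.
 */
#ifndef __ASSEMBLY__
static inline void
example_apply_ldquad_patches(struct tsb_ldquad_phys_patch_entry *p,
			     struct tsb_ldquad_phys_patch_entry *end,
			     int is_sun4v)
{
	for (; p < end; p++) {
		unsigned int *insn = (unsigned int *)(unsigned long) p->addr;

		*insn = is_sun4v ? p->sun4v_insn : p->sun4u_insn;
		/* Flush the patched instruction to the I-cache. */
		__asm__ __volatile__("flush %0" : : "r" (insn));
	}
}
#endif
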
#define TSB_LOAD_QUAD(TSB, REG) \
661:	ldda		[TSB] ASI_NUCLEUS_QUAD_LDD, REG; \
	.section	.tsb_ldquad_phys_patch, "ax"; \
	.word		661b; \
	ldda		[TSB] ASI_QUAD_LDD_PHYS, REG; \
	ldda		[TSB] ASI_QUAD_LDD_PHYS_4V, REG; \
	.previous

#define TSB_LOAD_TAG_HIGH(TSB, REG) \
661:	lduwa		[TSB] ASI_N, REG; \
	.section	.tsb_phys_patch, "ax"; \
	.word		661b; \
	lduwa		[TSB] ASI_PHYS_USE_EC, REG; \
	.previous

#define TSB_LOAD_TAG(TSB, REG) \
661:	ldxa		[TSB] ASI_N, REG; \
	.section	.tsb_phys_patch, "ax"; \
	.word		661b; \
	ldxa		[TSB] ASI_PHYS_USE_EC, REG; \
	.previous

#define TSB_CAS_TAG_HIGH(TSB, REG1, REG2) \
661:	casa		[TSB] ASI_N, REG1, REG2; \
	.section	.tsb_phys_patch, "ax"; \
	.word		661b; \
	casa		[TSB] ASI_PHYS_USE_EC, REG1, REG2; \
	.previous

#define TSB_CAS_TAG(TSB, REG1, REG2) \
661:	casxa		[TSB] ASI_N, REG1, REG2; \
	.section	.tsb_phys_patch, "ax"; \
	.word		661b; \
	casxa		[TSB] ASI_PHYS_USE_EC, REG1, REG2; \
	.previous

#define TSB_STORE(ADDR, VAL) \
661:	stxa		VAL, [ADDR] ASI_N; \
	.section	.tsb_phys_patch, "ax"; \
	.word		661b; \
	stxa		VAL, [ADDR] ASI_PHYS_USE_EC; \
	.previous

#define TSB_LOCK_TAG(TSB, REG1, REG2) \
99:	TSB_LOAD_TAG_HIGH(TSB, REG1); \
	sethi		%hi(TSB_TAG_LOCK_HIGH), REG2; \
	andcc		REG1, REG2, %g0; \
	bne,pn		%icc, 99b; \
	 nop; \
	TSB_CAS_TAG_HIGH(TSB, REG1, REG2); \
	cmp		REG1, REG2; \
	bne,pn		%icc, 99b; \
	 nop;

#define TSB_WRITE(TSB, TTE, TAG) \
	add		TSB, 0x8, TSB; \
	TSB_STORE(TSB, TTE); \
	sub		TSB, 0x8, TSB; \
	TSB_STORE(TSB, TAG);

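/* Illustrative sketch (not part of the original header): the C shape of
 * the TSB_LOCK_TAG + TSB_WRITE sequence above.  The cmpxchg() call
 * stands in for the casa instruction; the entry layout and helper names
 * are assumptions.  Storing the full tag last both publishes the entry
 * and clears the lock bit, since valid tags never have bit 47 set.
 */
#ifndef __ASSEMBLY__
struct example_tsb_entry {
	unsigned long	tag;
	unsigned long	pte;
};

static inline void example_tsb_insert(struct example_tsb_entry *ent,
				      unsigned long tag, unsigned long pte)
{
	/* Big-endian: the low address holds the tag's high 32 bits. */
	unsigned int *high = (unsigned int *) &ent->tag;
	unsigned int old;

	/* Spin until the lock bit is clear, then claim the entry by
	 * swapping in a tag-high word that is just the lock bit, which
	 * also forces tag comparisons to fail while we update.
	 */
	do {
		old = *high;
	} while ((old & TSB_TAG_LOCK_HIGH) ||
		 cmpxchg(high, old, TSB_TAG_LOCK_HIGH) != old);

	ent->pte = pte;		/* PTE first... */
	ent->tag = tag;		/* ...then the tag, which unlocks. */
}
#endif
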
/* Do a kernel page table walk.  Leaves physical PTE pointer in
 * REG1.  Jumps to FAIL_LABEL on early page table walk termination.
 * VADDR will not be clobbered, but REG2 will.
 */
#define KERN_PGTABLE_WALK(VADDR, REG1, REG2, FAIL_LABEL) \
	sethi		%hi(swapper_pg_dir), REG1; \
	or		REG1, %lo(swapper_pg_dir), REG1; \
	sllx		VADDR, 64 - (PGDIR_SHIFT + PGDIR_BITS), REG2; \
	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
	andn		REG2, 0x3, REG2; \
	lduw		[REG1 + REG2], REG1; \
	brz,pn		REG1, FAIL_LABEL; \
	 sllx		VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
	sllx		REG1, 11, REG1; \
	andn		REG2, 0x3, REG2; \
	lduwa		[REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
	brz,pn		REG1, FAIL_LABEL; \
	 sllx		VADDR, 64 - PMD_SHIFT, REG2; \
	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
	sllx		REG1, 11, REG1; \
	andn		REG2, 0x7, REG2; \
	add		REG1, REG2, REG1;

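/* Illustrative sketch (an assumption, not the kernel's code): the C
 * shape of the walk above.  The PGD and PMD levels hold 32-bit entries
 * encoding the next level's physical address shifted right by 11; the
 * PTE level holds 8-byte entries.  example_phys_read32() is a
 * hypothetical stand-in for the lduwa/ASI_PHYS_USE_EC loads.
 */
#ifndef __ASSEMBLY__
extern unsigned int example_phys_read32(unsigned long paddr);

static inline unsigned long
example_kern_pgtable_walk(unsigned long vaddr, const void *pgd_virt)
{
	unsigned long ent, off;

	/* PGD: 4-byte entries, accessed through the virtual mapping. */
	off = ((vaddr << (64 - (PGDIR_SHIFT + PGDIR_BITS)))
	       >> (64 - PAGE_SHIFT)) & ~0x3UL;
	ent = *(const unsigned int *)((const char *) pgd_virt + off);
	if (!ent)
		return 0;	/* FAIL_LABEL */

	/* PMD: physical access to the table at (entry << 11). */
	off = ((vaddr << (64 - (PMD_SHIFT + PMD_BITS)))
	       >> (64 - PAGE_SHIFT)) & ~0x3UL;
	ent = example_phys_read32((ent << 11) + off);
	if (!ent)
		return 0;	/* FAIL_LABEL */

	/* PTE: return the physical address of the 8-byte PTE slot. */
	off = ((vaddr << (64 - PMD_SHIFT)) >> (64 - PAGE_SHIFT)) & ~0x7UL;
	return (ent << 11) + off;
}
#endif
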
/* Do a user page table walk in MMU globals.  Leaves physical PTE
 * pointer in REG1.  Jumps to FAIL_LABEL on early page table walk
 * termination.  Physical base of page tables is in PHYS_PGD which
 * will not be modified.
 *
 * VADDR will not be clobbered, but REG1 and REG2 will.
 */
#define USER_PGTABLE_WALK_TL1(VADDR, PHYS_PGD, REG1, REG2, FAIL_LABEL) \
	sllx		VADDR, 64 - (PGDIR_SHIFT + PGDIR_BITS), REG2; \
	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
	andn		REG2, 0x3, REG2; \
	lduwa		[PHYS_PGD + REG2] ASI_PHYS_USE_EC, REG1; \
	brz,pn		REG1, FAIL_LABEL; \
	 sllx		VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
	sllx		REG1, 11, REG1; \
	andn		REG2, 0x3, REG2; \
	lduwa		[REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
	brz,pn		REG1, FAIL_LABEL; \
	 sllx		VADDR, 64 - PMD_SHIFT, REG2; \
	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
	sllx		REG1, 11, REG1; \
	andn		REG2, 0x7, REG2; \
	add		REG1, REG2, REG1;

/* Look up an OBP mapping on VADDR in the prom_trans[] table at TL>0.
 * If no entry is found, FAIL_LABEL will be branched to.  On success
 * the resulting PTE value will be left in REG1.  VADDR is preserved
 * by this routine.
 */
#define OBP_TRANS_LOOKUP(VADDR, REG1, REG2, REG3, FAIL_LABEL) \
	sethi		%hi(prom_trans), REG1; \
	or		REG1, %lo(prom_trans), REG1; \
97:	ldx		[REG1 + 0x00], REG2; \
	brz,pn		REG2, FAIL_LABEL; \
	 nop; \
	ldx		[REG1 + 0x08], REG3; \
	add		REG2, REG3, REG3; \
	cmp		REG2, VADDR; \
	bgu,pt		%xcc, 98f; \
	 cmp		VADDR, REG3; \
	bgeu,pt		%xcc, 98f; \
	 ldx		[REG1 + 0x10], REG3; \
	sub		VADDR, REG2, REG2; \
	ba,pt		%xcc, 99f; \
	 add		REG3, REG2, REG1; \
98:	ba,pt		%xcc, 97b; \
	 add		REG1, (3 * 8), REG1; \
99:

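/* Illustrative sketch (an assumption): the loop above in C.  Each
 * prom_trans[] slot is a { virt, size, data } triple (3 * 8 bytes,
 * hence the stride), terminated by a slot whose virt field is zero.
 * The struct and function names here are hypothetical.
 */
#ifndef __ASSEMBLY__
struct example_prom_translation {
	unsigned long	virt;	/* offset 0x00 */
	unsigned long	size;	/* offset 0x08 */
	unsigned long	data;	/* offset 0x10 */
};

static inline unsigned long
example_obp_trans_lookup(const struct example_prom_translation *p,
			 unsigned long vaddr)
{
	for (; p->virt; p++) {
		if (vaddr >= p->virt && vaddr < p->virt + p->size)
			return p->data + (vaddr - p->virt);
	}
	return 0;	/* FAIL_LABEL */
}
#endif
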
/* We use a 32K TSB for the whole kernel, this allows us to handle
 * about 16MB of modules and vmalloc mappings without incurring many
 * hash conflicts.
 */
#define KERNEL_TSB_SIZE_BYTES	(32 * 1024)
#define KERNEL_TSB_NENTRIES	\
	(KERNEL_TSB_SIZE_BYTES / 16)
#define KERNEL_TSB4M_NENTRIES	4096

#define KTSB_PHYS_SHIFT		15

/* Do a kernel TSB lookup at tl>0 on VADDR+TAG, branch to OK_LABEL
 * on TSB hit.  REG1, REG2, REG3, and REG4 are used as temporaries
 * and the found TTE will be left in REG1.  REG3 and REG4 must
 * be an even/odd pair of registers.
 *
 * VADDR and TAG will be preserved and not clobbered by this macro.
 */
#define KERN_TSB_LOOKUP_TL1(VADDR, TAG, REG1, REG2, REG3, REG4, OK_LABEL) \
661:	sethi		%hi(swapper_tsb), REG1; \
	or		REG1, %lo(swapper_tsb), REG1; \
	.section	.swapper_tsb_phys_patch, "ax"; \
	.word		661b; \
	.previous; \
661:	nop; \
	.section	.tsb_ldquad_phys_patch, "ax"; \
	.word		661b; \
	sllx		REG1, KTSB_PHYS_SHIFT, REG1; \
	sllx		REG1, KTSB_PHYS_SHIFT, REG1; \
	.previous; \
	srlx		VADDR, PAGE_SHIFT, REG2; \
	and		REG2, (KERNEL_TSB_NENTRIES - 1), REG2; \
	sllx		REG2, 4, REG2; \
	add		REG1, REG2, REG2; \
	TSB_LOAD_QUAD(REG2, REG3); \
	cmp		REG3, TAG; \
	be,a,pt		%xcc, OK_LABEL; \
	 mov		REG4, REG1;

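/* Illustrative sketch (an assumption): the lookup above in C, reusing
 * struct example_tsb_entry from the earlier sketch.  The index is the
 * low bits of the virtual page number, each entry is 16 bytes, and a
 * hit is a simple tag equality test.
 */
#ifndef __ASSEMBLY__
static inline unsigned long
example_kern_tsb_lookup(const struct example_tsb_entry *tsb,
			unsigned long vaddr, unsigned long tag)
{
	unsigned long hash = (vaddr >> PAGE_SHIFT) &
			     (KERNEL_TSB_NENTRIES - 1);

	if (tsb[hash].tag == tag)
		return tsb[hash].pte;	/* TTE; branch to OK_LABEL */
	return 0;			/* miss */
}
#endif
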
#ifndef CONFIG_DEBUG_PAGEALLOC
/* This version uses a trick: the TAG is already (VADDR >> 22), so
 * we can make use of that for the index computation.
 */
#define KERN_TSB4M_LOOKUP_TL1(TAG, REG1, REG2, REG3, REG4, OK_LABEL) \
661:	sethi		%hi(swapper_4m_tsb), REG1; \
	or		REG1, %lo(swapper_4m_tsb), REG1; \
	.section	.swapper_4m_tsb_phys_patch, "ax"; \
	.word		661b; \
	.previous; \
661:	nop; \
	.section	.tsb_ldquad_phys_patch, "ax"; \
	.word		661b; \
	sllx		REG1, KTSB_PHYS_SHIFT, REG1; \
	sllx		REG1, KTSB_PHYS_SHIFT, REG1; \
	.previous; \
	and		TAG, (KERNEL_TSB4M_NENTRIES - 1), REG2; \
	sllx		REG2, 4, REG2; \
	add		REG1, REG2, REG2; \
	TSB_LOAD_QUAD(REG2, REG3); \
	cmp		REG3, TAG; \
	be,a,pt		%xcc, OK_LABEL; \
	 mov		REG4, REG1;
#endif

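/* Illustrative sketch (an assumption): since TAG == VADDR >> 22 and the
 * 4M TSB covers 4MB (1 << 22) pages, TAG strides by one per 4M page and
 * can serve directly as the hash index.  The helper is hypothetical.
 */
#ifndef __ASSEMBLY__
static inline unsigned long example_tsb4m_index(unsigned long tag)
{
	return tag & (KERNEL_TSB4M_NENTRIES - 1);
}
#endif
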
#endif /* !(_SPARC64_TSB_H) */