/* $Id: spitfire.h,v 1.18 2001/11/29 16:42:10 kanoj Exp $
 * spitfire.h: SpitFire/BlackBird/Cheetah inline MMU operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */
#ifndef _SPARC64_SPITFIRE_H
#define _SPARC64_SPITFIRE_H

#include <asm/asi.h>

#ifndef __ASSEMBLY__
/* The following register addresses are accessible via ASI_DMMU
 * and ASI_IMMU, that is there is a distinct and unique copy of
 * each of these registers for each TLB.
 */
#define TSB_TAG_TARGET		0x0000000000000000 /* All chips */
#define TLB_SFSR		0x0000000000000018 /* All chips */
#define TSB_REG			0x0000000000000028 /* All chips */
#define TLB_TAG_ACCESS		0x0000000000000030 /* All chips */
#define VIRT_WATCHPOINT		0x0000000000000038 /* All chips */
#define PHYS_WATCHPOINT		0x0000000000000040 /* All chips */
#define TSB_EXTENSION_P		0x0000000000000048 /* Ultra-III and later */
#define TSB_EXTENSION_S		0x0000000000000050 /* Ultra-III and later, D-TLB only */
#define TSB_EXTENSION_N		0x0000000000000058 /* Ultra-III and later */
#define TLB_TAG_ACCESS_EXT	0x0000000000000060 /* Ultra-III+ and later */
/* These registers only exist as one entity, and are accessed
 * via ASI_DMMU only.
 */
#define PRIMARY_CONTEXT		0x0000000000000008
#define SECONDARY_CONTEXT	0x0000000000000010
#define DMMU_SFAR		0x0000000000000020
#define VIRT_WATCHPOINT		0x0000000000000038 /* Same value as the per-TLB copy above */
#define PHYS_WATCHPOINT		0x0000000000000040 /* Same value as the per-TLB copy above */
/* Highest TLB entry index usable for locked translations on each chip. */
#define SPITFIRE_HIGHEST_LOCKED_TLBENT	(64 - 1)
#define CHEETAH_HIGHEST_LOCKED_TLBENT	(16 - 1)

#define L1DCACHE_SIZE		0x4000
/* Which TLB implementation the running cpu has; set once at boot
 * from cpu probing.  NOTE(review): member list reconstructed — the
 * extraction dropped the enum body; confirm the exact member set
 * against the repository copy.
 */
enum ultra_tlb_layout {
	spitfire = 0,
	cheetah = 1,
	cheetah_plus = 2,
};

extern enum ultra_tlb_layout tlb_type;

/* Non-zero when the P-cache was forced on from the boot command line. */
extern int cheetah_pcache_forced_on;
extern void cheetah_enable_pcache(void);
/* Highest locked TLB entry index on the running cpu. */
#define sparc64_highest_locked_tlbent()	\
	(tlb_type == spitfire ? \
	 SPITFIRE_HIGHEST_LOCKED_TLBENT : \
	 CHEETAH_HIGHEST_LOCKED_TLBENT)
59 static __inline__
unsigned long spitfire_get_isfsr(void)
63 __asm__
__volatile__("ldxa [%1] %2, %0"
65 : "r" (TLB_SFSR
), "i" (ASI_IMMU
));
69 static __inline__
unsigned long spitfire_get_dsfsr(void)
73 __asm__
__volatile__("ldxa [%1] %2, %0"
75 : "r" (TLB_SFSR
), "i" (ASI_DMMU
));
79 static __inline__
unsigned long spitfire_get_sfar(void)
83 __asm__
__volatile__("ldxa [%1] %2, %0"
85 : "r" (DMMU_SFAR
), "i" (ASI_DMMU
));
89 static __inline__
void spitfire_put_isfsr(unsigned long sfsr
)
91 __asm__
__volatile__("stxa %0, [%1] %2\n\t"
94 : "r" (sfsr
), "r" (TLB_SFSR
), "i" (ASI_IMMU
));
97 static __inline__
void spitfire_put_dsfsr(unsigned long sfsr
)
99 __asm__
__volatile__("stxa %0, [%1] %2\n\t"
102 : "r" (sfsr
), "r" (TLB_SFSR
), "i" (ASI_DMMU
));
/* The data cache is write through, so this just invalidates the
 * specified line.
 */
108 static __inline__
void spitfire_put_dcache_tag(unsigned long addr
, unsigned long tag
)
110 __asm__
__volatile__("stxa %0, [%1] %2\n\t"
113 : "r" (tag
), "r" (addr
), "i" (ASI_DCACHE_TAG
));
114 __asm__
__volatile__ ("membar #Sync" : : : "memory");
/* The instruction cache lines are flushed with this, but note that
 * this does not flush the pipeline.  It is possible for a line to
 * get flushed but stale instructions to still be in the pipeline,
 * a flush instruction (to any address) is sufficient to handle
 * this issue after the line is invalidated.
 */
123 static __inline__
void spitfire_put_icache_tag(unsigned long addr
, unsigned long tag
)
125 __asm__
__volatile__("stxa %0, [%1] %2\n\t"
128 : "r" (tag
), "r" (addr
), "i" (ASI_IC_TAG
));
131 static __inline__
unsigned long spitfire_get_dtlb_data(int entry
)
135 __asm__
__volatile__("ldxa [%1] %2, %0"
137 : "r" (entry
<< 3), "i" (ASI_DTLB_DATA_ACCESS
));
139 /* Clear TTE diag bits. */
140 data
&= ~0x0003fe0000000000UL
;
145 static __inline__
unsigned long spitfire_get_dtlb_tag(int entry
)
149 __asm__
__volatile__("ldxa [%1] %2, %0"
151 : "r" (entry
<< 3), "i" (ASI_DTLB_TAG_READ
));
155 static __inline__
void spitfire_put_dtlb_data(int entry
, unsigned long data
)
157 __asm__
__volatile__("stxa %0, [%1] %2\n\t"
160 : "r" (data
), "r" (entry
<< 3),
161 "i" (ASI_DTLB_DATA_ACCESS
));
164 static __inline__
unsigned long spitfire_get_itlb_data(int entry
)
168 __asm__
__volatile__("ldxa [%1] %2, %0"
170 : "r" (entry
<< 3), "i" (ASI_ITLB_DATA_ACCESS
));
172 /* Clear TTE diag bits. */
173 data
&= ~0x0003fe0000000000UL
;
178 static __inline__
unsigned long spitfire_get_itlb_tag(int entry
)
182 __asm__
__volatile__("ldxa [%1] %2, %0"
184 : "r" (entry
<< 3), "i" (ASI_ITLB_TAG_READ
));
188 static __inline__
void spitfire_put_itlb_data(int entry
, unsigned long data
)
190 __asm__
__volatile__("stxa %0, [%1] %2\n\t"
193 : "r" (data
), "r" (entry
<< 3),
194 "i" (ASI_ITLB_DATA_ACCESS
));
/* Spitfire hardware assisted TLB flushes. */

/* Context level flushes. */
200 static __inline__
void spitfire_flush_dtlb_primary_context(void)
202 __asm__
__volatile__("stxa %%g0, [%0] %1\n\t"
205 : "r" (0x40), "i" (ASI_DMMU_DEMAP
));
208 static __inline__
void spitfire_flush_itlb_primary_context(void)
210 __asm__
__volatile__("stxa %%g0, [%0] %1\n\t"
213 : "r" (0x40), "i" (ASI_IMMU_DEMAP
));
216 static __inline__
void spitfire_flush_dtlb_secondary_context(void)
218 __asm__
__volatile__("stxa %%g0, [%0] %1\n\t"
221 : "r" (0x50), "i" (ASI_DMMU_DEMAP
));
224 static __inline__
void spitfire_flush_itlb_secondary_context(void)
226 __asm__
__volatile__("stxa %%g0, [%0] %1\n\t"
229 : "r" (0x50), "i" (ASI_IMMU_DEMAP
));
232 static __inline__
void spitfire_flush_dtlb_nucleus_context(void)
234 __asm__
__volatile__("stxa %%g0, [%0] %1\n\t"
237 : "r" (0x60), "i" (ASI_DMMU_DEMAP
));
240 static __inline__
void spitfire_flush_itlb_nucleus_context(void)
242 __asm__
__volatile__("stxa %%g0, [%0] %1\n\t"
245 : "r" (0x60), "i" (ASI_IMMU_DEMAP
));
/* Page level flushes. */
249 static __inline__
void spitfire_flush_dtlb_primary_page(unsigned long page
)
251 __asm__
__volatile__("stxa %%g0, [%0] %1\n\t"
254 : "r" (page
), "i" (ASI_DMMU_DEMAP
));
257 static __inline__
void spitfire_flush_itlb_primary_page(unsigned long page
)
259 __asm__
__volatile__("stxa %%g0, [%0] %1\n\t"
262 : "r" (page
), "i" (ASI_IMMU_DEMAP
));
265 static __inline__
void spitfire_flush_dtlb_secondary_page(unsigned long page
)
267 __asm__
__volatile__("stxa %%g0, [%0] %1\n\t"
270 : "r" (page
| 0x10), "i" (ASI_DMMU_DEMAP
));
273 static __inline__
void spitfire_flush_itlb_secondary_page(unsigned long page
)
275 __asm__
__volatile__("stxa %%g0, [%0] %1\n\t"
278 : "r" (page
| 0x10), "i" (ASI_IMMU_DEMAP
));
281 static __inline__
void spitfire_flush_dtlb_nucleus_page(unsigned long page
)
283 __asm__
__volatile__("stxa %%g0, [%0] %1\n\t"
286 : "r" (page
| 0x20), "i" (ASI_DMMU_DEMAP
));
289 static __inline__
void spitfire_flush_itlb_nucleus_page(unsigned long page
)
291 __asm__
__volatile__("stxa %%g0, [%0] %1\n\t"
294 : "r" (page
| 0x20), "i" (ASI_IMMU_DEMAP
));
/* Cheetah has "all non-locked" tlb flushes. */
298 static __inline__
void cheetah_flush_dtlb_all(void)
300 __asm__
__volatile__("stxa %%g0, [%0] %1\n\t"
303 : "r" (0x80), "i" (ASI_DMMU_DEMAP
));
306 static __inline__
void cheetah_flush_itlb_all(void)
308 __asm__
__volatile__("stxa %%g0, [%0] %1\n\t"
311 : "r" (0x80), "i" (ASI_IMMU_DEMAP
));
/* Cheetah has a 4-tlb layout so direct access is a bit different.
 * The first two TLBs are fully associative, hold 16 entries, and are
 * used only for locked and >8K sized translations.  One exists for
 * data accesses and one for instruction accesses.
 *
 * The third TLB is for data accesses to 8K non-locked translations, is
 * 2 way associative, and holds 512 entries.  The fourth TLB is for
 * instruction accesses to 8K non-locked translations, is 2 way
 * associative, and holds 128 entries.
 *
 * Cheetah has some bug where bogus data can be returned from
 * ASI_{D,I}TLB_DATA_ACCESS loads, doing the load twice fixes
 * the problem for me. -DaveM
 */
328 static __inline__
unsigned long cheetah_get_ldtlb_data(int entry
)
332 __asm__
__volatile__("ldxa [%1] %2, %%g0\n\t"
335 : "r" ((0 << 16) | (entry
<< 3)),
336 "i" (ASI_DTLB_DATA_ACCESS
));
341 static __inline__
unsigned long cheetah_get_litlb_data(int entry
)
345 __asm__
__volatile__("ldxa [%1] %2, %%g0\n\t"
348 : "r" ((0 << 16) | (entry
<< 3)),
349 "i" (ASI_ITLB_DATA_ACCESS
));
354 static __inline__
unsigned long cheetah_get_ldtlb_tag(int entry
)
358 __asm__
__volatile__("ldxa [%1] %2, %0"
360 : "r" ((0 << 16) | (entry
<< 3)),
361 "i" (ASI_DTLB_TAG_READ
));
366 static __inline__
unsigned long cheetah_get_litlb_tag(int entry
)
370 __asm__
__volatile__("ldxa [%1] %2, %0"
372 : "r" ((0 << 16) | (entry
<< 3)),
373 "i" (ASI_ITLB_TAG_READ
));
378 static __inline__
void cheetah_put_ldtlb_data(int entry
, unsigned long data
)
380 __asm__
__volatile__("stxa %0, [%1] %2\n\t"
384 "r" ((0 << 16) | (entry
<< 3)),
385 "i" (ASI_DTLB_DATA_ACCESS
));
388 static __inline__
void cheetah_put_litlb_data(int entry
, unsigned long data
)
390 __asm__
__volatile__("stxa %0, [%1] %2\n\t"
394 "r" ((0 << 16) | (entry
<< 3)),
395 "i" (ASI_ITLB_DATA_ACCESS
));
398 static __inline__
unsigned long cheetah_get_dtlb_data(int entry
, int tlb
)
402 __asm__
__volatile__("ldxa [%1] %2, %%g0\n\t"
405 : "r" ((tlb
<< 16) | (entry
<< 3)), "i" (ASI_DTLB_DATA_ACCESS
));
410 static __inline__
unsigned long cheetah_get_dtlb_tag(int entry
, int tlb
)
414 __asm__
__volatile__("ldxa [%1] %2, %0"
416 : "r" ((tlb
<< 16) | (entry
<< 3)), "i" (ASI_DTLB_TAG_READ
));
420 static __inline__
void cheetah_put_dtlb_data(int entry
, unsigned long data
, int tlb
)
422 __asm__
__volatile__("stxa %0, [%1] %2\n\t"
426 "r" ((tlb
<< 16) | (entry
<< 3)),
427 "i" (ASI_DTLB_DATA_ACCESS
));
430 static __inline__
unsigned long cheetah_get_itlb_data(int entry
)
434 __asm__
__volatile__("ldxa [%1] %2, %%g0\n\t"
437 : "r" ((2 << 16) | (entry
<< 3)),
438 "i" (ASI_ITLB_DATA_ACCESS
));
443 static __inline__
unsigned long cheetah_get_itlb_tag(int entry
)
447 __asm__
__volatile__("ldxa [%1] %2, %0"
449 : "r" ((2 << 16) | (entry
<< 3)), "i" (ASI_ITLB_TAG_READ
));
453 static __inline__
void cheetah_put_itlb_data(int entry
, unsigned long data
)
455 __asm__
__volatile__("stxa %0, [%1] %2\n\t"
458 : "r" (data
), "r" ((2 << 16) | (entry
<< 3)),
459 "i" (ASI_ITLB_DATA_ACCESS
));
462 #endif /* !(__ASSEMBLY__) */
464 #endif /* !(_SPARC64_SPITFIRE_H) */