#ifndef _SPARC64_MM_INIT_H
#define _SPARC64_MM_INIT_H

/* Most of the symbols in this file are defined in init.c and
 * marked non-static so that assembler code can get at them.
 */

#define MAX_PHYS_ADDRESS (1UL << 41UL)
#define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL)
#define KPTE_BITMAP_BYTES \
	((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 4)
#define VALID_ADDR_BITMAP_CHUNK_SZ (4UL * 1024UL * 1024UL)
#define VALID_ADDR_BITMAP_BYTES \
	((MAX_PHYS_ADDRESS / VALID_ADDR_BITMAP_CHUNK_SZ) / 8)
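
/*
 * Sizing, for illustration: MAX_PHYS_ADDRESS is 2^41, so there are
 * 2^41 / 2^28 = 8192 256MB linear mapping chunks; packed four chunks
 * per byte (two bits each), the kpte bitmap is 8192 / 4 = 2048 bytes.
 * The valid address bitmap tracks 4MB chunks at one bit each:
 * (2^41 / 2^22) / 8 = 64K bytes.
 */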

extern unsigned long kern_linear_pte_xor[4];
extern unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
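
/*
 * Hypothetical C sketch (not used by the kernel; the real lookup is
 * hand-coded in assembler in ktlb.S): each 256MB chunk's two-bit
 * selector in kpte_linear_bitmap picks one of the four
 * kern_linear_pte_xor values, which is then combined with the faulting
 * address to form the TTE.  The in-word packing below is an assumption
 * made for illustration only.
 */
static inline unsigned long kpte_linear_sel(unsigned long paddr)
{
	unsigned long chunk = paddr >> 28;	/* 256MB chunk index */
	unsigned long word = chunk / 32;	/* 32 selectors per 64-bit word */
	unsigned long shift = (chunk % 32) * 2;	/* bit offset within word */

	return (kpte_linear_bitmap[word] >> shift) & 0x3UL;
}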

extern unsigned int sparc64_highest_unlocked_tlb_ent;
extern unsigned long sparc64_kern_pri_context;
extern unsigned long sparc64_kern_pri_nuc_bits;
extern unsigned long sparc64_kern_sec_context;
extern void mmu_info(struct seq_file *m);

struct linux_prom_translation {
	unsigned long virt;
	unsigned long size;
	unsigned long data;
};

/* Exported for kernel TLB miss handling in ktlb.S */
extern struct linux_prom_translation prom_trans[512];
extern unsigned int prom_trans_ents;

/* Exported for SMP bootup purposes. */
extern unsigned long kern_locked_tte_data;

extern void prom_world(int enter);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
#define VMEMMAP_CHUNK_SHIFT 22
#define VMEMMAP_CHUNK (1UL << VMEMMAP_CHUNK_SHIFT)
#define VMEMMAP_CHUNK_MASK ~(VMEMMAP_CHUNK - 1UL)
#define VMEMMAP_ALIGN(x) (((x)+VMEMMAP_CHUNK-1UL)&VMEMMAP_CHUNK_MASK)

#define VMEMMAP_SIZE ((((1UL << MAX_PHYSADDR_BITS) >> PAGE_SHIFT) * \
		       sizeof(struct page)) >> VMEMMAP_CHUNK_SHIFT)
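/*
 * Sizing note: the virtual memmap needs (maximum physical pages *
 * sizeof(struct page)) bytes, and one vmemmap_table entry backs each
 * 4MB (VMEMMAP_CHUNK) piece of it, giving VMEMMAP_SIZE entries.  The
 * entries are presumably filled by the vmemmap population code in
 * init.c and consumed by the vmemmap TLB miss handler in ktlb.S.
 */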
extern unsigned long vmemmap_table[VMEMMAP_SIZE];
#endif

#endif /* _SPARC64_MM_INIT_H */