Fixed a few warnings.
[tangerine.git] / arch / ppc-sam440 / kernel / mmu.c
blob 42b2293c0652ec0550320fc31b59cd545d8eadd1
1 #include <asm/amcc440.h>
2 #include <asm/io.h>
3 #include <aros/kernel.h>
4 #include <exec/memory.h>
5 #include "memory.h"
7 #include "kernel_intern.h"
/*
 * TLB allocation bitmap: a set bit means the TLB entry is free.
 * The MSB of tlb_bitmap[0] corresponds to entry 0, the MSB of
 * tlb_bitmap[1] to entry 32. The PPC440 core has 64 TLB entries.
 */
static long tlb_bitmap[2];
static long tlb_free = 64;

/* Alloc TLB in the bitmap. Returns -1 if the allocation cannot be done */
static int alloc_tlb(void)
{
    /* It should be done in locked state only! */
    int bit;

    if (tlb_bitmap[0] != 0)
    {
        /* Take the lowest-numbered free entry from the first word */
        bit = __builtin_clz(tlb_bitmap[0]);
        tlb_bitmap[0] &= ~(0x80000000 >> bit);
    }
    else if (tlb_bitmap[1] != 0)
    {
        /*
         * First word exhausted - continue in the second word.
         * The explicit zero tests above avoid __builtin_clz(0),
         * which is undefined behavior (PPC cntlzw happens to
         * return 32, but that is not guaranteed by the builtin).
         */
        bit = 32 + __builtin_clz(tlb_bitmap[1]);
        tlb_bitmap[1] &= ~(0x80000000 >> (bit - 32));
    }
    else
    {
        /* All 64 entries are in use */
        return -1;
    }

    tlb_free--;
    return bit;
}
38 static int free_tlb(int entry)
40 if (entry >=0 && entry < 32)
42 if (tlb_bitmap[0] & (0x80000000 >> entry))
44 D(bug("[KRN] Freeing already free TLB!!!\n"));
46 else
48 tlb_bitmap[0] |= (0x80000000 >> entry);
49 tlb_free++;
52 else if (entry < 64)
54 entry -= 32;
55 if (tlb_bitmap[1] & (0x80000000 >> entry))
57 D(bug("[KRN] Freeing already free TLB!!!\n"));
59 else
61 tlb_bitmap[1] |= (0x80000000 >> entry);
62 tlb_free++;
65 else
67 D(bug("[KRN] Wrong TLB\n"));
/*
 * Page sizes supported by the PPC440 TLB, largest first. 'code' is the
 * size field OR'ed into TLB word 0 by map_region(); 'mask' covers the
 * offset bits within a page of that size (page size == mask + 1), and
 * doubles as the alignment requirement. map_region() scans the table
 * top-down to pick the largest usable page. code == 0xff terminates.
 */
static const struct mmu_page_size {
    uint8_t   code;   /* TLB word 0 page-size code */
    uintptr_t mask;   /* page size - 1 (offset/alignment mask) */
} allowable_pages[] = {
    { 0x90, 0x0fffffff }, /* 256MB */
    { 0x70, 0x00ffffff }, /* 16MB */
    { 0x50, 0x000fffff }, /* 1MB */
    { 0x40, 0x0003ffff }, /* 256KB */
    { 0x30, 0x0000ffff }, /* 64KB */
    { 0x20, 0x00003fff }, /* 16KB */
    { 0x10, 0x00000fff }, /* 4KB */
    { 0x00, 0x000003ff }, /* 1KB */
    { 0xff, 0xffffffff }, /* END MARKER */
};
86 void map_region(uintptr_t physbase, uintptr_t virtbase, uintptr_t length, uint32_t prot)
88 int i;
89 int tlb;
90 long tlb_temp = tlb_free;
92 D(bug("[KRN] map_region(%08x, %08x, %08x, %04x): ", physbase, virtbase, length, prot));
94 /* While there is still something to map */
95 while (length)
97 i = 0;
98 /* Check all available page sizes and try to match the best (the biggest) usable TLB entry */
99 while (allowable_pages[i].code != 0xff)
101 if ((length > allowable_pages[i].mask) && !(physbase & allowable_pages[i].mask) && !(virtbase & allowable_pages[i].mask))
102 break;
103 i++;
106 if (allowable_pages[i].code == 0xff)
108 D(bug("\n[KRN] map_region failed\n"));
109 return;
112 /* get free TLB */
113 tlb = alloc_tlb();
114 if (tlb == -1)
116 D(bug("\n[KRN] map_region: No more free TLB entries\n"));
117 return;
120 // D(bug("[KRN] TLB%02x: %08x - %08x : %08x - %08x: ", tlb,
121 // physbase, physbase + allowable_pages[i].mask,
122 // virtbase, virtbase + allowable_pages[i].mask));
124 /* Do really write to the tlb */
125 asm volatile("tlbwe %0,%3,0; tlbwe %1,%3,1; tlbwe %2,%3,2"
126 ::"r"(virtbase | allowable_pages[i].code | TLB_V), "r"(physbase), "r"(prot), "r"(tlb));
127 // D(bug("%08x %08x %08x\n", virtbase | allowable_pages[i].code | 0x200, physbase, prot));
129 length -= allowable_pages[i].mask + 1;
130 physbase += allowable_pages[i].mask + 1;
131 virtbase += allowable_pages[i].mask + 1;
134 tlb_temp -= tlb_free;
135 D(bug("%2d TLB%s\n", tlb_temp, tlb_temp > 1 ? "s":""));
138 void mmu_init(struct TagItem *tags)
140 tlb_free = 64;
141 tlb_bitmap[0] = 0xffffffff;
142 tlb_bitmap[1] = 0xffffffff;
145 * In order to reduce the usage of TLB entries, align the kernel
146 * regions to the 64KB boundary. It wastes a tiny bit of RAM but saves a lot of
147 * TLB entries
149 uintptr_t krn_lowest = krnGetTagData(KRN_KernelLowest, 0, tags);
150 uintptr_t krn_highest = krnGetTagData(KRN_KernelHighest, 0, tags);
151 uintptr_t krn_base = krnGetTagData(KRN_KernelBase, 0, tags);
152 struct MemHeader *mh;
154 D(bug("[KRN] MMU Init\n"));
155 D(bug("[KRN] lowest = %p, highest = %p\n", krn_lowest, krn_highest));
156 D(bug("[KRN] Kernel size: %dKB code, %dKB data\n", (krn_highest - krn_base)/1024, (krn_base - krn_lowest)/1024));
158 /* 4K granularity for data sections */
159 krn_lowest &= 0xfffff000;
160 /* 64K granularity for code sections */
161 krn_highest = (krn_highest + 0xffff) & 0xffff0000;
164 * The very first entry has to cover the executable part of kernel,
165 * where exception handlers are located
167 map_region(krn_base, 0xff000000 + krn_base, krn_highest - krn_base, TLB_SR | TLB_SX | TLB_UR | TLB_UX);
168 /* Now the data area for kernel. Make it read/write for both user and supervisor. no execution allowed */
169 map_region(krn_lowest, 0xff000000 + krn_lowest, krn_base - krn_lowest, TLB_SR | TLB_SW | TLB_UR | TLB_UW);
170 /* The low memory will be RW assigned to the supervisor mode. No access from usermode! */
171 map_region(0, 0xff000000, krn_lowest, TLB_SR | TLB_SW);
173 /* Prepare the MemHeader structure for this region */
174 mh = (struct MemHeader *)0xff000000;
175 mh->mh_First = (struct MemChunk *)((uint8_t *)mh + MEMHEADER_TOTAL);
176 mh->mh_Free = (krn_lowest - MEMHEADER_TOTAL) & ~(MEMCHUNK_TOTAL-1);
177 mh->mh_First->mc_Next = NULL;
178 mh->mh_First->mc_Bytes = mh->mh_Free;
180 /* The regular RAM, make 1GB of it - amcc440 cannot do more. */
181 map_region(krn_highest, krn_highest, 0x40000000 - krn_highest, TLB_SR | TLB_SW | TLB_UR | TLB_UW | TLB_SX | TLB_UX);// | TLB_W );
183 /* map the PCI bus */
184 map_region(0xa0000000, 0xa0000000, 0x40000000, TLB_SR | TLB_SW | TLB_UR | TLB_UW | TLB_G | TLB_I );
186 /* PCI control registers and onboard devices */
187 map_region(0xe0000000, 0xe0000000, 0x10000000, TLB_SR | TLB_SW | TLB_UR | TLB_UW | TLB_G | TLB_I);
189 D(bug("[KRN] TLB status: %d used, %d free\n", 64 - tlb_free, tlb_free));
191 /* flush TLB shadow regs */
192 asm volatile("isync;");