/*
 * Source: linux/fpc-iii.git — arch/blackfin/kernel/cplb-nompu/cplbinit.c
 * (blob 34e96ce02aa9671701197e7bc3b8f65902fbb5d5)
 */
/*
 * Blackfin CPLB initialization
 *
 * Copyright 2007-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */
9 #include <linux/module.h>
11 #include <asm/blackfin.h>
12 #include <asm/cacheflush.h>
13 #include <asm/cplb.h>
14 #include <asm/cplbinit.h>
15 #include <asm/mem_map.h>
17 struct cplb_entry icplb_tbl[NR_CPUS][MAX_CPLBS] PDT_ATTR;
18 struct cplb_entry dcplb_tbl[NR_CPUS][MAX_CPLBS] PDT_ATTR;
20 int first_switched_icplb PDT_ATTR;
21 int first_switched_dcplb PDT_ATTR;
23 struct cplb_boundary dcplb_bounds[9] PDT_ATTR;
24 struct cplb_boundary icplb_bounds[9] PDT_ATTR;
26 int icplb_nr_bounds PDT_ATTR;
27 int dcplb_nr_bounds PDT_ATTR;
29 void __init generate_cplb_tables_cpu(unsigned int cpu)
31 int i_d, i_i;
32 unsigned long addr;
34 struct cplb_entry *d_tbl = dcplb_tbl[cpu];
35 struct cplb_entry *i_tbl = icplb_tbl[cpu];
37 printk(KERN_INFO "NOMPU: setting up cplb tables\n");
39 i_d = i_i = 0;
41 #ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
42 /* Set up the zero page. */
43 d_tbl[i_d].addr = 0;
44 d_tbl[i_d++].data = SDRAM_OOPS | PAGE_SIZE_1KB;
45 i_tbl[i_i].addr = 0;
46 i_tbl[i_i++].data = SDRAM_OOPS | PAGE_SIZE_1KB;
47 #endif
49 /* Cover kernel memory with 4M pages. */
50 addr = 0;
52 for (; addr < memory_start; addr += 4 * 1024 * 1024) {
53 d_tbl[i_d].addr = addr;
54 d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_4MB;
55 i_tbl[i_i].addr = addr;
56 i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB;
59 #ifdef CONFIG_ROMKERNEL
60 /* Cover kernel XIP flash area */
61 #ifdef CONFIG_BF60x
62 addr = CONFIG_ROM_BASE & ~(16 * 1024 * 1024 - 1);
63 d_tbl[i_d].addr = addr;
64 d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_16MB;
65 i_tbl[i_i].addr = addr;
66 i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_16MB;
67 #else
68 addr = CONFIG_ROM_BASE & ~(4 * 1024 * 1024 - 1);
69 d_tbl[i_d].addr = addr;
70 d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_4MB;
71 i_tbl[i_i].addr = addr;
72 i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB;
73 #endif
74 #endif
76 /* Cover L1 memory. One 4M area for code and data each is enough. */
77 if (cpu == 0) {
78 if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) {
79 d_tbl[i_d].addr = L1_DATA_A_START;
80 d_tbl[i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB;
82 i_tbl[i_i].addr = L1_CODE_START;
83 i_tbl[i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB;
85 #ifdef CONFIG_SMP
86 else {
87 if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) {
88 d_tbl[i_d].addr = COREB_L1_DATA_A_START;
89 d_tbl[i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB;
91 i_tbl[i_i].addr = COREB_L1_CODE_START;
92 i_tbl[i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB;
94 #endif
95 first_switched_dcplb = i_d;
96 first_switched_icplb = i_i;
98 BUG_ON(first_switched_dcplb > MAX_CPLBS);
99 BUG_ON(first_switched_icplb > MAX_CPLBS);
101 while (i_d < MAX_CPLBS)
102 d_tbl[i_d++].data = 0;
103 while (i_i < MAX_CPLBS)
104 i_tbl[i_i++].data = 0;
107 void __init generate_cplb_tables_all(void)
109 unsigned long uncached_end;
110 int i_d, i_i;
112 i_d = 0;
113 /* Normal RAM, including MTD FS. */
114 #ifdef CONFIG_MTD_UCLINUX
115 uncached_end = memory_mtd_start + mtd_size;
116 #else
117 uncached_end = memory_end;
118 #endif
120 * if DMA uncached is less than 1MB, mark the 1MB chunk as uncached
121 * so that we don't have to use 4kB pages and cause CPLB thrashing
123 if ((DMA_UNCACHED_REGION >= 1 * 1024 * 1024) || !DMA_UNCACHED_REGION ||
124 ((_ramend - uncached_end) >= 1 * 1024 * 1024))
125 dcplb_bounds[i_d].eaddr = uncached_end;
126 else
127 dcplb_bounds[i_d].eaddr = uncached_end & ~(1 * 1024 * 1024 - 1);
128 dcplb_bounds[i_d++].data = SDRAM_DGENERIC;
129 /* DMA uncached region. */
130 if (DMA_UNCACHED_REGION) {
131 dcplb_bounds[i_d].eaddr = _ramend;
132 dcplb_bounds[i_d++].data = SDRAM_DNON_CHBL;
134 if (_ramend != physical_mem_end) {
135 /* Reserved memory. */
136 dcplb_bounds[i_d].eaddr = physical_mem_end;
137 dcplb_bounds[i_d++].data = (reserved_mem_dcache_on ?
138 SDRAM_DGENERIC : SDRAM_DNON_CHBL);
140 /* Addressing hole up to the async bank. */
141 dcplb_bounds[i_d].eaddr = ASYNC_BANK0_BASE;
142 dcplb_bounds[i_d++].data = 0;
143 /* ASYNC banks. */
144 dcplb_bounds[i_d].eaddr = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE;
145 dcplb_bounds[i_d++].data = SDRAM_EBIU;
146 /* Addressing hole up to BootROM. */
147 dcplb_bounds[i_d].eaddr = BOOT_ROM_START;
148 dcplb_bounds[i_d++].data = 0;
149 /* BootROM -- largest one should be less than 1 meg. */
150 dcplb_bounds[i_d].eaddr = BOOT_ROM_START + BOOT_ROM_LENGTH;
151 dcplb_bounds[i_d++].data = SDRAM_DGENERIC;
152 if (L2_LENGTH) {
153 /* Addressing hole up to L2 SRAM. */
154 dcplb_bounds[i_d].eaddr = L2_START;
155 dcplb_bounds[i_d++].data = 0;
156 /* L2 SRAM. */
157 dcplb_bounds[i_d].eaddr = L2_START + L2_LENGTH;
158 dcplb_bounds[i_d++].data = L2_DMEMORY;
160 dcplb_nr_bounds = i_d;
161 BUG_ON(dcplb_nr_bounds > ARRAY_SIZE(dcplb_bounds));
163 i_i = 0;
164 /* Normal RAM, including MTD FS. */
165 icplb_bounds[i_i].eaddr = uncached_end;
166 icplb_bounds[i_i++].data = SDRAM_IGENERIC;
167 if (_ramend != physical_mem_end) {
168 /* DMA uncached region. */
169 if (DMA_UNCACHED_REGION) {
170 /* Normally this hole is caught by the async below. */
171 icplb_bounds[i_i].eaddr = _ramend;
172 icplb_bounds[i_i++].data = 0;
174 /* Reserved memory. */
175 icplb_bounds[i_i].eaddr = physical_mem_end;
176 icplb_bounds[i_i++].data = (reserved_mem_icache_on ?
177 SDRAM_IGENERIC : SDRAM_INON_CHBL);
179 /* Addressing hole up to the async bank. */
180 icplb_bounds[i_i].eaddr = ASYNC_BANK0_BASE;
181 icplb_bounds[i_i++].data = 0;
182 /* ASYNC banks. */
183 icplb_bounds[i_i].eaddr = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE;
184 icplb_bounds[i_i++].data = SDRAM_EBIU;
185 /* Addressing hole up to BootROM. */
186 icplb_bounds[i_i].eaddr = BOOT_ROM_START;
187 icplb_bounds[i_i++].data = 0;
188 /* BootROM -- largest one should be less than 1 meg. */
189 icplb_bounds[i_i].eaddr = BOOT_ROM_START + BOOT_ROM_LENGTH;
190 icplb_bounds[i_i++].data = SDRAM_IGENERIC;
192 if (L2_LENGTH) {
193 /* Addressing hole up to L2 SRAM. */
194 icplb_bounds[i_i].eaddr = L2_START;
195 icplb_bounds[i_i++].data = 0;
196 /* L2 SRAM. */
197 icplb_bounds[i_i].eaddr = L2_START + L2_LENGTH;
198 icplb_bounds[i_i++].data = L2_IMEMORY;
200 icplb_nr_bounds = i_i;
201 BUG_ON(icplb_nr_bounds > ARRAY_SIZE(icplb_bounds));