conn rcv_lock converted to spinlock, struct cor_sock created, kernel_packet skb_clone...
[cor_2_6_31.git] / arch / blackfin / kernel / cplb-nompu / cplbinit.c
blob685f160a5a369789671f948e2132d36fb5c2e29b
/*
 * Blackfin CPLB initialization
 *
 * Copyright 2004-2007 Analog Devices Inc.
 *
 * Bugs: Enter bugs at http://blackfin.uclinux.org/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
24 #include <linux/module.h>
26 #include <asm/blackfin.h>
27 #include <asm/cacheflush.h>
28 #include <asm/cplb.h>
29 #include <asm/cplbinit.h>
30 #include <asm/mem_map.h>
32 struct cplb_entry icplb_tbl[NR_CPUS][MAX_CPLBS] PDT_ATTR;
33 struct cplb_entry dcplb_tbl[NR_CPUS][MAX_CPLBS] PDT_ATTR;
35 int first_switched_icplb PDT_ATTR;
36 int first_switched_dcplb PDT_ATTR;
38 struct cplb_boundary dcplb_bounds[9] PDT_ATTR;
39 struct cplb_boundary icplb_bounds[7] PDT_ATTR;
41 int icplb_nr_bounds PDT_ATTR;
42 int dcplb_nr_bounds PDT_ATTR;
44 void __init generate_cplb_tables_cpu(unsigned int cpu)
46 int i_d, i_i;
47 unsigned long addr;
49 struct cplb_entry *d_tbl = dcplb_tbl[cpu];
50 struct cplb_entry *i_tbl = icplb_tbl[cpu];
52 printk(KERN_INFO "NOMPU: setting up cplb tables\n");
54 i_d = i_i = 0;
56 #ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
57 /* Set up the zero page. */
58 d_tbl[i_d].addr = 0;
59 d_tbl[i_d++].data = SDRAM_OOPS | PAGE_SIZE_1KB;
60 i_tbl[i_i].addr = 0;
61 i_tbl[i_i++].data = SDRAM_OOPS | PAGE_SIZE_1KB;
62 #endif
64 /* Cover kernel memory with 4M pages. */
65 addr = 0;
67 for (; addr < memory_start; addr += 4 * 1024 * 1024) {
68 d_tbl[i_d].addr = addr;
69 d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_4MB;
70 i_tbl[i_i].addr = addr;
71 i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB;
74 /* Cover L1 memory. One 4M area for code and data each is enough. */
75 if (cpu == 0) {
76 if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) {
77 d_tbl[i_d].addr = L1_DATA_A_START;
78 d_tbl[i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB;
80 i_tbl[i_i].addr = L1_CODE_START;
81 i_tbl[i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB;
83 #ifdef CONFIG_SMP
84 else {
85 if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) {
86 d_tbl[i_d].addr = COREB_L1_DATA_A_START;
87 d_tbl[i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB;
89 i_tbl[i_i].addr = COREB_L1_CODE_START;
90 i_tbl[i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB;
92 #endif
93 first_switched_dcplb = i_d;
94 first_switched_icplb = i_i;
96 BUG_ON(first_switched_dcplb > MAX_CPLBS);
97 BUG_ON(first_switched_icplb > MAX_CPLBS);
99 while (i_d < MAX_CPLBS)
100 d_tbl[i_d++].data = 0;
101 while (i_i < MAX_CPLBS)
102 i_tbl[i_i++].data = 0;
105 void __init generate_cplb_tables_all(void)
107 int i_d, i_i;
109 i_d = 0;
110 /* Normal RAM, including MTD FS. */
111 #ifdef CONFIG_MTD_UCLINUX
112 dcplb_bounds[i_d].eaddr = memory_mtd_start + mtd_size;
113 #else
114 dcplb_bounds[i_d].eaddr = memory_end;
115 #endif
116 dcplb_bounds[i_d++].data = SDRAM_DGENERIC;
117 /* DMA uncached region. */
118 if (DMA_UNCACHED_REGION) {
119 dcplb_bounds[i_d].eaddr = _ramend;
120 dcplb_bounds[i_d++].data = SDRAM_DNON_CHBL;
122 if (_ramend != physical_mem_end) {
123 /* Reserved memory. */
124 dcplb_bounds[i_d].eaddr = physical_mem_end;
125 dcplb_bounds[i_d++].data = (reserved_mem_dcache_on ?
126 SDRAM_DGENERIC : SDRAM_DNON_CHBL);
128 /* Addressing hole up to the async bank. */
129 dcplb_bounds[i_d].eaddr = ASYNC_BANK0_BASE;
130 dcplb_bounds[i_d++].data = 0;
131 /* ASYNC banks. */
132 dcplb_bounds[i_d].eaddr = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE;
133 dcplb_bounds[i_d++].data = SDRAM_EBIU;
134 /* Addressing hole up to BootROM. */
135 dcplb_bounds[i_d].eaddr = BOOT_ROM_START;
136 dcplb_bounds[i_d++].data = 0;
137 /* BootROM -- largest one should be less than 1 meg. */
138 dcplb_bounds[i_d].eaddr = BOOT_ROM_START + (1 * 1024 * 1024);
139 dcplb_bounds[i_d++].data = SDRAM_DGENERIC;
140 if (L2_LENGTH) {
141 /* Addressing hole up to L2 SRAM. */
142 dcplb_bounds[i_d].eaddr = L2_START;
143 dcplb_bounds[i_d++].data = 0;
144 /* L2 SRAM. */
145 dcplb_bounds[i_d].eaddr = L2_START + L2_LENGTH;
146 dcplb_bounds[i_d++].data = L2_DMEMORY;
148 dcplb_nr_bounds = i_d;
149 BUG_ON(dcplb_nr_bounds > ARRAY_SIZE(dcplb_bounds));
151 i_i = 0;
152 /* Normal RAM, including MTD FS. */
153 #ifdef CONFIG_MTD_UCLINUX
154 icplb_bounds[i_i].eaddr = memory_mtd_start + mtd_size;
155 #else
156 icplb_bounds[i_i].eaddr = memory_end;
157 #endif
158 icplb_bounds[i_i++].data = SDRAM_IGENERIC;
159 /* DMA uncached region. */
160 if (DMA_UNCACHED_REGION) {
161 icplb_bounds[i_i].eaddr = _ramend;
162 icplb_bounds[i_i++].data = 0;
164 if (_ramend != physical_mem_end) {
165 /* Reserved memory. */
166 icplb_bounds[i_i].eaddr = physical_mem_end;
167 icplb_bounds[i_i++].data = (reserved_mem_icache_on ?
168 SDRAM_IGENERIC : SDRAM_INON_CHBL);
170 /* Addressing hole up to BootROM. */
171 icplb_bounds[i_i].eaddr = BOOT_ROM_START;
172 icplb_bounds[i_i++].data = 0;
173 /* BootROM -- largest one should be less than 1 meg. */
174 icplb_bounds[i_i].eaddr = BOOT_ROM_START + (1 * 1024 * 1024);
175 icplb_bounds[i_i++].data = SDRAM_IGENERIC;
176 if (L2_LENGTH) {
177 /* Addressing hole up to L2 SRAM, including the async bank. */
178 icplb_bounds[i_i].eaddr = L2_START;
179 icplb_bounds[i_i++].data = 0;
180 /* L2 SRAM. */
181 icplb_bounds[i_i].eaddr = L2_START + L2_LENGTH;
182 icplb_bounds[i_i++].data = L2_IMEMORY;
184 icplb_nr_bounds = i_i;
185 BUG_ON(icplb_nr_bounds > ARRAY_SIZE(icplb_bounds));