/*
 * File: arch/blackfin/mm/init.c
 *
 * Copyright 2004-2007 Analog Devices Inc.
 *
 * Bugs: Enter bugs at http://blackfin.uclinux.org/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/uaccess.h>
#include <asm/bfin-global.h>
#include <asm/pda.h>
#include <asm/cplbinit.h>
#include "blackfin_sram.h"

/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * for a process dying in kernel mode, possibly leaving an inode
 * unused, etc.
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
static unsigned long empty_bad_page_table;

static unsigned long empty_bad_page;

static unsigned long empty_zero_page;
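
/*
 * Note: the pages backing these three variables are allocated from bootmem
 * in paging_init() below; only empty_zero_page is explicitly cleared there.
 */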

#ifndef CONFIG_EXCEPTION_L1_SCRATCH
#if defined CONFIG_SYSCALL_TAB_L1
__attribute__((l1_data))
#endif
static unsigned long exception_stack[NR_CPUS][1024];
#endif

struct blackfin_pda cpu_pda[NR_CPUS];
EXPORT_SYMBOL(cpu_pda);

/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 * It sets up the bad/zero pages and tells the page allocator about the
 * available memory zones; it takes no parameters and works from the
 * memory_start/memory_end globals.
 */
void __init paging_init(void)
{
	/*
	 * make sure start_mem is page aligned, otherwise bootmem and
	 * page_alloc get different views of the world
	 */
	unsigned long end_mem = memory_end & PAGE_MASK;

	pr_debug("start_mem is %#lx virtual_end is %#lx\n", PAGE_ALIGN(memory_start), end_mem);

	/*
	 * initialize the bad page table and bad page to point
	 * to a couple of allocated pages
	 */
	empty_bad_page_table = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
	empty_bad_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
	empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
	memset((void *)empty_zero_page, 0, PAGE_SIZE);

	/*
	 * Set up SFC/DFC registers (user data space)
	 */
	set_fs(KERNEL_DS);

	pr_debug("free_area_init -> start_mem is %#lx virtual_end is %#lx\n",
		PAGE_ALIGN(memory_start), end_mem);

	{
		unsigned long zones_size[MAX_NR_ZONES] = { 0, };

		zones_size[ZONE_DMA] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
		zones_size[ZONE_NORMAL] = 0;
#ifdef CONFIG_HIGHMEM
		zones_size[ZONE_HIGHMEM] = 0;
#endif
		free_area_init(zones_size);
	}
}
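
/*
 * init_pda() zeroes this CPU's per-processor data area and fills in its
 * exception stack pointer and default interrupt mask; see the comments
 * inside for what is (and is not) valid at this point in boot.
 */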
asmlinkage void __init init_pda(void)
{
	unsigned int cpu = raw_smp_processor_id();

	/* Initialize the PDA fields holding references to other parts
	   of the memory. The content of such memory is still
	   undefined at the time of the call; we are only setting up
	   valid pointers to it. */
	memset(&cpu_pda[cpu], 0, sizeof(cpu_pda[cpu]));

	cpu_pda[0].next = &cpu_pda[1];
	cpu_pda[1].next = &cpu_pda[0];
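
	/*
	 * The two PDAs are linked into a simple ring (cpu 0 <-> cpu 1), so
	 * either CPU can reach the other's PDA through ->next.
	 */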

#ifdef CONFIG_EXCEPTION_L1_SCRATCH
	cpu_pda[cpu].ex_stack = (unsigned long *)(L1_SCRATCH_START + \
					L1_SCRATCH_LENGTH);
#else
	/* point ex_stack at the end of this CPU's 1024-word slot (the start
	   of the next row); the exception stack grows down from there */
	cpu_pda[cpu].ex_stack = exception_stack[cpu + 1];
#endif

#ifdef CONFIG_SMP
	cpu_pda[cpu].imask = 0x1f;
#endif
}
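
/*
 * mem_init() hands all bootmem pages over to the page allocator and prints
 * a summary of how the external memory is used (init code, kernel code,
 * data, DMA region, reserved).
 */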
void __init mem_init(void)
{
	unsigned int codek = 0, datak = 0, initk = 0;
	unsigned int reservedpages = 0, freepages = 0;
	unsigned long tmp;
	unsigned long start_mem = memory_start;
	unsigned long end_mem = memory_end;

	end_mem &= PAGE_MASK;
	high_memory = (void *)end_mem;

	start_mem = PAGE_ALIGN(start_mem);
	max_mapnr = num_physpages = MAP_NR(high_memory);
	printk(KERN_DEBUG "Kernel managed physical pages: %lu\n", num_physpages);

	/* This will put all memory onto the freelists. */
	totalram_pages = free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < max_mapnr; tmp++)
		if (PageReserved(pfn_to_page(tmp)))
			reservedpages++;

	freepages = max_mapnr - reservedpages;

	/* do not count in kernel image between _rambase and _ramstart */
	reservedpages -= (_ramstart - _rambase) >> PAGE_SHIFT;
#if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
	reservedpages += (_ramend - memory_end - DMA_UNCACHED_REGION) >> PAGE_SHIFT;
#endif

	codek = (_etext - _stext) >> 10;
	initk = (__init_end - __init_begin) >> 10;
	datak = ((_ramstart - _rambase) >> 10) - codek - initk;
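
	/*
	 * The ">> 10" conversions above express byte counts in KiB; the
	 * "<< (PAGE_SHIFT-10)" in the printk below converts page counts to
	 * KiB the same way (with 4 KiB pages, PAGE_SHIFT-10 == 2).
	 */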
172 "Memory available: %luk/%luk RAM, "
173 "(%uk init code, %uk kernel code, %uk data, %uk dma, %uk reserved)\n",
174 (unsigned long) freepages
<< (PAGE_SHIFT
-10), _ramend
>> 10,
175 initk
, codek
, datak
, DMA_UNCACHED_REGION
>> 10, (reservedpages
<< (PAGE_SHIFT
-10)));

static void __init free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;
	/* check that the pages we free are not partial pages */
	for (addr = begin; addr + PAGE_SIZE <= end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
#ifndef CONFIG_MPU
	free_init_pages("initrd memory", start, end);
#endif
}
#endif

void __init_refok free_initmem(void)
{
#if defined CONFIG_RAMKERNEL && !defined CONFIG_MPU
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
#endif
}