/*
 * Set up paging and the MMU.
 *
 * Copyright (C) 2000-2003, Axis Communications AB.
 *
 * Authors:   Bjorn Wesen <bjornw@axis.com>
 *            Tobias Anderberg <tobiasa@axis.com>, CRISv32 port.
 */
#include <linux/mmzone.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/mmu.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <arch/hwregs/asm/mmu_defs_asm.h>
#include <arch/hwregs/supp_reg.h>

extern void tlb_init(void);

/*
 * The kernel is already mapped with linear mapping at kseg_c so there's no
 * need to map it with a page table. However, head.S also temporarily mapped it
 * at kseg_4 thus the ksegs are set up again. Also clear the TLB and do various
 * other paranoia.
 */
void __init
cris_mmu_init(void)
{
	unsigned long mmu_config;
	unsigned long mmu_kbase_hi;
	unsigned long mmu_kbase_lo;
	unsigned short mmu_page_id;

	/*
	 * Make sure the current pgd table points to something sane, even if it
	 * is most probably not used until the next switch_mm.
	 */
	per_cpu(current_pgd, smp_processor_id()) = init_mm.pgd;

	/* Initialise the TLB. Function found in tlb.c. */
	tlb_init();

	/*
	 * Enable exceptions and initialize the kernel segments.
	 * See head.S for differences between ARTPEC-3 and ETRAX FS.
	 */
	mmu_config = ( REG_STATE(mmu, rw_mm_cfg, we, on) |
		       REG_STATE(mmu, rw_mm_cfg, acc, on) |
		       REG_STATE(mmu, rw_mm_cfg, ex, on) |
		       REG_STATE(mmu, rw_mm_cfg, inv, on) |
#ifdef CONFIG_CRIS_MACH_ARTPEC3
		       REG_STATE(mmu, rw_mm_cfg, seg_f, page) |
		       REG_STATE(mmu, rw_mm_cfg, seg_e, page) |
		       REG_STATE(mmu, rw_mm_cfg, seg_d, linear) |
#else
		       REG_STATE(mmu, rw_mm_cfg, seg_f, linear) |
		       REG_STATE(mmu, rw_mm_cfg, seg_e, linear) |
		       REG_STATE(mmu, rw_mm_cfg, seg_d, page) |
#endif
		       REG_STATE(mmu, rw_mm_cfg, seg_c, linear) |
		       REG_STATE(mmu, rw_mm_cfg, seg_b, linear) |
		       REG_STATE(mmu, rw_mm_cfg, seg_a, page) |
		       REG_STATE(mmu, rw_mm_cfg, seg_9, page) |
		       REG_STATE(mmu, rw_mm_cfg, seg_8, page) |
		       REG_STATE(mmu, rw_mm_cfg, seg_7, page) |
		       REG_STATE(mmu, rw_mm_cfg, seg_6, page) |
		       REG_STATE(mmu, rw_mm_cfg, seg_5, page) |
		       REG_STATE(mmu, rw_mm_cfg, seg_4, page) |
		       REG_STATE(mmu, rw_mm_cfg, seg_3, page) |
		       REG_STATE(mmu, rw_mm_cfg, seg_2, page) |
		       REG_STATE(mmu, rw_mm_cfg, seg_1, page) |
		       REG_STATE(mmu, rw_mm_cfg, seg_0, page));
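
	/*
	 * Each seg_N field above selects how one kernel segment of the 4 GB
	 * virtual address space is handled: "linear" (fixed-offset mapping
	 * via the kbase registers written below) or "page" (translated
	 * through the page tables and TLB). On CRIS v32 each segment
	 * presumably covers 256 MB, so e.g. seg_c is the 0xc0000000 range
	 * where the linearly mapped kernel lives.
	 */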

	/* See head.S for differences between ARTPEC-3 and ETRAX FS. */
	mmu_kbase_hi = ( REG_FIELD(mmu, rw_mm_kbase_hi, base_f, 0x0) |
#ifdef CONFIG_CRIS_MACH_ARTPEC3
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_e, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_d, 0x5) |
#else
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_e, 0x8) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_d, 0x0) |
#endif
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_c, 0x4) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_b, 0xb) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_a, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_9, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_8, 0x0));
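
	/*
	 * For linearly mapped segments the base_N nibble presumably supplies
	 * the top four physical address bits, so base_c = 0x4 would map the
	 * kseg_c virtual range onto physical 0x4xxxxxxx, while base_b = 0xb
	 * keeps kseg_b identity-mapped.
	 */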

	mmu_kbase_lo = ( REG_FIELD(mmu, rw_mm_kbase_lo, base_7, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_6, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_5, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_4, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_3, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_2, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_1, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_0, 0x0));

	mmu_page_id = REG_FIELD(mmu, rw_mm_tlb_hi, pid, 0);

	/* Update the instruction MMU. */
	SUPP_BANK_SEL(BANK_IM);
	SUPP_REG_WR(RW_MM_CFG, mmu_config);
	SUPP_REG_WR(RW_MM_KBASE_HI, mmu_kbase_hi);
	SUPP_REG_WR(RW_MM_KBASE_LO, mmu_kbase_lo);
	SUPP_REG_WR(RW_MM_TLB_HI, mmu_page_id);

	/* Update the data MMU. */
	SUPP_BANK_SEL(BANK_DM);
	SUPP_REG_WR(RW_MM_CFG, mmu_config);
	SUPP_REG_WR(RW_MM_KBASE_HI, mmu_kbase_hi);
	SUPP_REG_WR(RW_MM_KBASE_LO, mmu_kbase_lo);
	SUPP_REG_WR(RW_MM_TLB_HI, mmu_page_id);
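
	/*
	 * The instruction and data MMUs are programmed identically through
	 * their respective support-register banks. The kernel itself appears
	 * to run with page id 0, matching the pid field written to
	 * RW_MM_TLB_HI above, so mirror that in the PID special register.
	 */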
	SPEC_REG_WR(SPEC_REG_PID, 0);

	/*
	 * The MMU has been enabled ever since head.S but just to make it
	 * totally obvious enable it here as well.
	 */
	SUPP_BANK_SEL(BANK_GC);
	SUPP_REG_WR(RW_GC_CFG, 0xf); /* IMMU, DMMU, ICache, DCache on */
}

void __init
paging_init(void)
{
	int i;
	unsigned long zones_size[MAX_NR_ZONES];

	printk("Setting up paging and the MMU.\n");

	/* Clear out the init_mm.pgd that will contain the kernel's mappings. */
	for (i = 0; i < PTRS_PER_PGD; i++)
		swapper_pg_dir[i] = __pgd(0);

	cris_mmu_init();

	/*
	 * Initialize the bad page table and bad page to point to a couple of
	 * allocated pages.
	 */
	empty_zero_page = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
	memset((void *) empty_zero_page, 0, PAGE_SIZE);

	/* All pages are DMA'able in Etrax, so put all in the DMA'able zone. */
	zones_size[0] = ((unsigned long) high_memory - PAGE_OFFSET) >> PAGE_SHIFT;

	for (i = 1; i < MAX_NR_ZONES; i++)
		zones_size[i] = 0;

	/*
	 * Use free_area_init_node instead of free_area_init, because it is
	 * designed for systems where the DRAM starts at an address
	 * substantially higher than 0, like us (we start at PAGE_OFFSET). This
	 * saves space in the mem_map page array.
	 */
	free_area_init_node(0, zones_size, PAGE_OFFSET >> PAGE_SHIFT, 0);
	mem_map = contig_page_data.node_mem_map;
}