/*
 * linux/arch/cris/arch-v10/mm/tlb.c
 *
 * Low level TLB handling
 *
 * Copyright (C) 2000-2002 Axis Communications AB
 *
 * Authors: Bjorn Wesen (bjornw@axis.com)
 */

#include <asm/mmu_context.h>
#include <asm/arch/svinto.h>

#define D(x)	/* debug printouts; define as (x) to enable them */
/* The TLB can host up to 64 different mm contexts at the same time.
 * The running context is R_MMU_CONTEXT, and each TLB entry contains a
 * page_id that has to match to give a hit. In page_id_map, we keep track
 * of which mm's we have assigned which page_id's, so that we know when
 * to invalidate TLB entries.
 *
 * The last page_id is never running - it is used as an invalid page_id
 * so we can make TLB entries that will never match.
 *
 * Notice that we need to make the flushes atomic, otherwise an interrupt
 * handler that uses vmalloced memory might cause a TLB load in the middle
 * of a flush.
 */
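
/*
 * Illustration only, not part of the original file: every flush routine
 * below invalidates an entry with the same three writes - select the
 * entry, tag it with INVALID_PAGEID so it can never hit, and clear all
 * valid/permission bits in the LO word. A hypothetical helper factoring
 * out that idiom could look like this (kept under #if 0 so it does not
 * change the object code):
 */
#if 0
static inline void example_invalidate_tlb_entry(int i, unsigned long vpn_bits)
{
	*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
	*R_TLB_HI = (IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID) |
		     IO_FIELD(R_TLB_HI, vpn, vpn_bits));
	*R_TLB_LO = (IO_STATE(R_TLB_LO, global, no) |
		     IO_STATE(R_TLB_LO, valid, no) |
		     IO_STATE(R_TLB_LO, kernel, no) |
		     IO_STATE(R_TLB_LO, we, no) |
		     IO_FIELD(R_TLB_LO, pfn, 0));
}
#endif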
/* invalidate all TLB entries */

void
flush_tlb_all(void)
{
	int i;
	unsigned long flags;

	/* the vpn of i & 0xf is so we don't write similar TLB entries
	 * in the same 4-way entry group (see the note after this
	 * function for the arithmetic).
	 */

	local_save_flags(flags);
	local_irq_disable();
	for (i = 0; i < NUM_TLB_ENTRIES; i++) {
		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
		*R_TLB_HI = (IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID) |
			     IO_FIELD(R_TLB_HI, vpn, i & 0xf));

		*R_TLB_LO = (IO_STATE(R_TLB_LO, global, no) |
			     IO_STATE(R_TLB_LO, valid, no) |
			     IO_STATE(R_TLB_LO, kernel, no) |
			     IO_STATE(R_TLB_LO, we, no) |
			     IO_FIELD(R_TLB_LO, pfn, 0));
	}
	local_irq_restore(flags);
	D(printk("tlb: flushed all\n"));
}
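
/*
 * Note on the vpn written above: with NUM_TLB_ENTRIES == 64 and a
 * 4-way TLB there are 64 / 4 == 16 entry groups, and the group is
 * selected by the low four bits of the vpn. Writing vpn = i & 0xf
 * therefore spreads the 64 invalid entries evenly over all 16 groups
 * instead of piling identical entries into one. (The concrete numbers
 * are our reading of the "4-way" comment above; the original file does
 * not spell them out.)
 */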
/* invalidate the selected mm context only */

void
flush_tlb_mm(struct mm_struct *mm)
{
	int i;
	int page_id = mm->context.page_id;
	unsigned long flags;

	D(printk("tlb: flush mm context %d (%p)\n", page_id, mm));

	if (page_id == NO_CONTEXT)
		return;

	/* mark the TLB entries that match the page_id as invalid.
	 * here we could also check the _PAGE_GLOBAL bit and NOT flush
	 * global pages. is it worth the extra I/O?
	 */

	local_save_flags(flags);
	local_irq_disable();
	for (i = 0; i < NUM_TLB_ENTRIES; i++) {
		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
		if (IO_EXTRACT(R_TLB_HI, page_id, *R_TLB_HI) == page_id) {
			*R_TLB_HI = (IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID) |
				     IO_FIELD(R_TLB_HI, vpn, i & 0xf));

			*R_TLB_LO = (IO_STATE(R_TLB_LO, global, no) |
				     IO_STATE(R_TLB_LO, valid, no) |
				     IO_STATE(R_TLB_LO, kernel, no) |
				     IO_STATE(R_TLB_LO, we, no) |
				     IO_FIELD(R_TLB_LO, pfn, 0));
		}
	}
	local_irq_restore(flags);
}
/* invalidate a single page */

void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;
	int page_id = mm->context.page_id;
	int i;
	unsigned long flags;

	D(printk("tlb: flush page %#lx in context %d (%p)\n", addr, page_id, mm));

	if (page_id == NO_CONTEXT)
		return;

	addr &= PAGE_MASK; /* perhaps not necessary */

	/* invalidate those TLB entries that match both the mm context
	 * and the virtual address requested
	 */

	local_save_flags(flags);
	local_irq_disable();
	for (i = 0; i < NUM_TLB_ENTRIES; i++) {
		unsigned long tlb_hi;

		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
		tlb_hi = *R_TLB_HI;	/* read the selected entry once */
		if (IO_EXTRACT(R_TLB_HI, page_id, tlb_hi) == page_id &&
		    (tlb_hi & PAGE_MASK) == addr) {
			*R_TLB_HI = IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID) |
				    addr; /* same addr as before works. */

			*R_TLB_LO = (IO_STATE(R_TLB_LO, global, no) |
				     IO_STATE(R_TLB_LO, valid, no) |
				     IO_STATE(R_TLB_LO, kernel, no) |
				     IO_STATE(R_TLB_LO, we, no) |
				     IO_FIELD(R_TLB_LO, pfn, 0));
		}
	}
	local_irq_restore(flags);
}
/* dump the entire TLB for debug purposes */

void
dump_tlb_all(void)
{
	int i;
	unsigned long flags;

	printk("TLB dump. LO is: pfn | reserved | global | valid | kernel | we |\n");

	local_save_flags(flags);
	local_irq_disable();
	for (i = 0; i < NUM_TLB_ENTRIES; i++) {
		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
		printk("Entry %d: HI 0x%08lx, LO 0x%08lx\n",
		       i, *R_TLB_HI, *R_TLB_LO);
	}
	local_irq_restore(flags);
}
/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */

int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context.page_id = NO_CONTEXT;
	return 0;
}
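
/*
 * Illustration only, not part of the original file: a new mm keeps
 * NO_CONTEXT until it is first scheduled, at which point switch_mm()
 * below calls get_mmu_context() (defined elsewhere) to assign it a
 * free page_id. The hypothetical allocator here only sketches that
 * idea and ignores reuse of page_ids from exited mm's:
 */
#if 0
static void example_get_mmu_context(struct mm_struct *mm)
{
	static int next_page_id;	/* hypothetical cursor */

	if (mm->context.page_id != NO_CONTEXT)
		return;			/* already has a page_id */

	/* never hand out INVALID_PAGEID - it must stay "never matches" */
	mm->context.page_id = next_page_id++ % INVALID_PAGEID;
}
#endif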
/* called in schedule() just before actually doing the switch_to */

void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
	/* make sure we have a context */

	get_mmu_context(next);

	/* remember the pgd for the fault handlers
	 * this is similar to the pgd register in some other CPU's.
	 * we need our own copy of it because current and active_mm
	 * might be invalid at points where we still need to dereference
	 * the pgd.
	 */

	per_cpu(current_pgd, smp_processor_id()) = next->pgd;

	/* switch context in the MMU */

	D(printk("switching mmu_context to %d (%p)\n",
		 next->context.page_id, next));

	*R_MMU_CONTEXT = IO_FIELD(R_MMU_CONTEXT, page_id,
				  next->context.page_id);
}
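
/*
 * Illustration only, not part of the original file: a rough sketch of
 * how a fault handler can consume the per-cpu pgd copy saved above
 * instead of going through current/active_mm. Names follow the generic
 * page table API; the real CRIS fault handler differs in detail.
 */
#if 0
static pgd_t *example_fault_pgd(unsigned long address)
{
	pgd_t *pgd = (pgd_t *) per_cpu(current_pgd, smp_processor_id());

	/* start the walk from the saved pgd, not current->mm->pgd */
	return pgd + pgd_index(address);
}
#endif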