// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/cris/arch-v10/mm/tlb.c
 *
 *  Low level TLB handling
 *
 *  Copyright (C) 2000-2007  Axis Communications AB
 *
 *  Authors:   Bjorn Wesen (bjornw@axis.com)
 */

#include <linux/mm_types.h>

#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <arch/svinto.h>

#define D(x)
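
/* D() is a local debug helper: it expands to nothing, so the D(printk(...))
 * trace calls below are compiled out. Redefining it as "#define D(x) x"
 * re-enables them.
 */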

/* The TLB can host up to 64 different mm contexts at the same time.
 * The running context is R_MMU_CONTEXT, and each TLB entry contains a
 * page_id that has to match to give a hit. In page_id_map, we keep track
 * of which mm's we have assigned which page_id's, so that we know when
 * to invalidate TLB entries.
 *
 * The last page_id is never running - it is used as an invalid page_id
 * so we can make TLB entries that will never match.
 *
 * Notice that we need to make the flushes atomic, otherwise an interrupt
 * handler that uses vmalloced memory might cause a TLB load in the middle
 * of a flush, leaving the TLB in an inconsistent state.
 */
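
/* All of the flushes below therefore run with interrupts disabled
 * (local_irq_save()/local_irq_restore()) while they rewrite R_TLB_SELECT,
 * R_TLB_HI and R_TLB_LO for the entries being invalidated.
 */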

/* invalidate all TLB entries */

void
flush_tlb_all(void)
{
	int i;
	unsigned long flags;

	/* the vpn of i & 0xf is so we don't write similar TLB entries
	 * in the same 4-way entry group. details...
	 */

	local_irq_save(flags);
	for(i = 0; i < NUM_TLB_ENTRIES; i++) {
		*R_TLB_SELECT = ( IO_FIELD(R_TLB_SELECT, index, i) );
		*R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
			      IO_FIELD(R_TLB_HI, vpn, i & 0xf ) );

		*R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no ) |
			      IO_STATE(R_TLB_LO, valid, no ) |
			      IO_STATE(R_TLB_LO, kernel, no ) |
			      IO_STATE(R_TLB_LO, we, no ) |
			      IO_FIELD(R_TLB_LO, pfn, 0 ) );
	}
	local_irq_restore(flags);
	D(printk("tlb: flushed all\n"));
}

/* invalidate the selected mm context only */

void
flush_tlb_mm(struct mm_struct *mm)
{
	int i;
	int page_id = mm->context.page_id;
	unsigned long flags;

	D(printk("tlb: flush mm context %d (%p)\n", page_id, mm));

	if(page_id == NO_CONTEXT)
		return;

	/* mark the TLB entries that match the page_id as invalid.
	 * here we could also check the _PAGE_GLOBAL bit and NOT flush
	 * global pages. is it worth the extra I/O ?
	 */

	local_irq_save(flags);
	for(i = 0; i < NUM_TLB_ENTRIES; i++) {
		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
		if (IO_EXTRACT(R_TLB_HI, page_id, *R_TLB_HI) == page_id) {
			*R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
				      IO_FIELD(R_TLB_HI, vpn, i & 0xf ) );

			*R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no ) |
				      IO_STATE(R_TLB_LO, valid, no ) |
				      IO_STATE(R_TLB_LO, kernel, no ) |
				      IO_STATE(R_TLB_LO, we, no ) |
				      IO_FIELD(R_TLB_LO, pfn, 0 ) );
		}
	}
	local_irq_restore(flags);
}

/* invalidate a single page */

void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;
	int page_id = mm->context.page_id;
	int i;
	unsigned long flags;

	D(printk("tlb: flush page %lx in context %d (%p)\n", addr, page_id, mm));

	if(page_id == NO_CONTEXT)
		return;

	addr &= PAGE_MASK; /* perhaps not necessary */

	/* invalidate those TLB entries that match both the mm context
	 * and the virtual address requested
	 */

	local_irq_save(flags);
	for(i = 0; i < NUM_TLB_ENTRIES; i++) {
		unsigned long tlb_hi;

		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
		tlb_hi = *R_TLB_HI;
		if (IO_EXTRACT(R_TLB_HI, page_id, tlb_hi) == page_id &&
		    (tlb_hi & PAGE_MASK) == addr) {
			*R_TLB_HI = IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
				addr; /* keeping the same vpn works - the page_id
				       * is invalid, so the entry can never match.
				       */

			*R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no ) |
				      IO_STATE(R_TLB_LO, valid, no ) |
				      IO_STATE(R_TLB_LO, kernel, no ) |
				      IO_STATE(R_TLB_LO, we, no ) |
				      IO_FIELD(R_TLB_LO, pfn, 0 ) );
		}
	}
	local_irq_restore(flags);
}

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */

int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context.page_id = NO_CONTEXT;
	return 0;
}
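
/* No page_id is reserved here; get_mmu_context() assigns one the first
 * time this mm is actually switched in (see switch_mm() below).
 */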

/* called in schedule() just before actually doing the switch_to */

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *tsk)
{
	if (prev != next) {
		/* make sure we have a context */

		get_mmu_context(next);

		/* remember the pgd for the fault handlers
		 * this is similar to the pgd register in some other CPU's.
		 * we need our own copy of it because current and active_mm
		 * might be invalid at points where we still need to
		 * dereference the pgd.
		 */

		per_cpu(current_pgd, smp_processor_id()) = next->pgd;

		/* switch context in the MMU */

		D(printk(KERN_DEBUG "switching mmu_context to %d (%p)\n",
			 next->context.page_id, next));

		*R_MMU_CONTEXT = IO_FIELD(R_MMU_CONTEXT,
					  page_id, next->context.page_id);
	}
}