arch/nios2/mm/mmu_context.c
/*
 * MMU context handling.
 *
 * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
 * Copyright (C) 2009 Wind River Systems Inc
 *   Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/mm.h>

#include <asm/cpuinfo.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
/* The pid's position and mask in the context */
#define PID_SHIFT	0
#define PID_BITS	(cpuinfo.tlb_pid_num_bits)
#define PID_MASK	((1UL << PID_BITS) - 1)

/* The version's position and mask in the context */
#define VERSION_BITS	(32 - PID_BITS)
#define VERSION_SHIFT	(PID_SHIFT + PID_BITS)
#define VERSION_MASK	((1UL << VERSION_BITS) - 1)

/* Return the version part of a context */
#define CTX_VERSION(c)	(((c) >> VERSION_SHIFT) & VERSION_MASK)

/* Return the pid part of a context */
#define CTX_PID(c)	(((c) >> PID_SHIFT) & PID_MASK)

/* Value of the first context (version 1, pid 0) */
#define FIRST_CTX	((1UL << VERSION_SHIFT) | (0 << PID_SHIFT))
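
/*
 * Worked example (illustrative only, assuming cpuinfo.tlb_pid_num_bits == 8):
 * VERSION_SHIFT is then 8, so a context value of 0x00000205 encodes version 2
 * (bits 31..8) and pid 5 (bits 7..0), and FIRST_CTX evaluates to 0x00000100,
 * i.e. version 1 with pid 0.
 */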

static mm_context_t next_mmu_context;

/*
 * Initialize MMU context management stuff.
 */
void __init mmu_context_init(void)
{
	/* We need to set this here because the value depends on runtime data
	 * from cpuinfo */
	next_mmu_context = FIRST_CTX;
}

/*
 * Set the new context (pid), keeping the way unchanged.
 */
static void set_context(mm_context_t context)
{
	set_mmu_pid(CTX_PID(context));
}
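
/*
 * Hand out the next (version, pid) pair from the global counter. When the
 * pid field wraps around, the carry bumps the version and the caches and TLB
 * are flushed, so no translation tagged with a reused pid can survive.
 */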
static mm_context_t get_new_context(void)
{
	/* Return the next pid */
	next_mmu_context += (1UL << PID_SHIFT);

	/* If the pid field wraps around we increase the version and
	 * flush the tlb */
	if (unlikely(CTX_PID(next_mmu_context) == 0)) {
		/* The version has already been incremented, since the pid
		 * increment above overflows into the version field */
		flush_cache_all();
		flush_tlb_all();
	}

	/* If the version wraps we start over with the first generation; we do
	 * not need to flush the tlb here since it is always done above */
	if (unlikely(CTX_VERSION(next_mmu_context) == 0))
		next_mmu_context = FIRST_CTX;

	return next_mmu_context;
}
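
/*
 * Switch the MMU over to the address space of 'next': give it a fresh pid if
 * its context belongs to an older version, publish its pgd for the fast TLB
 * miss handler and program its pid into the MMU.
 */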
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);

	/* If the process context we are swapping in has a different context
	 * generation than the one currently in use, it should get a new
	 * generation/pid */
	if (unlikely(CTX_VERSION(next->context) !=
		CTX_VERSION(next_mmu_context)))
		next->context = get_new_context();

	/* Save the current pgd so the fast tlb handler can find it */
	pgd_current = next->pgd;

	/* Set the current context */
	set_context(next->context);

	local_irq_restore(flags);
}

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	next->context = get_new_context();
	set_context(next->context);
	pgd_current = next->pgd;
}
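
/* Extract the hardware pid field from a saved mm context value */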
unsigned long get_pid_from_context(mm_context_t *context)
{
	return CTX_PID((*context));
}