/*
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */
#ifndef __BLACKFIN_MMU_CONTEXT_H__
#define __BLACKFIN_MMU_CONTEXT_H__
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm_types.h>

#include <asm/setup.h>
#include <asm/pgalloc.h>
#include <asm/cplbinit.h>
#include <asm/sections.h>
/* Note: L1 stacks are CPU-private things, so we bluntly disable this
   feature in SMP mode, and use the per-CPU scratch SRAM bank only to
   store the PDA instead. */
extern void *current_l1_stack_save;
extern int nr_l1stack_tasks;
extern void *l1_stack_base;
extern unsigned long l1_stack_len;
extern int l1sram_free(const void*);
extern void *l1sram_alloc_max(void*);
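/*
 * A single L1 scratchpad region is shared as the application stack by all
 * tasks using the L1-stack feature: nr_l1stack_tasks counts how many tasks
 * currently reference it, and the backing SRAM is only released once that
 * count drops back to zero.
 */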
static inline void free_l1stack(void)
{
	nr_l1stack_tasks--;
	if (nr_l1stack_tasks == 0) {
		l1sram_free(l1_stack_base);
		l1_stack_base = NULL;
		l1_stack_len = 0;
	}
}
static inline unsigned long
alloc_l1stack(unsigned long length, unsigned long *stack_base)
{
	if (nr_l1stack_tasks == 0) {
		l1_stack_base = l1sram_alloc_max(&l1_stack_len);
		if (!l1_stack_base)
			return 0;
	}

	if (l1_stack_len < length) {
		if (nr_l1stack_tasks == 0)
			l1sram_free(l1_stack_base);
		return 0;
	}
	*stack_base = (unsigned long)l1_stack_base;
	nr_l1stack_tasks++;
	return l1_stack_len;
}
static inline int
activate_l1stack(struct mm_struct *mm, unsigned long sp_base)
{
	if (current_l1_stack_save)
		memcpy(current_l1_stack_save, l1_stack_base, l1_stack_len);
	mm->context.l1_stack_save = current_l1_stack_save = (void*)sp_base;
	memcpy(l1_stack_base, current_l1_stack_save, l1_stack_len);
	return 1;
}
#define deactivate_mm(tsk,mm)	do { } while (0)

#define activate_mm(prev, next) switch_mm(prev, next, NULL)
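/*
 * __switch_mm() does the real work of an mm switch: with CONFIG_MPU it
 * reloads the data CPLB protection masks for the incoming mm, and with
 * CONFIG_APP_STACK_L1 it saves the outgoing task's L1 stack contents and
 * copies the incoming task's saved stack back into L1 SRAM.
 */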
static inline void __switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
			       struct task_struct *tsk)
{
#ifdef CONFIG_MPU
	unsigned int cpu = smp_processor_id();
#endif
	if (prev_mm == next_mm)
		return;
#ifdef CONFIG_MPU
	if (prev_mm->context.page_rwx_mask == current_rwx_mask[cpu]) {
		flush_switched_cplbs(cpu);
		set_mask_dcplbs(next_mm->context.page_rwx_mask, cpu);
	}
#endif

#ifdef CONFIG_APP_STACK_L1
	/* L1 stack switching.  */
	if (!next_mm->context.l1_stack_save)
		return;
	if (next_mm->context.l1_stack_save == current_l1_stack_save)
		return;
	if (current_l1_stack_save)
		memcpy(current_l1_stack_save, l1_stack_base, l1_stack_len);
	current_l1_stack_save = next_mm->context.l1_stack_save;
	memcpy(l1_stack_base, current_l1_stack_save, l1_stack_len);
#endif
}
#ifdef CONFIG_IPIPE
#define lock_mm_switch(flags)	flags = hard_local_irq_save_cond()
#define unlock_mm_switch(flags)	hard_local_irq_restore_cond(flags)
#else
#define lock_mm_switch(flags)	do { (void)(flags); } while (0)
#define unlock_mm_switch(flags)	do { (void)(flags); } while (0)
#endif /* CONFIG_IPIPE */
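/*
 * With CONFIG_MPU, switch_mm() has to reload the CPLB protection masks, so
 * the switch is bracketed by lock_mm_switch()/unlock_mm_switch() to keep an
 * interrupt pipelined by the I-pipe from observing a half-switched mm.
 */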
#ifdef CONFIG_MPU
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long flags;
	lock_mm_switch(flags);
	__switch_mm(prev, next, tsk);
	unlock_mm_switch(flags);
}
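/*
 * protect_page() records the protection bits of one 4 KiB page in the mm's
 * page_rwx_mask: three consecutive bitmaps of page_mask_nelts words each
 * (read, write, execute), one bit per page.  Addresses in the async memory
 * banks are remapped so they index the bitmaps just past the end of RAM.
 */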
static inline void protect_page(struct mm_struct *mm, unsigned long addr,
				unsigned long flags)
{
	unsigned long *mask = mm->context.page_rwx_mask;
	unsigned long page;
	unsigned long idx;
	unsigned long bit;

	if (unlikely(addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE))
		page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> 12;
	else
		page = addr >> 12;
	idx = page >> 5;
	bit = 1 << (page & 31);

	if (flags & VM_READ)
		mask[idx] |= bit;
	else
		mask[idx] &= ~bit;
	mask += page_mask_nelts;
	if (flags & VM_WRITE)
		mask[idx] |= bit;
	else
		mask[idx] &= ~bit;
	mask += page_mask_nelts;
	if (flags & VM_EXEC)
		mask[idx] |= bit;
	else
		mask[idx] &= ~bit;
}
static inline void update_protections(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();
	if (mm->context.page_rwx_mask == current_rwx_mask[cpu]) {
		flush_switched_cplbs(cpu);
		set_mask_dcplbs(mm->context.page_rwx_mask, cpu);
	}
}
#else /* !CONFIG_MPU */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	__switch_mm(prev, next, tsk);
}
#endif
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
/* Called when creating a new context during fork() or execve().  */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
#ifdef CONFIG_MPU
	unsigned long p = __get_free_pages(GFP_KERNEL, page_mask_order);
	mm->context.page_rwx_mask = (unsigned long *)p;
	memset(mm->context.page_rwx_mask, 0,
	       page_mask_nelts * 3 * sizeof(long));
#endif
	return 0;
}
static inline void destroy_context(struct mm_struct *mm)
{
	struct sram_list_struct *tmp;
#ifdef CONFIG_MPU
	unsigned int cpu = smp_processor_id();
#endif

#ifdef CONFIG_APP_STACK_L1
	if (current_l1_stack_save == mm->context.l1_stack_save)
		current_l1_stack_save = 0;
	if (mm->context.l1_stack_save)
		free_l1stack();
#endif

	while ((tmp = mm->context.sram_list)) {
		mm->context.sram_list = tmp->next;
		sram_free(tmp->addr);
		kfree(tmp);
	}
#ifdef CONFIG_MPU
	if (current_rwx_mask[cpu] == mm->context.page_rwx_mask)
		current_rwx_mask[cpu] = NULL;
	free_pages((unsigned long)mm->context.page_rwx_mask, page_mask_order);
#endif
}
#define ipipe_mm_switch_protect(flags)		\
	flags = hard_local_irq_save_cond()

#define ipipe_mm_switch_unprotect(flags)	\
	hard_local_irq_restore_cond(flags)

#endif /* __BLACKFIN_MMU_CONTEXT_H__ */