/*
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */
#ifndef __BLACKFIN_MMU_CONTEXT_H__
#define __BLACKFIN_MMU_CONTEXT_H__
#include <linux/slab.h>
#include <linux/sched.h>
#include <asm/setup.h>

#include <asm/pgalloc.h>
#include <asm/cplbinit.h>
#include <asm/sections.h>
/* Note: L1 stacks are CPU-private things, so we bluntly disable this
   feature in SMP mode, and use the per-CPU scratch SRAM bank only to
   store the PDA instead. */
extern void *current_l1_stack_save;
extern int nr_l1stack_tasks;
extern void *l1_stack_base;
extern unsigned long l1_stack_len;
extern int l1sram_free(const void*);
extern void *l1sram_alloc_max(void*);
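/*
 * A single L1 SRAM stack region is shared by every task that asks for one:
 * the first user grabs the largest free chunk via l1sram_alloc_max(),
 * nr_l1stack_tasks reference-counts it, and the last user returns it to
 * the SRAM allocator.
 */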
static inline void free_l1stack(void)
{
	nr_l1stack_tasks--;
	if (nr_l1stack_tasks == 0) {
		l1sram_free(l1_stack_base);
		l1_stack_base = NULL;
		l1_stack_len = 0;
	}
}
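/*
 * Reserve the shared L1 stack region for another task.  Returns the usable
 * length and stores the base through *stack_base, or returns 0 if the
 * region cannot be allocated or is smaller than the requested length.
 */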
static inline unsigned long
alloc_l1stack(unsigned long length, unsigned long *stack_base)
{
	if (nr_l1stack_tasks == 0) {
		l1_stack_base = l1sram_alloc_max(&l1_stack_len);
		if (!l1_stack_base)
			return 0;
	}

	if (l1_stack_len < length) {
		if (nr_l1stack_tasks == 0)
			l1sram_free(l1_stack_base);
		return 0;
	}
	*stack_base = (unsigned long)l1_stack_base;
	nr_l1stack_tasks++;
	return l1_stack_len;
}
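/*
 * Switch the live L1 stack to the region shadowed at sp_base: spill the
 * current owner's contents back to its SDRAM shadow, then copy the new
 * owner's shadow into L1 SRAM.
 */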
static inline int
activate_l1stack(struct mm_struct *mm, unsigned long sp_base)
{
	if (current_l1_stack_save)
		memcpy(current_l1_stack_save, l1_stack_base, l1_stack_len);
	mm->context.l1_stack_save = current_l1_stack_save = (void *)sp_base;
	memcpy(l1_stack_base, current_l1_stack_save, l1_stack_len);
	return 1;
}
#define deactivate_mm(tsk,mm)	do { } while (0)

#define activate_mm(prev, next)	switch_mm(prev, next, NULL)
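/*
 * Core of the context switch: with CONFIG_MPU, swap in the next mm's
 * page-protection masks if the previous mm's masks are the ones currently
 * programmed into the data CPLBs; with CONFIG_APP_STACK_L1, swap the L1
 * stack contents between the outgoing and incoming mm.
 */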
static inline void __switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
			       struct task_struct *tsk)
{
#ifdef CONFIG_MPU
	unsigned int cpu = smp_processor_id();
#endif
	if (prev_mm == next_mm)
		return;
#ifdef CONFIG_MPU
	if (prev_mm->context.page_rwx_mask == current_rwx_mask[cpu]) {
		flush_switched_cplbs(cpu);
		set_mask_dcplbs(next_mm->context.page_rwx_mask, cpu);
	}
#endif

#ifdef CONFIG_APP_STACK_L1
	/* L1 stack switching. */
	if (!next_mm->context.l1_stack_save)
		return;
	if (next_mm->context.l1_stack_save == current_l1_stack_save)
		return;
	if (current_l1_stack_save) {
		memcpy(current_l1_stack_save, l1_stack_base, l1_stack_len);
	}
	current_l1_stack_save = next_mm->context.l1_stack_save;
	memcpy(l1_stack_base, current_l1_stack_save, l1_stack_len);
#endif
}
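/*
 * With CONFIG_IPIPE the mm switch must run with hard interrupts disabled;
 * without it, the lock/unlock pair compiles away to nothing.
 */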
#ifdef CONFIG_IPIPE
#define lock_mm_switch(flags)	flags = hard_local_irq_save_cond()
#define unlock_mm_switch(flags)	hard_local_irq_restore_cond(flags)
#else
#define lock_mm_switch(flags)	do { (void)(flags); } while (0)
#define unlock_mm_switch(flags)	do { (void)(flags); } while (0)
#endif /* CONFIG_IPIPE */
#ifdef CONFIG_MPU
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long flags;
	lock_mm_switch(flags);
	__switch_mm(prev, next, tsk);
	unlock_mm_switch(flags);
}
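/*
 * mm->context.page_rwx_mask holds three consecutive bitmaps (read, write,
 * execute), each page_mask_nelts words long, with one bit per 4 KiB page.
 * Pages in the async memory banks are indexed as if those banks started
 * right at the end of SDRAM (_ramend).
 */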
static inline void protect_page(struct mm_struct *mm, unsigned long addr,
				unsigned long flags)
{
	unsigned long *mask = mm->context.page_rwx_mask;
	unsigned long page;
	unsigned long idx;
	unsigned long bit;

	if (unlikely(addr >= ASYNC_BANK0_BASE &&
		     addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE))
		page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> 12;
	else
		page = addr >> 12;
	idx = page >> 5;
	bit = 1 << (page & 31);

	if (flags & VM_READ)
		mask[idx] |= bit;
	else
		mask[idx] &= ~bit;
	mask += page_mask_nelts;
	if (flags & VM_WRITE)
		mask[idx] |= bit;
	else
		mask[idx] &= ~bit;
	mask += page_mask_nelts;
	if (flags & VM_EXEC)
		mask[idx] |= bit;
	else
		mask[idx] &= ~bit;
}
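/*
 * Push the mm's protection masks to the hardware if they are the ones
 * live on this CPU, so later CPLB fills see the updated bits.
 */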
static inline void update_protections(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();
	if (mm->context.page_rwx_mask == current_rwx_mask[cpu]) {
		flush_switched_cplbs(cpu);
		set_mask_dcplbs(mm->context.page_rwx_mask, cpu);
	}
}
#else /* !CONFIG_MPU */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	__switch_mm(prev, next, tsk);
}
#endif /* CONFIG_MPU */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
/* Called when creating a new context during fork() or execve(). */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
#ifdef CONFIG_MPU
	unsigned long p = __get_free_pages(GFP_KERNEL, page_mask_order);
	mm->context.page_rwx_mask = (unsigned long *)p;
	memset(mm->context.page_rwx_mask, 0,
	       page_mask_nelts * 3 * sizeof(long));
#endif
	return 0;
}
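/*
 * Tear down per-mm state: drop the L1 stack shadow reference, return any
 * task-allocated SRAM chunks on sram_list, and free the protection masks.
 */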
static inline void destroy_context(struct mm_struct *mm)
{
	struct sram_list_struct *tmp;
#ifdef CONFIG_MPU
	unsigned int cpu = smp_processor_id();
#endif

#ifdef CONFIG_APP_STACK_L1
	if (current_l1_stack_save == mm->context.l1_stack_save)
		current_l1_stack_save = 0;
	if (mm->context.l1_stack_save)
		free_l1stack();
#endif

	while ((tmp = mm->context.sram_list)) {
		mm->context.sram_list = tmp->next;
		sram_free(tmp->addr);
		kfree(tmp);
	}
#ifdef CONFIG_MPU
	if (current_rwx_mask[cpu] == mm->context.page_rwx_mask)
		current_rwx_mask[cpu] = NULL;
	free_pages((unsigned long)mm->context.page_rwx_mask, page_mask_order);
#endif
}
#define ipipe_mm_switch_protect(flags)		\
	flags = hard_local_irq_save_cond()

#define ipipe_mm_switch_unprotect(flags)	\
	hard_local_irq_restore_cond(flags)

#endif /* __BLACKFIN_MMU_CONTEXT_H__ */