/*
 * arch/arm/kernel/crunch-bits.S
 * Cirrus MaverickCrunch context switching and handling
 *
 * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
 *
 * Shamelessly stolen from the iWMMXt code by Nicolas Pitre, which is
 * Copyright (c) 2003-2004, MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/linkage.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/arch/ep93xx-regs.h>
/*
 * We can't use hex constants here due to a bug in gas.
 */
#define CRUNCH_MVDX0		0
#define CRUNCH_MVDX1		8
#define CRUNCH_MVDX2		16
#define CRUNCH_MVDX3		24
#define CRUNCH_MVDX4		32
#define CRUNCH_MVDX5		40
#define CRUNCH_MVDX6		48
#define CRUNCH_MVDX7		56
#define CRUNCH_MVDX8		64
#define CRUNCH_MVDX9		72
#define CRUNCH_MVDX10		80
#define CRUNCH_MVDX11		88
#define CRUNCH_MVDX12		96
#define CRUNCH_MVDX13		104
#define CRUNCH_MVDX14		112
#define CRUNCH_MVDX15		120
#define CRUNCH_MVAX0L		128
#define CRUNCH_MVAX0M		132
#define CRUNCH_MVAX0H		136
#define CRUNCH_MVAX1L		140
#define CRUNCH_MVAX1M		144
#define CRUNCH_MVAX1H		148
#define CRUNCH_MVAX2L		152
#define CRUNCH_MVAX2M		156
#define CRUNCH_MVAX2H		160
#define CRUNCH_MVAX3L		164
#define CRUNCH_MVAX3M		168
#define CRUNCH_MVAX3H		172
#define CRUNCH_DSPSC		176

#define CRUNCH_SIZE		184
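/*
 * Layout implied by the offsets above: sixteen 64-bit MVDX registers
 * (16 x 8 = 128 bytes), the four 72-bit accumulators stored as three
 * 32-bit words each (12 x 4 = 48 bytes), and a 64-bit slot for DSPSC
 * (8 bytes), which is where CRUNCH_SIZE = 184 comes from.
 */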
	.text

/*
 * Lazy switching of crunch coprocessor context
 *
 * r10 = struct thread_info pointer
 * r9  = ret_from_exception
 * lr  = undefined instr exit
 *
 * called from prefetch exception handler with interrupts disabled
 */
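/*
 * Note on the code below: if crunch access is already enabled for the
 * current task there is nothing to do.  Otherwise the syscon swlock
 * (offset 0xc0) is unlocked with 0xaa so the enable bit (0x00800000)
 * can be set in the register at offset 0x80, the faulting pc is wound
 * back by 4 so the trapped crunch instruction gets retried, any
 * previous owner's registers are saved into its thread_info save area,
 * and the new owner's saved state is loaded.
 */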
ENTRY(crunch_task_enable)
	ldr	r8, =(EP93XX_APB_VIRT_BASE + 0x00130000)	@ syscon addr

	ldr	r1, [r8, #0x80]
	tst	r1, #0x00800000			@ access to crunch enabled?
	movne	pc, lr				@ if so no business here
	mov	r3, #0xaa			@ unlock syscon swlock
	str	r3, [r8, #0xc0]
	orr	r1, r1, #0x00800000		@ enable access to crunch
	str	r1, [r8, #0x80]

	ldr	r3, =crunch_owner
	add	r0, r10, #TI_CRUNCH_STATE	@ get task crunch save area
	ldr	r2, [sp, #60]			@ current task pc value
	ldr	r1, [r3]			@ get current crunch owner
	str	r0, [r3]			@ this task now owns crunch
	sub	r2, r2, #4			@ adjust pc back
	str	r2, [sp, #60]

	ldr	r2, [r8, #0x80]
	mov	r2, r2				@ flush out enable (@@@)

	teq	r1, #0				@ test for last ownership
	mov	lr, r9				@ normal exit from exception
	beq	crunch_load			@ no owner, skip save

crunch_save:
	cfstr64		mvdx0, [r1, #CRUNCH_MVDX0]	@ save 64b registers
	cfstr64		mvdx1, [r1, #CRUNCH_MVDX1]
	cfstr64		mvdx2, [r1, #CRUNCH_MVDX2]
	cfstr64		mvdx3, [r1, #CRUNCH_MVDX3]
	cfstr64		mvdx4, [r1, #CRUNCH_MVDX4]
	cfstr64		mvdx5, [r1, #CRUNCH_MVDX5]
	cfstr64		mvdx6, [r1, #CRUNCH_MVDX6]
	cfstr64		mvdx7, [r1, #CRUNCH_MVDX7]
	cfstr64		mvdx8, [r1, #CRUNCH_MVDX8]
	cfstr64		mvdx9, [r1, #CRUNCH_MVDX9]
	cfstr64		mvdx10, [r1, #CRUNCH_MVDX10]
	cfstr64		mvdx11, [r1, #CRUNCH_MVDX11]
	cfstr64		mvdx12, [r1, #CRUNCH_MVDX12]
	cfstr64		mvdx13, [r1, #CRUNCH_MVDX13]
	cfstr64		mvdx14, [r1, #CRUNCH_MVDX14]
	cfstr64		mvdx15, [r1, #CRUNCH_MVDX15]

#ifdef __ARMEB__
#error fix me for ARMEB
#endif
	cfmv32al	mvfx0, mvax0			@ save 72b accumulators
	cfstr32		mvfx0, [r1, #CRUNCH_MVAX0L]
	cfmv32am	mvfx0, mvax0
	cfstr32		mvfx0, [r1, #CRUNCH_MVAX0M]
	cfmv32ah	mvfx0, mvax0
	cfstr32		mvfx0, [r1, #CRUNCH_MVAX0H]
	cfmv32al	mvfx0, mvax1
	cfstr32		mvfx0, [r1, #CRUNCH_MVAX1L]
	cfmv32am	mvfx0, mvax1
	cfstr32		mvfx0, [r1, #CRUNCH_MVAX1M]
	cfmv32ah	mvfx0, mvax1
	cfstr32		mvfx0, [r1, #CRUNCH_MVAX1H]
	cfmv32al	mvfx0, mvax2
	cfstr32		mvfx0, [r1, #CRUNCH_MVAX2L]
	cfmv32am	mvfx0, mvax2
	cfstr32		mvfx0, [r1, #CRUNCH_MVAX2M]
	cfmv32ah	mvfx0, mvax2
	cfstr32		mvfx0, [r1, #CRUNCH_MVAX2H]
	cfmv32al	mvfx0, mvax3
	cfstr32		mvfx0, [r1, #CRUNCH_MVAX3L]
	cfmv32am	mvfx0, mvax3
	cfstr32		mvfx0, [r1, #CRUNCH_MVAX3M]
	cfmv32ah	mvfx0, mvax3
	cfstr32		mvfx0, [r1, #CRUNCH_MVAX3H]

	cfmv32sc	mvdx0, dspsc			@ save status word
	cfstr64		mvdx0, [r1, #CRUNCH_DSPSC]

	teq		r0, #0				@ anything to load?
	cfldr64eq	mvdx0, [r1, #CRUNCH_MVDX0]	@ mvdx0 was clobbered
	moveq		pc, lr
crunch_load:
	cfldr64		mvdx0, [r0, #CRUNCH_DSPSC]	@ load status word
	cfmvsc32	dspsc, mvdx0

	cfldr32		mvfx0, [r0, #CRUNCH_MVAX0L]	@ load 72b accumulators
	cfmval32	mvax0, mvfx0
	cfldr32		mvfx0, [r0, #CRUNCH_MVAX0M]
	cfmvam32	mvax0, mvfx0
	cfldr32		mvfx0, [r0, #CRUNCH_MVAX0H]
	cfmvah32	mvax0, mvfx0
	cfldr32		mvfx0, [r0, #CRUNCH_MVAX1L]
	cfmval32	mvax1, mvfx0
	cfldr32		mvfx0, [r0, #CRUNCH_MVAX1M]
	cfmvam32	mvax1, mvfx0
	cfldr32		mvfx0, [r0, #CRUNCH_MVAX1H]
	cfmvah32	mvax1, mvfx0
	cfldr32		mvfx0, [r0, #CRUNCH_MVAX2L]
	cfmval32	mvax2, mvfx0
	cfldr32		mvfx0, [r0, #CRUNCH_MVAX2M]
	cfmvam32	mvax2, mvfx0
	cfldr32		mvfx0, [r0, #CRUNCH_MVAX2H]
	cfmvah32	mvax2, mvfx0
	cfldr32		mvfx0, [r0, #CRUNCH_MVAX3L]
	cfmval32	mvax3, mvfx0
	cfldr32		mvfx0, [r0, #CRUNCH_MVAX3M]
	cfmvam32	mvax3, mvfx0
	cfldr32		mvfx0, [r0, #CRUNCH_MVAX3H]
	cfmvah32	mvax3, mvfx0

	cfldr64		mvdx0, [r0, #CRUNCH_MVDX0]	@ load 64b registers
	cfldr64		mvdx1, [r0, #CRUNCH_MVDX1]
	cfldr64		mvdx2, [r0, #CRUNCH_MVDX2]
	cfldr64		mvdx3, [r0, #CRUNCH_MVDX3]
	cfldr64		mvdx4, [r0, #CRUNCH_MVDX4]
	cfldr64		mvdx5, [r0, #CRUNCH_MVDX5]
	cfldr64		mvdx6, [r0, #CRUNCH_MVDX6]
	cfldr64		mvdx7, [r0, #CRUNCH_MVDX7]
	cfldr64		mvdx8, [r0, #CRUNCH_MVDX8]
	cfldr64		mvdx9, [r0, #CRUNCH_MVDX9]
	cfldr64		mvdx10, [r0, #CRUNCH_MVDX10]
	cfldr64		mvdx11, [r0, #CRUNCH_MVDX11]
	cfldr64		mvdx12, [r0, #CRUNCH_MVDX12]
	cfldr64		mvdx13, [r0, #CRUNCH_MVDX13]
	cfldr64		mvdx14, [r0, #CRUNCH_MVDX14]
	cfldr64		mvdx15, [r0, #CRUNCH_MVDX15]

	mov	pc, lr
/*
 * Back up crunch regs to save area and disable access to them
 * (mainly for gdb or sleep mode usage)
 *
 * r0 = struct thread_info pointer of target task or NULL for any
 */
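/*
 * Note on the code below: this runs with IRQs masked.  If the given
 * task (or any task, when r0 is NULL) is the current crunch owner,
 * access is temporarily re-enabled through the syscon so crunch_save
 * can dump the live registers into the owner's thread_info area, the
 * owner pointer is cleared, and access is then disabled again.
 */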
ENTRY(crunch_task_disable)
	stmfd	sp!, {r4, r5, lr}

	mrs	ip, cpsr
	orr	r2, ip, #PSR_I_BIT		@ disable interrupts
	msr	cpsr_c, r2

	ldr	r4, =(EP93XX_APB_VIRT_BASE + 0x00130000)	@ syscon addr

	ldr	r3, =crunch_owner
	add	r2, r0, #TI_CRUNCH_STATE	@ get task crunch save area
	ldr	r1, [r3]			@ get current crunch owner
	teq	r1, #0				@ any current owner?
	beq	1f				@ no: quit
	teq	r0, #0				@ any owner?
	teqne	r1, r2				@ or specified one?
	bne	1f				@ no: quit

	ldr	r5, [r4, #0x80]			@ enable access to crunch
	mov	r2, #0xaa
	str	r2, [r4, #0xc0]
	orr	r5, r5, #0x00800000
	str	r5, [r4, #0x80]

	mov	r0, #0				@ nothing to load
	str	r0, [r3]			@ no more current owner
	ldr	r2, [r4, #0x80]			@ flush out enable (@@@)
	mov	r2, r2
	bl	crunch_save

	mov	r2, #0xaa			@ disable access to crunch
	str	r2, [r4, #0xc0]
	bic	r5, r5, #0x00800000
	str	r5, [r4, #0x80]
	ldr	r5, [r4, #0x80]			@ flush out enable (@@@)
	mov	r5, r5

1:	msr	cpsr_c, ip			@ restore interrupt mode
	ldmfd	sp!, {r4, r5, pc}
/*
 * Copy crunch state to given memory address
 *
 * r0 = struct thread_info pointer of target task
 * r1 = memory address where to store crunch state
 *
 * this is called mainly in the creation of signal stack frames
 */
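/*
 * Note on the code below: if the task is not the current crunch owner,
 * its latest state already lives in its thread_info save area and a
 * plain memcpy of CRUNCH_SIZE bytes is enough.  If it is the owner, the
 * live registers are written straight into the destination buffer via
 * crunch_save (called with r0 = 0 so it returns instead of falling
 * through to crunch_load).
 */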
ENTRY(crunch_task_copy)
	mrs	ip, cpsr
	orr	r2, ip, #PSR_I_BIT		@ disable interrupts
	msr	cpsr_c, r2

	ldr	r3, =crunch_owner
	add	r2, r0, #TI_CRUNCH_STATE	@ get task crunch save area
	ldr	r3, [r3]			@ get current crunch owner
	teq	r2, r3				@ does this task own it...
	beq	1f

	@ current crunch values are in the task save area
	msr	cpsr_c, ip			@ restore interrupt mode
	mov	r0, r1
	mov	r1, r2
	mov	r2, #CRUNCH_SIZE
	b	memcpy

1:	@ this task owns crunch regs -- grab a copy from there
	mov	r0, #0				@ nothing to load
	mov	r3, lr				@ preserve return address
	bl	crunch_save
	msr	cpsr_c, ip			@ restore interrupt mode
	mov	pc, r3
/*
 * Restore crunch state from given memory address
 *
 * r0 = struct thread_info pointer of target task
 * r1 = memory address where to get crunch state from
 *
 * this is used to restore crunch state when unwinding a signal stack frame
 */
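/*
 * Note on the code below: the inverse of crunch_task_copy.  If the task
 * is not the current crunch owner, the saved image is memcpy'd into its
 * thread_info save area; if it is the owner, crunch_load pulls the
 * state from the given buffer directly into the coprocessor registers.
 */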
ENTRY(crunch_task_restore)
	mrs	ip, cpsr
	orr	r2, ip, #PSR_I_BIT		@ disable interrupts
	msr	cpsr_c, r2

	ldr	r3, =crunch_owner
	add	r2, r0, #TI_CRUNCH_STATE	@ get task crunch save area
	ldr	r3, [r3]			@ get current crunch owner
	teq	r2, r3				@ does this task own it...
	beq	1f

	@ this task doesn't own crunch regs -- use its save area
	msr	cpsr_c, ip			@ restore interrupt mode
	mov	r0, r2
	mov	r2, #CRUNCH_SIZE
	b	memcpy

1:	@ this task owns crunch regs -- load them directly
	mov	r0, r1
	mov	r1, #0				@ nothing to save
	mov	r3, lr				@ preserve return address
	bl	crunch_load
	msr	cpsr_c, ip			@ restore interrupt mode
	mov	pc, r3