arch/ia64/lib/flush.S
/*
 * Cache flushing routines.
 *
 * Copyright (C) 1999-2001, 2005 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 05/28/05 Zoltan Menyhart     Dynamic stride size
 */

#include <asm/asmmacro.h>

        /*
         * flush_icache_range(start,end)
         *
         *      Make i-cache(s) coherent with d-caches.
         *
         *      Must deal with range from start to end-1 but nothing else (need to
         *      be careful not to touch addresses that may be unmapped).
         *
         *      Note: "in0" and "in1" are preserved for debugging purposes.
         */
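        /*
         * Rough C-level view of what the code below does (an illustrative
         * sketch only, not a definition; "stride" is the i-cache line stride
         * derived from ia64_i_cache_stride_shift):
         *
         *      for (addr = start & ~(stride - 1); addr <= end - 1; addr += stride)
         *              fc.i(addr);
         *      sync.i; srlz.i;
         */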
        .section .kprobes.text,"ax"
GLOBAL_ENTRY(flush_icache_range)

        .prologue
        alloc   r2=ar.pfs,2,0,0,0
        movl    r3=ia64_i_cache_stride_shift
        mov     r21=1
        ;;
        ld8     r20=[r3]                // r20: stride shift
        sub     r22=in1,r0,1            // last byte address
        ;;
        shr.u   r23=in0,r20             // start / (stride size)
        shr.u   r22=r22,r20             // (last byte address) / (stride size)
        shl     r21=r21,r20             // r21: stride size of the i-cache(s)
        ;;
        sub     r8=r22,r23              // number of strides - 1
        shl     r24=r23,r20             // r24: addresses for "fc.i" =
                                        //      "start" rounded down to stride boundary
        .save   ar.lc,r3
        mov     r3=ar.lc                // save ar.lc
        ;;
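        /*
         * Worked example with made-up numbers: if the stride shift is 5
         * (32-byte strides), start = 0x1007 and end = 0x1081, then
         *      last byte = 0x1080, r23 = 0x80, r22 = 0x84,
         *      r8 = 4 (the loop below runs r8 + 1 = 5 times), and
         *      r24 = 0x1000, so lines 0x1000, 0x1020, ..., 0x1080 get flushed,
         *      covering every byte in [start, end-1].
         */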

        .body
        mov     ar.lc=r8
        ;;
        /*
         * 32 byte aligned loop, even number of (actually 2) bundles
         */
.Loop:  fc.i    r24                     // issuable on M0 only
        add     r24=r21,r24             // we flush "stride size" bytes per iteration
        nop.i   0
        br.cloop.sptk.few .Loop
        ;;
        sync.i                          // order the fc.i flushes with respect to srlz.i
        ;;
        srlz.i                          // subsequent instruction fetches see the flush
        ;;
        mov     ar.lc=r3                // restore ar.lc
        br.ret.sptk.many rp
END(flush_icache_range)

        /*
         * clflush_cache_range(start,size)
         *
         *      Flush cache lines from start to start+size-1.
         *
         *      Must deal with range from start to start+size-1 but nothing else
         *      (need to be careful not to touch addresses that may be
         *      unmapped).
         *
         *      Note: "in0" and "in1" are preserved for debugging purposes.
         */
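        /*
         * Same structure as flush_icache_range above, but the second argument
         * is a size rather than an end address, the stride comes from
         * ia64_cache_stride_shift, and plain "fc" is used instead of "fc.i".
         * Rough C-level sketch (illustrative only):
         *
         *      for (addr = start & ~(stride - 1); addr <= start + size - 1;
         *           addr += stride)
         *              fc(addr);
         */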
        .section .kprobes.text,"ax"
GLOBAL_ENTRY(clflush_cache_range)

        .prologue
        alloc   r2=ar.pfs,2,0,0,0
        movl    r3=ia64_cache_stride_shift
        mov     r21=1
        add     r22=in1,in0             // end = start + size
        ;;
        ld8     r20=[r3]                // r20: stride shift
        sub     r22=r22,r0,1            // last byte address
        ;;
        shr.u   r23=in0,r20             // start / (stride size)
        shr.u   r22=r22,r20             // (last byte address) / (stride size)
        shl     r21=r21,r20             // r21: stride size of the cache(s)
        ;;
        sub     r8=r22,r23              // number of strides - 1
        shl     r24=r23,r20             // r24: addresses for "fc" =
                                        //      "start" rounded down to stride
                                        //      boundary
        .save   ar.lc,r3
        mov     r3=ar.lc                // save ar.lc
        ;;

        .body
        mov     ar.lc=r8
        ;;
        /*
         * 32 byte aligned loop, even number of (actually 2) bundles
         */
.Loop_fc:
        fc      r24             // issuable on M0 only
        add     r24=r21,r24     // we flush "stride size" bytes per iteration
        nop.i   0
        br.cloop.sptk.few .Loop_fc
        ;;
        sync.i                          // order the fc flushes with respect to srlz.i
        ;;
        srlz.i                          // serialize before returning
        ;;
        mov     ar.lc=r3                // restore ar.lc
        br.ret.sptk.many rp
END(clflush_cache_range)
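
        /*
         * Assumed C-level prototypes, for reference only (a sketch inferred
         * from how in0/in1 are used above; the authoritative declarations
         * live in the corresponding cacheflush headers):
         *
         *      void flush_icache_range(unsigned long start, unsigned long end);
         *      void clflush_cache_range(void *start, int size);
         */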