1 /* SPDX-License-Identifier: GPL-2.0-only */
3 * Cache flushing routines.
5 * Copyright (C) 1999-2001, 2005 Hewlett-Packard Co
6 * David Mosberger-Tang <davidm@hpl.hp.com>
8 * 05/28/05 Zoltan Menyhart Dynamic stride size
11 #include <asm/asmmacro.h>
12 #include <asm/export.h>
16 * flush_icache_range(start,end)
18 * Make i-cache(s) coherent with d-caches.
20 * Must deal with range from start to end-1 but nothing else (need to
21 * be careful not to touch addresses that may be unmapped).
23 * Note: "in0" and "in1" are preserved for debugging purposes.
/*
 * flush_icache_range(start, end): make the instruction cache(s) coherent
 * with the data caches for the byte range [start, end).
 *
 * in0 = start, in1 = end (both left unmodified for debugging).
 * The range is walked one i-cache stride at a time, issuing "fc.i" on each
 * stride-aligned address; the trip count is driven by the ar.lc counted loop.
 *
 * NOTE(review): the embedded original line numbers are non-contiguous, so
 * several lines are elided from this view — e.g. the "mov r21=1" that seeds
 * r21 before "shl r21=r21,r20", the ";;" instruction-group stops, the
 * "mov ar.lc=r8" loop-count setup, the "sync.i"/"srlz.i" serialization and
 * the trailing "br.ret" — confirm against the complete source file.
 */
25 .section .kprobes.text,"ax"
26 GLOBAL_ENTRY(flush_icache_range)
29 alloc r2=ar.pfs,2,0,0,0 // register frame: 2 inputs (in0=start, in1=end), no locals/outputs
30 movl r3=ia64_i_cache_stride_shift // r3 = &log2(dynamic i-cache stride size)
33 ld8 r20=[r3] // r20: stride shift
34 sub r22=in1,r0,1 // last byte address (end - 1; r0 is the hardwired zero register)
36 shr.u r23=in0,r20 // start / (stride size)
37 shr.u r22=r22,r20 // (last byte address) / (stride size)
38 shl r21=r21,r20 // r21: stride size of the i-cache(s) — assumes r21=1 was set on an elided line
40 sub r8=r22,r23 // number of strides - 1 (loop trip count, destined for ar.lc)
41 shl r24=r23,r20 // r24: addresses for "fc.i" =
42 // "start" rounded down to stride boundary
44 mov r3=ar.lc // save ar.lc (clobbered by the counted loop; restored below)
51 * 32 byte aligned loop, even number of (actually 2) bundles
53 .Loop: fc.i r24 // flush i-cache line at r24; issuable on M0 only
54 add r24=r21,r24 // we flush "stride size" bytes per iteration
56 br.cloop.sptk.few .Loop // decrement ar.lc, branch while non-zero
62 mov ar.lc=r3 // restore ar.lc
64 END(flush_icache_range)
65 EXPORT_SYMBOL_GPL(flush_icache_range)
68 * clflush_cache_range(start,size)
70 * Flush cache lines from start to start+size-1.
72 * Must deal with range from start to start+size-1 but nothing else
73 * (need to be careful not to touch addresses that may be unmapped).
76 * Note: "in0" and "in1" are preserved for debugging purposes.
/*
 * clflush_cache_range(start, size): flush the (unified/data) cache lines
 * covering the byte range [start, start + size).
 *
 * in0 = start, in1 = size (both left unmodified for debugging).
 * Same structure as flush_icache_range above, but issues "fc" (flush cache)
 * instead of "fc.i", using the unified-cache stride.
 *
 * NOTE(review): the embedded original line numbers are non-contiguous, so
 * several lines are elided from this view — notably the "add r22=in1,in0"
 * that must precede "sub r22=r22,r0,1" below, the "mov r21=1" seed, the
 * ".Loop_fc:" label targeted by the br.cloop, the ";;" instruction-group
 * stops, "mov ar.lc=r8", the "sync.i"/"srlz.i" serialization and the final
 * "br.ret" — confirm against the complete source file.
 */
78 .section .kprobes.text,"ax"
79 GLOBAL_ENTRY(clflush_cache_range)
82 alloc r2=ar.pfs,2,0,0,0 // register frame: 2 inputs (in0=start, in1=size), no locals/outputs
83 movl r3=ia64_cache_stride_shift // r3 = &log2(dynamic cache stride size)
87 ld8 r20=[r3] // r20: stride shift
88 sub r22=r22,r0,1 // last byte address — assumes r22=start+size from an elided "add r22=in1,in0"
90 shr.u r23=in0,r20 // start / (stride size)
91 shr.u r22=r22,r20 // (last byte address) / (stride size)
92 shl r21=r21,r20 // r21: stride size of the cache(s) — assumes r21=1 was set on an elided line
94 sub r8=r22,r23 // number of strides - 1 (loop trip count, destined for ar.lc)
95 shl r24=r23,r20 // r24: addresses for "fc" =
96 // "start" rounded down to stride
99 mov r3=ar.lc // save ar.lc (clobbered by the counted loop; restored below)
106 * 32 byte aligned loop, even number of (actually 2) bundles
109 fc r24 // flush cache line at r24; issuable on M0 only (".Loop_fc:" label is on an elided line)
110 add r24=r21,r24 // we flush "stride size" bytes per iteration
112 br.cloop.sptk.few .Loop_fc // decrement ar.lc, branch while non-zero
118 mov ar.lc=r3 // restore ar.lc
120 END(clflush_cache_range)