/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003, 04, 05 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007  Maciej W. Rozycki
 * Copyright (C) 2008  Thiemo Seufer
 * Copyright (C) 2012  MIPS Technologies, Inc.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>

#include <asm/bugs.h>
#include <asm/cacheops.h>
#include <asm/cpu-type.h>
#include <asm/inst.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/prefetch.h>
#include <asm/bootinfo.h>
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/cpu.h>

#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
#include <asm/sibyte/sb1250.h>
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_dma.h>
#endif

#include <asm/uasm.h>

/* Registers used in the assembled routines. */
#define ZERO 0
#define AT 2
#define A0 4
#define A1 5
#define A2 6
#define T0 8
#define T1 9
#define T2 10
#define T3 11
#define T9 25
#define RA 31
/* Handle labels (which must be positive integers). */
enum label_id {
	label_clear_nopref = 1,
	label_clear_pref,
	label_copy_nopref,
	label_copy_pref_both,
	label_copy_pref_store,
};

UASM_L_LA(_clear_nopref)
UASM_L_LA(_clear_pref)
UASM_L_LA(_copy_nopref)
UASM_L_LA(_copy_pref_both)
UASM_L_LA(_copy_pref_store)
/* We need one branch and therefore one relocation per target label. */
static struct uasm_label labels[5];
static struct uasm_reloc relocs[5];

#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)
/*
 * R6 has a limited offset of the pref instruction.
 * Skip it if the offset is more than 9 bits.
 */
#define _uasm_i_pref(a, b, c, d)		\
do {						\
	if (cpu_has_mips_r6) {			\
		if (c <= 0xff && c >= -0x100)	\
			uasm_i_pref(a, b, c, d);\
	} else {				\
		uasm_i_pref(a, b, c, d);	\
	}					\
} while(0)
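/*
 * (The R6 pref immediate is a 9-bit signed field, so only offsets in
 * [-256, 255] are encodable; prefetches with a larger bias are simply
 * dropped on R6 rather than emitted with a bogus offset.)
 */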
static int pref_bias_clear_store;
static int pref_bias_copy_load;
static int pref_bias_copy_store;

static u32 pref_src_mode;
static u32 pref_dst_mode;

static int clear_word_size;
static int copy_word_size;

static int half_clear_loop_size;
static int half_copy_loop_size;

static int cache_line_size;
#define cache_line_mask() (cache_line_size - 1)
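/*
 * Calling convention of the generated routines: A0 carries the
 * destination page address, A1 (copy only) the source page address;
 * A2 holds the loop bound and T0..T3 are data temporaries.
 *
 * pg_addiu() emits "reg1 = reg2 + off" using the cheapest safe sequence:
 * a single (d)addiu for small offsets, lui/addiu/addu for larger ones,
 * and a T9-materialized daddu when the R4000/R4400 daddiu erratum
 * workaround (DADDI_WAR) is in effect.
 */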
static inline void
pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off)
{
	if (cpu_has_64bit_gp_regs && DADDI_WAR && r4k_daddiu_bug()) {
		if (off > 0x7fff) {
			uasm_i_lui(buf, T9, uasm_rel_hi(off));
			uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
		} else
			uasm_i_addiu(buf, T9, ZERO, off);
		uasm_i_daddu(buf, reg1, reg2, T9);
	} else {
		if (off > 0x7fff) {
			uasm_i_lui(buf, T9, uasm_rel_hi(off));
			uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
			UASM_i_ADDU(buf, reg1, reg2, T9);
		} else
			UASM_i_ADDIU(buf, reg1, reg2, off);
	}
}
static void set_prefetch_parameters(void)
{
	if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg)
		clear_word_size = 8;
	else
		clear_word_size = 4;

	if (cpu_has_64bit_gp_regs)
		copy_word_size = 8;
	else
		copy_word_size = 4;

	/*
	 * The prefs used here use "streaming" hints, which cause the
	 * copied data to be kicked out of the cache sooner. A page copy
	 * often ends up copying a lot more data than is commonly used, so
	 * this seems to make sense in terms of reducing cache pollution,
	 * but I've no real performance data to back this up.
	 */
	if (cpu_has_prefetch) {
		/*
		 * XXX: Most prefetch bias values in here are based on
		 * guesswork.
		 */
		cache_line_size = cpu_dcache_line_size();
		switch (current_cpu_type()) {
		case CPU_R5500:
		case CPU_TX49XX:
			/* These processors only support the Pref_Load hint. */
			pref_bias_copy_load = 256;
			break;

		case CPU_R10000:
		case CPU_R12000:
		case CPU_R14000:
		case CPU_R16000:
			/*
			 * Those values have been experimentally tuned for an
			 * Origin 200.
			 */
			pref_bias_clear_store = 512;
			pref_bias_copy_load = 256;
			pref_bias_copy_store = 256;
			pref_src_mode = Pref_LoadStreamed;
			pref_dst_mode = Pref_StoreStreamed;
			break;

		case CPU_SB1:
		case CPU_SB1A:
			pref_bias_clear_store = 128;
			pref_bias_copy_load = 128;
			pref_bias_copy_store = 128;
			/*
			 * SB1 pass1 Pref_LoadStreamed/Pref_StoreStreamed
			 * hints are broken.
			 */
			if (current_cpu_type() == CPU_SB1 &&
			    (current_cpu_data.processor_id & 0xff) < 0x02) {
				pref_src_mode = Pref_Load;
				pref_dst_mode = Pref_Store;
			} else {
				pref_src_mode = Pref_LoadStreamed;
				pref_dst_mode = Pref_StoreStreamed;
			}
			break;

		case CPU_LOONGSON64:
			/* Loongson-3 only supports Pref_Load/Pref_Store. */
			pref_bias_clear_store = 128;
			pref_bias_copy_load = 128;
			pref_bias_copy_store = 128;
			pref_src_mode = Pref_Load;
			pref_dst_mode = Pref_Store;
			break;

		default:
			pref_bias_clear_store = 128;
			pref_bias_copy_load = 256;
			pref_bias_copy_store = 128;
			pref_src_mode = Pref_LoadStreamed;
			if (cpu_has_mips_r6)
				/*
				 * Bit 30 (Pref_PrepareForStore) has been
				 * removed from MIPS R6. Use bit 5
				 * (Pref_StoreStreamed).
				 */
				pref_dst_mode = Pref_StoreStreamed;
			else
				pref_dst_mode = Pref_PrepareForStore;
			break;
		}
	} else {
		if (cpu_has_cache_cdex_s)
			cache_line_size = cpu_scache_line_size();
		else if (cpu_has_cache_cdex_p)
			cache_line_size = cpu_dcache_line_size();
	}

	/*
	 * Too much unrolling will overflow the available space in
	 * clear_space_array / copy_page_array.
	 */
	half_clear_loop_size = min(16 * clear_word_size,
				   max(cache_line_size >> 1,
				       4 * clear_word_size));
	half_copy_loop_size = min(16 * copy_word_size,
				  max(cache_line_size >> 1,
				      4 * copy_word_size));
}
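/*
 * Example: with 64-bit GP registers (clear_word_size == 8) and 32-byte
 * D-cache lines, half_clear_loop_size = min(128, max(16, 32)) = 32,
 * i.e. each half of the unrolled clear loop issues four sd's.
 */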
static void build_clear_store(u32 **buf, int off)
{
	if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg) {
		uasm_i_sd(buf, ZERO, off, A0);
	} else {
		uasm_i_sw(buf, ZERO, off, A0);
	}
}
static inline void build_clear_pref(u32 **buf, int off)
{
	if (off & cache_line_mask())
		return;

	if (pref_bias_clear_store) {
		_uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
			    A0);
	} else if (cache_line_size == (half_clear_loop_size << 1)) {
		if (cpu_has_cache_cdex_s) {
			uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
		} else if (cpu_has_cache_cdex_p) {
			if (IS_ENABLED(CONFIG_WAR_R4600_V1_HIT_CACHEOP) &&
			    cpu_is_r4600_v1_x()) {
				uasm_i_nop(buf);
				uasm_i_nop(buf);
				uasm_i_nop(buf);
				uasm_i_nop(buf);
			}

			if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) &&
			    cpu_is_r4600_v2_x())
				uasm_i_lw(buf, ZERO, ZERO, AT);

			uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
		}
	}
}
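/*
 * Without a usable prefetch, a "create dirty exclusive" cacheop
 * establishes the destination line in the cache without first reading it
 * from memory. That is only safe when every byte of the line is about to
 * be overwritten, which is why it is emitted only if one full loop
 * (2 * half_clear_loop_size) covers exactly one cache line.
 */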
extern u32 __clear_page_start;
extern u32 __clear_page_end;
extern u32 __copy_page_start;
extern u32 __copy_page_end;
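/*
 * These markers delimit the space reserved for the generated handlers
 * (set aside in the accompanying page-funcs.S); the BUG_ON() checks in
 * the builders below catch any overflow of that space.
 */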
void build_clear_page(void)
{
	int off;
	u32 *buf = &__clear_page_start;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	int i;
	static atomic_t run_once = ATOMIC_INIT(0);

	if (atomic_xchg(&run_once, 1)) {
		return;
	}

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	set_prefetch_parameters();

	/*
	 * This algorithm makes the following assumptions:
	 *   - The prefetch bias is a multiple of 2 words.
	 *   - The prefetch bias is less than one page.
	 */
	BUG_ON(pref_bias_clear_store % (2 * clear_word_size));
	BUG_ON(PAGE_SIZE < pref_bias_clear_store);

	off = PAGE_SIZE - pref_bias_clear_store;
	if (off > 0xffff || !pref_bias_clear_store)
		pg_addiu(&buf, A2, A0, off);
	else
		uasm_i_ori(&buf, A2, A0, off);

	if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) && cpu_is_r4600_v2_x())
		uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000));

	off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size)
				* cache_line_size : 0;
	while (off) {
		build_clear_pref(&buf, -off);
		off -= cache_line_size;
	}
	uasm_l_clear_pref(&l, buf);
	do {
		build_clear_pref(&buf, off);
		build_clear_store(&buf, off);
		off += clear_word_size;
	} while (off < half_clear_loop_size);
	pg_addiu(&buf, A0, A0, 2 * off);
	off = -off;
	do {
		build_clear_pref(&buf, off);
		if (off == -clear_word_size)
			uasm_il_bne(&buf, &r, A0, A2, label_clear_pref);
		build_clear_store(&buf, off);
		off += clear_word_size;
	} while (off < 0);

	if (pref_bias_clear_store) {
		pg_addiu(&buf, A2, A0, pref_bias_clear_store);
		uasm_l_clear_nopref(&l, buf);
		off = 0;
		do {
			build_clear_store(&buf, off);
			off += clear_word_size;
		} while (off < half_clear_loop_size);
		pg_addiu(&buf, A0, A0, 2 * off);
		off = -off;
		do {
			if (off == -clear_word_size)
				uasm_il_bne(&buf, &r, A0, A2,
					    label_clear_nopref);
			build_clear_store(&buf, off);
			off += clear_word_size;
		} while (off < 0);
	}

	uasm_i_jr(&buf, RA);
	uasm_i_nop(&buf);

	BUG_ON(buf > &__clear_page_end);

	uasm_resolve_relocs(relocs, labels);

	pr_debug("Synthesized clear page handler (%u instructions).\n",
		 (u32)(buf - &__clear_page_start));

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < (buf - &__clear_page_start); i++)
		pr_debug("\t.word 0x%08x\n", (&__clear_page_start)[i]);
	pr_debug("\t.set pop\n");
}
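/*
 * Illustrative only, not emitted verbatim: assuming a 4K page, 32-byte
 * D-cache lines, 64-bit GP registers, the default 128-byte store bias
 * with Pref_PrepareForStore (hint 30), and no R4600 workarounds, the
 * synthesized handler looks roughly like:
 *
 *	ori	a2, a0, 0xf80		# a2 = a0 + PAGE_SIZE - bias
 *	pref	30, 0(a0)		# warm up four lines
 *	pref	30, 32(a0)
 *	pref	30, 64(a0)
 *	pref	30, 96(a0)
 * 1:	pref	30, 128(a0)		# stay one line ahead of the stores
 *	sd	zero, 0(a0)
 *	sd	zero, 8(a0)
 *	sd	zero, 16(a0)
 *	sd	zero, 24(a0)
 *	daddiu	a0, a0, 64
 *	pref	30, 96(a0)
 *	sd	zero, -32(a0)
 *	sd	zero, -24(a0)
 *	sd	zero, -16(a0)
 *	bne	a0, a2, 1b
 *	 sd	zero, -8(a0)		# branch delay slot
 *	...				# non-prefetched tail loop
 *	jr	ra
 *	 nop
 */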
static void build_copy_load(u32 **buf, int reg, int off)
{
	if (cpu_has_64bit_gp_regs) {
		uasm_i_ld(buf, reg, off, A1);
	} else {
		uasm_i_lw(buf, reg, off, A1);
	}
}

static void build_copy_store(u32 **buf, int reg, int off)
{
	if (cpu_has_64bit_gp_regs) {
		uasm_i_sd(buf, reg, off, A0);
	} else {
		uasm_i_sw(buf, reg, off, A0);
	}
}
static inline void build_copy_load_pref(u32 **buf, int off)
{
	if (off & cache_line_mask())
		return;

	if (pref_bias_copy_load)
		_uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1);
}
static inline void build_copy_store_pref(u32 **buf, int off)
{
	if (off & cache_line_mask())
		return;

	if (pref_bias_copy_store) {
		_uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
			    A0);
	} else if (cache_line_size == (half_copy_loop_size << 1)) {
		if (cpu_has_cache_cdex_s) {
			uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
		} else if (cpu_has_cache_cdex_p) {
			if (IS_ENABLED(CONFIG_WAR_R4600_V1_HIT_CACHEOP) &&
			    cpu_is_r4600_v1_x()) {
				uasm_i_nop(buf);
				uasm_i_nop(buf);
				uasm_i_nop(buf);
				uasm_i_nop(buf);
			}

			if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) &&
			    cpu_is_r4600_v2_x())
				uasm_i_lw(buf, ZERO, ZERO, AT);

			uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
		}
	}
}
void build_copy_page(void)
{
	int off;
	u32 *buf = &__copy_page_start;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	int i;
	static atomic_t run_once = ATOMIC_INIT(0);

	if (atomic_xchg(&run_once, 1)) {
		return;
	}

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	set_prefetch_parameters();

	/*
	 * This algorithm makes the following assumptions:
	 *   - All prefetch biases are multiples of 8 words.
	 *   - The prefetch biases are less than one page.
	 *   - The store prefetch bias isn't greater than the load
	 *     prefetch bias.
	 */
	BUG_ON(pref_bias_copy_load % (8 * copy_word_size));
	BUG_ON(pref_bias_copy_store % (8 * copy_word_size));
	BUG_ON(PAGE_SIZE < pref_bias_copy_load);
	BUG_ON(pref_bias_copy_store > pref_bias_copy_load);

	off = PAGE_SIZE - pref_bias_copy_load;
	if (off > 0xffff || !pref_bias_copy_load)
		pg_addiu(&buf, A2, A0, off);
	else
		uasm_i_ori(&buf, A2, A0, off);

	if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) && cpu_is_r4600_v2_x())
		uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000));

	off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) *
				cache_line_size : 0;
	while (off) {
		build_copy_load_pref(&buf, -off);
		off -= cache_line_size;
	}
	off = cache_line_size ? min(8, pref_bias_copy_store / cache_line_size) *
				cache_line_size : 0;
	while (off) {
		build_copy_store_pref(&buf, -off);
		off -= cache_line_size;
	}
	uasm_l_copy_pref_both(&l, buf);
	do {
		build_copy_load_pref(&buf, off);
		build_copy_load(&buf, T0, off);
		build_copy_load_pref(&buf, off + copy_word_size);
		build_copy_load(&buf, T1, off + copy_word_size);
		build_copy_load_pref(&buf, off + 2 * copy_word_size);
		build_copy_load(&buf, T2, off + 2 * copy_word_size);
		build_copy_load_pref(&buf, off + 3 * copy_word_size);
		build_copy_load(&buf, T3, off + 3 * copy_word_size);
		build_copy_store_pref(&buf, off);
		build_copy_store(&buf, T0, off);
		build_copy_store_pref(&buf, off + copy_word_size);
		build_copy_store(&buf, T1, off + copy_word_size);
		build_copy_store_pref(&buf, off + 2 * copy_word_size);
		build_copy_store(&buf, T2, off + 2 * copy_word_size);
		build_copy_store_pref(&buf, off + 3 * copy_word_size);
		build_copy_store(&buf, T3, off + 3 * copy_word_size);
		off += 4 * copy_word_size;
	} while (off < half_copy_loop_size);
	pg_addiu(&buf, A1, A1, 2 * off);
	pg_addiu(&buf, A0, A0, 2 * off);
	off = -off;
	do {
		build_copy_load_pref(&buf, off);
		build_copy_load(&buf, T0, off);
		build_copy_load_pref(&buf, off + copy_word_size);
		build_copy_load(&buf, T1, off + copy_word_size);
		build_copy_load_pref(&buf, off + 2 * copy_word_size);
		build_copy_load(&buf, T2, off + 2 * copy_word_size);
		build_copy_load_pref(&buf, off + 3 * copy_word_size);
		build_copy_load(&buf, T3, off + 3 * copy_word_size);
		build_copy_store_pref(&buf, off);
		build_copy_store(&buf, T0, off);
		build_copy_store_pref(&buf, off + copy_word_size);
		build_copy_store(&buf, T1, off + copy_word_size);
		build_copy_store_pref(&buf, off + 2 * copy_word_size);
		build_copy_store(&buf, T2, off + 2 * copy_word_size);
		build_copy_store_pref(&buf, off + 3 * copy_word_size);
		if (off == -(4 * copy_word_size))
			uasm_il_bne(&buf, &r, A2, A0, label_copy_pref_both);
		build_copy_store(&buf, T3, off + 3 * copy_word_size);
		off += 4 * copy_word_size;
	} while (off < 0);

	if (pref_bias_copy_load - pref_bias_copy_store) {
		pg_addiu(&buf, A2, A0,
			 pref_bias_copy_load - pref_bias_copy_store);
		uasm_l_copy_pref_store(&l, buf);
		off = 0;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store_pref(&buf, off);
			build_copy_store(&buf, T0, off);
			build_copy_store_pref(&buf, off + copy_word_size);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store_pref(&buf, off + 2 * copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			build_copy_store_pref(&buf, off + 3 * copy_word_size);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < half_copy_loop_size);
		pg_addiu(&buf, A1, A1, 2 * off);
		pg_addiu(&buf, A0, A0, 2 * off);
		off = -off;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store_pref(&buf, off);
			build_copy_store(&buf, T0, off);
			build_copy_store_pref(&buf, off + copy_word_size);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store_pref(&buf, off + 2 * copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			build_copy_store_pref(&buf, off + 3 * copy_word_size);
			if (off == -(4 * copy_word_size))
				uasm_il_bne(&buf, &r, A2, A0,
					    label_copy_pref_store);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < 0);
	}

	if (pref_bias_copy_store) {
		pg_addiu(&buf, A2, A0, pref_bias_copy_store);
		uasm_l_copy_nopref(&l, buf);
		off = 0;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store(&buf, T0, off);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < half_copy_loop_size);
		pg_addiu(&buf, A1, A1, 2 * off);
		pg_addiu(&buf, A0, A0, 2 * off);
		off = -off;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store(&buf, T0, off);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			if (off == -(4 * copy_word_size))
				uasm_il_bne(&buf, &r, A2, A0,
					    label_copy_nopref);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < 0);
	}

	uasm_i_jr(&buf, RA);
	uasm_i_nop(&buf);

	BUG_ON(buf > &__copy_page_end);

	uasm_resolve_relocs(relocs, labels);

	pr_debug("Synthesized copy page handler (%u instructions).\n",
		 (u32)(buf - &__copy_page_start));

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < (buf - &__copy_page_start); i++)
		pr_debug("\t.word 0x%08x\n", (&__copy_page_start)[i]);
	pr_debug("\t.set pop\n");
}
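/*
 * Note that the uasm_il_bne() calls above are deliberately emitted before
 * the final store of each unrolled loop: the generated code runs without
 * assembler reordering, so that last store lands in the branch delay
 * slot. The four loads are also grouped ahead of the stores to give the
 * in-flight loads time to complete before their values are needed.
 */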
#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
extern void clear_page_cpu(void *page);
extern void copy_page_cpu(void *to, void *from);

/*
 * Pad descriptors to cacheline, since each is exclusively owned by a
 * particular CPU.
 */
struct dmadscr {
	u64 dscr_a;
	u64 dscr_b;
	u64 pad_a;
	u64 pad_b;
} ____cacheline_aligned_in_smp page_descr[DM_NUM_CHANNELS];
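/*
 * The SB1250 data mover consumes a two-word descriptor: dscr_a carries
 * the physical destination address plus mode flags (zero-memory fill,
 * L2 destination, interrupt on completion), while dscr_b carries the
 * source address (for copies) and the transfer length.
 */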
void clear_page(void *page)
{
	u64 to_phys = CPHYSADDR((unsigned long)page);
	unsigned int cpu = smp_processor_id();

	/* if the page is not in KSEG0, use the old way */
	if ((long)KSEGX((unsigned long)page) != (long)CKSEG0)
		return clear_page_cpu(page);

	page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_ZERO_MEM |
				 M_DM_DSCRA_L2C_DEST | M_DM_DSCRA_INTERRUPT;
	page_descr[cpu].dscr_b = V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
	__raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));

	/*
	 * Don't really want to do it this way, but there's no
	 * reliable way to delay completion detection.
	 */
	while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
		 & M_DM_DSCR_BASE_INTERRUPT))
		;
	__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
}
EXPORT_SYMBOL(clear_page);
void copy_page(void *to, void *from)
{
	u64 from_phys = CPHYSADDR((unsigned long)from);
	u64 to_phys = CPHYSADDR((unsigned long)to);
	unsigned int cpu = smp_processor_id();

	/* if any page is not in KSEG0, use the old way */
	if ((long)KSEGX((unsigned long)to) != (long)CKSEG0
	    || (long)KSEGX((unsigned long)from) != (long)CKSEG0)
		return copy_page_cpu(to, from);

	page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_L2C_DEST |
				 M_DM_DSCRA_INTERRUPT;
	page_descr[cpu].dscr_b = from_phys | V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
	__raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));

	/*
	 * Don't really want to do it this way, but there's no
	 * reliable way to delay completion detection.
	 */
	while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
		 & M_DM_DSCR_BASE_INTERRUPT))
		;
	__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
}
EXPORT_SYMBOL(copy_page);

#endif /* CONFIG_SIBYTE_DMA_PAGEOPS */