/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003, 04, 05 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007  Maciej W. Rozycki
 * Copyright (C) 2008  Thiemo Seufer
 * Copyright (C) 2012  MIPS Technologies, Inc.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/proc_fs.h>

#include <asm/bugs.h>
#include <asm/cacheops.h>
#include <asm/cpu-type.h>
#include <asm/inst.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prefetch.h>
#include <asm/bootinfo.h>
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/cpu.h>
#include <asm/war.h>

#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
#include <asm/sibyte/sb1250.h>
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_dma.h>
#endif

#include <asm/uasm.h>
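/*
 * uasm is the kernel's run-time micro-assembler.  The builders below
 * use it to synthesize clear_page/copy_page handlers at boot time,
 * tuned to the cache line size and prefetch capabilities of the CPU
 * the kernel is actually running on.
 */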
/* Registers used in the assembled routines. */
#define ZERO 0
#define AT 2
#define A0 4
#define A1 5
#define A2 6
#define T0 8
#define T1 9
#define T2 10
#define T3 11
#define T9 25
#define RA 31
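/*
 * These index MIPS general-purpose registers directly: $0 is the
 * hard-wired zero register, $4-$6 the argument registers a0-a2,
 * $8-$11 the temporaries t0-t3, $25 is t9 and $31 the return address
 * register ra.
 */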
/* Handle labels (which must be positive integers). */
enum label_id {
        label_clear_nopref = 1,
        label_clear_pref,
        label_copy_nopref,
        label_copy_pref_both,
        label_copy_pref_store,
};

UASM_L_LA(_clear_nopref)
UASM_L_LA(_clear_pref)
UASM_L_LA(_copy_nopref)
UASM_L_LA(_copy_pref_both)
UASM_L_LA(_copy_pref_store)

/* We need one branch and therefore one relocation per target label. */
static struct uasm_label labels[5];
static struct uasm_reloc relocs[5];
#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)
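/*
 * The masks above ignore the low four PRId revision bits, so these
 * match any R4600 V1.x or V2.x part respectively.
 */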
static int pref_bias_clear_store;
static int pref_bias_copy_load;
static int pref_bias_copy_store;

static u32 pref_src_mode;
static u32 pref_dst_mode;

static int clear_word_size;
static int copy_word_size;

static int half_clear_loop_size;
static int half_copy_loop_size;

static int cache_line_size;
#define cache_line_mask() (cache_line_size - 1)
static inline void
pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off)
{
        if (cpu_has_64bit_gp_regs && DADDI_WAR && r4k_daddiu_bug()) {
                if (off > 0x7fff) {
                        uasm_i_lui(buf, T9, uasm_rel_hi(off));
                        uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
                } else
                        uasm_i_addiu(buf, T9, ZERO, off);
                uasm_i_daddu(buf, reg1, reg2, T9);
        } else {
                if (off > 0x7fff) {
                        uasm_i_lui(buf, T9, uasm_rel_hi(off));
                        uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
                        UASM_i_ADDU(buf, reg1, reg2, T9);
                } else
                        UASM_i_ADDIU(buf, reg1, reg2, off);
        }
}
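/*
 * Worked example (illustrative values): with off = 0x9000 the offset
 * no longer fits the signed 16-bit addiu immediate, so the
 * large-offset path of the DADDI-workaround branch emits roughly
 *
 *	lui	t9, 0x1		# t9 = 0x00010000
 *	addiu	t9, t9, -0x7000	# t9 = 0x00009000
 *	daddu	reg1, reg2, t9
 *
 * uasm_rel_hi()/uasm_rel_lo() split the constant so that the sign
 * extension of the low half is compensated in the high half.
 */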
static void set_prefetch_parameters(void)
{
        if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg)
                clear_word_size = 8;
        else
                clear_word_size = 4;

        if (cpu_has_64bit_gp_regs)
                copy_word_size = 8;
        else
                copy_word_size = 4;

        /*
         * The pref's used here are using "streaming" hints, which cause the
         * copied data to be kicked out of the cache sooner.  A page copy often
         * ends up copying a lot more data than is commonly used, so this seems
         * to make sense in terms of reducing cache pollution, but I've no real
         * performance data to back this up.
         */
        if (cpu_has_prefetch) {
                /*
                 * XXX: Most prefetch bias values in here are based on
                 * guesswork.
                 */
                cache_line_size = cpu_dcache_line_size();
                switch (current_cpu_type()) {
                case CPU_R5500:
                case CPU_TX49XX:
                        /* These processors only support the Pref_Load hint. */
                        pref_bias_copy_load = 256;
                        break;

                case CPU_R10000:
                case CPU_R12000:
                case CPU_R14000:
                        /*
                         * Those values have been experimentally tuned for an
                         * Origin 200.
                         */
                        pref_bias_clear_store = 512;
                        pref_bias_copy_load = 256;
                        pref_bias_copy_store = 256;
                        pref_src_mode = Pref_LoadStreamed;
                        pref_dst_mode = Pref_StoreStreamed;
                        break;

                case CPU_SB1:
                case CPU_SB1A:
                        pref_bias_clear_store = 128;
                        pref_bias_copy_load = 128;
                        pref_bias_copy_store = 128;
                        /*
                         * SB1 pass1 Pref_LoadStreamed/Pref_StoreStreamed
                         * hints are broken.
                         */
                        if (current_cpu_type() == CPU_SB1 &&
                            (current_cpu_data.processor_id & 0xff) < 0x02) {
                                pref_src_mode = Pref_Load;
                                pref_dst_mode = Pref_Store;
                        } else {
                                pref_src_mode = Pref_LoadStreamed;
                                pref_dst_mode = Pref_StoreStreamed;
                        }
                        break;

                default:
                        pref_bias_clear_store = 128;
                        pref_bias_copy_load = 256;
                        pref_bias_copy_store = 128;
                        pref_src_mode = Pref_LoadStreamed;
                        pref_dst_mode = Pref_PrepareForStore;
                        break;
                }
        } else {
                if (cpu_has_cache_cdex_s)
                        cache_line_size = cpu_scache_line_size();
                else if (cpu_has_cache_cdex_p)
                        cache_line_size = cpu_dcache_line_size();
        }

        /*
         * Too much unrolling will overflow the available space in
         * clear_space_array / copy_page_array.
         */
        half_clear_loop_size = min(16 * clear_word_size,
                                   max(cache_line_size >> 1,
                                       4 * clear_word_size));
        half_copy_loop_size = min(16 * copy_word_size,
                                  max(cache_line_size >> 1,
                                      4 * copy_word_size));
}
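/*
 * Worked example (illustrative, assuming a 32-byte D-cache line and
 * 64-bit registers): half_clear_loop_size = min(16 * 8,
 * max(32 >> 1, 4 * 8)) = min(128, 32) = 32 bytes, i.e. each unrolled
 * half-loop below clears four doublewords per trip.
 */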
static void build_clear_store(u32 **buf, int off)
{
        if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg) {
                uasm_i_sd(buf, ZERO, off, A0);
        } else {
                uasm_i_sw(buf, ZERO, off, A0);
        }
}
static inline void build_clear_pref(u32 **buf, int off)
{
        if (off & cache_line_mask())
                return;

        if (pref_bias_clear_store) {
                uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
                            A0);
        } else if (cache_line_size == (half_clear_loop_size << 1)) {
                if (cpu_has_cache_cdex_s) {
                        uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
                } else if (cpu_has_cache_cdex_p) {
                        if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
                                uasm_i_nop(buf);
                                uasm_i_nop(buf);
                                uasm_i_nop(buf);
                                uasm_i_nop(buf);
                        }

                        if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
                                uasm_i_lw(buf, ZERO, ZERO, AT);

                        uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
                }
        }
}
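/*
 * Without a usable prefetch, a "create dirty exclusive" cacheop can
 * stand in: it establishes a dirty line without fetching it from
 * memory.  It is only emitted when one trip through the full clear
 * loop (2 * half_clear_loop_size bytes) covers exactly one cache
 * line, so every byte of that line is guaranteed to be overwritten.
 */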
extern u32 __clear_page_start;
extern u32 __clear_page_end;
extern u32 __copy_page_start;
extern u32 __copy_page_end;
void build_clear_page(void)
{
        int off;
        u32 *buf = &__clear_page_start;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
        int i;
        static atomic_t run_once = ATOMIC_INIT(0);

        if (atomic_xchg(&run_once, 1)) {
                return;
        }

        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));

        set_prefetch_parameters();

        /*
         * This algorithm makes the following assumptions:
         *   - The prefetch bias is a multiple of 2 words.
         *   - The prefetch bias is less than one page.
         */
        BUG_ON(pref_bias_clear_store % (2 * clear_word_size));
        BUG_ON(PAGE_SIZE < pref_bias_clear_store);

        off = PAGE_SIZE - pref_bias_clear_store;
        if (off > 0xffff || !pref_bias_clear_store)
                pg_addiu(&buf, A2, A0, off);
        else
                uasm_i_ori(&buf, A2, A0, off);

        if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
                uasm_i_lui(&buf, AT, 0xa000);

        off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size)
                                * cache_line_size : 0;
        while (off) {
                build_clear_pref(&buf, -off);
                off -= cache_line_size;
        }
        uasm_l_clear_pref(&l, buf);
        do {
                build_clear_pref(&buf, off);
                build_clear_store(&buf, off);
                off += clear_word_size;
        } while (off < half_clear_loop_size);
        pg_addiu(&buf, A0, A0, 2 * off);
        off = -off;
        do {
                build_clear_pref(&buf, off);
                if (off == -clear_word_size)
                        uasm_il_bne(&buf, &r, A0, A2, label_clear_pref);
                build_clear_store(&buf, off);
                off += clear_word_size;
        } while (off < 0);
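        /*
         * In the second half-loop above, the bne is emitted one store
         * early so that the loop's final store sits in the branch
         * delay slot.
         */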
        if (pref_bias_clear_store) {
                pg_addiu(&buf, A2, A0, pref_bias_clear_store);
                uasm_l_clear_nopref(&l, buf);
                off = 0;
                do {
                        build_clear_store(&buf, off);
                        off += clear_word_size;
                } while (off < half_clear_loop_size);
                pg_addiu(&buf, A0, A0, 2 * off);
                off = -off;
                do {
                        if (off == -clear_word_size)
                                uasm_il_bne(&buf, &r, A0, A2,
                                            label_clear_nopref);
                        build_clear_store(&buf, off);
                        off += clear_word_size;
                } while (off < 0);
        }

        uasm_i_jr(&buf, RA);
        uasm_i_nop(&buf);

        BUG_ON(buf > &__clear_page_end);

        uasm_resolve_relocs(relocs, labels);

        pr_debug("Synthesized clear page handler (%u instructions).\n",
                 (u32)(buf - &__clear_page_start));

        pr_debug("\t.set push\n");
        pr_debug("\t.set noreorder\n");
        for (i = 0; i < (buf - &__clear_page_start); i++)
                pr_debug("\t.word 0x%08x\n", (&__clear_page_start)[i]);
        pr_debug("\t.set pop\n");
}
static void build_copy_load(u32 **buf, int reg, int off)
{
        if (cpu_has_64bit_gp_regs) {
                uasm_i_ld(buf, reg, off, A1);
        } else {
                uasm_i_lw(buf, reg, off, A1);
        }
}

static void build_copy_store(u32 **buf, int reg, int off)
{
        if (cpu_has_64bit_gp_regs) {
                uasm_i_sd(buf, reg, off, A0);
        } else {
                uasm_i_sw(buf, reg, off, A0);
        }
}
static inline void build_copy_load_pref(u32 **buf, int off)
{
        if (off & cache_line_mask())
                return;

        if (pref_bias_copy_load)
                uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1);
}
static inline void build_copy_store_pref(u32 **buf, int off)
{
        if (off & cache_line_mask())
                return;

        if (pref_bias_copy_store) {
                uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
                            A0);
        } else if (cache_line_size == (half_copy_loop_size << 1)) {
                if (cpu_has_cache_cdex_s) {
                        uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
                } else if (cpu_has_cache_cdex_p) {
                        if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
                                uasm_i_nop(buf);
                                uasm_i_nop(buf);
                                uasm_i_nop(buf);
                                uasm_i_nop(buf);
                        }

                        if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
                                uasm_i_lw(buf, ZERO, ZERO, AT);

                        uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
                }
        }
}
void build_copy_page(void)
{
        int off;
        u32 *buf = &__copy_page_start;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
        int i;
        static atomic_t run_once = ATOMIC_INIT(0);

        if (atomic_xchg(&run_once, 1)) {
                return;
        }

        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));

        set_prefetch_parameters();

        /*
         * This algorithm makes the following assumptions:
         *   - All prefetch biases are multiples of 8 words.
         *   - The prefetch biases are less than one page.
         *   - The store prefetch bias isn't greater than the load
         *     prefetch bias.
         */
        BUG_ON(pref_bias_copy_load % (8 * copy_word_size));
        BUG_ON(pref_bias_copy_store % (8 * copy_word_size));
        BUG_ON(PAGE_SIZE < pref_bias_copy_load);
        BUG_ON(pref_bias_copy_store > pref_bias_copy_load);

        off = PAGE_SIZE - pref_bias_copy_load;
        if (off > 0xffff || !pref_bias_copy_load)
                pg_addiu(&buf, A2, A0, off);
        else
                uasm_i_ori(&buf, A2, A0, off);

        if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
                uasm_i_lui(&buf, AT, 0xa000);

        off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) *
                                cache_line_size : 0;
        while (off) {
                build_copy_load_pref(&buf, -off);
                off -= cache_line_size;
        }
        off = cache_line_size ? min(8, pref_bias_copy_store / cache_line_size) *
                                cache_line_size : 0;
        while (off) {
                build_copy_store_pref(&buf, -off);
                off -= cache_line_size;
        }
        uasm_l_copy_pref_both(&l, buf);
        do {
                build_copy_load_pref(&buf, off);
                build_copy_load(&buf, T0, off);
                build_copy_load_pref(&buf, off + copy_word_size);
                build_copy_load(&buf, T1, off + copy_word_size);
                build_copy_load_pref(&buf, off + 2 * copy_word_size);
                build_copy_load(&buf, T2, off + 2 * copy_word_size);
                build_copy_load_pref(&buf, off + 3 * copy_word_size);
                build_copy_load(&buf, T3, off + 3 * copy_word_size);
                build_copy_store_pref(&buf, off);
                build_copy_store(&buf, T0, off);
                build_copy_store_pref(&buf, off + copy_word_size);
                build_copy_store(&buf, T1, off + copy_word_size);
                build_copy_store_pref(&buf, off + 2 * copy_word_size);
                build_copy_store(&buf, T2, off + 2 * copy_word_size);
                build_copy_store_pref(&buf, off + 3 * copy_word_size);
                build_copy_store(&buf, T3, off + 3 * copy_word_size);
                off += 4 * copy_word_size;
        } while (off < half_copy_loop_size);
        pg_addiu(&buf, A1, A1, 2 * off);
        pg_addiu(&buf, A0, A0, 2 * off);
        off = -off;
        do {
                build_copy_load_pref(&buf, off);
                build_copy_load(&buf, T0, off);
                build_copy_load_pref(&buf, off + copy_word_size);
                build_copy_load(&buf, T1, off + copy_word_size);
                build_copy_load_pref(&buf, off + 2 * copy_word_size);
                build_copy_load(&buf, T2, off + 2 * copy_word_size);
                build_copy_load_pref(&buf, off + 3 * copy_word_size);
                build_copy_load(&buf, T3, off + 3 * copy_word_size);
                build_copy_store_pref(&buf, off);
                build_copy_store(&buf, T0, off);
                build_copy_store_pref(&buf, off + copy_word_size);
                build_copy_store(&buf, T1, off + copy_word_size);
                build_copy_store_pref(&buf, off + 2 * copy_word_size);
                build_copy_store(&buf, T2, off + 2 * copy_word_size);
                build_copy_store_pref(&buf, off + 3 * copy_word_size);
                if (off == -(4 * copy_word_size))
                        uasm_il_bne(&buf, &r, A2, A0, label_copy_pref_both);
                build_copy_store(&buf, T3, off + 3 * copy_word_size);
                off += 4 * copy_word_size;
        } while (off < 0);
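        /*
         * The copy loops are unrolled four words per trip (T0-T3);
         * build_copy_load_pref()/build_copy_store_pref() are called
         * for every word but emit an instruction only when the offset
         * is cache-line aligned, thanks to the cache_line_mask()
         * check.
         */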
        if (pref_bias_copy_load - pref_bias_copy_store) {
                pg_addiu(&buf, A2, A0,
                         pref_bias_copy_load - pref_bias_copy_store);
                uasm_l_copy_pref_store(&l, buf);
                off = 0;
                do {
                        build_copy_load(&buf, T0, off);
                        build_copy_load(&buf, T1, off + copy_word_size);
                        build_copy_load(&buf, T2, off + 2 * copy_word_size);
                        build_copy_load(&buf, T3, off + 3 * copy_word_size);
                        build_copy_store_pref(&buf, off);
                        build_copy_store(&buf, T0, off);
                        build_copy_store_pref(&buf, off + copy_word_size);
                        build_copy_store(&buf, T1, off + copy_word_size);
                        build_copy_store_pref(&buf, off + 2 * copy_word_size);
                        build_copy_store(&buf, T2, off + 2 * copy_word_size);
                        build_copy_store_pref(&buf, off + 3 * copy_word_size);
                        build_copy_store(&buf, T3, off + 3 * copy_word_size);
                        off += 4 * copy_word_size;
                } while (off < half_copy_loop_size);
                pg_addiu(&buf, A1, A1, 2 * off);
                pg_addiu(&buf, A0, A0, 2 * off);
                off = -off;
                do {
                        build_copy_load(&buf, T0, off);
                        build_copy_load(&buf, T1, off + copy_word_size);
                        build_copy_load(&buf, T2, off + 2 * copy_word_size);
                        build_copy_load(&buf, T3, off + 3 * copy_word_size);
                        build_copy_store_pref(&buf, off);
                        build_copy_store(&buf, T0, off);
                        build_copy_store_pref(&buf, off + copy_word_size);
                        build_copy_store(&buf, T1, off + copy_word_size);
                        build_copy_store_pref(&buf, off + 2 * copy_word_size);
                        build_copy_store(&buf, T2, off + 2 * copy_word_size);
                        build_copy_store_pref(&buf, off + 3 * copy_word_size);
                        if (off == -(4 * copy_word_size))
                                uasm_il_bne(&buf, &r, A2, A0,
                                            label_copy_pref_store);
                        build_copy_store(&buf, T3, off + 3 * copy_word_size);
                        off += 4 * copy_word_size;
                } while (off < 0);
        }

        if (pref_bias_copy_store) {
                pg_addiu(&buf, A2, A0, pref_bias_copy_store);
                uasm_l_copy_nopref(&l, buf);
                off = 0;
                do {
                        build_copy_load(&buf, T0, off);
                        build_copy_load(&buf, T1, off + copy_word_size);
                        build_copy_load(&buf, T2, off + 2 * copy_word_size);
                        build_copy_load(&buf, T3, off + 3 * copy_word_size);
                        build_copy_store(&buf, T0, off);
                        build_copy_store(&buf, T1, off + copy_word_size);
                        build_copy_store(&buf, T2, off + 2 * copy_word_size);
                        build_copy_store(&buf, T3, off + 3 * copy_word_size);
                        off += 4 * copy_word_size;
                } while (off < half_copy_loop_size);
                pg_addiu(&buf, A1, A1, 2 * off);
                pg_addiu(&buf, A0, A0, 2 * off);
                off = -off;
                do {
                        build_copy_load(&buf, T0, off);
                        build_copy_load(&buf, T1, off + copy_word_size);
                        build_copy_load(&buf, T2, off + 2 * copy_word_size);
                        build_copy_load(&buf, T3, off + 3 * copy_word_size);
                        build_copy_store(&buf, T0, off);
                        build_copy_store(&buf, T1, off + copy_word_size);
                        build_copy_store(&buf, T2, off + 2 * copy_word_size);
                        if (off == -(4 * copy_word_size))
                                uasm_il_bne(&buf, &r, A2, A0,
                                            label_copy_nopref);
                        build_copy_store(&buf, T3, off + 3 * copy_word_size);
                        off += 4 * copy_word_size;
                } while (off < 0);
        }

        uasm_i_jr(&buf, RA);
        uasm_i_nop(&buf);

        BUG_ON(buf > &__copy_page_end);

        uasm_resolve_relocs(relocs, labels);

        pr_debug("Synthesized copy page handler (%u instructions).\n",
                 (u32)(buf - &__copy_page_start));

        pr_debug("\t.set push\n");
        pr_debug("\t.set noreorder\n");
        for (i = 0; i < (buf - &__copy_page_start); i++)
                pr_debug("\t.word 0x%08x\n", (&__copy_page_start)[i]);
        pr_debug("\t.set pop\n");
}
#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
extern void clear_page_cpu(void *page);
extern void copy_page_cpu(void *to, void *from);

/*
 * Pad descriptors to cacheline, since each is exclusively owned by a
 * particular CPU.
 */
struct dmadscr {
        u64 dscr_a;
        u64 dscr_b;
        u64 pad_a;
        u64 pad_b;
} ____cacheline_aligned_in_smp page_descr[DM_NUM_CHANNELS];
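/*
 * With the two pad words each descriptor is 4 * sizeof(u64) == 32
 * bytes; together with ____cacheline_aligned_in_smp this keeps every
 * CPU's descriptor on its own cache line and avoids false sharing.
 */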
void sb1_dma_init(void)
{
        int i;

        for (i = 0; i < DM_NUM_CHANNELS; i++) {
                const u64 base_val = CPHYSADDR((unsigned long)&page_descr[i]) |
                                     V_DM_DSCR_BASE_RINGSZ(1);
                void *base_reg = IOADDR(A_DM_REGISTER(i, R_DM_DSCR_BASE));

                __raw_writeq(base_val, base_reg);
                __raw_writeq(base_val | M_DM_DSCR_BASE_RESET, base_reg);
                __raw_writeq(base_val | M_DM_DSCR_BASE_ENABL, base_reg);
        }
}
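/*
 * Each data mover channel is given a single-descriptor ring
 * (V_DM_DSCR_BASE_RINGSZ(1)) located at the physical address of its
 * page_descr[] entry; the reset-then-enable write sequence apparently
 * brings the channel up in a known-clean state.
 */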
void clear_page(void *page)
{
        u64 to_phys = CPHYSADDR((unsigned long)page);
        unsigned int cpu = smp_processor_id();

        /* If the page is not in KSEG0, use the old way. */
        if ((long)KSEGX((unsigned long)page) != (long)CKSEG0)
                return clear_page_cpu(page);

        page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_ZERO_MEM |
                                 M_DM_DSCRA_L2C_DEST | M_DM_DSCRA_INTERRUPT;
        page_descr[cpu].dscr_b = V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
        __raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));

        /*
         * Don't really want to do it this way, but there's no
         * reliable way to delay completion detection.
         */
        while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
                 & M_DM_DSCR_BASE_INTERRUPT))
                ;
        __raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
}
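/*
 * Both wrappers here and below hand the data mover a physical address,
 * which only works for pages reachable through the unmapped, cached
 * KSEG0 segment; anything else falls back to the uasm-generated CPU
 * routines above.
 */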
void copy_page(void *to, void *from)
{
        u64 from_phys = CPHYSADDR((unsigned long)from);
        u64 to_phys = CPHYSADDR((unsigned long)to);
        unsigned int cpu = smp_processor_id();

        /* If any page is not in KSEG0, use the old way. */
        if ((long)KSEGX((unsigned long)to) != (long)CKSEG0
            || (long)KSEGX((unsigned long)from) != (long)CKSEG0)
                return copy_page_cpu(to, from);

        page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_L2C_DEST |
                                 M_DM_DSCRA_INTERRUPT;
        page_descr[cpu].dscr_b = from_phys | V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
        __raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));

        /*
         * Don't really want to do it this way, but there's no
         * reliable way to delay completion detection.
         */
        while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
                 & M_DM_DSCR_BASE_INTERRUPT))
                ;
        __raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
}

#endif /* CONFIG_SIBYTE_DMA_PAGEOPS */