/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#pragma ident	"%Z%%M%	%I%	%E% SMI"
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/asm_linkage.h>
#include <sys/vtrace.h>
#include <sys/machthread.h>
#include <sys/clock.h>
#include <sys/privregs.h>
#include <sys/fpras_impl.h>
/*
 * Pseudo-code to aid in understanding the control flow of the
 * bcopy/copyin/copyout routines.
 *
 * On entry:
 *
 *	! Determine whether to use the FP register version
 *	! or the leaf routine version depending on size
 *	! of copy and flags.  Set up error handling accordingly.
 *	! The transition point depends on whether the src and
 *	! dst addresses can be aligned to long word, word,
 *	! half word, or byte boundaries.
 *
 *	! WARNING: <Register usage convention>
 *	! For FP version, %l6 holds previous error handling and
 *	! a flag: TRAMP_FLAG (low bits)
 *	! for leaf routine version, %o4 holds those values.
 *	! So either %l6 or %o4 is reserved and not available for
 *	! any other use.
 * if (length <= VIS_COPY_THRESHOLD)	! start with a quick test
 *	go to small_copy;		! to speed short copies
 *
 * ! src, dst long word alignable
 *	if (hw_copy_limit_8 == 0)	! hw_copy disabled
 *		go to small_copy;
 *	if (length <= hw_copy_limit_8)
 *		go to small_copy;
 *	go to FPBLK_copy;
 * }
 * if (src,dst not alignable) {
 *	if (hw_copy_limit_1 == 0)	! hw_copy disabled
 *		go to small_copy;
 *	if (length <= hw_copy_limit_1)
 *		go to small_copy;
 *	go to FPBLK_copy;
 * }
 * if (src,dst halfword alignable) {
 *	if (hw_copy_limit_2 == 0)	! hw_copy disabled
 *		go to small_copy;
 *	if (length <= hw_copy_limit_2)
 *		go to small_copy;
 *	go to FPBLK_copy;
 * }
 * if (src,dst word alignable) {
 *	if (hw_copy_limit_4 == 0)	! hw_copy disabled
 *		go to small_copy;
 *	if (length <= hw_copy_limit_4)
 *		go to small_copy;
 *	go to FPBLK_copy;
 * }
 *
 * small_copy:
 *	Setup_leaf_rtn_error_handler;	! diffs for each entry point
 *
 *	if (count <= 3)			! fast path for tiny copies
 *		go to sm_left;		! special finish up code
 *	else
 *		if (count > CHKSIZE)	! medium sized copies
 *			go to sm_med	! tuned by alignment
 *		if(src&dst not both word aligned) {
 *	sm_movebytes:
 *			move byte by byte in 4-way unrolled loop
 *			fall into sm_left;
 *	sm_left:
 *			move 0-3 bytes byte at a time as needed.
 *			restore error handler and exit.
 *		} else {	! src&dst are word aligned
 *			check for at least 8 bytes left,
 *			move word at a time, unrolled by 2
 *			when fewer than 8 bytes left,
 *	sm_half:	move half word at a time while 2 or more bytes left
 *	sm_byte:	move final byte if necessary
 *	sm_exit:
 *			restore error handler and exit.
 *		}
 * ! Medium length cases with at least CHKSIZE bytes available
 * ! method: line up src and dst as best possible, then
 * ! move data in 4-way unrolled loops.
 *
 * sm_med:
 *	if(src&dst unalignable)
 *		go to sm_movebytes
 *	if(src&dst halfword alignable)
 *		go to sm_movehalf
 *	if(src&dst word alignable)
 *		go to sm_moveword
 * ! fall into long word movement
 *	move bytes until src is word aligned
 *	if not long word aligned, move a word
 *	move long words in 4-way unrolled loop until < 32 bytes left
 *	move long words in 1-way unrolled loop until < 8 bytes left
 *	if zero bytes left, goto sm_exit
 *	if one byte left, go to sm_byte
 *	else go to sm_half
 *
 * sm_moveword:
 *	move bytes until src is word aligned
 *	move words in 4-way unrolled loop until < 16 bytes left
 *	move words in 1-way unrolled loop until < 4 bytes left
 *	if zero bytes left, goto sm_exit
 *	if one byte left, go to sm_byte
 *	else go to sm_half
 *
 * sm_movehalf:
 *	move a byte if needed to align src on halfword
 *	move halfwords in 4-way unrolled loop until < 8 bytes left
 *	if zero bytes left, goto sm_exit
 *	if one byte left, go to sm_byte
 *	else go to sm_half
 * FPBLK_copy:
 *	%l6 = curthread->t_lofault;
 *	if (%l6 != NULL) {
 *		membar #Sync
 *		curthread->t_lofault = .copyerr;
 *		caller_error_handler = TRUE	! %l6 |= 2
 *	}
 *
 *	! for FPU testing we must not migrate cpus
 *	if (curthread->t_lwp == NULL) {
 *		! Kernel threads do not have pcb's in which to store
 *		! the floating point state, so disallow preemption during
 *		! the copy.  This also prevents cpu migration.
 *		kpreempt_disable(curthread);
 *	} else {
 *		thread_nomigrate();
 *	}
 *
 *	old_fprs = %fprs;
 *	old_gsr = %gsr;
 *	if (%fprs.fef) {
 *		%fprs.fef = 1;
 *		save current fpregs on stack using blockstore
 *	} else {
 *		%fprs.fef = 1;
 *	}
 *
 *	do_blockcopy_here;
 *
 * In lofault handler:
 *	curthread->t_lofault = .copyerr2;
 *	Continue on with the normal exit handler
 *
 * On normal exit:
 *	%gsr = old_gsr;
 *	if (old_fprs & FPRS_FEF)
 *		restore fpregs from stack using blockload
 *	else
 *		zero fpregs
 *	%fprs = old_fprs;
 *	membar #Sync
 *	curthread->t_lofault = (%l6 & ~3);
 *	! following test omitted from copyin/copyout as they
 *	! will always have a current thread
 *	if (curthread->t_lwp == NULL)
 *		kpreempt_enable(curthread);
 *	else
 *		thread_allowmigrate();
 *	return (0)
 *
 * In second lofault handler (.copyerr2):
 *	We've tried to restore fp state from the stack and failed.  To
 *	prevent returning with a corrupted fp state, we will panic.
 */
/*
 * Comments about optimization choices
 *
 * The initial optimization decision in this code is to determine
 * whether to use the FP registers for a copy or not.  If we don't
 * use the FP registers, we can execute the copy as a leaf routine,
 * saving a register save and restore.  Also, less elaborate setup
 * is required, allowing short copies to be completed more quickly.
 * For longer copies, especially unaligned ones (where the src and
 * dst do not align to allow simple ldx,stx operation), the FP
 * registers allow much faster copy operations.
 *
 * The estimated extra cost of the FP path will vary depending on
 * src/dst alignment, dst offset from the next 64 byte FPblock store
 * boundary, remaining src data after the last full dst cache line is
 * moved, whether the FP registers need to be saved, and some other
 * minor issues.  The average additional overhead is estimated to be
 * 400 clocks.  Since each non-repeated/predicted tst and branch costs
 * around 10 clocks, elaborate calculation would slow down all
 * longer copies and only benefit a small portion of medium sized
 * copies.  Rather than incur such cost, we chose fixed transition
 * points for each of the alignment choices.
 * For the inner loop, here is a comparison of the per cache line
 * costs for each alignment when src&dst are in cache:
 *
 * byte aligned:  108 clocks slower for non-FPBLK
 * half aligned:   44 clocks slower for non-FPBLK
 * word aligned:   12 clocks slower for non-FPBLK
 * long aligned:    4 clocks >>faster<< for non-FPBLK
 * The long aligned loop runs faster because it does no prefetching.
 * That wins if the data is not in cache or there is too little
 * data to gain much benefit from prefetching.  But when there
 * is more data and that data is not in cache, failing to prefetch
 * can run much slower.  In addition, there is a 2 Kbyte store queue
 * which will cause the non-FPBLK inner loop to slow for larger copies.
 * The exact tradeoff is strongly load and application dependent, with
 * increasing risk of a customer visible performance regression if the
 * non-FPBLK code is used for larger copies.  Studies of synthetic in-cache
 * vs out-of-cache copy tests in user space suggest 1024 bytes as a safe
 * upper limit for the non-FPBLK code.  To minimize performance regression
 * risk while still gaining the primary benefits of the improvements to
 * the non-FPBLK code, we set an upper bound of 1024 bytes for the various
 * hw_copy_limit_*.  Later experimental studies using different values
 * of hw_copy_limit_* can be used to make further adjustments if
 * necessary.
 * hw_copy_limit_1 = src and dst are byte aligned but not halfword aligned
 * hw_copy_limit_2 = src and dst are halfword aligned but not word aligned
 * hw_copy_limit_4 = src and dst are word aligned but not longword aligned
 * hw_copy_limit_8 = src and dst are longword aligned
 *
 * To say that src and dst are word aligned means that after
 * some initial alignment activity of moving 0 to 3 bytes,
 * both the src and dst will be on word boundaries so that
 * word loads and stores may be used.
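 *
 * As a small illustrative C sketch (ours, not from the original
 * source), the alignability test reduces to looking at the low bits
 * of src^dst, which is exactly what the xor/btst sequences below do:
 *
 *	uintptr_t x = (uintptr_t)src ^ (uintptr_t)dst;
 *	if ((x & 7) == 0)	limit = hw_copy_limit_8; ! longword
 *	else if ((x & 1) != 0)	limit = hw_copy_limit_1; ! byte only
 *	else if ((x & 2) != 0)	limit = hw_copy_limit_2; ! halfword
 *	else			limit = hw_copy_limit_4; ! word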
 * Recommended initial values as of Mar 2004, includes testing
 * on Cheetah+ (900MHz), Cheetah++ (1200MHz), and Jaguar (1050MHz):
 * hw_copy_limit_1 = 256
 * hw_copy_limit_2 = 512
 * hw_copy_limit_4 = 1024
 * hw_copy_limit_8 = 1024 (or 1536 on some systems)
 * If hw_copy_limit_? is set to zero, then use of FPBLK copy is
 * disabled for that alignment choice.
 * If hw_copy_limit_? is set to a value between 1 and VIS_COPY_THRESHOLD (256)
 * the value of VIS_COPY_THRESHOLD is used.
 * It is not envisioned that hw_copy_limit_? will be changed in the field.
 * It is provided to allow for disabling FPBLK copies and to allow
 * easy testing of alternate values on future HW implementations
 * that might have different cache sizes, clock rates or instruction
 * timing rules.
 *
 * Our first test for FPBLK copies vs non-FPBLK copies checks a minimum
 * threshold to speed up all shorter copies (less than 256).  That
 * saves an alignment test, memory reference, and enabling test
 * for all short copies, or an estimated 24 clocks.
 * The order in which these limits are checked does matter since each
 * non-predicted tst and branch costs around 10 clocks.
 * If src and dst are randomly selected addresses,
 * 4 of 8 will not be alignable.
 * 2 of 8 will be half word alignable.
 * 1 of 8 will be word alignable.
 * 1 of 8 will be long word alignable.
 * But, tests on running kernels show that src and dst to copy code
 * are typically not on random alignments.  Structure copies and
 * copies of larger data sizes are often on long word boundaries.
 * So we test the long word alignment case first, then
 * the byte alignment, then halfword, then word alignment.
 * Several times, tests for length are made to split the code
 * into subcases.  These tests often allow later tests to be
 * avoided.  For example, within the non-FPBLK copy, we first
 * check for tiny copies of 3 bytes or less.  That allows us
 * to use a 4-way unrolled loop for the general byte copy case
 * without a test on loop entry.
 * We subdivide the non-FPBLK case further into CHKSIZE bytes and less
 * vs longer cases.  For the really short case, we don't attempt
 * to align src and dst.  We try to minimize special case tests in
 * the shortest loops as each test adds a significant percentage
 * to the total time.
 * For the medium sized cases, we allow ourselves to adjust the
 * src and dst alignment and provide special cases for each of
 * the four adjusted alignment cases.  The CHKSIZE that was used
 * to decide between short and medium size was chosen to be 39
 * as that allows for the worst case of 7 bytes of alignment
 * shift and 4 times 8 bytes for the first long word unrolling.
 * That knowledge saves an initial test for length on entry into
 * the medium cases.  If the general loop unrolling factor were
 * to be increased, this number would also need to be adjusted.
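 *
 * A quick arithmetic check of that worst case (our gloss on the
 * paragraph above): 7 bytes of alignment shift plus one pass of the
 * 4-way unrolled long word loop is 7 + (4 * 8) = 39 bytes, which is
 * exactly the CHKSIZE value defined below.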
 * For all cases in the non-FPBLK code where it is known that at
 * least 4 chunks of data are available for movement, the
 * loop is unrolled by four.  This 4-way loop runs in 8 clocks
 * or 2 clocks per data element.  Due to limitations of the
 * branch instruction on Cheetah, Jaguar, and Panther, the
 * minimum time for a small, tight loop is 3 clocks.  So
 * the 4-way loop runs 50% faster than the fastest non-unrolled
 * loop.
 * Instruction alignment is forced by use of .align 16 directives
 * and nops which are not executed in the code.  This
 * combination of operations shifts the alignment of following
 * loops to ensure that loops are aligned so that their instructions
 * fall within the minimum number of 4 instruction fetch groups.
 * If instructions are inserted or removed between the .align
 * instruction and the unrolled loops, then the alignment needs
 * to be readjusted.  Misaligned loops can add a clock per loop
 * iteration to the loop timing.
 * In a few cases, code is duplicated to avoid a branch.  Since
 * a non-predicted tst and branch takes 10 clocks, this savings
 * is judged an appropriate time-space tradeoff.
 * Within the FPBLK-code, the prefetch method in the inner
 * loop needs to be explained as it is not standard.  Two
 * prefetches are issued for each cache line instead of one.
 * The primary one is at the maximum reach of 8 cache lines.
 * Most of the time, that maximum prefetch reach gives the
 * cache line more time to reach the processor for systems with
 * higher processor clocks.  But, sometimes memory interference
 * can cause that prefetch to be dropped.  Putting a second
 * prefetch at a reach of 5 cache lines catches the drops
 * three iterations later and shows a measured improvement
 * in performance over any similar loop with a single prefetch.
 * The prefetches are placed in the loop so they overlap with
 * non-memory instructions, so that there is no extra cost
 * when the data is already in-cache.
 */
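/*
 * The shape of that dual-prefetch inner loop, as an illustrative C
 * sketch only (the authoritative version is the assembly below, and
 * the names used here are ours):
 *
 *	while (cnt >= VIS_BLOCKSIZE) {
 *		prefetch(src + 8 * VIS_BLOCKSIZE);	! primary, max reach
 *		prefetch(src + 5 * VIS_BLOCKSIZE);	! catches drops
 *		copy_block(dst, src);			! 64-byte block move
 *		src += VIS_BLOCKSIZE;
 *		dst += VIS_BLOCKSIZE;
 *		cnt -= VIS_BLOCKSIZE;
 *	}
 */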
/*
 * Notes on preserving existing fp state and on membars.
 *
 * When a copyOP decides to use fp we may have to preserve existing
 * floating point state.  It is not the caller's state that we need to
 * preserve - the rest of the kernel does not use fp and, anyway, fp
 * registers are volatile across a call.  Some examples:
 *
 *	- userland has fp state and is interrupted (device interrupt
 *	  or trap) and within the interrupt/trap handling we use
 *	  bcopy()
 *	- another (higher level) interrupt or trap handler uses bcopy
 *	  while a bcopy from an earlier interrupt is still active
 *	- an asynchronous error trap occurs while fp state exists (in
 *	  userland or in kernel copy) and the tl0 component of the
 *	  handling uses bcopy
 *	- a user process with fp state incurs a copy-on-write fault and
 *	  hwblkpagecopy always uses fp
 *
 * We therefore need a per-call place in which to preserve fp state -
 * using our stack is ideal (and since fp copy cannot be leaf optimized
 * because of calls it makes, this is no hardship).
 * The following membar BLD/BST discussion is Cheetah pipeline specific.
 * In Cheetah BLD is blocking, #LoadLoad/#LoadStore/#StoreStore are
 * nops (those semantics always apply) and #StoreLoad is implemented
 * as a membar #Sync.
 * It is possible that the owner of the fp state has a block load or
 * block store still "in flight" at the time we come to preserve that
 * state.  Block loads are blocking in Cheetah pipelines so we do not
 * need to sync with them.  In preserving fp regs we will use block stores
 * (which are not blocking in Cheetah pipelines) so we require a membar #Sync
 * after storing state (so that our subsequent use of those registers
 * does not modify them before the block stores complete); this membar
 * also serves to sync with block stores the owner of the fp state has
 * initiated.
 *
 * When we have finished fp copy (with its repeated block stores)
 * we must membar #Sync so that our block stores may complete before
 * we either restore the original fp state into the fp registers or
 * return to a caller which may initiate other fp operations that could
 * modify the fp regs we used before the block stores complete.
 * Synchronous faults (eg, unresolvable DMMU miss) that occur while
 * t_lofault is not NULL will not panic but will instead trampoline
 * to the registered lofault handler.  There is no need for any
 * membars for these - eg, our store to t_lofault will always be visible to
 * ourselves and it is our cpu which will take any trap.
 *
 * Asynchronous faults (eg, uncorrectable ECC error from memory) that occur
 * while t_lofault is not NULL will also not panic.  Since we're copying
 * to or from userland the extent of the damage is known - the destination
 * buffer is incomplete.  So trap handlers will trampoline to the lofault
 * handler in this case which should take some form of error action to
 * avoid using the incomplete buffer.  The trap handler also flags the
 * fault so that later return-from-trap handling (for the trap that brought
 * this thread into the kernel in the first place) can notify the process
 * and reboot the system (or restart the service with Greenline/Contracts).
 * Asynchronous faults (eg, uncorrectable ECC error from memory) can
 * result in deferred error traps - the trap is taken sometime after
 * the event and the trap PC may not be the PC of the faulting access.
 * Delivery of such pending traps can be forced by a membar #Sync, acting
 * as an "error barrier" in this role.  To accurately apply the user/kernel
 * separation described in the preceding paragraph we must force delivery
 * of deferred traps affecting kernel state before we install a lofault
 * handler (if we interpose a new lofault handler on an existing one there
 * is no need to repeat this), and we must force delivery of deferred
 * errors affecting the lofault-protected region before we clear t_lofault.
 * Failure to do so results in lost kernel state being interpreted as
 * affecting a copyin/copyout only, or of an error that really only
 * affects copy data being interpreted as losing kernel state.
 * Since the copy operations may preserve and later restore floating
 * point state that does not belong to the caller (see examples above),
 * we must be careful in how we do this in order to prevent corruption
 * of another program.
 * To make sure that floating point state is always saved and restored
 * correctly, the following "big rules" must be followed when the floating
 * point registers will be used:
 *
 * 1. %l6 always holds the caller's lofault handler.  Also in this register,
 *    Bit 1 (FPUSED_FLAG) indicates that the floating point registers are in
 *    use.  Bit 2 (TRAMP_FLAG) indicates that the call was to bcopy, and a
 *    lofault handler was set coming in.
 *
 * 2. The FPUSED flag indicates that all FP state has been successfully stored
 *    on the stack.  It should not be set until this save has been completed.
 *
 * 3. The FPUSED flag should not be cleared on exit until all FP state has
 *    been restored from the stack.  If an error occurs while restoring
 *    data from the stack, the error handler can check this flag to see if
 *    a restore is necessary.
 *
 * 4. Code run under the new lofault handler must be kept to a minimum.  In
 *    particular, any calls to FP_ALLOWMIGRATE, which could result in a call
 *    to kpreempt(), should not be made until after the lofault handler has
 *    been restored.
 */
/*
 * VIS_COPY_THRESHOLD indicates the minimum number of bytes needed
 * to "break even" using FP/VIS-accelerated memory operations.
 * The FPBLK code assumes a minimum number of bytes are available
 * to be moved on entry.  Check that code carefully before
 * reducing VIS_COPY_THRESHOLD below 256.
 */
/*
 * This shadows sys/machsystm.h which can't be included due to the lack of
 * _ASM guards in include files it references.  Change it here, change it
 * there.
 */
#define	VIS_COPY_THRESHOLD	256
/*
 * TEST for very short copies
 * Be aware that the maximum unroll for the short unaligned case
 * is SHORTCOPY+1
 */
#define	SHORTCOPY	3
#define	CHKSIZE		39

/*
 * Indicates that we're to trampoline to the error handler.
 * Entry points bcopy, copyin_noerr, and copyout_noerr use this flag.
 * kcopy, copyout, xcopyout, copyin, and xcopyin do not set this flag.
 */
#define	FPUSED_FLAG	1
#define	TRAMP_FLAG	2
#define	MASK_FLAGS	(FPUSED_FLAG | TRAMP_FLAG)
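
/*
 * Illustrative C model of the flag packing (ours, not part of the
 * original source): the saved t_lofault pointer is word aligned, so
 * its two low bits are free to carry the flags, and they are masked
 * off before the handler address is reused.
 *
 *	uintptr_t l6 = (uintptr_t)saved_lofault;
 *	l6 |= FPUSED_FLAG;		! fp state saved on stack
 *	l6 |= TRAMP_FLAG;		! trampoline to prior handler
 *	old = (void *)(l6 & ~(uintptr_t)MASK_FLAGS);
 */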
/*
 * Number of outstanding prefetches.
 * Testing with 1200 MHz Cheetah+ and Jaguar gives best results with
 * two prefetches, one with a reach of 8*BLOCK_SIZE+8 and one with a
 * reach of 5*BLOCK_SIZE.  The double prefetch gives a typical improvement
 * of 5% for large copies as compared to a single prefetch.  The reason
 * for the improvement is that with Cheetah and Jaguar, some prefetches
 * are dropped due to the prefetch queue being full.  The second prefetch
 * reduces the number of cache lines that are dropped.
 * Do not remove the double prefetch or change either CHEETAH_PREFETCH
 * or CHEETAH_2ND_PREFETCH without extensive performance tests to prove
 * there is no loss of performance.
 */
#define	CHEETAH_PREFETCH	8
#define	CHEETAH_2ND_PREFETCH	5
#define	VIS_BLOCKSIZE		64
/*
 * Size of stack frame in order to accommodate a 64-byte aligned
 * floating-point register save area and 2 64-bit temp locations.
 * All copy functions use two quadrants of fp registers; to assure a
 * block-aligned two block buffer in which to save we must reserve
 * three blocks on stack.  Not all functions preserve %fprs on stack
 * or need to preserve %gsr but we use HWCOPYFRAMESIZE for all.
 *
 *    _______________________________________ <-- %fp + STACK_BIAS
 *    | We may need to preserve 2 quadrants |
 *    | of fp regs, but since we do so with |
 *    | BST/BLD we need room in which to    |
 *    | align to VIS_BLOCKSIZE bytes.  So   |
 *    | this area is 3 * VIS_BLOCKSIZE.     | <--  - SAVED_FPREGS_OFFSET
 *    |-------------------------------------|
 *    | 8 bytes to save %fprs               | <--  - SAVED_FPRS_OFFSET
 *    |-------------------------------------|
 *    | 8 bytes to save %gsr                | <--  - SAVED_GSR_OFFSET
 *    ---------------------------------------
 */
#define	HWCOPYFRAMESIZE		((VIS_BLOCKSIZE * (2 + 1)) + (2 * 8))
#define	SAVED_FPREGS_OFFSET	(VIS_BLOCKSIZE * 3)
#define	SAVED_FPREGS_ADJUST	((VIS_BLOCKSIZE * 2) - 1)
#define	SAVED_FPRS_OFFSET	(SAVED_FPREGS_OFFSET + 8)
#define	SAVED_GSR_OFFSET	(SAVED_FPRS_OFFSET + 8)
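
/*
 * A worked example of the save-area arithmetic above (ours, added for
 * clarity): with VIS_BLOCKSIZE = 64 the frame reserves 3 * 64 + 16 =
 * 208 bytes.  The save macros compute
 *
 *	tmp = (%fp + STACK_BIAS - SAVED_FPREGS_ADJUST) & -VIS_BLOCKSIZE;
 *
 * i.e. they back up by at most 2 * 64 - 1 = 127 bytes and round down
 * to a block boundary, which always leaves two whole 64-byte aligned
 * blocks inside the three-block region wherever the frame falls.
 */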
/*
 * Common macros used by the various versions of the block copy
 * routines in this file.
 */

/*
 * In FP copies if we do not have preserved data to restore over
 * the fp regs we used then we must zero those regs to avoid
 * exposing portions of the data to later threads (data security).
 *
 * Copy functions use either quadrants 1 and 3 or 2 and 4.
 *
 * FZEROQ1Q3: Zero quadrants 1 and 3, ie %f0 - %f15 and %f32 - %f47
 * FZEROQ2Q4: Zero quadrants 2 and 4, ie %f16 - %f31 and %f48 - %f63
 *
 * The instructions below are quicker than repeated fzero instructions
 * since they can dispatch down two fp pipelines.
 */
#define	FZEROQ1Q3			\
	fzero	%f0			;\
	fzero	%f2			;\
	faddd	%f0, %f2, %f4		;\
	fmuld	%f0, %f2, %f6		;\
	faddd	%f0, %f2, %f8		;\
	fmuld	%f0, %f2, %f10		;\
	faddd	%f0, %f2, %f12		;\
	fmuld	%f0, %f2, %f14		;\
	faddd	%f0, %f2, %f32		;\
	fmuld	%f0, %f2, %f34		;\
	faddd	%f0, %f2, %f36		;\
	fmuld	%f0, %f2, %f38		;\
	faddd	%f0, %f2, %f40		;\
	fmuld	%f0, %f2, %f42		;\
	faddd	%f0, %f2, %f44		;\
	fmuld	%f0, %f2, %f46

#define	FZEROQ2Q4			\
	fzero	%f16			;\
	fzero	%f18			;\
	faddd	%f16, %f18, %f20	;\
	fmuld	%f16, %f18, %f22	;\
	faddd	%f16, %f18, %f24	;\
	fmuld	%f16, %f18, %f26	;\
	faddd	%f16, %f18, %f28	;\
	fmuld	%f16, %f18, %f30	;\
	faddd	%f16, %f18, %f48	;\
	fmuld	%f16, %f18, %f50	;\
	faddd	%f16, %f18, %f52	;\
	fmuld	%f16, %f18, %f54	;\
	faddd	%f16, %f18, %f56	;\
	fmuld	%f16, %f18, %f58	;\
	faddd	%f16, %f18, %f60	;\
	fmuld	%f16, %f18, %f62
/*
 * Macros to save and restore quadrants 1 and 3 or 2 and 4 to/from the stack.
 * Used to save and restore in-use fp registers when we want to use FP
 * and find fp already in use and copy size still large enough to justify
 * the additional overhead of this save and restore.
 *
 * A membar #Sync is needed before save to sync fp ops initiated before
 * the call to the copy function (by whoever has fp in use); for example
 * an earlier block load to the quadrant we are about to save may still be
 * "in flight".  A membar #Sync is required at the end of the save to
 * sync our block store (the copy code is about to begin ldd's to the
 * first quadrant).  Note, however, that since Cheetah pipeline block load
 * is blocking we can omit the initial membar before saving fp state (they're
 * commented below in case of future porting to a chip that does not block
 * on block load).
 *
 * Similarly: a membar #Sync before restore allows the block stores of
 * the copy operation to complete before we fill the quadrants with their
 * original data, and a membar #Sync after restore lets the block loads
 * of the restore complete before we return to whoever has the fp regs
 * in use.  To avoid repeated membar #Sync we make it the responsibility
 * of the copy code to membar #Sync immediately after copy is complete
 * and before using the BLD_*_FROMSTACK macro.
 */
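/*
 * The resulting barrier protocol, sketched in order (our summary of
 * the discussion above, not a separate mechanism):
 *
 *	membar #Sync		! (elided: BLD blocks on Cheetah)
 *	BST_*_TOSTACK		! save in-use quadrants
 *	membar #Sync		! saves complete before we reuse regs
 *	... copy ...
 *	membar #Sync		! copy's block stores complete
 *	BLD_*_FROMSTACK		! restore quadrants
 *	membar #Sync		! restore completes before return
 */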
#define	BST_FPQ1Q3_TOSTACK(tmp1)				\
	/* membar #Sync	*/					;\
	add	%fp, STACK_BIAS - SAVED_FPREGS_ADJUST, tmp1	;\
	and	tmp1, -VIS_BLOCKSIZE, tmp1 /* block align */	;\
	stda	%f0, [tmp1]ASI_BLK_P				;\
	add	tmp1, VIS_BLOCKSIZE, tmp1			;\
	stda	%f32, [tmp1]ASI_BLK_P				;\
	membar	#Sync

#define	BLD_FPQ1Q3_FROMSTACK(tmp1)				\
	/* membar #Sync - provided at copy completion */	;\
	add	%fp, STACK_BIAS - SAVED_FPREGS_ADJUST, tmp1	;\
	and	tmp1, -VIS_BLOCKSIZE, tmp1 /* block align */	;\
	ldda	[tmp1]ASI_BLK_P, %f0				;\
	add	tmp1, VIS_BLOCKSIZE, tmp1			;\
	ldda	[tmp1]ASI_BLK_P, %f32				;\
	membar	#Sync

#define	BST_FPQ2Q4_TOSTACK(tmp1)				\
	/* membar #Sync */					;\
	add	%fp, STACK_BIAS - SAVED_FPREGS_ADJUST, tmp1	;\
	and	tmp1, -VIS_BLOCKSIZE, tmp1 /* block align */	;\
	stda	%f16, [tmp1]ASI_BLK_P				;\
	add	tmp1, VIS_BLOCKSIZE, tmp1			;\
	stda	%f48, [tmp1]ASI_BLK_P				;\
	membar	#Sync

#define	BLD_FPQ2Q4_FROMSTACK(tmp1)				\
	/* membar #Sync - provided at copy completion */	;\
	add	%fp, STACK_BIAS - SAVED_FPREGS_ADJUST, tmp1	;\
	and	tmp1, -VIS_BLOCKSIZE, tmp1 /* block align */	;\
	ldda	[tmp1]ASI_BLK_P, %f16				;\
	add	tmp1, VIS_BLOCKSIZE, tmp1			;\
	ldda	[tmp1]ASI_BLK_P, %f48				;\
	membar	#Sync
/*
 * FP_NOMIGRATE and FP_ALLOWMIGRATE.  Prevent migration (or, stronger,
 * prevent preemption if there is no t_lwp to save FP state to on context
 * switch) before commencing a FP copy, and reallow it on completion or
 * in error trampoline paths when we were using FP copy.
 *
 * Both macros may call other functions, so be aware that all outputs are
 * forfeit after using these macros.  For this reason we do not pass registers
 * to use - we just use any outputs we want.
 *
 * For fpRAS we need to perform the fpRAS mechanism test on the same
 * CPU as we use for the copy operation, both so that we validate the
 * CPU we perform the copy on and so that we know which CPU failed
 * if a failure is detected.  Hence we need to be bound to "our" CPU.
 * This could be achieved through disabling preemption (and we have to do it
 * that way for threads with no t_lwp) but for larger copies this may hold
 * higher priority threads off of cpu for too long (eg, realtime).  So we
 * make use of the lightweight t_nomigrate mechanism where we can (ie, when
 * a t_lwp is present).
 *
 * FP_NOMIGRATE's pseudo code is:
 *
 *	if (curthread->t_lwp) {
 *		thread_nomigrate();
 *	} else {
 *		kpreempt_disable();
 *	}
 *
 * FP_ALLOWMIGRATE's pseudo code is:
 *
 *	if (curthread->t_lwp) {
 *		thread_allowmigrate();
 *	} else {
 *		kpreempt_enable();
 *	}
 */
#define	FP_NOMIGRATE(label1, label2)				\
	ldn	[THREAD_REG + T_LWP], %o0			;\
	brz,a,pn %o0, label1/**/f				;\
	  ldsb	[THREAD_REG + T_PREEMPT], %o1			;\
	call	thread_nomigrate				;\
	  nop							;\
	ba	label2/**/f					;\
	  nop							;\
label1:								;\
	inc	%o1						;\
	stb	%o1, [THREAD_REG + T_PREEMPT]			;\
label2:

#define	FP_ALLOWMIGRATE(label1, label2)				\
	ldn	[THREAD_REG + T_LWP], %o0			;\
	brz,a,pn %o0, label1/**/f				;\
	  ldsb	[THREAD_REG + T_PREEMPT], %o1			;\
	call	thread_allowmigrate				;\
	  nop							;\
	ba	label2/**/f					;\
	  nop							;\
label1:								;\
	dec	%o1						;\
	brnz,pn	%o1, label2/**/f				;\
	  stb	%o1, [THREAD_REG + T_PREEMPT]			;\
	ldn	[THREAD_REG + T_CPU], %o0			;\
	ldub	[%o0 + CPU_KPRUNRUN], %o0			;\
	brz,pt	%o0, label2/**/f				;\
	  nop							;\
	call	kpreempt					;\
	  rdpr	%pil, %o0					;\
label2:
/*
 * Copy a block of storage, returning an error code if `from' or
 * `to' takes a kernel pagefault which cannot be resolved.
 * Returns errno value on pagefault error, 0 if all ok
 */

/* ARGSUSED */
int
kcopy(const void *from, void *to, size_t count)
{ return(0); }
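
/*
 * A hedged usage sketch (ours, not part of this file): since kcopy()
 * returns 0 or an errno on an unresolved pagefault, callers check the
 * result directly, e.g.
 *
 *	int err;
 *
 *	if ((err = kcopy(src, dst, len)) != 0)
 *		return (err);		! fault while copying
 */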
	ENTRY(kcopy)

	cmp	%o2, VIS_COPY_THRESHOLD		! check for leaf rtn case
	bleu,pt	%ncc, .kcopy_small		! go to larger cases
	  xor	%o0, %o1, %o3			! are src, dst alignable?
	btst	7, %o3				!
	bz,pt	%ncc, .kcopy_8			! check for longword alignment
	  nop
	btst	1, %o3				!
	bz,pt	%ncc, .kcopy_2			! check for half-word
	  nop
	sethi	%hi(hw_copy_limit_1), %o3	! Check copy limit
	ld	[%o3 + %lo(hw_copy_limit_1)], %o3
	tst	%o3
	bz,pn	%icc, .kcopy_small		! if zero, disable HW copy
	  cmp	%o2, %o3			! if length <= limit
	bleu,pt	%ncc, .kcopy_small		! go to small copy
	  nop
	ba,pt	%ncc, .kcopy_more		! otherwise go to large copy
	  nop
.kcopy_2:
	btst	3, %o3				!
	bz,pt	%ncc, .kcopy_4			! check for word alignment
	  nop
	sethi	%hi(hw_copy_limit_2), %o3	! Check copy limit
	ld	[%o3 + %lo(hw_copy_limit_2)], %o3
	tst	%o3
	bz,pn	%icc, .kcopy_small		! if zero, disable HW copy
	  cmp	%o2, %o3			! if length <= limit
	bleu,pt	%ncc, .kcopy_small		! go to small copy
	  nop
	ba,pt	%ncc, .kcopy_more		! otherwise go to large copy
	  nop
.kcopy_4:
	! already checked longword, must be word aligned
	sethi	%hi(hw_copy_limit_4), %o3	! Check copy limit
	ld	[%o3 + %lo(hw_copy_limit_4)], %o3
	tst	%o3
	bz,pn	%icc, .kcopy_small		! if zero, disable HW copy
	  cmp	%o2, %o3			! if length <= limit
	bleu,pt	%ncc, .kcopy_small		! go to small copy
	  nop
	ba,pt	%ncc, .kcopy_more		! otherwise go to large copy
	  nop
.kcopy_8:
	sethi	%hi(hw_copy_limit_8), %o3	! Check copy limit
	ld	[%o3 + %lo(hw_copy_limit_8)], %o3
	tst	%o3
	bz,pn	%icc, .kcopy_small		! if zero, disable HW copy
	  cmp	%o2, %o3			! if length <= limit
	bleu,pt	%ncc, .kcopy_small		! go to small copy
	  nop
	ba,pt	%ncc, .kcopy_more		! otherwise go to large copy
	  nop

.kcopy_small:
	sethi	%hi(.sm_copyerr), %o5		! sm_copyerr is lofault value
	or	%o5, %lo(.sm_copyerr), %o5
	ldn	[THREAD_REG + T_LOFAULT], %o4	! save existing handler
	membar	#Sync				! sync error barrier
	ba,pt	%ncc, .sm_do_copy		! common code
	  stn	%o5, [THREAD_REG + T_LOFAULT]	! set t_lofault

.kcopy_more:
	save	%sp, -SA(MINFRAME + HWCOPYFRAMESIZE), %sp
	sethi	%hi(.copyerr), %l7		! copyerr is lofault value
	or	%l7, %lo(.copyerr), %l7
	ldn	[THREAD_REG + T_LOFAULT], %l6	! save existing handler
	membar	#Sync				! sync error barrier
	ba,pt	%ncc, .do_copy			! common code
	  stn	%l7, [THREAD_REG + T_LOFAULT]	! set t_lofault
/*
 * We got here because of a fault during bcopy_more, called from kcopy or bcopy.
 * Errno value is in %g1.  bcopy_more uses fp quadrants 1 and 3.
 */
.copyerr:
	set	.copyerr2, %l0
	membar	#Sync				! sync error barrier
	stn	%l0, [THREAD_REG + T_LOFAULT]	! set t_lofault
	btst	FPUSED_FLAG, %l6
	bz	%ncc, 1f
	  and	%l6, TRAMP_FLAG, %l0		! copy trampoline flag to %l0

	ldx	[%fp + STACK_BIAS - SAVED_GSR_OFFSET], %o2	! restore gsr
	wr	%o2, 0, %gsr

	ld	[%fp + STACK_BIAS - SAVED_FPRS_OFFSET], %o3
	btst	FPRS_FEF, %o3
	bz,pt	%icc, 4f
	  nop

	BLD_FPQ1Q3_FROMSTACK(%o2)

	ba,pt	%ncc, 1f
	  wr	%o3, 0, %fprs		! restore fprs

4:
	FZEROQ1Q3
	wr	%o3, 0, %fprs		! restore fprs

1:
	!
	! Need to cater for the different expectations of kcopy
	! and bcopy.  kcopy will *always* set a t_lofault handler.
	! If it fires, we're expected to just return the error code
	! and *not* to invoke any existing error handler.  As far as
	! bcopy is concerned, we only set t_lofault if there was an
	! existing lofault handler.  In that case we're expected to
	! invoke the previously existing handler after resetting the
	! t_lofault value.
	!
	andn	%l6, MASK_FLAGS, %l6		! turn trampoline flag off
	membar	#Sync				! sync error barrier
	stn	%l6, [THREAD_REG + T_LOFAULT]	! restore old t_lofault
	FP_ALLOWMIGRATE(5, 6)

	btst	TRAMP_FLAG, %l0
	bnz,pn	%ncc, 3f
	  nop
	ret
	  restore	%g0, %g1, %o0

3:
	!
	! We're here via bcopy.  There *must* have been an error handler
	! in place otherwise we would have died a nasty death already.
	!
	jmp	%l6				! goto real handler
	  restore	%g0, 0, %o0		! dispose of copy window

/*
 * We got here because of a fault in .copyerr.  We can't safely restore fp
 * state, so we panic.
 */
fp_panic_msg:
	.asciz	"Unable to restore fp state after copy operation"

	.align	4
.copyerr2:
	set	fp_panic_msg, %o0
	call	panic
	  nop

/*
 * We got here because of a fault during a small kcopy or bcopy.
 * No floating point registers are used by the small copies.
 * Errno value is in %g1.
 */
.sm_copyerr:
1:
	btst	TRAMP_FLAG, %o4
	membar	#Sync
	andn	%o4, TRAMP_FLAG, %o4
	bnz,pn	%ncc, 3f
	  stn	%o4, [THREAD_REG + T_LOFAULT]	! restore old t_lofault
	retl
	  mov	%g1, %o0
3:
	jmp	%o4				! goto real handler
	  mov	%g0, %o0			!

	SET_SIZE(kcopy)
/*
 * Copy a block of storage - must not overlap (from + len <= to).
 * Registers: l6 - saved t_lofault
 * (for short copies, o4 - saved t_lofault)
 *
 * Copy a page of memory.
 * Assumes double word alignment and a count >= 256.
 */

/* ARGSUSED */
void
bcopy(const void *from, void *to, size_t count)
{}

	ENTRY(bcopy)
	cmp	%o2, VIS_COPY_THRESHOLD		! check for leaf rtn case
	bleu,pt	%ncc, .bcopy_small		! go to larger cases
	  xor	%o0, %o1, %o3			! are src, dst alignable?
	btst	7, %o3				!
	bz,pt	%ncc, .bcopy_8			! check for longword alignment
	  nop
	btst	1, %o3				!
	bz,pt	%ncc, .bcopy_2			! check for half-word
	  nop
	sethi	%hi(hw_copy_limit_1), %o3	! Check copy limit
	ld	[%o3 + %lo(hw_copy_limit_1)], %o3
	tst	%o3
	bz,pn	%icc, .bcopy_small		! if zero, disable HW copy
	  cmp	%o2, %o3			! if length <= limit
	bleu,pt	%ncc, .bcopy_small		! go to small copy
	  nop
	ba,pt	%ncc, .bcopy_more		! otherwise go to large copy
	  nop
.bcopy_2:
	btst	3, %o3				!
	bz,pt	%ncc, .bcopy_4			! check for word alignment
	  nop
	sethi	%hi(hw_copy_limit_2), %o3	! Check copy limit
	ld	[%o3 + %lo(hw_copy_limit_2)], %o3
	tst	%o3
	bz,pn	%icc, .bcopy_small		! if zero, disable HW copy
	  cmp	%o2, %o3			! if length <= limit
	bleu,pt	%ncc, .bcopy_small		! go to small copy
	  nop
	ba,pt	%ncc, .bcopy_more		! otherwise go to large copy
	  nop
.bcopy_4:
	! already checked longword, must be word aligned
	sethi	%hi(hw_copy_limit_4), %o3	! Check copy limit
	ld	[%o3 + %lo(hw_copy_limit_4)], %o3
	tst	%o3
	bz,pn	%icc, .bcopy_small		! if zero, disable HW copy
	  cmp	%o2, %o3			! if length <= limit
	bleu,pt	%ncc, .bcopy_small		! go to small copy
	  nop
	ba,pt	%ncc, .bcopy_more		! otherwise go to large copy
	  nop
.bcopy_8:
	sethi	%hi(hw_copy_limit_8), %o3	! Check copy limit
	ld	[%o3 + %lo(hw_copy_limit_8)], %o3
	tst	%o3
	bz,pn	%icc, .bcopy_small		! if zero, disable HW copy
	  cmp	%o2, %o3			! if length <= limit
	bleu,pt	%ncc, .bcopy_small		! go to small copy
	  nop
	ba,pt	%ncc, .bcopy_more		! otherwise go to large copy
	  nop

	.align	16
.bcopy_small:
	ldn	[THREAD_REG + T_LOFAULT], %o4	! save t_lofault
	tst	%o4
	bz,pt	%icc, .sm_do_copy
	  nop
	sethi	%hi(.sm_copyerr), %o5
	or	%o5, %lo(.sm_copyerr), %o5
	membar	#Sync				! sync error barrier
	stn	%o5, [THREAD_REG + T_LOFAULT]	! install new vector
	or	%o4, TRAMP_FLAG, %o4		! error should trampoline
.sm_do_copy:
	cmp	%o2, SHORTCOPY		! check for really short case
	bleu,pt	%ncc, .bc_sm_left	!
	  cmp	%o2, CHKSIZE		! check for medium length cases
	bgu,pn	%ncc, .bc_med		!
	  or	%o0, %o1, %o3		! prepare alignment check
	andcc	%o3, 0x3, %g0		! test for alignment
	bz,pt	%ncc, .bc_sm_word	! branch to word aligned case
.bc_sm_movebytes:
	  sub	%o2, 3, %o2		! adjust count to allow cc zero test
.bc_sm_notalign4:
	ldub	[%o0], %o3		! read byte
	stb	%o3, [%o1]		! write byte
	subcc	%o2, 4, %o2		! reduce count by 4
	ldub	[%o0 + 1], %o3		! repeat for a total of 4 bytes
	add	%o0, 4, %o0		! advance SRC by 4
	stb	%o3, [%o1 + 1]
	ldub	[%o0 - 2], %o3
	add	%o1, 4, %o1		! advance DST by 4
	stb	%o3, [%o1 - 2]
	ldub	[%o0 - 1], %o3
	bgt,pt	%ncc, .bc_sm_notalign4	! loop til 3 or fewer bytes remain
	  stb	%o3, [%o1 - 1]
	add	%o2, 3, %o2		! restore count
.bc_sm_left:
	tst	%o2
	bz,pt	%ncc, .bc_sm_exit	! check for zero length
	  deccc	%o2			! reduce count for cc test
	ldub	[%o0], %o3		! move one byte
	bz,pt	%ncc, .bc_sm_exit
	  stb	%o3, [%o1]
	ldub	[%o0 + 1], %o3		! move another byte
	deccc	%o2			! check for more
	bz,pt	%ncc, .bc_sm_exit
	  stb	%o3, [%o1 + 1]
	ldub	[%o0 + 2], %o3		! move final byte
	stb	%o3, [%o1 + 2]
	membar	#Sync			! sync error barrier
	andn	%o4, TRAMP_FLAG, %o4
	stn	%o4, [THREAD_REG + T_LOFAULT]	! restore old t_lofault
	retl
	  mov	%g0, %o0		! return 0

	.align	16
	nop				! instruction alignment
					! see discussion at start of file
.bc_sm_words:
	lduw	[%o0], %o3		! read word
.bc_sm_wordx:
	subcc	%o2, 8, %o2		! update count
	stw	%o3, [%o1]		! write word
	add	%o0, 8, %o0		! update SRC
	lduw	[%o0 - 4], %o3		! read word
	add	%o1, 8, %o1		! update DST
	bgt,pt	%ncc, .bc_sm_words	! loop til done
	  stw	%o3, [%o1 - 4]		! write word
	addcc	%o2, 7, %o2		! restore count
	bz,pt	%ncc, .bc_sm_exit
	  deccc	%o2
	bz,pt	%ncc, .bc_sm_byte
.bc_sm_half:
	  subcc	%o2, 2, %o2		! reduce count by 2
	add	%o0, 2, %o0		! advance SRC by 2
	lduh	[%o0 - 2], %o3		! read half word
	add	%o1, 2, %o1		! advance DST by 2
	bgt,pt	%ncc, .bc_sm_half	! loop til done
	  sth	%o3, [%o1 - 2]		! write half word
	addcc	%o2, 1, %o2		! restore count
	bz,pt	%ncc, .bc_sm_exit
	  nop
.bc_sm_byte:
	ldub	[%o0], %o3
	stb	%o3, [%o1]
.bc_sm_exit:
	membar	#Sync			! sync error barrier
	andn	%o4, TRAMP_FLAG, %o4
	stn	%o4, [THREAD_REG + T_LOFAULT]	! restore old t_lofault
	retl
	  mov	%g0, %o0		! return 0
	.align	16
.bc_sm_word:
	subcc	%o2, 4, %o2		! update count
	bgt,pt	%ncc, .bc_sm_wordx
	  lduw	[%o0], %o3		! read word
	addcc	%o2, 3, %o2		! restore count
	bz,pt	%ncc, .bc_sm_exit
	  stw	%o3, [%o1]		! write word
	deccc	%o2			! reduce count for cc test
	ldub	[%o0 + 4], %o3		! load one byte
	bz,pt	%ncc, .bc_sm_exit
	  stb	%o3, [%o1 + 4]		! store one byte
	ldub	[%o0 + 5], %o3		! load second byte
	deccc	%o2
	bz,pt	%ncc, .bc_sm_exit
	  stb	%o3, [%o1 + 5]		! store second byte
	ldub	[%o0 + 6], %o3		! load third byte
	stb	%o3, [%o1 + 6]		! store third byte
	membar	#Sync			! sync error barrier
	andn	%o4, TRAMP_FLAG, %o4
	stn	%o4, [THREAD_REG + T_LOFAULT]	! restore old t_lofault
	retl
	  mov	%g0, %o0		! return 0
	.align	16
.bc_med:
	xor	%o0, %o1, %o3		! setup alignment check
	btst	1, %o3
	bnz,pt	%ncc, .bc_sm_movebytes	! unaligned
	  nop
	btst	3, %o3
	bnz,pt	%ncc, .bc_med_half	! halfword aligned
	  nop
	btst	7, %o3
	bnz,pt	%ncc, .bc_med_word	! word aligned
	  nop
.bc_med_long:
	btst	3, %o0			! check for
	bz,pt	%ncc, .bc_med_long1	! word alignment
	  nop
.bc_med_long0:
	ldub	[%o0], %o3		! load one byte
	inc	%o0
	stb	%o3,[%o1]		! store byte
	inc	%o1
	btst	3, %o0
	bnz,pt	%ncc, .bc_med_long0
	  dec	%o2
.bc_med_long1:				! word aligned
	btst	7, %o0			! check for long word
	bz,pt	%ncc, .bc_med_long2
	  nop
	lduw	[%o0], %o3		! load word
	add	%o0, 4, %o0		! advance SRC by 4
	stw	%o3, [%o1]		! store word
	add	%o1, 4, %o1		! advance DST by 4
	sub	%o2, 4, %o2		! reduce count by 4
!
!  Now long word aligned and have at least 32 bytes to move
!
.bc_med_long2:
	sub	%o2, 31, %o2		! adjust count to allow cc zero test
.bc_med_lmove:
	ldx	[%o0], %o3		! read long word
	stx	%o3, [%o1]		! write long word
	subcc	%o2, 32, %o2		! reduce count by 32
	ldx	[%o0 + 8], %o3		! repeat for a total of 4 long words
	add	%o0, 32, %o0		! advance SRC by 32
	stx	%o3, [%o1 + 8]
	ldx	[%o0 - 16], %o3
	add	%o1, 32, %o1		! advance DST by 32
	stx	%o3, [%o1 - 16]
	ldx	[%o0 - 8], %o3
	bgt,pt	%ncc, .bc_med_lmove	! loop til 31 or fewer bytes left
	  stx	%o3, [%o1 - 8]
	addcc	%o2, 24, %o2		! restore count to long word offset
	ble,pt	%ncc, .bc_med_lextra	! check for more long words to move
	  nop
.bc_med_lword:
	ldx	[%o0], %o3		! read long word
	subcc	%o2, 8, %o2		! reduce count by 8
	stx	%o3, [%o1]		! write long word
	add	%o0, 8, %o0		! advance SRC by 8
	bgt,pt	%ncc, .bc_med_lword	! loop til 7 or fewer bytes left
	  add	%o1, 8, %o1		! advance DST by 8
.bc_med_lextra:
	addcc	%o2, 7, %o2		! restore rest of count
	bz,pt	%ncc, .bc_sm_exit	! if zero, then done
	  deccc	%o2
	bz,pt	%ncc, .bc_sm_byte
	  nop
	ba,pt	%ncc, .bc_sm_half
	  nop
	.align	16
.bc_med_word:
	btst	3, %o0			! check for
	bz,pt	%ncc, .bc_med_word1	! word alignment
	  nop
.bc_med_word0:
	ldub	[%o0], %o3		! load one byte
	inc	%o0
	stb	%o3,[%o1]		! store byte
	inc	%o1
	btst	3, %o0
	bnz,pt	%ncc, .bc_med_word0
	  dec	%o2
!
!  Now word aligned and have at least 36 bytes to move
!
.bc_med_word1:
	sub	%o2, 15, %o2		! adjust count to allow cc zero test
.bc_med_wmove:
	lduw	[%o0], %o3		! read word
	stw	%o3, [%o1]		! write word
	subcc	%o2, 16, %o2		! reduce count by 16
	lduw	[%o0 + 4], %o3		! repeat for a total of 4 words
	add	%o0, 16, %o0		! advance SRC by 16
	stw	%o3, [%o1 + 4]
	lduw	[%o0 - 8], %o3
	add	%o1, 16, %o1		! advance DST by 16
	stw	%o3, [%o1 - 8]
	lduw	[%o0 - 4], %o3
	bgt,pt	%ncc, .bc_med_wmove	! loop til 15 or fewer bytes left
	  stw	%o3, [%o1 - 4]
	addcc	%o2, 12, %o2		! restore count to word offset
	ble,pt	%ncc, .bc_med_wextra	! check for more words to move
	  nop
.bc_med_word2:
	lduw	[%o0], %o3		! read word
	subcc	%o2, 4, %o2		! reduce count by 4
	stw	%o3, [%o1]		! write word
	add	%o0, 4, %o0		! advance SRC by 4
	bgt,pt	%ncc, .bc_med_word2	! loop til 3 or fewer bytes left
	  add	%o1, 4, %o1		! advance DST by 4
.bc_med_wextra:
	addcc	%o2, 3, %o2		! restore rest of count
	bz,pt	%ncc, .bc_sm_exit	! if zero, then done
	  deccc	%o2
	bz,pt	%ncc, .bc_sm_byte
	  nop
	ba,pt	%ncc, .bc_sm_half
	  nop
	.align	16
.bc_med_half:
	btst	1, %o0			! check for
	bz,pt	%ncc, .bc_med_half1	! half word alignment
	  nop
	ldub	[%o0], %o3		! load one byte
	inc	%o0
	stb	%o3,[%o1]		! store byte
	inc	%o1
	dec	%o2
!
!  Now half word aligned and have at least 38 bytes to move
!
.bc_med_half1:
	sub	%o2, 7, %o2		! adjust count to allow cc zero test
.bc_med_hmove:
	lduh	[%o0], %o3		! read half word
	sth	%o3, [%o1]		! write half word
	subcc	%o2, 8, %o2		! reduce count by 8
	lduh	[%o0 + 2], %o3		! repeat for a total of 4 halfwords
	add	%o0, 8, %o0		! advance SRC by 8
	sth	%o3, [%o1 + 2]
	lduh	[%o0 - 4], %o3
	add	%o1, 8, %o1		! advance DST by 8
	sth	%o3, [%o1 - 4]
	lduh	[%o0 - 2], %o3
	bgt,pt	%ncc, .bc_med_hmove	! loop til 7 or fewer bytes left
	  sth	%o3, [%o1 - 2]
	addcc	%o2, 7, %o2		! restore count
	bz,pt	%ncc, .bc_sm_exit
	  deccc	%o2
	bz,pt	%ncc, .bc_sm_byte
	  nop
	ba,pt	%ncc, .bc_sm_half
	  nop

	SET_SIZE(bcopy)
/*
 * The _more entry points are not intended to be used directly by
 * any caller from outside this file.  They are provided to allow
 * profiling and dtrace of the portions of the copy code that use
 * the floating point registers.
 * This entry is particularly important as DTRACE (at least as of
 * 4/2004) does not support leaf functions.
 */
	ENTRY(bcopy_more)
.bcopy_more:
	save	%sp, -SA(MINFRAME + HWCOPYFRAMESIZE), %sp
	ldn	[THREAD_REG + T_LOFAULT], %l6	! save t_lofault
	tst	%l6
	bz,pt	%ncc, .do_copy
	  nop
	sethi	%hi(.copyerr), %o2
	or	%o2, %lo(.copyerr), %o2
	membar	#Sync				! sync error barrier
	stn	%o2, [THREAD_REG + T_LOFAULT]	! install new vector
	!
	! We've already captured whether t_lofault was zero on entry.
	! We need to mark ourselves as being from bcopy since both
	! kcopy and bcopy use the same code path.  If TRAMP_FLAG is set
	! and the saved lofault was zero, we won't reset lofault on
	! returning.
	!
	or	%l6, TRAMP_FLAG, %l6
/*
 * Copies that reach here are larger than VIS_COPY_THRESHOLD bytes.
 * Also, use of FP registers has been tested to be enabled.
 */
.do_copy:
	FP_NOMIGRATE(6, 7)

	rd	%fprs, %o2		! check for unused fp
	st	%o2, [%fp + STACK_BIAS - SAVED_FPRS_OFFSET]	! save orig %fprs
	btst	FPRS_FEF, %o2
	bz,a,pt	%icc, .do_blockcopy
	  wr	%g0, FPRS_FEF, %fprs

	BST_FPQ1Q3_TOSTACK(%o2)

.do_blockcopy:
	rd	%gsr, %o2
	stx	%o2, [%fp + STACK_BIAS - SAVED_GSR_OFFSET]	! save gsr
	or	%l6, FPUSED_FLAG, %l6
#define	REALSRC	%i0
#define	DST	%i1
#define	CNT	%i2
#define	SRC	%i3
#define	TMP	%i5

	andcc	DST, VIS_BLOCKSIZE - 1, TMP
	bz,pt	%ncc, 2f
	  neg	TMP
	add	TMP, VIS_BLOCKSIZE, TMP

	! TMP = bytes required to align DST on FP_BLOCK boundary
	! Using SRC as a tmp here
	cmp	TMP, 3
	bleu,pt	%ncc, 1f
	  sub	CNT,TMP,CNT		! adjust main count
	sub	TMP, 3, TMP		! adjust for end of loop test
.bc_blkalign:
	ldub	[REALSRC], SRC		! move 4 bytes per loop iteration
	stb	SRC, [DST]
	subcc	TMP, 4, TMP
	ldub	[REALSRC + 1], SRC
	add	REALSRC, 4, REALSRC
	stb	SRC, [DST + 1]
	ldub	[REALSRC - 2], SRC
	add	DST, 4, DST
	stb	SRC, [DST - 2]
	ldub	[REALSRC - 1], SRC
	bgu,pt	%ncc, .bc_blkalign
	  stb	SRC, [DST - 1]

	addcc	TMP, 3, TMP		! restore count adjustment
	bz,pt	%ncc, 2f		! no bytes left?
	  nop
1:	ldub	[REALSRC], SRC
	inc	REALSRC
	inc	DST
	deccc	TMP
	bgu	%ncc, 1b
	  stb	SRC, [DST - 1]

2:
	andn	REALSRC, 0x7, SRC
	alignaddr	REALSRC, %g0, %g0
	! SRC - 8-byte aligned
	! DST - 64-byte aligned
	prefetch [SRC], #one_read
	prefetch [SRC + (1 * VIS_BLOCKSIZE)], #one_read
	prefetch [SRC + (2 * VIS_BLOCKSIZE)], #one_read
	prefetch [SRC + (3 * VIS_BLOCKSIZE)], #one_read
	ldd	[SRC], %f0
#if CHEETAH_PREFETCH > 4
	prefetch [SRC + (4 * VIS_BLOCKSIZE)], #one_read
#endif
	ldd	[SRC + 0x08], %f2
#if CHEETAH_PREFETCH > 5
	prefetch [SRC + (5 * VIS_BLOCKSIZE)], #one_read
#endif
	ldd	[SRC + 0x10], %f4
#if CHEETAH_PREFETCH > 6
	prefetch [SRC + (6 * VIS_BLOCKSIZE)], #one_read
#endif
	faligndata %f0, %f2, %f32
	ldd	[SRC + 0x18], %f6
#if CHEETAH_PREFETCH > 7
	prefetch [SRC + (7 * VIS_BLOCKSIZE)], #one_read
#endif
	faligndata %f2, %f4, %f34
	ldd	[SRC + 0x20], %f8
	faligndata %f4, %f6, %f36
	ldd	[SRC + 0x28], %f10
	faligndata %f6, %f8, %f38
	ldd	[SRC + 0x30], %f12
	faligndata %f8, %f10, %f40
	ldd	[SRC + 0x38], %f14
	faligndata %f10, %f12, %f42
	ldd	[SRC + VIS_BLOCKSIZE], %f0
	sub	CNT, VIS_BLOCKSIZE, CNT
	add	SRC, VIS_BLOCKSIZE, SRC
	add	REALSRC, VIS_BLOCKSIZE, REALSRC

	.align	32
1:
	ldd	[SRC + 0x08], %f2
	faligndata %f12, %f14, %f44
	ldd	[SRC + 0x10], %f4
	faligndata %f14, %f0, %f46
	stda	%f32, [DST]ASI_BLK_P
	ldd	[SRC + 0x18], %f6
	faligndata %f0, %f2, %f32
	ldd	[SRC + 0x20], %f8
	faligndata %f2, %f4, %f34
	ldd	[SRC + 0x28], %f10
	faligndata %f4, %f6, %f36
	ldd	[SRC + 0x30], %f12
	faligndata %f6, %f8, %f38
	ldd	[SRC + 0x38], %f14
	faligndata %f8, %f10, %f40
	sub	CNT, VIS_BLOCKSIZE, CNT
	ldd	[SRC + VIS_BLOCKSIZE], %f0
	faligndata %f10, %f12, %f42
	prefetch [SRC + ((CHEETAH_PREFETCH) * VIS_BLOCKSIZE) + 8], #one_read
	add	DST, VIS_BLOCKSIZE, DST
	prefetch [SRC + ((CHEETAH_2ND_PREFETCH) * VIS_BLOCKSIZE)], #one_read
	add	REALSRC, VIS_BLOCKSIZE, REALSRC
	cmp	CNT, VIS_BLOCKSIZE + 8
	bgu,pt	%ncc, 1b
	  add	SRC, VIS_BLOCKSIZE, SRC

	! only if REALSRC & 0x7 is 0
	cmp	CNT, VIS_BLOCKSIZE
	bne	%ncc, 3f
	  andcc	REALSRC, 0x7, %g0
	bz,pt	%ncc, 2f
	  nop
3:
	faligndata %f12, %f14, %f44
	faligndata %f14, %f0, %f46
	stda	%f32, [DST]ASI_BLK_P
	add	DST, VIS_BLOCKSIZE, DST
	ba,pt	%ncc, 3f
	  nop
2:
	ldd	[SRC + 0x08], %f2
	fsrc1	%f12, %f44
	ldd	[SRC + 0x10], %f4
	fsrc1	%f14, %f46
	stda	%f32, [DST]ASI_BLK_P
	ldd	[SRC + 0x18], %f6
	fsrc1	%f0, %f32
	ldd	[SRC + 0x20], %f8
	fsrc1	%f2, %f34
	ldd	[SRC + 0x28], %f10
	fsrc1	%f4, %f36
	ldd	[SRC + 0x30], %f12
	fsrc1	%f6, %f38
	ldd	[SRC + 0x38], %f14
	fsrc1	%f8, %f40
	sub	CNT, VIS_BLOCKSIZE, CNT
	add	DST, VIS_BLOCKSIZE, DST
	add	SRC, VIS_BLOCKSIZE, SRC
	add	REALSRC, VIS_BLOCKSIZE, REALSRC
	fsrc1	%f10, %f42
	fsrc1	%f12, %f44
	fsrc1	%f14, %f46
	stda	%f32, [DST]ASI_BLK_P
	add	DST, VIS_BLOCKSIZE, DST
	ba,a,pt	%ncc, .bcb_exit

3:	tst	CNT
	bz,a,pt	%ncc, .bcb_exit
	  nop
5:	ldub	[REALSRC], TMP
	inc	REALSRC
	inc	DST
	deccc	CNT
	bgu	%ncc, 5b
	  stb	TMP, [DST - 1]

.bcb_exit:
	membar	#Sync

	FPRAS_INTERVAL(FPRAS_BCOPY, 0, %l5, %o2, %o3, %o4, %o5, 8)
	FPRAS_REWRITE_TYPE2Q1(0, %l5, %o2, %o3, 8, 9)
	FPRAS_CHECK(FPRAS_BCOPY, %l5, 9)	! outputs lost

	ldx	[%fp + STACK_BIAS - SAVED_GSR_OFFSET], %o2	! restore gsr
	wr	%o2, 0, %gsr

	ld	[%fp + STACK_BIAS - SAVED_FPRS_OFFSET], %o3
	btst	FPRS_FEF, %o3
	bz,pt	%icc, 4f
	  nop

	BLD_FPQ1Q3_FROMSTACK(%o2)

	ba,pt	%ncc, 2f
	  wr	%o3, 0, %fprs		! restore fprs

4:
	FZEROQ1Q3
	wr	%o3, 0, %fprs		! restore fprs

2:
	membar	#Sync				! sync error barrier
	andn	%l6, MASK_FLAGS, %l6
	stn	%l6, [THREAD_REG + T_LOFAULT]	! restore old t_lofault
	FP_ALLOWMIGRATE(5, 6)
	ret
	  restore	%g0, 0, %o0

	SET_SIZE(bcopy_more)
/*
 * Block copy with possibly overlapped operands.
 */

/* ARGSUSED */
void
ovbcopy(const void *from, void *to, size_t count)
{}

	ENTRY(ovbcopy)
	tst	%o2			! check count
	bgu,a	%ncc, 1f		! nothing to do or bad arguments
	  subcc	%o0, %o1, %o3		! difference of from and to address

	retl				! return
	  nop
1:
	bneg,a	%ncc, 2f
	  neg	%o3			! if < 0, make it positive
2:	cmp	%o2, %o3		! cmp size and abs(from - to)
	bleu	%ncc, bcopy		! if size <= abs(diff): use bcopy,
	  nop				!   no overlap
	cmp	%o0, %o1		! compare from and to addresses
	blu	%ncc, .ov_bkwd		! if from < to, copy backwards
	  nop
	!
	! Copy forwards.
	!
.ov_fwd:
	ldub	[%o0], %o3		! read from address
	inc	%o0			! inc from address
	stb	%o3, [%o1]		! write to address
	deccc	%o2			! dec count
	bgu	%ncc, .ov_fwd		! loop till done
	  inc	%o1			! inc to address

	retl				! return
	  nop
	!
	! Copy backwards.
	!
.ov_bkwd:
	deccc	%o2			! dec count
	ldub	[%o0 + %o2], %o3	! get byte at end of src
	bgu	%ncc, .ov_bkwd		! loop till done
	  stb	%o3, [%o1 + %o2]	! delay slot, store at end of dst

	retl				! return
	  nop

	SET_SIZE(ovbcopy)
/*
 * hwblkpagecopy()
 *
 * Copies exactly one page.  This routine assumes the caller (ppcopy)
 * has already disabled kernel preemption and has checked
 * use_hw_bcopy.  Preventing preemption also prevents cpu migration.
 */

/* ARGSUSED */
void
hwblkpagecopy(const void *src, void *dst)
{ }

	ENTRY(hwblkpagecopy)
	! get another window w/space for three aligned blocks of saved fpregs
	save	%sp, -SA(MINFRAME + HWCOPYFRAMESIZE), %sp

	! %i0 - source address (arg)
	! %i1 - destination address (arg)
	! %i2 - length of region (not arg)
	! %l0 - saved fprs
	! %l1 - pointer to saved fpregs

	rd	%fprs, %l0		! check for unused fp
	btst	FPRS_FEF, %l0
	bz,a	%icc, 1f
	  wr	%g0, FPRS_FEF, %fprs

	BST_FPQ1Q3_TOSTACK(%l1)
1:	set	PAGESIZE, CNT
	mov	REALSRC, SRC

	prefetch [SRC], #one_read
	prefetch [SRC + (1 * VIS_BLOCKSIZE)], #one_read
	prefetch [SRC + (2 * VIS_BLOCKSIZE)], #one_read
	prefetch [SRC + (3 * VIS_BLOCKSIZE)], #one_read
	ldd	[SRC], %f0
#if CHEETAH_PREFETCH > 4
	prefetch [SRC + (4 * VIS_BLOCKSIZE)], #one_read
#endif
	ldd	[SRC + 0x08], %f2
#if CHEETAH_PREFETCH > 5
	prefetch [SRC + (5 * VIS_BLOCKSIZE)], #one_read
#endif
	ldd	[SRC + 0x10], %f4
#if CHEETAH_PREFETCH > 6
	prefetch [SRC + (6 * VIS_BLOCKSIZE)], #one_read
#endif
	fsrc1	%f0, %f32
	ldd	[SRC + 0x18], %f6
#if CHEETAH_PREFETCH > 7
	prefetch [SRC + (7 * VIS_BLOCKSIZE)], #one_read
#endif
	fsrc1	%f2, %f34
	ldd	[SRC + 0x20], %f8
	fsrc1	%f4, %f36
	ldd	[SRC + 0x28], %f10
	fsrc1	%f6, %f38
	ldd	[SRC + 0x30], %f12
	fsrc1	%f8, %f40
	ldd	[SRC + 0x38], %f14
	fsrc1	%f10, %f42
	ldd	[SRC + VIS_BLOCKSIZE], %f0
	sub	CNT, VIS_BLOCKSIZE, CNT
	add	SRC, VIS_BLOCKSIZE, SRC
	fsrc1	%f12, %f44
	fsrc1	%f14, %f46

2:
	ldd	[SRC + 0x08], %f2
	fsrc1	%f0, %f32
	ldd	[SRC + 0x10], %f4
	fsrc1	%f2, %f34
	stda	%f32, [DST]ASI_BLK_P
	ldd	[SRC + 0x18], %f6
	fsrc1	%f4, %f36
	ldd	[SRC + 0x20], %f8
	fsrc1	%f6, %f38
	ldd	[SRC + 0x28], %f10
	fsrc1	%f8, %f40
	ldd	[SRC + 0x30], %f12
	fsrc1	%f10, %f42
	ldd	[SRC + 0x38], %f14
	fsrc1	%f12, %f44
	ldd	[SRC + VIS_BLOCKSIZE], %f0
	fsrc1	%f14, %f46
	prefetch [SRC + ((CHEETAH_PREFETCH) * VIS_BLOCKSIZE) + 8], #one_read
	sub	CNT, VIS_BLOCKSIZE, CNT
	add	DST, VIS_BLOCKSIZE, DST
	cmp	CNT, VIS_BLOCKSIZE + 8
	prefetch [SRC + ((CHEETAH_2ND_PREFETCH) * VIS_BLOCKSIZE)], #one_read
	bgu,pt	%ncc, 2b
	  add	SRC, VIS_BLOCKSIZE, SRC

	! trailing block
	ldd	[SRC + 0x08], %f2
	fsrc1	%f0, %f32
	ldd	[SRC + 0x10], %f4
	fsrc1	%f2, %f34
	stda	%f32, [DST]ASI_BLK_P
	ldd	[SRC + 0x18], %f6
	fsrc1	%f4, %f36
	ldd	[SRC + 0x20], %f8
	fsrc1	%f6, %f38
	ldd	[SRC + 0x28], %f10
	fsrc1	%f8, %f40
	ldd	[SRC + 0x30], %f12
	fsrc1	%f10, %f42
	ldd	[SRC + 0x38], %f14
	fsrc1	%f12, %f44
	sub	CNT, VIS_BLOCKSIZE, CNT
	add	DST, VIS_BLOCKSIZE, DST
	add	SRC, VIS_BLOCKSIZE, SRC
	fsrc1	%f14, %f46
	stda	%f32, [DST]ASI_BLK_P

	membar	#Sync

	FPRAS_INTERVAL(FPRAS_PGCOPY, 1, %l5, %o2, %o3, %o4, %o5, 8)
	FPRAS_REWRITE_TYPE1(1, %l5, %f32, %o2, 9)
	FPRAS_CHECK(FPRAS_PGCOPY, %l5, 9)	! lose outputs

	btst	FPRS_FEF, %l0
	bz,pt	%icc, 2f
	  nop

	BLD_FPQ1Q3_FROMSTACK(%l3)
	ba	3f
	  nop

2:	FZEROQ1Q3

3:	wr	%l0, 0, %fprs		! restore fprs
	ret
	  restore	%g0, 0, %o0

	SET_SIZE(hwblkpagecopy)
/*
 * Transfer data to and from user space -
 * Note that these routines can cause faults
 * It is assumed that the kernel has nothing at
 * less than KERNELBASE in the virtual address space.
 *
 * Note that copyin(9F) and copyout(9F) are part of the
 * DDI/DKI which specifies that they return '-1' on "errors."
 *
 * So there are two extremely similar routines - xcopyin() and xcopyout()
 * which return the errno that we've faithfully computed.  This
 * allows other callers (e.g. uiomove(9F)) to work correctly.
 * Given that these are used pretty heavily, we expand the calling
 * sequences inline for all flavours (rather than making wrappers).
 *
 * There are also stub routines for xcopyout_little and xcopyin_little,
 * which currently are intended to handle requests of <= 16 bytes from
 * do_unaligned.  Future enhancement to make them handle 8k pages efficiently
 * is left as an exercise...
 */
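
/*
 * A hedged illustration of the two calling contracts (ours, not from
 * the original source): copyout(9F) folds the error into a non-zero
 * return, while xcopyout() preserves the errno, which is why
 * uiomove(9F)-style callers prefer the latter.
 *
 *	if (copyout(kbuf, ubuf, len) != 0)
 *		return (EFAULT);		! error identity lost
 *
 *	error = xcopyout(kbuf, ubuf, len);	! 0 or errno
 */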
/*
 * Copy user data to kernel space (copyOP/xcopyOP/copyOP_noerr)
 *
 * General theory of operation:
 *
 * The only difference between copy{in,out} and
 * xcopy{in,out} is in the error handling routine they invoke
 * when a memory access error occurs.  xcopyOP returns the errno
 * while copyOP returns -1 (see above).  copy{in,out}_noerr set
 * a special flag (by oring the TRAMP_FLAG into the fault handler address)
 * if they are called with a fault handler already in place.  That flag
 * causes the default handlers to trampoline to the previous handler
 * upon taking a fault.
 *
 * None of the copyops routines grab a window until it's decided that
 * we need to do a HW block copy operation.  This saves a window
 * spill/fill when we're called during socket ops.  The typical IO
 * path won't cause spill/fill traps.
 *
 * This code uses a set of 4 limits for the maximum size that will
 * be copied given a particular input/output address alignment.
 * If the value for a particular limit is zero, the copy will be performed
 * by the plain copy loops rather than FPBLK.
 *
 * See the description of bcopy above for more details of the
 * data copying algorithm and the default limits.
 */
/*
 * Copy kernel data to user space (copyout/xcopyout/xcopyout_little).
 */

/*
 * We save the arguments in the following registers in case of a fault:
 *	kaddr - SAVE_SRC
 *	uaddr - SAVE_DST
 *	count - SAVE_COUNT
 */
#define	SAVE_SRC	%l1
#define	SAVE_DST	%l2
#define	SAVE_COUNT	%l3

#define	SM_SAVE_SRC	%g4
#define	SM_SAVE_DST	%g5
#define	SM_SAVE_COUNT	%o5
#define	ERRNO		%l5

#define	REAL_LOFAULT	%l4
/*
 * Generic copyio fault handler.  This is the first line of defense when a
 * fault occurs in (x)copyin/(x)copyout.  In order for this to function
 * properly, the value of the 'real' lofault handler should be in REAL_LOFAULT.
 * This allows us to share common code for all the flavors of the copy
 * operations, including the _noerr versions.
 *
 * Note that this function will restore the original input parameters before
 * calling REAL_LOFAULT.  So the real handler can vector to the appropriate
 * member of the t_copyop structure, if needed.
 */
	ENTRY(copyio_fault)
	membar	#Sync			! sync error barrier
	mov	%g1,ERRNO		! save errno in ERRNO
	btst	FPUSED_FLAG, %l6
	bz	%ncc, 1f
	  nop

	ldx	[%fp + STACK_BIAS - SAVED_GSR_OFFSET], %o2
	wr	%o2, 0, %gsr		! restore gsr

	ld	[%fp + STACK_BIAS - SAVED_FPRS_OFFSET], %o3
	btst	FPRS_FEF, %o3
	bz,pt	%icc, 4f
	  nop

	BLD_FPQ2Q4_FROMSTACK(%o2)

	ba,pt	%ncc, 1f
	  wr	%o3, 0, %fprs		! restore fprs

4:
	FZEROQ2Q4
	wr	%o3, 0, %fprs		! restore fprs

1:
	andn	%l6, FPUSED_FLAG, %l6
	membar	#Sync
	stn	%l6, [THREAD_REG + T_LOFAULT]	! restore old t_lofault
	FP_ALLOWMIGRATE(5, 6)

	mov	SAVE_SRC, %i0
	mov	SAVE_DST, %i1
	jmp	REAL_LOFAULT
	  mov	SAVE_COUNT, %i2

	SET_SIZE(copyio_fault)
/* ARGSUSED */
int
copyout(const void *kaddr, void *uaddr, size_t count)
{ return (0); }

	ENTRY(copyout)

	cmp	%o2, VIS_COPY_THRESHOLD		! check for leaf rtn case
	bleu,pt	%ncc, .copyout_small		! go to larger cases
	  xor	%o0, %o1, %o3			! are src, dst alignable?
	btst	7, %o3				!
	bz,pt	%ncc, .copyout_8		! check for longword alignment
	  nop
	btst	1, %o3				!
	bz,pt	%ncc, .copyout_2		! check for half-word
	  nop
	sethi	%hi(hw_copy_limit_1), %o3	! Check copy limit
	ld	[%o3 + %lo(hw_copy_limit_1)], %o3
	tst	%o3
	bz,pn	%icc, .copyout_small		! if zero, disable HW copy
	  cmp	%o2, %o3			! if length <= limit
	bleu,pt	%ncc, .copyout_small		! go to small copy
	  nop
	ba,pt	%ncc, .copyout_more		! otherwise go to large copy
	  nop
.copyout_2:
	btst	3, %o3				!
	bz,pt	%ncc, .copyout_4		! check for word alignment
	  nop
	sethi	%hi(hw_copy_limit_2), %o3	! Check copy limit
	ld	[%o3 + %lo(hw_copy_limit_2)], %o3
	tst	%o3
	bz,pn	%icc, .copyout_small		! if zero, disable HW copy
	  cmp	%o2, %o3			! if length <= limit
	bleu,pt	%ncc, .copyout_small		! go to small copy
	  nop
	ba,pt	%ncc, .copyout_more		! otherwise go to large copy
	  nop
.copyout_4:
	! already checked longword, must be word aligned
	sethi	%hi(hw_copy_limit_4), %o3	! Check copy limit
	ld	[%o3 + %lo(hw_copy_limit_4)], %o3
	tst	%o3
	bz,pn	%icc, .copyout_small		! if zero, disable HW copy
	  cmp	%o2, %o3			! if length <= limit
	bleu,pt	%ncc, .copyout_small		! go to small copy
	  nop
	ba,pt	%ncc, .copyout_more		! otherwise go to large copy
	  nop
.copyout_8:
	sethi	%hi(hw_copy_limit_8), %o3	! Check copy limit
	ld	[%o3 + %lo(hw_copy_limit_8)], %o3
	tst	%o3
	bz,pn	%icc, .copyout_small		! if zero, disable HW copy
	  cmp	%o2, %o3			! if length <= limit
	bleu,pt	%ncc, .copyout_small		! go to small copy
	  nop
	ba,pt	%ncc, .copyout_more		! otherwise go to large copy
	  nop

	.align	16
	nop				! instruction alignment
					! see discussion at start of file
.copyout_small:
	sethi	%hi(.sm_copyout_err), %o5	! .sm_copyout_err is lofault
	or	%o5, %lo(.sm_copyout_err), %o5
	ldn	[THREAD_REG + T_LOFAULT], %o4	! save existing handler
	membar	#Sync				! sync error barrier
	stn	%o5, [THREAD_REG + T_LOFAULT]	! set t_lofault
.sm_do_copyout:
	mov	%o0, SM_SAVE_SRC
	mov	%o1, SM_SAVE_DST
	cmp	%o2, SHORTCOPY		! check for really short case
	bleu,pt	%ncc, .co_sm_left	!
	  mov	%o2, SM_SAVE_COUNT
	cmp	%o2, CHKSIZE		! check for medium length cases
	bgu,pn	%ncc, .co_med		!
	  or	%o0, %o1, %o3		! prepare alignment check
	andcc	%o3, 0x3, %g0		! test for alignment
	bz,pt	%ncc, .co_sm_word	! branch to word aligned case
.co_sm_movebytes:
	  sub	%o2, 3, %o2		! adjust count to allow cc zero test
.co_sm_notalign4:
	ldub	[%o0], %o3		! read byte
	subcc	%o2, 4, %o2		! reduce count by 4
	stba	%o3, [%o1]ASI_USER	! write byte
	inc	%o1			! advance DST by 1
	ldub	[%o0 + 1], %o3		! repeat for a total of 4 bytes
	add	%o0, 4, %o0		! advance SRC by 4
	stba	%o3, [%o1]ASI_USER
	inc	%o1			! advance DST by 1
	ldub	[%o0 - 2], %o3
	stba	%o3, [%o1]ASI_USER
	inc	%o1			! advance DST by 1
	ldub	[%o0 - 1], %o3
	stba	%o3, [%o1]ASI_USER
	bgt,pt	%ncc, .co_sm_notalign4	! loop til 3 or fewer bytes remain
	  inc	%o1			! advance DST by 1
	add	%o2, 3, %o2		! restore count
.co_sm_left:
	tst	%o2
	bz,pt	%ncc, .co_sm_exit	! check for zero length
	  nop
	ldub	[%o0], %o3		! load one byte
	deccc	%o2			! reduce count for cc test
	bz,pt	%ncc, .co_sm_exit
	  stba	%o3,[%o1]ASI_USER	! store one byte
	ldub	[%o0 + 1], %o3		! load second byte
	deccc	%o2
	inc	%o1
	bz,pt	%ncc, .co_sm_exit
	  stba	%o3,[%o1]ASI_USER	! store second byte
	ldub	[%o0 + 2], %o3		! load third byte
	inc	%o1
	stba	%o3,[%o1]ASI_USER	! store third byte
.co_sm_exit:
	membar	#Sync				! sync error barrier
	stn	%o4, [THREAD_REG + T_LOFAULT]	! restore old t_lofault
	retl
	  mov	%g0, %o0		! return 0
.co_sm_words:
        lduw    [%o0], %o3              ! read word
.co_sm_wordx:
        subcc   %o2, 8, %o2             ! update count
        stwa    %o3, [%o1]ASI_USER      ! write word
        add     %o0, 8, %o0             ! update SRC
        lduw    [%o0 - 4], %o3          ! read word
        add     %o1, 4, %o1             ! update DST
        stwa    %o3, [%o1]ASI_USER      ! write word
        bgt,pt  %ncc, .co_sm_words      ! loop til done
        add     %o1, 4, %o1             ! update DST
        addcc   %o2, 7, %o2             ! restore count
        bz,pt   %ncc, .co_sm_exit
        bz,pt   %ncc, .co_sm_byte
.co_sm_half:
        subcc   %o2, 2, %o2             ! reduce count by 2
        lduh    [%o0], %o3              ! read half word
        add     %o0, 2, %o0             ! advance SRC by 2
        stha    %o3, [%o1]ASI_USER      ! write half word
        bgt,pt  %ncc, .co_sm_half       ! loop til done
        add     %o1, 2, %o1             ! advance DST by 2
        addcc   %o2, 1, %o2             ! restore count
        bz,pt   %ncc, .co_sm_exit
.co_sm_byte:
        stba    %o3, [%o1]ASI_USER
.co_sm_exit:
        membar  #Sync                   ! sync error barrier
        stn     %o4, [THREAD_REG + T_LOFAULT]   ! restore old t_lofault
        mov     %g0, %o0                ! return 0
.co_sm_word:
        subcc   %o2, 4, %o2             ! update count
        bgt,pt  %ncc, .co_sm_wordx
        lduw    [%o0], %o3              ! read word
        addcc   %o2, 3, %o2             ! restore count
        bz,pt   %ncc, .co_sm_exit
        stwa    %o3, [%o1]ASI_USER      ! write word
        deccc   %o2                     ! reduce count for cc test
        ldub    [%o0 + 4], %o3          ! load one byte
        bz,pt   %ncc, .co_sm_exit
        stba    %o3, [%o1]ASI_USER      ! store one byte
        ldub    [%o0 + 5], %o3          ! load second byte
        bz,pt   %ncc, .co_sm_exit
        stba    %o3, [%o1]ASI_USER      ! store second byte
        ldub    [%o0 + 6], %o3          ! load third byte
        stba    %o3, [%o1]ASI_USER      ! store third byte
        membar  #Sync                   ! sync error barrier
        stn     %o4, [THREAD_REG + T_LOFAULT]   ! restore old t_lofault
        mov     %g0, %o0                ! return 0
.co_med:
        xor     %o0, %o1, %o3           ! setup alignment check
        bnz,pt  %ncc, .co_sm_movebytes  ! unaligned
        bnz,pt  %ncc, .co_med_half      ! halfword aligned
        bnz,pt  %ncc, .co_med_word      ! word aligned
.co_med_long:
        btst    3, %o0                  ! check for
        bz,pt   %ncc, .co_med_long1     ! word alignment
.co_med_long0:
        ldub    [%o0], %o3              ! load one byte
        stba    %o3, [%o1]ASI_USER      ! store byte
        bnz,pt  %ncc, .co_med_long0
.co_med_long1:                          ! word aligned
        btst    7, %o0                  ! check for long word
        bz,pt   %ncc, .co_med_long2
        lduw    [%o0], %o3              ! load word
        add     %o0, 4, %o0             ! advance SRC by 4
        stwa    %o3, [%o1]ASI_USER      ! store word
        add     %o1, 4, %o1             ! advance DST by 4
        sub     %o2, 4, %o2             ! reduce count by 4
        ! Now long word aligned and have at least 32 bytes to move

.co_med_long2:
        sub     %o2, 31, %o2            ! adjust count to allow cc zero test
        sub     %o1, 8, %o1             ! adjust pointer to allow store in
                                        ! branch delay slot instead of add
.co_med_lmove:
        add     %o1, 8, %o1             ! advance DST by 8
        ldx     [%o0], %o3              ! read long word
        subcc   %o2, 32, %o2            ! reduce count by 32
        stxa    %o3, [%o1]ASI_USER      ! write long word
        add     %o1, 8, %o1             ! advance DST by 8
        ldx     [%o0 + 8], %o3          ! repeat for a total of 4 long words
        add     %o0, 32, %o0            ! advance SRC by 32
        stxa    %o3, [%o1]ASI_USER
        add     %o1, 8, %o1             ! advance DST by 8
        stxa    %o3, [%o1]ASI_USER
        add     %o1, 8, %o1             ! advance DST by 8
        bgt,pt  %ncc, .co_med_lmove     ! loop til 31 or fewer bytes left
        stxa    %o3, [%o1]ASI_USER
        add     %o1, 8, %o1             ! advance DST by 8
        addcc   %o2, 24, %o2            ! restore count to long word offset
        ble,pt  %ncc, .co_med_lextra    ! check for more long words to move
.co_med_lword:
        ldx     [%o0], %o3              ! read long word
        subcc   %o2, 8, %o2             ! reduce count by 8
        stxa    %o3, [%o1]ASI_USER      ! write long word
        add     %o0, 8, %o0             ! advance SRC by 8
        bgt,pt  %ncc, .co_med_lword     ! loop til 7 or fewer bytes left
        add     %o1, 8, %o1             ! advance DST by 8
.co_med_lextra:
        addcc   %o2, 7, %o2             ! restore rest of count
        bz,pt   %ncc, .co_sm_exit       ! if zero, then done
        bz,pt   %ncc, .co_sm_byte
        ba,pt   %ncc, .co_sm_half
        nop                             ! instruction alignment
                                        ! see discussion at start of file
.co_med_word:
        btst    3, %o0                  ! check for
        bz,pt   %ncc, .co_med_word1     ! word alignment
.co_med_word0:
        ldub    [%o0], %o3              ! load one byte
        stba    %o3, [%o1]ASI_USER      ! store byte
        bnz,pt  %ncc, .co_med_word0

        ! Now word aligned and have at least 36 bytes to move
.co_med_word1:
        sub     %o2, 15, %o2            ! adjust count to allow cc zero test
.co_med_wmove:
        lduw    [%o0], %o3              ! read word
        subcc   %o2, 16, %o2            ! reduce count by 16
        stwa    %o3, [%o1]ASI_USER      ! write word
        add     %o1, 4, %o1             ! advance DST by 4
        lduw    [%o0 + 4], %o3          ! repeat for a total of 4 words
        add     %o0, 16, %o0            ! advance SRC by 16
        stwa    %o3, [%o1]ASI_USER
        add     %o1, 4, %o1             ! advance DST by 4
        stwa    %o3, [%o1]ASI_USER
        add     %o1, 4, %o1             ! advance DST by 4
        stwa    %o3, [%o1]ASI_USER
        bgt,pt  %ncc, .co_med_wmove     ! loop til 15 or fewer bytes left
        add     %o1, 4, %o1             ! advance DST by 4
        addcc   %o2, 12, %o2            ! restore count to word offset
        ble,pt  %ncc, .co_med_wextra    ! check for more words to move
.co_med_word2:
        lduw    [%o0], %o3              ! read word
        subcc   %o2, 4, %o2             ! reduce count by 4
        stwa    %o3, [%o1]ASI_USER      ! write word
        add     %o0, 4, %o0             ! advance SRC by 4
        bgt,pt  %ncc, .co_med_word2     ! loop til 3 or fewer bytes left
        add     %o1, 4, %o1             ! advance DST by 4
.co_med_wextra:
        addcc   %o2, 3, %o2             ! restore rest of count
        bz,pt   %ncc, .co_sm_exit       ! if zero, then done
        bz,pt   %ncc, .co_sm_byte
        ba,pt   %ncc, .co_sm_half

        nop                             ! instruction alignment
        nop                             ! see discussion at start of file
.co_med_half:
        btst    1, %o0                  ! check for
        bz,pt   %ncc, .co_med_half1     ! half word alignment
        ldub    [%o0], %o3              ! load one byte
        stba    %o3, [%o1]ASI_USER      ! store byte

        ! Now half word aligned and have at least 38 bytes to move
.co_med_half1:
        sub     %o2, 7, %o2             ! adjust count to allow cc zero test
.co_med_hmove:
        lduh    [%o0], %o3              ! read half word
        subcc   %o2, 8, %o2             ! reduce count by 8
        stha    %o3, [%o1]ASI_USER      ! write half word
        add     %o1, 2, %o1             ! advance DST by 2
        lduh    [%o0 + 2], %o3          ! repeat for a total of 4 halfwords
        add     %o0, 8, %o0             ! advance SRC by 8
        stha    %o3, [%o1]ASI_USER
        add     %o1, 2, %o1             ! advance DST by 2
        stha    %o3, [%o1]ASI_USER
        add     %o1, 2, %o1             ! advance DST by 2
        stha    %o3, [%o1]ASI_USER
        bgt,pt  %ncc, .co_med_hmove     ! loop til 7 or fewer bytes left
        add     %o1, 2, %o1             ! advance DST by 2
        addcc   %o2, 7, %o2             ! restore count
        bz,pt   %ncc, .co_sm_exit
        bz,pt   %ncc, .co_sm_byte
        ba,pt   %ncc, .co_sm_half
/*
 * We got here because of a fault during short copyout.
 * Errno value is in ERRNO, but DDI/DKI says return -1 (sigh).
 */
.sm_copyout_err:
        stn     %o4, [THREAD_REG + T_LOFAULT]   ! restore old t_lofault
        mov     SM_SAVE_SRC, %o0
        mov     SM_SAVE_DST, %o1
        mov     SM_SAVE_COUNT, %o2
        ldn     [THREAD_REG + T_COPYOPS], %o3   ! check for copyop handler
        bz,pt   %ncc, 3f                        ! if not, return error
        ldn     [%o3 + CP_COPYOUT], %o5         ! if handler, invoke it with
        jmp     %o5                             ! original arguments
3:
        or      %g0, -1, %o0                    ! return error value
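/*
 * C-level sketch of the error path above (an illustrative reconstruction;
 * "saved" is the handler stashed in %o4 on entry):
 *
 *	curthread->t_lofault = saved;		// drop our fault handler
 *	if (curthread->t_copyops != NULL)	// installed copyop handler?
 *		return (curthread->t_copyops->cp_copyout(kaddr, uaddr, count));
 *	return (-1);				// DDI/DKI: -1, not the errno
 */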
/*
 * The _more entry points are not intended to be used directly by
 * any caller from outside this file.  They are provided to allow
 * profiling and dtrace of the portions of the copy code that use
 * the floating point registers.
 * This entry is particularly important as DTRACE (at least as of
 * 4/2004) does not support leaf functions.
 */
.copyout_more:
        save    %sp, -SA(MINFRAME + HWCOPYFRAMESIZE), %sp
        set     .copyout_err, REAL_LOFAULT
/*
 * Copy outs that reach here are larger than VIS_COPY_THRESHOLD bytes
 */
.do_copyout:
        set     copyio_fault, %l7               ! copyio_fault is lofault val
        ldn     [THREAD_REG + T_LOFAULT], %l6   ! save existing handler
        membar  #Sync                           ! sync error barrier
        stn     %l7, [THREAD_REG + T_LOFAULT]   ! set t_lofault

        rd      %fprs, %o2                      ! check for unused fp
        st      %o2, [%fp + STACK_BIAS - SAVED_FPRS_OFFSET]     ! save orig %fprs
        bz,a,pt %icc, .do_blockcopyout
        wr      %g0, FPRS_FEF, %fprs

        BST_FPQ2Q4_TOSTACK(%o2)

.do_blockcopyout:
        stx     %o2, [%fp + STACK_BIAS - SAVED_GSR_OFFSET]      ! save gsr
        or      %l6, FPUSED_FLAG, %l6
        andcc   DST, VIS_BLOCKSIZE - 1, TMP
        add     TMP, VIS_BLOCKSIZE, TMP

        ! TMP = bytes required to align DST on FP_BLOCK boundary
        ! Using SRC as a tmp here

        sub     CNT, TMP, CNT           ! adjust main count
        sub     TMP, 3, TMP             ! adjust for end of loop test
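/*
 * A minimal C sketch of the count adjustment above, assuming the negate
 * step that this excerpt does not show between the andcc and the add:
 *
 *	tmp = VIS_BLOCKSIZE - (dst & (VIS_BLOCKSIZE - 1)); // bytes to align dst
 *	cnt -= tmp;		// block loop handles the aligned remainder
 *	tmp -= 3;		// so the 4-byte loop test crosses zero
 */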
.co_blkalign:
        ldub    [REALSRC], SRC          ! move 4 bytes per loop iteration
        ldub    [REALSRC + 1], SRC
        add     REALSRC, 4, REALSRC
        stba    SRC, [DST + 1]%asi
        ldub    [REALSRC - 2], SRC
        stba    SRC, [DST - 2]%asi
        ldub    [REALSRC - 1], SRC
        bgu,pt  %ncc, .co_blkalign
        stba    SRC, [DST - 1]%asi

        addcc   TMP, 3, TMP             ! restore count adjustment
        bz,pt   %ncc, 2f                ! no bytes left?
1:      ldub    [REALSRC], SRC
        stba    SRC, [DST - 1]%asi

2:      andn    REALSRC, 0x7, SRC
        alignaddr REALSRC, %g0, %g0

        ! SRC - 8-byte aligned
        ! DST - 64-byte aligned
        prefetch [SRC], #one_read
        prefetch [SRC + (1 * VIS_BLOCKSIZE)], #one_read
        prefetch [SRC + (2 * VIS_BLOCKSIZE)], #one_read
        prefetch [SRC + (3 * VIS_BLOCKSIZE)], #one_read
#if CHEETAH_PREFETCH > 4
        prefetch [SRC + (4 * VIS_BLOCKSIZE)], #one_read
#endif
        ldd     [SRC + 0x08], %f18
#if CHEETAH_PREFETCH > 5
        prefetch [SRC + (5 * VIS_BLOCKSIZE)], #one_read
#endif
        ldd     [SRC + 0x10], %f20
#if CHEETAH_PREFETCH > 6
        prefetch [SRC + (6 * VIS_BLOCKSIZE)], #one_read
#endif
        faligndata %f16, %f18, %f48
        ldd     [SRC + 0x18], %f22
#if CHEETAH_PREFETCH > 7
        prefetch [SRC + (7 * VIS_BLOCKSIZE)], #one_read
#endif
        faligndata %f18, %f20, %f50
        ldd     [SRC + 0x20], %f24
        faligndata %f20, %f22, %f52
        ldd     [SRC + 0x28], %f26
        faligndata %f22, %f24, %f54
        ldd     [SRC + 0x30], %f28
        faligndata %f24, %f26, %f56
        ldd     [SRC + 0x38], %f30
        faligndata %f26, %f28, %f58
        ldd     [SRC + VIS_BLOCKSIZE], %f16
        sub     CNT, VIS_BLOCKSIZE, CNT
        add     SRC, VIS_BLOCKSIZE, SRC
        add     REALSRC, VIS_BLOCKSIZE, REALSRC
        ldd     [SRC + 0x08], %f18
        faligndata %f28, %f30, %f60
        ldd     [SRC + 0x10], %f20
        faligndata %f30, %f16, %f62
        stda    %f48, [DST]ASI_BLK_AIUS
        ldd     [SRC + 0x18], %f22
        faligndata %f16, %f18, %f48
        ldd     [SRC + 0x20], %f24
        faligndata %f18, %f20, %f50
        ldd     [SRC + 0x28], %f26
        faligndata %f20, %f22, %f52
        ldd     [SRC + 0x30], %f28
        faligndata %f22, %f24, %f54
        ldd     [SRC + 0x38], %f30
        faligndata %f24, %f26, %f56
        sub     CNT, VIS_BLOCKSIZE, CNT
        ldd     [SRC + VIS_BLOCKSIZE], %f16
        faligndata %f26, %f28, %f58
        prefetch [SRC + ((CHEETAH_PREFETCH) * VIS_BLOCKSIZE) + 8], #one_read
        add     DST, VIS_BLOCKSIZE, DST
        prefetch [SRC + ((CHEETAH_2ND_PREFETCH) * VIS_BLOCKSIZE)], #one_read
        add     REALSRC, VIS_BLOCKSIZE, REALSRC
        cmp     CNT, VIS_BLOCKSIZE + 8
        add     SRC, VIS_BLOCKSIZE, SRC
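/*
 * Shape of the block-copy steady state above, in C-like pseudocode (an
 * illustrative sketch; "stage[]" stands for the %f16-%f30 staging regs):
 *
 *	while (cnt > VIS_BLOCKSIZE + 8) {
 *		// load 64 bytes from the 8-byte-aligned SRC into stage[]
 *		// faligndata each adjacent pair into %f48-%f62, shifting
 *		//   the stream to the offset set earlier by alignaddr
 *		// stda %f48 block-stores 64 bytes to DST (ASI_BLK_AIUS)
 *		// prefetch CHEETAH_PREFETCH blocks ahead; bump SRC/DST/CNT
 *	}
 */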
        ! only if REALSRC & 0x7 is 0
        cmp     CNT, VIS_BLOCKSIZE
        andcc   REALSRC, 0x7, %g0

        faligndata %f28, %f30, %f60
        faligndata %f30, %f16, %f62
        stda    %f48, [DST]ASI_BLK_AIUS
        add     DST, VIS_BLOCKSIZE, DST

        ldd     [SRC + 0x08], %f18
        ldd     [SRC + 0x10], %f20
        stda    %f48, [DST]ASI_BLK_AIUS
        ldd     [SRC + 0x18], %f22
        ldd     [SRC + 0x20], %f24
        ldd     [SRC + 0x28], %f26
        ldd     [SRC + 0x30], %f28
        ldd     [SRC + 0x38], %f30
        sub     CNT, VIS_BLOCKSIZE, CNT
        add     DST, VIS_BLOCKSIZE, DST
        add     SRC, VIS_BLOCKSIZE, SRC
        add     REALSRC, VIS_BLOCKSIZE, REALSRC
        stda    %f48, [DST]ASI_BLK_AIUS
        add     DST, VIS_BLOCKSIZE, DST

5:      ldub    [REALSRC], TMP
        stba    TMP, [DST - 1]%asi

        FPRAS_INTERVAL(FPRAS_COPYOUT, 0, %l5, %o2, %o3, %o4, %o5, 8)
        FPRAS_REWRITE_TYPE2Q2(0, %l5, %o2, %o3, 8, 9)
        FPRAS_CHECK(FPRAS_COPYOUT, %l5, 9)      ! lose outputs
        ldx     [%fp + STACK_BIAS - SAVED_GSR_OFFSET], %o2
        wr      %o2, 0, %gsr            ! restore gsr

        ld      [%fp + STACK_BIAS - SAVED_FPRS_OFFSET], %o3

        BLD_FPQ2Q4_FROMSTACK(%o2)

        wr      %o3, 0, %fprs           ! restore fprs

        wr      %o3, 0, %fprs           ! restore fprs

        andn    %l6, FPUSED_FLAG, %l6
        stn     %l6, [THREAD_REG + T_LOFAULT]   ! restore old t_lofault
        FP_ALLOWMIGRATE(5, 6)

/*
 * We got here because of a fault during copyout.
 * Errno value is in ERRNO, but DDI/DKI says return -1 (sigh).
 */
.copyout_err:
        ldn     [THREAD_REG + T_COPYOPS], %o4   ! check for copyop handler
        bz,pt   %ncc, 2f                        ! if not, return error
        ldn     [%o4 + CP_COPYOUT], %g2         ! if handler, invoke it with
        jmp     %g2                             ! original arguments
        restore %g0, 0, %g0                     ! dispose of copy window
2:
        restore %g0, -1, %o0                    ! return error value

        SET_SIZE(copyout_more)
/*
 * xcopyout(const void *kaddr, void *uaddr, size_t count)
 */
        ENTRY(xcopyout)
        cmp     %o2, VIS_COPY_THRESHOLD         ! check for leaf rtn case
        bleu,pt %ncc, .xcopyout_small           ! go to larger cases
        xor     %o0, %o1, %o3                   ! are src, dst alignable?
        bz,pt   %ncc, .xcopyout_8               ! check for longword alignment
        bz,pt   %ncc, .xcopyout_2               ! check for half-word
        sethi   %hi(hw_copy_limit_1), %o3       ! Check copy limit
        ld      [%o3 + %lo(hw_copy_limit_1)], %o3
        bz,pn   %icc, .xcopyout_small           ! if zero, disable HW copy
        cmp     %o2, %o3                        ! if length <= limit
        bleu,pt %ncc, .xcopyout_small           ! go to small copy
        ba,pt   %ncc, .xcopyout_more            ! otherwise go to large copy
.xcopyout_2:
        bz,pt   %ncc, .xcopyout_4               ! check for word alignment
        sethi   %hi(hw_copy_limit_2), %o3       ! Check copy limit
        ld      [%o3 + %lo(hw_copy_limit_2)], %o3
        bz,pn   %icc, .xcopyout_small           ! if zero, disable HW copy
        cmp     %o2, %o3                        ! if length <= limit
        bleu,pt %ncc, .xcopyout_small           ! go to small copy
        ba,pt   %ncc, .xcopyout_more            ! otherwise go to large copy
.xcopyout_4:
        ! already checked longword, must be word aligned
        sethi   %hi(hw_copy_limit_4), %o3       ! Check copy limit
        ld      [%o3 + %lo(hw_copy_limit_4)], %o3
        bz,pn   %icc, .xcopyout_small           ! if zero, disable HW copy
        cmp     %o2, %o3                        ! if length <= limit
        bleu,pt %ncc, .xcopyout_small           ! go to small copy
        ba,pt   %ncc, .xcopyout_more            ! otherwise go to large copy
.xcopyout_8:
        sethi   %hi(hw_copy_limit_8), %o3       ! Check copy limit
        ld      [%o3 + %lo(hw_copy_limit_8)], %o3
        bz,pn   %icc, .xcopyout_small           ! if zero, disable HW copy
        cmp     %o2, %o3                        ! if length <= limit
        bleu,pt %ncc, .xcopyout_small           ! go to small copy
        ba,pt   %ncc, .xcopyout_more            ! otherwise go to large copy
.xcopyout_small:
        sethi   %hi(.sm_xcopyout_err), %o5      ! .sm_xcopyout_err is lofault
        or      %o5, %lo(.sm_xcopyout_err), %o5
        ldn     [THREAD_REG + T_LOFAULT], %o4   ! save existing handler
        membar  #Sync                           ! sync error barrier
        ba,pt   %ncc, .sm_do_copyout            ! common code
        stn     %o5, [THREAD_REG + T_LOFAULT]   ! set t_lofault

.xcopyout_more:
        save    %sp, -SA(MINFRAME + HWCOPYFRAMESIZE), %sp
        sethi   %hi(.xcopyout_err), REAL_LOFAULT
        ba,pt   %ncc, .do_copyout               ! common code
        or      REAL_LOFAULT, %lo(.xcopyout_err), REAL_LOFAULT
/*
 * We got here because of a fault during xcopyout.
 * Errno value is in ERRNO.
 */
.xcopyout_err:
        ldn     [THREAD_REG + T_COPYOPS], %o4   ! check for copyop handler
        bz,pt   %ncc, 2f                        ! if not, return error
        ldn     [%o4 + CP_XCOPYOUT], %g2        ! if handler, invoke it with
        jmp     %g2                             ! original arguments
        restore %g0, 0, %g0                     ! dispose of copy window
2:
        restore ERRNO, 0, %o0                   ! return errno value
.sm_xcopyout_err:
        stn     %o4, [THREAD_REG + T_LOFAULT]   ! restore old t_lofault
        mov     SM_SAVE_SRC, %o0
        mov     SM_SAVE_DST, %o1
        mov     SM_SAVE_COUNT, %o2
        ldn     [THREAD_REG + T_COPYOPS], %o3   ! check for copyop handler
        bz,pt   %ncc, 3f                        ! if not, return error
        ldn     [%o3 + CP_XCOPYOUT], %o5        ! if handler, invoke it with
        jmp     %o5                             ! original arguments
3:
        or      %g1, 0, %o0                     ! return errno value
/*
 * xcopyout_little(const void *kaddr, void *uaddr, size_t count)
 */
        ENTRY(xcopyout_little)
        sethi   %hi(.xcopyio_err), %o5
        or      %o5, %lo(.xcopyio_err), %o5
        ldn     [THREAD_REG + T_LOFAULT], %o4
        membar  #Sync                           ! sync error barrier
        stn     %o5, [THREAD_REG + T_LOFAULT]

        bz,pn   %ncc, 2f                ! check for zero bytes
        add     %o0, %o4, %o0           ! start w/last byte
        ldub    [%o0 + %o3], %o4
1:      stba    %o4, [%o1 + %o3]ASI_AIUSL
        sub     %o0, 2, %o0             ! get next byte
        ldub    [%o0 + %o3], %o4

2:      membar  #Sync                   ! sync error barrier
        stn     %o5, [THREAD_REG + T_LOFAULT]   ! restore old t_lofault
        mov     %g0, %o0                ! return (0)

        SET_SIZE(xcopyout_little)
/*
 * Copy user data to kernel space (copyin/xcopyin/xcopyin_little)
 */

/*
 * copyin(const void *uaddr, void *kaddr, size_t count)
 */
        ENTRY(copyin)
        cmp     %o2, VIS_COPY_THRESHOLD         ! check for leaf rtn case
        bleu,pt %ncc, .copyin_small             ! go to larger cases
        xor     %o0, %o1, %o3                   ! are src, dst alignable?
        bz,pt   %ncc, .copyin_8                 ! check for longword alignment
        bz,pt   %ncc, .copyin_2                 ! check for half-word
        sethi   %hi(hw_copy_limit_1), %o3       ! Check copy limit
        ld      [%o3 + %lo(hw_copy_limit_1)], %o3
        bz,pn   %icc, .copyin_small             ! if zero, disable HW copy
        cmp     %o2, %o3                        ! if length <= limit
        bleu,pt %ncc, .copyin_small             ! go to small copy
        ba,pt   %ncc, .copyin_more              ! otherwise go to large copy
.copyin_2:
        bz,pt   %ncc, .copyin_4                 ! check for word alignment
        sethi   %hi(hw_copy_limit_2), %o3       ! Check copy limit
        ld      [%o3 + %lo(hw_copy_limit_2)], %o3
        bz,pn   %icc, .copyin_small             ! if zero, disable HW copy
        cmp     %o2, %o3                        ! if length <= limit
        bleu,pt %ncc, .copyin_small             ! go to small copy
        ba,pt   %ncc, .copyin_more              ! otherwise go to large copy
.copyin_4:
        ! already checked longword, must be word aligned
        sethi   %hi(hw_copy_limit_4), %o3       ! Check copy limit
        ld      [%o3 + %lo(hw_copy_limit_4)], %o3
        bz,pn   %icc, .copyin_small             ! if zero, disable HW copy
        cmp     %o2, %o3                        ! if length <= limit
        bleu,pt %ncc, .copyin_small             ! go to small copy
        ba,pt   %ncc, .copyin_more              ! otherwise go to large copy
.copyin_8:
        sethi   %hi(hw_copy_limit_8), %o3       ! Check copy limit
        ld      [%o3 + %lo(hw_copy_limit_8)], %o3
        bz,pn   %icc, .copyin_small             ! if zero, disable HW copy
        cmp     %o2, %o3                        ! if length <= limit
        bleu,pt %ncc, .copyin_small             ! go to small copy
        ba,pt   %ncc, .copyin_more              ! otherwise go to large copy
        nop                             ! instruction alignment
                                        ! see discussion at start of file
.copyin_small:
        sethi   %hi(.sm_copyin_err), %o5        ! .sm_copyin_err is lofault
        or      %o5, %lo(.sm_copyin_err), %o5
        ldn     [THREAD_REG + T_LOFAULT], %o4   ! set/save t_lofault, no tramp
        membar  #Sync                           ! sync error barrier
        stn     %o5, [THREAD_REG + T_LOFAULT]
.sm_do_copyin:
        mov     %o0, SM_SAVE_SRC
        mov     %o1, SM_SAVE_DST
        cmp     %o2, SHORTCOPY          ! check for really short case
        bleu,pt %ncc, .ci_sm_left       !
        mov     %o2, SM_SAVE_COUNT
        cmp     %o2, CHKSIZE            ! check for medium length cases
        bgu,pn  %ncc, .ci_med           !
        or      %o0, %o1, %o3           ! prepare alignment check
        andcc   %o3, 0x3, %g0           ! test for alignment
        bz,pt   %ncc, .ci_sm_word       ! branch to word aligned case
.ci_sm_movebytes:
        sub     %o2, 3, %o2             ! adjust count to allow cc zero test
.ci_sm_notalign4:
        lduba   [%o0]ASI_USER, %o3      ! read byte
        subcc   %o2, 4, %o2             ! reduce count by 4
        stb     %o3, [%o1]              ! write byte
        add     %o0, 1, %o0             ! advance SRC by 1
        lduba   [%o0]ASI_USER, %o3      ! repeat for a total of 4 bytes
        add     %o0, 1, %o0             ! advance SRC by 1
        add     %o1, 4, %o1             ! advance DST by 4
        lduba   [%o0]ASI_USER, %o3
        add     %o0, 1, %o0             ! advance SRC by 1
        lduba   [%o0]ASI_USER, %o3
        add     %o0, 1, %o0             ! advance SRC by 1
        bgt,pt  %ncc, .ci_sm_notalign4  ! loop til 3 or fewer bytes remain
        add     %o2, 3, %o2             ! restore count
.ci_sm_left:
        bz,pt   %ncc, .ci_sm_exit
        lduba   [%o0]ASI_USER, %o3      ! load one byte
        deccc   %o2                     ! reduce count for cc test
        bz,pt   %ncc, .ci_sm_exit
        stb     %o3, [%o1]              ! store one byte
        lduba   [%o0]ASI_USER, %o3      ! load second byte
        bz,pt   %ncc, .ci_sm_exit
        stb     %o3, [%o1 + 1]          ! store second byte
        lduba   [%o0]ASI_USER, %o3      ! load third byte
        stb     %o3, [%o1 + 2]          ! store third byte
        membar  #Sync                   ! sync error barrier
        stn     %o4, [THREAD_REG + T_LOFAULT]   ! restore old t_lofault
        mov     %g0, %o0                ! return 0

.ci_sm_words:
        lduwa   [%o0]ASI_USER, %o3      ! read word
.ci_sm_wordx:
        subcc   %o2, 8, %o2             ! update count
        stw     %o3, [%o1]              ! write word
        add     %o0, 4, %o0             ! update SRC
        add     %o1, 8, %o1             ! update DST
        lduwa   [%o0]ASI_USER, %o3      ! read word
        add     %o0, 4, %o0             ! update SRC
        bgt,pt  %ncc, .ci_sm_words      ! loop til done
        stw     %o3, [%o1 - 4]          ! write word
        addcc   %o2, 7, %o2             ! restore count
        bz,pt   %ncc, .ci_sm_exit
        bz,pt   %ncc, .ci_sm_byte
.ci_sm_half:
        subcc   %o2, 2, %o2             ! reduce count by 2
        lduha   [%o0]ASI_USER, %o3      ! read half word
        add     %o0, 2, %o0             ! advance SRC by 2
        add     %o1, 2, %o1             ! advance DST by 2
        bgt,pt  %ncc, .ci_sm_half       ! loop til done
        sth     %o3, [%o1 - 2]          ! write half word
        addcc   %o2, 1, %o2             ! restore count
        bz,pt   %ncc, .ci_sm_exit
.ci_sm_byte:
        lduba   [%o0]ASI_USER, %o3
.ci_sm_exit:
        membar  #Sync                   ! sync error barrier
        stn     %o4, [THREAD_REG + T_LOFAULT]   ! restore old t_lofault
        mov     %g0, %o0                ! return 0
.ci_sm_word:
        subcc   %o2, 4, %o2             ! update count
        bgt,pt  %ncc, .ci_sm_wordx
        lduwa   [%o0]ASI_USER, %o3      ! read word
        addcc   %o2, 3, %o2             ! restore count
        bz,pt   %ncc, .ci_sm_exit
        stw     %o3, [%o1]              ! write word
        deccc   %o2                     ! reduce count for cc test
        lduba   [%o0]ASI_USER, %o3      ! load one byte
        bz,pt   %ncc, .ci_sm_exit
        stb     %o3, [%o1 + 4]          ! store one byte
        lduba   [%o0]ASI_USER, %o3      ! load second byte
        bz,pt   %ncc, .ci_sm_exit
        stb     %o3, [%o1 + 5]          ! store second byte
        lduba   [%o0]ASI_USER, %o3      ! load third byte
        stb     %o3, [%o1 + 6]          ! store third byte
        membar  #Sync                   ! sync error barrier
        stn     %o4, [THREAD_REG + T_LOFAULT]   ! restore old t_lofault
        mov     %g0, %o0                ! return 0

.ci_med:
        xor     %o0, %o1, %o3           ! setup alignment check
        bnz,pt  %ncc, .ci_sm_movebytes  ! unaligned
        bnz,pt  %ncc, .ci_med_half      ! halfword aligned
        bnz,pt  %ncc, .ci_med_word      ! word aligned
.ci_med_long:
        btst    3, %o0                  ! check for
        bz,pt   %ncc, .ci_med_long1     ! word alignment
.ci_med_long0:
        lduba   [%o0]ASI_USER, %o3      ! load one byte
        stb     %o3, [%o1]              ! store byte
        bnz,pt  %ncc, .ci_med_long0
.ci_med_long1:                          ! word aligned
        btst    7, %o0                  ! check for long word
        bz,pt   %ncc, .ci_med_long2
        lduwa   [%o0]ASI_USER, %o3      ! load word
        add     %o0, 4, %o0             ! advance SRC by 4
        stw     %o3, [%o1]              ! store word
        add     %o1, 4, %o1             ! advance DST by 4
        sub     %o2, 4, %o2             ! reduce count by 4

        ! Now long word aligned and have at least 32 bytes to move
.ci_med_long2:
        sub     %o2, 31, %o2            ! adjust count to allow cc zero test
.ci_med_lmove:
        ldxa    [%o0]ASI_USER, %o3      ! read long word
        subcc   %o2, 32, %o2            ! reduce count by 32
        stx     %o3, [%o1]              ! write long word
        add     %o0, 8, %o0             ! advance SRC by 8
        ldxa    [%o0]ASI_USER, %o3      ! repeat for a total of 4 long words
        add     %o0, 8, %o0             ! advance SRC by 8
        add     %o1, 32, %o1            ! advance DST by 32
        ldxa    [%o0]ASI_USER, %o3
        add     %o0, 8, %o0             ! advance SRC by 8
        ldxa    [%o0]ASI_USER, %o3
        add     %o0, 8, %o0             ! advance SRC by 8
        bgt,pt  %ncc, .ci_med_lmove     ! loop til 31 or fewer bytes left
        addcc   %o2, 24, %o2            ! restore count to long word offset
        ble,pt  %ncc, .ci_med_lextra    ! check for more long words to move
.ci_med_lword:
        ldxa    [%o0]ASI_USER, %o3      ! read long word
        subcc   %o2, 8, %o2             ! reduce count by 8
        stx     %o3, [%o1]              ! write long word
        add     %o0, 8, %o0             ! advance SRC by 8
        bgt,pt  %ncc, .ci_med_lword     ! loop til 7 or fewer bytes left
        add     %o1, 8, %o1             ! advance DST by 8
.ci_med_lextra:
        addcc   %o2, 7, %o2             ! restore rest of count
        bz,pt   %ncc, .ci_sm_exit       ! if zero, then done
        bz,pt   %ncc, .ci_sm_byte
        ba,pt   %ncc, .ci_sm_half
        nop                             ! instruction alignment
                                        ! see discussion at start of file
.ci_med_word:
        btst    3, %o0                  ! check for
        bz,pt   %ncc, .ci_med_word1     ! word alignment
.ci_med_word0:
        lduba   [%o0]ASI_USER, %o3      ! load one byte
        stb     %o3, [%o1]              ! store byte
        bnz,pt  %ncc, .ci_med_word0

        ! Now word aligned and have at least 36 bytes to move
.ci_med_word1:
        sub     %o2, 15, %o2            ! adjust count to allow cc zero test
.ci_med_wmove:
        lduwa   [%o0]ASI_USER, %o3      ! read word
        subcc   %o2, 16, %o2            ! reduce count by 16
        stw     %o3, [%o1]              ! write word
        add     %o0, 4, %o0             ! advance SRC by 4
        lduwa   [%o0]ASI_USER, %o3      ! repeat for a total of 4 words
        add     %o0, 4, %o0             ! advance SRC by 4
        add     %o1, 16, %o1            ! advance DST by 16
        lduwa   [%o0]ASI_USER, %o3
        add     %o0, 4, %o0             ! advance SRC by 4
        lduwa   [%o0]ASI_USER, %o3
        add     %o0, 4, %o0             ! advance SRC by 4
        bgt,pt  %ncc, .ci_med_wmove     ! loop til 15 or fewer bytes left
        addcc   %o2, 12, %o2            ! restore count to word offset
        ble,pt  %ncc, .ci_med_wextra    ! check for more words to move
.ci_med_word2:
        lduwa   [%o0]ASI_USER, %o3      ! read word
        subcc   %o2, 4, %o2             ! reduce count by 4
        stw     %o3, [%o1]              ! write word
        add     %o0, 4, %o0             ! advance SRC by 4
        bgt,pt  %ncc, .ci_med_word2     ! loop til 3 or fewer bytes left
        add     %o1, 4, %o1             ! advance DST by 4
.ci_med_wextra:
        addcc   %o2, 3, %o2             ! restore rest of count
        bz,pt   %ncc, .ci_sm_exit       ! if zero, then done
        bz,pt   %ncc, .ci_sm_byte
        ba,pt   %ncc, .ci_sm_half
        nop                             ! instruction alignment
                                        ! see discussion at start of file
.ci_med_half:
        btst    1, %o0                  ! check for
        bz,pt   %ncc, .ci_med_half1     ! half word alignment
        lduba   [%o0]ASI_USER, %o3      ! load one byte
        stb     %o3, [%o1]              ! store byte

        ! Now half word aligned and have at least 38 bytes to move
.ci_med_half1:
        sub     %o2, 7, %o2             ! adjust count to allow cc zero test
.ci_med_hmove:
        lduha   [%o0]ASI_USER, %o3      ! read half word
        subcc   %o2, 8, %o2             ! reduce count by 8
        sth     %o3, [%o1]              ! write half word
        add     %o0, 2, %o0             ! advance SRC by 2
        lduha   [%o0]ASI_USER, %o3      ! repeat for a total of 4 halfwords
        add     %o0, 2, %o0             ! advance SRC by 2
        add     %o1, 8, %o1             ! advance DST by 8
        lduha   [%o0]ASI_USER, %o3
        add     %o0, 2, %o0             ! advance SRC by 2
        lduha   [%o0]ASI_USER, %o3
        add     %o0, 2, %o0             ! advance SRC by 2
        bgt,pt  %ncc, .ci_med_hmove     ! loop til 7 or fewer bytes left
        addcc   %o2, 7, %o2             ! restore count
        bz,pt   %ncc, .ci_sm_exit
        bz,pt   %ncc, .ci_sm_byte
        ba,pt   %ncc, .ci_sm_half
.sm_copyin_err:
        stn     %o4, [THREAD_REG + T_LOFAULT]   ! restore old t_lofault
        mov     SM_SAVE_SRC, %o0
        mov     SM_SAVE_DST, %o1
        mov     SM_SAVE_COUNT, %o2
        ldn     [THREAD_REG + T_COPYOPS], %o3   ! check for copyop handler
        bz,pt   %ncc, 3f                        ! if not, return error
        ldn     [%o3 + CP_COPYIN], %o5          ! if handler, invoke it with
        jmp     %o5                             ! original arguments
3:
        or      %g0, -1, %o0                    ! return errno value
/*
 * The _more entry points are not intended to be used directly by
 * any caller from outside this file.  They are provided to allow
 * profiling and dtrace of the portions of the copy code that use
 * the floating point registers.
 * This entry is particularly important as DTRACE (at least as of
 * 4/2004) does not support leaf functions.
 */
.copyin_more:
        save    %sp, -SA(MINFRAME + HWCOPYFRAMESIZE), %sp
        set     .copyin_err, REAL_LOFAULT

/*
 * Copy ins that reach here are larger than VIS_COPY_THRESHOLD bytes
 */
.do_copyin:
        set     copyio_fault, %l7               ! copyio_fault is lofault val
        ldn     [THREAD_REG + T_LOFAULT], %l6   ! save existing handler
        membar  #Sync                           ! sync error barrier
        stn     %l7, [THREAD_REG + T_LOFAULT]   ! set t_lofault
        rd      %fprs, %o2              ! check for unused fp
        st      %o2, [%fp + STACK_BIAS - SAVED_FPRS_OFFSET]     ! save orig %fprs
        bz,a,pt %icc, .do_blockcopyin
        wr      %g0, FPRS_FEF, %fprs

        BST_FPQ2Q4_TOSTACK(%o2)

.do_blockcopyin:
        stx     %o2, [%fp + STACK_BIAS - SAVED_GSR_OFFSET]      ! save gsr
        or      %l6, FPUSED_FLAG, %l6
        andcc   DST, VIS_BLOCKSIZE - 1, TMP
        add     TMP, VIS_BLOCKSIZE, TMP

        ! TMP = bytes required to align DST on FP_BLOCK boundary
        ! Using SRC as a tmp here

        sub     CNT, TMP, CNT           ! adjust main count
        sub     TMP, 3, TMP             ! adjust for end of loop test
.ci_blkalign:
        lduba   [REALSRC]%asi, SRC      ! move 4 bytes per loop iteration
        lduba   [REALSRC + 1]%asi, SRC
        add     REALSRC, 4, REALSRC
        lduba   [REALSRC - 2]%asi, SRC
        lduba   [REALSRC - 1]%asi, SRC
        bgu,pt  %ncc, .ci_blkalign

        addcc   TMP, 3, TMP             ! restore count adjustment
        bz,pt   %ncc, 2f                ! no bytes left?
1:      lduba   [REALSRC]%asi, SRC

2:      andn    REALSRC, 0x7, SRC
        alignaddr REALSRC, %g0, %g0

        ! SRC - 8-byte aligned
        ! DST - 64-byte aligned
        prefetcha [SRC]%asi, #one_read
        prefetcha [SRC + (1 * VIS_BLOCKSIZE)]%asi, #one_read
        prefetcha [SRC + (2 * VIS_BLOCKSIZE)]%asi, #one_read
        prefetcha [SRC + (3 * VIS_BLOCKSIZE)]%asi, #one_read
        ldda    [SRC]%asi, %f16
#if CHEETAH_PREFETCH > 4
        prefetcha [SRC + (4 * VIS_BLOCKSIZE)]%asi, #one_read
#endif
        ldda    [SRC + 0x08]%asi, %f18
#if CHEETAH_PREFETCH > 5
        prefetcha [SRC + (5 * VIS_BLOCKSIZE)]%asi, #one_read
#endif
        ldda    [SRC + 0x10]%asi, %f20
#if CHEETAH_PREFETCH > 6
        prefetcha [SRC + (6 * VIS_BLOCKSIZE)]%asi, #one_read
#endif
        faligndata %f16, %f18, %f48
        ldda    [SRC + 0x18]%asi, %f22
#if CHEETAH_PREFETCH > 7
        prefetcha [SRC + (7 * VIS_BLOCKSIZE)]%asi, #one_read
#endif
        faligndata %f18, %f20, %f50
        ldda    [SRC + 0x20]%asi, %f24
        faligndata %f20, %f22, %f52
        ldda    [SRC + 0x28]%asi, %f26
        faligndata %f22, %f24, %f54
        ldda    [SRC + 0x30]%asi, %f28
        faligndata %f24, %f26, %f56
        ldda    [SRC + 0x38]%asi, %f30
        faligndata %f26, %f28, %f58
        ldda    [SRC + VIS_BLOCKSIZE]%asi, %f16
        sub     CNT, VIS_BLOCKSIZE, CNT
        add     SRC, VIS_BLOCKSIZE, SRC
        add     REALSRC, VIS_BLOCKSIZE, REALSRC
        ldda    [SRC + 0x08]%asi, %f18
        faligndata %f28, %f30, %f60
        ldda    [SRC + 0x10]%asi, %f20
        faligndata %f30, %f16, %f62
        stda    %f48, [DST]ASI_BLK_P
        ldda    [SRC + 0x18]%asi, %f22
        faligndata %f16, %f18, %f48
        ldda    [SRC + 0x20]%asi, %f24
        faligndata %f18, %f20, %f50
        ldda    [SRC + 0x28]%asi, %f26
        faligndata %f20, %f22, %f52
        ldda    [SRC + 0x30]%asi, %f28
        faligndata %f22, %f24, %f54
        ldda    [SRC + 0x38]%asi, %f30
        faligndata %f24, %f26, %f56
        sub     CNT, VIS_BLOCKSIZE, CNT
        ldda    [SRC + VIS_BLOCKSIZE]%asi, %f16
        faligndata %f26, %f28, %f58
        prefetcha [SRC + ((CHEETAH_PREFETCH) * VIS_BLOCKSIZE) + 8]%asi, #one_read
        add     DST, VIS_BLOCKSIZE, DST
        prefetcha [SRC + ((CHEETAH_2ND_PREFETCH) * VIS_BLOCKSIZE)]%asi, #one_read
        add     REALSRC, VIS_BLOCKSIZE, REALSRC
        cmp     CNT, VIS_BLOCKSIZE + 8
        add     SRC, VIS_BLOCKSIZE, SRC

        ! only if REALSRC & 0x7 is 0
        cmp     CNT, VIS_BLOCKSIZE
        andcc   REALSRC, 0x7, %g0

        faligndata %f28, %f30, %f60
        faligndata %f30, %f16, %f62
        stda    %f48, [DST]ASI_BLK_P
        add     DST, VIS_BLOCKSIZE, DST
        ldda    [SRC + 0x08]%asi, %f18
        ldda    [SRC + 0x10]%asi, %f20
        stda    %f48, [DST]ASI_BLK_P
        ldda    [SRC + 0x18]%asi, %f22
        ldda    [SRC + 0x20]%asi, %f24
        ldda    [SRC + 0x28]%asi, %f26
        ldda    [SRC + 0x30]%asi, %f28
        ldda    [SRC + 0x38]%asi, %f30
        sub     CNT, VIS_BLOCKSIZE, CNT
        add     DST, VIS_BLOCKSIZE, DST
        add     SRC, VIS_BLOCKSIZE, SRC
        add     REALSRC, VIS_BLOCKSIZE, REALSRC
        stda    %f48, [DST]ASI_BLK_P
        add     DST, VIS_BLOCKSIZE, DST

5:      lduba   [REALSRC]ASI_USER, TMP

        FPRAS_INTERVAL(FPRAS_COPYIN, 1, %l5, %o2, %o3, %o4, %o5, 8)
        FPRAS_REWRITE_TYPE1(1, %l5, %f48, %o2, 9)
        FPRAS_CHECK(FPRAS_COPYIN, %l5, 9)       ! lose outputs
        ldx     [%fp + STACK_BIAS - SAVED_GSR_OFFSET], %o2      ! restore gsr
        wr      %o2, 0, %gsr

        ld      [%fp + STACK_BIAS - SAVED_FPRS_OFFSET], %o3

        BLD_FPQ2Q4_FROMSTACK(%o2)

        wr      %o3, 0, %fprs           ! restore fprs

        wr      %o3, 0, %fprs           ! restore fprs

        membar  #Sync                   ! sync error barrier
        andn    %l6, FPUSED_FLAG, %l6
        stn     %l6, [THREAD_REG + T_LOFAULT]   ! restore old t_lofault
        FP_ALLOWMIGRATE(5, 6)

/*
 * We got here because of a fault during copyin.
 * Errno value is in ERRNO, but DDI/DKI says return -1 (sigh).
 */
.copyin_err:
        ldn     [THREAD_REG + T_COPYOPS], %o4   ! check for copyop handler
        bz,pt   %ncc, 2f                        ! if not, return error
        ldn     [%o4 + CP_COPYIN], %g2          ! if handler, invoke it with
        jmp     %g2                             ! original arguments
        restore %g0, 0, %g0                     ! dispose of copy window
2:
        restore %g0, -1, %o0                    ! return error value

        SET_SIZE(copyin_more)
/*
 * xcopyin(const void *uaddr, void *kaddr, size_t count)
 */
        ENTRY(xcopyin)
        cmp     %o2, VIS_COPY_THRESHOLD         ! check for leaf rtn case
        bleu,pt %ncc, .xcopyin_small            ! go to larger cases
        xor     %o0, %o1, %o3                   ! are src, dst alignable?
        bz,pt   %ncc, .xcopyin_8                ! check for longword alignment
        bz,pt   %ncc, .xcopyin_2                ! check for half-word
        sethi   %hi(hw_copy_limit_1), %o3       ! Check copy limit
        ld      [%o3 + %lo(hw_copy_limit_1)], %o3
        bz,pn   %icc, .xcopyin_small            ! if zero, disable HW copy
        cmp     %o2, %o3                        ! if length <= limit
        bleu,pt %ncc, .xcopyin_small            ! go to small copy
        ba,pt   %ncc, .xcopyin_more             ! otherwise go to large copy
.xcopyin_2:
        bz,pt   %ncc, .xcopyin_4                ! check for word alignment
        sethi   %hi(hw_copy_limit_2), %o3       ! Check copy limit
        ld      [%o3 + %lo(hw_copy_limit_2)], %o3
        bz,pn   %icc, .xcopyin_small            ! if zero, disable HW copy
        cmp     %o2, %o3                        ! if length <= limit
        bleu,pt %ncc, .xcopyin_small            ! go to small copy
        ba,pt   %ncc, .xcopyin_more             ! otherwise go to large copy
.xcopyin_4:
        ! already checked longword, must be word aligned
        sethi   %hi(hw_copy_limit_4), %o3       ! Check copy limit
        ld      [%o3 + %lo(hw_copy_limit_4)], %o3
        bz,pn   %icc, .xcopyin_small            ! if zero, disable HW copy
        cmp     %o2, %o3                        ! if length <= limit
        bleu,pt %ncc, .xcopyin_small            ! go to small copy
        ba,pt   %ncc, .xcopyin_more             ! otherwise go to large copy
.xcopyin_8:
        sethi   %hi(hw_copy_limit_8), %o3       ! Check copy limit
        ld      [%o3 + %lo(hw_copy_limit_8)], %o3
        bz,pn   %icc, .xcopyin_small            ! if zero, disable HW copy
        cmp     %o2, %o3                        ! if length <= limit
        bleu,pt %ncc, .xcopyin_small            ! go to small copy
        ba,pt   %ncc, .xcopyin_more             ! otherwise go to large copy
.xcopyin_small:
        sethi   %hi(.sm_xcopyin_err), %o5       ! .sm_xcopyin_err is lofault value
        or      %o5, %lo(.sm_xcopyin_err), %o5
        ldn     [THREAD_REG + T_LOFAULT], %o4   ! set/save t_lofault
        membar  #Sync                           ! sync error barrier
        ba,pt   %ncc, .sm_do_copyin             ! common code
        stn     %o5, [THREAD_REG + T_LOFAULT]

.xcopyin_more:
        save    %sp, -SA(MINFRAME + HWCOPYFRAMESIZE), %sp
        sethi   %hi(.xcopyin_err), REAL_LOFAULT ! .xcopyin_err is lofault value
        ba,pt   %ncc, .do_copyin
        or      REAL_LOFAULT, %lo(.xcopyin_err), REAL_LOFAULT
/*
 * We got here because of a fault during xcopyin.
 * Errno value is in ERRNO.
 */
.xcopyin_err:
        ldn     [THREAD_REG + T_COPYOPS], %o4   ! check for copyop handler
        bz,pt   %ncc, 2f                        ! if not, return error
        ldn     [%o4 + CP_XCOPYIN], %g2         ! if handler, invoke it with
        jmp     %g2                             ! original arguments
        restore %g0, 0, %g0                     ! dispose of copy window
2:
        restore ERRNO, 0, %o0                   ! return errno value
.sm_xcopyin_err:
        stn     %o4, [THREAD_REG + T_LOFAULT]   ! restore old t_lofault
        mov     SM_SAVE_SRC, %o0
        mov     SM_SAVE_DST, %o1
        mov     SM_SAVE_COUNT, %o2
        ldn     [THREAD_REG + T_COPYOPS], %o3   ! check for copyop handler
        bz,pt   %ncc, 3f                        ! if not, return error
        ldn     [%o3 + CP_XCOPYIN], %o5         ! if handler, invoke it with
        jmp     %o5                             ! original arguments
3:
        or      %g1, 0, %o0                     ! return errno value
/*
 * xcopyin_little(const void *uaddr, void *kaddr, size_t count)
 */
        ENTRY(xcopyin_little)
        sethi   %hi(.xcopyio_err), %o5
        or      %o5, %lo(.xcopyio_err), %o5
        ldn     [THREAD_REG + T_LOFAULT], %o4
        membar  #Sync                           ! sync error barrier
        stn     %o5, [THREAD_REG + T_LOFAULT]

        bz,pn   %ncc, 2f                ! check for zero bytes
        add     %o0, %o4, %o0           ! start w/last byte
        lduba   [%o0 + %o3]ASI_AIUSL, %o4
1:      stb     %o4, [%o1 + %o3]
        sub     %o0, 2, %o0             ! get next byte
        lduba   [%o0 + %o3]ASI_AIUSL, %o4

2:      membar  #Sync                   ! sync error barrier
        stn     %o5, [THREAD_REG + T_LOFAULT]   ! restore old t_lofault
        mov     %g0, %o0                ! return (0)

.xcopyio_err:
        membar  #Sync                   ! sync error barrier
        stn     %o5, [THREAD_REG + T_LOFAULT]   ! restore old t_lofault

        SET_SIZE(xcopyin_little)
/*
 * Copy a block of storage - must not overlap (from + len <= to).
 * No fault handler installed (to be called under on_fault())
 */

/*
 * copyin_noerr(const void *ufrom, void *kto, size_t count)
 */
        ENTRY(copyin_noerr)
        cmp     %o2, VIS_COPY_THRESHOLD         ! check for leaf rtn case
        bleu,pt %ncc, .copyin_ne_small          ! go to larger cases
        xor     %o0, %o1, %o3                   ! are src, dst alignable?
        bz,pt   %ncc, .copyin_ne_8              ! check for longword alignment
        bz,pt   %ncc, .copyin_ne_2              ! check for half-word
        sethi   %hi(hw_copy_limit_1), %o3       ! Check copy limit
        ld      [%o3 + %lo(hw_copy_limit_1)], %o3
        bz,pn   %icc, .copyin_ne_small          ! if zero, disable HW copy
        cmp     %o2, %o3                        ! if length <= limit
        bleu,pt %ncc, .copyin_ne_small          ! go to small copy
        ba,pt   %ncc, .copyin_noerr_more        ! otherwise go to large copy
.copyin_ne_2:
        bz,pt   %ncc, .copyin_ne_4              ! check for word alignment
        sethi   %hi(hw_copy_limit_2), %o3       ! Check copy limit
        ld      [%o3 + %lo(hw_copy_limit_2)], %o3
        bz,pn   %icc, .copyin_ne_small          ! if zero, disable HW copy
        cmp     %o2, %o3                        ! if length <= limit
        bleu,pt %ncc, .copyin_ne_small          ! go to small copy
        ba,pt   %ncc, .copyin_noerr_more        ! otherwise go to large copy
.copyin_ne_4:
        ! already checked longword, must be word aligned
        sethi   %hi(hw_copy_limit_4), %o3       ! Check copy limit
        ld      [%o3 + %lo(hw_copy_limit_4)], %o3
        bz,pn   %icc, .copyin_ne_small          ! if zero, disable HW copy
        cmp     %o2, %o3                        ! if length <= limit
        bleu,pt %ncc, .copyin_ne_small          ! go to small copy
        ba,pt   %ncc, .copyin_noerr_more        ! otherwise go to large copy
.copyin_ne_8:
        sethi   %hi(hw_copy_limit_8), %o3       ! Check copy limit
        ld      [%o3 + %lo(hw_copy_limit_8)], %o3
        bz,pn   %icc, .copyin_ne_small          ! if zero, disable HW copy
        cmp     %o2, %o3                        ! if length <= limit
        bleu,pt %ncc, .copyin_ne_small          ! go to small copy
        ba,pt   %ncc, .copyin_noerr_more        ! otherwise go to large copy
.copyin_ne_small:
        ldn     [THREAD_REG + T_LOFAULT], %o4
        bz,pn   %ncc, .sm_do_copyin
        sethi   %hi(.sm_copyio_noerr), %o5
        or      %o5, %lo(.sm_copyio_noerr), %o5
        membar  #Sync                           ! sync error barrier
        ba,pt   %ncc, .sm_do_copyin
        stn     %o5, [THREAD_REG + T_LOFAULT]   ! set/save t_lofault

.copyin_noerr_more:
        save    %sp, -SA(MINFRAME + HWCOPYFRAMESIZE), %sp
        sethi   %hi(.copyio_noerr), REAL_LOFAULT
        ba,pt   %ncc, .do_copyin
        or      REAL_LOFAULT, %lo(.copyio_noerr), REAL_LOFAULT

        stn     %o4, [THREAD_REG + T_LOFAULT]   ! restore t_lofault

        SET_SIZE(copyin_noerr)
/*
 * Copy a block of storage - must not overlap (from + len <= to).
 * No fault handler installed (to be called under on_fault())
 */

/*
 * copyout_noerr(const void *kfrom, void *uto, size_t count)
 */
        ENTRY(copyout_noerr)
        cmp     %o2, VIS_COPY_THRESHOLD         ! check for leaf rtn case
        bleu,pt %ncc, .copyout_ne_small         ! go to larger cases
        xor     %o0, %o1, %o3                   ! are src, dst alignable?
        bz,pt   %ncc, .copyout_ne_8             ! check for longword alignment
        bz,pt   %ncc, .copyout_ne_2             ! check for half-word
        sethi   %hi(hw_copy_limit_1), %o3       ! Check copy limit
        ld      [%o3 + %lo(hw_copy_limit_1)], %o3
        bz,pn   %icc, .copyout_ne_small         ! if zero, disable HW copy
        cmp     %o2, %o3                        ! if length <= limit
        bleu,pt %ncc, .copyout_ne_small         ! go to small copy
        ba,pt   %ncc, .copyout_noerr_more       ! otherwise go to large copy
.copyout_ne_2:
        bz,pt   %ncc, .copyout_ne_4             ! check for word alignment
        sethi   %hi(hw_copy_limit_2), %o3       ! Check copy limit
        ld      [%o3 + %lo(hw_copy_limit_2)], %o3
        bz,pn   %icc, .copyout_ne_small         ! if zero, disable HW copy
        cmp     %o2, %o3                        ! if length <= limit
        bleu,pt %ncc, .copyout_ne_small         ! go to small copy
        ba,pt   %ncc, .copyout_noerr_more       ! otherwise go to large copy
.copyout_ne_4:
        ! already checked longword, must be word aligned
        sethi   %hi(hw_copy_limit_4), %o3       ! Check copy limit
        ld      [%o3 + %lo(hw_copy_limit_4)], %o3
        bz,pn   %icc, .copyout_ne_small         ! if zero, disable HW copy
        cmp     %o2, %o3                        ! if length <= limit
        bleu,pt %ncc, .copyout_ne_small         ! go to small copy
        ba,pt   %ncc, .copyout_noerr_more       ! otherwise go to large copy
.copyout_ne_8:
        sethi   %hi(hw_copy_limit_8), %o3       ! Check copy limit
        ld      [%o3 + %lo(hw_copy_limit_8)], %o3
        bz,pn   %icc, .copyout_ne_small         ! if zero, disable HW copy
        cmp     %o2, %o3                        ! if length <= limit
        bleu,pt %ncc, .copyout_ne_small         ! go to small copy
        ba,pt   %ncc, .copyout_noerr_more       ! otherwise go to large copy
.copyout_ne_small:
        ldn     [THREAD_REG + T_LOFAULT], %o4
        bz,pn   %ncc, .sm_do_copyout
        sethi   %hi(.sm_copyio_noerr), %o5
        or      %o5, %lo(.sm_copyio_noerr), %o5
        membar  #Sync                           ! sync error barrier
        ba,pt   %ncc, .sm_do_copyout
        stn     %o5, [THREAD_REG + T_LOFAULT]   ! set/save t_lofault

.copyout_noerr_more:
        save    %sp, -SA(MINFRAME + HWCOPYFRAMESIZE), %sp
        sethi   %hi(.copyio_noerr), REAL_LOFAULT
        ba,pt   %ncc, .do_copyout
        or      REAL_LOFAULT, %lo(.copyio_noerr), REAL_LOFAULT

        SET_SIZE(copyout_noerr)
/*
 * hwblkclr - clears block-aligned, block-multiple-sized regions that are
 * longer than 256 bytes in length using spitfire's block stores.  If
 * the criteria for using this routine are not met then it calls bzero
 * and returns 1.  Otherwise 0 is returned indicating success.
 * Caller is responsible for ensuring use_hw_bzero is true and that
 * kpreempt_disable() has been called.
 */
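/*
 * Entry criteria in C form (an illustrative sketch of the checks below,
 * not a separate implementation):
 *
 *	int hwblkclr(void *addr, size_t len)
 *	{
 *		if (((uintptr_t)addr & (VIS_BLOCKSIZE - 1)) != 0 ||
 *		    len < 256 || (len & (VIS_BLOCKSIZE - 1)) != 0) {
 *			bzero(addr, len);
 *			return (1);	// punted to bzero
 *		}
 *		// block-store loop clears VIS_BLOCKSIZE bytes per stda
 *		return (0);
 *	}
 */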
/*
 * hwblkclr(void *addr, size_t len)
 */

        ! %i0 - start address
        ! %i1 - length of region (multiple of 64)
        ! %l1 - pointer to saved %d0 block
        ! %l2 - saved curthread->t_lwp

        ENTRY(hwblkclr)
        ! get another window w/space for one aligned block of saved fpregs
        save    %sp, -SA(MINFRAME + 2*VIS_BLOCKSIZE), %sp

        ! Must be block-aligned
        andcc   %i0, (VIS_BLOCKSIZE-1), %g0

        ! ... and must be 256 bytes or more

        ! ... and length must be a multiple of VIS_BLOCKSIZE
        andcc   %i1, (VIS_BLOCKSIZE-1), %g0
1:      ! punt, call bzero but notify the caller that bzero was used
        restore %g0, 1, %o0             ! return (1) - did not use block operations

2:      rd      %fprs, %l0              ! check for unused fp

        ! save in-use fpregs on stack
        add     %fp, STACK_BIAS - 65, %l1
        and     %l1, -VIS_BLOCKSIZE, %l1
        stda    %d0, [%l1]ASI_BLK_P

1:      membar  #StoreStore|#StoreLoad|#LoadStore
        wr      %g0, FPRS_FEF, %fprs
        wr      %g0, ASI_BLK_P, %asi

        ba,pt   %ncc, .pz_doblock
        ! stda  %d0, [%i0 + 192]%asi    ! in dly slot of branch that got us here
        stda    %d0, [%i0 + 128]%asi
        stda    %d0, [%i0 + 64]%asi

        bgeu,a  %ncc, .pz_blkstart
        stda    %d0, [%i0 + 192]%asi

        blu     %ncc, .pz_finish
        andn    %i1, (64-1), %i3
        srl     %i3, 4, %i2             ! using blocks, 1 instr / 16 words

        wr      %l0, 0, %fprs           ! restore fprs

        ! restore fpregs from stack
        ldda    [%l1]ASI_BLK_P, %d0

        wr      %l0, 0, %fprs           ! restore fprs

        restore %g0, 0, %o0             ! return (bzero or not)
/*
 * hw_pa_bcopy32(uint64_t src, uint64_t dst)
 *
 * Copy 32 bytes of data from src (%o0) to dst (%o1)
 * using physical addresses.
 */
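/*
 * Rough C equivalent (illustrative only; ldphys/stphys are hypothetical
 * stand-ins for the ldxa/stxa ASI_MEM physical accesses below):
 *
 *	pstate = current_pstate();
 *	set_pstate(pstate & ~PSTATE_IE);	// block interrupts
 *	for (i = 0; i < 4; i++)
 *		tmp[i] = ldphys(src + 8 * i);
 *	dcache_line_inval(dst);			// stxa %g0 ... ASI_DC_INVAL
 *	for (i = 0; i < 4; i++)
 *		stphys(dst + 8 * i, tmp[i]);
 *	set_pstate(pstate);			// restore interrupt state
 */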
        ENTRY_NP(hw_pa_bcopy32)
        rdpr    %pstate, %g1
        andn    %g1, PSTATE_IE, %g2
        wrpr    %g0, %g2, %pstate

        ldxa    [%o0]ASI_MEM, %o2
        ldxa    [%o0]ASI_MEM, %o3
        ldxa    [%o0]ASI_MEM, %o4
        ldxa    [%o0]ASI_MEM, %o5

        stxa    %g0, [%o1]ASI_DC_INVAL
        stxa    %o2, [%o1]ASI_MEM
        stxa    %o3, [%o1]ASI_MEM
        stxa    %o4, [%o1]ASI_MEM
        stxa    %o5, [%o1]ASI_MEM

        wrpr    %g0, %g1, %pstate

        SET_SIZE(hw_pa_bcopy32)
int use_hw_bcopy = 1;
int use_hw_bzero = 1;
uint_t hw_copy_limit_1 = 0;
uint_t hw_copy_limit_2 = 0;
uint_t hw_copy_limit_4 = 0;
uint_t hw_copy_limit_8 = 0;
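/*
 * These defaults leave every hw_copy_limit_N at zero, which the dispatch
 * code above treats as "HW copy disabled" for that alignment class.  They
 * are ordinary patchable kernel globals; an illustrative /etc/system
 * setting (an assumption about tuning practice, not a recommendation):
 *
 *	set hw_copy_limit_8 = 0x400
 */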
        DGDEF(hw_copy_limit_1)
        DGDEF(hw_copy_limit_2)
        DGDEF(hw_copy_limit_4)
        DGDEF(hw_copy_limit_8)