/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2008-2017, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */


#include "IORequest.h"

#include <string.h>

#include <arch/debug.h>
#include <heap.h>
#include <kernel.h>
#include <team.h>
#include <thread.h>
#include <util/AutoLock.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>

#include "dma_resources.h"


//#define TRACE_IO_REQUEST
#ifdef TRACE_IO_REQUEST
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif

// partial I/O operation phases
enum {
	PHASE_READ_BEGIN	= 0,
	PHASE_READ_END		= 1,
	PHASE_DO_ALL		= 2
};
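
// A block-unaligned write is carried out in up to three phases: the partial
// begin block is read and patched first (PHASE_READ_BEGIN), an optional
// partial end block is handled likewise (PHASE_READ_END), and finally the
// whole adjusted range is transferred (PHASE_DO_ALL).
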
struct virtual_vec_cookie {
	uint32			vec_index;
	generic_size_t	vec_offset;
	area_id			mapped_area;
	void*			physical_page_handle;
	addr_t			virtual_address;

	virtual_vec_cookie()
		:
		vec_index(0),
		vec_offset(0),
		mapped_area(-1),
		physical_page_handle(NULL),
		virtual_address((addr_t)-1)
	{
	}

	void PutPhysicalPageIfNeeded()
	{
		// unmap the page mapped by a previous GetNextVirtualVec() round,
		// if any
		if (virtual_address != (addr_t)-1) {
			vm_put_physical_page(virtual_address, physical_page_handle);
			virtual_address = (addr_t)-1;
		}
	}
};

IORequestChunk::IORequestChunk()
	:
	fParent(NULL),
	fStatus(1)
{
}

IORequestChunk::~IORequestChunk()
{
}

/* static */ IOBuffer*
IOBuffer::Create(uint32 count, bool vip)
{
	size_t size = sizeof(IOBuffer) + sizeof(generic_io_vec) * (count - 1);
	IOBuffer* buffer
		= (IOBuffer*)(malloc_etc(size, vip ? HEAP_PRIORITY_VIP : 0));
	if (buffer == NULL)
		return NULL;

	buffer->fCapacity = count;
	buffer->fVecCount = 0;
	buffer->fUser = false;
	buffer->fPhysical = false;
	buffer->fVIP = vip;
	buffer->fMemoryLocked = false;

	return buffer;
}

void
IOBuffer::Delete()
{
	free_etc(this, fVIP ? HEAP_PRIORITY_VIP : 0);
}

void
IOBuffer::SetVecs(generic_size_t firstVecOffset, const generic_io_vec* vecs,
	uint32 count, generic_size_t length, uint32 flags)
{
	memcpy(fVecs, vecs, sizeof(generic_io_vec) * count);

	if (count > 0 && firstVecOffset > 0) {
		fVecs[0].base += firstVecOffset;
		fVecs[0].length -= firstVecOffset;
	}

	fVecCount = count;
	fLength = length;
	fPhysical = (flags & B_PHYSICAL_IO_REQUEST) != 0;
	fUser = !fPhysical && IS_USER_ADDRESS(vecs[0].base);
}

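/*!	Returns the next vec of the buffer as a virtual iovec in \a vector,
	mapping physical memory page by page if necessary. Pass \c NULL as
	\a _cookie on the first call; release the cookie with
	FreeVirtualVecCookie() when done iterating.
*/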
status_t
IOBuffer::GetNextVirtualVec(void*& _cookie, iovec& vector)
{
	virtual_vec_cookie* cookie = (virtual_vec_cookie*)_cookie;
	if (cookie == NULL) {
		cookie = new(malloc_flags(fVIP ? HEAP_PRIORITY_VIP : 0))
			virtual_vec_cookie;
		if (cookie == NULL)
			return B_NO_MEMORY;

		_cookie = cookie;
	}

	// recycle a potential previously mapped page
	cookie->PutPhysicalPageIfNeeded();

	if (cookie->vec_index >= fVecCount)
		return B_BAD_INDEX;

	if (!fPhysical) {
		vector.iov_base = (void*)(addr_t)fVecs[cookie->vec_index].base;
		vector.iov_len = fVecs[cookie->vec_index++].length;
		return B_OK;
	}

	if (cookie->vec_index == 0
		&& (fVecCount > 1 || fVecs[0].length > B_PAGE_SIZE)) {
		void* mappedAddress;
		addr_t mappedSize;

		// TODO: This is a potential violation of the VIP requirement, since
		// vm_map_physical_memory_vecs() allocates memory without special
		// flags!
		cookie->mapped_area = vm_map_physical_memory_vecs(
			VMAddressSpace::KernelID(), "io buffer mapped physical vecs",
			&mappedAddress, B_ANY_KERNEL_ADDRESS, &mappedSize,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, fVecs, fVecCount);

		if (cookie->mapped_area >= 0) {
			vector.iov_base = mappedAddress;
			vector.iov_len = mappedSize;
			return B_OK;
		}

		ktrace_printf("failed to map area: %s\n",
			strerror(cookie->mapped_area));
	}

	// fallback to page wise mapping
	generic_io_vec& currentVec = fVecs[cookie->vec_index];
	generic_addr_t address = currentVec.base + cookie->vec_offset;
	size_t pageOffset = address % B_PAGE_SIZE;

	// TODO: This is a potential violation of the VIP requirement, since
	// vm_get_physical_page() may allocate memory without special flags!
	status_t result = vm_get_physical_page(address - pageOffset,
		&cookie->virtual_address, &cookie->physical_page_handle);
	if (result != B_OK)
		return result;

	generic_size_t length = min_c(currentVec.length - cookie->vec_offset,
		B_PAGE_SIZE - pageOffset);

	vector.iov_base = (void*)(cookie->virtual_address + pageOffset);
	vector.iov_len = length;

	cookie->vec_offset += length;
	if (cookie->vec_offset >= currentVec.length) {
		cookie->vec_index++;
		cookie->vec_offset = 0;
	}

	return B_OK;
}

void
IOBuffer::FreeVirtualVecCookie(void* _cookie)
{
	virtual_vec_cookie* cookie = (virtual_vec_cookie*)_cookie;
	if (cookie->mapped_area >= 0)
		delete_area(cookie->mapped_area);

	cookie->PutPhysicalPageIfNeeded();

	free_etc(cookie, fVIP ? HEAP_PRIORITY_VIP : 0);
}

status_t
IOBuffer::LockMemory(team_id team, bool isWrite)
{
	if (fMemoryLocked) {
		panic("memory already locked!");
		return B_BAD_VALUE;
	}

	for (uint32 i = 0; i < fVecCount; i++) {
		status_t status = lock_memory_etc(team, (void*)(addr_t)fVecs[i].base,
			fVecs[i].length, isWrite ? 0 : B_READ_DEVICE);
		if (status != B_OK) {
			_UnlockMemory(team, i, isWrite);
			return status;
		}
	}

	fMemoryLocked = true;
	return B_OK;
}

void
IOBuffer::_UnlockMemory(team_id team, size_t count, bool isWrite)
{
	for (uint32 i = 0; i < count; i++) {
		unlock_memory_etc(team, (void*)(addr_t)fVecs[i].base, fVecs[i].length,
			isWrite ? 0 : B_READ_DEVICE);
	}
}

void
IOBuffer::UnlockMemory(team_id team, bool isWrite)
{
	if (!fMemoryLocked) {
		panic("memory not locked");
		return;
	}

	_UnlockMemory(team, fVecCount, isWrite);
	fMemoryLocked = false;
}

void
IOBuffer::Dump() const
{
	kprintf("IOBuffer at %p\n", this);

	kprintf("  origin:   %s\n", fUser ? "user" : "kernel");
	kprintf("  kind:     %s\n", fPhysical ? "physical" : "virtual");
	kprintf("  length:   %" B_PRIuGENADDR "\n", fLength);
	kprintf("  capacity: %" B_PRIuSIZE "\n", fCapacity);
	kprintf("  vecs:     %" B_PRIuSIZE "\n", fVecCount);

	for (uint32 i = 0; i < fVecCount; i++) {
		kprintf("    [%" B_PRIu32 "] %#" B_PRIxGENADDR ", %" B_PRIuGENADDR "\n",
			i, fVecs[i].base, fVecs[i].length);
	}
}

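/*!	Finishes the current phase of this operation. Returns \c true if the
	operation is complete, or \c false if the next phase has been prepared
	and another transfer still needs to be carried out.
*/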
bool
IOOperation::Finish()
{
	TRACE("IOOperation::Finish()\n");

	if (fStatus == B_OK) {
		if (fParent->IsWrite()) {
			TRACE("  is write\n");
			if (fPhase == PHASE_READ_BEGIN) {
				TRACE("  phase read begin\n");
				// repair phase adjusted vec
				fDMABuffer->VecAt(fSavedVecIndex).length = fSavedVecLength;

				// partial write: copy partial begin to bounce buffer
				bool skipReadEndPhase;
				status_t error = _CopyPartialBegin(true, skipReadEndPhase);
				if (error == B_OK) {
					// We're done with the first phase only (read in begin).
					// Get ready for next phase...
					fPhase = HasPartialEnd() && !skipReadEndPhase
						? PHASE_READ_END : PHASE_DO_ALL;
					_PrepareVecs();
					ResetStatus();
						// TODO: Is there a race condition, if the request is
						// aborted at the same time?
					return false;
				}

				SetStatus(error);
			} else if (fPhase == PHASE_READ_END) {
				TRACE("  phase read end\n");
				// repair phase adjusted vec
				generic_io_vec& vec = fDMABuffer->VecAt(fSavedVecIndex);
				vec.base += vec.length - fSavedVecLength;
				vec.length = fSavedVecLength;

				// partial write: copy partial end to bounce buffer
				status_t error = _CopyPartialEnd(true);
				if (error == B_OK) {
					// We're done with the second phase only (read in end).
					// Get ready for next phase...
					fPhase = PHASE_DO_ALL;
					ResetStatus();
						// TODO: Is there a race condition, if the request is
						// aborted at the same time?
					return false;
				}

				SetStatus(error);
			}
		}
	}

	if (fParent->IsRead() && UsesBounceBuffer()) {
		TRACE("  read with bounce buffer\n");
		// copy the bounce buffer segments to the final location
		uint8* bounceBuffer = (uint8*)fDMABuffer->BounceBufferAddress();
		phys_addr_t bounceBufferStart
			= fDMABuffer->PhysicalBounceBufferAddress();
		phys_addr_t bounceBufferEnd = bounceBufferStart
			+ fDMABuffer->BounceBufferSize();

		const generic_io_vec* vecs = fDMABuffer->Vecs();
		uint32 vecCount = fDMABuffer->VecCount();

		status_t error = B_OK;

		// We iterate through the vecs we have read, moving offset (the device
		// offset) as we go. If [offset, offset + vec.length) intersects with
		// [startOffset, endOffset) we copy to the final location.
		off_t offset = fOffset;
		const off_t startOffset = fOriginalOffset;
		const off_t endOffset = fOriginalOffset + fOriginalLength;

		for (uint32 i = 0; error == B_OK && i < vecCount; i++) {
			const generic_io_vec& vec = vecs[i];
			generic_addr_t base = vec.base;
			generic_size_t length = vec.length;

			if (offset < startOffset) {
				// If the complete vector is before the start offset, skip it.
				if (offset + (off_t)length <= startOffset) {
					offset += length;
					continue;
				}

				// The vector starts before the start offset, but intersects
				// with it. Skip the part we aren't interested in.
				generic_size_t diff = startOffset - offset;
				offset += diff;
				base += diff;
				length -= diff;
			}

			if (offset + (off_t)length > endOffset) {
				// If we're already beyond the end offset, we're done.
				if (offset >= endOffset)
					break;

				// The vector extends beyond the end offset -- cut it.
				length = endOffset - offset;
			}

			if (base >= bounceBufferStart && base < bounceBufferEnd) {
				error = fParent->CopyData(
					bounceBuffer + (base - bounceBufferStart), offset, length);
			}

			offset += length;
		}

		if (error != B_OK)
			SetStatus(error);
	}

	return true;
}

/*!	Note: SetPartial() must be called first!
*/
status_t
IOOperation::Prepare(IORequest* request)
{
	if (fParent != NULL)
		fParent->RemoveOperation(this);

	fParent = request;

	fTransferredBytes = 0;

	// set initial phase
	fPhase = PHASE_DO_ALL;
	if (fParent->IsWrite()) {
		// Copy data to bounce buffer segments, save the partial begin/end vec,
		// which will be copied after their respective read phase.
		if (UsesBounceBuffer()) {
			TRACE("  write with bounce buffer\n");
			uint8* bounceBuffer = (uint8*)fDMABuffer->BounceBufferAddress();
			phys_addr_t bounceBufferStart
				= fDMABuffer->PhysicalBounceBufferAddress();
			phys_addr_t bounceBufferEnd = bounceBufferStart
				+ fDMABuffer->BounceBufferSize();

			const generic_io_vec* vecs = fDMABuffer->Vecs();
			uint32 vecCount = fDMABuffer->VecCount();
			generic_size_t vecOffset = 0;
			uint32 i = 0;

			off_t offset = fOffset;
			off_t endOffset = fOffset + fLength;

			if (HasPartialBegin()) {
				// skip first block
				generic_size_t toSkip = fBlockSize;
				while (toSkip > 0) {
					if (vecs[i].length <= toSkip) {
						toSkip -= vecs[i].length;
						i++;
					} else {
						vecOffset = toSkip;
						break;
					}
				}

				offset += fBlockSize;
			}

			if (HasPartialEnd()) {
				// skip last block
				generic_size_t toSkip = fBlockSize;
				while (toSkip > 0) {
					if (vecs[vecCount - 1].length <= toSkip) {
						toSkip -= vecs[vecCount - 1].length;
						vecCount--;
					} else
						break;
				}

				endOffset -= fBlockSize;
			}

			for (; i < vecCount; i++) {
				const generic_io_vec& vec = vecs[i];
				generic_addr_t base = vec.base + vecOffset;
				generic_size_t length = vec.length - vecOffset;
				vecOffset = 0;

				if (base >= bounceBufferStart && base < bounceBufferEnd) {
					if (offset + (off_t)length > endOffset)
						length = endOffset - offset;
					status_t error = fParent->CopyData(offset,
						bounceBuffer + (base - bounceBufferStart), length);
					if (error != B_OK)
						return error;
				}

				offset += length;
			}
		}

		if (HasPartialBegin())
			fPhase = PHASE_READ_BEGIN;
		else if (HasPartialEnd())
			fPhase = PHASE_READ_END;

		_PrepareVecs();
	}

	ResetStatus();

	if (fParent != NULL)
		fParent->AddOperation(this);

	return B_OK;
}

void
IOOperation::SetOriginalRange(off_t offset, generic_size_t length)
{
	fOriginalOffset = fOffset = offset;
	fOriginalLength = fLength = length;
}

void
IOOperation::SetRange(off_t offset, generic_size_t length)
{
	fOffset = offset;
	fLength = length;
}

off_t
IOOperation::Offset() const
{
	return fPhase == PHASE_READ_END ? fOffset + fLength - fBlockSize : fOffset;
}

generic_size_t
IOOperation::Length() const
{
	return fPhase == PHASE_DO_ALL ? fLength : fBlockSize;
}

generic_io_vec*
IOOperation::Vecs() const
{
	switch (fPhase) {
		case PHASE_READ_END:
			return fDMABuffer->Vecs() + fSavedVecIndex;
		case PHASE_READ_BEGIN:
		case PHASE_DO_ALL:
		default:
			return fDMABuffer->Vecs();
	}
}

uint32
IOOperation::VecCount() const
{
	switch (fPhase) {
		case PHASE_READ_BEGIN:
			return fSavedVecIndex + 1;
		case PHASE_READ_END:
			return fDMABuffer->VecCount() - fSavedVecIndex;
		case PHASE_DO_ALL:
		default:
			return fDMABuffer->VecCount();
	}
}

void
IOOperation::SetPartial(bool partialBegin, bool partialEnd)
{
	TRACE("partial begin %d, end %d\n", partialBegin, partialEnd);

	fPartialBegin = partialBegin;
	fPartialEnd = partialEnd;
}

bool
IOOperation::IsWrite() const
{
	return fParent->IsWrite() && fPhase == PHASE_DO_ALL;
}

bool
IOOperation::IsRead() const
{
	return fParent->IsRead();
}

void
IOOperation::_PrepareVecs()
{
	// we need to prepare the vecs for consumption by the drivers
	if (fPhase == PHASE_READ_BEGIN) {
		generic_io_vec* vecs = fDMABuffer->Vecs();
		uint32 vecCount = fDMABuffer->VecCount();
		generic_size_t vecLength = fBlockSize;
		for (uint32 i = 0; i < vecCount; i++) {
			generic_io_vec& vec = vecs[i];
			if (vec.length >= vecLength) {
				// truncate this vec to the partial begin block and remember
				// the original length for the repair in Finish()
				fSavedVecIndex = i;
				fSavedVecLength = vec.length;
				vec.length = vecLength;
				break;
			}
			vecLength -= vec.length;
		}
	} else if (fPhase == PHASE_READ_END) {
		generic_io_vec* vecs = fDMABuffer->Vecs();
		uint32 vecCount = fDMABuffer->VecCount();
		generic_size_t vecLength = fBlockSize;
		for (int32 i = vecCount - 1; i >= 0; i--) {
			generic_io_vec& vec = vecs[i];
			if (vec.length >= vecLength) {
				// adjust this vec to the partial end block and remember
				// the original length for the repair in Finish()
				fSavedVecIndex = i;
				fSavedVecLength = vec.length;
				vec.base += vec.length - vecLength;
				vec.length = vecLength;
				break;
			}
			vecLength -= vec.length;
		}
	}
}

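/*!	Copies the partial begin block between the original request and the
	bounce buffer. Sets \a singleBlockOnly if the original range ends within
	this first block, in which case the read-end phase can be skipped.
*/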
status_t
IOOperation::_CopyPartialBegin(bool isWrite, bool& singleBlockOnly)
{
	generic_size_t relativeOffset = OriginalOffset() - fOffset;
	generic_size_t length = fBlockSize - relativeOffset;

	singleBlockOnly = length >= OriginalLength();
	if (singleBlockOnly)
		length = OriginalLength();

	TRACE("_CopyPartialBegin(%s, single only %d)\n",
		isWrite ? "write" : "read", singleBlockOnly);

	if (isWrite) {
		return fParent->CopyData(OriginalOffset(),
			(uint8*)fDMABuffer->BounceBufferAddress() + relativeOffset, length);
	}

	return fParent->CopyData(
		(uint8*)fDMABuffer->BounceBufferAddress() + relativeOffset,
		OriginalOffset(), length);
}

status_t
IOOperation::_CopyPartialEnd(bool isWrite)
{
	TRACE("_CopyPartialEnd(%s)\n", isWrite ? "write" : "read");

	const generic_io_vec& lastVec
		= fDMABuffer->VecAt(fDMABuffer->VecCount() - 1);
	off_t lastVecPos = fOffset + fLength - fBlockSize;
	uint8* base = (uint8*)fDMABuffer->BounceBufferAddress()
		+ (lastVec.base + lastVec.length - fBlockSize
		- fDMABuffer->PhysicalBounceBufferAddress());
		// NOTE: this won't work if we don't use the bounce buffer contiguously
		// (because of boundary alignments).
	generic_size_t length = OriginalOffset() + OriginalLength() - lastVecPos;

	if (isWrite)
		return fParent->CopyData(lastVecPos, base, length);

	return fParent->CopyData(base, lastVecPos, length);
}

void
IOOperation::Dump() const
{
	kprintf("io_operation at %p\n", this);

	kprintf("  parent:           %p\n", fParent);
	kprintf("  status:           %s\n", strerror(fStatus));
	kprintf("  dma buffer:       %p\n", fDMABuffer);
	kprintf("  offset:           %-8" B_PRIdOFF " (original: %" B_PRIdOFF ")\n",
		fOffset, fOriginalOffset);
	kprintf("  length:           %-8" B_PRIuGENADDR " (original: %"
		B_PRIuGENADDR ")\n", fLength, fOriginalLength);
	kprintf("  transferred:      %" B_PRIuGENADDR "\n", fTransferredBytes);
	kprintf("  block size:       %" B_PRIuGENADDR "\n", fBlockSize);
	kprintf("  saved vec index:  %u\n", fSavedVecIndex);
	kprintf("  saved vec length: %u\n", fSavedVecLength);
	kprintf("  r/w:              %s\n", IsWrite() ? "write" : "read");
	kprintf("  phase:            %s\n", fPhase == PHASE_READ_BEGIN
		? "read begin" : fPhase == PHASE_READ_END ? "read end"
		: fPhase == PHASE_DO_ALL ? "do all" : "unknown");
	kprintf("  partial begin:    %s\n", fPartialBegin ? "yes" : "no");
	kprintf("  partial end:      %s\n", fPartialEnd ? "yes" : "no");
	kprintf("  bounce buffer:    %s\n", fUsesBounceBuffer ? "yes" : "no");

	set_debug_variable("_parent", (addr_t)fParent);
	set_debug_variable("_buffer", (addr_t)fDMABuffer);
}

IORequest::IORequest()
	:
	fIsNotified(false),
	fFinishedCallback(NULL),
	fFinishedCookie(NULL),
	fIterationCallback(NULL),
	fIterationCookie(NULL)
{
	mutex_init(&fLock, "I/O request lock");
	fFinishedCondition.Init(this, "I/O request finished");
}

IORequest::~IORequest()
{
	mutex_lock(&fLock);
	DeleteSubRequests();
	if (fBuffer != NULL)
		fBuffer->Delete();
	mutex_destroy(&fLock);
}

/* static */ IORequest*
IORequest::Create(bool vip)
{
	return vip
		? new(malloc_flags(HEAP_PRIORITY_VIP)) IORequest
		: new(std::nothrow) IORequest;
}

status_t
IORequest::Init(off_t offset, generic_addr_t buffer, generic_size_t length,
	bool write, uint32 flags)
{
	generic_io_vec vec;
	vec.base = buffer;
	vec.length = length;
	return Init(offset, &vec, 1, length, write, flags);
}

status_t
IORequest::Init(off_t offset, generic_size_t firstVecOffset,
	const generic_io_vec* vecs, size_t count, generic_size_t length, bool write,
	uint32 flags)
{
	fBuffer = IOBuffer::Create(count, (flags & B_VIP_IO_REQUEST) != 0);
	if (fBuffer == NULL)
		return B_NO_MEMORY;

	fBuffer->SetVecs(firstVecOffset, vecs, count, length, flags);

	fOwner = NULL;
	fOffset = offset;
	fLength = length;
	fRelativeParentOffset = 0;
	fTransferSize = 0;
	fFlags = flags;
	Thread* thread = thread_get_current_thread();
	fTeam = thread->team->id;
	fThread = thread->id;
	fIsWrite = write;
	fPartialTransfer = false;
	fSuppressChildNotifications = false;

	// these are for iteration
	fVecIndex = 0;
	fVecOffset = 0;
	fRemainingBytes = length;

	fPendingChildren = 0;

	fStatus = 1;

	return B_OK;
}

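// Typical lifecycle (a sketch only -- how the request is actually submitted
// depends on the I/O scheduler/driver using this API; "offset", "vecs",
// "count", "length", and "isWrite" are hypothetical caller variables):
//
//	IORequest* request = IORequest::Create(false);
//	if (request != NULL && request->Init(offset, vecs, count, length,
//			isWrite, 0) == B_OK) {
//		// ... submit the request ...
//		request->Wait(0, 0);
//	}
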
status_t
IORequest::CreateSubRequest(off_t parentOffset, off_t offset,
	generic_size_t length, IORequest*& _subRequest)
{
	ASSERT(parentOffset >= fOffset && length <= fLength
		&& parentOffset - fOffset <= (off_t)(fLength - length));

	// find start vec
	generic_size_t vecOffset = parentOffset - fOffset;
	generic_io_vec* vecs = fBuffer->Vecs();
	int32 vecCount = fBuffer->VecCount();
	int32 startVec = 0;
	for (; startVec < vecCount; startVec++) {
		const generic_io_vec& vec = vecs[startVec];
		if (vecOffset < vec.length)
			break;

		vecOffset -= vec.length;
	}

	// count vecs
	generic_size_t currentVecOffset = vecOffset;
	int32 endVec = startVec;
	generic_size_t remainingLength = length;
	for (; endVec < vecCount; endVec++) {
		const generic_io_vec& vec = vecs[endVec];
		if (vec.length - currentVecOffset >= remainingLength)
			break;

		remainingLength -= vec.length - currentVecOffset;
		currentVecOffset = 0;
	}

	// create subrequest
	IORequest* subRequest = Create((fFlags & B_VIP_IO_REQUEST) != 0);
	if (subRequest == NULL)
		return B_NO_MEMORY;

	status_t error = subRequest->Init(offset, vecOffset, vecs + startVec,
		endVec - startVec + 1, length, fIsWrite, fFlags & ~B_DELETE_IO_REQUEST);
	if (error != B_OK) {
		delete subRequest;
		return error;
	}

	subRequest->fRelativeParentOffset = parentOffset - fOffset;
	subRequest->fTeam = fTeam;
	subRequest->fThread = fThread;

	_subRequest = subRequest;
	subRequest->SetParent(this);

	MutexLocker _(fLock);

	fChildren.Add(subRequest);
	fPendingChildren++;
	TRACE("IORequest::CreateSubRequest(): request: %p, subrequest: %p\n", this,
		subRequest);

	return B_OK;
}

void
IORequest::DeleteSubRequests()
{
	while (IORequestChunk* chunk = fChildren.RemoveHead())
		delete chunk;
	fPendingChildren = 0;
}

void
IORequest::SetFinishedCallback(io_request_finished_callback callback,
	void* cookie)
{
	fFinishedCallback = callback;
	fFinishedCookie = cookie;
}

void
IORequest::SetIterationCallback(io_request_iterate_callback callback,
	void* cookie)
{
	fIterationCallback = callback;
	fIterationCookie = cookie;
}

io_request_finished_callback
IORequest::FinishedCallback(void** _cookie) const
{
	if (_cookie != NULL)
		*_cookie = fFinishedCookie;
	return fFinishedCallback;
}

status_t
IORequest::Wait(uint32 flags, bigtime_t timeout)
{
	MutexLocker locker(fLock);

	if (IsFinished() && fIsNotified)
		return Status();

	ConditionVariableEntry entry;
	fFinishedCondition.Add(&entry);

	locker.Unlock();

	status_t error = entry.Wait(flags, timeout);
	if (error != B_OK)
		return error;

	return Status();
}

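/*!	Completes the notification for this request: gives the iteration callback
	a chance to continue the transfer, unlocks the buffer memory, wakes
	waiters, and invokes the finished callback and the parent notification.
	Any of those may delete the request, so it must not be touched afterwards.
*/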
void
IORequest::NotifyFinished()
{
	TRACE("IORequest::NotifyFinished(): request: %p\n", this);

	MutexLocker locker(fLock);

	if (fStatus == B_OK && !fPartialTransfer && RemainingBytes() > 0) {
		// The request is not really done yet. If it has an iteration callback,
		// call it.
		if (fIterationCallback != NULL) {
			ResetStatus();
			locker.Unlock();
			bool partialTransfer = false;
			status_t error = fIterationCallback(fIterationCookie, this,
				&partialTransfer);
			if (error == B_OK && !partialTransfer)
				return;

			// Iteration failed, which means we're responsible for notifying the
			// requests finished.
			locker.Lock();
			fStatus = error;
			fPartialTransfer = true;
		}
	}

	ASSERT(!fIsNotified);
	ASSERT(fPendingChildren == 0);
	ASSERT(fChildren.IsEmpty()
		|| dynamic_cast<IOOperation*>(fChildren.Head()) == NULL);

	// unlock the memory
	if (fBuffer->IsMemoryLocked())
		fBuffer->UnlockMemory(fTeam, fIsWrite);

	// Cache the callbacks before we unblock waiters and unlock. Any of the
	// following could delete this request, so we don't want to touch it
	// once we have started telling others that it is done.
	IORequest* parent = fParent;
	io_request_finished_callback finishedCallback = fFinishedCallback;
	void* finishedCookie = fFinishedCookie;
	status_t status = fStatus;
	generic_size_t lastTransferredOffset
		= fRelativeParentOffset + fTransferSize;
	bool partialTransfer = status != B_OK || fPartialTransfer;
	bool deleteRequest = (fFlags & B_DELETE_IO_REQUEST) != 0;

	// unblock waiters
	fIsNotified = true;
	fFinishedCondition.NotifyAll();

	locker.Unlock();

	// notify callback
	if (finishedCallback != NULL) {
		finishedCallback(finishedCookie, this, status, partialTransfer,
			lastTransferredOffset);
	}

	// notify parent
	if (parent != NULL) {
		parent->SubRequestFinished(this, status, partialTransfer,
			lastTransferredOffset);
	}

	if (deleteRequest)
		delete this;
}

/*!	Returns whether this request or any of its ancestors has a finished or
	notification callback. Used to decide whether NotifyFinished() can be
	called synchronously.
*/
bool
IORequest::HasCallbacks() const
{
	if (fFinishedCallback != NULL || fIterationCallback != NULL)
		return true;

	return fParent != NULL && fParent->HasCallbacks();
}

void
IORequest::SetStatusAndNotify(status_t status)
{
	MutexLocker locker(fLock);

	if (fStatus != 1)
		return;

	fStatus = status;

	locker.Unlock();

	NotifyFinished();
}

void
IORequest::OperationFinished(IOOperation* operation, status_t status,
	bool partialTransfer, generic_size_t transferEndOffset)
{
	TRACE("IORequest::OperationFinished(%p, %#" B_PRIx32 "): request: %p\n",
		operation, status, this);

	MutexLocker locker(fLock);

	fChildren.Remove(operation);
	operation->SetParent(NULL);

	if (status != B_OK || partialTransfer) {
		if (fTransferSize > transferEndOffset)
			fTransferSize = transferEndOffset;
		fPartialTransfer = true;
	}

	if (status != B_OK && fStatus == 1)
		fStatus = status;

	if (--fPendingChildren > 0)
		return;

	// last child finished

	// set status, if not done yet
	if (fStatus == 1)
		fStatus = B_OK;
}

void
IORequest::SubRequestFinished(IORequest* request, status_t status,
	bool partialTransfer, generic_size_t transferEndOffset)
{
	TRACE("IORequest::SubrequestFinished(%p, %#" B_PRIx32 ", %d, %"
		B_PRIuGENADDR "): request: %p\n", request, status, partialTransfer,
		transferEndOffset, this);

	MutexLocker locker(fLock);

	if (status != B_OK || partialTransfer) {
		if (fTransferSize > transferEndOffset)
			fTransferSize = transferEndOffset;
		fPartialTransfer = true;
	}

	if (status != B_OK && fStatus == 1)
		fStatus = status;

	if (--fPendingChildren > 0 || fSuppressChildNotifications)
		return;

	// last child finished

	// set status, if not done yet
	if (fStatus == 1)
		fStatus = B_OK;

	locker.Unlock();

	NotifyFinished();
}

void
IORequest::SetUnfinished()
{
	MutexLocker _(fLock);
	ResetStatus();
}

void
IORequest::SetTransferredBytes(bool partialTransfer,
	generic_size_t transferredBytes)
{
	TRACE("%p->IORequest::SetTransferredBytes(%d, %" B_PRIuGENADDR ")\n", this,
		partialTransfer, transferredBytes);

	MutexLocker _(fLock);

	fPartialTransfer = partialTransfer;
	fTransferSize = transferredBytes;
}

void
IORequest::SetSuppressChildNotifications(bool suppress)
{
	fSuppressChildNotifications = suppress;
}

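/*!	Advances the iteration state (vec index, vec offset, and the remaining
	byte count) by \a bySize bytes after a transfer has progressed.
*/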
void
IORequest::Advance(generic_size_t bySize)
{
	TRACE("IORequest::Advance(%" B_PRIuGENADDR "): remaining: %" B_PRIuGENADDR
		" -> %" B_PRIuGENADDR "\n", bySize, fRemainingBytes,
		fRemainingBytes - bySize);
	fRemainingBytes -= bySize;
	fTransferSize += bySize;

	generic_io_vec* vecs = fBuffer->Vecs();
	uint32 vecCount = fBuffer->VecCount();
	while (fVecIndex < vecCount
			&& vecs[fVecIndex].length - fVecOffset <= bySize) {
		bySize -= vecs[fVecIndex].length - fVecOffset;
		fVecOffset = 0;
		fVecIndex++;
	}

	fVecOffset += bySize;
}

IORequest*
IORequest::FirstSubRequest()
{
	return dynamic_cast<IORequest*>(fChildren.Head());
}

IORequest*
IORequest::NextSubRequest(IORequest* previous)
{
	if (previous == NULL)
		return NULL;
	return dynamic_cast<IORequest*>(fChildren.GetNext(previous));
}

void
IORequest::AddOperation(IOOperation* operation)
{
	MutexLocker locker(fLock);
	TRACE("IORequest::AddOperation(%p): request: %p\n", operation, this);
	fChildren.Add(operation);
	fPendingChildren++;
}

void
IORequest::RemoveOperation(IOOperation* operation)
{
	MutexLocker locker(fLock);
	fChildren.Remove(operation);
	operation->SetParent(NULL);
}

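/*!	CopyData(offset, buffer, size) copies from the request buffer into
	\a buffer; the const overload copies \a buffer into the request buffer.
	Both delegate to _CopyData(), which picks a simple, physical, or user
	copy strategy per vec.
*/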
status_t
IORequest::CopyData(off_t offset, void* buffer, size_t size)
{
	return _CopyData(buffer, offset, size, true);
}

status_t
IORequest::CopyData(const void* buffer, off_t offset, size_t size)
{
	return _CopyData((void*)buffer, offset, size, false);
}

status_t
IORequest::ClearData(off_t offset, generic_size_t size)
{
	if (offset < fOffset || offset + (off_t)size > fOffset + (off_t)fLength) {
		panic("IORequest::ClearData(): invalid range: (%" B_PRIdOFF
			", %" B_PRIuGENADDR ")", offset, size);
		return B_BAD_VALUE;
	}

	// If we can, we directly copy from/to the virtual buffer. The memory is
	// locked in this case.
	status_t (*clearFunction)(generic_addr_t, generic_size_t, team_id);
	if (fBuffer->IsPhysical()) {
		clearFunction = &IORequest::_ClearDataPhysical;
	} else {
		clearFunction = fBuffer->IsUser() && fTeam != team_get_current_team_id()
			? &IORequest::_ClearDataUser : &IORequest::_ClearDataSimple;
	}

	// skip bytes if requested
	generic_io_vec* vecs = fBuffer->Vecs();
	generic_size_t skipBytes = offset - fOffset;
	generic_size_t vecOffset = 0;
	while (skipBytes > 0) {
		if (vecs[0].length > skipBytes) {
			vecOffset = skipBytes;
			break;
		}

		skipBytes -= vecs[0].length;
		vecs++;
	}

	// clear vector-wise
	while (size > 0) {
		generic_size_t toClear = min_c(size, vecs[0].length - vecOffset);
		status_t error = clearFunction(vecs[0].base + vecOffset, toClear,
			fTeam);
		if (error != B_OK)
			return error;

		size -= toClear;
		vecs++;
		vecOffset = 0;
	}

	return B_OK;
}

status_t
IORequest::_CopyData(void* _buffer, off_t offset, size_t size, bool copyIn)
{
	uint8* buffer = (uint8*)_buffer;

	if (offset < fOffset || offset + (off_t)size > fOffset + (off_t)fLength) {
		panic("IORequest::_CopyData(): invalid range: (%" B_PRIdOFF ", %lu)",
			offset, size);
		return B_BAD_VALUE;
	}

	// If we can, we directly copy from/to the virtual buffer. The memory is
	// locked in this case.
	status_t (*copyFunction)(void*, generic_addr_t, size_t, team_id, bool);
	if (fBuffer->IsPhysical()) {
		copyFunction = &IORequest::_CopyPhysical;
	} else {
		copyFunction = fBuffer->IsUser() && fTeam != team_get_current_team_id()
			? &IORequest::_CopyUser : &IORequest::_CopySimple;
	}

	// skip bytes if requested
	generic_io_vec* vecs = fBuffer->Vecs();
	generic_size_t skipBytes = offset - fOffset;
	generic_size_t vecOffset = 0;
	while (skipBytes > 0) {
		if (vecs[0].length > skipBytes) {
			vecOffset = skipBytes;
			break;
		}

		skipBytes -= vecs[0].length;
		vecs++;
	}

	// copy vector-wise
	while (size > 0) {
		generic_size_t toCopy = min_c(size, vecs[0].length - vecOffset);
		status_t error = copyFunction(buffer, vecs[0].base + vecOffset, toCopy,
			fTeam, copyIn);
		if (error != B_OK)
			return error;

		buffer += toCopy;
		size -= toCopy;
		vecs++;
		vecOffset = 0;
	}

	return B_OK;
}

/* static */ status_t
IORequest::_CopySimple(void* bounceBuffer, generic_addr_t external, size_t size,
	team_id team, bool copyIn)
{
	TRACE("  IORequest::_CopySimple(%p, %#" B_PRIxGENADDR ", %lu, %d)\n",
		bounceBuffer, external, size, copyIn);
	if (copyIn)
		memcpy(bounceBuffer, (void*)(addr_t)external, size);
	else
		memcpy((void*)(addr_t)external, bounceBuffer, size);
	return B_OK;
}

/* static */ status_t
IORequest::_CopyPhysical(void* bounceBuffer, generic_addr_t external,
	size_t size, team_id team, bool copyIn)
{
	if (copyIn)
		return vm_memcpy_from_physical(bounceBuffer, external, size, false);

	return vm_memcpy_to_physical(external, bounceBuffer, size, false);
}

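// _CopyUser() and _ClearDataUser() below translate another team's user
// buffer into physical entries (at most kEntryCount at a time) via
// get_memory_map_etc() and then operate on each physical chunk.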
/* static */ status_t
IORequest::_CopyUser(void* _bounceBuffer, generic_addr_t _external, size_t size,
	team_id team, bool copyIn)
{
	uint8* bounceBuffer = (uint8*)_bounceBuffer;
	uint8* external = (uint8*)(addr_t)_external;

	while (size > 0) {
		static const int32 kEntryCount = 8;
		physical_entry entries[kEntryCount];

		uint32 count = kEntryCount;
		status_t error = get_memory_map_etc(team, external, size, entries,
			&count);
		if (error != B_OK && error != B_BUFFER_OVERFLOW) {
			panic("IORequest::_CopyUser(): Failed to get physical memory for "
				"user memory %p\n", external);
			return B_BAD_ADDRESS;
		}

		for (uint32 i = 0; i < count; i++) {
			const physical_entry& entry = entries[i];
			error = _CopyPhysical(bounceBuffer, entry.address, entry.size,
				team, copyIn);
			if (error != B_OK)
				return error;

			size -= entry.size;
			bounceBuffer += entry.size;
			external += entry.size;
		}
	}

	return B_OK;
}

/* static */ status_t
IORequest::_ClearDataSimple(generic_addr_t external, generic_size_t size,
	team_id team)
{
	memset((void*)(addr_t)external, 0, (size_t)size);
	return B_OK;
}

/* static */ status_t
IORequest::_ClearDataPhysical(generic_addr_t external, generic_size_t size,
	team_id team)
{
	return vm_memset_physical((phys_addr_t)external, 0, (phys_size_t)size);
}

/* static */ status_t
IORequest::_ClearDataUser(generic_addr_t _external, generic_size_t size,
	team_id team)
{
	uint8* external = (uint8*)(addr_t)_external;

	while (size > 0) {
		static const int32 kEntryCount = 8;
		physical_entry entries[kEntryCount];

		uint32 count = kEntryCount;
		status_t error = get_memory_map_etc(team, external, size, entries,
			&count);
		if (error != B_OK && error != B_BUFFER_OVERFLOW) {
			panic("IORequest::_ClearDataUser(): Failed to get physical memory "
				"for user memory %p\n", external);
			return B_BAD_ADDRESS;
		}

		for (uint32 i = 0; i < count; i++) {
			const physical_entry& entry = entries[i];
			error = _ClearDataPhysical(entry.address, entry.size, team);
			if (error != B_OK)
				return error;

			size -= entry.size;
			external += entry.size;
		}
	}

	return B_OK;
}

void
IORequest::Dump() const
{
	kprintf("io_request at %p\n", this);

	kprintf("  owner:             %p\n", fOwner);
	kprintf("  parent:            %p\n", fParent);
	kprintf("  status:            %s\n", strerror(fStatus));
	kprintf("  mutex:             %p\n", &fLock);
	kprintf("  IOBuffer:          %p\n", fBuffer);
	kprintf("  offset:            %" B_PRIdOFF "\n", fOffset);
	kprintf("  length:            %" B_PRIuGENADDR "\n", fLength);
	kprintf("  transfer size:     %" B_PRIuGENADDR "\n", fTransferSize);
	kprintf("  relative offset:   %" B_PRIuGENADDR "\n", fRelativeParentOffset);
	kprintf("  pending children:  %" B_PRId32 "\n", fPendingChildren);
	kprintf("  flags:             %#" B_PRIx32 "\n", fFlags);
	kprintf("  team:              %" B_PRId32 "\n", fTeam);
	kprintf("  thread:            %" B_PRId32 "\n", fThread);
	kprintf("  r/w:               %s\n", fIsWrite ? "write" : "read");
	kprintf("  partial transfer:  %s\n", fPartialTransfer ? "yes" : "no");
	kprintf("  finished cvar:     %p\n", &fFinishedCondition);
	kprintf("  iteration:\n");
	kprintf("    vec index:       %" B_PRIu32 "\n", fVecIndex);
	kprintf("    vec offset:      %" B_PRIuGENADDR "\n", fVecOffset);
	kprintf("    remaining bytes: %" B_PRIuGENADDR "\n", fRemainingBytes);
	kprintf("  callbacks:\n");
	kprintf("    finished %p, cookie %p\n", fFinishedCallback, fFinishedCookie);
	kprintf("    iteration %p, cookie %p\n", fIterationCallback,
		fIterationCookie);
	kprintf("  children:\n");

	IORequestChunkList::ConstIterator iterator = fChildren.GetIterator();
	while (iterator.HasNext()) {
		kprintf("    %p\n", iterator.Next());
	}

	set_debug_variable("_parent", (addr_t)fParent);
	set_debug_variable("_mutex", (addr_t)&fLock);
	set_debug_variable("_buffer", (addr_t)fBuffer);
	set_debug_variable("_cvar", (addr_t)&fFinishedCondition);
}