/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */

#include "IORequest.h"

#include <string.h>

#include <arch/debug.h>
#include <heap.h>
#include <team.h>
#include <thread.h>
#include <util/AutoLock.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>

#include "dma_resources.h"

//#define TRACE_IO_REQUEST
#ifdef TRACE_IO_REQUEST
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif

// partial I/O operation phases
enum {
	PHASE_READ_BEGIN	= 0,
	PHASE_READ_END		= 1,
	PHASE_DO_ALL		= 2
};
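
/*	Editor's illustration (not in the original source): with fBlockSize = 512,
	a write covering bytes [100, 1000) is block-unaligned at both ends. Such
	an operation passes through PHASE_READ_BEGIN (read the first block into
	the bounce buffer, then merge in the caller's partial data) and
	PHASE_READ_END (likewise for the last block) before PHASE_DO_ALL performs
	the actual full-block transfer. Fully aligned operations start directly
	in PHASE_DO_ALL.
*/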


IORequestChunk::IORequestChunk()
	:
	fParent(NULL),
	fStatus(1)
{
}


IORequestChunk::~IORequestChunk()
{
}


struct virtual_vec_cookie {
	uint32			vec_index;
	generic_size_t	vec_offset;
	area_id			mapped_area;
	void*			physical_page_handle;
	addr_t			virtual_address;
};


/* static */ IOBuffer*
IOBuffer::Create(uint32 count, bool vip)
{
	size_t size = sizeof(IOBuffer) + sizeof(generic_io_vec) * (count - 1);
	IOBuffer* buffer
		= (IOBuffer*)(malloc_etc(size, vip ? HEAP_PRIORITY_VIP : 0));
	if (buffer == NULL)
		return NULL;

	buffer->fCapacity = count;
	buffer->fVecCount = 0;
	buffer->fUser = false;
	buffer->fPhysical = false;
	buffer->fVIP = vip;
	buffer->fMemoryLocked = false;

	return buffer;
}


void
IOBuffer::Delete()
{
	free_etc(this, fVIP ? HEAP_PRIORITY_VIP : 0);
}
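
/*	Editor's note (not in the original source): the size computation above
	relies on IOBuffer ending in a one-element vec array (fVecs), so a single
	allocation holds the object plus `count' vecs -- the classic trailing
	array idiom. For instance, assuming hypothetical sizes of 48 bytes for
	the IOBuffer header and 16 bytes per generic_io_vec, Create(4, false)
	allocates 48 + 16 * 3 = 96 bytes.
*/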


void
IOBuffer::SetVecs(generic_size_t firstVecOffset, const generic_io_vec* vecs,
	uint32 count, generic_size_t length, uint32 flags)
{
	memcpy(fVecs, vecs, sizeof(generic_io_vec) * count);

	if (count > 0 && firstVecOffset > 0) {
		fVecs[0].base += firstVecOffset;
		fVecs[0].length -= firstVecOffset;
	}

	fVecCount = count;
	fLength = length;
	fPhysical = (flags & B_PHYSICAL_IO_REQUEST) != 0;
	fUser = !fPhysical && IS_USER_ADDRESS(vecs[0].base);
}


status_t
IOBuffer::GetNextVirtualVec(void*& _cookie, iovec& vector)
{
	virtual_vec_cookie* cookie = (virtual_vec_cookie*)_cookie;
	if (cookie == NULL) {
		cookie = new(malloc_flags(fVIP ? HEAP_PRIORITY_VIP : 0))
			virtual_vec_cookie;
		if (cookie == NULL)
			return B_NO_MEMORY;

		cookie->vec_index = 0;
		cookie->vec_offset = 0;
		cookie->mapped_area = -1;
		cookie->physical_page_handle = NULL;
		cookie->virtual_address = 0;
		_cookie = cookie;
	}

	// recycle a potential previously mapped page
	if (cookie->physical_page_handle != NULL) {
		// TODO: This check is invalid! The physical page mapper is not
		// required to return a non-NULL handle (the generic implementation
		// does not)!
		vm_put_physical_page(cookie->virtual_address,
			cookie->physical_page_handle);
	}

	if (cookie->vec_index >= fVecCount)
		return B_BAD_INDEX;

	if (!fPhysical) {
		vector.iov_base = (void*)(addr_t)fVecs[cookie->vec_index].base;
		vector.iov_len = fVecs[cookie->vec_index++].length;
		return B_OK;
	}

	if (cookie->vec_index == 0
		&& (fVecCount > 1 || fVecs[0].length > B_PAGE_SIZE)) {
		void* mappedAddress;
		addr_t mappedSize;

		// TODO: This is a potential violation of the VIP requirement, since
		// vm_map_physical_memory_vecs() allocates memory without special
		// flags!
		cookie->mapped_area = vm_map_physical_memory_vecs(
			VMAddressSpace::KernelID(), "io buffer mapped physical vecs",
			&mappedAddress, B_ANY_KERNEL_ADDRESS, &mappedSize,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, fVecs, fVecCount);

		if (cookie->mapped_area >= 0) {
			vector.iov_base = mappedAddress;
			vector.iov_len = mappedSize;
			return B_OK;
		} else {
			ktrace_printf("failed to map area: %s\n",
				strerror(cookie->mapped_area));
		}
	}

	// fallback to page wise mapping
	generic_io_vec& currentVec = fVecs[cookie->vec_index];
	generic_addr_t address = currentVec.base + cookie->vec_offset;
	size_t pageOffset = address % B_PAGE_SIZE;

	// TODO: This is a potential violation of the VIP requirement, since
	// vm_get_physical_page() may allocate memory without special flags!
	status_t result = vm_get_physical_page(address - pageOffset,
		&cookie->virtual_address, &cookie->physical_page_handle);
	if (result != B_OK)
		return result;

	generic_size_t length = min_c(currentVec.length - cookie->vec_offset,
		B_PAGE_SIZE - pageOffset);

	vector.iov_base = (void*)(cookie->virtual_address + pageOffset);
	vector.iov_len = length;

	cookie->vec_offset += length;
	if (cookie->vec_offset >= currentVec.length) {
		cookie->vec_index++;
		cookie->vec_offset = 0;
	}

	return B_OK;
}


void
IOBuffer::FreeVirtualVecCookie(void* _cookie)
{
	virtual_vec_cookie* cookie = (virtual_vec_cookie*)_cookie;
	if (cookie->mapped_area >= 0)
		delete_area(cookie->mapped_area);
	// TODO: A vm_get_physical_page() may still be unmatched!

	free_etc(cookie, fVIP ? HEAP_PRIORITY_VIP : 0);
}
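
/*	Example (editor's sketch, not in the original source): the cookie-based
	iteration protocol defined by GetNextVirtualVec()/FreeVirtualVecCookie().
	`buffer' is an IOBuffer*; ConsumeChunk() is a hypothetical consumer.

		void* cookie = NULL;
		iovec vec;
		// GetNextVirtualVec() returns B_BAD_INDEX once all vecs are consumed
		while (buffer->GetNextVirtualVec(cookie, vec) == B_OK)
			ConsumeChunk(vec.iov_base, vec.iov_len);
		buffer->FreeVirtualVecCookie(cookie);
*/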


status_t
IOBuffer::LockMemory(team_id team, bool isWrite)
{
	if (fMemoryLocked) {
		panic("memory already locked!");
		return B_BAD_VALUE;
	}

	for (uint32 i = 0; i < fVecCount; i++) {
		status_t status = lock_memory_etc(team, (void*)(addr_t)fVecs[i].base,
			fVecs[i].length, isWrite ? 0 : B_READ_DEVICE);
		if (status != B_OK) {
			_UnlockMemory(team, i, isWrite);
			return status;
		}
	}

	fMemoryLocked = true;
	return B_OK;
}


void
IOBuffer::_UnlockMemory(team_id team, size_t count, bool isWrite)
{
	for (uint32 i = 0; i < count; i++) {
		unlock_memory_etc(team, (void*)(addr_t)fVecs[i].base, fVecs[i].length,
			isWrite ? 0 : B_READ_DEVICE);
	}
}


void
IOBuffer::UnlockMemory(team_id team, bool isWrite)
{
	if (!fMemoryLocked) {
		panic("memory not locked");
		return;
	}

	_UnlockMemory(team, fVecCount, isWrite);
	fMemoryLocked = false;
}


void
IOBuffer::Dump() const
{
	kprintf("IOBuffer at %p\n", this);

	kprintf("  origin:   %s\n", fUser ? "user" : "kernel");
	kprintf("  kind:     %s\n", fPhysical ? "physical" : "virtual");
	kprintf("  length:   %" B_PRIuGENADDR "\n", fLength);
	kprintf("  capacity: %" B_PRIuSIZE "\n", fCapacity);
	kprintf("  vecs:     %" B_PRIuSIZE "\n", fVecCount);

	for (uint32 i = 0; i < fVecCount; i++) {
		kprintf("    [%" B_PRIu32 "] %#" B_PRIxGENADDR ", %" B_PRIuGENADDR "\n",
			i, fVecs[i].base, fVecs[i].length);
	}
}


bool
IOOperation::Finish()
{
	TRACE("IOOperation::Finish()\n");
	if (fStatus == B_OK) {
		if (fParent->IsWrite()) {
			TRACE("  is write\n");
			if (fPhase == PHASE_READ_BEGIN) {
				TRACE("  phase read begin\n");
				// repair phase adjusted vec
				fDMABuffer->VecAt(fSavedVecIndex).length = fSavedVecLength;

				// partial write: copy partial begin to bounce buffer
				bool skipReadEndPhase;
				status_t error = _CopyPartialBegin(true, skipReadEndPhase);
				if (error == B_OK) {
					// We're done with the first phase only (read in begin).
					// Get ready for next phase...
					fPhase = HasPartialEnd() && !skipReadEndPhase
						? PHASE_READ_END : PHASE_DO_ALL;
					_PrepareVecs();
					ResetStatus();
						// TODO: Is there a race condition, if the request is
						// aborted at the same time?
					return false;
				}

				SetStatus(error);
			} else if (fPhase == PHASE_READ_END) {
				TRACE("  phase read end\n");
				// repair phase adjusted vec
				generic_io_vec& vec = fDMABuffer->VecAt(fSavedVecIndex);
				vec.base += vec.length - fSavedVecLength;
				vec.length = fSavedVecLength;

				// partial write: copy partial end to bounce buffer
				status_t error = _CopyPartialEnd(true);
				if (error == B_OK) {
					// We're done with the second phase only (read in end).
					// Get ready for next phase...
					fPhase = PHASE_DO_ALL;
					ResetStatus();
						// TODO: Is there a race condition, if the request is
						// aborted at the same time?
					return false;
				}

				SetStatus(error);
			}
		}
	}

	if (fParent->IsRead() && UsesBounceBuffer()) {
		TRACE("  read with bounce buffer\n");
		// copy the bounce buffer segments to the final location
		uint8* bounceBuffer = (uint8*)fDMABuffer->BounceBufferAddress();
		phys_addr_t bounceBufferStart
			= fDMABuffer->PhysicalBounceBufferAddress();
		phys_addr_t bounceBufferEnd = bounceBufferStart
			+ fDMABuffer->BounceBufferSize();

		const generic_io_vec* vecs = fDMABuffer->Vecs();
		uint32 vecCount = fDMABuffer->VecCount();

		status_t error = B_OK;

		// We iterate through the vecs we have read, moving offset (the device
		// offset) as we go. If [offset, offset + vec.length) intersects with
		// [startOffset, endOffset) we copy to the final location.
		off_t offset = fOffset;
		const off_t startOffset = fOriginalOffset;
		const off_t endOffset = fOriginalOffset + fOriginalLength;

		for (uint32 i = 0; error == B_OK && i < vecCount; i++) {
			const generic_io_vec& vec = vecs[i];
			generic_addr_t base = vec.base;
			generic_size_t length = vec.length;

			if (offset < startOffset) {
				// If the complete vector is before the start offset, skip it.
				if (offset + (off_t)length <= startOffset) {
					offset += length;
					continue;
				}

				// The vector starts before the start offset, but intersects
				// with it. Skip the part we aren't interested in.
				generic_size_t diff = startOffset - offset;
				offset += diff;
				base += diff;
				length -= diff;
			}

			if (offset + (off_t)length > endOffset) {
				// If we're already beyond the end offset, we're done.
				if (offset >= endOffset)
					break;

				// The vector extends beyond the end offset -- cut it.
				length = endOffset - offset;
			}

			if (base >= bounceBufferStart && base < bounceBufferEnd) {
				error = fParent->CopyData(
					bounceBuffer + (base - bounceBufferStart), offset, length);
			}

			offset += length;
		}

		if (error != B_OK)
			SetStatus(error);
	}

	return true;
}
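
/*	Editor's worked example for the copy-back window above (not in the
	original source): with fOffset = 0, fOriginalOffset = 100 and
	fOriginalLength = 200, the window [startOffset, endOffset) is [100, 300).
	A vec covering device offsets [0, 150) intersects it, so diff = 100
	trims it to [100, 150); a later vec covering [250, 400) is cut to
	length = endOffset - offset = 50. Only bytes inside [100, 300) are ever
	copied out of the bounce buffer.
*/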


/*!	Note: SetPartial() must be called first!
*/
status_t
IOOperation::Prepare(IORequest* request)
{
	if (fParent != NULL)
		fParent->RemoveOperation(this);

	fParent = request;

	fTransferredBytes = 0;

	// set initial phase
	fPhase = PHASE_DO_ALL;
	if (fParent->IsWrite()) {
		// Copy data to bounce buffer segments, save the partial begin/end vec,
		// which will be copied after their respective read phase.
		if (UsesBounceBuffer()) {
			TRACE("  write with bounce buffer\n");
			uint8* bounceBuffer = (uint8*)fDMABuffer->BounceBufferAddress();
			phys_addr_t bounceBufferStart
				= fDMABuffer->PhysicalBounceBufferAddress();
			phys_addr_t bounceBufferEnd = bounceBufferStart
				+ fDMABuffer->BounceBufferSize();

			const generic_io_vec* vecs = fDMABuffer->Vecs();
			uint32 vecCount = fDMABuffer->VecCount();
			generic_size_t vecOffset = 0;
			uint32 i = 0;

			off_t offset = fOffset;
			off_t endOffset = fOffset + fLength;

			if (HasPartialBegin()) {
				// skip first block
				generic_size_t toSkip = fBlockSize;
				while (toSkip > 0) {
					if (vecs[i].length <= toSkip) {
						toSkip -= vecs[i].length;
						i++;
					} else {
						vecOffset = toSkip;
						break;
					}
				}

				offset += fBlockSize;
			}

			if (HasPartialEnd()) {
				// skip last block
				generic_size_t toSkip = fBlockSize;
				while (toSkip > 0) {
					if (vecs[vecCount - 1].length <= toSkip) {
						toSkip -= vecs[vecCount - 1].length;
						vecCount--;
					} else
						break;
				}

				endOffset -= fBlockSize;
			}

			for (; i < vecCount; i++) {
				const generic_io_vec& vec = vecs[i];
				generic_addr_t base = vec.base + vecOffset;
				generic_size_t length = vec.length - vecOffset;
				vecOffset = 0;

				if (base >= bounceBufferStart && base < bounceBufferEnd) {
					if (offset + (off_t)length > endOffset)
						length = endOffset - offset;
					status_t error = fParent->CopyData(offset,
						bounceBuffer + (base - bounceBufferStart), length);
					if (error != B_OK)
						return error;
				}

				offset += length;
			}
		}

		if (HasPartialBegin())
			fPhase = PHASE_READ_BEGIN;
		else if (HasPartialEnd())
			fPhase = PHASE_READ_END;

		_PrepareVecs();
	}

	ResetStatus();

	if (fParent != NULL)
		fParent->AddOperation(this);

	return B_OK;
}


void
IOOperation::SetOriginalRange(off_t offset, generic_size_t length)
{
	fOriginalOffset = fOffset = offset;
	fOriginalLength = fLength = length;
}


void
IOOperation::SetRange(off_t offset, generic_size_t length)
{
	fOffset = offset;
	fLength = length;
}


off_t
IOOperation::Offset() const
{
	return fPhase == PHASE_READ_END ? fOffset + fLength - fBlockSize : fOffset;
}


generic_size_t
IOOperation::Length() const
{
	return fPhase == PHASE_DO_ALL ? fLength : fBlockSize;
}
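
/*	Editor's example (not in the original source): for an operation with
	fOffset = 0, fLength = 2048 and fBlockSize = 512, Offset()/Length()
	describe the current sub-transfer per phase: PHASE_READ_BEGIN yields
	(0, 512), PHASE_READ_END yields (2048 - 512 = 1536, 512), and
	PHASE_DO_ALL the full (0, 2048).
*/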


generic_io_vec*
IOOperation::Vecs() const
{
	switch (fPhase) {
		case PHASE_READ_END:
			return fDMABuffer->Vecs() + fSavedVecIndex;
		case PHASE_READ_BEGIN:
		case PHASE_DO_ALL:
		default:
			return fDMABuffer->Vecs();
	}
}


uint32
IOOperation::VecCount() const
{
	switch (fPhase) {
		case PHASE_READ_BEGIN:
			return fSavedVecIndex + 1;
		case PHASE_READ_END:
			return fDMABuffer->VecCount() - fSavedVecIndex;
		case PHASE_DO_ALL:
		default:
			return fDMABuffer->VecCount();
	}
}


void
IOOperation::SetPartial(bool partialBegin, bool partialEnd)
{
	TRACE("partial begin %d, end %d\n", partialBegin, partialEnd);
	fPartialBegin = partialBegin;
	fPartialEnd = partialEnd;
}


bool
IOOperation::IsWrite() const
{
	return fParent->IsWrite() && fPhase == PHASE_DO_ALL;
}


bool
IOOperation::IsRead() const
{
	return fParent->IsRead();
}


void
IOOperation::_PrepareVecs()
{
	// we need to prepare the vecs for consumption by the drivers
	if (fPhase == PHASE_READ_BEGIN) {
		generic_io_vec* vecs = fDMABuffer->Vecs();
		uint32 vecCount = fDMABuffer->VecCount();
		generic_size_t vecLength = fBlockSize;
		for (uint32 i = 0; i < vecCount; i++) {
			generic_io_vec& vec = vecs[i];
			if (vec.length >= vecLength) {
				fSavedVecIndex = i;
				fSavedVecLength = vec.length;
				vec.length = vecLength;
				break;
			}

			vecLength -= vec.length;
		}
	} else if (fPhase == PHASE_READ_END) {
		generic_io_vec* vecs = fDMABuffer->Vecs();
		uint32 vecCount = fDMABuffer->VecCount();
		generic_size_t vecLength = fBlockSize;
		for (int32 i = vecCount - 1; i >= 0; i--) {
			generic_io_vec& vec = vecs[i];
			if (vec.length >= vecLength) {
				fSavedVecIndex = i;
				fSavedVecLength = vec.length;
				vec.base += vec.length - vecLength;
				vec.length = vecLength;
				break;
			}

			vecLength -= vec.length;
		}
	}
}


status_t
IOOperation::_CopyPartialBegin(bool isWrite, bool& singleBlockOnly)
{
	generic_size_t relativeOffset = OriginalOffset() - fOffset;
	generic_size_t length = fBlockSize - relativeOffset;

	singleBlockOnly = length >= OriginalLength();
	if (singleBlockOnly)
		length = OriginalLength();

	TRACE("_CopyPartialBegin(%s, single only %d)\n",
		isWrite ? "write" : "read", singleBlockOnly);

	if (isWrite) {
		return fParent->CopyData(OriginalOffset(),
			(uint8*)fDMABuffer->BounceBufferAddress() + relativeOffset, length);
	}

	return fParent->CopyData(
		(uint8*)fDMABuffer->BounceBufferAddress() + relativeOffset,
		OriginalOffset(), length);
}


status_t
IOOperation::_CopyPartialEnd(bool isWrite)
{
	TRACE("_CopyPartialEnd(%s)\n", isWrite ? "write" : "read");

	const generic_io_vec& lastVec
		= fDMABuffer->VecAt(fDMABuffer->VecCount() - 1);
	off_t lastVecPos = fOffset + fLength - fBlockSize;
	uint8* base = (uint8*)fDMABuffer->BounceBufferAddress()
		+ (lastVec.base + lastVec.length - fBlockSize
			- fDMABuffer->PhysicalBounceBufferAddress());
		// NOTE: this won't work if we don't use the bounce buffer contiguously
		// (because of boundary alignments).
	generic_size_t length = OriginalOffset() + OriginalLength() - lastVecPos;

	if (isWrite)
		return fParent->CopyData(lastVecPos, base, length);

	return fParent->CopyData(base, lastVecPos, length);
}


void
IOOperation::Dump() const
{
	kprintf("io_operation at %p\n", this);

	kprintf("  parent:           %p\n", fParent);
	kprintf("  status:           %s\n", strerror(fStatus));
	kprintf("  dma buffer:       %p\n", fDMABuffer);
	kprintf("  offset:           %-8" B_PRIdOFF " (original: %" B_PRIdOFF ")\n",
		fOffset, fOriginalOffset);
	kprintf("  length:           %-8" B_PRIuGENADDR " (original: %"
		B_PRIuGENADDR ")\n", fLength, fOriginalLength);
	kprintf("  transferred:      %" B_PRIuGENADDR "\n", fTransferredBytes);
	kprintf("  block size:       %" B_PRIuGENADDR "\n", fBlockSize);
	kprintf("  saved vec index:  %u\n", fSavedVecIndex);
	kprintf("  saved vec length: %u\n", fSavedVecLength);
	kprintf("  r/w:              %s\n", IsWrite() ? "write" : "read");
	kprintf("  phase:            %s\n", fPhase == PHASE_READ_BEGIN
		? "read begin" : fPhase == PHASE_READ_END ? "read end"
		: fPhase == PHASE_DO_ALL ? "do all" : "unknown");
	kprintf("  partial begin:    %s\n", fPartialBegin ? "yes" : "no");
	kprintf("  partial end:      %s\n", fPartialEnd ? "yes" : "no");
	kprintf("  bounce buffer:    %s\n", fUsesBounceBuffer ? "yes" : "no");

	set_debug_variable("_parent", (addr_t)fParent);
	set_debug_variable("_buffer", (addr_t)fDMABuffer);
}


IORequest::IORequest()
	:
	fIsNotified(false),
	fFinishedCallback(NULL),
	fFinishedCookie(NULL),
	fIterationCallback(NULL),
	fIterationCookie(NULL)
{
	mutex_init(&fLock, "I/O request lock");
	fFinishedCondition.Init(this, "I/O request finished");
}


IORequest::~IORequest()
{
	mutex_lock(&fLock);
	DeleteSubRequests();
	fBuffer->Delete();
	mutex_destroy(&fLock);
}


/* static */ IORequest*
IORequest::Create(bool vip)
{
	return vip
		? new(malloc_flags(HEAP_PRIORITY_VIP)) IORequest
		: new(std::nothrow) IORequest;
}


status_t
IORequest::Init(off_t offset, generic_addr_t buffer, generic_size_t length,
	bool write, uint32 flags)
{
	generic_io_vec vec;
	vec.base = buffer;
	vec.length = length;
	return Init(offset, &vec, 1, length, write, flags);
}


status_t
IORequest::Init(off_t offset, generic_size_t firstVecOffset,
	const generic_io_vec* vecs, size_t count, generic_size_t length, bool write,
	uint32 flags)
{
	fBuffer = IOBuffer::Create(count, (flags & B_VIP_IO_REQUEST) != 0);
	if (fBuffer == NULL)
		return B_NO_MEMORY;

	fBuffer->SetVecs(firstVecOffset, vecs, count, length, flags);

	fOwner = NULL;
	fOffset = offset;
	fLength = length;
	fRelativeParentOffset = 0;
	fTransferSize = 0;
	fFlags = flags;
	Thread* thread = thread_get_current_thread();
	fTeam = thread->team->id;
	fThread = thread->id;
	fIsWrite = write;
	fPartialTransfer = false;
	fSuppressChildNotifications = false;

	// these are for iteration
	fVecIndex = 0;
	fVecOffset = 0;
	fRemainingBytes = length;

	fPendingChildren = 0;

	fStatus = 1;

	return B_OK;
}


status_t
IORequest::CreateSubRequest(off_t parentOffset, off_t offset,
	generic_size_t length, IORequest*& _subRequest)
{
	ASSERT(parentOffset >= fOffset && length <= fLength
		&& parentOffset - fOffset <= (off_t)(fLength - length));

	// find start vec
	generic_size_t vecOffset = parentOffset - fOffset;
	generic_io_vec* vecs = fBuffer->Vecs();
	int32 vecCount = fBuffer->VecCount();
	int32 startVec = 0;
	for (; startVec < vecCount; startVec++) {
		const generic_io_vec& vec = vecs[startVec];
		if (vecOffset < vec.length)
			break;

		vecOffset -= vec.length;
	}

	// count vecs
	generic_size_t currentVecOffset = vecOffset;
	int32 endVec = startVec;
	generic_size_t remainingLength = length;
	for (; endVec < vecCount; endVec++) {
		const generic_io_vec& vec = vecs[endVec];
		if (vec.length - currentVecOffset >= remainingLength)
			break;

		remainingLength -= vec.length - currentVecOffset;
		currentVecOffset = 0;
	}

	// create subrequest
	IORequest* subRequest = Create((fFlags & B_VIP_IO_REQUEST) != 0);
	if (subRequest == NULL)
		return B_NO_MEMORY;

	status_t error = subRequest->Init(offset, vecOffset, vecs + startVec,
		endVec - startVec + 1, length, fIsWrite, fFlags & ~B_DELETE_IO_REQUEST);
	if (error != B_OK) {
		delete subRequest;
		return error;
	}

	subRequest->fRelativeParentOffset = parentOffset - fOffset;
	subRequest->fTeam = fTeam;
	subRequest->fThread = fThread;

	_subRequest = subRequest;
	subRequest->SetParent(this);

	MutexLocker _(fLock);

	fChildren.Add(subRequest);
	fPendingChildren++;
	TRACE("IORequest::CreateSubRequest(): request: %p, subrequest: %p\n", this,
		subRequest);

	return B_OK;
}
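
/*	Editor's example (not in the original source): assume a request with
	fOffset = 0 backed by two 512-byte vecs. CreateSubRequest(600, ..., 100,
	...) computes vecOffset = 600; the first loop skips vec 0 (leaving
	vecOffset = 88), the second loop finds vec 1 still holds the whole 100
	bytes, so the subrequest is built on vecs + 1 with a first-vec offset
	of 88.
*/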


void
IORequest::DeleteSubRequests()
{
	while (IORequestChunk* chunk = fChildren.RemoveHead())
		delete chunk;
	fPendingChildren = 0;
}


void
IORequest::SetFinishedCallback(io_request_finished_callback callback,
	void* cookie)
{
	fFinishedCallback = callback;
	fFinishedCookie = cookie;
}


void
IORequest::SetIterationCallback(io_request_iterate_callback callback,
	void* cookie)
{
	fIterationCallback = callback;
	fIterationCookie = cookie;
}


io_request_finished_callback
IORequest::FinishedCallback(void** _cookie) const
{
	if (_cookie != NULL)
		*_cookie = fFinishedCookie;
	return fFinishedCallback;
}


status_t
IORequest::Wait(uint32 flags, bigtime_t timeout)
{
	MutexLocker locker(fLock);

	if (IsFinished() && fIsNotified)
		return Status();

	ConditionVariableEntry entry;
	fFinishedCondition.Add(&entry);

	locker.Unlock();

	status_t error = entry.Wait(flags, timeout);
	if (error != B_OK)
		return error;

	return Status();
}


void
IORequest::NotifyFinished()
{
	TRACE("IORequest::NotifyFinished(): request: %p\n", this);

	MutexLocker locker(fLock);

	if (fStatus == B_OK && !fPartialTransfer && RemainingBytes() > 0) {
		// The request is not really done yet. If it has an iteration callback,
		// call it.
		if (fIterationCallback != NULL) {
			ResetStatus();
			locker.Unlock();
			bool partialTransfer = false;
			status_t error = fIterationCallback(fIterationCookie, this,
				&partialTransfer);
			if (error == B_OK && !partialTransfer)
				return;

			// Iteration failed, which means we're responsible for notifying
			// that the request is finished.
			locker.Lock();
			fStatus = error;
			fPartialTransfer = true;
		}
	}

	ASSERT(!fIsNotified);
	ASSERT(fPendingChildren == 0);
	ASSERT(fChildren.IsEmpty()
		|| dynamic_cast<IOOperation*>(fChildren.Head()) == NULL);

	// unlock the memory
	if (fBuffer->IsMemoryLocked())
		fBuffer->UnlockMemory(fTeam, fIsWrite);

	// Cache the callbacks before we unblock waiters and unlock. Any of the
	// following could delete this request, so we don't want to touch it
	// once we have started telling others that it is done.
	IORequest* parent = fParent;
	io_request_finished_callback finishedCallback = fFinishedCallback;
	void* finishedCookie = fFinishedCookie;
	status_t status = fStatus;
	generic_size_t lastTransferredOffset
		= fRelativeParentOffset + fTransferSize;
	bool partialTransfer = status != B_OK || fPartialTransfer;
	bool deleteRequest = (fFlags & B_DELETE_IO_REQUEST) != 0;

	// unblock waiters
	fIsNotified = true;
	fFinishedCondition.NotifyAll();

	locker.Unlock();

	// notify callback
	if (finishedCallback != NULL) {
		finishedCallback(finishedCookie, this, status, partialTransfer,
			lastTransferredOffset);
	}

	// notify parent
	if (parent != NULL) {
		parent->SubRequestFinished(this, status, partialTransfer,
			lastTransferredOffset);
	}

	if (deleteRequest)
		delete this;
}


/*!	Returns whether this request or any of its ancestors has a finished or
	notification callback. Used to decide whether NotifyFinished() can be
	called synchronously.
*/
bool
IORequest::HasCallbacks() const
{
	if (fFinishedCallback != NULL || fIterationCallback != NULL)
		return true;

	return fParent != NULL && fParent->HasCallbacks();
}


void
IORequest::SetStatusAndNotify(status_t status)
{
	MutexLocker locker(fLock);

	if (fStatus != 1)
		return;

	fStatus = status;

	locker.Unlock();

	NotifyFinished();
}


void
IORequest::OperationFinished(IOOperation* operation, status_t status,
	bool partialTransfer, generic_size_t transferEndOffset)
{
	TRACE("IORequest::OperationFinished(%p, %#" B_PRIx32 "): request: %p\n",
		operation, status, this);

	MutexLocker locker(fLock);

	fChildren.Remove(operation);
	operation->SetParent(NULL);

	if (status != B_OK || partialTransfer) {
		if (fTransferSize > transferEndOffset)
			fTransferSize = transferEndOffset;
		fPartialTransfer = true;
	}

	if (status != B_OK && fStatus == 1)
		fStatus = status;

	if (--fPendingChildren > 0)
		return;

	// last child finished

	// set status, if not done yet
	if (fStatus == 1)
		fStatus = B_OK;
}


void
IORequest::SubRequestFinished(IORequest* request, status_t status,
	bool partialTransfer, generic_size_t transferEndOffset)
{
	TRACE("IORequest::SubrequestFinished(%p, %#" B_PRIx32 ", %d, %"
		B_PRIuGENADDR "): request: %p\n", request, status, partialTransfer,
		transferEndOffset, this);

	MutexLocker locker(fLock);

	if (status != B_OK || partialTransfer) {
		if (fTransferSize > transferEndOffset)
			fTransferSize = transferEndOffset;
		fPartialTransfer = true;
	}

	if (status != B_OK && fStatus == 1)
		fStatus = status;

	if (--fPendingChildren > 0 || fSuppressChildNotifications)
		return;

	// last child finished

	// set status, if not done yet
	if (fStatus == 1)
		fStatus = B_OK;

	locker.Unlock();

	NotifyFinished();
}


void
IORequest::SetUnfinished()
{
	MutexLocker _(fLock);
	ResetStatus();
}


void
IORequest::SetTransferredBytes(bool partialTransfer,
	generic_size_t transferredBytes)
{
	TRACE("%p->IORequest::SetTransferredBytes(%d, %" B_PRIuGENADDR ")\n", this,
		partialTransfer, transferredBytes);

	MutexLocker _(fLock);

	fPartialTransfer = partialTransfer;
	fTransferSize = transferredBytes;
}


void
IORequest::SetSuppressChildNotifications(bool suppress)
{
	fSuppressChildNotifications = suppress;
}


void
IORequest::Advance(generic_size_t bySize)
{
	TRACE("IORequest::Advance(%" B_PRIuGENADDR "): remaining: %" B_PRIuGENADDR
		" -> %" B_PRIuGENADDR "\n", bySize, fRemainingBytes,
		fRemainingBytes - bySize);
	fRemainingBytes -= bySize;
	fTransferSize += bySize;

	generic_io_vec* vecs = fBuffer->Vecs();
	uint32 vecCount = fBuffer->VecCount();
	while (fVecIndex < vecCount
			&& vecs[fVecIndex].length - fVecOffset <= bySize) {
		bySize -= vecs[fVecIndex].length - fVecOffset;
		fVecOffset = 0;
		fVecIndex++;
	}

	fVecOffset += bySize;
}
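
/*	Editor's example (not in the original source): with two vecs of 512 bytes
	each and (fVecIndex, fVecOffset) = (0, 0), Advance(600) consumes all of
	vec 0 (bySize 600 -> 88), stops the loop at vec 1, and leaves the
	iteration state at (1, 88); fRemainingBytes shrinks and fTransferSize
	grows by 600.
*/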


IORequest*
IORequest::FirstSubRequest()
{
	return dynamic_cast<IORequest*>(fChildren.Head());
}


IORequest*
IORequest::NextSubRequest(IORequest* previous)
{
	if (previous == NULL)
		return NULL;
	return dynamic_cast<IORequest*>(fChildren.GetNext(previous));
}


void
IORequest::AddOperation(IOOperation* operation)
{
	MutexLocker locker(fLock);
	TRACE("IORequest::AddOperation(%p): request: %p\n", operation, this);
	fChildren.Add(operation);
	fPendingChildren++;
}


void
IORequest::RemoveOperation(IOOperation* operation)
{
	MutexLocker locker(fLock);
	fChildren.Remove(operation);
	operation->SetParent(NULL);
}


status_t
IORequest::CopyData(off_t offset, void* buffer, size_t size)
{
	return _CopyData(buffer, offset, size, true);
}


status_t
IORequest::CopyData(const void* buffer, off_t offset, size_t size)
{
	return _CopyData((void*)buffer, offset, size, false);
}
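
/*	Editor's note (not in the original source): the two overloads differ only
	in direction. CopyData(offset, buffer, size) copies request data out into
	`buffer' (copyIn == true in _CopyData()), while the const overload copies
	from `buffer' into the request:

		char block[512];
		request->CopyData(0, block, sizeof(block));	// request -> block
		request->CopyData(block, 0, sizeof(block));	// block -> request
*/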


status_t
IORequest::ClearData(off_t offset, generic_size_t size)
{
	if (size == 0)
		return B_OK;

	if (offset < fOffset || offset + (off_t)size > fOffset + (off_t)fLength) {
		panic("IORequest::ClearData(): invalid range: (%" B_PRIdOFF
			", %" B_PRIuGENADDR ")", offset, size);
		return B_BAD_VALUE;
	}

	// If we can, we directly copy from/to the virtual buffer. The memory is
	// locked in this case.
	status_t (*clearFunction)(generic_addr_t, generic_size_t, team_id);
	if (fBuffer->IsPhysical()) {
		clearFunction = &IORequest::_ClearDataPhysical;
	} else {
		clearFunction = fBuffer->IsUser() && fTeam != team_get_current_team_id()
			? &IORequest::_ClearDataUser : &IORequest::_ClearDataSimple;
	}

	// skip bytes if requested
	generic_io_vec* vecs = fBuffer->Vecs();
	generic_size_t skipBytes = offset - fOffset;
	generic_size_t vecOffset = 0;
	while (skipBytes > 0) {
		if (vecs[0].length > skipBytes) {
			vecOffset = skipBytes;
			break;
		}

		skipBytes -= vecs[0].length;
		vecs++;
	}

	// clear vector-wise
	while (size > 0) {
		generic_size_t toClear = min_c(size, vecs[0].length - vecOffset);
		status_t error = clearFunction(vecs[0].base + vecOffset, toClear,
			fTeam);
		if (error != B_OK)
			return error;

		size -= toClear;
		vecs++;
		vecOffset = 0;
	}

	return B_OK;
}


status_t
IORequest::_CopyData(void* _buffer, off_t offset, size_t size, bool copyIn)
{
	if (size == 0)
		return B_OK;

	uint8* buffer = (uint8*)_buffer;

	if (offset < fOffset || offset + (off_t)size > fOffset + (off_t)fLength) {
		panic("IORequest::_CopyData(): invalid range: (%" B_PRIdOFF ", %lu)",
			offset, size);
		return B_BAD_VALUE;
	}

	// If we can, we directly copy from/to the virtual buffer. The memory is
	// locked in this case.
	status_t (*copyFunction)(void*, generic_addr_t, size_t, team_id, bool);
	if (fBuffer->IsPhysical()) {
		copyFunction = &IORequest::_CopyPhysical;
	} else {
		copyFunction = fBuffer->IsUser() && fTeam != team_get_current_team_id()
			? &IORequest::_CopyUser : &IORequest::_CopySimple;
	}

	// skip bytes if requested
	generic_io_vec* vecs = fBuffer->Vecs();
	generic_size_t skipBytes = offset - fOffset;
	generic_size_t vecOffset = 0;
	while (skipBytes > 0) {
		if (vecs[0].length > skipBytes) {
			vecOffset = skipBytes;
			break;
		}

		skipBytes -= vecs[0].length;
		vecs++;
	}

	// copy vector-wise
	while (size > 0) {
		generic_size_t toCopy = min_c(size, vecs[0].length - vecOffset);
		status_t error = copyFunction(buffer, vecs[0].base + vecOffset, toCopy,
			fTeam, copyIn);
		if (error != B_OK)
			return error;

		buffer += toCopy;
		size -= toCopy;
		vecs++;
		vecOffset = 0;
	}

	return B_OK;
}


/* static */ status_t
IORequest::_CopySimple(void* bounceBuffer, generic_addr_t external, size_t size,
	team_id team, bool copyIn)
{
	TRACE("  IORequest::_CopySimple(%p, %#" B_PRIxGENADDR ", %lu, %d)\n",
		bounceBuffer, external, size, copyIn);
	if (copyIn)
		memcpy(bounceBuffer, (void*)(addr_t)external, size);
	else
		memcpy((void*)(addr_t)external, bounceBuffer, size);
	return B_OK;
}


/* static */ status_t
IORequest::_CopyPhysical(void* bounceBuffer, generic_addr_t external,
	size_t size, team_id team, bool copyIn)
{
	if (copyIn)
		return vm_memcpy_from_physical(bounceBuffer, external, size, false);

	return vm_memcpy_to_physical(external, bounceBuffer, size, false);
}


/* static */ status_t
IORequest::_CopyUser(void* _bounceBuffer, generic_addr_t _external, size_t size,
	team_id team, bool copyIn)
{
	uint8* bounceBuffer = (uint8*)_bounceBuffer;
	uint8* external = (uint8*)(addr_t)_external;

	while (size > 0) {
		static const int32 kEntryCount = 8;
		physical_entry entries[kEntryCount];

		uint32 count = kEntryCount;
		status_t error = get_memory_map_etc(team, external, size, entries,
			&count);
		if (error != B_OK && error != B_BUFFER_OVERFLOW) {
			panic("IORequest::_CopyUser(): Failed to get physical memory for "
				"user memory %p\n", external);
			return B_BAD_ADDRESS;
		}

		for (uint32 i = 0; i < count; i++) {
			const physical_entry& entry = entries[i];
			error = _CopyPhysical(bounceBuffer, entry.address, entry.size, team,
				copyIn);
			if (error != B_OK)
				return error;

			size -= entry.size;
			bounceBuffer += entry.size;
			external += entry.size;
		}
	}

	return B_OK;
}


/* static */ status_t
IORequest::_ClearDataSimple(generic_addr_t external, generic_size_t size,
	team_id team)
{
	memset((void*)(addr_t)external, 0, (size_t)size);
	return B_OK;
}


/* static */ status_t
IORequest::_ClearDataPhysical(generic_addr_t external, generic_size_t size,
	team_id team)
{
	return vm_memset_physical((phys_addr_t)external, 0, (phys_size_t)size);
}


/* static */ status_t
IORequest::_ClearDataUser(generic_addr_t _external, generic_size_t size,
	team_id team)
{
	uint8* external = (uint8*)(addr_t)_external;

	while (size > 0) {
		static const int32 kEntryCount = 8;
		physical_entry entries[kEntryCount];

		uint32 count = kEntryCount;
		status_t error = get_memory_map_etc(team, external, size, entries,
			&count);
		if (error != B_OK && error != B_BUFFER_OVERFLOW) {
			panic("IORequest::_ClearDataUser(): Failed to get physical memory "
				"for user memory %p\n", external);
			return B_BAD_ADDRESS;
		}

		for (uint32 i = 0; i < count; i++) {
			const physical_entry& entry = entries[i];
			error = _ClearDataPhysical(entry.address, entry.size, team);
			if (error != B_OK)
				return error;

			size -= entry.size;
			external += entry.size;
		}
	}

	return B_OK;
}


void
IORequest::Dump() const
{
	kprintf("io_request at %p\n", this);

	kprintf("  owner:             %p\n", fOwner);
	kprintf("  parent:            %p\n", fParent);
	kprintf("  status:            %s\n", strerror(fStatus));
	kprintf("  mutex:             %p\n", &fLock);
	kprintf("  IOBuffer:          %p\n", fBuffer);
	kprintf("  offset:            %" B_PRIdOFF "\n", fOffset);
	kprintf("  length:            %" B_PRIuGENADDR "\n", fLength);
	kprintf("  transfer size:     %" B_PRIuGENADDR "\n", fTransferSize);
	kprintf("  relative offset:   %" B_PRIuGENADDR "\n", fRelativeParentOffset);
	kprintf("  pending children:  %" B_PRId32 "\n", fPendingChildren);
	kprintf("  flags:             %#" B_PRIx32 "\n", fFlags);
	kprintf("  team:              %" B_PRId32 "\n", fTeam);
	kprintf("  thread:            %" B_PRId32 "\n", fThread);
	kprintf("  r/w:               %s\n", fIsWrite ? "write" : "read");
	kprintf("  partial transfer:  %s\n", fPartialTransfer ? "yes" : "no");
	kprintf("  finished cvar:     %p\n", &fFinishedCondition);
	kprintf("  iteration:\n");
	kprintf("    vec index:       %" B_PRIu32 "\n", fVecIndex);
	kprintf("    vec offset:      %" B_PRIuGENADDR "\n", fVecOffset);
	kprintf("    remaining bytes: %" B_PRIuGENADDR "\n", fRemainingBytes);
	kprintf("  callbacks:\n");
	kprintf("    finished %p, cookie %p\n", fFinishedCallback, fFinishedCookie);
	kprintf("    iteration %p, cookie %p\n", fIterationCallback,
		fIterationCookie);
	kprintf("  children:\n");

	IORequestChunkList::ConstIterator iterator = fChildren.GetIterator();
	while (iterator.HasNext()) {
		kprintf("    %p\n", iterator.Next());
	}

	set_debug_variable("_parent", (addr_t)fParent);
	set_debug_variable("_mutex", (addr_t)&fLock);
	set_debug_variable("_buffer", (addr_t)fBuffer);
	set_debug_variable("_cvar", (addr_t)&fFinishedCondition);
}