/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */


8 #include "IORequest.h"
10 #include <string.h>
12 #include <arch/debug.h>
13 #include <debug.h>
14 #include <heap.h>
15 #include <kernel.h>
16 #include <team.h>
17 #include <thread.h>
18 #include <util/AutoLock.h>
19 #include <vm/vm.h>
20 #include <vm/VMAddressSpace.h>
22 #include "dma_resources.h"
//#define TRACE_IO_REQUEST
#ifdef TRACE_IO_REQUEST
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


// partial I/O operation phases
enum {
	PHASE_READ_BEGIN	= 0,
	PHASE_READ_END		= 1,
	PHASE_DO_ALL		= 2
};

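// How a bounce-buffered partial write walks through these phases (a
// descriptive sketch of the logic in IOOperation::Finish(), not normative
// documentation): first the block containing the unaligned start is read
// into the bounce buffer (PHASE_READ_BEGIN), then, if needed, the block
// containing the unaligned end (PHASE_READ_END); the new data is merged in,
// and the actual write happens in PHASE_DO_ALL. Reads and fully aligned
// writes go straight to PHASE_DO_ALL.

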
// #pragma mark -


IORequestChunk::IORequestChunk()
	:
	fParent(NULL),
	fStatus(1)
{
}


IORequestChunk::~IORequestChunk()
{
}


// #pragma mark -


struct virtual_vec_cookie {
	uint32			vec_index;
	generic_size_t	vec_offset;
	area_id			mapped_area;
	void*			physical_page_handle;
	addr_t			virtual_address;
};


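/*!	Allocates an IOBuffer with room for \a count vecs. The vecs are stored in
	the trailing fVecs array; the "count - 1" in the size computation below
	suggests that array is declared with a single element, so this is the
	pre-C99 flexible-array-member idiom (over-allocate and index past the
	declared end).
*/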
IOBuffer*
IOBuffer::Create(uint32 count, bool vip)
{
	size_t size = sizeof(IOBuffer) + sizeof(generic_io_vec) * (count - 1);
	IOBuffer* buffer
		= (IOBuffer*)(malloc_etc(size, vip ? HEAP_PRIORITY_VIP : 0));
	if (buffer == NULL)
		return NULL;

	buffer->fCapacity = count;
	buffer->fVecCount = 0;
	buffer->fUser = false;
	buffer->fPhysical = false;
	buffer->fVIP = vip;
	buffer->fMemoryLocked = false;

	return buffer;
}


void
IOBuffer::Delete()
{
	if (this == NULL)
		return;

	free_etc(this, fVIP ? HEAP_PRIORITY_VIP : 0);
}


void
IOBuffer::SetVecs(generic_size_t firstVecOffset, const generic_io_vec* vecs,
	uint32 count, generic_size_t length, uint32 flags)
{
	memcpy(fVecs, vecs, sizeof(generic_io_vec) * count);

	if (count > 0 && firstVecOffset > 0) {
		fVecs[0].base += firstVecOffset;
		fVecs[0].length -= firstVecOffset;
	}

	fVecCount = count;
	fLength = length;
	fPhysical = (flags & B_PHYSICAL_IO_REQUEST) != 0;
	fUser = !fPhysical && IS_USER_ADDRESS(vecs[0].base);
}


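/*!	Returns the next part of the buffer as a kernel-addressable iovec,
	mapping physical vecs as needed. On the first call pass \a _cookie as
	NULL; the method allocates the iteration state itself. A minimal usage
	sketch (hypothetical caller; consume() is made up, error handling
	elided):

		void* cookie = NULL;
		iovec vec;
		while (buffer->GetNextVirtualVec(cookie, vec) == B_OK)
			consume(vec.iov_base, vec.iov_len);
		buffer->FreeVirtualVecCookie(cookie);
*/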
status_t
IOBuffer::GetNextVirtualVec(void*& _cookie, iovec& vector)
{
	virtual_vec_cookie* cookie = (virtual_vec_cookie*)_cookie;
	if (cookie == NULL) {
		cookie = new(malloc_flags(fVIP ? HEAP_PRIORITY_VIP : 0))
			virtual_vec_cookie;
		if (cookie == NULL)
			return B_NO_MEMORY;

		cookie->vec_index = 0;
		cookie->vec_offset = 0;
		cookie->mapped_area = -1;
		cookie->physical_page_handle = NULL;
		cookie->virtual_address = 0;
		_cookie = cookie;
	}

	// recycle a potentially previously mapped page
	if (cookie->physical_page_handle != NULL) {
		// TODO: This check is invalid! The physical page mapper is not
		// required to return a non-NULL handle (the generic implementation
		// does not)!
		vm_put_physical_page(cookie->virtual_address,
			cookie->physical_page_handle);
	}

	if (cookie->vec_index >= fVecCount)
		return B_BAD_INDEX;

	if (!fPhysical) {
		vector.iov_base = (void*)(addr_t)fVecs[cookie->vec_index].base;
		vector.iov_len = fVecs[cookie->vec_index++].length;
		return B_OK;
	}

	if (cookie->vec_index == 0
		&& (fVecCount > 1 || fVecs[0].length > B_PAGE_SIZE)) {
		void* mappedAddress;
		addr_t mappedSize;

		// TODO: This is a potential violation of the VIP requirement, since
		// vm_map_physical_memory_vecs() allocates memory without special
		// flags!
		cookie->mapped_area = vm_map_physical_memory_vecs(
			VMAddressSpace::KernelID(), "io buffer mapped physical vecs",
			&mappedAddress, B_ANY_KERNEL_ADDRESS, &mappedSize,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, fVecs, fVecCount);

		if (cookie->mapped_area >= 0) {
			vector.iov_base = mappedAddress;
			vector.iov_len = mappedSize;
			return B_OK;
		} else {
			ktrace_printf("failed to map area: %s\n",
				strerror(cookie->mapped_area));
		}
	}

	// fall back to page-wise mapping
	generic_io_vec& currentVec = fVecs[cookie->vec_index];
	generic_addr_t address = currentVec.base + cookie->vec_offset;
	size_t pageOffset = address % B_PAGE_SIZE;

	// TODO: This is a potential violation of the VIP requirement, since
	// vm_get_physical_page() may allocate memory without special flags!
	status_t result = vm_get_physical_page(address - pageOffset,
		&cookie->virtual_address, &cookie->physical_page_handle);
	if (result != B_OK)
		return result;

	generic_size_t length = min_c(currentVec.length - cookie->vec_offset,
		B_PAGE_SIZE - pageOffset);

	vector.iov_base = (void*)(cookie->virtual_address + pageOffset);
	vector.iov_len = length;

	cookie->vec_offset += length;
	if (cookie->vec_offset >= currentVec.length) {
		cookie->vec_index++;
		cookie->vec_offset = 0;
	}

	return B_OK;
}


void
IOBuffer::FreeVirtualVecCookie(void* _cookie)
{
	virtual_vec_cookie* cookie = (virtual_vec_cookie*)_cookie;
	if (cookie->mapped_area >= 0)
		delete_area(cookie->mapped_area);
	// TODO: A vm_get_physical_page() may still be unmatched!

	free_etc(cookie, fVIP ? HEAP_PRIORITY_VIP : 0);
}


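/*!	Wires the buffer's memory for device I/O via lock_memory_etc(). The call
	is all-or-nothing: if locking any vec fails, the vecs locked so far are
	unlocked again before the error is returned.
*/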
status_t
IOBuffer::LockMemory(team_id team, bool isWrite)
{
	if (fMemoryLocked) {
		panic("memory already locked!");
		return B_BAD_VALUE;
	}

	for (uint32 i = 0; i < fVecCount; i++) {
		status_t status = lock_memory_etc(team, (void*)(addr_t)fVecs[i].base,
			fVecs[i].length, isWrite ? 0 : B_READ_DEVICE);
		if (status != B_OK) {
			_UnlockMemory(team, i, isWrite);
			return status;
		}
	}

	fMemoryLocked = true;
	return B_OK;
}


void
IOBuffer::_UnlockMemory(team_id team, size_t count, bool isWrite)
{
	for (uint32 i = 0; i < count; i++) {
		unlock_memory_etc(team, (void*)(addr_t)fVecs[i].base, fVecs[i].length,
			isWrite ? 0 : B_READ_DEVICE);
	}
}


void
IOBuffer::UnlockMemory(team_id team, bool isWrite)
{
	if (!fMemoryLocked) {
		panic("memory not locked");
		return;
	}

	_UnlockMemory(team, fVecCount, isWrite);
	fMemoryLocked = false;
}


void
IOBuffer::Dump() const
{
	kprintf("IOBuffer at %p\n", this);

	kprintf("  origin:     %s\n", fUser ? "user" : "kernel");
	kprintf("  kind:       %s\n", fPhysical ? "physical" : "virtual");
	kprintf("  length:     %" B_PRIuGENADDR "\n", fLength);
	kprintf("  capacity:   %" B_PRIuSIZE "\n", fCapacity);
	kprintf("  vecs:       %" B_PRIuSIZE "\n", fVecCount);

	for (uint32 i = 0; i < fVecCount; i++) {
		kprintf("    [%" B_PRIu32 "] %#" B_PRIxGENADDR ", %" B_PRIuGENADDR "\n",
			i, fVecs[i].base, fVecs[i].length);
	}
}


// #pragma mark -


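/*!	Invoked when this operation's transfer has completed. Returns true if the
	operation is finished for good, false if a follow-up phase has been
	prepared and the operation needs to be executed again (see the phase
	overview near the enum at the top of the file).
*/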
bool
IOOperation::Finish()
{
	TRACE("IOOperation::Finish()\n");
	if (fStatus == B_OK) {
		if (fParent->IsWrite()) {
			TRACE("  is write\n");
			if (fPhase == PHASE_READ_BEGIN) {
				TRACE("  phase read begin\n");
				// repair phase adjusted vec
				fDMABuffer->VecAt(fSavedVecIndex).length = fSavedVecLength;

				// partial write: copy partial begin to bounce buffer
				bool skipReadEndPhase;
				status_t error = _CopyPartialBegin(true, skipReadEndPhase);
				if (error == B_OK) {
					// We're done with the first phase only (read in begin).
					// Get ready for next phase...
					fPhase = HasPartialEnd() && !skipReadEndPhase
						? PHASE_READ_END : PHASE_DO_ALL;
					_PrepareVecs();
					ResetStatus();
						// TODO: Is there a race condition, if the request is
						// aborted at the same time?
					return false;
				}

				SetStatus(error);
			} else if (fPhase == PHASE_READ_END) {
				TRACE("  phase read end\n");
				// repair phase adjusted vec
				generic_io_vec& vec = fDMABuffer->VecAt(fSavedVecIndex);
				vec.base += vec.length - fSavedVecLength;
				vec.length = fSavedVecLength;

				// partial write: copy partial end to bounce buffer
				status_t error = _CopyPartialEnd(true);
				if (error == B_OK) {
					// We're done with the second phase only (read in end).
					// Get ready for next phase...
					fPhase = PHASE_DO_ALL;
					ResetStatus();
						// TODO: Is there a race condition, if the request is
						// aborted at the same time?
					return false;
				}

				SetStatus(error);
			}
		}
	}

	if (fParent->IsRead() && UsesBounceBuffer()) {
		TRACE("  read with bounce buffer\n");
		// copy the bounce buffer segments to the final location
		uint8* bounceBuffer = (uint8*)fDMABuffer->BounceBufferAddress();
		phys_addr_t bounceBufferStart
			= fDMABuffer->PhysicalBounceBufferAddress();
		phys_addr_t bounceBufferEnd = bounceBufferStart
			+ fDMABuffer->BounceBufferSize();

		const generic_io_vec* vecs = fDMABuffer->Vecs();
		uint32 vecCount = fDMABuffer->VecCount();

		status_t error = B_OK;

		// We iterate through the vecs we have read, moving offset (the device
		// offset) as we go. If [offset, offset + vec.length) intersects with
		// [startOffset, endOffset) we copy to the final location.
		off_t offset = fOffset;
		const off_t startOffset = fOriginalOffset;
		const off_t endOffset = fOriginalOffset + fOriginalLength;

		for (uint32 i = 0; error == B_OK && i < vecCount; i++) {
			const generic_io_vec& vec = vecs[i];
			generic_addr_t base = vec.base;
			generic_size_t length = vec.length;

			if (offset < startOffset) {
				// If the complete vector is before the start offset, skip it.
				if (offset + (off_t)length <= startOffset) {
					offset += length;
					continue;
				}

				// The vector starts before the start offset, but intersects
				// with it. Skip the part we aren't interested in.
				generic_size_t diff = startOffset - offset;
				offset += diff;
				base += diff;
				length -= diff;
			}

			if (offset + (off_t)length > endOffset) {
				// If we're already beyond the end offset, we're done.
				if (offset >= endOffset)
					break;

				// The vector extends beyond the end offset -- cut it.
				length = endOffset - offset;
			}

			if (base >= bounceBufferStart && base < bounceBufferEnd) {
				error = fParent->CopyData(
					bounceBuffer + (base - bounceBufferStart), offset, length);
			}

			offset += length;
		}

		if (error != B_OK)
			SetStatus(error);
	}

	return true;
}


/*!	Note: SetPartial() must be called first!
*/
status_t
IOOperation::Prepare(IORequest* request)
{
	if (fParent != NULL)
		fParent->RemoveOperation(this);

	fParent = request;

	fTransferredBytes = 0;

	// set initial phase
	fPhase = PHASE_DO_ALL;
	if (fParent->IsWrite()) {
		// Copy data to bounce buffer segments, save the partial begin/end vec,
		// which will be copied after their respective read phase.
		if (UsesBounceBuffer()) {
			TRACE("  write with bounce buffer\n");
			uint8* bounceBuffer = (uint8*)fDMABuffer->BounceBufferAddress();
			phys_addr_t bounceBufferStart
				= fDMABuffer->PhysicalBounceBufferAddress();
			phys_addr_t bounceBufferEnd = bounceBufferStart
				+ fDMABuffer->BounceBufferSize();

			const generic_io_vec* vecs = fDMABuffer->Vecs();
			uint32 vecCount = fDMABuffer->VecCount();
			generic_size_t vecOffset = 0;
			uint32 i = 0;

			off_t offset = fOffset;
			off_t endOffset = fOffset + fLength;

			if (HasPartialBegin()) {
				// skip first block
				generic_size_t toSkip = fBlockSize;
				while (toSkip > 0) {
					if (vecs[i].length <= toSkip) {
						toSkip -= vecs[i].length;
						i++;
					} else {
						vecOffset = toSkip;
						break;
					}
				}

				offset += fBlockSize;
			}

			if (HasPartialEnd()) {
				// skip last block
				generic_size_t toSkip = fBlockSize;
				while (toSkip > 0) {
					if (vecs[vecCount - 1].length <= toSkip) {
						toSkip -= vecs[vecCount - 1].length;
						vecCount--;
					} else
						break;
				}

				endOffset -= fBlockSize;
			}

			for (; i < vecCount; i++) {
				const generic_io_vec& vec = vecs[i];
				generic_addr_t base = vec.base + vecOffset;
				generic_size_t length = vec.length - vecOffset;
				vecOffset = 0;

				if (base >= bounceBufferStart && base < bounceBufferEnd) {
					if (offset + (off_t)length > endOffset)
						length = endOffset - offset;
					status_t error = fParent->CopyData(offset,
						bounceBuffer + (base - bounceBufferStart), length);
					if (error != B_OK)
						return error;
				}

				offset += length;
			}
		}

		if (HasPartialBegin())
			fPhase = PHASE_READ_BEGIN;
		else if (HasPartialEnd())
			fPhase = PHASE_READ_END;

		_PrepareVecs();
	}

	ResetStatus();

	if (fParent != NULL)
		fParent->AddOperation(this);

	return B_OK;
}


void
IOOperation::SetOriginalRange(off_t offset, generic_size_t length)
{
	fOriginalOffset = fOffset = offset;
	fOriginalLength = fLength = length;
}


void
IOOperation::SetRange(off_t offset, generic_size_t length)
{
	fOffset = offset;
	fLength = length;
}


off_t
IOOperation::Offset() const
{
	return fPhase == PHASE_READ_END ? fOffset + fLength - fBlockSize : fOffset;
}


generic_size_t
IOOperation::Length() const
{
	return fPhase == PHASE_DO_ALL ? fLength : fBlockSize;
}


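// A worked example of the phase-dependent range reported by the two
// accessors above (illustrative numbers only): with fOffset == 0,
// fLength == 2048 and a block size of 512, PHASE_READ_BEGIN covers
// [0, 512), PHASE_READ_END covers [1536, 2048), and PHASE_DO_ALL covers the
// full [0, 2048).

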
generic_io_vec*
IOOperation::Vecs() const
{
	switch (fPhase) {
		case PHASE_READ_END:
			return fDMABuffer->Vecs() + fSavedVecIndex;
		case PHASE_READ_BEGIN:
		case PHASE_DO_ALL:
		default:
			return fDMABuffer->Vecs();
	}
}


uint32
IOOperation::VecCount() const
{
	switch (fPhase) {
		case PHASE_READ_BEGIN:
			return fSavedVecIndex + 1;
		case PHASE_READ_END:
			return fDMABuffer->VecCount() - fSavedVecIndex;
		case PHASE_DO_ALL:
		default:
			return fDMABuffer->VecCount();
	}
}


void
IOOperation::SetPartial(bool partialBegin, bool partialEnd)
{
	TRACE("partial begin %d, end %d\n", partialBegin, partialEnd);
	fPartialBegin = partialBegin;
	fPartialEnd = partialEnd;
}


bool
IOOperation::IsWrite() const
{
	return fParent->IsWrite() && fPhase == PHASE_DO_ALL;
}


bool
IOOperation::IsRead() const
{
	return fParent->IsRead();
}


void
IOOperation::_PrepareVecs()
{
	// we need to prepare the vecs for consumption by the drivers
	if (fPhase == PHASE_READ_BEGIN) {
		generic_io_vec* vecs = fDMABuffer->Vecs();
		uint32 vecCount = fDMABuffer->VecCount();
		generic_size_t vecLength = fBlockSize;
		for (uint32 i = 0; i < vecCount; i++) {
			generic_io_vec& vec = vecs[i];
			if (vec.length >= vecLength) {
				fSavedVecIndex = i;
				fSavedVecLength = vec.length;
				vec.length = vecLength;
				break;
			}

			vecLength -= vec.length;
		}
	} else if (fPhase == PHASE_READ_END) {
		generic_io_vec* vecs = fDMABuffer->Vecs();
		uint32 vecCount = fDMABuffer->VecCount();
		generic_size_t vecLength = fBlockSize;
		for (int32 i = vecCount - 1; i >= 0; i--) {
			generic_io_vec& vec = vecs[i];
			if (vec.length >= vecLength) {
				fSavedVecIndex = i;
				fSavedVecLength = vec.length;
				vec.base += vec.length - vecLength;
				vec.length = vecLength;
				break;
			}

			vecLength -= vec.length;
		}
	}
}


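/*!	Copies the leading partial block between the request data and the bounce
	buffer. A worked example (illustrative numbers only): with a block size
	of 512, fOffset == 512, and OriginalOffset() == 700, relativeOffset is
	188 and at most 324 bytes are copied. If the original transfer fits
	entirely within this first block, \a singleBlockOnly is set so the caller
	can skip the read-end phase.
*/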
status_t
IOOperation::_CopyPartialBegin(bool isWrite, bool& singleBlockOnly)
{
	generic_size_t relativeOffset = OriginalOffset() - fOffset;
	generic_size_t length = fBlockSize - relativeOffset;

	singleBlockOnly = length >= OriginalLength();
	if (singleBlockOnly)
		length = OriginalLength();

	TRACE("_CopyPartialBegin(%s, single only %d)\n",
		isWrite ? "write" : "read", singleBlockOnly);

	if (isWrite) {
		return fParent->CopyData(OriginalOffset(),
			(uint8*)fDMABuffer->BounceBufferAddress() + relativeOffset, length);
	} else {
		return fParent->CopyData(
			(uint8*)fDMABuffer->BounceBufferAddress() + relativeOffset,
			OriginalOffset(), length);
	}
}


status_t
IOOperation::_CopyPartialEnd(bool isWrite)
{
	TRACE("_CopyPartialEnd(%s)\n", isWrite ? "write" : "read");

	const generic_io_vec& lastVec
		= fDMABuffer->VecAt(fDMABuffer->VecCount() - 1);
	off_t lastVecPos = fOffset + fLength - fBlockSize;
	uint8* base = (uint8*)fDMABuffer->BounceBufferAddress()
		+ (lastVec.base + lastVec.length - fBlockSize
			- fDMABuffer->PhysicalBounceBufferAddress());
		// NOTE: this won't work if we don't use the bounce buffer contiguously
		// (because of boundary alignments).
	generic_size_t length = OriginalOffset() + OriginalLength() - lastVecPos;

	if (isWrite)
		return fParent->CopyData(lastVecPos, base, length);

	return fParent->CopyData(base, lastVecPos, length);
}


void
IOOperation::Dump() const
{
	kprintf("io_operation at %p\n", this);

	kprintf("  parent:           %p\n", fParent);
	kprintf("  status:           %s\n", strerror(fStatus));
	kprintf("  dma buffer:       %p\n", fDMABuffer);
	kprintf("  offset:           %-8" B_PRIdOFF " (original: %" B_PRIdOFF ")\n",
		fOffset, fOriginalOffset);
	kprintf("  length:           %-8" B_PRIuGENADDR " (original: %"
		B_PRIuGENADDR ")\n", fLength, fOriginalLength);
	kprintf("  transferred:      %" B_PRIuGENADDR "\n", fTransferredBytes);
	kprintf("  block size:       %" B_PRIuGENADDR "\n", fBlockSize);
	kprintf("  saved vec index:  %u\n", fSavedVecIndex);
	kprintf("  saved vec length: %u\n", fSavedVecLength);
	kprintf("  r/w:              %s\n", IsWrite() ? "write" : "read");
	kprintf("  phase:            %s\n", fPhase == PHASE_READ_BEGIN
		? "read begin" : fPhase == PHASE_READ_END ? "read end"
		: fPhase == PHASE_DO_ALL ? "do all" : "unknown");
	kprintf("  partial begin:    %s\n", fPartialBegin ? "yes" : "no");
	kprintf("  partial end:      %s\n", fPartialEnd ? "yes" : "no");
	kprintf("  bounce buffer:    %s\n", fUsesBounceBuffer ? "yes" : "no");

	set_debug_variable("_parent", (addr_t)fParent);
	set_debug_variable("_buffer", (addr_t)fDMABuffer);
}


// #pragma mark -


IORequest::IORequest()
	:
	fIsNotified(false),
	fFinishedCallback(NULL),
	fFinishedCookie(NULL),
	fIterationCallback(NULL),
	fIterationCookie(NULL)
{
	mutex_init(&fLock, "I/O request lock");
	fFinishedCondition.Init(this, "I/O request finished");
}


IORequest::~IORequest()
{
	mutex_lock(&fLock);
	DeleteSubRequests();
	fBuffer->Delete();
	mutex_destroy(&fLock);
}


/* static */ IORequest*
IORequest::Create(bool vip)
{
	return vip
		? new(malloc_flags(HEAP_PRIORITY_VIP)) IORequest
		: new(std::nothrow) IORequest;
}


status_t
IORequest::Init(off_t offset, generic_addr_t buffer, generic_size_t length,
	bool write, uint32 flags)
{
	ASSERT(offset >= 0);

	generic_io_vec vec;
	vec.base = buffer;
	vec.length = length;
	return Init(offset, &vec, 1, length, write, flags);
}


status_t
IORequest::Init(off_t offset, generic_size_t firstVecOffset,
	const generic_io_vec* vecs, size_t count, generic_size_t length, bool write,
	uint32 flags)
{
	ASSERT(offset >= 0);

	fBuffer = IOBuffer::Create(count, (flags & B_VIP_IO_REQUEST) != 0);
	if (fBuffer == NULL)
		return B_NO_MEMORY;

	fBuffer->SetVecs(firstVecOffset, vecs, count, length, flags);

	fOwner = NULL;
	fOffset = offset;
	fLength = length;
	fRelativeParentOffset = 0;
	fTransferSize = 0;
	fFlags = flags;
	Thread* thread = thread_get_current_thread();
	fTeam = thread->team->id;
	fThread = thread->id;
	fIsWrite = write;
	fPartialTransfer = false;
	fSuppressChildNotifications = false;

	// these are for iteration
	fVecIndex = 0;
	fVecOffset = 0;
	fRemainingBytes = length;

	fPendingChildren = 0;

	fStatus = 1;

	return B_OK;
}


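/*!	Creates a sub request covering the byte range starting at
	\a parentOffset (in this request's own offset space) with the given
	\a length, but performing its I/O at device offset \a offset; the two may
	differ, e.g. when offsets are translated for a partition. The sub request
	references the appropriate slice of this request's vecs and inherits
	team, thread, and flags (minus B_DELETE_IO_REQUEST).
*/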
status_t
IORequest::CreateSubRequest(off_t parentOffset, off_t offset,
	generic_size_t length, IORequest*& _subRequest)
{
	ASSERT(parentOffset >= fOffset && length <= fLength
		&& parentOffset - fOffset <= (off_t)(fLength - length));

	// find start vec
	generic_size_t vecOffset = parentOffset - fOffset;
	generic_io_vec* vecs = fBuffer->Vecs();
	int32 vecCount = fBuffer->VecCount();
	int32 startVec = 0;
	for (; startVec < vecCount; startVec++) {
		const generic_io_vec& vec = vecs[startVec];
		if (vecOffset < vec.length)
			break;

		vecOffset -= vec.length;
	}

	// count vecs
	generic_size_t currentVecOffset = vecOffset;
	int32 endVec = startVec;
	generic_size_t remainingLength = length;
	for (; endVec < vecCount; endVec++) {
		const generic_io_vec& vec = vecs[endVec];
		if (vec.length - currentVecOffset >= remainingLength)
			break;

		remainingLength -= vec.length - currentVecOffset;
		currentVecOffset = 0;
	}

	// create subrequest
	IORequest* subRequest = Create((fFlags & B_VIP_IO_REQUEST) != 0);
	if (subRequest == NULL)
		return B_NO_MEMORY;

	status_t error = subRequest->Init(offset, vecOffset, vecs + startVec,
		endVec - startVec + 1, length, fIsWrite, fFlags & ~B_DELETE_IO_REQUEST);
	if (error != B_OK) {
		delete subRequest;
		return error;
	}

	subRequest->fRelativeParentOffset = parentOffset - fOffset;
	subRequest->fTeam = fTeam;
	subRequest->fThread = fThread;

	_subRequest = subRequest;
	subRequest->SetParent(this);

	MutexLocker _(fLock);

	fChildren.Add(subRequest);
	fPendingChildren++;
	TRACE("IORequest::CreateSubRequest(): request: %p, subrequest: %p\n", this,
		subRequest);

	return B_OK;
}


void
IORequest::DeleteSubRequests()
{
	while (IORequestChunk* chunk = fChildren.RemoveHead())
		delete chunk;
	fPendingChildren = 0;
}


void
IORequest::SetFinishedCallback(io_request_finished_callback callback,
	void* cookie)
{
	fFinishedCallback = callback;
	fFinishedCookie = cookie;
}


void
IORequest::SetIterationCallback(io_request_iterate_callback callback,
	void* cookie)
{
	fIterationCallback = callback;
	fIterationCookie = cookie;
}


io_request_finished_callback
IORequest::FinishedCallback(void** _cookie) const
{
	if (_cookie != NULL)
		*_cookie = fFinishedCookie;
	return fFinishedCallback;
}


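/*!	Blocks until the request is finished and notified. A minimal usage
	sketch (hypothetical: error handling elided, and "scheduler" stands in
	for whatever component actually executes the request):

		IORequest request;
		request.Init(offset, bufferAddress, length, false, 0);
		scheduler->ScheduleRequest(&request);
		status_t status = request.Wait(0, 0);
*/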
status_t
IORequest::Wait(uint32 flags, bigtime_t timeout)
{
	MutexLocker locker(fLock);

	if (IsFinished() && fIsNotified)
		return Status();

	ConditionVariableEntry entry;
	fFinishedCondition.Add(&entry);

	locker.Unlock();

	status_t error = entry.Wait(flags, timeout);
	if (error != B_OK)
		return error;

	return Status();
}


void
IORequest::NotifyFinished()
{
	TRACE("IORequest::NotifyFinished(): request: %p\n", this);

	MutexLocker locker(fLock);

	if (fStatus == B_OK && !fPartialTransfer && RemainingBytes() > 0) {
		// The request is not really done yet. If it has an iteration callback,
		// call it.
		if (fIterationCallback != NULL) {
			ResetStatus();
			locker.Unlock();
			bool partialTransfer = false;
			status_t error = fIterationCallback(fIterationCookie, this,
				&partialTransfer);
			if (error == B_OK && !partialTransfer)
				return;

			// Iteration failed, which means we're responsible for notifying
			// that the request is finished.
			locker.Lock();
			fStatus = error;
			fPartialTransfer = true;
		}
	}

	ASSERT(!fIsNotified);
	ASSERT(fPendingChildren == 0);
	ASSERT(fChildren.IsEmpty()
		|| dynamic_cast<IOOperation*>(fChildren.Head()) == NULL);

	// unlock the memory
	if (fBuffer->IsMemoryLocked())
		fBuffer->UnlockMemory(fTeam, fIsWrite);

	// Cache the callbacks before we unblock waiters and unlock. Any of the
	// following could delete this request, so we don't want to touch it
	// once we have started telling others that it is done.
	IORequest* parent = fParent;
	io_request_finished_callback finishedCallback = fFinishedCallback;
	void* finishedCookie = fFinishedCookie;
	status_t status = fStatus;
	generic_size_t lastTransferredOffset
		= fRelativeParentOffset + fTransferSize;
	bool partialTransfer = status != B_OK || fPartialTransfer;
	bool deleteRequest = (fFlags & B_DELETE_IO_REQUEST) != 0;

	// unblock waiters
	fIsNotified = true;
	fFinishedCondition.NotifyAll();

	locker.Unlock();

	// notify callback
	if (finishedCallback != NULL) {
		finishedCallback(finishedCookie, this, status, partialTransfer,
			lastTransferredOffset);
	}

	// notify parent
	if (parent != NULL) {
		parent->SubRequestFinished(this, status, partialTransfer,
			lastTransferredOffset);
	}

	if (deleteRequest)
		delete this;
}


/*!	Returns whether this request or any of its ancestors has a finished or
	notification callback. Used to decide whether NotifyFinished() can be
	called synchronously.
*/
bool
IORequest::HasCallbacks() const
{
	if (fFinishedCallback != NULL || fIterationCallback != NULL)
		return true;

	return fParent != NULL && fParent->HasCallbacks();
}


void
IORequest::SetStatusAndNotify(status_t status)
{
	MutexLocker locker(fLock);

	if (fStatus != 1)
		return;

	fStatus = status;

	locker.Unlock();

	NotifyFinished();
}


void
IORequest::OperationFinished(IOOperation* operation, status_t status,
	bool partialTransfer, generic_size_t transferEndOffset)
{
	TRACE("IORequest::OperationFinished(%p, %#" B_PRIx32 "): request: %p\n",
		operation, status, this);

	MutexLocker locker(fLock);

	fChildren.Remove(operation);
	operation->SetParent(NULL);

	if (status != B_OK || partialTransfer) {
		if (fTransferSize > transferEndOffset)
			fTransferSize = transferEndOffset;
		fPartialTransfer = true;
	}

	if (status != B_OK && fStatus == 1)
		fStatus = status;

	if (--fPendingChildren > 0)
		return;

	// last child finished

	// set status, if not done yet
	if (fStatus == 1)
		fStatus = B_OK;
}


void
IORequest::SubRequestFinished(IORequest* request, status_t status,
	bool partialTransfer, generic_size_t transferEndOffset)
{
	TRACE("IORequest::SubrequestFinished(%p, %#" B_PRIx32 ", %d, %"
		B_PRIuGENADDR "): request: %p\n", request, status, partialTransfer,
		transferEndOffset, this);

	MutexLocker locker(fLock);

	if (status != B_OK || partialTransfer) {
		if (fTransferSize > transferEndOffset)
			fTransferSize = transferEndOffset;
		fPartialTransfer = true;
	}

	if (status != B_OK && fStatus == 1)
		fStatus = status;

	if (--fPendingChildren > 0 || fSuppressChildNotifications)
		return;

	// last child finished

	// set status, if not done yet
	if (fStatus == 1)
		fStatus = B_OK;

	locker.Unlock();

	NotifyFinished();
}


void
IORequest::SetUnfinished()
{
	MutexLocker _(fLock);
	ResetStatus();
}


void
IORequest::SetTransferredBytes(bool partialTransfer,
	generic_size_t transferredBytes)
{
	TRACE("%p->IORequest::SetTransferredBytes(%d, %" B_PRIuGENADDR ")\n", this,
		partialTransfer, transferredBytes);

	MutexLocker _(fLock);

	fPartialTransfer = partialTransfer;
	fTransferSize = transferredBytes;
}


void
IORequest::SetSuppressChildNotifications(bool suppress)
{
	fSuppressChildNotifications = suppress;
}


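/*!	Advances the request's iteration state by \a bySize bytes:
	fRemainingBytes shrinks, fTransferSize grows, and fVecIndex/fVecOffset
	are moved past all fully consumed vecs into the vec where the next
	transfer starts.
*/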
void
IORequest::Advance(generic_size_t bySize)
{
	TRACE("IORequest::Advance(%" B_PRIuGENADDR "): remaining: %" B_PRIuGENADDR
		" -> %" B_PRIuGENADDR "\n", bySize, fRemainingBytes,
		fRemainingBytes - bySize);
	fRemainingBytes -= bySize;
	fTransferSize += bySize;

	generic_io_vec* vecs = fBuffer->Vecs();
	uint32 vecCount = fBuffer->VecCount();
	while (fVecIndex < vecCount
			&& vecs[fVecIndex].length - fVecOffset <= bySize) {
		bySize -= vecs[fVecIndex].length - fVecOffset;
		fVecOffset = 0;
		fVecIndex++;
	}

	fVecOffset += bySize;
}


IORequest*
IORequest::FirstSubRequest()
{
	return dynamic_cast<IORequest*>(fChildren.Head());
}


IORequest*
IORequest::NextSubRequest(IORequest* previous)
{
	if (previous == NULL)
		return NULL;
	return dynamic_cast<IORequest*>(fChildren.GetNext(previous));
}


void
IORequest::AddOperation(IOOperation* operation)
{
	MutexLocker locker(fLock);
	TRACE("IORequest::AddOperation(%p): request: %p\n", operation, this);
	fChildren.Add(operation);
	fPendingChildren++;
}


void
IORequest::RemoveOperation(IOOperation* operation)
{
	MutexLocker locker(fLock);
	fChildren.Remove(operation);
	operation->SetParent(NULL);
}


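// The two CopyData() overloads below differ only in direction: the
// (offset, buffer) variant copies out of the request's buffer into \a buffer,
// while the (buffer, offset) variant copies from \a buffer into the request.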
status_t
IORequest::CopyData(off_t offset, void* buffer, size_t size)
{
	return _CopyData(buffer, offset, size, true);
}


status_t
IORequest::CopyData(const void* buffer, off_t offset, size_t size)
{
	return _CopyData((void*)buffer, offset, size, false);
}


status_t
IORequest::ClearData(off_t offset, generic_size_t size)
{
	if (size == 0)
		return B_OK;

	if (offset < fOffset || offset + (off_t)size > fOffset + (off_t)fLength) {
		panic("IORequest::ClearData(): invalid range: (%" B_PRIdOFF
			", %" B_PRIuGENADDR ")", offset, size);
		return B_BAD_VALUE;
	}

	// If we can, we directly clear the virtual buffer. The memory is locked
	// in this case.
	status_t (*clearFunction)(generic_addr_t, generic_size_t, team_id);
	if (fBuffer->IsPhysical()) {
		clearFunction = &IORequest::_ClearDataPhysical;
	} else {
		clearFunction = fBuffer->IsUser() && fTeam != team_get_current_team_id()
			? &IORequest::_ClearDataUser : &IORequest::_ClearDataSimple;
	}

	// skip bytes if requested
	generic_io_vec* vecs = fBuffer->Vecs();
	generic_size_t skipBytes = offset - fOffset;
	generic_size_t vecOffset = 0;
	while (skipBytes > 0) {
		if (vecs[0].length > skipBytes) {
			vecOffset = skipBytes;
			break;
		}

		skipBytes -= vecs[0].length;
		vecs++;
	}

	// clear vector-wise
	while (size > 0) {
		generic_size_t toClear = min_c(size, vecs[0].length - vecOffset);
		status_t error = clearFunction(vecs[0].base + vecOffset, toClear,
			fTeam);
		if (error != B_OK)
			return error;

		size -= toClear;
		vecs++;
		vecOffset = 0;
	}

	return B_OK;
}


status_t
IORequest::_CopyData(void* _buffer, off_t offset, size_t size, bool copyIn)
{
	if (size == 0)
		return B_OK;

	uint8* buffer = (uint8*)_buffer;

	if (offset < fOffset || offset + (off_t)size > fOffset + (off_t)fLength) {
		panic("IORequest::_CopyData(): invalid range: (%" B_PRIdOFF ", %lu)",
			offset, size);
		return B_BAD_VALUE;
	}

	// If we can, we directly copy from/to the virtual buffer. The memory is
	// locked in this case.
	status_t (*copyFunction)(void*, generic_addr_t, size_t, team_id, bool);
	if (fBuffer->IsPhysical()) {
		copyFunction = &IORequest::_CopyPhysical;
	} else {
		copyFunction = fBuffer->IsUser() && fTeam != team_get_current_team_id()
			? &IORequest::_CopyUser : &IORequest::_CopySimple;
	}

	// skip bytes if requested
	generic_io_vec* vecs = fBuffer->Vecs();
	generic_size_t skipBytes = offset - fOffset;
	generic_size_t vecOffset = 0;
	while (skipBytes > 0) {
		if (vecs[0].length > skipBytes) {
			vecOffset = skipBytes;
			break;
		}

		skipBytes -= vecs[0].length;
		vecs++;
	}

	// copy vector-wise
	while (size > 0) {
		generic_size_t toCopy = min_c(size, vecs[0].length - vecOffset);
		status_t error = copyFunction(buffer, vecs[0].base + vecOffset, toCopy,
			fTeam, copyIn);
		if (error != B_OK)
			return error;

		buffer += toCopy;
		size -= toCopy;
		vecs++;
		vecOffset = 0;
	}

	return B_OK;
}


/* static */ status_t
IORequest::_CopySimple(void* bounceBuffer, generic_addr_t external, size_t size,
	team_id team, bool copyIn)
{
	TRACE("  IORequest::_CopySimple(%p, %#" B_PRIxGENADDR ", %lu, %d)\n",
		bounceBuffer, external, size, copyIn);
	if (copyIn)
		memcpy(bounceBuffer, (void*)(addr_t)external, size);
	else
		memcpy((void*)(addr_t)external, bounceBuffer, size);
	return B_OK;
}


/* static */ status_t
IORequest::_CopyPhysical(void* bounceBuffer, generic_addr_t external,
	size_t size, team_id team, bool copyIn)
{
	if (copyIn)
		return vm_memcpy_from_physical(bounceBuffer, external, size, false);

	return vm_memcpy_to_physical(external, bounceBuffer, size, false);
}


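/*!	Copies between a kernel-side buffer and user memory that may belong to
	another team: the user range is translated into physical ranges via
	get_memory_map_etc(), up to eight physical_entry items at a time, and
	each entry is copied with _CopyPhysical().
*/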
/* static */ status_t
IORequest::_CopyUser(void* _bounceBuffer, generic_addr_t _external, size_t size,
	team_id team, bool copyIn)
{
	uint8* bounceBuffer = (uint8*)_bounceBuffer;
	uint8* external = (uint8*)(addr_t)_external;

	while (size > 0) {
		static const int32 kEntryCount = 8;
		physical_entry entries[kEntryCount];

		uint32 count = kEntryCount;
		status_t error = get_memory_map_etc(team, external, size, entries,
			&count);
		if (error != B_OK && error != B_BUFFER_OVERFLOW) {
			panic("IORequest::_CopyUser(): Failed to get physical memory for "
				"user memory %p\n", external);
			return B_BAD_ADDRESS;
		}

		for (uint32 i = 0; i < count; i++) {
			const physical_entry& entry = entries[i];
			error = _CopyPhysical(bounceBuffer, entry.address, entry.size,
				team, copyIn);
			if (error != B_OK)
				return error;

			size -= entry.size;
			bounceBuffer += entry.size;
			external += entry.size;
		}
	}

	return B_OK;
}


/*static*/ status_t
IORequest::_ClearDataSimple(generic_addr_t external, generic_size_t size,
	team_id team)
{
	memset((void*)(addr_t)external, 0, (size_t)size);
	return B_OK;
}


/*static*/ status_t
IORequest::_ClearDataPhysical(generic_addr_t external, generic_size_t size,
	team_id team)
{
	return vm_memset_physical((phys_addr_t)external, 0, (phys_size_t)size);
}


/*static*/ status_t
IORequest::_ClearDataUser(generic_addr_t _external, generic_size_t size,
	team_id team)
{
	uint8* external = (uint8*)(addr_t)_external;

	while (size > 0) {
		static const int32 kEntryCount = 8;
		physical_entry entries[kEntryCount];

		uint32 count = kEntryCount;
		status_t error = get_memory_map_etc(team, external, size, entries,
			&count);
		if (error != B_OK && error != B_BUFFER_OVERFLOW) {
			panic("IORequest::_ClearDataUser(): Failed to get physical memory "
				"for user memory %p\n", external);
			return B_BAD_ADDRESS;
		}

		for (uint32 i = 0; i < count; i++) {
			const physical_entry& entry = entries[i];
			error = _ClearDataPhysical(entry.address, entry.size, team);
			if (error != B_OK)
				return error;

			size -= entry.size;
			external += entry.size;
		}
	}

	return B_OK;
}


void
IORequest::Dump() const
{
	kprintf("io_request at %p\n", this);

	kprintf("  owner:             %p\n", fOwner);
	kprintf("  parent:            %p\n", fParent);
	kprintf("  status:            %s\n", strerror(fStatus));
	kprintf("  mutex:             %p\n", &fLock);
	kprintf("  IOBuffer:          %p\n", fBuffer);
	kprintf("  offset:            %" B_PRIdOFF "\n", fOffset);
	kprintf("  length:            %" B_PRIuGENADDR "\n", fLength);
	kprintf("  transfer size:     %" B_PRIuGENADDR "\n", fTransferSize);
	kprintf("  relative offset:   %" B_PRIuGENADDR "\n", fRelativeParentOffset);
	kprintf("  pending children:  %" B_PRId32 "\n", fPendingChildren);
	kprintf("  flags:             %#" B_PRIx32 "\n", fFlags);
	kprintf("  team:              %" B_PRId32 "\n", fTeam);
	kprintf("  thread:            %" B_PRId32 "\n", fThread);
	kprintf("  r/w:               %s\n", fIsWrite ? "write" : "read");
	kprintf("  partial transfer:  %s\n", fPartialTransfer ? "yes" : "no");
	kprintf("  finished cvar:     %p\n", &fFinishedCondition);
	kprintf("  iteration:\n");
	kprintf("    vec index:       %" B_PRIu32 "\n", fVecIndex);
	kprintf("    vec offset:      %" B_PRIuGENADDR "\n", fVecOffset);
	kprintf("    remaining bytes: %" B_PRIuGENADDR "\n", fRemainingBytes);
	kprintf("  callbacks:\n");
	kprintf("    finished %p, cookie %p\n", fFinishedCallback, fFinishedCookie);
	kprintf("    iteration %p, cookie %p\n", fIterationCallback,
		fIterationCookie);
	kprintf("  children:\n");

	IORequestChunkList::ConstIterator iterator = fChildren.GetIterator();
	while (iterator.HasNext()) {
		kprintf("    %p\n", iterator.Next());
	}

	set_debug_variable("_parent", (addr_t)fParent);
	set_debug_variable("_mutex", (addr_t)&fLock);
	set_debug_variable("_buffer", (addr_t)fBuffer);
	set_debug_variable("_cvar", (addr_t)&fFinishedCondition);
}