/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2008-2017, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */


#include "IORequest.h"

#include <string.h>

#include <arch/debug.h>
#include <debug.h>
#include <heap.h>
#include <kernel.h>
#include <team.h>
#include <thread.h>
#include <util/AutoLock.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>

#include "dma_resources.h"


//#define TRACE_IO_REQUEST
#ifdef TRACE_IO_REQUEST
#   define TRACE(x...) dprintf(x)
#else
#   define TRACE(x...) ;
#endif


// partial I/O operation phases
enum {
    PHASE_READ_BEGIN    = 0,
    PHASE_READ_END      = 1,
    PHASE_DO_ALL        = 2
};


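// How the phases play out (summary of the logic in IOOperation::Prepare()
// and IOOperation::Finish() below): a write that is not block-aligned first
// reads the partially covered begin block (PHASE_READ_BEGIN) and, if needed,
// the partially covered end block (PHASE_READ_END) into the bounce buffer,
// merges the caller's data over it, and only then performs the full transfer
// (PHASE_DO_ALL). Block-aligned operations and reads start directly in
// PHASE_DO_ALL.

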
struct virtual_vec_cookie {
    uint32          vec_index;
    generic_size_t  vec_offset;
    area_id         mapped_area;
    void*           physical_page_handle;
    addr_t          virtual_address;

    virtual_vec_cookie()
        :
        vec_index(0),
        vec_offset(0),
        mapped_area(-1),
        physical_page_handle(NULL),
        virtual_address((addr_t)-1)
    {
    }

    void PutPhysicalPageIfNeeded()
    {
        if (virtual_address != (addr_t)-1) {
            vm_put_physical_page(virtual_address, physical_page_handle);
            virtual_address = (addr_t)-1;
        }
    }
};


// #pragma mark -


IORequestChunk::IORequestChunk()
    :
    fParent(NULL),
    fStatus(1)
{
}


IORequestChunk::~IORequestChunk()
{
}


// #pragma mark -


IOBuffer*
IOBuffer::Create(uint32 count, bool vip)
{
    size_t size = sizeof(IOBuffer) + sizeof(generic_io_vec) * (count - 1);
    IOBuffer* buffer
        = (IOBuffer*)(malloc_etc(size, vip ? HEAP_PRIORITY_VIP : 0));
    if (buffer == NULL)
        return NULL;

    buffer->fCapacity = count;
    buffer->fVecCount = 0;
    buffer->fUser = false;
    buffer->fPhysical = false;
    buffer->fVIP = vip;
    buffer->fMemoryLocked = false;

    return buffer;
}


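// Note: the "count - 1" above accounts for the one generic_io_vec already
// included in sizeof(IOBuffer); the vecs live in a trailing array (fVecs)
// that Create() over-allocates. That is also why IOBuffer objects are
// created and destroyed via Create()/Delete() (malloc_etc()/free_etc())
// rather than plain new/delete.

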
void
IOBuffer::Delete()
{
    free_etc(this, fVIP ? HEAP_PRIORITY_VIP : 0);
}


void
IOBuffer::SetVecs(generic_size_t firstVecOffset, const generic_io_vec* vecs,
    uint32 count, generic_size_t length, uint32 flags)
{
    memcpy(fVecs, vecs, sizeof(generic_io_vec) * count);

    if (count > 0 && firstVecOffset > 0) {
        fVecs[0].base += firstVecOffset;
        fVecs[0].length -= firstVecOffset;
    }

    fVecCount = count;
    fLength = length;
    fPhysical = (flags & B_PHYSICAL_IO_REQUEST) != 0;
    fUser = !fPhysical && IS_USER_ADDRESS(vecs[0].base);
}


status_t
IOBuffer::GetNextVirtualVec(void*& _cookie, iovec& vector)
{
    virtual_vec_cookie* cookie = (virtual_vec_cookie*)_cookie;
    if (cookie == NULL) {
        cookie = new(malloc_flags(fVIP ? HEAP_PRIORITY_VIP : 0))
            virtual_vec_cookie;
        if (cookie == NULL)
            return B_NO_MEMORY;

        _cookie = cookie;
    }

    // recycle a potentially previously mapped page
    cookie->PutPhysicalPageIfNeeded();

    if (cookie->vec_index >= fVecCount)
        return B_BAD_INDEX;

    if (!fPhysical) {
        vector.iov_base = (void*)(addr_t)fVecs[cookie->vec_index].base;
        vector.iov_len = fVecs[cookie->vec_index++].length;
        return B_OK;
    }

    if (cookie->vec_index == 0
        && (fVecCount > 1 || fVecs[0].length > B_PAGE_SIZE)) {
        void* mappedAddress;
        addr_t mappedSize;

        // TODO: This is a potential violation of the VIP requirement, since
        // vm_map_physical_memory_vecs() allocates memory without special
        // flags!
        cookie->mapped_area = vm_map_physical_memory_vecs(
            VMAddressSpace::KernelID(), "io buffer mapped physical vecs",
            &mappedAddress, B_ANY_KERNEL_ADDRESS, &mappedSize,
            B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, fVecs, fVecCount);

        if (cookie->mapped_area >= 0) {
            vector.iov_base = mappedAddress;
            vector.iov_len = mappedSize;
            return B_OK;
        }

        ktrace_printf("failed to map area: %s\n",
            strerror(cookie->mapped_area));
    }

    // fall back to page-wise mapping
    generic_io_vec& currentVec = fVecs[cookie->vec_index];
    generic_addr_t address = currentVec.base + cookie->vec_offset;
    size_t pageOffset = address % B_PAGE_SIZE;

    // TODO: This is a potential violation of the VIP requirement, since
    // vm_get_physical_page() may allocate memory without special flags!
    status_t result = vm_get_physical_page(address - pageOffset,
        &cookie->virtual_address, &cookie->physical_page_handle);
    if (result != B_OK)
        return result;

    generic_size_t length = min_c(currentVec.length - cookie->vec_offset,
        B_PAGE_SIZE - pageOffset);

    vector.iov_base = (void*)(cookie->virtual_address + pageOffset);
    vector.iov_len = length;

    cookie->vec_offset += length;
    if (cookie->vec_offset >= currentVec.length) {
        cookie->vec_index++;
        cookie->vec_offset = 0;
    }

    return B_OK;
}


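// Illustrative iteration pattern for GetNextVirtualVec() (sketch only, not
// taken from a caller in this file): pass a NULL cookie on the first call
// and release it afterwards, e.g.:
//
//     void* cookie = NULL;
//     iovec vector;
//     while (buffer->GetNextVirtualVec(cookie, vector) == B_OK) {
//         // process vector.iov_base / vector.iov_len
//     }
//     if (cookie != NULL)
//         buffer->FreeVirtualVecCookie(cookie);

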
void
IOBuffer::FreeVirtualVecCookie(void* _cookie)
{
    virtual_vec_cookie* cookie = (virtual_vec_cookie*)_cookie;
    if (cookie->mapped_area >= 0)
        delete_area(cookie->mapped_area);

    cookie->PutPhysicalPageIfNeeded();

    free_etc(cookie, fVIP ? HEAP_PRIORITY_VIP : 0);
}


status_t
IOBuffer::LockMemory(team_id team, bool isWrite)
{
    if (fMemoryLocked) {
        panic("memory already locked!");
        return B_BAD_VALUE;
    }

    for (uint32 i = 0; i < fVecCount; i++) {
        status_t status = lock_memory_etc(team, (void*)(addr_t)fVecs[i].base,
            fVecs[i].length, isWrite ? 0 : B_READ_DEVICE);
        if (status != B_OK) {
            _UnlockMemory(team, i, isWrite);
            return status;
        }
    }

    fMemoryLocked = true;
    return B_OK;
}


void
IOBuffer::_UnlockMemory(team_id team, size_t count, bool isWrite)
{
    for (uint32 i = 0; i < count; i++) {
        unlock_memory_etc(team, (void*)(addr_t)fVecs[i].base, fVecs[i].length,
            isWrite ? 0 : B_READ_DEVICE);
    }
}


void
IOBuffer::UnlockMemory(team_id team, bool isWrite)
{
    if (!fMemoryLocked) {
        panic("memory not locked");
        return;
    }

    _UnlockMemory(team, fVecCount, isWrite);
    fMemoryLocked = false;
}


void
IOBuffer::Dump() const
{
    kprintf("IOBuffer at %p\n", this);

    kprintf("  origin:   %s\n", fUser ? "user" : "kernel");
    kprintf("  kind:     %s\n", fPhysical ? "physical" : "virtual");
    kprintf("  length:   %" B_PRIuGENADDR "\n", fLength);
    kprintf("  capacity: %" B_PRIuSIZE "\n", fCapacity);
    kprintf("  vecs:     %" B_PRIuSIZE "\n", fVecCount);

    for (uint32 i = 0; i < fVecCount; i++) {
        kprintf("    [%" B_PRIu32 "] %#" B_PRIxGENADDR ", %" B_PRIuGENADDR "\n",
            i, fVecs[i].base, fVecs[i].length);
    }
}


// #pragma mark -


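/*! Finishes the current phase of the operation. Returns true if the
    operation as a whole is done, false if it only completed a partial-write
    read phase (PHASE_READ_BEGIN/PHASE_READ_END); in the latter case the
    status has been reset and the operation needs to be issued again for the
    next phase (re-submission is up to the I/O scheduler using this class).
*/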
bool
IOOperation::Finish()
{
    TRACE("IOOperation::Finish()\n");

    if (fStatus == B_OK) {
        if (fParent->IsWrite()) {
            TRACE("  is write\n");
            if (fPhase == PHASE_READ_BEGIN) {
                TRACE("  phase read begin\n");
                // repair phase adjusted vec
                fDMABuffer->VecAt(fSavedVecIndex).length = fSavedVecLength;

                // partial write: copy partial begin to bounce buffer
                bool skipReadEndPhase;
                status_t error = _CopyPartialBegin(true, skipReadEndPhase);
                if (error == B_OK) {
                    // We're done with the first phase only (read in begin).
                    // Get ready for next phase...
                    fPhase = HasPartialEnd() && !skipReadEndPhase
                        ? PHASE_READ_END : PHASE_DO_ALL;
                    _PrepareVecs();
                    ResetStatus();
                        // TODO: Is there a race condition, if the request is
                        // aborted at the same time?
                    return false;
                }

                SetStatus(error);
            } else if (fPhase == PHASE_READ_END) {
                TRACE("  phase read end\n");
                // repair phase adjusted vec
                generic_io_vec& vec = fDMABuffer->VecAt(fSavedVecIndex);
                vec.base += vec.length - fSavedVecLength;
                vec.length = fSavedVecLength;

                // partial write: copy partial end to bounce buffer
                status_t error = _CopyPartialEnd(true);
                if (error == B_OK) {
                    // We're done with the second phase only (read in end).
                    // Get ready for next phase...
                    fPhase = PHASE_DO_ALL;
                    ResetStatus();
                        // TODO: Is there a race condition, if the request is
                        // aborted at the same time?
                    return false;
                }

                SetStatus(error);
            }
        }
    }

    if (fParent->IsRead() && UsesBounceBuffer()) {
        TRACE("  read with bounce buffer\n");
        // copy the bounce buffer segments to the final location
        uint8* bounceBuffer = (uint8*)fDMABuffer->BounceBufferAddress();
        phys_addr_t bounceBufferStart
            = fDMABuffer->PhysicalBounceBufferAddress();
        phys_addr_t bounceBufferEnd = bounceBufferStart
            + fDMABuffer->BounceBufferSize();

        const generic_io_vec* vecs = fDMABuffer->Vecs();
        uint32 vecCount = fDMABuffer->VecCount();

        status_t error = B_OK;

        // We iterate through the vecs we have read, moving offset (the device
        // offset) as we go. If [offset, offset + vec.length) intersects with
        // [startOffset, endOffset) we copy to the final location.
        off_t offset = fOffset;
        const off_t startOffset = fOriginalOffset;
        const off_t endOffset = fOriginalOffset + fOriginalLength;

        for (uint32 i = 0; error == B_OK && i < vecCount; i++) {
            const generic_io_vec& vec = vecs[i];
            generic_addr_t base = vec.base;
            generic_size_t length = vec.length;

            if (offset < startOffset) {
                // If the complete vector is before the start offset, skip it.
                if (offset + (off_t)length <= startOffset) {
                    offset += length;
                    continue;
                }

                // The vector starts before the start offset, but intersects
                // with it. Skip the part we aren't interested in.
                generic_size_t diff = startOffset - offset;
                offset += diff;
                base += diff;
                length -= diff;
            }

            if (offset + (off_t)length > endOffset) {
                // If we're already beyond the end offset, we're done.
                if (offset >= endOffset)
                    break;

                // The vector extends beyond the end offset -- cut it.
                length = endOffset - offset;
            }

            if (base >= bounceBufferStart && base < bounceBufferEnd) {
                error = fParent->CopyData(
                    bounceBuffer + (base - bounceBufferStart), offset, length);
            }

            offset += length;
        }

        if (error != B_OK)
            SetStatus(error);
    }

    return true;
}


/*! Note: SetPartial() must be called first!
*/
status_t
IOOperation::Prepare(IORequest* request)
{
    if (fParent != NULL)
        fParent->RemoveOperation(this);

    fParent = request;

    fTransferredBytes = 0;

    // set initial phase
    fPhase = PHASE_DO_ALL;
    if (fParent->IsWrite()) {
        // Copy data to bounce buffer segments, save the partial begin/end vec,
        // which will be copied after their respective read phase.
        if (UsesBounceBuffer()) {
            TRACE("  write with bounce buffer\n");
            uint8* bounceBuffer = (uint8*)fDMABuffer->BounceBufferAddress();
            phys_addr_t bounceBufferStart
                = fDMABuffer->PhysicalBounceBufferAddress();
            phys_addr_t bounceBufferEnd = bounceBufferStart
                + fDMABuffer->BounceBufferSize();

            const generic_io_vec* vecs = fDMABuffer->Vecs();
            uint32 vecCount = fDMABuffer->VecCount();
            generic_size_t vecOffset = 0;
            uint32 i = 0;

            off_t offset = fOffset;
            off_t endOffset = fOffset + fLength;

            if (HasPartialBegin()) {
                // skip first block
                generic_size_t toSkip = fBlockSize;
                while (toSkip > 0) {
                    if (vecs[i].length <= toSkip) {
                        toSkip -= vecs[i].length;
                        i++;
                    } else {
                        vecOffset = toSkip;
                        break;
                    }
                }

                offset += fBlockSize;
            }

            if (HasPartialEnd()) {
                // skip last block
                generic_size_t toSkip = fBlockSize;
                while (toSkip > 0) {
                    if (vecs[vecCount - 1].length <= toSkip) {
                        toSkip -= vecs[vecCount - 1].length;
                        vecCount--;
                    } else
                        break;
                }

                endOffset -= fBlockSize;
            }

            for (; i < vecCount; i++) {
                const generic_io_vec& vec = vecs[i];
                generic_addr_t base = vec.base + vecOffset;
                generic_size_t length = vec.length - vecOffset;
                vecOffset = 0;

                if (base >= bounceBufferStart && base < bounceBufferEnd) {
                    if (offset + (off_t)length > endOffset)
                        length = endOffset - offset;
                    status_t error = fParent->CopyData(offset,
                        bounceBuffer + (base - bounceBufferStart), length);
                    if (error != B_OK)
                        return error;
                }

                offset += length;
            }
        }

        if (HasPartialBegin())
            fPhase = PHASE_READ_BEGIN;
        else if (HasPartialEnd())
            fPhase = PHASE_READ_END;

        _PrepareVecs();
    }

    ResetStatus();

    if (fParent != NULL)
        fParent->AddOperation(this);

    return B_OK;
}


void
IOOperation::SetOriginalRange(off_t offset, generic_size_t length)
{
    fOriginalOffset = fOffset = offset;
    fOriginalLength = fLength = length;
}


void
IOOperation::SetRange(off_t offset, generic_size_t length)
{
    fOffset = offset;
    fLength = length;
}


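// Offset() and Length() below report the range of the *current phase*: in
// the single-block read phases of a partial write they cover just the one
// block being read back (the first block for PHASE_READ_BEGIN, the last one
// for PHASE_READ_END), while in PHASE_DO_ALL they cover the whole (aligned)
// operation range.

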
off_t
IOOperation::Offset() const
{
    return fPhase == PHASE_READ_END ? fOffset + fLength - fBlockSize : fOffset;
}


generic_size_t
IOOperation::Length() const
{
    return fPhase == PHASE_DO_ALL ? fLength : fBlockSize;
}


generic_io_vec*
IOOperation::Vecs() const
{
    switch (fPhase) {
        case PHASE_READ_END:
            return fDMABuffer->Vecs() + fSavedVecIndex;
        case PHASE_READ_BEGIN:
        case PHASE_DO_ALL:
        default:
            return fDMABuffer->Vecs();
    }
}


uint32
IOOperation::VecCount() const
{
    switch (fPhase) {
        case PHASE_READ_BEGIN:
            return fSavedVecIndex + 1;
        case PHASE_READ_END:
            return fDMABuffer->VecCount() - fSavedVecIndex;
        case PHASE_DO_ALL:
        default:
            return fDMABuffer->VecCount();
    }
}


void
IOOperation::SetPartial(bool partialBegin, bool partialEnd)
{
    TRACE("partial begin %d, end %d\n", partialBegin, partialEnd);
    fPartialBegin = partialBegin;
    fPartialEnd = partialEnd;
}


bool
IOOperation::IsWrite() const
{
    return fParent->IsWrite() && fPhase == PHASE_DO_ALL;
}


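// Note that an operation belonging to a write request reports itself as a
// write only in PHASE_DO_ALL: the PHASE_READ_BEGIN/PHASE_READ_END phases are
// device reads (into the bounce buffer), even though the request as a whole
// is a write.

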
bool
IOOperation::IsRead() const
{
    return fParent->IsRead();
}


void
IOOperation::_PrepareVecs()
{
    // we need to prepare the vecs for consumption by the drivers
    if (fPhase == PHASE_READ_BEGIN) {
        generic_io_vec* vecs = fDMABuffer->Vecs();
        uint32 vecCount = fDMABuffer->VecCount();
        generic_size_t vecLength = fBlockSize;
        for (uint32 i = 0; i < vecCount; i++) {
            generic_io_vec& vec = vecs[i];
            if (vec.length >= vecLength) {
                fSavedVecIndex = i;
                fSavedVecLength = vec.length;
                vec.length = vecLength;
                break;
            }
            vecLength -= vec.length;
        }
    } else if (fPhase == PHASE_READ_END) {
        generic_io_vec* vecs = fDMABuffer->Vecs();
        uint32 vecCount = fDMABuffer->VecCount();
        generic_size_t vecLength = fBlockSize;
        for (int32 i = vecCount - 1; i >= 0; i--) {
            generic_io_vec& vec = vecs[i];
            if (vec.length >= vecLength) {
                fSavedVecIndex = i;
                fSavedVecLength = vec.length;
                vec.base += vec.length - vecLength;
                vec.length = vecLength;
                break;
            }
            vecLength -= vec.length;
        }
    }
}


status_t
IOOperation::_CopyPartialBegin(bool isWrite, bool& singleBlockOnly)
{
    generic_size_t relativeOffset = OriginalOffset() - fOffset;
    generic_size_t length = fBlockSize - relativeOffset;

    singleBlockOnly = length >= OriginalLength();
    if (singleBlockOnly)
        length = OriginalLength();

    TRACE("_CopyPartialBegin(%s, single only %d)\n",
        isWrite ? "write" : "read", singleBlockOnly);

    if (isWrite) {
        return fParent->CopyData(OriginalOffset(),
            (uint8*)fDMABuffer->BounceBufferAddress() + relativeOffset, length);
    } else {
        return fParent->CopyData(
            (uint8*)fDMABuffer->BounceBufferAddress() + relativeOffset,
            OriginalOffset(), length);
    }
}


status_t
IOOperation::_CopyPartialEnd(bool isWrite)
{
    TRACE("_CopyPartialEnd(%s)\n", isWrite ? "write" : "read");

    const generic_io_vec& lastVec
        = fDMABuffer->VecAt(fDMABuffer->VecCount() - 1);
    off_t lastVecPos = fOffset + fLength - fBlockSize;
    uint8* base = (uint8*)fDMABuffer->BounceBufferAddress()
        + (lastVec.base + lastVec.length - fBlockSize
        - fDMABuffer->PhysicalBounceBufferAddress());
        // NOTE: this won't work if we don't use the bounce buffer contiguously
        // (because of boundary alignments).
    generic_size_t length = OriginalOffset() + OriginalLength() - lastVecPos;

    if (isWrite)
        return fParent->CopyData(lastVecPos, base, length);

    return fParent->CopyData(base, lastVecPos, length);
}


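// Worked example for the two partial copies above (hypothetical numbers):
// with fBlockSize = 512, an original (unaligned) range [700, 1700) and the
// block-aligned operation range [512, 2048), _CopyPartialBegin() copies
// length = 512 - (700 - 512) = 324 bytes at bounce buffer offset 188, and
// _CopyPartialEnd() copies 700 + 1000 - 1536 = 164 bytes for the last block,
// which starts at device offset lastVecPos = 512 + 1536 - 512 = 1536.

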
void
IOOperation::Dump() const
{
    kprintf("io_operation at %p\n", this);

    kprintf("  parent:           %p\n", fParent);
    kprintf("  status:           %s\n", strerror(fStatus));
    kprintf("  dma buffer:       %p\n", fDMABuffer);
    kprintf("  offset:           %-8" B_PRIdOFF " (original: %" B_PRIdOFF ")\n",
        fOffset, fOriginalOffset);
    kprintf("  length:           %-8" B_PRIuGENADDR " (original: %"
        B_PRIuGENADDR ")\n", fLength, fOriginalLength);
    kprintf("  transferred:      %" B_PRIuGENADDR "\n", fTransferredBytes);
    kprintf("  block size:       %" B_PRIuGENADDR "\n", fBlockSize);
    kprintf("  saved vec index:  %u\n", fSavedVecIndex);
    kprintf("  saved vec length: %u\n", fSavedVecLength);
    kprintf("  r/w:              %s\n", IsWrite() ? "write" : "read");
    kprintf("  phase:            %s\n", fPhase == PHASE_READ_BEGIN
        ? "read begin" : fPhase == PHASE_READ_END ? "read end"
        : fPhase == PHASE_DO_ALL ? "do all" : "unknown");
    kprintf("  partial begin:    %s\n", fPartialBegin ? "yes" : "no");
    kprintf("  partial end:      %s\n", fPartialEnd ? "yes" : "no");
    kprintf("  bounce buffer:    %s\n", fUsesBounceBuffer ? "yes" : "no");

    set_debug_variable("_parent", (addr_t)fParent);
    set_debug_variable("_buffer", (addr_t)fDMABuffer);
}


// #pragma mark -


IORequest::IORequest()
    :
    fIsNotified(false),
    fFinishedCallback(NULL),
    fFinishedCookie(NULL),
    fIterationCallback(NULL),
    fIterationCookie(NULL)
{
    mutex_init(&fLock, "I/O request lock");
    fFinishedCondition.Init(this, "I/O request finished");
}


IORequest::~IORequest()
{
    mutex_lock(&fLock);
    DeleteSubRequests();
    if (fBuffer != NULL)
        fBuffer->Delete();
    mutex_destroy(&fLock);
}


/* static */ IORequest*
IORequest::Create(bool vip)
{
    return vip
        ? new(malloc_flags(HEAP_PRIORITY_VIP)) IORequest
        : new(std::nothrow) IORequest;
}


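// "VIP" requests are allocated from the kernel's VIP heap
// (HEAP_PRIORITY_VIP), presumably so that I/O needed to make progress under
// memory pressure (e.g. writing out pages) does not itself block on a
// low-memory allocation.

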
status_t
IORequest::Init(off_t offset, generic_addr_t buffer, generic_size_t length,
    bool write, uint32 flags)
{
    ASSERT(offset >= 0);

    generic_io_vec vec;
    vec.base = buffer;
    vec.length = length;
    return Init(offset, &vec, 1, length, write, flags);
}


status_t
IORequest::Init(off_t offset, generic_size_t firstVecOffset,
    const generic_io_vec* vecs, size_t count, generic_size_t length, bool write,
    uint32 flags)
{
    ASSERT(offset >= 0);

    fBuffer = IOBuffer::Create(count, (flags & B_VIP_IO_REQUEST) != 0);
    if (fBuffer == NULL)
        return B_NO_MEMORY;

    fBuffer->SetVecs(firstVecOffset, vecs, count, length, flags);

    fOwner = NULL;
    fOffset = offset;
    fLength = length;
    fRelativeParentOffset = 0;
    fTransferSize = 0;
    fFlags = flags;
    Thread* thread = thread_get_current_thread();
    fTeam = thread->team->id;
    fThread = thread->id;
    fIsWrite = write;
    fPartialTransfer = false;
    fSuppressChildNotifications = false;

    // these are for iteration
    fVecIndex = 0;
    fVecOffset = 0;
    fRemainingBytes = length;

    fPendingChildren = 0;

    fStatus = 1;

    return B_OK;
}


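// Illustrative use (sketch only; the real call sites live in the I/O
// scheduler and drivers, and error handling is elided here):
//
//     IORequest* request = IORequest::Create(false);
//     if (request != NULL && request->Init(offset, (generic_addr_t)buffer,
//             length, false, 0) == B_OK) {
//         // hand the request to the driver/scheduler, then:
//         request->Wait(0, 0);
//     }

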
status_t
IORequest::CreateSubRequest(off_t parentOffset, off_t offset,
    generic_size_t length, IORequest*& _subRequest)
{
    ASSERT(parentOffset >= fOffset && length <= fLength
        && parentOffset - fOffset <= (off_t)(fLength - length));

    // find start vec
    generic_size_t vecOffset = parentOffset - fOffset;
    generic_io_vec* vecs = fBuffer->Vecs();
    int32 vecCount = fBuffer->VecCount();
    int32 startVec = 0;
    for (; startVec < vecCount; startVec++) {
        const generic_io_vec& vec = vecs[startVec];
        if (vecOffset < vec.length)
            break;

        vecOffset -= vec.length;
    }

    // count vecs
    generic_size_t currentVecOffset = vecOffset;
    int32 endVec = startVec;
    generic_size_t remainingLength = length;
    for (; endVec < vecCount; endVec++) {
        const generic_io_vec& vec = vecs[endVec];
        if (vec.length - currentVecOffset >= remainingLength)
            break;

        remainingLength -= vec.length - currentVecOffset;
        currentVecOffset = 0;
    }

    // create subrequest
    IORequest* subRequest = Create((fFlags & B_VIP_IO_REQUEST) != 0);
    if (subRequest == NULL)
        return B_NO_MEMORY;

    status_t error = subRequest->Init(offset, vecOffset, vecs + startVec,
        endVec - startVec + 1, length, fIsWrite, fFlags & ~B_DELETE_IO_REQUEST);
    if (error != B_OK) {
        delete subRequest;
        return error;
    }

    subRequest->fRelativeParentOffset = parentOffset - fOffset;
    subRequest->fTeam = fTeam;
    subRequest->fThread = fThread;

    _subRequest = subRequest;
    subRequest->SetParent(this);

    MutexLocker _(fLock);

    fChildren.Add(subRequest);
    fPendingChildren++;
    TRACE("IORequest::CreateSubRequest(): request: %p, subrequest: %p\n", this,
        subRequest);

    return B_OK;
}


void
IORequest::DeleteSubRequests()
{
    while (IORequestChunk* chunk = fChildren.RemoveHead())
        delete chunk;
    fPendingChildren = 0;
}


void
IORequest::SetFinishedCallback(io_request_finished_callback callback,
    void* cookie)
{
    fFinishedCallback = callback;
    fFinishedCookie = cookie;
}


void
IORequest::SetIterationCallback(io_request_iterate_callback callback,
    void* cookie)
{
    fIterationCallback = callback;
    fIterationCookie = cookie;
}


io_request_finished_callback
IORequest::FinishedCallback(void** _cookie) const
{
    if (_cookie != NULL)
        *_cookie = fFinishedCookie;
    return fFinishedCallback;
}


status_t
IORequest::Wait(uint32 flags, bigtime_t timeout)
{
    MutexLocker locker(fLock);

    if (IsFinished() && fIsNotified)
        return Status();

    ConditionVariableEntry entry;
    fFinishedCondition.Add(&entry);

    locker.Unlock();

    status_t error = entry.Wait(flags, timeout);
    if (error != B_OK)
        return error;

    return Status();
}


void
IORequest::NotifyFinished()
{
    TRACE("IORequest::NotifyFinished(): request: %p\n", this);

    MutexLocker locker(fLock);

    if (fStatus == B_OK && !fPartialTransfer && RemainingBytes() > 0) {
        // The request is not really done yet. If it has an iteration callback,
        // call it.
        if (fIterationCallback != NULL) {
            ResetStatus();
            locker.Unlock();
            bool partialTransfer = false;
            status_t error = fIterationCallback(fIterationCookie, this,
                &partialTransfer);
            if (error == B_OK && !partialTransfer)
                return;

            // Iteration failed, which means we're responsible for notifying
            // that the request is finished.
            locker.Lock();
            fStatus = error;
            fPartialTransfer = true;
        }
    }

    ASSERT(!fIsNotified);
    ASSERT(fPendingChildren == 0);
    ASSERT(fChildren.IsEmpty()
        || dynamic_cast<IOOperation*>(fChildren.Head()) == NULL);

    // unlock the memory
    if (fBuffer->IsMemoryLocked())
        fBuffer->UnlockMemory(fTeam, fIsWrite);

    // Cache the callbacks before we unblock waiters and unlock. Any of the
    // following could delete this request, so we don't want to touch it
    // once we have started telling others that it is done.
    IORequest* parent = fParent;
    io_request_finished_callback finishedCallback = fFinishedCallback;
    void* finishedCookie = fFinishedCookie;
    status_t status = fStatus;
    generic_size_t lastTransferredOffset
        = fRelativeParentOffset + fTransferSize;
    bool partialTransfer = status != B_OK || fPartialTransfer;
    bool deleteRequest = (fFlags & B_DELETE_IO_REQUEST) != 0;

    // unblock waiters
    fIsNotified = true;
    fFinishedCondition.NotifyAll();

    locker.Unlock();

    // notify callback
    if (finishedCallback != NULL) {
        finishedCallback(finishedCookie, this, status, partialTransfer,
            lastTransferredOffset);
    }

    // notify parent
    if (parent != NULL) {
        parent->SubRequestFinished(this, status, partialTransfer,
            lastTransferredOffset);
    }

    if (deleteRequest)
        delete this;
}


/*! Returns whether this request or any of its ancestors has a finished or
    notification callback. Used to decide whether NotifyFinished() can be
    called synchronously.
*/
bool
IORequest::HasCallbacks() const
{
    if (fFinishedCallback != NULL || fIterationCallback != NULL)
        return true;

    return fParent != NULL && fParent->HasCallbacks();
}


void
IORequest::SetStatusAndNotify(status_t status)
{
    MutexLocker locker(fLock);

    if (fStatus != 1)
        return;

    fStatus = status;

    locker.Unlock();

    NotifyFinished();
}


void
IORequest::OperationFinished(IOOperation* operation, status_t status,
    bool partialTransfer, generic_size_t transferEndOffset)
{
    TRACE("IORequest::OperationFinished(%p, %#" B_PRIx32 "): request: %p\n",
        operation, status, this);

    MutexLocker locker(fLock);

    fChildren.Remove(operation);
    operation->SetParent(NULL);

    if (status != B_OK || partialTransfer) {
        if (fTransferSize > transferEndOffset)
            fTransferSize = transferEndOffset;
        fPartialTransfer = true;
    }

    if (status != B_OK && fStatus == 1)
        fStatus = status;

    if (--fPendingChildren > 0)
        return;

    // last child finished

    // set status, if not done yet
    if (fStatus == 1)
        fStatus = B_OK;
}


void
IORequest::SubRequestFinished(IORequest* request, status_t status,
    bool partialTransfer, generic_size_t transferEndOffset)
{
    TRACE("IORequest::SubRequestFinished(%p, %#" B_PRIx32 ", %d, %"
        B_PRIuGENADDR "): request: %p\n", request, status, partialTransfer,
        transferEndOffset, this);

    MutexLocker locker(fLock);

    if (status != B_OK || partialTransfer) {
        if (fTransferSize > transferEndOffset)
            fTransferSize = transferEndOffset;
        fPartialTransfer = true;
    }

    if (status != B_OK && fStatus == 1)
        fStatus = status;

    if (--fPendingChildren > 0 || fSuppressChildNotifications)
        return;

    // last child finished

    // set status, if not done yet
    if (fStatus == 1)
        fStatus = B_OK;

    locker.Unlock();

    NotifyFinished();
}


void
IORequest::SetUnfinished()
{
    MutexLocker _(fLock);
    ResetStatus();
}


void
IORequest::SetTransferredBytes(bool partialTransfer,
    generic_size_t transferredBytes)
{
    TRACE("%p->IORequest::SetTransferredBytes(%d, %" B_PRIuGENADDR ")\n", this,
        partialTransfer, transferredBytes);

    MutexLocker _(fLock);

    fPartialTransfer = partialTransfer;
    fTransferSize = transferredBytes;
}


void
IORequest::SetSuppressChildNotifications(bool suppress)
{
    fSuppressChildNotifications = suppress;
}


void
IORequest::Advance(generic_size_t bySize)
{
    TRACE("IORequest::Advance(%" B_PRIuGENADDR "): remaining: %" B_PRIuGENADDR
        " -> %" B_PRIuGENADDR "\n", bySize, fRemainingBytes,
        fRemainingBytes - bySize);
    fRemainingBytes -= bySize;
    fTransferSize += bySize;

    generic_io_vec* vecs = fBuffer->Vecs();
    uint32 vecCount = fBuffer->VecCount();
    while (fVecIndex < vecCount
        && vecs[fVecIndex].length - fVecOffset <= bySize) {
        bySize -= vecs[fVecIndex].length - fVecOffset;
        fVecOffset = 0;
        fVecIndex++;
    }

    fVecOffset += bySize;
}


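// Example of the cursor arithmetic in Advance() (hypothetical numbers): with
// two vecs of 4096 bytes each and fVecIndex/fVecOffset at 0/0, Advance(6144)
// consumes all of vec 0 (4096 bytes) plus 2048 bytes of vec 1, leaving
// fVecIndex = 1 and fVecOffset = 2048.

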
IORequest*
IORequest::FirstSubRequest()
{
    return dynamic_cast<IORequest*>(fChildren.Head());
}


IORequest*
IORequest::NextSubRequest(IORequest* previous)
{
    if (previous == NULL)
        return NULL;
    return dynamic_cast<IORequest*>(fChildren.GetNext(previous));
}


void
IORequest::AddOperation(IOOperation* operation)
{
    MutexLocker locker(fLock);
    TRACE("IORequest::AddOperation(%p): request: %p\n", operation, this);
    fChildren.Add(operation);
    fPendingChildren++;
}


void
IORequest::RemoveOperation(IOOperation* operation)
{
    MutexLocker locker(fLock);
    fChildren.Remove(operation);
    operation->SetParent(NULL);
}


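// Direction convention for the two CopyData() overloads below:
// CopyData(offset, buffer, size) copies *out of* the request's buffer into
// `buffer`, while CopyData(buffer, offset, size) copies *into* the request.
// `offset` is always the device offset within [fOffset, fOffset + fLength).

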
status_t
IORequest::CopyData(off_t offset, void* buffer, size_t size)
{
    return _CopyData(buffer, offset, size, true);
}


status_t
IORequest::CopyData(const void* buffer, off_t offset, size_t size)
{
    return _CopyData((void*)buffer, offset, size, false);
}


status_t
IORequest::ClearData(off_t offset, generic_size_t size)
{
    if (size == 0)
        return B_OK;

    if (offset < fOffset || offset + (off_t)size > fOffset + (off_t)fLength) {
        panic("IORequest::ClearData(): invalid range: (%" B_PRIdOFF
            ", %" B_PRIuGENADDR ")", offset, size);
        return B_BAD_VALUE;
    }

    // If we can, we directly copy from/to the virtual buffer. The memory is
    // locked in this case.
    status_t (*clearFunction)(generic_addr_t, generic_size_t, team_id);
    if (fBuffer->IsPhysical()) {
        clearFunction = &IORequest::_ClearDataPhysical;
    } else {
        clearFunction = fBuffer->IsUser() && fTeam != team_get_current_team_id()
            ? &IORequest::_ClearDataUser : &IORequest::_ClearDataSimple;
    }

    // skip bytes if requested
    generic_io_vec* vecs = fBuffer->Vecs();
    generic_size_t skipBytes = offset - fOffset;
    generic_size_t vecOffset = 0;
    while (skipBytes > 0) {
        if (vecs[0].length > skipBytes) {
            vecOffset = skipBytes;
            break;
        }

        skipBytes -= vecs[0].length;
        vecs++;
    }

    // clear vector-wise
    while (size > 0) {
        generic_size_t toClear = min_c(size, vecs[0].length - vecOffset);
        status_t error = clearFunction(vecs[0].base + vecOffset, toClear,
            fTeam);
        if (error != B_OK)
            return error;

        size -= toClear;
        vecs++;
        vecOffset = 0;
    }

    return B_OK;
}


status_t
IORequest::_CopyData(void* _buffer, off_t offset, size_t size, bool copyIn)
{
    if (size == 0)
        return B_OK;

    uint8* buffer = (uint8*)_buffer;

    if (offset < fOffset || offset + (off_t)size > fOffset + (off_t)fLength) {
        panic("IORequest::_CopyData(): invalid range: (%" B_PRIdOFF ", %lu)",
            offset, size);
        return B_BAD_VALUE;
    }

    // If we can, we directly copy from/to the virtual buffer. The memory is
    // locked in this case.
    status_t (*copyFunction)(void*, generic_addr_t, size_t, team_id, bool);
    if (fBuffer->IsPhysical()) {
        copyFunction = &IORequest::_CopyPhysical;
    } else {
        copyFunction = fBuffer->IsUser() && fTeam != team_get_current_team_id()
            ? &IORequest::_CopyUser : &IORequest::_CopySimple;
    }

    // skip bytes if requested
    generic_io_vec* vecs = fBuffer->Vecs();
    generic_size_t skipBytes = offset - fOffset;
    generic_size_t vecOffset = 0;
    while (skipBytes > 0) {
        if (vecs[0].length > skipBytes) {
            vecOffset = skipBytes;
            break;
        }

        skipBytes -= vecs[0].length;
        vecs++;
    }

    // copy vector-wise
    while (size > 0) {
        generic_size_t toCopy = min_c(size, vecs[0].length - vecOffset);
        status_t error = copyFunction(buffer, vecs[0].base + vecOffset, toCopy,
            fTeam, copyIn);
        if (error != B_OK)
            return error;

        buffer += toCopy;
        size -= toCopy;
        vecs++;
        vecOffset = 0;
    }

    return B_OK;
}


/* static */ status_t
IORequest::_CopySimple(void* bounceBuffer, generic_addr_t external, size_t size,
    team_id team, bool copyIn)
{
    TRACE("  IORequest::_CopySimple(%p, %#" B_PRIxGENADDR ", %lu, %d)\n",
        bounceBuffer, external, size, copyIn);
    if (copyIn)
        memcpy(bounceBuffer, (void*)(addr_t)external, size);
    else
        memcpy((void*)(addr_t)external, bounceBuffer, size);
    return B_OK;
}


/* static */ status_t
IORequest::_CopyPhysical(void* bounceBuffer, generic_addr_t external,
    size_t size, team_id team, bool copyIn)
{
    if (copyIn)
        return vm_memcpy_from_physical(bounceBuffer, external, size, false);

    return vm_memcpy_to_physical(external, bounceBuffer, size, false);
}


/* static */ status_t
IORequest::_CopyUser(void* _bounceBuffer, generic_addr_t _external, size_t size,
    team_id team, bool copyIn)
{
    uint8* bounceBuffer = (uint8*)_bounceBuffer;
    uint8* external = (uint8*)(addr_t)_external;

    while (size > 0) {
        static const int32 kEntryCount = 8;
        physical_entry entries[kEntryCount];

        uint32 count = kEntryCount;
        status_t error = get_memory_map_etc(team, external, size, entries,
            &count);
        if (error != B_OK && error != B_BUFFER_OVERFLOW) {
            panic("IORequest::_CopyUser(): Failed to get physical memory for "
                "user memory %p\n", external);
            return B_BAD_ADDRESS;
        }

        for (uint32 i = 0; i < count; i++) {
            const physical_entry& entry = entries[i];
            error = _CopyPhysical(bounceBuffer, entry.address, entry.size, team,
                copyIn);
            if (error != B_OK)
                return error;

            size -= entry.size;
            bounceBuffer += entry.size;
            external += entry.size;
        }
    }

    return B_OK;
}


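// _CopyUser() (and _ClearDataUser() below) resolve another team's user
// memory in chunks: get_memory_map_etc() fills at most kEntryCount (8)
// physical_entry slots per call and returns B_BUFFER_OVERFLOW when more
// entries would be needed, in which case the outer loop simply continues
// with the remaining size. The actual copying/clearing then goes through the
// physical-memory path, so no address space switch is needed.

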
/*static*/ status_t
IORequest::_ClearDataSimple(generic_addr_t external, generic_size_t size,
    team_id team)
{
    memset((void*)(addr_t)external, 0, (size_t)size);
    return B_OK;
}


/*static*/ status_t
IORequest::_ClearDataPhysical(generic_addr_t external, generic_size_t size,
    team_id team)
{
    return vm_memset_physical((phys_addr_t)external, 0, (phys_size_t)size);
}


/*static*/ status_t
IORequest::_ClearDataUser(generic_addr_t _external, generic_size_t size,
    team_id team)
{
    uint8* external = (uint8*)(addr_t)_external;

    while (size > 0) {
        static const int32 kEntryCount = 8;
        physical_entry entries[kEntryCount];

        uint32 count = kEntryCount;
        status_t error = get_memory_map_etc(team, external, size, entries,
            &count);
        if (error != B_OK && error != B_BUFFER_OVERFLOW) {
            panic("IORequest::_ClearDataUser(): Failed to get physical memory "
                "for user memory %p\n", external);
            return B_BAD_ADDRESS;
        }

        for (uint32 i = 0; i < count; i++) {
            const physical_entry& entry = entries[i];
            error = _ClearDataPhysical(entry.address, entry.size, team);
            if (error != B_OK)
                return error;

            size -= entry.size;
            external += entry.size;
        }
    }

    return B_OK;
}


void
IORequest::Dump() const
{
    kprintf("io_request at %p\n", this);

    kprintf("  owner:             %p\n", fOwner);
    kprintf("  parent:            %p\n", fParent);
    kprintf("  status:            %s\n", strerror(fStatus));
    kprintf("  mutex:             %p\n", &fLock);
    kprintf("  IOBuffer:          %p\n", fBuffer);
    kprintf("  offset:            %" B_PRIdOFF "\n", fOffset);
    kprintf("  length:            %" B_PRIuGENADDR "\n", fLength);
    kprintf("  transfer size:     %" B_PRIuGENADDR "\n", fTransferSize);
    kprintf("  relative offset:   %" B_PRIuGENADDR "\n", fRelativeParentOffset);
    kprintf("  pending children:  %" B_PRId32 "\n", fPendingChildren);
    kprintf("  flags:             %#" B_PRIx32 "\n", fFlags);
    kprintf("  team:              %" B_PRId32 "\n", fTeam);
    kprintf("  thread:            %" B_PRId32 "\n", fThread);
    kprintf("  r/w:               %s\n", fIsWrite ? "write" : "read");
    kprintf("  partial transfer:  %s\n", fPartialTransfer ? "yes" : "no");
    kprintf("  finished cvar:     %p\n", &fFinishedCondition);
    kprintf("  iteration:\n");
    kprintf("    vec index:       %" B_PRIu32 "\n", fVecIndex);
    kprintf("    vec offset:      %" B_PRIuGENADDR "\n", fVecOffset);
    kprintf("    remaining bytes: %" B_PRIuGENADDR "\n", fRemainingBytes);
    kprintf("  callbacks:\n");
    kprintf("    finished %p, cookie %p\n", fFinishedCallback, fFinishedCookie);
    kprintf("    iteration %p, cookie %p\n", fIterationCallback,
        fIterationCookie);
    kprintf("  children:\n");

    IORequestChunkList::ConstIterator iterator = fChildren.GetIterator();
    while (iterator.HasNext()) {
        kprintf("    %p\n", iterator.Next());
    }

    set_debug_variable("_parent", (addr_t)fParent);
    set_debug_variable("_mutex", (addr_t)&fLock);
    set_debug_variable("_buffer", (addr_t)fBuffer);
    set_debug_variable("_cvar", (addr_t)&fFinishedCondition);
}