/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */


#include "dma_resources.h"

#include <device_manager.h>

#include <util/AutoLock.h>

#include "IORequest.h"


//#define TRACE_DMA_RESOURCE
#ifdef TRACE_DMA_RESOURCE
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


extern device_manager_info gDeviceManagerModule;

const phys_size_t kMaxBounceBufferSize = 4 * B_PAGE_SIZE;
DMABuffer*
DMABuffer::Create(size_t count)
{
	DMABuffer* buffer = (DMABuffer*)malloc(
		sizeof(DMABuffer) + sizeof(generic_io_vec) * (count - 1));
	if (buffer == NULL)
		return NULL;

	buffer->fVecCount = count;

	return buffer;
}
void
DMABuffer::SetVecCount(uint32 count)
{
	fVecCount = count;
}
void
DMABuffer::AddVec(generic_addr_t base, generic_size_t size)
{
	generic_io_vec& vec = fVecs[fVecCount++];
	vec.base = base;
	vec.length = size;
}
bool
DMABuffer::UsesBounceBufferAt(uint32 index)
{
	if (index >= fVecCount || fBounceBuffer == NULL)
		return false;

	return fVecs[index].base >= fBounceBuffer->physical_address
		&& fVecs[index].base
			< fBounceBuffer->physical_address + fBounceBuffer->size;
}
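
/* Illustrative example (not in the original source): with a bounce buffer at
   physical address 0x100000 and size 0x4000, a vec based at 0x101000 lies
   within [0x100000, 0x104000) and UsesBounceBufferAt() returns true, while a
   vec based at 0x104000 or above does not. */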
void
DMABuffer::Dump() const
{
	kprintf("DMABuffer at %p\n", this);

	kprintf("  bounce buffer: %p (physical %#" B_PRIxPHYSADDR ")\n",
		fBounceBuffer->address, fBounceBuffer->physical_address);
	kprintf("  bounce buffer size: %" B_PRIxPHYSADDR "\n", fBounceBuffer->size);
	kprintf("  vecs: %" B_PRIu32 "\n", fVecCount);

	for (uint32 i = 0; i < fVecCount; i++) {
		kprintf("    [%" B_PRIu32 "] %#" B_PRIxGENADDR ", %" B_PRIuGENADDR "\n",
			i, fVecs[i].base, fVecs[i].length);
	}
}
DMAResource::DMAResource()
{
	mutex_init(&fLock, "dma resource");
}


DMAResource::~DMAResource()
{
	mutex_destroy(&fLock);

	// TODO: Delete DMABuffers and BounceBuffers!
}
status_t
DMAResource::Init(device_node* node, generic_size_t blockSize,
	uint32 bufferCount, uint32 bounceBufferCount)
{
	dma_restrictions restrictions;
	memset(&restrictions, 0, sizeof(dma_restrictions));

	// TODO: add DMA attributes instead of reusing block_io's

	uint32 value;
	if (gDeviceManagerModule.get_attr_uint32(node,
			B_DMA_ALIGNMENT, &value, true) == B_OK)
		restrictions.alignment = (generic_size_t)value + 1;

	if (gDeviceManagerModule.get_attr_uint32(node,
			B_DMA_BOUNDARY, &value, true) == B_OK)
		restrictions.boundary = (generic_size_t)value + 1;

	if (gDeviceManagerModule.get_attr_uint32(node,
			B_DMA_MAX_SEGMENT_BLOCKS, &value, true) == B_OK)
		restrictions.max_segment_size = (generic_size_t)value * blockSize;

	if (gDeviceManagerModule.get_attr_uint32(node,
			B_DMA_MAX_TRANSFER_BLOCKS, &value, true) == B_OK)
		restrictions.max_transfer_size = (generic_size_t)value * blockSize;

	if (gDeviceManagerModule.get_attr_uint32(node,
			B_DMA_MAX_SEGMENT_COUNT, &value, true) == B_OK)
		restrictions.max_segment_count = value;

	uint64 value64;
	if (gDeviceManagerModule.get_attr_uint64(node,
			B_DMA_LOW_ADDRESS, &value64, true) == B_OK) {
		restrictions.low_address = value64;
	}

	if (gDeviceManagerModule.get_attr_uint64(node,
			B_DMA_HIGH_ADDRESS, &value64, true) == B_OK) {
		restrictions.high_address = value64;
	}

	return Init(restrictions, blockSize, bufferCount, bounceBufferCount);
}
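
/* Illustrative only, not part of the original file: a minimal sketch of how a
   driver might set up a DMAResource via the device_node variant of Init().
   The block size (512) and the buffer counts (4 DMA buffers, 2 bounce
   buffers) are arbitrary example values, and "node" stands for the driver's
   device_node.

	DMAResource* resource = new(std::nothrow) DMAResource;
	if (resource == NULL)
		return B_NO_MEMORY;

	status_t status = resource->Init(node, 512, 4, 2);
	if (status != B_OK) {
		delete resource;
		return status;
	}
*/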
status_t
DMAResource::Init(const dma_restrictions& restrictions,
	generic_size_t blockSize, uint32 bufferCount, uint32 bounceBufferCount)
{
	fRestrictions = restrictions;
	fBlockSize = blockSize == 0 ? 1 : blockSize;
	fBufferCount = bufferCount;
	fBounceBufferCount = bounceBufferCount;
	fBounceBufferSize = 0;

	if (fRestrictions.high_address == 0)
		fRestrictions.high_address = ~(generic_addr_t)0;
	if (fRestrictions.max_segment_count == 0)
		fRestrictions.max_segment_count = 16;
	if (fRestrictions.alignment == 0)
		fRestrictions.alignment = 1;
	if (fRestrictions.max_transfer_size == 0)
		fRestrictions.max_transfer_size = ~(generic_size_t)0;
	if (fRestrictions.max_segment_size == 0)
		fRestrictions.max_segment_size = ~(generic_size_t)0;

	if (_NeedsBoundsBuffers()) {
		fBounceBufferSize = fRestrictions.max_segment_size
			* min_c(fRestrictions.max_segment_count, 4);
		if (fBounceBufferSize > kMaxBounceBufferSize)
			fBounceBufferSize = kMaxBounceBufferSize;
		TRACE("DMAResource::Init(): chose bounce buffer size %lu\n",
			fBounceBufferSize);
	}

	dprintf("DMAResource@%p: low/high %" B_PRIxGENADDR "/%" B_PRIxGENADDR
		", max segment count %" B_PRIu32 ", align %" B_PRIuGENADDR ", "
		"boundary %" B_PRIuGENADDR ", max transfer %" B_PRIuGENADDR
		", max segment size %" B_PRIuGENADDR "\n", this,
		fRestrictions.low_address, fRestrictions.high_address,
		fRestrictions.max_segment_count, fRestrictions.alignment,
		fRestrictions.boundary, fRestrictions.max_transfer_size,
		fRestrictions.max_segment_size);

	fScratchVecs = (generic_io_vec*)malloc(
		sizeof(generic_io_vec) * fRestrictions.max_segment_count);
	if (fScratchVecs == NULL)
		return B_NO_MEMORY;

	for (size_t i = 0; i < fBufferCount; i++) {
		DMABuffer* buffer;
		status_t error = CreateBuffer(&buffer);
		if (error != B_OK)
			return error;

		fDMABuffers.Add(buffer);
	}

	// TODO: create bounce buffers in as few areas as feasible
	for (size_t i = 0; i < fBounceBufferCount; i++) {
		DMABounceBuffer* buffer;
		status_t error = CreateBounceBuffer(&buffer);
		if (error != B_OK)
			return error;

		fBounceBuffers.Add(buffer);
	}

	return B_OK;
}
status_t
DMAResource::CreateBuffer(DMABuffer** _buffer)
{
	DMABuffer* buffer = DMABuffer::Create(fRestrictions.max_segment_count);
	if (buffer == NULL)
		return B_NO_MEMORY;

	*_buffer = buffer;
	return B_OK;
}
status_t
DMAResource::CreateBounceBuffer(DMABounceBuffer** _buffer)
{
	void* bounceBuffer = NULL;
	phys_addr_t physicalBase = 0;
	area_id area = -1;
	phys_size_t size = ROUNDUP(fBounceBufferSize, B_PAGE_SIZE);

	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	physicalRestrictions.low_address = fRestrictions.low_address;
	physicalRestrictions.high_address = fRestrictions.high_address;
	physicalRestrictions.alignment = fRestrictions.alignment;
	physicalRestrictions.boundary = fRestrictions.boundary;
	area = create_area_etc(B_SYSTEM_TEAM, "dma buffer", size, B_CONTIGUOUS,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, 0, &virtualRestrictions,
		&physicalRestrictions, &bounceBuffer);
	if (area < B_OK)
		return area;

	physical_entry entry;
	if (get_memory_map(bounceBuffer, size, &entry, 1) != B_OK) {
		panic("get_memory_map() failed.");
		delete_area(area);
		return B_ERROR;
	}

	physicalBase = entry.address;

	ASSERT(fRestrictions.high_address >= physicalBase + size);

	DMABounceBuffer* buffer = new(std::nothrow) DMABounceBuffer;
	if (buffer == NULL) {
		delete_area(area);
		return B_NO_MEMORY;
	}

	buffer->address = bounceBuffer;
	buffer->physical_address = physicalBase;
	buffer->size = size;

	*_buffer = buffer;
	return B_OK;
}
void
DMAResource::_RestrictBoundaryAndSegmentSize(generic_addr_t base,
	generic_addr_t& length)
{
	if (length > fRestrictions.max_segment_size)
		length = fRestrictions.max_segment_size;
	if (fRestrictions.boundary > 0) {
		generic_addr_t baseBoundary = base / fRestrictions.boundary;
		if (baseBoundary
				!= (base + (length - 1)) / fRestrictions.boundary) {
			length = (baseBoundary + 1) * fRestrictions.boundary - base;
		}
	}
}
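
/* Illustrative example (not in the original source): with
   fRestrictions.boundary = 0x1000, base = 0x1f00, and length = 0x300, the
   segment would end at 0x21ff and thus cross from boundary slot 1 into
   slot 2. The code above clamps it to
   length = (1 + 1) * 0x1000 - 0x1f00 = 0x100, so the segment ends exactly at
   the 0x2000 boundary; the remainder goes into a subsequent vec. */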
void
DMAResource::_CutBuffer(DMABuffer& buffer, phys_addr_t& physicalBounceBuffer,
	phys_size_t& bounceLeft, generic_size_t toCut)
{
	int32 vecCount = buffer.VecCount();
	for (int32 i = vecCount - 1; toCut > 0 && i >= 0; i--) {
		generic_io_vec& vec = buffer.VecAt(i);
		generic_size_t length = vec.length;
		bool inBounceBuffer = buffer.UsesBounceBufferAt(i);

		if (length <= toCut) {
			vecCount--;
			toCut -= length;

			if (inBounceBuffer) {
				bounceLeft += length;
				physicalBounceBuffer -= length;
			}
		} else {
			vec.length -= toCut;

			if (inBounceBuffer) {
				bounceLeft += toCut;
				physicalBounceBuffer -= toCut;
			}
			break;
		}
	}

	buffer.SetVecCount(vecCount);
}
/*!	Adds \a length bytes from the bounce buffer to the DMABuffer \a buffer.
	Takes care of boundary and segment restrictions. \a length must be aligned.
	If \a fixedLength is requested, this function will fail if it cannot
	satisfy the request.

	\return 0 if the request cannot be satisfied. There could have been some
		additions to the DMA buffer, and you will need to cut them back.
	TODO: is that what we want here?
	\return >0 the number of bytes added to the buffer.
*/
generic_size_t
DMAResource::_AddBounceBuffer(DMABuffer& buffer,
	phys_addr_t& physicalBounceBuffer, phys_size_t& bounceLeft,
	generic_size_t length, bool fixedLength)
{
	if (bounceLeft < length) {
		if (fixedLength)
			return 0;

		length = bounceLeft & ~(generic_size_t)(fBlockSize - 1);
		if (length == 0)
			return 0;
	}

	phys_size_t bounceUsed = 0;

	uint32 vecCount = buffer.VecCount();
	if (vecCount > 0) {
		// see if we can join the bounce buffer with the previously last vec
		generic_io_vec& vec = buffer.VecAt(vecCount - 1);
		generic_addr_t vecBase = vec.base;
		generic_size_t vecLength = vec.length;

		if (vecBase + vecLength == physicalBounceBuffer) {
			vecLength += length;
			_RestrictBoundaryAndSegmentSize(vecBase, vecLength);

			generic_size_t lengthDiff = vecLength - vec.length;
			length -= lengthDiff;

			physicalBounceBuffer += lengthDiff;
			bounceLeft -= lengthDiff;
			bounceUsed += lengthDiff;

			vec.length = vecLength;
		}
	}

	while (length > 0) {
		// We need to add another bounce vec

		if (vecCount == fRestrictions.max_segment_count)
			return fixedLength ? 0 : bounceUsed;

		generic_addr_t vecLength = length;
		_RestrictBoundaryAndSegmentSize(physicalBounceBuffer, vecLength);

		buffer.AddVec(physicalBounceBuffer, vecLength);
		vecCount++;

		physicalBounceBuffer += vecLength;
		bounceLeft -= vecLength;
		bounceUsed += vecLength;
		length -= vecLength;
	}

	return bounceUsed;
}
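
/* Illustrative example (not in the original source): if the DMABuffer's last
   vec covers physical 0x10000-0x10fff and physicalBounceBuffer is 0x11000,
   the join path above merely extends that vec instead of consuming another
   of the max_segment_count slots; only what a boundary or segment-size
   restriction cuts off is appended as additional vecs by the loop that
   follows. */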
status_t
DMAResource::TranslateNext(IORequest* request, IOOperation* operation,
	generic_size_t maxOperationLength)
{
	IOBuffer* buffer = request->Buffer();
	off_t originalOffset = request->Offset() + request->Length()
		- request->RemainingBytes();
	off_t offset = originalOffset;
	generic_size_t partialBegin = offset & (fBlockSize - 1);

	// current iteration state
	uint32 vecIndex = request->VecIndex();
	uint32 vecOffset = request->VecOffset();
	generic_size_t totalLength = min_c(request->RemainingBytes(),
		fRestrictions.max_transfer_size);

	if (maxOperationLength > 0
		&& maxOperationLength < totalLength + partialBegin) {
		totalLength = maxOperationLength - partialBegin;
	}

	MutexLocker locker(fLock);

	DMABuffer* dmaBuffer = fDMABuffers.RemoveHead();
	if (dmaBuffer == NULL)
		return B_BUSY;

	dmaBuffer->SetVecCount(0);

	generic_io_vec* vecs = NULL;
	uint32 segmentCount = 0;

	TRACE("  offset %Ld, remaining size: %lu, block size %lu -> partial: %lu\n",
		offset, request->RemainingBytes(), fBlockSize, partialBegin);
	if (buffer->IsVirtual()) {
		// Unless we need the bounce buffer anyway, we have to translate the
		// virtual addresses to physical addresses, so we can check the DMA
		// restrictions.
		TRACE("  buffer is virtual %s\n", buffer->IsUser() ? "user" : "kernel");
		// TODO: !partialOperation || totalLength >= fBlockSize
		// TODO: Maybe enforce fBounceBufferSize >= 2 * fBlockSize.

		generic_size_t transferLeft = totalLength;
		vecs = fScratchVecs;

		TRACE("  create physical map (for %ld vecs)\n", buffer->VecCount());
		for (uint32 i = vecIndex; i < buffer->VecCount(); i++) {
			generic_io_vec& vec = buffer->VecAt(i);
			generic_addr_t base = vec.base + vecOffset;
			generic_size_t size = vec.length - vecOffset;
			vecOffset = 0;
			if (size > transferLeft)
				size = transferLeft;

			while (size > 0 && segmentCount
					< fRestrictions.max_segment_count) {
				physical_entry entry;
				uint32 count = 1;
				get_memory_map_etc(request->TeamID(), (void*)base, size,
					&entry, &count);

				vecs[segmentCount].base = entry.address;
				vecs[segmentCount].length = entry.size;

				transferLeft -= entry.size;
				base += entry.size;
				size -= entry.size;
				segmentCount++;
			}

			if (transferLeft == 0)
				break;
		}

		totalLength -= transferLeft;
	} else {
		// We do already have physical addresses.
		vecs = buffer->Vecs();
		segmentCount = min_c(buffer->VecCount() - vecIndex,
			fRestrictions.max_segment_count);
	}
#ifdef TRACE_DMA_RESOURCE
	TRACE("  physical count %lu\n", segmentCount);
	for (uint32 i = 0; i < segmentCount; i++) {
		TRACE("    [%" B_PRIu32 "] %#" B_PRIxGENADDR ", %" B_PRIxGENADDR "\n",
			i, vecs[vecIndex + i].base, vecs[vecIndex + i].length);
	}
#endif
	// check alignment, boundaries, etc. and set vecs in DMA buffer

	// Fetch a bounce buffer we can use for the DMABuffer.
	// TODO: We should do that lazily when needed!
	DMABounceBuffer* bounceBuffer = NULL;
	if (_NeedsBoundsBuffers()) {
		bounceBuffer = fBounceBuffers.Head();
		if (bounceBuffer == NULL)
			return B_BUSY;
	}
	dmaBuffer->SetBounceBuffer(bounceBuffer);

	generic_size_t dmaLength = 0;
	phys_addr_t physicalBounceBuffer = dmaBuffer->PhysicalBounceBufferAddress();
	phys_size_t bounceLeft = fBounceBufferSize;
	generic_size_t transferLeft = totalLength;
	// If the offset isn't block-aligned, use the bounce buffer to bridge the
	// gap to the start of the vec.
	if (partialBegin > 0) {
		generic_size_t length;
		if (request->IsWrite()) {
			// we always need to read in a whole block for the partial write
			length = fBlockSize;
		} else {
			length = (partialBegin + fRestrictions.alignment - 1)
				& ~(fRestrictions.alignment - 1);
		}

		if (_AddBounceBuffer(*dmaBuffer, physicalBounceBuffer, bounceLeft,
				length, true) == 0) {
			TRACE("  adding partial begin failed, length %lu!\n", length);
			return B_BAD_VALUE;
		}

		dmaLength += length;

		generic_size_t transferred = length - partialBegin;
		vecOffset += transferred;
		offset -= partialBegin;

		if (transferLeft > transferred)
			transferLeft -= transferred;
		else
			transferLeft = 0;

		TRACE("  partial begin, using bounce buffer: offset: %lld, length: "
			"%lu\n", offset, length);
	}
	for (uint32 i = vecIndex;
			i < vecIndex + segmentCount && transferLeft > 0;) {
		if (dmaBuffer->VecCount() >= fRestrictions.max_segment_count)
			break;

		const generic_io_vec& vec = vecs[i];
		if (vec.length <= vecOffset) {
			vecOffset -= vec.length;
			i++;
			continue;
		}

		generic_addr_t base = vec.base + vecOffset;
		generic_size_t maxLength = vec.length - vecOffset;
		if (maxLength > transferLeft)
			maxLength = transferLeft;
		generic_size_t length = maxLength;

		// Cut the vec according to transfer size, segment size, and boundary.

		if (dmaLength + length > fRestrictions.max_transfer_size) {
			length = fRestrictions.max_transfer_size - dmaLength;
			TRACE("  vec %lu: restricting length to %lu due to transfer size "
				"limit\n", i, length);
		}
		_RestrictBoundaryAndSegmentSize(base, length);

		phys_size_t useBounceBufferSize = 0;

		// Check low address: use bounce buffer for range to low address.
		// Check alignment: if not aligned, use bounce buffer for complete vec.
		if (base < fRestrictions.low_address) {
			useBounceBufferSize = fRestrictions.low_address - base;
			TRACE("  vec %lu: below low address, using bounce buffer: %lu\n", i,
				useBounceBufferSize);
		} else if (base & (fRestrictions.alignment - 1)) {
			useBounceBufferSize = length;
			TRACE("  vec %lu: misalignment, using bounce buffer: %lu\n", i,
				useBounceBufferSize);
		}

		// Enforce high address restriction
		if (base > fRestrictions.high_address)
			useBounceBufferSize = length;
		else if (base + length > fRestrictions.high_address)
			length = fRestrictions.high_address - base;

		// Align length as well
		if (useBounceBufferSize == 0)
			length &= ~(fRestrictions.alignment - 1);

		// If length is 0, use bounce buffer for complete vec.
		if (length == 0) {
			length = maxLength;
			useBounceBufferSize = length;
			TRACE("  vec %lu: 0 length, using bounce buffer: %lu\n", i,
				useBounceBufferSize);
		}

		if (useBounceBufferSize > 0) {
			// alignment could still be wrong (we round up here)
			useBounceBufferSize = (useBounceBufferSize
				+ fRestrictions.alignment - 1) & ~(fRestrictions.alignment - 1);

			length = _AddBounceBuffer(*dmaBuffer, physicalBounceBuffer,
				bounceLeft, useBounceBufferSize, false);
			if (length == 0) {
				TRACE("  vec %lu: out of bounce buffer space\n", i);
				// We don't have any bounce buffer space left, we need to move
				// this request to the next I/O operation.
				break;
			}
			TRACE("  vec %lu: final bounce length: %lu\n", i, length);
		} else {
			TRACE("  vec %lu: final length restriction: %lu\n", i, length);
			dmaBuffer->AddVec(base, length);
		}

		dmaLength += length;
		vecOffset += length;
		transferLeft -= min_c(length, transferLeft);
	}
	// If we're writing partially, we always need to have a block sized bounce
	// buffer (or else we would overwrite memory to be written on the read in
	// the first phase).
	off_t requestEnd = request->Offset() + request->Length();
	if (request->IsWrite()) {
		generic_size_t diff = dmaLength & (fBlockSize - 1);

		// If the transfer length is block aligned and we're writing past the
		// end of the given data, we still have to check whether the last
		// vec is a bounce buffer segment shorter than the block size. If so, we
		// have to cut back the complete block and use a bounce buffer for it
		// entirely.
		if (diff == 0 && offset + (off_t)dmaLength > requestEnd) {
			const generic_io_vec& dmaVec
				= dmaBuffer->VecAt(dmaBuffer->VecCount() - 1);
			ASSERT(dmaVec.base >= dmaBuffer->PhysicalBounceBufferAddress()
				&& dmaVec.base
					< dmaBuffer->PhysicalBounceBufferAddress()
						+ fBounceBufferSize);
				// We can be certain that the last vec is a bounce buffer vec,
				// since otherwise the DMA buffer couldn't exceed the end of the
				// request data.
			if (dmaVec.length < fBlockSize)
				diff = fBlockSize;
		}

		if (diff != 0) {
			// Not yet block aligned -- cut back to the previous block and add
			// a block-sized bounce buffer segment.
			TRACE("  partial end write: %lu, diff %lu\n", dmaLength, diff);

			_CutBuffer(*dmaBuffer, physicalBounceBuffer, bounceLeft, diff);
			dmaLength -= diff;

			if (_AddBounceBuffer(*dmaBuffer, physicalBounceBuffer,
					bounceLeft, fBlockSize, true) == 0) {
				// If we cannot write anything, we can't process the request at
				// all.
				TRACE("  adding bounce buffer failed!!!\n");
				if (dmaLength == 0)
					return B_BAD_VALUE;
			} else
				dmaLength += fBlockSize;
		}
	}
	// If the total length is not block aligned, use the bounce buffer for
	// padding (read case only).
	while ((dmaLength & (fBlockSize - 1)) != 0) {
		TRACE("  dmaLength not block aligned: %lu\n", dmaLength);
		generic_size_t length
			= (dmaLength + fBlockSize - 1) & ~(fBlockSize - 1);

		// If total length > max transfer size or segment count > max segment
		// count, truncate.
		// TODO: sometimes we can replace the last vec with the bounce buffer
		// to let it match the restrictions.
		if (length > fRestrictions.max_transfer_size
			|| dmaBuffer->VecCount() == fRestrictions.max_segment_count
			|| bounceLeft < length - dmaLength) {
			// cut the unaligned part off of the DMA length
			TRACE("  can't align length due to max transfer size, segment "
				"count restrictions, or lacking bounce buffer space\n");

			generic_size_t toCut = dmaLength
				& (max_c(fBlockSize, fRestrictions.alignment) - 1);
			dmaLength -= toCut;
			if (dmaLength == 0) {
				// This can only happen when we have too many small segments
				// and hit the max segment count. In this case we just use the
				// bounce buffer for as much as possible of the total length.
				dmaBuffer->SetVecCount(0);

				generic_addr_t base = dmaBuffer->PhysicalBounceBufferAddress();
				dmaLength = min_c(totalLength, fBounceBufferSize)
					& ~(max_c(fBlockSize, fRestrictions.alignment) - 1);
				_RestrictBoundaryAndSegmentSize(base, dmaLength);
				dmaBuffer->AddVec(base, dmaLength);

				physicalBounceBuffer = base + dmaLength;
				bounceLeft = fBounceBufferSize - dmaLength;
			} else
				_CutBuffer(*dmaBuffer, physicalBounceBuffer, bounceLeft, toCut);
		} else {
			TRACE("  adding %lu bytes final bounce buffer\n",
				length - dmaLength);
			length -= dmaLength;
			length = _AddBounceBuffer(*dmaBuffer, physicalBounceBuffer,
				bounceLeft, length, true);
			if (length == 0)
				panic("don't do this to me!");
			dmaLength += length;
		}
	}
	operation->SetBuffer(dmaBuffer);
	operation->SetBlockSize(fBlockSize);
	operation->SetOriginalRange(originalOffset,
		min_c(offset + (off_t)dmaLength, requestEnd) - originalOffset);
	operation->SetRange(offset, dmaLength);
	operation->SetPartial(partialBegin != 0,
		offset + (off_t)dmaLength > requestEnd);

	// If we don't need the bounce buffer, we put it back; otherwise it stays
	// with the DMA buffer until that is recycled.
	operation->SetUsesBounceBuffer(bounceLeft < fBounceBufferSize);
	if (operation->UsesBounceBuffer())
		fBounceBuffers.RemoveHead();
	else
		dmaBuffer->SetBounceBuffer(NULL);

	status_t error = operation->Prepare(request);
	if (error != B_OK)
		return error;

	request->Advance(operation->OriginalLength());

	return B_OK;
}
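
/* Illustrative only, not part of the original file: a minimal sketch of the
   intended calling cycle, assuming a hypothetical ExecuteOperation() that
   performs the actual transfer for one IOOperation.

	IOOperation operation;
	while (request->RemainingBytes() > 0) {
		status_t status = resource->TranslateNext(request, &operation, 0);
		if (status != B_OK)
			return status;

		status = ExecuteOperation(&operation);

		// Hand the DMABuffer (and any bounce buffer) back to the resource.
		resource->RecycleBuffer(operation.Buffer());
		if (status != B_OK)
			return status;
	}
*/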
void
DMAResource::RecycleBuffer(DMABuffer* buffer)
{
	if (buffer == NULL)
		return;

	MutexLocker _(fLock);
	fDMABuffers.Add(buffer);
	if (buffer->BounceBuffer() != NULL) {
		fBounceBuffers.Add(buffer->BounceBuffer());
		buffer->SetBounceBuffer(NULL);
	}
}
767 return fRestrictions
.alignment
> 1
768 || fRestrictions
.low_address
!= 0
769 || fRestrictions
.high_address
!= ~(generic_addr_t
)0
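
/* Illustrative example (not in the original source): a device that requires
   512-byte alignment and can only address the low 4 GiB needs bounce
   buffers, since alignment > 1 and high_address != ~(generic_addr_t)0:

	dma_restrictions restrictions = {};
	restrictions.alignment = 512;
	restrictions.high_address = (generic_addr_t)0x100000000ULL;
*/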
#if 0
// Sketch of a possible C API on top of DMAResource (not implemented).

create_dma_resource(restrictions)
{
	// Restrictions are: transfer size, address space, alignment,
	// segment min/max size, num segments.
}


delete_dma_resource(resource)
{
}


dma_buffer_alloc(resource, size)
{
}


dma_buffer_free(buffer)
{
	// Allocates or frees memory in that DMA buffer.
}

#endif	// 0