src/add-ons/kernel/bus_managers/scsi/dma_buffer.cpp
/*
 * Copyright 2004-2008, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Copyright 2002/03, Thomas Kurschel. All rights reserved.
 *
 * Distributed under the terms of the MIT License.
 */

/*
	DMA buffer handling.

	If the peripheral driver hasn't made sure that the data of a request
	is DMA safe, we check that and copy data to a buffer if needed.
	The buffer is enlarged on demand and destroyed after a time-out
	by a daemon. Obviously, it's a good idea to avoid all this, therefore
	blkman takes care of that for read/write requests.

	To be able to copy data back after the request has finished, we need an
	S/G list to the original data, as the copying is done in a different
	thread/process context (namely the service thread).

	Currently, there is only one buffer per device; in the future,
	we may support multiple buffers, especially if we want to support
	more than 4 GB memory, which leads to trouble with 32-bit PCI cards.
*/
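
/*
	Rough usage sketch from the caller's side (illustrative only - the
	surrounding request handling is not part of this file):

		if (!scsi_get_dma_buffer(request))
			// fail the request - the data cannot be made DMA safe
			...;

		// submit the request to the controller, wait for completion, then:

		if (request->buffered)
			scsi_release_dma_buffer(request);
*/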

#include "scsi_internal.h"
#include "KernelExport_ext.h"

#include <vm/vm.h>

#include <string.h>

/*!	Check whether the S/G list of a request is supported by the DMA controller */
static bool
is_sg_list_dma_safe(scsi_ccb *request)
{
	scsi_bus_info *bus = request->bus;
	const physical_entry *sg_list = request->sg_list;
	uint32 sg_count = request->sg_count;
	uint32 dma_boundary = bus->dma_params.dma_boundary;
	uint32 alignment = bus->dma_params.alignment;
	uint32 max_sg_block_size = bus->dma_params.max_sg_block_size;
	uint32 cur_idx;

	// not too many S/G list entries
	if (sg_count > bus->dma_params.max_sg_blocks) {
		SHOW_FLOW0(1, "S/G-list too long");
		return false;
	}

	// if there are no further restrictions - be happy
	if (dma_boundary == ~(uint32)0 && alignment == 0 && max_sg_block_size == 0)
		return true;

	// argh - the controller is a bit picky, so make sure it likes us
	for (cur_idx = sg_count; cur_idx >= 1; --cur_idx, ++sg_list) {
		phys_addr_t max_len;

		// calculate space up to the next DMA boundary crossing and
		// verify that it isn't crossed
		max_len = (dma_boundary + 1) - (sg_list->address & dma_boundary);
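			// (example: with dma_boundary == 0xffff, an entry starting at
			// physical address 0x1f000 gets max_len == 0x1000, because the
			// 64 KB boundary at 0x20000 must not be crossed)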

		if (max_len < sg_list->size) {
			SHOW_FLOW(0, "S/G-entry crosses DMA boundary @%" B_PRIxPHYSADDR,
				sg_list->address + max_len);
			return false;
		}

		// check both begin and end of entry for alignment
		if ((sg_list->address & alignment) != 0) {
			SHOW_FLOW(0, "S/G-entry has bad alignment @%#" B_PRIxPHYSADDR,
				sg_list->address);
			return false;
		}

		if (((sg_list->address + sg_list->size) & alignment) != 0) {
			SHOW_FLOW(0, "end of S/G-entry has bad alignment @%" B_PRIxPHYSADDR,
				sg_list->address + sg_list->size);
			return false;
		}

		// verify entry size
		if (sg_list->size > max_sg_block_size) {
			SHOW_FLOW(0, "S/G-entry is too long (%" B_PRIuPHYSADDR "/%" B_PRIu32
				" bytes)", sg_list->size, max_sg_block_size);
			return false;
		}
	}

	return true;
}

/**	copy data from/to DMA buffer */

static bool
scsi_copy_dma_buffer(scsi_ccb *request, uint32 size, bool to_buffer)
{
	dma_buffer *buffer = request->dma_buffer;
	const physical_entry *sg_list = buffer->sg_list_orig;
	uint32 num_vecs = buffer->sg_count_orig;
	uchar *buffer_data = buffer->address;

	SHOW_FLOW(1, "to_buffer=%d, %" B_PRIu32 " bytes", to_buffer, size);

	// survive even if the controller returned an invalid data size
	size = min_c(size, request->data_length);

	// we have to use the S/G list to the original data; the DMA buffer
	// was allocated in the kernel and is thus visible even if the thread
	// has changed
	for (; size > 0 && num_vecs > 0; ++sg_list, --num_vecs) {
		size_t bytes;

		bytes = min_c(size, sg_list->size);

		if (to_buffer) {
			vm_memcpy_from_physical(buffer_data, sg_list->address, bytes,
				false);
		} else
			vm_memcpy_to_physical(sg_list->address, buffer_data, bytes, false);

		buffer_data += bytes;
		size -= bytes;
	}

	return true;
}

static void
scsi_free_dma_buffer(dma_buffer *buffer)
{
	if (buffer->area > 0) {
		SHOW_FLOW0(1, "Destroying buffer");

		delete_area(buffer->area);
		buffer->area = 0;
		buffer->size = 0;
	}

	if (buffer->sg_list_area > 0) {
		delete_area(buffer->sg_list_area);
		buffer->sg_list_area = 0;
	}
}

/**	allocate DMA buffer for the given device, deleting the old one
 *	size - buffer size in bytes
 */

static bool
scsi_alloc_dma_buffer(dma_buffer *buffer, dma_params *dma_params, uint32 size)
{
	// free old buffer first
	scsi_free_dma_buffer(buffer);

	// just in case alignment is ridiculously huge
	size = (size + dma_params->alignment) & ~dma_params->alignment;
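		// (alignment is a mask: with alignment == 0x1ff, a size of 0x300
		// is rounded up to 0x400, the next multiple of 512)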

	size = (size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
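		// (also round up to whole pages - areas are page-granular anyway)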

	// calculate the worst case number of S/G entries, i.e. if they are
	// non-contiguous; there is a controller limit and a limit by our own
	// S/G manager to check
	if (size / B_PAGE_SIZE > dma_params->max_sg_blocks
		|| size / B_PAGE_SIZE > MAX_TEMP_SG_FRAGMENTS) {
		uint32 boundary = dma_params->dma_boundary;

		// alright - a contiguous buffer is required to keep the S/G table short
		SHOW_INFO(1, "need to setup contiguous DMA buffer of size %" B_PRIu32,
			size);

		// verify that we don't get problems with the DMA boundary
		if (boundary != ~(uint32)0) {
			if (size > boundary + 1) {
				SHOW_ERROR(2, "data is longer than maximum DMA transfer length "
					"(%" B_PRIu32 "/%" B_PRIu32 " bytes)", size, boundary + 1);
				return false;
			}
		}

		virtual_address_restrictions virtualRestrictions = {};
		virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
		physical_address_restrictions physicalRestrictions = {};
		if (dma_params->alignment != ~(uint32)0)
			physicalRestrictions.alignment = dma_params->alignment + 1;
		if (boundary != ~(uint32)0)
			physicalRestrictions.boundary = boundary + 1;
#if B_HAIKU_PHYSICAL_BITS > 32
		physicalRestrictions.high_address = 0x100000000ULL;
			// TODO: Use 64 bit addresses, if possible!
#endif
		buffer->area = create_area_etc(B_SYSTEM_TEAM, "DMA buffer", size,
			B_CONTIGUOUS, 0, 0, 0, &virtualRestrictions, &physicalRestrictions,
			(void**)&buffer->address);
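			// (B_CONTIGUOUS plus the physical restrictions yields a single
			// physically contiguous run that honors alignment and boundary,
			// so the resulting S/G table stays short)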

		if (buffer->area < 0) {
			SHOW_ERROR(2, "Cannot create contiguous DMA buffer of %" B_PRIu32
				" bytes", size);
			return false;
		}

		buffer->size = size;
	} else {
		// we can live with a fragmented buffer - very nice
		buffer->area = create_area("DMA buffer",
			(void **)&buffer->address, B_ANY_KERNEL_ADDRESS, size,
			B_32_BIT_FULL_LOCK, 0);
			// TODO: Use B_FULL_LOCK, if possible!
		if (buffer->area < 0) {
			SHOW_ERROR(2, "Cannot create DMA buffer of %" B_PRIu32 " bytes",
				size);
			return false;
		}

		buffer->size = size;
	}

	// create S/G list
	// worst case is one entry per page, and size is page-aligned
	size_t sg_list_size = buffer->size / B_PAGE_SIZE * sizeof(physical_entry);
	// create_area has page-granularity
	sg_list_size = (sg_list_size + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);

	buffer->sg_list_area = create_area("DMA buffer S/G table",
		(void **)&buffer->sg_list, B_ANY_KERNEL_ADDRESS, sg_list_size,
		B_32_BIT_FULL_LOCK, 0);
		// TODO: Use B_FULL_LOCK, if possible!
	if (buffer->sg_list_area < 0) {
		SHOW_ERROR(2, "Cannot create DMA buffer S/G list of %" B_PRIuSIZE
			" bytes", sg_list_size);

		delete_area(buffer->area);
		buffer->area = 0;
		return false;
	}

	size_t sg_list_entries = sg_list_size / sizeof(physical_entry);

	size_t mapped_len;
	status_t res;
	iovec vec = {
		buffer->address,
		buffer->size
	};

	res = get_iovec_memory_map(
		&vec, 1, 0, buffer->size,
		buffer->sg_list, sg_list_entries, &buffer->sg_count,
		&mapped_len);
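		// (get_iovec_memory_map() translates the buffer's virtual address
		// range into the list of physical runs backing it; mapped_len
		// reports how many bytes could actually be resolved)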

	if (res != B_OK || mapped_len != buffer->size) {
		SHOW_ERROR(0, "Error creating S/G list for DMA buffer (%s; wanted "
			"%" B_PRIuSIZE ", got %" B_PRIuSIZE " bytes)", strerror(res),
			buffer->size, mapped_len);
	}

	return true;
}

static void
scsi_free_dma_buffer_sg_orig(dma_buffer *buffer)
{
	if (buffer->sg_orig > 0) {
		delete_area(buffer->sg_orig);
		buffer->sg_orig = 0;
		buffer->sg_count_max_orig = 0;
	}
}

/**	allocate S/G list to original data */

static bool
scsi_alloc_dma_buffer_sg_orig(dma_buffer *buffer, size_t size)
{
	// free old list first
	scsi_free_dma_buffer_sg_orig(buffer);

	size = (size * sizeof(physical_entry) + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
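		// ("size" arrives as an S/G entry count; convert it to bytes and
		// round up to page granularity for create_area())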

	buffer->sg_orig = create_area("S/G to original data",
		(void **)&buffer->sg_list_orig,
		B_ANY_KERNEL_ADDRESS, size,
		B_NO_LOCK, 0);
	if (buffer->sg_orig < 0) {
		SHOW_ERROR(2, "Cannot create S/G list buffer to original data of %"
			B_PRIuSIZE " bytes", size);
		return false;
	}

	buffer->sg_count_max_orig = size / sizeof(physical_entry);

	SHOW_INFO(3, "Got up to %" B_PRIuSIZE " S/G entries to original data",
		buffer->sg_count_max_orig);

	return true;
}

/*!	dump S/G table */
static void
dump_sg_table(const physical_entry *sg_list,
	uint32 sg_list_count)
{
	uint32 cur_idx;

	SHOW_FLOW(1, "count=%" B_PRIu32, sg_list_count);

	for (cur_idx = sg_list_count; cur_idx >= 1; --cur_idx, ++sg_list) {
		SHOW_FLOW(1, "addr=%" B_PRIxPHYSADDR ", size=%" B_PRIuPHYSADDR,
			sg_list->address, sg_list->size);
	}
}

/**	compose S/G list to original data of request */

static bool
scsi_dma_buffer_compose_sg_orig(dma_buffer *buffer, scsi_ccb *request)
{
	// enlarge buffer if required
	if (buffer->sg_count_max_orig < request->sg_count) {
		if (!scsi_alloc_dma_buffer_sg_orig(buffer, request->sg_count))
			return false;
	}

	SHOW_FLOW0(1, "copy S/G list");

	memcpy(buffer->sg_list_orig, request->sg_list,
		request->sg_count * sizeof(physical_entry));

	buffer->sg_count_orig = request->sg_count;
	return true;
}

/**	init DMA buffer and copy data to it if required
 *	note: S/G list of request must already be setup
 */

bool
scsi_get_dma_buffer(scsi_ccb *request)
{
	scsi_device_info *device = request->device;
	dma_buffer *buffer;

	request->buffered = false;

	// perhaps we are lucky and no buffering is needed
	if (is_sg_list_dma_safe(request))
		return true;

	SHOW_FLOW0(1, "Buffer is not DMA safe");

	dump_sg_table(request->sg_list, request->sg_count);

	// only one buffer at a time
	acquire_sem(device->dma_buffer_owner);

	// make sure the clean-up daemon doesn't bother us
	ACQUIRE_BEN(&device->dma_buffer_lock);

	// there is only one buffer, so no further management
	buffer = &device->dma_buffer;

	buffer->inuse = true;

	RELEASE_BEN(&device->dma_buffer_lock);

	// memorize buffer for cleanup
	request->dma_buffer = buffer;

	// enlarge buffer if too small
	if (buffer->size < request->data_length) {
		if (!scsi_alloc_dma_buffer(buffer, &device->bus->dma_params,
				request->data_length))
			goto err;
	}

	// create S/G to original data (necessary for copying from-buffer on end
	// of request, but also used during copying to-buffer in a second because
	// of laziness)
	if (!scsi_dma_buffer_compose_sg_orig(&device->dma_buffer, request))
		goto err;

	// copy data to buffer
	if ((request->flags & SCSI_DIR_MASK) == SCSI_DIR_OUT) {
		if (!scsi_copy_dma_buffer(request, request->data_length, true))
			goto err;
	}

	// replace data address, so no one notices that a buffer is used
	buffer->orig_data = request->data;
	buffer->orig_sg_list = request->sg_list;
	buffer->orig_sg_count = request->sg_count;

	request->data = buffer->address;
	request->sg_list = buffer->sg_list;
	request->sg_count = buffer->sg_count;

	SHOW_INFO(1, "bytes: %" B_PRIu32, request->data_length);
	SHOW_INFO0(3, "we can start now");

	request->buffered = true;
	return true;

err:
	SHOW_INFO0(3, "error setting up DMA buffer");

	ACQUIRE_BEN(&device->dma_buffer_lock);

	// some of this is probably not required, but I'm paranoid
	buffer->inuse = false;

	RELEASE_BEN(&device->dma_buffer_lock);
	release_sem(device->dma_buffer_owner);

	return false;
}

/*!	Copy data back and release DMA buffer;
	you must have called cleanup_tmp_sg before
*/
void
scsi_release_dma_buffer(scsi_ccb *request)
{
	scsi_device_info *device = request->device;
	dma_buffer *buffer = request->dma_buffer;

	SHOW_FLOW(1, "Buffering finished, %x, %" B_PRIx32,
		request->subsys_status & SCSI_SUBSYS_STATUS_MASK,
		(request->flags & SCSI_DIR_MASK));

	// copy data from buffer if required and if the operation succeeded
	if ((request->subsys_status & SCSI_SUBSYS_STATUS_MASK) == SCSI_REQ_CMP
		&& (request->flags & SCSI_DIR_MASK) == SCSI_DIR_IN)
		scsi_copy_dma_buffer(request,
			request->data_length - request->data_resid, false);

	// restore request
	request->data = buffer->orig_data;
	request->sg_list = buffer->orig_sg_list;
	request->sg_count = buffer->orig_sg_count;

	// free buffer
	ACQUIRE_BEN(&device->dma_buffer_lock);

	buffer->last_use = system_time();
	buffer->inuse = false;

	RELEASE_BEN(&device->dma_buffer_lock);

	release_sem(device->dma_buffer_owner);

	request->buffered = false;
}

/**	daemon that deletes the DMA buffer if it hasn't been used for some time */

void
scsi_dma_buffer_daemon(void *dev, int counter)
{
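	// (invoked periodically, presumably registered via the kernel daemon
	// mechanism, i.e. register_kernel_daemon(); "counter" is the daemon's
	// iteration count and is unused here)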
	scsi_device_info *device = (scsi_device_info*)dev;
	dma_buffer *buffer;

	ACQUIRE_BEN(&device->dma_buffer_lock);

	buffer = &device->dma_buffer;

	if (!buffer->inuse
		&& system_time() - buffer->last_use > SCSI_DMA_BUFFER_CLEANUP_DELAY) {
		scsi_free_dma_buffer(buffer);
		scsi_free_dma_buffer_sg_orig(buffer);
	}

	RELEASE_BEN(&device->dma_buffer_lock);
}

void
scsi_dma_buffer_free(dma_buffer *buffer)
{
	scsi_free_dma_buffer(buffer);
	scsi_free_dma_buffer_sg_orig(buffer);
}

void
scsi_dma_buffer_init(dma_buffer *buffer)
{
	buffer->area = 0;
	buffer->size = 0;
	buffer->sg_orig = 0;
	buffer->sg_count_max_orig = 0;
}