2 * Copyright 2004-2008, Haiku, Inc. All Rights Reserved.
3 * Copyright 2002-2003, Thomas Kurschel. All rights reserved.
5 * Distributed under the terms of the MIT License.
9 Creates temporary Scatter/Gather table if the peripheral
10 driver has provided a simple pointer only.
13 #include "scsi_internal.h"
14 #include "KernelExport_ext.h"
22 static locked_pool_cookie temp_sg_pool
;
26 fill_temp_sg(scsi_ccb
*ccb
)
29 scsi_bus_info
*bus
= ccb
->bus
;
30 uint32 dma_boundary
= bus
->dma_params
.dma_boundary
;
31 uint32 max_sg_block_size
= bus
->dma_params
.max_sg_block_size
;
32 uint32 max_sg_blocks
= std::min(bus
->dma_params
.max_sg_blocks
,
33 (uint32
)MAX_TEMP_SG_FRAGMENTS
);
40 physical_entry
*temp_sg
= (physical_entry
*)ccb
->sg_list
;
42 res
= get_iovec_memory_map(&vec
, 1, 0, ccb
->data_length
, temp_sg
, max_sg_blocks
,
43 &num_entries
, &mapped_len
);
46 SHOW_ERROR(2, "cannot create temporary S/G list for IO request (%s)", strerror(res
));
50 if (mapped_len
!= ccb
->data_length
)
53 if (dma_boundary
!= ~(uint32
)0 || ccb
->data_length
> max_sg_block_size
) {
54 // S/G list may not be controller-compatible:
55 // we have to split offending entries
56 SHOW_FLOW(3, "Checking violation of dma boundary 0x%" B_PRIx32
57 " and entry size 0x%" B_PRIx32
, dma_boundary
, max_sg_block_size
);
59 for (uint32 cur_idx
= 0; cur_idx
< num_entries
; ++cur_idx
) {
62 // calculate space upto next dma boundary crossing
63 max_len
= (dma_boundary
+ 1) -
64 (temp_sg
[cur_idx
].address
& dma_boundary
);
65 // restrict size per sg item
66 max_len
= std::min(max_len
, (addr_t
)max_sg_block_size
);
68 SHOW_FLOW(4, "addr=%#" B_PRIxPHYSADDR
", size=%" B_PRIxPHYSADDR
69 ", max_len=%" B_PRIxADDR
", idx=%" B_PRId32
", num=%"
70 B_PRIuSIZE
, temp_sg
[cur_idx
].address
, temp_sg
[cur_idx
].size
,
71 max_len
, cur_idx
, num_entries
);
73 if (max_len
< temp_sg
[cur_idx
].size
) {
75 if (++num_entries
> max_sg_blocks
)
78 memmove(&temp_sg
[cur_idx
+ 1], &temp_sg
[cur_idx
],
79 (num_entries
- 1 - cur_idx
) * sizeof(physical_entry
));
81 temp_sg
[cur_idx
].size
= max_len
;
82 temp_sg
[cur_idx
+ 1].address
83 = temp_sg
[cur_idx
+ 1].address
+ max_len
;
84 temp_sg
[cur_idx
+ 1].size
-= max_len
;
89 ccb
->sg_count
= num_entries
;
94 SHOW_ERROR( 2, "S/G list to complex for IO request (max %d entries)",
95 MAX_TEMP_SG_FRAGMENTS
);
101 /** create temporary SG for request */
104 create_temp_sg(scsi_ccb
*ccb
)
106 physical_entry
*temp_sg
;
109 SHOW_FLOW(3, "ccb=%p, data=%p, data_length=%" B_PRIu32
, ccb
, ccb
->data
,
112 ccb
->sg_list
= temp_sg
= (physical_entry
*)locked_pool
->alloc(temp_sg_pool
);
113 if (temp_sg
== NULL
) {
114 SHOW_ERROR0(2, "cannot allocate memory for IO request!");
118 res
= lock_memory(ccb
->data
, ccb
->data_length
, B_DMA_IO
119 | ((ccb
->flags
& SCSI_DIR_MASK
) == SCSI_DIR_IN
? B_READ_DEVICE
: 0));
122 SHOW_ERROR(2, "cannot lock memory for IO request (%s)", strerror(res
));
126 if (fill_temp_sg(ccb
))
127 // this is the success path
130 unlock_memory(ccb
->data
, ccb
->data_length
, B_DMA_IO
131 | ((ccb
->flags
& SCSI_DIR_MASK
) == SCSI_DIR_IN
? B_READ_DEVICE
: 0));
134 locked_pool
->free(temp_sg_pool
, temp_sg
);
139 /** cleanup temporary SG list */
144 locked_pool
->destroy(temp_sg_pool
);
148 /** destroy SG list buffer */
151 cleanup_tmp_sg(scsi_ccb
*ccb
)
155 SHOW_FLOW(3, "ccb=%p, data=%p, data_length=%" B_PRId32
,
156 ccb
, ccb
->data
, ccb
->data_length
);
158 res
= unlock_memory(ccb
->data
, ccb
->data_length
, B_DMA_IO
159 | ((ccb
->flags
& SCSI_DIR_MASK
) == SCSI_DIR_IN
? B_READ_DEVICE
: 0));
162 SHOW_FLOW0(3, "Cannot unlock previously locked memory!");
163 panic("Cannot unlock previously locked memory!");
166 locked_pool
->free(temp_sg_pool
, (physical_entry
*)ccb
->sg_list
);
168 // restore previous state
173 /** create SG list buffer */
178 temp_sg_pool
= locked_pool
->create(
179 MAX_TEMP_SG_FRAGMENTS
* sizeof(physical_entry
),
180 sizeof(physical_entry
) - 1, 0,
181 B_PAGE_SIZE
, MAX_TEMP_SG_LISTS
, 1,
182 "scsi_temp_sg_pool", B_CONTIGUOUS
, NULL
, NULL
, NULL
);
184 if (temp_sg_pool
== NULL
)