// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * SCSI RDMA Protocol lib functions
 *
 * Copyright (C) 2006 FUJITA Tomonori <tomof@acm.org>
 * Copyright (C) 2016 Bryant G. Ly <bryantly@linux.vnet.ibm.com> IBM Corp.
 *
 ***********************************************************************/

#define pr_fmt(fmt)	"libsrp: " fmt

#include <linux/printk.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kfifo.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <scsi/srp.h>
#include <target/target_core_base.h>
#include "libsrp.h"
#include "ibmvscsi_tgt.h"

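/*
 * Build the free pool of IU (information unit) entries for @q: a kfifo
 * of pointers backed by a flat array of struct iu_entry, each entry
 * wired to its receive buffer in @ring.
 */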
static int srp_iu_pool_alloc(struct srp_queue *q, size_t max,
			     struct srp_buf **ring)
{
	struct iu_entry *iue;
	int i;

	q->pool = kcalloc(max, sizeof(struct iu_entry *), GFP_KERNEL);
	if (!q->pool)
		return -ENOMEM;
	q->items = kcalloc(max, sizeof(struct iu_entry), GFP_KERNEL);
	if (!q->items)
		goto free_pool;

	spin_lock_init(&q->lock);
	kfifo_init(&q->queue, (void *)q->pool, max * sizeof(void *));

	for (i = 0, iue = q->items; i < max; i++) {
		kfifo_in(&q->queue, (void *)&iue, sizeof(void *));
		iue->sbuf = ring[i];
		iue++;
	}
	return 0;

free_pool:
	kfree(q->pool);
	return -ENOMEM;
}

static void srp_iu_pool_free(struct srp_queue *q)
{
	kfree(q->items);
	kfree(q->pool);
}

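/*
 * Allocate @max DMA-coherent buffers of @size bytes each; these back
 * the receive ring that srp_iu_pool_alloc() threads the IU pool onto.
 */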
static struct srp_buf **srp_ring_alloc(struct device *dev,
				       size_t max, size_t size)
{
	struct srp_buf **ring;
	int i;

	ring = kcalloc(max, sizeof(struct srp_buf *), GFP_KERNEL);
	if (!ring)
		return NULL;

	for (i = 0; i < max; i++) {
		ring[i] = kzalloc(sizeof(*ring[i]), GFP_KERNEL);
		if (!ring[i])
			goto out;
		ring[i]->buf = dma_alloc_coherent(dev, size, &ring[i]->dma,
						  GFP_KERNEL);
		if (!ring[i]->buf)
			goto out;
	}
	return ring;

out:
	for (i = 0; i < max && ring[i]; i++) {
		if (ring[i]->buf) {
			dma_free_coherent(dev, size, ring[i]->buf,
					  ring[i]->dma);
		}
		kfree(ring[i]);
	}
	kfree(ring);

	return NULL;
}

static void srp_ring_free(struct device *dev, struct srp_buf **ring,
			  size_t max, size_t size)
{
	int i;

	for (i = 0; i < max; i++) {
		dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma);
		kfree(ring[i]);
	}
	kfree(ring);
}

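/*
 * Set up an SRP target: allocate the receive ring and the IU free pool
 * on top of it, and stash @target in the device's drvdata.
 */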
int srp_target_alloc(struct srp_target *target, struct device *dev,
		     size_t nr, size_t iu_size)
{
	int err;

	spin_lock_init(&target->lock);

	target->dev = dev;

	target->srp_iu_size = iu_size;
	target->rx_ring_size = nr;
	target->rx_ring = srp_ring_alloc(target->dev, nr, iu_size);
	if (!target->rx_ring)
		return -ENOMEM;
	err = srp_iu_pool_alloc(&target->iu_queue, nr, target->rx_ring);
	if (err)
		goto free_ring;

	dev_set_drvdata(target->dev, target);
	return 0;

free_ring:
	srp_ring_free(target->dev, target->rx_ring, nr, iu_size);
	return -ENOMEM;
}

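/* Undo srp_target_alloc(), releasing the IU pool and the receive ring. */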
void srp_target_free(struct srp_target *target)
{
	dev_set_drvdata(target->dev, NULL);
	srp_ring_free(target->dev, target->rx_ring, target->rx_ring_size,
		      target->srp_iu_size);
	srp_iu_pool_free(&target->iu_queue);
}

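/*
 * Pop a free IU entry off the target's locked kfifo, bind it to
 * @target and return it; NULL means the pool is exhausted. Entries go
 * back to the pool via srp_iu_put().
 */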
struct iu_entry *srp_iu_get(struct srp_target *target)
{
	struct iu_entry *iue = NULL;

	if (kfifo_out_locked(&target->iu_queue.queue, (void *)&iue,
			     sizeof(void *),
			     &target->iu_queue.lock) != sizeof(void *)) {
		WARN_ONCE(1, "unexpected fifo state");
		return NULL;
	}
	if (!iue)
		return iue;
	iue->target = target;
	iue->flags = 0;
	return iue;
}

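/* Return an IU entry obtained from srp_iu_get() to the free pool. */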
void srp_iu_put(struct iu_entry *iue)
{
	kfifo_in_locked(&iue->target->iu_queue.queue, (void *)&iue,
			sizeof(void *), &iue->target->iu_queue.lock);
}

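/*
 * Transfer data described by a single direct descriptor. With @dma_map
 * set, the se_cmd scatterlist is mapped around the rdma_io callback and
 * the length is clamped to the smaller of the descriptor length and the
 * command's data length.
 */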
static int srp_direct_data(struct ibmvscsis_cmd *cmd, struct srp_direct_buf *md,
			   enum dma_data_direction dir, srp_rdma_t rdma_io,
			   int dma_map, int ext_desc)
{
	struct iu_entry *iue = NULL;
	struct scatterlist *sg = NULL;
	int err, nsg = 0, len;

	if (dma_map) {
		iue = cmd->iue;
		sg = cmd->se_cmd.t_data_sg;
		nsg = dma_map_sg(iue->target->dev, sg, cmd->se_cmd.t_data_nents,
				 DMA_BIDIRECTIONAL);
		if (!nsg) {
			pr_err("fail to map %p %d\n", iue,
			       cmd->se_cmd.t_data_nents);
			return 0;
		}
		len = min(cmd->se_cmd.data_length, be32_to_cpu(md->len));
	} else {
		len = be32_to_cpu(md->len);
	}

	err = rdma_io(cmd, sg, nsg, md, 1, dir, len);

	if (dma_map)
		dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);

	return err;
}

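/*
 * Transfer data described by an indirect descriptor table. If the whole
 * table arrived inline with the SRP_CMD it is used as-is; otherwise,
 * when @ext_desc and @dma_map allow it, the external table is first
 * fetched into a temporary coherent buffer via rdma_io().
 */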
static int srp_indirect_data(struct ibmvscsis_cmd *cmd, struct srp_cmd *srp_cmd,
			     struct srp_indirect_buf *id,
			     enum dma_data_direction dir, srp_rdma_t rdma_io,
			     int dma_map, int ext_desc)
{
	struct iu_entry *iue = NULL;
	struct srp_direct_buf *md = NULL;
	struct scatterlist dummy, *sg = NULL;
	dma_addr_t token = 0;
	int err = 0;
	int nmd, nsg = 0, len;

	if (dma_map || ext_desc) {
		iue = cmd->iue;
		sg = cmd->se_cmd.t_data_sg;
	}

	nmd = be32_to_cpu(id->table_desc.len) / sizeof(struct srp_direct_buf);

	if ((dir == DMA_FROM_DEVICE && nmd == srp_cmd->data_in_desc_cnt) ||
	    (dir == DMA_TO_DEVICE && nmd == srp_cmd->data_out_desc_cnt)) {
		md = &id->desc_list[0];
		goto rdma;
	}

	if (ext_desc && dma_map) {
		md = dma_alloc_coherent(iue->target->dev,
					be32_to_cpu(id->table_desc.len),
					&token, GFP_KERNEL);
		if (!md) {
			pr_err("Can't get dma memory %u\n",
			       be32_to_cpu(id->table_desc.len));
			return -ENOMEM;
		}

		sg_init_one(&dummy, md, be32_to_cpu(id->table_desc.len));
		sg_dma_address(&dummy) = token;
		sg_dma_len(&dummy) = be32_to_cpu(id->table_desc.len);
		err = rdma_io(cmd, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE,
			      be32_to_cpu(id->table_desc.len));
		if (err) {
			pr_err("Error copying indirect table %d\n", err);
			goto free_mem;
		}
	} else {
		pr_err("This command uses external indirect buffer\n");
		return -EINVAL;
	}

rdma:
	if (dma_map) {
		nsg = dma_map_sg(iue->target->dev, sg, cmd->se_cmd.t_data_nents,
				 DMA_BIDIRECTIONAL);
		if (!nsg) {
			pr_err("fail to map %p %d\n", iue,
			       cmd->se_cmd.t_data_nents);
			err = -EIO;
			goto free_mem;
		}
		len = min(cmd->se_cmd.data_length, be32_to_cpu(id->len));
	} else {
		len = be32_to_cpu(id->len);
	}

	err = rdma_io(cmd, sg, nsg, md, nmd, dir, len);

	if (dma_map)
		dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);

free_mem:
	if (token && dma_map) {
		dma_free_coherent(iue->target->dev,
				  be32_to_cpu(id->table_desc.len), md, token);
	}

	return err;
}

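/*
 * Size of the data-out descriptor area encoded in the upper nibble of
 * buf_fmt; used to locate the data-in descriptor that follows it.
 */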
static int data_out_desc_size(struct srp_cmd *cmd)
{
	int size = 0;
	u8 fmt = cmd->buf_fmt >> 4;

	switch (fmt) {
	case SRP_NO_DATA_DESC:
		break;
	case SRP_DATA_DESC_DIRECT:
		size = sizeof(struct srp_direct_buf);
		break;
	case SRP_DATA_DESC_INDIRECT:
		size = sizeof(struct srp_indirect_buf) +
			sizeof(struct srp_direct_buf) * cmd->data_out_desc_cnt;
		break;
	default:
		pr_err("client error. Invalid data_out_format %x\n", fmt);
		break;
	}
	return size;
}

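/*
 * Drive the data movement for a command: pick the descriptor format
 * from buf_fmt (upper nibble for data-out, lower nibble for data-in)
 * and hand off to the direct or indirect helper above.
 */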
/*
 * TODO: this can be called multiple times for a single command if it
 * has very long data.
 */
int srp_transfer_data(struct ibmvscsis_cmd *cmd, struct srp_cmd *srp_cmd,
		      srp_rdma_t rdma_io, int dma_map, int ext_desc)
{
	struct srp_direct_buf *md;
	struct srp_indirect_buf *id;
	enum dma_data_direction dir;
	int offset, err = 0;
	u8 format;

	if (!cmd->se_cmd.t_data_nents)
		return 0;

	offset = srp_cmd->add_cdb_len & ~3;

	dir = srp_cmd_direction(srp_cmd);
	if (dir == DMA_FROM_DEVICE)
		offset += data_out_desc_size(srp_cmd);

	if (dir == DMA_TO_DEVICE)
		format = srp_cmd->buf_fmt >> 4;
	else
		format = srp_cmd->buf_fmt & ((1U << 4) - 1);

	switch (format) {
	case SRP_NO_DATA_DESC:
		break;
	case SRP_DATA_DESC_DIRECT:
		md = (struct srp_direct_buf *)(srp_cmd->add_data + offset);
		err = srp_direct_data(cmd, md, dir, rdma_io, dma_map, ext_desc);
		break;
	case SRP_DATA_DESC_INDIRECT:
		id = (struct srp_indirect_buf *)(srp_cmd->add_data + offset);
		err = srp_indirect_data(cmd, srp_cmd, id, dir, rdma_io, dma_map,
					ext_desc);
		break;
	default:
		pr_err("Unknown format %d %x\n", dir, format);
		break;
	}

	return err;
}

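/*
 * Length of the transfer a SRP_CMD describes, read from the direct or
 * indirect descriptor for the given direction; no mapping is done.
 */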
u64 srp_data_length(struct srp_cmd *cmd, enum dma_data_direction dir)
{
	struct srp_direct_buf *md;
	struct srp_indirect_buf *id;
	u64 len = 0;
	uint offset = cmd->add_cdb_len & ~3;
	u8 fmt;

	if (dir == DMA_TO_DEVICE) {
		fmt = cmd->buf_fmt >> 4;
	} else {
		fmt = cmd->buf_fmt & ((1U << 4) - 1);
		offset += data_out_desc_size(cmd);
	}

	switch (fmt) {
	case SRP_NO_DATA_DESC:
		break;
	case SRP_DATA_DESC_DIRECT:
		md = (struct srp_direct_buf *)(cmd->add_data + offset);
		len = be32_to_cpu(md->len);
		break;
	case SRP_DATA_DESC_INDIRECT:
		id = (struct srp_indirect_buf *)(cmd->add_data + offset);
		len = be32_to_cpu(id->len);
		break;
	default:
		pr_err("invalid data format %x\n", fmt);
		break;
	}
	return len;
}

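/*
 * Decode buf_fmt once up front, presumably as the command is first
 * parsed: report the transfer direction and the total descriptor data
 * length for the command.
 */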
int srp_get_desc_table(struct srp_cmd *srp_cmd, enum dma_data_direction *dir,
		       u64 *data_len)
{
	struct srp_indirect_buf *idb;
	struct srp_direct_buf *db;
	uint add_cdb_offset;
	int rc;

	/*
	 * The pointer computations below will only be compiled correctly
	 * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
	 * whether srp_cmd::add_data has been declared as a byte pointer.
	 */
	BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0)
		     && !__same_type(srp_cmd->add_data[0], (u8)0));

	BUG_ON(!dir);
	BUG_ON(!data_len);

	rc = 0;
	*data_len = 0;

	*dir = DMA_NONE;

	if (srp_cmd->buf_fmt & 0xf)
		*dir = DMA_FROM_DEVICE;
	else if (srp_cmd->buf_fmt >> 4)
		*dir = DMA_TO_DEVICE;

	add_cdb_offset = srp_cmd->add_cdb_len & ~3;
	if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
	    ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
		db = (struct srp_direct_buf *)(srp_cmd->add_data
					       + add_cdb_offset);
		*data_len = be32_to_cpu(db->len);
	} else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
		   ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
		idb = (struct srp_indirect_buf *)(srp_cmd->add_data
						  + add_cdb_offset);

		*data_len = be32_to_cpu(idb->len);
	}
	return rc;
}

MODULE_DESCRIPTION("SCSI RDMA Protocol lib functions");
MODULE_AUTHOR("FUJITA Tomonori");
MODULE_LICENSE("GPL");