/*******************************************************************************
 * SCSI RDMA Protocol lib functions
 *
 * Copyright (C) 2006 FUJITA Tomonori <tomof@acm.org>
 * Copyright (C) 2016 Bryant G. Ly <bryantly@linux.vnet.ibm.com> IBM Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 ***********************************************************************/
#define pr_fmt(fmt) "libsrp: " fmt
#include <linux/printk.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kfifo.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <scsi/srp.h>
#include <target/target_core_base.h>
#include "libsrp.h"
#include "ibmvscsi_tgt.h"
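
/*
 * Fill a srp_queue with one iu_entry per receive buffer. The entries
 * live in q->items, each is tied to its srp_buf from @ring, and all of
 * them are pushed onto the kfifo so srp_iu_get()/srp_iu_put() can hand
 * them out and take them back.
 */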
static int srp_iu_pool_alloc(struct srp_queue *q, size_t max,
			     struct srp_buf **ring)
{
	struct iu_entry *iue;
	int i;

	q->pool = kcalloc(max, sizeof(struct iu_entry *), GFP_KERNEL);
	if (!q->pool)
		return -ENOMEM;

	q->items = kcalloc(max, sizeof(struct iu_entry), GFP_KERNEL);
	if (!q->items)
		goto free_pool;

	spin_lock_init(&q->lock);
	kfifo_init(&q->queue, (void *)q->pool, max * sizeof(void *));

	for (i = 0, iue = q->items; i < max; i++) {
		kfifo_in(&q->queue, (void *)&iue, sizeof(void *));
		iue->sbuf = ring[i];
		iue++;
	}
	return 0;

free_pool:
	kfree(q->pool);
	return -ENOMEM;
}
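
/* Release the entry and pointer arrays built by srp_iu_pool_alloc(). */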
static void srp_iu_pool_free(struct srp_queue *q)
{
	kfree(q->items);
	kfree(q->pool);
}
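
/*
 * Allocate the receive ring: @max srp_buf descriptors, each backed by
 * a DMA-coherent buffer of @size bytes. On failure the partial ring is
 * torn down and NULL is returned.
 */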
static struct srp_buf **srp_ring_alloc(struct device *dev,
				       size_t max, size_t size)
{
	struct srp_buf **ring;
	int i;

	ring = kcalloc(max, sizeof(struct srp_buf *), GFP_KERNEL);
	if (!ring)
		return NULL;

	for (i = 0; i < max; i++) {
		ring[i] = kzalloc(sizeof(*ring[i]), GFP_KERNEL);
		if (!ring[i])
			goto out;
		ring[i]->buf = dma_alloc_coherent(dev, size, &ring[i]->dma,
						  GFP_KERNEL);
		if (!ring[i]->buf)
			goto out;
	}

	return ring;

out:
	for (i = 0; i < max && ring[i]; i++) {
		if (ring[i]->buf) {
			dma_free_coherent(dev, size, ring[i]->buf,
					  ring[i]->dma);
		}
		kfree(ring[i]);
	}
	kfree(ring);

	return NULL;
}
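
/* Free each DMA buffer in the ring, then the descriptors and the ring. */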
static void srp_ring_free(struct device *dev, struct srp_buf **ring,
			  size_t max, size_t size)
{
	int i;

	for (i = 0; i < max; i++) {
		dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma);
		kfree(ring[i]);
	}
	kfree(ring);
}
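
/*
 * Set up a target: allocate the receive ring, build the IU pool that
 * indexes it, and stash @target in the device's driver data. Pairs
 * with srp_target_free(). A minimal usage sketch (the caller and its
 * field names here are illustrative, not taken from this file):
 *
 *	err = srp_target_alloc(&adapter->target, &vdev->dev,
 *			       nr_requests, iu_size);
 *	if (err)
 *		return err;
 *	...
 *	srp_target_free(&adapter->target);
 */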
int srp_target_alloc(struct srp_target *target, struct device *dev,
		     size_t nr, size_t iu_size)
{
	int err;

	spin_lock_init(&target->lock);

	target->dev = dev;

	target->srp_iu_size = iu_size;
	target->rx_ring_size = nr;
	target->rx_ring = srp_ring_alloc(target->dev, nr, iu_size);
	if (!target->rx_ring)
		return -ENOMEM;

	err = srp_iu_pool_alloc(&target->iu_queue, nr, target->rx_ring);
	if (err)
		goto free_ring;

	dev_set_drvdata(target->dev, target);
	return 0;

free_ring:
	srp_ring_free(target->dev, target->rx_ring, nr, iu_size);
	return -ENOMEM;
}
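
/* Undo srp_target_alloc(): clear the driver data, free ring and pool. */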
void srp_target_free(struct srp_target *target)
{
	dev_set_drvdata(target->dev, NULL);
	srp_ring_free(target->dev, target->rx_ring, target->rx_ring_size,
		      target->srp_iu_size);
	srp_iu_pool_free(&target->iu_queue);
}
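
/*
 * Pop a free IU entry off the target's fifo, or return NULL if the
 * pool is exhausted. The entry is bound to @target with its flags
 * cleared. Sketch of the expected pattern (the handler name is
 * illustrative):
 *
 *	struct iu_entry *iue = srp_iu_get(target);
 *	if (!iue)
 *		return -EBUSY;
 *	handle_request(iue);	(request data sits in iue->sbuf->buf)
 *	srp_iu_put(iue);
 */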
struct iu_entry *srp_iu_get(struct srp_target *target)
{
	struct iu_entry *iue = NULL;

	if (kfifo_out_locked(&target->iu_queue.queue, (void *)&iue,
			     sizeof(void *),
			     &target->iu_queue.lock) != sizeof(void *)) {
		WARN_ONCE(1, "unexpected fifo state");
		return NULL;
	}
	if (!iue)
		return iue;
	iue->target = target;
	iue->flags = 0;
	return iue;
}
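
/* Return an IU entry to its target's free fifo. */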
void srp_iu_put(struct iu_entry *iue)
{
	kfifo_in_locked(&iue->target->iu_queue.queue, (void *)&iue,
			sizeof(void *), &iue->target->iu_queue.lock);
}
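
/*
 * Move data described by a single direct descriptor. With @dma_map
 * set, the se_cmd scatterlist is DMA-mapped around the @rdma_io
 * callback and the length is clamped to the descriptor's length.
 */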
static int srp_direct_data(struct ibmvscsis_cmd *cmd, struct srp_direct_buf *md,
			   enum dma_data_direction dir, srp_rdma_t rdma_io,
			   int dma_map, int ext_desc)
{
	struct iu_entry *iue = NULL;
	struct scatterlist *sg = NULL;
	int err, nsg = 0, len;

	if (dma_map) {
		iue = cmd->iue;
		sg = cmd->se_cmd.t_data_sg;
		nsg = dma_map_sg(iue->target->dev, sg, cmd->se_cmd.t_data_nents,
				 DMA_BIDIRECTIONAL);
		if (!nsg) {
			pr_err("fail to map %p %d\n", iue,
			       cmd->se_cmd.t_data_nents);
			return 0;
		}
		len = min(cmd->se_cmd.data_length, be32_to_cpu(md->len));
	} else {
		len = be32_to_cpu(md->len);
	}

	err = rdma_io(cmd, sg, nsg, md, 1, dir, len);

	if (dma_map)
		dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);

	return err;
}
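
/*
 * Move data described by an indirect descriptor table. If the whole
 * table is embedded in the SRP_CMD IU it is used in place; otherwise
 * it must first be fetched from the client with an extra RDMA read
 * into a DMA-coherent staging buffer before the data transfer itself.
 */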
static int srp_indirect_data(struct ibmvscsis_cmd *cmd, struct srp_cmd *srp_cmd,
			     struct srp_indirect_buf *id,
			     enum dma_data_direction dir, srp_rdma_t rdma_io,
			     int dma_map, int ext_desc)
{
	struct iu_entry *iue = NULL;
	struct srp_direct_buf *md = NULL;
	struct scatterlist dummy, *sg = NULL;
	dma_addr_t token = 0;
	int err = 0;
	int nmd, nsg = 0, len;

	if (dma_map || ext_desc) {
		iue = cmd->iue;
		sg = cmd->se_cmd.t_data_sg;
	}

	nmd = be32_to_cpu(id->table_desc.len) / sizeof(struct srp_direct_buf);

	if ((dir == DMA_FROM_DEVICE && nmd == srp_cmd->data_in_desc_cnt) ||
	    (dir == DMA_TO_DEVICE && nmd == srp_cmd->data_out_desc_cnt)) {
		md = &id->desc_list[0];
		goto rdma;
	}

	if (ext_desc && dma_map) {
		md = dma_alloc_coherent(iue->target->dev,
					be32_to_cpu(id->table_desc.len),
					&token, GFP_KERNEL);
		if (!md) {
			pr_err("Can't get dma memory %u\n",
			       be32_to_cpu(id->table_desc.len));
			return -ENOMEM;
		}

		sg_init_one(&dummy, md, be32_to_cpu(id->table_desc.len));
		sg_dma_address(&dummy) = token;
		sg_dma_len(&dummy) = be32_to_cpu(id->table_desc.len);

		err = rdma_io(cmd, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE,
			      be32_to_cpu(id->table_desc.len));
		if (err) {
			pr_err("Error copying indirect table %d\n", err);
			goto free_mem;
		}
	} else {
		pr_err("This command uses external indirect buffer\n");
		return -EINVAL;
	}

rdma:
	if (dma_map) {
		nsg = dma_map_sg(iue->target->dev, sg, cmd->se_cmd.t_data_nents,
				 DMA_BIDIRECTIONAL);
		if (!nsg) {
			pr_err("fail to map %p %d\n", iue,
			       cmd->se_cmd.t_data_nents);
			err = -EIO;
			goto free_mem;
		}
		len = min(cmd->se_cmd.data_length, be32_to_cpu(id->len));
	} else {
		len = be32_to_cpu(id->len);
	}

	err = rdma_io(cmd, sg, nsg, md, nmd, dir, len);

	if (dma_map)
		dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);

free_mem:
	if (token && dma_map) {
		dma_free_coherent(iue->target->dev,
				  be32_to_cpu(id->table_desc.len), md, token);
	}

	return err;
}
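
/*
 * Size of the data-out descriptor area that precedes the data-in
 * descriptor in a SRP_CMD IU, derived from the upper nibble of
 * buf_fmt.
 */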
static int data_out_desc_size(struct srp_cmd *cmd)
{
	int size = 0;
	u8 fmt = cmd->buf_fmt >> 4;

	switch (fmt) {
	case SRP_NO_DATA_DESC:
		break;
	case SRP_DATA_DESC_DIRECT:
		size = sizeof(struct srp_direct_buf);
		break;
	case SRP_DATA_DESC_INDIRECT:
		size = sizeof(struct srp_indirect_buf) +
			sizeof(struct srp_direct_buf) * cmd->data_out_desc_cnt;
		break;
	default:
		pr_err("client error. Invalid data_out_format %x\n", fmt);
		break;
	}
	return size;
}
/*
 * TODO: this can be called multiple times for a single command if it
 * has very long data.
 */
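/*
 * Parse the data descriptor in @srp_cmd and drive the transfer through
 * @rdma_io. Direct and indirect formats are handled; a command with no
 * data descriptor is a successful no-op.
 */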
int srp_transfer_data(struct ibmvscsis_cmd *cmd, struct srp_cmd *srp_cmd,
		      srp_rdma_t rdma_io, int dma_map, int ext_desc)
{
	struct srp_direct_buf *md;
	struct srp_indirect_buf *id;
	enum dma_data_direction dir;
	int offset, err = 0;
	u8 format;

	if (!cmd->se_cmd.t_data_nents)
		return 0;

	offset = srp_cmd->add_cdb_len & ~3;

	dir = srp_cmd_direction(srp_cmd);
	if (dir == DMA_FROM_DEVICE)
		offset += data_out_desc_size(srp_cmd);

	if (dir == DMA_TO_DEVICE)
		format = srp_cmd->buf_fmt >> 4;
	else
		format = srp_cmd->buf_fmt & ((1U << 4) - 1);

	switch (format) {
	case SRP_NO_DATA_DESC:
		break;
	case SRP_DATA_DESC_DIRECT:
		md = (struct srp_direct_buf *)(srp_cmd->add_data + offset);
		err = srp_direct_data(cmd, md, dir, rdma_io, dma_map, ext_desc);
		break;
	case SRP_DATA_DESC_INDIRECT:
		id = (struct srp_indirect_buf *)(srp_cmd->add_data + offset);
		err = srp_indirect_data(cmd, srp_cmd, id, dir, rdma_io, dma_map,
					ext_desc);
		break;
	default:
		pr_err("Unknown format %d %x\n", dir, format);
		err = -EINVAL;
	}

	return err;
}
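
/*
 * Number of bytes a SRP_CMD wants transferred in direction @dir, read
 * from its direct or indirect data descriptor (0 if there is none).
 */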
u64 srp_data_length(struct srp_cmd *cmd, enum dma_data_direction dir)
{
	struct srp_direct_buf *md;
	struct srp_indirect_buf *id;
	u64 len = 0;
	uint offset = cmd->add_cdb_len & ~3;
	u8 fmt;

	if (dir == DMA_TO_DEVICE) {
		fmt = cmd->buf_fmt >> 4;
	} else {
		fmt = cmd->buf_fmt & ((1U << 4) - 1);
		offset += data_out_desc_size(cmd);
	}

	switch (fmt) {
	case SRP_NO_DATA_DESC:
		break;
	case SRP_DATA_DESC_DIRECT:
		md = (struct srp_direct_buf *)(cmd->add_data + offset);
		len = be32_to_cpu(md->len);
		break;
	case SRP_DATA_DESC_INDIRECT:
		id = (struct srp_indirect_buf *)(cmd->add_data + offset);
		len = be32_to_cpu(id->len);
		break;
	default:
		pr_err("invalid data format %x\n", fmt);
		break;
	}
	return len;
}
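
/*
 * Decode the buffer-format byte of a SRP_CMD: report the transfer
 * direction (DMA_NONE when the IU carries no data descriptor) and the
 * total data length from the direct or indirect descriptor.
 */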
int srp_get_desc_table(struct srp_cmd *srp_cmd, enum dma_data_direction *dir,
		       u64 *data_len)
{
	struct srp_indirect_buf *idb;
	struct srp_direct_buf *db;
	uint add_cdb_offset;
	int rc;

	/*
	 * The pointer computations below will only be compiled correctly
	 * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
	 * whether srp_cmd::add_data has been declared as a byte pointer.
	 */
	BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0)
		     && !__same_type(srp_cmd->add_data[0], (u8)0));

	BUG_ON(!dir);
	BUG_ON(!data_len);

	rc = 0;
	*data_len = 0;
	*dir = DMA_NONE;

	if (srp_cmd->buf_fmt & 0xf)
		*dir = DMA_FROM_DEVICE;
	else if (srp_cmd->buf_fmt >> 4)
		*dir = DMA_TO_DEVICE;

	add_cdb_offset = srp_cmd->add_cdb_len & ~3;
	if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
	    ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
		db = (struct srp_direct_buf *)(srp_cmd->add_data
					       + add_cdb_offset);
		*data_len = be32_to_cpu(db->len);
	} else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
		   ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
		idb = (struct srp_indirect_buf *)(srp_cmd->add_data
						  + add_cdb_offset);
		*data_len = be32_to_cpu(idb->len);
	}
	return rc;
}
MODULE_DESCRIPTION("SCSI RDMA Protocol lib functions");
MODULE_AUTHOR("FUJITA Tomonori");
MODULE_LICENSE("GPL");