/*
 * Copyright (c) 2006 - 2009 Mellanox Technology Inc.  All rights reserved.
 * Copyright (C) 2008 - 2011 Bart Van Assche <bvanassche@acm.org>.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/kthread.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <scsi/scsi_tcq.h>
#include <target/configfs_macros.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
#include "ib_srpt.h"
/* Name of this kernel module. */
#define DRV_NAME	"ib_srpt"
#define DRV_VERSION	"2.0.0"
#define DRV_RELDATE	"2011-02-14"

#define SRPT_ID_STRING	"Linux SRP target"

#undef pr_fmt
#define pr_fmt(fmt) DRV_NAME " " fmt

MODULE_AUTHOR("Vu Pham and Bart Van Assche");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol target "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");
/*
 * Global Variables
 */

static u64 srpt_service_guid;
static DEFINE_SPINLOCK(srpt_dev_lock);	/* Protects srpt_dev_list. */
static LIST_HEAD(srpt_dev_list);	/* List of srpt_device structures. */
static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
module_param(srp_max_req_size, int, 0444);
MODULE_PARM_DESC(srp_max_req_size,
		 "Maximum size of SRP request messages in bytes.");

static int srpt_srq_size = DEFAULT_SRPT_SRQ_SIZE;
module_param(srpt_srq_size, int, 0444);
MODULE_PARM_DESC(srpt_srq_size,
		 "Shared receive queue (SRQ) size.");

static int srpt_get_u64_x(char *buffer, struct kernel_param *kp)
{
	return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg);
}
module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
		  0444);
MODULE_PARM_DESC(srpt_service_guid,
		 "Using this value for ioc_guid, id_ext, and cm_listen_id"
		 " instead of using the node_guid of the first HCA.");
static struct ib_client srpt_client;
static struct target_fabric_configfs *srpt_target;
static void srpt_release_channel(struct srpt_rdma_ch *ch);
static int srpt_queue_status(struct se_cmd *cmd);
/**
 * opposite_dma_dir() - Swap DMA_TO_DEVICE and DMA_FROM_DEVICE.
 */
static inline
enum dma_data_direction opposite_dma_dir(enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:	return DMA_FROM_DEVICE;
	case DMA_FROM_DEVICE:	return DMA_TO_DEVICE;
	default:		return dir;
	}
}
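/*
 * Illustrative note (not in the original sources): SRP transfer directions
 * are expressed from the initiator's point of view, while the target maps
 * its local buffers in the opposite direction. For a SCSI WRITE
 * (DMA_TO_DEVICE on the initiator side) the target receives the data, so
 * srpt_map_sg_to_ib_sge() below maps the scatterlist with
 * opposite_dma_dir(dir) == DMA_FROM_DEVICE.
 */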
/**
 * srpt_sdev_name() - Return the name associated with the HCA.
 *
 * Examples are ib0, ib1, ...
 */
static inline const char *srpt_sdev_name(struct srpt_device *sdev)
{
	return sdev->device->name;
}
static enum rdma_ch_state srpt_get_ch_state(struct srpt_rdma_ch *ch)
{
	unsigned long flags;
	enum rdma_ch_state state;

	spin_lock_irqsave(&ch->spinlock, flags);
	state = ch->state;
	spin_unlock_irqrestore(&ch->spinlock, flags);
	return state;
}
static enum rdma_ch_state
srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new_state)
{
	unsigned long flags;
	enum rdma_ch_state prev;

	spin_lock_irqsave(&ch->spinlock, flags);
	prev = ch->state;
	ch->state = new_state;
	spin_unlock_irqrestore(&ch->spinlock, flags);
	return prev;
}
/**
 * srpt_test_and_set_ch_state() - Test and set the channel state.
 *
 * Returns true if and only if the channel state has been set to the new state.
 */
static bool
srpt_test_and_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state old,
			   enum rdma_ch_state new)
{
	unsigned long flags;
	enum rdma_ch_state prev;

	spin_lock_irqsave(&ch->spinlock, flags);
	prev = ch->state;
	if (prev == old)
		ch->state = new;
	spin_unlock_irqrestore(&ch->spinlock, flags);
	return prev == old;
}
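/*
 * Usage note (added for clarity; see srpt_qp_event() below): the
 * compare-and-swap semantics make state transitions race-free, e.g.
 *
 *	if (srpt_test_and_set_ch_state(ch, CH_DRAINING, CH_RELEASING))
 *		srpt_release_channel(ch);
 *
 * guarantees that only one context performs the DRAINING -> RELEASING
 * transition and hence that the channel is released exactly once.
 */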
/**
 * srpt_event_handler() - Asynchronous IB event callback function.
 *
 * Callback function called by the InfiniBand core when an asynchronous IB
 * event occurs. This callback may occur in interrupt context. See also
 * section 11.5.2, Set Asynchronous Event Handler in the InfiniBand
 * Architecture Specification.
 */
static void srpt_event_handler(struct ib_event_handler *handler,
			       struct ib_event *event)
{
	struct srpt_device *sdev;
	struct srpt_port *sport;

	sdev = ib_get_client_data(event->device, &srpt_client);
	if (!sdev || sdev->device != event->device)
		return;

	pr_debug("ASYNC event= %d on device= %s\n", event->event,
		 srpt_sdev_name(sdev));

	switch (event->event) {
	case IB_EVENT_PORT_ERR:
		if (event->element.port_num <= sdev->device->phys_port_cnt) {
			sport = &sdev->port[event->element.port_num - 1];
			sport->lid = 0;
			sport->sm_lid = 0;
		}
		break;
	case IB_EVENT_PORT_ACTIVE:
	case IB_EVENT_LID_CHANGE:
	case IB_EVENT_PKEY_CHANGE:
	case IB_EVENT_SM_CHANGE:
	case IB_EVENT_CLIENT_REREGISTER:
		/* Refresh port data asynchronously. */
		if (event->element.port_num <= sdev->device->phys_port_cnt) {
			sport = &sdev->port[event->element.port_num - 1];
			if (!sport->lid && !sport->sm_lid)
				schedule_work(&sport->work);
		}
		break;
	default:
		printk(KERN_ERR "received unrecognized IB event %d\n",
		       event->event);
		break;
	}
}
/**
 * srpt_srq_event() - SRQ event callback function.
 */
static void srpt_srq_event(struct ib_event *event, void *ctx)
{
	printk(KERN_INFO "SRQ event %d\n", event->event);
}
/**
 * srpt_qp_event() - QP event callback function.
 */
static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
{
	pr_debug("QP event %d on cm_id=%p sess_name=%s state=%d\n",
		 event->event, ch->cm_id, ch->sess_name, srpt_get_ch_state(ch));

	switch (event->event) {
	case IB_EVENT_COMM_EST:
		ib_cm_notify(ch->cm_id, event->event);
		break;
	case IB_EVENT_QP_LAST_WQE_REACHED:
		if (srpt_test_and_set_ch_state(ch, CH_DRAINING,
					       CH_RELEASING))
			srpt_release_channel(ch);
		else
			pr_debug("%s: state %d - ignored LAST_WQE.\n",
				 ch->sess_name, srpt_get_ch_state(ch));
		break;
	default:
		printk(KERN_ERR "received unrecognized IB QP event %d\n",
		       event->event);
		break;
	}
}
/**
 * srpt_set_ioc() - Helper function for initializing an IOUnitInfo structure.
 * @slot: one-based slot number.
 * @value: four-bit value.
 *
 * Copies the lowest four bits of value in element slot of the array of four
 * bit elements called c_list (controller list). The index slot is one-based.
 */
static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value)
{
	u16 id;
	u8 tmp;

	id = (slot - 1) / 2;
	if (slot & 0x1) {
		tmp = c_list[id] & 0xf;
		c_list[id] = (value << 4) | tmp;
	} else {
		tmp = c_list[id] & 0xf0;
		c_list[id] = (value & 0xf) | tmp;
	}
}
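/*
 * Worked example (added for clarity): two one-based slots share each byte of
 * c_list, odd slots in the high nibble and even slots in the low nibble.
 * Hence srpt_set_ioc(c_list, 1, 1) stores 1 in the high nibble of c_list[0]
 * and srpt_set_ioc(c_list, 2, 0) clears the low nibble of the same byte,
 * which is exactly how srpt_get_iou() marks slot 1 present and the remaining
 * slots empty.
 */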
/**
 * srpt_get_class_port_info() - Copy ClassPortInfo to a management datagram.
 *
 * See also section 16.3.3.1 ClassPortInfo in the InfiniBand Architecture
 * Specification.
 */
static void srpt_get_class_port_info(struct ib_dm_mad *mad)
{
	struct ib_class_port_info *cif;

	cif = (struct ib_class_port_info *)mad->data;
	memset(cif, 0, sizeof *cif);
	cif->base_version = 1;
	cif->class_version = 1;
	cif->resp_time_value = 20;

	mad->mad_hdr.status = 0;
}
/**
 * srpt_get_iou() - Write IOUnitInfo to a management datagram.
 *
 * See also section 16.3.3.3 IOUnitInfo in the InfiniBand Architecture
 * Specification. See also section B.7, table B.6 in the SRP r16a document.
 */
static void srpt_get_iou(struct ib_dm_mad *mad)
{
	struct ib_dm_iou_info *ioui;
	u8 slot;
	int i;

	ioui = (struct ib_dm_iou_info *)mad->data;
	ioui->change_id = __constant_cpu_to_be16(1);
	ioui->max_controllers = 16;

	/* set present for slot 1 and empty for the rest */
	srpt_set_ioc(ioui->controller_list, 1, 1);
	for (i = 1, slot = 2; i < 16; i++, slot++)
		srpt_set_ioc(ioui->controller_list, slot, 0);

	mad->mad_hdr.status = 0;
}
/**
 * srpt_get_ioc() - Write IOControllerProfile to a management datagram.
 *
 * See also section 16.3.3.4 IOControllerProfile in the InfiniBand
 * Architecture Specification. See also section B.7, table B.7 in the SRP
 * r16a document.
 */
static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
			 struct ib_dm_mad *mad)
{
	struct srpt_device *sdev = sport->sdev;
	struct ib_dm_ioc_profile *iocp;

	iocp = (struct ib_dm_ioc_profile *)mad->data;

	if (!slot || slot > 16) {
		mad->mad_hdr.status
			= __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
		return;
	}

	if (slot > 2) {
		mad->mad_hdr.status
			= __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC);
		return;
	}

	memset(iocp, 0, sizeof *iocp);
	strcpy(iocp->id_string, SRPT_ID_STRING);
	iocp->guid = cpu_to_be64(srpt_service_guid);
	iocp->vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
	iocp->device_id = cpu_to_be32(sdev->dev_attr.vendor_part_id);
	iocp->device_version = cpu_to_be16(sdev->dev_attr.hw_ver);
	iocp->subsys_vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
	iocp->subsys_device_id = 0x0;
	iocp->io_class = __constant_cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
	iocp->io_subclass = __constant_cpu_to_be16(SRP_IO_SUBCLASS);
	iocp->protocol = __constant_cpu_to_be16(SRP_PROTOCOL);
	iocp->protocol_version = __constant_cpu_to_be16(SRP_PROTOCOL_VERSION);
	iocp->send_queue_depth = cpu_to_be16(sdev->srq_size);
	iocp->rdma_read_depth = 4;
	iocp->send_size = cpu_to_be32(srp_max_req_size);
	iocp->rdma_size = cpu_to_be32(min(sport->port_attrib.srp_max_rdma_size,
					  1U << 24));
	iocp->num_svc_entries = 1;
	iocp->op_cap_mask = SRP_SEND_TO_IOC | SRP_SEND_FROM_IOC |
		SRP_RDMA_READ_FROM_IOC | SRP_RDMA_WRITE_FROM_IOC;

	mad->mad_hdr.status = 0;
}
/**
 * srpt_get_svc_entries() - Write ServiceEntries to a management datagram.
 *
 * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
 * Specification. See also section B.7, table B.8 in the SRP r16a document.
 */
static void srpt_get_svc_entries(u64 ioc_guid,
				 u16 slot, u8 hi, u8 lo, struct ib_dm_mad *mad)
{
	struct ib_dm_svc_entries *svc_entries;

	WARN_ON(!ioc_guid);

	if (!slot || slot > 16) {
		mad->mad_hdr.status
			= __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
		return;
	}

	if (slot > 2 || lo > hi || hi > 1) {
		mad->mad_hdr.status
			= __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC);
		return;
	}

	svc_entries = (struct ib_dm_svc_entries *)mad->data;
	memset(svc_entries, 0, sizeof *svc_entries);
	svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid);
	snprintf(svc_entries->service_entries[0].name,
		 sizeof(svc_entries->service_entries[0].name),
		 "%s%016llx",
		 SRP_SERVICE_NAME_PREFIX,
		 ioc_guid);

	mad->mad_hdr.status = 0;
}
/**
 * srpt_mgmt_method_get() - Process a received management datagram.
 * @sp:      source port through which the MAD has been received.
 * @rq_mad:  received MAD.
 * @rsp_mad: response MAD.
 */
static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
				 struct ib_dm_mad *rsp_mad)
{
	u16 attr_id;
	u32 slot;
	u8 hi, lo;

	attr_id = be16_to_cpu(rq_mad->mad_hdr.attr_id);
	switch (attr_id) {
	case DM_ATTR_CLASS_PORT_INFO:
		srpt_get_class_port_info(rsp_mad);
		break;
	case DM_ATTR_IOU_INFO:
		srpt_get_iou(rsp_mad);
		break;
	case DM_ATTR_IOC_PROFILE:
		slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
		srpt_get_ioc(sp, slot, rsp_mad);
		break;
	case DM_ATTR_SVC_ENTRIES:
		slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
		hi = (u8) ((slot >> 8) & 0xff);
		lo = (u8) (slot & 0xff);
		slot = (u16) ((slot >> 16) & 0xffff);
		srpt_get_svc_entries(srpt_service_guid,
				     slot, hi, lo, rsp_mad);
		break;
	default:
		rsp_mad->mad_hdr.status =
		    __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
		break;
	}
}
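/*
 * Decoding note (added for clarity): for DM_ATTR_SVC_ENTRIES the 32-bit
 * attribute modifier packs three fields - the slot number in bits 31:16 and
 * the high/low service-entry indices in bits 15:8 and 7:0. An attr_mod of
 * 0x00010000, for example, requests service entry 0 of controller slot 1.
 */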
/**
 * srpt_mad_send_handler() - Post MAD-send callback function.
 */
static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
				  struct ib_mad_send_wc *mad_wc)
{
	ib_destroy_ah(mad_wc->send_buf->ah);
	ib_free_send_mad(mad_wc->send_buf);
}
/**
 * srpt_mad_recv_handler() - MAD reception callback function.
 */
static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
				  struct ib_mad_recv_wc *mad_wc)
{
	struct srpt_port *sport = (struct srpt_port *)mad_agent->context;
	struct ib_ah *ah;
	struct ib_mad_send_buf *rsp;
	struct ib_dm_mad *dm_mad;

	if (!mad_wc || !mad_wc->recv_buf.mad)
		return;

	ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc,
				  mad_wc->recv_buf.grh, mad_agent->port_num);
	if (IS_ERR(ah))
		goto err;

	BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR);

	rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
				 mad_wc->wc->pkey_index, 0,
				 IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA,
				 GFP_KERNEL);
	if (IS_ERR(rsp))
		goto err_rsp;

	rsp->ah = ah;

	dm_mad = rsp->mad;
	memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof *dm_mad);
	dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
	dm_mad->mad_hdr.status = 0;

	switch (mad_wc->recv_buf.mad->mad_hdr.method) {
	case IB_MGMT_METHOD_GET:
		srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad, dm_mad);
		break;
	case IB_MGMT_METHOD_SET:
		dm_mad->mad_hdr.status =
		    __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
		break;
	default:
		dm_mad->mad_hdr.status =
		    __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
		break;
	}

	if (!ib_post_send_mad(rsp, NULL)) {
		ib_free_recv_mad(mad_wc);
		/* will destroy_ah & free_send_mad in send completion */
		return;
	}

	ib_free_send_mad(rsp);

err_rsp:
	ib_destroy_ah(ah);
err:
	ib_free_recv_mad(mad_wc);
}
/**
 * srpt_refresh_port() - Configure a HCA port.
 *
 * Enable InfiniBand management datagram processing, update the cached sm_lid,
 * lid and gid values, and register a callback function for processing MADs
 * on the specified port.
 *
 * Note: It is safe to call this function more than once for the same port.
 */
static int srpt_refresh_port(struct srpt_port *sport)
{
	struct ib_mad_reg_req reg_req;
	struct ib_port_modify port_modify;
	struct ib_port_attr port_attr;
	int ret;

	memset(&port_modify, 0, sizeof port_modify);
	port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
	port_modify.clr_port_cap_mask = 0;

	ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
	if (ret)
		goto err_mod_port;

	ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
	if (ret)
		goto err_query_port;

	sport->sm_lid = port_attr.sm_lid;
	sport->lid = port_attr.lid;

	ret = ib_query_gid(sport->sdev->device, sport->port, 0, &sport->gid);
	if (ret)
		goto err_query_port;

	if (!sport->mad_agent) {
		memset(&reg_req, 0, sizeof reg_req);
		reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
		reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION;
		set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
		set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);

		sport->mad_agent = ib_register_mad_agent(sport->sdev->device,
							 sport->port,
							 IB_QPT_GSI,
							 &reg_req, 0,
							 srpt_mad_send_handler,
							 srpt_mad_recv_handler,
							 sport);
		if (IS_ERR(sport->mad_agent)) {
			ret = PTR_ERR(sport->mad_agent);
			sport->mad_agent = NULL;
			goto err_query_port;
		}
	}

	return 0;

err_query_port:

	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
	ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);

err_mod_port:

	return ret;
}
/**
 * srpt_unregister_mad_agent() - Unregister MAD callback functions.
 *
 * Note: It is safe to call this function more than once for the same device.
 */
static void srpt_unregister_mad_agent(struct srpt_device *sdev)
{
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
	};
	struct srpt_port *sport;
	int i;

	for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
		sport = &sdev->port[i - 1];
		WARN_ON(sport->port != i);
		if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0)
			printk(KERN_ERR "disabling MAD processing failed.\n");
		if (sport->mad_agent) {
			ib_unregister_mad_agent(sport->mad_agent);
			sport->mad_agent = NULL;
		}
	}
}
/**
 * srpt_alloc_ioctx() - Allocate an SRPT I/O context structure.
 */
static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
					   int ioctx_size, int dma_size,
					   enum dma_data_direction dir)
{
	struct srpt_ioctx *ioctx;

	ioctx = kmalloc(ioctx_size, GFP_KERNEL);
	if (!ioctx)
		goto err;

	ioctx->buf = kmalloc(dma_size, GFP_KERNEL);
	if (!ioctx->buf)
		goto err_free_ioctx;

	ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf, dma_size, dir);
	if (ib_dma_mapping_error(sdev->device, ioctx->dma))
		goto err_free_buf;

	return ioctx;

err_free_buf:
	kfree(ioctx->buf);
err_free_ioctx:
	kfree(ioctx);
err:
	return NULL;
}
/**
 * srpt_free_ioctx() - Free an SRPT I/O context structure.
 */
static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
			    int dma_size, enum dma_data_direction dir)
{
	if (!ioctx)
		return;

	ib_dma_unmap_single(sdev->device, ioctx->dma, dma_size, dir);
	kfree(ioctx->buf);
	kfree(ioctx);
}
/**
 * srpt_alloc_ioctx_ring() - Allocate a ring of SRPT I/O context structures.
 * @sdev:       Device to allocate the I/O context ring for.
 * @ring_size:  Number of elements in the I/O context ring.
 * @ioctx_size: I/O context size.
 * @dma_size:   DMA buffer size.
 * @dir:        DMA data direction.
 */
static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev,
				int ring_size, int ioctx_size,
				int dma_size, enum dma_data_direction dir)
{
	struct srpt_ioctx **ring;
	int i;

	WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx)
		&& ioctx_size != sizeof(struct srpt_send_ioctx));

	ring = kmalloc(ring_size * sizeof(ring[0]), GFP_KERNEL);
	if (!ring)
		goto out;
	for (i = 0; i < ring_size; ++i) {
		ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, dma_size, dir);
		if (!ring[i])
			goto err;
		ring[i]->index = i;
	}
	goto out;

err:
	while (--i >= 0)
		srpt_free_ioctx(sdev, ring[i], dma_size, dir);
	kfree(ring);
	ring = NULL;
out:
	return ring;
}
/**
 * srpt_free_ioctx_ring() - Free the ring of SRPT I/O context structures.
 */
static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
				 struct srpt_device *sdev, int ring_size,
				 int dma_size, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < ring_size; ++i)
		srpt_free_ioctx(sdev, ioctx_ring[i], dma_size, dir);
	kfree(ioctx_ring);
}
/**
 * srpt_get_cmd_state() - Get the state of a SCSI command.
 */
static enum srpt_command_state srpt_get_cmd_state(struct srpt_send_ioctx *ioctx)
{
	enum srpt_command_state state;
	unsigned long flags;

	BUG_ON(!ioctx);

	spin_lock_irqsave(&ioctx->spinlock, flags);
	state = ioctx->state;
	spin_unlock_irqrestore(&ioctx->spinlock, flags);
	return state;
}
/**
 * srpt_set_cmd_state() - Set the state of a SCSI command.
 *
 * Does not modify the state of aborted commands. Returns the previous command
 * state.
 */
static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx,
						  enum srpt_command_state new)
{
	enum srpt_command_state previous;
	unsigned long flags;

	BUG_ON(!ioctx);

	spin_lock_irqsave(&ioctx->spinlock, flags);
	previous = ioctx->state;
	if (previous != SRPT_STATE_DONE)
		ioctx->state = new;
	spin_unlock_irqrestore(&ioctx->spinlock, flags);

	return previous;
}
/**
 * srpt_test_and_set_cmd_state() - Test and set the state of a command.
 *
 * Returns true if and only if the previous command state was equal to 'old'.
 */
static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
					enum srpt_command_state old,
					enum srpt_command_state new)
{
	enum srpt_command_state previous;
	unsigned long flags;

	WARN_ON(!ioctx);
	WARN_ON(old == SRPT_STATE_DONE);
	WARN_ON(new == SRPT_STATE_NEW);

	spin_lock_irqsave(&ioctx->spinlock, flags);
	previous = ioctx->state;
	if (previous == old)
		ioctx->state = new;
	spin_unlock_irqrestore(&ioctx->spinlock, flags);
	return previous == old;
}
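/*
 * Overview (added for clarity; derived from the transitions coded in this
 * file): a send ioctx normally moves through
 *
 *	SRPT_STATE_NEW -> SRPT_STATE_NEED_DATA (RDMA read of write data)
 *		       -> SRPT_STATE_DATA_IN   (data received)
 *		       -> SRPT_STATE_CMD_RSP_SENT or SRPT_STATE_MGMT_RSP_SENT
 *		       -> SRPT_STATE_DONE,
 *
 * and srpt_set_cmd_state() refuses to move a command out of SRPT_STATE_DONE
 * so that an abort cannot be undone by a late completion.
 */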
/**
 * srpt_post_recv() - Post an IB receive request.
 */
static int srpt_post_recv(struct srpt_device *sdev,
			  struct srpt_recv_ioctx *ioctx)
{
	struct ib_sge list;
	struct ib_recv_wr wr, *bad_wr;

	BUG_ON(!sdev);
	wr.wr_id = encode_wr_id(SRPT_RECV, ioctx->ioctx.index);

	list.addr = ioctx->ioctx.dma;
	list.length = srp_max_req_size;
	list.lkey = sdev->mr->lkey;

	wr.next = NULL;
	wr.sg_list = &list;
	wr.num_sge = 1;

	return ib_post_srq_recv(sdev->srq, &wr, &bad_wr);
}
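/*
 * Note on wr_id encoding (added for clarity): the encode_wr_id(),
 * idx_from_wr_id() and opcode_from_wr_id() helpers, declared in "ib_srpt.h",
 * pack an srpt_opcode together with an ioctx ring index into the 64-bit work
 * request ID and recover the two halves again. This is how the completion
 * handlers below demultiplex a single completion queue into receive, send
 * and RDMA events.
 */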
/**
 * srpt_post_send() - Post an IB send request.
 *
 * Returns zero upon success and a non-zero value upon failure.
 */
static int srpt_post_send(struct srpt_rdma_ch *ch,
			  struct srpt_send_ioctx *ioctx, int len)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;
	struct srpt_device *sdev = ch->sport->sdev;
	int ret;

	atomic_inc(&ch->req_lim);

	ret = -ENOMEM;
	if (unlikely(atomic_dec_return(&ch->sq_wr_avail) < 0)) {
		printk(KERN_WARNING "IB send queue full (needed 1)\n");
		goto out;
	}

	ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, len,
				      DMA_TO_DEVICE);

	list.addr = ioctx->ioctx.dma;
	list.length = len;
	list.lkey = sdev->mr->lkey;

	wr.next = NULL;
	wr.wr_id = encode_wr_id(SRPT_SEND, ioctx->ioctx.index);
	wr.sg_list = &list;
	wr.num_sge = 1;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(ch->qp, &wr, &bad_wr);

out:
	if (ret < 0) {
		atomic_inc(&ch->sq_wr_avail);
		atomic_dec(&ch->req_lim);
	}
	return ret;
}
/**
 * srpt_get_desc_tbl() - Parse the data descriptors of an SRP_CMD request.
 * @ioctx: Pointer to the I/O context associated with the request.
 * @srp_cmd: Pointer to the SRP_CMD request data.
 * @dir: Pointer to the variable to which the transfer direction will be
 *   written.
 * @data_len: Pointer to the variable to which the total data length of all
 *   descriptors in the SRP_CMD request will be written.
 *
 * This function initializes ioctx->n_rbuf and ioctx->rbufs.
 *
 * Returns -EINVAL when the SRP_CMD request contains inconsistent descriptors;
 * -ENOMEM when memory allocation fails and zero upon success.
 */
static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
			     struct srp_cmd *srp_cmd,
			     enum dma_data_direction *dir, u64 *data_len)
{
	struct srp_indirect_buf *idb;
	struct srp_direct_buf *db;
	unsigned add_cdb_offset;
	int ret;

	/*
	 * The pointer computations below will only be compiled correctly
	 * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
	 * whether srp_cmd::add_data has been declared as a byte pointer.
	 */
	BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0)
		     && !__same_type(srp_cmd->add_data[0], (u8)0));

	BUG_ON(!dir);
	BUG_ON(!data_len);

	ret = 0;
	*data_len = 0;

	/*
	 * The lower four bits of the buffer format field contain the DATA-IN
	 * buffer descriptor format, and the highest four bits contain the
	 * DATA-OUT buffer descriptor format.
	 */
	*dir = DMA_NONE;
	if (srp_cmd->buf_fmt & 0xf)
		/* DATA-IN: transfer data from target to initiator (read). */
		*dir = DMA_FROM_DEVICE;
	else if (srp_cmd->buf_fmt >> 4)
		/* DATA-OUT: transfer data from initiator to target (write). */
		*dir = DMA_TO_DEVICE;

	/*
	 * According to the SRP spec, the lower two bits of the 'ADDITIONAL
	 * CDB LENGTH' field are reserved and the size in bytes of this field
	 * is four times the value specified in bits 3..7. Hence the "& ~3".
	 */
	add_cdb_offset = srp_cmd->add_cdb_len & ~3;
	if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
	    ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
		ioctx->n_rbuf = 1;
		ioctx->rbufs = &ioctx->single_rbuf;

		db = (struct srp_direct_buf *)(srp_cmd->add_data
					       + add_cdb_offset);
		memcpy(ioctx->rbufs, db, sizeof *db);
		*data_len = be32_to_cpu(db->len);
	} else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
		   ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
		idb = (struct srp_indirect_buf *)(srp_cmd->add_data
						  + add_cdb_offset);

		ioctx->n_rbuf = be32_to_cpu(idb->table_desc.len) / sizeof *db;

		if (ioctx->n_rbuf >
		    (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
			printk(KERN_ERR "received unsupported SRP_CMD request"
			       " type (%u out + %u in != %u / %zu)\n",
			       srp_cmd->data_out_desc_cnt,
			       srp_cmd->data_in_desc_cnt,
			       be32_to_cpu(idb->table_desc.len),
			       sizeof(*db));
			ioctx->n_rbuf = 0;
			ret = -EINVAL;
			goto out;
		}

		if (ioctx->n_rbuf == 1)
			ioctx->rbufs = &ioctx->single_rbuf;
		else {
			ioctx->rbufs =
				kmalloc(ioctx->n_rbuf * sizeof *db, GFP_ATOMIC);
			if (!ioctx->rbufs) {
				ioctx->n_rbuf = 0;
				ret = -ENOMEM;
				goto out;
			}
		}

		db = idb->desc_list;
		memcpy(ioctx->rbufs, db, ioctx->n_rbuf * sizeof *db);
		*data_len = be32_to_cpu(idb->len);
	}
out:
	return ret;
}
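/*
 * Worked example (added for clarity): an SRP_CMD with buf_fmt == 0x02
 * carries an indirect DATA-IN descriptor table, so srpt_get_desc_tbl()
 * reports *dir == DMA_FROM_DEVICE and copies the srp_direct_buf entries of
 * that table into ioctx->rbufs; buf_fmt == 0x10 describes a single direct
 * DATA-OUT buffer and yields *dir == DMA_TO_DEVICE with ioctx->n_rbuf == 1.
 */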
/**
 * srpt_init_ch_qp() - Initialize queue pair attributes.
 *
 * Initializes the attributes of queue pair 'qp' by allowing local write,
 * remote read and remote write. Also transitions 'qp' to state IB_QPS_INIT.
 */
static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kzalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	attr->qp_state = IB_QPS_INIT;
	attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
	    IB_ACCESS_REMOTE_WRITE;
	attr->port_num = ch->sport->port;
	attr->pkey_index = 0;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT |
			   IB_QP_PKEY_INDEX);

	kfree(attr);
	return ret;
}
/**
 * srpt_ch_qp_rtr() - Change the state of a channel to 'ready to receive' (RTR).
 * @ch: channel of the queue pair.
 * @qp: queue pair to change the state of.
 *
 * Returns zero upon success and a negative value upon failure.
 *
 * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
 * If this structure ever becomes larger, it might be necessary to allocate
 * it dynamically instead of on the stack.
 */
static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int attr_mask;
	int ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
	if (ret)
		goto out;

	qp_attr.max_dest_rd_atomic = 4;

	ret = ib_modify_qp(qp, &qp_attr, attr_mask);

out:
	return ret;
}
/**
 * srpt_ch_qp_rts() - Change the state of a channel to 'ready to send' (RTS).
 * @ch: channel of the queue pair.
 * @qp: queue pair to change the state of.
 *
 * Returns zero upon success and a negative value upon failure.
 *
 * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
 * If this structure ever becomes larger, it might be necessary to allocate
 * it dynamically instead of on the stack.
 */
static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int attr_mask;
	int ret;

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
	if (ret)
		goto out;

	qp_attr.max_rd_atomic = 4;

	ret = ib_modify_qp(qp, &qp_attr, attr_mask);

out:
	return ret;
}
/**
 * srpt_ch_qp_err() - Set the channel queue pair state to 'error'.
 */
static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
{
	struct ib_qp_attr qp_attr;

	qp_attr.qp_state = IB_QPS_ERR;
	return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE);
}
/**
 * srpt_unmap_sg_to_ib_sge() - Unmap an IB SGE list.
 */
static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch,
				    struct srpt_send_ioctx *ioctx)
{
	struct scatterlist *sg;
	enum dma_data_direction dir;

	BUG_ON(!ch);
	BUG_ON(!ioctx);
	BUG_ON(ioctx->n_rdma && !ioctx->rdma_ius);

	while (ioctx->n_rdma)
		kfree(ioctx->rdma_ius[--ioctx->n_rdma].sge);

	kfree(ioctx->rdma_ius);
	ioctx->rdma_ius = NULL;

	if (ioctx->mapped_sg_count) {
		sg = ioctx->sg;
		WARN_ON(!sg);
		dir = ioctx->cmd.data_direction;
		BUG_ON(dir == DMA_NONE);
		ib_dma_unmap_sg(ch->sport->sdev->device, sg, ioctx->sg_cnt,
				opposite_dma_dir(dir));
		ioctx->mapped_sg_count = 0;
	}
}
/**
 * srpt_map_sg_to_ib_sge() - Map an SG list to an IB SGE list.
 */
static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
				 struct srpt_send_ioctx *ioctx)
{
	struct se_cmd *cmd;
	struct scatterlist *sg, *sg_orig;
	int sg_cnt;
	enum dma_data_direction dir;
	struct rdma_iu *riu;
	struct srp_direct_buf *db;
	dma_addr_t dma_addr;
	struct ib_sge *sge;
	u64 raddr;
	u32 rsize;
	u32 tsize;
	u32 dma_len;
	int count, nrdma;
	int i, j, k;

	BUG_ON(!ch);
	BUG_ON(!ioctx);
	cmd = &ioctx->cmd;
	dir = cmd->data_direction;
	BUG_ON(dir == DMA_NONE);

	transport_do_task_sg_chain(cmd);
	ioctx->sg = sg = sg_orig = cmd->t_tasks_sg_chained;
	ioctx->sg_cnt = sg_cnt = cmd->t_tasks_sg_chained_no;

	count = ib_dma_map_sg(ch->sport->sdev->device, sg, sg_cnt,
			      opposite_dma_dir(dir));
	if (unlikely(!count))
		return -EAGAIN;

	ioctx->mapped_sg_count = count;

	if (ioctx->rdma_ius && ioctx->n_rdma_ius)
		nrdma = ioctx->n_rdma_ius;
	else {
		nrdma = (count + SRPT_DEF_SG_PER_WQE - 1) / SRPT_DEF_SG_PER_WQE
			+ ioctx->n_rbuf;

		ioctx->rdma_ius = kzalloc(nrdma * sizeof *riu, GFP_KERNEL);
		if (!ioctx->rdma_ius)
			goto free_mem;

		ioctx->n_rdma_ius = nrdma;
	}

	db = ioctx->rbufs;
	tsize = cmd->data_length;
	dma_len = sg_dma_len(&sg[0]);
	riu = ioctx->rdma_ius;

	/*
	 * For each remote descriptor, calculate the number of ib_sge entries
	 * needed. As long as that number stays below SRPT_DEF_SG_PER_WQE, a
	 * single rdma_iu (i.e. a single RDMA work request) suffices for the
	 * descriptor; otherwise additional rdma_ius are allocated to carry
	 * the remaining ib_sge entries in further RDMA work requests.
	 */
	for (i = 0, j = 0;
	     j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
		rsize = be32_to_cpu(db->len);
		raddr = be64_to_cpu(db->va);
		riu->raddr = raddr;
		riu->rkey = be32_to_cpu(db->key);
		riu->sge_cnt = 0;

		/* calculate how many sge required for this remote_buf */
		while (rsize > 0 && tsize > 0) {

			if (rsize >= dma_len) {
				tsize -= dma_len;
				rsize -= dma_len;
				raddr += dma_len;

				if (tsize > 0) {
					++j;
					if (j < count) {
						sg = sg_next(sg);
						dma_len = sg_dma_len(sg);
					}
				}
			} else {
				tsize -= rsize;
				dma_len -= rsize;
				rsize = 0;
			}

			++riu->sge_cnt;

			if (rsize > 0 && riu->sge_cnt == SRPT_DEF_SG_PER_WQE) {
				++ioctx->n_rdma;
				riu->sge =
				    kmalloc(riu->sge_cnt * sizeof *riu->sge,
					    GFP_KERNEL);
				if (!riu->sge)
					goto free_mem;

				++riu;
				riu->sge_cnt = 0;
				riu->raddr = raddr;
				riu->rkey = be32_to_cpu(db->key);
			}
		}

		++ioctx->n_rdma;
		riu->sge = kmalloc(riu->sge_cnt * sizeof *riu->sge,
				   GFP_KERNEL);
		if (!riu->sge)
			goto free_mem;
	}

	db = ioctx->rbufs;
	tsize = cmd->data_length;
	riu = ioctx->rdma_ius;
	sg = sg_orig;
	dma_len = sg_dma_len(&sg[0]);
	dma_addr = sg_dma_address(&sg[0]);

	/*
	 * The second pass fills in the ib_sge arrays with the mapped
	 * scatterlist addresses.
	 */
	for (i = 0, j = 0;
	     j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
		rsize = be32_to_cpu(db->len);
		sge = riu->sge;
		k = 0;

		while (rsize > 0 && tsize > 0) {
			sge->addr = dma_addr;
			sge->lkey = ch->sport->sdev->mr->lkey;

			if (rsize >= dma_len) {
				sge->length =
					(tsize < dma_len) ? tsize : dma_len;
				tsize -= dma_len;
				rsize -= dma_len;

				if (tsize > 0) {
					++j;
					if (j < count) {
						sg = sg_next(sg);
						dma_len = sg_dma_len(sg);
						dma_addr = sg_dma_address(sg);
					}
				}
			} else {
				sge->length = (tsize < rsize) ? tsize : rsize;
				tsize -= rsize;
				dma_len -= rsize;
				dma_addr += rsize;
				rsize = 0;
			}

			++k;
			if (k == riu->sge_cnt && rsize > 0 && tsize > 0) {
				++riu;
				sge = riu->sge;
				k = 0;
			} else if (rsize > 0 && tsize > 0)
				++sge;
		}
	}

	return 0;

free_mem:
	srpt_unmap_sg_to_ib_sge(ch, ioctx);

	return -ENOMEM;
}
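/*
 * Illustrative summary (added for clarity): the mapping runs in two passes
 * over the same pairing of remote descriptors and local scatterlist entries.
 * The first pass only counts - it sizes riu->sge_cnt per RDMA work request
 * and allocates the ib_sge arrays - while the second pass fills those arrays
 * in. For example, a 1 MiB remote buffer backed by 4 KiB local scatterlist
 * entries with SRPT_DEF_SG_PER_WQE == 16 would consume 256 entries and be
 * split across DIV_ROUND_UP(256, 16) = 16 work requests of 16 SGEs each.
 */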
/**
 * srpt_get_send_ioctx() - Obtain an I/O context for sending to the initiator.
 */
static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
{
	struct srpt_send_ioctx *ioctx;
	unsigned long flags;

	BUG_ON(!ch);

	ioctx = NULL;
	spin_lock_irqsave(&ch->spinlock, flags);
	if (!list_empty(&ch->free_list)) {
		ioctx = list_first_entry(&ch->free_list,
					 struct srpt_send_ioctx, free_list);
		list_del(&ioctx->free_list);
	}
	spin_unlock_irqrestore(&ch->spinlock, flags);

	if (!ioctx)
		return ioctx;

	BUG_ON(ioctx->ch != ch);
	kref_init(&ioctx->kref);
	spin_lock_init(&ioctx->spinlock);
	ioctx->state = SRPT_STATE_NEW;
	ioctx->n_rbuf = 0;
	ioctx->rbufs = NULL;
	ioctx->n_rdma = 0;
	ioctx->n_rdma_ius = 0;
	ioctx->rdma_ius = NULL;
	ioctx->mapped_sg_count = 0;
	init_completion(&ioctx->tx_done);
	ioctx->queue_status_only = false;
	/*
	 * transport_init_se_cmd() does not initialize all fields, so do it
	 * here.
	 */
	memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
	memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));

	return ioctx;
}
/**
 * srpt_put_send_ioctx() - Free up resources.
 */
static void srpt_put_send_ioctx(struct srpt_send_ioctx *ioctx)
{
	struct srpt_rdma_ch *ch;
	unsigned long flags;

	BUG_ON(!ioctx);
	ch = ioctx->ch;
	BUG_ON(!ch);

	WARN_ON(srpt_get_cmd_state(ioctx) != SRPT_STATE_DONE);

	srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
	transport_generic_free_cmd(&ioctx->cmd, 0);

	if (ioctx->n_rbuf > 1) {
		kfree(ioctx->rbufs);
		ioctx->rbufs = NULL;
		ioctx->n_rbuf = 0;
	}

	spin_lock_irqsave(&ch->spinlock, flags);
	list_add(&ioctx->free_list, &ch->free_list);
	spin_unlock_irqrestore(&ch->spinlock, flags);
}

static void srpt_put_send_ioctx_kref(struct kref *kref)
{
	srpt_put_send_ioctx(container_of(kref, struct srpt_send_ioctx, kref));
}
/**
 * srpt_abort_cmd() - Abort a SCSI command.
 * @ioctx: I/O context associated with the SCSI command.
 */
static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
{
	enum srpt_command_state state;
	unsigned long flags;

	BUG_ON(!ioctx);

	/*
	 * If the command is in a state where the target core is waiting for
	 * the ib_srpt driver, change the state to the next state. Changing
	 * the state of the command from SRPT_STATE_NEED_DATA to
	 * SRPT_STATE_DATA_IN ensures that srpt_xmit_response() will call this
	 * function a second time.
	 */

	spin_lock_irqsave(&ioctx->spinlock, flags);
	state = ioctx->state;
	switch (state) {
	case SRPT_STATE_NEED_DATA:
		ioctx->state = SRPT_STATE_DATA_IN;
		break;
	case SRPT_STATE_DATA_IN:
	case SRPT_STATE_CMD_RSP_SENT:
	case SRPT_STATE_MGMT_RSP_SENT:
		ioctx->state = SRPT_STATE_DONE;
		break;
	default:
		break;
	}
	spin_unlock_irqrestore(&ioctx->spinlock, flags);

	if (state == SRPT_STATE_DONE)
		goto out;

	pr_debug("Aborting cmd with state %d and tag %lld\n", state,
		 ioctx->tag);

	switch (state) {
	case SRPT_STATE_NEW:
	case SRPT_STATE_DATA_IN:
	case SRPT_STATE_MGMT:
		/*
		 * Do nothing - defer abort processing until
		 * srpt_queue_response() is invoked.
		 */
		WARN_ON(!transport_check_aborted_status(&ioctx->cmd, false));
		break;
	case SRPT_STATE_NEED_DATA:
		/* DMA_TO_DEVICE (write) - RDMA read error. */
		atomic_set(&ioctx->cmd.transport_lun_stop, 1);
		transport_generic_handle_data(&ioctx->cmd);
		break;
	case SRPT_STATE_CMD_RSP_SENT:
		/*
		 * SRP_RSP sending failed or the SRP_RSP send completion has
		 * not been received in time.
		 */
		srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
		atomic_set(&ioctx->cmd.transport_lun_stop, 1);
		kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
		break;
	case SRPT_STATE_MGMT_RSP_SENT:
		srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
		kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
		break;
	default:
		WARN_ON("ERROR: unexpected command state");
		break;
	}

out:
	return state;
}
/**
 * srpt_handle_send_err_comp() - Process an IB_WC_SEND error completion.
 */
static void srpt_handle_send_err_comp(struct srpt_rdma_ch *ch, u64 wr_id)
{
	struct srpt_send_ioctx *ioctx;
	enum srpt_command_state state;
	struct se_cmd *cmd;
	u32 index;

	atomic_inc(&ch->sq_wr_avail);

	index = idx_from_wr_id(wr_id);
	ioctx = ch->ioctx_ring[index];
	state = srpt_get_cmd_state(ioctx);
	cmd = &ioctx->cmd;

	WARN_ON(state != SRPT_STATE_CMD_RSP_SENT
		&& state != SRPT_STATE_MGMT_RSP_SENT
		&& state != SRPT_STATE_NEED_DATA
		&& state != SRPT_STATE_DONE);

	/* If SRP_RSP sending failed, undo the ch->req_lim change. */
	if (state == SRPT_STATE_CMD_RSP_SENT
	    || state == SRPT_STATE_MGMT_RSP_SENT)
		atomic_dec(&ch->req_lim);

	srpt_abort_cmd(ioctx);
}
/**
 * srpt_handle_send_comp() - Process an IB send completion notification.
 */
static void srpt_handle_send_comp(struct srpt_rdma_ch *ch,
				  struct srpt_send_ioctx *ioctx)
{
	enum srpt_command_state state;

	atomic_inc(&ch->sq_wr_avail);

	state = srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);

	if (WARN_ON(state != SRPT_STATE_CMD_RSP_SENT
		    && state != SRPT_STATE_MGMT_RSP_SENT
		    && state != SRPT_STATE_DONE))
		pr_debug("state = %d\n", state);

	if (state != SRPT_STATE_DONE)
		kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
	else
		printk(KERN_ERR "IB completion has been received too late for"
		       " wr_id = %u.\n", ioctx->ioctx.index);
}
/**
 * srpt_handle_rdma_comp() - Process an IB RDMA completion notification.
 *
 * Note: transport_generic_handle_data() is asynchronous so unmapping the
 * data that has been transferred via IB RDMA must be postponed until the
 * check_stop_free() callback.
 */
static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch,
				  struct srpt_send_ioctx *ioctx,
				  enum srpt_opcode opcode)
{
	WARN_ON(ioctx->n_rdma <= 0);
	atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);

	if (opcode == SRPT_RDMA_READ_LAST) {
		if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
						SRPT_STATE_DATA_IN))
			transport_generic_handle_data(&ioctx->cmd);
		else
			printk(KERN_ERR "%s[%d]: wrong state = %d\n", __func__,
			       __LINE__, srpt_get_cmd_state(ioctx));
	} else if (opcode == SRPT_RDMA_ABORT) {
		ioctx->rdma_aborted = true;
	} else {
		WARN(true, "unexpected opcode %d\n", opcode);
	}
}
/**
 * srpt_handle_rdma_err_comp() - Process an IB RDMA error completion.
 */
static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch,
				      struct srpt_send_ioctx *ioctx,
				      enum srpt_opcode opcode)
{
	struct se_cmd *cmd;
	enum srpt_command_state state;

	cmd = &ioctx->cmd;
	state = srpt_get_cmd_state(ioctx);
	switch (opcode) {
	case SRPT_RDMA_READ_LAST:
		if (ioctx->n_rdma <= 0) {
			printk(KERN_ERR "Received invalid RDMA read"
			       " error completion with idx %d\n",
			       ioctx->ioctx.index);
			break;
		}
		atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
		if (state == SRPT_STATE_NEED_DATA)
			srpt_abort_cmd(ioctx);
		else
			printk(KERN_ERR "%s[%d]: wrong state = %d\n",
			       __func__, __LINE__, state);
		break;
	case SRPT_RDMA_WRITE_LAST:
		atomic_set(&ioctx->cmd.transport_lun_stop, 1);
		break;
	default:
		printk(KERN_ERR "%s[%d]: opcode = %u\n", __func__,
		       __LINE__, opcode);
		break;
	}
}
/**
 * srpt_build_cmd_rsp() - Build an SRP_RSP response.
 * @ch: RDMA channel through which the request has been received.
 * @ioctx: I/O context associated with the SRP_CMD request. The response will
 *   be built in the buffer ioctx->buf points at and hence this function will
 *   overwrite the request data.
 * @tag: tag of the request for which this response is being generated.
 * @status: value for the STATUS field of the SRP_RSP information unit.
 *
 * Returns the size in bytes of the SRP_RSP response.
 *
 * An SRP_RSP response contains a SCSI status or service response. See also
 * section 6.9 in the SRP r16a document for the format of an SRP_RSP
 * response. See also SPC-2 for more information about sense data.
 */
static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
			      struct srpt_send_ioctx *ioctx, u64 tag,
			      int status)
{
	struct srp_rsp *srp_rsp;
	const u8 *sense_data;
	int sense_data_len, max_sense_len;

	/*
	 * The lowest bit of all SAM-3 status codes is zero (see also
	 * paragraph 5.3 in SAM-3).
	 */
	WARN_ON(status & 1);

	srp_rsp = ioctx->ioctx.buf;
	BUG_ON(!srp_rsp);

	sense_data = ioctx->sense_data;
	sense_data_len = ioctx->cmd.scsi_sense_length;
	WARN_ON(sense_data_len > sizeof(ioctx->sense_data));

	memset(srp_rsp, 0, sizeof *srp_rsp);
	srp_rsp->opcode = SRP_RSP;
	srp_rsp->req_lim_delta =
		__constant_cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
	srp_rsp->tag = tag;
	srp_rsp->status = status;

	if (sense_data_len) {
		BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
		max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
		if (sense_data_len > max_sense_len) {
			printk(KERN_WARNING "truncated sense data from %d to %d"
			       " bytes\n", sense_data_len, max_sense_len);
			sense_data_len = max_sense_len;
		}

		srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
		srp_rsp->sense_data_len = cpu_to_be32(sense_data_len);
		memcpy(srp_rsp + 1, sense_data, sense_data_len);
	}

	return sizeof(*srp_rsp) + sense_data_len;
}
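/*
 * Layout note (added for clarity): the sense bytes are appended directly
 * after the fixed-size srp_rsp header ("srp_rsp + 1"), so a response that
 * carries 18 bytes of sense data occupies sizeof(struct srp_rsp) + 18 bytes
 * of the ioctx buffer and is sent as a single information unit.
 */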
/**
 * srpt_build_tskmgmt_rsp() - Build a task management response.
 * @ch:       RDMA channel through which the request has been received.
 * @ioctx:    I/O context in which the SRP_RSP response will be built.
 * @rsp_code: RSP_CODE that will be stored in the response.
 * @tag:      Tag of the request for which this response is being generated.
 *
 * Returns the size in bytes of the SRP_RSP response.
 *
 * An SRP_RSP response contains a SCSI status or service response. See also
 * section 6.9 in the SRP r16a document for the format of an SRP_RSP
 * response.
 */
static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
				  struct srpt_send_ioctx *ioctx,
				  u8 rsp_code, u64 tag)
{
	struct srp_rsp *srp_rsp;
	int resp_data_len;
	int resp_len;

	resp_data_len = (rsp_code == SRP_TSK_MGMT_SUCCESS) ? 0 : 4;
	resp_len = sizeof(*srp_rsp) + resp_data_len;

	srp_rsp = ioctx->ioctx.buf;
	BUG_ON(!srp_rsp);
	memset(srp_rsp, 0, sizeof *srp_rsp);

	srp_rsp->opcode = SRP_RSP;
	srp_rsp->req_lim_delta = __constant_cpu_to_be32(1
				 + atomic_xchg(&ch->req_lim_delta, 0));
	srp_rsp->tag = tag;

	if (rsp_code != SRP_TSK_MGMT_SUCCESS) {
		srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
		srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
		srp_rsp->data[3] = rsp_code;
	}

	return resp_len;
}
#define NO_SUCH_LUN ((uint64_t)-1LL)

/*
 * SCSI LUN addressing method. See also SAM-2 and the section about
 * eight byte LUNs.
 */
enum scsi_lun_addr_method {
	SCSI_LUN_ADDR_METHOD_PERIPHERAL   = 0,
	SCSI_LUN_ADDR_METHOD_FLAT         = 1,
	SCSI_LUN_ADDR_METHOD_LUN          = 2,
	SCSI_LUN_ADDR_METHOD_EXTENDED_LUN = 3,
};
/**
 * srpt_unpack_lun() - Convert from network LUN to linear LUN.
 *
 * Convert a 2-byte, 4-byte, 6-byte or 8-byte LUN structure in network byte
 * order (big endian) to a linear LUN. Supports three LUN addressing methods:
 * peripheral, flat and logical unit. See also SAM-2, section 4.9.4 (page 40).
 */
static uint64_t srpt_unpack_lun(const uint8_t *lun, int len)
{
	uint64_t res = NO_SUCH_LUN;
	int addressing_method;

	if (unlikely(len < 2)) {
		printk(KERN_ERR "Illegal LUN length %d, expected 2 bytes or "
		       "more", len);
		goto out;
	}

	switch (len) {
	case 8:
		if ((*((__be64 *)lun) &
		     __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
			goto out_err;
		break;
	case 4:
		if (*((__be16 *)&lun[2]) != 0)
			goto out_err;
		break;
	case 6:
		if (*((__be32 *)&lun[2]) != 0)
			goto out_err;
		break;
	case 2:
		break;
	default:
		goto out_err;
	}

	addressing_method = (*lun) >> 6; /* highest two bits of byte 0 */
	switch (addressing_method) {
	case SCSI_LUN_ADDR_METHOD_PERIPHERAL:
	case SCSI_LUN_ADDR_METHOD_FLAT:
	case SCSI_LUN_ADDR_METHOD_LUN:
		res = *(lun + 1) | (((*lun) & 0x3f) << 8);
		break;

	case SCSI_LUN_ADDR_METHOD_EXTENDED_LUN:
	default:
		printk(KERN_ERR "Unimplemented LUN addressing method %u",
		       addressing_method);
		break;
	}

out:
	return res;

out_err:
	printk(KERN_ERR "Support for multi-level LUNs has not yet been"
	       " implemented");
	goto out;
}
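/*
 * Worked example (added for clarity): the two-byte LUN {0x40, 0x05} uses
 * the flat addressing method (top two bits of byte 0 == 01b), so
 * srpt_unpack_lun() returns ((0x40 & 0x3f) << 8) | 0x05 = 5. The all-zero
 * LUN {0x00, 0x00} uses peripheral addressing and unpacks to 0.
 */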
static int srpt_check_stop_free(struct se_cmd *cmd)
{
	struct srpt_send_ioctx *ioctx;

	ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
	return kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
}
/**
 * srpt_handle_cmd() - Process SRP_CMD.
 */
static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
			   struct srpt_recv_ioctx *recv_ioctx,
			   struct srpt_send_ioctx *send_ioctx)
{
	struct se_cmd *cmd;
	struct srp_cmd *srp_cmd;
	uint64_t unpacked_lun;
	u64 data_len;
	enum dma_data_direction dir;
	int ret;

	BUG_ON(!send_ioctx);

	srp_cmd = recv_ioctx->ioctx.buf;
	kref_get(&send_ioctx->kref);
	cmd = &send_ioctx->cmd;
	send_ioctx->tag = srp_cmd->tag;

	switch (srp_cmd->task_attr) {
	case SRP_CMD_SIMPLE_Q:
		cmd->sam_task_attr = MSG_SIMPLE_TAG;
		break;
	case SRP_CMD_ORDERED_Q:
	default:
		cmd->sam_task_attr = MSG_ORDERED_TAG;
		break;
	case SRP_CMD_HEAD_OF_Q:
		cmd->sam_task_attr = MSG_HEAD_TAG;
		break;
	case SRP_CMD_ACA:
		cmd->sam_task_attr = MSG_ACA_TAG;
		break;
	}

	ret = srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len);
	if (ret) {
		printk(KERN_ERR "0x%llx: parsing SRP descriptor table failed.\n",
		       srp_cmd->tag);
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref);
		goto send_sense;
	}

	cmd->data_length = data_len;
	cmd->data_direction = dir;
	unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_cmd->lun,
				       sizeof(srp_cmd->lun));
	if (transport_lookup_cmd_lun(cmd, unpacked_lun) < 0) {
		kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref);
		goto send_sense;
	}
	ret = transport_generic_allocate_tasks(cmd, srp_cmd->cdb);
	if (ret < 0) {
		kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref);
		if (cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT) {
			srpt_queue_status(cmd);
			return 0;
		} else
			goto send_sense;
	}

	transport_handle_cdb_direct(cmd);
	return 0;

send_sense:
	transport_send_check_condition_and_sense(cmd, cmd->scsi_sense_reason,
						 0);
	return -1;
}
/**
 * srpt_rx_mgmt_fn_tag() - Process a task management function by tag.
 * @ioctx: I/O context of the task management request.
 * @tag:   Tag of the SRP task management request.
 *
 * Returns zero if the target core will process the task management
 * request asynchronously.
 *
 * Note: It is assumed that the initiator serializes tag-based task management
 * requests.
 */
static int srpt_rx_mgmt_fn_tag(struct srpt_send_ioctx *ioctx, u64 tag)
{
	struct srpt_device *sdev;
	struct srpt_rdma_ch *ch;
	struct srpt_send_ioctx *target;
	int ret, i;

	ret = -EINVAL;
	ch = ioctx->ch;
	BUG_ON(!ch);
	BUG_ON(!ch->sport);
	sdev = ch->sport->sdev;
	BUG_ON(!sdev);
	spin_lock_irq(&sdev->spinlock);
	for (i = 0; i < ch->rq_size; ++i) {
		target = ch->ioctx_ring[i];
		if (target->cmd.se_lun == ioctx->cmd.se_lun &&
		    target->tag == tag &&
		    srpt_get_cmd_state(target) != SRPT_STATE_DONE) {
			ret = 0;
			/* now let the target core abort &target->cmd; */
			break;
		}
	}
	spin_unlock_irq(&sdev->spinlock);
	return ret;
}
static int srp_tmr_to_tcm(int fn)
{
	switch (fn) {
	case SRP_TSK_ABORT_TASK:
		return TMR_ABORT_TASK;
	case SRP_TSK_ABORT_TASK_SET:
		return TMR_ABORT_TASK_SET;
	case SRP_TSK_CLEAR_TASK_SET:
		return TMR_CLEAR_TASK_SET;
	case SRP_TSK_LUN_RESET:
		return TMR_LUN_RESET;
	case SRP_TSK_CLEAR_ACA:
		return TMR_CLEAR_ACA;
	default:
		return -1;
	}
}
/**
 * srpt_handle_tsk_mgmt() - Process an SRP_TSK_MGMT information unit.
 *
 * The request is passed to the target core if and only if no error is
 * detected while parsing it; otherwise a check condition is sent back.
 *
 * For more information about SRP_TSK_MGMT information units, see also section
 * 6.7 in the SRP r16a document.
 */
static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
				 struct srpt_recv_ioctx *recv_ioctx,
				 struct srpt_send_ioctx *send_ioctx)
{
	struct srp_tsk_mgmt *srp_tsk;
	struct se_cmd *cmd;
	uint64_t unpacked_lun;
	int tcm_tmr;
	int res;

	BUG_ON(!send_ioctx);

	srp_tsk = recv_ioctx->ioctx.buf;
	cmd = &send_ioctx->cmd;

	pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld"
		 " cm_id %p sess %p\n", srp_tsk->tsk_mgmt_func,
		 srp_tsk->task_tag, srp_tsk->tag, ch->cm_id, ch->sess);

	srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
	send_ioctx->tag = srp_tsk->tag;
	tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
	if (tcm_tmr < 0) {
		send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		send_ioctx->cmd.se_tmr_req->response =
			TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
		goto process_tmr;
	}
	cmd->se_tmr_req = core_tmr_alloc_req(cmd, NULL, tcm_tmr, GFP_KERNEL);
	if (!cmd->se_tmr_req) {
		send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
		goto process_tmr;
	}

	unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun,
				       sizeof(srp_tsk->lun));
	res = transport_lookup_tmr_lun(&send_ioctx->cmd, unpacked_lun);
	if (res) {
		pr_debug("rejecting TMR for LUN %lld\n", unpacked_lun);
		send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		send_ioctx->cmd.se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
		goto process_tmr;
	}

	if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK)
		srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);

process_tmr:
	kref_get(&send_ioctx->kref);
	if (!(send_ioctx->cmd.se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
		transport_generic_handle_tmr(&send_ioctx->cmd);
	else
		transport_send_check_condition_and_sense(cmd,
						cmd->scsi_sense_reason, 0);
}
/**
 * srpt_handle_new_iu() - Process a newly received information unit.
 * @ch:         RDMA channel through which the information unit has been
 *              received.
 * @recv_ioctx: Receive I/O context associated with the information unit.
 * @send_ioctx: Send I/O context, or NULL to allocate one from the channel.
 */
static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
			       struct srpt_recv_ioctx *recv_ioctx,
			       struct srpt_send_ioctx *send_ioctx)
{
	struct srp_cmd *srp_cmd;
	enum rdma_ch_state ch_state;

	BUG_ON(!ch);
	BUG_ON(!recv_ioctx);

	ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
				   recv_ioctx->ioctx.dma, srp_max_req_size,
				   DMA_FROM_DEVICE);

	ch_state = srpt_get_ch_state(ch);
	if (unlikely(ch_state == CH_CONNECTING)) {
		list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
		goto out;
	}

	if (unlikely(ch_state != CH_LIVE))
		goto out;

	srp_cmd = recv_ioctx->ioctx.buf;
	if (srp_cmd->opcode == SRP_CMD || srp_cmd->opcode == SRP_TSK_MGMT) {
		if (!send_ioctx)
			send_ioctx = srpt_get_send_ioctx(ch);
		if (unlikely(!send_ioctx)) {
			list_add_tail(&recv_ioctx->wait_list,
				      &ch->cmd_wait_list);
			goto out;
		}
	}

	transport_init_se_cmd(&send_ioctx->cmd, &srpt_target->tf_ops, ch->sess,
			      0, DMA_NONE, MSG_SIMPLE_TAG,
			      send_ioctx->sense_data);

	switch (srp_cmd->opcode) {
	case SRP_CMD:
		srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
		break;
	case SRP_TSK_MGMT:
		srpt_handle_tsk_mgmt(ch, recv_ioctx, send_ioctx);
		break;
	case SRP_I_LOGOUT:
		printk(KERN_ERR "Not yet implemented: SRP_I_LOGOUT\n");
		break;
	case SRP_CRED_RSP:
		pr_debug("received SRP_CRED_RSP\n");
		break;
	case SRP_AER_RSP:
		pr_debug("received SRP_AER_RSP\n");
		break;
	case SRP_RSP:
		printk(KERN_ERR "Received SRP_RSP\n");
		break;
	default:
		printk(KERN_ERR "received IU with unknown opcode 0x%x\n",
		       srp_cmd->opcode);
		break;
	}

	srpt_post_recv(ch->sport->sdev, recv_ioctx);
out:
	return;
}
static void srpt_process_rcv_completion(struct ib_cq *cq,
					struct srpt_rdma_ch *ch,
					struct ib_wc *wc)
{
	struct srpt_device *sdev = ch->sport->sdev;
	struct srpt_recv_ioctx *ioctx;
	u32 index;

	index = idx_from_wr_id(wc->wr_id);
	if (wc->status == IB_WC_SUCCESS) {
		int req_lim;

		req_lim = atomic_dec_return(&ch->req_lim);
		if (unlikely(req_lim < 0))
			printk(KERN_ERR "req_lim = %d < 0\n", req_lim);
		ioctx = sdev->ioctx_ring[index];
		srpt_handle_new_iu(ch, ioctx, NULL);
	} else {
		printk(KERN_INFO "receiving failed for idx %u with status %d\n",
		       index, wc->status);
	}
}
2006 * srpt_process_send_completion() - Process an IB send completion.
2008 * Note: Although this has not yet been observed during tests, at least in
2009 * theory it is possible that the srpt_get_send_ioctx() call invoked by
2010 * srpt_handle_new_iu() fails. This is possible because the req_lim_delta
2011 * value in each response is set to one, and it is possible that this response
2012 * makes the initiator send a new request before the send completion for that
2013 * response has been processed. This could e.g. happen if the call to
2014 * srpt_put_send_iotcx() is delayed because of a higher priority interrupt or
2015 * if IB retransmission causes generation of the send completion to be
2016 * delayed. Incoming information units for which srpt_get_send_ioctx() fails
2017 * are queued on cmd_wait_list. The code below processes these delayed
2018 * requests one at a time.
2020 static void srpt_process_send_completion(struct ib_cq *cq,
2021 struct srpt_rdma_ch *ch,
2022 struct ib_wc *wc)
2024 struct srpt_send_ioctx *send_ioctx;
2025 uint32_t index;
2026 enum srpt_opcode opcode;
2028 index = idx_from_wr_id(wc->wr_id);
2029 opcode = opcode_from_wr_id(wc->wr_id);
2030 send_ioctx = ch->ioctx_ring[index];
2031 if (wc->status == IB_WC_SUCCESS) {
2032 if (opcode == SRPT_SEND)
2033 srpt_handle_send_comp(ch, send_ioctx);
2034 else {
2035 WARN_ON(opcode != SRPT_RDMA_ABORT &&
2036 wc->opcode != IB_WC_RDMA_READ);
2037 srpt_handle_rdma_comp(ch, send_ioctx, opcode);
2039 } else {
2040 if (opcode == SRPT_SEND) {
2041 printk(KERN_INFO "sending response for idx %u failed"
2042 " with status %d\n", index, wc->status);
2043 srpt_handle_send_err_comp(ch, wc->wr_id);
2044 } else if (opcode != SRPT_RDMA_MID) {
2045 printk(KERN_INFO "RDMA t %d for idx %u failed with"
2046 " status %d", opcode, index, wc->status);
2047 srpt_handle_rdma_err_comp(ch, send_ioctx, opcode);
2051 while (unlikely(opcode == SRPT_SEND
2052 && !list_empty(&ch->cmd_wait_list)
2053 && srpt_get_ch_state(ch) == CH_LIVE
2054 && (send_ioctx = srpt_get_send_ioctx(ch)) != NULL)) {
2055 struct srpt_recv_ioctx *recv_ioctx;
2057 recv_ioctx = list_first_entry(&ch->cmd_wait_list,
2058 struct srpt_recv_ioctx,
2059 wait_list);
2060 list_del(&recv_ioctx->wait_list);
2061 srpt_handle_new_iu(ch, recv_ioctx, send_ioctx);
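/*
 * Note: srpt_process_completion() below re-arms the completion queue
 * notification *before* polling it. A completion that arrived between a
 * final empty poll and a later re-arm would trigger no event; arming
 * first closes that window.
 */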
2065 static void srpt_process_completion(struct ib_cq *cq, struct srpt_rdma_ch *ch)
2067 struct ib_wc *const wc = ch->wc;
2068 int i, n;
2070 WARN_ON(cq != ch->cq);
2072 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
2073 while ((n = ib_poll_cq(cq, ARRAY_SIZE(ch->wc), wc)) > 0) {
2074 for (i = 0; i < n; i++) {
2075 if (opcode_from_wr_id(wc[i].wr_id) == SRPT_RECV)
2076 srpt_process_rcv_completion(cq, ch, &wc[i]);
2077 else
2078 srpt_process_send_completion(cq, ch, &wc[i]);
2084 * srpt_completion() - IB completion queue callback function.
2086 * Notes:
2087 * - It is guaranteed that a completion handler will never be invoked
2088 * concurrently on two different CPUs for the same completion queue. See also
2089 * Documentation/infiniband/core_locking.txt and the implementation of
2090 * handle_edge_irq() in kernel/irq/chip.c.
2091 * - When threaded IRQs are enabled, completion handlers are invoked in thread
2092 * context instead of interrupt context.
2094 static void srpt_completion(struct ib_cq *cq, void *ctx)
2096 struct srpt_rdma_ch *ch = ctx;
2098 wake_up_interruptible(&ch->wait_queue);
2101 static int srpt_compl_thread(void *arg)
2103 struct srpt_rdma_ch *ch;
2105 /* Hibernation / freezing of the SRPT kernel thread is not supported. */
2106 current->flags |= PF_NOFREEZE;
2108 ch = arg;
2109 BUG_ON(!ch);
2110 printk(KERN_INFO "Session %s: kernel thread %s (PID %d) started\n",
2111 ch->sess_name, ch->thread->comm, current->pid);
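/*
 * The comma expression passed to wait_event_interruptible() below first
 * drains the completion queue and only then evaluates
 * kthread_should_stop(), so pending completions are still processed on
 * the way out when this thread is asked to stop.
 */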
2112 while (!kthread_should_stop()) {
2113 wait_event_interruptible(ch->wait_queue,
2114 (srpt_process_completion(ch->cq, ch),
2115 kthread_should_stop()));
2117 printk(KERN_INFO "Session %s: kernel thread %s (PID %d) stopped\n",
2118 ch->sess_name, ch->thread->comm, current->pid);
2119 return 0;
2123 * srpt_create_ch_ib() - Create receive and send completion queues.
2125 static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
2127 struct ib_qp_init_attr *qp_init;
2128 struct srpt_port *sport = ch->sport;
2129 struct srpt_device *sdev = sport->sdev;
2130 u32 srp_sq_size = sport->port_attrib.srp_sq_size;
2131 int ret;
2133 WARN_ON(ch->rq_size < 1);
2135 ret = -ENOMEM;
2136 qp_init = kzalloc(sizeof *qp_init, GFP_KERNEL);
2137 if (!qp_init)
2138 goto out;
2140 ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch,
2141 ch->rq_size + srp_sq_size, 0);
2142 if (IS_ERR(ch->cq)) {
2143 ret = PTR_ERR(ch->cq);
2144 printk(KERN_ERR "failed to create CQ cqe= %d ret= %d\n",
2145 ch->rq_size + srp_sq_size, ret);
2146 goto out;
2149 qp_init->qp_context = (void *)ch;
2150 qp_init->event_handler
2151 = (void(*)(struct ib_event *, void*))srpt_qp_event;
2152 qp_init->send_cq = ch->cq;
2153 qp_init->recv_cq = ch->cq;
2154 qp_init->srq = sdev->srq;
2155 qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
2156 qp_init->qp_type = IB_QPT_RC;
2157 qp_init->cap.max_send_wr = srp_sq_size;
2158 qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE;
2160 ch->qp = ib_create_qp(sdev->pd, qp_init);
2161 if (IS_ERR(ch->qp)) {
2162 ret = PTR_ERR(ch->qp);
2163 printk(KERN_ERR "failed to create_qp ret= %d\n", ret);
2164 goto err_destroy_cq;
2167 atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr);
2169 pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
2170 __func__, ch->cq->cqe, qp_init->cap.max_send_sge,
2171 qp_init->cap.max_send_wr, ch->cm_id);
2173 ret = srpt_init_ch_qp(ch, ch->qp);
2174 if (ret)
2175 goto err_destroy_qp;
2177 init_waitqueue_head(&ch->wait_queue);
2179 pr_debug("creating thread for session %s\n", ch->sess_name);
2181 ch->thread = kthread_run(srpt_compl_thread, ch, "ib_srpt_compl");
2182 if (IS_ERR(ch->thread)) {
2183 ret = PTR_ERR(ch->thread);
2184 printk(KERN_ERR "failed to create kernel thread %d\n", ret);
2185 ch->thread = NULL;
2186 goto err_destroy_qp;
2189 out:
2190 kfree(qp_init);
2191 return ret;
2193 err_destroy_qp:
2194 ib_destroy_qp(ch->qp);
2195 err_destroy_cq:
2196 ib_destroy_cq(ch->cq);
2197 goto out;
2200 static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
2202 if (ch->thread)
2203 kthread_stop(ch->thread);
2205 ib_destroy_qp(ch->qp);
2206 ib_destroy_cq(ch->cq);
2210 * __srpt_close_ch() - Close an RDMA channel by setting the QP error state.
2212 * Reset the QP and make sure all resources associated with the channel will
2213 * be deallocated at an appropriate time.
2215 * Note: The caller must hold ch->sport->sdev->spinlock.
2217 static void __srpt_close_ch(struct srpt_rdma_ch *ch)
2219 struct srpt_device *sdev;
2220 enum rdma_ch_state prev_state;
2221 unsigned long flags;
2223 sdev = ch->sport->sdev;
2225 spin_lock_irqsave(&ch->spinlock, flags);
2226 prev_state = ch->state;
2227 switch (prev_state) {
2228 case CH_CONNECTING:
2229 case CH_LIVE:
2230 ch->state = CH_DISCONNECTING;
2231 break;
2232 default:
2233 break;
2235 spin_unlock_irqrestore(&ch->spinlock, flags);
2237 switch (prev_state) {
2238 case CH_CONNECTING:
2239 ib_send_cm_rej(ch->cm_id, IB_CM_REJ_NO_RESOURCES, NULL, 0,
2240 NULL, 0);
2241 /* fall through */
2242 case CH_LIVE:
2243 if (ib_send_cm_dreq(ch->cm_id, NULL, 0) < 0)
2244 printk(KERN_ERR "sending CM DREQ failed.\n");
2245 break;
2246 case CH_DISCONNECTING:
2247 break;
2248 case CH_DRAINING:
2249 case CH_RELEASING:
2250 break;
2255 * srpt_close_ch() - Close an RDMA channel.
2257 static void srpt_close_ch(struct srpt_rdma_ch *ch)
2259 struct srpt_device *sdev;
2261 sdev = ch->sport->sdev;
2262 spin_lock_irq(&sdev->spinlock);
2263 __srpt_close_ch(ch);
2264 spin_unlock_irq(&sdev->spinlock);
2268 * srpt_drain_channel() - Drain a channel by resetting the IB queue pair.
2269 * @cm_id: Pointer to the CM ID of the channel to be drained.
2271 * Note: Must be called from inside srpt_cm_handler to avoid a race between
2272 * accessing sdev->spinlock and the call to kfree(sdev) in srpt_remove_one()
2273 * (the caller of srpt_cm_handler holds the cm_id spinlock; srpt_remove_one()
2274 * waits until all target sessions for the associated IB device have been
2275 * unregistered and target session registration involves a call to
2276 * ib_destroy_cm_id(), which locks the cm_id spinlock and hence waits until
2277 * this function has finished).
2279 static void srpt_drain_channel(struct ib_cm_id *cm_id)
2281 struct srpt_device *sdev;
2282 struct srpt_rdma_ch *ch;
2283 int ret;
2284 bool do_reset = false;
2286 WARN_ON_ONCE(irqs_disabled());
2288 sdev = cm_id->context;
2289 BUG_ON(!sdev);
2290 spin_lock_irq(&sdev->spinlock);
2291 list_for_each_entry(ch, &sdev->rch_list, list) {
2292 if (ch->cm_id == cm_id) {
2293 do_reset = srpt_test_and_set_ch_state(ch,
2294 CH_CONNECTING, CH_DRAINING) ||
2295 srpt_test_and_set_ch_state(ch,
2296 CH_LIVE, CH_DRAINING) ||
2297 srpt_test_and_set_ch_state(ch,
2298 CH_DISCONNECTING, CH_DRAINING);
2299 break;
2302 spin_unlock_irq(&sdev->spinlock);
2304 if (do_reset) {
2305 ret = srpt_ch_qp_err(ch);
2306 if (ret < 0)
2307 printk(KERN_ERR "Setting queue pair in error state"
2308 " failed: %d\n", ret);
2313 * srpt_find_channel() - Look up an RDMA channel.
2314 * @cm_id: Pointer to the CM ID of the channel to be looked up.
2316 * Return NULL if no matching RDMA channel has been found.
2318 static struct srpt_rdma_ch *srpt_find_channel(struct srpt_device *sdev,
2319 struct ib_cm_id *cm_id)
2321 struct srpt_rdma_ch *ch;
2322 bool found;
2324 WARN_ON_ONCE(irqs_disabled());
2325 BUG_ON(!sdev);
2327 found = false;
2328 spin_lock_irq(&sdev->spinlock);
2329 list_for_each_entry(ch, &sdev->rch_list, list) {
2330 if (ch->cm_id == cm_id) {
2331 found = true;
2332 break;
2335 spin_unlock_irq(&sdev->spinlock);
2337 return found ? ch : NULL;
2341 * srpt_release_channel() - Release channel resources.
2343 * Schedules the actual release because:
2344 * - Calling the ib_destroy_cm_id() call from inside an IB CM callback would
2345 * trigger a deadlock.
2346 * - It is not safe to call TCM transport_* functions from interrupt context.
2348 static void srpt_release_channel(struct srpt_rdma_ch *ch)
2350 schedule_work(&ch->release_work);
2353 static void srpt_release_channel_work(struct work_struct *w)
2355 struct srpt_rdma_ch *ch;
2356 struct srpt_device *sdev;
2358 ch = container_of(w, struct srpt_rdma_ch, release_work);
2359 pr_debug("ch = %p; ch->sess = %p; release_done = %p\n", ch, ch->sess,
2360 ch->release_done);
2362 sdev = ch->sport->sdev;
2363 BUG_ON(!sdev);
2365 transport_deregister_session_configfs(ch->sess);
2366 transport_deregister_session(ch->sess);
2367 ch->sess = NULL;
2369 srpt_destroy_ch_ib(ch);
2371 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
2372 ch->sport->sdev, ch->rq_size,
2373 ch->rsp_size, DMA_TO_DEVICE);
2375 spin_lock_irq(&sdev->spinlock);
2376 list_del(&ch->list);
2377 spin_unlock_irq(&sdev->spinlock);
2379 ib_destroy_cm_id(ch->cm_id);
2381 if (ch->release_done)
2382 complete(ch->release_done);
2384 wake_up(&sdev->ch_releaseQ);
2386 kfree(ch);
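/*
 * __srpt_lookup_acl() - Look up a node ACL by initiator port ID.
 *
 * The caller must hold sport->port_acl_lock; srpt_lookup_acl() below is
 * the locking wrapper around this function.
 */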
2389 static struct srpt_node_acl *__srpt_lookup_acl(struct srpt_port *sport,
2390 u8 i_port_id[16])
2392 struct srpt_node_acl *nacl;
2394 list_for_each_entry(nacl, &sport->port_acl_list, list)
2395 if (memcmp(nacl->i_port_id, i_port_id,
2396 sizeof(nacl->i_port_id)) == 0)
2397 return nacl;
2399 return NULL;
2402 static struct srpt_node_acl *srpt_lookup_acl(struct srpt_port *sport,
2403 u8 i_port_id[16])
2405 struct srpt_node_acl *nacl;
2407 spin_lock_irq(&sport->port_acl_lock);
2408 nacl = __srpt_lookup_acl(sport, i_port_id);
2409 spin_unlock_irq(&sport->port_acl_lock);
2411 return nacl;
2415 * srpt_cm_req_recv() - Process the event IB_CM_REQ_RECEIVED.
2417 * Ownership of the cm_id is transferred to the target session if this
2418 * function returns zero. Otherwise the caller remains the owner of cm_id.
2420 static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2421 struct ib_cm_req_event_param *param,
2422 void *private_data)
2424 struct srpt_device *sdev = cm_id->context;
2425 struct srpt_port *sport = &sdev->port[param->port - 1];
2426 struct srp_login_req *req;
2427 struct srp_login_rsp *rsp;
2428 struct srp_login_rej *rej;
2429 struct ib_cm_rep_param *rep_param;
2430 struct srpt_rdma_ch *ch, *tmp_ch;
2431 struct srpt_node_acl *nacl;
2432 u32 it_iu_len;
2433 int i;
2434 int ret = 0;
2436 WARN_ON_ONCE(irqs_disabled());
2438 if (WARN_ON(!sdev || !private_data))
2439 return -EINVAL;
2441 req = (struct srp_login_req *)private_data;
2443 it_iu_len = be32_to_cpu(req->req_it_iu_len);
2445 printk(KERN_INFO "Received SRP_LOGIN_REQ with i_port_id 0x%llx:0x%llx,"
2446 " t_port_id 0x%llx:0x%llx and it_iu_len %d on port %d"
2447 " (guid=0x%llx:0x%llx)\n",
2448 be64_to_cpu(*(__be64 *)&req->initiator_port_id[0]),
2449 be64_to_cpu(*(__be64 *)&req->initiator_port_id[8]),
2450 be64_to_cpu(*(__be64 *)&req->target_port_id[0]),
2451 be64_to_cpu(*(__be64 *)&req->target_port_id[8]),
2452 it_iu_len,
2453 param->port,
2454 be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[0]),
2455 be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[8]));
2457 rsp = kzalloc(sizeof *rsp, GFP_KERNEL);
2458 rej = kzalloc(sizeof *rej, GFP_KERNEL);
2459 rep_param = kzalloc(sizeof *rep_param, GFP_KERNEL);
2461 if (!rsp || !rej || !rep_param) {
2462 ret = -ENOMEM;
2463 goto out;
2466 if (it_iu_len > srp_max_req_size || it_iu_len < 64) {
2467 rej->reason = __constant_cpu_to_be32(
2468 SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
2469 ret = -EINVAL;
2470 printk(KERN_ERR "rejected SRP_LOGIN_REQ because its"
2471 " length (%d bytes) is out of range (%d .. %d)\n",
2472 it_iu_len, 64, srp_max_req_size);
2473 goto reject;
2476 if (!sport->enabled) {
2477 rej->reason = __constant_cpu_to_be32(
2478 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2479 ret = -EINVAL;
2480 printk(KERN_ERR "rejected SRP_LOGIN_REQ because the target port"
2481 " has not yet been enabled\n");
2482 goto reject;
2485 if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
2486 rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;
2488 spin_lock_irq(&sdev->spinlock);
2490 list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) {
2491 if (!memcmp(ch->i_port_id, req->initiator_port_id, 16)
2492 && !memcmp(ch->t_port_id, req->target_port_id, 16)
2493 && param->port == ch->sport->port
2494 && param->listen_id == ch->sport->sdev->cm_id
2495 && ch->cm_id) {
2496 enum rdma_ch_state ch_state;
2498 ch_state = srpt_get_ch_state(ch);
2499 if (ch_state != CH_CONNECTING
2500 && ch_state != CH_LIVE)
2501 continue;
2503 /* found an existing channel */
2504 pr_debug("Found existing channel %s"
2505 " cm_id= %p state= %d\n",
2506 ch->sess_name, ch->cm_id, ch_state);
2508 __srpt_close_ch(ch);
2510 rsp->rsp_flags =
2511 SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
2515 spin_unlock_irq(&sdev->spinlock);
2517 } else
2518 rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
2520 if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid)
2521 || *(__be64 *)(req->target_port_id + 8) !=
2522 cpu_to_be64(srpt_service_guid)) {
2523 rej->reason = __constant_cpu_to_be32(
2524 SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
2525 ret = -ENOMEM;
2526 printk(KERN_ERR "rejected SRP_LOGIN_REQ because it"
2527 " has an invalid target port identifier.\n");
2528 goto reject;
2531 ch = kzalloc(sizeof *ch, GFP_KERNEL);
2532 if (!ch) {
2533 rej->reason = __constant_cpu_to_be32(
2534 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2535 printk(KERN_ERR "rejected SRP_LOGIN_REQ because no memory.\n");
2536 ret = -ENOMEM;
2537 goto reject;
2540 INIT_WORK(&ch->release_work, srpt_release_channel_work);
2541 memcpy(ch->i_port_id, req->initiator_port_id, 16);
2542 memcpy(ch->t_port_id, req->target_port_id, 16);
2543 ch->sport = &sdev->port[param->port - 1];
2544 ch->cm_id = cm_id;
2546 * Avoid QUEUE_FULL conditions by limiting the number of buffers used
2547 * for the SRP protocol to the command queue size.
2549 ch->rq_size = SRPT_RQ_SIZE;
2550 spin_lock_init(&ch->spinlock);
2551 ch->state = CH_CONNECTING;
2552 INIT_LIST_HEAD(&ch->cmd_wait_list);
2553 ch->rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
2555 ch->ioctx_ring = (struct srpt_send_ioctx **)
2556 srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
2557 sizeof(*ch->ioctx_ring[0]),
2558 ch->rsp_size, DMA_TO_DEVICE);
2559 if (!ch->ioctx_ring)
2560 goto free_ch;
2562 INIT_LIST_HEAD(&ch->free_list);
2563 for (i = 0; i < ch->rq_size; i++) {
2564 ch->ioctx_ring[i]->ch = ch;
2565 list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
2568 ret = srpt_create_ch_ib(ch);
2569 if (ret) {
2570 rej->reason = __constant_cpu_to_be32(
2571 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2572 printk(KERN_ERR "rejected SRP_LOGIN_REQ because creating"
2573 " a new RDMA channel failed.\n");
2574 goto free_ring;
2577 ret = srpt_ch_qp_rtr(ch, ch->qp);
2578 if (ret) {
2579 rej->reason = __constant_cpu_to_be32(
2580 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2581 printk(KERN_ERR "rejected SRP_LOGIN_REQ because enabling"
2582 " RTR failed (error code = %d)\n", ret);
2583 goto destroy_ib;
2586 * Use the initiator port identifier as the session name.
2588 snprintf(ch->sess_name, sizeof(ch->sess_name), "0x%016llx%016llx",
2589 be64_to_cpu(*(__be64 *)ch->i_port_id),
2590 be64_to_cpu(*(__be64 *)(ch->i_port_id + 8)));
2592 pr_debug("registering session %s\n", ch->sess_name);
2594 nacl = srpt_lookup_acl(sport, ch->i_port_id);
2595 if (!nacl) {
2596 printk(KERN_INFO "Rejected login because no ACL has been"
2597 " configured yet for initiator %s.\n", ch->sess_name);
2598 rej->reason = __constant_cpu_to_be32(
2599 SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
2600 goto destroy_ib;
2603 ch->sess = transport_init_session();
2604 if (IS_ERR(ch->sess)) {
2605 rej->reason = __constant_cpu_to_be32(
2606 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2607 pr_debug("Failed to create session\n");
2608 goto deregister_session;
2610 ch->sess->se_node_acl = &nacl->nacl;
2611 transport_register_session(&sport->port_tpg_1, &nacl->nacl, ch->sess, ch);
2613 pr_debug("Establish connection sess=%p name=%s cm_id=%p\n", ch->sess,
2614 ch->sess_name, ch->cm_id);
2616 /* create srp_login_response */
2617 rsp->opcode = SRP_LOGIN_RSP;
2618 rsp->tag = req->tag;
2619 rsp->max_it_iu_len = req->req_it_iu_len;
2620 rsp->max_ti_iu_len = req->req_it_iu_len;
2621 ch->max_ti_iu_len = it_iu_len;
2622 rsp->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT
2623 | SRP_BUF_FORMAT_INDIRECT);
2624 rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
2625 atomic_set(&ch->req_lim, ch->rq_size);
2626 atomic_set(&ch->req_lim_delta, 0);
2628 /* create cm reply */
2629 rep_param->qp_num = ch->qp->qp_num;
2630 rep_param->private_data = (void *)rsp;
2631 rep_param->private_data_len = sizeof *rsp;
2632 rep_param->rnr_retry_count = 7;
2633 rep_param->flow_control = 1;
2634 rep_param->failover_accepted = 0;
2635 rep_param->srq = 1;
2636 rep_param->responder_resources = 4;
2637 rep_param->initiator_depth = 4;
2639 ret = ib_send_cm_rep(cm_id, rep_param);
2640 if (ret) {
2641 printk(KERN_ERR "sending SRP_LOGIN_REQ response failed"
2642 " (error code = %d)\n", ret);
2643 goto release_channel;
2646 spin_lock_irq(&sdev->spinlock);
2647 list_add_tail(&ch->list, &sdev->rch_list);
2648 spin_unlock_irq(&sdev->spinlock);
2650 goto out;
2652 release_channel:
2653 srpt_set_ch_state(ch, CH_RELEASING);
2654 transport_deregister_session_configfs(ch->sess);
2656 deregister_session:
2657 transport_deregister_session(ch->sess);
2658 ch->sess = NULL;
2660 destroy_ib:
2661 srpt_destroy_ch_ib(ch);
2663 free_ring:
2664 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
2665 ch->sport->sdev, ch->rq_size,
2666 ch->rsp_size, DMA_TO_DEVICE);
2667 free_ch:
2668 kfree(ch);
2670 reject:
2671 rej->opcode = SRP_LOGIN_REJ;
2672 rej->tag = req->tag;
2673 rej->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT
2674 | SRP_BUF_FORMAT_INDIRECT);
2676 ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
2677 (void *)rej, sizeof *rej);
2679 out:
2680 kfree(rep_param);
2681 kfree(rsp);
2682 kfree(rej);
2684 return ret;
2687 static void srpt_cm_rej_recv(struct ib_cm_id *cm_id)
2689 printk(KERN_INFO "Received IB REJ for cm_id %p.\n", cm_id);
2690 srpt_drain_channel(cm_id);
2694 * srpt_cm_rtu_recv() - Process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event.
2696 * An IB_CM_RTU_RECEIVED message indicates that the connection is established
2697 * and that the recipient may begin transmitting (RTU = ready to use).
2699 static void srpt_cm_rtu_recv(struct ib_cm_id *cm_id)
2701 struct srpt_rdma_ch *ch;
2702 int ret;
2704 ch = srpt_find_channel(cm_id->context, cm_id);
2705 BUG_ON(!ch);
2707 if (srpt_test_and_set_ch_state(ch, CH_CONNECTING, CH_LIVE)) {
2708 struct srpt_recv_ioctx *ioctx, *ioctx_tmp;
2710 ret = srpt_ch_qp_rts(ch, ch->qp);
2712 list_for_each_entry_safe(ioctx, ioctx_tmp, &ch->cmd_wait_list,
2713 wait_list) {
2714 list_del(&ioctx->wait_list);
2715 srpt_handle_new_iu(ch, ioctx, NULL);
2717 if (ret)
2718 srpt_close_ch(ch);
2722 static void srpt_cm_timewait_exit(struct ib_cm_id *cm_id)
2724 printk(KERN_INFO "Received IB TimeWait exit for cm_id %p.\n", cm_id);
2725 srpt_drain_channel(cm_id);
2728 static void srpt_cm_rep_error(struct ib_cm_id *cm_id)
2730 printk(KERN_INFO "Received IB REP error for cm_id %p.\n", cm_id);
2731 srpt_drain_channel(cm_id);
2735 * srpt_cm_dreq_recv() - Process reception of a DREQ message.
2737 static void srpt_cm_dreq_recv(struct ib_cm_id *cm_id)
2739 struct srpt_rdma_ch *ch;
2740 unsigned long flags;
2741 bool send_drep = false;
2743 ch = srpt_find_channel(cm_id->context, cm_id);
2744 BUG_ON(!ch);
2746 pr_debug("cm_id= %p ch->state= %d\n", cm_id, srpt_get_ch_state(ch));
2748 spin_lock_irqsave(&ch->spinlock, flags);
2749 switch (ch->state) {
2750 case CH_CONNECTING:
2751 case CH_LIVE:
2752 send_drep = true;
2753 ch->state = CH_DISCONNECTING;
2754 break;
2755 case CH_DISCONNECTING:
2756 case CH_DRAINING:
2757 case CH_RELEASING:
2758 WARN(true, "unexpected channel state %d\n", ch->state);
2759 break;
2761 spin_unlock_irqrestore(&ch->spinlock, flags);
2763 if (send_drep) {
2764 if (ib_send_cm_drep(ch->cm_id, NULL, 0) < 0)
2765 printk(KERN_ERR "Sending IB DREP failed.\n");
2766 printk(KERN_INFO "Received DREQ and sent DREP for session %s.\n",
2767 ch->sess_name);
2772 * srpt_cm_drep_recv() - Process reception of a DREP message.
2774 static void srpt_cm_drep_recv(struct ib_cm_id *cm_id)
2776 printk(KERN_INFO "Received InfiniBand DREP message for cm_id %p.\n",
2777 cm_id);
2778 srpt_drain_channel(cm_id);
2782 * srpt_cm_handler() - IB connection manager callback function.
2784 * A non-zero return value will cause the caller to destroy the CM ID.
2786 * Note: srpt_cm_handler() must only return a non-zero value when transferring
2787 * ownership of the cm_id to a channel by srpt_cm_req_recv() failed. Returning
2788 * a non-zero value in any other case will trigger a race with the
2789 * ib_destroy_cm_id() call in srpt_release_channel().
2791 static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2793 int ret;
2795 ret = 0;
2796 switch (event->event) {
2797 case IB_CM_REQ_RECEIVED:
2798 ret = srpt_cm_req_recv(cm_id, &event->param.req_rcvd,
2799 event->private_data);
2800 break;
2801 case IB_CM_REJ_RECEIVED:
2802 srpt_cm_rej_recv(cm_id);
2803 break;
2804 case IB_CM_RTU_RECEIVED:
2805 case IB_CM_USER_ESTABLISHED:
2806 srpt_cm_rtu_recv(cm_id);
2807 break;
2808 case IB_CM_DREQ_RECEIVED:
2809 srpt_cm_dreq_recv(cm_id);
2810 break;
2811 case IB_CM_DREP_RECEIVED:
2812 srpt_cm_drep_recv(cm_id);
2813 break;
2814 case IB_CM_TIMEWAIT_EXIT:
2815 srpt_cm_timewait_exit(cm_id);
2816 break;
2817 case IB_CM_REP_ERROR:
2818 srpt_cm_rep_error(cm_id);
2819 break;
2820 case IB_CM_DREQ_ERROR:
2821 printk(KERN_INFO "Received IB DREQ ERROR event.\n");
2822 break;
2823 case IB_CM_MRA_RECEIVED:
2824 printk(KERN_INFO "Received IB MRA event\n");
2825 break;
2826 default:
2827 printk(KERN_ERR "received unrecognized IB CM event %d\n",
2828 event->event);
2829 break;
2832 return ret;
2836 * srpt_perform_rdmas() - Perform IB RDMA.
2838 * Returns zero upon success or a negative number upon failure.
2840 static int srpt_perform_rdmas(struct srpt_rdma_ch *ch,
2841 struct srpt_send_ioctx *ioctx)
2843 struct ib_send_wr wr;
2844 struct ib_send_wr *bad_wr;
2845 struct rdma_iu *riu;
2846 int i;
2847 int ret;
2848 int sq_wr_avail;
2849 enum dma_data_direction dir;
2850 const int n_rdma = ioctx->n_rdma;
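/*
 * Note: data_direction is expressed from the target's point of view:
 * DMA_TO_DEVICE corresponds to a SCSI WRITE (data is fetched from the
 * initiator via RDMA READ) and DMA_FROM_DEVICE to a SCSI READ (data is
 * pushed to the initiator via RDMA WRITE).
 */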
2852 dir = ioctx->cmd.data_direction;
2853 if (dir == DMA_TO_DEVICE) {
2854 /* write */
2855 ret = -ENOMEM;
2856 sq_wr_avail = atomic_sub_return(n_rdma, &ch->sq_wr_avail);
2857 if (sq_wr_avail < 0) {
2858 printk(KERN_WARNING "IB send queue full (needed %d)\n",
2859 n_rdma);
2860 goto out;
2864 ioctx->rdma_aborted = false;
2865 ret = 0;
2866 riu = ioctx->rdma_ius;
2867 memset(&wr, 0, sizeof wr);
2869 for (i = 0; i < n_rdma; ++i, ++riu) {
2870 if (dir == DMA_FROM_DEVICE) {
2871 wr.opcode = IB_WR_RDMA_WRITE;
2872 wr.wr_id = encode_wr_id(i == n_rdma - 1 ?
2873 SRPT_RDMA_WRITE_LAST :
2874 SRPT_RDMA_MID,
2875 ioctx->ioctx.index);
2876 } else {
2877 wr.opcode = IB_WR_RDMA_READ;
2878 wr.wr_id = encode_wr_id(i == n_rdma - 1 ?
2879 SRPT_RDMA_READ_LAST :
2880 SRPT_RDMA_MID,
2881 ioctx->ioctx.index);
2883 wr.next = NULL;
2884 wr.wr.rdma.remote_addr = riu->raddr;
2885 wr.wr.rdma.rkey = riu->rkey;
2886 wr.num_sge = riu->sge_cnt;
2887 wr.sg_list = riu->sge;
2889 /* only get completion event for the last rdma write */
2890 if (i == (n_rdma - 1) && dir == DMA_TO_DEVICE)
2891 wr.send_flags = IB_SEND_SIGNALED;
2893 ret = ib_post_send(ch->qp, &wr, &bad_wr);
2894 if (ret)
2895 break;
2898 if (ret)
2899 printk(KERN_ERR "%s[%d]: ib_post_send() returned %d for %d/%d",
2900 __func__, __LINE__, ret, i, n_rdma);
2901 if (ret && i > 0) {
2902 wr.num_sge = 0;
2903 wr.wr_id = encode_wr_id(SRPT_RDMA_ABORT, ioctx->ioctx.index);
2904 wr.send_flags = IB_SEND_SIGNALED;
2905 while (ch->state == CH_LIVE &&
2906 ib_post_send(ch->qp, &wr, &bad_wr) != 0) {
2907 printk(KERN_INFO "Trying to abort failed RDMA transfer [%d]",
2908 ioctx->ioctx.index);
2909 msleep(1000);
2911 while (ch->state != CH_RELEASING && !ioctx->rdma_aborted) {
2912 printk(KERN_INFO "Waiting until RDMA abort finished [%d]",
2913 ioctx->ioctx.index);
2914 msleep(1000);
2917 out:
2918 if (unlikely(dir == DMA_TO_DEVICE && ret < 0))
2919 atomic_add(n_rdma, &ch->sq_wr_avail);
2920 return ret;
2924 * srpt_xfer_data() - Start data transfer from initiator to target.
2926 static int srpt_xfer_data(struct srpt_rdma_ch *ch,
2927 struct srpt_send_ioctx *ioctx)
2929 int ret;
2931 ret = srpt_map_sg_to_ib_sge(ch, ioctx);
2932 if (ret) {
2933 printk(KERN_ERR "%s[%d] ret=%d\n", __func__, __LINE__, ret);
2934 goto out;
2937 ret = srpt_perform_rdmas(ch, ioctx);
2938 if (ret) {
2939 if (ret == -EAGAIN || ret == -ENOMEM)
2940 printk(KERN_INFO "%s[%d] queue full -- ret=%d\n",
2941 __func__, __LINE__, ret);
2942 else
2943 printk(KERN_ERR "%s[%d] fatal error -- ret=%d\n",
2944 __func__, __LINE__, ret);
2945 goto out_unmap;
2948 out:
2949 return ret;
2950 out_unmap:
2951 srpt_unmap_sg_to_ib_sge(ch, ioctx);
2952 goto out;
2955 static int srpt_write_pending_status(struct se_cmd *se_cmd)
2957 struct srpt_send_ioctx *ioctx;
2959 ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
2960 return srpt_get_cmd_state(ioctx) == SRPT_STATE_NEED_DATA;
2964 * srpt_write_pending() - Start data transfer from initiator to target (write).
2966 static int srpt_write_pending(struct se_cmd *se_cmd)
2968 struct srpt_rdma_ch *ch;
2969 struct srpt_send_ioctx *ioctx;
2970 enum srpt_command_state new_state;
2971 enum rdma_ch_state ch_state;
2972 int ret;
2974 ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
2976 new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);
2977 WARN_ON(new_state == SRPT_STATE_DONE);
2979 ch = ioctx->ch;
2980 BUG_ON(!ch);
2982 ch_state = srpt_get_ch_state(ch);
2983 switch (ch_state) {
2984 case CH_CONNECTING:
2985 WARN(true, "unexpected channel state %d\n", ch_state);
2986 ret = -EINVAL;
2987 goto out;
2988 case CH_LIVE:
2989 break;
2990 case CH_DISCONNECTING:
2991 case CH_DRAINING:
2992 case CH_RELEASING:
2993 pr_debug("cmd with tag %lld: channel disconnecting\n",
2994 ioctx->tag);
2995 srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN);
2996 ret = -EINVAL;
2997 goto out;
2999 ret = srpt_xfer_data(ch, ioctx);
3001 out:
3002 return ret;
3005 static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
3007 switch (tcm_mgmt_status) {
3008 case TMR_FUNCTION_COMPLETE:
3009 return SRP_TSK_MGMT_SUCCESS;
3010 case TMR_FUNCTION_REJECTED:
3011 return SRP_TSK_MGMT_FUNC_NOT_SUPP;
3013 return SRP_TSK_MGMT_FAILED;
3017 * srpt_queue_response() - Transmits the response to a SCSI command.
3019 * Callback function called by the TCM core. Must not block since it can be
3020 * invoked in the context of the IB completion handler.
3022 static int srpt_queue_response(struct se_cmd *cmd)
3024 struct srpt_rdma_ch *ch;
3025 struct srpt_send_ioctx *ioctx;
3026 enum srpt_command_state state;
3027 unsigned long flags;
3028 int ret;
3029 enum dma_data_direction dir;
3030 int resp_len;
3031 u8 srp_tm_status;
3033 ret = 0;
3035 ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
3036 ch = ioctx->ch;
3037 BUG_ON(!ch);
3039 spin_lock_irqsave(&ioctx->spinlock, flags);
3040 state = ioctx->state;
3041 switch (state) {
3042 case SRPT_STATE_NEW:
3043 case SRPT_STATE_DATA_IN:
3044 ioctx->state = SRPT_STATE_CMD_RSP_SENT;
3045 break;
3046 case SRPT_STATE_MGMT:
3047 ioctx->state = SRPT_STATE_MGMT_RSP_SENT;
3048 break;
3049 default:
3050 WARN(true, "ch %p; cmd %d: unexpected command state %d\n",
3051 ch, ioctx->ioctx.index, ioctx->state);
3052 break;
3054 spin_unlock_irqrestore(&ioctx->spinlock, flags);
3056 if (unlikely(transport_check_aborted_status(&ioctx->cmd, false)
3057 || WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))) {
3058 atomic_inc(&ch->req_lim_delta);
3059 srpt_abort_cmd(ioctx);
3060 goto out;
3063 dir = ioctx->cmd.data_direction;
3065 /* For read commands, transfer the data to the initiator. */
3066 if (dir == DMA_FROM_DEVICE && ioctx->cmd.data_length &&
3067 !ioctx->queue_status_only) {
3068 ret = srpt_xfer_data(ch, ioctx);
3069 if (ret) {
3070 printk(KERN_ERR "xfer_data failed for tag %llu\n",
3071 ioctx->tag);
3072 goto out;
3076 if (state != SRPT_STATE_MGMT)
3077 resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->tag,
3078 cmd->scsi_status);
3079 else {
3080 srp_tm_status
3081 = tcm_to_srp_tsk_mgmt_status(cmd->se_tmr_req->response);
3082 resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status,
3083 ioctx->tag);
3085 ret = srpt_post_send(ch, ioctx, resp_len);
3086 if (ret) {
3087 printk(KERN_ERR "sending cmd response failed for tag %llu\n",
3088 ioctx->tag);
3089 srpt_unmap_sg_to_ib_sge(ch, ioctx);
3090 srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
3091 kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
3094 out:
3095 return ret;
3098 static int srpt_queue_status(struct se_cmd *cmd)
3100 struct srpt_send_ioctx *ioctx;
3102 ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
3103 BUG_ON(ioctx->sense_data != cmd->sense_buffer);
3104 if (cmd->se_cmd_flags &
3105 (SCF_TRANSPORT_TASK_SENSE | SCF_EMULATED_TASK_SENSE))
3106 WARN_ON(cmd->scsi_status != SAM_STAT_CHECK_CONDITION);
3107 ioctx->queue_status_only = true;
3108 return srpt_queue_response(cmd);
3111 static void srpt_refresh_port_work(struct work_struct *work)
3113 struct srpt_port *sport = container_of(work, struct srpt_port, work);
3115 srpt_refresh_port(sport);
3118 static int srpt_ch_list_empty(struct srpt_device *sdev)
3120 int res;
3122 spin_lock_irq(&sdev->spinlock);
3123 res = list_empty(&sdev->rch_list);
3124 spin_unlock_irq(&sdev->spinlock);
3126 return res;
3130 * srpt_release_sdev() - Free the channel resources associated with a target.
3132 static int srpt_release_sdev(struct srpt_device *sdev)
3134 struct srpt_rdma_ch *ch, *tmp_ch;
3135 int res;
3137 WARN_ON_ONCE(irqs_disabled());
3139 BUG_ON(!sdev);
3141 spin_lock_irq(&sdev->spinlock);
3142 list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list)
3143 __srpt_close_ch(ch);
3144 spin_unlock_irq(&sdev->spinlock);
3146 res = wait_event_interruptible(sdev->ch_releaseQ,
3147 srpt_ch_list_empty(sdev));
3148 if (res)
3149 printk(KERN_ERR "%s: interrupted.\n", __func__);
3151 return 0;
3154 static struct srpt_port *__srpt_lookup_port(const char *name)
3156 struct ib_device *dev;
3157 struct srpt_device *sdev;
3158 struct srpt_port *sport;
3159 int i;
3161 list_for_each_entry(sdev, &srpt_dev_list, list) {
3162 dev = sdev->device;
3163 if (!dev)
3164 continue;
3166 for (i = 0; i < dev->phys_port_cnt; i++) {
3167 sport = &sdev->port[i];
3169 if (!strcmp(sport->port_guid, name))
3170 return sport;
3174 return NULL;
3177 static struct srpt_port *srpt_lookup_port(const char *name)
3179 struct srpt_port *sport;
3181 spin_lock(&srpt_dev_lock);
3182 sport = __srpt_lookup_port(name);
3183 spin_unlock(&srpt_dev_lock);
3185 return sport;
3189 * srpt_add_one() - InfiniBand device addition callback function.
3191 static void srpt_add_one(struct ib_device *device)
3193 struct srpt_device *sdev;
3194 struct srpt_port *sport;
3195 struct ib_srq_init_attr srq_attr;
3196 int i;
3198 pr_debug("device = %p, device->dma_ops = %p\n", device,
3199 device->dma_ops);
3201 sdev = kzalloc(sizeof *sdev, GFP_KERNEL);
3202 if (!sdev)
3203 goto err;
3205 sdev->device = device;
3206 INIT_LIST_HEAD(&sdev->rch_list);
3207 init_waitqueue_head(&sdev->ch_releaseQ);
3208 spin_lock_init(&sdev->spinlock);
3210 if (ib_query_device(device, &sdev->dev_attr))
3211 goto free_dev;
3213 sdev->pd = ib_alloc_pd(device);
3214 if (IS_ERR(sdev->pd))
3215 goto free_dev;
3217 sdev->mr = ib_get_dma_mr(sdev->pd, IB_ACCESS_LOCAL_WRITE);
3218 if (IS_ERR(sdev->mr))
3219 goto err_pd;
3221 sdev->srq_size = min(srpt_srq_size, sdev->dev_attr.max_srq_wr);
3223 srq_attr.event_handler = srpt_srq_event;
3224 srq_attr.srq_context = (void *)sdev;
3225 srq_attr.attr.max_wr = sdev->srq_size;
3226 srq_attr.attr.max_sge = 1;
3227 srq_attr.attr.srq_limit = 0;
3228 srq_attr.srq_type = IB_SRQT_BASIC;
3230 sdev->srq = ib_create_srq(sdev->pd, &srq_attr);
3231 if (IS_ERR(sdev->srq))
3232 goto err_mr;
3234 pr_debug("%s: create SRQ #wr= %d max_allow=%d dev= %s\n",
3235 __func__, sdev->srq_size, sdev->dev_attr.max_srq_wr,
3236 device->name);
3238 if (!srpt_service_guid)
3239 srpt_service_guid = be64_to_cpu(device->node_guid);
3241 sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
3242 if (IS_ERR(sdev->cm_id))
3243 goto err_srq;
3245 /* print out target login information */
3246 pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx,"
3247 "pkey=ffff,service_id=%016llx\n", srpt_service_guid,
3248 srpt_service_guid, srpt_service_guid);
3251 * We do not have a consistent service_id (i.e. also the id_ext of the
3252 * target_id) to identify this target. We currently use the GUID of the
3253 * first HCA in the system as service_id; therefore, the target_id will
3254 * change if this HCA goes bad and is replaced by a different HCA.
3256 if (ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0, NULL))
3257 goto err_cm;
3259 INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
3260 srpt_event_handler);
3261 if (ib_register_event_handler(&sdev->event_handler))
3262 goto err_cm;
3264 sdev->ioctx_ring = (struct srpt_recv_ioctx **)
3265 srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
3266 sizeof(*sdev->ioctx_ring[0]),
3267 srp_max_req_size, DMA_FROM_DEVICE);
3268 if (!sdev->ioctx_ring)
3269 goto err_event;
3271 for (i = 0; i < sdev->srq_size; ++i)
3272 srpt_post_recv(sdev, sdev->ioctx_ring[i]);
3274 WARN_ON(sdev->device->phys_port_cnt > ARRAY_SIZE(sdev->port));
3276 for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
3277 sport = &sdev->port[i - 1];
3278 sport->sdev = sdev;
3279 sport->port = i;
3280 sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE;
3281 sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE;
3282 sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
3283 INIT_WORK(&sport->work, srpt_refresh_port_work);
3284 INIT_LIST_HEAD(&sport->port_acl_list);
3285 spin_lock_init(&sport->port_acl_lock);
3287 if (srpt_refresh_port(sport)) {
3288 printk(KERN_ERR "MAD registration failed for %s-%d.\n",
3289 srpt_sdev_name(sdev), i);
3290 goto err_ring;
3292 snprintf(sport->port_guid, sizeof(sport->port_guid),
3293 "0x%016llx%016llx",
3294 be64_to_cpu(sport->gid.global.subnet_prefix),
3295 be64_to_cpu(sport->gid.global.interface_id));
3298 spin_lock(&srpt_dev_lock);
3299 list_add_tail(&sdev->list, &srpt_dev_list);
3300 spin_unlock(&srpt_dev_lock);
3302 out:
3303 ib_set_client_data(device, &srpt_client, sdev);
3304 pr_debug("added %s.\n", device->name);
3305 return;
3307 err_ring:
3308 srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
3309 sdev->srq_size, srp_max_req_size,
3310 DMA_FROM_DEVICE);
3311 err_event:
3312 ib_unregister_event_handler(&sdev->event_handler);
3313 err_cm:
3314 ib_destroy_cm_id(sdev->cm_id);
3315 err_srq:
3316 ib_destroy_srq(sdev->srq);
3317 err_mr:
3318 ib_dereg_mr(sdev->mr);
3319 err_pd:
3320 ib_dealloc_pd(sdev->pd);
3321 free_dev:
3322 kfree(sdev);
3323 err:
3324 sdev = NULL;
3325 printk(KERN_INFO "%s(%s) failed.\n", __func__, device->name);
3326 goto out;
3330 * srpt_remove_one() - InfiniBand device removal callback function.
3332 static void srpt_remove_one(struct ib_device *device)
3334 struct srpt_device *sdev;
3335 int i;
3337 sdev = ib_get_client_data(device, &srpt_client);
3338 if (!sdev) {
3339 printk(KERN_INFO "%s(%s): nothing to do.\n", __func__,
3340 device->name);
3341 return;
3344 srpt_unregister_mad_agent(sdev);
3346 ib_unregister_event_handler(&sdev->event_handler);
3348 /* Cancel any work queued by the just unregistered IB event handler. */
3349 for (i = 0; i < sdev->device->phys_port_cnt; i++)
3350 cancel_work_sync(&sdev->port[i].work);
3352 ib_destroy_cm_id(sdev->cm_id);
3355 * Unregistering a target must happen after destroying sdev->cm_id
3356 * such that no new SRP_LOGIN_REQ information units can arrive while
3357 * destroying the target.
3359 spin_lock(&srpt_dev_lock);
3360 list_del(&sdev->list);
3361 spin_unlock(&srpt_dev_lock);
3362 srpt_release_sdev(sdev);
3364 ib_destroy_srq(sdev->srq);
3365 ib_dereg_mr(sdev->mr);
3366 ib_dealloc_pd(sdev->pd);
3368 srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
3369 sdev->srq_size, srp_max_req_size, DMA_FROM_DEVICE);
3370 sdev->ioctx_ring = NULL;
3371 kfree(sdev);
3374 static struct ib_client srpt_client = {
3375 .name = DRV_NAME,
3376 .add = srpt_add_one,
3377 .remove = srpt_remove_one
3380 static int srpt_check_true(struct se_portal_group *se_tpg)
3382 return 1;
3385 static int srpt_check_false(struct se_portal_group *se_tpg)
3387 return 0;
3390 static char *srpt_get_fabric_name(void)
3392 return "srpt";
3395 static u8 srpt_get_fabric_proto_ident(struct se_portal_group *se_tpg)
3397 return SCSI_TRANSPORTID_PROTOCOLID_SRP;
3400 static char *srpt_get_fabric_wwn(struct se_portal_group *tpg)
3402 struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1);
3404 return sport->port_guid;
3407 static u16 srpt_get_tag(struct se_portal_group *tpg)
3409 return 1;
3412 static u32 srpt_get_default_depth(struct se_portal_group *se_tpg)
3414 return 1;
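/*
 * srpt_get_pr_transport_id() - Build a SCSI TransportID for an SRP port.
 *
 * The TransportID, as used by the persistent reservations code, consists
 * of the SRP protocol identifier followed by the 128-bit initiator port
 * ID.
 */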
3417 static u32 srpt_get_pr_transport_id(struct se_portal_group *se_tpg,
3418 struct se_node_acl *se_nacl,
3419 struct t10_pr_registration *pr_reg,
3420 int *format_code, unsigned char *buf)
3422 struct srpt_node_acl *nacl;
3423 struct spc_rdma_transport_id *tr_id;
3425 nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
3426 tr_id = (void *)buf;
3427 tr_id->protocol_identifier = SCSI_TRANSPORTID_PROTOCOLID_SRP;
3428 memcpy(tr_id->i_port_id, nacl->i_port_id, sizeof(tr_id->i_port_id));
3429 return sizeof(*tr_id);
3432 static u32 srpt_get_pr_transport_id_len(struct se_portal_group *se_tpg,
3433 struct se_node_acl *se_nacl,
3434 struct t10_pr_registration *pr_reg,
3435 int *format_code)
3437 *format_code = 0;
3438 return sizeof(struct spc_rdma_transport_id);
3441 static char *srpt_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
3442 const char *buf, u32 *out_tid_len,
3443 char **port_nexus_ptr)
3445 struct spc_rdma_transport_id *tr_id;
3447 *port_nexus_ptr = NULL;
3448 *out_tid_len = sizeof(struct spc_rdma_transport_id);
3449 tr_id = (void *)buf;
3450 return (char *)tr_id->i_port_id;
3453 static struct se_node_acl *srpt_alloc_fabric_acl(struct se_portal_group *se_tpg)
3455 struct srpt_node_acl *nacl;
3457 nacl = kzalloc(sizeof(struct srpt_node_acl), GFP_KERNEL);
3458 if (!nacl) {
3459 printk(KERN_ERR "Unable to alocate struct srpt_node_acl\n");
3460 return NULL;
3463 return &nacl->nacl;
3466 static void srpt_release_fabric_acl(struct se_portal_group *se_tpg,
3467 struct se_node_acl *se_nacl)
3469 struct srpt_node_acl *nacl;
3471 nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
3472 kfree(nacl);
3475 static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg)
3477 return 1;
3480 static void srpt_release_cmd(struct se_cmd *se_cmd)
3485 * srpt_shutdown_session() - Whether or not a session may be shut down.
3487 static int srpt_shutdown_session(struct se_session *se_sess)
3489 return true;
3493 * srpt_close_session() - Forcibly close a session.
3495 * Callback function invoked by the TCM core to clean up sessions associated
3496 * with a node ACL when the user invokes
3497 * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
3499 static void srpt_close_session(struct se_session *se_sess)
3501 DECLARE_COMPLETION_ONSTACK(release_done);
3502 struct srpt_rdma_ch *ch;
3503 struct srpt_device *sdev;
3504 int res;
3506 ch = se_sess->fabric_sess_ptr;
3507 WARN_ON(ch->sess != se_sess);
3509 pr_debug("ch %p state %d\n", ch, srpt_get_ch_state(ch));
3511 sdev = ch->sport->sdev;
3512 spin_lock_irq(&sdev->spinlock);
3513 BUG_ON(ch->release_done);
3514 ch->release_done = &release_done;
3515 __srpt_close_ch(ch);
3516 spin_unlock_irq(&sdev->spinlock);
3518 res = wait_for_completion_timeout(&release_done, 60 * HZ);
3519 WARN_ON(res <= 0);
3523 * To do: Find out whether stop_session() has a meaning for transports
3524 * other than iSCSI.
3526 static void srpt_stop_session(struct se_session *se_sess, int sess_sleep,
3527 int conn_sleep)
3531 static void srpt_reset_nexus(struct se_session *sess)
3533 printk(KERN_ERR "This is the SRP protocol, not iSCSI\n");
3536 static int srpt_sess_logged_in(struct se_session *se_sess)
3538 return true;
3542 * srpt_sess_get_index() - Return the value of scsiAttIntrPortIndex (SCSI-MIB).
3544 * A quote from RFC 4455 (SCSI-MIB) about this MIB object:
3545 * This object represents an arbitrary integer used to uniquely identify a
3546 * particular attached remote initiator port to a particular SCSI target port
3547 * within a particular SCSI target device within a particular SCSI instance.
3549 static u32 srpt_sess_get_index(struct se_session *se_sess)
3551 return 0;
3554 static void srpt_set_default_node_attrs(struct se_node_acl *nacl)
3558 static u32 srpt_get_task_tag(struct se_cmd *se_cmd)
3560 struct srpt_send_ioctx *ioctx;
3562 ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
3563 return ioctx->tag;
3566 /* Note: only used from inside debug printk's by the TCM core. */
3567 static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd)
3569 struct srpt_send_ioctx *ioctx;
3571 ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
3572 return srpt_get_cmd_state(ioctx);
3575 static u16 srpt_set_fabric_sense_len(struct se_cmd *cmd, u32 sense_length)
3577 return 0;
3580 static u16 srpt_get_fabric_sense_len(void)
3582 return 0;
3585 static int srpt_is_state_remove(struct se_cmd *se_cmd)
3587 return 0;
3591 * srpt_parse_i_port_id() - Parse an initiator port ID.
3592 * @name: ASCII representation of a 128-bit initiator port ID.
3593 * @i_port_id: Binary 128-bit port ID.
3595 static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
3597 const char *p;
3598 unsigned len, count, leading_zero_bytes;
3599 int ret, rc;
3601 p = name;
3602 if (strnicmp(p, "0x", 2) == 0)
3603 p += 2;
3604 ret = -EINVAL;
3605 len = strlen(p);
3606 if (len % 2)
3607 goto out;
3608 count = min(len / 2, 16U);
3609 leading_zero_bytes = 16 - count;
3610 memset(i_port_id, 0, leading_zero_bytes);
3611 rc = hex2bin(i_port_id + leading_zero_bytes, p, count);
3612 if (rc < 0)
3613 pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", rc);
3614 ret = rc < 0 ? -EINVAL : 0;
3615 out:
3616 return ret;
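/*
 * Worked example (hypothetical input): srpt_parse_i_port_id() converts
 * the string "0x12345678" into a 16-byte buffer containing twelve
 * leading zero bytes followed by 0x12 0x34 0x56 0x78; a full 32-digit
 * hex string fills all 16 bytes.
 */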
3620 * configfs callback function invoked for
3621 * mkdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
3623 static struct se_node_acl *srpt_make_nodeacl(struct se_portal_group *tpg,
3624 struct config_group *group,
3625 const char *name)
3627 struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1);
3628 struct se_node_acl *se_nacl, *se_nacl_new;
3629 struct srpt_node_acl *nacl;
3630 int ret = 0;
3631 u32 nexus_depth = 1;
3632 u8 i_port_id[16];
3634 if (srpt_parse_i_port_id(i_port_id, name) < 0) {
3635 printk(KERN_ERR "invalid initiator port ID %s\n", name);
3636 ret = -EINVAL;
3637 goto err;
3640 se_nacl_new = srpt_alloc_fabric_acl(tpg);
3641 if (!se_nacl_new) {
3642 ret = -ENOMEM;
3643 goto err;
3646 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
3647 * when converting a node ACL from demo mode to explicit mode.
3649 se_nacl = core_tpg_add_initiator_node_acl(tpg, se_nacl_new, name,
3650 nexus_depth);
3651 if (IS_ERR(se_nacl)) {
3652 ret = PTR_ERR(se_nacl);
3653 goto err;
3655 /* Locate our struct srpt_node_acl and set sdev and i_port_id. */
3656 nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
3657 memcpy(&nacl->i_port_id[0], &i_port_id[0], 16);
3658 nacl->sport = sport;
3660 spin_lock_irq(&sport->port_acl_lock);
3661 list_add_tail(&nacl->list, &sport->port_acl_list);
3662 spin_unlock_irq(&sport->port_acl_lock);
3664 return se_nacl;
3665 err:
3666 return ERR_PTR(ret);
3670 * configfs callback function invoked for
3671 * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
3673 static void srpt_drop_nodeacl(struct se_node_acl *se_nacl)
3675 struct srpt_node_acl *nacl;
3676 struct srpt_device *sdev;
3677 struct srpt_port *sport;
3679 nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
3680 sport = nacl->sport;
3681 sdev = sport->sdev;
3682 spin_lock_irq(&sport->port_acl_lock);
3683 list_del(&nacl->list);
3684 spin_unlock_irq(&sport->port_acl_lock);
3685 core_tpg_del_initiator_node_acl(&sport->port_tpg_1, se_nacl, 1);
3686 srpt_release_fabric_acl(NULL, se_nacl);
3689 static ssize_t srpt_tpg_attrib_show_srp_max_rdma_size(
3690 struct se_portal_group *se_tpg,
3691 char *page)
3693 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3695 return sprintf(page, "%u\n", sport->port_attrib.srp_max_rdma_size);
3698 static ssize_t srpt_tpg_attrib_store_srp_max_rdma_size(
3699 struct se_portal_group *se_tpg,
3700 const char *page,
3701 size_t count)
3703 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3704 unsigned long val;
3705 int ret;
3707 ret = strict_strtoul(page, 0, &val);
3708 if (ret < 0) {
3709 pr_err("strict_strtoul() failed with ret: %d\n", ret);
3710 return -EINVAL;
3712 if (val > MAX_SRPT_RDMA_SIZE) {
3713 pr_err("val: %lu exceeds MAX_SRPT_RDMA_SIZE: %d\n", val,
3714 MAX_SRPT_RDMA_SIZE);
3715 return -EINVAL;
3717 if (val < DEFAULT_MAX_RDMA_SIZE) {
3718 pr_err("val: %lu smaller than DEFAULT_MAX_RDMA_SIZE: %d\n",
3719 val, DEFAULT_MAX_RDMA_SIZE);
3720 return -EINVAL;
3722 sport->port_attrib.srp_max_rdma_size = val;
3724 return count;
3727 TF_TPG_ATTRIB_ATTR(srpt, srp_max_rdma_size, S_IRUGO | S_IWUSR);
3729 static ssize_t srpt_tpg_attrib_show_srp_max_rsp_size(
3730 struct se_portal_group *se_tpg,
3731 char *page)
3733 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3735 return sprintf(page, "%u\n", sport->port_attrib.srp_max_rsp_size);
3738 static ssize_t srpt_tpg_attrib_store_srp_max_rsp_size(
3739 struct se_portal_group *se_tpg,
3740 const char *page,
3741 size_t count)
3743 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3744 unsigned long val;
3745 int ret;
3747 ret = strict_strtoul(page, 0, &val);
3748 if (ret < 0) {
3749 pr_err("strict_strtoul() failed with ret: %d\n", ret);
3750 return -EINVAL;
3752 if (val > MAX_SRPT_RSP_SIZE) {
3753 pr_err("val: %lu exceeds MAX_SRPT_RSP_SIZE: %d\n", val,
3754 MAX_SRPT_RSP_SIZE);
3755 return -EINVAL;
3757 if (val < MIN_MAX_RSP_SIZE) {
3758 pr_err("val: %lu smaller than MIN_MAX_RSP_SIZE: %d\n", val,
3759 MIN_MAX_RSP_SIZE);
3760 return -EINVAL;
3762 sport->port_attrib.srp_max_rsp_size = val;
3764 return count;
3767 TF_TPG_ATTRIB_ATTR(srpt, srp_max_rsp_size, S_IRUGO | S_IWUSR);
3769 static ssize_t srpt_tpg_attrib_show_srp_sq_size(
3770 struct se_portal_group *se_tpg,
3771 char *page)
3773 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3775 return sprintf(page, "%u\n", sport->port_attrib.srp_sq_size);
3778 static ssize_t srpt_tpg_attrib_store_srp_sq_size(
3779 struct se_portal_group *se_tpg,
3780 const char *page,
3781 size_t count)
3783 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3784 unsigned long val;
3785 int ret;
3787 ret = strict_strtoul(page, 0, &val);
3788 if (ret < 0) {
3789 pr_err("strict_strtoul() failed with ret: %d\n", ret);
3790 return -EINVAL;
3792 if (val > MAX_SRPT_SRQ_SIZE) {
3793 pr_err("val: %lu exceeds MAX_SRPT_SRQ_SIZE: %d\n", val,
3794 MAX_SRPT_SRQ_SIZE);
3795 return -EINVAL;
3797 if (val < MIN_SRPT_SRQ_SIZE) {
3798 pr_err("val: %lu smaller than MIN_SRPT_SRQ_SIZE: %d\n", val,
3799 MIN_SRPT_SRQ_SIZE);
3800 return -EINVAL;
3802 sport->port_attrib.srp_sq_size = val;
3804 return count;
3807 TF_TPG_ATTRIB_ATTR(srpt, srp_sq_size, S_IRUGO | S_IWUSR);
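/*
 * Sketch of the intended usage from user space (the exact path depends
 * on how the target has been configured; $port and $tpg follow the
 * notation of the configfs comments below):
 *
 *   echo 4096 > /sys/kernel/config/target/srpt/$port/$tpg/attrib/srp_max_rsp_size
 */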
3809 static struct configfs_attribute *srpt_tpg_attrib_attrs[] = {
3810 &srpt_tpg_attrib_srp_max_rdma_size.attr,
3811 &srpt_tpg_attrib_srp_max_rsp_size.attr,
3812 &srpt_tpg_attrib_srp_sq_size.attr,
3813 NULL,
3816 static ssize_t srpt_tpg_show_enable(
3817 struct se_portal_group *se_tpg,
3818 char *page)
3820 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3822 return snprintf(page, PAGE_SIZE, "%d\n", (sport->enabled) ? 1 : 0);
3825 static ssize_t srpt_tpg_store_enable(
3826 struct se_portal_group *se_tpg,
3827 const char *page,
3828 size_t count)
3830 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
3831 unsigned long tmp;
3832 int ret;
3834 ret = strict_strtoul(page, 0, &tmp);
3835 if (ret < 0) {
3836 printk(KERN_ERR "Unable to extract srpt_tpg_store_enable\n");
3837 return -EINVAL;
3840 if ((tmp != 0) && (tmp != 1)) {
3841 printk(KERN_ERR "Illegal value for srpt_tpg_store_enable: %lu\n", tmp);
3842 return -EINVAL;
3844 if (tmp == 1)
3845 sport->enabled = true;
3846 else
3847 sport->enabled = false;
3849 return count;
3852 TF_TPG_BASE_ATTR(srpt, enable, S_IRUGO | S_IWUSR);
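/*
 * Sketch of the intended usage from user space, once ACLs and LUNs have
 * been configured ($port and $tpg as in the configfs comments below):
 *
 *   echo 1 > /sys/kernel/config/target/srpt/$port/$tpg/enable
 */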
3854 static struct configfs_attribute *srpt_tpg_attrs[] = {
3855 &srpt_tpg_enable.attr,
3856 NULL,
3860 * configfs callback invoked for
3861 * mkdir /sys/kernel/config/target/$driver/$port/$tpg
3863 static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
3864 struct config_group *group,
3865 const char *name)
3867 struct srpt_port *sport = container_of(wwn, struct srpt_port, port_wwn);
3868 int res;
3870 /* Initialize sport->port_wwn and sport->port_tpg_1 */
3871 res = core_tpg_register(&srpt_target->tf_ops, &sport->port_wwn,
3872 &sport->port_tpg_1, sport, TRANSPORT_TPG_TYPE_NORMAL);
3873 if (res)
3874 return ERR_PTR(res);
3876 return &sport->port_tpg_1;
3880 * configfs callback invoked for
3881 * rmdir /sys/kernel/config/target/$driver/$port/$tpg
3883 static void srpt_drop_tpg(struct se_portal_group *tpg)
3885 struct srpt_port *sport = container_of(tpg,
3886 struct srpt_port, port_tpg_1);
3888 sport->enabled = false;
3889 core_tpg_deregister(&sport->port_tpg_1);
3893 * configfs callback invoked for
3894 * mkdir /sys/kernel/config/target/$driver/$port
3896 static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
3897 struct config_group *group,
3898 const char *name)
3900 struct srpt_port *sport;
3901 int ret;
3903 sport = srpt_lookup_port(name);
3904 pr_debug("make_tport(%s)\n", name);
3905 ret = -EINVAL;
3906 if (!sport)
3907 goto err;
3909 return &sport->port_wwn;
3911 err:
3912 return ERR_PTR(ret);
3916 * configfs callback invoked for
3917 * rmdir /sys/kernel/config/target/$driver/$port
3919 static void srpt_drop_tport(struct se_wwn *wwn)
3921 struct srpt_port *sport = container_of(wwn, struct srpt_port, port_wwn);
3923 pr_debug("drop_tport(%s\n", config_item_name(&sport->port_wwn.wwn_group.cg_item));
3926 static ssize_t srpt_wwn_show_attr_version(struct target_fabric_configfs *tf,
3927 char *buf)
3929 return scnprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION);
3932 TF_WWN_ATTR_RO(srpt, version);
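/*
 * Assuming configfs is mounted at /sys/kernel/config, this read-only
 * attribute can be queried with e.g.:
 *
 *   cat /sys/kernel/config/target/srpt/version
 */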
3934 static struct configfs_attribute *srpt_wwn_attrs[] = {
3935 &srpt_wwn_version.attr,
3936 NULL,
3939 static struct target_core_fabric_ops srpt_template = {
3940 .get_fabric_name = srpt_get_fabric_name,
3941 .get_fabric_proto_ident = srpt_get_fabric_proto_ident,
3942 .tpg_get_wwn = srpt_get_fabric_wwn,
3943 .tpg_get_tag = srpt_get_tag,
3944 .tpg_get_default_depth = srpt_get_default_depth,
3945 .tpg_get_pr_transport_id = srpt_get_pr_transport_id,
3946 .tpg_get_pr_transport_id_len = srpt_get_pr_transport_id_len,
3947 .tpg_parse_pr_out_transport_id = srpt_parse_pr_out_transport_id,
3948 .tpg_check_demo_mode = srpt_check_false,
3949 .tpg_check_demo_mode_cache = srpt_check_true,
3950 .tpg_check_demo_mode_write_protect = srpt_check_true,
3951 .tpg_check_prod_mode_write_protect = srpt_check_false,
3952 .tpg_alloc_fabric_acl = srpt_alloc_fabric_acl,
3953 .tpg_release_fabric_acl = srpt_release_fabric_acl,
3954 .tpg_get_inst_index = srpt_tpg_get_inst_index,
3955 .release_cmd = srpt_release_cmd,
3956 .check_stop_free = srpt_check_stop_free,
3957 .shutdown_session = srpt_shutdown_session,
3958 .close_session = srpt_close_session,
3959 .stop_session = srpt_stop_session,
3960 .fall_back_to_erl0 = srpt_reset_nexus,
3961 .sess_logged_in = srpt_sess_logged_in,
3962 .sess_get_index = srpt_sess_get_index,
3963 .sess_get_initiator_sid = NULL,
3964 .write_pending = srpt_write_pending,
3965 .write_pending_status = srpt_write_pending_status,
3966 .set_default_node_attributes = srpt_set_default_node_attrs,
3967 .get_task_tag = srpt_get_task_tag,
3968 .get_cmd_state = srpt_get_tcm_cmd_state,
3969 .queue_data_in = srpt_queue_response,
3970 .queue_status = srpt_queue_status,
3971 .queue_tm_rsp = srpt_queue_response,
3972 .get_fabric_sense_len = srpt_get_fabric_sense_len,
3973 .set_fabric_sense_len = srpt_set_fabric_sense_len,
3974 .is_state_remove = srpt_is_state_remove,
3976 * Set up function pointers for generic logic in
3977 * target_core_fabric_configfs.c.
3979 .fabric_make_wwn = srpt_make_tport,
3980 .fabric_drop_wwn = srpt_drop_tport,
3981 .fabric_make_tpg = srpt_make_tpg,
3982 .fabric_drop_tpg = srpt_drop_tpg,
3983 .fabric_post_link = NULL,
3984 .fabric_pre_unlink = NULL,
3985 .fabric_make_np = NULL,
3986 .fabric_drop_np = NULL,
3987 .fabric_make_nodeacl = srpt_make_nodeacl,
3988 .fabric_drop_nodeacl = srpt_drop_nodeacl,
3992 * srpt_init_module() - Kernel module initialization.
3994 * Note: Since ib_register_client() registers callback functions, and since at
3995 * least one of these callback functions (srpt_add_one()) calls target core
3996 * functions, this driver must be registered with the target core before
3997 * ib_register_client() is called.
3999 static int __init srpt_init_module(void)
4001 int ret;
4003 ret = -EINVAL;
4004 if (srp_max_req_size < MIN_MAX_REQ_SIZE) {
4005 printk(KERN_ERR "invalid value %d for kernel module parameter"
4006 " srp_max_req_size -- must be at least %d.\n",
4007 srp_max_req_size, MIN_MAX_REQ_SIZE);
4008 goto out;
4011 if (srpt_srq_size < MIN_SRPT_SRQ_SIZE
4012 || srpt_srq_size > MAX_SRPT_SRQ_SIZE) {
4013 printk(KERN_ERR "invalid value %d for kernel module parameter"
4014 " srpt_srq_size -- must be in the range [%d..%d].\n",
4015 srpt_srq_size, MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE);
4016 goto out;
4019 srpt_target = target_fabric_configfs_init(THIS_MODULE, "srpt");
4020 if (IS_ERR(srpt_target)) {
4021 printk(KERN_ERR "couldn't register\n");
4022 ret = PTR_ERR(srpt_target);
4023 goto out;
4026 srpt_target->tf_ops = srpt_template;
4028 /* Enable SG chaining */
4029 srpt_target->tf_ops.task_sg_chaining = true;
4032 * Set up default attribute lists.
4034 srpt_target->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = srpt_wwn_attrs;
4035 srpt_target->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = srpt_tpg_attrs;
4036 srpt_target->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = srpt_tpg_attrib_attrs;
4037 srpt_target->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
4038 srpt_target->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
4039 srpt_target->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
4040 srpt_target->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
4041 srpt_target->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
4042 srpt_target->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
4044 ret = target_fabric_configfs_register(srpt_target);
4045 if (ret < 0) {
4046 printk(KERN_ERR "couldn't register\n");
4047 goto out_free_target;
4050 ret = ib_register_client(&srpt_client);
4051 if (ret) {
4052 printk(KERN_ERR "couldn't register IB client\n");
4053 goto out_unregister_target;
4056 return 0;
4058 out_unregister_target:
4059 target_fabric_configfs_deregister(srpt_target);
4060 srpt_target = NULL;
4061 out_free_target:
4062 if (srpt_target)
4063 target_fabric_configfs_free(srpt_target);
4064 out:
4065 return ret;
4068 static void __exit srpt_cleanup_module(void)
4070 ib_unregister_client(&srpt_client);
4071 target_fabric_configfs_deregister(srpt_target);
4072 srpt_target = NULL;
4075 module_init(srpt_init_module);
4076 module_exit(srpt_cleanup_module);