/*
 * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved.
 * Copyright (C) 2008 - 2011 Bart Van Assche <bvanassche@acm.org>.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/kthread.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <rdma/ib_cache.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include "ib_srpt.h"

/* Name of this kernel module. */
#define DRV_NAME		"ib_srpt"
#define DRV_VERSION		"2.0.0"
#define DRV_RELDATE		"2011-02-14"

#define SRPT_ID_STRING	"Linux SRP target"

#define pr_fmt(fmt) DRV_NAME " " fmt

MODULE_AUTHOR("Vu Pham and Bart Van Assche");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol target "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");
static u64 srpt_service_guid;
static DEFINE_SPINLOCK(srpt_dev_lock);	/* Protects srpt_dev_list. */
static LIST_HEAD(srpt_dev_list);	/* List of srpt_device structures. */

static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
module_param(srp_max_req_size, int, 0444);
MODULE_PARM_DESC(srp_max_req_size,
		 "Maximum size of SRP request messages in bytes.");

static int srpt_srq_size = DEFAULT_SRPT_SRQ_SIZE;
module_param(srpt_srq_size, int, 0444);
MODULE_PARM_DESC(srpt_srq_size,
		 "Shared receive queue (SRQ) size.");

static int srpt_get_u64_x(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg);
}
module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
		  0444);
MODULE_PARM_DESC(srpt_service_guid,
		 "Using this value for ioc_guid, id_ext, and cm_listen_id"
		 " instead of using the node_guid of the first HCA.");

static struct ib_client srpt_client;
static void srpt_release_cmd(struct se_cmd *se_cmd);
static void srpt_free_ch(struct kref *kref);
static int srpt_queue_status(struct se_cmd *cmd);
static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void srpt_process_wait_list(struct srpt_rdma_ch *ch);

/*
 * The only allowed channel state changes are those that change the channel
 * state into a state with a higher numerical value. Hence the new > prev test.
 */
static bool srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new)
{
	enum rdma_ch_state prev;
	bool changed = false;

	spin_lock_irqsave(&ch->spinlock, flags);
	spin_unlock_irqrestore(&ch->spinlock, flags);
/**
 * srpt_event_handler - asynchronous IB event callback function
 * @handler: IB event handler registered by ib_register_event_handler().
 * @event: Description of the event that occurred.
 *
 * Callback function called by the InfiniBand core when an asynchronous IB
 * event occurs. This callback may occur in interrupt context. See also
 * section 11.5.2, Set Asynchronous Event Handler in the InfiniBand
 * Architecture Specification.
 */
static void srpt_event_handler(struct ib_event_handler *handler,
			       struct ib_event *event)
{
	struct srpt_device *sdev;
	struct srpt_port *sport;

	sdev = ib_get_client_data(event->device, &srpt_client);
	if (!sdev || sdev->device != event->device)

	pr_debug("ASYNC event= %d on device= %s\n", event->event,

	switch (event->event) {
	case IB_EVENT_PORT_ERR:
		port_num = event->element.port_num - 1;
		if (port_num < sdev->device->phys_port_cnt) {
			sport = &sdev->port[port_num];
			WARN(true, "event %d: port_num %d out of range 1..%d\n",
			     event->event, port_num + 1,
			     sdev->device->phys_port_cnt);
	case IB_EVENT_PORT_ACTIVE:
	case IB_EVENT_LID_CHANGE:
	case IB_EVENT_PKEY_CHANGE:
	case IB_EVENT_SM_CHANGE:
	case IB_EVENT_CLIENT_REREGISTER:
	case IB_EVENT_GID_CHANGE:
		/* Refresh port data asynchronously. */
		port_num = event->element.port_num - 1;
		if (port_num < sdev->device->phys_port_cnt) {
			sport = &sdev->port[port_num];
			if (!sport->lid && !sport->sm_lid)
				schedule_work(&sport->work);
			WARN(true, "event %d: port_num %d out of range 1..%d\n",
			     event->event, port_num + 1,
			     sdev->device->phys_port_cnt);
		pr_err("received unrecognized IB event %d\n", event->event);

/**
 * srpt_srq_event - SRQ event callback function
 * @event: Description of the event that occurred.
 * @ctx: Context pointer specified at SRQ creation time.
 */
static void srpt_srq_event(struct ib_event *event, void *ctx)
{
	pr_debug("SRQ event %d\n", event->event);
}
static const char *get_ch_state_name(enum rdma_ch_state s)
{
	case CH_DISCONNECTING:
		return "disconnecting";
	case CH_DISCONNECTED:
		return "disconnected";

/**
 * srpt_qp_event - QP event callback function
 * @event: Description of the event that occurred.
 * @ch: SRPT RDMA channel.
 */
static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
{
	pr_debug("QP event %d on ch=%p sess_name=%s state=%d\n",
		 event->event, ch, ch->sess_name, ch->state);

	switch (event->event) {
	case IB_EVENT_COMM_EST:
		ib_cm_notify(ch->ib_cm.cm_id, event->event);
	case IB_EVENT_QP_LAST_WQE_REACHED:
		pr_debug("%s-%d, state %s: received Last WQE event.\n",
			 ch->sess_name, ch->qp->qp_num,
			 get_ch_state_name(ch->state));
		pr_err("received unrecognized IB QP event %d\n", event->event);
/**
 * srpt_set_ioc - initialize an IOUnitInfo structure
 * @c_list: controller list.
 * @slot: one-based slot number.
 * @value: four-bit value.
 *
 * Copies the lowest four bits of value in element slot of the array of four
 * bit elements called c_list (controller list). The index slot is one-based.
 */
static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value)
{
	tmp = c_list[id] & 0xf;
	c_list[id] = (value << 4) | tmp;
	tmp = c_list[id] & 0xf0;
	c_list[id] = (value & 0xf) | tmp;

/**
 * srpt_get_class_port_info - copy ClassPortInfo to a management datagram
 * @mad: Datagram that will be sent as response to DM_ATTR_CLASS_PORT_INFO.
 *
 * See also section 16.3.3.1 ClassPortInfo in the InfiniBand Architecture
 * Specification.
 */
static void srpt_get_class_port_info(struct ib_dm_mad *mad)
{
	struct ib_class_port_info *cif;

	cif = (struct ib_class_port_info *)mad->data;
	memset(cif, 0, sizeof(*cif));
	cif->base_version = 1;
	cif->class_version = 1;

	ib_set_cpi_resp_time(cif, 20);
	mad->mad_hdr.status = 0;
}

/**
 * srpt_get_iou - write IOUnitInfo to a management datagram
 * @mad: Datagram that will be sent as response to DM_ATTR_IOU_INFO.
 *
 * See also section 16.3.3.3 IOUnitInfo in the InfiniBand Architecture
 * Specification. See also section B.7, table B.6 in the SRP r16a document.
 */
static void srpt_get_iou(struct ib_dm_mad *mad)
{
	struct ib_dm_iou_info *ioui;

	ioui = (struct ib_dm_iou_info *)mad->data;
	ioui->change_id = cpu_to_be16(1);
	ioui->max_controllers = 16;

	/* set present for slot 1 and empty for the rest */
	srpt_set_ioc(ioui->controller_list, 1, 1);
	for (i = 1, slot = 2; i < 16; i++, slot++)
		srpt_set_ioc(ioui->controller_list, slot, 0);

	mad->mad_hdr.status = 0;
}
/**
 * srpt_get_ioc - write IOControllerProfile to a management datagram
 * @sport: HCA port through which the MAD has been received.
 * @slot: Slot number specified in DM_ATTR_IOC_PROFILE query.
 * @mad: Datagram that will be sent as response to DM_ATTR_IOC_PROFILE.
 *
 * See also section 16.3.3.4 IOControllerProfile in the InfiniBand
 * Architecture Specification. See also section B.7, table B.7 in the SRP
 * r16a document.
 */
static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
			 struct ib_dm_mad *mad)
{
	struct srpt_device *sdev = sport->sdev;
	struct ib_dm_ioc_profile *iocp;
	int send_queue_depth;

	iocp = (struct ib_dm_ioc_profile *)mad->data;

	if (!slot || slot > 16) {
		mad->mad_hdr.status = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
		mad->mad_hdr.status = cpu_to_be16(DM_MAD_STATUS_NO_IOC);

		send_queue_depth = sdev->srq_size;
		send_queue_depth = min(MAX_SRPT_RQ_SIZE,
				       sdev->device->attrs.max_qp_wr);

	memset(iocp, 0, sizeof(*iocp));
	strcpy(iocp->id_string, SRPT_ID_STRING);
	iocp->guid = cpu_to_be64(srpt_service_guid);
	iocp->vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id);
	iocp->device_id = cpu_to_be32(sdev->device->attrs.vendor_part_id);
	iocp->device_version = cpu_to_be16(sdev->device->attrs.hw_ver);
	iocp->subsys_vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id);
	iocp->subsys_device_id = 0x0;
	iocp->io_class = cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
	iocp->io_subclass = cpu_to_be16(SRP_IO_SUBCLASS);
	iocp->protocol = cpu_to_be16(SRP_PROTOCOL);
	iocp->protocol_version = cpu_to_be16(SRP_PROTOCOL_VERSION);
	iocp->send_queue_depth = cpu_to_be16(send_queue_depth);
	iocp->rdma_read_depth = 4;
	iocp->send_size = cpu_to_be32(srp_max_req_size);
	iocp->rdma_size = cpu_to_be32(min(sport->port_attrib.srp_max_rdma_size,
	iocp->num_svc_entries = 1;
	iocp->op_cap_mask = SRP_SEND_TO_IOC | SRP_SEND_FROM_IOC |
		SRP_RDMA_READ_FROM_IOC | SRP_RDMA_WRITE_FROM_IOC;

	mad->mad_hdr.status = 0;
}
/**
 * srpt_get_svc_entries - write ServiceEntries to a management datagram
 * @ioc_guid: I/O controller GUID to use in reply.
 * @slot: I/O controller number.
 * @hi: End of the range of service entries to be specified in the reply.
 * @lo: Start of the range of service entries to be specified in the reply.
 * @mad: Datagram that will be sent as response to DM_ATTR_SVC_ENTRIES.
 *
 * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
 * Specification. See also section B.7, table B.8 in the SRP r16a document.
 */
static void srpt_get_svc_entries(u64 ioc_guid,
				 u16 slot, u8 hi, u8 lo, struct ib_dm_mad *mad)
{
	struct ib_dm_svc_entries *svc_entries;

	if (!slot || slot > 16) {
		mad->mad_hdr.status = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);

	if (slot > 2 || lo > hi || hi > 1) {
		mad->mad_hdr.status = cpu_to_be16(DM_MAD_STATUS_NO_IOC);

	svc_entries = (struct ib_dm_svc_entries *)mad->data;
	memset(svc_entries, 0, sizeof(*svc_entries));
	svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid);
	snprintf(svc_entries->service_entries[0].name,
		 sizeof(svc_entries->service_entries[0].name),
		 SRP_SERVICE_NAME_PREFIX,

	mad->mad_hdr.status = 0;
}
/**
 * srpt_mgmt_method_get - process a received management datagram
 * @sp: HCA port through which the MAD has been received.
 * @rq_mad: received MAD.
 * @rsp_mad: response MAD.
 */
static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
				 struct ib_dm_mad *rsp_mad)
{
	attr_id = be16_to_cpu(rq_mad->mad_hdr.attr_id);
	case DM_ATTR_CLASS_PORT_INFO:
		srpt_get_class_port_info(rsp_mad);
	case DM_ATTR_IOU_INFO:
		srpt_get_iou(rsp_mad);
	case DM_ATTR_IOC_PROFILE:
		slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
		srpt_get_ioc(sp, slot, rsp_mad);
	case DM_ATTR_SVC_ENTRIES:
		slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
		hi = (u8) ((slot >> 8) & 0xff);
		lo = (u8) (slot & 0xff);
		slot = (u16) ((slot >> 16) & 0xffff);
		srpt_get_svc_entries(srpt_service_guid,
				     slot, hi, lo, rsp_mad);
		rsp_mad->mad_hdr.status =
			cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
/**
 * srpt_mad_send_handler - MAD send completion callback
 * @mad_agent: Return value of ib_register_mad_agent().
 * @mad_wc: Work completion reporting that the MAD has been sent.
 */
static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
				  struct ib_mad_send_wc *mad_wc)
{
	rdma_destroy_ah(mad_wc->send_buf->ah);
	ib_free_send_mad(mad_wc->send_buf);
}

/**
 * srpt_mad_recv_handler - MAD reception callback function
 * @mad_agent: Return value of ib_register_mad_agent().
 * @send_buf: Not used.
 * @mad_wc: Work completion reporting that a MAD has been received.
 */
static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
				  struct ib_mad_send_buf *send_buf,
				  struct ib_mad_recv_wc *mad_wc)
{
	struct srpt_port *sport = (struct srpt_port *)mad_agent->context;
	struct ib_mad_send_buf *rsp;
	struct ib_dm_mad *dm_mad;

	if (!mad_wc || !mad_wc->recv_buf.mad)

	ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc,
				  mad_wc->recv_buf.grh, mad_agent->port_num);

	BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR);

	rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
				 mad_wc->wc->pkey_index, 0,
				 IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA,
				 IB_MGMT_BASE_VERSION);

	memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof(*dm_mad));
	dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
	dm_mad->mad_hdr.status = 0;

	switch (mad_wc->recv_buf.mad->mad_hdr.method) {
	case IB_MGMT_METHOD_GET:
		srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad, dm_mad);
	case IB_MGMT_METHOD_SET:
		dm_mad->mad_hdr.status =
			cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
		dm_mad->mad_hdr.status =
			cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);

	if (!ib_post_send_mad(rsp, NULL)) {
		ib_free_recv_mad(mad_wc);
		/* will destroy_ah & free_send_mad in send completion */

	ib_free_send_mad(rsp);

	ib_free_recv_mad(mad_wc);
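
/* Format the eight bytes of a GUID as four colon-separated 16-bit hex groups. */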
static int srpt_format_guid(char *buf, unsigned int size, const __be64 *guid)
{
	const __be16 *g = (const __be16 *)guid;

	return snprintf(buf, size, "%04x:%04x:%04x:%04x",
			be16_to_cpu(g[0]), be16_to_cpu(g[1]),
			be16_to_cpu(g[2]), be16_to_cpu(g[3]));
}
/**
 * srpt_refresh_port - configure an HCA port
 * @sport: SRPT HCA port.
 *
 * Enable InfiniBand management datagram processing, update the cached sm_lid,
 * lid and gid values, and register a callback function for processing MADs
 * on the specified port.
 *
 * Note: It is safe to call this function more than once for the same port.
 */
static int srpt_refresh_port(struct srpt_port *sport)
{
	struct ib_mad_reg_req reg_req;
	struct ib_port_modify port_modify;
	struct ib_port_attr port_attr;

	memset(&port_modify, 0, sizeof(port_modify));
	port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
	port_modify.clr_port_cap_mask = 0;

	ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);

	ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);

	sport->sm_lid = port_attr.sm_lid;
	sport->lid = port_attr.lid;

	ret = ib_query_gid(sport->sdev->device, sport->port, 0, &sport->gid,

	sport->port_guid_wwn.priv = sport;
	srpt_format_guid(sport->port_guid, sizeof(sport->port_guid),
			 &sport->gid.global.interface_id);
	sport->port_gid_wwn.priv = sport;
	snprintf(sport->port_gid, sizeof(sport->port_gid),
		 be64_to_cpu(sport->gid.global.subnet_prefix),
		 be64_to_cpu(sport->gid.global.interface_id));

	if (!sport->mad_agent) {
		memset(&reg_req, 0, sizeof(reg_req));
		reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
		reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION;
		set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
		set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);

		sport->mad_agent = ib_register_mad_agent(sport->sdev->device,
							 srpt_mad_send_handler,
							 srpt_mad_recv_handler,
		if (IS_ERR(sport->mad_agent)) {
			ret = PTR_ERR(sport->mad_agent);
			sport->mad_agent = NULL;

	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
	ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
/**
 * srpt_unregister_mad_agent - unregister MAD callback functions
 * @sdev: SRPT HCA pointer.
 *
 * Note: It is safe to call this function more than once for the same device.
 */
static void srpt_unregister_mad_agent(struct srpt_device *sdev)
{
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
	struct srpt_port *sport;

	for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
		sport = &sdev->port[i - 1];
		WARN_ON(sport->port != i);
		if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0)
			pr_err("disabling MAD processing failed.\n");
		if (sport->mad_agent) {
			ib_unregister_mad_agent(sport->mad_agent);
			sport->mad_agent = NULL;
/**
 * srpt_alloc_ioctx - allocate an SRPT I/O context structure
 * @sdev: SRPT HCA pointer.
 * @ioctx_size: I/O context size.
 * @dma_size: Size of I/O context DMA buffer.
 * @dir: DMA data direction.
 */
static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
					   int ioctx_size, int dma_size,
					   enum dma_data_direction dir)
{
	struct srpt_ioctx *ioctx;

	ioctx = kmalloc(ioctx_size, GFP_KERNEL);

	ioctx->buf = kmalloc(dma_size, GFP_KERNEL);

	ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf, dma_size, dir);
	if (ib_dma_mapping_error(sdev->device, ioctx->dma))

/**
 * srpt_free_ioctx - free an SRPT I/O context structure
 * @sdev: SRPT HCA pointer.
 * @ioctx: I/O context pointer.
 * @dma_size: Size of I/O context DMA buffer.
 * @dir: DMA data direction.
 */
static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
			    int dma_size, enum dma_data_direction dir)
{
	ib_dma_unmap_single(sdev->device, ioctx->dma, dma_size, dir);

/**
 * srpt_alloc_ioctx_ring - allocate a ring of SRPT I/O context structures
 * @sdev: Device to allocate the I/O context ring for.
 * @ring_size: Number of elements in the I/O context ring.
 * @ioctx_size: I/O context size.
 * @dma_size: DMA buffer size.
 * @dir: DMA data direction.
 */
static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev,
				int ring_size, int ioctx_size,
				int dma_size, enum dma_data_direction dir)
{
	struct srpt_ioctx **ring;

	WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx)
		&& ioctx_size != sizeof(struct srpt_send_ioctx));

	ring = kmalloc(ring_size * sizeof(ring[0]), GFP_KERNEL);
	for (i = 0; i < ring_size; ++i) {
		ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, dma_size, dir);
		srpt_free_ioctx(sdev, ring[i], dma_size, dir);

/**
 * srpt_free_ioctx_ring - free the ring of SRPT I/O context structures
 * @ioctx_ring: I/O context ring to be freed.
 * @sdev: SRPT HCA pointer.
 * @ring_size: Number of ring elements.
 * @dma_size: Size of I/O context DMA buffer.
 * @dir: DMA data direction.
 */
static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
				 struct srpt_device *sdev, int ring_size,
				 int dma_size, enum dma_data_direction dir)
{
	for (i = 0; i < ring_size; ++i)
		srpt_free_ioctx(sdev, ioctx_ring[i], dma_size, dir);
/**
 * srpt_set_cmd_state - set the state of a SCSI command
 * @ioctx: Send I/O context.
 * @new: New I/O context state.
 *
 * Does not modify the state of aborted commands. Returns the previous command
 * state.
 */
static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx,
						  enum srpt_command_state new)
{
	enum srpt_command_state previous;

	previous = ioctx->state;
	if (previous != SRPT_STATE_DONE)

/**
 * srpt_test_and_set_cmd_state - test and set the state of a command
 * @ioctx: Send I/O context.
 * @old: Current I/O context state.
 * @new: New I/O context state.
 *
 * Returns true if and only if the previous command state was equal to 'old'.
 */
static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
					enum srpt_command_state old,
					enum srpt_command_state new)
{
	enum srpt_command_state previous;

	WARN_ON(old == SRPT_STATE_DONE);
	WARN_ON(new == SRPT_STATE_NEW);

	previous = ioctx->state;

	return previous == old;
}
/**
 * srpt_post_recv - post an IB receive request
 * @sdev: SRPT HCA pointer.
 * @ch: SRPT RDMA channel.
 * @ioctx: Receive I/O context pointer.
 */
static int srpt_post_recv(struct srpt_device *sdev, struct srpt_rdma_ch *ch,
			  struct srpt_recv_ioctx *ioctx)
{
	struct ib_recv_wr wr, *bad_wr;

	list.addr = ioctx->ioctx.dma;
	list.length = srp_max_req_size;
	list.lkey = sdev->lkey;

	ioctx->ioctx.cqe.done = srpt_recv_done;
	wr.wr_cqe = &ioctx->ioctx.cqe;

		return ib_post_srq_recv(sdev->srq, &wr, &bad_wr);

	return ib_post_recv(ch->qp, &wr, &bad_wr);
}
/**
 * srpt_zerolength_write - perform a zero-length RDMA write
 * @ch: SRPT RDMA channel.
 *
 * A quote from the InfiniBand specification: C9-88: For an HCA responder
 * using Reliable Connection service, for each zero-length RDMA READ or WRITE
 * request, the R_Key shall not be validated, even if the request includes
 * Immediate data.
 */
static int srpt_zerolength_write(struct srpt_rdma_ch *ch)
{
	struct ib_send_wr wr, *bad_wr;

	pr_debug("%s-%d: queued zerolength write\n", ch->sess_name,

	memset(&wr, 0, sizeof(wr));
	wr.opcode = IB_WR_RDMA_WRITE;
	wr.wr_cqe = &ch->zw_cqe;
	wr.send_flags = IB_SEND_SIGNALED;
	return ib_post_send(ch->qp, &wr, &bad_wr);
}

static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srpt_rdma_ch *ch = cq->cq_context;

	pr_debug("%s-%d wc->status %d\n", ch->sess_name, ch->qp->qp_num,

	if (wc->status == IB_WC_SUCCESS) {
		srpt_process_wait_list(ch);
		if (srpt_set_ch_state(ch, CH_DISCONNECTED))
			schedule_work(&ch->release_work);
			pr_debug("%s-%d: already disconnected.\n",
				 ch->sess_name, ch->qp->qp_num);
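
/*
 * Set up one RDMA read/write context per SRP direct data buffer descriptor:
 * allocate a scatterlist for each descriptor, register it with the RDMA R/W
 * API and chain the scatterlists together so that they can be handed to the
 * target core as a single SG list.
 */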
static int srpt_alloc_rw_ctxs(struct srpt_send_ioctx *ioctx,
		struct srp_direct_buf *db, int nbufs, struct scatterlist **sg,
{
	enum dma_data_direction dir = target_reverse_dma_direction(&ioctx->cmd);
	struct srpt_rdma_ch *ch = ioctx->ch;
	struct scatterlist *prev = NULL;

		ioctx->rw_ctxs = &ioctx->s_rw_ctx;
		ioctx->rw_ctxs = kmalloc_array(nbufs, sizeof(*ioctx->rw_ctxs),

	for (i = ioctx->n_rw_ctx; i < nbufs; i++, db++) {
		struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
		u64 remote_addr = be64_to_cpu(db->va);
		u32 size = be32_to_cpu(db->len);
		u32 rkey = be32_to_cpu(db->key);

		ret = target_alloc_sgl(&ctx->sg, &ctx->nents, size, false,

		ret = rdma_rw_ctx_init(&ctx->rw, ch->qp, ch->sport->port,
				ctx->sg, ctx->nents, 0, remote_addr, rkey, dir);
			target_free_sgl(ctx->sg, ctx->nents);

		ioctx->n_rdma += ret;

			sg_unmark_end(&prev[prev_nents - 1]);
			sg_chain(prev, prev_nents + 1, ctx->sg);

		prev_nents = ctx->nents;

		*sg_cnt += ctx->nents;

		struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];

		rdma_rw_ctx_destroy(&ctx->rw, ch->qp, ch->sport->port,
				ctx->sg, ctx->nents, dir);
		target_free_sgl(ctx->sg, ctx->nents);

	if (ioctx->rw_ctxs != &ioctx->s_rw_ctx)
		kfree(ioctx->rw_ctxs);

static void srpt_free_rw_ctxs(struct srpt_rdma_ch *ch,
			      struct srpt_send_ioctx *ioctx)
{
	enum dma_data_direction dir = target_reverse_dma_direction(&ioctx->cmd);

	for (i = 0; i < ioctx->n_rw_ctx; i++) {
		struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];

		rdma_rw_ctx_destroy(&ctx->rw, ch->qp, ch->sport->port,
				ctx->sg, ctx->nents, dir);
		target_free_sgl(ctx->sg, ctx->nents);

	if (ioctx->rw_ctxs != &ioctx->s_rw_ctx)
		kfree(ioctx->rw_ctxs);
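
/*
 * Return a pointer to the data descriptors that follow the CDB and any
 * additional CDB bytes in an SRP_CMD information unit.
 */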
static inline void *srpt_get_desc_buf(struct srp_cmd *srp_cmd)
{
	/*
	 * The pointer computations below will only be compiled correctly
	 * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
	 * whether srp_cmd::add_data has been declared as a byte pointer.
	 */
	BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0) &&
		     !__same_type(srp_cmd->add_data[0], (u8)0));

	/*
	 * According to the SRP spec, the lower two bits of the 'ADDITIONAL
	 * CDB LENGTH' field are reserved and the size in bytes of this field
	 * is four times the value specified in bits 3..7. Hence the "& ~3".
	 */
	return srp_cmd->add_data + (srp_cmd->add_cdb_len & ~3);
}

/**
 * srpt_get_desc_tbl - parse the data descriptors of a SRP_CMD request
 * @ioctx: Pointer to the I/O context associated with the request.
 * @srp_cmd: Pointer to the SRP_CMD request data.
 * @dir: Pointer to the variable to which the transfer direction will be
 *   written.
 * @sg: [out] scatterlist allocated for the parsed SRP_CMD.
 * @sg_cnt: [out] length of @sg.
 * @data_len: Pointer to the variable to which the total data length of all
 *   descriptors in the SRP_CMD request will be written.
 *
 * This function initializes ioctx->nrbuf and ioctx->r_bufs.
 *
 * Returns -EINVAL when the SRP_CMD request contains inconsistent descriptors;
 * -ENOMEM when memory allocation fails and zero upon success.
 */
static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
		struct srp_cmd *srp_cmd, enum dma_data_direction *dir,
		struct scatterlist **sg, unsigned *sg_cnt, u64 *data_len)
{
	/*
	 * The lower four bits of the buffer format field contain the DATA-IN
	 * buffer descriptor format, and the highest four bits contain the
	 * DATA-OUT buffer descriptor format.
	 */
	if (srp_cmd->buf_fmt & 0xf)
		/* DATA-IN: transfer data from target to initiator (read). */
		*dir = DMA_FROM_DEVICE;
	else if (srp_cmd->buf_fmt >> 4)
		/* DATA-OUT: transfer data from initiator to target (write). */
		*dir = DMA_TO_DEVICE;

	/* initialize data_direction early as srpt_alloc_rw_ctxs needs it */
	ioctx->cmd.data_direction = *dir;

	if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
	    ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
		struct srp_direct_buf *db = srpt_get_desc_buf(srp_cmd);

		*data_len = be32_to_cpu(db->len);
		return srpt_alloc_rw_ctxs(ioctx, db, 1, sg, sg_cnt);
	} else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
		   ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
		struct srp_indirect_buf *idb = srpt_get_desc_buf(srp_cmd);
		int nbufs = be32_to_cpu(idb->table_desc.len) /
				sizeof(struct srp_direct_buf);

		    (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
			pr_err("received unsupported SRP_CMD request"
			       " type (%u out + %u in != %u / %zu)\n",
			       srp_cmd->data_out_desc_cnt,
			       srp_cmd->data_in_desc_cnt,
			       be32_to_cpu(idb->table_desc.len),
			       sizeof(struct srp_direct_buf));

		*data_len = be32_to_cpu(idb->len);
		return srpt_alloc_rw_ctxs(ioctx, idb->desc_list, nbufs,
/**
 * srpt_init_ch_qp - initialize queue pair attributes
 * @ch: SRPT RDMA channel.
 * @qp: Queue pair pointer.
 *
 * Initializes the attributes of queue pair 'qp' by allowing local write,
 * remote read and remote write. Also transitions 'qp' to state IB_QPS_INIT.
 */
static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
{
	struct ib_qp_attr *attr;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);

	attr->qp_state = IB_QPS_INIT;
	attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
	attr->port_num = ch->sport->port;

	ret = ib_find_cached_pkey(ch->sport->sdev->device, ch->sport->port,
				  ch->pkey, &attr->pkey_index);
		pr_err("Translating pkey %#x failed (%d) - using index 0\n",

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT |

/**
 * srpt_ch_qp_rtr - change the state of a channel to 'ready to receive' (RTR)
 * @ch: channel of the queue pair.
 * @qp: queue pair to change the state of.
 *
 * Returns zero upon success and a negative value upon failure.
 *
 * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
 * If this structure ever becomes larger, it might be necessary to allocate
 * it dynamically instead of on the stack.
 */
static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask);

	qp_attr.max_dest_rd_atomic = 4;

	ret = ib_modify_qp(qp, &qp_attr, attr_mask);

/**
 * srpt_ch_qp_rts - change the state of a channel to 'ready to send' (RTS)
 * @ch: channel of the queue pair.
 * @qp: queue pair to change the state of.
 *
 * Returns zero upon success and a negative value upon failure.
 *
 * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
 * If this structure ever becomes larger, it might be necessary to allocate
 * it dynamically instead of on the stack.
 */
static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask);

	qp_attr.max_rd_atomic = 4;

	ret = ib_modify_qp(qp, &qp_attr, attr_mask);

/**
 * srpt_ch_qp_err - set the channel queue pair state to 'error'
 * @ch: SRPT RDMA channel.
 */
static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
{
	struct ib_qp_attr qp_attr;

	qp_attr.qp_state = IB_QPS_ERR;
	return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE);
}
/**
 * srpt_get_send_ioctx - obtain an I/O context for sending to the initiator
 * @ch: SRPT RDMA channel.
 */
static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
{
	struct srpt_send_ioctx *ioctx;
	unsigned long flags;

	spin_lock_irqsave(&ch->spinlock, flags);
	if (!list_empty(&ch->free_list)) {
		ioctx = list_first_entry(&ch->free_list,
					 struct srpt_send_ioctx, free_list);
		list_del(&ioctx->free_list);
	spin_unlock_irqrestore(&ch->spinlock, flags);

	BUG_ON(ioctx->ch != ch);
	ioctx->state = SRPT_STATE_NEW;
	ioctx->n_rw_ctx = 0;
	ioctx->queue_status_only = false;
	/*
	 * transport_init_se_cmd() does not initialize all fields, so do it
	 * here.
	 */
	memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
	memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));
/**
 * srpt_abort_cmd - abort a SCSI command
 * @ioctx: I/O context associated with the SCSI command.
 */
static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
{
	enum srpt_command_state state;

	/*
	 * If the command is in a state where the target core is waiting for
	 * the ib_srpt driver, change the state to the next state.
	 */
	state = ioctx->state;
	case SRPT_STATE_NEED_DATA:
		ioctx->state = SRPT_STATE_DATA_IN;
	case SRPT_STATE_CMD_RSP_SENT:
	case SRPT_STATE_MGMT_RSP_SENT:
		ioctx->state = SRPT_STATE_DONE;
		WARN_ONCE(true, "%s: unexpected I/O context state %d\n",

	pr_debug("Aborting cmd with state %d -> %d and tag %lld\n", state,
		 ioctx->state, ioctx->cmd.tag);

	case SRPT_STATE_NEW:
	case SRPT_STATE_DATA_IN:
	case SRPT_STATE_MGMT:
	case SRPT_STATE_DONE:
		/*
		 * Do nothing - defer abort processing until
		 * srpt_queue_response() is invoked.
		 */
	case SRPT_STATE_NEED_DATA:
		pr_debug("tag %#llx: RDMA read error\n", ioctx->cmd.tag);
		transport_generic_request_failure(&ioctx->cmd,
					TCM_CHECK_CONDITION_ABORT_CMD);
	case SRPT_STATE_CMD_RSP_SENT:
		/*
		 * SRP_RSP sending failed or the SRP_RSP send completion has
		 * not been received in time.
		 */
		transport_generic_free_cmd(&ioctx->cmd, 0);
	case SRPT_STATE_MGMT_RSP_SENT:
		transport_generic_free_cmd(&ioctx->cmd, 0);
		WARN(1, "Unexpected command state (%d)", state);
/**
 * srpt_rdma_read_done - RDMA read completion callback
 * @cq: Completion queue.
 * @wc: Work completion.
 *
 * XXX: what is now target_execute_cmd used to be asynchronous, and unmapping
 * the data that has been transferred via IB RDMA had to be postponed until the
 * check_stop_free() callback. None of this is necessary anymore and needs to
 * be cleaned up.
 */
static void srpt_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srpt_rdma_ch *ch = cq->cq_context;
	struct srpt_send_ioctx *ioctx =
		container_of(wc->wr_cqe, struct srpt_send_ioctx, rdma_cqe);

	WARN_ON(ioctx->n_rdma <= 0);
	atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		pr_info("RDMA_READ for ioctx 0x%p failed with status %d\n",
		srpt_abort_cmd(ioctx);

	if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
					SRPT_STATE_DATA_IN))
		target_execute_cmd(&ioctx->cmd);
		pr_err("%s[%d]: wrong state = %d\n", __func__,
		       __LINE__, ioctx->state);
/**
 * srpt_build_cmd_rsp - build a SRP_RSP response
 * @ch: RDMA channel through which the request has been received.
 * @ioctx: I/O context associated with the SRP_CMD request. The response will
 *   be built in the buffer ioctx->buf points at and hence this function will
 *   overwrite the request data.
 * @tag: tag of the request for which this response is being generated.
 * @status: value for the STATUS field of the SRP_RSP information unit.
 *
 * Returns the size in bytes of the SRP_RSP response.
 *
 * An SRP_RSP response contains a SCSI status or service response. See also
 * section 6.9 in the SRP r16a document for the format of an SRP_RSP
 * response. See also SPC-2 for more information about sense data.
 */
static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
			      struct srpt_send_ioctx *ioctx, u64 tag,
{
	struct srp_rsp *srp_rsp;
	const u8 *sense_data;
	int sense_data_len, max_sense_len;

	/*
	 * The lowest bit of all SAM-3 status codes is zero (see also
	 * paragraph 5.3 in SAM-3).
	 */
	WARN_ON(status & 1);

	srp_rsp = ioctx->ioctx.buf;

	sense_data = ioctx->sense_data;
	sense_data_len = ioctx->cmd.scsi_sense_length;
	WARN_ON(sense_data_len > sizeof(ioctx->sense_data));

	memset(srp_rsp, 0, sizeof(*srp_rsp));
	srp_rsp->opcode = SRP_RSP;
	srp_rsp->req_lim_delta =
		cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
	srp_rsp->status = status;

	if (sense_data_len) {
		BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
		max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
		if (sense_data_len > max_sense_len) {
			pr_warn("truncated sense data from %d to %d"
				" bytes\n", sense_data_len, max_sense_len);
			sense_data_len = max_sense_len;

		srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
		srp_rsp->sense_data_len = cpu_to_be32(sense_data_len);
		memcpy(srp_rsp + 1, sense_data, sense_data_len);

	return sizeof(*srp_rsp) + sense_data_len;
}

/**
 * srpt_build_tskmgmt_rsp - build a task management response
 * @ch: RDMA channel through which the request has been received.
 * @ioctx: I/O context in which the SRP_RSP response will be built.
 * @rsp_code: RSP_CODE that will be stored in the response.
 * @tag: Tag of the request for which this response is being generated.
 *
 * Returns the size in bytes of the SRP_RSP response.
 *
 * An SRP_RSP response contains a SCSI status or service response. See also
 * section 6.9 in the SRP r16a document for the format of an SRP_RSP
 * response.
 */
static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
				  struct srpt_send_ioctx *ioctx,
				  u8 rsp_code, u64 tag)
{
	struct srp_rsp *srp_rsp;

	resp_len = sizeof(*srp_rsp) + resp_data_len;

	srp_rsp = ioctx->ioctx.buf;
	memset(srp_rsp, 0, sizeof(*srp_rsp));

	srp_rsp->opcode = SRP_RSP;
	srp_rsp->req_lim_delta =
		cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));

	srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
	srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
	srp_rsp->data[3] = rsp_code;
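
/* Drop the command reference taken at submission time (TARGET_SCF_ACK_KREF). */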
static int srpt_check_stop_free(struct se_cmd *cmd)
{
	struct srpt_send_ioctx *ioctx = container_of(cmd,
				struct srpt_send_ioctx, cmd);

	return target_put_sess_cmd(&ioctx->cmd);
}

/**
 * srpt_handle_cmd - process a SRP_CMD information unit
 * @ch: SRPT RDMA channel.
 * @recv_ioctx: Receive I/O context.
 * @send_ioctx: Send I/O context.
 */
static void srpt_handle_cmd(struct srpt_rdma_ch *ch,
			    struct srpt_recv_ioctx *recv_ioctx,
			    struct srpt_send_ioctx *send_ioctx)
{
	struct srp_cmd *srp_cmd;
	struct scatterlist *sg = NULL;
	unsigned sg_cnt = 0;
	enum dma_data_direction dir;

	BUG_ON(!send_ioctx);

	srp_cmd = recv_ioctx->ioctx.buf;
	cmd = &send_ioctx->cmd;
	cmd->tag = srp_cmd->tag;

	switch (srp_cmd->task_attr) {
	case SRP_CMD_SIMPLE_Q:
		cmd->sam_task_attr = TCM_SIMPLE_TAG;
	case SRP_CMD_ORDERED_Q:
		cmd->sam_task_attr = TCM_ORDERED_TAG;
	case SRP_CMD_HEAD_OF_Q:
		cmd->sam_task_attr = TCM_HEAD_TAG;
		cmd->sam_task_attr = TCM_ACA_TAG;

	rc = srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &sg, &sg_cnt,
		if (rc != -EAGAIN) {
			pr_err("0x%llx: parsing SRP descriptor table failed.\n",

	rc = target_submit_cmd_map_sgls(cmd, ch->sess, srp_cmd->cdb,
			&send_ioctx->sense_data[0],
			scsilun_to_int(&srp_cmd->lun), data_len,
			TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF,
			sg, sg_cnt, NULL, 0, NULL, 0);
		pr_debug("target_submit_cmd() returned %d for tag %#llx\n", rc,

	send_ioctx->state = SRPT_STATE_DONE;
	srpt_release_cmd(cmd);
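
/* Map an SRP task management function code onto the corresponding TCM TMR constant. */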
static int srp_tmr_to_tcm(int fn)
{
	case SRP_TSK_ABORT_TASK:
		return TMR_ABORT_TASK;
	case SRP_TSK_ABORT_TASK_SET:
		return TMR_ABORT_TASK_SET;
	case SRP_TSK_CLEAR_TASK_SET:
		return TMR_CLEAR_TASK_SET;
	case SRP_TSK_LUN_RESET:
		return TMR_LUN_RESET;
	case SRP_TSK_CLEAR_ACA:
		return TMR_CLEAR_ACA;
/**
 * srpt_handle_tsk_mgmt - process a SRP_TSK_MGMT information unit
 * @ch: SRPT RDMA channel.
 * @recv_ioctx: Receive I/O context.
 * @send_ioctx: Send I/O context.
 *
 * Returns 0 if and only if the request will be processed by the target core.
 *
 * For more information about SRP_TSK_MGMT information units, see also section
 * 6.7 in the SRP r16a document.
 */
static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
				 struct srpt_recv_ioctx *recv_ioctx,
				 struct srpt_send_ioctx *send_ioctx)
{
	struct srp_tsk_mgmt *srp_tsk;
	struct se_session *sess = ch->sess;

	BUG_ON(!send_ioctx);

	srp_tsk = recv_ioctx->ioctx.buf;
	cmd = &send_ioctx->cmd;

	pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld ch %p sess %p\n",
		 srp_tsk->tsk_mgmt_func, srp_tsk->task_tag, srp_tsk->tag, ch,

	srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
	send_ioctx->cmd.tag = srp_tsk->tag;
	tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
	rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL,
			       scsilun_to_int(&srp_tsk->lun), srp_tsk, tcm_tmr,
			       GFP_KERNEL, srp_tsk->task_tag,
			       TARGET_SCF_ACK_KREF);
		send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;

	transport_send_check_condition_and_sense(cmd, 0, 0); // XXX:
/**
 * srpt_handle_new_iu - process a newly received information unit
 * @ch: RDMA channel through which the information unit has been received.
 * @recv_ioctx: Receive I/O context associated with the information unit.
 */
srpt_handle_new_iu(struct srpt_rdma_ch *ch, struct srpt_recv_ioctx *recv_ioctx)
{
	struct srpt_send_ioctx *send_ioctx = NULL;
	struct srp_cmd *srp_cmd;

	BUG_ON(!recv_ioctx);

	if (unlikely(ch->state == CH_CONNECTING))

	ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
				   recv_ioctx->ioctx.dma, srp_max_req_size,

	srp_cmd = recv_ioctx->ioctx.buf;
	opcode = srp_cmd->opcode;
	if (opcode == SRP_CMD || opcode == SRP_TSK_MGMT) {
		send_ioctx = srpt_get_send_ioctx(ch);
		if (unlikely(!send_ioctx))

	if (!list_empty(&recv_ioctx->wait_list)) {
		WARN_ON_ONCE(!ch->processing_wait_list);
		list_del_init(&recv_ioctx->wait_list);

		srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
		srpt_handle_tsk_mgmt(ch, recv_ioctx, send_ioctx);
		pr_err("Not yet implemented: SRP_I_LOGOUT\n");
		pr_debug("received SRP_CRED_RSP\n");
		pr_debug("received SRP_AER_RSP\n");
		pr_err("Received SRP_RSP\n");
		pr_err("received IU with unknown opcode 0x%x\n", opcode);

	srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);

	if (list_empty(&recv_ioctx->wait_list)) {
		WARN_ON_ONCE(ch->processing_wait_list);
		list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
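
/*
 * Receive completion callback: account for the information unit against the
 * request limit and hand it to srpt_handle_new_iu().
 */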
static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srpt_rdma_ch *ch = cq->cq_context;
	struct srpt_recv_ioctx *ioctx =
		container_of(wc->wr_cqe, struct srpt_recv_ioctx, ioctx.cqe);

	if (wc->status == IB_WC_SUCCESS) {
		req_lim = atomic_dec_return(&ch->req_lim);
		if (unlikely(req_lim < 0))
			pr_err("req_lim = %d < 0\n", req_lim);
		srpt_handle_new_iu(ch, ioctx);
		pr_info_ratelimited("receiving failed for ioctx %p with status %d\n",

/*
 * This function must be called from the context in which RDMA completions are
 * processed because it accesses the wait list without protection against
 * access from other threads.
 */
static void srpt_process_wait_list(struct srpt_rdma_ch *ch)
{
	struct srpt_recv_ioctx *recv_ioctx, *tmp;

	WARN_ON_ONCE(ch->state == CH_CONNECTING);

	if (list_empty(&ch->cmd_wait_list))

	WARN_ON_ONCE(ch->processing_wait_list);
	ch->processing_wait_list = true;
	list_for_each_entry_safe(recv_ioctx, tmp, &ch->cmd_wait_list,
		if (!srpt_handle_new_iu(ch, recv_ioctx))
	ch->processing_wait_list = false;
/**
 * srpt_send_done - send completion callback
 * @cq: Completion queue.
 * @wc: Work completion.
 *
 * Note: Although this has not yet been observed during tests, at least in
 * theory it is possible that the srpt_get_send_ioctx() call invoked by
 * srpt_handle_new_iu() fails. This is possible because the req_lim_delta
 * value in each response is set to one, and it is possible that this response
 * makes the initiator send a new request before the send completion for that
 * response has been processed. This could e.g. happen if the call to
 * srpt_put_send_ioctx() is delayed because of a higher priority interrupt or
 * if IB retransmission causes generation of the send completion to be
 * delayed. Incoming information units for which srpt_get_send_ioctx() fails
 * are queued on cmd_wait_list. The code below processes these delayed
 * requests one at a time.
 */
static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srpt_rdma_ch *ch = cq->cq_context;
	struct srpt_send_ioctx *ioctx =
		container_of(wc->wr_cqe, struct srpt_send_ioctx, ioctx.cqe);
	enum srpt_command_state state;

	state = srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);

	WARN_ON(state != SRPT_STATE_CMD_RSP_SENT &&
		state != SRPT_STATE_MGMT_RSP_SENT);

	atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);

	if (wc->status != IB_WC_SUCCESS)
		pr_info("sending response for ioctx 0x%p failed"
			" with status %d\n", ioctx, wc->status);

	if (state != SRPT_STATE_DONE) {
		transport_generic_free_cmd(&ioctx->cmd, 0);
		pr_err("IB completion has been received too late for"
		       " wr_id = %u.\n", ioctx->ioctx.index);

	srpt_process_wait_list(ch);
/**
 * srpt_create_ch_ib - create receive and send completion queues
 * @ch: SRPT RDMA channel.
 */
static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
{
	struct ib_qp_init_attr *qp_init;
	struct srpt_port *sport = ch->sport;
	struct srpt_device *sdev = sport->sdev;
	const struct ib_device_attr *attrs = &sdev->device->attrs;
	int sq_size = sport->port_attrib.srp_sq_size;

	WARN_ON(ch->rq_size < 1);

	qp_init = kzalloc(sizeof(*qp_init), GFP_KERNEL);

	ch->cq = ib_alloc_cq(sdev->device, ch, ch->rq_size + sq_size,
			0 /* XXX: spread CQs */, IB_POLL_WORKQUEUE);
	if (IS_ERR(ch->cq)) {
		ret = PTR_ERR(ch->cq);
		pr_err("failed to create CQ cqe= %d ret= %d\n",
		       ch->rq_size + sq_size, ret);

	qp_init->qp_context = (void *)ch;
	qp_init->event_handler
		= (void(*)(struct ib_event *, void*))srpt_qp_event;
	qp_init->send_cq = ch->cq;
	qp_init->recv_cq = ch->cq;
	qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_init->qp_type = IB_QPT_RC;
	/*
	 * We divide up our send queue size into half SEND WRs to send the
	 * completions, and half R/W contexts to actually do the RDMA
	 * READ/WRITE transfers. Note that we need to allocate CQ slots for
	 * both, as RDMA contexts will also post completions for the RDMA READ
	 * case.
	 */
	qp_init->cap.max_send_wr = min(sq_size / 2, attrs->max_qp_wr);
	qp_init->cap.max_rdma_ctxs = sq_size / 2;
	qp_init->cap.max_send_sge = min(attrs->max_sge, SRPT_MAX_SG_PER_WQE);
	qp_init->port_num = ch->sport->port;
	if (sdev->use_srq) {
		qp_init->srq = sdev->srq;
		qp_init->cap.max_recv_wr = ch->rq_size;
		qp_init->cap.max_recv_sge = qp_init->cap.max_send_sge;

	ch->qp = ib_create_qp(sdev->pd, qp_init);
	if (IS_ERR(ch->qp)) {
		ret = PTR_ERR(ch->qp);
		if (ret == -ENOMEM) {
			if (sq_size >= MIN_SRPT_SQ_SIZE) {
				ib_destroy_cq(ch->cq);
		pr_err("failed to create_qp ret= %d\n", ret);
		goto err_destroy_cq;

	atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr);

	pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d ch= %p\n",
		 __func__, ch->cq->cqe, qp_init->cap.max_send_sge,
		 qp_init->cap.max_send_wr, ch);

	ret = srpt_init_ch_qp(ch, ch->qp);
		goto err_destroy_qp;

	for (i = 0; i < ch->rq_size; i++)
		srpt_post_recv(sdev, ch, ch->ioctx_recv_ring[i]);

	ib_destroy_qp(ch->qp);

static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
{
	ib_destroy_qp(ch->qp);
/**
 * srpt_close_ch - close a RDMA channel
 * @ch: SRPT RDMA channel.
 *
 * Make sure all resources associated with the channel will be deallocated at
 * an appropriate time.
 *
 * Returns true if and only if the channel state has been modified into
 * CH_DRAINING.
 */
static bool srpt_close_ch(struct srpt_rdma_ch *ch)
{
	if (!srpt_set_ch_state(ch, CH_DRAINING)) {
		pr_debug("%s-%d: already closed\n", ch->sess_name,

	kref_get(&ch->kref);

	ret = srpt_ch_qp_err(ch);
		pr_err("%s-%d: changing queue pair into error state failed: %d\n",
		       ch->sess_name, ch->qp->qp_num, ret);

	ret = srpt_zerolength_write(ch);
		pr_err("%s-%d: queuing zero-length write failed: %d\n",
		       ch->sess_name, ch->qp->qp_num, ret);
		if (srpt_set_ch_state(ch, CH_DISCONNECTED))
			schedule_work(&ch->release_work);

	kref_put(&ch->kref, srpt_free_ch);

/*
 * Change the channel state into CH_DISCONNECTING. If a channel has not yet
 * reached the connected state, close it. If a channel is in the connected
 * state, send a DREQ. If a DREQ has been received, send a DREP. Note: it is
 * the responsibility of the caller to ensure that this function is not
 * invoked concurrently with the code that accepts a connection. This means
 * that this function must either be invoked from inside a CM callback
 * function or that it must be invoked with the srpt_port.mutex held.
 */
static int srpt_disconnect_ch(struct srpt_rdma_ch *ch)
{
	if (!srpt_set_ch_state(ch, CH_DISCONNECTING))

	ret = ib_send_cm_dreq(ch->ib_cm.cm_id, NULL, 0);
	ret = ib_send_cm_drep(ch->ib_cm.cm_id, NULL, 0);

	if (ret < 0 && srpt_close_ch(ch))

static bool srpt_ch_closed(struct srpt_port *sport, struct srpt_rdma_ch *ch)
{
	struct srpt_nexus *nexus;
	struct srpt_rdma_ch *ch2;

	list_for_each_entry(nexus, &sport->nexus_list, entry) {
		list_for_each_entry(ch2, &nexus->ch_list, list) {

/* Send DREQ and wait for DREP. */
static void srpt_disconnect_ch_sync(struct srpt_rdma_ch *ch)
{
	struct srpt_port *sport = ch->sport;

	pr_debug("ch %s-%d state %d\n", ch->sess_name, ch->qp->qp_num,

	mutex_lock(&sport->mutex);
	srpt_disconnect_ch(ch);
	mutex_unlock(&sport->mutex);

	while (wait_event_timeout(sport->ch_releaseQ, srpt_ch_closed(sport, ch),
		pr_info("%s(%s-%d state %d): still waiting ...\n", __func__,
			ch->sess_name, ch->qp->qp_num, ch->state);
static void __srpt_close_all_ch(struct srpt_port *sport)
{
	struct srpt_nexus *nexus;
	struct srpt_rdma_ch *ch;

	lockdep_assert_held(&sport->mutex);

	list_for_each_entry(nexus, &sport->nexus_list, entry) {
		list_for_each_entry(ch, &nexus->ch_list, list) {
			if (srpt_disconnect_ch(ch) >= 0)
				pr_info("Closing channel %s-%d because target %s_%d has been disabled\n",
					ch->sess_name, ch->qp->qp_num,
					sport->sdev->device->name, sport->port);

/*
 * Look up (i_port_id, t_port_id) in sport->nexus_list. Create an entry if
 * it does not yet exist.
 */
static struct srpt_nexus *srpt_get_nexus(struct srpt_port *sport,
					 const u8 i_port_id[16],
					 const u8 t_port_id[16])
{
	struct srpt_nexus *nexus = NULL, *tmp_nexus = NULL, *n;

		mutex_lock(&sport->mutex);
		list_for_each_entry(n, &sport->nexus_list, entry) {
			if (memcmp(n->i_port_id, i_port_id, 16) == 0 &&
			    memcmp(n->t_port_id, t_port_id, 16) == 0) {
		if (!nexus && tmp_nexus) {
			list_add_tail_rcu(&tmp_nexus->entry,
					  &sport->nexus_list);
			swap(nexus, tmp_nexus);
		mutex_unlock(&sport->mutex);

		tmp_nexus = kzalloc(sizeof(*nexus), GFP_KERNEL);
			nexus = ERR_PTR(-ENOMEM);
		INIT_LIST_HEAD(&tmp_nexus->ch_list);
		memcpy(tmp_nexus->i_port_id, i_port_id, 16);
		memcpy(tmp_nexus->t_port_id, t_port_id, 16);
static void srpt_set_enabled(struct srpt_port *sport, bool enabled)
	__must_hold(&sport->mutex)
{
	lockdep_assert_held(&sport->mutex);

	if (sport->enabled == enabled)
	sport->enabled = enabled;
		__srpt_close_all_ch(sport);

static void srpt_free_ch(struct kref *kref)
{
	struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref);
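
/*
 * Final channel teardown, run from a workqueue: wait for outstanding target
 * commands, deregister the session, destroy the CM id and the channel's IB
 * resources, free the I/O context rings and drop the channel reference.
 */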
static void srpt_release_channel_work(struct work_struct *w)
{
	struct srpt_rdma_ch *ch;
	struct srpt_device *sdev;
	struct srpt_port *sport;
	struct se_session *se_sess;

	ch = container_of(w, struct srpt_rdma_ch, release_work);
	pr_debug("%s-%d\n", ch->sess_name, ch->qp->qp_num);

	sdev = ch->sport->sdev;

	target_sess_cmd_list_set_waiting(se_sess);
	target_wait_for_sess_cmds(se_sess);

	transport_deregister_session_configfs(se_sess);
	transport_deregister_session(se_sess);

	ib_destroy_cm_id(ch->ib_cm.cm_id);

	srpt_destroy_ch_ib(ch);

	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
			     ch->sport->sdev, ch->rq_size,
			     ch->max_rsp_size, DMA_TO_DEVICE);

	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
			     srp_max_req_size, DMA_FROM_DEVICE);

	mutex_lock(&sport->mutex);
	list_del_rcu(&ch->list);
	mutex_unlock(&sport->mutex);

	wake_up(&sport->ch_releaseQ);

	kref_put(&ch->kref, srpt_free_ch);
}
2028 * srpt_cm_req_recv - process the event IB_CM_REQ_RECEIVED
2029 * @cm_id: IB/CM connection identifier.
2030 * @port_num: Port through which the IB/CM REQ message was received.
2031 * @pkey: P_Key of the incoming connection.
2032 * @req: SRP login request.
2033 * @src_addr: GID of the port that submitted the login request.
2035 * Ownership of the cm_id is transferred to the target session if this
2036 * functions returns zero. Otherwise the caller remains the owner of cm_id.
2038 static int srpt_cm_req_recv(struct ib_cm_id
*cm_id
,
2039 u8 port_num
, __be16 pkey
,
2040 const struct srp_login_req
*req
,
2041 const char *src_addr
)
2043 struct srpt_device
*sdev
= cm_id
->context
;
2044 struct srpt_port
*sport
= &sdev
->port
[port_num
- 1];
2045 struct srpt_nexus
*nexus
;
2046 struct srp_login_rsp
*rsp
= NULL
;
2047 struct srp_login_rej
*rej
= NULL
;
2048 struct ib_cm_rep_param
*rep_param
= NULL
;
2049 struct srpt_rdma_ch
*ch
;
2054 WARN_ON_ONCE(irqs_disabled());
2056 if (WARN_ON(!sdev
|| !req
))
2059 it_iu_len
= be32_to_cpu(req
->req_it_iu_len
);
2061 pr_info("Received SRP_LOGIN_REQ with i_port_id %pI6, t_port_id %pI6 and it_iu_len %d on port %d (guid=%pI6); pkey %#04x\n",
2062 req
->initiator_port_id
, req
->target_port_id
, it_iu_len
,
2063 port_num
, &sport
->gid
, be16_to_cpu(pkey
));
2065 nexus
= srpt_get_nexus(sport
, req
->initiator_port_id
,
2066 req
->target_port_id
);
2067 if (IS_ERR(nexus
)) {
2068 ret
= PTR_ERR(nexus
);
2073 rsp
= kzalloc(sizeof(*rsp
), GFP_KERNEL
);
2074 rej
= kzalloc(sizeof(*rej
), GFP_KERNEL
);
2075 rep_param
= kzalloc(sizeof(*rep_param
), GFP_KERNEL
);
2076 if (!rsp
|| !rej
|| !rep_param
)
2080 if (it_iu_len
> srp_max_req_size
|| it_iu_len
< 64) {
2081 rej
->reason
= cpu_to_be32(
2082 SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE
);
2083 pr_err("rejected SRP_LOGIN_REQ because its length (%d bytes) is out of range (%d .. %d)\n",
2084 it_iu_len
, 64, srp_max_req_size
);
2088 if (!sport
->enabled
) {
2089 rej
->reason
= cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES
);
2090 pr_info("rejected SRP_LOGIN_REQ because target port %s_%d has not yet been enabled\n",
2091 sport
->sdev
->device
->name
, port_num
);
2095 if (*(__be64
*)req
->target_port_id
!= cpu_to_be64(srpt_service_guid
)
2096 || *(__be64
*)(req
->target_port_id
+ 8) !=
2097 cpu_to_be64(srpt_service_guid
)) {
2098 rej
->reason
= cpu_to_be32(
2099 SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL
);
2100 pr_err("rejected SRP_LOGIN_REQ because it has an invalid target port identifier.\n");
2105 ch
= kzalloc(sizeof(*ch
), GFP_KERNEL
);
2107 rej
->reason
= cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES
);
2108 pr_err("rejected SRP_LOGIN_REQ because out of memory.\n");
2112 kref_init(&ch
->kref
);
2113 ch
->pkey
= be16_to_cpu(pkey
);
2115 ch
->zw_cqe
.done
= srpt_zerolength_write_done
;
2116 INIT_WORK(&ch
->release_work
, srpt_release_channel_work
);
2118 ch
->ib_cm
.cm_id
= cm_id
;
2119 cm_id
->context
= ch
;
2121 * ch->rq_size should be at least as large as the initiator queue
2122 * depth to avoid that the initiator driver has to report QUEUE_FULL
2123 * to the SCSI mid-layer.
2125 ch
->rq_size
= min(MAX_SRPT_RQ_SIZE
, sdev
->device
->attrs
.max_qp_wr
);
2126 spin_lock_init(&ch
->spinlock
);
2127 ch
->state
= CH_CONNECTING
;
2128 INIT_LIST_HEAD(&ch
->cmd_wait_list
);
2129 ch
->max_rsp_size
= ch
->sport
->port_attrib
.srp_max_rsp_size
;
2131 ch
->ioctx_ring
= (struct srpt_send_ioctx
**)
2132 srpt_alloc_ioctx_ring(ch
->sport
->sdev
, ch
->rq_size
,
2133 sizeof(*ch
->ioctx_ring
[0]),
2134 ch
->max_rsp_size
, DMA_TO_DEVICE
);
2135 if (!ch
->ioctx_ring
) {
2136 pr_err("rejected SRP_LOGIN_REQ because creating a new QP SQ ring failed.\n");
2137 rej
->reason
= cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES
);
	INIT_LIST_HEAD(&ch->free_list);
	for (i = 0; i < ch->rq_size; i++) {
		ch->ioctx_ring[i]->ch = ch;
		list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
	}
	if (!sdev->use_srq) {
		ch->ioctx_recv_ring = (struct srpt_recv_ioctx **)
			srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
					      sizeof(*ch->ioctx_recv_ring[0]),
					      srp_max_req_size,
					      DMA_FROM_DEVICE);
		if (!ch->ioctx_recv_ring) {
			pr_err("rejected SRP_LOGIN_REQ because creating a new QP RQ ring failed.\n");
			rej->reason =
			    cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
			goto free_ring;
		}
		for (i = 0; i < ch->rq_size; i++)
			INIT_LIST_HEAD(&ch->ioctx_recv_ring[i]->wait_list);
	}

	ret = srpt_create_ch_ib(ch);
	if (ret) {
		rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
		pr_err("rejected SRP_LOGIN_REQ because creating a new RDMA channel failed.\n");
		goto free_recv_ring;
	}

	strlcpy(ch->sess_name, src_addr, sizeof(ch->sess_name));
	snprintf(i_port_id, sizeof(i_port_id), "0x%016llx%016llx",
		 be64_to_cpu(*(__be64 *)nexus->i_port_id),
		 be64_to_cpu(*(__be64 *)(nexus->i_port_id + 8)));

	pr_debug("registering session %s\n", ch->sess_name);

	if (sport->port_guid_tpg.se_tpg_wwn)
		ch->sess = target_alloc_session(&sport->port_guid_tpg, 0, 0,
						TARGET_PROT_NORMAL,
						ch->sess_name, ch, NULL);
	if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
		ch->sess = target_alloc_session(&sport->port_gid_tpg, 0, 0,
						TARGET_PROT_NORMAL, i_port_id,
						ch, NULL);
	/* Retry without leading "0x" */
	if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
		ch->sess = target_alloc_session(&sport->port_gid_tpg, 0, 0,
						TARGET_PROT_NORMAL,
						i_port_id + 2, ch, NULL);
	if (IS_ERR_OR_NULL(ch->sess)) {
		ret = PTR_ERR(ch->sess);
		pr_info("Rejected login for initiator %s: ret = %d.\n",
			ch->sess_name, ret);
		rej->reason = cpu_to_be32(ret == -ENOMEM ?
					SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES :
					SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
		goto destroy_ib;
	}

	mutex_lock(&sport->mutex);

	if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
		struct srpt_rdma_ch *ch2;

		rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;

		list_for_each_entry(ch2, &nexus->ch_list, list) {
			if (srpt_disconnect_ch(ch2) < 0)
				continue;
			pr_info("Relogin - closed existing channel %s\n",
				ch2->sess_name);
			rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
		}
	} else {
		rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
	}

	list_add_tail_rcu(&ch->list, &nexus->ch_list);

	if (!sport->enabled) {
		rej->reason = cpu_to_be32(
				SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
		pr_info("rejected SRP_LOGIN_REQ because target %s_%d is not enabled\n",
			sdev->device->name, port_num);
		mutex_unlock(&sport->mutex);
		goto reject;
	}

	mutex_unlock(&sport->mutex);

	ret = srpt_ch_qp_rtr(ch, ch->qp);
	if (ret) {
		rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
		pr_err("rejected SRP_LOGIN_REQ because enabling RTR failed (error code = %d)\n",
		       ret);
		goto reject;
	}

	pr_debug("Establish connection sess=%p name=%s ch=%p\n", ch->sess,
		 ch->sess_name, ch);

	/* create srp_login_response */
	rsp->opcode = SRP_LOGIN_RSP;
	rsp->tag = req->tag;
	rsp->max_it_iu_len = req->req_it_iu_len;
	rsp->max_ti_iu_len = req->req_it_iu_len;
	ch->max_ti_iu_len = it_iu_len;
	rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
				   SRP_BUF_FORMAT_INDIRECT);
	rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
	atomic_set(&ch->req_lim, ch->rq_size);
	atomic_set(&ch->req_lim_delta, 0);

	/* create cm reply */
	rep_param->qp_num = ch->qp->qp_num;
	rep_param->private_data = (void *)rsp;
	rep_param->private_data_len = sizeof(*rsp);
	rep_param->rnr_retry_count = 7;
	rep_param->flow_control = 1;
	rep_param->failover_accepted = 0;
	rep_param->srq = 1;
	rep_param->responder_resources = 4;
	rep_param->initiator_depth = 4;

	/*
	 * Hold the sport mutex while accepting a connection to avoid that
	 * srpt_disconnect_ch() is invoked concurrently with this code.
	 */
	mutex_lock(&sport->mutex);
	if (sport->enabled && ch->state == CH_CONNECTING)
		ret = ib_send_cm_rep(cm_id, rep_param);
	else
		ret = -EINVAL;
	mutex_unlock(&sport->mutex);

	switch (ret) {
	case 0:
		break;
	case -EINVAL:
		goto reject;
	default:
		rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
		pr_err("sending SRP_LOGIN_REQ response failed (error code = %d)\n",
		       ret);
		goto reject;
	}

	goto out;

destroy_ib:
	srpt_destroy_ch_ib(ch);

free_recv_ring:
	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
			     ch->sport->sdev, ch->rq_size,
			     srp_max_req_size, DMA_FROM_DEVICE);

free_ring:
	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
			     ch->sport->sdev, ch->rq_size,
			     ch->max_rsp_size, DMA_TO_DEVICE);

free_ch:
	cm_id->context = NULL;
	kfree(ch);
	ch = NULL;

	WARN_ON_ONCE(ret == 0);

reject:
	pr_info("Rejecting login with reason %#x\n", be32_to_cpu(rej->reason));
	rej->opcode = SRP_LOGIN_REJ;
	rej->tag = req->tag;
	rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
				   SRP_BUF_FORMAT_INDIRECT);

	ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
		       (void *)rej, sizeof(*rej));

out:
	kfree(rep_param);
	kfree(rsp);
	kfree(rej);

	return ret;
}

static int srpt_ib_cm_req_recv(struct ib_cm_id *cm_id,
			       struct ib_cm_req_event_param *param,
			       void *private_data)
{
	char sguid[40];

	srpt_format_guid(sguid, sizeof(sguid),
			 &param->primary_path->dgid.global.interface_id);

	return srpt_cm_req_recv(cm_id, param->port, param->primary_path->pkey,
				private_data, sguid);
}

static void srpt_cm_rej_recv(struct srpt_rdma_ch *ch,
			     enum ib_cm_rej_reason reason,
			     const u8 *private_data,
			     u8 private_data_len)
{
	char *priv = NULL;
	int i;

	if (private_data_len && (priv = kmalloc(private_data_len * 3 + 1,
						GFP_KERNEL))) {
		for (i = 0; i < private_data_len; i++)
			sprintf(priv + 3 * i, " %02x", private_data[i]);
	}
	pr_info("Received CM REJ for ch %s-%d; reason %d%s%s.\n",
		ch->sess_name, ch->qp->qp_num, reason, private_data_len ?
		"; private data" : "", priv ? priv : " (?)");
	kfree(priv);
}

/**
 * srpt_cm_rtu_recv - process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event
 * @ch: SRPT RDMA channel.
 *
 * An IB_CM_RTU_RECEIVED message indicates that the connection is established
 * and that the recipient may begin transmitting (RTU = ready to use).
 */
static void srpt_cm_rtu_recv(struct srpt_rdma_ch *ch)
{
	int ret;

	ret = srpt_ch_qp_rts(ch, ch->qp);
	if (ret < 0) {
		pr_err("%s-%d: QP transition to RTS failed\n", ch->sess_name,
		       ch->qp->qp_num);
		srpt_close_ch(ch);
		return;
	}

	/*
	 * Note: calling srpt_close_ch() if the transition to the LIVE state
	 * fails is not necessary since that means that srpt_close_ch() has
	 * already been invoked from another thread.
	 */
	if (!srpt_set_ch_state(ch, CH_LIVE)) {
		pr_err("%s-%d: channel transition to LIVE state failed\n",
		       ch->sess_name, ch->qp->qp_num);
		return;
	}

	/* Trigger wait list processing. */
	ret = srpt_zerolength_write(ch);
	WARN_ONCE(ret < 0, "%d\n", ret);
}

/**
 * srpt_cm_handler - IB connection manager callback function
 * @cm_id: IB/CM connection identifier.
 * @event: IB/CM event.
 *
 * A non-zero return value will cause the caller to destroy the CM ID.
 *
 * Note: srpt_cm_handler() must only return a non-zero value when transferring
 * ownership of the cm_id to a channel by srpt_cm_req_recv() failed. Returning
 * a non-zero value in any other case will trigger a race with the
 * ib_destroy_cm_id() call in srpt_release_channel().
 */
static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srpt_rdma_ch *ch = cm_id->context;
	int ret;

	ret = 0;
	switch (event->event) {
	case IB_CM_REQ_RECEIVED:
		ret = srpt_ib_cm_req_recv(cm_id, &event->param.req_rcvd,
					  event->private_data);
		break;
	case IB_CM_REJ_RECEIVED:
		srpt_cm_rej_recv(ch, event->param.rej_rcvd.reason,
				 event->private_data,
				 IB_CM_REJ_PRIVATE_DATA_SIZE);
		break;
	case IB_CM_RTU_RECEIVED:
	case IB_CM_USER_ESTABLISHED:
		srpt_cm_rtu_recv(ch);
		break;
	case IB_CM_DREQ_RECEIVED:
		srpt_disconnect_ch(ch);
		break;
	case IB_CM_DREP_RECEIVED:
		pr_info("Received CM DREP message for ch %s-%d.\n",
			ch->sess_name, ch->qp->qp_num);
		break;
	case IB_CM_TIMEWAIT_EXIT:
		pr_info("Received CM TimeWait exit for ch %s-%d.\n",
			ch->sess_name, ch->qp->qp_num);
		break;
	case IB_CM_REP_ERROR:
		pr_info("Received CM REP error for ch %s-%d.\n", ch->sess_name,
			ch->qp->qp_num);
		break;
	case IB_CM_DREQ_ERROR:
		pr_info("Received CM DREQ ERROR event.\n");
		break;
	case IB_CM_MRA_RECEIVED:
		pr_info("Received CM MRA event\n");
		break;
	default:
		pr_err("received unrecognized CM event %d\n", event->event);
		break;
	}

	return ret;
}

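/*
 * TCM write_pending_status callback: report whether the command is still
 * waiting for data to be transferred from the initiator.
 */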
static int srpt_write_pending_status(struct se_cmd *se_cmd)
{
	struct srpt_send_ioctx *ioctx;

	ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
	return ioctx->state == SRPT_STATE_NEED_DATA;
}

/*
 * srpt_write_pending - Start data transfer from initiator to target (write).
 */
static int srpt_write_pending(struct se_cmd *se_cmd)
{
	struct srpt_send_ioctx *ioctx =
		container_of(se_cmd, struct srpt_send_ioctx, cmd);
	struct srpt_rdma_ch *ch = ioctx->ch;
	struct ib_send_wr *first_wr = NULL, *bad_wr;
	struct ib_cqe *cqe = &ioctx->rdma_cqe;
	enum srpt_command_state new_state;
	int ret, i;

	new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);
	WARN_ON(new_state == SRPT_STATE_DONE);

	if (atomic_sub_return(ioctx->n_rdma, &ch->sq_wr_avail) < 0) {
		pr_warn("%s: IB send queue full (needed %d)\n",
			__func__, ioctx->n_rdma);
		ret = -ENOMEM;
		goto out_undo;
	}

	cqe->done = srpt_rdma_read_done;
	for (i = ioctx->n_rw_ctx - 1; i >= 0; i--) {
		struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];

		first_wr = rdma_rw_ctx_wrs(&ctx->rw, ch->qp, ch->sport->port,
					   cqe, first_wr);
		cqe = NULL;
	}

	ret = ib_post_send(ch->qp, first_wr, &bad_wr);
	if (ret) {
		pr_err("%s: ib_post_send() returned %d for %d (avail: %d)\n",
		       __func__, ret, ioctx->n_rdma,
		       atomic_read(&ch->sq_wr_avail));
		goto out_undo;
	}

	return 0;

out_undo:
	atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
	return ret;
}

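/* Map a TCM task management status onto the corresponding SRP status code. */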
static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
{
	switch (tcm_mgmt_status) {
	case TMR_FUNCTION_COMPLETE:
		return SRP_TSK_MGMT_SUCCESS;
	case TMR_FUNCTION_REJECTED:
		return SRP_TSK_MGMT_FUNC_NOT_SUPP;
	}
	return SRP_TSK_MGMT_FAILED;
}

/**
 * srpt_queue_response - transmit the response to a SCSI command
 * @cmd: SCSI target command.
 *
 * Callback function called by the TCM core. Must not block since it can be
 * invoked on the context of the IB completion handler.
 */
static void srpt_queue_response(struct se_cmd *cmd)
{
	struct srpt_send_ioctx *ioctx =
		container_of(cmd, struct srpt_send_ioctx, cmd);
	struct srpt_rdma_ch *ch = ioctx->ch;
	struct srpt_device *sdev = ch->sport->sdev;
	struct ib_send_wr send_wr, *first_wr = &send_wr, *bad_wr;
	struct ib_sge sge;
	enum srpt_command_state state;
	int resp_len, ret, i;
	u8 srp_tm_status;

	BUG_ON(!ch);

	state = ioctx->state;
	switch (state) {
	case SRPT_STATE_NEW:
	case SRPT_STATE_DATA_IN:
		ioctx->state = SRPT_STATE_CMD_RSP_SENT;
		break;
	case SRPT_STATE_MGMT:
		ioctx->state = SRPT_STATE_MGMT_RSP_SENT;
		break;
	default:
		WARN(true, "ch %p; cmd %d: unexpected command state %d\n",
		     ch, ioctx->ioctx.index, ioctx->state);
		break;
	}

	if (unlikely(WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT)))
		return;

	/* For read commands, transfer the data to the initiator. */
	if (ioctx->cmd.data_direction == DMA_FROM_DEVICE &&
	    ioctx->cmd.data_length &&
	    !ioctx->queue_status_only) {
		for (i = ioctx->n_rw_ctx - 1; i >= 0; i--) {
			struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];

			first_wr = rdma_rw_ctx_wrs(&ctx->rw, ch->qp,
						   ch->sport->port, NULL,
						   first_wr);
		}
	}

	if (state != SRPT_STATE_MGMT)
		resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->cmd.tag,
					      cmd->scsi_status);
	else {
		srp_tm_status
			= tcm_to_srp_tsk_mgmt_status(cmd->se_tmr_req->response);
		resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status,
						  ioctx->cmd.tag);
	}

	atomic_inc(&ch->req_lim);

	if (unlikely(atomic_sub_return(1 + ioctx->n_rdma,
				       &ch->sq_wr_avail) < 0)) {
		pr_warn("%s: IB send queue full (needed %d)\n",
			__func__, ioctx->n_rdma);
		goto out;
	}

	ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, resp_len,
				      DMA_TO_DEVICE);

	sge.addr = ioctx->ioctx.dma;
	sge.length = resp_len;
	sge.lkey = sdev->lkey;

	ioctx->ioctx.cqe.done = srpt_send_done;
	send_wr.next = NULL;
	send_wr.wr_cqe = &ioctx->ioctx.cqe;
	send_wr.sg_list = &sge;
	send_wr.num_sge = 1;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(ch->qp, first_wr, &bad_wr);
	if (ret < 0) {
		pr_err("%s: sending cmd response failed for tag %llu (%d)\n",
		       __func__, ioctx->cmd.tag, ret);
		goto out;
	}

	return;

out:
	atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);
	atomic_dec(&ch->req_lim);
	srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
	target_put_sess_cmd(&ioctx->cmd);
}

static int srpt_queue_data_in(struct se_cmd *cmd)
{
	srpt_queue_response(cmd);
	return 0;
}

static void srpt_queue_tm_rsp(struct se_cmd *cmd)
{
	srpt_queue_response(cmd);
}

static void srpt_aborted_task(struct se_cmd *cmd)
{
}

static int srpt_queue_status(struct se_cmd *cmd)
{
	struct srpt_send_ioctx *ioctx;

	ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
	BUG_ON(ioctx->sense_data != cmd->sense_buffer);
	if (cmd->se_cmd_flags &
	    (SCF_TRANSPORT_TASK_SENSE | SCF_EMULATED_TASK_SENSE))
		WARN_ON(cmd->scsi_status != SAM_STAT_CHECK_CONDITION);
	ioctx->queue_status_only = true;
	srpt_queue_response(cmd);
	return 0;
}

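/*
 * Deferred work that refreshes the information of a single HCA port by
 * calling srpt_refresh_port().
 */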
static void srpt_refresh_port_work(struct work_struct *work)
{
	struct srpt_port *sport = container_of(work, struct srpt_port, work);

	srpt_refresh_port(sport);
}

static bool srpt_ch_list_empty(struct srpt_port *sport)
{
	struct srpt_nexus *nexus;
	bool res = true;

	rcu_read_lock();
	list_for_each_entry(nexus, &sport->nexus_list, entry)
		if (!list_empty(&nexus->ch_list))
			res = false;
	rcu_read_unlock();

	return res;
}

/**
 * srpt_release_sport - disable login and wait for associated channels
 * @sport: SRPT HCA port.
 */
static int srpt_release_sport(struct srpt_port *sport)
{
	struct srpt_nexus *nexus, *next_n;
	struct srpt_rdma_ch *ch;

	WARN_ON_ONCE(irqs_disabled());

	mutex_lock(&sport->mutex);
	srpt_set_enabled(sport, false);
	mutex_unlock(&sport->mutex);

	while (wait_event_timeout(sport->ch_releaseQ,
				  srpt_ch_list_empty(sport), 5 * HZ) <= 0) {
		pr_info("%s_%d: waiting for session unregistration ...\n",
			sport->sdev->device->name, sport->port);
		rcu_read_lock();
		list_for_each_entry(nexus, &sport->nexus_list, entry) {
			list_for_each_entry(ch, &nexus->ch_list, list) {
				pr_info("%s-%d: state %s\n",
					ch->sess_name, ch->qp->qp_num,
					get_ch_state_name(ch->state));
			}
		}
		rcu_read_unlock();
	}

	mutex_lock(&sport->mutex);
	list_for_each_entry_safe(nexus, next_n, &sport->nexus_list, entry) {
		list_del(&nexus->entry);
		kfree_rcu(nexus, rcu);
	}
	mutex_unlock(&sport->mutex);

	return 0;
}

static struct se_wwn *__srpt_lookup_wwn(const char *name)
{
	struct ib_device *dev;
	struct srpt_device *sdev;
	struct srpt_port *sport;
	int i;

	list_for_each_entry(sdev, &srpt_dev_list, list) {
		dev = sdev->device;
		if (!dev)
			continue;

		for (i = 0; i < dev->phys_port_cnt; i++) {
			sport = &sdev->port[i];

			if (strcmp(sport->port_guid, name) == 0)
				return &sport->port_guid_wwn;
			if (strcmp(sport->port_gid, name) == 0)
				return &sport->port_gid_wwn;
		}
	}

	return NULL;
}

static struct se_wwn *srpt_lookup_wwn(const char *name)
{
	struct se_wwn *wwn;

	spin_lock(&srpt_dev_lock);
	wwn = __srpt_lookup_wwn(name);
	spin_unlock(&srpt_dev_lock);

	return wwn;
}

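/*
 * Shared receive queue (SRQ) helpers: with use_srq enabled, a single SRQ and
 * its receive ioctx ring are shared by all channels of an HCA instead of
 * using per-QP receive queues.
 */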
static void srpt_free_srq(struct srpt_device *sdev)
{
	if (!sdev->srq)
		return;

	ib_destroy_srq(sdev->srq);
	srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
			     sdev->srq_size, srp_max_req_size,
			     DMA_FROM_DEVICE);
	sdev->srq = NULL;
}

static int srpt_alloc_srq(struct srpt_device *sdev)
{
	struct ib_srq_init_attr srq_attr = {
		.event_handler = srpt_srq_event,
		.srq_context = (void *)sdev,
		.attr.max_wr = sdev->srq_size,
		.attr.max_sge = 1,
		.srq_type = IB_SRQT_BASIC,
	};
	struct ib_device *device = sdev->device;
	struct ib_srq *srq;
	int i;

	WARN_ON_ONCE(sdev->srq);
	srq = ib_create_srq(sdev->pd, &srq_attr);
	if (IS_ERR(srq)) {
		pr_debug("ib_create_srq() failed: %ld\n", PTR_ERR(srq));
		return PTR_ERR(srq);
	}

	pr_debug("create SRQ #wr= %d max_allow=%d dev= %s\n", sdev->srq_size,
		 sdev->device->attrs.max_srq_wr, device->name);

	sdev->ioctx_ring = (struct srpt_recv_ioctx **)
		srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
				      sizeof(*sdev->ioctx_ring[0]),
				      srp_max_req_size, DMA_FROM_DEVICE);
	if (!sdev->ioctx_ring) {
		ib_destroy_srq(srq);
		return -ENOMEM;
	}

	sdev->use_srq = true;
	sdev->srq = srq;

	for (i = 0; i < sdev->srq_size; ++i) {
		INIT_LIST_HEAD(&sdev->ioctx_ring[i]->wait_list);
		srpt_post_recv(sdev, NULL, sdev->ioctx_ring[i]);
	}

	return 0;
}

static int srpt_use_srq(struct srpt_device *sdev, bool use_srq)
{
	struct ib_device *device = sdev->device;
	int ret = 0;

	if (!use_srq) {
		srpt_free_srq(sdev);
		sdev->use_srq = false;
	} else if (use_srq && !sdev->srq) {
		ret = srpt_alloc_srq(sdev);
	}
	pr_debug("%s(%s): use_srq = %d; ret = %d\n", __func__, device->name,
		 sdev->use_srq, ret);
	return ret;
}

/**
 * srpt_add_one - InfiniBand device addition callback function
 * @device: Describes a HCA.
 */
static void srpt_add_one(struct ib_device *device)
{
	struct srpt_device *sdev;
	struct srpt_port *sport;
	int i;

	pr_debug("device = %p\n", device);

	sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
	if (!sdev)
		goto err;

	sdev->device = device;
	mutex_init(&sdev->sdev_mutex);

	sdev->pd = ib_alloc_pd(device, 0);
	if (IS_ERR(sdev->pd))
		goto free_dev;

	sdev->lkey = sdev->pd->local_dma_lkey;

	sdev->srq_size = min(srpt_srq_size, sdev->device->attrs.max_srq_wr);

	srpt_use_srq(sdev, sdev->port[0].port_attrib.use_srq);

	if (!srpt_service_guid)
		srpt_service_guid = be64_to_cpu(device->node_guid);

	sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
	if (IS_ERR(sdev->cm_id))
		goto err_ring;

	/* print out target login information */
	pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx,"
		 "pkey=ffff,service_id=%016llx\n", srpt_service_guid,
		 srpt_service_guid, srpt_service_guid);

	/*
	 * We do not have a consistent service_id (ie. also id_ext of target_id)
	 * to identify this target. We currently use the guid of the first HCA
	 * in the system as service_id; therefore, the target_id will change
	 * if this HCA is gone bad and replaced by different HCA
	 */
	if (ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0))
		goto err_cm;

	INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
			      srpt_event_handler);
	ib_register_event_handler(&sdev->event_handler);

	WARN_ON(sdev->device->phys_port_cnt > ARRAY_SIZE(sdev->port));

	for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
		sport = &sdev->port[i - 1];
		INIT_LIST_HEAD(&sport->nexus_list);
		init_waitqueue_head(&sport->ch_releaseQ);
		mutex_init(&sport->mutex);
		sport->sdev = sdev;
		sport->port = i;
		sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE;
		sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE;
		sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
		sport->port_attrib.use_srq = false;
		INIT_WORK(&sport->work, srpt_refresh_port_work);

		if (srpt_refresh_port(sport)) {
			pr_err("MAD registration failed for %s-%d.\n",
			       sdev->device->name, i);
			goto err_event;
		}
	}

	spin_lock(&srpt_dev_lock);
	list_add_tail(&sdev->list, &srpt_dev_list);
	spin_unlock(&srpt_dev_lock);

	ib_set_client_data(device, &srpt_client, sdev);
	pr_debug("added %s.\n", device->name);
	return;

err_event:
	ib_unregister_event_handler(&sdev->event_handler);
err_cm:
	ib_destroy_cm_id(sdev->cm_id);
err_ring:
	srpt_free_srq(sdev);
	ib_dealloc_pd(sdev->pd);
free_dev:
	kfree(sdev);
err:
	pr_info("%s(%s) failed.\n", __func__, device->name);
}

/**
 * srpt_remove_one - InfiniBand device removal callback function
 * @device: Describes a HCA.
 * @client_data: The value passed as the third argument to ib_set_client_data().
 */
static void srpt_remove_one(struct ib_device *device, void *client_data)
{
	struct srpt_device *sdev = client_data;
	int i;

	if (!sdev) {
		pr_info("%s(%s): nothing to do.\n", __func__, device->name);
		return;
	}

	srpt_unregister_mad_agent(sdev);

	ib_unregister_event_handler(&sdev->event_handler);

	/* Cancel any work queued by the just unregistered IB event handler. */
	for (i = 0; i < sdev->device->phys_port_cnt; i++)
		cancel_work_sync(&sdev->port[i].work);

	ib_destroy_cm_id(sdev->cm_id);

	/*
	 * Unregistering a target must happen after destroying sdev->cm_id
	 * such that no new SRP_LOGIN_REQ information units can arrive while
	 * destroying the target.
	 */
	spin_lock(&srpt_dev_lock);
	list_del(&sdev->list);
	spin_unlock(&srpt_dev_lock);

	for (i = 0; i < sdev->device->phys_port_cnt; i++)
		srpt_release_sport(&sdev->port[i]);

	srpt_free_srq(sdev);

	ib_dealloc_pd(sdev->pd);

	kfree(sdev);
}

static struct ib_client srpt_client = {
	.name = DRV_NAME,
	.add = srpt_add_one,
	.remove = srpt_remove_one
};

static int srpt_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static int srpt_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}

static char *srpt_get_fabric_name(void)
{
	return "srpt";
}

static struct srpt_port *srpt_tpg_to_sport(struct se_portal_group *tpg)
{
	return tpg->se_tpg_wwn->priv;
}

static char *srpt_get_fabric_wwn(struct se_portal_group *tpg)
{
	struct srpt_port *sport = srpt_tpg_to_sport(tpg);

	WARN_ON_ONCE(tpg != &sport->port_guid_tpg &&
		     tpg != &sport->port_gid_tpg);
	return tpg == &sport->port_guid_tpg ? sport->port_guid :
		sport->port_gid;
}

static u16 srpt_get_tag(struct se_portal_group *tpg)
{
	return 1;
}

static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static void srpt_release_cmd(struct se_cmd *se_cmd)
{
	struct srpt_send_ioctx *ioctx = container_of(se_cmd,
				struct srpt_send_ioctx, cmd);
	struct srpt_rdma_ch *ch = ioctx->ch;
	unsigned long flags;

	WARN_ON_ONCE(ioctx->state != SRPT_STATE_DONE &&
		     !(ioctx->cmd.transport_state & CMD_T_ABORTED));

	if (ioctx->n_rw_ctx) {
		srpt_free_rw_ctxs(ch, ioctx);
		ioctx->n_rw_ctx = 0;
	}

	spin_lock_irqsave(&ch->spinlock, flags);
	list_add(&ioctx->free_list, &ch->free_list);
	spin_unlock_irqrestore(&ch->spinlock, flags);
}

/**
 * srpt_close_session - forcibly close a session
 * @se_sess: SCSI target session.
 *
 * Callback function invoked by the TCM core to clean up sessions associated
 * with a node ACL when the user invokes
 * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
 */
static void srpt_close_session(struct se_session *se_sess)
{
	struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr;

	srpt_disconnect_ch_sync(ch);
}

/**
 * srpt_sess_get_index - return the value of scsiAttIntrPortIndex (SCSI-MIB)
 * @se_sess: SCSI target session.
 *
 * A quote from RFC 4455 (SCSI-MIB) about this MIB object:
 * This object represents an arbitrary integer used to uniquely identify a
 * particular attached remote initiator port to a particular SCSI target port
 * within a particular SCSI target device within a particular SCSI instance.
 */
static u32 srpt_sess_get_index(struct se_session *se_sess)
{
	return 0;
}

static void srpt_set_default_node_attrs(struct se_node_acl *nacl)
{
}

/* Note: only used from inside debug printk's by the TCM core. */
static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd)
{
	struct srpt_send_ioctx *ioctx;

	ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
	return ioctx->state;
}

static int srpt_parse_guid(u64 *guid, const char *name)
{
	u16 w[4];
	int ret = -EINVAL;

	if (sscanf(name, "%hx:%hx:%hx:%hx", &w[0], &w[1], &w[2], &w[3]) != 4)
		goto out;
	*guid = get_unaligned_be64(w);
	ret = 0;
out:
	return ret;
}

/**
 * srpt_parse_i_port_id - parse an initiator port ID
 * @name: ASCII representation of a 128-bit initiator port ID.
 * @i_port_id: Binary 128-bit port ID.
 */
static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
{
	const char *p;
	unsigned len, count, leading_zero_bytes;
	int ret;

	p = name;
	if (strncasecmp(p, "0x", 2) == 0)
		p += 2;
	ret = -EINVAL;
	len = strlen(p);
	if (len % 2)
		goto out;
	count = min(len / 2, 16U);
	leading_zero_bytes = 16 - count;
	memset(i_port_id, 0, leading_zero_bytes);
	ret = hex2bin(i_port_id + leading_zero_bytes, p, count);
	if (ret < 0)
		pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", ret);
out:
	return ret;
}

/*
 * configfs callback function invoked for
 * mkdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
 */
static int srpt_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
{
	u64 guid;
	u8 i_port_id[16];
	int ret;

	ret = srpt_parse_guid(&guid, name);
	if (ret < 0)
		ret = srpt_parse_i_port_id(i_port_id, name);
	if (ret < 0)
		pr_err("invalid initiator port ID %s\n", name);
	return ret;
}

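/*
 * The attributes below live under the per-TPG "attrib" configfs group, e.g.
 * /sys/kernel/config/target/srpt/$port/$tpg/attrib/srp_max_rdma_size.
 */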
static ssize_t srpt_tpg_attrib_srp_max_rdma_size_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);

	return sprintf(page, "%u\n", sport->port_attrib.srp_max_rdma_size);
}

static ssize_t srpt_tpg_attrib_srp_max_rdma_size_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
	unsigned long val;
	int ret;

	ret = kstrtoul(page, 0, &val);
	if (ret < 0) {
		pr_err("kstrtoul() failed with ret: %d\n", ret);
		return -EINVAL;
	}
	if (val > MAX_SRPT_RDMA_SIZE) {
		pr_err("val: %lu exceeds MAX_SRPT_RDMA_SIZE: %d\n", val,
		       MAX_SRPT_RDMA_SIZE);
		return -EINVAL;
	}
	if (val < DEFAULT_MAX_RDMA_SIZE) {
		pr_err("val: %lu smaller than DEFAULT_MAX_RDMA_SIZE: %d\n",
		       val, DEFAULT_MAX_RDMA_SIZE);
		return -EINVAL;
	}
	sport->port_attrib.srp_max_rdma_size = val;

	return count;
}

static ssize_t srpt_tpg_attrib_srp_max_rsp_size_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);

	return sprintf(page, "%u\n", sport->port_attrib.srp_max_rsp_size);
}

static ssize_t srpt_tpg_attrib_srp_max_rsp_size_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
	unsigned long val;
	int ret;

	ret = kstrtoul(page, 0, &val);
	if (ret < 0) {
		pr_err("kstrtoul() failed with ret: %d\n", ret);
		return -EINVAL;
	}
	if (val > MAX_SRPT_RSP_SIZE) {
		pr_err("val: %lu exceeds MAX_SRPT_RSP_SIZE: %d\n", val,
		       MAX_SRPT_RSP_SIZE);
		return -EINVAL;
	}
	if (val < MIN_MAX_RSP_SIZE) {
		pr_err("val: %lu smaller than MIN_MAX_RSP_SIZE: %d\n", val,
		       MIN_MAX_RSP_SIZE);
		return -EINVAL;
	}
	sport->port_attrib.srp_max_rsp_size = val;

	return count;
}

static ssize_t srpt_tpg_attrib_srp_sq_size_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);

	return sprintf(page, "%u\n", sport->port_attrib.srp_sq_size);
}

static ssize_t srpt_tpg_attrib_srp_sq_size_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
	unsigned long val;
	int ret;

	ret = kstrtoul(page, 0, &val);
	if (ret < 0) {
		pr_err("kstrtoul() failed with ret: %d\n", ret);
		return -EINVAL;
	}
	if (val > MAX_SRPT_SRQ_SIZE) {
		pr_err("val: %lu exceeds MAX_SRPT_SRQ_SIZE: %d\n", val,
		       MAX_SRPT_SRQ_SIZE);
		return -EINVAL;
	}
	if (val < MIN_SRPT_SRQ_SIZE) {
		pr_err("val: %lu smaller than MIN_SRPT_SRQ_SIZE: %d\n", val,
		       MIN_SRPT_SRQ_SIZE);
		return -EINVAL;
	}
	sport->port_attrib.srp_sq_size = val;

	return count;
}

static ssize_t srpt_tpg_attrib_use_srq_show(struct config_item *item,
					    char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);

	return sprintf(page, "%d\n", sport->port_attrib.use_srq);
}

static ssize_t srpt_tpg_attrib_use_srq_store(struct config_item *item,
					     const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
	struct srpt_device *sdev = sport->sdev;
	unsigned long val;
	bool enabled;
	int ret;

	ret = kstrtoul(page, 0, &val);
	if (ret < 0)
		return ret;
	if (val != !!val)
		return -EINVAL;

	ret = mutex_lock_interruptible(&sdev->sdev_mutex);
	if (ret < 0)
		return ret;
	ret = mutex_lock_interruptible(&sport->mutex);
	if (ret < 0)
		goto unlock_sdev;
	enabled = sport->enabled;
	/* Log out all initiator systems before changing 'use_srq'. */
	srpt_set_enabled(sport, false);
	sport->port_attrib.use_srq = val;
	srpt_use_srq(sdev, sport->port_attrib.use_srq);
	srpt_set_enabled(sport, enabled);
	ret = count;
	mutex_unlock(&sport->mutex);
unlock_sdev:
	mutex_unlock(&sdev->sdev_mutex);

	return ret;
}

CONFIGFS_ATTR(srpt_tpg_attrib_, srp_max_rdma_size);
CONFIGFS_ATTR(srpt_tpg_attrib_, srp_max_rsp_size);
CONFIGFS_ATTR(srpt_tpg_attrib_, srp_sq_size);
CONFIGFS_ATTR(srpt_tpg_attrib_, use_srq);

static struct configfs_attribute *srpt_tpg_attrib_attrs[] = {
	&srpt_tpg_attrib_attr_srp_max_rdma_size,
	&srpt_tpg_attrib_attr_srp_max_rsp_size,
	&srpt_tpg_attrib_attr_srp_sq_size,
	&srpt_tpg_attrib_attr_use_srq,
	NULL,
};

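/*
 * The "enable" attribute below controls whether a target port group accepts
 * SRP logins; for example, writing 1 to the tpg's enable file in configfs
 * starts accepting logins on that port.
 */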
static ssize_t srpt_tpg_enable_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);

	return snprintf(page, PAGE_SIZE, "%d\n", (sport->enabled) ? 1 : 0);
}

static ssize_t srpt_tpg_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract srpt_tpg_store_enable\n");
		return -EINVAL;
	}

	if ((tmp != 0) && (tmp != 1)) {
		pr_err("Illegal value for srpt_tpg_store_enable: %lu\n", tmp);
		return -EINVAL;
	}

	mutex_lock(&sport->mutex);
	srpt_set_enabled(sport, tmp);
	mutex_unlock(&sport->mutex);

	return count;
}

CONFIGFS_ATTR(srpt_tpg_, enable);

static struct configfs_attribute *srpt_tpg_attrs[] = {
	&srpt_tpg_attr_enable,
	NULL,
};

/**
 * srpt_make_tpg - configfs callback invoked for mkdir /sys/kernel/config/target/$driver/$port/$tpg
 * @wwn: Corresponds to $driver/$port.
 * @group: Not used.
 * @name: $tpg.
 */
static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
					     struct config_group *group,
					     const char *name)
{
	struct srpt_port *sport = wwn->priv;
	static struct se_portal_group *tpg;
	int res;

	WARN_ON_ONCE(wwn != &sport->port_guid_wwn &&
		     wwn != &sport->port_gid_wwn);
	tpg = wwn == &sport->port_guid_wwn ? &sport->port_guid_tpg :
		&sport->port_gid_tpg;
	res = core_tpg_register(wwn, tpg, SCSI_PROTOCOL_SRP);
	if (res)
		return ERR_PTR(res);

	return tpg;
}

/**
 * srpt_drop_tpg - configfs callback invoked for rmdir /sys/kernel/config/target/$driver/$port/$tpg
 * @tpg: Target portal group to deregister.
 */
static void srpt_drop_tpg(struct se_portal_group *tpg)
{
	struct srpt_port *sport = srpt_tpg_to_sport(tpg);

	sport->enabled = false;
	core_tpg_deregister(tpg);
}

/**
 * srpt_make_tport - configfs callback invoked for mkdir /sys/kernel/config/target/$driver/$port
 * @tf: Not used.
 * @group: Not used.
 * @name: $port.
 */
static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
				      struct config_group *group,
				      const char *name)
{
	return srpt_lookup_wwn(name) ? : ERR_PTR(-EINVAL);
}

/**
 * srpt_drop_tport - configfs callback invoked for rmdir /sys/kernel/config/target/$driver/$port
 * @wwn: $port.
 */
static void srpt_drop_tport(struct se_wwn *wwn)
{
}

static ssize_t srpt_wwn_version_show(struct config_item *item, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION);
}

CONFIGFS_ATTR_RO(srpt_wwn_, version);

static struct configfs_attribute *srpt_wwn_attrs[] = {
	&srpt_wwn_attr_version,
	NULL,
};

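/*
 * Fabric operations registered with the LIO/TCM core; these callbacks tie the
 * SRP target driver into the generic target configfs infrastructure.
 */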
static const struct target_core_fabric_ops srpt_template = {
	.module				= THIS_MODULE,
	.name				= "srpt",
	.get_fabric_name		= srpt_get_fabric_name,
	.tpg_get_wwn			= srpt_get_fabric_wwn,
	.tpg_get_tag			= srpt_get_tag,
	.tpg_check_demo_mode		= srpt_check_false,
	.tpg_check_demo_mode_cache	= srpt_check_true,
	.tpg_check_demo_mode_write_protect = srpt_check_true,
	.tpg_check_prod_mode_write_protect = srpt_check_false,
	.tpg_get_inst_index		= srpt_tpg_get_inst_index,
	.release_cmd			= srpt_release_cmd,
	.check_stop_free		= srpt_check_stop_free,
	.close_session			= srpt_close_session,
	.sess_get_index			= srpt_sess_get_index,
	.sess_get_initiator_sid		= NULL,
	.write_pending			= srpt_write_pending,
	.write_pending_status		= srpt_write_pending_status,
	.set_default_node_attributes	= srpt_set_default_node_attrs,
	.get_cmd_state			= srpt_get_tcm_cmd_state,
	.queue_data_in			= srpt_queue_data_in,
	.queue_status			= srpt_queue_status,
	.queue_tm_rsp			= srpt_queue_tm_rsp,
	.aborted_task			= srpt_aborted_task,
	/*
	 * Setup function pointers for generic logic in
	 * target_core_fabric_configfs.c
	 */
	.fabric_make_wwn		= srpt_make_tport,
	.fabric_drop_wwn		= srpt_drop_tport,
	.fabric_make_tpg		= srpt_make_tpg,
	.fabric_drop_tpg		= srpt_drop_tpg,
	.fabric_init_nodeacl		= srpt_init_nodeacl,

	.tfc_wwn_attrs			= srpt_wwn_attrs,
	.tfc_tpg_base_attrs		= srpt_tpg_attrs,
	.tfc_tpg_attrib_attrs		= srpt_tpg_attrib_attrs,
};

/**
 * srpt_init_module - kernel module initialization
 *
 * Note: Since ib_register_client() registers callback functions, and since at
 * least one of these callback functions (srpt_add_one()) calls target core
 * functions, this driver must be registered with the target core before
 * ib_register_client() is called.
 */
static int __init srpt_init_module(void)
{
	int ret;

	ret = -EINVAL;
	if (srp_max_req_size < MIN_MAX_REQ_SIZE) {
		pr_err("invalid value %d for kernel module parameter srp_max_req_size -- must be at least %d.\n",
		       srp_max_req_size, MIN_MAX_REQ_SIZE);
		goto out;
	}

	if (srpt_srq_size < MIN_SRPT_SRQ_SIZE
	    || srpt_srq_size > MAX_SRPT_SRQ_SIZE) {
		pr_err("invalid value %d for kernel module parameter srpt_srq_size -- must be in the range [%d..%d].\n",
		       srpt_srq_size, MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE);
		goto out;
	}

	ret = target_register_template(&srpt_template);
	if (ret)
		goto out;

	ret = ib_register_client(&srpt_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		goto out_unregister_target;
	}

	return 0;

out_unregister_target:
	target_unregister_template(&srpt_template);
out:
	return ret;
}

static void __exit srpt_cleanup_module(void)
{
	ib_unregister_client(&srpt_client);
	target_unregister_template(&srpt_template);
}

module_init(srpt_init_module);
module_exit(srpt_cleanup_module);