/*
 * Copyright (c) 2006 - 2009 Mellanox Technology Inc.  All rights reserved.
 * Copyright (C) 2008 - 2011 Bart Van Assche <bvanassche@acm.org>.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/kthread.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/inet.h>
#include <rdma/ib_cache.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include "ib_srpt.h"

/* Name of this kernel module. */
#define DRV_NAME		"ib_srpt"
#define DRV_VERSION		"2.0.0"
#define DRV_RELDATE		"2011-02-14"

#define SRPT_ID_STRING	"Linux SRP target"

#undef pr_fmt
#define pr_fmt(fmt) DRV_NAME " " fmt

MODULE_AUTHOR("Vu Pham and Bart Van Assche");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol target "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");
static u64 srpt_service_guid;
static DEFINE_SPINLOCK(srpt_dev_lock);	/* Protects srpt_dev_list. */
static LIST_HEAD(srpt_dev_list);	/* List of srpt_device structures. */

static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
module_param(srp_max_req_size, int, 0444);
MODULE_PARM_DESC(srp_max_req_size,
		 "Maximum size of SRP request messages in bytes.");

static int srpt_srq_size = DEFAULT_SRPT_SRQ_SIZE;
module_param(srpt_srq_size, int, 0444);
MODULE_PARM_DESC(srpt_srq_size,
		 "Shared receive queue (SRQ) size.");

static int srpt_get_u64_x(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg);
}
module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
		  0444);
MODULE_PARM_DESC(srpt_service_guid,
		 "Using this value for ioc_guid, id_ext, and cm_listen_id"
		 " instead of using the node_guid of the first HCA.");

static struct ib_client srpt_client;
/* Protects both rdma_cm_port and rdma_cm_id. */
static DEFINE_MUTEX(rdma_cm_mutex);
/* Port number RDMA/CM will bind to. */
static u16 rdma_cm_port;
static struct rdma_cm_id *rdma_cm_id;
static void srpt_release_cmd(struct se_cmd *se_cmd);
static void srpt_free_ch(struct kref *kref);
static int srpt_queue_status(struct se_cmd *cmd);
static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void srpt_process_wait_list(struct srpt_rdma_ch *ch);
/*
 * The only allowed channel state changes are those that change the channel
 * state into a state with a higher numerical value. Hence the new > prev test.
 */
static bool srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new)
{
	unsigned long flags;
	enum rdma_ch_state prev;
	bool changed = false;

	spin_lock_irqsave(&ch->spinlock, flags);
	prev = ch->state;
	if (new > prev) {
		ch->state = new;
		changed = true;
	}
	spin_unlock_irqrestore(&ch->spinlock, flags);

	return changed;
}

/**
 * srpt_event_handler - asynchronous IB event callback function
 * @handler: IB event handler registered by ib_register_event_handler().
 * @event: Description of the event that occurred.
 *
 * Callback function called by the InfiniBand core when an asynchronous IB
 * event occurs. This callback may occur in interrupt context. See also
 * section 11.5.2, Set Asynchronous Event Handler in the InfiniBand
 * Architecture Specification.
 */
static void srpt_event_handler(struct ib_event_handler *handler,
			       struct ib_event *event)
{
	struct srpt_device *sdev;
	struct srpt_port *sport;
	u8 port_num;

	sdev = ib_get_client_data(event->device, &srpt_client);
	if (!sdev || sdev->device != event->device)
		return;

	pr_debug("ASYNC event= %d on device= %s\n", event->event,
		 sdev->device->name);

	switch (event->event) {
	case IB_EVENT_PORT_ERR:
		port_num = event->element.port_num - 1;
		if (port_num < sdev->device->phys_port_cnt) {
			sport = &sdev->port[port_num];
			sport->lid = 0;
			sport->sm_lid = 0;
		} else {
			WARN(true, "event %d: port_num %d out of range 1..%d\n",
			     event->event, port_num + 1,
			     sdev->device->phys_port_cnt);
		}
		break;
	case IB_EVENT_PORT_ACTIVE:
	case IB_EVENT_LID_CHANGE:
	case IB_EVENT_PKEY_CHANGE:
	case IB_EVENT_SM_CHANGE:
	case IB_EVENT_CLIENT_REREGISTER:
	case IB_EVENT_GID_CHANGE:
		/* Refresh port data asynchronously. */
		port_num = event->element.port_num - 1;
		if (port_num < sdev->device->phys_port_cnt) {
			sport = &sdev->port[port_num];
			if (!sport->lid && !sport->sm_lid)
				schedule_work(&sport->work);
		} else {
			WARN(true, "event %d: port_num %d out of range 1..%d\n",
			     event->event, port_num + 1,
			     sdev->device->phys_port_cnt);
		}
		break;
	default:
		pr_err("received unrecognized IB event %d\n", event->event);
		break;
	}
}

/**
 * srpt_srq_event - SRQ event callback function
 * @event: Description of the event that occurred.
 * @ctx: Context pointer specified at SRQ creation time.
 */
static void srpt_srq_event(struct ib_event *event, void *ctx)
{
	pr_debug("SRQ event %d\n", event->event);
}

static const char *get_ch_state_name(enum rdma_ch_state s)
{
	switch (s) {
	case CH_CONNECTING:
		return "connecting";
	case CH_LIVE:
		return "live";
	case CH_DISCONNECTING:
		return "disconnecting";
	case CH_DRAINING:
		return "draining";
	case CH_DISCONNECTED:
		return "disconnected";
	}
	return "???";
}

/**
 * srpt_qp_event - QP event callback function
 * @event: Description of the event that occurred.
 * @ch: SRPT RDMA channel.
 */
static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
{
	pr_debug("QP event %d on ch=%p sess_name=%s state=%d\n",
		 event->event, ch, ch->sess_name, ch->state);

	switch (event->event) {
	case IB_EVENT_COMM_EST:
		if (ch->using_rdma_cm)
			rdma_notify(ch->rdma_cm.cm_id, event->event);
		else
			ib_cm_notify(ch->ib_cm.cm_id, event->event);
		break;
	case IB_EVENT_QP_LAST_WQE_REACHED:
		pr_debug("%s-%d, state %s: received Last WQE event.\n",
			 ch->sess_name, ch->qp->qp_num,
			 get_ch_state_name(ch->state));
		break;
	default:
		pr_err("received unrecognized IB QP event %d\n", event->event);
		break;
	}
}

/**
 * srpt_set_ioc - initialize a IOUnitInfo structure
 * @c_list: controller list.
 * @slot: one-based slot number.
 * @value: four-bit value.
 *
 * Copies the lowest four bits of value in element slot of the array of four
 * bit elements called c_list (controller list). The index slot is one-based.
 */
static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value)
{
	u16 id;
	u8 tmp;

	id = (slot - 1) / 2;
	if (slot & 0x1) {
		tmp = c_list[id] & 0xf;
		c_list[id] = (value << 4) | tmp;
	} else {
		tmp = c_list[id] & 0xf0;
		c_list[id] = (value & 0xf) | tmp;
	}
}
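/*
 * Each byte of the controller list holds two of these four-bit slots: an odd
 * slot number selects the high nibble and an even slot number the low nibble.
 * E.g. srpt_set_ioc(c_list, 1, 1) followed by srpt_set_ioc(c_list, 2, 0)
 * leaves c_list[0] == 0x10.
 */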
/**
 * srpt_get_class_port_info - copy ClassPortInfo to a management datagram
 * @mad: Datagram that will be sent as response to DM_ATTR_CLASS_PORT_INFO.
 *
 * See also section 16.3.3.1 ClassPortInfo in the InfiniBand Architecture
 * Specification.
 */
static void srpt_get_class_port_info(struct ib_dm_mad *mad)
{
	struct ib_class_port_info *cif;

	cif = (struct ib_class_port_info *)mad->data;
	memset(cif, 0, sizeof(*cif));
	cif->base_version = 1;
	cif->class_version = 1;

	ib_set_cpi_resp_time(cif, 20);
	mad->mad_hdr.status = 0;
}

/**
 * srpt_get_iou - write IOUnitInfo to a management datagram
 * @mad: Datagram that will be sent as response to DM_ATTR_IOU_INFO.
 *
 * See also section 16.3.3.3 IOUnitInfo in the InfiniBand Architecture
 * Specification. See also section B.7, table B.6 in the SRP r16a document.
 */
static void srpt_get_iou(struct ib_dm_mad *mad)
{
	struct ib_dm_iou_info *ioui;
	u8 slot;
	int i;

	ioui = (struct ib_dm_iou_info *)mad->data;
	ioui->change_id = cpu_to_be16(1);
	ioui->max_controllers = 16;

	/* set present for slot 1 and empty for the rest */
	srpt_set_ioc(ioui->controller_list, 1, 1);
	for (i = 1, slot = 2; i < 16; i++, slot++)
		srpt_set_ioc(ioui->controller_list, slot, 0);

	mad->mad_hdr.status = 0;
}

/**
 * srpt_get_ioc - write IOControllerprofile to a management datagram
 * @sport: HCA port through which the MAD has been received.
 * @slot: Slot number specified in DM_ATTR_IOC_PROFILE query.
 * @mad: Datagram that will be sent as response to DM_ATTR_IOC_PROFILE.
 *
 * See also section 16.3.3.4 IOControllerProfile in the InfiniBand
 * Architecture Specification. See also section B.7, table B.7 in the SRP
 * r16a document.
 */
static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
			 struct ib_dm_mad *mad)
{
	struct srpt_device *sdev = sport->sdev;
	struct ib_dm_ioc_profile *iocp;
	int send_queue_depth;

	iocp = (struct ib_dm_ioc_profile *)mad->data;

	if (!slot || slot > 16) {
		mad->mad_hdr.status
			= cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
		return;
	}

	if (slot > 2) {
		mad->mad_hdr.status
			= cpu_to_be16(DM_MAD_STATUS_NO_IOC);
		return;
	}

	if (sdev->use_srq)
		send_queue_depth = sdev->srq_size;
	else
		send_queue_depth = min(MAX_SRPT_RQ_SIZE,
				       sdev->device->attrs.max_qp_wr);

	memset(iocp, 0, sizeof(*iocp));
	strcpy(iocp->id_string, SRPT_ID_STRING);
	iocp->guid = cpu_to_be64(srpt_service_guid);
	iocp->vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id);
	iocp->device_id = cpu_to_be32(sdev->device->attrs.vendor_part_id);
	iocp->device_version = cpu_to_be16(sdev->device->attrs.hw_ver);
	iocp->subsys_vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id);
	iocp->subsys_device_id = 0x0;
	iocp->io_class = cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
	iocp->io_subclass = cpu_to_be16(SRP_IO_SUBCLASS);
	iocp->protocol = cpu_to_be16(SRP_PROTOCOL);
	iocp->protocol_version = cpu_to_be16(SRP_PROTOCOL_VERSION);
	iocp->send_queue_depth = cpu_to_be16(send_queue_depth);
	iocp->rdma_read_depth = 4;
	iocp->send_size = cpu_to_be32(srp_max_req_size);
	iocp->rdma_size = cpu_to_be32(min(sport->port_attrib.srp_max_rdma_size,
					  1U << 24));
	iocp->num_svc_entries = 1;
	iocp->op_cap_mask = SRP_SEND_TO_IOC | SRP_SEND_FROM_IOC |
		SRP_RDMA_READ_FROM_IOC | SRP_RDMA_WRITE_FROM_IOC;

	mad->mad_hdr.status = 0;
}

/**
 * srpt_get_svc_entries - write ServiceEntries to a management datagram
 * @ioc_guid: I/O controller GUID to use in reply.
 * @slot: I/O controller number.
 * @hi: End of the range of service entries to be specified in the reply.
 * @lo: Start of the range of service entries to be specified in the reply.
 * @mad: Datagram that will be sent as response to DM_ATTR_SVC_ENTRIES.
 *
 * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
 * Specification. See also section B.7, table B.8 in the SRP r16a document.
 */
static void srpt_get_svc_entries(u64 ioc_guid,
				 u16 slot, u8 hi, u8 lo, struct ib_dm_mad *mad)
{
	struct ib_dm_svc_entries *svc_entries;

	WARN_ON(!ioc_guid);

	if (!slot || slot > 16) {
		mad->mad_hdr.status
			= cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
		return;
	}

	if (slot > 2 || lo > hi || hi > 1) {
		mad->mad_hdr.status
			= cpu_to_be16(DM_MAD_STATUS_NO_IOC);
		return;
	}

	svc_entries = (struct ib_dm_svc_entries *)mad->data;
	memset(svc_entries, 0, sizeof(*svc_entries));
	svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid);
	snprintf(svc_entries->service_entries[0].name,
		 sizeof(svc_entries->service_entries[0].name),
		 "%s%016llx",
		 SRP_SERVICE_NAME_PREFIX,
		 ioc_guid);

	mad->mad_hdr.status = 0;
}

/**
 * srpt_mgmt_method_get - process a received management datagram
 * @sp: HCA port through which the MAD has been received.
 * @rq_mad: received MAD.
 * @rsp_mad: response MAD.
 */
static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
				 struct ib_dm_mad *rsp_mad)
{
	u16 attr_id;
	u32 slot;
	u8 hi, lo;

	attr_id = be16_to_cpu(rq_mad->mad_hdr.attr_id);
	switch (attr_id) {
	case DM_ATTR_CLASS_PORT_INFO:
		srpt_get_class_port_info(rsp_mad);
		break;
	case DM_ATTR_IOU_INFO:
		srpt_get_iou(rsp_mad);
		break;
	case DM_ATTR_IOC_PROFILE:
		slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
		srpt_get_ioc(sp, slot, rsp_mad);
		break;
	case DM_ATTR_SVC_ENTRIES:
		slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
		hi = (u8) ((slot >> 8) & 0xff);
		lo = (u8) (slot & 0xff);
		slot = (u16) ((slot >> 16) & 0xffff);
		srpt_get_svc_entries(srpt_service_guid,
				     slot, hi, lo, rsp_mad);
		break;
	default:
		rsp_mad->mad_hdr.status =
		    cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
		break;
	}
}
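/*
 * For DM_ATTR_SVC_ENTRIES the 32-bit attribute modifier is unpacked above as
 * slot in bits 31..16, hi in bits 15..8 and lo in bits 7..0, so an attr_mod
 * of 0x00010000 requests service entries 0..0 of I/O controller slot 1.
 */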
/**
 * srpt_mad_send_handler - MAD send completion callback
 * @mad_agent: Return value of ib_register_mad_agent().
 * @mad_wc: Work completion reporting that the MAD has been sent.
 */
static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
				  struct ib_mad_send_wc *mad_wc)
{
	rdma_destroy_ah(mad_wc->send_buf->ah);
	ib_free_send_mad(mad_wc->send_buf);
}

/**
 * srpt_mad_recv_handler - MAD reception callback function
 * @mad_agent: Return value of ib_register_mad_agent().
 * @send_buf: Not used.
 * @mad_wc: Work completion reporting that a MAD has been received.
 */
static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
				  struct ib_mad_send_buf *send_buf,
				  struct ib_mad_recv_wc *mad_wc)
{
	struct srpt_port *sport = (struct srpt_port *)mad_agent->context;
	struct ib_ah *ah;
	struct ib_mad_send_buf *rsp;
	struct ib_dm_mad *dm_mad;

	if (!mad_wc || !mad_wc->recv_buf.mad)
		return;

	ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc,
				  mad_wc->recv_buf.grh, mad_agent->port_num);
	if (IS_ERR(ah))
		goto err;

	BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR);

	rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
				 mad_wc->wc->pkey_index, 0,
				 IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA,
				 GFP_KERNEL,
				 IB_MGMT_BASE_VERSION);
	if (IS_ERR(rsp))
		goto err_rsp;

	rsp->ah = ah;

	dm_mad = rsp->mad;
	memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof(*dm_mad));
	dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
	dm_mad->mad_hdr.status = 0;

	switch (mad_wc->recv_buf.mad->mad_hdr.method) {
	case IB_MGMT_METHOD_GET:
		srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad, dm_mad);
		break;
	case IB_MGMT_METHOD_SET:
		dm_mad->mad_hdr.status =
		    cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
		break;
	default:
		dm_mad->mad_hdr.status =
		    cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
		break;
	}

	if (!ib_post_send_mad(rsp, NULL)) {
		ib_free_recv_mad(mad_wc);
		/* will destroy_ah & free_send_mad in send completion */
		return;
	}

	ib_free_send_mad(rsp);

err_rsp:
	rdma_destroy_ah(ah);
err:
	ib_free_recv_mad(mad_wc);
}

static int srpt_format_guid(char *buf, unsigned int size, const __be64 *guid)
{
	const __be16 *g = (const __be16 *)guid;

	return snprintf(buf, size, "%04x:%04x:%04x:%04x",
			be16_to_cpu(g[0]), be16_to_cpu(g[1]),
			be16_to_cpu(g[2]), be16_to_cpu(g[3]));
}
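/*
 * srpt_format_guid() renders a GUID as four colon-separated 16-bit groups,
 * e.g. the GUID 0x0002c903000a1b2c becomes "0002:c903:000a:1b2c".
 */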
/**
 * srpt_refresh_port - configure a HCA port
 * @sport: SRPT HCA port.
 *
 * Enable InfiniBand management datagram processing, update the cached sm_lid,
 * lid and gid values, and register a callback function for processing MADs
 * on the specified port.
 *
 * Note: It is safe to call this function more than once for the same port.
 */
static int srpt_refresh_port(struct srpt_port *sport)
{
	struct ib_mad_reg_req reg_req;
	struct ib_port_modify port_modify;
	struct ib_port_attr port_attr;
	int ret;

	memset(&port_modify, 0, sizeof(port_modify));
	port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
	port_modify.clr_port_cap_mask = 0;

	ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
	if (ret)
		goto err_mod_port;

	ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
	if (ret)
		goto err_query_port;

	sport->sm_lid = port_attr.sm_lid;
	sport->lid = port_attr.lid;

	ret = rdma_query_gid(sport->sdev->device, sport->port, 0, &sport->gid);
	if (ret)
		goto err_query_port;

	sport->port_guid_wwn.priv = sport;
	srpt_format_guid(sport->port_guid, sizeof(sport->port_guid),
			 &sport->gid.global.interface_id);
	sport->port_gid_wwn.priv = sport;
	snprintf(sport->port_gid, sizeof(sport->port_gid),
		 "0x%016llx%016llx",
		 be64_to_cpu(sport->gid.global.subnet_prefix),
		 be64_to_cpu(sport->gid.global.interface_id));

	if (!sport->mad_agent) {
		memset(&reg_req, 0, sizeof(reg_req));
		reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
		reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION;
		set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
		set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);

		sport->mad_agent = ib_register_mad_agent(sport->sdev->device,
							 sport->port,
							 IB_QPT_GSI,
							 &reg_req, 0,
							 srpt_mad_send_handler,
							 srpt_mad_recv_handler,
							 sport, 0);
		if (IS_ERR(sport->mad_agent)) {
			ret = PTR_ERR(sport->mad_agent);
			sport->mad_agent = NULL;
			goto err_query_port;
		}
	}

	return 0;

err_query_port:
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
	ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);

err_mod_port:
	return ret;
}
/**
 * srpt_unregister_mad_agent - unregister MAD callback functions
 * @sdev: SRPT HCA pointer.
 *
 * Note: It is safe to call this function more than once for the same device.
 */
static void srpt_unregister_mad_agent(struct srpt_device *sdev)
{
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
	};
	struct srpt_port *sport;
	int i;

	for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
		sport = &sdev->port[i - 1];
		WARN_ON(sport->port != i);
		if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0)
			pr_err("disabling MAD processing failed.\n");
		if (sport->mad_agent) {
			ib_unregister_mad_agent(sport->mad_agent);
			sport->mad_agent = NULL;
		}
	}
}
/**
 * srpt_alloc_ioctx - allocate a SRPT I/O context structure
 * @sdev: SRPT HCA pointer.
 * @ioctx_size: I/O context size.
 * @dma_size: Size of I/O context DMA buffer.
 * @dir: DMA data direction.
 */
static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
					   int ioctx_size, int dma_size,
					   enum dma_data_direction dir)
{
	struct srpt_ioctx *ioctx;

	ioctx = kmalloc(ioctx_size, GFP_KERNEL);
	if (!ioctx)
		goto err;

	ioctx->buf = kmalloc(dma_size, GFP_KERNEL);
	if (!ioctx->buf)
		goto err_free_ioctx;

	ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf, dma_size, dir);
	if (ib_dma_mapping_error(sdev->device, ioctx->dma))
		goto err_free_buf;

	return ioctx;

err_free_buf:
	kfree(ioctx->buf);
err_free_ioctx:
	kfree(ioctx);
err:
	return NULL;
}

/**
 * srpt_free_ioctx - free a SRPT I/O context structure
 * @sdev: SRPT HCA pointer.
 * @ioctx: I/O context pointer.
 * @dma_size: Size of I/O context DMA buffer.
 * @dir: DMA data direction.
 */
static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
			    int dma_size, enum dma_data_direction dir)
{
	if (!ioctx)
		return;

	ib_dma_unmap_single(sdev->device, ioctx->dma, dma_size, dir);
	kfree(ioctx->buf);
	kfree(ioctx);
}

/**
 * srpt_alloc_ioctx_ring - allocate a ring of SRPT I/O context structures
 * @sdev: Device to allocate the I/O context ring for.
 * @ring_size: Number of elements in the I/O context ring.
 * @ioctx_size: I/O context size.
 * @dma_size: DMA buffer size.
 * @dir: DMA data direction.
 */
static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev,
				int ring_size, int ioctx_size,
				int dma_size, enum dma_data_direction dir)
{
	struct srpt_ioctx **ring;
	int i;

	WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx)
		&& ioctx_size != sizeof(struct srpt_send_ioctx));

	ring = kvmalloc_array(ring_size, sizeof(ring[0]), GFP_KERNEL);
	if (!ring)
		goto out;
	for (i = 0; i < ring_size; ++i) {
		ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, dma_size, dir);
		if (!ring[i])
			goto err;
	}
	goto out;

err:
	while (--i >= 0)
		srpt_free_ioctx(sdev, ring[i], dma_size, dir);
	kvfree(ring);
	ring = NULL;
out:
	return ring;
}

/**
 * srpt_free_ioctx_ring - free the ring of SRPT I/O context structures
 * @ioctx_ring: I/O context ring to be freed.
 * @sdev: SRPT HCA pointer.
 * @ring_size: Number of ring elements.
 * @dma_size: Size of I/O context DMA buffer.
 * @dir: DMA data direction.
 */
static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
				 struct srpt_device *sdev, int ring_size,
				 int dma_size, enum dma_data_direction dir)
{
	int i;

	if (!ioctx_ring)
		return;

	for (i = 0; i < ring_size; ++i)
		srpt_free_ioctx(sdev, ioctx_ring[i], dma_size, dir);
	kvfree(ioctx_ring);
}
/**
 * srpt_set_cmd_state - set the state of a SCSI command
 * @ioctx: Send I/O context.
 * @new: New I/O context state.
 *
 * Does not modify the state of aborted commands. Returns the previous command
 * state.
 */
static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx,
						  enum srpt_command_state new)
{
	enum srpt_command_state previous;

	previous = ioctx->state;
	if (previous != SRPT_STATE_DONE)
		ioctx->state = new;

	return previous;
}

/**
 * srpt_test_and_set_cmd_state - test and set the state of a command
 * @ioctx: Send I/O context.
 * @old: Current I/O context state.
 * @new: New I/O context state.
 *
 * Returns true if and only if the previous command state was equal to 'old'.
 */
static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
					enum srpt_command_state old,
					enum srpt_command_state new)
{
	enum srpt_command_state previous;

	WARN_ON(!ioctx);
	WARN_ON(old == SRPT_STATE_DONE);
	WARN_ON(new == SRPT_STATE_NEW);

	previous = ioctx->state;
	if (previous == old)
		ioctx->state = new;

	return previous == old;
}
/**
 * srpt_post_recv - post an IB receive request
 * @sdev: SRPT HCA pointer.
 * @ch: SRPT RDMA channel.
 * @ioctx: Receive I/O context pointer.
 */
static int srpt_post_recv(struct srpt_device *sdev, struct srpt_rdma_ch *ch,
			  struct srpt_recv_ioctx *ioctx)
{
	struct ib_sge list;
	struct ib_recv_wr wr;

	BUG_ON(!sdev);
	list.addr = ioctx->ioctx.dma;
	list.length = srp_max_req_size;
	list.lkey = sdev->lkey;

	ioctx->ioctx.cqe.done = srpt_recv_done;
	wr.wr_cqe = &ioctx->ioctx.cqe;
	wr.next = NULL;
	wr.sg_list = &list;
	wr.num_sge = 1;

	if (sdev->use_srq)
		return ib_post_srq_recv(sdev->srq, &wr, NULL);
	else
		return ib_post_recv(ch->qp, &wr, NULL);
}
/**
 * srpt_zerolength_write - perform a zero-length RDMA write
 * @ch: SRPT RDMA channel.
 *
 * A quote from the InfiniBand specification: C9-88: For an HCA responder
 * using Reliable Connection service, for each zero-length RDMA READ or WRITE
 * request, the R_Key shall not be validated, even if the request includes
 * Immediate data.
 */
static int srpt_zerolength_write(struct srpt_rdma_ch *ch)
{
	struct ib_rdma_wr wr = {
		.wr = {
			.next		= NULL,
			{ .wr_cqe	= &ch->zw_cqe, },
			.opcode		= IB_WR_RDMA_WRITE,
			.send_flags	= IB_SEND_SIGNALED,
		}
	};

	pr_debug("%s-%d: queued zerolength write\n", ch->sess_name,
		 ch->qp->qp_num);

	return ib_post_send(ch->qp, &wr.wr, NULL);
}
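/*
 * The zero-length write serves two purposes: a successful completion kicks
 * srpt_process_wait_list() for a freshly established channel, while a
 * completion with an error status - which is what happens once
 * srpt_ch_qp_err() has moved the QP into the error state - triggers
 * scheduling of the channel release work. See srpt_zerolength_write_done().
 */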
static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srpt_rdma_ch *ch = cq->cq_context;

	pr_debug("%s-%d wc->status %d\n", ch->sess_name, ch->qp->qp_num,
		 wc->status);

	if (wc->status == IB_WC_SUCCESS) {
		srpt_process_wait_list(ch);
	} else {
		if (srpt_set_ch_state(ch, CH_DISCONNECTED))
			schedule_work(&ch->release_work);
		else
			pr_debug("%s-%d: already disconnected.\n",
				 ch->sess_name, ch->qp->qp_num);
	}
}
static int srpt_alloc_rw_ctxs(struct srpt_send_ioctx *ioctx,
		struct srp_direct_buf *db, int nbufs, struct scatterlist **sg,
		unsigned *sg_cnt)
{
	enum dma_data_direction dir = target_reverse_dma_direction(&ioctx->cmd);
	struct srpt_rdma_ch *ch = ioctx->ch;
	struct scatterlist *prev = NULL;
	unsigned prev_nents;
	int ret, i;

	if (nbufs == 1) {
		ioctx->rw_ctxs = &ioctx->s_rw_ctx;
	} else {
		ioctx->rw_ctxs = kmalloc_array(nbufs, sizeof(*ioctx->rw_ctxs),
			GFP_KERNEL);
		if (!ioctx->rw_ctxs)
			return -ENOMEM;
	}

	for (i = ioctx->n_rw_ctx; i < nbufs; i++, db++) {
		struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
		u64 remote_addr = be64_to_cpu(db->va);
		u32 size = be32_to_cpu(db->len);
		u32 rkey = be32_to_cpu(db->key);

		ret = target_alloc_sgl(&ctx->sg, &ctx->nents, size, false,
				false);
		if (ret)
			goto unwind;

		ret = rdma_rw_ctx_init(&ctx->rw, ch->qp, ch->sport->port,
				ctx->sg, ctx->nents, 0, remote_addr, rkey, dir);
		if (ret < 0) {
			target_free_sgl(ctx->sg, ctx->nents);
			goto unwind;
		}

		ioctx->n_rdma += ret;
		ioctx->n_rw_ctx++;

		if (prev) {
			sg_unmark_end(&prev[prev_nents - 1]);
			sg_chain(prev, prev_nents + 1, ctx->sg);
		} else {
			*sg = ctx->sg;
		}

		prev = ctx->sg;
		prev_nents = ctx->nents;

		*sg_cnt += ctx->nents;
	}

	return 0;

unwind:
	while (--i >= 0) {
		struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];

		rdma_rw_ctx_destroy(&ctx->rw, ch->qp, ch->sport->port,
				ctx->sg, ctx->nents, dir);
		target_free_sgl(ctx->sg, ctx->nents);
	}
	if (ioctx->rw_ctxs != &ioctx->s_rw_ctx)
		kfree(ioctx->rw_ctxs);
	return ret;
}
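/*
 * Note that the scatterlists allocated for the individual SRP descriptors
 * above are linked together with sg_unmark_end()/sg_chain(), so the caller
 * receives a single chained scatterlist that covers all data buffers.
 */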
static void srpt_free_rw_ctxs(struct srpt_rdma_ch *ch,
			      struct srpt_send_ioctx *ioctx)
{
	enum dma_data_direction dir = target_reverse_dma_direction(&ioctx->cmd);
	int i;

	for (i = 0; i < ioctx->n_rw_ctx; i++) {
		struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];

		rdma_rw_ctx_destroy(&ctx->rw, ch->qp, ch->sport->port,
				ctx->sg, ctx->nents, dir);
		target_free_sgl(ctx->sg, ctx->nents);
	}

	if (ioctx->rw_ctxs != &ioctx->s_rw_ctx)
		kfree(ioctx->rw_ctxs);
}

static inline void *srpt_get_desc_buf(struct srp_cmd *srp_cmd)
{
	/*
	 * The pointer computations below will only be compiled correctly
	 * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
	 * whether srp_cmd::add_data has been declared as a byte pointer.
	 */
	BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0) &&
		     !__same_type(srp_cmd->add_data[0], (u8)0));

	/*
	 * According to the SRP spec, the lower two bits of the 'ADDITIONAL
	 * CDB LENGTH' field are reserved and the size in bytes of this field
	 * is four times the value specified in bits 3..7. Hence the "& ~3".
	 */
	return srp_cmd->add_data + (srp_cmd->add_cdb_len & ~3);
}
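/*
 * Example: a command with a 32-byte CDB carries 16 additional CDB bytes, so
 * add_cdb_len & ~3 evaluates to 16 and the data descriptors start 16 bytes
 * beyond add_data[0]; the "& ~3" only clears the two reserved low-order bits.
 */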
/**
 * srpt_get_desc_tbl - parse the data descriptors of a SRP_CMD request
 * @ioctx: Pointer to the I/O context associated with the request.
 * @srp_cmd: Pointer to the SRP_CMD request data.
 * @dir: Pointer to the variable to which the transfer direction will be
 *   written.
 * @sg: [out] scatterlist allocated for the parsed SRP_CMD.
 * @sg_cnt: [out] length of @sg.
 * @data_len: Pointer to the variable to which the total data length of all
 *   descriptors in the SRP_CMD request will be written.
 *
 * This function initializes ioctx->nrbuf and ioctx->r_bufs.
 *
 * Returns -EINVAL when the SRP_CMD request contains inconsistent descriptors;
 * -ENOMEM when memory allocation fails and zero upon success.
 */
static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
		struct srp_cmd *srp_cmd, enum dma_data_direction *dir,
		struct scatterlist **sg, unsigned *sg_cnt, u64 *data_len)
{
	BUG_ON(!dir);
	BUG_ON(!data_len);

	/*
	 * The lower four bits of the buffer format field contain the DATA-IN
	 * buffer descriptor format, and the highest four bits contain the
	 * DATA-OUT buffer descriptor format.
	 */
	if (srp_cmd->buf_fmt & 0xf)
		/* DATA-IN: transfer data from target to initiator (read). */
		*dir = DMA_FROM_DEVICE;
	else if (srp_cmd->buf_fmt >> 4)
		/* DATA-OUT: transfer data from initiator to target (write). */
		*dir = DMA_TO_DEVICE;
	else
		*dir = DMA_NONE;

	/* initialize data_direction early as srpt_alloc_rw_ctxs needs it */
	ioctx->cmd.data_direction = *dir;

	if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
	    ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
		struct srp_direct_buf *db = srpt_get_desc_buf(srp_cmd);

		*data_len = be32_to_cpu(db->len);
		return srpt_alloc_rw_ctxs(ioctx, db, 1, sg, sg_cnt);
	} else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
		   ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
		struct srp_indirect_buf *idb = srpt_get_desc_buf(srp_cmd);
		int nbufs = be32_to_cpu(idb->table_desc.len) /
				sizeof(struct srp_direct_buf);

		if (nbufs >
		    (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
			pr_err("received unsupported SRP_CMD request"
			       " type (%u out + %u in != %u / %zu)\n",
			       srp_cmd->data_out_desc_cnt,
			       srp_cmd->data_in_desc_cnt,
			       be32_to_cpu(idb->table_desc.len),
			       sizeof(struct srp_direct_buf));
			return -EINVAL;
		}

		*data_len = be32_to_cpu(idb->len);
		return srpt_alloc_rw_ctxs(ioctx, idb->desc_list, nbufs,
				sg, sg_cnt);
	} else {
		*data_len = 0;
		return 0;
	}
}
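/*
 * Example: a buf_fmt value of 0x01 describes a single direct DATA-IN buffer
 * (read from the target), 0x20 an indirect DATA-OUT descriptor table (write
 * to the target) and 0x00 a command without any data transfer.
 */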
/**
 * srpt_init_ch_qp - initialize queue pair attributes
 * @ch: SRPT RDMA channel.
 * @qp: Queue pair pointer.
 *
 * Initialized the attributes of queue pair 'qp' by allowing local write,
 * remote read and remote write. Also transitions 'qp' to state IB_QPS_INIT.
 */
static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	WARN_ON_ONCE(ch->using_rdma_cm);

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	attr->qp_state = IB_QPS_INIT;
	attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
	attr->port_num = ch->sport->port;

	ret = ib_find_cached_pkey(ch->sport->sdev->device, ch->sport->port,
				  ch->pkey, &attr->pkey_index);
	if (ret < 0)
		pr_err("Translating pkey %#x failed (%d) - using index 0\n",
		       ch->pkey, ret);

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT |
			   IB_QP_PKEY_INDEX);

	kfree(attr);
	return ret;
}
/**
 * srpt_ch_qp_rtr - change the state of a channel to 'ready to receive' (RTR)
 * @ch: channel of the queue pair.
 * @qp: queue pair to change the state of.
 *
 * Returns zero upon success and a negative value upon failure.
 *
 * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
 * If this structure ever becomes larger, it might be necessary to allocate
 * it dynamically instead of on the stack.
 */
static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int attr_mask;
	int ret;

	WARN_ON_ONCE(ch->using_rdma_cm);

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask);
	if (ret)
		goto out;

	qp_attr.max_dest_rd_atomic = 4;

	ret = ib_modify_qp(qp, &qp_attr, attr_mask);

out:
	return ret;
}

/**
 * srpt_ch_qp_rts - change the state of a channel to 'ready to send' (RTS)
 * @ch: channel of the queue pair.
 * @qp: queue pair to change the state of.
 *
 * Returns zero upon success and a negative value upon failure.
 *
 * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
 * If this structure ever becomes larger, it might be necessary to allocate
 * it dynamically instead of on the stack.
 */
static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int attr_mask;
	int ret;

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask);
	if (ret)
		goto out;

	qp_attr.max_rd_atomic = 4;

	ret = ib_modify_qp(qp, &qp_attr, attr_mask);

out:
	return ret;
}

/**
 * srpt_ch_qp_err - set the channel queue pair state to 'error'
 * @ch: SRPT RDMA channel.
 */
static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
{
	struct ib_qp_attr qp_attr;

	qp_attr.qp_state = IB_QPS_ERR;
	return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE);
}
/**
 * srpt_get_send_ioctx - obtain an I/O context for sending to the initiator
 * @ch: SRPT RDMA channel.
 */
static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
{
	struct srpt_send_ioctx *ioctx;
	unsigned long flags;

	BUG_ON(!ch);

	ioctx = NULL;
	spin_lock_irqsave(&ch->spinlock, flags);
	if (!list_empty(&ch->free_list)) {
		ioctx = list_first_entry(&ch->free_list,
					 struct srpt_send_ioctx, free_list);
		list_del(&ioctx->free_list);
	}
	spin_unlock_irqrestore(&ch->spinlock, flags);

	if (!ioctx)
		return ioctx;

	BUG_ON(ioctx->ch != ch);
	ioctx->state = SRPT_STATE_NEW;
	ioctx->n_rdma = 0;
	ioctx->n_rw_ctx = 0;
	ioctx->queue_status_only = false;
	/*
	 * transport_init_se_cmd() does not initialize all fields, so do it
	 * here.
	 */
	memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
	memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));

	return ioctx;
}
/**
 * srpt_abort_cmd - abort a SCSI command
 * @ioctx: I/O context associated with the SCSI command.
 */
static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
{
	enum srpt_command_state state;

	BUG_ON(!ioctx);

	/*
	 * If the command is in a state where the target core is waiting for
	 * the ib_srpt driver, change the state to the next state.
	 */

	state = ioctx->state;
	switch (state) {
	case SRPT_STATE_NEED_DATA:
		ioctx->state = SRPT_STATE_DATA_IN;
		break;
	case SRPT_STATE_CMD_RSP_SENT:
	case SRPT_STATE_MGMT_RSP_SENT:
		ioctx->state = SRPT_STATE_DONE;
		break;
	default:
		WARN_ONCE(true, "%s: unexpected I/O context state %d\n",
			  __func__, state);
		break;
	}

	pr_debug("Aborting cmd with state %d -> %d and tag %lld\n", state,
		 ioctx->state, ioctx->cmd.tag);

	switch (state) {
	case SRPT_STATE_NEW:
	case SRPT_STATE_DATA_IN:
	case SRPT_STATE_MGMT:
	case SRPT_STATE_DONE:
		/*
		 * Do nothing - defer abort processing until
		 * srpt_queue_response() is invoked.
		 */
		break;
	case SRPT_STATE_NEED_DATA:
		pr_debug("tag %#llx: RDMA read error\n", ioctx->cmd.tag);
		transport_generic_request_failure(&ioctx->cmd,
					TCM_CHECK_CONDITION_ABORT_CMD);
		break;
	case SRPT_STATE_CMD_RSP_SENT:
		/*
		 * SRP_RSP sending failed or the SRP_RSP send completion has
		 * not been received in time.
		 */
		transport_generic_free_cmd(&ioctx->cmd, 0);
		break;
	case SRPT_STATE_MGMT_RSP_SENT:
		transport_generic_free_cmd(&ioctx->cmd, 0);
		break;
	default:
		WARN(1, "Unexpected command state (%d)", state);
		break;
	}

	return state;
}
/**
 * srpt_rdma_read_done - RDMA read completion callback
 * @cq: Completion queue.
 * @wc: Work completion.
 *
 * XXX: what is now target_execute_cmd used to be asynchronous, and unmapping
 * the data that has been transferred via IB RDMA had to be postponed until the
 * check_stop_free() callback. None of this is necessary anymore and needs to
 * be cleaned up.
 */
static void srpt_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srpt_rdma_ch *ch = cq->cq_context;
	struct srpt_send_ioctx *ioctx =
		container_of(wc->wr_cqe, struct srpt_send_ioctx, rdma_cqe);

	WARN_ON(ioctx->n_rdma <= 0);
	atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
	ioctx->n_rdma = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		pr_info("RDMA_READ for ioctx 0x%p failed with status %d\n",
			ioctx, wc->status);
		srpt_abort_cmd(ioctx);
		return;
	}

	if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
					SRPT_STATE_DATA_IN))
		target_execute_cmd(&ioctx->cmd);
	else
		pr_err("%s[%d]: wrong state = %d\n", __func__,
		       __LINE__, ioctx->state);
}
/**
 * srpt_build_cmd_rsp - build a SRP_RSP response
 * @ch: RDMA channel through which the request has been received.
 * @ioctx: I/O context associated with the SRP_CMD request. The response will
 *   be built in the buffer ioctx->buf points at and hence this function will
 *   overwrite the request data.
 * @tag: tag of the request for which this response is being generated.
 * @status: value for the STATUS field of the SRP_RSP information unit.
 *
 * Returns the size in bytes of the SRP_RSP response.
 *
 * An SRP_RSP response contains a SCSI status or service response. See also
 * section 6.9 in the SRP r16a document for the format of an SRP_RSP
 * response. See also SPC-2 for more information about sense data.
 */
static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
			      struct srpt_send_ioctx *ioctx, u64 tag,
			      int status)
{
	struct se_cmd *cmd = &ioctx->cmd;
	struct srp_rsp *srp_rsp;
	const u8 *sense_data;
	int sense_data_len, max_sense_len;
	u32 resid = cmd->residual_count;

	/*
	 * The lowest bit of all SAM-3 status codes is zero (see also
	 * paragraph 5.3 in SAM-3).
	 */
	WARN_ON(status & 1);

	srp_rsp = ioctx->ioctx.buf;
	BUG_ON(!srp_rsp);

	sense_data = ioctx->sense_data;
	sense_data_len = ioctx->cmd.scsi_sense_length;
	WARN_ON(sense_data_len > sizeof(ioctx->sense_data));

	memset(srp_rsp, 0, sizeof(*srp_rsp));
	srp_rsp->opcode = SRP_RSP;
	srp_rsp->req_lim_delta =
		cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
	srp_rsp->tag = tag;
	srp_rsp->status = status;

	if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
		if (cmd->data_direction == DMA_TO_DEVICE) {
			/* residual data from an underflow write */
			srp_rsp->flags = SRP_RSP_FLAG_DOUNDER;
			srp_rsp->data_out_res_cnt = cpu_to_be32(resid);
		} else if (cmd->data_direction == DMA_FROM_DEVICE) {
			/* residual data from an underflow read */
			srp_rsp->flags = SRP_RSP_FLAG_DIUNDER;
			srp_rsp->data_in_res_cnt = cpu_to_be32(resid);
		}
	} else if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		if (cmd->data_direction == DMA_TO_DEVICE) {
			/* residual data from an overflow write */
			srp_rsp->flags = SRP_RSP_FLAG_DOOVER;
			srp_rsp->data_out_res_cnt = cpu_to_be32(resid);
		} else if (cmd->data_direction == DMA_FROM_DEVICE) {
			/* residual data from an overflow read */
			srp_rsp->flags = SRP_RSP_FLAG_DIOVER;
			srp_rsp->data_in_res_cnt = cpu_to_be32(resid);
		}
	}

	if (sense_data_len) {
		BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
		max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
		if (sense_data_len > max_sense_len) {
			pr_warn("truncated sense data from %d to %d"
				" bytes\n", sense_data_len, max_sense_len);
			sense_data_len = max_sense_len;
		}

		srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
		srp_rsp->sense_data_len = cpu_to_be32(sense_data_len);
		memcpy(srp_rsp + 1, sense_data, sense_data_len);
	}

	return sizeof(*srp_rsp) + sense_data_len;
}
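/*
 * The sense bytes are copied to "srp_rsp + 1", i.e. immediately after the
 * fixed-size SRP_RSP header, which is why the returned response length is
 * sizeof(*srp_rsp) + sense_data_len.
 */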
/**
 * srpt_build_tskmgmt_rsp - build a task management response
 * @ch: RDMA channel through which the request has been received.
 * @ioctx: I/O context in which the SRP_RSP response will be built.
 * @rsp_code: RSP_CODE that will be stored in the response.
 * @tag: Tag of the request for which this response is being generated.
 *
 * Returns the size in bytes of the SRP_RSP response.
 *
 * An SRP_RSP response contains a SCSI status or service response. See also
 * section 6.9 in the SRP r16a document for the format of an SRP_RSP
 * response.
 */
static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
				  struct srpt_send_ioctx *ioctx,
				  u8 rsp_code, u64 tag)
{
	struct srp_rsp *srp_rsp;
	int resp_data_len;
	int resp_len;

	resp_data_len = 4;
	resp_len = sizeof(*srp_rsp) + resp_data_len;

	srp_rsp = ioctx->ioctx.buf;
	BUG_ON(!srp_rsp);
	memset(srp_rsp, 0, sizeof(*srp_rsp));

	srp_rsp->opcode = SRP_RSP;
	srp_rsp->req_lim_delta =
		cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
	srp_rsp->tag = tag;

	srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
	srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
	srp_rsp->data[3] = rsp_code;

	return resp_len;
}

static int srpt_check_stop_free(struct se_cmd *cmd)
{
	struct srpt_send_ioctx *ioctx = container_of(cmd,
				struct srpt_send_ioctx, cmd);

	return target_put_sess_cmd(&ioctx->cmd);
}
/**
 * srpt_handle_cmd - process a SRP_CMD information unit
 * @ch: SRPT RDMA channel.
 * @recv_ioctx: Receive I/O context.
 * @send_ioctx: Send I/O context.
 */
static void srpt_handle_cmd(struct srpt_rdma_ch *ch,
			    struct srpt_recv_ioctx *recv_ioctx,
			    struct srpt_send_ioctx *send_ioctx)
{
	struct se_cmd *cmd;
	struct srp_cmd *srp_cmd;
	struct scatterlist *sg = NULL;
	unsigned sg_cnt = 0;
	u64 data_len;
	enum dma_data_direction dir;
	int rc;

	BUG_ON(!send_ioctx);

	srp_cmd = recv_ioctx->ioctx.buf;
	cmd = &send_ioctx->cmd;
	cmd->tag = srp_cmd->tag;

	switch (srp_cmd->task_attr) {
	case SRP_CMD_SIMPLE_Q:
		cmd->sam_task_attr = TCM_SIMPLE_TAG;
		break;
	case SRP_CMD_ORDERED_Q:
	default:
		cmd->sam_task_attr = TCM_ORDERED_TAG;
		break;
	case SRP_CMD_HEAD_OF_Q:
		cmd->sam_task_attr = TCM_HEAD_TAG;
		break;
	case SRP_CMD_ACA:
		cmd->sam_task_attr = TCM_ACA_TAG;
		break;
	}

	rc = srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &sg, &sg_cnt,
			       &data_len);
	if (rc) {
		if (rc != -EAGAIN) {
			pr_err("0x%llx: parsing SRP descriptor table failed.\n",
			       srp_cmd->tag);
		}
		goto release_ioctx;
	}

	rc = target_submit_cmd_map_sgls(cmd, ch->sess, srp_cmd->cdb,
			       &send_ioctx->sense_data[0],
			       scsilun_to_int(&srp_cmd->lun), data_len,
			       TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF,
			       sg, sg_cnt, NULL, 0, NULL, 0);
	if (rc != 0) {
		pr_debug("target_submit_cmd() returned %d for tag %#llx\n", rc,
			 srp_cmd->tag);
		goto release_ioctx;
	}
	return;

release_ioctx:
	send_ioctx->state = SRPT_STATE_DONE;
	srpt_release_cmd(cmd);
}

static int srp_tmr_to_tcm(int fn)
{
	switch (fn) {
	case SRP_TSK_ABORT_TASK:
		return TMR_ABORT_TASK;
	case SRP_TSK_ABORT_TASK_SET:
		return TMR_ABORT_TASK_SET;
	case SRP_TSK_CLEAR_TASK_SET:
		return TMR_CLEAR_TASK_SET;
	case SRP_TSK_LUN_RESET:
		return TMR_LUN_RESET;
	case SRP_TSK_CLEAR_ACA:
		return TMR_CLEAR_ACA;
	default:
		return -1;
	}
}
/**
 * srpt_handle_tsk_mgmt - process a SRP_TSK_MGMT information unit
 * @ch: SRPT RDMA channel.
 * @recv_ioctx: Receive I/O context.
 * @send_ioctx: Send I/O context.
 *
 * Returns 0 if and only if the request will be processed by the target core.
 *
 * For more information about SRP_TSK_MGMT information units, see also section
 * 6.7 in the SRP r16a document.
 */
static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
				 struct srpt_recv_ioctx *recv_ioctx,
				 struct srpt_send_ioctx *send_ioctx)
{
	struct srp_tsk_mgmt *srp_tsk;
	struct se_cmd *cmd;
	struct se_session *sess = ch->sess;
	int tcm_tmr;
	int rc;

	BUG_ON(!send_ioctx);

	srp_tsk = recv_ioctx->ioctx.buf;
	cmd = &send_ioctx->cmd;

	pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld ch %p sess %p\n",
		 srp_tsk->tsk_mgmt_func, srp_tsk->task_tag, srp_tsk->tag, ch,
		 ch->sess);

	srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
	send_ioctx->cmd.tag = srp_tsk->tag;
	tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
	rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL,
			       scsilun_to_int(&srp_tsk->lun), srp_tsk, tcm_tmr,
			       GFP_KERNEL, srp_tsk->task_tag,
			       TARGET_SCF_ACK_KREF);
	if (rc != 0) {
		send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
		goto fail;
	}
	return;
fail:
	transport_send_check_condition_and_sense(cmd, 0, 0); // XXX:
}
/**
 * srpt_handle_new_iu - process a newly received information unit
 * @ch: RDMA channel through which the information unit has been received.
 * @recv_ioctx: Receive I/O context associated with the information unit.
 */
static bool
srpt_handle_new_iu(struct srpt_rdma_ch *ch, struct srpt_recv_ioctx *recv_ioctx)
{
	struct srpt_send_ioctx *send_ioctx = NULL;
	struct srp_cmd *srp_cmd;
	bool res = false;
	u8 opcode;

	BUG_ON(!ch);
	BUG_ON(!recv_ioctx);

	if (unlikely(ch->state == CH_CONNECTING))
		goto push;

	ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
				   recv_ioctx->ioctx.dma, srp_max_req_size,
				   DMA_FROM_DEVICE);

	srp_cmd = recv_ioctx->ioctx.buf;
	opcode = srp_cmd->opcode;
	if (opcode == SRP_CMD || opcode == SRP_TSK_MGMT) {
		send_ioctx = srpt_get_send_ioctx(ch);
		if (unlikely(!send_ioctx))
			goto push;
	}

	if (!list_empty(&recv_ioctx->wait_list)) {
		WARN_ON_ONCE(!ch->processing_wait_list);
		list_del_init(&recv_ioctx->wait_list);
	}

	switch (opcode) {
	case SRP_CMD:
		srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
		break;
	case SRP_TSK_MGMT:
		srpt_handle_tsk_mgmt(ch, recv_ioctx, send_ioctx);
		break;
	case SRP_I_LOGOUT:
		pr_err("Not yet implemented: SRP_I_LOGOUT\n");
		break;
	case SRP_CRED_RSP:
		pr_debug("received SRP_CRED_RSP\n");
		break;
	case SRP_AER_RSP:
		pr_debug("received SRP_AER_RSP\n");
		break;
	case SRP_RSP:
		pr_err("Received SRP_RSP\n");
		break;
	default:
		pr_err("received IU with unknown opcode 0x%x\n", opcode);
		break;
	}

	srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);
	res = true;

out:
	return res;

push:
	if (list_empty(&recv_ioctx->wait_list)) {
		WARN_ON_ONCE(ch->processing_wait_list);
		list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
	}
	goto out;
}

static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srpt_rdma_ch *ch = cq->cq_context;
	struct srpt_recv_ioctx *ioctx =
		container_of(wc->wr_cqe, struct srpt_recv_ioctx, ioctx.cqe);

	if (wc->status == IB_WC_SUCCESS) {
		int req_lim;

		req_lim = atomic_dec_return(&ch->req_lim);
		if (unlikely(req_lim < 0))
			pr_err("req_lim = %d < 0\n", req_lim);
		srpt_handle_new_iu(ch, ioctx);
	} else {
		pr_info_ratelimited("receiving failed for ioctx %p with status %d\n",
				    ioctx, wc->status);
	}
}
/*
 * This function must be called from the context in which RDMA completions are
 * processed because it accesses the wait list without protection against
 * access from other threads.
 */
static void srpt_process_wait_list(struct srpt_rdma_ch *ch)
{
	struct srpt_recv_ioctx *recv_ioctx, *tmp;

	WARN_ON_ONCE(ch->state == CH_CONNECTING);

	if (list_empty(&ch->cmd_wait_list))
		return;

	WARN_ON_ONCE(ch->processing_wait_list);
	ch->processing_wait_list = true;
	list_for_each_entry_safe(recv_ioctx, tmp, &ch->cmd_wait_list,
				 wait_list) {
		if (!srpt_handle_new_iu(ch, recv_ioctx))
			break;
	}
	ch->processing_wait_list = false;
}

/**
 * srpt_send_done - send completion callback
 * @cq: Completion queue.
 * @wc: Work completion.
 *
 * Note: Although this has not yet been observed during tests, at least in
 * theory it is possible that the srpt_get_send_ioctx() call invoked by
 * srpt_handle_new_iu() fails. This is possible because the req_lim_delta
 * value in each response is set to one, and it is possible that this response
 * makes the initiator send a new request before the send completion for that
 * response has been processed. This could e.g. happen if the call to
 * srpt_put_send_iotcx() is delayed because of a higher priority interrupt or
 * if IB retransmission causes generation of the send completion to be
 * delayed. Incoming information units for which srpt_get_send_ioctx() fails
 * are queued on cmd_wait_list. The code below processes these delayed
 * requests one at a time.
 */
static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srpt_rdma_ch *ch = cq->cq_context;
	struct srpt_send_ioctx *ioctx =
		container_of(wc->wr_cqe, struct srpt_send_ioctx, ioctx.cqe);
	enum srpt_command_state state;

	state = srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);

	WARN_ON(state != SRPT_STATE_CMD_RSP_SENT &&
		state != SRPT_STATE_MGMT_RSP_SENT);

	atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);

	if (wc->status != IB_WC_SUCCESS)
		pr_info("sending response for ioctx 0x%p failed"
			" with status %d\n", ioctx, wc->status);

	if (state != SRPT_STATE_DONE) {
		transport_generic_free_cmd(&ioctx->cmd, 0);
	} else {
		pr_err("IB completion has been received too late for"
		       " wr_id = %u.\n", ioctx->ioctx.index);
	}

	srpt_process_wait_list(ch);
}
/**
 * srpt_create_ch_ib - create receive and send completion queues
 * @ch: SRPT RDMA channel.
 */
static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
{
	struct ib_qp_init_attr *qp_init;
	struct srpt_port *sport = ch->sport;
	struct srpt_device *sdev = sport->sdev;
	const struct ib_device_attr *attrs = &sdev->device->attrs;
	int sq_size = sport->port_attrib.srp_sq_size;
	int i, ret;

	WARN_ON(ch->rq_size < 1);

	ret = -ENOMEM;
	qp_init = kzalloc(sizeof(*qp_init), GFP_KERNEL);
	if (!qp_init)
		goto out;

retry:
	ch->cq = ib_alloc_cq(sdev->device, ch, ch->rq_size + sq_size,
			0 /* XXX: spread CQs */, IB_POLL_WORKQUEUE);
	if (IS_ERR(ch->cq)) {
		ret = PTR_ERR(ch->cq);
		pr_err("failed to create CQ cqe= %d ret= %d\n",
		       ch->rq_size + sq_size, ret);
		goto out;
	}

	qp_init->qp_context = (void *)ch;
	qp_init->event_handler
		= (void(*)(struct ib_event *, void*))srpt_qp_event;
	qp_init->send_cq = ch->cq;
	qp_init->recv_cq = ch->cq;
	qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_init->qp_type = IB_QPT_RC;
	/*
	 * We divide up our send queue size into half SEND WRs to send the
	 * completions, and half R/W contexts to actually do the RDMA
	 * READ/WRITE transfers. Note that we need to allocate CQ slots for
	 * both, as RDMA contexts will also post completions for the
	 * RDMA READ case.
	 */
	qp_init->cap.max_send_wr = min(sq_size / 2, attrs->max_qp_wr);
	qp_init->cap.max_rdma_ctxs = sq_size / 2;
	qp_init->cap.max_send_sge = min(attrs->max_send_sge,
					SRPT_MAX_SG_PER_WQE);
	qp_init->port_num = ch->sport->port;
	if (sdev->use_srq) {
		qp_init->srq = sdev->srq;
	} else {
		qp_init->cap.max_recv_wr = ch->rq_size;
		qp_init->cap.max_recv_sge = min(attrs->max_recv_sge,
						SRPT_MAX_SG_PER_WQE);
	}

	if (ch->using_rdma_cm) {
		ret = rdma_create_qp(ch->rdma_cm.cm_id, sdev->pd, qp_init);
		ch->qp = ch->rdma_cm.cm_id->qp;
	} else {
		ch->qp = ib_create_qp(sdev->pd, qp_init);
		if (!IS_ERR(ch->qp)) {
			ret = srpt_init_ch_qp(ch, ch->qp);
			if (ret)
				ib_destroy_qp(ch->qp);
		} else {
			ret = PTR_ERR(ch->qp);
		}
	}
	if (ret) {
		bool retry = sq_size > MIN_SRPT_SQ_SIZE;

		if (retry) {
			pr_debug("failed to create queue pair with sq_size = %d (%d) - retrying\n",
				 sq_size, ret);
			ib_free_cq(ch->cq);
			sq_size = max(sq_size / 2, MIN_SRPT_SQ_SIZE);
			goto retry;
		} else {
			pr_err("failed to create queue pair with sq_size = %d (%d)\n",
			       sq_size, ret);
			goto err_destroy_cq;
		}
	}

	atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr);

	pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d ch= %p\n",
		 __func__, ch->cq->cqe, qp_init->cap.max_send_sge,
		 qp_init->cap.max_send_wr, ch);

	if (!sdev->use_srq)
		for (i = 0; i < ch->rq_size; i++)
			srpt_post_recv(sdev, ch, ch->ioctx_recv_ring[i]);

out:
	kfree(qp_init);
	return ret;

err_destroy_cq:
	ch->qp = NULL;
	ib_free_cq(ch->cq);
	goto out;
}

static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
{
	ib_destroy_qp(ch->qp);
	ib_free_cq(ch->cq);
}
/**
 * srpt_close_ch - close a RDMA channel
 * @ch: SRPT RDMA channel.
 *
 * Make sure all resources associated with the channel will be deallocated at
 * an appropriate time.
 *
 * Returns true if and only if the channel state has been modified into
 * CH_DRAINING.
 */
static bool srpt_close_ch(struct srpt_rdma_ch *ch)
{
	int ret;

	if (!srpt_set_ch_state(ch, CH_DRAINING)) {
		pr_debug("%s: already closed\n", ch->sess_name);
		return false;
	}

	kref_get(&ch->kref);

	ret = srpt_ch_qp_err(ch);
	if (ret < 0)
		pr_err("%s-%d: changing queue pair into error state failed: %d\n",
		       ch->sess_name, ch->qp->qp_num, ret);

	ret = srpt_zerolength_write(ch);
	if (ret < 0) {
		pr_err("%s-%d: queuing zero-length write failed: %d\n",
		       ch->sess_name, ch->qp->qp_num, ret);
		if (srpt_set_ch_state(ch, CH_DISCONNECTED))
			schedule_work(&ch->release_work);
		else
			WARN_ON_ONCE(true);
	}

	kref_put(&ch->kref, srpt_free_ch);

	return true;
}

/*
 * Change the channel state into CH_DISCONNECTING. If a channel has not yet
 * reached the connected state, close it. If a channel is in the connected
 * state, send a DREQ. If a DREQ has been received, send a DREP. Note: it is
 * the responsibility of the caller to ensure that this function is not
 * invoked concurrently with the code that accepts a connection. This means
 * that this function must either be invoked from inside a CM callback
 * function or that it must be invoked with the srpt_port.mutex held.
 */
static int srpt_disconnect_ch(struct srpt_rdma_ch *ch)
{
	int ret;

	if (!srpt_set_ch_state(ch, CH_DISCONNECTING))
		return -ENOTCONN;

	if (ch->using_rdma_cm) {
		ret = rdma_disconnect(ch->rdma_cm.cm_id);
	} else {
		ret = ib_send_cm_dreq(ch->ib_cm.cm_id, NULL, 0);
		if (ret < 0)
			ret = ib_send_cm_drep(ch->ib_cm.cm_id, NULL, 0);
	}

	if (ret < 0 && srpt_close_ch(ch))
		ret = 0;

	return ret;
}
static bool srpt_ch_closed(struct srpt_port *sport, struct srpt_rdma_ch *ch)
{
	struct srpt_nexus *nexus;
	struct srpt_rdma_ch *ch2;
	bool res = true;

	rcu_read_lock();
	list_for_each_entry(nexus, &sport->nexus_list, entry) {
		list_for_each_entry(ch2, &nexus->ch_list, list) {
			if (ch2 == ch) {
				res = false;
				goto done;
			}
		}
	}
done:
	rcu_read_unlock();

	return res;
}

/* Send DREQ and wait for DREP. */
static void srpt_disconnect_ch_sync(struct srpt_rdma_ch *ch)
{
	struct srpt_port *sport = ch->sport;

	pr_debug("ch %s-%d state %d\n", ch->sess_name, ch->qp->qp_num,
		 ch->state);

	mutex_lock(&sport->mutex);
	srpt_disconnect_ch(ch);
	mutex_unlock(&sport->mutex);

	while (wait_event_timeout(sport->ch_releaseQ, srpt_ch_closed(sport, ch),
				  5 * HZ) == 0)
		pr_info("%s(%s-%d state %d): still waiting ...\n", __func__,
			ch->sess_name, ch->qp->qp_num, ch->state);
}

static void __srpt_close_all_ch(struct srpt_port *sport)
{
	struct srpt_nexus *nexus;
	struct srpt_rdma_ch *ch;

	lockdep_assert_held(&sport->mutex);

	list_for_each_entry(nexus, &sport->nexus_list, entry) {
		list_for_each_entry(ch, &nexus->ch_list, list) {
			if (srpt_disconnect_ch(ch) >= 0)
				pr_info("Closing channel %s because target %s_%d has been disabled\n",
					ch->sess_name,
					sport->sdev->device->name, sport->port);
			srpt_close_ch(ch);
		}
	}
}
/*
 * Look up (i_port_id, t_port_id) in sport->nexus_list. Create an entry if
 * it does not yet exist.
 */
static struct srpt_nexus *srpt_get_nexus(struct srpt_port *sport,
					 const u8 i_port_id[16],
					 const u8 t_port_id[16])
{
	struct srpt_nexus *nexus = NULL, *tmp_nexus = NULL, *n;

	for (;;) {
		mutex_lock(&sport->mutex);
		list_for_each_entry(n, &sport->nexus_list, entry) {
			if (memcmp(n->i_port_id, i_port_id, 16) == 0 &&
			    memcmp(n->t_port_id, t_port_id, 16) == 0) {
				nexus = n;
				break;
			}
		}
		if (!nexus && tmp_nexus) {
			list_add_tail_rcu(&tmp_nexus->entry,
					  &sport->nexus_list);
			swap(nexus, tmp_nexus);
		}
		mutex_unlock(&sport->mutex);
		if (nexus)
			break;
		tmp_nexus = kzalloc(sizeof(*nexus), GFP_KERNEL);
		if (!tmp_nexus) {
			nexus = ERR_PTR(-ENOMEM);
			break;
		}
		INIT_LIST_HEAD(&tmp_nexus->ch_list);
		memcpy(tmp_nexus->i_port_id, i_port_id, 16);
		memcpy(tmp_nexus->t_port_id, t_port_id, 16);
	}

	kfree(tmp_nexus);

	return nexus;
}

static void srpt_set_enabled(struct srpt_port *sport, bool enabled)
	__must_hold(&sport->mutex)
{
	lockdep_assert_held(&sport->mutex);

	if (sport->enabled == enabled)
		return;
	sport->enabled = enabled;
	if (!enabled)
		__srpt_close_all_ch(sport);
}

static void srpt_free_ch(struct kref *kref)
{
	struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref);

	kfree_rcu(ch, rcu);
}
/*
 * Shut down the SCSI target session, tell the connection manager to
 * disconnect the associated RDMA channel, transition the QP to the error
 * state and remove the channel from the channel list. This function is
 * typically called from inside srpt_zerolength_write_done(). Concurrent
 * srpt_zerolength_write() calls from inside srpt_close_ch() are possible
 * as long as the channel is on sport->nexus_list.
 */
static void srpt_release_channel_work(struct work_struct *w)
{
	struct srpt_rdma_ch *ch;
	struct srpt_device *sdev;
	struct srpt_port *sport;
	struct se_session *se_sess;

	ch = container_of(w, struct srpt_rdma_ch, release_work);
	pr_debug("%s-%d\n", ch->sess_name, ch->qp->qp_num);

	sdev = ch->sport->sdev;
	BUG_ON(!sdev);

	se_sess = ch->sess;
	BUG_ON(!se_sess);

	target_sess_cmd_list_set_waiting(se_sess);
	target_wait_for_sess_cmds(se_sess);

	target_remove_session(se_sess);
	ch->sess = NULL;

	if (ch->using_rdma_cm)
		rdma_destroy_id(ch->rdma_cm.cm_id);
	else
		ib_destroy_cm_id(ch->ib_cm.cm_id);

	sport = ch->sport;
	mutex_lock(&sport->mutex);
	list_del_rcu(&ch->list);
	mutex_unlock(&sport->mutex);

	srpt_destroy_ch_ib(ch);

	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
			     ch->sport->sdev, ch->rq_size,
			     ch->max_rsp_size, DMA_TO_DEVICE);

	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
			     sdev, ch->rq_size,
			     srp_max_req_size, DMA_FROM_DEVICE);

	wake_up(&sport->ch_releaseQ);

	kref_put(&ch->kref, srpt_free_ch);
}
2092  * srpt_cm_req_recv - process the event IB_CM_REQ_RECEIVED
2093  * @sdev: HCA through which the login request was received.
2094  * @ib_cm_id: IB/CM connection identifier in case of IB/CM.
2095  * @rdma_cm_id: RDMA/CM connection identifier in case of RDMA/CM.
2096  * @port_num: Port through which the REQ message was received.
2097  * @pkey: P_Key of the incoming connection.
2098  * @req: SRP login request.
2099  * @src_addr: GID (IB/CM) or IP address (RDMA/CM) of the port that submitted
2100  *   the login request.
2102  * Ownership of the cm_id is transferred to the target session if this
2103  * function returns zero. Otherwise the caller remains the owner of cm_id.
2105 static int srpt_cm_req_recv(struct srpt_device *const sdev,
2106                             struct ib_cm_id *ib_cm_id,
2107                             struct rdma_cm_id *rdma_cm_id,
2108                             u8 port_num, __be16 pkey,
2109                             const struct srp_login_req *req,
2110                             const char *src_addr)
2112     struct srpt_port *sport = &sdev->port[port_num - 1];
2113     struct srpt_nexus *nexus;
2114     struct srp_login_rsp *rsp = NULL;
2115     struct srp_login_rej *rej = NULL;
2117         struct rdma_conn_param rdma_cm;
2118         struct ib_cm_rep_param ib_cm;
2119     } *rep_param = NULL;
2120     struct srpt_rdma_ch *ch = NULL;
2125     WARN_ON_ONCE(irqs_disabled());
2127     if (WARN_ON(!sdev || !req))
2130     it_iu_len = be32_to_cpu(req->req_it_iu_len);
2132     pr_info("Received SRP_LOGIN_REQ with i_port_id %pI6, t_port_id %pI6 and it_iu_len %d on port %d (guid=%pI6); pkey %#04x\n",
2133             req->initiator_port_id, req->target_port_id, it_iu_len,
2134             port_num, &sport->gid, be16_to_cpu(pkey));
2136     nexus = srpt_get_nexus(sport, req->initiator_port_id,
2137                            req->target_port_id);
2138     if (IS_ERR(nexus)) {
2139         ret = PTR_ERR(nexus);
2144     rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
2145     rej = kzalloc(sizeof(*rej), GFP_KERNEL);
2146     rep_param = kzalloc(sizeof(*rep_param), GFP_KERNEL);
2147     if (!rsp || !rej || !rep_param)
2151     if (it_iu_len > srp_max_req_size || it_iu_len < 64) {
2152         rej->reason = cpu_to_be32(
2153                 SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
2154         pr_err("rejected SRP_LOGIN_REQ because its length (%d bytes) is out of range (%d .. %d)\n",
2155                it_iu_len, 64, srp_max_req_size);
2159     if (!sport->enabled) {
2160         rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2161         pr_info("rejected SRP_LOGIN_REQ because target port %s_%d has not yet been enabled\n",
2162                 sport->sdev->device->name, port_num);
2166     if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid)
2167         || *(__be64 *)(req->target_port_id + 8) !=
2168            cpu_to_be64(srpt_service_guid)) {
2169         rej->reason = cpu_to_be32(
2170                 SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
2171         pr_err("rejected SRP_LOGIN_REQ because it has an invalid target port identifier.\n");
2176     ch = kzalloc(sizeof(*ch), GFP_KERNEL);
2178         rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2179         pr_err("rejected SRP_LOGIN_REQ because out of memory.\n");
2183     kref_init(&ch->kref);
2184     ch->pkey = be16_to_cpu(pkey);
2186     ch->zw_cqe.done = srpt_zerolength_write_done;
2187     INIT_WORK(&ch->release_work, srpt_release_channel_work);
2190         ch->ib_cm.cm_id = ib_cm_id;
2191         ib_cm_id->context = ch;
2193         ch->using_rdma_cm = true;
2194         ch->rdma_cm.cm_id = rdma_cm_id;
2195         rdma_cm_id->context = ch;
2198      * ch->rq_size should be at least as large as the initiator queue
2199      * depth to avoid that the initiator driver has to report QUEUE_FULL
2200      * to the SCSI mid-layer.
2202     ch->rq_size = min(MAX_SRPT_RQ_SIZE, sdev->device->attrs.max_qp_wr);
2203     spin_lock_init(&ch->spinlock);
2204     ch->state = CH_CONNECTING;
2205     INIT_LIST_HEAD(&ch->cmd_wait_list);
2206     ch->max_rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
2208     ch->ioctx_ring = (struct srpt_send_ioctx **)
2209         srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
2210                               sizeof(*ch->ioctx_ring[0]),
2211                               ch->max_rsp_size, DMA_TO_DEVICE);
2212     if (!ch->ioctx_ring) {
2213         pr_err("rejected SRP_LOGIN_REQ because creating a new QP SQ ring failed.\n");
2214         rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2218     INIT_LIST_HEAD(&ch->free_list);
2219     for (i = 0; i < ch->rq_size; i++) {
2220         ch->ioctx_ring[i]->ch = ch;
2221         list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
2223     if (!sdev->use_srq) {
2224         ch->ioctx_recv_ring = (struct srpt_recv_ioctx **)
2225             srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
2226                                   sizeof(*ch->ioctx_recv_ring[0]),
2229         if (!ch->ioctx_recv_ring) {
2230             pr_err("rejected SRP_LOGIN_REQ because creating a new QP RQ ring failed.\n");
2232                 cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2235         for (i = 0; i < ch->rq_size; i++)
2236             INIT_LIST_HEAD(&ch->ioctx_recv_ring[i]->wait_list);
2239     ret = srpt_create_ch_ib(ch);
2241         rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2242         pr_err("rejected SRP_LOGIN_REQ because creating a new RDMA channel failed.\n");
2243         goto free_recv_ring;
2246     strlcpy(ch->sess_name, src_addr, sizeof(ch->sess_name));
2247     snprintf(i_port_id, sizeof(i_port_id), "0x%016llx%016llx",
2248              be64_to_cpu(*(__be64 *)nexus->i_port_id),
2249              be64_to_cpu(*(__be64 *)(nexus->i_port_id + 8)));
2251     pr_debug("registering session %s\n", ch->sess_name);
2253     if (sport->port_guid_tpg.se_tpg_wwn)
2254         ch->sess = target_setup_session(&sport->port_guid_tpg, 0, 0,
2256                                         ch->sess_name, ch, NULL);
2257     if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
2258         ch->sess = target_setup_session(&sport->port_gid_tpg, 0, 0,
2259                                         TARGET_PROT_NORMAL, i_port_id, ch,
2261     /* Retry without leading "0x" */
2262     if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
2263         ch->sess = target_setup_session(&sport->port_gid_tpg, 0, 0,
2265                                         i_port_id + 2, ch, NULL);
2266     if (IS_ERR_OR_NULL(ch->sess)) {
2267         WARN_ON_ONCE(ch->sess == NULL);
2268         ret = PTR_ERR(ch->sess);
2270         pr_info("Rejected login for initiator %s: ret = %d.\n",
2271                 ch->sess_name, ret);
2272         rej->reason = cpu_to_be32(ret == -ENOMEM ?
2273                 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES :
2274                 SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
2278     mutex_lock(&sport->mutex);
2280     if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
2281         struct srpt_rdma_ch *ch2;
2283         rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;
2285         list_for_each_entry(ch2, &nexus->ch_list, list) {
2286             if (srpt_disconnect_ch(ch2) < 0)
2288             pr_info("Relogin - closed existing channel %s\n",
2290             rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
2293         rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
2296     list_add_tail_rcu(&ch->list, &nexus->ch_list);
2298     if (!sport->enabled) {
2299         rej->reason = cpu_to_be32(
2300                 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2301         pr_info("rejected SRP_LOGIN_REQ because target %s_%d is not enabled\n",
2302                 sdev->device->name, port_num);
2303         mutex_unlock(&sport->mutex);
2307     mutex_unlock(&sport->mutex);
2309     ret = ch->using_rdma_cm ? 0 : srpt_ch_qp_rtr(ch, ch->qp);
2311         rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2312         pr_err("rejected SRP_LOGIN_REQ because enabling RTR failed (error code = %d)\n",
2317     pr_debug("Establish connection sess=%p name=%s ch=%p\n", ch->sess,
2320     /* create srp_login_response */
2321     rsp->opcode = SRP_LOGIN_RSP;
2322     rsp->tag = req->tag;
2323     rsp->max_it_iu_len = req->req_it_iu_len;
2324     rsp->max_ti_iu_len = req->req_it_iu_len;
2325     ch->max_ti_iu_len = it_iu_len;
2326     rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
2327                                SRP_BUF_FORMAT_INDIRECT);
2328     rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
2329     atomic_set(&ch->req_lim, ch->rq_size);
2330     atomic_set(&ch->req_lim_delta, 0);
2332     /* create cm reply */
2333     if (ch->using_rdma_cm) {
2334         rep_param->rdma_cm.private_data = (void *)rsp;
2335         rep_param->rdma_cm.private_data_len = sizeof(*rsp);
2336         rep_param->rdma_cm.rnr_retry_count = 7;
2337         rep_param->rdma_cm.flow_control = 1;
2338         rep_param->rdma_cm.responder_resources = 4;
2339         rep_param->rdma_cm.initiator_depth = 4;
2341         rep_param->ib_cm.qp_num = ch->qp->qp_num;
2342         rep_param->ib_cm.private_data = (void *)rsp;
2343         rep_param->ib_cm.private_data_len = sizeof(*rsp);
2344         rep_param->ib_cm.rnr_retry_count = 7;
2345         rep_param->ib_cm.flow_control = 1;
2346         rep_param->ib_cm.failover_accepted = 0;
2347         rep_param->ib_cm.srq = 1;
2348         rep_param->ib_cm.responder_resources = 4;
2349         rep_param->ib_cm.initiator_depth = 4;
2353      * Hold the sport mutex while accepting a connection to avoid that
2354      * srpt_disconnect_ch() is invoked concurrently with this code.
2356     mutex_lock(&sport->mutex);
2357     if (sport->enabled && ch->state == CH_CONNECTING) {
2358         if (ch->using_rdma_cm)
2359             ret = rdma_accept(rdma_cm_id, &rep_param->rdma_cm);
2361             ret = ib_send_cm_rep(ib_cm_id, &rep_param->ib_cm);
2365     mutex_unlock(&sport->mutex);
2373     rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2374     pr_err("sending SRP_LOGIN_REQ response failed (error code = %d)\n",
2382     srpt_destroy_ch_ib(ch);
2385     srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
2386                          ch->sport->sdev, ch->rq_size,
2387                          srp_max_req_size, DMA_FROM_DEVICE);
2390     srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
2391                          ch->sport->sdev, ch->rq_size,
2392                          ch->max_rsp_size, DMA_TO_DEVICE);
2396     rdma_cm_id->context = NULL;
2398     ib_cm_id->context = NULL;
2402     WARN_ON_ONCE(ret == 0);
2405     pr_info("Rejecting login with reason %#x\n", be32_to_cpu(rej->reason));
2406     rej->opcode = SRP_LOGIN_REJ;
2407     rej->tag = req->tag;
2408     rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
2409                                SRP_BUF_FORMAT_INDIRECT);
2412     rdma_reject(rdma_cm_id, rej, sizeof(*rej));
2414     ib_send_cm_rej(ib_cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
2417     if (ch && ch->sess) {
2420          * Tell the caller not to free cm_id since
2421          * srpt_release_channel_work() will do that.
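/*
 * Editor's note: a minimal sketch of the caller-side contract documented in
 * the kernel-doc above -- on a zero return the new channel owns the CM ID,
 * on a non-zero return the caller keeps ownership and may destroy it. The
 * wrapper name example_handle_login is hypothetical; srpt_cm_req_recv() and
 * its parameters come from the code above.
 */
#if 0
static int example_handle_login(struct srpt_device *sdev,
                                struct ib_cm_id *cm_id, u8 port_num,
                                __be16 pkey, const struct srp_login_req *req,
                                const char *src_addr)
{
    int ret = srpt_cm_req_recv(sdev, cm_id, NULL, port_num, pkey, req,
                               src_addr);

    if (ret)
        return ret;     /* caller still owns cm_id and may destroy it */

    return 0;           /* ownership transferred to the target session */
}
#endif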
2434 static int srpt_ib_cm_req_recv(struct ib_cm_id *cm_id,
2435                                const struct ib_cm_req_event_param *param,
2440     srpt_format_guid(sguid, sizeof(sguid),
2441                      &param->primary_path->dgid.global.interface_id);
2443     return srpt_cm_req_recv(cm_id->context, cm_id, NULL, param->port,
2444                             param->primary_path->pkey,
2445                             private_data, sguid);
2448 static int srpt_rdma_cm_req_recv(struct rdma_cm_id *cm_id,
2449                                  struct rdma_cm_event *event)
2451     struct srpt_device *sdev;
2452     struct srp_login_req req;
2453     const struct srp_login_req_rdma *req_rdma;
2456     sdev = ib_get_client_data(cm_id->device, &srpt_client);
2458         return -ECONNREFUSED;
2460     if (event->param.conn.private_data_len < sizeof(*req_rdma))
2463     /* Transform srp_login_req_rdma into srp_login_req. */
2464     req_rdma = event->param.conn.private_data;
2465     memset(&req, 0, sizeof(req));
2466     req.opcode = req_rdma->opcode;
2467     req.tag = req_rdma->tag;
2468     req.req_it_iu_len = req_rdma->req_it_iu_len;
2469     req.req_buf_fmt = req_rdma->req_buf_fmt;
2470     req.req_flags = req_rdma->req_flags;
2471     memcpy(req.initiator_port_id, req_rdma->initiator_port_id, 16);
2472     memcpy(req.target_port_id, req_rdma->target_port_id, 16);
2474     snprintf(src_addr, sizeof(src_addr), "%pIS",
2475              &cm_id->route.addr.src_addr);
2477     return srpt_cm_req_recv(sdev, NULL, cm_id, cm_id->port_num,
2478                             cm_id->route.path_rec->pkey, &req, src_addr);
2481 static void srpt_cm_rej_recv(struct srpt_rdma_ch *ch,
2482                              enum ib_cm_rej_reason reason,
2483                              const u8 *private_data,
2484                              u8 private_data_len)
2489     if (private_data_len && (priv = kmalloc(private_data_len * 3 + 1,
2491         for (i = 0; i < private_data_len; i++)
2492             sprintf(priv + 3 * i, " %02x", private_data[i]);
2494     pr_info("Received CM REJ for ch %s-%d; reason %d%s%s.\n",
2495             ch->sess_name, ch->qp->qp_num, reason, private_data_len ?
2496             "; private data" : "", priv ? priv : " (?)");
2501  * srpt_cm_rtu_recv - process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event
2502  * @ch: SRPT RDMA channel.
2504  * An RTU (ready to use) message indicates that the connection has been
2505  * established and that the recipient may begin transmitting.
2507 static void srpt_cm_rtu_recv(struct srpt_rdma_ch *ch)
2511     ret = ch->using_rdma_cm ? 0 : srpt_ch_qp_rts(ch, ch->qp);
2513         pr_err("%s-%d: QP transition to RTS failed\n", ch->sess_name,
2520      * Note: calling srpt_close_ch() if the transition to the LIVE state
2521      * fails is not necessary, since a failed transition means that function
2522      * has already been invoked from another thread.
2524     if (!srpt_set_ch_state(ch, CH_LIVE)) {
2525         pr_err("%s-%d: channel transition to LIVE state failed\n",
2526                ch->sess_name, ch->qp->qp_num);
2530     /* Trigger wait list processing. */
2531     ret = srpt_zerolength_write(ch);
2532     WARN_ONCE(ret < 0, "%d\n", ret);
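/*
 * Editor's note: a minimal sketch of the "zero-length RDMA WRITE as a
 * completion trigger" technique used by srpt_zerolength_write() above. A
 * work request with no scatter/gather entries still generates a send-queue
 * completion, whose ->done callback can then resume queued work such as
 * draining ch->cmd_wait_list. The function name example_zerolength_write is
 * hypothetical; the ib_rdma_wr/ib_post_send API is the stock RDMA core API.
 */
#if 0
static int example_zerolength_write(struct ib_qp *qp, struct ib_cqe *cqe)
{
    struct ib_rdma_wr wr = {};

    wr.wr.opcode = IB_WR_RDMA_WRITE;
    wr.wr.wr_cqe = cqe;                 /* cqe->done runs on completion */
    wr.wr.send_flags = IB_SEND_SIGNALED;
    wr.wr.num_sge = 0;                  /* zero-length: nothing to transfer */

    return ib_post_send(qp, &wr.wr, NULL);
}
#endif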
2536  * srpt_cm_handler - IB connection manager callback function
2537  * @cm_id: IB/CM connection identifier.
2538  * @event: IB/CM event.
2540  * A non-zero return value will cause the caller to destroy the CM ID.
2542  * Note: srpt_cm_handler() must only return a non-zero value when transferring
2543  * ownership of the cm_id to a channel by srpt_cm_req_recv() failed. Returning
2544  * a non-zero value in any other case will trigger a race with the
2545  * ib_destroy_cm_id() call in srpt_release_channel().
2547 static int srpt_cm_handler(struct ib_cm_id *cm_id,
2548                            const struct ib_cm_event *event)
2550     struct srpt_rdma_ch *ch = cm_id->context;
2554     switch (event->event) {
2555     case IB_CM_REQ_RECEIVED:
2556         ret = srpt_ib_cm_req_recv(cm_id, &event->param.req_rcvd,
2557                                   event->private_data);
2559     case IB_CM_REJ_RECEIVED:
2560         srpt_cm_rej_recv(ch, event->param.rej_rcvd.reason,
2561                          event->private_data,
2562                          IB_CM_REJ_PRIVATE_DATA_SIZE);
2564     case IB_CM_RTU_RECEIVED:
2565     case IB_CM_USER_ESTABLISHED:
2566         srpt_cm_rtu_recv(ch);
2568     case IB_CM_DREQ_RECEIVED:
2569         srpt_disconnect_ch(ch);
2571     case IB_CM_DREP_RECEIVED:
2572         pr_info("Received CM DREP message for ch %s-%d.\n",
2573                 ch->sess_name, ch->qp->qp_num);
2576     case IB_CM_TIMEWAIT_EXIT:
2577         pr_info("Received CM TimeWait exit for ch %s-%d.\n",
2578                 ch->sess_name, ch->qp->qp_num);
2581     case IB_CM_REP_ERROR:
2582         pr_info("Received CM REP error for ch %s-%d.\n", ch->sess_name,
2585     case IB_CM_DREQ_ERROR:
2586         pr_info("Received CM DREQ ERROR event.\n");
2588     case IB_CM_MRA_RECEIVED:
2589         pr_info("Received CM MRA event\n");
2592         pr_err("received unrecognized CM event %d\n", event->event);
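/*
 * Editor's note: a minimal sketch (hypothetical handler, not driver code) of
 * the return-value contract documented above: only a failed login (REQ)
 * may propagate a non-zero value back to the IB/CM core; every other event
 * must return 0 so the core does not destroy a cm_id that a live channel
 * still owns.
 */
#if 0
static int example_cm_handler(struct ib_cm_id *cm_id,
                              const struct ib_cm_event *event)
{
    int ret = 0;

    if (event->event == IB_CM_REQ_RECEIVED)
        ret = -EINVAL;  /* e.g. login rejected: caller may destroy cm_id */

    return ret;         /* 0 for every event on an established channel */
}
#endif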
2599 static int srpt_rdma_cm_handler(struct rdma_cm_id *cm_id,
2600                                 struct rdma_cm_event *event)
2602     struct srpt_rdma_ch *ch = cm_id->context;
2605     switch (event->event) {
2606     case RDMA_CM_EVENT_CONNECT_REQUEST:
2607         ret = srpt_rdma_cm_req_recv(cm_id, event);
2609     case RDMA_CM_EVENT_REJECTED:
2610         srpt_cm_rej_recv(ch, event->status,
2611                          event->param.conn.private_data,
2612                          event->param.conn.private_data_len);
2614     case RDMA_CM_EVENT_ESTABLISHED:
2615         srpt_cm_rtu_recv(ch);
2617     case RDMA_CM_EVENT_DISCONNECTED:
2618         if (ch->state < CH_DISCONNECTING)
2619             srpt_disconnect_ch(ch);
2623     case RDMA_CM_EVENT_TIMEWAIT_EXIT:
2626     case RDMA_CM_EVENT_UNREACHABLE:
2627         pr_info("Received CM REP error for ch %s-%d.\n", ch->sess_name,
2630     case RDMA_CM_EVENT_DEVICE_REMOVAL:
2631     case RDMA_CM_EVENT_ADDR_CHANGE:
2634         pr_err("received unrecognized RDMA CM event %d\n",
2642 static int srpt_write_pending_status(struct se_cmd *se_cmd)
2644     struct srpt_send_ioctx *ioctx;
2646     ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
2647     return ioctx->state == SRPT_STATE_NEED_DATA;

2651  * srpt_write_pending - Start data transfer from initiator to target (write).
2653 static int srpt_write_pending(struct se_cmd *se_cmd)
2655     struct srpt_send_ioctx *ioctx =
2656         container_of(se_cmd, struct srpt_send_ioctx, cmd);
2657     struct srpt_rdma_ch *ch = ioctx->ch;
2658     struct ib_send_wr *first_wr = NULL;
2659     struct ib_cqe *cqe = &ioctx->rdma_cqe;
2660     enum srpt_command_state new_state;
2663     new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);
2664     WARN_ON(new_state == SRPT_STATE_DONE);
2666     if (atomic_sub_return(ioctx->n_rdma, &ch->sq_wr_avail) < 0) {
2667         pr_warn("%s: IB send queue full (needed %d)\n",
2668                 __func__, ioctx->n_rdma);
2673     cqe->done = srpt_rdma_read_done;
2674     for (i = ioctx->n_rw_ctx - 1; i >= 0; i--) {
2675         struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
2677         first_wr = rdma_rw_ctx_wrs(&ctx->rw, ch->qp, ch->sport->port,
2682     ret = ib_post_send(ch->qp, first_wr, NULL);
2684         pr_err("%s: ib_post_send() returned %d for %d (avail: %d)\n",
2685                __func__, ret, ioctx->n_rdma,
2686                atomic_read(&ch->sq_wr_avail));
2692     atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
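/*
 * Editor's note: a minimal sketch (not driver code) of the work-request
 * chaining pattern used in srpt_write_pending() above. rdma_rw_ctx_wrs()
 * prepends the WRs of one R/W context to an existing chain, so walking the
 * contexts in reverse order and posting the resulting head issues the whole
 * transfer with a single ib_post_send(). Only the tail context carries the
 * completion cqe, matching the driver's "cqe, then NULL" pattern. The
 * function name example_post_rw_chain is hypothetical.
 */
#if 0
static int example_post_rw_chain(struct ib_qp *qp, u8 port_num,
                                 struct rdma_rw_ctx *ctxs, int n_ctx,
                                 struct ib_cqe *last_cqe)
{
    struct ib_send_wr *first_wr = NULL;
    int i;

    for (i = n_ctx - 1; i >= 0; i--)
        first_wr = rdma_rw_ctx_wrs(&ctxs[i], qp, port_num,
                                   i == n_ctx - 1 ? last_cqe : NULL,
                                   first_wr);

    return ib_post_send(qp, first_wr, NULL);
}
#endif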
2696 static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
2698     switch (tcm_mgmt_status) {
2699     case TMR_FUNCTION_COMPLETE:
2700         return SRP_TSK_MGMT_SUCCESS;
2701     case TMR_FUNCTION_REJECTED:
2702         return SRP_TSK_MGMT_FUNC_NOT_SUPP;
2704     return SRP_TSK_MGMT_FAILED;
2708  * srpt_queue_response - transmit the response to a SCSI command
2709  * @cmd: SCSI target command.
2711  * Callback function called by the TCM core. Must not block since it can be
2712  * invoked on the context of the IB completion handler.
2714 static void srpt_queue_response(struct se_cmd *cmd)
2716     struct srpt_send_ioctx *ioctx =
2717         container_of(cmd, struct srpt_send_ioctx, cmd);
2718     struct srpt_rdma_ch *ch = ioctx->ch;
2719     struct srpt_device *sdev = ch->sport->sdev;
2720     struct ib_send_wr send_wr, *first_wr = &send_wr;
2722     enum srpt_command_state state;
2723     int resp_len, ret, i;
2728     state = ioctx->state;
2730     case SRPT_STATE_NEW:
2731     case SRPT_STATE_DATA_IN:
2732         ioctx->state = SRPT_STATE_CMD_RSP_SENT;
2734     case SRPT_STATE_MGMT:
2735         ioctx->state = SRPT_STATE_MGMT_RSP_SENT;
2738         WARN(true, "ch %p; cmd %d: unexpected command state %d\n",
2739              ch, ioctx->ioctx.index, ioctx->state);
2743     if (unlikely(WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT)))
2746     /* For read commands, transfer the data to the initiator. */
2747     if (ioctx->cmd.data_direction == DMA_FROM_DEVICE &&
2748         ioctx->cmd.data_length &&
2749         !ioctx->queue_status_only) {
2750         for (i = ioctx->n_rw_ctx - 1; i >= 0; i--) {
2751             struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
2753             first_wr = rdma_rw_ctx_wrs(&ctx->rw, ch->qp,
2754                                        ch->sport->port, NULL, first_wr);
2758     if (state != SRPT_STATE_MGMT)
2759         resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->cmd.tag,
2763             = tcm_to_srp_tsk_mgmt_status(cmd->se_tmr_req->response);
2764         resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status,
2768     atomic_inc(&ch->req_lim);
2770     if (unlikely(atomic_sub_return(1 + ioctx->n_rdma,
2771                                    &ch->sq_wr_avail) < 0)) {
2772         pr_warn("%s: IB send queue full (needed %d)\n",
2773                 __func__, ioctx->n_rdma);
2778     ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, resp_len,
2781     sge.addr = ioctx->ioctx.dma;
2782     sge.length = resp_len;
2783     sge.lkey = sdev->lkey;
2785     ioctx->ioctx.cqe.done = srpt_send_done;
2786     send_wr.next = NULL;
2787     send_wr.wr_cqe = &ioctx->ioctx.cqe;
2788     send_wr.sg_list = &sge;
2789     send_wr.num_sge = 1;
2790     send_wr.opcode = IB_WR_SEND;
2791     send_wr.send_flags = IB_SEND_SIGNALED;
2793     ret = ib_post_send(ch->qp, first_wr, NULL);
2795         pr_err("%s: sending cmd response failed for tag %llu (%d)\n",
2796                __func__, ioctx->cmd.tag, ret);
2803     atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);
2804     atomic_dec(&ch->req_lim);
2805     srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
2806     target_put_sess_cmd(&ioctx->cmd);
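/*
 * Editor's note: a minimal sketch (not driver code) of the send-queue
 * accounting used in srpt_queue_response() and srpt_write_pending() above:
 * WQEs are reserved from ch->sq_wr_avail up front and handed back if the
 * reservation overshoots or the post fails. The function name
 * example_reserve_and_post is hypothetical.
 */
#if 0
static int example_reserve_and_post(struct srpt_rdma_ch *ch,
                                    struct ib_send_wr *first_wr, int n_wqe)
{
    int ret;

    if (atomic_sub_return(n_wqe, &ch->sq_wr_avail) < 0) {
        atomic_add(n_wqe, &ch->sq_wr_avail);    /* roll back the reservation */
        return -ENOMEM;
    }

    ret = ib_post_send(ch->qp, first_wr, NULL);
    if (ret)
        atomic_add(n_wqe, &ch->sq_wr_avail);    /* posting failed: release */

    return ret;
}
#endif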
2809 static int srpt_queue_data_in(struct se_cmd *cmd)
2811     srpt_queue_response(cmd);

2815 static void srpt_queue_tm_rsp(struct se_cmd *cmd)
2817     srpt_queue_response(cmd);

2821  * This function is called for aborted commands if no response is sent to the
2822  * initiator. Make sure that the credits freed by aborting a command are
2823  * returned to the initiator the next time a response is sent by incrementing
2824  * ch->req_lim_delta.
2826 static void srpt_aborted_task(struct se_cmd *cmd)
2828     struct srpt_send_ioctx *ioctx = container_of(cmd,
2829                 struct srpt_send_ioctx, cmd);
2830     struct srpt_rdma_ch *ch = ioctx->ch;
2832     atomic_inc(&ch->req_lim_delta);
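/*
 * Editor's note: a minimal sketch (not driver code, exact response-building
 * details are an assumption) of how the credit accumulated above is handed
 * back: whatever has gathered in ch->req_lim_delta is claimed atomically and
 * folded into the req_lim_delta field of the next SRP response, so aborted
 * commands do not permanently shrink the initiator's request limit.
 */
#if 0
static __be32 example_consume_req_lim_delta(struct srpt_rdma_ch *ch)
{
    /* Atomically claim whatever has accumulated and reset it to zero. */
    int delta = atomic_xchg(&ch->req_lim_delta, 0);

    return cpu_to_be32(delta + 1);  /* +1 for the response being sent now */
}
#endif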
2835 static int srpt_queue_status(struct se_cmd *cmd)
2837     struct srpt_send_ioctx *ioctx;
2839     ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
2840     BUG_ON(ioctx->sense_data != cmd->sense_buffer);
2841     if (cmd->se_cmd_flags &
2842         (SCF_TRANSPORT_TASK_SENSE | SCF_EMULATED_TASK_SENSE))
2843         WARN_ON(cmd->scsi_status != SAM_STAT_CHECK_CONDITION);
2844     ioctx->queue_status_only = true;
2845     srpt_queue_response(cmd);
2849 static void srpt_refresh_port_work(struct work_struct *work)
2851     struct srpt_port *sport = container_of(work, struct srpt_port, work);
2853     srpt_refresh_port(sport);

2856 static bool srpt_ch_list_empty(struct srpt_port *sport)
2858     struct srpt_nexus *nexus;
2862     list_for_each_entry(nexus, &sport->nexus_list, entry)
2863         if (!list_empty(&nexus->ch_list))

2871  * srpt_release_sport - disable login and wait for associated channels
2872  * @sport: SRPT HCA port.
2874 static int srpt_release_sport(struct srpt_port *sport)
2876     struct srpt_nexus *nexus, *next_n;
2877     struct srpt_rdma_ch *ch;
2879     WARN_ON_ONCE(irqs_disabled());
2881     mutex_lock(&sport->mutex);
2882     srpt_set_enabled(sport, false);
2883     mutex_unlock(&sport->mutex);
2885     while (wait_event_timeout(sport->ch_releaseQ,
2886                               srpt_ch_list_empty(sport), 5 * HZ) <= 0) {
2887         pr_info("%s_%d: waiting for session unregistration ...\n",
2888                 sport->sdev->device->name, sport->port);
2890         list_for_each_entry(nexus, &sport->nexus_list, entry) {
2891             list_for_each_entry(ch, &nexus->ch_list, list) {
2892                 pr_info("%s-%d: state %s\n",
2893                         ch->sess_name, ch->qp->qp_num,
2894                         get_ch_state_name(ch->state));
2900     mutex_lock(&sport->mutex);
2901     list_for_each_entry_safe(nexus, next_n, &sport->nexus_list, entry) {
2902         list_del(&nexus->entry);
2903         kfree_rcu(nexus, rcu);
2905     mutex_unlock(&sport->mutex);
2910 static struct se_wwn *__srpt_lookup_wwn(const char *name)
2912     struct ib_device *dev;
2913     struct srpt_device *sdev;
2914     struct srpt_port *sport;
2917     list_for_each_entry(sdev, &srpt_dev_list, list) {
2922         for (i = 0; i < dev->phys_port_cnt; i++) {
2923             sport = &sdev->port[i];
2925             if (strcmp(sport->port_guid, name) == 0)
2926                 return &sport->port_guid_wwn;
2927             if (strcmp(sport->port_gid, name) == 0)
2928                 return &sport->port_gid_wwn;

2935 static struct se_wwn *srpt_lookup_wwn(const char *name)
2939     spin_lock(&srpt_dev_lock);
2940     wwn = __srpt_lookup_wwn(name);
2941     spin_unlock(&srpt_dev_lock);
2946 static void srpt_free_srq(struct srpt_device *sdev)
2951     ib_destroy_srq(sdev->srq);
2952     srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
2953                          sdev->srq_size, srp_max_req_size, DMA_FROM_DEVICE);

2957 static int srpt_alloc_srq(struct srpt_device *sdev)
2959     struct ib_srq_init_attr srq_attr = {
2960         .event_handler = srpt_srq_event,
2961         .srq_context = (void *)sdev,
2962         .attr.max_wr = sdev->srq_size,
2964         .srq_type = IB_SRQT_BASIC,
2966     struct ib_device *device = sdev->device;
2970     WARN_ON_ONCE(sdev->srq);
2971     srq = ib_create_srq(sdev->pd, &srq_attr);
2973         pr_debug("ib_create_srq() failed: %ld\n", PTR_ERR(srq));
2974         return PTR_ERR(srq);
2977     pr_debug("create SRQ #wr= %d max_allow=%d dev= %s\n", sdev->srq_size,
2978              sdev->device->attrs.max_srq_wr, device->name);
2980     sdev->ioctx_ring = (struct srpt_recv_ioctx **)
2981         srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
2982                               sizeof(*sdev->ioctx_ring[0]),
2983                               srp_max_req_size, DMA_FROM_DEVICE);
2984     if (!sdev->ioctx_ring) {
2985         ib_destroy_srq(srq);
2989     sdev->use_srq = true;
2992     for (i = 0; i < sdev->srq_size; ++i) {
2993         INIT_LIST_HEAD(&sdev->ioctx_ring[i]->wait_list);
2994         srpt_post_recv(sdev, NULL, sdev->ioctx_ring[i]);

3000 static int srpt_use_srq(struct srpt_device *sdev, bool use_srq)
3002     struct ib_device *device = sdev->device;
3006         srpt_free_srq(sdev);
3007         sdev->use_srq = false;
3008     } else if (use_srq && !sdev->srq) {
3009         ret = srpt_alloc_srq(sdev);
3011     pr_debug("%s(%s): use_srq = %d; ret = %d\n", __func__, device->name,
3012              sdev->use_srq, ret);
3017  * srpt_add_one - InfiniBand device addition callback function
3018  * @device: Describes a HCA.
3020 static void srpt_add_one(struct ib_device *device)
3022     struct srpt_device *sdev;
3023     struct srpt_port *sport;
3026     pr_debug("device = %p\n", device);
3028     sdev = kzalloc(struct_size(sdev, port, device->phys_port_cnt),
3033     sdev->device = device;
3034     mutex_init(&sdev->sdev_mutex);
3036     sdev->pd = ib_alloc_pd(device, 0);
3037     if (IS_ERR(sdev->pd))
3040     sdev->lkey = sdev->pd->local_dma_lkey;
3042     sdev->srq_size = min(srpt_srq_size, sdev->device->attrs.max_srq_wr);
3044     srpt_use_srq(sdev, sdev->port[0].port_attrib.use_srq);
3046     if (!srpt_service_guid)
3047         srpt_service_guid = be64_to_cpu(device->node_guid);
3049     if (rdma_port_get_link_layer(device, 1) == IB_LINK_LAYER_INFINIBAND)
3050         sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
3051     if (IS_ERR(sdev->cm_id)) {
3052         pr_info("ib_create_cm_id() failed: %ld\n",
3053                 PTR_ERR(sdev->cm_id));
3059     /* print out target login information */
3060     pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx,"
3061              "pkey=ffff,service_id=%016llx\n", srpt_service_guid,
3062              srpt_service_guid, srpt_service_guid);
3065  * We do not have a consistent service_id (i.e. also the id_ext of the
3066  * target_id) to identify this target. We currently use the GUID of the first
3067  * HCA in the system as service_id; therefore, the target_id will change
3068  * if this HCA goes bad and is replaced by a different HCA.
3071         ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0) :
3074         pr_err("ib_cm_listen() failed: %d (cm_id state = %d)\n", ret,
3075                sdev->cm_id->state);
3079     INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
3080                           srpt_event_handler);
3081     ib_register_event_handler(&sdev->event_handler);
3083     for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
3084         sport = &sdev->port[i - 1];
3085         INIT_LIST_HEAD(&sport->nexus_list);
3086         init_waitqueue_head(&sport->ch_releaseQ);
3087         mutex_init(&sport->mutex);
3090         sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE;
3091         sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE;
3092         sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
3093         sport->port_attrib.use_srq = false;
3094         INIT_WORK(&sport->work, srpt_refresh_port_work);
3096         if (srpt_refresh_port(sport)) {
3097             pr_err("MAD registration failed for %s-%d.\n",
3098                    sdev->device->name, i);
3103     spin_lock(&srpt_dev_lock);
3104     list_add_tail(&sdev->list, &srpt_dev_list);
3105     spin_unlock(&srpt_dev_lock);
3108     ib_set_client_data(device, &srpt_client, sdev);
3109     pr_debug("added %s.\n", device->name);
3113     ib_unregister_event_handler(&sdev->event_handler);
3116     ib_destroy_cm_id(sdev->cm_id);
3118     srpt_free_srq(sdev);
3119     ib_dealloc_pd(sdev->pd);
3124     pr_info("%s(%s) failed.\n", __func__, device->name);
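/*
 * Editor's note: a minimal sketch (not driver code) of how all advertised
 * login identifiers derive from the single service GUID, mirroring the
 * pr_debug() above. The GUID value in the comment is hypothetical.
 */
#if 0
static void example_print_login_info(u64 service_guid)
{
    /* e.g. service_guid == 0x0002c9030012abcdULL (made-up value) */
    pr_info("id_ext=%016llx,ioc_guid=%016llx,pkey=ffff,service_id=%016llx\n",
            service_guid, service_guid, service_guid);
}
#endif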
3129  * srpt_remove_one - InfiniBand device removal callback function
3130  * @device: Describes a HCA.
3131  * @client_data: The value passed as the third argument to ib_set_client_data().
3133 static void srpt_remove_one(struct ib_device *device, void *client_data)
3135     struct srpt_device *sdev = client_data;
3139         pr_info("%s(%s): nothing to do.\n", __func__, device->name);
3143     srpt_unregister_mad_agent(sdev);
3145     ib_unregister_event_handler(&sdev->event_handler);
3147     /* Cancel any work queued by the just unregistered IB event handler. */
3148     for (i = 0; i < sdev->device->phys_port_cnt; i++)
3149         cancel_work_sync(&sdev->port[i].work);
3152         ib_destroy_cm_id(sdev->cm_id);
3154     ib_set_client_data(device, &srpt_client, NULL);
3157      * Unregistering a target must happen after destroying sdev->cm_id
3158      * such that no new SRP_LOGIN_REQ information units can arrive while
3159      * destroying the target.
3161     spin_lock(&srpt_dev_lock);
3162     list_del(&sdev->list);
3163     spin_unlock(&srpt_dev_lock);
3165     for (i = 0; i < sdev->device->phys_port_cnt; i++)
3166         srpt_release_sport(&sdev->port[i]);
3168     srpt_free_srq(sdev);
3170     ib_dealloc_pd(sdev->pd);
3175 static struct ib_client srpt_client = {
3177     .add = srpt_add_one,
3178     .remove = srpt_remove_one

3181 static int srpt_check_true(struct se_portal_group *se_tpg)

3186 static int srpt_check_false(struct se_portal_group *se_tpg)

3191 static char *srpt_get_fabric_name(void)

3196 static struct srpt_port *srpt_tpg_to_sport(struct se_portal_group *tpg)
3198     return tpg->se_tpg_wwn->priv;

3201 static char *srpt_get_fabric_wwn(struct se_portal_group *tpg)
3203     struct srpt_port *sport = srpt_tpg_to_sport(tpg);
3205     WARN_ON_ONCE(tpg != &sport->port_guid_tpg &&
3206                  tpg != &sport->port_gid_tpg);
3207     return tpg == &sport->port_guid_tpg ? sport->port_guid :

3211 static u16 srpt_get_tag(struct se_portal_group *tpg)

3216 static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg)

3221 static void srpt_release_cmd(struct se_cmd *se_cmd)
3223     struct srpt_send_ioctx *ioctx = container_of(se_cmd,
3224                 struct srpt_send_ioctx, cmd);
3225     struct srpt_rdma_ch *ch = ioctx->ch;
3226     unsigned long flags;
3228     WARN_ON_ONCE(ioctx->state != SRPT_STATE_DONE &&
3229                  !(ioctx->cmd.transport_state & CMD_T_ABORTED));
3231     if (ioctx->n_rw_ctx) {
3232         srpt_free_rw_ctxs(ch, ioctx);
3233         ioctx->n_rw_ctx = 0;
3236     spin_lock_irqsave(&ch->spinlock, flags);
3237     list_add(&ioctx->free_list, &ch->free_list);
3238     spin_unlock_irqrestore(&ch->spinlock, flags);
3242  * srpt_close_session - forcibly close a session
3243  * @se_sess: SCSI target session.
3245  * Callback function invoked by the TCM core to clean up sessions associated
3246  * with a node ACL when the user invokes
3247  * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
3249 static void srpt_close_session(struct se_session *se_sess)
3251     struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr;
3253     srpt_disconnect_ch_sync(ch);

3257  * srpt_sess_get_index - return the value of scsiAttIntrPortIndex (SCSI-MIB)
3258  * @se_sess: SCSI target session.
3260  * A quote from RFC 4455 (SCSI-MIB) about this MIB object:
3261  * This object represents an arbitrary integer used to uniquely identify a
3262  * particular attached remote initiator port to a particular SCSI target port
3263  * within a particular SCSI target device within a particular SCSI instance.
3265 static u32 srpt_sess_get_index(struct se_session *se_sess)

3270 static void srpt_set_default_node_attrs(struct se_node_acl *nacl)

3274 /* Note: only used from inside debug printk's by the TCM core. */
3275 static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd)
3277     struct srpt_send_ioctx *ioctx;
3279     ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
3280     return ioctx->state;
3283 static int srpt_parse_guid(u64 *guid, const char *name)
3288     if (sscanf(name, "%hx:%hx:%hx:%hx", &w[0], &w[1], &w[2], &w[3]) != 4)
3290     *guid = get_unaligned_be64(w);

3297  * srpt_parse_i_port_id - parse an initiator port ID
3298  * @name: ASCII representation of a 128-bit initiator port ID.
3299  * @i_port_id: Binary 128-bit port ID.
3301 static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
3304     unsigned len, count, leading_zero_bytes;
3308     if (strncasecmp(p, "0x", 2) == 0)
3314     count = min(len / 2, 16U);
3315     leading_zero_bytes = 16 - count;
3316     memset(i_port_id, 0, leading_zero_bytes);
3317     ret = hex2bin(i_port_id + leading_zero_bytes, p, count);

3324  * configfs callback function invoked for mkdir
3325  * /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
3327  * i_port_id must be an initiator port GUID, GID or IP address. See also the
3328  * target_alloc_session() calls in this driver. Examples of valid initiator
3330  * 0x0000000000000000505400fffe4a0b7b
3331  * 0000000000000000505400fffe4a0b7b
3332  * 5054:00ff:fe4a:0b7b
3335 static int srpt_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
3337     struct sockaddr_storage sa;
3342     ret = srpt_parse_guid(&guid, name);
3344         ret = srpt_parse_i_port_id(i_port_id, name);
3346         ret = inet_pton_with_scope(&init_net, AF_UNSPEC, name, NULL,
3349         pr_err("invalid initiator port ID %s\n", name);
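/*
 * Editor's note: a minimal sketch (not driver code) of how a short hex
 * string is right-aligned into a 16-byte port ID, mirroring
 * srpt_parse_i_port_id() above. For example, "505400fffe4a0b7b" (8 bytes of
 * hex) lands in bytes 8..15 while bytes 0..7 are zeroed, matching the valid
 * initiator ID formats listed in the comment above. The function name
 * example_parse_port_id is hypothetical.
 */
#if 0
static int example_parse_port_id(u8 id[16], const char *hex)
{
    size_t len = strlen(hex);
    size_t count = min(len / 2, (size_t)16);
    size_t pad = 16 - count;            /* leading zero bytes */

    memset(id, 0, pad);
    return hex2bin(id + pad, hex, count);
}
#endif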
3353 static ssize_t srpt_tpg_attrib_srp_max_rdma_size_show(struct config_item *item,
3356     struct se_portal_group *se_tpg = attrib_to_tpg(item);
3357     struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3359     return sprintf(page, "%u\n", sport->port_attrib.srp_max_rdma_size);

3362 static ssize_t srpt_tpg_attrib_srp_max_rdma_size_store(struct config_item *item,
3363             const char *page, size_t count)
3365     struct se_portal_group *se_tpg = attrib_to_tpg(item);
3366     struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3370     ret = kstrtoul(page, 0, &val);
3372         pr_err("kstrtoul() failed with ret: %d\n", ret);
3375     if (val > MAX_SRPT_RDMA_SIZE) {
3376         pr_err("val: %lu exceeds MAX_SRPT_RDMA_SIZE: %d\n", val,
3377                MAX_SRPT_RDMA_SIZE);
3380     if (val < DEFAULT_MAX_RDMA_SIZE) {
3381         pr_err("val: %lu smaller than DEFAULT_MAX_RDMA_SIZE: %d\n",
3382                val, DEFAULT_MAX_RDMA_SIZE);
3385     sport->port_attrib.srp_max_rdma_size = val;

3390 static ssize_t srpt_tpg_attrib_srp_max_rsp_size_show(struct config_item *item,
3393     struct se_portal_group *se_tpg = attrib_to_tpg(item);
3394     struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3396     return sprintf(page, "%u\n", sport->port_attrib.srp_max_rsp_size);

3399 static ssize_t srpt_tpg_attrib_srp_max_rsp_size_store(struct config_item *item,
3400             const char *page, size_t count)
3402     struct se_portal_group *se_tpg = attrib_to_tpg(item);
3403     struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3407     ret = kstrtoul(page, 0, &val);
3409         pr_err("kstrtoul() failed with ret: %d\n", ret);
3412     if (val > MAX_SRPT_RSP_SIZE) {
3413         pr_err("val: %lu exceeds MAX_SRPT_RSP_SIZE: %d\n", val,
3417     if (val < MIN_MAX_RSP_SIZE) {
3418         pr_err("val: %lu smaller than MIN_MAX_RSP_SIZE: %d\n", val,
3422     sport->port_attrib.srp_max_rsp_size = val;

3427 static ssize_t srpt_tpg_attrib_srp_sq_size_show(struct config_item *item,
3430     struct se_portal_group *se_tpg = attrib_to_tpg(item);
3431     struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3433     return sprintf(page, "%u\n", sport->port_attrib.srp_sq_size);

3436 static ssize_t srpt_tpg_attrib_srp_sq_size_store(struct config_item *item,
3437             const char *page, size_t count)
3439     struct se_portal_group *se_tpg = attrib_to_tpg(item);
3440     struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3444     ret = kstrtoul(page, 0, &val);
3446         pr_err("kstrtoul() failed with ret: %d\n", ret);
3449     if (val > MAX_SRPT_SRQ_SIZE) {
3450         pr_err("val: %lu exceeds MAX_SRPT_SRQ_SIZE: %d\n", val,
3454     if (val < MIN_SRPT_SRQ_SIZE) {
3455         pr_err("val: %lu smaller than MIN_SRPT_SRQ_SIZE: %d\n", val,
3459     sport->port_attrib.srp_sq_size = val;

3464 static ssize_t srpt_tpg_attrib_use_srq_show(struct config_item *item,
3467     struct se_portal_group *se_tpg = attrib_to_tpg(item);
3468     struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3470     return sprintf(page, "%d\n", sport->port_attrib.use_srq);

3473 static ssize_t srpt_tpg_attrib_use_srq_store(struct config_item *item,
3474             const char *page, size_t count)
3476     struct se_portal_group *se_tpg = attrib_to_tpg(item);
3477     struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3478     struct srpt_device *sdev = sport->sdev;
3483     ret = kstrtoul(page, 0, &val);
3489     ret = mutex_lock_interruptible(&sdev->sdev_mutex);
3492     ret = mutex_lock_interruptible(&sport->mutex);
3495     enabled = sport->enabled;
3496     /* Log out all initiator systems before changing 'use_srq'. */
3497     srpt_set_enabled(sport, false);
3498     sport->port_attrib.use_srq = val;
3499     srpt_use_srq(sdev, sport->port_attrib.use_srq);
3500     srpt_set_enabled(sport, enabled);
3502     mutex_unlock(&sport->mutex);
3504     mutex_unlock(&sdev->sdev_mutex);

3509 CONFIGFS_ATTR(srpt_tpg_attrib_, srp_max_rdma_size);
3510 CONFIGFS_ATTR(srpt_tpg_attrib_, srp_max_rsp_size);
3511 CONFIGFS_ATTR(srpt_tpg_attrib_, srp_sq_size);
3512 CONFIGFS_ATTR(srpt_tpg_attrib_, use_srq);

3514 static struct configfs_attribute *srpt_tpg_attrib_attrs[] = {
3515     &srpt_tpg_attrib_attr_srp_max_rdma_size,
3516     &srpt_tpg_attrib_attr_srp_max_rsp_size,
3517     &srpt_tpg_attrib_attr_srp_sq_size,
3518     &srpt_tpg_attrib_attr_use_srq,
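/*
 * Editor's note (illustrative; the directory layout follows the configfs
 * pattern already quoted in the comments above and is an assumption, only
 * the attribute names come from the code): the four attributes appear under
 * /sys/kernel/config/target/srpt/$port/$tpg/attrib/ and are typically set
 * before the TPG is enabled, e.g.
 *
 *   echo 4096 > .../attrib/srp_max_rsp_size
 *   echo 1    > .../attrib/use_srq
 *
 * use_srq is special-cased in srpt_tpg_attrib_use_srq_store() above: it logs
 * out all initiators, switches the SRQ mode and then restores the previous
 * enabled state.
 */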
3522 static struct rdma_cm_id *srpt_create_rdma_id(struct sockaddr *listen_addr)
3524     struct rdma_cm_id *rdma_cm_id;
3527     rdma_cm_id = rdma_create_id(&init_net, srpt_rdma_cm_handler,
3528                                 NULL, RDMA_PS_TCP, IB_QPT_RC);
3529     if (IS_ERR(rdma_cm_id)) {
3530         pr_err("RDMA/CM ID creation failed: %ld\n",
3531                PTR_ERR(rdma_cm_id));
3535     ret = rdma_bind_addr(rdma_cm_id, listen_addr);
3539         snprintf(addr_str, sizeof(addr_str), "%pISp", listen_addr);
3540         pr_err("Binding RDMA/CM ID to address %s failed: %d\n",
3542         rdma_destroy_id(rdma_cm_id);
3543         rdma_cm_id = ERR_PTR(ret);
3547     ret = rdma_listen(rdma_cm_id, 128);
3549         pr_err("rdma_listen() failed: %d\n", ret);
3550         rdma_destroy_id(rdma_cm_id);
3551         rdma_cm_id = ERR_PTR(ret);

3558 static ssize_t srpt_rdma_cm_port_show(struct config_item *item, char *page)
3560     return sprintf(page, "%d\n", rdma_cm_port);

3563 static ssize_t srpt_rdma_cm_port_store(struct config_item *item,
3564                                        const char *page, size_t count)
3566     struct sockaddr_in addr4 = { .sin_family = AF_INET };
3567     struct sockaddr_in6 addr6 = { .sin6_family = AF_INET6 };
3568     struct rdma_cm_id *new_id = NULL;
3572     ret = kstrtou16(page, 0, &val);
3576     if (rdma_cm_port == val)
3580         addr6.sin6_port = cpu_to_be16(val);
3581         new_id = srpt_create_rdma_id((struct sockaddr *)&addr6);
3582         if (IS_ERR(new_id)) {
3583             addr4.sin_port = cpu_to_be16(val);
3584             new_id = srpt_create_rdma_id((struct sockaddr *)&addr4);
3585             if (IS_ERR(new_id)) {
3586                 ret = PTR_ERR(new_id);
3592     mutex_lock(&rdma_cm_mutex);
3594     swap(rdma_cm_id, new_id);
3595     mutex_unlock(&rdma_cm_mutex);
3598         rdma_destroy_id(new_id);

3604 CONFIGFS_ATTR(srpt_, rdma_cm_port);

3606 static struct configfs_attribute *srpt_da_attrs[] = {
3607     &srpt_attr_rdma_cm_port,
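/*
 * Editor's note (illustrative): writing a port number to the rdma_cm_port
 * discovery attribute, e.g.
 *
 *   echo 5500 > /sys/kernel/config/target/srpt/discovery_auth/rdma_cm_port
 *
 * creates a new RDMA/CM listener (IPv6 first, falling back to IPv4 as shown
 * in srpt_rdma_cm_port_store() above), swaps it in under rdma_cm_mutex and
 * destroys the previous listener, if any. The exact configfs path is an
 * assumption here; only the attribute name comes from the code above.
 */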
3611 static ssize_t srpt_tpg_enable_show(struct config_item *item, char *page)
3613     struct se_portal_group *se_tpg = to_tpg(item);
3614     struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3616     return snprintf(page, PAGE_SIZE, "%d\n", (sport->enabled) ? 1 : 0);

3619 static ssize_t srpt_tpg_enable_store(struct config_item *item,
3620             const char *page, size_t count)
3622     struct se_portal_group *se_tpg = to_tpg(item);
3623     struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3627     ret = kstrtoul(page, 0, &tmp);
3629         pr_err("Unable to extract srpt_tpg_store_enable\n");
3633     if ((tmp != 0) && (tmp != 1)) {
3634         pr_err("Illegal value for srpt_tpg_store_enable: %lu\n", tmp);
3638     mutex_lock(&sport->mutex);
3639     srpt_set_enabled(sport, tmp);
3640     mutex_unlock(&sport->mutex);

3645 CONFIGFS_ATTR(srpt_tpg_, enable);

3647 static struct configfs_attribute *srpt_tpg_attrs[] = {
3648     &srpt_tpg_attr_enable,
3653  * srpt_make_tpg - configfs callback invoked for mkdir /sys/kernel/config/target/$driver/$port/$tpg
3654  * @wwn: Corresponds to $driver/$port.
3657 static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
3660     struct srpt_port *sport = wwn->priv;
3661     static struct se_portal_group *tpg;
3664     WARN_ON_ONCE(wwn != &sport->port_guid_wwn &&
3665                  wwn != &sport->port_gid_wwn);
3666     tpg = wwn == &sport->port_guid_wwn ? &sport->port_guid_tpg :
3667           &sport->port_gid_tpg;
3668     res = core_tpg_register(wwn, tpg, SCSI_PROTOCOL_SRP);
3670         return ERR_PTR(res);

3676  * srpt_drop_tpg - configfs callback invoked for rmdir /sys/kernel/config/target/$driver/$port/$tpg
3677  * @tpg: Target portal group to deregister.
3679 static void srpt_drop_tpg(struct se_portal_group *tpg)
3681     struct srpt_port *sport = srpt_tpg_to_sport(tpg);
3683     sport->enabled = false;
3684     core_tpg_deregister(tpg);

3688  * srpt_make_tport - configfs callback invoked for mkdir /sys/kernel/config/target/$driver/$port
3693 static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
3694                                       struct config_group *group,
3697     return srpt_lookup_wwn(name) ? : ERR_PTR(-EINVAL);

3701  * srpt_drop_tport - configfs callback invoked for rmdir /sys/kernel/config/target/$driver/$port
3704 static void srpt_drop_tport(struct se_wwn *wwn)

3708 static ssize_t srpt_wwn_version_show(struct config_item *item, char *buf)
3710     return scnprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION);

3713 CONFIGFS_ATTR_RO(srpt_wwn_, version);

3715 static struct configfs_attribute *srpt_wwn_attrs[] = {
3716     &srpt_wwn_attr_version,
3720 static const struct target_core_fabric_ops srpt_template = {
3721     .module = THIS_MODULE,
3723     .get_fabric_name = srpt_get_fabric_name,
3724     .tpg_get_wwn = srpt_get_fabric_wwn,
3725     .tpg_get_tag = srpt_get_tag,
3726     .tpg_check_demo_mode = srpt_check_false,
3727     .tpg_check_demo_mode_cache = srpt_check_true,
3728     .tpg_check_demo_mode_write_protect = srpt_check_true,
3729     .tpg_check_prod_mode_write_protect = srpt_check_false,
3730     .tpg_get_inst_index = srpt_tpg_get_inst_index,
3731     .release_cmd = srpt_release_cmd,
3732     .check_stop_free = srpt_check_stop_free,
3733     .close_session = srpt_close_session,
3734     .sess_get_index = srpt_sess_get_index,
3735     .sess_get_initiator_sid = NULL,
3736     .write_pending = srpt_write_pending,
3737     .write_pending_status = srpt_write_pending_status,
3738     .set_default_node_attributes = srpt_set_default_node_attrs,
3739     .get_cmd_state = srpt_get_tcm_cmd_state,
3740     .queue_data_in = srpt_queue_data_in,
3741     .queue_status = srpt_queue_status,
3742     .queue_tm_rsp = srpt_queue_tm_rsp,
3743     .aborted_task = srpt_aborted_task,
3745      * Setup function pointers for generic logic in
3746      * target_core_fabric_configfs.c
3748     .fabric_make_wwn = srpt_make_tport,
3749     .fabric_drop_wwn = srpt_drop_tport,
3750     .fabric_make_tpg = srpt_make_tpg,
3751     .fabric_drop_tpg = srpt_drop_tpg,
3752     .fabric_init_nodeacl = srpt_init_nodeacl,
3754     .tfc_discovery_attrs = srpt_da_attrs,
3755     .tfc_wwn_attrs = srpt_wwn_attrs,
3756     .tfc_tpg_base_attrs = srpt_tpg_attrs,
3757     .tfc_tpg_attrib_attrs = srpt_tpg_attrib_attrs,
3761  * srpt_init_module - kernel module initialization
3763  * Note: Since ib_register_client() registers callback functions, and since at
3764  * least one of these callback functions (srpt_add_one()) calls target core
3765  * functions, this driver must be registered with the target core before
3766  * ib_register_client() is called.
3768 static int __init srpt_init_module(void)
3773     if (srp_max_req_size < MIN_MAX_REQ_SIZE) {
3774         pr_err("invalid value %d for kernel module parameter"
3775                " srp_max_req_size -- must be at least %d.\n",
3776                srp_max_req_size, MIN_MAX_REQ_SIZE);
3780     if (srpt_srq_size < MIN_SRPT_SRQ_SIZE
3781         || srpt_srq_size > MAX_SRPT_SRQ_SIZE) {
3782         pr_err("invalid value %d for kernel module parameter"
3783                " srpt_srq_size -- must be in the range [%d..%d].\n",
3784                srpt_srq_size, MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE);
3788     ret = target_register_template(&srpt_template);
3792     ret = ib_register_client(&srpt_client);
3794         pr_err("couldn't register IB client\n");
3795         goto out_unregister_target;

3800 out_unregister_target:
3801     target_unregister_template(&srpt_template);

3806 static void __exit srpt_cleanup_module(void)
3809     rdma_destroy_id(rdma_cm_id);
3810     ib_unregister_client(&srpt_client);
3811     target_unregister_template(&srpt_template);

3814 module_init(srpt_init_module);
3815 module_exit(srpt_cleanup_module);