/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/lockdep.h>
#include <linux/inet.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");

#if !defined(CONFIG_DYNAMIC_DEBUG)
#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt)
#define DYNAMIC_DEBUG_BRANCH(descriptor) false
#endif
static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool register_always = true;
static bool never_register;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

module_param(never_register, bool, 0444);
MODULE_PARM_DESC(never_register, "Never register memory");
static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");
static bool srp_use_imm_data = true;
module_param_named(use_imm_data, srp_use_imm_data, bool, 0644);
MODULE_PARM_DESC(use_imm_data,
		 "Whether or not to request permission to use immediate data during SRP login.");

static unsigned int srp_max_imm_data = 8 * 1024;
module_param_named(max_imm_data, srp_max_imm_data, uint, 0644);
MODULE_PARM_DESC(max_imm_data, "Maximum immediate data size.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
static int srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_rename_dev(struct ib_device *device, void *client_data);
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
		const char *opname);
static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
			     const struct ib_cm_event *event);
static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
			       struct rdma_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one,
	.rename = srp_rename_dev
};

static struct ib_sa_client srp_sa_client;
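/*
 * Added note: in srp_tmo_get() and srp_tmo_set() below a negative value is
 * the internal representation of "off". srp_tmo_get() prints it as "off" and
 * srp_tmo_set() relies on srp_parse_tmo() and srp_tmo_valid() to keep
 * reconnect_delay, fast_io_fail_tmo and dev_loss_tmo mutually consistent.
 */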
static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sysfs_emit(buffer, "%d\n", tmo);
	else
		return sysfs_emit(buffer, "off\n");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	res = srp_parse_tmo(&tmo, val);
	if (res)
		goto out;

	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static const struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};
static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}
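/*
 * Added note: the helpers below manage SRP information units (IUs). Each IU
 * is a kmalloc'ed buffer that is DMA-mapped with ib_dma_map_single() so that
 * it can be posted as a send or receive buffer on the channel's QP.
 */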
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);
}
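/*
 * Added note: srp_init_ib_qp() moves a freshly created RC QP into the INIT
 * state. The P_Key index is looked up in the cached P_Key table of the local
 * port and remote READ/WRITE access is enabled so that the target can RDMA
 * directly into and out of the initiator's buffers.
 */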
static int srp_init_ib_qp(struct srp_target_port *target,
			  struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->ib_cm.pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}
static int srp_new_ib_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_ib_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->ib_cm.cm_id)
		ib_destroy_cm_id(ch->ib_cm.cm_id);
	ch->ib_cm.cm_id = new_cm_id;
	if (rdma_cap_opa_ah(target->srp_host->srp_dev->dev,
			    target->srp_host->port))
		ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_OPA;
	else
		ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_IB;
	ch->ib_cm.path.sgid = target->sgid;
	ch->ib_cm.path.dgid = target->ib_cm.orig_dgid;
	ch->ib_cm.path.pkey = target->ib_cm.pkey;
	ch->ib_cm.path.service_id = target->ib_cm.service_id;

	return 0;
}
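/*
 * Added note: srp_new_rdma_cm_id() below is the RDMA/CM counterpart of
 * srp_new_ib_cm_id(): it creates a new cm_id, resolves the destination
 * address, and only swaps the new id into the channel once resolution has
 * succeeded.
 */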
static int srp_new_rdma_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct rdma_cm_id *new_cm_id;
	int ret;

	new_cm_id = rdma_create_id(target->net, srp_rdma_cm_handler, ch,
				   RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(new_cm_id)) {
		ret = PTR_ERR(new_cm_id);
		new_cm_id = NULL;
		goto out;
	}

	init_completion(&ch->done);
	ret = rdma_resolve_addr(new_cm_id, target->rdma_cm.src_specified ?
				&target->rdma_cm.src.sa : NULL,
				&target->rdma_cm.dst.sa,
				SRP_PATH_REC_TIMEOUT_MS);
	if (ret) {
		pr_err("No route available from %pISpsc to %pISpsc (%d)\n",
		       &target->rdma_cm.src, &target->rdma_cm.dst, ret);
		goto out;
	}
	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		goto out;

	ret = ch->status;
	if (ret) {
		pr_err("Resolving address %pISpsc failed (%d)\n",
		       &target->rdma_cm.dst, ret);
		goto out;
	}

	swap(ch->rdma_cm.cm_id, new_cm_id);

out:
	if (new_cm_id)
		rdma_destroy_id(new_cm_id);

	return ret;
}

static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;

	return target->using_rdma_cm ? srp_new_rdma_cm_id(ch) :
		srp_new_ib_cm_id(ch);
}
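/*
 * Added note: the functions below manage a per-channel pool of fast
 * registration (FR) descriptors. Each descriptor wraps a memory region
 * allocated with ib_alloc_mr(); the data mapping code takes descriptors from
 * the free list to register S/G lists and returns them after the
 * corresponding rkey has been invalidated.
 */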
/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	int i, ret = -EINVAL;
	enum ib_mr_type mr_type;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	pool = kzalloc(struct_size(pool, desc, pool_size), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	if (device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
		mr_type = IB_MR_TYPE_SG_GAPS;
	else
		mr_type = IB_MR_TYPE_MEM_REG;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_mr(pd, mr_type, max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			if (ret == -ENOMEM)
				pr_info("%s: ib_alloc_mr() failed. Try to reduce max_cmd_per_lun, max_sect or ch_count\n",
					dev_name(&device->dev));
			goto destroy_pool;
		}
		d->mr = mr;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}
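/*
 * Added note: srp_fr_pool_get() and srp_fr_pool_put() below hand descriptors
 * out of and back into the pool's free list under pool->lock. The put side
 * assumes that an invalidation request for the descriptor's rkey has already
 * been queued, as documented above srp_fr_pool_put().
 */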
/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
				  dev->max_pages_per_mr);
}
/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @ch: SRP RDMA channel.
 *
 * Drain the qp before destroying it.  This avoids that the receive
 * completion handler can access the queue pair while it is
 * being destroyed.
 */
static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
	spin_lock_irq(&ch->lock);
	ib_process_cq_direct(ch->send_cq, -1);
	spin_unlock_irq(&ch->lock);

	ib_drain_qp(ch->qp);
	ib_destroy_qp(ch->qp);
}
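/*
 * Added note: srp_create_ch_ib() allocates the receive and send completion
 * queues, the RC QP and, when fast registration is used, the FR pool for one
 * RDMA channel. Only after all allocations have succeeded are the channel's
 * previous resources torn down and replaced, so a failed (re)allocation
 * leaves the existing channel state untouched.
 */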
static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	const struct ib_device_attr *attr = &dev->dev->attrs;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct srp_fr_pool *fr_pool = NULL;
	const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	/* queue_size + 1 for ib_drain_rq() */
	recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
				ch->comp_vector, IB_POLL_SOFTIRQ);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
				ch->comp_vector, IB_POLL_DIRECT);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = m * target->queue_size;
	init_attr->cap.max_recv_wr     = target->queue_size + 1;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = min(SRP_MAX_SGE, attr->max_send_sge);
	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = send_cq;
	init_attr->recv_cq             = recv_cq;

	ch->max_imm_sge = min(init_attr->cap.max_send_sge - 1U, 255U);

	if (target->using_rdma_cm) {
		ret = rdma_create_qp(ch->rdma_cm.cm_id, dev->pd, init_attr);
		qp = ch->rdma_cm.cm_id->qp;
	} else {
		qp = ib_create_qp(dev->pd, init_attr);
		if (!IS_ERR(qp)) {
			ret = srp_init_ib_qp(target, qp);
			if (ret)
				ib_destroy_qp(qp);
		} else {
			ret = PTR_ERR(qp);
		}
	}
	if (ret) {
		pr_err("QP creation failed for dev %s: %d\n",
		       dev_name(&dev->dev->dev), ret);
		goto err_send_cq;
	}

	if (dev->use_fast_reg) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	}

	if (ch->qp)
		srp_destroy_qp(ch);
	if (ch->recv_cq)
		ib_free_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_free_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	}

	kfree(init_attr);
	return 0;

err_qp:
	if (target->using_rdma_cm)
		rdma_destroy_qp(ch->rdma_cm.cm_id);
	else
		ib_destroy_qp(qp);

err_send_cq:
	ib_free_cq(send_cq);

err_recv_cq:
	ib_free_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}
/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	if (!ch->target)
		return;

	if (target->using_rdma_cm) {
		if (ch->rdma_cm.cm_id) {
			rdma_destroy_id(ch->rdma_cm.cm_id);
			ch->rdma_cm.cm_id = NULL;
		}
	} else {
		if (ch->ib_cm.cm_id) {
			ib_destroy_cm_id(ch->ib_cm.cm_id);
			ch->ib_cm.cm_id = NULL;
		}
	}

	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	}

	srp_destroy_qp(ch);
	ib_free_cq(ch->send_cq);
	ib_free_cq(ch->recv_cq);

	/*
	 * Avoid that the SCSI error handler tries to use this channel after
	 * it has been freed. The SCSI error handler can namely continue
	 * trying to perform recovery actions after scsi_remove_host()
	 * returned.
	 */
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}
static void srp_path_rec_completion(int status,
				    struct sa_path_rec *pathrec,
				    void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct srp_target_port *target = ch->target;

	ch->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		ch->ib_cm.path = *pathrec;
	complete(&ch->done);
}

static int srp_ib_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	ch->ib_cm.path.numb_path = 1;

	init_completion(&ch->done);

	ch->ib_cm.path_query_id = ib_sa_path_rec_get(&srp_sa_client,
					       target->srp_host->srp_dev->dev,
					       target->srp_host->port,
					       &ch->ib_cm.path,
					       IB_SA_PATH_REC_SERVICE_ID |
					       IB_SA_PATH_REC_DGID	 |
					       IB_SA_PATH_REC_SGID	 |
					       IB_SA_PATH_REC_NUMB_PATH	 |
					       IB_SA_PATH_REC_PKEY,
					       SRP_PATH_REC_TIMEOUT_MS,
					       GFP_KERNEL,
					       srp_path_rec_completion,
					       ch, &ch->ib_cm.path_query);
	if (ch->ib_cm.path_query_id < 0)
		return ch->ib_cm.path_query_id;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed: sgid %pI6, dgid %pI6, pkey %#04x, service_id %#16llx\n",
			     ch->ib_cm.path.sgid.raw, ch->ib_cm.path.dgid.raw,
			     be16_to_cpu(target->ib_cm.pkey),
			     be64_to_cpu(target->ib_cm.service_id));

	return ch->status;
}

static int srp_rdma_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	init_completion(&ch->done);

	ret = rdma_resolve_route(ch->rdma_cm.cm_id, SRP_PATH_REC_TIMEOUT_MS);
	if (ret)
		return ret;

	wait_for_completion_interruptible(&ch->done);

	if (ch->status != 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path resolution failed\n");

	return ch->status;
}

static int srp_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;

	return target->using_rdma_cm ? srp_rdma_lookup_path(ch) :
		srp_ib_lookup_path(ch);
}
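/*
 * Added note: srp_get_subnet_timeout() below reads the subnet timeout of the
 * local port; srp_send_req() later adds two to this value to derive the local
 * and remote CM response timeouts for the login request.
 */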
static u8 srp_get_subnet_timeout(struct srp_host *host)
{
	struct ib_port_attr attr;
	int ret;
	u8 subnet_timeout = 18;

	ret = ib_query_port(host->srp_dev->dev, host->port, &attr);
	if (ret == 0)
		subnet_timeout = attr.subnet_timeout;

	if (unlikely(subnet_timeout < 15))
		pr_warn("%s: subnet timeout %d may cause SRP login to fail.\n",
			dev_name(&host->srp_dev->dev->dev), subnet_timeout);

	return subnet_timeout;
}
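/*
 * Added note: srp_send_req() below builds the SRP_LOGIN_REQ payload (and its
 * RDMA/CM variant) and sends it through either rdma_connect() or
 * ib_send_cm_req(), depending on which connection manager the target port
 * uses.
 */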
800 static int srp_send_req(struct srp_rdma_ch
*ch
, uint32_t max_iu_len
,
803 struct srp_target_port
*target
= ch
->target
;
805 struct rdma_conn_param rdma_param
;
806 struct srp_login_req_rdma rdma_req
;
807 struct ib_cm_req_param ib_param
;
808 struct srp_login_req ib_req
;
813 req
= kzalloc(sizeof *req
, GFP_KERNEL
);
817 req
->ib_param
.flow_control
= 1;
818 req
->ib_param
.retry_count
= target
->tl_retry_count
;
821 * Pick some arbitrary defaults here; we could make these
822 * module parameters if anyone cared about setting them.
824 req
->ib_param
.responder_resources
= 4;
825 req
->ib_param
.rnr_retry_count
= 7;
826 req
->ib_param
.max_cm_retries
= 15;
828 req
->ib_req
.opcode
= SRP_LOGIN_REQ
;
830 req
->ib_req
.req_it_iu_len
= cpu_to_be32(max_iu_len
);
831 req
->ib_req
.req_buf_fmt
= cpu_to_be16(SRP_BUF_FORMAT_DIRECT
|
832 SRP_BUF_FORMAT_INDIRECT
);
833 req
->ib_req
.req_flags
= (multich
? SRP_MULTICHAN_MULTI
:
834 SRP_MULTICHAN_SINGLE
);
835 if (srp_use_imm_data
) {
836 req
->ib_req
.req_flags
|= SRP_IMMED_REQUESTED
;
837 req
->ib_req
.imm_data_offset
= cpu_to_be16(SRP_IMM_DATA_OFFSET
);
840 if (target
->using_rdma_cm
) {
841 req
->rdma_param
.flow_control
= req
->ib_param
.flow_control
;
842 req
->rdma_param
.responder_resources
=
843 req
->ib_param
.responder_resources
;
844 req
->rdma_param
.initiator_depth
= req
->ib_param
.initiator_depth
;
845 req
->rdma_param
.retry_count
= req
->ib_param
.retry_count
;
846 req
->rdma_param
.rnr_retry_count
= req
->ib_param
.rnr_retry_count
;
847 req
->rdma_param
.private_data
= &req
->rdma_req
;
848 req
->rdma_param
.private_data_len
= sizeof(req
->rdma_req
);
850 req
->rdma_req
.opcode
= req
->ib_req
.opcode
;
851 req
->rdma_req
.tag
= req
->ib_req
.tag
;
852 req
->rdma_req
.req_it_iu_len
= req
->ib_req
.req_it_iu_len
;
853 req
->rdma_req
.req_buf_fmt
= req
->ib_req
.req_buf_fmt
;
854 req
->rdma_req
.req_flags
= req
->ib_req
.req_flags
;
855 req
->rdma_req
.imm_data_offset
= req
->ib_req
.imm_data_offset
;
857 ipi
= req
->rdma_req
.initiator_port_id
;
858 tpi
= req
->rdma_req
.target_port_id
;
862 subnet_timeout
= srp_get_subnet_timeout(target
->srp_host
);
864 req
->ib_param
.primary_path
= &ch
->ib_cm
.path
;
865 req
->ib_param
.alternate_path
= NULL
;
866 req
->ib_param
.service_id
= target
->ib_cm
.service_id
;
867 get_random_bytes(&req
->ib_param
.starting_psn
, 4);
868 req
->ib_param
.starting_psn
&= 0xffffff;
869 req
->ib_param
.qp_num
= ch
->qp
->qp_num
;
870 req
->ib_param
.qp_type
= ch
->qp
->qp_type
;
871 req
->ib_param
.local_cm_response_timeout
= subnet_timeout
+ 2;
872 req
->ib_param
.remote_cm_response_timeout
= subnet_timeout
+ 2;
873 req
->ib_param
.private_data
= &req
->ib_req
;
874 req
->ib_param
.private_data_len
= sizeof(req
->ib_req
);
876 ipi
= req
->ib_req
.initiator_port_id
;
877 tpi
= req
->ib_req
.target_port_id
;
881 * In the published SRP specification (draft rev. 16a), the
882 * port identifier format is 8 bytes of ID extension followed
883 * by 8 bytes of GUID. Older drafts put the two halves in the
884 * opposite order, so that the GUID comes first.
886 * Targets conforming to these obsolete drafts can be
887 * recognized by the I/O Class they report.
889 if (target
->io_class
== SRP_REV10_IB_IO_CLASS
) {
890 memcpy(ipi
, &target
->sgid
.global
.interface_id
, 8);
891 memcpy(ipi
+ 8, &target
->initiator_ext
, 8);
892 memcpy(tpi
, &target
->ioc_guid
, 8);
893 memcpy(tpi
+ 8, &target
->id_ext
, 8);
895 memcpy(ipi
, &target
->initiator_ext
, 8);
896 memcpy(ipi
+ 8, &target
->sgid
.global
.interface_id
, 8);
897 memcpy(tpi
, &target
->id_ext
, 8);
898 memcpy(tpi
+ 8, &target
->ioc_guid
, 8);
902 * Topspin/Cisco SRP targets will reject our login unless we
903 * zero out the first 8 bytes of our initiator port ID and set
904 * the second 8 bytes to the local node GUID.
906 if (srp_target_is_topspin(target
)) {
907 shost_printk(KERN_DEBUG
, target
->scsi_host
,
908 PFX
"Topspin/Cisco initiator port ID workaround "
909 "activated for target GUID %016llx\n",
910 be64_to_cpu(target
->ioc_guid
));
912 memcpy(ipi
+ 8, &target
->srp_host
->srp_dev
->dev
->node_guid
, 8);
915 if (target
->using_rdma_cm
)
916 status
= rdma_connect(ch
->rdma_cm
.cm_id
, &req
->rdma_param
);
918 status
= ib_send_cm_req(ch
->ib_cm
.cm_id
, &req
->ib_param
);
925 static bool srp_queue_remove_work(struct srp_target_port
*target
)
927 bool changed
= false;
929 spin_lock_irq(&target
->lock
);
930 if (target
->state
!= SRP_TARGET_REMOVED
) {
931 target
->state
= SRP_TARGET_REMOVED
;
934 spin_unlock_irq(&target
->lock
);
937 queue_work(srp_remove_wq
, &target
->remove_work
);
942 static void srp_disconnect_target(struct srp_target_port
*target
)
944 struct srp_rdma_ch
*ch
;
947 /* XXX should send SRP_I_LOGOUT request */
949 for (i
= 0; i
< target
->ch_count
; i
++) {
951 ch
->connected
= false;
953 if (target
->using_rdma_cm
) {
954 if (ch
->rdma_cm
.cm_id
)
955 rdma_disconnect(ch
->rdma_cm
.cm_id
);
958 ret
= ib_send_cm_dreq(ch
->ib_cm
.cm_id
,
962 shost_printk(KERN_DEBUG
, target
->scsi_host
,
963 PFX
"Sending CM DREQ failed\n");
968 static void srp_free_req_data(struct srp_target_port
*target
,
969 struct srp_rdma_ch
*ch
)
971 struct srp_device
*dev
= target
->srp_host
->srp_dev
;
972 struct ib_device
*ibdev
= dev
->dev
;
973 struct srp_request
*req
;
979 for (i
= 0; i
< target
->req_ring_size
; ++i
) {
980 req
= &ch
->req_ring
[i
];
981 if (dev
->use_fast_reg
)
983 if (req
->indirect_dma_addr
) {
984 ib_dma_unmap_single(ibdev
, req
->indirect_dma_addr
,
985 target
->indirect_size
,
988 kfree(req
->indirect_desc
);
995 static int srp_alloc_req_data(struct srp_rdma_ch
*ch
)
997 struct srp_target_port
*target
= ch
->target
;
998 struct srp_device
*srp_dev
= target
->srp_host
->srp_dev
;
999 struct ib_device
*ibdev
= srp_dev
->dev
;
1000 struct srp_request
*req
;
1002 dma_addr_t dma_addr
;
1003 int i
, ret
= -ENOMEM
;
1005 ch
->req_ring
= kcalloc(target
->req_ring_size
, sizeof(*ch
->req_ring
),
1010 for (i
= 0; i
< target
->req_ring_size
; ++i
) {
1011 req
= &ch
->req_ring
[i
];
1012 mr_list
= kmalloc_array(target
->mr_per_cmd
, sizeof(void *),
1016 if (srp_dev
->use_fast_reg
)
1017 req
->fr_list
= mr_list
;
1018 req
->indirect_desc
= kmalloc(target
->indirect_size
, GFP_KERNEL
);
1019 if (!req
->indirect_desc
)
1022 dma_addr
= ib_dma_map_single(ibdev
, req
->indirect_desc
,
1023 target
->indirect_size
,
1025 if (ib_dma_mapping_error(ibdev
, dma_addr
))
1028 req
->indirect_dma_addr
= dma_addr
;
1037 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
1038 * @shost: SCSI host whose attributes to remove from sysfs.
1040 * Note: Any attributes defined in the host template and that did not exist
1041 * before invocation of this function will be ignored.
1043 static void srp_del_scsi_host_attr(struct Scsi_Host
*shost
)
1045 struct device_attribute
**attr
;
1047 for (attr
= shost
->hostt
->shost_attrs
; attr
&& *attr
; ++attr
)
1048 device_remove_file(&shost
->shost_dev
, *attr
);
1051 static void srp_remove_target(struct srp_target_port
*target
)
1053 struct srp_rdma_ch
*ch
;
1056 WARN_ON_ONCE(target
->state
!= SRP_TARGET_REMOVED
);
1058 srp_del_scsi_host_attr(target
->scsi_host
);
1059 srp_rport_get(target
->rport
);
1060 srp_remove_host(target
->scsi_host
);
1061 scsi_remove_host(target
->scsi_host
);
1062 srp_stop_rport_timers(target
->rport
);
1063 srp_disconnect_target(target
);
1064 kobj_ns_drop(KOBJ_NS_TYPE_NET
, target
->net
);
1065 for (i
= 0; i
< target
->ch_count
; i
++) {
1066 ch
= &target
->ch
[i
];
1067 srp_free_ch_ib(target
, ch
);
1069 cancel_work_sync(&target
->tl_err_work
);
1070 srp_rport_put(target
->rport
);
1071 for (i
= 0; i
< target
->ch_count
; i
++) {
1072 ch
= &target
->ch
[i
];
1073 srp_free_req_data(target
, ch
);
1078 spin_lock(&target
->srp_host
->target_lock
);
1079 list_del(&target
->list
);
1080 spin_unlock(&target
->srp_host
->target_lock
);
1082 scsi_host_put(target
->scsi_host
);
1085 static void srp_remove_work(struct work_struct
*work
)
1087 struct srp_target_port
*target
=
1088 container_of(work
, struct srp_target_port
, remove_work
);
1090 WARN_ON_ONCE(target
->state
!= SRP_TARGET_REMOVED
);
1092 srp_remove_target(target
);
1095 static void srp_rport_delete(struct srp_rport
*rport
)
1097 struct srp_target_port
*target
= rport
->lld_data
;
1099 srp_queue_remove_work(target
);
1103 * srp_connected_ch() - number of connected channels
1104 * @target: SRP target port.
1106 static int srp_connected_ch(struct srp_target_port
*target
)
1110 for (i
= 0; i
< target
->ch_count
; i
++)
1111 c
+= target
->ch
[i
].connected
;
1116 static int srp_connect_ch(struct srp_rdma_ch
*ch
, uint32_t max_iu_len
,
1119 struct srp_target_port
*target
= ch
->target
;
1122 WARN_ON_ONCE(!multich
&& srp_connected_ch(target
) > 0);
1124 ret
= srp_lookup_path(ch
);
1129 init_completion(&ch
->done
);
1130 ret
= srp_send_req(ch
, max_iu_len
, multich
);
1133 ret
= wait_for_completion_interruptible(&ch
->done
);
1138 * The CM event handling code will set status to
1139 * SRP_PORT_REDIRECT if we get a port redirect REJ
1140 * back, or SRP_DLID_REDIRECT if we get a lid/qp
1141 * redirect REJ back.
1146 ch
->connected
= true;
1149 case SRP_PORT_REDIRECT
:
1150 ret
= srp_lookup_path(ch
);
1155 case SRP_DLID_REDIRECT
:
1158 case SRP_STALE_CONN
:
1159 shost_printk(KERN_ERR
, target
->scsi_host
, PFX
1160 "giving up on stale connection\n");
1170 return ret
<= 0 ? ret
: -ENODEV
;
1173 static void srp_inv_rkey_err_done(struct ib_cq
*cq
, struct ib_wc
*wc
)
1175 srp_handle_qp_err(cq
, wc
, "INV RKEY");
1178 static int srp_inv_rkey(struct srp_request
*req
, struct srp_rdma_ch
*ch
,
1181 struct ib_send_wr wr
= {
1182 .opcode
= IB_WR_LOCAL_INV
,
1186 .ex
.invalidate_rkey
= rkey
,
1189 wr
.wr_cqe
= &req
->reg_cqe
;
1190 req
->reg_cqe
.done
= srp_inv_rkey_err_done
;
1191 return ib_post_send(ch
->qp
, &wr
, NULL
);
1194 static void srp_unmap_data(struct scsi_cmnd
*scmnd
,
1195 struct srp_rdma_ch
*ch
,
1196 struct srp_request
*req
)
1198 struct srp_target_port
*target
= ch
->target
;
1199 struct srp_device
*dev
= target
->srp_host
->srp_dev
;
1200 struct ib_device
*ibdev
= dev
->dev
;
1203 if (!scsi_sglist(scmnd
) ||
1204 (scmnd
->sc_data_direction
!= DMA_TO_DEVICE
&&
1205 scmnd
->sc_data_direction
!= DMA_FROM_DEVICE
))
1208 if (dev
->use_fast_reg
) {
1209 struct srp_fr_desc
**pfr
;
1211 for (i
= req
->nmdesc
, pfr
= req
->fr_list
; i
> 0; i
--, pfr
++) {
1212 res
= srp_inv_rkey(req
, ch
, (*pfr
)->mr
->rkey
);
1214 shost_printk(KERN_ERR
, target
->scsi_host
, PFX
1215 "Queueing INV WR for rkey %#x failed (%d)\n",
1216 (*pfr
)->mr
->rkey
, res
);
1217 queue_work(system_long_wq
,
1218 &target
->tl_err_work
);
1222 srp_fr_pool_put(ch
->fr_pool
, req
->fr_list
,
1226 ib_dma_unmap_sg(ibdev
, scsi_sglist(scmnd
), scsi_sg_count(scmnd
),
1227 scmnd
->sc_data_direction
);
/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
1241 static struct scsi_cmnd
*srp_claim_req(struct srp_rdma_ch
*ch
,
1242 struct srp_request
*req
,
1243 struct scsi_device
*sdev
,
1244 struct scsi_cmnd
*scmnd
)
1246 unsigned long flags
;
1248 spin_lock_irqsave(&ch
->lock
, flags
);
1250 (!sdev
|| req
->scmnd
->device
== sdev
) &&
1251 (!scmnd
|| req
->scmnd
== scmnd
)) {
1257 spin_unlock_irqrestore(&ch
->lock
, flags
);
1263 * srp_free_req() - Unmap data and adjust ch->req_lim.
1264 * @ch: SRP RDMA channel.
1265 * @req: Request to be freed.
1266 * @scmnd: SCSI command associated with @req.
1267 * @req_lim_delta: Amount to be added to @target->req_lim.
1269 static void srp_free_req(struct srp_rdma_ch
*ch
, struct srp_request
*req
,
1270 struct scsi_cmnd
*scmnd
, s32 req_lim_delta
)
1272 unsigned long flags
;
1274 srp_unmap_data(scmnd
, ch
, req
);
1276 spin_lock_irqsave(&ch
->lock
, flags
);
1277 ch
->req_lim
+= req_lim_delta
;
1278 spin_unlock_irqrestore(&ch
->lock
, flags
);
1281 static void srp_finish_req(struct srp_rdma_ch
*ch
, struct srp_request
*req
,
1282 struct scsi_device
*sdev
, int result
)
1284 struct scsi_cmnd
*scmnd
= srp_claim_req(ch
, req
, sdev
, NULL
);
1287 srp_free_req(ch
, req
, scmnd
, 0);
1288 scmnd
->result
= result
;
1289 scmnd
->scsi_done(scmnd
);
1293 static void srp_terminate_io(struct srp_rport
*rport
)
1295 struct srp_target_port
*target
= rport
->lld_data
;
1296 struct srp_rdma_ch
*ch
;
1299 for (i
= 0; i
< target
->ch_count
; i
++) {
1300 ch
= &target
->ch
[i
];
1302 for (j
= 0; j
< target
->req_ring_size
; ++j
) {
1303 struct srp_request
*req
= &ch
->req_ring
[j
];
1305 srp_finish_req(ch
, req
, NULL
,
1306 DID_TRANSPORT_FAILFAST
<< 16);
1311 /* Calculate maximum initiator to target information unit length. */
1312 static uint32_t srp_max_it_iu_len(int cmd_sg_cnt
, bool use_imm_data
,
1313 uint32_t max_it_iu_size
)
1315 uint32_t max_iu_len
= sizeof(struct srp_cmd
) + SRP_MAX_ADD_CDB_LEN
+
1316 sizeof(struct srp_indirect_buf
) +
1317 cmd_sg_cnt
* sizeof(struct srp_direct_buf
);
1320 max_iu_len
= max(max_iu_len
, SRP_IMM_DATA_OFFSET
+
1324 max_iu_len
= min(max_iu_len
, max_it_iu_size
);
1326 pr_debug("max_iu_len = %d\n", max_iu_len
);
/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to realize that is not to call this function
 * directly but to call srp_reconnect_rport() instead since that last function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
1340 static int srp_rport_reconnect(struct srp_rport
*rport
)
1342 struct srp_target_port
*target
= rport
->lld_data
;
1343 struct srp_rdma_ch
*ch
;
1344 uint32_t max_iu_len
= srp_max_it_iu_len(target
->cmd_sg_cnt
,
1346 target
->max_it_iu_size
);
1348 bool multich
= false;
1350 srp_disconnect_target(target
);
1352 if (target
->state
== SRP_TARGET_SCANNING
)
1356 * Now get a new local CM ID so that we avoid confusing the target in
1357 * case things are really fouled up. Doing so also ensures that all CM
1358 * callbacks will have finished before a new QP is allocated.
1360 for (i
= 0; i
< target
->ch_count
; i
++) {
1361 ch
= &target
->ch
[i
];
1362 ret
+= srp_new_cm_id(ch
);
1364 for (i
= 0; i
< target
->ch_count
; i
++) {
1365 ch
= &target
->ch
[i
];
1366 for (j
= 0; j
< target
->req_ring_size
; ++j
) {
1367 struct srp_request
*req
= &ch
->req_ring
[j
];
1369 srp_finish_req(ch
, req
, NULL
, DID_RESET
<< 16);
1372 for (i
= 0; i
< target
->ch_count
; i
++) {
1373 ch
= &target
->ch
[i
];
1375 * Whether or not creating a new CM ID succeeded, create a new
1376 * QP. This guarantees that all completion callback function
1377 * invocations have finished before request resetting starts.
1379 ret
+= srp_create_ch_ib(ch
);
1381 INIT_LIST_HEAD(&ch
->free_tx
);
1382 for (j
= 0; j
< target
->queue_size
; ++j
)
1383 list_add(&ch
->tx_ring
[j
]->list
, &ch
->free_tx
);
1386 target
->qp_in_error
= false;
1388 for (i
= 0; i
< target
->ch_count
; i
++) {
1389 ch
= &target
->ch
[i
];
1392 ret
= srp_connect_ch(ch
, max_iu_len
, multich
);
1397 shost_printk(KERN_INFO
, target
->scsi_host
,
1398 PFX
"reconnect succeeded\n");
1403 static void srp_map_desc(struct srp_map_state
*state
, dma_addr_t dma_addr
,
1404 unsigned int dma_len
, u32 rkey
)
1406 struct srp_direct_buf
*desc
= state
->desc
;
1408 WARN_ON_ONCE(!dma_len
);
1410 desc
->va
= cpu_to_be64(dma_addr
);
1411 desc
->key
= cpu_to_be32(rkey
);
1412 desc
->len
= cpu_to_be32(dma_len
);
1414 state
->total_len
+= dma_len
;
1419 static void srp_reg_mr_err_done(struct ib_cq
*cq
, struct ib_wc
*wc
)
1421 srp_handle_qp_err(cq
, wc
, "FAST REG");
1425 * Map up to sg_nents elements of state->sg where *sg_offset_p is the offset
1426 * where to start in the first element. If sg_offset_p != NULL then
1427 * *sg_offset_p is updated to the offset in state->sg[retval] of the first
1428 * byte that has not yet been mapped.
1430 static int srp_map_finish_fr(struct srp_map_state
*state
,
1431 struct srp_request
*req
,
1432 struct srp_rdma_ch
*ch
, int sg_nents
,
1433 unsigned int *sg_offset_p
)
1435 struct srp_target_port
*target
= ch
->target
;
1436 struct srp_device
*dev
= target
->srp_host
->srp_dev
;
1437 struct ib_reg_wr wr
;
1438 struct srp_fr_desc
*desc
;
1442 if (state
->fr
.next
>= state
->fr
.end
) {
1443 shost_printk(KERN_ERR
, ch
->target
->scsi_host
,
1444 PFX
"Out of MRs (mr_per_cmd = %d)\n",
1445 ch
->target
->mr_per_cmd
);
1449 WARN_ON_ONCE(!dev
->use_fast_reg
);
1451 if (sg_nents
== 1 && target
->global_rkey
) {
1452 unsigned int sg_offset
= sg_offset_p
? *sg_offset_p
: 0;
1454 srp_map_desc(state
, sg_dma_address(state
->sg
) + sg_offset
,
1455 sg_dma_len(state
->sg
) - sg_offset
,
1456 target
->global_rkey
);
1462 desc
= srp_fr_pool_get(ch
->fr_pool
);
1466 rkey
= ib_inc_rkey(desc
->mr
->rkey
);
1467 ib_update_fast_reg_key(desc
->mr
, rkey
);
1469 n
= ib_map_mr_sg(desc
->mr
, state
->sg
, sg_nents
, sg_offset_p
,
1471 if (unlikely(n
< 0)) {
1472 srp_fr_pool_put(ch
->fr_pool
, &desc
, 1);
1473 pr_debug("%s: ib_map_mr_sg(%d, %d) returned %d.\n",
1474 dev_name(&req
->scmnd
->device
->sdev_gendev
), sg_nents
,
1475 sg_offset_p
? *sg_offset_p
: -1, n
);
1479 WARN_ON_ONCE(desc
->mr
->length
== 0);
1481 req
->reg_cqe
.done
= srp_reg_mr_err_done
;
1484 wr
.wr
.opcode
= IB_WR_REG_MR
;
1485 wr
.wr
.wr_cqe
= &req
->reg_cqe
;
1487 wr
.wr
.send_flags
= 0;
1489 wr
.key
= desc
->mr
->rkey
;
1490 wr
.access
= (IB_ACCESS_LOCAL_WRITE
|
1491 IB_ACCESS_REMOTE_READ
|
1492 IB_ACCESS_REMOTE_WRITE
);
1494 *state
->fr
.next
++ = desc
;
1497 srp_map_desc(state
, desc
->mr
->iova
,
1498 desc
->mr
->length
, desc
->mr
->rkey
);
1500 err
= ib_post_send(ch
->qp
, &wr
.wr
, NULL
);
1501 if (unlikely(err
)) {
1502 WARN_ON_ONCE(err
== -ENOMEM
);
1509 static int srp_map_sg_fr(struct srp_map_state
*state
, struct srp_rdma_ch
*ch
,
1510 struct srp_request
*req
, struct scatterlist
*scat
,
1513 unsigned int sg_offset
= 0;
1515 state
->fr
.next
= req
->fr_list
;
1516 state
->fr
.end
= req
->fr_list
+ ch
->target
->mr_per_cmd
;
1525 n
= srp_map_finish_fr(state
, req
, ch
, count
, &sg_offset
);
1526 if (unlikely(n
< 0))
1530 for (i
= 0; i
< n
; i
++)
1531 state
->sg
= sg_next(state
->sg
);
1537 static int srp_map_sg_dma(struct srp_map_state
*state
, struct srp_rdma_ch
*ch
,
1538 struct srp_request
*req
, struct scatterlist
*scat
,
1541 struct srp_target_port
*target
= ch
->target
;
1542 struct scatterlist
*sg
;
1545 for_each_sg(scat
, sg
, count
, i
) {
1546 srp_map_desc(state
, sg_dma_address(sg
), sg_dma_len(sg
),
1547 target
->global_rkey
);
1554 * Register the indirect data buffer descriptor with the HCA.
1556 * Note: since the indirect data buffer descriptor has been allocated with
1557 * kmalloc() it is guaranteed that this buffer is a physically contiguous
1560 static int srp_map_idb(struct srp_rdma_ch
*ch
, struct srp_request
*req
,
1561 void **next_mr
, void **end_mr
, u32 idb_len
,
1564 struct srp_target_port
*target
= ch
->target
;
1565 struct srp_device
*dev
= target
->srp_host
->srp_dev
;
1566 struct srp_map_state state
;
1567 struct srp_direct_buf idb_desc
;
1568 struct scatterlist idb_sg
[1];
1571 memset(&state
, 0, sizeof(state
));
1572 memset(&idb_desc
, 0, sizeof(idb_desc
));
1573 state
.gen
.next
= next_mr
;
1574 state
.gen
.end
= end_mr
;
1575 state
.desc
= &idb_desc
;
1576 state
.base_dma_addr
= req
->indirect_dma_addr
;
1577 state
.dma_len
= idb_len
;
1579 if (dev
->use_fast_reg
) {
1581 sg_init_one(idb_sg
, req
->indirect_desc
, idb_len
);
1582 idb_sg
->dma_address
= req
->indirect_dma_addr
; /* hack! */
1583 #ifdef CONFIG_NEED_SG_DMA_LENGTH
1584 idb_sg
->dma_length
= idb_sg
->length
; /* hack^2 */
1586 ret
= srp_map_finish_fr(&state
, req
, ch
, 1, NULL
);
1589 WARN_ON_ONCE(ret
< 1);
1594 *idb_rkey
= idb_desc
.key
;
1599 static void srp_check_mapping(struct srp_map_state
*state
,
1600 struct srp_rdma_ch
*ch
, struct srp_request
*req
,
1601 struct scatterlist
*scat
, int count
)
1603 struct srp_device
*dev
= ch
->target
->srp_host
->srp_dev
;
1604 struct srp_fr_desc
**pfr
;
1605 u64 desc_len
= 0, mr_len
= 0;
1608 for (i
= 0; i
< state
->ndesc
; i
++)
1609 desc_len
+= be32_to_cpu(req
->indirect_desc
[i
].len
);
1610 if (dev
->use_fast_reg
)
1611 for (i
= 0, pfr
= req
->fr_list
; i
< state
->nmdesc
; i
++, pfr
++)
1612 mr_len
+= (*pfr
)->mr
->length
;
1613 if (desc_len
!= scsi_bufflen(req
->scmnd
) ||
1614 mr_len
> scsi_bufflen(req
->scmnd
))
1615 pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n",
1616 scsi_bufflen(req
->scmnd
), desc_len
, mr_len
,
1617 state
->ndesc
, state
->nmdesc
);
1621 * srp_map_data() - map SCSI data buffer onto an SRP request
1622 * @scmnd: SCSI command to map
1623 * @ch: SRP RDMA channel
1626 * Returns the length in bytes of the SRP_CMD IU or a negative value if
1627 * mapping failed. The size of any immediate data is not included in the
1630 static int srp_map_data(struct scsi_cmnd
*scmnd
, struct srp_rdma_ch
*ch
,
1631 struct srp_request
*req
)
1633 struct srp_target_port
*target
= ch
->target
;
1634 struct scatterlist
*scat
, *sg
;
1635 struct srp_cmd
*cmd
= req
->cmd
->buf
;
1636 int i
, len
, nents
, count
, ret
;
1637 struct srp_device
*dev
;
1638 struct ib_device
*ibdev
;
1639 struct srp_map_state state
;
1640 struct srp_indirect_buf
*indirect_hdr
;
1642 u32 idb_len
, table_len
;
1646 req
->cmd
->num_sge
= 1;
1648 if (!scsi_sglist(scmnd
) || scmnd
->sc_data_direction
== DMA_NONE
)
1649 return sizeof(struct srp_cmd
) + cmd
->add_cdb_len
;
1651 if (scmnd
->sc_data_direction
!= DMA_FROM_DEVICE
&&
1652 scmnd
->sc_data_direction
!= DMA_TO_DEVICE
) {
1653 shost_printk(KERN_WARNING
, target
->scsi_host
,
1654 PFX
"Unhandled data direction %d\n",
1655 scmnd
->sc_data_direction
);
1659 nents
= scsi_sg_count(scmnd
);
1660 scat
= scsi_sglist(scmnd
);
1661 data_len
= scsi_bufflen(scmnd
);
1663 dev
= target
->srp_host
->srp_dev
;
1666 count
= ib_dma_map_sg(ibdev
, scat
, nents
, scmnd
->sc_data_direction
);
1667 if (unlikely(count
== 0))
1670 if (ch
->use_imm_data
&&
1671 count
<= ch
->max_imm_sge
&&
1672 SRP_IMM_DATA_OFFSET
+ data_len
<= ch
->max_it_iu_len
&&
1673 scmnd
->sc_data_direction
== DMA_TO_DEVICE
) {
1674 struct srp_imm_buf
*buf
;
1675 struct ib_sge
*sge
= &req
->cmd
->sge
[1];
1677 fmt
= SRP_DATA_DESC_IMM
;
1678 len
= SRP_IMM_DATA_OFFSET
;
1680 buf
= (void *)cmd
->add_data
+ cmd
->add_cdb_len
;
1681 buf
->len
= cpu_to_be32(data_len
);
1682 WARN_ON_ONCE((void *)(buf
+ 1) > (void *)cmd
+ len
);
1683 for_each_sg(scat
, sg
, count
, i
) {
1684 sge
[i
].addr
= sg_dma_address(sg
);
1685 sge
[i
].length
= sg_dma_len(sg
);
1686 sge
[i
].lkey
= target
->lkey
;
1688 req
->cmd
->num_sge
+= count
;
1692 fmt
= SRP_DATA_DESC_DIRECT
;
1693 len
= sizeof(struct srp_cmd
) + cmd
->add_cdb_len
+
1694 sizeof(struct srp_direct_buf
);
1696 if (count
== 1 && target
->global_rkey
) {
1698 * The midlayer only generated a single gather/scatter
1699 * entry, or DMA mapping coalesced everything to a
1700 * single entry. So a direct descriptor along with
1701 * the DMA MR suffices.
1703 struct srp_direct_buf
*buf
;
1705 buf
= (void *)cmd
->add_data
+ cmd
->add_cdb_len
;
1706 buf
->va
= cpu_to_be64(sg_dma_address(scat
));
1707 buf
->key
= cpu_to_be32(target
->global_rkey
);
1708 buf
->len
= cpu_to_be32(sg_dma_len(scat
));
1715 * We have more than one scatter/gather entry, so build our indirect
1716 * descriptor table, trying to merge as many entries as we can.
1718 indirect_hdr
= (void *)cmd
->add_data
+ cmd
->add_cdb_len
;
1720 ib_dma_sync_single_for_cpu(ibdev
, req
->indirect_dma_addr
,
1721 target
->indirect_size
, DMA_TO_DEVICE
);
1723 memset(&state
, 0, sizeof(state
));
1724 state
.desc
= req
->indirect_desc
;
1725 if (dev
->use_fast_reg
)
1726 ret
= srp_map_sg_fr(&state
, ch
, req
, scat
, count
);
1728 ret
= srp_map_sg_dma(&state
, ch
, req
, scat
, count
);
1729 req
->nmdesc
= state
.nmdesc
;
1734 DEFINE_DYNAMIC_DEBUG_METADATA(ddm
,
1735 "Memory mapping consistency check");
1736 if (DYNAMIC_DEBUG_BRANCH(ddm
))
1737 srp_check_mapping(&state
, ch
, req
, scat
, count
);
1740 /* We've mapped the request, now pull as much of the indirect
1741 * descriptor table as we can into the command buffer. If this
1742 * target is not using an external indirect table, we are
1743 * guaranteed to fit into the command, as the SCSI layer won't
1744 * give us more S/G entries than we allow.
1746 if (state
.ndesc
== 1) {
1748 * Memory registration collapsed the sg-list into one entry,
1749 * so use a direct descriptor.
1751 struct srp_direct_buf
*buf
;
1753 buf
= (void *)cmd
->add_data
+ cmd
->add_cdb_len
;
1754 *buf
= req
->indirect_desc
[0];
1758 if (unlikely(target
->cmd_sg_cnt
< state
.ndesc
&&
1759 !target
->allow_ext_sg
)) {
1760 shost_printk(KERN_ERR
, target
->scsi_host
,
1761 "Could not fit S/G list into SRP_CMD\n");
1766 count
= min(state
.ndesc
, target
->cmd_sg_cnt
);
1767 table_len
= state
.ndesc
* sizeof (struct srp_direct_buf
);
1768 idb_len
= sizeof(struct srp_indirect_buf
) + table_len
;
1770 fmt
= SRP_DATA_DESC_INDIRECT
;
1771 len
= sizeof(struct srp_cmd
) + cmd
->add_cdb_len
+
1772 sizeof(struct srp_indirect_buf
);
1773 len
+= count
* sizeof (struct srp_direct_buf
);
1775 memcpy(indirect_hdr
->desc_list
, req
->indirect_desc
,
1776 count
* sizeof (struct srp_direct_buf
));
1778 if (!target
->global_rkey
) {
1779 ret
= srp_map_idb(ch
, req
, state
.gen
.next
, state
.gen
.end
,
1780 idb_len
, &idb_rkey
);
1785 idb_rkey
= cpu_to_be32(target
->global_rkey
);
1788 indirect_hdr
->table_desc
.va
= cpu_to_be64(req
->indirect_dma_addr
);
1789 indirect_hdr
->table_desc
.key
= idb_rkey
;
1790 indirect_hdr
->table_desc
.len
= cpu_to_be32(table_len
);
1791 indirect_hdr
->len
= cpu_to_be32(state
.total_len
);
1793 if (scmnd
->sc_data_direction
== DMA_TO_DEVICE
)
1794 cmd
->data_out_desc_cnt
= count
;
1796 cmd
->data_in_desc_cnt
= count
;
1798 ib_dma_sync_single_for_device(ibdev
, req
->indirect_dma_addr
, table_len
,
1802 if (scmnd
->sc_data_direction
== DMA_TO_DEVICE
)
1803 cmd
->buf_fmt
= fmt
<< 4;
1810 srp_unmap_data(scmnd
, ch
, req
);
1811 if (ret
== -ENOMEM
&& req
->nmdesc
>= target
->mr_pool_size
)
1817 * Return an IU and possible credit to the free pool
1819 static void srp_put_tx_iu(struct srp_rdma_ch
*ch
, struct srp_iu
*iu
,
1820 enum srp_iu_type iu_type
)
1822 unsigned long flags
;
1824 spin_lock_irqsave(&ch
->lock
, flags
);
1825 list_add(&iu
->list
, &ch
->free_tx
);
1826 if (iu_type
!= SRP_IU_RSP
)
1828 spin_unlock_irqrestore(&ch
->lock
, flags
);
/*
 * Must be called with ch->lock held to protect req_lim and free_tx.
 * If IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */
1844 static struct srp_iu
*__srp_get_tx_iu(struct srp_rdma_ch
*ch
,
1845 enum srp_iu_type iu_type
)
1847 struct srp_target_port
*target
= ch
->target
;
1848 s32 rsv
= (iu_type
== SRP_IU_TSK_MGMT
) ? 0 : SRP_TSK_MGMT_SQ_SIZE
;
1851 lockdep_assert_held(&ch
->lock
);
1853 ib_process_cq_direct(ch
->send_cq
, -1);
1855 if (list_empty(&ch
->free_tx
))
1858 /* Initiator responses to target requests do not consume credits */
1859 if (iu_type
!= SRP_IU_RSP
) {
1860 if (ch
->req_lim
<= rsv
) {
1861 ++target
->zero_req_lim
;
1868 iu
= list_first_entry(&ch
->free_tx
, struct srp_iu
, list
);
1869 list_del(&iu
->list
);
1874 * Note: if this function is called from inside ib_drain_sq() then it will
1875 * be called without ch->lock being held. If ib_drain_sq() dequeues a WQE
1876 * with status IB_WC_SUCCESS then that's a bug.
1878 static void srp_send_done(struct ib_cq
*cq
, struct ib_wc
*wc
)
1880 struct srp_iu
*iu
= container_of(wc
->wr_cqe
, struct srp_iu
, cqe
);
1881 struct srp_rdma_ch
*ch
= cq
->cq_context
;
1883 if (unlikely(wc
->status
!= IB_WC_SUCCESS
)) {
1884 srp_handle_qp_err(cq
, wc
, "SEND");
1888 lockdep_assert_held(&ch
->lock
);
1890 list_add(&iu
->list
, &ch
->free_tx
);
1894 * srp_post_send() - send an SRP information unit
1895 * @ch: RDMA channel over which to send the information unit.
1896 * @iu: Information unit to send.
1897 * @len: Length of the information unit excluding immediate data.
1899 static int srp_post_send(struct srp_rdma_ch
*ch
, struct srp_iu
*iu
, int len
)
1901 struct srp_target_port
*target
= ch
->target
;
1902 struct ib_send_wr wr
;
1904 if (WARN_ON_ONCE(iu
->num_sge
> SRP_MAX_SGE
))
1907 iu
->sge
[0].addr
= iu
->dma
;
1908 iu
->sge
[0].length
= len
;
1909 iu
->sge
[0].lkey
= target
->lkey
;
1911 iu
->cqe
.done
= srp_send_done
;
1914 wr
.wr_cqe
= &iu
->cqe
;
1915 wr
.sg_list
= &iu
->sge
[0];
1916 wr
.num_sge
= iu
->num_sge
;
1917 wr
.opcode
= IB_WR_SEND
;
1918 wr
.send_flags
= IB_SEND_SIGNALED
;
1920 return ib_post_send(ch
->qp
, &wr
, NULL
);
1923 static int srp_post_recv(struct srp_rdma_ch
*ch
, struct srp_iu
*iu
)
1925 struct srp_target_port
*target
= ch
->target
;
1926 struct ib_recv_wr wr
;
1929 list
.addr
= iu
->dma
;
1930 list
.length
= iu
->size
;
1931 list
.lkey
= target
->lkey
;
1933 iu
->cqe
.done
= srp_recv_done
;
1936 wr
.wr_cqe
= &iu
->cqe
;
1940 return ib_post_recv(ch
->qp
, &wr
, NULL
);
1943 static void srp_process_rsp(struct srp_rdma_ch
*ch
, struct srp_rsp
*rsp
)
1945 struct srp_target_port
*target
= ch
->target
;
1946 struct srp_request
*req
;
1947 struct scsi_cmnd
*scmnd
;
1948 unsigned long flags
;
1950 if (unlikely(rsp
->tag
& SRP_TAG_TSK_MGMT
)) {
1951 spin_lock_irqsave(&ch
->lock
, flags
);
1952 ch
->req_lim
+= be32_to_cpu(rsp
->req_lim_delta
);
1953 if (rsp
->tag
== ch
->tsk_mgmt_tag
) {
1954 ch
->tsk_mgmt_status
= -1;
1955 if (be32_to_cpu(rsp
->resp_data_len
) >= 4)
1956 ch
->tsk_mgmt_status
= rsp
->data
[3];
1957 complete(&ch
->tsk_mgmt_done
);
1959 shost_printk(KERN_ERR
, target
->scsi_host
,
1960 "Received tsk mgmt response too late for tag %#llx\n",
1963 spin_unlock_irqrestore(&ch
->lock
, flags
);
1965 scmnd
= scsi_host_find_tag(target
->scsi_host
, rsp
->tag
);
1966 if (scmnd
&& scmnd
->host_scribble
) {
1967 req
= (void *)scmnd
->host_scribble
;
1968 scmnd
= srp_claim_req(ch
, req
, NULL
, scmnd
);
1973 shost_printk(KERN_ERR
, target
->scsi_host
,
1974 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1975 rsp
->tag
, ch
- target
->ch
, ch
->qp
->qp_num
);
1977 spin_lock_irqsave(&ch
->lock
, flags
);
1978 ch
->req_lim
+= be32_to_cpu(rsp
->req_lim_delta
);
1979 spin_unlock_irqrestore(&ch
->lock
, flags
);
1983 scmnd
->result
= rsp
->status
;
1985 if (rsp
->flags
& SRP_RSP_FLAG_SNSVALID
) {
1986 memcpy(scmnd
->sense_buffer
, rsp
->data
+
1987 be32_to_cpu(rsp
->resp_data_len
),
1988 min_t(int, be32_to_cpu(rsp
->sense_data_len
),
1989 SCSI_SENSE_BUFFERSIZE
));
1992 if (unlikely(rsp
->flags
& SRP_RSP_FLAG_DIUNDER
))
1993 scsi_set_resid(scmnd
, be32_to_cpu(rsp
->data_in_res_cnt
));
1994 else if (unlikely(rsp
->flags
& SRP_RSP_FLAG_DIOVER
))
1995 scsi_set_resid(scmnd
, -be32_to_cpu(rsp
->data_in_res_cnt
));
1996 else if (unlikely(rsp
->flags
& SRP_RSP_FLAG_DOUNDER
))
1997 scsi_set_resid(scmnd
, be32_to_cpu(rsp
->data_out_res_cnt
));
1998 else if (unlikely(rsp
->flags
& SRP_RSP_FLAG_DOOVER
))
1999 scsi_set_resid(scmnd
, -be32_to_cpu(rsp
->data_out_res_cnt
));
2001 srp_free_req(ch
, req
, scmnd
,
2002 be32_to_cpu(rsp
->req_lim_delta
));
2004 scmnd
->host_scribble
= NULL
;
2005 scmnd
->scsi_done(scmnd
);
2009 static int srp_response_common(struct srp_rdma_ch
*ch
, s32 req_delta
,
2012 struct srp_target_port
*target
= ch
->target
;
2013 struct ib_device
*dev
= target
->srp_host
->srp_dev
->dev
;
2014 unsigned long flags
;
2018 spin_lock_irqsave(&ch
->lock
, flags
);
2019 ch
->req_lim
+= req_delta
;
2020 iu
= __srp_get_tx_iu(ch
, SRP_IU_RSP
);
2021 spin_unlock_irqrestore(&ch
->lock
, flags
);
2024 shost_printk(KERN_ERR
, target
->scsi_host
, PFX
2025 "no IU available to send response\n");
2030 ib_dma_sync_single_for_cpu(dev
, iu
->dma
, len
, DMA_TO_DEVICE
);
2031 memcpy(iu
->buf
, rsp
, len
);
2032 ib_dma_sync_single_for_device(dev
, iu
->dma
, len
, DMA_TO_DEVICE
);
2034 err
= srp_post_send(ch
, iu
, len
);
2036 shost_printk(KERN_ERR
, target
->scsi_host
, PFX
2037 "unable to post response: %d\n", err
);
2038 srp_put_tx_iu(ch
, iu
, SRP_IU_RSP
);
2044 static void srp_process_cred_req(struct srp_rdma_ch
*ch
,
2045 struct srp_cred_req
*req
)
2047 struct srp_cred_rsp rsp
= {
2048 .opcode
= SRP_CRED_RSP
,
2051 s32 delta
= be32_to_cpu(req
->req_lim_delta
);
2053 if (srp_response_common(ch
, delta
, &rsp
, sizeof(rsp
)))
2054 shost_printk(KERN_ERR
, ch
->target
->scsi_host
, PFX
2055 "problems processing SRP_CRED_REQ\n");
2058 static void srp_process_aer_req(struct srp_rdma_ch
*ch
,
2059 struct srp_aer_req
*req
)
2061 struct srp_target_port
*target
= ch
->target
;
2062 struct srp_aer_rsp rsp
= {
2063 .opcode
= SRP_AER_RSP
,
2066 s32 delta
= be32_to_cpu(req
->req_lim_delta
);
2068 shost_printk(KERN_ERR
, target
->scsi_host
, PFX
2069 "ignoring AER for LUN %llu\n", scsilun_to_int(&req
->lun
));
2071 if (srp_response_common(ch
, delta
, &rsp
, sizeof(rsp
)))
2072 shost_printk(KERN_ERR
, target
->scsi_host
, PFX
2073 "problems processing SRP_AER_REQ\n");
2076 static void srp_recv_done(struct ib_cq
*cq
, struct ib_wc
*wc
)
2078 struct srp_iu
*iu
= container_of(wc
->wr_cqe
, struct srp_iu
, cqe
);
2079 struct srp_rdma_ch
*ch
= cq
->cq_context
;
2080 struct srp_target_port
*target
= ch
->target
;
2081 struct ib_device
*dev
= target
->srp_host
->srp_dev
->dev
;
2085 if (unlikely(wc
->status
!= IB_WC_SUCCESS
)) {
2086 srp_handle_qp_err(cq
, wc
, "RECV");
2090 ib_dma_sync_single_for_cpu(dev
, iu
->dma
, ch
->max_ti_iu_len
,
2093 opcode
= *(u8
*) iu
->buf
;
2096 shost_printk(KERN_ERR
, target
->scsi_host
,
2097 PFX
"recv completion, opcode 0x%02x\n", opcode
);
2098 print_hex_dump(KERN_ERR
, "", DUMP_PREFIX_OFFSET
, 8, 1,
2099 iu
->buf
, wc
->byte_len
, true);
2104 srp_process_rsp(ch
, iu
->buf
);
2108 srp_process_cred_req(ch
, iu
->buf
);
2112 srp_process_aer_req(ch
, iu
->buf
);
2116 /* XXX Handle target logout */
2117 shost_printk(KERN_WARNING
, target
->scsi_host
,
2118 PFX
"Got target logout request\n");
2122 shost_printk(KERN_WARNING
, target
->scsi_host
,
2123 PFX
"Unhandled SRP opcode 0x%02x\n", opcode
);
2127 ib_dma_sync_single_for_device(dev
, iu
->dma
, ch
->max_ti_iu_len
,
2130 res
= srp_post_recv(ch
, iu
);
2132 shost_printk(KERN_ERR
, target
->scsi_host
,
2133 PFX
"Recv failed with error code %d\n", res
);
/**
 * srp_tl_err_work() - handle a transport layer error
 * @work: Work structure embedded in an SRP target port.
 *
 * Note: This function may get invoked before the rport has been created,
 * hence the target->rport test.
 */
2143 static void srp_tl_err_work(struct work_struct
*work
)
2145 struct srp_target_port
*target
;
2147 target
= container_of(work
, struct srp_target_port
, tl_err_work
);
2149 srp_start_tl_fail_timers(target
->rport
);
2152 static void srp_handle_qp_err(struct ib_cq
*cq
, struct ib_wc
*wc
,
2155 struct srp_rdma_ch
*ch
= cq
->cq_context
;
2156 struct srp_target_port
*target
= ch
->target
;
2158 if (ch
->connected
&& !target
->qp_in_error
) {
2159 shost_printk(KERN_ERR
, target
->scsi_host
,
2160 PFX
"failed %s status %s (%d) for CQE %p\n",
2161 opname
, ib_wc_status_msg(wc
->status
), wc
->status
,
2163 queue_work(system_long_wq
, &target
->tl_err_work
);
2165 target
->qp_in_error
= true;
2168 static int srp_queuecommand(struct Scsi_Host
*shost
, struct scsi_cmnd
*scmnd
)
2170 struct srp_target_port
*target
= host_to_target(shost
);
2171 struct srp_rdma_ch
*ch
;
2172 struct srp_request
*req
;
2174 struct srp_cmd
*cmd
;
2175 struct ib_device
*dev
;
2176 unsigned long flags
;
2181 scmnd
->result
= srp_chkready(target
->rport
);
2182 if (unlikely(scmnd
->result
))
2185 WARN_ON_ONCE(scmnd
->request
->tag
< 0);
	tag = blk_mq_unique_tag(scmnd->request);
	ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
	idx = blk_mq_unique_tag_to_tag(tag);
	WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
		  dev_name(&shost->shost_gendev), tag, idx,
		  target->req_ring_size);

	spin_lock_irqsave(&ch->lock, flags);
	iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
	spin_unlock_irqrestore(&ch->lock, flags);

	if (!iu)
		goto err;

	req = &ch->req_ring[idx];
	dev = target->srp_host->srp_dev->dev;
	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_it_iu_len,
				   DMA_TO_DEVICE);

	scmnd->host_scribble = (void *) req;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	int_to_scsilun(scmnd->device->lun, &cmd->lun);
	cmd->tag = tag;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
	if (unlikely(scmnd->cmd_len > sizeof(cmd->cdb))) {
		cmd->add_cdb_len = round_up(scmnd->cmd_len - sizeof(cmd->cdb),
					    4);
		if (WARN_ON_ONCE(cmd->add_cdb_len > SRP_MAX_ADD_CDB_LEN))
			goto err_iu;
	}

	req->scmnd = scmnd;
	req->cmd = iu;

	len = srp_map_data(scmnd, ch, req);
	if (len < 0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Failed to map data (%d)\n", len);
		/*
		 * If we ran out of memory descriptors (-ENOMEM) because an
		 * application is queuing many requests with more than
		 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
		 * to reduce queue depth temporarily.
		 */
		scmnd->result = len == -ENOMEM ?
			DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
		goto err_iu;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_it_iu_len,
				      DMA_TO_DEVICE);

	if (srp_post_send(ch, iu, len)) {
		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
		scmnd->result = DID_ERROR << 16;
		goto err_unmap;
	}

	return 0;

err_unmap:
	srp_unmap_data(scmnd, ch, req);

err_iu:
	srp_put_tx_iu(ch, iu, SRP_IU_CMD);

	/*
	 * Avoid that the loops that iterate over the request ring can
	 * encounter a dangling SCSI command pointer.
	 */
	req->scmnd = NULL;

err:
	if (scmnd->result) {
		scmnd->scsi_done(scmnd);
		ret = 0;
	} else {
		ret = SCSI_MLQUEUE_HOST_BUSY;
	}

	return ret;
}

/*
 * Note: the resources allocated in this function are freed in
 * srp_free_ch_ib().
 */
static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int i;

	ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
			      GFP_KERNEL);
	if (!ch->rx_ring)
		goto err_no_ring;
	ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
			      GFP_KERNEL);
	if (!ch->tx_ring)
		goto err_no_ring;

	for (i = 0; i < target->queue_size; ++i) {
		ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
					      ch->max_ti_iu_len,
					      GFP_KERNEL, DMA_FROM_DEVICE);
		if (!ch->rx_ring[i])
			goto err;
	}

	for (i = 0; i < target->queue_size; ++i) {
		ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
					      ch->max_it_iu_len,
					      GFP_KERNEL, DMA_TO_DEVICE);
		if (!ch->tx_ring[i])
			goto err;

		list_add(&ch->tx_ring[i]->list, &ch->free_tx);
	}

	return 0;

err:
	for (i = 0; i < target->queue_size; ++i) {
		srp_free_iu(target->srp_host, ch->rx_ring[i]);
		srp_free_iu(target->srp_host, ch->tx_ring[i]);
	}

err_no_ring:
	kfree(ch->tx_ring);
	ch->tx_ring = NULL;
	kfree(ch->rx_ring);
	ch->rx_ring = NULL;

	return -ENOMEM;
}
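
/*
 * Worked example for srp_compute_rq_tmo() below (illustrative values): a
 * Local ACK Timeout of 14 gives T_tr_ns = 4096 * 2^14 ns ~= 67 ms; with a
 * retry count of 7 the largest completion delay is about 7 * 4 * 67 ms ~=
 * 1879 ms, so rq_tmo_jiffies corresponds to roughly 2.9 seconds.
 */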
static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
{
	uint64_t T_tr_ns, max_compl_time_ms;
	uint32_t rq_tmo_jiffies;

	/*
	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
	 * table 91), both the QP timeout and the retry count have to be set
	 * for RC QP's during the RTR to RTS transition.
	 */
	WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
		     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));

	/*
	 * Set target->rq_tmo_jiffies to one second more than the largest time
	 * it can take before an error completion is generated. See also
	 * C9-140..142 in the IBTA spec for more information about how to
	 * convert the QP Local ACK Timeout value to nanoseconds.
	 */
	T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
	max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
	do_div(max_compl_time_ms, NSEC_PER_MSEC);
	rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);

	return rq_tmo_jiffies;
}

static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
			       const struct srp_login_rsp *lrsp,
			       struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int ret = 0;
	int i;

	if (lrsp->opcode == SRP_LOGIN_RSP) {
		ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
		ch->req_lim       = be32_to_cpu(lrsp->req_lim_delta);
		ch->use_imm_data  = srp_use_imm_data &&
			(lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP);
		ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
						      ch->use_imm_data,
						      target->max_it_iu_size);
		WARN_ON_ONCE(ch->max_it_iu_len >
			     be32_to_cpu(lrsp->max_it_iu_len));

		if (ch->use_imm_data)
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "using immediate data\n");

		/*
		 * Reserve credits for task management so we don't
		 * bounce requests back to the SCSI mid-layer.
		 */
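		/*
		 * Illustrative example (hypothetical numbers): with
		 * ch->req_lim == 64 and SRP_TSK_MGMT_SQ_SIZE == 1,
		 * can_queue is clamped to at most 63 outstanding commands.
		 */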
		target->scsi_host->can_queue
			= min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
			      target->scsi_host->can_queue);
		target->scsi_host->cmd_per_lun
			= min_t(int, target->scsi_host->can_queue,
				target->scsi_host->cmd_per_lun);
	} else {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
		ret = -ECONNRESET;
		goto error;
	}

	if (!ch->rx_ring) {
		ret = srp_alloc_iu_bufs(ch);
		if (ret)
			goto error;
	}

	for (i = 0; i < target->queue_size; i++) {
		struct srp_iu *iu = ch->rx_ring[i];

		ret = srp_post_recv(ch, iu);
		if (ret)
			goto error;
	}

	if (!target->using_rdma_cm) {
		ret = -ENOMEM;
		qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL);
		if (!qp_attr)
			goto error;

		qp_attr->qp_state = IB_QPS_RTR;
		ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
		if (ret)
			goto error_free;

		ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
		if (ret)
			goto error_free;

		qp_attr->qp_state = IB_QPS_RTS;
		ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
		if (ret)
			goto error_free;

		target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);

		ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
		if (ret)
			goto error_free;

		ret = ib_send_cm_rtu(cm_id, NULL, 0);
	}

error_free:
	kfree(qp_attr);

error:
	ch->status = ret;
}

static void srp_ib_cm_rej_handler(struct ib_cm_id *cm_id,
				  const struct ib_cm_event *event,
				  struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct Scsi_Host *shost = target->scsi_host;
	struct ib_class_port_info *cpi;
	int opcode;
	u16 dlid;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		cpi = event->param.rej_rcvd.ari;
		dlid = be16_to_cpu(cpi->redirect_lid);
		sa_path_set_dlid(&ch->ib_cm.path, dlid);
		ch->ib_cm.path.pkey = cpi->redirect_pkey;
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
		memcpy(ch->ib_cm.path.dgid.raw, cpi->redirect_gid, 16);

		ch->status = dlid ? SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
		break;

	case IB_CM_REJ_PORT_REDIRECT:
		if (srp_target_is_topspin(target)) {
			union ib_gid *dgid = &ch->ib_cm.path.dgid;

			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
			memcpy(dgid->raw, event->param.rej_rcvd.ari, 16);

			shost_printk(KERN_DEBUG, shost,
				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
				     be64_to_cpu(dgid->global.subnet_prefix),
				     be64_to_cpu(dgid->global.interface_id));

			ch->status = SRP_PORT_REDIRECT;
		} else {
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
			ch->status = -ECONNRESET;
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		shost_printk(KERN_WARNING, shost,
			     "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				shost_printk(KERN_WARNING, shost, PFX
					     "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
					     target->sgid.raw,
					     target->ib_cm.orig_dgid.raw,
					     reason);
		} else
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
				     " opcode 0x%02x\n", opcode);
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REJ_STALE_CONN:
		shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
		ch->status = SRP_STALE_CONN;
		break;

	default:
		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
			     event->param.rej_rcvd.reason);
		ch->status = -ECONNRESET;
	}
}

static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
			     const struct ib_cm_event *event)
{
	struct srp_rdma_ch *ch = cm_id->context;
	struct srp_target_port *target = ch->target;
	int comp = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM REQ failed\n");
		comp = 1;
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		srp_cm_rep_handler(cm_id, event->private_data, ch);
		break;

	case IB_CM_REJ_RECEIVED:
		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
		comp = 1;

		srp_ib_cm_rej_handler(cm_id, event, ch);
		break;

	case IB_CM_DREQ_RECEIVED:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "DREQ received - connection closed\n");
		ch->connected = false;
		if (ib_send_cm_drep(cm_id, NULL, 0))
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "Sending CM DREP failed\n");
		queue_work(system_long_wq, &target->tl_err_work);
		break;

	case IB_CM_TIMEWAIT_EXIT:
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "connection closed\n");
		comp = 1;

		ch->status = 0;
		break;

	case IB_CM_MRA_RECEIVED:
	case IB_CM_DREQ_ERROR:
	case IB_CM_DREP_RECEIVED:
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&ch->done);

	return 0;
}

static void srp_rdma_cm_rej_handler(struct srp_rdma_ch *ch,
				    struct rdma_cm_event *event)
{
	struct srp_target_port *target = ch->target;
	struct Scsi_Host *shost = target->scsi_host;
	int opcode;

	switch (event->status) {
	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		shost_printk(KERN_WARNING, shost,
			     "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		opcode = *(u8 *) event->param.conn.private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej =
				(struct srp_login_rej *)
				event->param.conn.private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
		} else {
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED, opcode 0x%02x\n",
				     opcode);
		}
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REJ_STALE_CONN:
		shost_printk(KERN_WARNING, shost,
			     "  REJ reason: stale connection\n");
		ch->status = SRP_STALE_CONN;
		break;

	default:
		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
			     event->status);
		ch->status = -ECONNRESET;
		break;
	}
}

static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
			       struct rdma_cm_event *event)
{
	struct srp_rdma_ch *ch = cm_id->context;
	struct srp_target_port *target = ch->target;
	int comp = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		ch->status = 0;
		comp = 1;
		break;

	case RDMA_CM_EVENT_ADDR_ERROR:
		ch->status = -ENXIO;
		comp = 1;
		break;

	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ch->status = 0;
		comp = 1;
		break;

	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
		ch->status = -EHOSTUNREACH;
		comp = 1;
		break;

	case RDMA_CM_EVENT_CONNECT_ERROR:
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM REQ failed\n");
		comp = 1;
		ch->status = -ECONNRESET;
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		comp = 1;
		srp_cm_rep_handler(NULL, event->param.conn.private_data, ch);
		break;

	case RDMA_CM_EVENT_REJECTED:
		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
		comp = 1;

		srp_rdma_cm_rej_handler(ch, event);
		break;

	case RDMA_CM_EVENT_DISCONNECTED:
		if (ch->connected) {
			shost_printk(KERN_WARNING, target->scsi_host,
				     PFX "received DREQ\n");
			rdma_disconnect(ch->rdma_cm.cm_id);
			comp = 1;
			ch->status = 0;
			queue_work(system_long_wq, &target->tl_err_work);
		}
		break;

	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "connection closed\n");
		comp = 1;
		ch->status = 0;
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&ch->done);

	return 0;
}

/**
 * srp_change_queue_depth - setting device queue depth
 * @sdev: scsi device struct
 * @qdepth: requested queue depth
 *
 * Returns queue depth.
 */
static int
srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	if (!sdev->tagged_supported)
		qdepth = 1;
	return scsi_change_queue_depth(sdev, qdepth);
}

static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
			     u8 func, u8 *status)
{
	struct srp_target_port *target = ch->target;
	struct srp_rport *rport = target->rport;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;
	int res;

	if (!ch->connected || target->qp_in_error)
		return -1;

	/*
	 * Lock the rport mutex to avoid that srp_create_ch_ib() is
	 * invoked while a task management function is being sent.
	 */
	mutex_lock(&rport->mutex);
	spin_lock_irq(&ch->lock);
	iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
	spin_unlock_irq(&ch->lock);

	if (!iu) {
		mutex_unlock(&rport->mutex);

		return -1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
				   DMA_TO_DEVICE);
	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

	tsk_mgmt->opcode	= SRP_TSK_MGMT;
	int_to_scsilun(lun, &tsk_mgmt->lun);
	tsk_mgmt->tsk_mgmt_func = func;
	tsk_mgmt->task_tag	= req_tag;
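
	/*
	 * Task management IUs get their tag from a per-channel counter with
	 * the SRP_TAG_TSK_MGMT bit set, so their responses can be told apart
	 * from responses to regular SCSI commands.
	 */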
	spin_lock_irq(&ch->lock);
	ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tag = ch->tsk_mgmt_tag;
	spin_unlock_irq(&ch->lock);

	init_completion(&ch->tsk_mgmt_done);

	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
				      DMA_TO_DEVICE);
	if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
		srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
		mutex_unlock(&rport->mutex);

		return -1;
	}
	res = wait_for_completion_timeout(&ch->tsk_mgmt_done,
					  msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS));
	if (res > 0 && status)
		*status = ch->tsk_mgmt_status;
	mutex_unlock(&rport->mutex);

	WARN_ON_ONCE(res < 0);

	return res > 0 ? 0 : -1;
}

static int srp_abort(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
	u32 tag;
	u16 ch_idx;
	struct srp_rdma_ch *ch;
	int ret;

	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");

	if (!req)
		return SUCCESS;
	tag = blk_mq_unique_tag(scmnd->request);
	ch_idx = blk_mq_unique_tag_to_hwq(tag);
	if (WARN_ON_ONCE(ch_idx >= target->ch_count))
		return SUCCESS;
	ch = &target->ch[ch_idx];
	if (!srp_claim_req(ch, req, NULL, scmnd))
		return SUCCESS;
	shost_printk(KERN_ERR, target->scsi_host,
		     "Sending SRP abort for tag %#x\n", tag);
	if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
			      SRP_TSK_ABORT_TASK, NULL) == 0)
		ret = SUCCESS;
	else if (target->rport->state == SRP_RPORT_LOST)
		ret = FAST_IO_FAIL;
	else
		ret = FAILED;
	if (ret == SUCCESS) {
		srp_free_req(ch, req, scmnd, 0);
		scmnd->result = DID_ABORT << 16;
		scmnd->scsi_done(scmnd);
	}

	return ret;
}

static int srp_reset_device(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_rdma_ch *ch;
	u8 status;

	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");

	ch = &target->ch[0];
	if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
			      SRP_TSK_LUN_RESET, &status))
		return FAILED;
	if (status)
		return FAILED;

	return SUCCESS;
}

static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);

	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");

	return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
}

static int srp_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct srp_target_port *target = host_to_target(shost);

	if (target->target_can_queue)
		starget->can_queue = target->target_can_queue;
	return 0;
}

static int srp_slave_configure(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct srp_target_port *target = host_to_target(shost);
	struct request_queue *q = sdev->request_queue;
	unsigned long timeout;

	if (sdev->type == TYPE_DISK) {
		timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
		blk_queue_rq_timeout(q, timeout);
	}

	return 0;
}

static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
}

static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
}

static ssize_t show_service_id(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->using_rdma_cm)
		return -ENOENT;
	return sysfs_emit(buf, "0x%016llx\n",
			  be64_to_cpu(target->ib_cm.service_id));
}

static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->using_rdma_cm)
		return -ENOENT;

	return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(target->ib_cm.pkey));
}

static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sysfs_emit(buf, "%pI6\n", target->sgid.raw);
}

static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));
	struct srp_rdma_ch *ch = &target->ch[0];

	if (target->using_rdma_cm)
		return -ENOENT;

	return sysfs_emit(buf, "%pI6\n", ch->ib_cm.path.dgid.raw);
}

static ssize_t show_orig_dgid(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->using_rdma_cm)
		return -ENOENT;

	return sysfs_emit(buf, "%pI6\n", target->ib_cm.orig_dgid.raw);
}

static ssize_t show_req_lim(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));
	struct srp_rdma_ch *ch;
	int i, req_lim = INT_MAX;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		req_lim = min(req_lim, ch->req_lim);
	}

	return sysfs_emit(buf, "%d\n", req_lim);
}

static ssize_t show_zero_req_lim(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sysfs_emit(buf, "%d\n", target->zero_req_lim);
}

static ssize_t show_local_ib_port(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sysfs_emit(buf, "%d\n", target->srp_host->port);
}

static ssize_t show_local_ib_device(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sysfs_emit(buf, "%s\n",
			  dev_name(&target->srp_host->srp_dev->dev->dev));
}

static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sysfs_emit(buf, "%d\n", target->ch_count);
}

static ssize_t show_comp_vector(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sysfs_emit(buf, "%d\n", target->comp_vector);
}

static ssize_t show_tl_retry_count(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sysfs_emit(buf, "%d\n", target->tl_retry_count);
}

static ssize_t show_cmd_sg_entries(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sysfs_emit(buf, "%u\n", target->cmd_sg_cnt);
}

static ssize_t show_allow_ext_sg(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sysfs_emit(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
}

static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);

static struct device_attribute *srp_host_attrs[] = {
	&dev_attr_id_ext,
	&dev_attr_ioc_guid,
	&dev_attr_service_id,
	&dev_attr_pkey,
	&dev_attr_sgid,
	&dev_attr_dgid,
	&dev_attr_orig_dgid,
	&dev_attr_req_lim,
	&dev_attr_zero_req_lim,
	&dev_attr_local_ib_port,
	&dev_attr_local_ib_device,
	&dev_attr_ch_count,
	&dev_attr_comp_vector,
	&dev_attr_tl_retry_count,
	&dev_attr_cmd_sg_entries,
	&dev_attr_allow_ext_sg,
	NULL
};

static struct scsi_host_template srp_template = {
	.module				= THIS_MODULE,
	.name				= "InfiniBand SRP initiator",
	.proc_name			= DRV_NAME,
	.target_alloc			= srp_target_alloc,
	.slave_configure		= srp_slave_configure,
	.info				= srp_target_info,
	.queuecommand			= srp_queuecommand,
	.change_queue_depth		= srp_change_queue_depth,
	.eh_timed_out			= srp_timed_out,
	.eh_abort_handler		= srp_abort,
	.eh_device_reset_handler	= srp_reset_device,
	.eh_host_reset_handler		= srp_reset_host,
	.skip_settle_delay		= true,
	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
	.can_queue			= SRP_DEFAULT_CMD_SQ_SIZE,
	.this_id			= -1,
	.cmd_per_lun			= SRP_DEFAULT_CMD_SQ_SIZE,
	.shost_attrs			= srp_host_attrs,
	.track_queue_depth		= 1,
};

static int srp_sdev_count(struct Scsi_Host *host)
{
	struct scsi_device *sdev;
	int c = 0;

	shost_for_each_device(sdev, host)
		c++;

	return c;
}

/*
 * Return values:
 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
 *    removal has been scheduled.
 * 0 and target->state != SRP_TARGET_REMOVED upon success.
 */
static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;

	target->state = SRP_TARGET_SCANNING;
	sprintf(target->target_name, "SRP.T10:%016llX",
		be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dev.parent))
		return -ENODEV;

	memcpy(ids.port_id, &target->id_ext, 8);
	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
	ids.roles = SRP_RPORT_ROLE_TARGET;
	rport = srp_rport_add(target->scsi_host, &ids);
	if (IS_ERR(rport)) {
		scsi_remove_host(target->scsi_host);
		return PTR_ERR(rport);
	}

	rport->lld_data = target;
	target->rport = rport;

	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);

	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL);

	if (srp_connected_ch(target) < target->ch_count ||
	    target->qp_in_error) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "SCSI scan failed - removing SCSI host\n");
		srp_queue_remove_work(target);
		goto out;
	}

	pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
		 dev_name(&target->scsi_host->shost_gendev),
		 srp_sdev_count(target->scsi_host));

	spin_lock_irq(&target->lock);
	if (target->state == SRP_TARGET_SCANNING)
		target->state = SRP_TARGET_LIVE;
	spin_unlock_irq(&target->lock);

out:
	return 0;
}

static void srp_release_dev(struct device *dev)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);

	complete(&host->released);
}

static struct class srp_class = {
	.name    = "infiniband_srp",
	.dev_release = srp_release_dev
};

/**
 * srp_conn_unique() - check whether the connection to a target is unique
 * @host:   SRP host.
 * @target: SRP target port.
 */
static bool srp_conn_unique(struct srp_host *host,
			    struct srp_target_port *target)
{
	struct srp_target_port *t;
	bool ret = false;

	if (target->state == SRP_TARGET_REMOVED)
		goto out;

	ret = true;

	spin_lock(&host->target_lock);
	list_for_each_entry(t, &host->target_list, list) {
		if (t != target &&
		    target->id_ext == t->id_ext &&
		    target->ioc_guid == t->ioc_guid &&
		    target->initiator_ext == t->initiator_ext) {
			ret = false;
			break;
		}
	}
	spin_unlock(&host->target_lock);

out:
	return ret;
}

/*
 * Target ports are added by writing
 *
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 *     pkey=<P_Key>,service_id=<service ID>
 * or
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,
 *     [src=<IPv4 address>,]dest=<IPv4 address>:<port number>
 *
 * to the add_target sysfs attribute.
 */
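
/*
 * For example (all values below are placeholders, not from a real setup):
 *
 *   echo "id_ext=0000000000000001,ioc_guid=0000000000000002,\
 *         dgid=fe800000000000000000000000000003,pkey=ffff,\
 *         service_id=0000000000000002" \
 *        > /sys/class/infiniband_srp/<srp host>/add_target
 */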

enum {
	SRP_OPT_ERR		= 0,
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
	SRP_OPT_IO_CLASS	= 1 << 7,
	SRP_OPT_INITIATOR_EXT	= 1 << 8,
	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
	SRP_OPT_SG_TABLESIZE	= 1 << 11,
	SRP_OPT_COMP_VECTOR	= 1 << 12,
	SRP_OPT_TL_RETRY_COUNT	= 1 << 13,
	SRP_OPT_QUEUE_SIZE	= 1 << 14,
	SRP_OPT_IP_SRC		= 1 << 15,
	SRP_OPT_IP_DEST		= 1 << 16,
	SRP_OPT_TARGET_CAN_QUEUE= 1 << 17,
	SRP_OPT_MAX_IT_IU_SIZE	= 1 << 18,
	SRP_OPT_CH_COUNT	= 1 << 19,
};

static unsigned int srp_opt_mandatory[] = {
	SRP_OPT_ID_EXT		|
	SRP_OPT_IOC_GUID	|
	SRP_OPT_DGID		|
	SRP_OPT_PKEY		|
	SRP_OPT_SERVICE_ID,
	SRP_OPT_ID_EXT		|
	SRP_OPT_IOC_GUID	|
	SRP_OPT_IP_DEST,
};

static const match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,		"id_ext=%s"		},
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s"		},
	{ SRP_OPT_DGID,			"dgid=%s"		},
	{ SRP_OPT_PKEY,			"pkey=%x"		},
	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
	{ SRP_OPT_MAX_SECT,		"max_sect=%d"		},
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d"	},
	{ SRP_OPT_TARGET_CAN_QUEUE,	"target_can_queue=%d"	},
	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
	{ SRP_OPT_COMP_VECTOR,		"comp_vector=%u"	},
	{ SRP_OPT_TL_RETRY_COUNT,	"tl_retry_count=%u"	},
	{ SRP_OPT_QUEUE_SIZE,		"queue_size=%d"		},
	{ SRP_OPT_IP_SRC,		"src=%s"		},
	{ SRP_OPT_IP_DEST,		"dest=%s"		},
	{ SRP_OPT_MAX_IT_IU_SIZE,	"max_it_iu_size=%d"	},
	{ SRP_OPT_CH_COUNT,		"ch_count=%u",		},
	{ SRP_OPT_ERR,			NULL			}
};

/**
 * srp_parse_in - parse an IP address and port number combination
 * @net:	   [in]  Network namespace.
 * @sa:		   [out] Address family, IP address and port number.
 * @addr_port_str: [in]  IP address and port number.
 * @has_port:	   [out] Whether or not @addr_port_str includes a port number.
 *
 * Parse the following address formats:
 * - IPv4: <ip_address>:<port>, e.g. 1.2.3.4:5.
 * - IPv6: \[<ipv6_address>\]:<port>, e.g. [1::2:3%4]:5.
 */
static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
			const char *addr_port_str, bool *has_port)
{
	char *addr_end, *addr = kstrdup(addr_port_str, GFP_KERNEL);
	char *port_str;
	int ret;

	if (!addr)
		return -ENOMEM;
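
	/*
	 * The last ':' separates the port number from the address; when a
	 * ']' follows it, that colon is part of a bracketed IPv6 address
	 * and no port number is present.
	 */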
	port_str = strrchr(addr, ':');
	if (port_str && strchr(port_str, ']'))
		port_str = NULL;
	if (port_str)
		*port_str++ = '\0';
	if (has_port)
		*has_port = port_str != NULL;
	ret = inet_pton_with_scope(net, AF_INET, addr, port_str, sa);
	if (ret && addr[0]) {
		addr_end = addr + strlen(addr) - 1;
		if (addr[0] == '[' && *addr_end == ']') {
			*addr_end = '\0';
			ret = inet_pton_with_scope(net, AF_INET6, addr + 1,
						   port_str, sa);
		}
	}
	kfree(addr);
	pr_debug("%s -> %pISpfsc\n", addr_port_str, sa);
	return ret;
}

static int srp_parse_options(struct net *net, const char *buf,
			     struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	substring_t args[MAX_OPT_ARGS];
	unsigned long long ull;
	bool has_port;
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int i;

	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			ret = kstrtoull(p, 16, &ull);
			if (ret) {
				pr_warn("invalid id_ext parameter '%s'\n", p);
				kfree(p);
				goto out;
			}
			target->id_ext = cpu_to_be64(ull);
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			ret = kstrtoull(p, 16, &ull);
			if (ret) {
				pr_warn("invalid ioc_guid parameter '%s'\n", p);
				kfree(p);
				goto out;
			}
			target->ioc_guid = cpu_to_be64(ull);
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(p) != 32) {
				pr_warn("bad dest GID parameter '%s'\n", p);
				kfree(p);
				goto out;
			}
			ret = hex2bin(target->ib_cm.orig_dgid.raw, p, 16);
			kfree(p);
			if (ret < 0)
				goto out;
			break;

		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				pr_warn("bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->ib_cm.pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			ret = kstrtoull(p, 16, &ull);
			if (ret) {
				pr_warn("bad service_id parameter '%s'\n", p);
				kfree(p);
				goto out;
			}
			target->ib_cm.service_id = cpu_to_be64(ull);
			kfree(p);
			break;

		case SRP_OPT_IP_SRC:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			ret = srp_parse_in(net, &target->rdma_cm.src.ss, p,
					   NULL);
			if (ret < 0) {
				pr_warn("bad source parameter '%s'\n", p);
				kfree(p);
				goto out;
			}
			target->rdma_cm.src_specified = true;
			kfree(p);
			break;

		case SRP_OPT_IP_DEST:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p,
					   &has_port);
			if (!has_port)
				ret = -EINVAL;
			if (ret < 0) {
				pr_warn("bad dest parameter '%s'\n", p);
				kfree(p);
				goto out;
			}
			target->using_rdma_cm = true;
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				pr_warn("bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		case SRP_OPT_QUEUE_SIZE:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad queue_size parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->can_queue = token;
			target->queue_size = token + SRP_RSP_SQ_SIZE +
					     SRP_TSK_MGMT_SQ_SIZE;
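			/*
			 * Unless max_cmd_per_lun was given explicitly, let
			 * the per-LUN queue depth follow queue_size.
			 */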
			if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
				target->scsi_host->cmd_per_lun = token;
			break;

		case SRP_OPT_MAX_CMD_PER_LUN:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad max cmd_per_lun parameter '%s'\n",
					p);
				goto out;
			}
			target->scsi_host->cmd_per_lun = token;
			break;

		case SRP_OPT_TARGET_CAN_QUEUE:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad max target_can_queue parameter '%s'\n",
					p);
				goto out;
			}
			target->target_can_queue = token;
			break;

		case SRP_OPT_IO_CLASS:
			if (match_hex(args, &token)) {
				pr_warn("bad IO class parameter '%s'\n", p);
				goto out;
			}
			if (token != SRP_REV10_IB_IO_CLASS &&
			    token != SRP_REV16A_IB_IO_CLASS) {
				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
					token, SRP_REV10_IB_IO_CLASS,
					SRP_REV16A_IB_IO_CLASS);
				goto out;
			}
			target->io_class = token;
			break;

		case SRP_OPT_INITIATOR_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			ret = kstrtoull(p, 16, &ull);
			if (ret) {
				pr_warn("bad initiator_ext value '%s'\n", p);
				kfree(p);
				goto out;
			}
			target->initiator_ext = cpu_to_be64(ull);
			kfree(p);
			break;

		case SRP_OPT_CMD_SG_ENTRIES:
			if (match_int(args, &token) || token < 1 || token > 255) {
				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
					p);
				goto out;
			}
			target->cmd_sg_cnt = token;
			break;

		case SRP_OPT_ALLOW_EXT_SG:
			if (match_int(args, &token)) {
				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
				goto out;
			}
			target->allow_ext_sg = !!token;
			break;

		case SRP_OPT_SG_TABLESIZE:
			if (match_int(args, &token) || token < 1 ||
			    token > SG_MAX_SEGMENTS) {
				pr_warn("bad max sg_tablesize parameter '%s'\n",
					p);
				goto out;
			}
			target->sg_tablesize = token;
			break;

		case SRP_OPT_COMP_VECTOR:
			if (match_int(args, &token) || token < 0) {
				pr_warn("bad comp_vector parameter '%s'\n", p);
				goto out;
			}
			target->comp_vector = token;
			break;

		case SRP_OPT_TL_RETRY_COUNT:
			if (match_int(args, &token) || token < 2 || token > 7) {
				pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
					p);
				goto out;
			}
			target->tl_retry_count = token;
			break;

		case SRP_OPT_MAX_IT_IU_SIZE:
			if (match_int(args, &token) || token < 0) {
				pr_warn("bad maximum initiator to target IU size '%s'\n", p);
				goto out;
			}
			target->max_it_iu_size = token;
			break;

		case SRP_OPT_CH_COUNT:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad channel count %s\n", p);
				goto out;
			}
			target->ch_count = token;
			break;

		default:
			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
				p);
			goto out;
		}
	}

	for (i = 0; i < ARRAY_SIZE(srp_opt_mandatory); i++) {
		if ((opt_mask & srp_opt_mandatory[i]) == srp_opt_mandatory[i]) {
			ret = 0;
			break;
		}
	}
	if (ret)
		pr_warn("target creation request is missing one or more parameters\n");

	if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
	    && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
		pr_warn("cmd_per_lun = %d > queue_size = %d\n",
			target->scsi_host->cmd_per_lun,
			target->scsi_host->can_queue);

out:
	kfree(options);

	return ret;
}

static ssize_t srp_create_target(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	struct srp_rdma_ch *ch;
	struct srp_device *srp_dev = host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	int ret, node_idx, node, cpu, i;
	unsigned int max_sectors_per_mr, mr_per_cmd = 0;
	bool multich = false;
	uint32_t max_iu_len;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->transportt  = ib_srp_transport_template;
	target_host->max_channel = 0;
	target_host->max_id      = 1;
	target_host->max_lun     = -1LL;
	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
	target_host->max_segment_size = ib_dma_max_seg_size(ibdev);

	if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
		target_host->virt_boundary_mask = ~srp_dev->mr_page_mask;

	target = host_to_target(target_host);

	target->net		= kobj_ns_grab_current(KOBJ_NS_TYPE_NET);
	target->io_class	= SRP_REV16A_IB_IO_CLASS;
	target->scsi_host	= target_host;
	target->srp_host	= host;
	target->lkey		= host->srp_dev->pd->local_dma_lkey;
	target->global_rkey	= host->srp_dev->global_rkey;
	target->cmd_sg_cnt	= cmd_sg_entries;
	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
	target->allow_ext_sg	= allow_ext_sg;
	target->tl_retry_count	= 7;
	target->queue_size	= SRP_DEFAULT_QUEUE_SIZE;

	/*
	 * Avoid that the SCSI host can be removed by srp_remove_target()
	 * before this function returns.
	 */
	scsi_host_get(target->scsi_host);

	ret = mutex_lock_interruptible(&host->add_target_mutex);
	if (ret < 0)
		goto put;

	ret = srp_parse_options(target->net, buf, target);
	if (ret)
		goto out;

	target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;

	if (!srp_conn_unique(target->srp_host, target)) {
		if (target->using_rdma_cm) {
			shost_printk(KERN_INFO, target->scsi_host,
				     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;dest=%pIS\n",
				     be64_to_cpu(target->id_ext),
				     be64_to_cpu(target->ioc_guid),
				     &target->rdma_cm.dst);
		} else {
			shost_printk(KERN_INFO, target->scsi_host,
				     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
				     be64_to_cpu(target->id_ext),
				     be64_to_cpu(target->ioc_guid),
				     be64_to_cpu(target->initiator_ext));
		}
		ret = -EEXIST;
		goto out;
	}

	if (!srp_dev->has_fr && !target->allow_ext_sg &&
	    target->cmd_sg_cnt < target->sg_tablesize) {
		pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
		target->sg_tablesize = target->cmd_sg_cnt;
	}

	if (srp_dev->use_fast_reg) {
		bool gaps_reg = (ibdev->attrs.device_cap_flags &
				 IB_DEVICE_SG_GAPS_REG);

		max_sectors_per_mr = srp_dev->max_pages_per_mr <<
				     (ilog2(srp_dev->mr_page_size) - 9);
		if (!gaps_reg) {
			/*
			 * FR can only map one HCA page per entry. If the start
			 * address is not aligned on a HCA page boundary two
			 * entries will be used for the head and the tail
			 * although these two entries combined contain at most
			 * one HCA page of data. Hence the "+ 1" in the
			 * calculation below.
			 *
			 * The indirect data buffer descriptor is contiguous
			 * so the memory for that buffer will only be
			 * registered if register_always is true. Hence add
			 * one to mr_per_cmd if register_always has been set.
			 */
			mr_per_cmd = register_always +
				(target->scsi_host->max_sectors + 1 +
				 max_sectors_per_mr - 1) / max_sectors_per_mr;
		} else {
			mr_per_cmd = register_always +
				(target->sg_tablesize +
				 srp_dev->max_pages_per_mr - 1) /
				srp_dev->max_pages_per_mr;
		}
		pr_debug("max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; max_sectors_per_mr = %u; mr_per_cmd = %u\n",
			 target->scsi_host->max_sectors, srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
			 max_sectors_per_mr, mr_per_cmd);
	}
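
	/*
	 * Illustrative example of the mr_per_cmd calculation above
	 * (hypothetical values): with max_sectors == 1024, a 4 KB
	 * mr_page_size and max_pages_per_mr == 256, max_sectors_per_mr is
	 * 256 << 3 == 2048 sectors, so mr_per_cmd == register_always +
	 * (1024 + 1 + 2047) / 2048 == register_always + 1.
	 */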
	target_host->sg_tablesize = target->sg_tablesize;
	target->mr_pool_size = target->scsi_host->can_queue * mr_per_cmd;
	target->mr_per_cmd = mr_per_cmd;
	target->indirect_size = target->sg_tablesize *
				sizeof (struct srp_direct_buf);
	max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
				       srp_use_imm_data,
				       target->max_it_iu_size);

	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
	INIT_WORK(&target->remove_work, srp_remove_work);
	spin_lock_init(&target->lock);
	ret = rdma_query_gid(ibdev, host->port, 0, &target->sgid);
	if (ret)
		goto out;

	if (target->ch_count == 0)
		target->ch_count =
			max_t(unsigned int, num_online_nodes(),
			      min(ch_count ?:
					  min(4 * num_online_nodes(),
					      ibdev->num_comp_vectors),
				  num_online_cpus()));
	target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
			     GFP_KERNEL);
	if (!target->ch) {
		ret = -ENOMEM;
		goto out;
	}
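
	/*
	 * Spread the channels over the online NUMA nodes and give each
	 * channel a completion vector taken from its node's share of
	 * ibdev->num_comp_vectors.
	 */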
	node_idx = 0;
	for_each_online_node(node) {
		const int ch_start = (node_idx * target->ch_count /
				      num_online_nodes());
		const int ch_end = ((node_idx + 1) * target->ch_count /
				    num_online_nodes());
		const int cv_start = node_idx * ibdev->num_comp_vectors /
				     num_online_nodes();
		const int cv_end = (node_idx + 1) * ibdev->num_comp_vectors /
				   num_online_nodes();
		int cpu_idx = 0;

		for_each_online_cpu(cpu) {
			if (cpu_to_node(cpu) != node)
				continue;
			if (ch_start + cpu_idx >= ch_end)
				continue;
			ch = &target->ch[ch_start + cpu_idx];
			ch->target = target;
			ch->comp_vector = cv_start == cv_end ? cv_start :
				cv_start + cpu_idx % (cv_end - cv_start);
			spin_lock_init(&ch->lock);
			INIT_LIST_HEAD(&ch->free_tx);
			ret = srp_new_cm_id(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_create_ch_ib(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_alloc_req_data(ch);
			if (ret)
				goto err_disconnect;

			ret = srp_connect_ch(ch, max_iu_len, multich);
			if (ret) {
				char dst[64];

				if (target->using_rdma_cm)
					snprintf(dst, sizeof(dst), "%pIS",
						 &target->rdma_cm.dst);
				else
					snprintf(dst, sizeof(dst), "%pI6",
						 target->ib_cm.orig_dgid.raw);
				shost_printk(KERN_ERR, target->scsi_host,
					     PFX "Connection %d/%d to %s failed\n",
					     ch_start + cpu_idx,
					     target->ch_count, dst);
				if (node_idx == 0 && cpu_idx == 0) {
					goto free_ch;
				} else {
					srp_free_ch_ib(target, ch);
					srp_free_req_data(target, ch);
					target->ch_count = ch - target->ch;
					goto connected;
				}
			}

			multich = true;
			cpu_idx++;
		}
		node_idx++;
	}

connected:
	target->scsi_host->nr_hw_queues = target->ch_count;

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	if (target->state != SRP_TARGET_REMOVED) {
		if (target->using_rdma_cm) {
			shost_printk(KERN_DEBUG, target->scsi_host, PFX
				     "new target: id_ext %016llx ioc_guid %016llx sgid %pI6 dest %pIS\n",
				     be64_to_cpu(target->id_ext),
				     be64_to_cpu(target->ioc_guid),
				     target->sgid.raw, &target->rdma_cm.dst);
		} else {
			shost_printk(KERN_DEBUG, target->scsi_host, PFX
				     "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
				     be64_to_cpu(target->id_ext),
				     be64_to_cpu(target->ioc_guid),
				     be16_to_cpu(target->ib_cm.pkey),
				     be64_to_cpu(target->ib_cm.service_id),
				     target->sgid.raw,
				     target->ib_cm.orig_dgid.raw);
		}
	}

	ret = count;

out:
	mutex_unlock(&host->add_target_mutex);

put:
	scsi_host_put(target->scsi_host);
	if (ret < 0) {
		/*
		 * If a call to srp_remove_target() has not been scheduled,
		 * drop the network namespace reference now that was obtained
		 * earlier in this function.
		 */
		if (target->state != SRP_TARGET_REMOVED)
			kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
		scsi_host_put(target->scsi_host);
	}

	return ret;

err_disconnect:
	srp_disconnect_target(target);

free_ch:
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
		srp_free_req_data(target, ch);
	}

	kfree(target->ch);
	goto out;
}

static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sysfs_emit(buf, "%s\n", dev_name(&host->srp_dev->dev->dev));
}

static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sysfs_emit(buf, "%d\n", host->port);
}

static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);

static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	mutex_init(&host->add_target_mutex);
	host->srp_dev = device;
	host->port = port;

	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dev.parent;
	dev_set_name(&host->dev, "srp-%s-%d", dev_name(&device->dev->dev),
		     port);

	if (device_register(&host->dev))
		goto free_host;
	if (device_create_file(&host->dev, &dev_attr_add_target))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_port))
		goto err_class;

	return host;

err_class:
	device_unregister(&host->dev);

free_host:
	kfree(host);

	return NULL;
}

static void srp_rename_dev(struct ib_device *device, void *client_data)
{
	struct srp_device *srp_dev = client_data;
	struct srp_host *host, *tmp_host;

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		char name[IB_DEVICE_NAME_MAX + 8];

		snprintf(name, sizeof(name), "srp-%s-%d",
			 dev_name(&device->dev), host->port);
		device_rename(&host->dev, name);
	}
}

static int srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *attr = &device->attrs;
	struct srp_host *host;
	int mr_page_shift;
	unsigned int p;
	u64 max_pages_per_mr;
	unsigned int flags = 0;

	srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
	if (!srp_dev)
		return -ENOMEM;

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 4096 bytes. We're unlikely to build large sglists
	 * out of smaller entries.
	 */
	mr_page_shift = max(12, ffs(attr->page_size_cap) - 1);
->mr_page_size
= 1 << mr_page_shift
;
3990 srp_dev
->mr_page_mask
= ~((u64
) srp_dev
->mr_page_size
- 1);
3991 max_pages_per_mr
= attr
->max_mr_size
;
3992 do_div(max_pages_per_mr
, srp_dev
->mr_page_size
);
3993 pr_debug("%s: %llu / %u = %llu <> %u\n", __func__
,
3994 attr
->max_mr_size
, srp_dev
->mr_page_size
,
3995 max_pages_per_mr
, SRP_MAX_PAGES_PER_MR
);
3996 srp_dev
->max_pages_per_mr
= min_t(u64
, SRP_MAX_PAGES_PER_MR
,
3999 srp_dev
->has_fr
= (attr
->device_cap_flags
&
4000 IB_DEVICE_MEM_MGT_EXTENSIONS
);
4001 if (!never_register
&& !srp_dev
->has_fr
)
4002 dev_warn(&device
->dev
, "FR is not supported\n");
4003 else if (!never_register
&&
4004 attr
->max_mr_size
>= 2 * srp_dev
->mr_page_size
)
4005 srp_dev
->use_fast_reg
= srp_dev
->has_fr
;
4007 if (never_register
|| !register_always
|| !srp_dev
->has_fr
)
4008 flags
|= IB_PD_UNSAFE_GLOBAL_RKEY
;
4010 if (srp_dev
->use_fast_reg
) {
4011 srp_dev
->max_pages_per_mr
=
4012 min_t(u32
, srp_dev
->max_pages_per_mr
,
4013 attr
->max_fast_reg_page_list_len
);
4015 srp_dev
->mr_max_size
= srp_dev
->mr_page_size
*
4016 srp_dev
->max_pages_per_mr
;
4017 pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
4018 dev_name(&device
->dev
), mr_page_shift
, attr
->max_mr_size
,
4019 attr
->max_fast_reg_page_list_len
,
4020 srp_dev
->max_pages_per_mr
, srp_dev
->mr_max_size
);
4022 INIT_LIST_HEAD(&srp_dev
->dev_list
);
4024 srp_dev
->dev
= device
;
4025 srp_dev
->pd
= ib_alloc_pd(device
, flags
);
4026 if (IS_ERR(srp_dev
->pd
)) {
4027 int ret
= PTR_ERR(srp_dev
->pd
);
4033 if (flags
& IB_PD_UNSAFE_GLOBAL_RKEY
) {
4034 srp_dev
->global_rkey
= srp_dev
->pd
->unsafe_global_rkey
;
4035 WARN_ON_ONCE(srp_dev
->global_rkey
== 0);
4038 rdma_for_each_port (device
, p
) {
4039 host
= srp_add_port(srp_dev
, p
);
4041 list_add_tail(&host
->list
, &srp_dev
->dev_list
);
4044 ib_set_client_data(device
, &srp_client
, srp_dev
);

static void srp_remove_one(struct ib_device *device, void *client_data)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	struct srp_target_port *target;

	srp_dev = client_data;

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Remove all target ports.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list)
			srp_queue_remove_work(target);
		spin_unlock(&host->target_lock);

		/*
		 * Wait for tl_err and target port removal tasks.
		 */
		flush_workqueue(system_long_wq);
		flush_workqueue(srp_remove_wq);

		kfree(host);
	}

	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}

static struct srp_function_template ib_srp_transport_functions = {
	.has_rport_state	 = true,
	.reset_timer_if_blocked	 = true,
	.reconnect_delay	 = &srp_reconnect_delay,
	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
	.dev_loss_tmo		 = &srp_dev_loss_tmo,
	.reconnect		 = srp_rport_reconnect,
	.rport_delete		 = srp_rport_delete,
	.terminate_rport_io	 = srp_terminate_io,
};

static int __init srp_init_module(void)
{
	int ret;

	BUILD_BUG_ON(sizeof(struct srp_imm_buf) != 4);
	BUILD_BUG_ON(sizeof(struct srp_login_req) != 64);
	BUILD_BUG_ON(sizeof(struct srp_login_req_rdma) != 56);
	BUILD_BUG_ON(sizeof(struct srp_cmd) != 48);
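
	/*
	 * Normalize the scatter/gather module parameters: honour the
	 * deprecated srp_sg_tablesize, fall back to SRP_DEF_SG_TABLESIZE,
	 * clamp cmd_sg_entries to 255 and keep indirect_sg_entries within
	 * [cmd_sg_entries, SG_MAX_SEGMENTS].
	 */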
	if (srp_sg_tablesize) {
		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
		if (!cmd_sg_entries)
			cmd_sg_entries = srp_sg_tablesize;
	}

	if (!cmd_sg_entries)
		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

	if (cmd_sg_entries > 255) {
		pr_warn("Clamping cmd_sg_entries to 255\n");
		cmd_sg_entries = 255;
	}

	if (!indirect_sg_entries)
		indirect_sg_entries = cmd_sg_entries;
	else if (indirect_sg_entries < cmd_sg_entries) {
		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
			cmd_sg_entries);
		indirect_sg_entries = cmd_sg_entries;
	}

	if (indirect_sg_entries > SG_MAX_SEGMENTS) {
		pr_warn("Clamping indirect_sg_entries to %u\n",
			SG_MAX_SEGMENTS);
		indirect_sg_entries = SG_MAX_SEGMENTS;
	}

	srp_remove_wq = create_workqueue("srp_remove");
	if (!srp_remove_wq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = -ENOMEM;
	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		goto destroy_wq;

	ret = class_register(&srp_class);
	if (ret) {
		pr_err("couldn't register class infiniband_srp\n");
		goto release_tr;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		goto unreg_sa;
	}

out:
	return ret;

unreg_sa:
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);

release_tr:
	srp_release_transport(ib_srp_transport_template);

destroy_wq:
	destroy_workqueue(srp_remove_wq);
	goto out;
}

static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
	destroy_workqueue(srp_remove_wq);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);