/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) PFX fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"1.0"
#define DRV_RELDATE	"July 1, 2013"
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");
static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
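/*
 * Example (added for clarity, not part of the original source): these
 * parameters use mode 0444, so they are read-only through sysfs and are
 * normally set at module load time, e.g.
 *
 *   modprobe ib_srp cmd_sg_entries=16 indirect_sg_entries=64 allow_ext_sg=1
 *
 * The values shown here are illustrative only.
 */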
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;
static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}
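/*
 * Worked example (added for clarity, not part of the original source): the
 * first three bytes of the big-endian ioc_guid hold the vendor OUI, so a
 * made-up GUID of 0x0005ad0000001234 starts with 00:05:ad and would be
 * treated as a Topspin/Cisco target whenever topspin_workarounds is enabled.
 */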
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %d\n", event->event);
}
static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_pkey(target->srp_host->srp_dev->dev,
			   target->srp_host->port,
			   be16_to_cpu(target->path.pkey),
			   &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}
static int srp_new_cm_id(struct srp_target_port *target)
{
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, target);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (target->cm_id)
		ib_destroy_cm_id(target->cm_id);
	target->cm_id = new_cm_id;

	return 0;
}
static int srp_create_target_ib(struct srp_target_port *target)
{
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
			       srp_recv_completion, NULL, target, SRP_RQ_SIZE,
			       target->comp_vector);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
			       srp_send_completion, NULL, target, SRP_SQ_SIZE,
			       target->comp_vector);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = SRP_SQ_SIZE;
	init_attr->cap.max_recv_wr     = SRP_RQ_SIZE;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_ALL_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = send_cq;
	init_attr->recv_cq             = recv_cq;

	qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (target->qp)
		ib_destroy_qp(target->qp);
	if (target->recv_cq)
		ib_destroy_cq(target->recv_cq);
	if (target->send_cq)
		ib_destroy_cq(target->send_cq);

	target->qp = qp;
	target->recv_cq = recv_cq;
	target->send_cq = send_cq;

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(qp);

err_send_cq:
	ib_destroy_cq(send_cq);

err_recv_cq:
	ib_destroy_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}
static void srp_free_target_ib(struct srp_target_port *target)
{
	int i;

	ib_destroy_qp(target->qp);
	ib_destroy_cq(target->send_cq);
	ib_destroy_cq(target->recv_cq);

	target->qp = NULL;
	target->send_cq = target->recv_cq = NULL;

	for (i = 0; i < SRP_RQ_SIZE; ++i)
		srp_free_iu(target->srp_host, target->rx_ring[i]);
	for (i = 0; i < SRP_SQ_SIZE; ++i)
		srp_free_iu(target->srp_host, target->tx_ring[i]);
}
static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	target->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		target->path = *pathrec;
	complete(&target->done);
}
static int srp_lookup_path(struct srp_target_port *target)
{
	target->path.numb_path = 1;

	init_completion(&target->done);

	target->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
						   target->srp_host->srp_dev->dev,
						   target->srp_host->port,
						   &target->path,
						   IB_SA_PATH_REC_SERVICE_ID	|
						   IB_SA_PATH_REC_DGID		|
						   IB_SA_PATH_REC_SGID		|
						   IB_SA_PATH_REC_NUMB_PATH	|
						   IB_SA_PATH_REC_PKEY,
						   SRP_PATH_REC_TIMEOUT_MS,
						   GFP_KERNEL,
						   srp_path_rec_completion,
						   target, &target->path_query);
	if (target->path_query_id < 0)
		return target->path_query_id;

	wait_for_completion(&target->done);

	if (target->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return target->status;
}
static int srp_send_req(struct srp_target_port *target)
{
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path		      = &target->path;
	req->param.alternate_path	      = NULL;
	req->param.service_id		      = target->service_id;
	req->param.qp_num		      = target->qp->qp_num;
	req->param.qp_type		      = target->qp->qp_type;
	req->param.private_data		      = &req->priv;
	req->param.private_data_len	      = sizeof req->priv;
	req->param.flow_control		      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn		     &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count		      = 7;
	req->param.rnr_retry_count	      = 7;
	req->param.max_cm_retries	      = 15;

	req->priv.opcode	= SRP_LOGIN_REQ;
	req->priv.tag		= 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     (unsigned long long) be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(target->cm_id, &req->param);

	kfree(req);

	return status;
}
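/*
 * Illustration (added for clarity, not part of the original source): for the
 * current (rev. 16a) I/O class the 16-byte port identifiers built above are
 * laid out as
 *
 *   initiator_port_id = initiator_ext (8 bytes) | port GID interface_id (8 bytes)
 *   target_port_id    = id_ext        (8 bytes) | ioc_guid               (8 bytes)
 *
 * while SRP_REV10_IB_IO_CLASS targets expect the two halves of each
 * identifier in the opposite order, which is what the if/else above encodes.
 */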
static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(srp_remove_wq, &target->remove_work);

	return changed;
}
static bool srp_change_conn_state(struct srp_target_port *target,
				  bool connected)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->connected != connected) {
		target->connected = connected;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	if (srp_change_conn_state(target, false)) {
		/* XXX should send SRP_I_LOGOUT request */

		if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}
static void srp_free_req_data(struct srp_target_port *target)
{
	struct ib_device *ibdev = target->srp_host->srp_dev->dev;
	struct srp_request *req;
	int i;

	for (i = 0, req = target->req_ring; i < SRP_CMD_SQ_SIZE; ++i, ++req) {
		kfree(req->fmr_list);
		kfree(req->map_page);
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}
}
/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template and that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_disconnect_target(target);
	ib_destroy_cm_id(target->cm_id);
	srp_free_target_ib(target);
	srp_free_req_data(target);

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}
static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}
static int srp_connect_target(struct srp_target_port *target)
{
	int retries = 3;
	int ret;

	WARN_ON_ONCE(target->connected);

	target->qp_in_error = false;

	ret = srp_lookup_path(target);
	if (ret)
		return ret;

	while (1) {
		init_completion(&target->done);
		ret = srp_send_req(target);
		if (ret)
			return ret;
		wait_for_completion(&target->done);

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (target->status) {
		case 0:
			srp_change_conn_state(target, true);
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(target);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			/* Our current CM id was stale, and is now in timewait.
			 * Try to reconnect with a new one.
			 */
			if (!retries-- || srp_new_cm_id(target)) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "giving up on stale connection\n");
				target->status = -ECONNRESET;
				return target->status;
			}

			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "retrying stale connection\n");
			break;

		default:
			return target->status;
		}
	}
}
static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_target_port *target,
			   struct srp_request *req)
{
	struct ib_device *ibdev = target->srp_host->srp_dev->dev;
	struct ib_pool_fmr **pfmr;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	pfmr = req->fmr_list;
	while (req->nfmr--)
		ib_fmr_pool_unmap(*pfmr++);

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @target: SRP target port.
 * @req: SRP request.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target,
				       struct srp_request *req,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&target->lock, flags);
	if (!scmnd) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else if (req->scmnd == scmnd) {
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&target->lock, flags);

	return scmnd;
}

/**
 * srp_free_req() - Unmap data and add request to the free request list.
 */
static void srp_free_req(struct srp_target_port *target,
			 struct srp_request *req, struct scsi_cmnd *scmnd,
			 s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, target, req);

	spin_lock_irqsave(&target->lock, flags);
	target->req_lim += req_lim_delta;
	list_add_tail(&req->list, &target->free_reqs);
	spin_unlock_irqrestore(&target->lock, flags);
}

static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
{
	struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL);

	if (scmnd) {
		srp_free_req(target, req, scmnd, 0);
		scmnd->result = DID_RESET << 16;
		scmnd->scsi_done(scmnd);
	}
}
static int srp_reconnect_target(struct srp_target_port *target)
{
	struct Scsi_Host *shost = target->scsi_host;
	int i, ret;

	scsi_target_block(&shost->shost_gendev);

	srp_disconnect_target(target);
	/*
	 * Now get a new local CM ID so that we avoid confusing the target in
	 * case things are really fouled up. Doing so also ensures that all CM
	 * callbacks will have finished before a new QP is allocated.
	 */
	ret = srp_new_cm_id(target);
	/*
	 * Whether or not creating a new CM ID succeeded, create a new
	 * QP. This guarantees that all completion callback function
	 * invocations have finished before request resetting starts.
	 */
	if (ret == 0)
		ret = srp_create_target_ib(target);
	else
		srp_create_target_ib(target);

	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
		struct srp_request *req = &target->req_ring[i];

		srp_reset_req(target, req);
	}

	INIT_LIST_HEAD(&target->free_tx);
	for (i = 0; i < SRP_SQ_SIZE; ++i)
		list_add(&target->tx_ring[i]->list, &target->free_tx);

	if (ret == 0)
		ret = srp_connect_target(target);

	scsi_target_unblock(&shost->shost_gendev, ret == 0 ? SDEV_RUNNING :
			    SDEV_TRANSPORT_OFFLINE);
	target->transport_offline = !!ret;

	if (ret)
		goto err;

	shost_printk(KERN_INFO, target->scsi_host, PFX "reconnect succeeded\n");

	return ret;

err:
	shost_printk(KERN_ERR, target->scsi_host,
		     PFX "reconnect failed (%d), removing target port.\n", ret);

	/*
	 * We couldn't reconnect, so kill our target port off.
	 * However, we have to defer the real removal because we
	 * are in the context of the SCSI error handler now, which
	 * will deadlock if we call scsi_remove_host().
	 */
	srp_queue_remove_work(target);

	return ret;
}
static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{
	struct srp_direct_buf *desc = state->desc;

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
}

static int srp_map_finish_fmr(struct srp_map_state *state,
			      struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_pool_fmr *fmr;
	u64 io_addr = 0;

	if (!state->npages)
		return 0;

	if (state->npages == 1) {
		srp_map_desc(state, state->base_dma_addr, state->fmr_len,
			     target->rkey);
		state->npages = state->fmr_len = 0;
		return 0;
	}

	fmr = ib_fmr_pool_map_phys(dev->fmr_pool, state->pages,
				   state->npages, io_addr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	*state->next_fmr++ = fmr;
	state->nfmr++;

	srp_map_desc(state, 0, state->fmr_len, fmr->fmr->rkey);
	state->npages = state->fmr_len = 0;
	return 0;
}
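/*
 * Worked example (added for clarity, not part of the original source): if a
 * request maps three contiguous 4 KB pages through the FMR pool, the three
 * page addresses are accumulated in state->pages[] and a single
 * srp_direct_buf descriptor with len = 12288 is emitted using the FMR rkey,
 * instead of three separate descriptors using the global rkey.
 */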
static void srp_map_update_start(struct srp_map_state *state,
				 struct scatterlist *sg, int sg_index,
				 dma_addr_t dma_addr)
{
	state->unmapped_sg = sg;
	state->unmapped_index = sg_index;
	state->unmapped_addr = dma_addr;
}

static int srp_map_sg_entry(struct srp_map_state *state,
			    struct srp_target_port *target,
			    struct scatterlist *sg, int sg_index,
			    int use_fmr)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
	unsigned int len;
	int ret;

	if (!dma_len)
		return 0;

	if (use_fmr == SRP_MAP_NO_FMR) {
		/* Once we're in direct map mode for a request, we don't
		 * go back to FMR mode, so no need to update anything
		 * other than the descriptor.
		 */
		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		return 0;
	}

	/* If we start at an offset into the FMR page, don't merge into
	 * the current FMR. Finish it out, and use the kernel's MR for this
	 * sg entry. This is to avoid potential bugs on some SRP targets
	 * that were never quite defined, but went away when the initiator
	 * avoided using FMR on such page fragments.
	 */
	if (dma_addr & ~dev->fmr_page_mask || dma_len > dev->fmr_max_size) {
		ret = srp_map_finish_fmr(state, target);
		if (ret)
			return ret;

		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		srp_map_update_start(state, NULL, 0, 0);
		return 0;
	}

	/* If this is the first sg to go into the FMR, save our position.
	 * We need to know the first unmapped entry, its index, and the
	 * first unmapped address within that entry to be able to restart
	 * mapping after an error.
	 */
	if (!state->unmapped_sg)
		srp_map_update_start(state, sg, sg_index, dma_addr);

	while (dma_len) {
		if (state->npages == SRP_FMR_SIZE) {
			ret = srp_map_finish_fmr(state, target);
			if (ret)
				return ret;

			srp_map_update_start(state, sg, sg_index, dma_addr);
		}

		len = min_t(unsigned int, dma_len, dev->fmr_page_size);

		if (!state->npages)
			state->base_dma_addr = dma_addr;
		state->pages[state->npages++] = dma_addr;
		state->fmr_len += len;
		dma_addr += len;
		dma_len -= len;
	}

	/* If the last entry of the FMR wasn't a full page, then we need to
	 * close it out and start a new one -- we can only merge at page
	 * boundaries.
	 */
	ret = 0;
	if (len != dev->fmr_page_size) {
		ret = srp_map_finish_fmr(state, target);
		if (!ret)
			srp_map_update_start(state, NULL, 0, 0);
	}
	return ret;
}
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
			struct srp_request *req)
{
	struct scatterlist *scat, *sg;
	struct srp_cmd *cmd = req->cmd->buf;
	int i, len, nents, count, use_fmr;
	struct srp_device *dev;
	struct ib_device *ibdev;
	struct srp_map_state state;
	struct srp_indirect_buf *indirect_hdr;
	u32 table_len;
	u8 fmt;

	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled data direction %d\n",
			     scmnd->sc_data_direction);
		return -EINVAL;
	}

	nents = scsi_sg_count(scmnd);
	scat  = scsi_sglist(scmnd);

	dev = target->srp_host->srp_dev;
	ibdev = dev->dev;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
	if (unlikely(count == 0))
		return -EIO;

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) +	sizeof (struct srp_direct_buf);

	if (count == 1) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry.  So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
		buf->key = cpu_to_be32(target->rkey);
		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));

		req->nfmr = 0;
		goto map_complete;
	}

	/* We have more than one scatter/gather entry, so build our indirect
	 * descriptor table, trying to merge as many entries with FMR as we
	 * can.
	 */
	indirect_hdr = (void *) cmd->add_data;

	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
				   target->indirect_size, DMA_TO_DEVICE);

	memset(&state, 0, sizeof(state));
	state.desc	= req->indirect_desc;
	state.pages	= req->map_page;
	state.next_fmr	= req->fmr_list;

	use_fmr = dev->fmr_pool ? SRP_MAP_ALLOW_FMR : SRP_MAP_NO_FMR;

	for_each_sg(scat, sg, count, i) {
		if (srp_map_sg_entry(&state, target, sg, i, use_fmr)) {
			/* FMR mapping failed, so backtrack to the first
			 * unmapped entry and continue on without using FMR.
			 */
			dma_addr_t dma_addr;
			unsigned int dma_len;

backtrack:
			sg = state.unmapped_sg;
			i = state.unmapped_index;

			dma_addr = ib_sg_dma_address(ibdev, sg);
			dma_len = ib_sg_dma_len(ibdev, sg);
			dma_len -= (state.unmapped_addr - dma_addr);
			dma_addr = state.unmapped_addr;
			use_fmr = SRP_MAP_NO_FMR;
			srp_map_desc(&state, dma_addr, dma_len, target->rkey);
		}
	}

	if (use_fmr == SRP_MAP_ALLOW_FMR && srp_map_finish_fmr(&state, target))
		goto backtrack;

	/* We've mapped the request, now pull as much of the indirect
	 * descriptor table as we can into the command buffer. If this
	 * target is not using an external indirect table, we are
	 * guaranteed to fit into the command, as the SCSI layer won't
	 * give us more S/G entries than we allow.
	 */
	req->nfmr = state.nfmr;
	if (state.ndesc == 1) {
		/* FMR mapping was able to collapse this to one entry,
		 * so use a direct descriptor.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		*buf = req->indirect_desc[0];
		goto map_complete;
	}

	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
		     !target->allow_ext_sg)) {
		shost_printk(KERN_ERR, target->scsi_host,
			     "Could not fit S/G list into SRP_CMD\n");
		return -EIO;
	}

	count = min(state.ndesc, target->cmd_sg_cnt);
	table_len = state.ndesc * sizeof (struct srp_direct_buf);

	fmt = SRP_DATA_DESC_INDIRECT;
	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
	len += count * sizeof (struct srp_direct_buf);

	memcpy(indirect_hdr->desc_list, req->indirect_desc,
	       count * sizeof (struct srp_direct_buf));

	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
	indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
	indirect_hdr->len = cpu_to_be32(state.total_len);

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->data_out_desc_cnt = count;
	else
		cmd->data_in_desc_cnt = count;

	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
				      DMA_TO_DEVICE);

map_complete:
	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;
}
/*
 * Return an IU and possible credit to the free pool
 */
static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
			  enum srp_iu_type iu_type)
{
	unsigned long flags;

	spin_lock_irqsave(&target->lock, flags);
	list_add(&iu->list, &target->free_tx);
	if (iu_type != SRP_IU_RSP)
		++target->req_lim;
	spin_unlock_irqrestore(&target->lock, flags);
}

/*
 * Must be called with target->lock held to protect req_lim and free_tx.
 * If IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
				      enum srp_iu_type iu_type)
{
	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
	struct srp_iu *iu;

	srp_send_completion(target->send_cq, target);

	if (list_empty(&target->free_tx))
		return NULL;

	/* Initiator responses to target requests do not consume credits */
	if (iu_type != SRP_IU_RSP) {
		if (target->req_lim <= rsv) {
			++target->zero_req_lim;
			return NULL;
		}

		--target->req_lim;
	}

	iu = list_first_entry(&target->free_tx, struct srp_iu, list);
	list_del(&iu->list);
	return iu;
}
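/*
 * Worked example (added for clarity, not part of the original source): with
 * SRP_TSK_MGMT_SQ_SIZE credits held in reserve, a normal SRP_IU_CMD is only
 * handed out while req_lim > SRP_TSK_MGMT_SQ_SIZE. So with req_lim == 1 a
 * task-management IU (rsv == 0) still gets through, while SCSI commands are
 * bounced back to the mid-layer with SCSI_MLQUEUE_HOST_BUSY.
 */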
static int srp_post_send(struct srp_target_port *target,
			 struct srp_iu *iu, int len)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;

	list.addr   = iu->dma;
	list.length = len;
	list.lkey   = target->lkey;

	wr.next       = NULL;
	wr.wr_id      = (uintptr_t) iu;
	wr.sg_list    = &list;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	return ib_post_send(target->qp, &wr, &bad_wr);
}

static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
{
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge list;

	list.addr   = iu->dma;
	list.length = iu->size;
	list.lkey   = target->lkey;

	wr.next     = NULL;
	wr.wr_id    = (uintptr_t) iu;
	wr.sg_list  = &list;
	wr.num_sge  = 1;

	return ib_post_recv(target->qp, &wr, &bad_wr);
}
static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
{
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		spin_lock_irqsave(&target->lock, flags);
		target->req_lim += be32_to_cpu(rsp->req_lim_delta);
		spin_unlock_irqrestore(&target->lock, flags);

		target->tsk_mgmt_status = -1;
		if (be32_to_cpu(rsp->resp_data_len) >= 4)
			target->tsk_mgmt_status = rsp->data[3];
		complete(&target->tsk_mgmt_done);
	} else {
		req = &target->req_ring[rsp->tag];
		scmnd = srp_claim_req(target, req, NULL);
		if (!scmnd) {
			shost_printk(KERN_ERR, target->scsi_host,
				     "Null scmnd for RSP w/tag %016llx\n",
				     (unsigned long long) rsp->tag);

			spin_lock_irqsave(&target->lock, flags);
			target->req_lim += be32_to_cpu(rsp->req_lim_delta);
			spin_unlock_irqrestore(&target->lock, flags);

			return;
		}
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));

		srp_free_req(target, req, scmnd,
			     be32_to_cpu(rsp->req_lim_delta));

		scmnd->host_scribble = NULL;
		scmnd->scsi_done(scmnd);
	}
}
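/*
 * Note added for clarity (not part of the original source): for normal
 * responses rsp->tag is the index that srp_queuecommand() stored in cmd->tag,
 * so &target->req_ring[rsp->tag] recovers the srp_request without any lookup
 * table. Task-management responses are recognised by the SRP_TAG_TSK_MGMT bit
 * instead.
 */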
static int srp_response_common(struct srp_target_port *target, s32 req_delta,
			       void *rsp, int len)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	unsigned long flags;
	struct srp_iu *iu;
	int err;

	spin_lock_irqsave(&target->lock, flags);
	target->req_lim += req_delta;
	iu = __srp_get_tx_iu(target, SRP_IU_RSP);
	spin_unlock_irqrestore(&target->lock, flags);

	if (!iu) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "no IU available to send response\n");
		return 1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
	memcpy(iu->buf, rsp, len);
	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);

	err = srp_post_send(target, iu, len);
	if (err) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "unable to post response: %d\n", err);
		srp_put_tx_iu(target, iu, SRP_IU_RSP);
	}

	return err;
}
static void srp_process_cred_req(struct srp_target_port *target,
				 struct srp_cred_req *req)
{
	struct srp_cred_rsp rsp = {
		.opcode = SRP_CRED_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	if (srp_response_common(target, delta, &rsp, sizeof rsp))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_CRED_REQ\n");
}

static void srp_process_aer_req(struct srp_target_port *target,
				struct srp_aer_req *req)
{
	struct srp_aer_rsp rsp = {
		.opcode = SRP_AER_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	shost_printk(KERN_ERR, target->scsi_host, PFX
		     "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));

	if (srp_response_common(target, delta, &rsp, sizeof rsp))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_AER_REQ\n");
}
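/*
 * Note added for clarity (not part of the original source): SRP_CRED_REQ and
 * SRP_AER_REQ are the two target-initiated requests this initiator answers.
 * Both carry a req_lim_delta that tops up the initiator's request-limit
 * credits before the SRP_CRED_RSP/SRP_AER_RSP is posted via
 * srp_response_common().
 */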
static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
	int res;
	u8 opcode;

	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
				   DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "recv completion, opcode 0x%02x\n", opcode);
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
			       iu->buf, wc->byte_len, true);
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(target, iu->buf);
		break;

	case SRP_CRED_REQ:
		srp_process_cred_req(target, iu->buf);
		break;

	case SRP_AER_REQ:
		srp_process_aer_req(target, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Got target logout request\n");
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
				      DMA_FROM_DEVICE);

	res = srp_post_recv(target, iu);
	if (res != 0)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Recv failed with error code %d\n", res);
}
static void srp_handle_qp_err(enum ib_wc_status wc_status, bool send_err,
			      struct srp_target_port *target)
{
	if (target->connected && !target->qp_in_error) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "failed %s status %d\n",
			     send_err ? "send" : "receive",
			     wc_status);
	}
	target->qp_in_error = true;
}

static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (likely(wc.status == IB_WC_SUCCESS)) {
			srp_handle_recv(target, &wc);
		} else {
			srp_handle_qp_err(wc.status, false, target);
		}
	}
}

static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;
	struct srp_iu *iu;

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (likely(wc.status == IB_WC_SUCCESS)) {
			iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
			list_add(&iu->list, &target->free_tx);
		} else {
			srp_handle_qp_err(wc.status, true, target);
		}
	}
}
static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(shost);
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	struct ib_device *dev;
	unsigned long flags;
	int len;

	if (unlikely(target->transport_offline)) {
		scmnd->result = DID_NO_CONNECT << 16;
		scmnd->scsi_done(scmnd);
		return 0;
	}

	spin_lock_irqsave(&target->lock, flags);
	iu = __srp_get_tx_iu(target, SRP_IU_CMD);
	if (!iu)
		goto err_unlock;

	req = list_first_entry(&target->free_reqs, struct srp_request, list);
	list_del(&req->list);
	spin_unlock_irqrestore(&target->lock, flags);

	dev = target->srp_host->srp_dev->dev;
	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
				   DMA_TO_DEVICE);

	scmnd->host_scribble = (void *) req;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	cmd->lun    = cpu_to_be64((u64) scmnd->device->lun << 48);
	cmd->tag    = req->index;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req->scmnd = scmnd;
	req->cmd   = iu;

	len = srp_map_data(scmnd, target, req);
	if (len < 0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Failed to map data\n");
		goto err_iu;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
				      DMA_TO_DEVICE);

	if (srp_post_send(target, iu, len)) {
		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
		goto err_unmap;
	}

	return 0;

err_unmap:
	srp_unmap_data(scmnd, target, req);

err_iu:
	srp_put_tx_iu(target, iu, SRP_IU_CMD);

	/*
	 * Avoid that the loops that iterate over the request ring can
	 * encounter a dangling SCSI command pointer.
	 */
	req->scmnd = NULL;

	spin_lock_irqsave(&target->lock, flags);
	list_add(&req->list, &target->free_reqs);

err_unlock:
	spin_unlock_irqrestore(&target->lock, flags);

	return SCSI_MLQUEUE_HOST_BUSY;
}
static int srp_alloc_iu_bufs(struct srp_target_port *target)
{
	int i;

	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		target->rx_ring[i] = srp_alloc_iu(target->srp_host,
						  target->max_ti_iu_len,
						  GFP_KERNEL, DMA_FROM_DEVICE);
		if (!target->rx_ring[i])
			goto err;
	}

	for (i = 0; i < SRP_SQ_SIZE; ++i) {
		target->tx_ring[i] = srp_alloc_iu(target->srp_host,
						  target->max_iu_len,
						  GFP_KERNEL, DMA_TO_DEVICE);
		if (!target->tx_ring[i])
			goto err;

		list_add(&target->tx_ring[i]->list, &target->free_tx);
	}

	return 0;

err:
	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		srp_free_iu(target->srp_host, target->rx_ring[i]);
		target->rx_ring[i] = NULL;
	}

	for (i = 0; i < SRP_SQ_SIZE; ++i) {
		srp_free_iu(target->srp_host, target->tx_ring[i]);
		target->tx_ring[i] = NULL;
	}

	return -ENOMEM;
}
static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
{
	uint64_t T_tr_ns, max_compl_time_ms;
	uint32_t rq_tmo_jiffies;

	/*
	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
	 * table 91), both the QP timeout and the retry count have to be set
	 * for RC QP's during the RTR to RTS transition.
	 */
	WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
		     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));

	/*
	 * Set target->rq_tmo_jiffies to one second more than the largest time
	 * it can take before an error completion is generated. See also
	 * C9-140..142 in the IBTA spec for more information about how to
	 * convert the QP Local ACK Timeout value to nanoseconds.
	 */
	T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
	max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
	do_div(max_compl_time_ms, NSEC_PER_MSEC);
	rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);

	return rq_tmo_jiffies;
}
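/*
 * Worked example (added for clarity, not part of the original source): with a
 * QP Local ACK Timeout of 19 the per-attempt timeout is
 * T_tr = 4096 ns * 2^19, roughly 2.1 s, so with retry_cnt == 7 the worst case
 * is about 7 * 4 * 2.1 s, roughly 60 s, and rq_tmo_jiffies ends up close to
 * 61 seconds.
 */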
static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
			       struct srp_login_rsp *lrsp,
			       struct srp_target_port *target)
{
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int ret;
	int i;

	if (lrsp->opcode == SRP_LOGIN_RSP) {
		target->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
		target->req_lim       = be32_to_cpu(lrsp->req_lim_delta);

		/*
		 * Reserve credits for task management so we don't
		 * bounce requests back to the SCSI mid-layer.
		 */
		target->scsi_host->can_queue
			= min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
			      target->scsi_host->can_queue);
	} else {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
		ret = -ECONNRESET;
		goto error;
	}

	if (!target->rx_ring[0]) {
		ret = srp_alloc_iu_bufs(target);
		if (ret)
			goto error;
	}

	ret = -ENOMEM;
	qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
	if (!qp_attr)
		goto error;

	qp_attr->qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	for (i = 0; i < SRP_RQ_SIZE; i++) {
		struct srp_iu *iu = target->rx_ring[i];
		ret = srp_post_recv(target, iu);
		if (ret)
			goto error_free;
	}

	qp_attr->qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);

	ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	ret = ib_send_cm_rtu(cm_id, NULL, 0);

error_free:
	kfree(qp_attr);

error:
	target->status = ret;
}
static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event,
			       struct srp_target_port *target)
{
	struct Scsi_Host *shost = target->scsi_host;
	struct ib_class_port_info *cpi;
	int opcode;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		cpi = event->param.rej_rcvd.ari;
		target->path.dlid = cpi->redirect_lid;
		target->path.pkey = cpi->redirect_pkey;
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
		memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);

		target->status = target->path.dlid ?
			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
		break;

	case IB_CM_REJ_PORT_REDIRECT:
		if (srp_target_is_topspin(target)) {
			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
			memcpy(target->path.dgid.raw,
			       event->param.rej_rcvd.ari, 16);

			shost_printk(KERN_DEBUG, shost,
				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
				     (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
				     (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));

			target->status = SRP_PORT_REDIRECT;
		} else {
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
			target->status = -ECONNRESET;
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		shost_printk(KERN_WARNING, shost,
			     "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
		} else
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
				     " opcode 0x%02x\n", opcode);
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_STALE_CONN:
		shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
		target->status = SRP_STALE_CONN;
		break;

	default:
		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
			     event->param.rej_rcvd.reason);
		target->status = -ECONNRESET;
	}
}
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srp_target_port *target = cm_id->context;
	int comp = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM REQ failed\n");
		comp = 1;
		target->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		srp_cm_rep_handler(cm_id, event->private_data, target);
		break;

	case IB_CM_REJ_RECEIVED:
		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
		comp = 1;

		srp_cm_rej_handler(cm_id, event, target);
		break;

	case IB_CM_DREQ_RECEIVED:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "DREQ received - connection closed\n");
		srp_change_conn_state(target, false);
		if (ib_send_cm_drep(cm_id, NULL, 0))
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "Sending CM DREP failed\n");
		break;

	case IB_CM_TIMEWAIT_EXIT:
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "connection closed\n");
		break;

	case IB_CM_MRA_RECEIVED:
	case IB_CM_DREQ_ERROR:
	case IB_CM_DREP_RECEIVED:
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&target->done);

	return 0;
}
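/*
 * Note added for clarity (not part of the original source): the status values
 * set by srp_cm_rej_handler() above (SRP_PORT_REDIRECT, SRP_DLID_REDIRECT,
 * SRP_STALE_CONN or -ECONNRESET) are exactly what the retry loop in
 * srp_connect_target() switches on after wait_for_completion(&target->done).
 */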
static int srp_send_tsk_mgmt(struct srp_target_port *target,
			     u64 req_tag, unsigned int lun, u8 func)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;

	if (!target->connected || target->qp_in_error)
		return -1;

	init_completion(&target->tsk_mgmt_done);

	spin_lock_irq(&target->lock);
	iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
	spin_unlock_irq(&target->lock);

	if (!iu)
		return -1;

	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
				   DMA_TO_DEVICE);
	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

	tsk_mgmt->opcode	= SRP_TSK_MGMT;
	tsk_mgmt->lun		= cpu_to_be64((u64) lun << 48);
	tsk_mgmt->tag		= req_tag | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tsk_mgmt_func = func;
	tsk_mgmt->task_tag	= req_tag;

	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
				      DMA_TO_DEVICE);
	if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
		srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
		return -1;
	}

	if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
		return -1;

	return 0;
}
static int srp_abort(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
	int ret;

	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");

	if (!req || !srp_claim_req(target, req, scmnd))
		return FAILED;
	if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
			      SRP_TSK_ABORT_TASK) == 0)
		ret = SUCCESS;
	else if (target->transport_offline)
		ret = FAST_IO_FAIL;
	else
		ret = FAILED;
	srp_free_req(target, req, scmnd, 0);
	scmnd->result = DID_ABORT << 16;
	scmnd->scsi_done(scmnd);

	return ret;
}
static int srp_reset_device(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	int i;

	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");

	if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
			      SRP_TSK_LUN_RESET))
		return FAILED;
	if (target->tsk_mgmt_status)
		return FAILED;

	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
		struct srp_request *req = &target->req_ring[i];
		if (req->scmnd && req->scmnd->device == scmnd->device)
			srp_reset_req(target, req);
	}

	return SUCCESS;
}
static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	int ret = FAILED;

	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");

	if (!srp_reconnect_target(target))
		ret = SUCCESS;

	return ret;
}
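/*
 * Note added for clarity (not part of the original source): the SCSI error
 * handler escalates through these callbacks in order - srp_abort() for a
 * single command, srp_reset_device() for a LUN, and finally srp_reset_host(),
 * which tears down and rebuilds the whole RDMA connection through
 * srp_reconnect_target().
 */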
static int srp_slave_configure(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct srp_target_port *target = host_to_target(shost);
	struct request_queue *q = sdev->request_queue;
	unsigned long timeout;

	if (sdev->type == TYPE_DISK) {
		timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
		blk_queue_rq_timeout(q, timeout);
	}

	return 0;
}
static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->id_ext));
}

static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->ioc_guid));
}

static ssize_t show_service_id(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->service_id));
}

static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
}

static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->path.dgid.raw);
}

static ssize_t show_orig_dgid(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->orig_dgid);
}

static ssize_t show_req_lim(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->req_lim);
}

static ssize_t show_zero_req_lim(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->zero_req_lim);
}

static ssize_t show_local_ib_port(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->srp_host->port);
}

static ssize_t show_local_ib_device(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
}

static ssize_t show_comp_vector(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->comp_vector);
}

static ssize_t show_cmd_sg_entries(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%u\n", target->cmd_sg_cnt);
}

static ssize_t show_allow_ext_sg(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
}
static DEVICE_ATTR(id_ext,	    S_IRUGO, show_id_ext,	   NULL);
static DEVICE_ATTR(ioc_guid,	    S_IRUGO, show_ioc_guid,	   NULL);
static DEVICE_ATTR(service_id,	    S_IRUGO, show_service_id,	   NULL);
static DEVICE_ATTR(pkey,	    S_IRUGO, show_pkey,		   NULL);
static DEVICE_ATTR(dgid,	    S_IRUGO, show_dgid,		   NULL);
static DEVICE_ATTR(orig_dgid,	    S_IRUGO, show_orig_dgid,	   NULL);
static DEVICE_ATTR(req_lim,	    S_IRUGO, show_req_lim,	   NULL);
static DEVICE_ATTR(zero_req_lim,    S_IRUGO, show_zero_req_lim,    NULL);
static DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,   NULL);
static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
static DEVICE_ATTR(comp_vector,	    S_IRUGO, show_comp_vector,	   NULL);
static DEVICE_ATTR(cmd_sg_entries,  S_IRUGO, show_cmd_sg_entries,  NULL);
static DEVICE_ATTR(allow_ext_sg,    S_IRUGO, show_allow_ext_sg,    NULL);

static struct device_attribute *srp_host_attrs[] = {
	&dev_attr_id_ext,
	&dev_attr_ioc_guid,
	&dev_attr_service_id,
	&dev_attr_pkey,
	&dev_attr_dgid,
	&dev_attr_orig_dgid,
	&dev_attr_req_lim,
	&dev_attr_zero_req_lim,
	&dev_attr_local_ib_port,
	&dev_attr_local_ib_device,
	&dev_attr_comp_vector,
	&dev_attr_cmd_sg_entries,
	&dev_attr_allow_ext_sg,
	NULL
};
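/*
 * Example (added for clarity, not part of the original source): once a target
 * is logged in, these read-only attributes appear under the SCSI host device,
 * e.g.
 *
 *   cat /sys/class/scsi_host/host<n>/id_ext
 *   cat /sys/class/scsi_host/host<n>/dgid
 *
 * where <n> is the SCSI host number assigned to the SRP target port.
 */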
static struct scsi_host_template srp_template = {
	.module				= THIS_MODULE,
	.name				= "InfiniBand SRP initiator",
	.proc_name			= DRV_NAME,
	.slave_configure		= srp_slave_configure,
	.info				= srp_target_info,
	.queuecommand			= srp_queuecommand,
	.eh_abort_handler		= srp_abort,
	.eh_device_reset_handler	= srp_reset_device,
	.eh_host_reset_handler		= srp_reset_host,
	.skip_settle_delay		= true,
	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
	.can_queue			= SRP_CMD_SQ_SIZE,
	.this_id			= -1,
	.cmd_per_lun			= SRP_CMD_SQ_SIZE,
	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= srp_host_attrs
};
static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;

	sprintf(target->target_name, "SRP.T10:%016llX",
		(unsigned long long) be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
		return -ENODEV;

	memcpy(ids.port_id, &target->id_ext, 8);
	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
	ids.roles = SRP_RPORT_ROLE_TARGET;
	rport = srp_rport_add(target->scsi_host, &ids);
	if (IS_ERR(rport)) {
		scsi_remove_host(target->scsi_host);
		return PTR_ERR(rport);
	}

	rport->lld_data = target;

	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);

	target->state = SRP_TARGET_LIVE;

	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, 0);

	return 0;
}
static void srp_release_dev(struct device *dev)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);

	complete(&host->released);
}

static struct class srp_class = {
	.name    = "infiniband_srp",
	.dev_release = srp_release_dev
};
/**
 * srp_conn_unique() - check whether the connection to a target is unique
 */
static bool srp_conn_unique(struct srp_host *host,
			    struct srp_target_port *target)
{
	struct srp_target_port *t;
	bool ret = false;

	if (target->state == SRP_TARGET_REMOVED)
		goto out;

	ret = true;

	spin_lock(&host->target_lock);
	list_for_each_entry(t, &host->target_list, list) {
		if (t != target &&
		    target->id_ext == t->id_ext &&
		    target->ioc_guid == t->ioc_guid &&
		    target->initiator_ext == t->initiator_ext) {
			ret = false;
			break;
		}
	}
	spin_unlock(&host->target_lock);

out:
	return ret;
}
/*
 * Target ports are added by writing
 *
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 *     pkey=<P_Key>,service_id=<service ID>
 *
 * to the add_target sysfs attribute.
 */
enum {
	SRP_OPT_ERR		= 0,
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
	SRP_OPT_IO_CLASS	= 1 << 7,
	SRP_OPT_INITIATOR_EXT	= 1 << 8,
	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
	SRP_OPT_SG_TABLESIZE	= 1 << 11,
	SRP_OPT_COMP_VECTOR	= 1 << 12,
	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
				   SRP_OPT_IOC_GUID	|
				   SRP_OPT_DGID		|
				   SRP_OPT_PKEY		|
				   SRP_OPT_SERVICE_ID),
};

static const match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,		"id_ext=%s"		},
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s"		},
	{ SRP_OPT_DGID,			"dgid=%s"		},
	{ SRP_OPT_PKEY,			"pkey=%x"		},
	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
	{ SRP_OPT_MAX_SECT,		"max_sect=%d"		},
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d"	},
	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
	{ SRP_OPT_COMP_VECTOR,		"comp_vector=%u"	},
	{ SRP_OPT_ERR,			NULL			}
};
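/*
 * Example (added for clarity, not part of the original source): with the
 * token table above, a login could be requested from user space roughly as
 *
 *   echo "id_ext=200100A0B8123456,ioc_guid=00A0B80200401234,\
 *         dgid=fe800000000000000002c90200402bd5,pkey=ffff,\
 *         service_id=0002c90200402bd5" > add_target
 *
 * under the per-port directory in /sys/class/infiniband_srp. The identifier
 * values shown are made up; only the parameter names come from
 * srp_opt_tokens[].
 */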
static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	char dgid[3];
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int i;

	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(p) != 32) {
				pr_warn("bad dest GID parameter '%s'\n", p);
				kfree(p);
				goto out;
			}

			/* Convert the 32-character hex GID two digits at a time. */
			for (i = 0; i < 16; ++i) {
				strlcpy(dgid, p + i * 2, 3);
				target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
			}
			kfree(p);
			memcpy(target->orig_dgid, target->path.dgid.raw, 16);
			break;

		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				pr_warn("bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->path.pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			target->path.service_id = target->service_id;
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				pr_warn("bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		case SRP_OPT_MAX_CMD_PER_LUN:
			if (match_int(args, &token)) {
				pr_warn("bad max cmd_per_lun parameter '%s'\n",
					p);
				goto out;
			}
			target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE);
			break;

		case SRP_OPT_IO_CLASS:
			if (match_hex(args, &token)) {
				pr_warn("bad IO class parameter '%s'\n", p);
				goto out;
			}
			if (token != SRP_REV10_IB_IO_CLASS &&
			    token != SRP_REV16A_IB_IO_CLASS) {
				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
					token, SRP_REV10_IB_IO_CLASS,
					SRP_REV16A_IB_IO_CLASS);
				goto out;
			}
			target->io_class = token;
			break;

		case SRP_OPT_INITIATOR_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_CMD_SG_ENTRIES:
			if (match_int(args, &token) || token < 1 || token > 255) {
				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
					p);
				goto out;
			}
			target->cmd_sg_cnt = token;
			break;

		case SRP_OPT_ALLOW_EXT_SG:
			if (match_int(args, &token)) {
				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
				goto out;
			}
			target->allow_ext_sg = !!token;
			break;

		case SRP_OPT_SG_TABLESIZE:
			if (match_int(args, &token) || token < 1 ||
					token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
				pr_warn("bad max sg_tablesize parameter '%s'\n",
					p);
				goto out;
			}
			target->sg_tablesize = token;
			break;

		case SRP_OPT_COMP_VECTOR:
			if (match_int(args, &token) || token < 0) {
				pr_warn("bad comp_vector parameter '%s'\n", p);
				goto out;
			}
			target->comp_vector = token;
			break;

		default:
			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
				p);
			goto out;
		}
	}

	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				pr_warn("target creation request is missing parameter '%s'\n",
					srp_opt_tokens[i].pattern);

out:
	kfree(options);

	return ret;
}
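
/*
 * A short worked example of the dgid conversion in srp_parse_options()
 * above (hypothetical GID): for dgid=fe800000000000000002c90300001234 the
 * parser copies two hex digits at a time into a NUL-terminated 3-byte
 * buffer and converts them, so path.dgid.raw[] ends up holding
 * fe 80 00 00 00 00 00 00 00 02 c9 03 00 00 12 34.
 */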
static ssize_t srp_create_target(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	struct ib_device *ibdev = host->srp_dev->dev;
	dma_addr_t dma_addr;
	int i;
	int ret;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->transportt  = ib_srp_transport_template;
	target_host->max_channel = 0;
	target_host->max_id      = 1;
	target_host->max_lun     = SRP_MAX_LUN;
	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;

	target = host_to_target(target_host);

	target->io_class	= SRP_REV16A_IB_IO_CLASS;
	target->scsi_host	= target_host;
	target->srp_host	= host;
	target->lkey		= host->srp_dev->mr->lkey;
	target->rkey		= host->srp_dev->mr->rkey;
	target->cmd_sg_cnt	= cmd_sg_entries;
	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
	target->allow_ext_sg	= allow_ext_sg;

	ret = srp_parse_options(buf, target);
	if (ret)
		goto err;

	if (!srp_conn_unique(target->srp_host, target)) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
			     be64_to_cpu(target->id_ext),
			     be64_to_cpu(target->ioc_guid),
			     be64_to_cpu(target->initiator_ext));
		ret = -EEXIST;
		goto err;
	}

	if (!host->srp_dev->fmr_pool && !target->allow_ext_sg &&
	    target->cmd_sg_cnt < target->sg_tablesize) {
		pr_warn("No FMR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
		target->sg_tablesize = target->cmd_sg_cnt;
	}

	target_host->sg_tablesize = target->sg_tablesize;
	target->indirect_size = target->sg_tablesize *
				sizeof (struct srp_direct_buf);
	target->max_iu_len = sizeof (struct srp_cmd) +
			     sizeof (struct srp_indirect_buf) +
			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);

	INIT_WORK(&target->remove_work, srp_remove_work);
	spin_lock_init(&target->lock);
	INIT_LIST_HEAD(&target->free_tx);
	INIT_LIST_HEAD(&target->free_reqs);
	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
		struct srp_request *req = &target->req_ring[i];

		req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof (void *),
					GFP_KERNEL);
		req->map_page = kmalloc(SRP_FMR_SIZE * sizeof (void *),
					GFP_KERNEL);
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->fmr_list || !req->map_page || !req->indirect_desc)
			goto err_free_mem;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto err_free_mem;

		req->indirect_dma_addr = dma_addr;
		req->index = i;
		list_add_tail(&req->list, &target->free_reqs);
	}

	ib_query_gid(ibdev, host->port, 0, &target->path.sgid);

	shost_printk(KERN_DEBUG, target->scsi_host, PFX
		     "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
		     "service_id %016llx dgid %pI6\n",
		     (unsigned long long) be64_to_cpu(target->id_ext),
		     (unsigned long long) be64_to_cpu(target->ioc_guid),
		     be16_to_cpu(target->path.pkey),
		     (unsigned long long) be64_to_cpu(target->service_id),
		     target->path.dgid.raw);

	ret = srp_create_target_ib(target);
	if (ret)
		goto err_free_mem;

	ret = srp_new_cm_id(target);
	if (ret)
		goto err_free_ib;

	ret = srp_connect_target(target);
	if (ret) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Connection failed\n");
		goto err_cm_id;
	}

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	return count;

err_disconnect:
	srp_disconnect_target(target);

err_cm_id:
	ib_destroy_cm_id(target->cm_id);

err_free_ib:
	srp_free_target_ib(target);

err_free_mem:
	srp_free_req_data(target);

err:
	scsi_host_put(target_host);

	return ret;
}

static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
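
/*
 * Rough sizing sketch for target->max_iu_len as computed in
 * srp_create_target() above, assuming the SRP-defined wire sizes of
 * 48 bytes for struct srp_cmd, 20 bytes for struct srp_indirect_buf and
 * 16 bytes for struct srp_direct_buf: with the default cmd_sg_cnt of 12
 * this yields 48 + 20 + 12 * 16 = 260 bytes per request IU.
 */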
static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
}

static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%d\n", host->port);
}

static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
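
/*
 * Reading these read-only attributes from user space looks roughly like
 * the following (hypothetical HCA name and port number):
 *
 *   $ cat /sys/class/infiniband_srp/srp-mlx4_0-1/ibdev
 *   mlx4_0
 *   $ cat /sys/class/infiniband_srp/srp-mlx4_0-1/port
 *   1
 */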
static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	host->srp_dev = device;
	host->port = port;

	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dma_device;
	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);

	if (device_register(&host->dev))
		goto free_host;
	if (device_create_file(&host->dev, &dev_attr_add_target))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_port))
		goto err_class;

	return host;

err_class:
	device_unregister(&host->dev);

free_host:
	kfree(host);

	return NULL;
}
static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *dev_attr;
	struct ib_fmr_pool_param fmr_param;
	struct srp_host *host;
	int max_pages_per_fmr, fmr_page_shift, s, e, p;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		pr_warn("Query device failed for %s\n", device->name);
		goto free_attr;
	}

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		goto free_attr;

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 4096 bytes. We're unlikely to build large sglists
	 * out of smaller entries.
	 */
	fmr_page_shift		= max(12, ffs(dev_attr->page_size_cap) - 1);
	srp_dev->fmr_page_size	= 1 << fmr_page_shift;
	srp_dev->fmr_page_mask	= ~((u64) srp_dev->fmr_page_size - 1);
	srp_dev->fmr_max_size	= srp_dev->fmr_page_size * SRP_FMR_SIZE;

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd  = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
				    IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(srp_dev->mr))
		goto err_pd;

	for (max_pages_per_fmr = SRP_FMR_SIZE;
			max_pages_per_fmr >= SRP_FMR_MIN_SIZE;
			max_pages_per_fmr /= 2, srp_dev->fmr_max_size /= 2) {
		memset(&fmr_param, 0, sizeof fmr_param);
		fmr_param.pool_size	    = SRP_FMR_POOL_SIZE;
		fmr_param.dirty_watermark   = SRP_FMR_DIRTY_SIZE;
		fmr_param.cache		    = 1;
		fmr_param.max_pages_per_fmr = max_pages_per_fmr;
		fmr_param.page_shift	    = fmr_page_shift;
		fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
					       IB_ACCESS_REMOTE_WRITE |
					       IB_ACCESS_REMOTE_READ);

		srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
		if (!IS_ERR(srp_dev->fmr_pool))
			break;
	}

	if (IS_ERR(srp_dev->fmr_pool))
		srp_dev->fmr_pool = NULL;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);

	goto free_attr;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
}
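
/*
 * Worked example for the fmr_page_shift calculation in srp_add_one()
 * above (hypothetical capability mask): if dev_attr->page_size_cap is
 * 0xfffff000, the smallest supported page is 4 KiB, ffs() returns 13 and
 * the shift becomes 12 (4096-byte FMR pages).  An HCA that also
 * advertises pages smaller than 4 KiB is still clamped to a shift of 12
 * by the max().
 */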
static void srp_remove_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	struct srp_target_port *target;

	srp_dev = ib_get_client_data(device, &srp_client);
	if (!srp_dev)
		return;

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Remove all target ports.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list)
			srp_queue_remove_work(target);
		spin_unlock(&host->target_lock);

		/*
		 * Wait for tl_err and target port removal tasks.
		 */
		flush_workqueue(system_long_wq);
		flush_workqueue(srp_remove_wq);

		kfree(host);
	}

	if (srp_dev->fmr_pool)
		ib_destroy_fmr_pool(srp_dev->fmr_pool);
	ib_dereg_mr(srp_dev->mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}
static struct srp_function_template ib_srp_transport_functions = {
	.rport_delete		 = srp_rport_delete,
};
static int __init srp_init_module(void)
{
	int ret;

	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));

	if (srp_sg_tablesize) {
		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
		if (!cmd_sg_entries)
			cmd_sg_entries = srp_sg_tablesize;
	}

	if (!cmd_sg_entries)
		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

	if (cmd_sg_entries > 255) {
		pr_warn("Clamping cmd_sg_entries to 255\n");
		cmd_sg_entries = 255;
	}

	if (!indirect_sg_entries)
		indirect_sg_entries = cmd_sg_entries;
	else if (indirect_sg_entries < cmd_sg_entries) {
		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
			cmd_sg_entries);
		indirect_sg_entries = cmd_sg_entries;
	}

	srp_remove_wq = create_workqueue("srp_remove");
	if (IS_ERR(srp_remove_wq)) {
		ret = PTR_ERR(srp_remove_wq);
		goto out;
	}

	ret = -ENOMEM;
	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		goto destroy_wq;

	ret = class_register(&srp_class);
	if (ret) {
		pr_err("couldn't register class infiniband_srp\n");
		goto release_tr;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		goto unreg_sa;
	}

out:
	return ret;

unreg_sa:
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);

release_tr:
	srp_release_transport(ib_srp_transport_template);

destroy_wq:
	destroy_workqueue(srp_remove_wq);
	goto out;
}
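
/*
 * A minimal usage sketch of the parameter handling in srp_init_module()
 * above (hypothetical values): loading the driver with
 * "modprobe ib_srp cmd_sg_entries=32" leaves indirect_sg_entries at 0 and
 * therefore bumps it to 32 as well, while "modprobe ib_srp
 * cmd_sg_entries=300" is clamped to 255.
 */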
static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
	destroy_workqueue(srp_remove_wq);
}
module_init(srp_init_module);
module_exit(srp_cleanup_module);