/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>

#include <asm/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"
#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"0.2"
#define DRV_RELDATE	"November 1, 2005"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");
static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
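/*
 * Example (illustrative values only): the parameters above use 0444
 * permissions, so they are effectively load-time options, e.g.
 *
 *   modprobe ib_srp cmd_sg_entries=16 indirect_sg_entries=64 allow_ext_sg=1
 *
 * or the equivalent options line in /etc/modprobe.d/.
 */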
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;
static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}
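/*
 * Note: the OUI comparison above works because an IOC GUID is an EUI-64
 * whose three most significant bytes are the vendor's OUI.  For example, a
 * (hypothetical) ioc_guid of 0x0005ad0000001234 begins with 00:05:ad and
 * would therefore match topspin_oui.
 */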
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	printk(KERN_ERR PFX "QP event %d\n", event->event);
}
static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_pkey(target->srp_host->srp_dev->dev,
			   target->srp_host->port,
			   be16_to_cpu(target->path.pkey),
			   &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}
static int srp_new_cm_id(struct srp_target_port *target)
{
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, target);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (target->cm_id)
		ib_destroy_cm_id(target->cm_id);
	target->cm_id = new_cm_id;

	return 0;
}
static int srp_create_target_ib(struct srp_target_port *target)
{
	struct ib_qp_init_attr *init_attr;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	target->recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
				       srp_recv_completion, NULL, target,
				       SRP_RQ_SIZE, 0);
	if (IS_ERR(target->recv_cq)) {
		ret = PTR_ERR(target->recv_cq);
		goto err;
	}

	target->send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
				       srp_send_completion, NULL, target,
				       SRP_SQ_SIZE, 0);
	if (IS_ERR(target->send_cq)) {
		ret = PTR_ERR(target->send_cq);
		goto err_recv_cq;
	}

	ib_req_notify_cq(target->recv_cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = SRP_SQ_SIZE;
	init_attr->cap.max_recv_wr     = SRP_RQ_SIZE;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_ALL_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = target->send_cq;
	init_attr->recv_cq             = target->recv_cq;

	target->qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
	if (IS_ERR(target->qp)) {
		ret = PTR_ERR(target->qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, target->qp);
	if (ret)
		goto err_qp;

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(target->qp);

err_send_cq:
	ib_destroy_cq(target->send_cq);

err_recv_cq:
	ib_destroy_cq(target->recv_cq);

err:
	kfree(init_attr);
	return ret;
}
static void srp_free_target_ib(struct srp_target_port *target)
{
	int i;

	ib_destroy_qp(target->qp);
	ib_destroy_cq(target->send_cq);
	ib_destroy_cq(target->recv_cq);

	for (i = 0; i < SRP_RQ_SIZE; ++i)
		srp_free_iu(target->srp_host, target->rx_ring[i]);
	for (i = 0; i < SRP_SQ_SIZE; ++i)
		srp_free_iu(target->srp_host, target->tx_ring[i]);
}
static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	target->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		target->path = *pathrec;
	complete(&target->done);
}

static int srp_lookup_path(struct srp_target_port *target)
{
	target->path.numb_path = 1;

	init_completion(&target->done);

	target->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
						   target->srp_host->srp_dev->dev,
						   target->srp_host->port,
						   &target->path,
						   IB_SA_PATH_REC_SERVICE_ID	|
						   IB_SA_PATH_REC_DGID		|
						   IB_SA_PATH_REC_SGID		|
						   IB_SA_PATH_REC_NUMB_PATH	|
						   IB_SA_PATH_REC_PKEY,
						   SRP_PATH_REC_TIMEOUT_MS,
						   GFP_KERNEL,
						   srp_path_rec_completion,
						   target, &target->path_query);
	if (target->path_query_id < 0)
		return target->path_query_id;

	wait_for_completion(&target->done);

	if (target->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return target->status;
}
static int srp_send_req(struct srp_target_port *target)
{
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path 	      = &target->path;
	req->param.alternate_path 	      = NULL;
	req->param.service_id 		      = target->service_id;
	req->param.qp_num 		      = target->qp->qp_num;
	req->param.qp_type 		      = target->qp->qp_type;
	req->param.private_data 	      = &req->priv;
	req->param.private_data_len 	      = sizeof req->priv;
	req->param.flow_control 	      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn 	     &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count 		      = 7;
	req->param.rnr_retry_count 	      = 7;
	req->param.max_cm_retries 	      = 15;

	req->priv.opcode     	= SRP_LOGIN_REQ;
	req->priv.tag        	= 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt 	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     (unsigned long long) be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(target->cm_id, &req->param);

	kfree(req);

	return status;
}
static void srp_disconnect_target(struct srp_target_port *target)
{
	/* XXX should send SRP_I_LOGOUT request */

	init_completion(&target->done);
	if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM DREQ failed\n");
		return;
	}
	wait_for_completion(&target->done);
}
static bool srp_change_state(struct srp_target_port *target,
			     enum srp_target_state old,
			     enum srp_target_state new)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state == old) {
		target->state = new;
		changed = true;
	}
	spin_unlock_irq(&target->lock);
	return changed;
}
static void srp_free_req_data(struct srp_target_port *target)
{
	struct ib_device *ibdev = target->srp_host->srp_dev->dev;
	struct srp_request *req;
	int i;

	for (i = 0, req = target->req_ring; i < SRP_CMD_SQ_SIZE; ++i, ++req) {
		kfree(req->fmr_list);
		kfree(req->map_page);
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}
}
static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, work);

	if (!srp_change_state(target, SRP_TARGET_DEAD, SRP_TARGET_REMOVED))
		return;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	ib_destroy_cm_id(target->cm_id);
	srp_free_target_ib(target);
	srp_free_req_data(target);
	scsi_host_put(target->scsi_host);
}
static int srp_connect_target(struct srp_target_port *target)
{
	int retries = 3;
	int ret;

	ret = srp_lookup_path(target);
	if (ret)
		return ret;

	while (1) {
		init_completion(&target->done);
		ret = srp_send_req(target);
		if (ret)
			return ret;
		wait_for_completion(&target->done);

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (target->status) {
		case 0:
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(target);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			/* Our current CM id was stale, and is now in timewait.
			 * Try to reconnect with a new one.
			 */
			if (!retries-- || srp_new_cm_id(target)) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "giving up on stale connection\n");
				target->status = -ECONNRESET;
				return target->status;
			}

			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "retrying stale connection\n");
			break;

		default:
			return target->status;
		}
	}
}
static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_target_port *target,
			   struct srp_request *req)
{
	struct ib_device *ibdev = target->srp_host->srp_dev->dev;
	struct ib_pool_fmr **pfmr;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	pfmr = req->fmr_list;
	while (req->nfmr--)
		ib_fmr_pool_unmap(*pfmr++);

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}
static void srp_remove_req(struct srp_target_port *target,
			   struct srp_request *req, s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(req->scmnd, target, req);
	spin_lock_irqsave(&target->lock, flags);
	target->req_lim += req_lim_delta;
	req->scmnd = NULL;
	list_add_tail(&req->list, &target->free_reqs);
	spin_unlock_irqrestore(&target->lock, flags);
}

static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
{
	req->scmnd->result = DID_RESET << 16;
	req->scmnd->scsi_done(req->scmnd);
	srp_remove_req(target, req, 0);
}
static int srp_reconnect_target(struct srp_target_port *target)
{
	struct ib_qp_attr qp_attr;
	struct ib_wc wc;
	int i, ret;

	if (!srp_change_state(target, SRP_TARGET_LIVE, SRP_TARGET_CONNECTING))
		return -EAGAIN;

	srp_disconnect_target(target);
	/*
	 * Now get a new local CM ID so that we avoid confusing the
	 * target in case things are really fouled up.
	 */
	ret = srp_new_cm_id(target);
	if (ret)
		goto err;

	qp_attr.qp_state = IB_QPS_RESET;
	ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE);
	if (ret)
		goto err;

	ret = srp_init_qp(target, target->qp);
	if (ret)
		goto err;

	while (ib_poll_cq(target->recv_cq, 1, &wc) > 0)
		; /* nothing */
	while (ib_poll_cq(target->send_cq, 1, &wc) > 0)
		; /* nothing */

	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
		struct srp_request *req = &target->req_ring[i];
		if (req->scmnd)
			srp_reset_req(target, req);
	}

	INIT_LIST_HEAD(&target->free_tx);
	for (i = 0; i < SRP_SQ_SIZE; ++i)
		list_add(&target->tx_ring[i]->list, &target->free_tx);

	target->qp_in_error = 0;
	ret = srp_connect_target(target);
	if (ret)
		goto err;

	if (!srp_change_state(target, SRP_TARGET_CONNECTING, SRP_TARGET_LIVE))
		ret = -EAGAIN;

	return ret;

err:
	shost_printk(KERN_ERR, target->scsi_host,
		     PFX "reconnect failed (%d), removing target port.\n", ret);

	/*
	 * We couldn't reconnect, so kill our target port off.
	 * However, we have to defer the real removal because we
	 * are in the context of the SCSI error handler now, which
	 * will deadlock if we call scsi_remove_host().
	 *
	 * Schedule our work inside the lock to avoid a race with
	 * the flush_scheduled_work() in srp_remove_one().
	 */
	spin_lock_irq(&target->lock);
	if (target->state == SRP_TARGET_CONNECTING) {
		target->state = SRP_TARGET_DEAD;
		INIT_WORK(&target->work, srp_remove_work);
		queue_work(ib_wq, &target->work);
	}
	spin_unlock_irq(&target->lock);

	return ret;
}
667 static void srp_map_desc(struct srp_map_state
*state
, dma_addr_t dma_addr
,
668 unsigned int dma_len
, u32 rkey
)
670 struct srp_direct_buf
*desc
= state
->desc
;
672 desc
->va
= cpu_to_be64(dma_addr
);
673 desc
->key
= cpu_to_be32(rkey
);
674 desc
->len
= cpu_to_be32(dma_len
);
676 state
->total_len
+= dma_len
;
681 static int srp_map_finish_fmr(struct srp_map_state
*state
,
682 struct srp_target_port
*target
)
684 struct srp_device
*dev
= target
->srp_host
->srp_dev
;
685 struct ib_pool_fmr
*fmr
;
691 if (state
->npages
== 1) {
692 srp_map_desc(state
, state
->base_dma_addr
, state
->fmr_len
,
694 state
->npages
= state
->fmr_len
= 0;
698 fmr
= ib_fmr_pool_map_phys(dev
->fmr_pool
, state
->pages
,
699 state
->npages
, io_addr
);
703 *state
->next_fmr
++ = fmr
;
706 srp_map_desc(state
, 0, state
->fmr_len
, fmr
->fmr
->rkey
);
707 state
->npages
= state
->fmr_len
= 0;
711 static void srp_map_update_start(struct srp_map_state
*state
,
712 struct scatterlist
*sg
, int sg_index
,
715 state
->unmapped_sg
= sg
;
716 state
->unmapped_index
= sg_index
;
717 state
->unmapped_addr
= dma_addr
;
720 static int srp_map_sg_entry(struct srp_map_state
*state
,
721 struct srp_target_port
*target
,
722 struct scatterlist
*sg
, int sg_index
,
725 struct srp_device
*dev
= target
->srp_host
->srp_dev
;
726 struct ib_device
*ibdev
= dev
->dev
;
727 dma_addr_t dma_addr
= ib_sg_dma_address(ibdev
, sg
);
728 unsigned int dma_len
= ib_sg_dma_len(ibdev
, sg
);
735 if (use_fmr
== SRP_MAP_NO_FMR
) {
736 /* Once we're in direct map mode for a request, we don't
737 * go back to FMR mode, so no need to update anything
738 * other than the descriptor.
740 srp_map_desc(state
, dma_addr
, dma_len
, target
->rkey
);
744 /* If we start at an offset into the FMR page, don't merge into
745 * the current FMR. Finish it out, and use the kernel's MR for this
746 * sg entry. This is to avoid potential bugs on some SRP targets
747 * that were never quite defined, but went away when the initiator
748 * avoided using FMR on such page fragments.
750 if (dma_addr
& ~dev
->fmr_page_mask
|| dma_len
> dev
->fmr_max_size
) {
751 ret
= srp_map_finish_fmr(state
, target
);
755 srp_map_desc(state
, dma_addr
, dma_len
, target
->rkey
);
756 srp_map_update_start(state
, NULL
, 0, 0);
760 /* If this is the first sg to go into the FMR, save our position.
761 * We need to know the first unmapped entry, its index, and the
762 * first unmapped address within that entry to be able to restart
763 * mapping after an error.
765 if (!state
->unmapped_sg
)
766 srp_map_update_start(state
, sg
, sg_index
, dma_addr
);
769 if (state
->npages
== SRP_FMR_SIZE
) {
770 ret
= srp_map_finish_fmr(state
, target
);
774 srp_map_update_start(state
, sg
, sg_index
, dma_addr
);
777 len
= min_t(unsigned int, dma_len
, dev
->fmr_page_size
);
780 state
->base_dma_addr
= dma_addr
;
781 state
->pages
[state
->npages
++] = dma_addr
;
782 state
->fmr_len
+= len
;
787 /* If the last entry of the FMR wasn't a full page, then we need to
788 * close it out and start a new one -- we can only merge at page
792 if (len
!= dev
->fmr_page_size
) {
793 ret
= srp_map_finish_fmr(state
, target
);
795 srp_map_update_start(state
, NULL
, 0, 0);
800 static int srp_map_data(struct scsi_cmnd
*scmnd
, struct srp_target_port
*target
,
801 struct srp_request
*req
)
803 struct scatterlist
*scat
, *sg
;
804 struct srp_cmd
*cmd
= req
->cmd
->buf
;
805 int i
, len
, nents
, count
, use_fmr
;
806 struct srp_device
*dev
;
807 struct ib_device
*ibdev
;
808 struct srp_map_state state
;
809 struct srp_indirect_buf
*indirect_hdr
;
813 if (!scsi_sglist(scmnd
) || scmnd
->sc_data_direction
== DMA_NONE
)
814 return sizeof (struct srp_cmd
);
816 if (scmnd
->sc_data_direction
!= DMA_FROM_DEVICE
&&
817 scmnd
->sc_data_direction
!= DMA_TO_DEVICE
) {
818 shost_printk(KERN_WARNING
, target
->scsi_host
,
819 PFX
"Unhandled data direction %d\n",
820 scmnd
->sc_data_direction
);
824 nents
= scsi_sg_count(scmnd
);
825 scat
= scsi_sglist(scmnd
);
827 dev
= target
->srp_host
->srp_dev
;
830 count
= ib_dma_map_sg(ibdev
, scat
, nents
, scmnd
->sc_data_direction
);
831 if (unlikely(count
== 0))
834 fmt
= SRP_DATA_DESC_DIRECT
;
835 len
= sizeof (struct srp_cmd
) + sizeof (struct srp_direct_buf
);
839 * The midlayer only generated a single gather/scatter
840 * entry, or DMA mapping coalesced everything to a
841 * single entry. So a direct descriptor along with
842 * the DMA MR suffices.
844 struct srp_direct_buf
*buf
= (void *) cmd
->add_data
;
846 buf
->va
= cpu_to_be64(ib_sg_dma_address(ibdev
, scat
));
847 buf
->key
= cpu_to_be32(target
->rkey
);
848 buf
->len
= cpu_to_be32(ib_sg_dma_len(ibdev
, scat
));
854 /* We have more than one scatter/gather entry, so build our indirect
855 * descriptor table, trying to merge as many entries with FMR as we
858 indirect_hdr
= (void *) cmd
->add_data
;
860 ib_dma_sync_single_for_cpu(ibdev
, req
->indirect_dma_addr
,
861 target
->indirect_size
, DMA_TO_DEVICE
);
863 memset(&state
, 0, sizeof(state
));
864 state
.desc
= req
->indirect_desc
;
865 state
.pages
= req
->map_page
;
866 state
.next_fmr
= req
->fmr_list
;
868 use_fmr
= dev
->fmr_pool
? SRP_MAP_ALLOW_FMR
: SRP_MAP_NO_FMR
;
870 for_each_sg(scat
, sg
, count
, i
) {
871 if (srp_map_sg_entry(&state
, target
, sg
, i
, use_fmr
)) {
872 /* FMR mapping failed, so backtrack to the first
873 * unmapped entry and continue on without using FMR.
876 unsigned int dma_len
;
879 sg
= state
.unmapped_sg
;
880 i
= state
.unmapped_index
;
882 dma_addr
= ib_sg_dma_address(ibdev
, sg
);
883 dma_len
= ib_sg_dma_len(ibdev
, sg
);
884 dma_len
-= (state
.unmapped_addr
- dma_addr
);
885 dma_addr
= state
.unmapped_addr
;
886 use_fmr
= SRP_MAP_NO_FMR
;
887 srp_map_desc(&state
, dma_addr
, dma_len
, target
->rkey
);
891 if (use_fmr
== SRP_MAP_ALLOW_FMR
&& srp_map_finish_fmr(&state
, target
))
894 /* We've mapped the request, now pull as much of the indirect
895 * descriptor table as we can into the command buffer. If this
896 * target is not using an external indirect table, we are
897 * guaranteed to fit into the command, as the SCSI layer won't
898 * give us more S/G entries than we allow.
900 req
->nfmr
= state
.nfmr
;
901 if (state
.ndesc
== 1) {
902 /* FMR mapping was able to collapse this to one entry,
903 * so use a direct descriptor.
905 struct srp_direct_buf
*buf
= (void *) cmd
->add_data
;
907 *buf
= req
->indirect_desc
[0];
911 if (unlikely(target
->cmd_sg_cnt
< state
.ndesc
&&
912 !target
->allow_ext_sg
)) {
913 shost_printk(KERN_ERR
, target
->scsi_host
,
914 "Could not fit S/G list into SRP_CMD\n");
918 count
= min(state
.ndesc
, target
->cmd_sg_cnt
);
919 table_len
= state
.ndesc
* sizeof (struct srp_direct_buf
);
921 fmt
= SRP_DATA_DESC_INDIRECT
;
922 len
= sizeof(struct srp_cmd
) + sizeof (struct srp_indirect_buf
);
923 len
+= count
* sizeof (struct srp_direct_buf
);
925 memcpy(indirect_hdr
->desc_list
, req
->indirect_desc
,
926 count
* sizeof (struct srp_direct_buf
));
928 indirect_hdr
->table_desc
.va
= cpu_to_be64(req
->indirect_dma_addr
);
929 indirect_hdr
->table_desc
.key
= cpu_to_be32(target
->rkey
);
930 indirect_hdr
->table_desc
.len
= cpu_to_be32(table_len
);
931 indirect_hdr
->len
= cpu_to_be32(state
.total_len
);
933 if (scmnd
->sc_data_direction
== DMA_TO_DEVICE
)
934 cmd
->data_out_desc_cnt
= count
;
936 cmd
->data_in_desc_cnt
= count
;
938 ib_dma_sync_single_for_device(ibdev
, req
->indirect_dma_addr
, table_len
,
942 if (scmnd
->sc_data_direction
== DMA_TO_DEVICE
)
943 cmd
->buf_fmt
= fmt
<< 4;
951 * Return an IU and possible credit to the free pool
953 static void srp_put_tx_iu(struct srp_target_port
*target
, struct srp_iu
*iu
,
954 enum srp_iu_type iu_type
)
958 spin_lock_irqsave(&target
->lock
, flags
);
959 list_add(&iu
->list
, &target
->free_tx
);
960 if (iu_type
!= SRP_IU_RSP
)
962 spin_unlock_irqrestore(&target
->lock
, flags
);
966 * Must be called with target->lock held to protect req_lim and free_tx.
967 * If IU is not sent, it must be returned using srp_put_tx_iu().
970 * An upper limit for the number of allocated information units for each
972 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
973 * more than Scsi_Host.can_queue requests.
974 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
975 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
976 * one unanswered SRP request to an initiator.
978 static struct srp_iu
*__srp_get_tx_iu(struct srp_target_port
*target
,
979 enum srp_iu_type iu_type
)
981 s32 rsv
= (iu_type
== SRP_IU_TSK_MGMT
) ? 0 : SRP_TSK_MGMT_SQ_SIZE
;
984 srp_send_completion(target
->send_cq
, target
);
986 if (list_empty(&target
->free_tx
))
989 /* Initiator responses to target requests do not consume credits */
990 if (iu_type
!= SRP_IU_RSP
) {
991 if (target
->req_lim
<= rsv
) {
992 ++target
->zero_req_lim
;
999 iu
= list_first_entry(&target
->free_tx
, struct srp_iu
, list
);
1000 list_del(&iu
->list
);
1004 static int srp_post_send(struct srp_target_port
*target
,
1005 struct srp_iu
*iu
, int len
)
1008 struct ib_send_wr wr
, *bad_wr
;
1010 list
.addr
= iu
->dma
;
1012 list
.lkey
= target
->lkey
;
1015 wr
.wr_id
= (uintptr_t) iu
;
1018 wr
.opcode
= IB_WR_SEND
;
1019 wr
.send_flags
= IB_SEND_SIGNALED
;
1021 return ib_post_send(target
->qp
, &wr
, &bad_wr
);
1024 static int srp_post_recv(struct srp_target_port
*target
, struct srp_iu
*iu
)
1026 struct ib_recv_wr wr
, *bad_wr
;
1029 list
.addr
= iu
->dma
;
1030 list
.length
= iu
->size
;
1031 list
.lkey
= target
->lkey
;
1034 wr
.wr_id
= (uintptr_t) iu
;
1038 return ib_post_recv(target
->qp
, &wr
, &bad_wr
);
1041 static void srp_process_rsp(struct srp_target_port
*target
, struct srp_rsp
*rsp
)
1043 struct srp_request
*req
;
1044 struct scsi_cmnd
*scmnd
;
1045 unsigned long flags
;
1047 if (unlikely(rsp
->tag
& SRP_TAG_TSK_MGMT
)) {
1048 spin_lock_irqsave(&target
->lock
, flags
);
1049 target
->req_lim
+= be32_to_cpu(rsp
->req_lim_delta
);
1050 spin_unlock_irqrestore(&target
->lock
, flags
);
1052 target
->tsk_mgmt_status
= -1;
1053 if (be32_to_cpu(rsp
->resp_data_len
) >= 4)
1054 target
->tsk_mgmt_status
= rsp
->data
[3];
1055 complete(&target
->tsk_mgmt_done
);
1057 req
= &target
->req_ring
[rsp
->tag
];
1060 shost_printk(KERN_ERR
, target
->scsi_host
,
1061 "Null scmnd for RSP w/tag %016llx\n",
1062 (unsigned long long) rsp
->tag
);
1063 scmnd
->result
= rsp
->status
;
1065 if (rsp
->flags
& SRP_RSP_FLAG_SNSVALID
) {
1066 memcpy(scmnd
->sense_buffer
, rsp
->data
+
1067 be32_to_cpu(rsp
->resp_data_len
),
1068 min_t(int, be32_to_cpu(rsp
->sense_data_len
),
1069 SCSI_SENSE_BUFFERSIZE
));
1072 if (rsp
->flags
& (SRP_RSP_FLAG_DOOVER
| SRP_RSP_FLAG_DOUNDER
))
1073 scsi_set_resid(scmnd
, be32_to_cpu(rsp
->data_out_res_cnt
));
1074 else if (rsp
->flags
& (SRP_RSP_FLAG_DIOVER
| SRP_RSP_FLAG_DIUNDER
))
1075 scsi_set_resid(scmnd
, be32_to_cpu(rsp
->data_in_res_cnt
));
1077 srp_remove_req(target
, req
, be32_to_cpu(rsp
->req_lim_delta
));
1078 scmnd
->host_scribble
= NULL
;
1079 scmnd
->scsi_done(scmnd
);
1083 static int srp_response_common(struct srp_target_port
*target
, s32 req_delta
,
1086 struct ib_device
*dev
= target
->srp_host
->srp_dev
->dev
;
1087 unsigned long flags
;
1091 spin_lock_irqsave(&target
->lock
, flags
);
1092 target
->req_lim
+= req_delta
;
1093 iu
= __srp_get_tx_iu(target
, SRP_IU_RSP
);
1094 spin_unlock_irqrestore(&target
->lock
, flags
);
1097 shost_printk(KERN_ERR
, target
->scsi_host
, PFX
1098 "no IU available to send response\n");
1102 ib_dma_sync_single_for_cpu(dev
, iu
->dma
, len
, DMA_TO_DEVICE
);
1103 memcpy(iu
->buf
, rsp
, len
);
1104 ib_dma_sync_single_for_device(dev
, iu
->dma
, len
, DMA_TO_DEVICE
);
1106 err
= srp_post_send(target
, iu
, len
);
1108 shost_printk(KERN_ERR
, target
->scsi_host
, PFX
1109 "unable to post response: %d\n", err
);
1110 srp_put_tx_iu(target
, iu
, SRP_IU_RSP
);
1116 static void srp_process_cred_req(struct srp_target_port
*target
,
1117 struct srp_cred_req
*req
)
1119 struct srp_cred_rsp rsp
= {
1120 .opcode
= SRP_CRED_RSP
,
1123 s32 delta
= be32_to_cpu(req
->req_lim_delta
);
1125 if (srp_response_common(target
, delta
, &rsp
, sizeof rsp
))
1126 shost_printk(KERN_ERR
, target
->scsi_host
, PFX
1127 "problems processing SRP_CRED_REQ\n");
1130 static void srp_process_aer_req(struct srp_target_port
*target
,
1131 struct srp_aer_req
*req
)
1133 struct srp_aer_rsp rsp
= {
1134 .opcode
= SRP_AER_RSP
,
1137 s32 delta
= be32_to_cpu(req
->req_lim_delta
);
1139 shost_printk(KERN_ERR
, target
->scsi_host
, PFX
1140 "ignoring AER for LUN %llu\n", be64_to_cpu(req
->lun
));
1142 if (srp_response_common(target
, delta
, &rsp
, sizeof rsp
))
1143 shost_printk(KERN_ERR
, target
->scsi_host
, PFX
1144 "problems processing SRP_AER_REQ\n");
1147 static void srp_handle_recv(struct srp_target_port
*target
, struct ib_wc
*wc
)
1149 struct ib_device
*dev
= target
->srp_host
->srp_dev
->dev
;
1150 struct srp_iu
*iu
= (struct srp_iu
*) wc
->wr_id
;
1154 ib_dma_sync_single_for_cpu(dev
, iu
->dma
, target
->max_ti_iu_len
,
1157 opcode
= *(u8
*) iu
->buf
;
1160 shost_printk(KERN_ERR
, target
->scsi_host
,
1161 PFX
"recv completion, opcode 0x%02x\n", opcode
);
1162 print_hex_dump(KERN_ERR
, "", DUMP_PREFIX_OFFSET
, 8, 1,
1163 iu
->buf
, wc
->byte_len
, true);
1168 srp_process_rsp(target
, iu
->buf
);
1172 srp_process_cred_req(target
, iu
->buf
);
1176 srp_process_aer_req(target
, iu
->buf
);
1180 /* XXX Handle target logout */
1181 shost_printk(KERN_WARNING
, target
->scsi_host
,
1182 PFX
"Got target logout request\n");
1186 shost_printk(KERN_WARNING
, target
->scsi_host
,
1187 PFX
"Unhandled SRP opcode 0x%02x\n", opcode
);
1191 ib_dma_sync_single_for_device(dev
, iu
->dma
, target
->max_ti_iu_len
,
1194 res
= srp_post_recv(target
, iu
);
1196 shost_printk(KERN_ERR
, target
->scsi_host
,
1197 PFX
"Recv failed with error code %d\n", res
);
1200 static void srp_recv_completion(struct ib_cq
*cq
, void *target_ptr
)
1202 struct srp_target_port
*target
= target_ptr
;
1205 ib_req_notify_cq(cq
, IB_CQ_NEXT_COMP
);
1206 while (ib_poll_cq(cq
, 1, &wc
) > 0) {
1208 shost_printk(KERN_ERR
, target
->scsi_host
,
1209 PFX
"failed receive status %d\n",
1211 target
->qp_in_error
= 1;
1215 srp_handle_recv(target
, &wc
);
1219 static void srp_send_completion(struct ib_cq
*cq
, void *target_ptr
)
1221 struct srp_target_port
*target
= target_ptr
;
1225 while (ib_poll_cq(cq
, 1, &wc
) > 0) {
1227 shost_printk(KERN_ERR
, target
->scsi_host
,
1228 PFX
"failed send status %d\n",
1230 target
->qp_in_error
= 1;
1234 iu
= (struct srp_iu
*) wc
.wr_id
;
1235 list_add(&iu
->list
, &target
->free_tx
);
1239 static int srp_queuecommand(struct Scsi_Host
*shost
, struct scsi_cmnd
*scmnd
)
1241 struct srp_target_port
*target
= host_to_target(shost
);
1242 struct srp_request
*req
;
1244 struct srp_cmd
*cmd
;
1245 struct ib_device
*dev
;
1246 unsigned long flags
;
1249 if (target
->state
== SRP_TARGET_CONNECTING
)
1252 if (target
->state
== SRP_TARGET_DEAD
||
1253 target
->state
== SRP_TARGET_REMOVED
) {
1254 scmnd
->result
= DID_BAD_TARGET
<< 16;
1255 scmnd
->scsi_done(scmnd
);
1259 spin_lock_irqsave(&target
->lock
, flags
);
1260 iu
= __srp_get_tx_iu(target
, SRP_IU_CMD
);
1264 req
= list_first_entry(&target
->free_reqs
, struct srp_request
, list
);
1265 list_del(&req
->list
);
1266 spin_unlock_irqrestore(&target
->lock
, flags
);
1268 dev
= target
->srp_host
->srp_dev
->dev
;
1269 ib_dma_sync_single_for_cpu(dev
, iu
->dma
, target
->max_iu_len
,
1273 scmnd
->host_scribble
= (void *) req
;
1276 memset(cmd
, 0, sizeof *cmd
);
1278 cmd
->opcode
= SRP_CMD
;
1279 cmd
->lun
= cpu_to_be64((u64
) scmnd
->device
->lun
<< 48);
1280 cmd
->tag
= req
->index
;
1281 memcpy(cmd
->cdb
, scmnd
->cmnd
, scmnd
->cmd_len
);
1286 len
= srp_map_data(scmnd
, target
, req
);
1288 shost_printk(KERN_ERR
, target
->scsi_host
,
1289 PFX
"Failed to map data\n");
1293 ib_dma_sync_single_for_device(dev
, iu
->dma
, target
->max_iu_len
,
1296 if (srp_post_send(target
, iu
, len
)) {
1297 shost_printk(KERN_ERR
, target
->scsi_host
, PFX
"Send failed\n");
1304 srp_unmap_data(scmnd
, target
, req
);
1307 srp_put_tx_iu(target
, iu
, SRP_IU_CMD
);
1309 spin_lock_irqsave(&target
->lock
, flags
);
1310 list_add(&req
->list
, &target
->free_reqs
);
1313 spin_unlock_irqrestore(&target
->lock
, flags
);
1316 return SCSI_MLQUEUE_HOST_BUSY
;
1319 static int srp_alloc_iu_bufs(struct srp_target_port
*target
)
1323 for (i
= 0; i
< SRP_RQ_SIZE
; ++i
) {
1324 target
->rx_ring
[i
] = srp_alloc_iu(target
->srp_host
,
1325 target
->max_ti_iu_len
,
1326 GFP_KERNEL
, DMA_FROM_DEVICE
);
1327 if (!target
->rx_ring
[i
])
1331 for (i
= 0; i
< SRP_SQ_SIZE
; ++i
) {
1332 target
->tx_ring
[i
] = srp_alloc_iu(target
->srp_host
,
1334 GFP_KERNEL
, DMA_TO_DEVICE
);
1335 if (!target
->tx_ring
[i
])
1338 list_add(&target
->tx_ring
[i
]->list
, &target
->free_tx
);
1344 for (i
= 0; i
< SRP_RQ_SIZE
; ++i
) {
1345 srp_free_iu(target
->srp_host
, target
->rx_ring
[i
]);
1346 target
->rx_ring
[i
] = NULL
;
1349 for (i
= 0; i
< SRP_SQ_SIZE
; ++i
) {
1350 srp_free_iu(target
->srp_host
, target
->tx_ring
[i
]);
1351 target
->tx_ring
[i
] = NULL
;
1357 static void srp_cm_rep_handler(struct ib_cm_id
*cm_id
,
1358 struct srp_login_rsp
*lrsp
,
1359 struct srp_target_port
*target
)
1361 struct ib_qp_attr
*qp_attr
= NULL
;
1366 if (lrsp
->opcode
== SRP_LOGIN_RSP
) {
1367 target
->max_ti_iu_len
= be32_to_cpu(lrsp
->max_ti_iu_len
);
1368 target
->req_lim
= be32_to_cpu(lrsp
->req_lim_delta
);
1371 * Reserve credits for task management so we don't
1372 * bounce requests back to the SCSI mid-layer.
1374 target
->scsi_host
->can_queue
1375 = min(target
->req_lim
- SRP_TSK_MGMT_SQ_SIZE
,
1376 target
->scsi_host
->can_queue
);
1378 shost_printk(KERN_WARNING
, target
->scsi_host
,
1379 PFX
"Unhandled RSP opcode %#x\n", lrsp
->opcode
);
1384 if (!target
->rx_ring
[0]) {
1385 ret
= srp_alloc_iu_bufs(target
);
1391 qp_attr
= kmalloc(sizeof *qp_attr
, GFP_KERNEL
);
1395 qp_attr
->qp_state
= IB_QPS_RTR
;
1396 ret
= ib_cm_init_qp_attr(cm_id
, qp_attr
, &attr_mask
);
1400 ret
= ib_modify_qp(target
->qp
, qp_attr
, attr_mask
);
1404 for (i
= 0; i
< SRP_RQ_SIZE
; i
++) {
1405 struct srp_iu
*iu
= target
->rx_ring
[i
];
1406 ret
= srp_post_recv(target
, iu
);
1411 qp_attr
->qp_state
= IB_QPS_RTS
;
1412 ret
= ib_cm_init_qp_attr(cm_id
, qp_attr
, &attr_mask
);
1416 ret
= ib_modify_qp(target
->qp
, qp_attr
, attr_mask
);
1420 ret
= ib_send_cm_rtu(cm_id
, NULL
, 0);
1426 target
->status
= ret
;
1429 static void srp_cm_rej_handler(struct ib_cm_id
*cm_id
,
1430 struct ib_cm_event
*event
,
1431 struct srp_target_port
*target
)
1433 struct Scsi_Host
*shost
= target
->scsi_host
;
1434 struct ib_class_port_info
*cpi
;
1437 switch (event
->param
.rej_rcvd
.reason
) {
1438 case IB_CM_REJ_PORT_CM_REDIRECT
:
1439 cpi
= event
->param
.rej_rcvd
.ari
;
1440 target
->path
.dlid
= cpi
->redirect_lid
;
1441 target
->path
.pkey
= cpi
->redirect_pkey
;
1442 cm_id
->remote_cm_qpn
= be32_to_cpu(cpi
->redirect_qp
) & 0x00ffffff;
1443 memcpy(target
->path
.dgid
.raw
, cpi
->redirect_gid
, 16);
1445 target
->status
= target
->path
.dlid
?
1446 SRP_DLID_REDIRECT
: SRP_PORT_REDIRECT
;
1449 case IB_CM_REJ_PORT_REDIRECT
:
1450 if (srp_target_is_topspin(target
)) {
1452 * Topspin/Cisco SRP gateways incorrectly send
1453 * reject reason code 25 when they mean 24
1456 memcpy(target
->path
.dgid
.raw
,
1457 event
->param
.rej_rcvd
.ari
, 16);
1459 shost_printk(KERN_DEBUG
, shost
,
1460 PFX
"Topspin/Cisco redirect to target port GID %016llx%016llx\n",
1461 (unsigned long long) be64_to_cpu(target
->path
.dgid
.global
.subnet_prefix
),
1462 (unsigned long long) be64_to_cpu(target
->path
.dgid
.global
.interface_id
));
1464 target
->status
= SRP_PORT_REDIRECT
;
1466 shost_printk(KERN_WARNING
, shost
,
1467 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
1468 target
->status
= -ECONNRESET
;
1472 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID
:
1473 shost_printk(KERN_WARNING
, shost
,
1474 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
1475 target
->status
= -ECONNRESET
;
1478 case IB_CM_REJ_CONSUMER_DEFINED
:
1479 opcode
= *(u8
*) event
->private_data
;
1480 if (opcode
== SRP_LOGIN_REJ
) {
1481 struct srp_login_rej
*rej
= event
->private_data
;
1482 u32 reason
= be32_to_cpu(rej
->reason
);
1484 if (reason
== SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE
)
1485 shost_printk(KERN_WARNING
, shost
,
1486 PFX
"SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
1488 shost_printk(KERN_WARNING
, shost
,
1489 PFX
"SRP LOGIN REJECTED, reason 0x%08x\n", reason
);
1491 shost_printk(KERN_WARNING
, shost
,
1492 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
1493 " opcode 0x%02x\n", opcode
);
1494 target
->status
= -ECONNRESET
;
1497 case IB_CM_REJ_STALE_CONN
:
1498 shost_printk(KERN_WARNING
, shost
, " REJ reason: stale connection\n");
1499 target
->status
= SRP_STALE_CONN
;
1503 shost_printk(KERN_WARNING
, shost
, " REJ reason 0x%x\n",
1504 event
->param
.rej_rcvd
.reason
);
1505 target
->status
= -ECONNRESET
;
1509 static int srp_cm_handler(struct ib_cm_id
*cm_id
, struct ib_cm_event
*event
)
1511 struct srp_target_port
*target
= cm_id
->context
;
1514 switch (event
->event
) {
1515 case IB_CM_REQ_ERROR
:
1516 shost_printk(KERN_DEBUG
, target
->scsi_host
,
1517 PFX
"Sending CM REQ failed\n");
1519 target
->status
= -ECONNRESET
;
1522 case IB_CM_REP_RECEIVED
:
1524 srp_cm_rep_handler(cm_id
, event
->private_data
, target
);
1527 case IB_CM_REJ_RECEIVED
:
1528 shost_printk(KERN_DEBUG
, target
->scsi_host
, PFX
"REJ received\n");
1531 srp_cm_rej_handler(cm_id
, event
, target
);
1534 case IB_CM_DREQ_RECEIVED
:
1535 shost_printk(KERN_WARNING
, target
->scsi_host
,
1536 PFX
"DREQ received - connection closed\n");
1537 if (ib_send_cm_drep(cm_id
, NULL
, 0))
1538 shost_printk(KERN_ERR
, target
->scsi_host
,
1539 PFX
"Sending CM DREP failed\n");
1542 case IB_CM_TIMEWAIT_EXIT
:
1543 shost_printk(KERN_ERR
, target
->scsi_host
,
1544 PFX
"connection closed\n");
1550 case IB_CM_MRA_RECEIVED
:
1551 case IB_CM_DREQ_ERROR
:
1552 case IB_CM_DREP_RECEIVED
:
1556 shost_printk(KERN_WARNING
, target
->scsi_host
,
1557 PFX
"Unhandled CM event %d\n", event
->event
);
1562 complete(&target
->done
);
1567 static int srp_send_tsk_mgmt(struct srp_target_port
*target
,
1568 u64 req_tag
, unsigned int lun
, u8 func
)
1570 struct ib_device
*dev
= target
->srp_host
->srp_dev
->dev
;
1572 struct srp_tsk_mgmt
*tsk_mgmt
;
1574 if (target
->state
== SRP_TARGET_DEAD
||
1575 target
->state
== SRP_TARGET_REMOVED
)
1578 init_completion(&target
->tsk_mgmt_done
);
1580 spin_lock_irq(&target
->lock
);
1581 iu
= __srp_get_tx_iu(target
, SRP_IU_TSK_MGMT
);
1582 spin_unlock_irq(&target
->lock
);
1587 ib_dma_sync_single_for_cpu(dev
, iu
->dma
, sizeof *tsk_mgmt
,
1590 memset(tsk_mgmt
, 0, sizeof *tsk_mgmt
);
1592 tsk_mgmt
->opcode
= SRP_TSK_MGMT
;
1593 tsk_mgmt
->lun
= cpu_to_be64((u64
) lun
<< 48);
1594 tsk_mgmt
->tag
= req_tag
| SRP_TAG_TSK_MGMT
;
1595 tsk_mgmt
->tsk_mgmt_func
= func
;
1596 tsk_mgmt
->task_tag
= req_tag
;
1598 ib_dma_sync_single_for_device(dev
, iu
->dma
, sizeof *tsk_mgmt
,
1600 if (srp_post_send(target
, iu
, sizeof *tsk_mgmt
)) {
1601 srp_put_tx_iu(target
, iu
, SRP_IU_TSK_MGMT
);
1605 if (!wait_for_completion_timeout(&target
->tsk_mgmt_done
,
1606 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS
)))
1612 static int srp_abort(struct scsi_cmnd
*scmnd
)
1614 struct srp_target_port
*target
= host_to_target(scmnd
->device
->host
);
1615 struct srp_request
*req
= (struct srp_request
*) scmnd
->host_scribble
;
1618 shost_printk(KERN_ERR
, target
->scsi_host
, "SRP abort called\n");
1620 if (!req
|| target
->qp_in_error
)
1622 if (srp_send_tsk_mgmt(target
, req
->index
, scmnd
->device
->lun
,
1623 SRP_TSK_ABORT_TASK
))
1627 if (!target
->tsk_mgmt_status
) {
1628 srp_remove_req(target
, req
, 0);
1629 scmnd
->result
= DID_ABORT
<< 16;
1637 static int srp_reset_device(struct scsi_cmnd
*scmnd
)
1639 struct srp_target_port
*target
= host_to_target(scmnd
->device
->host
);
1642 shost_printk(KERN_ERR
, target
->scsi_host
, "SRP reset_device called\n");
1644 if (target
->qp_in_error
)
1646 if (srp_send_tsk_mgmt(target
, SRP_TAG_NO_REQ
, scmnd
->device
->lun
,
1649 if (target
->tsk_mgmt_status
)
1652 for (i
= 0; i
< SRP_CMD_SQ_SIZE
; ++i
) {
1653 struct srp_request
*req
= &target
->req_ring
[i
];
1654 if (req
->scmnd
&& req
->scmnd
->device
== scmnd
->device
)
1655 srp_reset_req(target
, req
);
1661 static int srp_reset_host(struct scsi_cmnd
*scmnd
)
1663 struct srp_target_port
*target
= host_to_target(scmnd
->device
->host
);
1666 shost_printk(KERN_ERR
, target
->scsi_host
, PFX
"SRP reset_host called\n");
1668 if (!srp_reconnect_target(target
))
1674 static ssize_t
show_id_ext(struct device
*dev
, struct device_attribute
*attr
,
1677 struct srp_target_port
*target
= host_to_target(class_to_shost(dev
));
1679 if (target
->state
== SRP_TARGET_DEAD
||
1680 target
->state
== SRP_TARGET_REMOVED
)
1683 return sprintf(buf
, "0x%016llx\n",
1684 (unsigned long long) be64_to_cpu(target
->id_ext
));
1687 static ssize_t
show_ioc_guid(struct device
*dev
, struct device_attribute
*attr
,
1690 struct srp_target_port
*target
= host_to_target(class_to_shost(dev
));
1692 if (target
->state
== SRP_TARGET_DEAD
||
1693 target
->state
== SRP_TARGET_REMOVED
)
1696 return sprintf(buf
, "0x%016llx\n",
1697 (unsigned long long) be64_to_cpu(target
->ioc_guid
));
1700 static ssize_t
show_service_id(struct device
*dev
,
1701 struct device_attribute
*attr
, char *buf
)
1703 struct srp_target_port
*target
= host_to_target(class_to_shost(dev
));
1705 if (target
->state
== SRP_TARGET_DEAD
||
1706 target
->state
== SRP_TARGET_REMOVED
)
1709 return sprintf(buf
, "0x%016llx\n",
1710 (unsigned long long) be64_to_cpu(target
->service_id
));
1713 static ssize_t
show_pkey(struct device
*dev
, struct device_attribute
*attr
,
1716 struct srp_target_port
*target
= host_to_target(class_to_shost(dev
));
1718 if (target
->state
== SRP_TARGET_DEAD
||
1719 target
->state
== SRP_TARGET_REMOVED
)
1722 return sprintf(buf
, "0x%04x\n", be16_to_cpu(target
->path
.pkey
));
1725 static ssize_t
show_dgid(struct device
*dev
, struct device_attribute
*attr
,
1728 struct srp_target_port
*target
= host_to_target(class_to_shost(dev
));
1730 if (target
->state
== SRP_TARGET_DEAD
||
1731 target
->state
== SRP_TARGET_REMOVED
)
1734 return sprintf(buf
, "%pI6\n", target
->path
.dgid
.raw
);
1737 static ssize_t
show_orig_dgid(struct device
*dev
,
1738 struct device_attribute
*attr
, char *buf
)
1740 struct srp_target_port
*target
= host_to_target(class_to_shost(dev
));
1742 if (target
->state
== SRP_TARGET_DEAD
||
1743 target
->state
== SRP_TARGET_REMOVED
)
1746 return sprintf(buf
, "%pI6\n", target
->orig_dgid
);
1749 static ssize_t
show_req_lim(struct device
*dev
,
1750 struct device_attribute
*attr
, char *buf
)
1752 struct srp_target_port
*target
= host_to_target(class_to_shost(dev
));
1754 if (target
->state
== SRP_TARGET_DEAD
||
1755 target
->state
== SRP_TARGET_REMOVED
)
1758 return sprintf(buf
, "%d\n", target
->req_lim
);
1761 static ssize_t
show_zero_req_lim(struct device
*dev
,
1762 struct device_attribute
*attr
, char *buf
)
1764 struct srp_target_port
*target
= host_to_target(class_to_shost(dev
));
1766 if (target
->state
== SRP_TARGET_DEAD
||
1767 target
->state
== SRP_TARGET_REMOVED
)
1770 return sprintf(buf
, "%d\n", target
->zero_req_lim
);
1773 static ssize_t
show_local_ib_port(struct device
*dev
,
1774 struct device_attribute
*attr
, char *buf
)
1776 struct srp_target_port
*target
= host_to_target(class_to_shost(dev
));
1778 return sprintf(buf
, "%d\n", target
->srp_host
->port
);
1781 static ssize_t
show_local_ib_device(struct device
*dev
,
1782 struct device_attribute
*attr
, char *buf
)
1784 struct srp_target_port
*target
= host_to_target(class_to_shost(dev
));
1786 return sprintf(buf
, "%s\n", target
->srp_host
->srp_dev
->dev
->name
);
1789 static ssize_t
show_cmd_sg_entries(struct device
*dev
,
1790 struct device_attribute
*attr
, char *buf
)
1792 struct srp_target_port
*target
= host_to_target(class_to_shost(dev
));
1794 return sprintf(buf
, "%u\n", target
->cmd_sg_cnt
);
1797 static ssize_t
show_allow_ext_sg(struct device
*dev
,
1798 struct device_attribute
*attr
, char *buf
)
1800 struct srp_target_port
*target
= host_to_target(class_to_shost(dev
));
1802 return sprintf(buf
, "%s\n", target
->allow_ext_sg
? "true" : "false");
1805 static DEVICE_ATTR(id_ext
, S_IRUGO
, show_id_ext
, NULL
);
1806 static DEVICE_ATTR(ioc_guid
, S_IRUGO
, show_ioc_guid
, NULL
);
1807 static DEVICE_ATTR(service_id
, S_IRUGO
, show_service_id
, NULL
);
1808 static DEVICE_ATTR(pkey
, S_IRUGO
, show_pkey
, NULL
);
1809 static DEVICE_ATTR(dgid
, S_IRUGO
, show_dgid
, NULL
);
1810 static DEVICE_ATTR(orig_dgid
, S_IRUGO
, show_orig_dgid
, NULL
);
1811 static DEVICE_ATTR(req_lim
, S_IRUGO
, show_req_lim
, NULL
);
1812 static DEVICE_ATTR(zero_req_lim
, S_IRUGO
, show_zero_req_lim
, NULL
);
1813 static DEVICE_ATTR(local_ib_port
, S_IRUGO
, show_local_ib_port
, NULL
);
1814 static DEVICE_ATTR(local_ib_device
, S_IRUGO
, show_local_ib_device
, NULL
);
1815 static DEVICE_ATTR(cmd_sg_entries
, S_IRUGO
, show_cmd_sg_entries
, NULL
);
1816 static DEVICE_ATTR(allow_ext_sg
, S_IRUGO
, show_allow_ext_sg
, NULL
);
1818 static struct device_attribute
*srp_host_attrs
[] = {
1821 &dev_attr_service_id
,
1824 &dev_attr_orig_dgid
,
1826 &dev_attr_zero_req_lim
,
1827 &dev_attr_local_ib_port
,
1828 &dev_attr_local_ib_device
,
1829 &dev_attr_cmd_sg_entries
,
1830 &dev_attr_allow_ext_sg
,
1834 static struct scsi_host_template srp_template
= {
1835 .module
= THIS_MODULE
,
1836 .name
= "InfiniBand SRP initiator",
1837 .proc_name
= DRV_NAME
,
1838 .info
= srp_target_info
,
1839 .queuecommand
= srp_queuecommand
,
1840 .eh_abort_handler
= srp_abort
,
1841 .eh_device_reset_handler
= srp_reset_device
,
1842 .eh_host_reset_handler
= srp_reset_host
,
1843 .sg_tablesize
= SRP_DEF_SG_TABLESIZE
,
1844 .can_queue
= SRP_CMD_SQ_SIZE
,
1846 .cmd_per_lun
= SRP_CMD_SQ_SIZE
,
1847 .use_clustering
= ENABLE_CLUSTERING
,
1848 .shost_attrs
= srp_host_attrs
1851 static int srp_add_target(struct srp_host
*host
, struct srp_target_port
*target
)
1853 struct srp_rport_identifiers ids
;
1854 struct srp_rport
*rport
;
1856 sprintf(target
->target_name
, "SRP.T10:%016llX",
1857 (unsigned long long) be64_to_cpu(target
->id_ext
));
1859 if (scsi_add_host(target
->scsi_host
, host
->srp_dev
->dev
->dma_device
))
1862 memcpy(ids
.port_id
, &target
->id_ext
, 8);
1863 memcpy(ids
.port_id
+ 8, &target
->ioc_guid
, 8);
1864 ids
.roles
= SRP_RPORT_ROLE_TARGET
;
1865 rport
= srp_rport_add(target
->scsi_host
, &ids
);
1866 if (IS_ERR(rport
)) {
1867 scsi_remove_host(target
->scsi_host
);
1868 return PTR_ERR(rport
);
1871 spin_lock(&host
->target_lock
);
1872 list_add_tail(&target
->list
, &host
->target_list
);
1873 spin_unlock(&host
->target_lock
);
1875 target
->state
= SRP_TARGET_LIVE
;
1877 scsi_scan_target(&target
->scsi_host
->shost_gendev
,
1878 0, target
->scsi_id
, SCAN_WILD_CARD
, 0);
1883 static void srp_release_dev(struct device
*dev
)
1885 struct srp_host
*host
=
1886 container_of(dev
, struct srp_host
, dev
);
1888 complete(&host
->released
);
1891 static struct class srp_class
= {
1892 .name
= "infiniband_srp",
1893 .dev_release
= srp_release_dev
1897 * Target ports are added by writing
1899 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
1900 * pkey=<P_Key>,service_id=<service ID>
1902 * to the add_target sysfs attribute.
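/*
 * For example (illustrative values only), a single line such as
 *
 *   id_ext=200100a0b8caf572,ioc_guid=00a0b8caf572,dgid=fe800000000000000002c90200402bd5,pkey=ffff,service_id=200100a0b8caf572
 *
 * written to add_target creates one target port; srp_parse_options()
 * below handles the full set of recognized options.
 */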
1906 SRP_OPT_ID_EXT
= 1 << 0,
1907 SRP_OPT_IOC_GUID
= 1 << 1,
1908 SRP_OPT_DGID
= 1 << 2,
1909 SRP_OPT_PKEY
= 1 << 3,
1910 SRP_OPT_SERVICE_ID
= 1 << 4,
1911 SRP_OPT_MAX_SECT
= 1 << 5,
1912 SRP_OPT_MAX_CMD_PER_LUN
= 1 << 6,
1913 SRP_OPT_IO_CLASS
= 1 << 7,
1914 SRP_OPT_INITIATOR_EXT
= 1 << 8,
1915 SRP_OPT_CMD_SG_ENTRIES
= 1 << 9,
1916 SRP_OPT_ALLOW_EXT_SG
= 1 << 10,
1917 SRP_OPT_SG_TABLESIZE
= 1 << 11,
1918 SRP_OPT_ALL
= (SRP_OPT_ID_EXT
|
1922 SRP_OPT_SERVICE_ID
),
1925 static const match_table_t srp_opt_tokens
= {
1926 { SRP_OPT_ID_EXT
, "id_ext=%s" },
1927 { SRP_OPT_IOC_GUID
, "ioc_guid=%s" },
1928 { SRP_OPT_DGID
, "dgid=%s" },
1929 { SRP_OPT_PKEY
, "pkey=%x" },
1930 { SRP_OPT_SERVICE_ID
, "service_id=%s" },
1931 { SRP_OPT_MAX_SECT
, "max_sect=%d" },
1932 { SRP_OPT_MAX_CMD_PER_LUN
, "max_cmd_per_lun=%d" },
1933 { SRP_OPT_IO_CLASS
, "io_class=%x" },
1934 { SRP_OPT_INITIATOR_EXT
, "initiator_ext=%s" },
1935 { SRP_OPT_CMD_SG_ENTRIES
, "cmd_sg_entries=%u" },
1936 { SRP_OPT_ALLOW_EXT_SG
, "allow_ext_sg=%u" },
1937 { SRP_OPT_SG_TABLESIZE
, "sg_tablesize=%u" },
1938 { SRP_OPT_ERR
, NULL
}
1941 static int srp_parse_options(const char *buf
, struct srp_target_port
*target
)
1943 char *options
, *sep_opt
;
1946 substring_t args
[MAX_OPT_ARGS
];
1952 options
= kstrdup(buf
, GFP_KERNEL
);
1957 while ((p
= strsep(&sep_opt
, ",")) != NULL
) {
1961 token
= match_token(p
, srp_opt_tokens
, args
);
1965 case SRP_OPT_ID_EXT
:
1966 p
= match_strdup(args
);
1971 target
->id_ext
= cpu_to_be64(simple_strtoull(p
, NULL
, 16));
1975 case SRP_OPT_IOC_GUID
:
1976 p
= match_strdup(args
);
1981 target
->ioc_guid
= cpu_to_be64(simple_strtoull(p
, NULL
, 16));
1986 p
= match_strdup(args
);
1991 if (strlen(p
) != 32) {
1992 printk(KERN_WARNING PFX
"bad dest GID parameter '%s'\n", p
);
1997 for (i
= 0; i
< 16; ++i
) {
1998 strlcpy(dgid
, p
+ i
* 2, 3);
1999 target
->path
.dgid
.raw
[i
] = simple_strtoul(dgid
, NULL
, 16);
2002 memcpy(target
->orig_dgid
, target
->path
.dgid
.raw
, 16);
2006 if (match_hex(args
, &token
)) {
2007 printk(KERN_WARNING PFX
"bad P_Key parameter '%s'\n", p
);
2010 target
->path
.pkey
= cpu_to_be16(token
);
2013 case SRP_OPT_SERVICE_ID
:
2014 p
= match_strdup(args
);
2019 target
->service_id
= cpu_to_be64(simple_strtoull(p
, NULL
, 16));
2020 target
->path
.service_id
= target
->service_id
;
2024 case SRP_OPT_MAX_SECT
:
2025 if (match_int(args
, &token
)) {
2026 printk(KERN_WARNING PFX
"bad max sect parameter '%s'\n", p
);
2029 target
->scsi_host
->max_sectors
= token
;
2032 case SRP_OPT_MAX_CMD_PER_LUN
:
2033 if (match_int(args
, &token
)) {
2034 printk(KERN_WARNING PFX
"bad max cmd_per_lun parameter '%s'\n", p
);
2037 target
->scsi_host
->cmd_per_lun
= min(token
, SRP_CMD_SQ_SIZE
);
2040 case SRP_OPT_IO_CLASS
:
2041 if (match_hex(args
, &token
)) {
2042 printk(KERN_WARNING PFX
"bad IO class parameter '%s' \n", p
);
2045 if (token
!= SRP_REV10_IB_IO_CLASS
&&
2046 token
!= SRP_REV16A_IB_IO_CLASS
) {
2047 printk(KERN_WARNING PFX
"unknown IO class parameter value"
2048 " %x specified (use %x or %x).\n",
2049 token
, SRP_REV10_IB_IO_CLASS
, SRP_REV16A_IB_IO_CLASS
);
2052 target
->io_class
= token
;
2055 case SRP_OPT_INITIATOR_EXT
:
2056 p
= match_strdup(args
);
2061 target
->initiator_ext
= cpu_to_be64(simple_strtoull(p
, NULL
, 16));
2065 case SRP_OPT_CMD_SG_ENTRIES
:
2066 if (match_int(args
, &token
) || token
< 1 || token
> 255) {
2067 printk(KERN_WARNING PFX
"bad max cmd_sg_entries parameter '%s'\n", p
);
2070 target
->cmd_sg_cnt
= token
;
2073 case SRP_OPT_ALLOW_EXT_SG
:
2074 if (match_int(args
, &token
)) {
2075 printk(KERN_WARNING PFX
"bad allow_ext_sg parameter '%s'\n", p
);
2078 target
->allow_ext_sg
= !!token
;
2081 case SRP_OPT_SG_TABLESIZE
:
2082 if (match_int(args
, &token
) || token
< 1 ||
2083 token
> SCSI_MAX_SG_CHAIN_SEGMENTS
) {
2084 printk(KERN_WARNING PFX
"bad max sg_tablesize parameter '%s'\n", p
);
2087 target
->sg_tablesize
= token
;
2091 printk(KERN_WARNING PFX
"unknown parameter or missing value "
2092 "'%s' in target creation request\n", p
);
2097 if ((opt_mask
& SRP_OPT_ALL
) == SRP_OPT_ALL
)
2100 for (i
= 0; i
< ARRAY_SIZE(srp_opt_tokens
); ++i
)
2101 if ((srp_opt_tokens
[i
].token
& SRP_OPT_ALL
) &&
2102 !(srp_opt_tokens
[i
].token
& opt_mask
))
2103 printk(KERN_WARNING PFX
"target creation request is "
2104 "missing parameter '%s'\n",
2105 srp_opt_tokens
[i
].pattern
);
2112 static ssize_t
srp_create_target(struct device
*dev
,
2113 struct device_attribute
*attr
,
2114 const char *buf
, size_t count
)
2116 struct srp_host
*host
=
2117 container_of(dev
, struct srp_host
, dev
);
2118 struct Scsi_Host
*target_host
;
2119 struct srp_target_port
*target
;
2120 struct ib_device
*ibdev
= host
->srp_dev
->dev
;
2121 dma_addr_t dma_addr
;
2124 target_host
= scsi_host_alloc(&srp_template
,
2125 sizeof (struct srp_target_port
));
2129 target_host
->transportt
= ib_srp_transport_template
;
2130 target_host
->max_lun
= SRP_MAX_LUN
;
2131 target_host
->max_cmd_len
= sizeof ((struct srp_cmd
*) (void *) 0L)->cdb
;
2133 target
= host_to_target(target_host
);
2135 target
->io_class
= SRP_REV16A_IB_IO_CLASS
;
2136 target
->scsi_host
= target_host
;
2137 target
->srp_host
= host
;
2138 target
->lkey
= host
->srp_dev
->mr
->lkey
;
2139 target
->rkey
= host
->srp_dev
->mr
->rkey
;
2140 target
->cmd_sg_cnt
= cmd_sg_entries
;
2141 target
->sg_tablesize
= indirect_sg_entries
? : cmd_sg_entries
;
2142 target
->allow_ext_sg
= allow_ext_sg
;
2144 ret
= srp_parse_options(buf
, target
);
2148 if (!host
->srp_dev
->fmr_pool
&& !target
->allow_ext_sg
&&
2149 target
->cmd_sg_cnt
< target
->sg_tablesize
) {
2150 printk(KERN_WARNING PFX
"No FMR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
2151 target
->sg_tablesize
= target
->cmd_sg_cnt
;
2154 target_host
->sg_tablesize
= target
->sg_tablesize
;
2155 target
->indirect_size
= target
->sg_tablesize
*
2156 sizeof (struct srp_direct_buf
);
2157 target
->max_iu_len
= sizeof (struct srp_cmd
) +
2158 sizeof (struct srp_indirect_buf
) +
2159 target
->cmd_sg_cnt
* sizeof (struct srp_direct_buf
);
2161 spin_lock_init(&target
->lock
);
2162 INIT_LIST_HEAD(&target
->free_tx
);
2163 INIT_LIST_HEAD(&target
->free_reqs
);
2164 for (i
= 0; i
< SRP_CMD_SQ_SIZE
; ++i
) {
2165 struct srp_request
*req
= &target
->req_ring
[i
];
2167 req
->fmr_list
= kmalloc(target
->cmd_sg_cnt
* sizeof (void *),
2169 req
->map_page
= kmalloc(SRP_FMR_SIZE
* sizeof (void *),
2171 req
->indirect_desc
= kmalloc(target
->indirect_size
, GFP_KERNEL
);
2172 if (!req
->fmr_list
|| !req
->map_page
|| !req
->indirect_desc
)
2175 dma_addr
= ib_dma_map_single(ibdev
, req
->indirect_desc
,
2176 target
->indirect_size
,
2178 if (ib_dma_mapping_error(ibdev
, dma_addr
))
2181 req
->indirect_dma_addr
= dma_addr
;
2183 list_add_tail(&req
->list
, &target
->free_reqs
);
2186 ib_query_gid(ibdev
, host
->port
, 0, &target
->path
.sgid
);
2188 shost_printk(KERN_DEBUG
, target
->scsi_host
, PFX
2189 "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
2190 "service_id %016llx dgid %pI6\n",
2191 (unsigned long long) be64_to_cpu(target
->id_ext
),
2192 (unsigned long long) be64_to_cpu(target
->ioc_guid
),
2193 be16_to_cpu(target
->path
.pkey
),
2194 (unsigned long long) be64_to_cpu(target
->service_id
),
2195 target
->path
.dgid
.raw
);
2197 ret
= srp_create_target_ib(target
);
2201 ret
= srp_new_cm_id(target
);
2205 target
->qp_in_error
= 0;
2206 ret
= srp_connect_target(target
);
2208 shost_printk(KERN_ERR
, target
->scsi_host
,
2209 PFX
"Connection failed\n");
2213 ret
= srp_add_target(host
, target
);
2215 goto err_disconnect
;
2220 srp_disconnect_target(target
);
2223 ib_destroy_cm_id(target
->cm_id
);
2226 srp_free_target_ib(target
);
2229 srp_free_req_data(target
);
2232 scsi_host_put(target_host
);
2237 static DEVICE_ATTR(add_target
, S_IWUSR
, NULL
, srp_create_target
);
2239 static ssize_t
show_ibdev(struct device
*dev
, struct device_attribute
*attr
,
2242 struct srp_host
*host
= container_of(dev
, struct srp_host
, dev
);
2244 return sprintf(buf
, "%s\n", host
->srp_dev
->dev
->name
);
2247 static DEVICE_ATTR(ibdev
, S_IRUGO
, show_ibdev
, NULL
);
2249 static ssize_t
show_port(struct device
*dev
, struct device_attribute
*attr
,
2252 struct srp_host
*host
= container_of(dev
, struct srp_host
, dev
);
2254 return sprintf(buf
, "%d\n", host
->port
);
2257 static DEVICE_ATTR(port
, S_IRUGO
, show_port
, NULL
);
2259 static struct srp_host
*srp_add_port(struct srp_device
*device
, u8 port
)
2261 struct srp_host
*host
;
2263 host
= kzalloc(sizeof *host
, GFP_KERNEL
);
2267 INIT_LIST_HEAD(&host
->target_list
);
2268 spin_lock_init(&host
->target_lock
);
2269 init_completion(&host
->released
);
2270 host
->srp_dev
= device
;
2273 host
->dev
.class = &srp_class
;
2274 host
->dev
.parent
= device
->dev
->dma_device
;
2275 dev_set_name(&host
->dev
, "srp-%s-%d", device
->dev
->name
, port
);
2277 if (device_register(&host
->dev
))
2279 if (device_create_file(&host
->dev
, &dev_attr_add_target
))
2281 if (device_create_file(&host
->dev
, &dev_attr_ibdev
))
2283 if (device_create_file(&host
->dev
, &dev_attr_port
))
2289 device_unregister(&host
->dev
);
2297 static void srp_add_one(struct ib_device
*device
)
2299 struct srp_device
*srp_dev
;
2300 struct ib_device_attr
*dev_attr
;
2301 struct ib_fmr_pool_param fmr_param
;
2302 struct srp_host
*host
;
2303 int max_pages_per_fmr
, fmr_page_shift
, s
, e
, p
;
2305 dev_attr
= kmalloc(sizeof *dev_attr
, GFP_KERNEL
);
2309 if (ib_query_device(device
, dev_attr
)) {
2310 printk(KERN_WARNING PFX
"Query device failed for %s\n",
2315 srp_dev
= kmalloc(sizeof *srp_dev
, GFP_KERNEL
);
2320 * Use the smallest page size supported by the HCA, down to a
2321 * minimum of 4096 bytes. We're unlikely to build large sglists
2322 * out of smaller entries.
2324 fmr_page_shift
= max(12, ffs(dev_attr
->page_size_cap
) - 1);
2325 srp_dev
->fmr_page_size
= 1 << fmr_page_shift
;
2326 srp_dev
->fmr_page_mask
= ~((u64
) srp_dev
->fmr_page_size
- 1);
2327 srp_dev
->fmr_max_size
= srp_dev
->fmr_page_size
* SRP_FMR_SIZE
;
2329 INIT_LIST_HEAD(&srp_dev
->dev_list
);
2331 srp_dev
->dev
= device
;
2332 srp_dev
->pd
= ib_alloc_pd(device
);
2333 if (IS_ERR(srp_dev
->pd
))
2336 srp_dev
->mr
= ib_get_dma_mr(srp_dev
->pd
,
2337 IB_ACCESS_LOCAL_WRITE
|
2338 IB_ACCESS_REMOTE_READ
|
2339 IB_ACCESS_REMOTE_WRITE
);
2340 if (IS_ERR(srp_dev
->mr
))
2343 for (max_pages_per_fmr
= SRP_FMR_SIZE
;
2344 max_pages_per_fmr
>= SRP_FMR_MIN_SIZE
;
2345 max_pages_per_fmr
/= 2, srp_dev
->fmr_max_size
/= 2) {
2346 memset(&fmr_param
, 0, sizeof fmr_param
);
2347 fmr_param
.pool_size
= SRP_FMR_POOL_SIZE
;
2348 fmr_param
.dirty_watermark
= SRP_FMR_DIRTY_SIZE
;
2349 fmr_param
.cache
= 1;
2350 fmr_param
.max_pages_per_fmr
= max_pages_per_fmr
;
2351 fmr_param
.page_shift
= fmr_page_shift
;
2352 fmr_param
.access
= (IB_ACCESS_LOCAL_WRITE
|
2353 IB_ACCESS_REMOTE_WRITE
|
2354 IB_ACCESS_REMOTE_READ
);
2356 srp_dev
->fmr_pool
= ib_create_fmr_pool(srp_dev
->pd
, &fmr_param
);
2357 if (!IS_ERR(srp_dev
->fmr_pool
))
2361 if (IS_ERR(srp_dev
->fmr_pool
))
2362 srp_dev
->fmr_pool
= NULL
;
2364 if (device
->node_type
== RDMA_NODE_IB_SWITCH
) {
2369 e
= device
->phys_port_cnt
;
2372 for (p
= s
; p
<= e
; ++p
) {
2373 host
= srp_add_port(srp_dev
, p
);
2375 list_add_tail(&host
->list
, &srp_dev
->dev_list
);
2378 ib_set_client_data(device
, &srp_client
, srp_dev
);
2383 ib_dealloc_pd(srp_dev
->pd
);
2392 static void srp_remove_one(struct ib_device
*device
)
2394 struct srp_device
*srp_dev
;
2395 struct srp_host
*host
, *tmp_host
;
2396 LIST_HEAD(target_list
);
2397 struct srp_target_port
*target
, *tmp_target
;
2399 srp_dev
= ib_get_client_data(device
, &srp_client
);
2401 list_for_each_entry_safe(host
, tmp_host
, &srp_dev
->dev_list
, list
) {
2402 device_unregister(&host
->dev
);
2404 * Wait for the sysfs entry to go away, so that no new
2405 * target ports can be created.
2407 wait_for_completion(&host
->released
);
2410 * Mark all target ports as removed, so we stop queueing
2411 * commands and don't try to reconnect.
2413 spin_lock(&host
->target_lock
);
2414 list_for_each_entry(target
, &host
->target_list
, list
) {
2415 spin_lock_irq(&target
->lock
);
2416 target
->state
= SRP_TARGET_REMOVED
;
2417 spin_unlock_irq(&target
->lock
);
2419 spin_unlock(&host
->target_lock
);
2422 * Wait for any reconnection tasks that may have
2423 * started before we marked our target ports as
2424 * removed, and any target port removal tasks.
2426 flush_workqueue(ib_wq
);
2428 list_for_each_entry_safe(target
, tmp_target
,
2429 &host
->target_list
, list
) {
2430 srp_remove_host(target
->scsi_host
);
2431 scsi_remove_host(target
->scsi_host
);
2432 srp_disconnect_target(target
);
2433 ib_destroy_cm_id(target
->cm_id
);
2434 srp_free_target_ib(target
);
2435 srp_free_req_data(target
);
2436 scsi_host_put(target
->scsi_host
);
2442 if (srp_dev
->fmr_pool
)
2443 ib_destroy_fmr_pool(srp_dev
->fmr_pool
);
2444 ib_dereg_mr(srp_dev
->mr
);
2445 ib_dealloc_pd(srp_dev
->pd
);
static struct srp_function_template ib_srp_transport_functions = {
};

static int __init srp_init_module(void)
{
	int ret;

	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));

	if (srp_sg_tablesize) {
		printk(KERN_WARNING PFX "srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
		if (!cmd_sg_entries)
			cmd_sg_entries = srp_sg_tablesize;
	}

	if (!cmd_sg_entries)
		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

	if (cmd_sg_entries > 255) {
		printk(KERN_WARNING PFX "Clamping cmd_sg_entries to 255\n");
		cmd_sg_entries = 255;
	}

	if (!indirect_sg_entries)
		indirect_sg_entries = cmd_sg_entries;
	else if (indirect_sg_entries < cmd_sg_entries) {
		printk(KERN_WARNING PFX "Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n", cmd_sg_entries);
		indirect_sg_entries = cmd_sg_entries;
	}

	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		return -ENOMEM;

	ret = class_register(&srp_class);
	if (ret) {
		printk(KERN_ERR PFX "couldn't register class infiniband_srp\n");
		srp_release_transport(ib_srp_transport_template);
		return ret;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		printk(KERN_ERR PFX "couldn't register IB client\n");
		srp_release_transport(ib_srp_transport_template);
		ib_sa_unregister_client(&srp_sa_client);
		class_unregister(&srp_class);
		return ret;
	}

	return 0;
}

static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);