/*
 * SCSI RDMA (SRP) transport class
 *
 * Copyright (C) 2007 FUJITA Tomonori <tomof@acm.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_srp.h>
#include "scsi_priv.h"
#include "scsi_transport_srp_internal.h"
struct srp_host_attrs {
	atomic_t next_port_id;
};
#define to_srp_host_attrs(host)	((struct srp_host_attrs *)(host)->shost_data)

#define SRP_HOST_ATTRS 0
#define SRP_RPORT_ATTRS 8
struct srp_internal {
	struct scsi_transport_template t;
	struct srp_function_template *f;

	struct device_attribute *host_attrs[SRP_HOST_ATTRS + 1];

	struct device_attribute *rport_attrs[SRP_RPORT_ATTRS + 1];
	struct transport_container rport_attr_cont;
};

#define to_srp_internal(tmpl) container_of(tmpl, struct srp_internal, t)
#define dev_to_rport(d)	container_of(d, struct srp_rport, dev)
#define transport_class_to_srp_rport(dev) dev_to_rport((dev)->parent)
static inline struct Scsi_Host *rport_to_shost(struct srp_rport *r)
{
	return dev_to_shost(r->dev.parent);
}
/**
 * srp_tmo_valid() - check timeout combination validity
 * @reconnect_delay: Reconnect delay in seconds.
 * @fast_io_fail_tmo: Fast I/O fail timeout in seconds.
 * @dev_loss_tmo: Device loss timeout in seconds.
 *
 * The combination of the timeout parameters must be such that SCSI commands
 * are finished in a reasonable time. Hence do not allow the fast I/O fail
 * timeout to exceed SCSI_DEVICE_BLOCK_MAX_TIMEOUT nor allow dev_loss_tmo to
 * exceed that limit if failing I/O fast has been disabled. Furthermore, these
 * parameters must be such that multipath can detect failed paths timely.
 * Hence do not allow all three parameters to be disabled simultaneously.
 */
int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo, int dev_loss_tmo)
{
	if (reconnect_delay < 0 && fast_io_fail_tmo < 0 && dev_loss_tmo < 0)
		return -EINVAL;
	if (reconnect_delay == 0)
		return -EINVAL;
	if (fast_io_fail_tmo > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		return -EINVAL;
	if (fast_io_fail_tmo < 0 &&
	    dev_loss_tmo > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		return -EINVAL;
	if (dev_loss_tmo >= LONG_MAX / HZ)
		return -EINVAL;
	if (fast_io_fail_tmo >= 0 && dev_loss_tmo >= 0 &&
	    fast_io_fail_tmo >= dev_loss_tmo)
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(srp_tmo_valid);
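
/*
 * Illustrative note, not part of the original file: with the fallback values
 * that srp_rport_add() below uses when the LLD does not provide its own
 * (reconnect_delay = 10, fast_io_fail_tmo = 15, dev_loss_tmo = 60 seconds),
 * srp_tmo_valid(10, 15, 60) returns 0. srp_tmo_valid(10, 60, 15) fails
 * because fast_io_fail_tmo must be smaller than dev_loss_tmo, and
 * srp_tmo_valid(-1, -1, -1) fails because at least one of the three recovery
 * mechanisms has to stay enabled.
 */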
static int srp_host_setup(struct transport_container *tc, struct device *dev,
			  struct device *cdev)
{
	struct Scsi_Host *shost = dev_to_shost(dev);
	struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);

	atomic_set(&srp_host->next_port_id, 0);
	return 0;
}

static DECLARE_TRANSPORT_CLASS(srp_host_class, "srp_host", srp_host_setup,
			       NULL, NULL);
static DECLARE_TRANSPORT_CLASS(srp_rport_class, "srp_remote_ports",
			       NULL, NULL, NULL);
#define SRP_PID(p) \
	(p)->port_id[0], (p)->port_id[1], (p)->port_id[2], (p)->port_id[3], \
	(p)->port_id[4], (p)->port_id[5], (p)->port_id[6], (p)->port_id[7], \
	(p)->port_id[8], (p)->port_id[9], (p)->port_id[10], (p)->port_id[11], \
	(p)->port_id[12], (p)->port_id[13], (p)->port_id[14], (p)->port_id[15]

#define SRP_PID_FMT "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:" \
	"%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x"
static ssize_t
show_srp_rport_id(struct device *dev, struct device_attribute *attr,
		  char *buf)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);
	return sprintf(buf, SRP_PID_FMT "\n", SRP_PID(rport));
}

static DEVICE_ATTR(port_id, S_IRUGO, show_srp_rport_id, NULL);
static const struct {
	u32 value;
	char *name;
} srp_rport_role_names[] = {
	{SRP_RPORT_ROLE_INITIATOR, "SRP Initiator"},
	{SRP_RPORT_ROLE_TARGET, "SRP Target"},
};
static ssize_t
show_srp_rport_roles(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);
	int i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(srp_rport_role_names); i++)
		if (srp_rport_role_names[i].value == rport->roles) {
			name = srp_rport_role_names[i].name;
			break;
		}
	return sprintf(buf, "%s\n", name ? : "unknown");
}

static DEVICE_ATTR(roles, S_IRUGO, show_srp_rport_roles, NULL);
static ssize_t store_srp_rport_delete(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);
	struct Scsi_Host *shost = dev_to_shost(dev);
	struct srp_internal *i = to_srp_internal(shost->transportt);

	if (i->f->rport_delete) {
		i->f->rport_delete(rport);
		return count;
	} else {
		return -ENOSYS;
	}
}

static DEVICE_ATTR(delete, S_IWUSR, NULL, store_srp_rport_delete);
static ssize_t show_srp_rport_state(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	static const char *const state_name[] = {
		[SRP_RPORT_RUNNING] = "running",
		[SRP_RPORT_BLOCKED] = "blocked",
		[SRP_RPORT_FAIL_FAST] = "fail-fast",
		[SRP_RPORT_LOST] = "lost",
	};
	struct srp_rport *rport = transport_class_to_srp_rport(dev);
	enum srp_rport_state state = rport->state;

	return sprintf(buf, "%s\n",
		       (unsigned)state < ARRAY_SIZE(state_name) ?
		       state_name[state] : "???");
}

static DEVICE_ATTR(state, S_IRUGO, show_srp_rport_state, NULL);
static ssize_t srp_show_tmo(char *buf, int tmo)
{
	return tmo >= 0 ? sprintf(buf, "%d\n", tmo) : sprintf(buf, "off\n");
}
static int srp_parse_tmo(int *tmo, const char *buf)
{
	int res = 0;

	if (strncmp(buf, "off", 3) != 0)
		res = kstrtoint(buf, 0, tmo);
	else
		*tmo = -1;

	return res;
}
static ssize_t show_reconnect_delay(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);

	return srp_show_tmo(buf, rport->reconnect_delay);
}
static ssize_t store_reconnect_delay(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, const size_t count)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);
	int res, delay;

	res = srp_parse_tmo(&delay, buf);
	if (res)
		goto out;
	res = srp_tmo_valid(delay, rport->fast_io_fail_tmo,
			    rport->dev_loss_tmo);
	if (res)
		goto out;

	if (rport->reconnect_delay <= 0 && delay > 0 &&
	    rport->state != SRP_RPORT_RUNNING) {
		queue_delayed_work(system_long_wq, &rport->reconnect_work,
				   delay * HZ);
	} else if (delay <= 0) {
		cancel_delayed_work(&rport->reconnect_work);
	}
	rport->reconnect_delay = delay;
	res = count;

out:
	return res;
}

static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR, show_reconnect_delay,
		   store_reconnect_delay);
static ssize_t show_failed_reconnects(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);

	return sprintf(buf, "%d\n", rport->failed_reconnects);
}

static DEVICE_ATTR(failed_reconnects, S_IRUGO, show_failed_reconnects, NULL);
static ssize_t show_srp_rport_fast_io_fail_tmo(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);

	return srp_show_tmo(buf, rport->fast_io_fail_tmo);
}
static ssize_t store_srp_rport_fast_io_fail_tmo(struct device *dev,
						struct device_attribute *attr,
						const char *buf, size_t count)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);
	int res;
	int fast_io_fail_tmo;

	res = srp_parse_tmo(&fast_io_fail_tmo, buf);
	if (res)
		goto out;
	res = srp_tmo_valid(rport->reconnect_delay, fast_io_fail_tmo,
			    rport->dev_loss_tmo);
	if (res)
		goto out;
	rport->fast_io_fail_tmo = fast_io_fail_tmo;
	res = count;

out:
	return res;
}

static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
		   show_srp_rport_fast_io_fail_tmo,
		   store_srp_rport_fast_io_fail_tmo);
static ssize_t show_srp_rport_dev_loss_tmo(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);

	return srp_show_tmo(buf, rport->dev_loss_tmo);
}
static ssize_t store_srp_rport_dev_loss_tmo(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct srp_rport *rport = transport_class_to_srp_rport(dev);
	int res;
	int dev_loss_tmo;

	res = srp_parse_tmo(&dev_loss_tmo, buf);
	if (res)
		goto out;
	res = srp_tmo_valid(rport->reconnect_delay, rport->fast_io_fail_tmo,
			    dev_loss_tmo);
	if (res)
		goto out;
	rport->dev_loss_tmo = dev_loss_tmo;
	res = count;

out:
	return res;
}

static DEVICE_ATTR(dev_loss_tmo, S_IRUGO | S_IWUSR,
		   show_srp_rport_dev_loss_tmo,
		   store_srp_rport_dev_loss_tmo);
static int srp_rport_set_state(struct srp_rport *rport,
			       enum srp_rport_state new_state)
{
	enum srp_rport_state old_state = rport->state;

	lockdep_assert_held(&rport->mutex);

	switch (new_state) {
	case SRP_RPORT_RUNNING:
		switch (old_state) {
		case SRP_RPORT_LOST:
			goto invalid;
		default:
			break;
		}
		break;
	case SRP_RPORT_BLOCKED:
		switch (old_state) {
		case SRP_RPORT_RUNNING:
			break;
		default:
			goto invalid;
		}
		break;
	case SRP_RPORT_FAIL_FAST:
		switch (old_state) {
		case SRP_RPORT_LOST:
			goto invalid;
		default:
			break;
		}
		break;
	case SRP_RPORT_LOST:
		break;
	}
	rport->state = new_state;
	return 0;

invalid:
	return -EINVAL;
}
/**
 * srp_reconnect_work() - reconnect and schedule a new attempt if necessary
 * @work: Work structure used for scheduling this operation.
 */
static void srp_reconnect_work(struct work_struct *work)
{
	struct srp_rport *rport = container_of(to_delayed_work(work),
					struct srp_rport, reconnect_work);
	struct Scsi_Host *shost = rport_to_shost(rport);
	int delay, res;

	res = srp_reconnect_rport(rport);
	if (res != 0) {
		shost_printk(KERN_ERR, shost,
			     "reconnect attempt %d failed (%d)\n",
			     ++rport->failed_reconnects, res);
		delay = rport->reconnect_delay *
			min(100, max(1, rport->failed_reconnects - 10));
		if (delay > 0)
			queue_delayed_work(system_long_wq,
					   &rport->reconnect_work, delay * HZ);
	}
}
static void __rport_fail_io_fast(struct srp_rport *rport)
{
	struct Scsi_Host *shost = rport_to_shost(rport);
	struct srp_internal *i;

	lockdep_assert_held(&rport->mutex);

	if (srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST))
		return;
	scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE);

	/* Involve the LLD if possible to terminate all I/O on the rport. */
	i = to_srp_internal(shost->transportt);
	if (i->f->terminate_rport_io)
		i->f->terminate_rport_io(rport);
}
/**
 * rport_fast_io_fail_timedout() - fast I/O failure timeout handler
 * @work: Work structure used for scheduling this operation.
 */
static void rport_fast_io_fail_timedout(struct work_struct *work)
{
	struct srp_rport *rport = container_of(to_delayed_work(work),
					struct srp_rport, fast_io_fail_work);
	struct Scsi_Host *shost = rport_to_shost(rport);

	pr_info("fast_io_fail_tmo expired for SRP %s / %s.\n",
		dev_name(&rport->dev), dev_name(&shost->shost_gendev));

	mutex_lock(&rport->mutex);
	if (rport->state == SRP_RPORT_BLOCKED)
		__rport_fail_io_fast(rport);
	mutex_unlock(&rport->mutex);
}
/**
 * rport_dev_loss_timedout() - device loss timeout handler
 * @work: Work structure used for scheduling this operation.
 */
static void rport_dev_loss_timedout(struct work_struct *work)
{
	struct srp_rport *rport = container_of(to_delayed_work(work),
					struct srp_rport, dev_loss_work);
	struct Scsi_Host *shost = rport_to_shost(rport);
	struct srp_internal *i = to_srp_internal(shost->transportt);

	pr_info("dev_loss_tmo expired for SRP %s / %s.\n",
		dev_name(&rport->dev), dev_name(&shost->shost_gendev));

	mutex_lock(&rport->mutex);
	WARN_ON(srp_rport_set_state(rport, SRP_RPORT_LOST) != 0);
	scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE);
	mutex_unlock(&rport->mutex);

	i->f->rport_delete(rport);
}
static void __srp_start_tl_fail_timers(struct srp_rport *rport)
{
	struct Scsi_Host *shost = rport_to_shost(rport);
	int delay, fast_io_fail_tmo, dev_loss_tmo;

	lockdep_assert_held(&rport->mutex);

	delay = rport->reconnect_delay;
	fast_io_fail_tmo = rport->fast_io_fail_tmo;
	dev_loss_tmo = rport->dev_loss_tmo;
	pr_debug("%s current state: %d\n", dev_name(&shost->shost_gendev),
		 rport->state);

	if (rport->state == SRP_RPORT_LOST)
		return;
	if (delay > 0)
		queue_delayed_work(system_long_wq, &rport->reconnect_work,
				   1UL * delay * HZ);
	if (srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) {
		pr_debug("%s new state: %d\n", dev_name(&shost->shost_gendev),
			 rport->state);
		scsi_target_block(&shost->shost_gendev);
		if (fast_io_fail_tmo >= 0)
			queue_delayed_work(system_long_wq,
					   &rport->fast_io_fail_work,
					   1UL * fast_io_fail_tmo * HZ);
		if (dev_loss_tmo >= 0)
			queue_delayed_work(system_long_wq,
					   &rport->dev_loss_work,
					   1UL * dev_loss_tmo * HZ);
	}
}
/**
 * srp_start_tl_fail_timers() - start the transport layer failure timers
 * @rport: SRP target port.
 *
 * Start the transport layer fast I/O failure and device loss timers. Do not
 * modify a timer that was already started.
 */
void srp_start_tl_fail_timers(struct srp_rport *rport)
{
	mutex_lock(&rport->mutex);
	__srp_start_tl_fail_timers(rport);
	mutex_unlock(&rport->mutex);
}
EXPORT_SYMBOL(srp_start_tl_fail_timers);
/**
 * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
 * @shost: SCSI host for which to count the number of scsi_request_fn() callers.
 */
static int scsi_request_fn_active(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;
	struct request_queue *q;
	int request_fn_active = 0;

	shost_for_each_device(sdev, shost) {
		q = sdev->request_queue;

		spin_lock_irq(q->queue_lock);
		request_fn_active += q->request_fn_active;
		spin_unlock_irq(q->queue_lock);
	}

	return request_fn_active;
}
/**
 * srp_reconnect_rport() - reconnect to an SRP target port
 * @rport: SRP target port.
 *
 * Blocks SCSI command queueing before invoking reconnect() such that
 * queuecommand() won't be invoked concurrently with reconnect() from outside
 * the SCSI EH. This is important since a reconnect() implementation may
 * reallocate resources needed by queuecommand().
 *
 * Notes:
 * - This function neither waits until outstanding requests have finished nor
 *   tries to abort these. It is the responsibility of the reconnect()
 *   function to finish outstanding commands before reconnecting to the target.
 * - It is the responsibility of the caller to ensure that the resources
 *   reallocated by the reconnect() function won't be used while this function
 *   is in progress. One possible strategy is to invoke this function from
 *   the context of the SCSI EH thread only. Another possible strategy is to
 *   lock the rport mutex inside each SCSI LLD callback that can be invoked by
 *   the SCSI EH (the scsi_host_template.eh_*() functions and also the
 *   scsi_host_template.queuecommand() function).
 */
int srp_reconnect_rport(struct srp_rport *rport)
{
	struct Scsi_Host *shost = rport_to_shost(rport);
	struct srp_internal *i = to_srp_internal(shost->transportt);
	struct scsi_device *sdev;
	int res;

	pr_debug("SCSI host %s\n", dev_name(&shost->shost_gendev));

	res = mutex_lock_interruptible(&rport->mutex);
	if (res)
		goto out;
	scsi_target_block(&shost->shost_gendev);
	while (scsi_request_fn_active(shost))
		msleep(20);
	res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
	pr_debug("%s (state %d): transport.reconnect() returned %d\n",
		 dev_name(&shost->shost_gendev), rport->state, res);
	if (res == 0) {
		cancel_delayed_work(&rport->fast_io_fail_work);
		cancel_delayed_work(&rport->dev_loss_work);

		rport->failed_reconnects = 0;
		srp_rport_set_state(rport, SRP_RPORT_RUNNING);
		scsi_target_unblock(&shost->shost_gendev, SDEV_RUNNING);
		/*
		 * If the SCSI error handler has offlined one or more devices,
		 * invoking scsi_target_unblock() won't change the state of
		 * these devices into running so do that explicitly.
		 */
		spin_lock_irq(shost->host_lock);
		__shost_for_each_device(sdev, shost)
			if (sdev->sdev_state == SDEV_OFFLINE)
				sdev->sdev_state = SDEV_RUNNING;
		spin_unlock_irq(shost->host_lock);
	} else if (rport->state == SRP_RPORT_RUNNING) {
		/*
		 * srp_reconnect_rport() has been invoked with fast_io_fail
		 * and dev_loss off. Mark the port as failed and start the TL
		 * failure timers if these had not yet been started.
		 */
		__rport_fail_io_fast(rport);
		scsi_target_unblock(&shost->shost_gendev,
				    SDEV_TRANSPORT_OFFLINE);
		__srp_start_tl_fail_timers(rport);
	} else if (rport->state != SRP_RPORT_BLOCKED) {
		scsi_target_unblock(&shost->shost_gendev,
				    SDEV_TRANSPORT_OFFLINE);
	}
	mutex_unlock(&rport->mutex);

out:
	return res;
}
EXPORT_SYMBOL(srp_reconnect_rport);
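
/*
 * Illustrative sketch (hypothetical LLD code, not part of this file): one way
 * to follow the "invoke from the SCSI EH thread only" strategy described
 * above is to call srp_reconnect_rport() from the driver's host reset
 * handler. The my_* names below are made up for the example:
 *
 *	static int my_srp_host_reset(struct scsi_cmnd *scmd)
 *	{
 *		struct my_target *target = host_to_my_target(scmd->device->host);
 *
 *		return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
 *	}
 */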
/**
 * srp_timed_out() - SRP transport intercept of the SCSI timeout EH
 * @scmd: SCSI command.
 *
 * If a timeout occurs while an rport is in the blocked state, ask the SCSI
 * EH to continue waiting (BLK_EH_RESET_TIMER). Otherwise let the SCSI core
 * handle the timeout (BLK_EH_NOT_HANDLED).
 *
 * Note: This function is called from soft-IRQ context and with the request
 * queue lock held.
 */
static enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd)
{
	struct scsi_device *sdev = scmd->device;
	struct Scsi_Host *shost = sdev->host;
	struct srp_internal *i = to_srp_internal(shost->transportt);

	pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev));
	return i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ?
		BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED;
}
static void srp_rport_release(struct device *dev)
{
	struct srp_rport *rport = dev_to_rport(dev);

	put_device(dev->parent);
	kfree(rport);
}

static int scsi_is_srp_rport(const struct device *dev)
{
	return dev->release == srp_rport_release;
}
static int srp_rport_match(struct attribute_container *cont,
			   struct device *dev)
{
	struct Scsi_Host *shost;
	struct srp_internal *i;

	if (!scsi_is_srp_rport(dev))
		return 0;

	shost = dev_to_shost(dev->parent);
	if (!shost->transportt)
		return 0;
	if (shost->transportt->host_attrs.ac.class != &srp_host_class.class)
		return 0;

	i = to_srp_internal(shost->transportt);
	return &i->rport_attr_cont.ac == cont;
}
static int srp_host_match(struct attribute_container *cont, struct device *dev)
{
	struct Scsi_Host *shost;
	struct srp_internal *i;

	if (!scsi_is_host_device(dev))
		return 0;

	shost = dev_to_shost(dev);
	if (!shost->transportt)
		return 0;
	if (shost->transportt->host_attrs.ac.class != &srp_host_class.class)
		return 0;

	i = to_srp_internal(shost->transportt);
	return &i->t.host_attrs.ac == cont;
}
/**
 * srp_rport_get() - increment rport reference count
 * @rport: SRP target port.
 */
void srp_rport_get(struct srp_rport *rport)
{
	get_device(&rport->dev);
}
EXPORT_SYMBOL(srp_rport_get);
/**
 * srp_rport_put() - decrement rport reference count
 * @rport: SRP target port.
 */
void srp_rport_put(struct srp_rport *rport)
{
	put_device(&rport->dev);
}
EXPORT_SYMBOL(srp_rport_put);
/**
 * srp_rport_add - add a SRP remote port to the device hierarchy
 * @shost:	scsi host the remote port is connected to.
 * @ids:	The port id for the remote port.
 *
 * Publishes a port to the rest of the system.
 */
struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
				struct srp_rport_identifiers *ids)
{
	struct srp_rport *rport;
	struct device *parent = &shost->shost_gendev;
	struct srp_internal *i = to_srp_internal(shost->transportt);
	int id, ret;

	rport = kzalloc(sizeof(*rport), GFP_KERNEL);
	if (!rport)
		return ERR_PTR(-ENOMEM);

	mutex_init(&rport->mutex);

	device_initialize(&rport->dev);

	rport->dev.parent = get_device(parent);
	rport->dev.release = srp_rport_release;

	memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
	rport->roles = ids->roles;

	if (i->f->reconnect)
		rport->reconnect_delay = i->f->reconnect_delay ?
			*i->f->reconnect_delay : 10;
	INIT_DELAYED_WORK(&rport->reconnect_work, srp_reconnect_work);
	rport->fast_io_fail_tmo = i->f->fast_io_fail_tmo ?
		*i->f->fast_io_fail_tmo : 15;
	rport->dev_loss_tmo = i->f->dev_loss_tmo ? *i->f->dev_loss_tmo : 60;
	INIT_DELAYED_WORK(&rport->fast_io_fail_work,
			  rport_fast_io_fail_timedout);
	INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);

	id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
	dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);

	transport_setup_device(&rport->dev);

	ret = device_add(&rport->dev);
	if (ret) {
		transport_destroy_device(&rport->dev);
		put_device(&rport->dev);
		return ERR_PTR(ret);
	}

	if (shost->active_mode & MODE_TARGET &&
	    ids->roles == SRP_RPORT_ROLE_INITIATOR) {
		ret = srp_tgt_it_nexus_create(shost, (unsigned long)rport,
					      rport->port_id);
		if (ret) {
			device_del(&rport->dev);
			transport_destroy_device(&rport->dev);
			put_device(&rport->dev);
			return ERR_PTR(ret);
		}
	}

	transport_add_device(&rport->dev);
	transport_configure_device(&rport->dev);

	return rport;
}
EXPORT_SYMBOL_GPL(srp_rport_add);
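
/*
 * Illustrative sketch (hypothetical LLD code, not part of this file): a
 * driver typically fills in a struct srp_rport_identifiers with the 16-byte
 * port identifier and the role of the remote port before publishing it:
 *
 *	struct srp_rport_identifiers ids;
 *	struct srp_rport *rport;
 *
 *	memcpy(ids.port_id, remote_port_id, sizeof(ids.port_id));
 *	ids.roles = SRP_RPORT_ROLE_TARGET;
 *	rport = srp_rport_add(shost, &ids);
 *	if (IS_ERR(rport))
 *		return PTR_ERR(rport);
 *
 * where remote_port_id and shost are provided by the caller.
 */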
/**
 * srp_rport_del  -  remove a SRP remote port
 * @rport:	SRP remote port to remove
 *
 * Removes the specified SRP remote port.
 */
void srp_rport_del(struct srp_rport *rport)
{
	struct device *dev = &rport->dev;
	struct Scsi_Host *shost = dev_to_shost(dev->parent);

	if (shost->active_mode & MODE_TARGET &&
	    rport->roles == SRP_RPORT_ROLE_INITIATOR)
		srp_tgt_it_nexus_destroy(shost, (unsigned long)rport);

	transport_remove_device(dev);
	device_del(dev);
	transport_destroy_device(dev);

	put_device(dev);
}
EXPORT_SYMBOL_GPL(srp_rport_del);
static int do_srp_rport_del(struct device *dev, void *data)
{
	if (scsi_is_srp_rport(dev))
		srp_rport_del(dev_to_rport(dev));

	return 0;
}
/**
 * srp_remove_host  -  tear down a Scsi_Host's SRP data structures
 * @shost:	Scsi Host that is torn down
 *
 * Removes all SRP remote ports for a given Scsi_Host.
 * Must be called just before scsi_remove_host for SRP HBAs.
 */
void srp_remove_host(struct Scsi_Host *shost)
{
	device_for_each_child(&shost->shost_gendev, NULL, do_srp_rport_del);
}
EXPORT_SYMBOL_GPL(srp_remove_host);
/**
 * srp_stop_rport_timers - stop the transport layer recovery timers
 * @rport: SRP remote port for which to stop the timers.
 *
 * Must be called after srp_remove_host() and scsi_remove_host(). The caller
 * must hold a reference on the rport (rport->dev) and on the SCSI host
 * (rport->dev.parent).
 */
void srp_stop_rport_timers(struct srp_rport *rport)
{
	mutex_lock(&rport->mutex);
	if (rport->state == SRP_RPORT_BLOCKED)
		__rport_fail_io_fast(rport);
	srp_rport_set_state(rport, SRP_RPORT_LOST);
	mutex_unlock(&rport->mutex);

	cancel_delayed_work_sync(&rport->reconnect_work);
	cancel_delayed_work_sync(&rport->fast_io_fail_work);
	cancel_delayed_work_sync(&rport->dev_loss_work);
}
EXPORT_SYMBOL_GPL(srp_stop_rport_timers);
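
/*
 * Illustrative teardown order (hypothetical LLD code, not part of this file),
 * following the requirement above that srp_stop_rport_timers() runs after
 * both srp_remove_host() and scsi_remove_host() while references on the
 * rport and on the SCSI host are still held:
 *
 *	srp_remove_host(shost);
 *	scsi_remove_host(shost);
 *	srp_stop_rport_timers(rport);
 *	srp_rport_put(rport);
 *	scsi_host_put(shost);
 */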
static int srp_tsk_mgmt_response(struct Scsi_Host *shost, u64 nexus, u64 tm_id,
				 int result)
{
	struct srp_internal *i = to_srp_internal(shost->transportt);
	return i->f->tsk_mgmt_response(shost, nexus, tm_id, result);
}
static int srp_it_nexus_response(struct Scsi_Host *shost, u64 nexus, int result)
{
	struct srp_internal *i = to_srp_internal(shost->transportt);
	return i->f->it_nexus_response(shost, nexus, result);
}
/**
 * srp_attach_transport  -  instantiate SRP transport template
 * @ft:		SRP transport class function template
 */
struct scsi_transport_template *
srp_attach_transport(struct srp_function_template *ft)
{
	int count;
	struct srp_internal *i;

	i = kzalloc(sizeof(*i), GFP_KERNEL);
	if (!i)
		return NULL;

	i->t.eh_timed_out = srp_timed_out;

	i->t.tsk_mgmt_response = srp_tsk_mgmt_response;
	i->t.it_nexus_response = srp_it_nexus_response;

	i->t.host_size = sizeof(struct srp_host_attrs);
	i->t.host_attrs.ac.attrs = &i->host_attrs[0];
	i->t.host_attrs.ac.class = &srp_host_class.class;
	i->t.host_attrs.ac.match = srp_host_match;
	i->host_attrs[0] = NULL;
	transport_container_register(&i->t.host_attrs);

	i->rport_attr_cont.ac.attrs = &i->rport_attrs[0];
	i->rport_attr_cont.ac.class = &srp_rport_class.class;
	i->rport_attr_cont.ac.match = srp_rport_match;

	count = 0;
	i->rport_attrs[count++] = &dev_attr_port_id;
	i->rport_attrs[count++] = &dev_attr_roles;
	if (ft->has_rport_state) {
		i->rport_attrs[count++] = &dev_attr_state;
		i->rport_attrs[count++] = &dev_attr_fast_io_fail_tmo;
		i->rport_attrs[count++] = &dev_attr_dev_loss_tmo;
	}
	if (ft->reconnect) {
		i->rport_attrs[count++] = &dev_attr_reconnect_delay;
		i->rport_attrs[count++] = &dev_attr_failed_reconnects;
	}
	if (ft->rport_delete)
		i->rport_attrs[count++] = &dev_attr_delete;
	i->rport_attrs[count++] = NULL;
	BUG_ON(count > ARRAY_SIZE(i->rport_attrs));

	transport_container_register(&i->rport_attr_cont);

	i->f = ft;

	return &i->t;
}
EXPORT_SYMBOL_GPL(srp_attach_transport);
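
/*
 * Illustrative sketch (hypothetical LLD code, not part of this file): an SRP
 * initiator driver attaches the transport class at module load time by
 * filling in a srp_function_template and keeping the returned template for
 * its SCSI host. The my_* names are made up for the example; only template
 * members that are used elsewhere in this file are shown:
 *
 *	static struct srp_function_template my_srp_ft = {
 *		.has_rport_state	= true,
 *		.reset_timer_if_blocked	= true,
 *		.reconnect_delay	= &my_reconnect_delay,
 *		.fast_io_fail_tmo	= &my_fast_io_fail_tmo,
 *		.dev_loss_tmo		= &my_dev_loss_tmo,
 *		.reconnect		= my_rport_reconnect,
 *		.terminate_rport_io	= my_rport_terminate_io,
 *		.rport_delete		= my_rport_delete,
 *	};
 *
 *	my_transport_template = srp_attach_transport(&my_srp_ft);
 *	if (!my_transport_template)
 *		return -ENOMEM;
 */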
/**
 * srp_release_transport  -  release SRP transport template instance
 * @t:		transport template instance
 */
void srp_release_transport(struct scsi_transport_template *t)
{
	struct srp_internal *i = to_srp_internal(t);

	transport_container_unregister(&i->t.host_attrs);
	transport_container_unregister(&i->rport_attr_cont);

	kfree(i);
}
EXPORT_SYMBOL_GPL(srp_release_transport);
static __init int srp_transport_init(void)
{
	int ret;

	ret = transport_class_register(&srp_host_class);
	if (ret)
		return ret;
	ret = transport_class_register(&srp_rport_class);
	if (ret)
		goto unregister_host_class;

	return 0;
unregister_host_class:
	transport_class_unregister(&srp_host_class);
	return ret;
}
static void __exit srp_transport_exit(void)
{
	transport_class_unregister(&srp_host_class);
	transport_class_unregister(&srp_rport_class);
}
MODULE_AUTHOR("FUJITA Tomonori");
MODULE_DESCRIPTION("SRP Transport Attributes");
MODULE_LICENSE("GPL");

module_init(srp_transport_init);
module_exit(srp_transport_exit);