// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SBP2 target driver (SCSI over IEEE1394 in target mode)
 *
 * Copyright (C) 2011 Chris Boot <bootc@bootc.net>
 */

#define KMSG_COMPONENT "sbp_target"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <asm/unaligned.h>

#include "sbp_target.h"
/* FireWire address region for management and command block address handlers */
static const struct fw_address_region sbp_register_region = {
	.start	= CSR_REGISTER_BASE + 0x10000,
	.end	= 0x1000000000000ULL,
};
static const u32 sbp_unit_directory_template[] = {
	0x1200609e, /* unit_specifier_id: NCITS/T10 */
	0x13010483, /* unit_sw_version: 1155D Rev 4 */
	0x3800609e, /* command_set_specifier_id: NCITS/T10 */
	0x390104d8, /* command_set: SPC-2 */
	0x3b000000, /* command_set_revision: 0 */
	0x3c000001, /* firmware_revision: 1 */
};
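
/*
 * Each quadlet in the template above is an IEEE 1212 immediate directory
 * entry: the top byte is the key and the low 24 bits are the value. For
 * example, 0x1200609e is key 0x12 (unit_specifier_id) carrying the value
 * 0x00609e, the NCITS/T10 organisationally unique identifier.
 */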

#define SESSION_MAINTENANCE_INTERVAL HZ

static atomic_t login_id = ATOMIC_INIT(0);

static void session_maintenance_work(struct work_struct *);
static int sbp_run_transaction(struct fw_card *, int, int, int, int,
		unsigned long long, void *, size_t);

static int read_peer_guid(u64 *guid, const struct sbp_management_request *req)
{
	int ret;
	__be32 high, low;

	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
			req->node_addr, req->generation, req->speed,
			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 3 * 4,
			&high, sizeof(high));
	if (ret != RCODE_COMPLETE)
		return ret;

	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
			req->node_addr, req->generation, req->speed,
			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 4 * 4,
			&low, sizeof(low));
	if (ret != RCODE_COMPLETE)
		return ret;

	*guid = (u64)be32_to_cpu(high) << 32 | be32_to_cpu(low);

	return RCODE_COMPLETE;
}
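
/*
 * Note: quadlets 3 and 4 of the peer's configuration ROM (offsets 0x40c and
 * 0x410 relative to CSR_REGISTER_BASE) hold the two halves of its EUI-64,
 * which is why read_peer_guid() combines exactly those two quadlet reads
 * into one 64-bit GUID.
 */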

static struct sbp_session *sbp_session_find_by_guid(
		struct sbp_tpg *tpg, u64 guid)
{
	struct se_session *se_sess;
	struct sbp_session *sess, *found = NULL;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;
		if (sess->guid == guid)
			found = sess;
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return found;
}

static struct sbp_login_descriptor *sbp_login_find_by_lun(
		struct sbp_session *session, u32 unpacked_lun)
{
	struct sbp_login_descriptor *login, *found = NULL;

	spin_lock_bh(&session->lock);
	list_for_each_entry(login, &session->login_list, link) {
		if (login->login_lun == unpacked_lun)
			found = login;
	}
	spin_unlock_bh(&session->lock);

	return found;
}

static int sbp_login_count_all_by_lun(
		struct sbp_tpg *tpg,
		u32 unpacked_lun,
		int exclusive)
{
	struct se_session *se_sess;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login;
	int count = 0;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;

		spin_lock_bh(&sess->lock);
		list_for_each_entry(login, &sess->login_list, link) {
			if (login->login_lun != unpacked_lun)
				continue;

			if (!exclusive || login->exclusive)
				count++;
		}
		spin_unlock_bh(&sess->lock);
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return count;
}

static struct sbp_login_descriptor *sbp_login_find_by_id(
		struct sbp_tpg *tpg, int login_id)
{
	struct se_session *se_sess;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login, *found = NULL;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;

		spin_lock_bh(&sess->lock);
		list_for_each_entry(login, &sess->login_list, link) {
			if (login->login_id == login_id)
				found = login;
		}
		spin_unlock_bh(&sess->lock);
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return found;
}

static u32 sbp_get_lun_from_tpg(struct sbp_tpg *tpg, u32 login_lun, int *err)
{
	struct se_portal_group *se_tpg = &tpg->se_tpg;
	struct se_lun *se_lun;

	rcu_read_lock();
	hlist_for_each_entry_rcu(se_lun, &se_tpg->tpg_lun_hlist, link) {
		if (se_lun->unpacked_lun == login_lun) {
			rcu_read_unlock();
			*err = 0;
			return login_lun;
		}
	}
	rcu_read_unlock();

	*err = -ENODEV;
	return login_lun;
}

static struct sbp_session *sbp_session_create(
		struct sbp_tpg *tpg,
		u64 guid)
{
	struct sbp_session *sess;
	int ret;
	char guid_str[17];

	snprintf(guid_str, sizeof(guid_str), "%016llx", guid);

	sess = kmalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&sess->lock);
	INIT_LIST_HEAD(&sess->login_list);
	INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
	sess->guid = guid;

	sess->se_sess = target_setup_session(&tpg->se_tpg, 128,
					     sizeof(struct sbp_target_request),
					     TARGET_PROT_NORMAL, guid_str,
					     sess, NULL);
	if (IS_ERR(sess->se_sess)) {
		pr_err("failed to init se_session\n");
		ret = PTR_ERR(sess->se_sess);
		kfree(sess);
		return ERR_PTR(ret);
	}

	return sess;
}

static void sbp_session_release(struct sbp_session *sess, bool cancel_work)
{
	spin_lock_bh(&sess->lock);
	if (!list_empty(&sess->login_list)) {
		spin_unlock_bh(&sess->lock);
		return;
	}
	spin_unlock_bh(&sess->lock);

	if (cancel_work)
		cancel_delayed_work_sync(&sess->maint_work);

	target_remove_session(sess->se_sess);

	if (sess->card)
		fw_card_put(sess->card);

	kfree(sess);
}

static void sbp_target_agent_unregister(struct sbp_target_agent *);

static void sbp_login_release(struct sbp_login_descriptor *login,
	bool cancel_work)
{
	struct sbp_session *sess = login->sess;

	/* FIXME: abort/wait on tasks */

	sbp_target_agent_unregister(login->tgt_agt);

	if (sess) {
		spin_lock_bh(&sess->lock);
		list_del(&login->link);
		spin_unlock_bh(&sess->lock);

		sbp_session_release(sess, cancel_work);
	}

	kfree(login);
}

static struct sbp_target_agent *sbp_target_agent_register(
		struct sbp_login_descriptor *);

static void sbp_management_request_login(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login;
	struct sbp_login_response_block *response;
	u64 guid;
	u32 unpacked_lun;
	int login_response_len, ret;

	unpacked_lun = sbp_get_lun_from_tpg(tpg,
			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)), &ret);
	if (ret) {
		pr_notice("login to unknown LUN: %d\n",
			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LUN_NOTSUPP));
		return;
	}

	ret = read_peer_guid(&guid, req);
	if (ret != RCODE_COMPLETE) {
		pr_warn("failed to read peer GUID: %d\n", ret);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n",
		unpacked_lun, guid);

	sess = sbp_session_find_by_guid(tpg, guid);
	if (sess) {
		login = sbp_login_find_by_lun(sess, unpacked_lun);
		if (login) {
			pr_notice("initiator already logged-in\n");

			/*
			 * SBP-2 R4 says we should return access denied, but
			 * that can confuse initiators. Instead we need to
			 * treat this like a reconnect, but send the login
			 * response block like a fresh login.
			 *
			 * This is required particularly in the case of Apple
			 * devices booting off the FireWire target, where
			 * the firmware has an active login to the target. When
			 * the OS takes control of the session it issues its own
			 * LOGIN rather than a RECONNECT. To avoid the machine
			 * waiting until the reconnect_hold expires, we can skip
			 * the ACCESS_DENIED errors to speed things up.
			 */

			goto already_logged_in;
		}
	}

	/*
	 * check exclusive bit in login request
	 * reject with access_denied if any logins present
	 */
	if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) &&
	    sbp_login_count_all_by_lun(tpg, unpacked_lun, 0)) {
		pr_warn("refusing exclusive login with other active logins\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	/*
	 * check exclusive bit in any existing login descriptor
	 * reject with access_denied if any exclusive logins present
	 */
	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 1)) {
		pr_warn("refusing login while another exclusive login present\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	/*
	 * check we haven't exceeded the number of allowed logins
	 * reject with resources_unavailable if we have
	 */
	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 0) >=
			tport->max_logins_per_lun) {
		pr_warn("max number of logins reached\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	if (!sess) {
		sess = sbp_session_create(tpg, guid);
		if (IS_ERR(sess)) {
			switch (PTR_ERR(sess)) {
			case -EPERM:
				ret = SBP_STATUS_ACCESS_DENIED;
				break;
			default:
				ret = SBP_STATUS_RESOURCES_UNAVAIL;
				break;
			}

			req->status.status = cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_SBP_STATUS(ret));
			return;
		}

		sess->node_id = req->node_addr;
		sess->card = fw_card_get(req->card);
		sess->generation = req->generation;
		sess->speed = req->speed;

		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	}

	/* only take the latest reconnect_hold into account */
	sess->reconnect_hold = min(
			1 << LOGIN_ORB_RECONNECT(be32_to_cpu(req->orb.misc)),
			tport->max_reconnect_timeout) - 1;

	login = kmalloc(sizeof(*login), GFP_KERNEL);
	if (!login) {
		pr_err("failed to allocate login descriptor\n");

		sbp_session_release(sess, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	login->sess = sess;
	login->login_lun = unpacked_lun;
	login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
	login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
	login->login_id = atomic_inc_return(&login_id);

	login->tgt_agt = sbp_target_agent_register(login);
	if (IS_ERR(login->tgt_agt)) {
		ret = PTR_ERR(login->tgt_agt);
		pr_err("failed to map command block handler: %d\n", ret);

		sbp_session_release(sess, true);
		kfree(login);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	spin_lock_bh(&sess->lock);
	list_add_tail(&login->link, &sess->login_list);
	spin_unlock_bh(&sess->lock);

already_logged_in:
	response = kzalloc(sizeof(*response), GFP_KERNEL);
	if (!response) {
		pr_err("failed to allocate login response block\n");

		sbp_login_release(login, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	login_response_len = clamp_val(
			LOGIN_ORB_RESPONSE_LENGTH(be32_to_cpu(req->orb.length)),
			12, sizeof(*response));
	response->misc = cpu_to_be32(
		((login_response_len & 0xffff) << 16) |
		(login->login_id & 0xffff));
	response->reconnect_hold = cpu_to_be32(sess->reconnect_hold & 0xffff);
	addr_to_sbp2_pointer(login->tgt_agt->handler.offset,
		&response->command_block_agent);

	ret = sbp_run_transaction(sess->card, TCODE_WRITE_BLOCK_REQUEST,
		sess->node_id, sess->generation, sess->speed,
		sbp2_pointer_to_addr(&req->orb.ptr2), response,
		login_response_len);
	if (ret != RCODE_COMPLETE) {
		pr_debug("failed to write login response block: %x\n", ret);

		kfree(response);
		sbp_login_release(login, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	kfree(response);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}
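
/*
 * Worked example of the reconnect_hold calculation above: with the default
 * max_reconnect_timeout of 5 and a LOGIN ORB requesting reconnect = 3
 * (i.e. 1 << 3 = 8 seconds), min(8, 5) - 1 = 4 is stored, and
 * session_check_for_reset() later waits reconnect_hold + 1 = 5 seconds.
 */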

static void sbp_management_request_query_logins(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	pr_notice("QUERY LOGINS not implemented\n");
	/* FIXME: implement */

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
}

static void sbp_management_request_reconnect(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	int ret;
	u64 guid;
	struct sbp_login_descriptor *login;

	ret = read_peer_guid(&guid, req);
	if (ret != RCODE_COMPLETE) {
		pr_warn("failed to read peer GUID: %d\n", ret);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	pr_notice("mgt_agent RECONNECT from %016llx\n", guid);

	login = sbp_login_find_by_id(tpg,
		RECONNECT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)));

	if (!login) {
		pr_err("mgt_agent RECONNECT unknown login ID\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	if (login->sess->guid != guid) {
		pr_err("mgt_agent RECONNECT login GUID doesn't match\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	spin_lock_bh(&login->sess->lock);
	if (login->sess->card)
		fw_card_put(login->sess->card);

	/* update the node details */
	login->sess->generation = req->generation;
	login->sess->node_id = req->node_addr;
	login->sess->card = fw_card_get(req->card);
	login->sess->speed = req->speed;
	spin_unlock_bh(&login->sess->lock);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}

static void sbp_management_request_logout(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	int id;
	struct sbp_login_descriptor *login;

	id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));

	login = sbp_login_find_by_id(tpg, id);
	if (!login) {
		pr_warn("cannot find login: %d\n", id);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LOGIN_ID_UNKNOWN));
		return;
	}

	pr_info("mgt_agent LOGOUT from LUN %d session %d\n",
		login->login_lun, login->login_id);

	if (req->node_addr != login->sess->node_id) {
		pr_warn("logout from different node ID\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	sbp_login_release(login, true);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}

static void session_check_for_reset(struct sbp_session *sess)
{
	bool card_valid = false;

	spin_lock_bh(&sess->lock);

	if (sess->card) {
		spin_lock_irq(&sess->card->lock);
		card_valid = (sess->card->local_node != NULL);
		spin_unlock_irq(&sess->card->lock);

		if (!card_valid) {
			fw_card_put(sess->card);
			sess->card = NULL;
		}
	}

	if (!card_valid || (sess->generation != sess->card->generation)) {
		pr_info("Waiting for reconnect from node: %016llx\n",
				sess->guid);

		sess->node_id = -1;
		sess->reconnect_expires = get_jiffies_64() +
			((sess->reconnect_hold + 1) * HZ);
	}

	spin_unlock_bh(&sess->lock);
}

static void session_reconnect_expired(struct sbp_session *sess)
{
	struct sbp_login_descriptor *login, *temp;
	LIST_HEAD(login_list);

	pr_info("Reconnect timer expired for node: %016llx\n", sess->guid);

	spin_lock_bh(&sess->lock);
	list_for_each_entry_safe(login, temp, &sess->login_list, link) {
		login->sess = NULL;
		list_move_tail(&login->link, &login_list);
	}
	spin_unlock_bh(&sess->lock);

	list_for_each_entry_safe(login, temp, &login_list, link) {
		list_del(&login->link);
		sbp_login_release(login, false);
	}

	sbp_session_release(sess, false);
}

static void session_maintenance_work(struct work_struct *work)
{
	struct sbp_session *sess = container_of(work, struct sbp_session,
			maint_work.work);

	/* could be called while tearing down the session */
	spin_lock_bh(&sess->lock);
	if (list_empty(&sess->login_list)) {
		spin_unlock_bh(&sess->lock);
		return;
	}
	spin_unlock_bh(&sess->lock);

	if (sess->node_id != -1) {
		/* check for bus reset and make node_id invalid */
		session_check_for_reset(sess);

		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	} else if (!time_after64(get_jiffies_64(), sess->reconnect_expires)) {
		/* still waiting for reconnect */
		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	} else {
		/* reconnect timeout has expired */
		session_reconnect_expired(sess);
	}
}
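
/*
 * The maintenance work re-arms itself every SESSION_MAINTENANCE_INTERVAL
 * (one second): while the node is present it polls for bus resets, and
 * once node_id has been invalidated it keeps polling until the initiator
 * reconnects or the reconnect_hold window computed above runs out.
 */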

static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	int state;

	switch (tcode) {
	case TCODE_READ_QUADLET_REQUEST:
		pr_debug("tgt_agent AGENT_STATE READ\n");

		spin_lock_bh(&agent->lock);
		state = agent->state;
		spin_unlock_bh(&agent->lock);

		*(__be32 *)data = cpu_to_be32(state);

		return RCODE_COMPLETE;

	case TCODE_WRITE_QUADLET_REQUEST:
		/* ignored */
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_agent_reset(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		pr_debug("tgt_agent AGENT_RESET\n");
		spin_lock_bh(&agent->lock);
		agent->state = AGENT_STATE_RESET;
		spin_unlock_bh(&agent->lock);
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	struct sbp2_pointer *ptr = data;

	switch (tcode) {
	case TCODE_WRITE_BLOCK_REQUEST:
		spin_lock_bh(&agent->lock);
		if (agent->state != AGENT_STATE_SUSPENDED &&
				agent->state != AGENT_STATE_RESET) {
			spin_unlock_bh(&agent->lock);
			pr_notice("Ignoring ORB_POINTER write while active.\n");
			return RCODE_CONFLICT_ERROR;
		}
		agent->state = AGENT_STATE_ACTIVE;
		spin_unlock_bh(&agent->lock);

		agent->orb_pointer = sbp2_pointer_to_addr(ptr);
		agent->doorbell = false;

		pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n",
				agent->orb_pointer);

		queue_work(system_unbound_wq, &agent->work);

		return RCODE_COMPLETE;

	case TCODE_READ_BLOCK_REQUEST:
		pr_debug("tgt_agent ORB_POINTER READ\n");
		spin_lock_bh(&agent->lock);
		addr_to_sbp2_pointer(agent->orb_pointer, ptr);
		spin_unlock_bh(&agent->lock);
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		spin_lock_bh(&agent->lock);
		if (agent->state != AGENT_STATE_SUSPENDED) {
			spin_unlock_bh(&agent->lock);
			pr_debug("Ignoring DOORBELL while active.\n");
			return RCODE_CONFLICT_ERROR;
		}
		agent->state = AGENT_STATE_ACTIVE;
		spin_unlock_bh(&agent->lock);

		agent->doorbell = true;

		pr_debug("tgt_agent DOORBELL\n");

		queue_work(system_unbound_wq, &agent->work);

		return RCODE_COMPLETE;

	case TCODE_READ_QUADLET_REQUEST:
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_unsolicited_status_enable(struct fw_card *card,
		int tcode, void *data, struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		pr_debug("tgt_agent UNSOLICITED_STATUS_ENABLE\n");
		/* ignored as we don't send unsolicited status */
		return RCODE_COMPLETE;

	case TCODE_READ_QUADLET_REQUEST:
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static void tgt_agent_rw(struct fw_card *card, struct fw_request *request,
		int tcode, int destination, int source, int generation,
		unsigned long long offset, void *data, size_t length,
		void *callback_data)
{
	struct sbp_target_agent *agent = callback_data;
	struct sbp_session *sess = agent->login->sess;
	int sess_gen, sess_node, rcode;

	spin_lock_bh(&sess->lock);
	sess_gen = sess->generation;
	sess_node = sess->node_id;
	spin_unlock_bh(&sess->lock);

	if (generation != sess_gen) {
		pr_notice("ignoring request with wrong generation\n");
		rcode = RCODE_TYPE_ERROR;
		goto out;
	}

	if (source != sess_node) {
		pr_notice("ignoring request from foreign node (%x != %x)\n",
				source, sess_node);
		rcode = RCODE_TYPE_ERROR;
		goto out;
	}

	/* turn offset into the offset from the start of the block */
	offset -= agent->handler.offset;

	if (offset == 0x00 && length == 4) {
		/* AGENT_STATE */
		rcode = tgt_agent_rw_agent_state(card, tcode, data, agent);
	} else if (offset == 0x04 && length == 4) {
		/* AGENT_RESET */
		rcode = tgt_agent_rw_agent_reset(card, tcode, data, agent);
	} else if (offset == 0x08 && length == 8) {
		/* ORB_POINTER */
		rcode = tgt_agent_rw_orb_pointer(card, tcode, data, agent);
	} else if (offset == 0x10 && length == 4) {
		/* DOORBELL */
		rcode = tgt_agent_rw_doorbell(card, tcode, data, agent);
	} else if (offset == 0x14 && length == 4) {
		/* UNSOLICITED_STATUS_ENABLE */
		rcode = tgt_agent_rw_unsolicited_status_enable(card, tcode,
				data, agent);
	} else {
		rcode = RCODE_ADDRESS_ERROR;
	}

out:
	fw_send_response(card, request, rcode);
}
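
/*
 * Register map dispatched by tgt_agent_rw() above, relative to the handler
 * base: 0x00 AGENT_STATE (quadlet), 0x04 AGENT_RESET (quadlet), 0x08
 * ORB_POINTER (8 bytes), 0x10 DOORBELL (quadlet), 0x14
 * UNSOLICITED_STATUS_ENABLE (quadlet) -- 0x20 bytes in total, matching
 * handler.length in sbp_target_agent_register().
 */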

static void sbp_handle_command(struct sbp_target_request *);
static int sbp_send_status(struct sbp_target_request *);
static void sbp_free_request(struct sbp_target_request *);

static void tgt_agent_process_work(struct work_struct *work)
{
	struct sbp_target_request *req =
		container_of(work, struct sbp_target_request, work);

	pr_debug("tgt_orb ptr:0x%llx next_ORB:0x%llx data_descriptor:0x%llx misc:0x%x\n",
			req->orb_pointer,
			sbp2_pointer_to_addr(&req->orb.next_orb),
			sbp2_pointer_to_addr(&req->orb.data_descriptor),
			be32_to_cpu(req->orb.misc));

	if (req->orb_pointer >> 32)
		pr_debug("ORB with high bits set\n");

	switch (ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc))) {
	case 0:/* Format specified by this standard */
		sbp_handle_command(req);
		break;
	case 1: /* Reserved for future standardization */
	case 2: /* Vendor-dependent */
		req->status.status |= cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_DEAD(0) |
				STATUS_BLOCK_LEN(1) |
				STATUS_BLOCK_SBP_STATUS(
					SBP_STATUS_REQ_TYPE_NOTSUPP));
		sbp_send_status(req);
		break;
	case 3: /* Dummy ORB */
		req->status.status |= cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_DEAD(0) |
				STATUS_BLOCK_LEN(1) |
				STATUS_BLOCK_SBP_STATUS(
					SBP_STATUS_DUMMY_ORB_COMPLETE));
		sbp_send_status(req);
		break;
	}
}

/* used to double-check we haven't been issued an AGENT_RESET */
static inline bool tgt_agent_check_active(struct sbp_target_agent *agent)
{
	bool active;

	spin_lock_bh(&agent->lock);
	active = (agent->state == AGENT_STATE_ACTIVE);
	spin_unlock_bh(&agent->lock);

	return active;
}
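
/*
 * Requests are carved out of the preallocated se_sess->sess_cmd_map
 * (sized for 128 tags in sbp_session_create()), so a failed
 * sbitmap_queue_get() below simply means every tag is already in flight.
 */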
static struct sbp_target_request *sbp_mgt_get_req(struct sbp_session *sess,
	struct fw_card *card, u64 next_orb)
{
	struct se_session *se_sess = sess->se_sess;
	struct sbp_target_request *req;
	int tag, cpu;

	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
	if (tag < 0)
		return ERR_PTR(-ENOMEM);

	req = &((struct sbp_target_request *)se_sess->sess_cmd_map)[tag];
	memset(req, 0, sizeof(*req));
	req->se_cmd.map_tag = tag;
	req->se_cmd.map_cpu = cpu;
	req->se_cmd.tag = next_orb;

	return req;
}

static void tgt_agent_fetch_work(struct work_struct *work)
{
	struct sbp_target_agent *agent =
		container_of(work, struct sbp_target_agent, work);
	struct sbp_session *sess = agent->login->sess;
	struct sbp_target_request *req;
	int ret;
	bool doorbell = agent->doorbell;
	u64 next_orb = agent->orb_pointer;

	while (next_orb && tgt_agent_check_active(agent)) {
		req = sbp_mgt_get_req(sess, sess->card, next_orb);
		if (IS_ERR(req)) {
			spin_lock_bh(&agent->lock);
			agent->state = AGENT_STATE_DEAD;
			spin_unlock_bh(&agent->lock);
			return;
		}

		req->login = agent->login;
		req->orb_pointer = next_orb;

		req->status.status = cpu_to_be32(STATUS_BLOCK_ORB_OFFSET_HIGH(
					req->orb_pointer >> 32));
		req->status.orb_low = cpu_to_be32(
				req->orb_pointer & 0xfffffffc);

		/* read in the ORB */
		ret = sbp_run_transaction(sess->card, TCODE_READ_BLOCK_REQUEST,
				sess->node_id, sess->generation, sess->speed,
				req->orb_pointer, &req->orb, sizeof(req->orb));
		if (ret != RCODE_COMPLETE) {
			pr_debug("tgt_orb fetch failed: %x\n", ret);
			req->status.status |= cpu_to_be32(
					STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_FINISHED) |
					STATUS_BLOCK_RESP(
						STATUS_RESP_TRANSPORT_FAILURE) |
					STATUS_BLOCK_DEAD(1) |
					STATUS_BLOCK_LEN(1) |
					STATUS_BLOCK_SBP_STATUS(
						SBP_STATUS_UNSPECIFIED_ERROR));
			spin_lock_bh(&agent->lock);
			agent->state = AGENT_STATE_DEAD;
			spin_unlock_bh(&agent->lock);

			sbp_send_status(req);
			return;
		}

		/* check the next_ORB field */
		if (be32_to_cpu(req->orb.next_orb.high) & 0x80000000) {
			next_orb = 0;
			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_FINISHED));
		} else {
			next_orb = sbp2_pointer_to_addr(&req->orb.next_orb);
			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_CONTINUING));
		}

		if (tgt_agent_check_active(agent) && !doorbell) {
			INIT_WORK(&req->work, tgt_agent_process_work);
			queue_work(system_unbound_wq, &req->work);
		} else {
			/* don't process this request, just check next_ORB */
			sbp_free_request(req);
		}

		spin_lock_bh(&agent->lock);
		doorbell = agent->doorbell = false;

		/* check if we should carry on processing */
		if (next_orb)
			agent->orb_pointer = next_orb;
		else
			agent->state = AGENT_STATE_SUSPENDED;

		spin_unlock_bh(&agent->lock);
	}
}

static struct sbp_target_agent *sbp_target_agent_register(
		struct sbp_login_descriptor *login)
{
	struct sbp_target_agent *agent;
	int ret;

	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
	if (!agent)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&agent->lock);

	agent->handler.length = 0x20;
	agent->handler.address_callback = tgt_agent_rw;
	agent->handler.callback_data = agent;

	agent->login = login;
	agent->state = AGENT_STATE_RESET;
	INIT_WORK(&agent->work, tgt_agent_fetch_work);
	agent->orb_pointer = 0;
	agent->doorbell = false;

	ret = fw_core_add_address_handler(&agent->handler,
			&sbp_register_region);
	if (ret < 0) {
		kfree(agent);
		return ERR_PTR(ret);
	}

	return agent;
}

static void sbp_target_agent_unregister(struct sbp_target_agent *agent)
{
	fw_core_remove_address_handler(&agent->handler);
	cancel_work_sync(&agent->work);
	kfree(agent);
}

/*
 * Simple wrapper around fw_run_transaction that retries the transaction
 * several times in case of failure, with a backoff that grows quadratically
 * with the attempt number.
 */
static int sbp_run_transaction(struct fw_card *card, int tcode,
		int destination_id, int generation, int speed,
		unsigned long long offset, void *payload, size_t length)
{
	int attempt, ret, delay;

	for (attempt = 1; attempt <= 5; attempt++) {
		ret = fw_run_transaction(card, tcode, destination_id,
				generation, speed, offset, payload, length);

		switch (ret) {
		case RCODE_COMPLETE:
		case RCODE_TYPE_ERROR:
		case RCODE_ADDRESS_ERROR:
		case RCODE_GENERATION:
			return ret;

		default:
			delay = 5 * attempt * attempt;
			usleep_range(delay, delay * 2);
		}
	}

	return ret;
}
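
/*
 * With delay = 5 * attempt * attempt, the sleeps between the five attempts
 * are 5, 20, 45, 80 and 125 microseconds (each possibly doubled by
 * usleep_range()), so a persistently failing transaction gives up after
 * well under a millisecond of waiting.
 */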

/*
 * Wrapper around sbp_run_transaction that gets the card, destination,
 * generation and speed out of the request's session.
 */
static int sbp_run_request_transaction(struct sbp_target_request *req,
		int tcode, unsigned long long offset, void *payload,
		size_t length)
{
	struct sbp_login_descriptor *login = req->login;
	struct sbp_session *sess = login->sess;
	struct fw_card *card;
	int node_id, generation, speed, ret;

	spin_lock_bh(&sess->lock);
	card = fw_card_get(sess->card);
	node_id = sess->node_id;
	generation = sess->generation;
	speed = sess->speed;
	spin_unlock_bh(&sess->lock);

	ret = sbp_run_transaction(card, tcode, node_id, generation, speed,
			offset, payload, length);

	fw_card_put(card);

	return ret;
}

static int sbp_fetch_command(struct sbp_target_request *req)
{
	int ret, cmd_len, copy_len;

	cmd_len = scsi_command_size(req->orb.command_block);

	req->cmd_buf = kmalloc(cmd_len, GFP_KERNEL);
	if (!req->cmd_buf)
		return -ENOMEM;

	memcpy(req->cmd_buf, req->orb.command_block,
		min_t(int, cmd_len, sizeof(req->orb.command_block)));

	if (cmd_len > sizeof(req->orb.command_block)) {
		pr_debug("sbp_fetch_command: filling in long command\n");
		copy_len = cmd_len - sizeof(req->orb.command_block);

		ret = sbp_run_request_transaction(req,
				TCODE_READ_BLOCK_REQUEST,
				req->orb_pointer + sizeof(req->orb),
				req->cmd_buf + sizeof(req->orb.command_block),
				copy_len);
		if (ret != RCODE_COMPLETE)
			return -EIO;
	}

	return 0;
}

static int sbp_fetch_page_table(struct sbp_target_request *req)
{
	int pg_tbl_sz, ret;
	struct sbp_page_table_entry *pg_tbl;

	if (!CMDBLK_ORB_PG_TBL_PRESENT(be32_to_cpu(req->orb.misc)))
		return 0;

	pg_tbl_sz = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)) *
		sizeof(struct sbp_page_table_entry);

	pg_tbl = kmalloc(pg_tbl_sz, GFP_KERNEL);
	if (!pg_tbl)
		return -ENOMEM;

	ret = sbp_run_request_transaction(req, TCODE_READ_BLOCK_REQUEST,
			sbp2_pointer_to_addr(&req->orb.data_descriptor),
			pg_tbl, pg_tbl_sz);
	if (ret != RCODE_COMPLETE) {
		kfree(pg_tbl);
		return -EIO;
	}

	req->pg_tbl = pg_tbl;
	return 0;
}

static void sbp_calc_data_length_direction(struct sbp_target_request *req,
	u32 *data_len, enum dma_data_direction *data_dir)
{
	int data_size, direction, idx;

	data_size = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
	direction = CMDBLK_ORB_DIRECTION(be32_to_cpu(req->orb.misc));

	if (!data_size) {
		*data_len = 0;
		*data_dir = DMA_NONE;
		return;
	}

	*data_dir = direction ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	if (req->pg_tbl) {
		*data_len = 0;
		for (idx = 0; idx < data_size; idx++) {
			*data_len += be16_to_cpu(
					req->pg_tbl[idx].segment_length);
		}
	} else {
		*data_len = data_size;
	}
}

static void sbp_handle_command(struct sbp_target_request *req)
{
	struct sbp_login_descriptor *login = req->login;
	struct sbp_session *sess = login->sess;
	int ret, unpacked_lun;
	u32 data_length;
	enum dma_data_direction data_dir;

	ret = sbp_fetch_command(req);
	if (ret) {
		pr_debug("sbp_handle_command: fetch command failed: %d\n", ret);
		goto err;
	}

	ret = sbp_fetch_page_table(req);
	if (ret) {
		pr_debug("sbp_handle_command: fetch page table failed: %d\n",
			ret);
		goto err;
	}

	unpacked_lun = req->login->login_lun;
	sbp_calc_data_length_direction(req, &data_length, &data_dir);

	pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n",
			req->orb_pointer, unpacked_lun, data_length, data_dir);

	/* only used for printk until we do TMRs */
	req->se_cmd.tag = req->orb_pointer;
	if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
			      req->sense_buf, unpacked_lun, data_length,
			      TCM_SIMPLE_TAG, data_dir, TARGET_SCF_ACK_KREF))
		goto err;

	return;

err:
	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
		STATUS_BLOCK_DEAD(0) |
		STATUS_BLOCK_LEN(1) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
	sbp_send_status(req);
}

/*
 * DMA_TO_DEVICE = read from initiator (SCSI WRITE)
 * DMA_FROM_DEVICE = write to initiator (SCSI READ)
 */
static int sbp_rw_data(struct sbp_target_request *req)
{
	struct sbp_session *sess = req->login->sess;
	int tcode, sg_miter_flags, max_payload, pg_size, speed, node_id,
		generation, num_pte, length, tfr_length,
		rcode = RCODE_COMPLETE;
	struct sbp_page_table_entry *pte;
	unsigned long long offset;
	struct fw_card *card;
	struct sg_mapping_iter iter;

	if (req->se_cmd.data_direction == DMA_FROM_DEVICE) {
		tcode = TCODE_WRITE_BLOCK_REQUEST;
		sg_miter_flags = SG_MITER_FROM_SG;
	} else {
		tcode = TCODE_READ_BLOCK_REQUEST;
		sg_miter_flags = SG_MITER_TO_SG;
	}

	max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD(be32_to_cpu(req->orb.misc));
	speed = CMDBLK_ORB_SPEED(be32_to_cpu(req->orb.misc));

	pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc));
	if (pg_size) {
		pr_err("sbp_run_transaction: page size ignored\n");
		pg_size = 0x100 << pg_size;
	}

	spin_lock_bh(&sess->lock);
	card = fw_card_get(sess->card);
	node_id = sess->node_id;
	generation = sess->generation;
	spin_unlock_bh(&sess->lock);

	if (req->pg_tbl) {
		pte = req->pg_tbl;
		num_pte = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));

		offset = 0;
		length = 0;
	} else {
		pte = NULL;
		num_pte = 0;

		offset = sbp2_pointer_to_addr(&req->orb.data_descriptor);
		length = req->se_cmd.data_length;
	}

	sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents,
		sg_miter_flags);

	while (length || num_pte) {
		if (!length) {
			offset = (u64)be16_to_cpu(pte->segment_base_hi) << 32 |
				be32_to_cpu(pte->segment_base_lo);
			length = be16_to_cpu(pte->segment_length);

			pte++;
			num_pte--;
		}

		sg_miter_next(&iter);

		tfr_length = min3(length, max_payload, (int)iter.length);

		/* FIXME: take page_size into account */

		rcode = sbp_run_transaction(card, tcode, node_id,
				generation, speed,
				offset, iter.addr, tfr_length);

		if (rcode != RCODE_COMPLETE)
			break;

		length -= tfr_length;
		offset += tfr_length;
		iter.consumed = tfr_length;
	}

	sg_miter_stop(&iter);
	fw_card_put(card);

	if (rcode == RCODE_COMPLETE) {
		WARN_ON(length != 0);
		return 0;
	} else {
		return -EIO;
	}
}
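
/*
 * The max_payload value used above comes from the ORB's max_payload field:
 * the largest block per transaction is 4 << value bytes (for example, a
 * field value of 8 permits 1024-byte transfers). Each loop iteration moves
 * min(segment remaining, max_payload, current sg entry length) bytes.
 */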

static int sbp_send_status(struct sbp_target_request *req)
{
	int rc, ret = 0, length;
	struct sbp_login_descriptor *login = req->login;

	length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4;

	rc = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
			login->status_fifo_addr, &req->status, length);
	if (rc != RCODE_COMPLETE) {
		pr_debug("sbp_send_status: write failed: 0x%x\n", rc);
		ret = -EIO;
		goto put_ref;
	}

	pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n",
			req->orb_pointer);
	/*
	 * Drop the extra ACK_KREF reference taken by target_submit_cmd()
	 * ahead of sbp_check_stop_free() -> transport_generic_free_cmd()
	 * final se_cmd->cmd_kref put.
	 */
put_ref:
	target_put_sess_cmd(&req->se_cmd);
	return ret;
}

static void sbp_sense_mangle(struct sbp_target_request *req)
{
	struct se_cmd *se_cmd = &req->se_cmd;
	u8 *sense = req->sense_buf;
	u8 *status = req->status.data;

	WARN_ON(se_cmd->scsi_sense_length < 18);

	switch (sense[0] & 0x7f) {		/* sfmt */
	case 0x70: /* current, fixed */
		status[0] = 0 << 6;
		break;
	case 0x71: /* deferred, fixed */
		status[0] = 1 << 6;
		break;
	case 0x72: /* current, descriptor */
	case 0x73: /* deferred, descriptor */
	default:
		/*
		 * TODO: SBP-3 specifies what we should do with descriptor
		 * format sense data
		 */
		pr_err("sbp_send_sense: unknown sense format: 0x%x\n",
			sense[0]);
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQUEST_ABORTED));
		return;
	}

	status[0] |= se_cmd->scsi_status & 0x3f;/* status */
	status[1] =
		(sense[0] & 0x80) |		/* valid */
		((sense[2] & 0xe0) >> 1) |	/* mark, eom, ili */
		(sense[2] & 0x0f);		/* sense_key */
	status[2] = se_cmd->scsi_asc;		/* sense_code */
	status[3] = se_cmd->scsi_ascq;		/* sense_qualifier */

	/* information */
	status[4] = sense[3];
	status[5] = sense[4];
	status[6] = sense[5];
	status[7] = sense[6];

	/* CDB-dependent */
	status[8] = sense[8];
	status[9] = sense[9];
	status[10] = sense[10];
	status[11] = sense[11];

	/* fru */
	status[12] = sense[14];

	/* sense_key-dependent */
	status[13] = sense[15];
	status[14] = sense[16];
	status[15] = sense[17];

	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_DEAD(0) |
		STATUS_BLOCK_LEN(5) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}
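
/*
 * The mangling above repacks 18 bytes of fixed-format SCSI sense data into
 * the 16-byte SBP-2 status payload: sense byte 7 (additional sense length)
 * and bytes 12-13 (ASC/ASCQ, taken from se_cmd instead) are dropped, which
 * is how the result fits in STATUS_BLOCK_LEN(5) quadlets.
 */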

static int sbp_send_sense(struct sbp_target_request *req)
{
	struct se_cmd *se_cmd = &req->se_cmd;

	if (se_cmd->scsi_sense_length) {
		sbp_sense_mangle(req);
	} else {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
	}

	return sbp_send_status(req);
}

static void sbp_free_request(struct sbp_target_request *req)
{
	struct se_cmd *se_cmd = &req->se_cmd;
	struct se_session *se_sess = se_cmd->se_sess;

	kfree(req->pg_tbl);
	kfree(req->cmd_buf);

	target_free_tag(se_sess, se_cmd);
}

static void sbp_mgt_agent_process(struct work_struct *work)
{
	struct sbp_management_agent *agent =
		container_of(work, struct sbp_management_agent, work);
	struct sbp_management_request *req = agent->request;
	int ret;
	int status_data_len = 0;

	/* fetch the ORB from the initiator */
	ret = sbp_run_transaction(req->card, TCODE_READ_BLOCK_REQUEST,
		req->node_addr, req->generation, req->speed,
		agent->orb_offset, &req->orb, sizeof(req->orb));
	if (ret != RCODE_COMPLETE) {
		pr_debug("mgt_orb fetch failed: %x\n", ret);
		goto out;
	}

	pr_debug("mgt_orb ptr1:0x%llx ptr2:0x%llx misc:0x%x len:0x%x status_fifo:0x%llx\n",
		sbp2_pointer_to_addr(&req->orb.ptr1),
		sbp2_pointer_to_addr(&req->orb.ptr2),
		be32_to_cpu(req->orb.misc), be32_to_cpu(req->orb.length),
		sbp2_pointer_to_addr(&req->orb.status_fifo));

	if (!ORB_NOTIFY(be32_to_cpu(req->orb.misc)) ||
		ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc)) != 0) {
		pr_err("mgt_orb bad request\n");
		goto out;
	}

	switch (MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))) {
	case MANAGEMENT_ORB_FUNCTION_LOGIN:
		sbp_management_request_login(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS:
		sbp_management_request_query_logins(agent, req,
				&status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_RECONNECT:
		sbp_management_request_reconnect(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_SET_PASSWORD:
		pr_notice("SET PASSWORD not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_LOGOUT:
		sbp_management_request_logout(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK:
		pr_notice("ABORT TASK not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET:
		pr_notice("ABORT TASK SET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET:
		pr_notice("LOGICAL UNIT RESET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_TARGET_RESET:
		pr_notice("TARGET RESET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	default:
		pr_notice("unknown management function 0x%x\n",
			MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc)));

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;
	}

	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_SRC(1) | /* Response to ORB, next_ORB absent */
		STATUS_BLOCK_LEN(DIV_ROUND_UP(status_data_len, 4) + 1) |
		STATUS_BLOCK_ORB_OFFSET_HIGH(agent->orb_offset >> 32));
	req->status.orb_low = cpu_to_be32(agent->orb_offset);

	/* write the status block back to the initiator */
	ret = sbp_run_transaction(req->card, TCODE_WRITE_BLOCK_REQUEST,
		req->node_addr, req->generation, req->speed,
		sbp2_pointer_to_addr(&req->orb.status_fifo),
		&req->status, 8 + status_data_len);
	if (ret != RCODE_COMPLETE) {
		pr_debug("mgt_orb status write failed: %x\n", ret);
		goto out;
	}

out:
	fw_card_put(req->card);
	kfree(req);

	spin_lock_bh(&agent->lock);
	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
	spin_unlock_bh(&agent->lock);
}

static void sbp_mgt_agent_rw(struct fw_card *card,
	struct fw_request *request, int tcode, int destination, int source,
	int generation, unsigned long long offset, void *data, size_t length,
	void *callback_data)
{
	struct sbp_management_agent *agent = callback_data;
	struct sbp2_pointer *ptr = data;
	int rcode = RCODE_ADDRESS_ERROR;

	if (!agent->tport->enable)
		goto out;

	if ((offset != agent->handler.offset) || (length != 8))
		goto out;

	if (tcode == TCODE_WRITE_BLOCK_REQUEST) {
		struct sbp_management_request *req;
		int prev_state;

		spin_lock_bh(&agent->lock);
		prev_state = agent->state;
		agent->state = MANAGEMENT_AGENT_STATE_BUSY;
		spin_unlock_bh(&agent->lock);

		if (prev_state == MANAGEMENT_AGENT_STATE_BUSY) {
			pr_notice("ignoring management request while busy\n");
			rcode = RCODE_CONFLICT_ERROR;
			goto out;
		}
		req = kzalloc(sizeof(*req), GFP_ATOMIC);
		if (!req) {
			rcode = RCODE_CONFLICT_ERROR;
			goto out;
		}

		req->card = fw_card_get(card);
		req->generation = generation;
		req->node_addr = source;
		req->speed = fw_get_request_speed(request);

		agent->orb_offset = sbp2_pointer_to_addr(ptr);
		agent->request = req;

		queue_work(system_unbound_wq, &agent->work);
		rcode = RCODE_COMPLETE;
	} else if (tcode == TCODE_READ_BLOCK_REQUEST) {
		addr_to_sbp2_pointer(agent->orb_offset, ptr);
		rcode = RCODE_COMPLETE;
	} else {
		rcode = RCODE_TYPE_ERROR;
	}

out:
	fw_send_response(card, request, rcode);
}

static struct sbp_management_agent *sbp_management_agent_register(
		struct sbp_tport *tport)
{
	int ret;
	struct sbp_management_agent *agent;

	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
	if (!agent)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&agent->lock);
	agent->tport = tport;
	agent->handler.length = 0x08;
	agent->handler.address_callback = sbp_mgt_agent_rw;
	agent->handler.callback_data = agent;
	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
	INIT_WORK(&agent->work, sbp_mgt_agent_process);
	agent->orb_offset = 0;
	agent->request = NULL;

	ret = fw_core_add_address_handler(&agent->handler,
			&sbp_register_region);
	if (ret < 0) {
		kfree(agent);
		return ERR_PTR(ret);
	}

	return agent;
}

static void sbp_management_agent_unregister(struct sbp_management_agent *agent)
{
	fw_core_remove_address_handler(&agent->handler);
	cancel_work_sync(&agent->work);
	kfree(agent);
}

static int sbp_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static int sbp_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}

static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	return &tport->tport_name[0];
}

static u16 sbp_get_tag(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	return tpg->tport_tpgt;
}

static u32 sbp_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static void sbp_release_cmd(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	sbp_free_request(req);
}

static u32 sbp_sess_get_index(struct se_session *se_sess)
{
	return 0;
}

static int sbp_write_pending(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);
	int ret;

	ret = sbp_rw_data(req);
	if (ret) {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(
				STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(
				SBP_STATUS_UNSPECIFIED_ERROR));
		sbp_send_status(req);
		return ret;
	}

	target_execute_cmd(se_cmd);
	return 0;
}

static void sbp_set_default_node_attrs(struct se_node_acl *nacl)
{
}

static int sbp_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

static int sbp_queue_data_in(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);
	int ret;

	ret = sbp_rw_data(req);
	if (ret) {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		sbp_send_status(req);
		return ret;
	}

	return sbp_send_sense(req);
}

/*
 * Called after command (no data transfer) or after the write (to device)
 * operation is completed
 */
static int sbp_queue_status(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	return sbp_send_sense(req);
}

static void sbp_queue_tm_rsp(struct se_cmd *se_cmd)
{
}

static void sbp_aborted_task(struct se_cmd *se_cmd)
{
	return;
}

static int sbp_check_stop_free(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	return transport_generic_free_cmd(&req->se_cmd, 0);
}

static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)
{
	struct se_lun *lun;
	int count = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link)
		count++;
	rcu_read_unlock();

	return count;
}

static int sbp_update_unit_directory(struct sbp_tport *tport)
{
	struct se_lun *lun;
	int num_luns, num_entries, idx = 0, mgt_agt_addr, ret;
	u32 *data;

	if (tport->unit_directory.data) {
		fw_core_remove_descriptor(&tport->unit_directory);
		kfree(tport->unit_directory.data);
		tport->unit_directory.data = NULL;
	}

	if (!tport->enable || !tport->tpg)
		return 0;

	num_luns = sbp_count_se_tpg_luns(&tport->tpg->se_tpg);

	/*
	 * Number of entries in the final unit directory:
	 *  - all of those in the template
	 *  - management_agent
	 *  - unit_characteristics
	 *  - reconnect_timeout
	 *  - unit unique ID
	 *  - one for each LUN
	 *
	 *  MUST NOT include leaf or sub-directory entries
	 */
	num_entries = ARRAY_SIZE(sbp_unit_directory_template) + 4 + num_luns;

	if (tport->directory_id != -1)
		num_entries++;

	/* allocate num_entries + 4 for the header and unique ID leaf */
	data = kcalloc((num_entries + 4), sizeof(u32), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* directory_length */
	data[idx++] = num_entries << 16;

	/* directory_id */
	if (tport->directory_id != -1)
		data[idx++] = (CSR_DIRECTORY_ID << 24) | tport->directory_id;

	/* unit directory template */
	memcpy(&data[idx], sbp_unit_directory_template,
			sizeof(sbp_unit_directory_template));
	idx += ARRAY_SIZE(sbp_unit_directory_template);

	/* management_agent */
	mgt_agt_addr = (tport->mgt_agt->handler.offset - CSR_REGISTER_BASE) / 4;
	data[idx++] = 0x54000000 | (mgt_agt_addr & 0x00ffffff);

	/* unit_characteristics */
	data[idx++] = 0x3a000000 |
		(((tport->mgt_orb_timeout * 2) << 8) & 0xff00) |
		SBP_ORB_FETCH_SIZE;

	/* reconnect_timeout */
	data[idx++] = 0x3d000000 | (tport->max_reconnect_timeout & 0xffff);

	/* unit unique ID (leaf is just after LUNs) */
	data[idx++] = 0x8d000000 | (num_luns + 1);

	rcu_read_lock();
	hlist_for_each_entry_rcu(lun, &tport->tpg->se_tpg.tpg_lun_hlist, link) {
		struct se_device *dev;
		int type;
		/*
		 * rcu_dereference_raw protected by se_lun->lun_group symlink
		 * reference to se_device->dev_group.
		 */
		dev = rcu_dereference_raw(lun->lun_se_dev);
		type = dev->transport->get_device_type(dev);

		/* logical_unit_number */
		data[idx++] = 0x14000000 |
			((type << 16) & 0x1f0000) |
			(lun->unpacked_lun & 0xffff);
	}
	rcu_read_unlock();

	/* unit unique ID leaf */
	data[idx++] = 2 << 16;
	data[idx++] = tport->guid >> 32;
	data[idx++] = tport->guid;

	tport->unit_directory.length = idx;
	tport->unit_directory.key = (CSR_DIRECTORY | CSR_UNIT) << 24;
	tport->unit_directory.data = data;

	ret = fw_core_add_descriptor(&tport->unit_directory);
	if (ret < 0) {
		kfree(tport->unit_directory.data);
		tport->unit_directory.data = NULL;
	}

	return ret;
}
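
/*
 * Sizing example for the allocation above: with one LUN and an implicit
 * directory_id, num_entries = 6 (template) + 4 + 1 = 11, and the buffer
 * holds 11 + 4 quadlets: the directory header plus the three-quadlet unit
 * unique ID leaf appended after the entries.
 */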

static ssize_t sbp_parse_wwn(const char *name, u64 *wwn)
{
	const char *cp;
	char c, nibble;
	int pos = 0, err;

	*wwn = 0;
	for (cp = name; cp < &name[SBP_NAMELEN - 1]; cp++) {
		c = *cp;
		if (c == '\n' && cp[1] == '\0')
			continue;
		if (c == '\0') {
			err = 2;
			if (pos != 16)
				goto fail;
			return cp - name;
		}
		err = 3;
		if (isdigit(c))
			nibble = c - '0';
		else if (isxdigit(c))
			nibble = tolower(c) - 'a' + 10;
		else
			goto fail;
		*wwn = (*wwn << 4) | nibble;
		pos++;
	}
	err = 4;
fail:
	printk(KERN_INFO "err %u len %zu pos %u\n",
			err, cp - name, pos);
	return -1;
}

static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn)
{
	return snprintf(buf, len, "%016llx", wwn);
}
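
/*
 * For illustration, with a hypothetical GUID string: sbp_parse_wwn()
 * accepts exactly 16 hex digits plus an optional trailing newline, so
 * "0123456789abcdef\n" yields 0x0123456789abcdef, while anything shorter,
 * longer, or non-hex fails with -1. sbp_format_wwn() is the inverse,
 * printing "%016llx".
 */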

static int sbp_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
{
	u64 guid = 0;

	if (sbp_parse_wwn(name, &guid) < 0)
		return -EINVAL;
	return 0;
}

static int sbp_post_link_lun(
		struct se_portal_group *se_tpg,
		struct se_lun *se_lun)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);

	return sbp_update_unit_directory(tpg->tport);
}

static void sbp_pre_unlink_lun(
		struct se_portal_group *se_tpg,
		struct se_lun *se_lun)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	int ret;

	if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0)
		tport->enable = 0;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		pr_err("unlink LUN: failed to update unit directory\n");
}

static struct se_portal_group *sbp_make_tpg(struct se_wwn *wwn,
					    const char *name)
{
	struct sbp_tport *tport =
		container_of(wwn, struct sbp_tport, tport_wwn);

	struct sbp_tpg *tpg;
	unsigned long tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
		return ERR_PTR(-EINVAL);

	if (tport->tpg) {
		pr_err("Only one TPG per Unit is possible.\n");
		return ERR_PTR(-EBUSY);
	}

	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
	if (!tpg)
		return ERR_PTR(-ENOMEM);

	tpg->tport = tport;
	tpg->tport_tpgt = tpgt;
	tport->tpg = tpg;

	/* default attribute values */
	tport->enable = 0;
	tport->directory_id = -1;
	tport->mgt_orb_timeout = 15;
	tport->max_reconnect_timeout = 5;
	tport->max_logins_per_lun = 1;

	tport->mgt_agt = sbp_management_agent_register(tport);
	if (IS_ERR(tport->mgt_agt)) {
		ret = PTR_ERR(tport->mgt_agt);
		goto out_free_tpg;
	}

	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SBP);
	if (ret < 0)
		goto out_unreg_mgt_agt;

	return &tpg->se_tpg;

out_unreg_mgt_agt:
	sbp_management_agent_unregister(tport->mgt_agt);
out_free_tpg:
	tport->tpg = NULL;
	kfree(tpg);
	return ERR_PTR(ret);
}

static void sbp_drop_tpg(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	core_tpg_deregister(se_tpg);
	sbp_management_agent_unregister(tport->mgt_agt);
	tport->tpg = NULL;
	kfree(tpg);
}

static struct se_wwn *sbp_make_tport(
		struct target_fabric_configfs *tf,
		struct config_group *group,
		const char *name)
{
	u64 guid = 0;
	struct sbp_tport *tport;

	if (sbp_parse_wwn(name, &guid) < 0)
		return ERR_PTR(-EINVAL);

	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
	if (!tport)
		return ERR_PTR(-ENOMEM);

	tport->guid = guid;
	sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid);

	return &tport->tport_wwn;
}

static void sbp_drop_tport(struct se_wwn *wwn)
{
	struct sbp_tport *tport =
		container_of(wwn, struct sbp_tport, tport_wwn);

	kfree(tport);
}

static ssize_t sbp_wwn_version_show(struct config_item *item, char *page)
{
	return sprintf(page, "FireWire SBP fabric module %s\n", SBP_VERSION);
}

CONFIGFS_ATTR_RO(sbp_wwn_, version);

static struct configfs_attribute *sbp_wwn_attrs[] = {
	&sbp_wwn_attr_version,
	NULL,
};

static ssize_t sbp_tpg_directory_id_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	if (tport->directory_id == -1)
		return sprintf(page, "implicit\n");
	else
		return sprintf(page, "%06x\n", tport->directory_id);
}

static ssize_t sbp_tpg_directory_id_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;

	if (tport->enable) {
		pr_err("Cannot change the directory_id on an active target.\n");
		return -EBUSY;
	}

	if (strstr(page, "implicit") == page) {
		tport->directory_id = -1;
	} else {
		if (kstrtoul(page, 16, &val) < 0)
			return -EINVAL;
		if (val > 0xffffff)
			return -EINVAL;

		tport->directory_id = val;
	}

	return count;
}

static ssize_t sbp_tpg_enable_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	return sprintf(page, "%d\n", tport->enable);
}

static ssize_t sbp_tpg_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val != 0) && (val != 1))
		return -EINVAL;

	if (tport->enable == val)
		return count;

	if (val) {
		if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) {
			pr_err("Cannot enable a target with no LUNs!\n");
			return -EINVAL;
		}
	} else {
		/* XXX: force-shutdown sessions instead? */
		spin_lock_bh(&se_tpg->session_lock);
		if (!list_empty(&se_tpg->tpg_sess_list)) {
			spin_unlock_bh(&se_tpg->session_lock);
			return -EBUSY;
		}
		spin_unlock_bh(&se_tpg->session_lock);
	}

	tport->enable = val;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0) {
		pr_err("Could not update Config ROM\n");
		return ret;
	}

	return count;
}

CONFIGFS_ATTR(sbp_tpg_, directory_id);
CONFIGFS_ATTR(sbp_tpg_, enable);

static struct configfs_attribute *sbp_tpg_base_attrs[] = {
	&sbp_tpg_attr_directory_id,
	&sbp_tpg_attr_enable,
	NULL,
};

static ssize_t sbp_tpg_attrib_mgt_orb_timeout_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	return sprintf(page, "%d\n", tport->mgt_orb_timeout);
}

static ssize_t sbp_tpg_attrib_mgt_orb_timeout_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val < 1) || (val > 127))
		return -EINVAL;

	if (tport->mgt_orb_timeout == val)
		return count;

	tport->mgt_orb_timeout = val;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		return ret;

	return count;
}

static ssize_t sbp_tpg_attrib_max_reconnect_timeout_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	return sprintf(page, "%d\n", tport->max_reconnect_timeout);
}

static ssize_t sbp_tpg_attrib_max_reconnect_timeout_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val < 1) || (val > 32767))
		return -EINVAL;

	if (tport->max_reconnect_timeout == val)
		return count;

	tport->max_reconnect_timeout = val;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		return ret;

	return count;
}

static ssize_t sbp_tpg_attrib_max_logins_per_lun_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	return sprintf(page, "%d\n", tport->max_logins_per_lun);
}

static ssize_t sbp_tpg_attrib_max_logins_per_lun_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val < 1) || (val > 127))
		return -EINVAL;

	/* XXX: also check against current count? */

	tport->max_logins_per_lun = val;

	return count;
}

CONFIGFS_ATTR(sbp_tpg_attrib_, mgt_orb_timeout);
CONFIGFS_ATTR(sbp_tpg_attrib_, max_reconnect_timeout);
CONFIGFS_ATTR(sbp_tpg_attrib_, max_logins_per_lun);

static struct configfs_attribute *sbp_tpg_attrib_attrs[] = {
	&sbp_tpg_attrib_attr_mgt_orb_timeout,
	&sbp_tpg_attrib_attr_max_reconnect_timeout,
	&sbp_tpg_attrib_attr_max_logins_per_lun,
	NULL,
};

static const struct target_core_fabric_ops sbp_ops = {
	.module				= THIS_MODULE,
	.fabric_name			= "sbp",
	.tpg_get_wwn			= sbp_get_fabric_wwn,
	.tpg_get_tag			= sbp_get_tag,
	.tpg_check_demo_mode		= sbp_check_true,
	.tpg_check_demo_mode_cache	= sbp_check_true,
	.tpg_check_demo_mode_write_protect = sbp_check_false,
	.tpg_check_prod_mode_write_protect = sbp_check_false,
	.tpg_get_inst_index		= sbp_tpg_get_inst_index,
	.release_cmd			= sbp_release_cmd,
	.sess_get_index			= sbp_sess_get_index,
	.write_pending			= sbp_write_pending,
	.set_default_node_attributes	= sbp_set_default_node_attrs,
	.get_cmd_state			= sbp_get_cmd_state,
	.queue_data_in			= sbp_queue_data_in,
	.queue_status			= sbp_queue_status,
	.queue_tm_rsp			= sbp_queue_tm_rsp,
	.aborted_task			= sbp_aborted_task,
	.check_stop_free		= sbp_check_stop_free,

	.fabric_make_wwn		= sbp_make_tport,
	.fabric_drop_wwn		= sbp_drop_tport,
	.fabric_make_tpg		= sbp_make_tpg,
	.fabric_drop_tpg		= sbp_drop_tpg,
	.fabric_post_link		= sbp_post_link_lun,
	.fabric_pre_unlink		= sbp_pre_unlink_lun,
	.fabric_make_np			= NULL,
	.fabric_drop_np			= NULL,
	.fabric_init_nodeacl		= sbp_init_nodeacl,

	.tfc_wwn_attrs			= sbp_wwn_attrs,
	.tfc_tpg_base_attrs		= sbp_tpg_base_attrs,
	.tfc_tpg_attrib_attrs		= sbp_tpg_attrib_attrs,
};

static int __init sbp_init(void)
{
	return target_register_template(&sbp_ops);
}

static void __exit sbp_exit(void)
{
	target_unregister_template(&sbp_ops);
}

MODULE_DESCRIPTION("FireWire SBP fabric driver");
MODULE_LICENSE("GPL");
module_init(sbp_init);
module_exit(sbp_exit);