/*
 * SBP2 target driver (SCSI over IEEE1394 in target mode)
 *
 * Copyright (C) 2011 Chris Boot <bootc@bootc.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#define KMSG_COMPONENT "sbp_target"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <asm/unaligned.h>

#include "sbp_target.h"
static const struct target_core_fabric_ops sbp_ops;
/* FireWire address region for management and command block address handlers */
static const struct fw_address_region sbp_register_region = {
	.start	= CSR_REGISTER_BASE + 0x10000,
	.end	= 0x1000000000000ULL,
};
static const u32 sbp_unit_directory_template[] = {
	0x1200609e, /* unit_specifier_id: NCITS/T10 */
	0x13010483, /* unit_sw_version: 1155D Rev 4 */
	0x3800609e, /* command_set_specifier_id: NCITS/T10 */
	0x390104d8, /* command_set: SPC-2 */
	0x3b000000, /* command_set_revision: 0 */
	0x3c000001, /* firmware_revision: 1 */
};
#define SESSION_MAINTENANCE_INTERVAL HZ

static atomic_t login_id = ATOMIC_INIT(0);

static void session_maintenance_work(struct work_struct *);
static int sbp_run_transaction(struct fw_card *, int, int, int, int,
		unsigned long long, void *, size_t);
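/*
 * Read the peer's GUID (EUI-64) out of its config ROM: quadlets 3 and 4
 * of the bus information block hold the high and low halves respectively.
 */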
static int read_peer_guid(u64 *guid, const struct sbp_management_request *req)
{
	int ret;
	__be32 high, low;

	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
			req->node_addr, req->generation, req->speed,
			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 3 * 4,
			&high, sizeof(high));
	if (ret != RCODE_COMPLETE)
		return ret;

	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
			req->node_addr, req->generation, req->speed,
			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 4 * 4,
			&low, sizeof(low));
	if (ret != RCODE_COMPLETE)
		return ret;

	*guid = (u64)be32_to_cpu(high) << 32 | be32_to_cpu(low);

	return RCODE_COMPLETE;
}
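/*
 * Lookup helpers: each takes the lock it needs itself and returns the
 * matching session/login descriptor, or NULL if none is found.
 */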
static struct sbp_session *sbp_session_find_by_guid(
		struct sbp_tpg *tpg, u64 guid)
{
	struct se_session *se_sess;
	struct sbp_session *sess, *found = NULL;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;
		if (sess->guid == guid)
			found = sess;
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return found;
}
static struct sbp_login_descriptor *sbp_login_find_by_lun(
		struct sbp_session *session, u32 unpacked_lun)
{
	struct sbp_login_descriptor *login, *found = NULL;

	spin_lock_bh(&session->lock);
	list_for_each_entry(login, &session->login_list, link) {
		if (login->login_lun == unpacked_lun)
			found = login;
	}
	spin_unlock_bh(&session->lock);

	return found;
}
static int sbp_login_count_all_by_lun(
		struct sbp_tpg *tpg,
		u32 unpacked_lun,
		int exclusive)
{
	struct se_session *se_sess;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login;
	int count = 0;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;

		spin_lock_bh(&sess->lock);
		list_for_each_entry(login, &sess->login_list, link) {
			if (login->login_lun != unpacked_lun)
				continue;

			if (!exclusive || login->exclusive)
				count++;
		}
		spin_unlock_bh(&sess->lock);
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return count;
}
static struct sbp_login_descriptor *sbp_login_find_by_id(
		struct sbp_tpg *tpg, int login_id)
{
	struct se_session *se_sess;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login, *found = NULL;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;

		spin_lock_bh(&sess->lock);
		list_for_each_entry(login, &sess->login_list, link) {
			if (login->login_id == login_id)
				found = login;
		}
		spin_unlock_bh(&sess->lock);
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return found;
}
static u32 sbp_get_lun_from_tpg(struct sbp_tpg *tpg, u32 login_lun, int *err)
{
	struct se_portal_group *se_tpg = &tpg->se_tpg;
	struct se_lun *se_lun;

	rcu_read_lock();
	hlist_for_each_entry_rcu(se_lun, &se_tpg->tpg_lun_hlist, link) {
		if (se_lun->unpacked_lun == login_lun) {
			rcu_read_unlock();
			*err = 0;
			return login_lun;
		}
	}
	rcu_read_unlock();

	*err = -ENODEV;
	return login_lun;
}
static struct sbp_session *sbp_session_create(
		struct sbp_tpg *tpg,
		u64 guid)
{
	struct sbp_session *sess;
	int ret;
	char guid_str[17];
	struct se_node_acl *se_nacl;

	sess = kmalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		pr_err("failed to allocate session descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
	if (IS_ERR(sess->se_sess)) {
		pr_err("failed to init se_session\n");

		ret = PTR_ERR(sess->se_sess);
		kfree(sess);
		return ERR_PTR(ret);
	}

	snprintf(guid_str, sizeof(guid_str), "%016llx", guid);

	se_nacl = core_tpg_check_initiator_node_acl(&tpg->se_tpg, guid_str);
	if (!se_nacl) {
		pr_warn("Node ACL not found for %s\n", guid_str);

		transport_free_session(sess->se_sess);
		kfree(sess);

		return ERR_PTR(-EPERM);
	}

	sess->se_sess->se_node_acl = se_nacl;

	sess->guid = guid;

	spin_lock_init(&sess->lock);
	INIT_LIST_HEAD(&sess->login_list);
	INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);

	transport_register_session(&tpg->se_tpg, se_nacl, sess->se_sess, sess);

	return sess;
}
static void sbp_session_release(struct sbp_session *sess, bool cancel_work)
{
	spin_lock_bh(&sess->lock);
	if (!list_empty(&sess->login_list)) {
		spin_unlock_bh(&sess->lock);
		return;
	}
	spin_unlock_bh(&sess->lock);

	if (cancel_work)
		cancel_delayed_work_sync(&sess->maint_work);

	transport_deregister_session_configfs(sess->se_sess);
	transport_deregister_session(sess->se_sess);

	if (sess->card)
		fw_card_put(sess->card);

	kfree(sess);
}
static void sbp_target_agent_unregister(struct sbp_target_agent *);
static void sbp_login_release(struct sbp_login_descriptor *login,
	bool cancel_work)
{
	struct sbp_session *sess = login->sess;

	/* FIXME: abort/wait on tasks */

	sbp_target_agent_unregister(login->tgt_agt);

	if (sess) {
		spin_lock_bh(&sess->lock);
		list_del(&login->link);
		spin_unlock_bh(&sess->lock);

		sbp_session_release(sess, cancel_work);
	}

	kfree(login);
}
static struct sbp_target_agent *sbp_target_agent_register(
	struct sbp_login_descriptor *);
static void sbp_management_request_login(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login;
	struct sbp_login_response_block *response;
	u64 guid;
	u32 unpacked_lun;
	int login_response_len, ret;

	unpacked_lun = sbp_get_lun_from_tpg(tpg,
			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)), &ret);
	if (ret) {
		pr_notice("login to unknown LUN: %d\n",
			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LUN_NOTSUPP));
		return;
	}

	ret = read_peer_guid(&guid, req);
	if (ret != RCODE_COMPLETE) {
		pr_warn("failed to read peer GUID: %d\n", ret);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n",
		unpacked_lun, guid);

	sess = sbp_session_find_by_guid(tpg, guid);
	if (sess) {
		login = sbp_login_find_by_lun(sess, unpacked_lun);
		if (login) {
			pr_notice("initiator already logged-in\n");

			/*
			 * SBP-2 R4 says we should return access denied, but
			 * that can confuse initiators. Instead we need to
			 * treat this like a reconnect, but send the login
			 * response block like a fresh login.
			 *
			 * This is required particularly in the case of Apple
			 * devices booting off the FireWire target, where
			 * the firmware has an active login to the target. When
			 * the OS takes control of the session it issues its own
			 * LOGIN rather than a RECONNECT. To avoid the machine
			 * waiting until the reconnect_hold expires, we can skip
			 * the ACCESS_DENIED errors to speed things up.
			 */

			goto already_logged_in;
		}
	}

	/*
	 * check exclusive bit in login request
	 * reject with access_denied if any logins present
	 */
	if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) &&
	    sbp_login_count_all_by_lun(tpg, unpacked_lun, 0)) {
		pr_warn("refusing exclusive login with other active logins\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	/*
	 * check exclusive bit in any existing login descriptor
	 * reject with access_denied if any exclusive logins present
	 */
	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 1)) {
		pr_warn("refusing login while another exclusive login present\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	/*
	 * check we haven't exceeded the number of allowed logins
	 * reject with resources_unavailable if we have
	 */
	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 0) >=
			tport->max_logins_per_lun) {
		pr_warn("max number of logins reached\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	if (!sess) {
		sess = sbp_session_create(tpg, guid);
		if (IS_ERR(sess)) {
			switch (PTR_ERR(sess)) {
			case -EPERM:
				ret = SBP_STATUS_ACCESS_DENIED;
				break;
			default:
				ret = SBP_STATUS_RESOURCES_UNAVAIL;
				break;
			}

			req->status.status = cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_SBP_STATUS(ret));
			return;
		}

		sess->node_id = req->node_addr;
		sess->card = fw_card_get(req->card);
		sess->generation = req->generation;
		sess->speed = req->speed;

		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	}

	/* only take the latest reconnect_hold into account */
	sess->reconnect_hold = min(
		1 << LOGIN_ORB_RECONNECT(be32_to_cpu(req->orb.misc)),
		tport->max_reconnect_timeout) - 1;

	login = kmalloc(sizeof(*login), GFP_KERNEL);
	if (!login) {
		pr_err("failed to allocate login descriptor\n");

		sbp_session_release(sess, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	login->sess = sess;
	login->login_lun = unpacked_lun;
	login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
	login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
	login->login_id = atomic_inc_return(&login_id);

	login->tgt_agt = sbp_target_agent_register(login);
	if (IS_ERR(login->tgt_agt)) {
		ret = PTR_ERR(login->tgt_agt);
		pr_err("failed to map command block handler: %d\n", ret);

		sbp_session_release(sess, true);
		kfree(login);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	spin_lock_bh(&sess->lock);
	list_add_tail(&login->link, &sess->login_list);
	spin_unlock_bh(&sess->lock);

already_logged_in:
	response = kzalloc(sizeof(*response), GFP_KERNEL);
	if (!response) {
		pr_err("failed to allocate login response block\n");

		sbp_login_release(login, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	login_response_len = clamp_val(
			LOGIN_ORB_RESPONSE_LENGTH(be32_to_cpu(req->orb.length)),
			12, sizeof(*response));
	response->misc = cpu_to_be32(
		((login_response_len & 0xffff) << 16) |
		(login->login_id & 0xffff));
	response->reconnect_hold = cpu_to_be32(sess->reconnect_hold & 0xffff);
	addr_to_sbp2_pointer(login->tgt_agt->handler.offset,
		&response->command_block_agent);

	ret = sbp_run_transaction(sess->card, TCODE_WRITE_BLOCK_REQUEST,
		sess->node_id, sess->generation, sess->speed,
		sbp2_pointer_to_addr(&req->orb.ptr2), response,
		login_response_len);
	if (ret != RCODE_COMPLETE) {
		pr_debug("failed to write login response block: %x\n", ret);

		kfree(response);
		sbp_login_release(login, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	kfree(response);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}
static void sbp_management_request_query_logins(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	pr_notice("QUERY LOGINS not implemented\n");
	/* FIXME: implement */

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
}
static void sbp_management_request_reconnect(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	int ret;
	u64 guid;
	struct sbp_login_descriptor *login;

	ret = read_peer_guid(&guid, req);
	if (ret != RCODE_COMPLETE) {
		pr_warn("failed to read peer GUID: %d\n", ret);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	pr_notice("mgt_agent RECONNECT from %016llx\n", guid);

	login = sbp_login_find_by_id(tpg,
		RECONNECT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)));

	if (!login) {
		pr_err("mgt_agent RECONNECT unknown login ID\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	if (login->sess->guid != guid) {
		pr_err("mgt_agent RECONNECT login GUID doesn't match\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	spin_lock_bh(&login->sess->lock);
	if (login->sess->card)
		fw_card_put(login->sess->card);

	/* update the node details */
	login->sess->generation = req->generation;
	login->sess->node_id = req->node_addr;
	login->sess->card = fw_card_get(req->card);
	login->sess->speed = req->speed;
	spin_unlock_bh(&login->sess->lock);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}
static void sbp_management_request_logout(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	int id;
	struct sbp_login_descriptor *login;

	id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));

	login = sbp_login_find_by_id(tpg, id);
	if (!login) {
		pr_warn("cannot find login: %d\n", id);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LOGIN_ID_UNKNOWN));
		return;
	}

	pr_info("mgt_agent LOGOUT from LUN %d session %d\n",
		login->login_lun, login->login_id);

	if (req->node_addr != login->sess->node_id) {
		pr_warn("logout from different node ID\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	sbp_login_release(login, true);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}
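/*
 * Check whether the session's card is still valid and whether a bus reset
 * changed the generation; if so, invalidate the node ID and arm the
 * reconnect timer based on the negotiated reconnect_hold.
 */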
static void session_check_for_reset(struct sbp_session *sess)
{
	bool card_valid = false;

	spin_lock_bh(&sess->lock);

	if (sess->card) {
		spin_lock_irq(&sess->card->lock);
		card_valid = (sess->card->local_node != NULL);
		spin_unlock_irq(&sess->card->lock);

		if (!card_valid) {
			fw_card_put(sess->card);
			sess->card = NULL;
		}
	}

	if (!card_valid || (sess->generation != sess->card->generation)) {
		pr_info("Waiting for reconnect from node: %016llx\n",
				sess->guid);

		sess->node_id = -1;
		sess->reconnect_expires = get_jiffies_64() +
			((sess->reconnect_hold + 1) * HZ);
	}

	spin_unlock_bh(&sess->lock);
}
static void session_reconnect_expired(struct sbp_session *sess)
{
	struct sbp_login_descriptor *login, *temp;
	LIST_HEAD(login_list);

	pr_info("Reconnect timer expired for node: %016llx\n", sess->guid);

	spin_lock_bh(&sess->lock);
	list_for_each_entry_safe(login, temp, &sess->login_list, link) {
		login->sess = NULL;
		list_move_tail(&login->link, &login_list);
	}
	spin_unlock_bh(&sess->lock);

	list_for_each_entry_safe(login, temp, &login_list, link) {
		list_del(&login->link);
		sbp_login_release(login, false);
	}

	sbp_session_release(sess, false);
}
static void session_maintenance_work(struct work_struct *work)
{
	struct sbp_session *sess = container_of(work, struct sbp_session,
			maint_work.work);

	/* could be called while tearing down the session */
	spin_lock_bh(&sess->lock);
	if (list_empty(&sess->login_list)) {
		spin_unlock_bh(&sess->lock);
		return;
	}
	spin_unlock_bh(&sess->lock);

	if (sess->node_id != -1) {
		/* check for bus reset and make node_id invalid */
		session_check_for_reset(sess);

		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	} else if (!time_after64(get_jiffies_64(), sess->reconnect_expires)) {
		/* still waiting for reconnect */
		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	} else {
		/* reconnect timeout has expired */
		session_reconnect_expired(sess);
	}
}
static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	int state;

	switch (tcode) {
	case TCODE_READ_QUADLET_REQUEST:
		pr_debug("tgt_agent AGENT_STATE READ\n");

		spin_lock_bh(&agent->lock);
		state = agent->state;
		spin_unlock_bh(&agent->lock);

		*(__be32 *)data = cpu_to_be32(state);

		return RCODE_COMPLETE;

	case TCODE_WRITE_QUADLET_REQUEST:
		/* ignored */
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}
static int tgt_agent_rw_agent_reset(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		pr_debug("tgt_agent AGENT_RESET\n");
		spin_lock_bh(&agent->lock);
		agent->state = AGENT_STATE_RESET;
		spin_unlock_bh(&agent->lock);
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}
static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	struct sbp2_pointer *ptr = data;

	switch (tcode) {
	case TCODE_WRITE_BLOCK_REQUEST:
		spin_lock_bh(&agent->lock);
		if (agent->state != AGENT_STATE_SUSPENDED &&
				agent->state != AGENT_STATE_RESET) {
			spin_unlock_bh(&agent->lock);
			pr_notice("Ignoring ORB_POINTER write while active.\n");
			return RCODE_CONFLICT_ERROR;
		}
		agent->state = AGENT_STATE_ACTIVE;
		spin_unlock_bh(&agent->lock);

		agent->orb_pointer = sbp2_pointer_to_addr(ptr);
		agent->doorbell = false;

		pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n",
				agent->orb_pointer);

		queue_work(system_unbound_wq, &agent->work);

		return RCODE_COMPLETE;

	case TCODE_READ_BLOCK_REQUEST:
		pr_debug("tgt_agent ORB_POINTER READ\n");
		spin_lock_bh(&agent->lock);
		addr_to_sbp2_pointer(agent->orb_pointer, ptr);
		spin_unlock_bh(&agent->lock);
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}
static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		spin_lock_bh(&agent->lock);
		if (agent->state != AGENT_STATE_SUSPENDED) {
			spin_unlock_bh(&agent->lock);
			pr_debug("Ignoring DOORBELL while active.\n");
			return RCODE_CONFLICT_ERROR;
		}
		agent->state = AGENT_STATE_ACTIVE;
		spin_unlock_bh(&agent->lock);

		agent->doorbell = true;

		pr_debug("tgt_agent DOORBELL\n");

		queue_work(system_unbound_wq, &agent->work);

		return RCODE_COMPLETE;

	case TCODE_READ_QUADLET_REQUEST:
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}
static int tgt_agent_rw_unsolicited_status_enable(struct fw_card *card,
		int tcode, void *data, struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		pr_debug("tgt_agent UNSOLICITED_STATUS_ENABLE\n");
		/* ignored as we don't send unsolicited status */
		return RCODE_COMPLETE;

	case TCODE_READ_QUADLET_REQUEST:
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}
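/*
 * Command block agent register map (0x20 bytes, matching
 * agent->handler.length):
 *   0x00  AGENT_STATE
 *   0x04  AGENT_RESET
 *   0x08  ORB_POINTER (8 bytes)
 *   0x10  DOORBELL
 *   0x14  UNSOLICITED_STATUS_ENABLE
 * tgt_agent_rw() below validates the requester and dispatches incoming
 * transactions to the per-register handlers above.
 */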
static void tgt_agent_rw(struct fw_card *card, struct fw_request *request,
		int tcode, int destination, int source, int generation,
		unsigned long long offset, void *data, size_t length,
		void *callback_data)
{
	struct sbp_target_agent *agent = callback_data;
	struct sbp_session *sess = agent->login->sess;
	int sess_gen, sess_node, rcode;

	spin_lock_bh(&sess->lock);
	sess_gen = sess->generation;
	sess_node = sess->node_id;
	spin_unlock_bh(&sess->lock);

	if (generation != sess_gen) {
		pr_notice("ignoring request with wrong generation\n");
		rcode = RCODE_TYPE_ERROR;
		goto out;
	}

	if (source != sess_node) {
		pr_notice("ignoring request from foreign node (%x != %x)\n",
				source, sess_node);
		rcode = RCODE_TYPE_ERROR;
		goto out;
	}

	/* turn offset into the offset from the start of the block */
	offset -= agent->handler.offset;

	if (offset == 0x00 && length == 4) {
		/* AGENT_STATE */
		rcode = tgt_agent_rw_agent_state(card, tcode, data, agent);
	} else if (offset == 0x04 && length == 4) {
		/* AGENT_RESET */
		rcode = tgt_agent_rw_agent_reset(card, tcode, data, agent);
	} else if (offset == 0x08 && length == 8) {
		/* ORB_POINTER */
		rcode = tgt_agent_rw_orb_pointer(card, tcode, data, agent);
	} else if (offset == 0x10 && length == 4) {
		/* DOORBELL */
		rcode = tgt_agent_rw_doorbell(card, tcode, data, agent);
	} else if (offset == 0x14 && length == 4) {
		/* UNSOLICITED_STATUS_ENABLE */
		rcode = tgt_agent_rw_unsolicited_status_enable(card, tcode,
				data, agent);
	} else {
		rcode = RCODE_ADDRESS_ERROR;
	}

out:
	fw_send_response(card, request, rcode);
}
static void sbp_handle_command(struct sbp_target_request *);
static int sbp_send_status(struct sbp_target_request *);
static void sbp_free_request(struct sbp_target_request *);
static void tgt_agent_process_work(struct work_struct *work)
{
	struct sbp_target_request *req =
		container_of(work, struct sbp_target_request, work);

	pr_debug("tgt_orb ptr:0x%llx next_ORB:0x%llx data_descriptor:0x%llx misc:0x%x\n",
			req->orb_pointer,
			sbp2_pointer_to_addr(&req->orb.next_orb),
			sbp2_pointer_to_addr(&req->orb.data_descriptor),
			be32_to_cpu(req->orb.misc));

	if (req->orb_pointer >> 32)
		pr_debug("ORB with high bits set\n");

	switch (ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc))) {
	case 0: /* Format specified by this standard */
		sbp_handle_command(req);
		break;
	case 1: /* Reserved for future standardization */
	case 2: /* Vendor-dependent */
		req->status.status |= cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_DEAD(0) |
				STATUS_BLOCK_LEN(1) |
				STATUS_BLOCK_SBP_STATUS(
					SBP_STATUS_REQ_TYPE_NOTSUPP));
		sbp_send_status(req);
		sbp_free_request(req);
		break;
	case 3: /* Dummy ORB */
		req->status.status |= cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_DEAD(0) |
				STATUS_BLOCK_LEN(1) |
				STATUS_BLOCK_SBP_STATUS(
					SBP_STATUS_DUMMY_ORB_COMPLETE));
		sbp_send_status(req);
		sbp_free_request(req);
		break;
	}
}
/* used to double-check we haven't been issued an AGENT_RESET */
static inline bool tgt_agent_check_active(struct sbp_target_agent *agent)
{
	bool active;

	spin_lock_bh(&agent->lock);
	active = (agent->state == AGENT_STATE_ACTIVE);
	spin_unlock_bh(&agent->lock);

	return active;
}
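/*
 * Fetch ORBs from the initiator, following the next_ORB chain. Each
 * fetched ORB is handed off to tgt_agent_process_work() unless a doorbell
 * ring asked us only to re-check the next_ORB field of the current ORB.
 */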
static void tgt_agent_fetch_work(struct work_struct *work)
{
	struct sbp_target_agent *agent =
		container_of(work, struct sbp_target_agent, work);
	struct sbp_session *sess = agent->login->sess;
	struct sbp_target_request *req;
	int ret;
	bool doorbell = agent->doorbell;
	u64 next_orb = agent->orb_pointer;

	while (next_orb && tgt_agent_check_active(agent)) {
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req) {
			spin_lock_bh(&agent->lock);
			agent->state = AGENT_STATE_DEAD;
			spin_unlock_bh(&agent->lock);
			return;
		}

		req->login = agent->login;
		req->orb_pointer = next_orb;

		req->status.status = cpu_to_be32(STATUS_BLOCK_ORB_OFFSET_HIGH(
					req->orb_pointer >> 32));
		req->status.orb_low = cpu_to_be32(
				req->orb_pointer & 0xfffffffc);

		/* read in the ORB */
		ret = sbp_run_transaction(sess->card, TCODE_READ_BLOCK_REQUEST,
				sess->node_id, sess->generation, sess->speed,
				req->orb_pointer, &req->orb, sizeof(req->orb));
		if (ret != RCODE_COMPLETE) {
			pr_debug("tgt_orb fetch failed: %x\n", ret);
			req->status.status |= cpu_to_be32(
					STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_FINISHED) |
					STATUS_BLOCK_RESP(
						STATUS_RESP_TRANSPORT_FAILURE) |
					STATUS_BLOCK_DEAD(1) |
					STATUS_BLOCK_LEN(1) |
					STATUS_BLOCK_SBP_STATUS(
						SBP_STATUS_UNSPECIFIED_ERROR));
			spin_lock_bh(&agent->lock);
			agent->state = AGENT_STATE_DEAD;
			spin_unlock_bh(&agent->lock);

			sbp_send_status(req);
			sbp_free_request(req);
			return;
		}

		/* check the next_ORB field */
		if (be32_to_cpu(req->orb.next_orb.high) & 0x80000000) {
			next_orb = 0;
			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_FINISHED));
		} else {
			next_orb = sbp2_pointer_to_addr(&req->orb.next_orb);
			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_CONTINUING));
		}

		if (tgt_agent_check_active(agent) && !doorbell) {
			INIT_WORK(&req->work, tgt_agent_process_work);
			queue_work(system_unbound_wq, &req->work);
		} else {
			/* don't process this request, just check next_ORB */
			sbp_free_request(req);
		}

		spin_lock_bh(&agent->lock);
		doorbell = agent->doorbell = false;

		/* check if we should carry on processing */
		if (next_orb)
			agent->orb_pointer = next_orb;
		else
			agent->state = AGENT_STATE_SUSPENDED;

		spin_unlock_bh(&agent->lock);
	}
}
static struct sbp_target_agent *sbp_target_agent_register(
		struct sbp_login_descriptor *login)
{
	struct sbp_target_agent *agent;
	int ret;

	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
	if (!agent)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&agent->lock);

	agent->handler.length = 0x20;
	agent->handler.address_callback = tgt_agent_rw;
	agent->handler.callback_data = agent;

	agent->login = login;
	agent->state = AGENT_STATE_RESET;
	INIT_WORK(&agent->work, tgt_agent_fetch_work);
	agent->orb_pointer = 0;
	agent->doorbell = false;

	ret = fw_core_add_address_handler(&agent->handler,
			&sbp_register_region);
	if (ret < 0) {
		kfree(agent);
		return ERR_PTR(ret);
	}

	return agent;
}

static void sbp_target_agent_unregister(struct sbp_target_agent *agent)
{
	fw_core_remove_address_handler(&agent->handler);
	cancel_work_sync(&agent->work);
	kfree(agent);
}
/*
 * Simple wrapper around fw_run_transaction that retries the transaction
 * several times in case of failure, with an increasing (quadratic) backoff.
 */
static int sbp_run_transaction(struct fw_card *card, int tcode, int destination_id,
		int generation, int speed, unsigned long long offset,
		void *payload, size_t length)
{
	int attempt, ret, delay;

	for (attempt = 1; attempt <= 5; attempt++) {
		ret = fw_run_transaction(card, tcode, destination_id,
				generation, speed, offset, payload, length);

		switch (ret) {
		case RCODE_COMPLETE:
		case RCODE_TYPE_ERROR:
		case RCODE_ADDRESS_ERROR:
		case RCODE_GENERATION:
			return ret;

		default:
			delay = 5 * attempt * attempt;
			usleep_range(delay, delay * 2);
		}
	}

	return ret;
}
/*
 * Wrapper around sbp_run_transaction that gets the card, destination,
 * generation and speed out of the request's session.
 */
static int sbp_run_request_transaction(struct sbp_target_request *req,
		int tcode, unsigned long long offset, void *payload,
		size_t length)
{
	struct sbp_login_descriptor *login = req->login;
	struct sbp_session *sess = login->sess;
	struct fw_card *card;
	int node_id, generation, speed, ret;

	spin_lock_bh(&sess->lock);
	card = fw_card_get(sess->card);
	node_id = sess->node_id;
	generation = sess->generation;
	speed = sess->speed;
	spin_unlock_bh(&sess->lock);

	ret = sbp_run_transaction(card, tcode, node_id, generation, speed,
			offset, payload, length);

	fw_card_put(card);

	return ret;
}
static int sbp_fetch_command(struct sbp_target_request *req)
{
	int ret, cmd_len, copy_len;

	cmd_len = scsi_command_size(req->orb.command_block);

	req->cmd_buf = kmalloc(cmd_len, GFP_KERNEL);
	if (!req->cmd_buf)
		return -ENOMEM;

	memcpy(req->cmd_buf, req->orb.command_block,
		min_t(int, cmd_len, sizeof(req->orb.command_block)));

	if (cmd_len > sizeof(req->orb.command_block)) {
		pr_debug("sbp_fetch_command: filling in long command\n");
		copy_len = cmd_len - sizeof(req->orb.command_block);

		ret = sbp_run_request_transaction(req,
				TCODE_READ_BLOCK_REQUEST,
				req->orb_pointer + sizeof(req->orb),
				req->cmd_buf + sizeof(req->orb.command_block),
				copy_len);
		if (ret != RCODE_COMPLETE)
			return -EIO;
	}

	return 0;
}
static int sbp_fetch_page_table(struct sbp_target_request *req)
{
	int pg_tbl_sz, ret;
	struct sbp_page_table_entry *pg_tbl;

	if (!CMDBLK_ORB_PG_TBL_PRESENT(be32_to_cpu(req->orb.misc)))
		return 0;

	pg_tbl_sz = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)) *
		sizeof(struct sbp_page_table_entry);

	pg_tbl = kmalloc(pg_tbl_sz, GFP_KERNEL);
	if (!pg_tbl)
		return -ENOMEM;

	ret = sbp_run_request_transaction(req, TCODE_READ_BLOCK_REQUEST,
			sbp2_pointer_to_addr(&req->orb.data_descriptor),
			pg_tbl, pg_tbl_sz);
	if (ret != RCODE_COMPLETE) {
		kfree(pg_tbl);
		return -EIO;
	}

	req->pg_tbl = pg_tbl;
	return 0;
}
static void sbp_calc_data_length_direction(struct sbp_target_request *req,
	u32 *data_len, enum dma_data_direction *data_dir)
{
	int data_size, direction, idx;

	data_size = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
	direction = CMDBLK_ORB_DIRECTION(be32_to_cpu(req->orb.misc));

	if (!data_size) {
		*data_len = 0;
		*data_dir = DMA_NONE;
		return;
	}

	*data_dir = direction ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	if (req->pg_tbl) {
		*data_len = 0;
		for (idx = 0; idx < data_size; idx++) {
			*data_len += be16_to_cpu(
					req->pg_tbl[idx].segment_length);
		}
	} else {
		*data_len = data_size;
	}
}
static void sbp_handle_command(struct sbp_target_request *req)
{
	struct sbp_login_descriptor *login = req->login;
	struct sbp_session *sess = login->sess;
	int ret, unpacked_lun;
	u32 data_length;
	enum dma_data_direction data_dir;

	ret = sbp_fetch_command(req);
	if (ret) {
		pr_debug("sbp_handle_command: fetch command failed: %d\n", ret);
		goto err;
	}

	ret = sbp_fetch_page_table(req);
	if (ret) {
		pr_debug("sbp_handle_command: fetch page table failed: %d\n",
			ret);
		goto err;
	}

	unpacked_lun = req->login->login_lun;
	sbp_calc_data_length_direction(req, &data_length, &data_dir);

	pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n",
			req->orb_pointer, unpacked_lun, data_length, data_dir);

	/* only used for printk until we do TMRs */
	req->se_cmd.tag = req->orb_pointer;
	if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
			      req->sense_buf, unpacked_lun, data_length,
			      TCM_SIMPLE_TAG, data_dir, 0))
		goto err;

	return;

err:
	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
		STATUS_BLOCK_DEAD(0) |
		STATUS_BLOCK_LEN(1) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
	sbp_send_status(req);
	sbp_free_request(req);
}
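/*
 * Transfer the data phase of the request directly to/from the initiator's
 * memory: either walk the fetched page table segment by segment or use
 * the direct data descriptor, splitting each segment into transactions
 * no larger than max_payload.
 */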
/*
 * DMA_TO_DEVICE = read from initiator (SCSI WRITE)
 * DMA_FROM_DEVICE = write to initiator (SCSI READ)
 */
static int sbp_rw_data(struct sbp_target_request *req)
{
	struct sbp_session *sess = req->login->sess;
	int tcode, sg_miter_flags, max_payload, pg_size, speed, node_id,
		generation, num_pte, length, tfr_length,
		rcode = RCODE_COMPLETE;
	struct sbp_page_table_entry *pte;
	unsigned long long offset;
	struct fw_card *card;
	struct sg_mapping_iter iter;

	if (req->se_cmd.data_direction == DMA_FROM_DEVICE) {
		tcode = TCODE_WRITE_BLOCK_REQUEST;
		sg_miter_flags = SG_MITER_FROM_SG;
	} else {
		tcode = TCODE_READ_BLOCK_REQUEST;
		sg_miter_flags = SG_MITER_TO_SG;
	}

	max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD(be32_to_cpu(req->orb.misc));
	speed = CMDBLK_ORB_SPEED(be32_to_cpu(req->orb.misc));

	pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc));
	if (pg_size) {
		pr_err("sbp_run_transaction: page size ignored\n");
		pg_size = 0x100 << pg_size;
	}

	spin_lock_bh(&sess->lock);
	card = fw_card_get(sess->card);
	node_id = sess->node_id;
	generation = sess->generation;
	spin_unlock_bh(&sess->lock);

	if (req->pg_tbl) {
		pte = req->pg_tbl;
		num_pte = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));

		offset = 0;
		length = 0;
	} else {
		pte = NULL;
		num_pte = 0;

		offset = sbp2_pointer_to_addr(&req->orb.data_descriptor);
		length = req->se_cmd.data_length;
	}

	sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents,
		sg_miter_flags);

	while (length || num_pte) {
		if (!length) {
			offset = (u64)be16_to_cpu(pte->segment_base_hi) << 32 |
				be32_to_cpu(pte->segment_base_lo);
			length = be16_to_cpu(pte->segment_length);

			pte++;
			num_pte--;
		}

		sg_miter_next(&iter);

		tfr_length = min3(length, max_payload, (int)iter.length);

		/* FIXME: take page_size into account */

		rcode = sbp_run_transaction(card, tcode, node_id,
				generation, speed,
				offset, iter.addr, tfr_length);

		if (rcode != RCODE_COMPLETE)
			break;

		length -= tfr_length;
		offset += tfr_length;
		iter.consumed = tfr_length;
	}

	sg_miter_stop(&iter);
	fw_card_put(card);

	if (rcode == RCODE_COMPLETE) {
		WARN_ON(length != 0);
		return 0;
	} else {
		return -EIO;
	}
}
static int sbp_send_status(struct sbp_target_request *req)
{
	int ret, length;
	struct sbp_login_descriptor *login = req->login;

	length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4;

	ret = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
			login->status_fifo_addr, &req->status, length);
	if (ret != RCODE_COMPLETE) {
		pr_debug("sbp_send_status: write failed: 0x%x\n", ret);
		return -EIO;
	}

	pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n",
			req->orb_pointer);

	return 0;
}
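/*
 * Repack fixed-format SCSI sense data into the sense fields of the SBP-2
 * status block. Descriptor-format sense is not supported and aborts the
 * request instead.
 */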
static void sbp_sense_mangle(struct sbp_target_request *req)
{
	struct se_cmd *se_cmd = &req->se_cmd;
	u8 *sense = req->sense_buf;
	u8 *status = req->status.data;

	WARN_ON(se_cmd->scsi_sense_length < 18);

	switch (sense[0] & 0x7f) {		/* sfmt */
	case 0x70: /* current, fixed */
		status[0] = 0 << 6;
		break;
	case 0x71: /* deferred, fixed */
		status[0] = 1 << 6;
		break;
	case 0x72: /* current, descriptor */
	case 0x73: /* deferred, descriptor */
	default:
		/*
		 * TODO: SBP-3 specifies what we should do with descriptor
		 * format sense data
		 */
		pr_err("sbp_send_sense: unknown sense format: 0x%x\n",
			sense[0]);
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQUEST_ABORTED));
		return;
	}

	status[0] |= se_cmd->scsi_status & 0x3f;/* status */
	status[1] =
		(sense[0] & 0x80) |		/* valid */
		((sense[2] & 0xe0) >> 1) |	/* mark, eom, ili */
		(sense[2] & 0x0f);		/* sense_key */
	status[2] = se_cmd->scsi_asc;		/* sense_code */
	status[3] = se_cmd->scsi_ascq;		/* sense_qualifier */

	/* information */
	status[4] = sense[3];
	status[5] = sense[4];
	status[6] = sense[5];
	status[7] = sense[6];

	/* CDB-dependent */
	status[8] = sense[8];
	status[9] = sense[9];
	status[10] = sense[10];
	status[11] = sense[11];

	/* fru */
	status[12] = sense[14];

	/* sense_key-dependent */
	status[13] = sense[15];
	status[14] = sense[16];
	status[15] = sense[17];

	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_DEAD(0) |
		STATUS_BLOCK_LEN(5) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}
static int sbp_send_sense(struct sbp_target_request *req)
{
	struct se_cmd *se_cmd = &req->se_cmd;

	if (se_cmd->scsi_sense_length) {
		sbp_sense_mangle(req);
	} else {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
	}

	return sbp_send_status(req);
}
static void sbp_free_request(struct sbp_target_request *req)
{
	kfree(req->pg_tbl);
	kfree(req->cmd_buf);
	kfree(req);
}
static void sbp_mgt_agent_process(struct work_struct *work)
{
	struct sbp_management_agent *agent =
		container_of(work, struct sbp_management_agent, work);
	struct sbp_management_request *req = agent->request;
	int ret;
	int status_data_len = 0;

	/* fetch the ORB from the initiator */
	ret = sbp_run_transaction(req->card, TCODE_READ_BLOCK_REQUEST,
		req->node_addr, req->generation, req->speed,
		agent->orb_offset, &req->orb, sizeof(req->orb));
	if (ret != RCODE_COMPLETE) {
		pr_debug("mgt_orb fetch failed: %x\n", ret);
		goto out;
	}

	pr_debug("mgt_orb ptr1:0x%llx ptr2:0x%llx misc:0x%x len:0x%x status_fifo:0x%llx\n",
		sbp2_pointer_to_addr(&req->orb.ptr1),
		sbp2_pointer_to_addr(&req->orb.ptr2),
		be32_to_cpu(req->orb.misc), be32_to_cpu(req->orb.length),
		sbp2_pointer_to_addr(&req->orb.status_fifo));

	if (!ORB_NOTIFY(be32_to_cpu(req->orb.misc)) ||
		ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc)) != 0) {
		pr_err("mgt_orb bad request\n");
		goto out;
	}

	switch (MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))) {
	case MANAGEMENT_ORB_FUNCTION_LOGIN:
		sbp_management_request_login(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS:
		sbp_management_request_query_logins(agent, req,
				&status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_RECONNECT:
		sbp_management_request_reconnect(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_SET_PASSWORD:
		pr_notice("SET PASSWORD not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_LOGOUT:
		sbp_management_request_logout(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK:
		pr_notice("ABORT TASK not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET:
		pr_notice("ABORT TASK SET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET:
		pr_notice("LOGICAL UNIT RESET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_TARGET_RESET:
		pr_notice("TARGET RESET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	default:
		pr_notice("unknown management function 0x%x\n",
			MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc)));

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;
	}

	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_SRC(1) | /* Response to ORB, next_ORB absent */
		STATUS_BLOCK_LEN(DIV_ROUND_UP(status_data_len, 4) + 1) |
		STATUS_BLOCK_ORB_OFFSET_HIGH(agent->orb_offset >> 32));
	req->status.orb_low = cpu_to_be32(agent->orb_offset);

	/* write the status block back to the initiator */
	ret = sbp_run_transaction(req->card, TCODE_WRITE_BLOCK_REQUEST,
		req->node_addr, req->generation, req->speed,
		sbp2_pointer_to_addr(&req->orb.status_fifo),
		&req->status, 8 + status_data_len);
	if (ret != RCODE_COMPLETE) {
		pr_debug("mgt_orb status write failed: %x\n", ret);
		goto out;
	}

out:
	fw_card_put(req->card);
	kfree(req);

	spin_lock_bh(&agent->lock);
	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
	spin_unlock_bh(&agent->lock);
}
static void sbp_mgt_agent_rw(struct fw_card *card,
	struct fw_request *request, int tcode, int destination, int source,
	int generation, unsigned long long offset, void *data, size_t length,
	void *callback_data)
{
	struct sbp_management_agent *agent = callback_data;
	struct sbp2_pointer *ptr = data;
	int rcode = RCODE_ADDRESS_ERROR;

	if (!agent->tport->enable)
		goto out;

	if ((offset != agent->handler.offset) || (length != 8))
		goto out;

	if (tcode == TCODE_WRITE_BLOCK_REQUEST) {
		struct sbp_management_request *req;
		int prev_state;

		spin_lock_bh(&agent->lock);
		prev_state = agent->state;
		agent->state = MANAGEMENT_AGENT_STATE_BUSY;
		spin_unlock_bh(&agent->lock);

		if (prev_state == MANAGEMENT_AGENT_STATE_BUSY) {
			pr_notice("ignoring management request while busy\n");
			rcode = RCODE_CONFLICT_ERROR;
			goto out;
		}

		req = kzalloc(sizeof(*req), GFP_ATOMIC);
		if (!req) {
			rcode = RCODE_CONFLICT_ERROR;
			goto out;
		}

		req->card = fw_card_get(card);
		req->generation = generation;
		req->node_addr = source;
		req->speed = fw_get_request_speed(request);

		agent->orb_offset = sbp2_pointer_to_addr(ptr);
		agent->request = req;

		queue_work(system_unbound_wq, &agent->work);
		rcode = RCODE_COMPLETE;
	} else if (tcode == TCODE_READ_BLOCK_REQUEST) {
		addr_to_sbp2_pointer(agent->orb_offset, ptr);
		rcode = RCODE_COMPLETE;
	} else {
		rcode = RCODE_TYPE_ERROR;
	}

out:
	fw_send_response(card, request, rcode);
}
static struct sbp_management_agent *sbp_management_agent_register(
		struct sbp_tport *tport)
{
	int ret;
	struct sbp_management_agent *agent;

	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
	if (!agent)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&agent->lock);
	agent->tport = tport;
	agent->handler.length = 0x08;
	agent->handler.address_callback = sbp_mgt_agent_rw;
	agent->handler.callback_data = agent;
	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
	INIT_WORK(&agent->work, sbp_mgt_agent_process);
	agent->orb_offset = 0;
	agent->request = NULL;

	ret = fw_core_add_address_handler(&agent->handler,
			&sbp_register_region);
	if (ret < 0) {
		kfree(agent);
		return ERR_PTR(ret);
	}

	return agent;
}

static void sbp_management_agent_unregister(struct sbp_management_agent *agent)
{
	fw_core_remove_address_handler(&agent->handler);
	cancel_work_sync(&agent->work);
	kfree(agent);
}
static int sbp_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static int sbp_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}

static char *sbp_get_fabric_name(void)
{
	return "sbp";
}

static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	return &tport->tport_name[0];
}

static u16 sbp_get_tag(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	return tpg->tport_tpgt;
}

static u32 sbp_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static void sbp_release_cmd(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	sbp_free_request(req);
}
static int sbp_shutdown_session(struct se_session *se_sess)
{
	return 1;
}

static void sbp_close_session(struct se_session *se_sess)
{
	return;
}

static u32 sbp_sess_get_index(struct se_session *se_sess)
{
	return 0;
}
static int sbp_write_pending(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);
	int ret;

	ret = sbp_rw_data(req);
	if (ret) {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(
				STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(
				SBP_STATUS_UNSPECIFIED_ERROR));
		sbp_send_status(req);
		return ret;
	}

	target_execute_cmd(se_cmd);
	return 0;
}
static int sbp_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}

static void sbp_set_default_node_attrs(struct se_node_acl *nacl)
{
	return;
}

static int sbp_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}
static int sbp_queue_data_in(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);
	int ret;

	ret = sbp_rw_data(req);
	if (ret) {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		sbp_send_status(req);
		return ret;
	}

	return sbp_send_sense(req);
}
/*
 * Called after command (no data transfer) or after the write (to device)
 * operation is completed
 */
static int sbp_queue_status(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	return sbp_send_sense(req);
}

static void sbp_queue_tm_rsp(struct se_cmd *se_cmd)
{
}

static void sbp_aborted_task(struct se_cmd *se_cmd)
{
	return;
}

static int sbp_check_stop_free(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	transport_generic_free_cmd(&req->se_cmd, 0);
	return 1;
}
static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)
{
	struct se_lun *lun;
	int count = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link)
		count++;
	rcu_read_unlock();

	return count;
}
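/*
 * Rebuild the SBP unit directory published in the local config ROM:
 * management agent address, unit characteristics, reconnect timeout,
 * one logical_unit_number entry per LUN, and the unit unique ID leaf.
 * Called whenever LUNs or the relevant attributes change.
 */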
static int sbp_update_unit_directory(struct sbp_tport *tport)
{
	struct se_lun *lun;
	int num_luns, num_entries, idx = 0, mgt_agt_addr, ret;
	u32 *data;

	if (tport->unit_directory.data) {
		fw_core_remove_descriptor(&tport->unit_directory);
		kfree(tport->unit_directory.data);
		tport->unit_directory.data = NULL;
	}

	if (!tport->enable || !tport->tpg)
		return 0;

	num_luns = sbp_count_se_tpg_luns(&tport->tpg->se_tpg);

	/*
	 * Number of entries in the final unit directory:
	 *  - all of those in the template
	 *  - management_agent
	 *  - unit_characteristics
	 *  - reconnect_timeout
	 *  - unit unique ID
	 *  - one for each LUN
	 *
	 *  MUST NOT include leaf or sub-directory entries
	 */
	num_entries = ARRAY_SIZE(sbp_unit_directory_template) + 4 + num_luns;

	if (tport->directory_id != -1)
		num_entries++;

	/* allocate num_entries + 4 for the header and unique ID leaf */
	data = kcalloc((num_entries + 4), sizeof(u32), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* directory_length */
	data[idx++] = num_entries << 16;

	/* directory_id */
	if (tport->directory_id != -1)
		data[idx++] = (CSR_DIRECTORY_ID << 24) | tport->directory_id;

	/* unit directory template */
	memcpy(&data[idx], sbp_unit_directory_template,
			sizeof(sbp_unit_directory_template));
	idx += ARRAY_SIZE(sbp_unit_directory_template);

	/* management_agent */
	mgt_agt_addr = (tport->mgt_agt->handler.offset - CSR_REGISTER_BASE) / 4;
	data[idx++] = 0x54000000 | (mgt_agt_addr & 0x00ffffff);

	/* unit_characteristics */
	data[idx++] = 0x3a000000 |
		(((tport->mgt_orb_timeout * 2) << 8) & 0xff00) |
		SBP_ORB_FETCH_SIZE;

	/* reconnect_timeout */
	data[idx++] = 0x3d000000 | (tport->max_reconnect_timeout & 0xffff);

	/* unit unique ID (leaf is just after LUNs) */
	data[idx++] = 0x8d000000 | (num_luns + 1);

	rcu_read_lock();
	hlist_for_each_entry_rcu(lun, &tport->tpg->se_tpg.tpg_lun_hlist, link) {
		struct se_device *dev;
		int type;
		/*
		 * rcu_dereference_raw protected by se_lun->lun_group symlink
		 * reference to se_device->dev_group.
		 */
		dev = rcu_dereference_raw(lun->lun_se_dev);
		type = dev->transport->get_device_type(dev);

		/* logical_unit_number */
		data[idx++] = 0x14000000 |
			((type << 16) & 0x1f0000) |
			(lun->unpacked_lun & 0xffff);
	}
	rcu_read_unlock();

	/* unit unique ID leaf */
	data[idx++] = 2 << 16;
	data[idx++] = tport->guid >> 32;
	data[idx++] = tport->guid;

	tport->unit_directory.length = idx;
	tport->unit_directory.key = (CSR_DIRECTORY | CSR_UNIT) << 24;
	tport->unit_directory.data = data;

	ret = fw_core_add_descriptor(&tport->unit_directory);
	if (ret < 0) {
		kfree(tport->unit_directory.data);
		tport->unit_directory.data = NULL;
	}

	return ret;
}
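/*
 * Parse a WWN of the form "%016llx" (sixteen hex digits, e.g.
 * "0123456789abcdef") into a u64, tolerating a trailing newline.
 */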
static ssize_t sbp_parse_wwn(const char *name, u64 *wwn)
{
	const char *cp;
	char c, nibble;
	int pos = 0, err;

	*wwn = 0;
	for (cp = name; cp < &name[SBP_NAMELEN - 1]; cp++) {
		c = *cp;
		if (c == '\n' && cp[1] == '\0')
			continue;
		err = 2;
		if (pos > 15)
			goto fail;
		err = 3;
		if (isdigit(c))
			nibble = c - '0';
		else if (isxdigit(c))
			nibble = tolower(c) - 'a' + 10;
		else
			goto fail;
		*wwn = (*wwn << 4) | nibble;
		pos++;
	}
	return cp - name;
fail:
	printk(KERN_INFO "err %u len %zu pos %u\n",
			err, cp - name, pos);
	return -1;
}

static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn)
{
	return snprintf(buf, len, "%016llx", wwn);
}

static int sbp_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
{
	u64 guid = 0;

	if (sbp_parse_wwn(name, &guid) < 0)
		return -EINVAL;
	return 0;
}

static int sbp_post_link_lun(
		struct se_portal_group *se_tpg,
		struct se_lun *se_lun)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);

	return sbp_update_unit_directory(tpg->tport);
}

static void sbp_pre_unlink_lun(
		struct se_portal_group *se_tpg,
		struct se_lun *se_lun)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	int ret;

	if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0)
		tport->enable = 0;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		pr_err("unlink LUN: failed to update unit directory\n");
}

static struct se_portal_group *sbp_make_tpg(
		struct se_wwn *wwn,
		struct config_group *group,
		const char *name)
{
	struct sbp_tport *tport =
		container_of(wwn, struct sbp_tport, tport_wwn);

	struct sbp_tpg *tpg;
	unsigned long tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
		return ERR_PTR(-EINVAL);

	if (tport->tpg) {
		pr_err("Only one TPG per Unit is possible.\n");
		return ERR_PTR(-EBUSY);
	}

	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
	if (!tpg) {
		pr_err("Unable to allocate struct sbp_tpg\n");
		return ERR_PTR(-ENOMEM);
	}

	tpg->tport = tport;
	tpg->tport_tpgt = tpgt;
	tport->tpg = tpg;

	/* default attribute values */
	tport->enable = 0;
	tport->directory_id = -1;
	tport->mgt_orb_timeout = 15;
	tport->max_reconnect_timeout = 5;
	tport->max_logins_per_lun = 1;

	tport->mgt_agt = sbp_management_agent_register(tport);
	if (IS_ERR(tport->mgt_agt)) {
		ret = PTR_ERR(tport->mgt_agt);
		goto out_free_tpg;
	}

	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SBP);
	if (ret < 0)
		goto out_unreg_mgt_agt;

	return &tpg->se_tpg;

out_unreg_mgt_agt:
	sbp_management_agent_unregister(tport->mgt_agt);
out_free_tpg:
	tport->tpg = NULL;
	kfree(tpg);
	return ERR_PTR(ret);
}

static void sbp_drop_tpg(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	core_tpg_deregister(se_tpg);
	sbp_management_agent_unregister(tport->mgt_agt);
	tport->tpg = NULL;
	kfree(tpg);
}

static struct se_wwn *sbp_make_tport(
		struct target_fabric_configfs *tf,
		struct config_group *group,
		const char *name)
{
	u64 guid = 0;
	struct sbp_tport *tport;

	if (sbp_parse_wwn(name, &guid) < 0)
		return ERR_PTR(-EINVAL);

	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
	if (!tport) {
		pr_err("Unable to allocate struct sbp_tport\n");
		return ERR_PTR(-ENOMEM);
	}

	tport->guid = guid;
	sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid);

	return &tport->tport_wwn;
}

static void sbp_drop_tport(struct se_wwn *wwn)
{
	struct sbp_tport *tport =
		container_of(wwn, struct sbp_tport, tport_wwn);

	kfree(tport);
}

static ssize_t sbp_wwn_version_show(struct config_item *item, char *page)
{
	return sprintf(page, "FireWire SBP fabric module %s\n", SBP_VERSION);
}

CONFIGFS_ATTR_RO(sbp_wwn_, version);

static struct configfs_attribute *sbp_wwn_attrs[] = {
	&sbp_wwn_attr_version,
	NULL,
};

static ssize_t sbp_tpg_directory_id_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	if (tport->directory_id == -1)
		return sprintf(page, "implicit\n");
	else
		return sprintf(page, "%06x\n", tport->directory_id);
}

static ssize_t sbp_tpg_directory_id_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;

	if (tport->enable) {
		pr_err("Cannot change the directory_id on an active target.\n");
		return -EBUSY;
	}

	if (strstr(page, "implicit") == page) {
		tport->directory_id = -1;
	} else {
		if (kstrtoul(page, 16, &val) < 0)
			return -EINVAL;
		if (val > 0xffffff)
			return -EINVAL;

		tport->directory_id = val;
	}

	return count;
}

static ssize_t sbp_tpg_enable_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	return sprintf(page, "%d\n", tport->enable);
}

static ssize_t sbp_tpg_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val != 0) && (val != 1))
		return -EINVAL;

	if (tport->enable == val)
		return count;

	if (val) {
		if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) {
			pr_err("Cannot enable a target with no LUNs!\n");
			return -EINVAL;
		}
	} else {
		/* XXX: force-shutdown sessions instead? */
		spin_lock_bh(&se_tpg->session_lock);
		if (!list_empty(&se_tpg->tpg_sess_list)) {
			spin_unlock_bh(&se_tpg->session_lock);
			return -EBUSY;
		}
		spin_unlock_bh(&se_tpg->session_lock);
	}

	tport->enable = val;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0) {
		pr_err("Could not update Config ROM\n");
		return ret;
	}

	return count;
}

CONFIGFS_ATTR(sbp_tpg_, directory_id);
CONFIGFS_ATTR(sbp_tpg_, enable);

static struct configfs_attribute *sbp_tpg_base_attrs[] = {
	&sbp_tpg_attr_directory_id,
	&sbp_tpg_attr_enable,
	NULL,
};

static ssize_t sbp_tpg_attrib_mgt_orb_timeout_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	return sprintf(page, "%d\n", tport->mgt_orb_timeout);
}

static ssize_t sbp_tpg_attrib_mgt_orb_timeout_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val < 1) || (val > 127))
		return -EINVAL;

	if (tport->mgt_orb_timeout == val)
		return count;

	tport->mgt_orb_timeout = val;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		return ret;

	return count;
}

static ssize_t sbp_tpg_attrib_max_reconnect_timeout_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	return sprintf(page, "%d\n", tport->max_reconnect_timeout);
}

static ssize_t sbp_tpg_attrib_max_reconnect_timeout_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val < 1) || (val > 32767))
		return -EINVAL;

	if (tport->max_reconnect_timeout == val)
		return count;

	tport->max_reconnect_timeout = val;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		return ret;

	return count;
}

static ssize_t sbp_tpg_attrib_max_logins_per_lun_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	return sprintf(page, "%d\n", tport->max_logins_per_lun);
}

static ssize_t sbp_tpg_attrib_max_logins_per_lun_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val < 1) || (val > 127))
		return -EINVAL;

	/* XXX: also check against current count? */

	tport->max_logins_per_lun = val;

	return count;
}

CONFIGFS_ATTR(sbp_tpg_attrib_, mgt_orb_timeout);
CONFIGFS_ATTR(sbp_tpg_attrib_, max_reconnect_timeout);
CONFIGFS_ATTR(sbp_tpg_attrib_, max_logins_per_lun);

static struct configfs_attribute *sbp_tpg_attrib_attrs[] = {
	&sbp_tpg_attrib_attr_mgt_orb_timeout,
	&sbp_tpg_attrib_attr_max_reconnect_timeout,
	&sbp_tpg_attrib_attr_max_logins_per_lun,
	NULL,
};
static const struct target_core_fabric_ops sbp_ops = {
	.module				= THIS_MODULE,
	.name				= "sbp",
	.get_fabric_name		= sbp_get_fabric_name,
	.tpg_get_wwn			= sbp_get_fabric_wwn,
	.tpg_get_tag			= sbp_get_tag,
	.tpg_check_demo_mode		= sbp_check_true,
	.tpg_check_demo_mode_cache	= sbp_check_true,
	.tpg_check_demo_mode_write_protect = sbp_check_false,
	.tpg_check_prod_mode_write_protect = sbp_check_false,
	.tpg_get_inst_index		= sbp_tpg_get_inst_index,
	.release_cmd			= sbp_release_cmd,
	.shutdown_session		= sbp_shutdown_session,
	.close_session			= sbp_close_session,
	.sess_get_index			= sbp_sess_get_index,
	.write_pending			= sbp_write_pending,
	.write_pending_status		= sbp_write_pending_status,
	.set_default_node_attributes	= sbp_set_default_node_attrs,
	.get_cmd_state			= sbp_get_cmd_state,
	.queue_data_in			= sbp_queue_data_in,
	.queue_status			= sbp_queue_status,
	.queue_tm_rsp			= sbp_queue_tm_rsp,
	.aborted_task			= sbp_aborted_task,
	.check_stop_free		= sbp_check_stop_free,

	.fabric_make_wwn		= sbp_make_tport,
	.fabric_drop_wwn		= sbp_drop_tport,
	.fabric_make_tpg		= sbp_make_tpg,
	.fabric_drop_tpg		= sbp_drop_tpg,
	.fabric_post_link		= sbp_post_link_lun,
	.fabric_pre_unlink		= sbp_pre_unlink_lun,
	.fabric_make_np			= NULL,
	.fabric_drop_np			= NULL,
	.fabric_init_nodeacl		= sbp_init_nodeacl,

	.tfc_wwn_attrs			= sbp_wwn_attrs,
	.tfc_tpg_base_attrs		= sbp_tpg_base_attrs,
	.tfc_tpg_attrib_attrs		= sbp_tpg_attrib_attrs,
};
static int __init sbp_init(void)
{
	return target_register_template(&sbp_ops);
}

static void __exit sbp_exit(void)
{
	target_unregister_template(&sbp_ops);
}

MODULE_DESCRIPTION("FireWire SBP fabric driver");
MODULE_LICENSE("GPL");
module_init(sbp_init);
module_exit(sbp_exit);