/*
 * SBP2 target driver (SCSI over IEEE1394 in target mode)
 *
 * Copyright (C) 2011 Chris Boot <bootc@bootc.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#define KMSG_COMPONENT "sbp_target"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/delay.h>	/* usleep_range() */
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include <asm/unaligned.h>

#include "sbp_target.h"

/* Local pointer to allocated TCM configfs fabric module */
static struct target_fabric_configfs *sbp_fabric_configfs;

/* FireWire address region for management and command block address handlers */
static const struct fw_address_region sbp_register_region = {
	.start	= CSR_REGISTER_BASE + 0x10000,
	.end	= 0x1000000000000ULL,
};

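/*
 * Note: this region begins just above the architected CSR register space,
 * so fw_core_add_address_handler() is free to place the management and
 * command block agent registers anywhere up to the 48-bit address limit.
 */
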
static const u32 sbp_unit_directory_template[] = {
	0x1200609e, /* unit_specifier_id: NCITS/T10 */
	0x13010483, /* unit_sw_version: 1155D Rev 4 */
	0x3800609e, /* command_set_specifier_id: NCITS/T10 */
	0x390104d8, /* command_set: SPC-2 */
	0x3b000000, /* command_set_revision: 0 */
	0x3c000001, /* firmware_revision: 1 */
};

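/*
 * Each quadlet above is a standard IEEE 1212 immediate directory entry:
 * the most significant byte is the key (e.g. 0x12 = unit_specifier_id)
 * and the low 24 bits carry the immediate value.
 */
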
#define SESSION_MAINTENANCE_INTERVAL HZ

static atomic_t login_id = ATOMIC_INIT(0);

static void session_maintenance_work(struct work_struct *);
static int sbp_run_transaction(struct fw_card *, int, int, int, int,
		unsigned long long, void *, size_t);

static int read_peer_guid(u64 *guid, const struct sbp_management_request *req)
{
	int ret;
	__be32 high, low;

	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
			req->node_addr, req->generation, req->speed,
			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 3 * 4,
			&high, sizeof(high));
	if (ret != RCODE_COMPLETE)
		return ret;

	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
			req->node_addr, req->generation, req->speed,
			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 4 * 4,
			&low, sizeof(low));
	if (ret != RCODE_COMPLETE)
		return ret;

	*guid = (u64)be32_to_cpu(high) << 32 | be32_to_cpu(low);

	return RCODE_COMPLETE;
}

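/*
 * The peer's EUI-64 lives in quadlets 3 and 4 of its config ROM bus
 * information block, which is why the two reads above target offsets
 * 3 * 4 and 4 * 4 from CSR_CONFIG_ROM.
 */
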
static struct sbp_session *sbp_session_find_by_guid(
		struct sbp_tpg *tpg, u64 guid)
{
	struct se_session *se_sess;
	struct sbp_session *sess, *found = NULL;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;
		if (sess->guid == guid)
			found = sess;
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return found;
}

static struct sbp_login_descriptor *sbp_login_find_by_lun(
		struct sbp_session *session, struct se_lun *lun)
{
	struct sbp_login_descriptor *login, *found = NULL;

	spin_lock_bh(&session->lock);
	list_for_each_entry(login, &session->login_list, link) {
		if (login->lun == lun)
			found = login;
	}
	spin_unlock_bh(&session->lock);

	return found;
}

static int sbp_login_count_all_by_lun(
		struct sbp_tpg *tpg,
		struct se_lun *lun,
		int exclusive)
{
	struct se_session *se_sess;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login;
	int count = 0;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;

		spin_lock_bh(&sess->lock);
		list_for_each_entry(login, &sess->login_list, link) {
			if (login->lun != lun)
				continue;

			if (!exclusive || login->exclusive)
				count++;
		}
		spin_unlock_bh(&sess->lock);
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return count;
}

static struct sbp_login_descriptor *sbp_login_find_by_id(
		struct sbp_tpg *tpg, int login_id)
{
	struct se_session *se_sess;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login, *found = NULL;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;

		spin_lock_bh(&sess->lock);
		list_for_each_entry(login, &sess->login_list, link) {
			if (login->login_id == login_id)
				found = login;
		}
		spin_unlock_bh(&sess->lock);
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return found;
}

static struct se_lun *sbp_get_lun_from_tpg(struct sbp_tpg *tpg, int lun)
{
	struct se_portal_group *se_tpg = &tpg->se_tpg;
	struct se_lun *se_lun;

	if (lun >= TRANSPORT_MAX_LUNS_PER_TPG)
		return ERR_PTR(-EINVAL);

	spin_lock(&se_tpg->tpg_lun_lock);
	se_lun = se_tpg->tpg_lun_list[lun];

	if (se_lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
		se_lun = ERR_PTR(-ENODEV);

	spin_unlock(&se_tpg->tpg_lun_lock);

	return se_lun;
}

static struct sbp_session *sbp_session_create(
		struct sbp_tpg *tpg,
		u64 guid)
{
	struct sbp_session *sess;
	int ret;
	char guid_str[17];
	struct se_node_acl *se_nacl;

	sess = kmalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		pr_err("failed to allocate session descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	sess->se_sess = transport_init_session();
	if (IS_ERR(sess->se_sess)) {
		pr_err("failed to init se_session\n");

		ret = PTR_ERR(sess->se_sess);
		kfree(sess);
		return ERR_PTR(ret);
	}

	snprintf(guid_str, sizeof(guid_str), "%016llx", guid);

	se_nacl = core_tpg_check_initiator_node_acl(&tpg->se_tpg, guid_str);
	if (!se_nacl) {
		pr_warn("Node ACL not found for %s\n", guid_str);

		transport_free_session(sess->se_sess);
		kfree(sess);

		return ERR_PTR(-EPERM);
	}

	sess->se_sess->se_node_acl = se_nacl;

	spin_lock_init(&sess->lock);
	INIT_LIST_HEAD(&sess->login_list);
	INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);

	sess->guid = guid;

	transport_register_session(&tpg->se_tpg, se_nacl, sess->se_sess, sess);

	return sess;
}

static void sbp_session_release(struct sbp_session *sess, bool cancel_work)
{
	spin_lock_bh(&sess->lock);
	if (!list_empty(&sess->login_list)) {
		spin_unlock_bh(&sess->lock);
		return;
	}
	spin_unlock_bh(&sess->lock);

	if (cancel_work)
		cancel_delayed_work_sync(&sess->maint_work);

	transport_deregister_session_configfs(sess->se_sess);
	transport_deregister_session(sess->se_sess);

	if (sess->card)
		fw_card_put(sess->card);

	kfree(sess);
}

static void sbp_target_agent_unregister(struct sbp_target_agent *);

static void sbp_login_release(struct sbp_login_descriptor *login,
	bool cancel_work)
{
	struct sbp_session *sess = login->sess;

	/* FIXME: abort/wait on tasks */

	sbp_target_agent_unregister(login->tgt_agt);

	if (sess) {
		spin_lock_bh(&sess->lock);
		list_del(&login->link);
		spin_unlock_bh(&sess->lock);

		sbp_session_release(sess, cancel_work);
	}

	kfree(login);
}

static struct sbp_target_agent *sbp_target_agent_register(
	struct sbp_login_descriptor *);

static void sbp_management_request_login(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	struct se_lun *se_lun;
	int ret;
	u64 guid;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login;
	struct sbp_login_response_block *response;
	int login_response_len;

	se_lun = sbp_get_lun_from_tpg(tpg,
			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));
	if (IS_ERR(se_lun)) {
		pr_notice("login to unknown LUN: %d\n",
			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LUN_NOTSUPP));
		return;
	}

	ret = read_peer_guid(&guid, req);
	if (ret != RCODE_COMPLETE) {
		pr_warn("failed to read peer GUID: %d\n", ret);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n",
		se_lun->unpacked_lun, guid);

	sess = sbp_session_find_by_guid(tpg, guid);
	if (sess) {
		login = sbp_login_find_by_lun(sess, se_lun);
		if (login) {
			pr_notice("initiator already logged-in\n");

			/*
			 * SBP-2 R4 says we should return access denied, but
			 * that can confuse initiators. Instead we need to
			 * treat this like a reconnect, but send the login
			 * response block like a fresh login.
			 *
			 * This is required particularly in the case of Apple
			 * devices booting off the FireWire target, where
			 * the firmware has an active login to the target. When
			 * the OS takes control of the session it issues its own
			 * LOGIN rather than a RECONNECT. To avoid the machine
			 * waiting until the reconnect_hold expires, we can skip
			 * the ACCESS_DENIED errors to speed things up.
			 */

			goto already_logged_in;
		}
	}

	/*
	 * check exclusive bit in login request
	 * reject with access_denied if any logins present
	 */
	if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) &&
			sbp_login_count_all_by_lun(tpg, se_lun, 0)) {
		pr_warn("refusing exclusive login with other active logins\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	/*
	 * check exclusive bit in any existing login descriptor
	 * reject with access_denied if any exclusive logins present
	 */
	if (sbp_login_count_all_by_lun(tpg, se_lun, 1)) {
		pr_warn("refusing login while another exclusive login present\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	/*
	 * check we haven't exceeded the number of allowed logins
	 * reject with resources_unavailable if we have
	 */
	if (sbp_login_count_all_by_lun(tpg, se_lun, 0) >=
			tport->max_logins_per_lun) {
		pr_warn("max number of logins reached\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	if (!sess) {
		sess = sbp_session_create(tpg, guid);
		if (IS_ERR(sess)) {
			switch (PTR_ERR(sess)) {
			case -EPERM:
				ret = SBP_STATUS_ACCESS_DENIED;
				break;
			default:
				ret = SBP_STATUS_RESOURCES_UNAVAIL;
				break;
			}

			req->status.status = cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_SBP_STATUS(ret));
			return;
		}

		sess->node_id = req->node_addr;
		sess->card = fw_card_get(req->card);
		sess->generation = req->generation;
		sess->speed = req->speed;

		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	}

	/* only take the latest reconnect_hold into account */
	sess->reconnect_hold = min(
		1 << LOGIN_ORB_RECONNECT(be32_to_cpu(req->orb.misc)),
		tport->max_reconnect_timeout) - 1;
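
	/*
	 * Per SBP-2 the reconnect field in the login ORB encodes a requested
	 * hold time of 2^reconnect seconds, and the value reported back to
	 * the initiator is the hold time minus one; the min() above clamps
	 * the request to this target's configured maximum.
	 */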

	login = kmalloc(sizeof(*login), GFP_KERNEL);
	if (!login) {
		pr_err("failed to allocate login descriptor\n");

		sbp_session_release(sess, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	login->sess = sess;
	login->lun = se_lun;
	login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
	login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
	login->login_id = atomic_inc_return(&login_id);

	login->tgt_agt = sbp_target_agent_register(login);
	if (IS_ERR(login->tgt_agt)) {
		ret = PTR_ERR(login->tgt_agt);
		pr_err("failed to map command block handler: %d\n", ret);

		sbp_session_release(sess, true);
		kfree(login);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	spin_lock_bh(&sess->lock);
	list_add_tail(&login->link, &sess->login_list);
	spin_unlock_bh(&sess->lock);

already_logged_in:
	response = kzalloc(sizeof(*response), GFP_KERNEL);
	if (!response) {
		pr_err("failed to allocate login response block\n");

		sbp_login_release(login, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	login_response_len = clamp_val(
			LOGIN_ORB_RESPONSE_LENGTH(be32_to_cpu(req->orb.length)),
			12, sizeof(*response));
	response->misc = cpu_to_be32(
		((login_response_len & 0xffff) << 16) |
		(login->login_id & 0xffff));
	response->reconnect_hold = cpu_to_be32(sess->reconnect_hold & 0xffff);
	addr_to_sbp2_pointer(login->tgt_agt->handler.offset,
		&response->command_block_agent);

	ret = sbp_run_transaction(sess->card, TCODE_WRITE_BLOCK_REQUEST,
		sess->node_id, sess->generation, sess->speed,
		sbp2_pointer_to_addr(&req->orb.ptr2), response,
		login_response_len);
	if (ret != RCODE_COMPLETE) {
		pr_debug("failed to write login response block: %x\n", ret);

		kfree(response);
		sbp_login_release(login, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	kfree(response);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}

static void sbp_management_request_query_logins(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	pr_notice("QUERY LOGINS not implemented\n");
	/* FIXME: implement */

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
}

static void sbp_management_request_reconnect(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	int ret;
	u64 guid;
	struct sbp_login_descriptor *login;

	ret = read_peer_guid(&guid, req);
	if (ret != RCODE_COMPLETE) {
		pr_warn("failed to read peer GUID: %d\n", ret);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	pr_notice("mgt_agent RECONNECT from %016llx\n", guid);

	login = sbp_login_find_by_id(tpg,
		RECONNECT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)));
	if (!login) {
		pr_err("mgt_agent RECONNECT unknown login ID\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	if (login->sess->guid != guid) {
		pr_err("mgt_agent RECONNECT login GUID doesn't match\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	spin_lock_bh(&login->sess->lock);
	if (login->sess->card)
		fw_card_put(login->sess->card);

	/* update the node details */
	login->sess->generation = req->generation;
	login->sess->node_id = req->node_addr;
	login->sess->card = fw_card_get(req->card);
	login->sess->speed = req->speed;
	spin_unlock_bh(&login->sess->lock);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}

static void sbp_management_request_logout(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	int id;
	struct sbp_login_descriptor *login;

	id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));

	login = sbp_login_find_by_id(tpg, id);
	if (!login) {
		pr_warn("cannot find login: %d\n", id);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LOGIN_ID_UNKNOWN));
		return;
	}

	pr_info("mgt_agent LOGOUT from LUN %d session %d\n",
		login->lun->unpacked_lun, login->login_id);

	if (req->node_addr != login->sess->node_id) {
		pr_warn("logout from different node ID\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	sbp_login_release(login, true);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}

static void session_check_for_reset(struct sbp_session *sess)
{
	bool card_valid = false;

	spin_lock_bh(&sess->lock);

	if (sess->card) {
		spin_lock_irq(&sess->card->lock);
		card_valid = (sess->card->local_node != NULL);
		spin_unlock_irq(&sess->card->lock);

		if (!card_valid) {
			fw_card_put(sess->card);
			sess->card = NULL;
		}
	}

	if (!card_valid || (sess->generation != sess->card->generation)) {
		pr_info("Waiting for reconnect from node: %016llx\n",
				sess->guid);

		sess->node_id = -1;
		sess->reconnect_expires = get_jiffies_64() +
			((sess->reconnect_hold + 1) * HZ);
	}

	spin_unlock_bh(&sess->lock);
}

static void session_reconnect_expired(struct sbp_session *sess)
{
	struct sbp_login_descriptor *login, *temp;
	LIST_HEAD(login_list);

	pr_info("Reconnect timer expired for node: %016llx\n", sess->guid);

	spin_lock_bh(&sess->lock);
	list_for_each_entry_safe(login, temp, &sess->login_list, link) {
		login->sess = NULL;
		list_move_tail(&login->link, &login_list);
	}
	spin_unlock_bh(&sess->lock);

	list_for_each_entry_safe(login, temp, &login_list, link) {
		list_del(&login->link);
		sbp_login_release(login, false);
	}

	sbp_session_release(sess, false);
}

static void session_maintenance_work(struct work_struct *work)
{
	struct sbp_session *sess = container_of(work, struct sbp_session,
			maint_work.work);

	/* could be called while tearing down the session */
	spin_lock_bh(&sess->lock);
	if (list_empty(&sess->login_list)) {
		spin_unlock_bh(&sess->lock);
		return;
	}
	spin_unlock_bh(&sess->lock);

	if (sess->node_id != -1) {
		/* check for bus reset and make node_id invalid */
		session_check_for_reset(sess);

		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	} else if (!time_after64(get_jiffies_64(), sess->reconnect_expires)) {
		/* still waiting for reconnect */
		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	} else {
		/* reconnect timeout has expired */
		session_reconnect_expired(sess);
	}
}

static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	int state;

	switch (tcode) {
	case TCODE_READ_QUADLET_REQUEST:
		pr_debug("tgt_agent AGENT_STATE READ\n");

		spin_lock_bh(&agent->lock);
		state = agent->state;
		spin_unlock_bh(&agent->lock);

		*(__be32 *)data = cpu_to_be32(state);

		return RCODE_COMPLETE;

	case TCODE_WRITE_QUADLET_REQUEST:
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_agent_reset(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		pr_debug("tgt_agent AGENT_RESET\n");
		spin_lock_bh(&agent->lock);
		agent->state = AGENT_STATE_RESET;
		spin_unlock_bh(&agent->lock);
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	struct sbp2_pointer *ptr = data;

	switch (tcode) {
	case TCODE_WRITE_BLOCK_REQUEST:
		spin_lock_bh(&agent->lock);
		if (agent->state != AGENT_STATE_SUSPENDED &&
				agent->state != AGENT_STATE_RESET) {
			spin_unlock_bh(&agent->lock);
			pr_notice("Ignoring ORB_POINTER write while active.\n");
			return RCODE_CONFLICT_ERROR;
		}
		agent->state = AGENT_STATE_ACTIVE;
		spin_unlock_bh(&agent->lock);

		agent->orb_pointer = sbp2_pointer_to_addr(ptr);
		agent->doorbell = false;

		pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n",
				agent->orb_pointer);

		queue_work(system_unbound_wq, &agent->work);

		return RCODE_COMPLETE;

	case TCODE_READ_BLOCK_REQUEST:
		pr_debug("tgt_agent ORB_POINTER READ\n");
		spin_lock_bh(&agent->lock);
		addr_to_sbp2_pointer(agent->orb_pointer, ptr);
		spin_unlock_bh(&agent->lock);
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		spin_lock_bh(&agent->lock);
		if (agent->state != AGENT_STATE_SUSPENDED) {
			spin_unlock_bh(&agent->lock);
			pr_debug("Ignoring DOORBELL while active.\n");
			return RCODE_CONFLICT_ERROR;
		}
		agent->state = AGENT_STATE_ACTIVE;
		spin_unlock_bh(&agent->lock);

		agent->doorbell = true;

		pr_debug("tgt_agent DOORBELL\n");

		queue_work(system_unbound_wq, &agent->work);

		return RCODE_COMPLETE;

	case TCODE_READ_QUADLET_REQUEST:
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_unsolicited_status_enable(struct fw_card *card,
		int tcode, void *data, struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		pr_debug("tgt_agent UNSOLICITED_STATUS_ENABLE\n");
		/* ignored as we don't send unsolicited status */
		return RCODE_COMPLETE;

	case TCODE_READ_QUADLET_REQUEST:
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static void tgt_agent_rw(struct fw_card *card, struct fw_request *request,
		int tcode, int destination, int source, int generation,
		unsigned long long offset, void *data, size_t length,
		void *callback_data)
{
	struct sbp_target_agent *agent = callback_data;
	struct sbp_session *sess = agent->login->sess;
	int sess_gen, sess_node, rcode;

	spin_lock_bh(&sess->lock);
	sess_gen = sess->generation;
	sess_node = sess->node_id;
	spin_unlock_bh(&sess->lock);

	if (generation != sess_gen) {
		pr_notice("ignoring request with wrong generation\n");
		rcode = RCODE_TYPE_ERROR;
		goto out;
	}

	if (source != sess_node) {
		pr_notice("ignoring request from foreign node (%x != %x)\n",
				source, sess_node);
		rcode = RCODE_TYPE_ERROR;
		goto out;
	}

	/* turn offset into the offset from the start of the block */
	offset -= agent->handler.offset;

	if (offset == 0x00 && length == 4) {
		/* AGENT_STATE */
		rcode = tgt_agent_rw_agent_state(card, tcode, data, agent);
	} else if (offset == 0x04 && length == 4) {
		/* AGENT_RESET */
		rcode = tgt_agent_rw_agent_reset(card, tcode, data, agent);
	} else if (offset == 0x08 && length == 8) {
		/* ORB_POINTER */
		rcode = tgt_agent_rw_orb_pointer(card, tcode, data, agent);
	} else if (offset == 0x10 && length == 4) {
		/* DOORBELL */
		rcode = tgt_agent_rw_doorbell(card, tcode, data, agent);
	} else if (offset == 0x14 && length == 4) {
		/* UNSOLICITED_STATUS_ENABLE */
		rcode = tgt_agent_rw_unsolicited_status_enable(card, tcode,
				data, agent);
	} else {
		rcode = RCODE_ADDRESS_ERROR;
	}

out:
	fw_send_response(card, request, rcode);
}

static void sbp_handle_command(struct sbp_target_request *);
static int sbp_send_status(struct sbp_target_request *);
static void sbp_free_request(struct sbp_target_request *);

static void tgt_agent_process_work(struct work_struct *work)
{
	struct sbp_target_request *req =
		container_of(work, struct sbp_target_request, work);

	pr_debug("tgt_orb ptr:0x%llx next_ORB:0x%llx data_descriptor:0x%llx misc:0x%x\n",
			req->orb_pointer,
			sbp2_pointer_to_addr(&req->orb.next_orb),
			sbp2_pointer_to_addr(&req->orb.data_descriptor),
			be32_to_cpu(req->orb.misc));

	if (req->orb_pointer >> 32)
		pr_debug("ORB with high bits set\n");

	switch (ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc))) {
	case 0: /* Format specified by this standard */
		sbp_handle_command(req);
		break;
	case 1: /* Reserved for future standardization */
	case 2: /* Vendor-dependent */
		req->status.status |= cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_DEAD(0) |
				STATUS_BLOCK_LEN(1) |
				STATUS_BLOCK_SBP_STATUS(
					SBP_STATUS_REQ_TYPE_NOTSUPP));
		sbp_send_status(req);
		sbp_free_request(req);
		break;
	case 3: /* Dummy ORB */
		req->status.status |= cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_DEAD(0) |
				STATUS_BLOCK_LEN(1) |
				STATUS_BLOCK_SBP_STATUS(
					SBP_STATUS_DUMMY_ORB_COMPLETE));
		sbp_send_status(req);
		sbp_free_request(req);
		break;
	}
}

/* used to double-check we haven't been issued an AGENT_RESET */
static inline bool tgt_agent_check_active(struct sbp_target_agent *agent)
{
	bool active;

	spin_lock_bh(&agent->lock);
	active = (agent->state == AGENT_STATE_ACTIVE);
	spin_unlock_bh(&agent->lock);

	return active;
}

static void tgt_agent_fetch_work(struct work_struct *work)
{
	struct sbp_target_agent *agent =
		container_of(work, struct sbp_target_agent, work);
	struct sbp_session *sess = agent->login->sess;
	struct sbp_target_request *req;
	int ret;
	bool doorbell = agent->doorbell;
	u64 next_orb = agent->orb_pointer;

	while (next_orb && tgt_agent_check_active(agent)) {
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req) {
			spin_lock_bh(&agent->lock);
			agent->state = AGENT_STATE_DEAD;
			spin_unlock_bh(&agent->lock);
			return;
		}

		req->login = agent->login;
		req->orb_pointer = next_orb;

		req->status.status = cpu_to_be32(STATUS_BLOCK_ORB_OFFSET_HIGH(
					req->orb_pointer >> 32));
		req->status.orb_low = cpu_to_be32(
				req->orb_pointer & 0xfffffffc);

		/* read in the ORB */
		ret = sbp_run_transaction(sess->card, TCODE_READ_BLOCK_REQUEST,
				sess->node_id, sess->generation, sess->speed,
				req->orb_pointer, &req->orb, sizeof(req->orb));
		if (ret != RCODE_COMPLETE) {
			pr_debug("tgt_orb fetch failed: %x\n", ret);
			req->status.status |= cpu_to_be32(
					STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_FINISHED) |
					STATUS_BLOCK_RESP(
						STATUS_RESP_TRANSPORT_FAILURE) |
					STATUS_BLOCK_DEAD(1) |
					STATUS_BLOCK_LEN(1) |
					STATUS_BLOCK_SBP_STATUS(
						SBP_STATUS_UNSPECIFIED_ERROR));
			spin_lock_bh(&agent->lock);
			agent->state = AGENT_STATE_DEAD;
			spin_unlock_bh(&agent->lock);

			sbp_send_status(req);
			sbp_free_request(req);
			return;
		}

		/* check the next_ORB field */
		if (be32_to_cpu(req->orb.next_orb.high) & 0x80000000) {
			next_orb = 0;
			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_FINISHED));
		} else {
			next_orb = sbp2_pointer_to_addr(&req->orb.next_orb);
			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_CONTINUING));
		}

		if (tgt_agent_check_active(agent) && !doorbell) {
			INIT_WORK(&req->work, tgt_agent_process_work);
			queue_work(system_unbound_wq, &req->work);
		} else {
			/* don't process this request, just check next_ORB */
			sbp_free_request(req);
		}

		spin_lock_bh(&agent->lock);
		doorbell = agent->doorbell = false;

		/* check if we should carry on processing */
		if (next_orb)
			agent->orb_pointer = next_orb;
		else
			agent->state = AGENT_STATE_SUSPENDED;

		spin_unlock_bh(&agent->lock);
	}
}

static struct sbp_target_agent *sbp_target_agent_register(
		struct sbp_login_descriptor *login)
{
	struct sbp_target_agent *agent;
	int ret;

	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
	if (!agent)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&agent->lock);

	agent->handler.length = 0x20;
	agent->handler.address_callback = tgt_agent_rw;
	agent->handler.callback_data = agent;

	agent->login = login;
	agent->state = AGENT_STATE_RESET;
	INIT_WORK(&agent->work, tgt_agent_fetch_work);
	agent->orb_pointer = 0;
	agent->doorbell = false;

	ret = fw_core_add_address_handler(&agent->handler,
			&sbp_register_region);
	if (ret < 0) {
		kfree(agent);
		return ERR_PTR(ret);
	}

	return agent;
}

static void sbp_target_agent_unregister(struct sbp_target_agent *agent)
{
	fw_core_remove_address_handler(&agent->handler);
	cancel_work_sync(&agent->work);
	kfree(agent);
}

/*
 * Simple wrapper around fw_run_transaction that retries the transaction
 * several times in case of failure, with an exponential backoff.
 */
static int sbp_run_transaction(struct fw_card *card, int tcode, int destination_id,
		int generation, int speed, unsigned long long offset,
		void *payload, size_t length)
{
	int attempt, ret, delay;

	for (attempt = 1; attempt <= 5; attempt++) {
		ret = fw_run_transaction(card, tcode, destination_id,
				generation, speed, offset, payload, length);

		switch (ret) {
		case RCODE_COMPLETE:
		case RCODE_TYPE_ERROR:
		case RCODE_ADDRESS_ERROR:
		case RCODE_GENERATION:
			return ret;

		default:
			delay = 5 * attempt * attempt;
			usleep_range(delay, delay * 2);
		}
	}

	return ret;
}

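/*
 * With the parameters above, the backoff delay grows quadratically at
 * 5 * attempt^2 microseconds (5, 20, 45, 80, ...), and at most five
 * attempts are made before the last rcode is returned to the caller.
 * The listed rcodes are treated as final and are never retried.
 */
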
/*
 * Wrapper around sbp_run_transaction that gets the card, destination,
 * generation and speed out of the request's session.
 */
static int sbp_run_request_transaction(struct sbp_target_request *req,
		int tcode, unsigned long long offset, void *payload,
		size_t length)
{
	struct sbp_login_descriptor *login = req->login;
	struct sbp_session *sess = login->sess;
	struct fw_card *card;
	int node_id, generation, speed, ret;

	spin_lock_bh(&sess->lock);
	card = fw_card_get(sess->card);
	node_id = sess->node_id;
	generation = sess->generation;
	speed = sess->speed;
	spin_unlock_bh(&sess->lock);

	ret = sbp_run_transaction(card, tcode, node_id, generation, speed,
			offset, payload, length);

	fw_card_put(card);

	return ret;
}

static int sbp_fetch_command(struct sbp_target_request *req)
{
	int ret, cmd_len, copy_len;

	cmd_len = scsi_command_size(req->orb.command_block);

	req->cmd_buf = kmalloc(cmd_len, GFP_KERNEL);
	if (!req->cmd_buf)
		return -ENOMEM;

	memcpy(req->cmd_buf, req->orb.command_block,
		min_t(int, cmd_len, sizeof(req->orb.command_block)));

	if (cmd_len > sizeof(req->orb.command_block)) {
		pr_debug("sbp_fetch_command: filling in long command\n");
		copy_len = cmd_len - sizeof(req->orb.command_block);

		ret = sbp_run_request_transaction(req,
				TCODE_READ_BLOCK_REQUEST,
				req->orb_pointer + sizeof(req->orb),
				req->cmd_buf + sizeof(req->orb.command_block),
				copy_len);
		if (ret != RCODE_COMPLETE)
			return -EIO;
	}

	return 0;
}

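/*
 * Only the leading sizeof(req->orb.command_block) bytes of the CDB fit
 * inside the command block ORB itself; a longer CDB has to be fetched
 * from the initiator's memory immediately following the ORB, which is
 * what the extra block read above does.
 */
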
static int sbp_fetch_page_table(struct sbp_target_request *req)
{
	int pg_tbl_sz, ret;
	struct sbp_page_table_entry *pg_tbl;

	if (!CMDBLK_ORB_PG_TBL_PRESENT(be32_to_cpu(req->orb.misc)))
		return 0;

	pg_tbl_sz = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)) *
		sizeof(struct sbp_page_table_entry);

	pg_tbl = kmalloc(pg_tbl_sz, GFP_KERNEL);
	if (!pg_tbl)
		return -ENOMEM;

	ret = sbp_run_request_transaction(req, TCODE_READ_BLOCK_REQUEST,
			sbp2_pointer_to_addr(&req->orb.data_descriptor),
			pg_tbl, pg_tbl_sz);
	if (ret != RCODE_COMPLETE) {
		kfree(pg_tbl);
		return -EIO;
	}

	req->pg_tbl = pg_tbl;
	return 0;
}

static void sbp_calc_data_length_direction(struct sbp_target_request *req,
	u32 *data_len, enum dma_data_direction *data_dir)
{
	int data_size, direction, idx;

	data_size = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
	direction = CMDBLK_ORB_DIRECTION(be32_to_cpu(req->orb.misc));

	if (!data_size) {
		*data_len = 0;
		*data_dir = DMA_NONE;
		return;
	}

	*data_dir = direction ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	if (req->pg_tbl) {
		*data_len = 0;
		for (idx = 0; idx < data_size; idx++) {
			*data_len += be16_to_cpu(
					req->pg_tbl[idx].segment_length);
		}
	} else {
		*data_len = data_size;
	}
}

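/*
 * As the code above reads: direction 0 means data flows from the
 * initiator to the target (DMA_TO_DEVICE, a SCSI WRITE) and 1 means
 * target to initiator (DMA_FROM_DEVICE, a SCSI READ). data_size is a
 * byte count for a direct buffer but an element count when a page
 * table is present, hence the segment_length summation.
 */
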
static void sbp_handle_command(struct sbp_target_request *req)
{
	struct sbp_login_descriptor *login = req->login;
	struct sbp_session *sess = login->sess;
	int ret, unpacked_lun;
	u32 data_length;
	enum dma_data_direction data_dir;

	ret = sbp_fetch_command(req);
	if (ret) {
		pr_debug("sbp_handle_command: fetch command failed: %d\n", ret);
		goto err;
	}

	ret = sbp_fetch_page_table(req);
	if (ret) {
		pr_debug("sbp_handle_command: fetch page table failed: %d\n",
			ret);
		goto err;
	}

	unpacked_lun = req->login->lun->unpacked_lun;
	sbp_calc_data_length_direction(req, &data_length, &data_dir);

	pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n",
			req->orb_pointer, unpacked_lun, data_length, data_dir);

	if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
			      req->sense_buf, unpacked_lun, data_length,
			      MSG_SIMPLE_TAG, data_dir, 0))
		goto err;

	return;

err:
	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
		STATUS_BLOCK_DEAD(0) |
		STATUS_BLOCK_LEN(1) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
	sbp_send_status(req);
	sbp_free_request(req);
}

/*
 * DMA_TO_DEVICE = read from initiator (SCSI WRITE)
 * DMA_FROM_DEVICE = write to initiator (SCSI READ)
 */
static int sbp_rw_data(struct sbp_target_request *req)
{
	struct sbp_session *sess = req->login->sess;
	int tcode, sg_miter_flags, max_payload, pg_size, speed, node_id,
		generation, num_pte, length, tfr_length,
		rcode = RCODE_COMPLETE;
	struct sbp_page_table_entry *pte;
	unsigned long long offset;
	struct fw_card *card;
	struct sg_mapping_iter iter;

	if (req->se_cmd.data_direction == DMA_FROM_DEVICE) {
		tcode = TCODE_WRITE_BLOCK_REQUEST;
		sg_miter_flags = SG_MITER_FROM_SG;
	} else {
		tcode = TCODE_READ_BLOCK_REQUEST;
		sg_miter_flags = SG_MITER_TO_SG;
	}

	max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD(be32_to_cpu(req->orb.misc));
	speed = CMDBLK_ORB_SPEED(be32_to_cpu(req->orb.misc));

	pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc));
	if (pg_size) {
		pr_err("sbp_run_transaction: page size ignored\n");
		pg_size = 0x100 << pg_size;
	}

	spin_lock_bh(&sess->lock);
	card = fw_card_get(sess->card);
	node_id = sess->node_id;
	generation = sess->generation;
	spin_unlock_bh(&sess->lock);

	if (req->pg_tbl) {
		pte = req->pg_tbl;
		num_pte = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));

		offset = 0;
		length = 0;
	} else {
		pte = NULL;
		num_pte = 0;

		offset = sbp2_pointer_to_addr(&req->orb.data_descriptor);
		length = req->se_cmd.data_length;
	}

	sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents,
		sg_miter_flags);

	while (length || num_pte) {
		if (!length) {
			offset = (u64)be16_to_cpu(pte->segment_base_hi) << 32 |
				be32_to_cpu(pte->segment_base_lo);
			length = be16_to_cpu(pte->segment_length);

			pte++;
			num_pte--;
		}

		sg_miter_next(&iter);

		tfr_length = min3(length, max_payload, (int)iter.length);

		/* FIXME: take page_size into account */

		rcode = sbp_run_transaction(card, tcode, node_id,
				generation, speed,
				offset, iter.addr, tfr_length);

		if (rcode != RCODE_COMPLETE)
			break;

		length -= tfr_length;
		offset += tfr_length;
		iter.consumed = tfr_length;
	}

	sg_miter_stop(&iter);
	fw_card_put(card);

	if (rcode == RCODE_COMPLETE) {
		WARN_ON(length != 0);
		return 0;
	} else {
		return -EIO;
	}
}

static int sbp_send_status(struct sbp_target_request *req)
{
	int ret, length;
	struct sbp_login_descriptor *login = req->login;

	length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4;

	ret = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
			login->status_fifo_addr, &req->status, length);
	if (ret != RCODE_COMPLETE) {
		pr_debug("sbp_send_status: write failed: 0x%x\n", ret);
		return -EIO;
	}

	pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n",
			req->orb_pointer);

	return 0;
}

static void sbp_sense_mangle(struct sbp_target_request *req)
{
	struct se_cmd *se_cmd = &req->se_cmd;
	u8 *sense = req->sense_buf;
	u8 *status = req->status.data;

	WARN_ON(se_cmd->scsi_sense_length < 18);

	switch (sense[0] & 0x7f) {		/* sfmt */
	case 0x70: /* current, fixed */
		status[0] = 0 << 6;
		break;
	case 0x71: /* deferred, fixed */
		status[0] = 1 << 6;
		break;
	case 0x72: /* current, descriptor */
	case 0x73: /* deferred, descriptor */
	default:
		/*
		 * TODO: SBP-3 specifies what we should do with descriptor
		 * format sense data
		 */
		pr_err("sbp_send_sense: unknown sense format: 0x%x\n",
			sense[0]);
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQUEST_ABORTED));
		return;
	}

	status[0] |= se_cmd->scsi_status & 0x3f;/* status */
	status[1] =
		(sense[0] & 0x80) |		/* valid */
		((sense[2] & 0xe0) >> 1) |	/* mark, eom, ili */
		(sense[2] & 0x0f);		/* sense_key */
	status[2] = se_cmd->scsi_asc;		/* sense_code */
	status[3] = se_cmd->scsi_ascq;		/* sense_qualifier */

	/* information */
	status[4] = sense[3];
	status[5] = sense[4];
	status[6] = sense[5];
	status[7] = sense[6];

	/* CDB-dependent */
	status[8] = sense[8];
	status[9] = sense[9];
	status[10] = sense[10];
	status[11] = sense[11];

	/* fru */
	status[12] = sense[14];

	/* sense_key-dependent */
	status[13] = sense[15];
	status[14] = sense[16];
	status[15] = sense[17];

	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_DEAD(0) |
		STATUS_BLOCK_LEN(5) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}

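/*
 * The 16 bytes filled in above form the SBP status block's sense data
 * area: fixed-format SCSI sense is repacked so the initiator can
 * recover the key/ASC/ASCQ plus the information, CDB-dependent, FRU
 * and sense-key-dependent fields. STATUS_BLOCK_LEN(5) then makes
 * sbp_send_status() transfer the 8-byte header plus these 16 bytes.
 */
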
static int sbp_send_sense(struct sbp_target_request *req)
{
	struct se_cmd *se_cmd = &req->se_cmd;

	if (se_cmd->scsi_sense_length) {
		sbp_sense_mangle(req);
	} else {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
	}

	return sbp_send_status(req);
}

static void sbp_free_request(struct sbp_target_request *req)
{
	kfree(req->pg_tbl);
	kfree(req->cmd_buf);
	kfree(req);
}

static void sbp_mgt_agent_process(struct work_struct *work)
{
	struct sbp_management_agent *agent =
		container_of(work, struct sbp_management_agent, work);
	struct sbp_management_request *req = agent->request;
	int ret;
	int status_data_len = 0;

	/* fetch the ORB from the initiator */
	ret = sbp_run_transaction(req->card, TCODE_READ_BLOCK_REQUEST,
		req->node_addr, req->generation, req->speed,
		agent->orb_offset, &req->orb, sizeof(req->orb));
	if (ret != RCODE_COMPLETE) {
		pr_debug("mgt_orb fetch failed: %x\n", ret);
		goto out;
	}

	pr_debug("mgt_orb ptr1:0x%llx ptr2:0x%llx misc:0x%x len:0x%x status_fifo:0x%llx\n",
		sbp2_pointer_to_addr(&req->orb.ptr1),
		sbp2_pointer_to_addr(&req->orb.ptr2),
		be32_to_cpu(req->orb.misc), be32_to_cpu(req->orb.length),
		sbp2_pointer_to_addr(&req->orb.status_fifo));

	if (!ORB_NOTIFY(be32_to_cpu(req->orb.misc)) ||
		ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc)) != 0) {
		pr_err("mgt_orb bad request\n");
		goto out;
	}

	switch (MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))) {
	case MANAGEMENT_ORB_FUNCTION_LOGIN:
		sbp_management_request_login(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS:
		sbp_management_request_query_logins(agent, req,
				&status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_RECONNECT:
		sbp_management_request_reconnect(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_SET_PASSWORD:
		pr_notice("SET PASSWORD not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_LOGOUT:
		sbp_management_request_logout(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK:
		pr_notice("ABORT TASK not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET:
		pr_notice("ABORT TASK SET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET:
		pr_notice("LOGICAL UNIT RESET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_TARGET_RESET:
		pr_notice("TARGET RESET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	default:
		pr_notice("unknown management function 0x%x\n",
			MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc)));

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;
	}

	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_SRC(1) | /* Response to ORB, next_ORB absent */
		STATUS_BLOCK_LEN(DIV_ROUND_UP(status_data_len, 4) + 1) |
		STATUS_BLOCK_ORB_OFFSET_HIGH(agent->orb_offset >> 32));
	req->status.orb_low = cpu_to_be32(agent->orb_offset);

	/* write the status block back to the initiator */
	ret = sbp_run_transaction(req->card, TCODE_WRITE_BLOCK_REQUEST,
		req->node_addr, req->generation, req->speed,
		sbp2_pointer_to_addr(&req->orb.status_fifo),
		&req->status, 8 + status_data_len);
	if (ret != RCODE_COMPLETE) {
		pr_debug("mgt_orb status write failed: %x\n", ret);
		goto out;
	}

out:
	fw_card_put(req->card);
	kfree(req);

	spin_lock_bh(&agent->lock);
	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
	spin_unlock_bh(&agent->lock);
}

static void sbp_mgt_agent_rw(struct fw_card *card,
	struct fw_request *request, int tcode, int destination, int source,
	int generation, unsigned long long offset, void *data, size_t length,
	void *callback_data)
{
	struct sbp_management_agent *agent = callback_data;
	struct sbp2_pointer *ptr = data;
	int rcode = RCODE_ADDRESS_ERROR;

	if (!agent->tport->enable)
		goto out;

	if ((offset != agent->handler.offset) || (length != 8))
		goto out;

	if (tcode == TCODE_WRITE_BLOCK_REQUEST) {
		struct sbp_management_request *req;
		int prev_state;

		spin_lock_bh(&agent->lock);
		prev_state = agent->state;
		agent->state = MANAGEMENT_AGENT_STATE_BUSY;
		spin_unlock_bh(&agent->lock);

		if (prev_state == MANAGEMENT_AGENT_STATE_BUSY) {
			pr_notice("ignoring management request while busy\n");
			rcode = RCODE_CONFLICT_ERROR;
			goto out;
		}

		req = kzalloc(sizeof(*req), GFP_ATOMIC);
		if (!req) {
			rcode = RCODE_CONFLICT_ERROR;
			goto out;
		}

		req->card = fw_card_get(card);
		req->generation = generation;
		req->node_addr = source;
		req->speed = fw_get_request_speed(request);

		agent->orb_offset = sbp2_pointer_to_addr(ptr);
		agent->request = req;

		queue_work(system_unbound_wq, &agent->work);
		rcode = RCODE_COMPLETE;
	} else if (tcode == TCODE_READ_BLOCK_REQUEST) {
		addr_to_sbp2_pointer(agent->orb_offset, ptr);
		rcode = RCODE_COMPLETE;
	} else {
		rcode = RCODE_TYPE_ERROR;
	}

out:
	fw_send_response(card, request, rcode);
}

static struct sbp_management_agent *sbp_management_agent_register(
		struct sbp_tport *tport)
{
	int ret;
	struct sbp_management_agent *agent;

	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
	if (!agent)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&agent->lock);
	agent->tport = tport;
	agent->handler.length = 0x08;
	agent->handler.address_callback = sbp_mgt_agent_rw;
	agent->handler.callback_data = agent;
	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
	INIT_WORK(&agent->work, sbp_mgt_agent_process);
	agent->orb_offset = 0;
	agent->request = NULL;

	ret = fw_core_add_address_handler(&agent->handler,
			&sbp_register_region);
	if (ret < 0) {
		kfree(agent);
		return ERR_PTR(ret);
	}

	return agent;
}

static void sbp_management_agent_unregister(struct sbp_management_agent *agent)
{
	fw_core_remove_address_handler(&agent->handler);
	cancel_work_sync(&agent->work);
	kfree(agent);
}

static int sbp_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static int sbp_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}

static char *sbp_get_fabric_name(void)
{
	return "sbp";
}

static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	return &tport->tport_name[0];
}

static u16 sbp_get_tag(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	return tpg->tport_tpgt;
}

static u32 sbp_get_default_depth(struct se_portal_group *se_tpg)
{
	return 1;
}

static struct se_node_acl *sbp_alloc_fabric_acl(struct se_portal_group *se_tpg)
{
	struct sbp_nacl *nacl;

	nacl = kzalloc(sizeof(struct sbp_nacl), GFP_KERNEL);
	if (!nacl) {
		pr_err("Unable to allocate struct sbp_nacl\n");
		return NULL;
	}

	return &nacl->se_node_acl;
}

static void sbp_release_fabric_acl(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl)
{
	struct sbp_nacl *nacl =
		container_of(se_nacl, struct sbp_nacl, se_node_acl);
	kfree(nacl);
}

static u32 sbp_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static void sbp_release_cmd(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	sbp_free_request(req);
}

static int sbp_shutdown_session(struct se_session *se_sess)
{
	return 0;
}

static void sbp_close_session(struct se_session *se_sess)
{
	return;
}

static u32 sbp_sess_get_index(struct se_session *se_sess)
{
	return 0;
}

static int sbp_write_pending(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);
	int ret;

	ret = sbp_rw_data(req);
	if (ret) {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(
				STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(
				SBP_STATUS_UNSPECIFIED_ERROR));
		sbp_send_status(req);
		return ret;
	}

	target_execute_cmd(se_cmd);
	return 0;
}

static int sbp_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}

static void sbp_set_default_node_attrs(struct se_node_acl *nacl)
{
	return;
}

static u32 sbp_get_task_tag(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	/* only used for printk until we do TMRs */
	return (u32)req->orb_pointer;
}

static int sbp_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

static int sbp_queue_data_in(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);
	int ret;

	ret = sbp_rw_data(req);
	if (ret) {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		sbp_send_status(req);
		return ret;
	}

	return sbp_send_sense(req);
}

/*
 * Called after command (no data transfer) or after the write (to device)
 * operation is completed
 */
static int sbp_queue_status(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	return sbp_send_sense(req);
}

static void sbp_queue_tm_rsp(struct se_cmd *se_cmd)
{
}

static int sbp_check_stop_free(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	transport_generic_free_cmd(&req->se_cmd, 0);
	return 1;
}

/*
 * Handlers for Serial Bus Protocol 2/3 (SBP-2 / SBP-3)
 */
static u8 sbp_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
	/*
	 * Return a IEEE 1394 SCSI Protocol identifier for loopback operations
	 * This is defined in section 7.5.1 Table 362 in spc4r17
	 */
	return SCSI_PROTOCOL_SBP;
}

static u32 sbp_get_pr_transport_id(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct t10_pr_registration *pr_reg,
	int *format_code,
	unsigned char *buf)
{
	int ret;

	/*
	 * Set PROTOCOL IDENTIFIER to 3h for SBP
	 */
	buf[0] = SCSI_PROTOCOL_SBP;
	/*
	 * From spc4r17, 7.5.4.4 TransportID for initiator ports using SCSI
	 * over IEEE 1394
	 */
	ret = hex2bin(&buf[8], se_nacl->initiatorname, 8);
	if (ret < 0)
		pr_debug("sbp transport_id: invalid hex string\n");

	/*
	 * The IEEE 1394 Transport ID is a hardcoded 24-byte length
	 */
	return 24;
}

static u32 sbp_get_pr_transport_id_len(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct t10_pr_registration *pr_reg,
	int *format_code)
{
	*format_code = 0;
	/*
	 * From spc4r17, 7.5.4.4 TransportID for initiator ports using SCSI
	 * over IEEE 1394
	 *
	 * The SBP Transport ID is a hardcoded 24-byte length
	 */
	return 24;
}

/*
 * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
 * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
 */
static char *sbp_parse_pr_out_transport_id(
	struct se_portal_group *se_tpg,
	const char *buf,
	u32 *out_tid_len,
	char **port_nexus_ptr)
{
	/*
	 * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.4 TransportID
	 * for initiator ports using SCSI over SBP Serial SCSI Protocol
	 *
	 * The TransportID for a IEEE 1394 Initiator Port is of fixed size of
	 * 24 bytes, and IEEE 1394 does not contain a I_T nexus identifier,
	 * so we return the **port_nexus_ptr set to NULL.
	 */
	*port_nexus_ptr = NULL;
	*out_tid_len = 24;

	return (char *)&buf[8];
}

static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)
{
	int i, count = 0;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		struct se_lun *se_lun = tpg->tpg_lun_list[i];

		if (se_lun->lun_status == TRANSPORT_LUN_STATUS_FREE)
			continue;

		count++;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return count;
}

static int sbp_update_unit_directory(struct sbp_tport *tport)
{
	int num_luns, num_entries, idx = 0, mgt_agt_addr, ret, i;
	u32 *data;

	if (tport->unit_directory.data) {
		fw_core_remove_descriptor(&tport->unit_directory);
		kfree(tport->unit_directory.data);
		tport->unit_directory.data = NULL;
	}

	if (!tport->enable || !tport->tpg)
		return 0;

	num_luns = sbp_count_se_tpg_luns(&tport->tpg->se_tpg);

	/*
	 * Number of entries in the final unit directory:
	 *  - all of those in the template
	 *  - management_agent
	 *  - unit_characteristics
	 *  - reconnect_timeout
	 *  - unit unique ID
	 *  - one for each LUN
	 *
	 *  MUST NOT include leaf or sub-directory entries
	 */
	num_entries = ARRAY_SIZE(sbp_unit_directory_template) + 4 + num_luns;

	if (tport->directory_id != -1)
		num_entries++;

	/* allocate num_entries + 4 for the header and unique ID leaf */
	data = kcalloc((num_entries + 4), sizeof(u32), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* directory_length */
	data[idx++] = num_entries << 16;

	/* directory_id */
	if (tport->directory_id != -1)
		data[idx++] = (CSR_DIRECTORY_ID << 24) | tport->directory_id;

	/* unit directory template */
	memcpy(&data[idx], sbp_unit_directory_template,
			sizeof(sbp_unit_directory_template));
	idx += ARRAY_SIZE(sbp_unit_directory_template);

	/* management_agent */
	mgt_agt_addr = (tport->mgt_agt->handler.offset - CSR_REGISTER_BASE) / 4;
	data[idx++] = 0x54000000 | (mgt_agt_addr & 0x00ffffff);

	/* unit_characteristics */
	data[idx++] = 0x3a000000 |
		(((tport->mgt_orb_timeout * 2) << 8) & 0xff00) |
		SBP_ORB_FETCH_SIZE;

	/* reconnect_timeout */
	data[idx++] = 0x3d000000 | (tport->max_reconnect_timeout & 0xffff);

	/* unit unique ID (leaf is just after LUNs) */
	data[idx++] = 0x8d000000 | (num_luns + 1);

	spin_lock(&tport->tpg->se_tpg.tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		struct se_lun *se_lun = tport->tpg->se_tpg.tpg_lun_list[i];
		struct se_device *dev;
		int type;

		if (se_lun->lun_status == TRANSPORT_LUN_STATUS_FREE)
			continue;

		spin_unlock(&tport->tpg->se_tpg.tpg_lun_lock);

		dev = se_lun->lun_se_dev;
		type = dev->transport->get_device_type(dev);

		/* logical_unit_number */
		data[idx++] = 0x14000000 |
			((type << 16) & 0x1f0000) |
			(se_lun->unpacked_lun & 0xffff);

		spin_lock(&tport->tpg->se_tpg.tpg_lun_lock);
	}
	spin_unlock(&tport->tpg->se_tpg.tpg_lun_lock);

	/* unit unique ID leaf */
	data[idx++] = 2 << 16;
	data[idx++] = tport->guid >> 32;
	data[idx++] = tport->guid;

	tport->unit_directory.length = idx;
	tport->unit_directory.key = (CSR_DIRECTORY | CSR_UNIT) << 24;
	tport->unit_directory.data = data;

	ret = fw_core_add_descriptor(&tport->unit_directory);
	if (ret < 0) {
		kfree(tport->unit_directory.data);
		tport->unit_directory.data = NULL;
	}

	return ret;
}

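/*
 * The resulting unit directory is what initiators enumerate in this
 * target's config ROM: the template entries, the management agent
 * register offset (in quadlets from CSR_REGISTER_BASE), the ORB
 * timeouts, one logical_unit_number entry per active LUN, and a
 * trailing leaf holding the 64-bit unit unique ID.
 */
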
static ssize_t sbp_parse_wwn(const char *name, u64 *wwn)
{
	const char *cp;
	char c, nibble;
	int pos = 0, err;

	*wwn = 0;
	for (cp = name; cp < &name[SBP_NAMELEN - 1]; cp++) {
		c = *cp;
		if (c == '\n' && cp[1] == '\0')
			continue;
		if (c == '\0') {
			err = 2;
			if (pos != 16)
				goto fail;
			return cp - name;
		}
		err = 3;
		if (isdigit(c))
			nibble = c - '0';
		else if (isxdigit(c))
			nibble = tolower(c) - 'a' + 10;
		else
			goto fail;
		*wwn = (*wwn << 4) | nibble;
		pos++;
	}
	err = 4;
fail:
	printk(KERN_INFO "err %u len %zu pos %u\n",
			err, cp - name, pos);
	return -1;
}

static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn)
{
	return snprintf(buf, len, "%016llx", wwn);
}

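/*
 * Example: a GUID of 0x0001020304050607 is rendered as the 16-character
 * string "0001020304050607", which is also the only form sbp_parse_wwn()
 * accepts (16 hex digits, optionally newline terminated).
 */
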
static struct se_node_acl *sbp_make_nodeacl(
		struct se_portal_group *se_tpg,
		struct config_group *group,
		const char *name)
{
	struct se_node_acl *se_nacl, *se_nacl_new;
	struct sbp_nacl *nacl;
	u64 guid = 0;
	u32 nexus_depth = 1;

	if (sbp_parse_wwn(name, &guid) < 0)
		return ERR_PTR(-EINVAL);

	se_nacl_new = sbp_alloc_fabric_acl(se_tpg);
	if (!se_nacl_new)
		return ERR_PTR(-ENOMEM);

	/*
	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
	 * when converting a NodeACL from demo mode -> explict
	 */
	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
			name, nexus_depth);
	if (IS_ERR(se_nacl)) {
		sbp_release_fabric_acl(se_tpg, se_nacl_new);
		return se_nacl;
	}

	nacl = container_of(se_nacl, struct sbp_nacl, se_node_acl);
	nacl->guid = guid;
	sbp_format_wwn(nacl->iport_name, SBP_NAMELEN, guid);

	return se_nacl;
}

static void sbp_drop_nodeacl(struct se_node_acl *se_acl)
{
	struct sbp_nacl *nacl =
		container_of(se_acl, struct sbp_nacl, se_node_acl);

	core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
	kfree(nacl);
}

static int sbp_post_link_lun(
		struct se_portal_group *se_tpg,
		struct se_lun *se_lun)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);

	return sbp_update_unit_directory(tpg->tport);
}

static void sbp_pre_unlink_lun(
		struct se_portal_group *se_tpg,
		struct se_lun *se_lun)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	int ret;

	if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0)
		tport->enable = 0;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		pr_err("unlink LUN: failed to update unit directory\n");
}

static struct se_portal_group *sbp_make_tpg(
		struct se_wwn *wwn,
		struct config_group *group,
		const char *name)
{
	struct sbp_tport *tport =
		container_of(wwn, struct sbp_tport, tport_wwn);

	struct sbp_tpg *tpg;
	unsigned long tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
		return ERR_PTR(-EINVAL);

	if (tport->tpg) {
		pr_err("Only one TPG per Unit is possible.\n");
		return ERR_PTR(-EBUSY);
	}

	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
	if (!tpg) {
		pr_err("Unable to allocate struct sbp_tpg\n");
		return ERR_PTR(-ENOMEM);
	}

	tpg->tport = tport;
	tpg->tport_tpgt = tpgt;
	tport->tpg = tpg;

	/* default attribute values */
	tport->enable = 0;
	tport->directory_id = -1;
	tport->mgt_orb_timeout = 15;
	tport->max_reconnect_timeout = 5;
	tport->max_logins_per_lun = 1;

	tport->mgt_agt = sbp_management_agent_register(tport);
	if (IS_ERR(tport->mgt_agt)) {
		ret = PTR_ERR(tport->mgt_agt);
		goto out_free_tpg;
	}

	ret = core_tpg_register(&sbp_fabric_configfs->tf_ops, wwn,
			&tpg->se_tpg, (void *)tpg,
			TRANSPORT_TPG_TYPE_NORMAL);
	if (ret < 0)
		goto out_unreg_mgt_agt;

	return &tpg->se_tpg;

out_unreg_mgt_agt:
	sbp_management_agent_unregister(tport->mgt_agt);
out_free_tpg:
	tport->tpg = NULL;
	kfree(tpg);
	return ERR_PTR(ret);
}

static void sbp_drop_tpg(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	core_tpg_deregister(se_tpg);
	sbp_management_agent_unregister(tport->mgt_agt);
	tport->tpg = NULL;
	kfree(tpg);
}

static struct se_wwn *sbp_make_tport(
		struct target_fabric_configfs *tf,
		struct config_group *group,
		const char *name)
{
	struct sbp_tport *tport;
	u64 guid = 0;

	if (sbp_parse_wwn(name, &guid) < 0)
		return ERR_PTR(-EINVAL);

	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
	if (!tport) {
		pr_err("Unable to allocate struct sbp_tport\n");
		return ERR_PTR(-ENOMEM);
	}

	tport->guid = guid;
	sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid);

	return &tport->tport_wwn;
}

static void sbp_drop_tport(struct se_wwn *wwn)
{
	struct sbp_tport *tport =
		container_of(wwn, struct sbp_tport, tport_wwn);

	kfree(tport);
}

static ssize_t sbp_wwn_show_attr_version(
		struct target_fabric_configfs *tf,
		char *page)
{
	return sprintf(page, "FireWire SBP fabric module %s\n", SBP_VERSION);
}

TF_WWN_ATTR_RO(sbp, version);

static struct configfs_attribute *sbp_wwn_attrs[] = {
	&sbp_wwn_version.attr,
	NULL,
};

static ssize_t sbp_tpg_show_directory_id(
		struct se_portal_group *se_tpg,
		char *page)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	if (tport->directory_id == -1)
		return sprintf(page, "implicit\n");

	return sprintf(page, "%06x\n", tport->directory_id);
}
static ssize_t sbp_tpg_store_directory_id(
		struct se_portal_group *se_tpg,
		const char *page,
		size_t count)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;

	if (tport->enable) {
		pr_err("Cannot change the directory_id on an active target.\n");
		return -EBUSY;
	}

	if (strstr(page, "implicit") == page) {
		tport->directory_id = -1;
	} else {
		if (kstrtoul(page, 16, &val) < 0)
			return -EINVAL;
		if (val > 0xffffff)
			return -EINVAL;

		tport->directory_id = val;
	}

	return count;
}
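/*
 * Example attribute usage (a sketch, using the hypothetical paths above):
 *
 *   echo implicit > .../sbp/<guid>/tpgt_1/directory_id
 *   echo 0x00abcd > .../sbp/<guid>/tpgt_1/directory_id
 *
 * Values are parsed as hex and must fit in 24 bits; writing "implicit"
 * removes the explicit directory_id so the implicit default applies again.
 */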
static ssize_t sbp_tpg_show_enable(
		struct se_portal_group *se_tpg,
		char *page)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	return sprintf(page, "%d\n", tport->enable);
}
static ssize_t sbp_tpg_store_enable(
		struct se_portal_group *se_tpg,
		const char *page,
		size_t count)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val != 0) && (val != 1))
		return -EINVAL;

	if (tport->enable == val)
		return count;

	if (val) {
		if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) {
			pr_err("Cannot enable a target with no LUNs!\n");
			return -EINVAL;
		}
	} else {
		/* XXX: force-shutdown sessions instead? */
		spin_lock_bh(&se_tpg->session_lock);
		if (!list_empty(&se_tpg->tpg_sess_list)) {
			spin_unlock_bh(&se_tpg->session_lock);
			return -EBUSY;
		}
		spin_unlock_bh(&se_tpg->session_lock);
	}

	tport->enable = val;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0) {
		pr_err("Could not update Config ROM\n");
		return ret;
	}

	return count;
}
TF_TPG_BASE_ATTR(sbp, directory_id, S_IRUGO | S_IWUSR);
TF_TPG_BASE_ATTR(sbp, enable, S_IRUGO | S_IWUSR);

static struct configfs_attribute *sbp_tpg_base_attrs[] = {
	&sbp_tpg_directory_id.attr,
	&sbp_tpg_enable.attr,
	NULL,
};
static ssize_t sbp_tpg_attrib_show_mgt_orb_timeout(
		struct se_portal_group *se_tpg,
		char *page)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	return sprintf(page, "%d\n", tport->mgt_orb_timeout);
}
static ssize_t sbp_tpg_attrib_store_mgt_orb_timeout(
		struct se_portal_group *se_tpg,
		const char *page,
		size_t count)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val < 1) || (val > 127))
		return -EINVAL;

	if (tport->mgt_orb_timeout == val)
		return count;

	tport->mgt_orb_timeout = val;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		return ret;

	return count;
}
static ssize_t sbp_tpg_attrib_show_max_reconnect_timeout(
		struct se_portal_group *se_tpg,
		char *page)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	return sprintf(page, "%d\n", tport->max_reconnect_timeout);
}
static ssize_t sbp_tpg_attrib_store_max_reconnect_timeout(
		struct se_portal_group *se_tpg,
		const char *page,
		size_t count)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val < 1) || (val > 32767))
		return -EINVAL;

	if (tport->max_reconnect_timeout == val)
		return count;

	tport->max_reconnect_timeout = val;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		return ret;

	return count;
}
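/*
 * Note: both timeout attributes feed into the unit directory, hence the
 * sbp_update_unit_directory() call on every change; max_reconnect_timeout
 * is intended to bound how long (in seconds) a login may persist across a
 * bus reset before the initiator has to perform a fresh login.
 */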
static ssize_t sbp_tpg_attrib_show_max_logins_per_lun(
		struct se_portal_group *se_tpg,
		char *page)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	return sprintf(page, "%d\n", tport->max_logins_per_lun);
}
static ssize_t sbp_tpg_attrib_store_max_logins_per_lun(
		struct se_portal_group *se_tpg,
		const char *page,
		size_t count)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val < 1) || (val > 127))
		return -EINVAL;

	/* XXX: also check against current count? */

	tport->max_logins_per_lun = val;

	return count;
}
TF_TPG_ATTRIB_ATTR(sbp, mgt_orb_timeout, S_IRUGO | S_IWUSR);
TF_TPG_ATTRIB_ATTR(sbp, max_reconnect_timeout, S_IRUGO | S_IWUSR);
TF_TPG_ATTRIB_ATTR(sbp, max_logins_per_lun, S_IRUGO | S_IWUSR);

static struct configfs_attribute *sbp_tpg_attrib_attrs[] = {
	&sbp_tpg_attrib_mgt_orb_timeout.attr,
	&sbp_tpg_attrib_max_reconnect_timeout.attr,
	&sbp_tpg_attrib_max_logins_per_lun.attr,
	NULL,
};
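/*
 * These attributes appear in the TPG's attrib/ group, e.g. (hypothetical
 * paths as above):
 *
 *   .../sbp/<guid>/tpgt_1/attrib/mgt_orb_timeout
 *   .../sbp/<guid>/tpgt_1/attrib/max_reconnect_timeout
 *   .../sbp/<guid>/tpgt_1/attrib/max_logins_per_lun
 */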
static struct target_core_fabric_ops sbp_ops = {
	.get_fabric_name		= sbp_get_fabric_name,
	.get_fabric_proto_ident		= sbp_get_fabric_proto_ident,
	.tpg_get_wwn			= sbp_get_fabric_wwn,
	.tpg_get_tag			= sbp_get_tag,
	.tpg_get_default_depth		= sbp_get_default_depth,
	.tpg_get_pr_transport_id	= sbp_get_pr_transport_id,
	.tpg_get_pr_transport_id_len	= sbp_get_pr_transport_id_len,
	.tpg_parse_pr_out_transport_id	= sbp_parse_pr_out_transport_id,
	.tpg_check_demo_mode		= sbp_check_true,
	.tpg_check_demo_mode_cache	= sbp_check_true,
	.tpg_check_demo_mode_write_protect = sbp_check_false,
	.tpg_check_prod_mode_write_protect = sbp_check_false,
	.tpg_alloc_fabric_acl		= sbp_alloc_fabric_acl,
	.tpg_release_fabric_acl		= sbp_release_fabric_acl,
	.tpg_get_inst_index		= sbp_tpg_get_inst_index,
	.release_cmd			= sbp_release_cmd,
	.shutdown_session		= sbp_shutdown_session,
	.close_session			= sbp_close_session,
	.sess_get_index			= sbp_sess_get_index,
	.write_pending			= sbp_write_pending,
	.write_pending_status		= sbp_write_pending_status,
	.set_default_node_attributes	= sbp_set_default_node_attrs,
	.get_task_tag			= sbp_get_task_tag,
	.get_cmd_state			= sbp_get_cmd_state,
	.queue_data_in			= sbp_queue_data_in,
	.queue_status			= sbp_queue_status,
	.queue_tm_rsp			= sbp_queue_tm_rsp,
	.check_stop_free		= sbp_check_stop_free,

	.fabric_make_wwn		= sbp_make_tport,
	.fabric_drop_wwn		= sbp_drop_tport,
	.fabric_make_tpg		= sbp_make_tpg,
	.fabric_drop_tpg		= sbp_drop_tpg,
	.fabric_post_link		= sbp_post_link_lun,
	.fabric_pre_unlink		= sbp_pre_unlink_lun,
	.fabric_make_np			= NULL,
	.fabric_drop_np			= NULL,
	.fabric_make_nodeacl		= sbp_make_nodeacl,
	.fabric_drop_nodeacl		= sbp_drop_nodeacl,
};
static int sbp_register_configfs(void)
{
	struct target_fabric_configfs *fabric;
	int ret;

	fabric = target_fabric_configfs_init(THIS_MODULE, "sbp");
	if (IS_ERR(fabric)) {
		pr_err("target_fabric_configfs_init() failed\n");
		return PTR_ERR(fabric);
	}

	fabric->tf_ops = sbp_ops;

	/*
	 * Setup default attribute lists for various fabric->tf_cit_tmpl
	 */
	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = sbp_wwn_attrs;
	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = sbp_tpg_base_attrs;
	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = sbp_tpg_attrib_attrs;
	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;

	ret = target_fabric_configfs_register(fabric);
	if (ret < 0) {
		pr_err("target_fabric_configfs_register() failed for SBP\n");
		return ret;
	}

	sbp_fabric_configfs = fabric;

	return 0;
}
static void sbp_deregister_configfs(void)
{
	if (!sbp_fabric_configfs)
		return;

	target_fabric_configfs_deregister(sbp_fabric_configfs);
	sbp_fabric_configfs = NULL;
}
static int __init sbp_init(void)
{
	int ret;

	ret = sbp_register_configfs();
	if (ret < 0)
		return ret;

	return 0;
}
static void __exit sbp_exit(void)
{
	sbp_deregister_configfs();
}
MODULE_DESCRIPTION("FireWire SBP fabric driver");
MODULE_LICENSE("GPL");
module_init(sbp_init);
module_exit(sbp_exit);