/*
 * SBP2 target driver (SCSI over IEEE1394 in target mode)
 *
 * Copyright (C) 2011 Chris Boot <bootc@bootc.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#define KMSG_COMPONENT "sbp_target"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include <asm/unaligned.h>

#include "sbp_target.h"
static const struct target_core_fabric_ops sbp_ops;

/* FireWire address region for management and command block address handlers */
static const struct fw_address_region sbp_register_region = {
	.start	= CSR_REGISTER_BASE + 0x10000,
	.end	= 0x1000000000000ULL,
};
static const u32 sbp_unit_directory_template[] = {
	0x1200609e, /* unit_specifier_id: NCITS/T10 */
	0x13010483, /* unit_sw_version: 1155D Rev 4 */
	0x3800609e, /* command_set_specifier_id: NCITS/T10 */
	0x390104d8, /* command_set: SPC-2 */
	0x3b000000, /* command_set_revision: 0 */
	0x3c000001, /* firmware_revision: 1 */
};
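/*
 * Illustrative sketch (not part of the driver): every quadlet in a config
 * ROM directory packs an 8-bit key in the top byte and a 24-bit value
 * below it, so 0x1200609e above is key 0x12 (unit_specifier_id) with
 * value 0x00609e. The helper name is hypothetical.
 */
static inline u32 sbp_cfgrom_immediate_entry(u8 key, u32 value)
{
	return ((u32)key << 24) | (value & 0x00ffffff);
}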
#define SESSION_MAINTENANCE_INTERVAL HZ

static atomic_t login_id = ATOMIC_INIT(0);

static void session_maintenance_work(struct work_struct *);
static int sbp_run_transaction(struct fw_card *, int, int, int, int,
		unsigned long long, void *, size_t);

static int read_peer_guid(u64 *guid, const struct sbp_management_request *req)
{
	int ret;
	__be32 high, low;

	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
			req->node_addr, req->generation, req->speed,
			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 3 * 4,
			&high, sizeof(high));
	if (ret != RCODE_COMPLETE)
		return ret;

	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
			req->node_addr, req->generation, req->speed,
			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 4 * 4,
			&low, sizeof(low));
	if (ret != RCODE_COMPLETE)
		return ret;

	*guid = (u64)be32_to_cpu(high) << 32 | be32_to_cpu(low);

	return RCODE_COMPLETE;
}
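/*
 * Sketch (illustrative only): the two quadlets fetched above are bus info
 * block words 3 and 4 of the initiator's config ROM, which hold the
 * EUI-64. Combining them mirrors the last step of read_peer_guid(); the
 * helper name is hypothetical.
 */
static inline u64 sbp_guid_from_quadlets(__be32 high, __be32 low)
{
	return (u64)be32_to_cpu(high) << 32 | be32_to_cpu(low);
}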
static struct sbp_session *sbp_session_find_by_guid(
		struct sbp_tpg *tpg, u64 guid)
{
	struct se_session *se_sess;
	struct sbp_session *sess, *found = NULL;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;
		if (sess->guid == guid)
			found = sess;
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return found;
}

static struct sbp_login_descriptor *sbp_login_find_by_lun(
		struct sbp_session *session, struct se_lun *lun)
{
	struct sbp_login_descriptor *login, *found = NULL;

	spin_lock_bh(&session->lock);
	list_for_each_entry(login, &session->login_list, link) {
		if (login->lun == lun)
			found = login;
	}
	spin_unlock_bh(&session->lock);

	return found;
}

static int sbp_login_count_all_by_lun(
		struct sbp_tpg *tpg,
		struct se_lun *lun,
		int exclusive)
{
	struct se_session *se_sess;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login;
	int count = 0;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;

		spin_lock_bh(&sess->lock);
		list_for_each_entry(login, &sess->login_list, link) {
			if (login->lun != lun)
				continue;

			if (!exclusive || login->exclusive)
				count++;
		}
		spin_unlock_bh(&sess->lock);
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return count;
}

static struct sbp_login_descriptor *sbp_login_find_by_id(
		struct sbp_tpg *tpg, int login_id)
{
	struct se_session *se_sess;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login, *found = NULL;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;

		spin_lock_bh(&sess->lock);
		list_for_each_entry(login, &sess->login_list, link) {
			if (login->login_id == login_id)
				found = login;
		}
		spin_unlock_bh(&sess->lock);
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return found;
}

static struct se_lun *sbp_get_lun_from_tpg(struct sbp_tpg *tpg, int lun)
{
	struct se_portal_group *se_tpg = &tpg->se_tpg;
	struct se_lun *se_lun;

	if (lun >= TRANSPORT_MAX_LUNS_PER_TPG)
		return ERR_PTR(-EINVAL);

	spin_lock(&se_tpg->tpg_lun_lock);
	se_lun = se_tpg->tpg_lun_list[lun];

	if (se_lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
		se_lun = ERR_PTR(-ENODEV);

	spin_unlock(&se_tpg->tpg_lun_lock);

	return se_lun;
}

static struct sbp_session *sbp_session_create(
		struct sbp_tpg *tpg,
		u64 guid)
{
	struct sbp_session *sess;
	int ret;
	char guid_str[17];
	struct se_node_acl *se_nacl;

	sess = kmalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		pr_err("failed to allocate session descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
	if (IS_ERR(sess->se_sess)) {
		pr_err("failed to init se_session\n");

		ret = PTR_ERR(sess->se_sess);
		kfree(sess);
		return ERR_PTR(ret);
	}

	snprintf(guid_str, sizeof(guid_str), "%016llx", guid);

	se_nacl = core_tpg_check_initiator_node_acl(&tpg->se_tpg, guid_str);
	if (!se_nacl) {
		pr_warn("Node ACL not found for %s\n", guid_str);

		transport_free_session(sess->se_sess);
		kfree(sess);

		return ERR_PTR(-EPERM);
	}

	sess->se_sess->se_node_acl = se_nacl;

	spin_lock_init(&sess->lock);
	INIT_LIST_HEAD(&sess->login_list);
	INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);

	sess->guid = guid;

	transport_register_session(&tpg->se_tpg, se_nacl, sess->se_sess, sess);

	return sess;
}

static void sbp_session_release(struct sbp_session *sess, bool cancel_work)
{
	spin_lock_bh(&sess->lock);
	if (!list_empty(&sess->login_list)) {
		spin_unlock_bh(&sess->lock);
		return;
	}
	spin_unlock_bh(&sess->lock);

	if (cancel_work)
		cancel_delayed_work_sync(&sess->maint_work);

	transport_deregister_session_configfs(sess->se_sess);
	transport_deregister_session(sess->se_sess);

	if (sess->card)
		fw_card_put(sess->card);

	kfree(sess);
}

static void sbp_target_agent_unregister(struct sbp_target_agent *);

static void sbp_login_release(struct sbp_login_descriptor *login,
	bool cancel_work)
{
	struct sbp_session *sess = login->sess;

	/* FIXME: abort/wait on tasks */

	sbp_target_agent_unregister(login->tgt_agt);

	if (sess) {
		spin_lock_bh(&sess->lock);
		list_del(&login->link);
		spin_unlock_bh(&sess->lock);

		sbp_session_release(sess, cancel_work);
	}

	kfree(login);
}

static struct sbp_target_agent *sbp_target_agent_register(
		struct sbp_login_descriptor *);

static void sbp_management_request_login(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	struct se_lun *se_lun;
	int ret;
	u64 guid;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login;
	struct sbp_login_response_block *response;
	int login_response_len;

	se_lun = sbp_get_lun_from_tpg(tpg,
			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));
	if (IS_ERR(se_lun)) {
		pr_notice("login to unknown LUN: %d\n",
			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LUN_NOTSUPP));
		return;
	}

	ret = read_peer_guid(&guid, req);
	if (ret != RCODE_COMPLETE) {
		pr_warn("failed to read peer GUID: %d\n", ret);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n",
		se_lun->unpacked_lun, guid);

	sess = sbp_session_find_by_guid(tpg, guid);
	if (sess) {
		login = sbp_login_find_by_lun(sess, se_lun);
		if (login) {
			pr_notice("initiator already logged-in\n");

			/*
			 * SBP-2 R4 says we should return access denied, but
			 * that can confuse initiators. Instead we need to
			 * treat this like a reconnect, but send the login
			 * response block like a fresh login.
			 *
			 * This is required particularly in the case of Apple
			 * devices booting off the FireWire target, where
			 * the firmware has an active login to the target. When
			 * the OS takes control of the session it issues its own
			 * LOGIN rather than a RECONNECT. To avoid the machine
			 * waiting until the reconnect_hold expires, we can skip
			 * the ACCESS_DENIED errors to speed things up.
			 */

			goto already_logged_in;
		}
	}

	/*
	 * check exclusive bit in login request
	 * reject with access_denied if any logins present
	 */
	if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) &&
	    sbp_login_count_all_by_lun(tpg, se_lun, 0)) {
		pr_warn("refusing exclusive login with other active logins\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	/*
	 * check exclusive bit in any existing login descriptor
	 * reject with access_denied if any exclusive logins present
	 */
	if (sbp_login_count_all_by_lun(tpg, se_lun, 1)) {
		pr_warn("refusing login while another exclusive login present\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	/*
	 * check we haven't exceeded the number of allowed logins
	 * reject with resources_unavailable if we have
	 */
	if (sbp_login_count_all_by_lun(tpg, se_lun, 0) >=
			tport->max_logins_per_lun) {
		pr_warn("max number of logins reached\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	if (!sess) {
		sess = sbp_session_create(tpg, guid);
		if (IS_ERR(sess)) {
			switch (PTR_ERR(sess)) {
			case -EPERM:
				ret = SBP_STATUS_ACCESS_DENIED;
				break;
			default:
				ret = SBP_STATUS_RESOURCES_UNAVAIL;
				break;
			}

			req->status.status = cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_SBP_STATUS(ret));
			return;
		}

		sess->node_id = req->node_addr;
		sess->card = fw_card_get(req->card);
		sess->generation = req->generation;
		sess->speed = req->speed;

		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	}

	/* only take the latest reconnect_hold into account */
	sess->reconnect_hold = min(
		1 << LOGIN_ORB_RECONNECT(be32_to_cpu(req->orb.misc)),
		tport->max_reconnect_timeout) - 1;

	login = kmalloc(sizeof(*login), GFP_KERNEL);
	if (!login) {
		pr_err("failed to allocate login descriptor\n");

		sbp_session_release(sess, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	login->sess = sess;
	login->lun = se_lun;
	login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
	login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
	login->login_id = atomic_inc_return(&login_id);

	login->tgt_agt = sbp_target_agent_register(login);
	if (IS_ERR(login->tgt_agt)) {
		ret = PTR_ERR(login->tgt_agt);
		pr_err("failed to map command block handler: %d\n", ret);

		sbp_session_release(sess, true);
		kfree(login);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	spin_lock_bh(&sess->lock);
	list_add_tail(&login->link, &sess->login_list);
	spin_unlock_bh(&sess->lock);

already_logged_in:
	response = kzalloc(sizeof(*response), GFP_KERNEL);
	if (!response) {
		pr_err("failed to allocate login response block\n");

		sbp_login_release(login, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	login_response_len = clamp_val(
			LOGIN_ORB_RESPONSE_LENGTH(be32_to_cpu(req->orb.length)),
			12, sizeof(*response));
	response->misc = cpu_to_be32(
		((login_response_len & 0xffff) << 16) |
		(login->login_id & 0xffff));
	response->reconnect_hold = cpu_to_be32(sess->reconnect_hold & 0xffff);
	addr_to_sbp2_pointer(login->tgt_agt->handler.offset,
		&response->command_block_agent);

	ret = sbp_run_transaction(sess->card, TCODE_WRITE_BLOCK_REQUEST,
		sess->node_id, sess->generation, sess->speed,
		sbp2_pointer_to_addr(&req->orb.ptr2), response,
		login_response_len);
	if (ret != RCODE_COMPLETE) {
		pr_debug("failed to write login response block: %x\n", ret);

		kfree(response);
		sbp_login_release(login, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	kfree(response);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}
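/*
 * Worked example (mirroring the response built above): the login response
 * packs the response length and login ID into one big-endian word, so a
 * 16-byte response for login ID 3 yields misc == 0x00100003. The helper
 * is hypothetical, for illustration only.
 */
static inline __be32 sbp_login_response_misc_example(u16 len, u16 id)
{
	return cpu_to_be32(((u32)len << 16) | id);
}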
static void sbp_management_request_query_logins(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	pr_notice("QUERY LOGINS not implemented\n");
	/* FIXME: implement */

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
}

static void sbp_management_request_reconnect(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	int ret;
	u64 guid;
	struct sbp_login_descriptor *login;

	ret = read_peer_guid(&guid, req);
	if (ret != RCODE_COMPLETE) {
		pr_warn("failed to read peer GUID: %d\n", ret);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	pr_notice("mgt_agent RECONNECT from %016llx\n", guid);

	login = sbp_login_find_by_id(tpg,
		RECONNECT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)));

	if (!login) {
		pr_err("mgt_agent RECONNECT unknown login ID\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	if (login->sess->guid != guid) {
		pr_err("mgt_agent RECONNECT login GUID doesn't match\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	spin_lock_bh(&login->sess->lock);
	if (login->sess->card)
		fw_card_put(login->sess->card);

	/* update the node details */
	login->sess->generation = req->generation;
	login->sess->node_id = req->node_addr;
	login->sess->card = fw_card_get(req->card);
	login->sess->speed = req->speed;
	spin_unlock_bh(&login->sess->lock);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}

static void sbp_management_request_logout(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	int id;
	struct sbp_login_descriptor *login;

	id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));

	login = sbp_login_find_by_id(tpg, id);
	if (!login) {
		pr_warn("cannot find login: %d\n", id);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LOGIN_ID_UNKNOWN));
		return;
	}

	pr_info("mgt_agent LOGOUT from LUN %d session %d\n",
		login->lun->unpacked_lun, login->login_id);

	if (req->node_addr != login->sess->node_id) {
		pr_warn("logout from different node ID\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	sbp_login_release(login, true);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}

static void session_check_for_reset(struct sbp_session *sess)
{
	bool card_valid = false;

	spin_lock_bh(&sess->lock);

	if (sess->card) {
		spin_lock_irq(&sess->card->lock);
		card_valid = (sess->card->local_node != NULL);
		spin_unlock_irq(&sess->card->lock);

		if (!card_valid) {
			fw_card_put(sess->card);
			sess->card = NULL;
		}
	}

	if (!card_valid || (sess->generation != sess->card->generation)) {
		pr_info("Waiting for reconnect from node: %016llx\n",
				sess->guid);

		sess->node_id = -1;
		sess->reconnect_expires = get_jiffies_64() +
			((sess->reconnect_hold + 1) * HZ);
	}

	spin_unlock_bh(&sess->lock);
}

static void session_reconnect_expired(struct sbp_session *sess)
{
	struct sbp_login_descriptor *login, *temp;
	LIST_HEAD(login_list);

	pr_info("Reconnect timer expired for node: %016llx\n", sess->guid);

	spin_lock_bh(&sess->lock);
	list_for_each_entry_safe(login, temp, &sess->login_list, link) {
		login->sess = NULL;
		list_move_tail(&login->link, &login_list);
	}
	spin_unlock_bh(&sess->lock);

	list_for_each_entry_safe(login, temp, &login_list, link) {
		list_del(&login->link);
		sbp_login_release(login, false);
	}

	sbp_session_release(sess, false);
}

static void session_maintenance_work(struct work_struct *work)
{
	struct sbp_session *sess = container_of(work, struct sbp_session,
			maint_work.work);

	/* could be called while tearing down the session */
	spin_lock_bh(&sess->lock);
	if (list_empty(&sess->login_list)) {
		spin_unlock_bh(&sess->lock);
		return;
	}
	spin_unlock_bh(&sess->lock);

	if (sess->node_id != -1) {
		/* check for bus reset and make node_id invalid */
		session_check_for_reset(sess);

		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	} else if (!time_after64(get_jiffies_64(), sess->reconnect_expires)) {
		/* still waiting for reconnect */
		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	} else {
		/* reconnect timeout has expired */
		session_reconnect_expired(sess);
	}
}

static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	int state;

	switch (tcode) {
	case TCODE_READ_QUADLET_REQUEST:
		pr_debug("tgt_agent AGENT_STATE READ\n");

		spin_lock_bh(&agent->lock);
		state = agent->state;
		spin_unlock_bh(&agent->lock);

		*(__be32 *)data = cpu_to_be32(state);

		return RCODE_COMPLETE;

	case TCODE_WRITE_QUADLET_REQUEST:
		/* ignored */
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_agent_reset(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		pr_debug("tgt_agent AGENT_RESET\n");
		spin_lock_bh(&agent->lock);
		agent->state = AGENT_STATE_RESET;
		spin_unlock_bh(&agent->lock);
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	struct sbp2_pointer *ptr = data;

	switch (tcode) {
	case TCODE_WRITE_BLOCK_REQUEST:
		spin_lock_bh(&agent->lock);
		if (agent->state != AGENT_STATE_SUSPENDED &&
				agent->state != AGENT_STATE_RESET) {
			spin_unlock_bh(&agent->lock);
			pr_notice("Ignoring ORB_POINTER write while active.\n");
			return RCODE_CONFLICT_ERROR;
		}
		agent->state = AGENT_STATE_ACTIVE;
		spin_unlock_bh(&agent->lock);

		agent->orb_pointer = sbp2_pointer_to_addr(ptr);
		agent->doorbell = false;

		pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n",
				agent->orb_pointer);

		queue_work(system_unbound_wq, &agent->work);

		return RCODE_COMPLETE;

	case TCODE_READ_BLOCK_REQUEST:
		pr_debug("tgt_agent ORB_POINTER READ\n");
		spin_lock_bh(&agent->lock);
		addr_to_sbp2_pointer(agent->orb_pointer, ptr);
		spin_unlock_bh(&agent->lock);
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		spin_lock_bh(&agent->lock);
		if (agent->state != AGENT_STATE_SUSPENDED) {
			spin_unlock_bh(&agent->lock);
			pr_debug("Ignoring DOORBELL while active.\n");
			return RCODE_CONFLICT_ERROR;
		}
		agent->state = AGENT_STATE_ACTIVE;
		spin_unlock_bh(&agent->lock);

		agent->doorbell = true;

		pr_debug("tgt_agent DOORBELL\n");

		queue_work(system_unbound_wq, &agent->work);

		return RCODE_COMPLETE;

	case TCODE_READ_QUADLET_REQUEST:
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_unsolicited_status_enable(struct fw_card *card,
		int tcode, void *data, struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		pr_debug("tgt_agent UNSOLICITED_STATUS_ENABLE\n");
		/* ignored as we don't send unsolicited status */
		return RCODE_COMPLETE;

	case TCODE_READ_QUADLET_REQUEST:
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static void tgt_agent_rw(struct fw_card *card, struct fw_request *request,
		int tcode, int destination, int source, int generation,
		unsigned long long offset, void *data, size_t length,
		void *callback_data)
{
	struct sbp_target_agent *agent = callback_data;
	struct sbp_session *sess = agent->login->sess;
	int sess_gen, sess_node, rcode;

	spin_lock_bh(&sess->lock);
	sess_gen = sess->generation;
	sess_node = sess->node_id;
	spin_unlock_bh(&sess->lock);

	if (generation != sess_gen) {
		pr_notice("ignoring request with wrong generation\n");
		rcode = RCODE_TYPE_ERROR;
		goto out;
	}

	if (source != sess_node) {
		pr_notice("ignoring request from foreign node (%x != %x)\n",
				source, sess_node);
		rcode = RCODE_TYPE_ERROR;
		goto out;
	}

	/* turn offset into the offset from the start of the block */
	offset -= agent->handler.offset;

	if (offset == 0x00 && length == 4) {
		/* AGENT_STATE */
		rcode = tgt_agent_rw_agent_state(card, tcode, data, agent);
	} else if (offset == 0x04 && length == 4) {
		/* AGENT_RESET */
		rcode = tgt_agent_rw_agent_reset(card, tcode, data, agent);
	} else if (offset == 0x08 && length == 8) {
		/* ORB_POINTER */
		rcode = tgt_agent_rw_orb_pointer(card, tcode, data, agent);
	} else if (offset == 0x10 && length == 4) {
		/* DOORBELL */
		rcode = tgt_agent_rw_doorbell(card, tcode, data, agent);
	} else if (offset == 0x14 && length == 4) {
		/* UNSOLICITED_STATUS_ENABLE */
		rcode = tgt_agent_rw_unsolicited_status_enable(card, tcode,
				data, agent);
	} else {
		rcode = RCODE_ADDRESS_ERROR;
	}

out:
	fw_send_response(card, request, rcode);
}
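/*
 * Sketch (illustrative, derived from the dispatch above): the command
 * block agent register map, as offsets from the start of the 0x20-byte
 * handler region. The enum itself is hypothetical documentation.
 */
enum sbp_tgt_agent_reg_offsets {
	SBP_TGT_AGENT_STATE			= 0x00,	/* quadlet, read/write */
	SBP_TGT_AGENT_RESET			= 0x04,	/* quadlet, write */
	SBP_TGT_ORB_POINTER			= 0x08,	/* 8-byte block */
	SBP_TGT_DOORBELL			= 0x10,	/* quadlet */
	SBP_TGT_UNSOLICITED_STATUS_ENABLE	= 0x14,	/* quadlet */
};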
static void sbp_handle_command(struct sbp_target_request *);
static int sbp_send_status(struct sbp_target_request *);
static void sbp_free_request(struct sbp_target_request *);

static void tgt_agent_process_work(struct work_struct *work)
{
	struct sbp_target_request *req =
		container_of(work, struct sbp_target_request, work);

	pr_debug("tgt_orb ptr:0x%llx next_ORB:0x%llx data_descriptor:0x%llx misc:0x%x\n",
			req->orb_pointer,
			sbp2_pointer_to_addr(&req->orb.next_orb),
			sbp2_pointer_to_addr(&req->orb.data_descriptor),
			be32_to_cpu(req->orb.misc));

	if (req->orb_pointer >> 32)
		pr_debug("ORB with high bits set\n");

	switch (ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc))) {
	case 0:/* Format specified by this standard */
		sbp_handle_command(req);
		break;
	case 1: /* Reserved for future standardization */
	case 2: /* Vendor-dependent */
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(
				STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(
				SBP_STATUS_REQ_TYPE_NOTSUPP));
		sbp_send_status(req);
		sbp_free_request(req);
		break;
	case 3: /* Dummy ORB */
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(
				STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(
				SBP_STATUS_DUMMY_ORB_COMPLETE));
		sbp_send_status(req);
		sbp_free_request(req);
		break;
	}
}

/* used to double-check we haven't been issued an AGENT_RESET */
static inline bool tgt_agent_check_active(struct sbp_target_agent *agent)
{
	bool active;

	spin_lock_bh(&agent->lock);
	active = (agent->state == AGENT_STATE_ACTIVE);
	spin_unlock_bh(&agent->lock);

	return active;
}

static void tgt_agent_fetch_work(struct work_struct *work)
{
	struct sbp_target_agent *agent =
		container_of(work, struct sbp_target_agent, work);
	struct sbp_session *sess = agent->login->sess;
	struct sbp_target_request *req;
	int ret;
	bool doorbell = agent->doorbell;
	u64 next_orb = agent->orb_pointer;

	while (next_orb && tgt_agent_check_active(agent)) {
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req) {
			spin_lock_bh(&agent->lock);
			agent->state = AGENT_STATE_DEAD;
			spin_unlock_bh(&agent->lock);
			return;
		}

		req->login = agent->login;
		req->orb_pointer = next_orb;

		req->status.status = cpu_to_be32(STATUS_BLOCK_ORB_OFFSET_HIGH(
					req->orb_pointer >> 32));
		req->status.orb_low = cpu_to_be32(
				req->orb_pointer & 0xfffffffc);

		/* read in the ORB */
		ret = sbp_run_transaction(sess->card, TCODE_READ_BLOCK_REQUEST,
				sess->node_id, sess->generation, sess->speed,
				req->orb_pointer, &req->orb, sizeof(req->orb));
		if (ret != RCODE_COMPLETE) {
			pr_debug("tgt_orb fetch failed: %x\n", ret);
			req->status.status |= cpu_to_be32(
					STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_FINISHED) |
					STATUS_BLOCK_RESP(
						STATUS_RESP_TRANSPORT_FAILURE) |
					STATUS_BLOCK_DEAD(1) |
					STATUS_BLOCK_LEN(1) |
					STATUS_BLOCK_SBP_STATUS(
						SBP_STATUS_UNSPECIFIED_ERROR));
			spin_lock_bh(&agent->lock);
			agent->state = AGENT_STATE_DEAD;
			spin_unlock_bh(&agent->lock);

			sbp_send_status(req);
			sbp_free_request(req);
			return;
		}

		/* check the next_ORB field */
		if (be32_to_cpu(req->orb.next_orb.high) & 0x80000000) {
			next_orb = 0;
			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_FINISHED));
		} else {
			next_orb = sbp2_pointer_to_addr(&req->orb.next_orb);
			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_CONTINUING));
		}

		if (tgt_agent_check_active(agent) && !doorbell) {
			INIT_WORK(&req->work, tgt_agent_process_work);
			queue_work(system_unbound_wq, &req->work);
		} else {
			/* don't process this request, just check next_ORB */
			sbp_free_request(req);
		}

		spin_lock_bh(&agent->lock);
		doorbell = agent->doorbell = false;

		/* check if we should carry on processing */
		if (next_orb)
			agent->orb_pointer = next_orb;
		else
			agent->state = AGENT_STATE_SUSPENDED;

		spin_unlock_bh(&agent->lock);
	}
}

static struct sbp_target_agent *sbp_target_agent_register(
		struct sbp_login_descriptor *login)
{
	struct sbp_target_agent *agent;
	int ret;

	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
	if (!agent)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&agent->lock);

	agent->handler.length = 0x20;
	agent->handler.address_callback = tgt_agent_rw;
	agent->handler.callback_data = agent;

	agent->login = login;
	agent->state = AGENT_STATE_RESET;
	INIT_WORK(&agent->work, tgt_agent_fetch_work);
	agent->orb_pointer = 0;
	agent->doorbell = false;

	ret = fw_core_add_address_handler(&agent->handler,
			&sbp_register_region);
	if (ret < 0) {
		kfree(agent);
		return ERR_PTR(ret);
	}

	return agent;
}

static void sbp_target_agent_unregister(struct sbp_target_agent *agent)
{
	fw_core_remove_address_handler(&agent->handler);
	cancel_work_sync(&agent->work);
	kfree(agent);
}

/*
 * Simple wrapper around fw_run_transaction that retries the transaction
 * several times in case of failure, with a quadratically increasing backoff.
 */
static int sbp_run_transaction(struct fw_card *card, int tcode, int destination_id,
		int generation, int speed, unsigned long long offset,
		void *payload, size_t length)
{
	int attempt, ret, delay;

	for (attempt = 1; attempt <= 5; attempt++) {
		ret = fw_run_transaction(card, tcode, destination_id,
				generation, speed, offset, payload, length);

		switch (ret) {
		case RCODE_COMPLETE:
		case RCODE_TYPE_ERROR:
		case RCODE_ADDRESS_ERROR:
		case RCODE_GENERATION:
			return ret;

		default:
			delay = 5 * attempt * attempt;
			usleep_range(delay, delay * 2);
		}
	}

	return ret;
}
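/*
 * Worked example (from the loop above): the retry delay grows
 * quadratically, 5 * attempt * attempt microseconds, so the five attempts
 * sleep in the ranges (5,10), (20,40), (45,90), (80,160) and (125,250) us.
 * The table is illustrative only.
 */
static const unsigned int sbp_retry_delay_us_example[] = { 5, 20, 45, 80, 125 };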
/*
 * Wrapper around sbp_run_transaction that gets the card, destination,
 * generation and speed out of the request's session.
 */
static int sbp_run_request_transaction(struct sbp_target_request *req,
		int tcode, unsigned long long offset, void *payload,
		size_t length)
{
	struct sbp_login_descriptor *login = req->login;
	struct sbp_session *sess = login->sess;
	struct fw_card *card;
	int node_id, generation, speed, ret;

	spin_lock_bh(&sess->lock);
	card = fw_card_get(sess->card);
	node_id = sess->node_id;
	generation = sess->generation;
	speed = sess->speed;
	spin_unlock_bh(&sess->lock);

	ret = sbp_run_transaction(card, tcode, node_id, generation, speed,
			offset, payload, length);

	fw_card_put(card);

	return ret;
}

static int sbp_fetch_command(struct sbp_target_request *req)
{
	int ret, cmd_len, copy_len;

	cmd_len = scsi_command_size(req->orb.command_block);

	req->cmd_buf = kmalloc(cmd_len, GFP_KERNEL);
	if (!req->cmd_buf)
		return -ENOMEM;

	memcpy(req->cmd_buf, req->orb.command_block,
		min_t(int, cmd_len, sizeof(req->orb.command_block)));

	if (cmd_len > sizeof(req->orb.command_block)) {
		pr_debug("sbp_fetch_command: filling in long command\n");
		copy_len = cmd_len - sizeof(req->orb.command_block);

		ret = sbp_run_request_transaction(req,
				TCODE_READ_BLOCK_REQUEST,
				req->orb_pointer + sizeof(req->orb),
				req->cmd_buf + sizeof(req->orb.command_block),
				copy_len);
		if (ret != RCODE_COMPLETE)
			return -EIO;
	}

	return 0;
}

static int sbp_fetch_page_table(struct sbp_target_request *req)
{
	int pg_tbl_sz, ret;
	struct sbp_page_table_entry *pg_tbl;

	if (!CMDBLK_ORB_PG_TBL_PRESENT(be32_to_cpu(req->orb.misc)))
		return 0;

	pg_tbl_sz = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)) *
		sizeof(struct sbp_page_table_entry);

	pg_tbl = kmalloc(pg_tbl_sz, GFP_KERNEL);
	if (!pg_tbl)
		return -ENOMEM;

	ret = sbp_run_request_transaction(req, TCODE_READ_BLOCK_REQUEST,
			sbp2_pointer_to_addr(&req->orb.data_descriptor),
			pg_tbl, pg_tbl_sz);
	if (ret != RCODE_COMPLETE) {
		kfree(pg_tbl);
		return -EIO;
	}

	req->pg_tbl = pg_tbl;
	return 0;
}

static void sbp_calc_data_length_direction(struct sbp_target_request *req,
	u32 *data_len, enum dma_data_direction *data_dir)
{
	int data_size, direction, idx;

	data_size = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
	direction = CMDBLK_ORB_DIRECTION(be32_to_cpu(req->orb.misc));

	if (!data_size) {
		*data_len = 0;
		*data_dir = DMA_NONE;
		return;
	}

	*data_dir = direction ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	if (req->pg_tbl) {
		*data_len = 0;
		for (idx = 0; idx < data_size; idx++) {
			*data_len += be16_to_cpu(
					req->pg_tbl[idx].segment_length);
		}
	} else {
		*data_len = data_size;
	}
}

static void sbp_handle_command(struct sbp_target_request *req)
{
	struct sbp_login_descriptor *login = req->login;
	struct sbp_session *sess = login->sess;
	int ret, unpacked_lun;
	u32 data_length;
	enum dma_data_direction data_dir;

	ret = sbp_fetch_command(req);
	if (ret) {
		pr_debug("sbp_handle_command: fetch command failed: %d\n", ret);
		goto err;
	}

	ret = sbp_fetch_page_table(req);
	if (ret) {
		pr_debug("sbp_handle_command: fetch page table failed: %d\n",
			ret);
		goto err;
	}

	unpacked_lun = req->login->lun->unpacked_lun;
	sbp_calc_data_length_direction(req, &data_length, &data_dir);

	pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n",
			req->orb_pointer, unpacked_lun, data_length, data_dir);

	if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
			      req->sense_buf, unpacked_lun, data_length,
			      TCM_SIMPLE_TAG, data_dir, 0))
		goto err;

	return;

err:
	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
		STATUS_BLOCK_DEAD(0) |
		STATUS_BLOCK_LEN(1) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
	sbp_send_status(req);
	sbp_free_request(req);
}

/*
 * DMA_TO_DEVICE = read from initiator (SCSI WRITE)
 * DMA_FROM_DEVICE = write to initiator (SCSI READ)
 */
static int sbp_rw_data(struct sbp_target_request *req)
{
	struct sbp_session *sess = req->login->sess;
	int tcode, sg_miter_flags, max_payload, pg_size, speed, node_id,
		generation, num_pte, length, tfr_length,
		rcode = RCODE_COMPLETE;
	struct sbp_page_table_entry *pte;
	unsigned long long offset;
	struct fw_card *card;
	struct sg_mapping_iter iter;

	if (req->se_cmd.data_direction == DMA_FROM_DEVICE) {
		tcode = TCODE_WRITE_BLOCK_REQUEST;
		sg_miter_flags = SG_MITER_FROM_SG;
	} else {
		tcode = TCODE_READ_BLOCK_REQUEST;
		sg_miter_flags = SG_MITER_TO_SG;
	}

	max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD(be32_to_cpu(req->orb.misc));
	speed = CMDBLK_ORB_SPEED(be32_to_cpu(req->orb.misc));

	pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc));
	if (pg_size) {
		pr_err("sbp_run_transaction: page size ignored\n");
		pg_size = 0x100 << pg_size;
	}

	spin_lock_bh(&sess->lock);
	card = fw_card_get(sess->card);
	node_id = sess->node_id;
	generation = sess->generation;
	spin_unlock_bh(&sess->lock);

	if (req->pg_tbl) {
		pte = req->pg_tbl;
		num_pte = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));

		offset = 0;
		length = 0;
	} else {
		pte = NULL;
		num_pte = 0;

		offset = sbp2_pointer_to_addr(&req->orb.data_descriptor);
		length = req->se_cmd.data_length;
	}

	sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents,
		sg_miter_flags);

	while (length || num_pte) {
		if (!length) {
			offset = (u64)be16_to_cpu(pte->segment_base_hi) << 32 |
				be32_to_cpu(pte->segment_base_lo);
			length = be16_to_cpu(pte->segment_length);

			pte++;
			num_pte--;
		}

		sg_miter_next(&iter);

		tfr_length = min3(length, max_payload, (int)iter.length);

		/* FIXME: take page_size into account */

		rcode = sbp_run_transaction(card, tcode, node_id,
				generation, speed,
				offset, iter.addr, tfr_length);

		if (rcode != RCODE_COMPLETE)
			break;

		length -= tfr_length;
		offset += tfr_length;
		iter.consumed = tfr_length;
	}

	sg_miter_stop(&iter);
	fw_card_put(card);

	if (rcode == RCODE_COMPLETE) {
		WARN_ON(length != 0);
		return 0;
	} else {
		return -EIO;
	}
}
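/*
 * Worked example (from the decode in sbp_rw_data() above): the ORB's
 * max_payload field encodes a power of two, 4 << field; a field value of
 * 8 therefore allows 1024-byte packets, and each transaction moves
 * min3(segment remaining, max_payload, current sg entry length) bytes.
 * The helper is hypothetical.
 */
static inline int sbp_max_payload_bytes_example(u32 misc)
{
	return 4 << CMDBLK_ORB_MAX_PAYLOAD(misc);
}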
static int sbp_send_status(struct sbp_target_request *req)
{
	int ret, length;
	struct sbp_login_descriptor *login = req->login;

	length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4;

	ret = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
			login->status_fifo_addr, &req->status, length);
	if (ret != RCODE_COMPLETE) {
		pr_debug("sbp_send_status: write failed: 0x%x\n", ret);
		return -EIO;
	}

	pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n",
			req->orb_pointer);

	return 0;
}
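/*
 * Worked example (from the length decode above): the 3-bit len field at
 * bits 26:24 of the status word holds the status block size in quadlets
 * minus one, so STATUS_BLOCK_LEN(1) produces an 8-byte write (the
 * two-quadlet header) and STATUS_BLOCK_LEN(5) a 24-byte write (header
 * plus 16 bytes of sense data). The helper is hypothetical.
 */
static inline int sbp_status_block_bytes_example(u32 status)
{
	return (((status >> 24) & 0x07) + 1) * 4;
}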
static void sbp_sense_mangle(struct sbp_target_request *req)
{
	struct se_cmd *se_cmd = &req->se_cmd;
	u8 *sense = req->sense_buf;
	u8 *status = req->status.data;

	WARN_ON(se_cmd->scsi_sense_length < 18);

	switch (sense[0] & 0x7f) {		/* sfmt */
	case 0x70: /* current, fixed */
		status[0] = 0 << 6;
		break;
	case 0x71: /* deferred, fixed */
		status[0] = 1 << 6;
		break;
	case 0x72: /* current, descriptor */
	case 0x73: /* deferred, descriptor */
	default:
		/*
		 * TODO: SBP-3 specifies what we should do with descriptor
		 * format sense data
		 */
		pr_err("sbp_send_sense: unknown sense format: 0x%x\n",
			sense[0]);
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQUEST_ABORTED));
		return;
	}

	status[0] |= se_cmd->scsi_status & 0x3f;/* status */
	status[1] =
		(sense[0] & 0x80) |		/* valid */
		((sense[2] & 0xe0) >> 1) |	/* mark, eom, ili */
		(sense[2] & 0x0f);		/* sense_key */
	status[2] = se_cmd->scsi_asc;		/* sense_code */
	status[3] = se_cmd->scsi_ascq;		/* sense_qualifier */

	/* information */
	status[4] = sense[3];
	status[5] = sense[4];
	status[6] = sense[5];
	status[7] = sense[6];

	/* CDB-dependent */
	status[8] = sense[8];
	status[9] = sense[9];
	status[10] = sense[10];
	status[11] = sense[11];

	/* fru */
	status[12] = sense[14];

	/* sense_key-dependent */
	status[13] = sense[15];
	status[14] = sense[16];
	status[15] = sense[17];

	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_DEAD(0) |
		STATUS_BLOCK_LEN(5) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}
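/*
 * Sketch (hypothetical helper mirroring the mangling above): SBP-2 status
 * data folds the fixed-format sense valid bit, mark/eom/ili flags and
 * sense key into a single byte, which is what status[1] receives in
 * sbp_sense_mangle().
 */
static inline u8 sbp_sense_byte1_example(const u8 *sense)
{
	return (sense[0] & 0x80) |		/* valid */
	       ((sense[2] & 0xe0) >> 1) |	/* mark, eom, ili */
	       (sense[2] & 0x0f);		/* sense_key */
}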
static int sbp_send_sense(struct sbp_target_request *req)
{
	struct se_cmd *se_cmd = &req->se_cmd;

	if (se_cmd->scsi_sense_length) {
		sbp_sense_mangle(req);
	} else {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
	}

	return sbp_send_status(req);
}

static void sbp_free_request(struct sbp_target_request *req)
{
	kfree(req->pg_tbl);
	kfree(req->cmd_buf);
	kfree(req);
}

static void sbp_mgt_agent_process(struct work_struct *work)
{
	struct sbp_management_agent *agent =
		container_of(work, struct sbp_management_agent, work);
	struct sbp_management_request *req = agent->request;
	int ret;
	int status_data_len = 0;

	/* fetch the ORB from the initiator */
	ret = sbp_run_transaction(req->card, TCODE_READ_BLOCK_REQUEST,
		req->node_addr, req->generation, req->speed,
		agent->orb_offset, &req->orb, sizeof(req->orb));
	if (ret != RCODE_COMPLETE) {
		pr_debug("mgt_orb fetch failed: %x\n", ret);
		goto out;
	}

	pr_debug("mgt_orb ptr1:0x%llx ptr2:0x%llx misc:0x%x len:0x%x status_fifo:0x%llx\n",
		sbp2_pointer_to_addr(&req->orb.ptr1),
		sbp2_pointer_to_addr(&req->orb.ptr2),
		be32_to_cpu(req->orb.misc), be32_to_cpu(req->orb.length),
		sbp2_pointer_to_addr(&req->orb.status_fifo));

	if (!ORB_NOTIFY(be32_to_cpu(req->orb.misc)) ||
		ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc)) != 0) {
		pr_err("mgt_orb bad request\n");
		goto out;
	}

	switch (MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))) {
	case MANAGEMENT_ORB_FUNCTION_LOGIN:
		sbp_management_request_login(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS:
		sbp_management_request_query_logins(agent, req,
				&status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_RECONNECT:
		sbp_management_request_reconnect(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_SET_PASSWORD:
		pr_notice("SET PASSWORD not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_LOGOUT:
		sbp_management_request_logout(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK:
		pr_notice("ABORT TASK not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET:
		pr_notice("ABORT TASK SET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET:
		pr_notice("LOGICAL UNIT RESET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_TARGET_RESET:
		pr_notice("TARGET RESET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	default:
		pr_notice("unknown management function 0x%x\n",
			MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc)));

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;
	}

	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_SRC(1) | /* Response to ORB, next_ORB absent */
		STATUS_BLOCK_LEN(DIV_ROUND_UP(status_data_len, 4) + 1) |
		STATUS_BLOCK_ORB_OFFSET_HIGH(agent->orb_offset >> 32));
	req->status.orb_low = cpu_to_be32(agent->orb_offset);

	/* write the status block back to the initiator */
	ret = sbp_run_transaction(req->card, TCODE_WRITE_BLOCK_REQUEST,
		req->node_addr, req->generation, req->speed,
		sbp2_pointer_to_addr(&req->orb.status_fifo),
		&req->status, 8 + status_data_len);
	if (ret != RCODE_COMPLETE)
		pr_debug("mgt_orb status write failed: %x\n", ret);

out:
	fw_card_put(req->card);
	kfree(req);

	spin_lock_bh(&agent->lock);
	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
	spin_unlock_bh(&agent->lock);
}

static void sbp_mgt_agent_rw(struct fw_card *card,
	struct fw_request *request, int tcode, int destination, int source,
	int generation, unsigned long long offset, void *data, size_t length,
	void *callback_data)
{
	struct sbp_management_agent *agent = callback_data;
	struct sbp2_pointer *ptr = data;
	int rcode = RCODE_ADDRESS_ERROR;

	if (!agent->tport->enable)
		goto out;

	if ((offset != agent->handler.offset) || (length != 8))
		goto out;

	if (tcode == TCODE_WRITE_BLOCK_REQUEST) {
		struct sbp_management_request *req;
		int prev_state;

		spin_lock_bh(&agent->lock);
		prev_state = agent->state;
		agent->state = MANAGEMENT_AGENT_STATE_BUSY;
		spin_unlock_bh(&agent->lock);

		if (prev_state == MANAGEMENT_AGENT_STATE_BUSY) {
			pr_notice("ignoring management request while busy\n");
			rcode = RCODE_CONFLICT_ERROR;
			goto out;
		}

		req = kzalloc(sizeof(*req), GFP_ATOMIC);
		if (!req) {
			rcode = RCODE_CONFLICT_ERROR;
			goto out;
		}

		req->card = fw_card_get(card);
		req->generation = generation;
		req->node_addr = source;
		req->speed = fw_get_request_speed(request);

		agent->orb_offset = sbp2_pointer_to_addr(ptr);
		agent->request = req;

		queue_work(system_unbound_wq, &agent->work);
		rcode = RCODE_COMPLETE;
	} else if (tcode == TCODE_READ_BLOCK_REQUEST) {
		addr_to_sbp2_pointer(agent->orb_offset, ptr);
		rcode = RCODE_COMPLETE;
	} else {
		rcode = RCODE_TYPE_ERROR;
	}

out:
	fw_send_response(card, request, rcode);
}

static struct sbp_management_agent *sbp_management_agent_register(
		struct sbp_tport *tport)
{
	int ret;
	struct sbp_management_agent *agent;

	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
	if (!agent)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&agent->lock);
	agent->tport = tport;
	agent->handler.length = 0x08;
	agent->handler.address_callback = sbp_mgt_agent_rw;
	agent->handler.callback_data = agent;
	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
	INIT_WORK(&agent->work, sbp_mgt_agent_process);
	agent->orb_offset = 0;
	agent->request = NULL;

	ret = fw_core_add_address_handler(&agent->handler,
			&sbp_register_region);
	if (ret < 0) {
		kfree(agent);
		return ERR_PTR(ret);
	}

	return agent;
}

static void sbp_management_agent_unregister(struct sbp_management_agent *agent)
{
	fw_core_remove_address_handler(&agent->handler);
	cancel_work_sync(&agent->work);
	kfree(agent);
}

static int sbp_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static int sbp_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}

static char *sbp_get_fabric_name(void)
{
	return "sbp";
}

static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	return &tport->tport_name[0];
}

static u16 sbp_get_tag(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	return tpg->tport_tpgt;
}

static u32 sbp_get_default_depth(struct se_portal_group *se_tpg)
{
	return 1;
}

static struct se_node_acl *sbp_alloc_fabric_acl(struct se_portal_group *se_tpg)
{
	struct sbp_nacl *nacl;

	nacl = kzalloc(sizeof(struct sbp_nacl), GFP_KERNEL);
	if (!nacl) {
		pr_err("Unable to allocate struct sbp_nacl\n");
		return NULL;
	}

	return &nacl->se_node_acl;
}

static void sbp_release_fabric_acl(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl)
{
	struct sbp_nacl *nacl =
		container_of(se_nacl, struct sbp_nacl, se_node_acl);
	kfree(nacl);
}

static u32 sbp_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static void sbp_release_cmd(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	sbp_free_request(req);
}

static int sbp_shutdown_session(struct se_session *se_sess)
{
	return 0;
}

static void sbp_close_session(struct se_session *se_sess)
{
	return;
}

static u32 sbp_sess_get_index(struct se_session *se_sess)
{
	return 0;
}

static int sbp_write_pending(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);
	int ret;

	ret = sbp_rw_data(req);
	if (ret) {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(
				STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(
				SBP_STATUS_UNSPECIFIED_ERROR));
		sbp_send_status(req);
		return ret;
	}

	target_execute_cmd(se_cmd);
	return 0;
}

static int sbp_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}

static void sbp_set_default_node_attrs(struct se_node_acl *nacl)
{
	return;
}

static u32 sbp_get_task_tag(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	/* only used for printk until we do TMRs */
	return (u32)req->orb_pointer;
}

static int sbp_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

static int sbp_queue_data_in(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);
	int ret;

	ret = sbp_rw_data(req);
	if (ret) {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		sbp_send_status(req);
		return ret;
	}

	return sbp_send_sense(req);
}

/*
 * Called after command (no data transfer) or after the write (to device)
 * operation is completed
 */
static int sbp_queue_status(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	return sbp_send_sense(req);
}

static void sbp_queue_tm_rsp(struct se_cmd *se_cmd)
{
}

static void sbp_aborted_task(struct se_cmd *se_cmd)
{
	return;
}

static int sbp_check_stop_free(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	transport_generic_free_cmd(&req->se_cmd, 0);
	return 1;
}

/*
 * Handlers for Serial Bus Protocol 2/3 (SBP-2 / SBP-3)
 */
static u8 sbp_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
	/*
	 * Return a IEEE 1394 SCSI Protocol identifier for loopback operations
	 * This is defined in section 7.5.1 Table 362 in spc4r17
	 */
	return SCSI_PROTOCOL_SBP;
}

static u32 sbp_get_pr_transport_id(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct t10_pr_registration *pr_reg,
	int *format_code,
	unsigned char *buf)
{
	int ret;

	/*
	 * Set PROTOCOL IDENTIFIER to 3h for SBP
	 */
	buf[0] = SCSI_PROTOCOL_SBP;
	/*
	 * From spc4r17, 7.5.4.4 TransportID for initiator ports using SCSI
	 * over IEEE 1394
	 */
	ret = hex2bin(&buf[8], se_nacl->initiatorname, 8);
	if (ret < 0)
		pr_debug("sbp transport_id: invalid hex string\n");

	/*
	 * The IEEE 1394 Transport ID is a hardcoded 24-byte length
	 */
	return 24;
}

static u32 sbp_get_pr_transport_id_len(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct t10_pr_registration *pr_reg,
	int *format_code)
{
	*format_code = 0;
	/*
	 * From spc4r17, 7.5.4.4 TransportID for initiator ports using SCSI
	 * over IEEE 1394
	 *
	 * The SBP Transport ID is a hardcoded 24-byte length
	 */
	return 24;
}

/*
 * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
 * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
 */
static char *sbp_parse_pr_out_transport_id(
	struct se_portal_group *se_tpg,
	const char *buf,
	u32 *out_tid_len,
	char **port_nexus_ptr)
{
	/*
	 * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.4 TransportID
	 * for initiator ports using SCSI over SBP Serial SCSI Protocol
	 *
	 * The TransportID for a IEEE 1394 Initiator Port is of fixed size of
	 * 24 bytes, and IEEE 1394 does not contain a I_T nexus identifier,
	 * so we return the **port_nexus_ptr set to NULL.
	 */
	*port_nexus_ptr = NULL;
	*out_tid_len = 24;

	return (char *)&buf[8];
}

static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)
{
	int i, count = 0;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		struct se_lun *se_lun = tpg->tpg_lun_list[i];

		if (se_lun->lun_status == TRANSPORT_LUN_STATUS_FREE)
			continue;

		count++;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return count;
}

static int sbp_update_unit_directory(struct sbp_tport *tport)
{
	int num_luns, num_entries, idx = 0, mgt_agt_addr, ret, i;
	u32 *data;

	if (tport->unit_directory.data) {
		fw_core_remove_descriptor(&tport->unit_directory);
		kfree(tport->unit_directory.data);
		tport->unit_directory.data = NULL;
	}

	if (!tport->enable || !tport->tpg)
		return 0;

	num_luns = sbp_count_se_tpg_luns(&tport->tpg->se_tpg);

	/*
	 * Number of entries in the final unit directory:
	 *  - all of those in the template
	 *  - management_agent
	 *  - unit_characteristics
	 *  - reconnect_timeout
	 *  - unit unique ID
	 *  - one for each LUN
	 *
	 *  MUST NOT include leaf or sub-directory entries
	 */
	num_entries = ARRAY_SIZE(sbp_unit_directory_template) + 4 + num_luns;

	if (tport->directory_id != -1)
		num_entries++;

	/* allocate num_entries + 4 for the header and unique ID leaf */
	data = kcalloc((num_entries + 4), sizeof(u32), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* directory_length */
	data[idx++] = num_entries << 16;

	/* directory_id */
	if (tport->directory_id != -1)
		data[idx++] = (CSR_DIRECTORY_ID << 24) | tport->directory_id;

	/* unit directory template */
	memcpy(&data[idx], sbp_unit_directory_template,
			sizeof(sbp_unit_directory_template));
	idx += ARRAY_SIZE(sbp_unit_directory_template);

	/* management_agent */
	mgt_agt_addr = (tport->mgt_agt->handler.offset - CSR_REGISTER_BASE) / 4;
	data[idx++] = 0x54000000 | (mgt_agt_addr & 0x00ffffff);

	/* unit_characteristics */
	data[idx++] = 0x3a000000 |
		(((tport->mgt_orb_timeout * 2) << 8) & 0xff00) |
		SBP_ORB_FETCH_SIZE;

	/* reconnect_timeout */
	data[idx++] = 0x3d000000 | (tport->max_reconnect_timeout & 0xffff);

	/* unit unique ID (leaf is just after LUNs) */
	data[idx++] = 0x8d000000 | (num_luns + 1);

	spin_lock(&tport->tpg->se_tpg.tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		struct se_lun *se_lun = tport->tpg->se_tpg.tpg_lun_list[i];
		struct se_device *dev;
		int type;

		if (se_lun->lun_status == TRANSPORT_LUN_STATUS_FREE)
			continue;

		spin_unlock(&tport->tpg->se_tpg.tpg_lun_lock);

		dev = se_lun->lun_se_dev;
		type = dev->transport->get_device_type(dev);

		/* logical_unit_number */
		data[idx++] = 0x14000000 |
			((type << 16) & 0x1f0000) |
			(se_lun->unpacked_lun & 0xffff);

		spin_lock(&tport->tpg->se_tpg.tpg_lun_lock);
	}
	spin_unlock(&tport->tpg->se_tpg.tpg_lun_lock);

	/* unit unique ID leaf */
	data[idx++] = 2 << 16;
	data[idx++] = tport->guid >> 32;
	data[idx++] = tport->guid;

	tport->unit_directory.length = idx;
	tport->unit_directory.key = (CSR_DIRECTORY | CSR_UNIT) << 24;
	tport->unit_directory.data = data;

	ret = fw_core_add_descriptor(&tport->unit_directory);
	if (ret < 0) {
		kfree(tport->unit_directory.data);
		tport->unit_directory.data = NULL;
	}

	return ret;
}
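/*
 * Worked example (from the encoding above): a logical_unit_number entry
 * is key 0x14 with the peripheral device type in bits 20:16 and the LUN
 * in the low 16 bits, so a disk (type 0) at LUN 0 encodes as 0x14000000.
 * The helper is hypothetical.
 */
static inline u32 sbp_lun_entry_example(u8 type, u16 lun)
{
	return 0x14000000 | (((u32)type << 16) & 0x1f0000) | lun;
}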
static ssize_t sbp_parse_wwn(const char *name, u64 *wwn)
{
	const char *cp;
	char c, nibble;
	int pos = 0, err;

	*wwn = 0;
	for (cp = name; cp < &name[SBP_NAMELEN - 1]; cp++) {
		c = *cp;
		if (c == '\n' && cp[1] == '\0')
			continue;
		if (c == '\0') {
			err = 2;
			if (pos != 16)
				goto fail;
			return cp - name;
		}
		err = 3;
		if (isdigit(c))
			nibble = c - '0';
		else if (isxdigit(c))
			nibble = tolower(c) - 'a' + 10;
		else
			goto fail;
		*wwn = (*wwn << 4) | nibble;
		pos++;
	}
	err = 4;
fail:
	printk(KERN_INFO "err %u len %zu pos %u\n",
			err, cp - name, pos);
	return -1;
}

static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn)
{
	return snprintf(buf, len, "%016llx", wwn);
}
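/*
 * Usage sketch (illustrative): configfs WWN strings round-trip through
 * the two helpers above; e.g. "0123456789abcdef" parses to the u64
 * 0x0123456789abcdef and formats back to the same 16-digit hex string.
 * The helper is hypothetical.
 */
static inline bool sbp_wwn_roundtrip_example(const char *name)
{
	u64 wwn;
	char buf[SBP_NAMELEN];

	if (sbp_parse_wwn(name, &wwn) < 0)
		return false;
	sbp_format_wwn(buf, sizeof(buf), wwn);
	return strncmp(buf, name, 16) == 0;
}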
static struct se_node_acl *sbp_make_nodeacl(
		struct se_portal_group *se_tpg,
		struct config_group *group,
		const char *name)
{
	struct se_node_acl *se_nacl, *se_nacl_new;
	struct sbp_nacl *nacl;
	u64 guid = 0;
	u32 nexus_depth = 1;

	if (sbp_parse_wwn(name, &guid) < 0)
		return ERR_PTR(-EINVAL);

	se_nacl_new = sbp_alloc_fabric_acl(se_tpg);
	if (!se_nacl_new)
		return ERR_PTR(-ENOMEM);

	/*
	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
	 * when converting a NodeACL from demo mode -> explict
	 */
	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
			name, nexus_depth);
	if (IS_ERR(se_nacl)) {
		sbp_release_fabric_acl(se_tpg, se_nacl_new);
		return se_nacl;
	}

	nacl = container_of(se_nacl, struct sbp_nacl, se_node_acl);

	nacl->guid = guid;
	sbp_format_wwn(nacl->iport_name, SBP_NAMELEN, guid);

	return se_nacl;
}

static void sbp_drop_nodeacl(struct se_node_acl *se_acl)
{
	struct sbp_nacl *nacl =
		container_of(se_acl, struct sbp_nacl, se_node_acl);

	core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
	kfree(nacl);
}

static int sbp_post_link_lun(
		struct se_portal_group *se_tpg,
		struct se_lun *se_lun)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);

	return sbp_update_unit_directory(tpg->tport);
}

static void sbp_pre_unlink_lun(
		struct se_portal_group *se_tpg,
		struct se_lun *se_lun)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	int ret;

	if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0)
		tport->enable = 0;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		pr_err("unlink LUN: failed to update unit directory\n");
}

static struct se_portal_group *sbp_make_tpg(
		struct se_wwn *wwn,
		struct config_group *group,
		const char *name)
{
	struct sbp_tport *tport =
		container_of(wwn, struct sbp_tport, tport_wwn);

	struct sbp_tpg *tpg;
	unsigned long tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
		return ERR_PTR(-EINVAL);

	if (tport->tpg) {
		pr_err("Only one TPG per Unit is possible.\n");
		return ERR_PTR(-EBUSY);
	}

	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
	if (!tpg) {
		pr_err("Unable to allocate struct sbp_tpg\n");
		return ERR_PTR(-ENOMEM);
	}

	tpg->tport = tport;
	tpg->tport_tpgt = tpgt;
	tport->tpg = tpg;

	/* default attribute values */
	tport->enable = 0;
	tport->directory_id = -1;
	tport->mgt_orb_timeout = 15;
	tport->max_reconnect_timeout = 5;
	tport->max_logins_per_lun = 1;

	tport->mgt_agt = sbp_management_agent_register(tport);
	if (IS_ERR(tport->mgt_agt)) {
		ret = PTR_ERR(tport->mgt_agt);
		goto out_free_tpg;
	}

	ret = core_tpg_register(&sbp_ops, wwn, &tpg->se_tpg, tpg,
			TRANSPORT_TPG_TYPE_NORMAL);
	if (ret < 0)
		goto out_unreg_mgt_agt;

	return &tpg->se_tpg;

out_unreg_mgt_agt:
	sbp_management_agent_unregister(tport->mgt_agt);
out_free_tpg:
	tport->tpg = NULL;
	kfree(tpg);
	return ERR_PTR(ret);
}

static void sbp_drop_tpg(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	core_tpg_deregister(se_tpg);
	sbp_management_agent_unregister(tport->mgt_agt);
	tport->tpg = NULL;
	kfree(tpg);
}

static struct se_wwn *sbp_make_tport(
		struct target_fabric_configfs *tf,
		struct config_group *group,
		const char *name)
{
	struct sbp_tport *tport;
	u64 guid = 0;

	if (sbp_parse_wwn(name, &guid) < 0)
		return ERR_PTR(-EINVAL);

	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
	if (!tport) {
		pr_err("Unable to allocate struct sbp_tport\n");
		return ERR_PTR(-ENOMEM);
	}

	tport->guid = guid;
	sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid);

	return &tport->tport_wwn;
}

static void sbp_drop_tport(struct se_wwn *wwn)
{
	struct sbp_tport *tport =
		container_of(wwn, struct sbp_tport, tport_wwn);

	kfree(tport);
}

static ssize_t sbp_wwn_show_attr_version(
		struct target_fabric_configfs *tf,
		char *page)
{
	return sprintf(page, "FireWire SBP fabric module %s\n", SBP_VERSION);
}

TF_WWN_ATTR_RO(sbp, version);

static struct configfs_attribute *sbp_wwn_attrs[] = {
	&sbp_wwn_version.attr,
	NULL,
};

static ssize_t sbp_tpg_show_directory_id(
		struct se_portal_group *se_tpg,
		char *page)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	if (tport->directory_id == -1)
		return sprintf(page, "implicit\n");
	else
		return sprintf(page, "%06x\n", tport->directory_id);
}

static ssize_t sbp_tpg_store_directory_id(
		struct se_portal_group *se_tpg,
		const char *page,
		size_t count)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;

	if (tport->enable) {
		pr_err("Cannot change the directory_id on an active target.\n");
		return -EBUSY;
	}

	if (strstr(page, "implicit") == page) {
		tport->directory_id = -1;
	} else {
		if (kstrtoul(page, 16, &val) < 0)
			return -EINVAL;
		if (val > 0xffffff)
			return -EINVAL;

		tport->directory_id = val;
	}

	return count;
}

static ssize_t sbp_tpg_show_enable(
		struct se_portal_group *se_tpg,
		char *page)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	return sprintf(page, "%d\n", tport->enable);
}

static ssize_t sbp_tpg_store_enable(
		struct se_portal_group *se_tpg,
		const char *page,
		size_t count)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val != 0) && (val != 1))
		return -EINVAL;

	if (tport->enable == val)
		return count;

	if (val) {
		if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) {
			pr_err("Cannot enable a target with no LUNs!\n");
			return -EINVAL;
		}
	} else {
		/* XXX: force-shutdown sessions instead? */
		spin_lock_bh(&se_tpg->session_lock);
		if (!list_empty(&se_tpg->tpg_sess_list)) {
			spin_unlock_bh(&se_tpg->session_lock);
			return -EBUSY;
		}
		spin_unlock_bh(&se_tpg->session_lock);
	}

	tport->enable = val;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0) {
		pr_err("Could not update Config ROM\n");
		return ret;
	}

	return count;
}

TF_TPG_BASE_ATTR(sbp, directory_id, S_IRUGO | S_IWUSR);
TF_TPG_BASE_ATTR(sbp, enable, S_IRUGO | S_IWUSR);

static struct configfs_attribute *sbp_tpg_base_attrs[] = {
	&sbp_tpg_directory_id.attr,
	&sbp_tpg_enable.attr,
	NULL,
};

static ssize_t sbp_tpg_attrib_show_mgt_orb_timeout(
		struct se_portal_group *se_tpg,
		char *page)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	return sprintf(page, "%d\n", tport->mgt_orb_timeout);
}

static ssize_t sbp_tpg_attrib_store_mgt_orb_timeout(
		struct se_portal_group *se_tpg,
		const char *page,
		size_t count)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val < 1) || (val > 127))
		return -EINVAL;

	if (tport->mgt_orb_timeout == val)
		return count;

	tport->mgt_orb_timeout = val;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		return ret;

	return count;
}

static ssize_t sbp_tpg_attrib_show_max_reconnect_timeout(
		struct se_portal_group *se_tpg,
		char *page)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	return sprintf(page, "%d\n", tport->max_reconnect_timeout);
}

static ssize_t sbp_tpg_attrib_store_max_reconnect_timeout(
		struct se_portal_group *se_tpg,
		const char *page,
		size_t count)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val < 1) || (val > 32767))
		return -EINVAL;

	if (tport->max_reconnect_timeout == val)
		return count;

	tport->max_reconnect_timeout = val;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		return ret;

	return count;
}

static ssize_t sbp_tpg_attrib_show_max_logins_per_lun(
		struct se_portal_group *se_tpg,
		char *page)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	return sprintf(page, "%d\n", tport->max_logins_per_lun);
}

static ssize_t sbp_tpg_attrib_store_max_logins_per_lun(
		struct se_portal_group *se_tpg,
		const char *page,
		size_t count)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val < 1) || (val > 127))
		return -EINVAL;

	/* XXX: also check against current count? */

	tport->max_logins_per_lun = val;

	return count;
}

TF_TPG_ATTRIB_ATTR(sbp, mgt_orb_timeout, S_IRUGO | S_IWUSR);
TF_TPG_ATTRIB_ATTR(sbp, max_reconnect_timeout, S_IRUGO | S_IWUSR);
TF_TPG_ATTRIB_ATTR(sbp, max_logins_per_lun, S_IRUGO | S_IWUSR);

static struct configfs_attribute *sbp_tpg_attrib_attrs[] = {
	&sbp_tpg_attrib_mgt_orb_timeout.attr,
	&sbp_tpg_attrib_max_reconnect_timeout.attr,
	&sbp_tpg_attrib_max_logins_per_lun.attr,
	NULL,
};

static const struct target_core_fabric_ops sbp_ops = {
	.module				= THIS_MODULE,
	.name				= "sbp",
	.get_fabric_name		= sbp_get_fabric_name,
	.get_fabric_proto_ident		= sbp_get_fabric_proto_ident,
	.tpg_get_wwn			= sbp_get_fabric_wwn,
	.tpg_get_tag			= sbp_get_tag,
	.tpg_get_default_depth		= sbp_get_default_depth,
	.tpg_get_pr_transport_id	= sbp_get_pr_transport_id,
	.tpg_get_pr_transport_id_len	= sbp_get_pr_transport_id_len,
	.tpg_parse_pr_out_transport_id	= sbp_parse_pr_out_transport_id,
	.tpg_check_demo_mode		= sbp_check_true,
	.tpg_check_demo_mode_cache	= sbp_check_true,
	.tpg_check_demo_mode_write_protect = sbp_check_false,
	.tpg_check_prod_mode_write_protect = sbp_check_false,
	.tpg_alloc_fabric_acl		= sbp_alloc_fabric_acl,
	.tpg_release_fabric_acl		= sbp_release_fabric_acl,
	.tpg_get_inst_index		= sbp_tpg_get_inst_index,
	.release_cmd			= sbp_release_cmd,
	.shutdown_session		= sbp_shutdown_session,
	.close_session			= sbp_close_session,
	.sess_get_index			= sbp_sess_get_index,
	.write_pending			= sbp_write_pending,
	.write_pending_status		= sbp_write_pending_status,
	.set_default_node_attributes	= sbp_set_default_node_attrs,
	.get_task_tag			= sbp_get_task_tag,
	.get_cmd_state			= sbp_get_cmd_state,
	.queue_data_in			= sbp_queue_data_in,
	.queue_status			= sbp_queue_status,
	.queue_tm_rsp			= sbp_queue_tm_rsp,
	.aborted_task			= sbp_aborted_task,
	.check_stop_free		= sbp_check_stop_free,

	.fabric_make_wwn		= sbp_make_tport,
	.fabric_drop_wwn		= sbp_drop_tport,
	.fabric_make_tpg		= sbp_make_tpg,
	.fabric_drop_tpg		= sbp_drop_tpg,
	.fabric_post_link		= sbp_post_link_lun,
	.fabric_pre_unlink		= sbp_pre_unlink_lun,
	.fabric_make_np			= NULL,
	.fabric_drop_np			= NULL,
	.fabric_make_nodeacl		= sbp_make_nodeacl,
	.fabric_drop_nodeacl		= sbp_drop_nodeacl,

	.tfc_wwn_attrs			= sbp_wwn_attrs,
	.tfc_tpg_base_attrs		= sbp_tpg_base_attrs,
	.tfc_tpg_attrib_attrs		= sbp_tpg_attrib_attrs,
};

static int __init sbp_init(void)
{
	return target_register_template(&sbp_ops);
}

static void __exit sbp_exit(void)
{
	target_unregister_template(&sbp_ops);
}

MODULE_DESCRIPTION("FireWire SBP fabric driver");
MODULE_LICENSE("GPL");
module_init(sbp_init);
module_exit(sbp_exit);