/*
 * SBP2 target driver (SCSI over IEEE1394 in target mode)
 *
 * Copyright (C) 2011  Chris Boot <bootc@bootc.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
#define KMSG_COMPONENT "sbp_target"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <asm/unaligned.h>

#include "sbp_target.h"
/* FireWire address region for management and command block address handlers */
static const struct fw_address_region sbp_register_region = {
	.start	= CSR_REGISTER_BASE + 0x10000,
	.end	= 0x1000000000000ULL,
};

static const u32 sbp_unit_directory_template[] = {
	0x1200609e, /* unit_specifier_id: NCITS/T10 */
	0x13010483, /* unit_sw_version: 1155D Rev 4 */
	0x3800609e, /* command_set_specifier_id: NCITS/T10 */
	0x390104d8, /* command_set: SPC-2 */
	0x3b000000, /* command_set_revision: 0 */
	0x3c000001, /* firmware_revision: 1 */
};
#define SESSION_MAINTENANCE_INTERVAL HZ

static atomic_t login_id = ATOMIC_INIT(0);

static void session_maintenance_work(struct work_struct *);
static int sbp_run_transaction(struct fw_card *, int, int, int, int,
		unsigned long long, void *, size_t);
static int read_peer_guid(u64 *guid, const struct sbp_management_request *req)
{
	int ret;
	__be32 high, low;

	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
			req->node_addr, req->generation, req->speed,
			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 3 * 4,
			&high, sizeof(high));
	if (ret != RCODE_COMPLETE)
		return ret;

	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
			req->node_addr, req->generation, req->speed,
			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 4 * 4,
			&low, sizeof(low));
	if (ret != RCODE_COMPLETE)
		return ret;

	*guid = (u64)be32_to_cpu(high) << 32 | be32_to_cpu(low);

	return RCODE_COMPLETE;
}
static struct sbp_session *sbp_session_find_by_guid(
		struct sbp_tpg *tpg, u64 guid)
{
	struct se_session *se_sess;
	struct sbp_session *sess, *found = NULL;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;
		if (sess->guid == guid)
			found = sess;
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return found;
}
static struct sbp_login_descriptor *sbp_login_find_by_lun(
		struct sbp_session *session, u32 unpacked_lun)
{
	struct sbp_login_descriptor *login, *found = NULL;

	spin_lock_bh(&session->lock);
	list_for_each_entry(login, &session->login_list, link) {
		if (login->login_lun == unpacked_lun)
			found = login;
	}
	spin_unlock_bh(&session->lock);

	return found;
}
static int sbp_login_count_all_by_lun(
		struct sbp_tpg *tpg,
		u32 unpacked_lun,
		int exclusive)
{
	struct se_session *se_sess;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login;
	int count = 0;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;

		spin_lock_bh(&sess->lock);
		list_for_each_entry(login, &sess->login_list, link) {
			if (login->login_lun != unpacked_lun)
				continue;

			if (!exclusive || login->exclusive)
				count++;
		}
		spin_unlock_bh(&sess->lock);
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return count;
}
static struct sbp_login_descriptor *sbp_login_find_by_id(
		struct sbp_tpg *tpg, int login_id)
{
	struct se_session *se_sess;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login, *found = NULL;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;

		spin_lock_bh(&sess->lock);
		list_for_each_entry(login, &sess->login_list, link) {
			if (login->login_id == login_id)
				found = login;
		}
		spin_unlock_bh(&sess->lock);
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return found;
}
static u32 sbp_get_lun_from_tpg(struct sbp_tpg *tpg, u32 login_lun, int *err)
{
	struct se_portal_group *se_tpg = &tpg->se_tpg;
	struct se_lun *se_lun;

	rcu_read_lock();
	hlist_for_each_entry_rcu(se_lun, &se_tpg->tpg_lun_hlist, link) {
		if (se_lun->unpacked_lun == login_lun) {
			rcu_read_unlock();
			*err = 0;
			return login_lun;
		}
	}
	rcu_read_unlock();

	*err = -ENODEV;
	return login_lun;
}
static struct sbp_session *sbp_session_create(
		struct sbp_tpg *tpg,
		u64 guid)
{
	struct sbp_session *sess;
	int ret;
	char guid_str[17];

	snprintf(guid_str, sizeof(guid_str), "%016llx", guid);

	sess = kmalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		pr_err("failed to allocate session descriptor\n");
		return ERR_PTR(-ENOMEM);
	}
	spin_lock_init(&sess->lock);
	INIT_LIST_HEAD(&sess->login_list);
	INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
	sess->guid = guid;

	sess->se_sess = target_alloc_session(&tpg->se_tpg, 128,
					     sizeof(struct sbp_target_request),
					     TARGET_PROT_NORMAL, guid_str,
					     sess, NULL);
	if (IS_ERR(sess->se_sess)) {
		pr_err("failed to init se_session\n");
		ret = PTR_ERR(sess->se_sess);
		kfree(sess);
		return ERR_PTR(ret);
	}

	return sess;
}
static void sbp_session_release(struct sbp_session *sess, bool cancel_work)
{
	spin_lock_bh(&sess->lock);
	if (!list_empty(&sess->login_list)) {
		spin_unlock_bh(&sess->lock);
		return;
	}
	spin_unlock_bh(&sess->lock);

	if (cancel_work)
		cancel_delayed_work_sync(&sess->maint_work);

	transport_deregister_session_configfs(sess->se_sess);
	transport_deregister_session(sess->se_sess);

	if (sess->card)
		fw_card_put(sess->card);

	kfree(sess);
}
static void sbp_target_agent_unregister(struct sbp_target_agent *);

static void sbp_login_release(struct sbp_login_descriptor *login,
	bool cancel_work)
{
	struct sbp_session *sess = login->sess;

	/* FIXME: abort/wait on tasks */

	sbp_target_agent_unregister(login->tgt_agt);

	if (sess) {
		spin_lock_bh(&sess->lock);
		list_del(&login->link);
		spin_unlock_bh(&sess->lock);

		sbp_session_release(sess, cancel_work);
	}

	kfree(login);
}

static struct sbp_target_agent *sbp_target_agent_register(
	struct sbp_login_descriptor *);
static void sbp_management_request_login(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login;
	struct sbp_login_response_block *response;
	u64 guid;
	u32 unpacked_lun;
	int login_response_len, ret;

	unpacked_lun = sbp_get_lun_from_tpg(tpg,
			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)), &ret);
	if (ret) {
		pr_notice("login to unknown LUN: %d\n",
			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LUN_NOTSUPP));
		return;
	}

	ret = read_peer_guid(&guid, req);
	if (ret != RCODE_COMPLETE) {
		pr_warn("failed to read peer GUID: %d\n", ret);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n",
		unpacked_lun, guid);

	sess = sbp_session_find_by_guid(tpg, guid);
	if (sess) {
		login = sbp_login_find_by_lun(sess, unpacked_lun);
		if (login) {
			pr_notice("initiator already logged-in\n");

			/*
			 * SBP-2 R4 says we should return access denied, but
			 * that can confuse initiators. Instead we need to
			 * treat this like a reconnect, but send the login
			 * response block like a fresh login.
			 *
			 * This is required particularly in the case of Apple
			 * devices booting off the FireWire target, where
			 * the firmware has an active login to the target. When
			 * the OS takes control of the session it issues its own
			 * LOGIN rather than a RECONNECT. To avoid the machine
			 * waiting until the reconnect_hold expires, we can skip
			 * the ACCESS_DENIED errors to speed things up.
			 */

			goto already_logged_in;
		}
	}

	/*
	 * check exclusive bit in login request
	 * reject with access_denied if any logins present
	 */
	if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) &&
	    sbp_login_count_all_by_lun(tpg, unpacked_lun, 0)) {
		pr_warn("refusing exclusive login with other active logins\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	/*
	 * check exclusive bit in any existing login descriptor
	 * reject with access_denied if any exclusive logins present
	 */
	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 1)) {
		pr_warn("refusing login while another exclusive login present\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	/*
	 * check we haven't exceeded the number of allowed logins
	 * reject with resources_unavailable if we have
	 */
	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 0) >=
			tport->max_logins_per_lun) {
		pr_warn("max number of logins reached\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	if (!sess) {
		sess = sbp_session_create(tpg, guid);
		if (IS_ERR(sess)) {
			switch (PTR_ERR(sess)) {
			case -EPERM:
				ret = SBP_STATUS_ACCESS_DENIED;
				break;
			default:
				ret = SBP_STATUS_RESOURCES_UNAVAIL;
				break;
			}

			req->status.status = cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_SBP_STATUS(ret));
			return;
		}

		sess->node_id = req->node_addr;
		sess->card = fw_card_get(req->card);
		sess->generation = req->generation;
		sess->speed = req->speed;

		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	}

	/* only take the latest reconnect_hold into account */
	sess->reconnect_hold = min(
		1 << LOGIN_ORB_RECONNECT(be32_to_cpu(req->orb.misc)),
		tport->max_reconnect_timeout) - 1;

	login = kmalloc(sizeof(*login), GFP_KERNEL);
	if (!login) {
		pr_err("failed to allocate login descriptor\n");

		sbp_session_release(sess, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	login->sess = sess;
	login->login_lun = unpacked_lun;
	login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
	login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
	login->login_id = atomic_inc_return(&login_id);

	login->tgt_agt = sbp_target_agent_register(login);
	if (IS_ERR(login->tgt_agt)) {
		ret = PTR_ERR(login->tgt_agt);
		pr_err("failed to map command block handler: %d\n", ret);

		sbp_session_release(sess, true);
		kfree(login);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	spin_lock_bh(&sess->lock);
	list_add_tail(&login->link, &sess->login_list);
	spin_unlock_bh(&sess->lock);

already_logged_in:
	response = kzalloc(sizeof(*response), GFP_KERNEL);
	if (!response) {
		pr_err("failed to allocate login response block\n");

		sbp_login_release(login, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	login_response_len = clamp_val(
			LOGIN_ORB_RESPONSE_LENGTH(be32_to_cpu(req->orb.length)),
			12, sizeof(*response));
	response->misc = cpu_to_be32(
		((login_response_len & 0xffff) << 16) |
		(login->login_id & 0xffff));
	response->reconnect_hold = cpu_to_be32(sess->reconnect_hold & 0xffff);
	addr_to_sbp2_pointer(login->tgt_agt->handler.offset,
		&response->command_block_agent);

	ret = sbp_run_transaction(sess->card, TCODE_WRITE_BLOCK_REQUEST,
		sess->node_id, sess->generation, sess->speed,
		sbp2_pointer_to_addr(&req->orb.ptr2), response,
		login_response_len);
	if (ret != RCODE_COMPLETE) {
		pr_debug("failed to write login response block: %x\n", ret);

		kfree(response);
		sbp_login_release(login, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	kfree(response);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}
static void sbp_management_request_query_logins(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	pr_notice("QUERY LOGINS not implemented\n");
	/* FIXME: implement */

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
}
static void sbp_management_request_reconnect(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	int ret;
	u64 guid;
	struct sbp_login_descriptor *login;

	ret = read_peer_guid(&guid, req);
	if (ret != RCODE_COMPLETE) {
		pr_warn("failed to read peer GUID: %d\n", ret);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	pr_notice("mgt_agent RECONNECT from %016llx\n", guid);

	login = sbp_login_find_by_id(tpg,
		RECONNECT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)));

	if (!login) {
		pr_err("mgt_agent RECONNECT unknown login ID\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	if (login->sess->guid != guid) {
		pr_err("mgt_agent RECONNECT login GUID doesn't match\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	spin_lock_bh(&login->sess->lock);
	if (login->sess->card)
		fw_card_put(login->sess->card);

	/* update the node details */
	login->sess->generation = req->generation;
	login->sess->node_id = req->node_addr;
	login->sess->card = fw_card_get(req->card);
	login->sess->speed = req->speed;
	spin_unlock_bh(&login->sess->lock);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}
static void sbp_management_request_logout(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	int id;
	struct sbp_login_descriptor *login;

	id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));

	login = sbp_login_find_by_id(tpg, id);
	if (!login) {
		pr_warn("cannot find login: %d\n", id);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LOGIN_ID_UNKNOWN));
		return;
	}

	pr_info("mgt_agent LOGOUT from LUN %d session %d\n",
		login->login_lun, login->login_id);

	if (req->node_addr != login->sess->node_id) {
		pr_warn("logout from different node ID\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	sbp_login_release(login, true);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}
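
/*
 * Session maintenance overview (summarising the three helpers below): a
 * delayed work item runs every SESSION_MAINTENANCE_INTERVAL while a
 * session has logins. After a bus reset it invalidates node_id and opens
 * a reconnect window of (reconnect_hold + 1) seconds; if the initiator
 * does not RECONNECT in time, all logins and the session are torn down.
 */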
static void session_check_for_reset(struct sbp_session *sess)
{
	bool card_valid = false;

	spin_lock_bh(&sess->lock);

	if (sess->card) {
		spin_lock_irq(&sess->card->lock);
		card_valid = (sess->card->local_node != NULL);
		spin_unlock_irq(&sess->card->lock);

		if (!card_valid) {
			fw_card_put(sess->card);
			sess->card = NULL;
		}
	}

	if (!card_valid || (sess->generation != sess->card->generation)) {
		pr_info("Waiting for reconnect from node: %016llx\n",
				sess->guid);

		sess->node_id = -1;
		sess->reconnect_expires = get_jiffies_64() +
			((sess->reconnect_hold + 1) * HZ);
	}

	spin_unlock_bh(&sess->lock);
}
static void session_reconnect_expired(struct sbp_session *sess)
{
	struct sbp_login_descriptor *login, *temp;
	LIST_HEAD(login_list);

	pr_info("Reconnect timer expired for node: %016llx\n", sess->guid);

	spin_lock_bh(&sess->lock);
	list_for_each_entry_safe(login, temp, &sess->login_list, link) {
		login->sess = NULL;
		list_move_tail(&login->link, &login_list);
	}
	spin_unlock_bh(&sess->lock);

	list_for_each_entry_safe(login, temp, &login_list, link) {
		list_del(&login->link);
		sbp_login_release(login, false);
	}

	sbp_session_release(sess, false);
}
static void session_maintenance_work(struct work_struct *work)
{
	struct sbp_session *sess = container_of(work, struct sbp_session,
			maint_work.work);

	/* could be called while tearing down the session */
	spin_lock_bh(&sess->lock);
	if (list_empty(&sess->login_list)) {
		spin_unlock_bh(&sess->lock);
		return;
	}
	spin_unlock_bh(&sess->lock);

	if (sess->node_id != -1) {
		/* check for bus reset and make node_id invalid */
		session_check_for_reset(sess);

		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	} else if (!time_after64(get_jiffies_64(), sess->reconnect_expires)) {
		/* still waiting for reconnect */
		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	} else {
		/* reconnect timeout has expired */
		session_reconnect_expired(sess);
	}
}
static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	int state;

	switch (tcode) {
	case TCODE_READ_QUADLET_REQUEST:
		pr_debug("tgt_agent AGENT_STATE READ\n");

		spin_lock_bh(&agent->lock);
		state = agent->state;
		spin_unlock_bh(&agent->lock);

		*(__be32 *)data = cpu_to_be32(state);

		return RCODE_COMPLETE;

	case TCODE_WRITE_QUADLET_REQUEST:
		/* ignored */
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}
static int tgt_agent_rw_agent_reset(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		pr_debug("tgt_agent AGENT_RESET\n");
		spin_lock_bh(&agent->lock);
		agent->state = AGENT_STATE_RESET;
		spin_unlock_bh(&agent->lock);
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}
static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	struct sbp2_pointer *ptr = data;

	switch (tcode) {
	case TCODE_WRITE_BLOCK_REQUEST:
		spin_lock_bh(&agent->lock);
		if (agent->state != AGENT_STATE_SUSPENDED &&
				agent->state != AGENT_STATE_RESET) {
			spin_unlock_bh(&agent->lock);
			pr_notice("Ignoring ORB_POINTER write while active.\n");
			return RCODE_CONFLICT_ERROR;
		}
		agent->state = AGENT_STATE_ACTIVE;
		spin_unlock_bh(&agent->lock);

		agent->orb_pointer = sbp2_pointer_to_addr(ptr);
		agent->doorbell = false;

		pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n",
				agent->orb_pointer);

		queue_work(system_unbound_wq, &agent->work);

		return RCODE_COMPLETE;

	case TCODE_READ_BLOCK_REQUEST:
		pr_debug("tgt_agent ORB_POINTER READ\n");
		spin_lock_bh(&agent->lock);
		addr_to_sbp2_pointer(agent->orb_pointer, ptr);
		spin_unlock_bh(&agent->lock);
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}
static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		spin_lock_bh(&agent->lock);
		if (agent->state != AGENT_STATE_SUSPENDED) {
			spin_unlock_bh(&agent->lock);
			pr_debug("Ignoring DOORBELL while active.\n");
			return RCODE_CONFLICT_ERROR;
		}
		agent->state = AGENT_STATE_ACTIVE;
		spin_unlock_bh(&agent->lock);

		agent->doorbell = true;

		pr_debug("tgt_agent DOORBELL\n");

		queue_work(system_unbound_wq, &agent->work);

		return RCODE_COMPLETE;

	case TCODE_READ_QUADLET_REQUEST:
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}
static int tgt_agent_rw_unsolicited_status_enable(struct fw_card *card,
		int tcode, void *data, struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		pr_debug("tgt_agent UNSOLICITED_STATUS_ENABLE\n");
		/* ignored as we don't send unsolicited status */
		return RCODE_COMPLETE;

	case TCODE_READ_QUADLET_REQUEST:
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}
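
/*
 * Command block agent register map, as decoded by tgt_agent_rw() below
 * (offsets relative to the start of the 0x20-byte handler region):
 *
 *   0x00  AGENT_STATE                 (quadlet read)
 *   0x04  AGENT_RESET                 (quadlet write)
 *   0x08  ORB_POINTER                 (two quadlets, read/write)
 *   0x10  DOORBELL                    (quadlet write)
 *   0x14  UNSOLICITED_STATUS_ENABLE   (quadlet write, ignored here)
 */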
static void tgt_agent_rw(struct fw_card *card, struct fw_request *request,
		int tcode, int destination, int source, int generation,
		unsigned long long offset, void *data, size_t length,
		void *callback_data)
{
	struct sbp_target_agent *agent = callback_data;
	struct sbp_session *sess = agent->login->sess;
	int sess_gen, sess_node, rcode;

	spin_lock_bh(&sess->lock);
	sess_gen = sess->generation;
	sess_node = sess->node_id;
	spin_unlock_bh(&sess->lock);

	if (generation != sess_gen) {
		pr_notice("ignoring request with wrong generation\n");
		rcode = RCODE_TYPE_ERROR;
		goto out;
	}

	if (source != sess_node) {
		pr_notice("ignoring request from foreign node (%x != %x)\n",
				source, sess_node);
		rcode = RCODE_TYPE_ERROR;
		goto out;
	}

	/* turn offset into the offset from the start of the block */
	offset -= agent->handler.offset;

	if (offset == 0x00 && length == 4) {
		/* AGENT_STATE */
		rcode = tgt_agent_rw_agent_state(card, tcode, data, agent);
	} else if (offset == 0x04 && length == 4) {
		/* AGENT_RESET */
		rcode = tgt_agent_rw_agent_reset(card, tcode, data, agent);
	} else if (offset == 0x08 && length == 8) {
		/* ORB_POINTER */
		rcode = tgt_agent_rw_orb_pointer(card, tcode, data, agent);
	} else if (offset == 0x10 && length == 4) {
		/* DOORBELL */
		rcode = tgt_agent_rw_doorbell(card, tcode, data, agent);
	} else if (offset == 0x14 && length == 4) {
		/* UNSOLICITED_STATUS_ENABLE */
		rcode = tgt_agent_rw_unsolicited_status_enable(card, tcode,
				data, agent);
	} else {
		rcode = RCODE_ADDRESS_ERROR;
	}

out:
	fw_send_response(card, request, rcode);
}
static void sbp_handle_command(struct sbp_target_request *);
static int sbp_send_status(struct sbp_target_request *);
static void sbp_free_request(struct sbp_target_request *);

static void tgt_agent_process_work(struct work_struct *work)
{
	struct sbp_target_request *req =
		container_of(work, struct sbp_target_request, work);

	pr_debug("tgt_orb ptr:0x%llx next_ORB:0x%llx data_descriptor:0x%llx misc:0x%x\n",
			req->orb_pointer,
			sbp2_pointer_to_addr(&req->orb.next_orb),
			sbp2_pointer_to_addr(&req->orb.data_descriptor),
			be32_to_cpu(req->orb.misc));

	if (req->orb_pointer >> 32)
		pr_debug("ORB with high bits set\n");

	switch (ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc))) {
	case 0: /* Format specified by this standard */
		sbp_handle_command(req);
		return;
	case 1: /* Reserved for future standardization */
	case 2: /* Vendor-dependent */
		req->status.status |= cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_DEAD(0) |
				STATUS_BLOCK_LEN(1) |
				STATUS_BLOCK_SBP_STATUS(
					SBP_STATUS_REQ_TYPE_NOTSUPP));
		sbp_send_status(req);
		return;
	case 3: /* Dummy ORB */
		req->status.status |= cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_DEAD(0) |
				STATUS_BLOCK_LEN(1) |
				STATUS_BLOCK_SBP_STATUS(
					SBP_STATUS_DUMMY_ORB_COMPLETE));
		sbp_send_status(req);
		return;
	default:
		BUG();
	}
}
/* used to double-check we haven't been issued an AGENT_RESET */
static inline bool tgt_agent_check_active(struct sbp_target_agent *agent)
{
	bool active;

	spin_lock_bh(&agent->lock);
	active = (agent->state == AGENT_STATE_ACTIVE);
	spin_unlock_bh(&agent->lock);

	return active;
}
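
/*
 * Requests come out of the per-session tag pool set up by
 * target_alloc_session() in sbp_session_create() (128 pre-allocated
 * struct sbp_target_request slots), so the ORB fetch path below avoids
 * allocating memory of its own.
 */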
static struct sbp_target_request *sbp_mgt_get_req(struct sbp_session *sess,
	struct fw_card *card, u64 next_orb)
{
	struct se_session *se_sess = sess->se_sess;
	struct sbp_target_request *req;
	int tag;

	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
	if (tag < 0)
		return ERR_PTR(-ENOMEM);

	req = &((struct sbp_target_request *)se_sess->sess_cmd_map)[tag];
	memset(req, 0, sizeof(*req));
	req->se_cmd.map_tag = tag;
	req->se_cmd.tag = next_orb;

	return req;
}
static void tgt_agent_fetch_work(struct work_struct *work)
{
	struct sbp_target_agent *agent =
		container_of(work, struct sbp_target_agent, work);
	struct sbp_session *sess = agent->login->sess;
	struct sbp_target_request *req;
	int ret;
	bool doorbell = agent->doorbell;
	u64 next_orb = agent->orb_pointer;

	while (next_orb && tgt_agent_check_active(agent)) {
		req = sbp_mgt_get_req(sess, sess->card, next_orb);
		if (IS_ERR(req)) {
			spin_lock_bh(&agent->lock);
			agent->state = AGENT_STATE_DEAD;
			spin_unlock_bh(&agent->lock);
			return;
		}

		req->login = agent->login;
		req->orb_pointer = next_orb;

		req->status.status = cpu_to_be32(STATUS_BLOCK_ORB_OFFSET_HIGH(
					req->orb_pointer >> 32));
		req->status.orb_low = cpu_to_be32(
				req->orb_pointer & 0xfffffffc);

		/* read in the ORB */
		ret = sbp_run_transaction(sess->card, TCODE_READ_BLOCK_REQUEST,
				sess->node_id, sess->generation, sess->speed,
				req->orb_pointer, &req->orb, sizeof(req->orb));
		if (ret != RCODE_COMPLETE) {
			pr_debug("tgt_orb fetch failed: %x\n", ret);
			req->status.status |= cpu_to_be32(
					STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_FINISHED) |
					STATUS_BLOCK_RESP(
						STATUS_RESP_TRANSPORT_FAILURE) |
					STATUS_BLOCK_DEAD(1) |
					STATUS_BLOCK_LEN(1) |
					STATUS_BLOCK_SBP_STATUS(
						SBP_STATUS_UNSPECIFIED_ERROR));
			spin_lock_bh(&agent->lock);
			agent->state = AGENT_STATE_DEAD;
			spin_unlock_bh(&agent->lock);

			sbp_send_status(req);
			return;
		}

		/* check the next_ORB field */
		if (be32_to_cpu(req->orb.next_orb.high) & 0x80000000) {
			next_orb = 0;
			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_FINISHED));
		} else {
			next_orb = sbp2_pointer_to_addr(&req->orb.next_orb);
			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_CONTINUING));
		}

		if (tgt_agent_check_active(agent) && !doorbell) {
			INIT_WORK(&req->work, tgt_agent_process_work);
			queue_work(system_unbound_wq, &req->work);
		} else {
			/* don't process this request, just check next_ORB */
			sbp_free_request(req);
		}

		spin_lock_bh(&agent->lock);
		doorbell = agent->doorbell = false;

		/* check if we should carry on processing */
		if (next_orb)
			agent->orb_pointer = next_orb;
		else
			agent->state = AGENT_STATE_SUSPENDED;

		spin_unlock_bh(&agent->lock);
	}
}
static struct sbp_target_agent *sbp_target_agent_register(
		struct sbp_login_descriptor *login)
{
	struct sbp_target_agent *agent;
	int ret;

	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
	if (!agent)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&agent->lock);

	agent->handler.length = 0x20;
	agent->handler.address_callback = tgt_agent_rw;
	agent->handler.callback_data = agent;

	agent->login = login;
	agent->state = AGENT_STATE_RESET;
	INIT_WORK(&agent->work, tgt_agent_fetch_work);
	agent->orb_pointer = 0;
	agent->doorbell = false;

	ret = fw_core_add_address_handler(&agent->handler,
			&sbp_register_region);
	if (ret < 0) {
		kfree(agent);
		return ERR_PTR(ret);
	}

	return agent;
}
static void sbp_target_agent_unregister(struct sbp_target_agent *agent)
{
	fw_core_remove_address_handler(&agent->handler);
	cancel_work_sync(&agent->work);
	kfree(agent);
}
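
/*
 * Note on the retry policy implemented below: the delay grows as
 * 5 * attempt^2 microseconds, i.e. roughly 5, 20, 45, 80 and 125 us
 * across the five attempts (usleep_range() may stretch each sleep up to
 * twice that), after which the last rcode is returned to the caller.
 */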
/*
 * Simple wrapper around fw_run_transaction that retries the transaction several
 * times in case of failure, with an exponential backoff.
 */
static int sbp_run_transaction(struct fw_card *card, int tcode, int destination_id,
		int generation, int speed, unsigned long long offset,
		void *payload, size_t length)
{
	int attempt, ret, delay;

	for (attempt = 1; attempt <= 5; attempt++) {
		ret = fw_run_transaction(card, tcode, destination_id,
				generation, speed, offset, payload, length);

		switch (ret) {
		case RCODE_COMPLETE:
		case RCODE_TYPE_ERROR:
		case RCODE_ADDRESS_ERROR:
		case RCODE_GENERATION:
			return ret;

		default:
			delay = 5 * attempt * attempt;
			usleep_range(delay, delay * 2);
		}
	}

	return ret;
}
/*
 * Wrapper around sbp_run_transaction that gets the card, destination,
 * generation and speed out of the request's session.
 */
static int sbp_run_request_transaction(struct sbp_target_request *req,
		int tcode, unsigned long long offset, void *payload,
		size_t length)
{
	struct sbp_login_descriptor *login = req->login;
	struct sbp_session *sess = login->sess;
	struct fw_card *card;
	int node_id, generation, speed, ret;

	spin_lock_bh(&sess->lock);
	card = fw_card_get(sess->card);
	node_id = sess->node_id;
	generation = sess->generation;
	speed = sess->speed;
	spin_unlock_bh(&sess->lock);

	ret = sbp_run_transaction(card, tcode, node_id, generation, speed,
			offset, payload, length);

	fw_card_put(card);

	return ret;
}
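
/*
 * A command block ORB carries the first bytes of the CDB inline; when
 * scsi_command_size() reports a longer CDB, sbp_fetch_command() reads
 * the remainder from initiator memory immediately following the ORB.
 */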
static int sbp_fetch_command(struct sbp_target_request *req)
{
	int ret, cmd_len, copy_len;

	cmd_len = scsi_command_size(req->orb.command_block);

	req->cmd_buf = kmalloc(cmd_len, GFP_KERNEL);
	if (!req->cmd_buf)
		return -ENOMEM;

	memcpy(req->cmd_buf, req->orb.command_block,
		min_t(int, cmd_len, sizeof(req->orb.command_block)));

	if (cmd_len > sizeof(req->orb.command_block)) {
		pr_debug("sbp_fetch_command: filling in long command\n");
		copy_len = cmd_len - sizeof(req->orb.command_block);

		ret = sbp_run_request_transaction(req,
				TCODE_READ_BLOCK_REQUEST,
				req->orb_pointer + sizeof(req->orb),
				req->cmd_buf + sizeof(req->orb.command_block),
				copy_len);
		if (ret != RCODE_COMPLETE)
			return -EIO;
	}

	return 0;
}
static int sbp_fetch_page_table(struct sbp_target_request *req)
{
	int pg_tbl_sz, ret;
	struct sbp_page_table_entry *pg_tbl;

	if (!CMDBLK_ORB_PG_TBL_PRESENT(be32_to_cpu(req->orb.misc)))
		return 0;

	pg_tbl_sz = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)) *
		sizeof(struct sbp_page_table_entry);

	pg_tbl = kmalloc(pg_tbl_sz, GFP_KERNEL);
	if (!pg_tbl)
		return -ENOMEM;

	ret = sbp_run_request_transaction(req, TCODE_READ_BLOCK_REQUEST,
			sbp2_pointer_to_addr(&req->orb.data_descriptor),
			pg_tbl, pg_tbl_sz);
	if (ret != RCODE_COMPLETE) {
		kfree(pg_tbl);
		return -EIO;
	}

	req->pg_tbl = pg_tbl;
	return 0;
}
static void sbp_calc_data_length_direction(struct sbp_target_request *req,
	u32 *data_len, enum dma_data_direction *data_dir)
{
	int data_size, direction, idx;

	data_size = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
	direction = CMDBLK_ORB_DIRECTION(be32_to_cpu(req->orb.misc));

	if (!data_size) {
		*data_len = 0;
		*data_dir = DMA_NONE;
		return;
	}

	*data_dir = direction ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	if (req->pg_tbl) {
		*data_len = 0;
		for (idx = 0; idx < data_size; idx++) {
			*data_len += be16_to_cpu(
					req->pg_tbl[idx].segment_length);
		}
	} else {
		*data_len = data_size;
	}
}
static void sbp_handle_command(struct sbp_target_request *req)
{
	struct sbp_login_descriptor *login = req->login;
	struct sbp_session *sess = login->sess;
	int ret, unpacked_lun;
	u32 data_length;
	enum dma_data_direction data_dir;

	ret = sbp_fetch_command(req);
	if (ret) {
		pr_debug("sbp_handle_command: fetch command failed: %d\n", ret);
		goto err;
	}

	ret = sbp_fetch_page_table(req);
	if (ret) {
		pr_debug("sbp_handle_command: fetch page table failed: %d\n",
			ret);
		goto err;
	}

	unpacked_lun = req->login->login_lun;
	sbp_calc_data_length_direction(req, &data_length, &data_dir);

	pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n",
			req->orb_pointer, unpacked_lun, data_length, data_dir);

	/* only used for printk until we do TMRs */
	req->se_cmd.tag = req->orb_pointer;
	if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
			      req->sense_buf, unpacked_lun, data_length,
			      TCM_SIMPLE_TAG, data_dir, TARGET_SCF_ACK_KREF))
		goto err;

	return;

err:
	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
		STATUS_BLOCK_DEAD(0) |
		STATUS_BLOCK_LEN(1) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
	sbp_send_status(req);
}
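
/*
 * Data is moved either from the single contiguous region named by the
 * ORB's data_descriptor, or segment by segment from the SBP-2 page
 * table fetched earlier; each FireWire transaction below is capped at
 * the ORB's max_payload and the current scatterlist chunk length.
 */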
/*
 * DMA_TO_DEVICE = read from initiator (SCSI WRITE)
 * DMA_FROM_DEVICE = write to initiator (SCSI READ)
 */
static int sbp_rw_data(struct sbp_target_request *req)
{
	struct sbp_session *sess = req->login->sess;
	int tcode, sg_miter_flags, max_payload, pg_size, speed, node_id,
		generation, num_pte, length, tfr_length,
		rcode = RCODE_COMPLETE;
	struct sbp_page_table_entry *pte;
	unsigned long long offset;
	struct fw_card *card;
	struct sg_mapping_iter iter;

	if (req->se_cmd.data_direction == DMA_FROM_DEVICE) {
		tcode = TCODE_WRITE_BLOCK_REQUEST;
		sg_miter_flags = SG_MITER_FROM_SG;
	} else {
		tcode = TCODE_READ_BLOCK_REQUEST;
		sg_miter_flags = SG_MITER_TO_SG;
	}

	max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD(be32_to_cpu(req->orb.misc));
	speed = CMDBLK_ORB_SPEED(be32_to_cpu(req->orb.misc));

	pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc));
	if (pg_size) {
		pr_err("sbp_run_transaction: page size ignored\n");
		pg_size = 0x100 << pg_size;
	}

	spin_lock_bh(&sess->lock);
	card = fw_card_get(sess->card);
	node_id = sess->node_id;
	generation = sess->generation;
	spin_unlock_bh(&sess->lock);

	if (req->pg_tbl) {
		pte = req->pg_tbl;
		num_pte = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));

		offset = 0;
		length = 0;
	} else {
		pte = NULL;
		num_pte = 0;

		offset = sbp2_pointer_to_addr(&req->orb.data_descriptor);
		length = req->se_cmd.data_length;
	}

	sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents,
		sg_miter_flags);

	while (length || num_pte) {
		if (!length) {
			offset = (u64)be16_to_cpu(pte->segment_base_hi) << 32 |
				be32_to_cpu(pte->segment_base_lo);
			length = be16_to_cpu(pte->segment_length);

			pte++;
			num_pte--;
		}

		sg_miter_next(&iter);

		tfr_length = min3(length, max_payload, (int)iter.length);

		/* FIXME: take page_size into account */

		rcode = sbp_run_transaction(card, tcode, node_id,
				generation, speed,
				offset, iter.addr, tfr_length);

		if (rcode != RCODE_COMPLETE)
			break;

		length -= tfr_length;
		offset += tfr_length;
		iter.consumed = tfr_length;
	}

	sg_miter_stop(&iter);
	fw_card_put(card);

	if (rcode == RCODE_COMPLETE) {
		WARN_ON(length != 0);
		return 0;
	} else {
		return -EIO;
	}
}
static int sbp_send_status(struct sbp_target_request *req)
{
	int rc, ret = 0, length;
	struct sbp_login_descriptor *login = req->login;

	length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4;

	rc = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
			login->status_fifo_addr, &req->status, length);
	if (rc != RCODE_COMPLETE) {
		pr_debug("sbp_send_status: write failed: 0x%x\n", rc);
		ret = -EIO;
		goto put_ref;
	}

	pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n",
			req->orb_pointer);
	/*
	 * Drop the extra ACK_KREF reference taken by target_submit_cmd()
	 * ahead of sbp_check_stop_free() -> transport_generic_free_cmd()
	 * final se_cmd->cmd_kref put.
	 */
put_ref:
	target_put_sess_cmd(&req->se_cmd);
	return ret;
}
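
/*
 * sbp_sense_mangle() below repacks fixed-format SCSI sense data into the
 * compact sense layout SBP-2 carries inside the status block: status[0]
 * holds sfmt plus the SCSI status, status[1] packs valid/mark/eom/ili
 * and the sense key, and the information, CDB-dependent, FRU and
 * sense-key-dependent bytes follow.
 */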
static void sbp_sense_mangle(struct sbp_target_request *req)
{
	struct se_cmd *se_cmd = &req->se_cmd;
	u8 *sense = req->sense_buf;
	u8 *status = req->status.data;

	WARN_ON(se_cmd->scsi_sense_length < 18);

	switch (sense[0] & 0x7f) {		/* sfmt */
	case 0x70: /* current, fixed */
		status[0] = 0 << 6;
		break;
	case 0x71: /* deferred, fixed */
		status[0] = 1 << 6;
		break;
	case 0x72: /* current, descriptor */
	case 0x73: /* deferred, descriptor */
	default:
		/*
		 * TODO: SBP-3 specifies what we should do with descriptor
		 * format sense data
		 */
		pr_err("sbp_send_sense: unknown sense format: 0x%x\n",
			sense[0]);
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQUEST_ABORTED));
		return;
	}

	status[0] |= se_cmd->scsi_status & 0x3f;	/* status */
	status[1] =
		(sense[0] & 0x80) |		/* valid */
		((sense[2] & 0xe0) >> 1) |	/* mark, eom, ili */
		(sense[2] & 0x0f);		/* sense_key */
	status[2] = se_cmd->scsi_asc;		/* sense_code */
	status[3] = se_cmd->scsi_ascq;		/* sense_qualifier */

	/* information */
	status[4] = sense[3];
	status[5] = sense[4];
	status[6] = sense[5];
	status[7] = sense[6];

	/* CDB-dependent */
	status[8] = sense[8];
	status[9] = sense[9];
	status[10] = sense[10];
	status[11] = sense[11];

	/* fru */
	status[12] = sense[14];

	/* sense_key-dependent */
	status[13] = sense[15];
	status[14] = sense[16];
	status[15] = sense[17];

	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_DEAD(0) |
		STATUS_BLOCK_LEN(5) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}
static int sbp_send_sense(struct sbp_target_request *req)
{
	struct se_cmd *se_cmd = &req->se_cmd;

	if (se_cmd->scsi_sense_length) {
		sbp_sense_mangle(req);
	} else {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
	}

	return sbp_send_status(req);
}
static void sbp_free_request(struct sbp_target_request *req)
{
	struct se_cmd *se_cmd = &req->se_cmd;
	struct se_session *se_sess = se_cmd->se_sess;

	kfree(req->pg_tbl);
	kfree(req->cmd_buf);

	percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
}
static void sbp_mgt_agent_process(struct work_struct *work)
{
	struct sbp_management_agent *agent =
		container_of(work, struct sbp_management_agent, work);
	struct sbp_management_request *req = agent->request;
	int ret;
	int status_data_len = 0;

	/* fetch the ORB from the initiator */
	ret = sbp_run_transaction(req->card, TCODE_READ_BLOCK_REQUEST,
		req->node_addr, req->generation, req->speed,
		agent->orb_offset, &req->orb, sizeof(req->orb));
	if (ret != RCODE_COMPLETE) {
		pr_debug("mgt_orb fetch failed: %x\n", ret);
		goto out;
	}

	pr_debug("mgt_orb ptr1:0x%llx ptr2:0x%llx misc:0x%x len:0x%x status_fifo:0x%llx\n",
		sbp2_pointer_to_addr(&req->orb.ptr1),
		sbp2_pointer_to_addr(&req->orb.ptr2),
		be32_to_cpu(req->orb.misc), be32_to_cpu(req->orb.length),
		sbp2_pointer_to_addr(&req->orb.status_fifo));

	if (!ORB_NOTIFY(be32_to_cpu(req->orb.misc)) ||
		ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc)) != 0) {
		pr_err("mgt_orb bad request\n");
		goto out;
	}

	switch (MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))) {
	case MANAGEMENT_ORB_FUNCTION_LOGIN:
		sbp_management_request_login(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS:
		sbp_management_request_query_logins(agent, req,
				&status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_RECONNECT:
		sbp_management_request_reconnect(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_SET_PASSWORD:
		pr_notice("SET PASSWORD not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_LOGOUT:
		sbp_management_request_logout(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK:
		pr_notice("ABORT TASK not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET:
		pr_notice("ABORT TASK SET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET:
		pr_notice("LOGICAL UNIT RESET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_TARGET_RESET:
		pr_notice("TARGET RESET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	default:
		pr_notice("unknown management function 0x%x\n",
			MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc)));

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;
	}

	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_SRC(1) | /* Response to ORB, next_ORB absent */
		STATUS_BLOCK_LEN(DIV_ROUND_UP(status_data_len, 4) + 1) |
		STATUS_BLOCK_ORB_OFFSET_HIGH(agent->orb_offset >> 32));
	req->status.orb_low = cpu_to_be32(agent->orb_offset);

	/* write the status block back to the initiator */
	ret = sbp_run_transaction(req->card, TCODE_WRITE_BLOCK_REQUEST,
		req->node_addr, req->generation, req->speed,
		sbp2_pointer_to_addr(&req->orb.status_fifo),
		&req->status, 8 + status_data_len);
	if (ret != RCODE_COMPLETE) {
		pr_debug("mgt_orb status write failed: %x\n", ret);
		goto out;
	}

out:
	fw_card_put(req->card);
	kfree(req);

	spin_lock_bh(&agent->lock);
	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
	spin_unlock_bh(&agent->lock);
}
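
/*
 * The management agent occupies an 8-byte register: a block write of an
 * SBP-2 pointer queues ORB processing (sbp_mgt_agent_process() above),
 * a block read returns the last ORB offset, and anything else is a type
 * error. Only one management ORB is handled at a time; a second write
 * while busy gets RCODE_CONFLICT_ERROR.
 */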
static void sbp_mgt_agent_rw(struct fw_card *card,
	struct fw_request *request, int tcode, int destination, int source,
	int generation, unsigned long long offset, void *data, size_t length,
	void *callback_data)
{
	struct sbp_management_agent *agent = callback_data;
	struct sbp2_pointer *ptr = data;
	int rcode = RCODE_ADDRESS_ERROR;

	if (!agent->tport->enable)
		goto out;

	if ((offset != agent->handler.offset) || (length != 8))
		goto out;

	if (tcode == TCODE_WRITE_BLOCK_REQUEST) {
		struct sbp_management_request *req;
		int prev_state;

		spin_lock_bh(&agent->lock);
		prev_state = agent->state;
		agent->state = MANAGEMENT_AGENT_STATE_BUSY;
		spin_unlock_bh(&agent->lock);

		if (prev_state == MANAGEMENT_AGENT_STATE_BUSY) {
			pr_notice("ignoring management request while busy\n");
			rcode = RCODE_CONFLICT_ERROR;
			goto out;
		}
		req = kzalloc(sizeof(*req), GFP_ATOMIC);
		if (!req) {
			rcode = RCODE_CONFLICT_ERROR;
			goto out;
		}

		req->card = fw_card_get(card);
		req->generation = generation;
		req->node_addr = source;
		req->speed = fw_get_request_speed(request);

		agent->orb_offset = sbp2_pointer_to_addr(ptr);
		agent->request = req;

		queue_work(system_unbound_wq, &agent->work);
		rcode = RCODE_COMPLETE;
	} else if (tcode == TCODE_READ_BLOCK_REQUEST) {
		addr_to_sbp2_pointer(agent->orb_offset, ptr);
		rcode = RCODE_COMPLETE;
	} else {
		rcode = RCODE_TYPE_ERROR;
	}

out:
	fw_send_response(card, request, rcode);
}
static struct sbp_management_agent *sbp_management_agent_register(
		struct sbp_tport *tport)
{
	int ret;
	struct sbp_management_agent *agent;

	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
	if (!agent)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&agent->lock);
	agent->tport = tport;
	agent->handler.length = 0x08;
	agent->handler.address_callback = sbp_mgt_agent_rw;
	agent->handler.callback_data = agent;
	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
	INIT_WORK(&agent->work, sbp_mgt_agent_process);
	agent->orb_offset = 0;
	agent->request = NULL;

	ret = fw_core_add_address_handler(&agent->handler,
			&sbp_register_region);
	if (ret < 0) {
		kfree(agent);
		return ERR_PTR(ret);
	}

	return agent;
}

static void sbp_management_agent_unregister(struct sbp_management_agent *agent)
{
	fw_core_remove_address_handler(&agent->handler);
	cancel_work_sync(&agent->work);
	kfree(agent);
}
static int sbp_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static int sbp_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}

static char *sbp_get_fabric_name(void)
{
	return "sbp";
}

static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	return &tport->tport_name[0];
}

static u16 sbp_get_tag(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	return tpg->tport_tpgt;
}

static u32 sbp_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static void sbp_release_cmd(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	sbp_free_request(req);
}

static u32 sbp_sess_get_index(struct se_session *se_sess)
{
	return 0;
}
static int sbp_write_pending(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);
	int ret;

	ret = sbp_rw_data(req);
	if (ret) {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(
				STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(
				SBP_STATUS_UNSPECIFIED_ERROR));
		sbp_send_status(req);
		return ret;
	}

	target_execute_cmd(se_cmd);
	return 0;
}

static int sbp_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}

static void sbp_set_default_node_attrs(struct se_node_acl *nacl)
{
	return;
}

static int sbp_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}
static int sbp_queue_data_in(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);
	int ret;

	ret = sbp_rw_data(req);
	if (ret) {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		sbp_send_status(req);
		return ret;
	}

	return sbp_send_sense(req);
}

/*
 * Called after command (no data transfer) or after the write (to device)
 * operation is completed
 */
static int sbp_queue_status(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	return sbp_send_sense(req);
}

static void sbp_queue_tm_rsp(struct se_cmd *se_cmd)
{
}

static void sbp_aborted_task(struct se_cmd *se_cmd)
{
	return;
}

static int sbp_check_stop_free(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	return transport_generic_free_cmd(&req->se_cmd, 0);
}
static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)
{
	struct se_lun *lun;
	int count = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link)
		count++;
	rcu_read_unlock();

	return count;
}
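
/*
 * The unit directory advertised in the config ROM is rebuilt from
 * scratch by the function below on every enable or LUN change: any
 * previously published descriptor is removed first, and nothing is
 * published while the target is disabled or has no TPG.
 */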
static int sbp_update_unit_directory(struct sbp_tport *tport)
{
	struct se_lun *lun;
	int num_luns, num_entries, idx = 0, mgt_agt_addr, ret;
	u32 *data;

	if (tport->unit_directory.data) {
		fw_core_remove_descriptor(&tport->unit_directory);
		kfree(tport->unit_directory.data);
		tport->unit_directory.data = NULL;
	}

	if (!tport->enable || !tport->tpg)
		return 0;

	num_luns = sbp_count_se_tpg_luns(&tport->tpg->se_tpg);

	/*
	 * Number of entries in the final unit directory:
	 *  - all of those in the template
	 *  - management_agent
	 *  - unit_characteristics
	 *  - reconnect_timeout
	 *  - unit unique ID
	 *  - one for each LUN
	 *
	 *  MUST NOT include leaf or sub-directory entries
	 */
	num_entries = ARRAY_SIZE(sbp_unit_directory_template) + 4 + num_luns;

	if (tport->directory_id != -1)
		num_entries++;

	/* allocate num_entries + 4 for the header and unique ID leaf */
	data = kcalloc((num_entries + 4), sizeof(u32), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* directory_length */
	data[idx++] = num_entries << 16;

	/* directory_id */
	if (tport->directory_id != -1)
		data[idx++] = (CSR_DIRECTORY_ID << 24) | tport->directory_id;

	/* unit directory template */
	memcpy(&data[idx], sbp_unit_directory_template,
			sizeof(sbp_unit_directory_template));
	idx += ARRAY_SIZE(sbp_unit_directory_template);

	/* management_agent */
	mgt_agt_addr = (tport->mgt_agt->handler.offset - CSR_REGISTER_BASE) / 4;
	data[idx++] = 0x54000000 | (mgt_agt_addr & 0x00ffffff);

	/* unit_characteristics */
	data[idx++] = 0x3a000000 |
		(((tport->mgt_orb_timeout * 2) << 8) & 0xff00) |
		SBP_ORB_FETCH_SIZE;

	/* reconnect_timeout */
	data[idx++] = 0x3d000000 | (tport->max_reconnect_timeout & 0xffff);

	/* unit unique ID (leaf is just after LUNs) */
	data[idx++] = 0x8d000000 | (num_luns + 1);

	rcu_read_lock();
	hlist_for_each_entry_rcu(lun, &tport->tpg->se_tpg.tpg_lun_hlist, link) {
		struct se_device *dev;
		int type;
		/*
		 * rcu_dereference_raw protected by se_lun->lun_group symlink
		 * reference to se_device->dev_group.
		 */
		dev = rcu_dereference_raw(lun->lun_se_dev);
		type = dev->transport->get_device_type(dev);

		/* logical_unit_number */
		data[idx++] = 0x14000000 |
			((type << 16) & 0x1f0000) |
			(lun->unpacked_lun & 0xffff);
	}
	rcu_read_unlock();

	/* unit unique ID leaf */
	data[idx++] = 2 << 16;
	data[idx++] = tport->guid >> 32;
	data[idx++] = tport->guid;

	tport->unit_directory.length = idx;
	tport->unit_directory.key = (CSR_DIRECTORY | CSR_UNIT) << 24;
	tport->unit_directory.data = data;

	ret = fw_core_add_descriptor(&tport->unit_directory);
	if (ret < 0) {
		kfree(tport->unit_directory.data);
		tport->unit_directory.data = NULL;
	}

	return ret;
}
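
/*
 * WWNs here are 64-bit EUI-64 GUIDs rendered as exactly 16 hex digits
 * (e.g. "0001020304050607"); sbp_parse_wwn() rejects anything else, and
 * sbp_format_wwn() is its inverse.
 */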
static ssize_t sbp_parse_wwn(const char *name, u64 *wwn)
{
	const char *cp;
	char c, nibble;
	int pos = 0, err;

	*wwn = 0;
	for (cp = name; cp < &name[SBP_NAMELEN - 1]; cp++) {
		c = *cp;
		if (c == '\n' && cp[1] == '\0')
			continue;
		if (c == '\0') {
			err = 2;
			if (pos != 16)
				goto fail;
			return cp - name;
		}
		err = 3;
		if (isdigit(c))
			nibble = c - '0';
		else if (isxdigit(c))
			nibble = tolower(c) - 'a' + 10;
		else
			goto fail;
		*wwn = (*wwn << 4) | nibble;
		pos++;
	}
	err = 4;
fail:
	printk(KERN_INFO "err %u len %zu pos %u\n",
			err, cp - name, pos);
	return -1;
}
static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn)
{
	return snprintf(buf, len, "%016llx", wwn);
}

static int sbp_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
{
	u64 guid = 0;

	if (sbp_parse_wwn(name, &guid) < 0)
		return -EINVAL;
	return 0;
}
static int sbp_post_link_lun(
		struct se_portal_group *se_tpg,
		struct se_lun *se_lun)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);

	return sbp_update_unit_directory(tpg->tport);
}

static void sbp_pre_unlink_lun(
		struct se_portal_group *se_tpg,
		struct se_lun *se_lun)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	int ret;

	if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0)
		tport->enable = 0;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		pr_err("unlink LUN: failed to update unit directory\n");
}
static struct se_portal_group *sbp_make_tpg(
		struct se_wwn *wwn,
		struct config_group *group,
		const char *name)
{
	struct sbp_tport *tport =
		container_of(wwn, struct sbp_tport, tport_wwn);

	struct sbp_tpg *tpg;
	unsigned long tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
		return ERR_PTR(-EINVAL);

	if (tport->tpg) {
		pr_err("Only one TPG per Unit is possible.\n");
		return ERR_PTR(-EBUSY);
	}

	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
	if (!tpg) {
		pr_err("Unable to allocate struct sbp_tpg\n");
		return ERR_PTR(-ENOMEM);
	}

	tpg->tport = tport;
	tpg->tport_tpgt = tpgt;
	tport->tpg = tpg;

	/* default attribute values */
	tport->enable = 0;
	tport->directory_id = -1;
	tport->mgt_orb_timeout = 15;
	tport->max_reconnect_timeout = 5;
	tport->max_logins_per_lun = 1;

	tport->mgt_agt = sbp_management_agent_register(tport);
	if (IS_ERR(tport->mgt_agt)) {
		ret = PTR_ERR(tport->mgt_agt);
		goto out_free_tpg;
	}

	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SBP);
	if (ret < 0)
		goto out_unreg_mgt_agt;

	return &tpg->se_tpg;

out_unreg_mgt_agt:
	sbp_management_agent_unregister(tport->mgt_agt);
out_free_tpg:
	tport->tpg = NULL;
	kfree(tpg);
	return ERR_PTR(ret);
}
static void sbp_drop_tpg(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	core_tpg_deregister(se_tpg);
	sbp_management_agent_unregister(tport->mgt_agt);
	tport->tpg = NULL;
	kfree(tpg);
}
static struct se_wwn *sbp_make_tport(
		struct target_fabric_configfs *tf,
		struct config_group *group,
		const char *name)
{
	struct sbp_tport *tport;
	u64 guid = 0;

	if (sbp_parse_wwn(name, &guid) < 0)
		return ERR_PTR(-EINVAL);

	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
	if (!tport) {
		pr_err("Unable to allocate struct sbp_tport\n");
		return ERR_PTR(-ENOMEM);
	}

	tport->guid = guid;
	sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid);

	return &tport->tport_wwn;
}

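/*
 * Illustrative configfs usage (assuming the usual configfs mount point;
 * the GUID below is an example value):
 *
 *	mkdir /sys/kernel/config/target/sbp/0123456789abcdef
 *	mkdir /sys/kernel/config/target/sbp/0123456789abcdef/tpgt_1
 *
 * The first mkdir lands in sbp_make_tport() via ->fabric_make_wwn, the
 * second in sbp_make_tpg(); creating a second tpgt_N under the same WWN
 * fails with -EBUSY, since only one TPG per unit is supported.
 */
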
static void sbp_drop_tport(struct se_wwn *wwn)
{
	struct sbp_tport *tport =
		container_of(wwn, struct sbp_tport, tport_wwn);

	kfree(tport);
}

static ssize_t sbp_wwn_version_show(struct config_item *item, char *page)
{
	return sprintf(page, "FireWire SBP fabric module %s\n", SBP_VERSION);
}

CONFIGFS_ATTR_RO(sbp_wwn_, version);

static struct configfs_attribute *sbp_wwn_attrs[] = {
	&sbp_wwn_attr_version,
	NULL,
};

static ssize_t sbp_tpg_directory_id_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	if (tport->directory_id == -1)
		return sprintf(page, "implicit\n");
	else
		return sprintf(page, "%06x\n", tport->directory_id);
}

static ssize_t sbp_tpg_directory_id_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;

	if (tport->enable) {
		pr_err("Cannot change the directory_id on an active target.\n");
		return -EBUSY;
	}

	if (strstr(page, "implicit") == page) {
		tport->directory_id = -1;
	} else {
		if (kstrtoul(page, 16, &val) < 0)
			return -EINVAL;
		if (val > 0xffffff)
			return -EINVAL;

		tport->directory_id = val;
	}

	return count;
}

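/*
 * directory_id is either the literal "implicit" (the default, stored as -1)
 * or a 24-bit hex value, and may only be changed while the target is
 * disabled. Illustrative usage:
 *
 *	echo 123abc > .../tpgt_1/directory_id
 *	echo implicit > .../tpgt_1/directory_id
 */
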
static ssize_t sbp_tpg_enable_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	return sprintf(page, "%d\n", tport->enable);
}

static ssize_t sbp_tpg_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val != 0) && (val != 1))
		return -EINVAL;

	if (tport->enable == val)
		return count;

	if (val) {
		if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) {
			pr_err("Cannot enable a target with no LUNs!\n");
			return -EINVAL;
		}
	} else {
		/* XXX: force-shutdown sessions instead? */
		spin_lock_bh(&se_tpg->session_lock);
		if (!list_empty(&se_tpg->tpg_sess_list)) {
			spin_unlock_bh(&se_tpg->session_lock);
			return -EBUSY;
		}
		spin_unlock_bh(&se_tpg->session_lock);
	}

	tport->enable = val;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0) {
		pr_err("Could not update Config ROM\n");
		return ret;
	}

	return count;
}

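/*
 * enable gates visibility on the bus: enabling demands at least one mapped
 * LUN, while disabling is refused (-EBUSY) as long as any session is still
 * logged in. Either transition rewrites the unit directory, so an
 * illustrative "echo 1 > .../tpgt_1/enable" is what actually makes the
 * target appear to initiators.
 */
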
CONFIGFS_ATTR(sbp_tpg_, directory_id);
CONFIGFS_ATTR(sbp_tpg_, enable);

static struct configfs_attribute *sbp_tpg_base_attrs[] = {
	&sbp_tpg_attr_directory_id,
	&sbp_tpg_attr_enable,
	NULL,
};

static ssize_t sbp_tpg_attrib_mgt_orb_timeout_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	return sprintf(page, "%d\n", tport->mgt_orb_timeout);
}

static ssize_t sbp_tpg_attrib_mgt_orb_timeout_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val < 1) || (val > 127))
		return -EINVAL;

	if (tport->mgt_orb_timeout == val)
		return count;

	tport->mgt_orb_timeout = val;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		return ret;

	return count;
}

static ssize_t sbp_tpg_attrib_max_reconnect_timeout_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	return sprintf(page, "%d\n", tport->max_reconnect_timeout);
}

static ssize_t sbp_tpg_attrib_max_reconnect_timeout_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val < 1) || (val > 32767))
		return -EINVAL;

	if (tport->max_reconnect_timeout == val)
		return count;

	tport->max_reconnect_timeout = val;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		return ret;

	return count;
}

static ssize_t sbp_tpg_attrib_max_logins_per_lun_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	return sprintf(page, "%d\n", tport->max_logins_per_lun);
}

static ssize_t sbp_tpg_attrib_max_logins_per_lun_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val < 1) || (val > 127))
		return -EINVAL;

	/* XXX: also check against current count? */

	tport->max_logins_per_lun = val;

	return count;
}

CONFIGFS_ATTR(sbp_tpg_attrib_, mgt_orb_timeout);
CONFIGFS_ATTR(sbp_tpg_attrib_, max_reconnect_timeout);
CONFIGFS_ATTR(sbp_tpg_attrib_, max_logins_per_lun);

static struct configfs_attribute *sbp_tpg_attrib_attrs[] = {
	&sbp_tpg_attrib_attr_mgt_orb_timeout,
	&sbp_tpg_attrib_attr_max_reconnect_timeout,
	&sbp_tpg_attrib_attr_max_logins_per_lun,
	NULL,
};

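/*
 * Summary of the ranges enforced by the attrib stores above:
 * mgt_orb_timeout and max_logins_per_lun accept 1..127, while
 * max_reconnect_timeout accepts 1..32767. The two timeouts are part of
 * the advertised unit directory, hence the sbp_update_unit_directory()
 * calls; max_logins_per_lun is not advertised, so its store skips the
 * Config ROM update.
 */
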
static const struct target_core_fabric_ops sbp_ops = {
	.module = THIS_MODULE,
	.name = "sbp",
	.get_fabric_name = sbp_get_fabric_name,
	.tpg_get_wwn = sbp_get_fabric_wwn,
	.tpg_get_tag = sbp_get_tag,
	.tpg_check_demo_mode = sbp_check_true,
	.tpg_check_demo_mode_cache = sbp_check_true,
	.tpg_check_demo_mode_write_protect = sbp_check_false,
	.tpg_check_prod_mode_write_protect = sbp_check_false,
	.tpg_get_inst_index = sbp_tpg_get_inst_index,
	.release_cmd = sbp_release_cmd,
	.sess_get_index = sbp_sess_get_index,
	.write_pending = sbp_write_pending,
	.write_pending_status = sbp_write_pending_status,
	.set_default_node_attributes = sbp_set_default_node_attrs,
	.get_cmd_state = sbp_get_cmd_state,
	.queue_data_in = sbp_queue_data_in,
	.queue_status = sbp_queue_status,
	.queue_tm_rsp = sbp_queue_tm_rsp,
	.aborted_task = sbp_aborted_task,
	.check_stop_free = sbp_check_stop_free,

	.fabric_make_wwn = sbp_make_tport,
	.fabric_drop_wwn = sbp_drop_tport,
	.fabric_make_tpg = sbp_make_tpg,
	.fabric_drop_tpg = sbp_drop_tpg,
	.fabric_post_link = sbp_post_link_lun,
	.fabric_pre_unlink = sbp_pre_unlink_lun,
	.fabric_make_np = NULL,
	.fabric_drop_np = NULL,
	.fabric_init_nodeacl = sbp_init_nodeacl,

	.tfc_wwn_attrs = sbp_wwn_attrs,
	.tfc_tpg_base_attrs = sbp_tpg_base_attrs,
	.tfc_tpg_attrib_attrs = sbp_tpg_attrib_attrs,
};

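/*
 * Registering this template publishes the fabric to the target core, which
 * in turn exposes the configfs subtree that the make/drop and attribute
 * functions above service; unregistering tears it back down.
 */
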
static int __init sbp_init(void)
{
	return target_register_template(&sbp_ops);
}

static void __exit sbp_exit(void)
{
	target_unregister_template(&sbp_ops);
}

MODULE_DESCRIPTION("FireWire SBP fabric driver");
MODULE_LICENSE("GPL");
module_init(sbp_init);
module_exit(sbp_exit);