// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * IBM Virtual SCSI Target Driver
 * Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp.
 *                         Santiago Leon (santil@us.ibm.com) IBM Corp.
 *                         Linda Xie (lxie@us.ibm.com) IBM Corp.
 *
 * Copyright (C) 2005-2011 FUJITA Tomonori <tomof@acm.org>
 * Copyright (C) 2010 Nicholas A. Bellinger <nab@kernel.org>
 *
 * Authors: Bryant G. Ly <bryantly@linux.vnet.ibm.com>
 * Authors: Michael Cyr <mikecyr@linux.vnet.ibm.com>
 *
 ****************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/delay.h>

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include <asm/hvcall.h>

#include <scsi/viosrp.h>

#include "ibmvscsi_tgt.h"

#define IBMVSCSIS_VERSION	"v0.2"

#define INITIAL_SRP_LIMIT	1024
#define DEFAULT_MAX_SECTORS	256
#define MAX_TXU			1024 * 1024
static uint max_vdma_size = MAX_H_COPY_RDMA;

static char system_id[SYS_ID_NAME_LEN] = "";
static char partition_name[PARTITION_NAMELEN] = "UNKNOWN";
static uint partition_number = -1;

/* Adapter list and lock to control it */
static DEFINE_SPINLOCK(ibmvscsis_dev_lock);
static LIST_HEAD(ibmvscsis_dev_list);
static long ibmvscsis_parse_command(struct scsi_info *vscsi,
                                    struct viosrp_crq *crq);

static void ibmvscsis_adapter_idle(struct scsi_info *vscsi);
static void ibmvscsis_determine_resid(struct se_cmd *se_cmd,
                                      struct srp_rsp *rsp)
{
        u32 residual_count = se_cmd->residual_count;

        if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
                if (se_cmd->data_direction == DMA_TO_DEVICE) {
                        /* residual data from an underflow write */
                        rsp->flags = SRP_RSP_FLAG_DOUNDER;
                        rsp->data_out_res_cnt = cpu_to_be32(residual_count);
                } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
                        /* residual data from an underflow read */
                        rsp->flags = SRP_RSP_FLAG_DIUNDER;
                        rsp->data_in_res_cnt = cpu_to_be32(residual_count);
                }
        } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
                if (se_cmd->data_direction == DMA_TO_DEVICE) {
                        /* residual data from an overflow write */
                        rsp->flags = SRP_RSP_FLAG_DOOVER;
                        rsp->data_out_res_cnt = cpu_to_be32(residual_count);
                } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
                        /* residual data from an overflow read */
                        rsp->flags = SRP_RSP_FLAG_DIOVER;
                        rsp->data_in_res_cnt = cpu_to_be32(residual_count);
                }
        }
}
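/*
 * Illustrative sketch, not part of the original driver: a response-building
 * path would typically zero the SRP response and then let the helper above
 * fill in the residual fields from the target-core command state. The exact
 * caller shown here is hypothetical; it only assumes the srp_rsp layout and
 * the vio_iu() accessor used elsewhere in this driver.
 *
 *	struct srp_rsp *rsp = &vio_iu(iue)->srp.rsp;
 *
 *	memset(rsp, 0, sizeof(struct srp_rsp));
 *	rsp->opcode = SRP_RSP;
 *	ibmvscsis_determine_resid(se_cmd, rsp);
 */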
/**
 * connection_broken() - Determine if the connection to the client is good
 * @vscsi:	Pointer to our adapter structure
 *
 * This function attempts to send a ping MAD to the client. If the call to
 * queue the request returns H_CLOSED then the connection has been broken
 * and the function returns TRUE.
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt or Process environment
 */
static bool connection_broken(struct scsi_info *vscsi)
{
        struct viosrp_crq *crq;
        u64 buffer[2] = { 0, 0 };
        long h_return_code;
        bool rc = false;

        /* create a PING crq */
        crq = (struct viosrp_crq *)&buffer;
        crq->valid = VALID_CMD_RESP_EL;
        crq->format = MESSAGE_IN_CRQ;

        h_return_code = h_send_crq(vscsi->dds.unit_id,
                                   cpu_to_be64(buffer[MSG_HI]),
                                   cpu_to_be64(buffer[MSG_LOW]));

        dev_dbg(&vscsi->dev, "Connection_broken: rc %ld\n", h_return_code);

        if (h_return_code == H_CLOSED)
                rc = true;

        return rc;
}
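/*
 * Illustrative sketch, not from the original source: error paths elsewhere in
 * this driver use connection_broken() to decide which extra flag bits to hand
 * to ibmvscsis_post_disconnect() when an hcall fails with H_PERMISSION:
 *
 *	if (connection_broken(vscsi))
 *		flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
 *	ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, flag_bits);
 */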
/**
 * ibmvscsis_unregister_command_q() - Helper Function-Unregister Command Queue
 * @vscsi:	Pointer to our adapter structure
 *
 * This function calls h_free_q then frees the interrupt bit etc.
 * It must release the lock before doing so because of the time it can take
 * for h_free_crq in PHYP
 * NOTE: * the caller must make sure that state and or flags will prevent
 *	   interrupt handler from scheduling work.
 *	 * anyone calling this function may need to set the CRQ_CLOSED flag
 *	   we can't do it here, because we don't have the lock
 *
 * EXECUTION ENVIRONMENT:
 *	Process level
 */
static long ibmvscsis_unregister_command_q(struct scsi_info *vscsi)
{
        long qrc;
        long rc = ADAPT_SUCCESS;
        int ticks = 0;

        do {
                qrc = h_free_crq(vscsi->dds.unit_id);
                switch (qrc) {
                case H_SUCCESS:
                        spin_lock_bh(&vscsi->intr_lock);
                        vscsi->flags &= ~PREP_FOR_SUSPEND_FLAGS;
                        spin_unlock_bh(&vscsi->intr_lock);
                        break;

                case H_HARDWARE:
                case H_PARAMETER:
                        dev_err(&vscsi->dev, "unregister_command_q: error from h_free_crq %ld\n",
                                qrc);
                        rc = ERROR;
                        break;

                case H_BUSY:
                case H_LONG_BUSY_ORDER_1_MSEC:
                        /* msleep not good for small values */
                        usleep_range(1000, 2000);
                        ticks += 1;
                        break;
                case H_LONG_BUSY_ORDER_10_MSEC:
                        usleep_range(10000, 20000);
                        ticks += 10;
                        break;
                case H_LONG_BUSY_ORDER_100_MSEC:
                        msleep(100);
                        ticks += 100;
                        break;
                case H_LONG_BUSY_ORDER_1_SEC:
                        ssleep(1);
                        ticks += 1000;
                        break;
                case H_LONG_BUSY_ORDER_10_SEC:
                        ssleep(10);
                        ticks += 10000;
                        break;
                case H_LONG_BUSY_ORDER_100_SEC:
                        ssleep(100);
                        ticks += 100000;
                        break;
                default:
                        dev_err(&vscsi->dev, "unregister_command_q: unknown error %ld from h_free_crq\n",
                                qrc);
                        rc = ERROR;
                        break;
                }

                /*
                 * don't wait more than 300 seconds
                 * ticks are in milliseconds more or less
                 */
                if (ticks > 300000 && qrc != H_SUCCESS) {
                        rc = ERROR;
                        dev_err(&vscsi->dev, "Excessive wait for h_free_crq\n");
                }
        } while (qrc != H_SUCCESS && rc == ADAPT_SUCCESS);

        dev_dbg(&vscsi->dev, "Freeing CRQ: phyp rc %ld, rc %ld\n", qrc, rc);

        return rc;
}
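/*
 * Hedged usage sketch, not in the original source: per the NOTE in the
 * kernel-doc above, a caller is expected to re-take the interrupt lock and
 * mark the queue closed itself once this helper returns, roughly as
 * ibmvscsis_free_command_q() below does:
 *
 *	spin_unlock_bh(&vscsi->intr_lock);
 *	rc = ibmvscsis_unregister_command_q(vscsi);
 *	spin_lock_bh(&vscsi->intr_lock);
 *	if (rc == ADAPT_SUCCESS)
 *		vscsi->flags |= CRQ_CLOSED;
 */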
/**
 * ibmvscsis_delete_client_info() - Helper function to Delete Client Info
 * @vscsi:		Pointer to our adapter structure
 * @client_closed:	True if client closed its queue
 *
 * Deletes information specific to the client when the client goes away
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt or Process
 */
static void ibmvscsis_delete_client_info(struct scsi_info *vscsi,
                                         bool client_closed)
{
        vscsi->client_cap = 0;

        /*
         * Some things we don't want to clear if we're closing the queue,
         * because some clients don't resend the host handshake when they
         * get a transport event.
         */
        if (client_closed)
                vscsi->client_data.os_type = 0;
}
/**
 * ibmvscsis_free_command_q() - Free Command Queue
 * @vscsi:	Pointer to our adapter structure
 *
 * This function calls unregister_command_q, then clears interrupts and
 * any pending interrupt acknowledgments associated with the command q.
 * It also clears memory if there is no error.
 *
 * PHYP did not meet the PAPR architecture so that we must give up the
 * lock. This causes a timing hole regarding state change. To close the
 * hole this routine does accounting on any change that occurred during
 * the time the lock is not held.
 * NOTE: must give up and then acquire the interrupt lock, the caller must
 *	 make sure that state and or flags will prevent interrupt handler from
 *	 scheduling work.
 *
 * EXECUTION ENVIRONMENT:
 *	Process level, interrupt lock is held
 */
static long ibmvscsis_free_command_q(struct scsi_info *vscsi)
{
        int bytes;
        u32 flags_under_lock;
        u16 state_under_lock;
        long rc = ADAPT_SUCCESS;

        if (!(vscsi->flags & CRQ_CLOSED)) {
                vio_disable_interrupts(vscsi->dma_dev);

                state_under_lock = vscsi->new_state;
                flags_under_lock = vscsi->flags;
                vscsi->phyp_acr_state = 0;
                vscsi->phyp_acr_flags = 0;

                spin_unlock_bh(&vscsi->intr_lock);
                rc = ibmvscsis_unregister_command_q(vscsi);
                spin_lock_bh(&vscsi->intr_lock);

                if (state_under_lock != vscsi->new_state)
                        vscsi->phyp_acr_state = vscsi->new_state;

                vscsi->phyp_acr_flags = ((~flags_under_lock) & vscsi->flags);

                if (rc == ADAPT_SUCCESS) {
                        bytes = vscsi->cmd_q.size * PAGE_SIZE;
                        memset(vscsi->cmd_q.base_addr, 0, bytes);
                        vscsi->cmd_q.index = 0;
                        vscsi->flags |= CRQ_CLOSED;

                        ibmvscsis_delete_client_info(vscsi, false);
                }

                dev_dbg(&vscsi->dev, "free_command_q: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
                        vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
                        vscsi->phyp_acr_state);
        }
        return rc;
}
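/*
 * Added note, not in the original source: the phyp_acr_state/phyp_acr_flags
 * bookkeeping above records any state or flag change that slipped in while
 * the interrupt lock was dropped around h_free_crq. ibmvscsis_adapter_idle()
 * later replays it, conceptually:
 *
 *	if (vscsi->phyp_acr_state != 0) {
 *		vscsi->flags |= vscsi->phyp_acr_flags;
 *		ibmvscsis_post_disconnect(vscsi, vscsi->phyp_acr_state, 0);
 *		vscsi->phyp_acr_state = 0;
 *		vscsi->phyp_acr_flags = 0;
 *	}
 */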
/**
 * ibmvscsis_cmd_q_dequeue() - Get valid Command element
 * @mask:		Mask to use in case index wraps
 * @current_index:	Current index into command queue
 * @base_addr:		Pointer to start of command queue
 *
 * Returns a pointer to a valid command element or NULL, if the command
 * queue is empty
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt environment, interrupt lock held
 */
static struct viosrp_crq *ibmvscsis_cmd_q_dequeue(uint mask,
                                                  uint *current_index,
                                                  struct viosrp_crq *base_addr)
{
        struct viosrp_crq *ptr;

        ptr = base_addr + *current_index;

        if (ptr->valid) {
                *current_index = (*current_index + 1) & mask;
        } else {
                ptr = NULL;
        }

        return ptr;
}
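/*
 * Illustrative sketch, not part of the original driver: a caller can drain
 * pending elements by dequeuing until NULL is returned, invalidating each
 * element as it is consumed (ibmvscsis_check_init_msg() below uses the same
 * dequeue/invalidate pattern):
 *
 *	crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask, &vscsi->cmd_q.index,
 *				      vscsi->cmd_q.base_addr);
 *	while (crq) {
 *		// ...handle the element...
 *		crq->valid = INVALIDATE_CMD_RESP_EL;
 *		crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask,
 *					      &vscsi->cmd_q.index,
 *					      vscsi->cmd_q.base_addr);
 *	}
 */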
/**
 * ibmvscsis_send_init_message() - send initialize message to the client
 * @vscsi:	Pointer to our adapter structure
 * @format:	Which Init Message format to send
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt environment interrupt lock held
 */
static long ibmvscsis_send_init_message(struct scsi_info *vscsi, u8 format)
{
        struct viosrp_crq *crq;
        u64 buffer[2] = { 0, 0 };
        long rc;

        crq = (struct viosrp_crq *)&buffer;
        crq->valid = VALID_INIT_MSG;
        crq->format = format;
        rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]),
                        cpu_to_be64(buffer[MSG_LOW]));

        return rc;
}
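/*
 * Added note, not in the original source: the CRQ initialization handshake is
 * a two-message exchange. Whichever side registers its queue first sends
 * INIT_MSG and expects INIT_COMPLETE_MSG back; the peer answers. In this
 * driver that looks roughly like:
 *
 *	rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
 *	// ...later, on receipt of the client's INIT_MSG...
 *	rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
 */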
/**
 * ibmvscsis_check_init_msg() - Check init message valid
 * @vscsi:	Pointer to our adapter structure
 * @format:	Pointer to return format of Init Message, if any.
 *		Set to UNUSED_FORMAT if no Init Message in queue.
 *
 * Checks if an initialize message was queued by the initiator
 * after the queue was created and before the interrupt was enabled.
 *
 * EXECUTION ENVIRONMENT:
 *	Process level only, interrupt lock held
 */
static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format)
{
        struct viosrp_crq *crq;
        long rc = ADAPT_SUCCESS;

        crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask, &vscsi->cmd_q.index,
                                      vscsi->cmd_q.base_addr);
        if (!crq) {
                *format = (uint)UNUSED_FORMAT;
        } else if (crq->valid == VALID_INIT_MSG && crq->format == INIT_MSG) {
                *format = (uint)INIT_MSG;
                crq->valid = INVALIDATE_CMD_RESP_EL;

                /*
                 * the caller has ensured no initialize message was
                 * sent after the queue was
                 * created so there should be no other message on the queue.
                 */
                crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask,
                                              &vscsi->cmd_q.index,
                                              vscsi->cmd_q.base_addr);
                if (crq) {
                        *format = (uint)(crq->format);
                        rc = ERROR;
                        crq->valid = INVALIDATE_CMD_RESP_EL;
                }
        } else {
                *format = (uint)(crq->format);
                rc = ERROR;
                crq->valid = INVALIDATE_CMD_RESP_EL;
        }

        return rc;
}
/**
 * ibmvscsis_disconnect() - Helper function to disconnect
 * @work:	Pointer to work_struct, gives access to our adapter structure
 *
 * An error has occurred or the driver received a Transport event,
 * and the driver is requesting that the command queue be de-registered
 * in a safe manner. If there is no outstanding I/O then we can stop the
 * queue. If we are restarting the queue it will be reflected in the
 * state of the adapter.
 *
 * EXECUTION ENVIRONMENT:
 *	Process environment
 */
static void ibmvscsis_disconnect(struct work_struct *work)
{
        struct scsi_info *vscsi = container_of(work, struct scsi_info,
                                               proc_work);
        u16 new_state;
        bool wait_idle = false;

        spin_lock_bh(&vscsi->intr_lock);
        new_state = vscsi->new_state;
        vscsi->new_state = 0;

        vscsi->flags |= DISCONNECT_SCHEDULED;
        vscsi->flags &= ~SCHEDULE_DISCONNECT;

        dev_dbg(&vscsi->dev, "disconnect: flags 0x%x, state 0x%hx\n",
                vscsi->flags, vscsi->state);

        /*
         * check which state we are in and see if we
         * should transition to the new state
         */
        switch (vscsi->state) {
        /* Should never be called while in this state. */
        case NO_QUEUE:
        /*
         * Can never transition from this state;
         * ignore errors and logout.
         */
        case UNCONFIGURING:
                break;

        /* can transition from this state to UNCONFIGURING */
        case ERR_DISCONNECT:
                if (new_state == UNCONFIGURING)
                        vscsi->state = new_state;
                break;

        /*
         * Can transition from this state to unconfiguring
         * or err disconnect.
         */
        case ERR_DISCONNECT_RECONNECT:
                switch (new_state) {
                case UNCONFIGURING:
                case ERR_DISCONNECT:
                        vscsi->state = new_state;
                        break;
                }
                break;

        /* can transition from this state to UNCONFIGURING */
        case ERR_DISCONNECTED:
                if (new_state == UNCONFIGURING)
                        vscsi->state = new_state;
                break;

        case WAIT_ENABLED:
                switch (new_state) {
                case UNCONFIGURING:
                        vscsi->state = new_state;
                        vscsi->flags |= RESPONSE_Q_DOWN;
                        vscsi->flags &= ~(SCHEDULE_DISCONNECT |
                                          DISCONNECT_SCHEDULED);
                        if (vscsi->flags & CFG_SLEEPING) {
                                vscsi->flags &= ~CFG_SLEEPING;
                                complete(&vscsi->unconfig);
                        }
                        break;

                /* should never happen */
                case ERR_DISCONNECT:
                case ERR_DISCONNECT_RECONNECT:
                case WAIT_IDLE:
                        dev_err(&vscsi->dev, "disconnect: invalid state %d for WAIT_IDLE\n",
                                vscsi->state);
                        break;
                }
                break;

        case WAIT_IDLE:
                switch (new_state) {
                case UNCONFIGURING:
                        vscsi->flags |= RESPONSE_Q_DOWN;
                        vscsi->state = new_state;
                        vscsi->flags &= ~(SCHEDULE_DISCONNECT |
                                          DISCONNECT_SCHEDULED);
                        ibmvscsis_free_command_q(vscsi);
                        break;
                case ERR_DISCONNECT:
                case ERR_DISCONNECT_RECONNECT:
                        vscsi->state = new_state;
                        break;
                }
                break;

        /*
         * Initiator has not done a successful srp login
         * or has done a successful srp logout ( adapter was not
         * busy). In the first case there can be responses queued
         * waiting for space on the initiators response queue (MAD)
         * The second case the adapter is idle. Assume the worst case,
         * i.e. the second case.
         */
        case WAIT_CONNECTION:
        case CONNECTED:
        case SRP_PROCESSING:
                wait_idle = true;
                vscsi->state = new_state;
                break;

        /* can transition from this state to UNCONFIGURING */
        case UNDEFINED:
                if (new_state == UNCONFIGURING)
                        vscsi->state = new_state;
                break;

        default:
                break;
        }

        if (wait_idle) {
                dev_dbg(&vscsi->dev, "disconnect start wait, active %d, sched %d\n",
                        (int)list_empty(&vscsi->active_q),
                        (int)list_empty(&vscsi->schedule_q));
                if (!list_empty(&vscsi->active_q) ||
                    !list_empty(&vscsi->schedule_q)) {
                        vscsi->flags |= WAIT_FOR_IDLE;
                        dev_dbg(&vscsi->dev, "disconnect flags 0x%x\n",
                                vscsi->flags);
                        /*
                         * This routine can not be called with the interrupt
                         * lock held.
                         */
                        spin_unlock_bh(&vscsi->intr_lock);
                        wait_for_completion(&vscsi->wait_idle);
                        spin_lock_bh(&vscsi->intr_lock);
                }
                dev_dbg(&vscsi->dev, "disconnect stop wait\n");

                ibmvscsis_adapter_idle(vscsi);
        }

        spin_unlock_bh(&vscsi->intr_lock);
}
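/*
 * Added note, not in the original source: the wait_for_completion() above
 * pairs with ibmvscsis_free_cmd_resources(), which completes
 * &vscsi->wait_idle once the active, schedule and waiting_rsp queues have all
 * drained:
 *
 *	if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) &&
 *	    list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) {
 *		vscsi->flags &= ~WAIT_FOR_IDLE;
 *		complete(&vscsi->wait_idle);
 *	}
 */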
/**
 * ibmvscsis_post_disconnect() - Schedule the disconnect
 * @vscsi:	Pointer to our adapter structure
 * @new_state:	State to move to after disconnecting
 * @flag_bits:	Flags to turn on in adapter structure
 *
 * If it's already been scheduled, then see if we need to "upgrade"
 * the new state (if the one passed in is more "severe" than the
 * previous one).
 *
 * PRECONDITION:
 *	interrupt lock is held
 */
static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
                                      uint flag_bits)
{
        uint state;

        /* check the validity of the new state */
        switch (new_state) {
        case UNCONFIGURING:
        case ERR_DISCONNECT:
        case ERR_DISCONNECT_RECONNECT:
        case WAIT_IDLE:
                break;

        default:
                dev_err(&vscsi->dev, "post_disconnect: Invalid new state %d\n",
                        new_state);
                return;
        }

        vscsi->flags |= flag_bits;

        dev_dbg(&vscsi->dev, "post_disconnect: new_state 0x%x, flag_bits 0x%x, vscsi->flags 0x%x, state %hx\n",
                new_state, flag_bits, vscsi->flags, vscsi->state);

        if (!(vscsi->flags & (DISCONNECT_SCHEDULED | SCHEDULE_DISCONNECT))) {
                vscsi->flags |= SCHEDULE_DISCONNECT;
                vscsi->new_state = new_state;

                INIT_WORK(&vscsi->proc_work, ibmvscsis_disconnect);
                (void)queue_work(vscsi->work_q, &vscsi->proc_work);
        } else {
                if (vscsi->new_state)
                        state = vscsi->new_state;
                else
                        state = vscsi->state;

                switch (state) {
                case NO_QUEUE:
                case UNCONFIGURING:
                        break;

                case ERR_DISCONNECTED:
                case ERR_DISCONNECT:
                case UNDEFINED:
                        if (new_state == UNCONFIGURING)
                                vscsi->new_state = new_state;
                        break;

                case ERR_DISCONNECT_RECONNECT:
                        switch (new_state) {
                        case UNCONFIGURING:
                        case ERR_DISCONNECT:
                                vscsi->new_state = new_state;
                                break;
                        }
                        break;

                case WAIT_ENABLED:
                case WAIT_IDLE:
                case WAIT_CONNECTION:
                case CONNECTED:
                case SRP_PROCESSING:
                        vscsi->new_state = new_state;
                        break;
                }
        }

        dev_dbg(&vscsi->dev, "Leaving post_disconnect: flags 0x%x, new_state 0x%x\n",
                vscsi->flags, vscsi->new_state);
}
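/*
 * Added note, not in the original source: because a disconnect may already be
 * scheduled, callers simply request the most severe state they need and let
 * the "upgrade" logic above reconcile it. For example, an unconfigure request
 * issued while an ERR_DISCONNECT_RECONNECT is pending reduces to:
 *
 *	ibmvscsis_post_disconnect(vscsi, UNCONFIGURING, 0);
 *	// vscsi->new_state is upgraded to UNCONFIGURING; no second work item
 *	// is queued because DISCONNECT_SCHEDULED is already set.
 */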
/**
 * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message
 * @vscsi:	Pointer to our adapter structure
 *
 * Must be called with interrupt lock held.
 */
654 static long ibmvscsis_handle_init_compl_msg(struct scsi_info
*vscsi
)
656 long rc
= ADAPT_SUCCESS
;
658 switch (vscsi
->state
) {
661 case ERR_DISCONNECT_RECONNECT
:
662 case ERR_DISCONNECTED
:
668 case WAIT_CONNECTION
:
669 vscsi
->state
= CONNECTED
;
678 dev_err(&vscsi
->dev
, "init_msg: invalid state %d to get init compl msg\n",
680 ibmvscsis_post_disconnect(vscsi
, ERR_DISCONNECT_RECONNECT
, 0);
/**
 * ibmvscsis_handle_init_msg() - Respond to an Init Message
 * @vscsi:	Pointer to our adapter structure
 *
 * Must be called with interrupt lock held.
 */
693 static long ibmvscsis_handle_init_msg(struct scsi_info
*vscsi
)
695 long rc
= ADAPT_SUCCESS
;
697 switch (vscsi
->state
) {
698 case WAIT_CONNECTION
:
699 rc
= ibmvscsis_send_init_message(vscsi
, INIT_COMPLETE_MSG
);
702 vscsi
->state
= CONNECTED
;
706 dev_err(&vscsi
->dev
, "init_msg: failed to send, rc %ld\n",
708 ibmvscsis_post_disconnect(vscsi
, ERR_DISCONNECT
, 0);
712 dev_err(&vscsi
->dev
, "init_msg: failed to send, rc %ld\n",
715 ibmvscsis_post_disconnect(vscsi
,
716 ERR_DISCONNECT_RECONNECT
, 0);
720 dev_warn(&vscsi
->dev
, "init_msg: failed to send, rc %ld\n",
740 case ERR_DISCONNECT_RECONNECT
:
741 case ERR_DISCONNECTED
:
744 dev_err(&vscsi
->dev
, "init_msg: invalid state %d to get init msg\n",
746 ibmvscsis_post_disconnect(vscsi
, ERR_DISCONNECT_RECONNECT
, 0);
/**
 * ibmvscsis_init_msg() - Respond to an init message
 * @vscsi:	Pointer to our adapter structure
 * @crq:	Pointer to CRQ element containing the Init Message
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, interrupt lock held
 */
761 static long ibmvscsis_init_msg(struct scsi_info
*vscsi
, struct viosrp_crq
*crq
)
763 long rc
= ADAPT_SUCCESS
;
765 dev_dbg(&vscsi
->dev
, "init_msg: state 0x%hx\n", vscsi
->state
);
767 rc
= h_vioctl(vscsi
->dds
.unit_id
, H_GET_PARTNER_INFO
,
768 (u64
)vscsi
->map_ioba
| ((u64
)PAGE_SIZE
<< 32), 0, 0, 0,
770 if (rc
== H_SUCCESS
) {
771 vscsi
->client_data
.partition_number
=
772 be64_to_cpu(*(u64
*)vscsi
->map_buf
);
773 dev_dbg(&vscsi
->dev
, "init_msg, part num %d\n",
774 vscsi
->client_data
.partition_number
);
776 dev_dbg(&vscsi
->dev
, "init_msg h_vioctl rc %ld\n", rc
);
780 if (crq
->format
== INIT_MSG
) {
781 rc
= ibmvscsis_handle_init_msg(vscsi
);
782 } else if (crq
->format
== INIT_COMPLETE_MSG
) {
783 rc
= ibmvscsis_handle_init_compl_msg(vscsi
);
786 dev_err(&vscsi
->dev
, "init_msg: invalid format %d\n",
788 ibmvscsis_post_disconnect(vscsi
, ERR_DISCONNECT_RECONNECT
, 0);
/**
 * ibmvscsis_establish_new_q() - Establish new CRQ queue
 * @vscsi:	Pointer to our adapter structure
 *
 * Must be called with interrupt lock held.
 */
800 static long ibmvscsis_establish_new_q(struct scsi_info
*vscsi
)
802 long rc
= ADAPT_SUCCESS
;
805 rc
= h_vioctl(vscsi
->dds
.unit_id
, H_ENABLE_PREPARE_FOR_SUSPEND
, 30000,
808 vscsi
->flags
|= PREP_FOR_SUSPEND_ENABLED
;
809 else if (rc
!= H_NOT_FOUND
)
810 dev_err(&vscsi
->dev
, "Error from Enable Prepare for Suspend: %ld\n",
813 vscsi
->flags
&= PRESERVE_FLAG_FIELDS
;
814 vscsi
->rsp_q_timer
.timer_pops
= 0;
818 rc
= vio_enable_interrupts(vscsi
->dma_dev
);
820 dev_warn(&vscsi
->dev
, "establish_new_q: failed to enable interrupts, rc %ld\n",
825 rc
= ibmvscsis_check_init_msg(vscsi
, &format
);
827 dev_err(&vscsi
->dev
, "establish_new_q: check_init_msg failed, rc %ld\n",
832 if (format
== UNUSED_FORMAT
) {
833 rc
= ibmvscsis_send_init_message(vscsi
, INIT_MSG
);
846 vscsi
->state
= UNDEFINED
;
850 } else if (format
== INIT_MSG
) {
851 rc
= ibmvscsis_handle_init_msg(vscsi
);
/**
 * ibmvscsis_reset_queue() - Reset CRQ Queue
 * @vscsi:	Pointer to our adapter structure
 *
 * This function calls h_free_q and then calls h_reg_q and does all
 * of the bookkeeping to get us back to where we can communicate.
 *
 * Actually, we don't always call h_free_crq. A problem was discovered
 * where one partition would close and reopen his queue, which would
 * cause his partner to get a transport event, which would cause him to
 * close and reopen his queue, which would cause the original partition
 * to get a transport event, etc., etc. To prevent this, we don't
 * actually close our queue if the client initiated the reset, (i.e.
 * either we got a transport event or we have detected that the client's
 * queue is gone)
 *
 * EXECUTION ENVIRONMENT:
 *	Process environment, called with interrupt lock held
 */
876 static void ibmvscsis_reset_queue(struct scsi_info
*vscsi
)
879 long rc
= ADAPT_SUCCESS
;
881 dev_dbg(&vscsi
->dev
, "reset_queue: flags 0x%x\n", vscsi
->flags
);
883 /* don't reset, the client did it for us */
884 if (vscsi
->flags
& (CLIENT_FAILED
| TRANS_EVENT
)) {
885 vscsi
->flags
&= PRESERVE_FLAG_FIELDS
;
886 vscsi
->rsp_q_timer
.timer_pops
= 0;
889 vscsi
->state
= WAIT_CONNECTION
;
890 vio_enable_interrupts(vscsi
->dma_dev
);
892 rc
= ibmvscsis_free_command_q(vscsi
);
893 if (rc
== ADAPT_SUCCESS
) {
894 vscsi
->state
= WAIT_CONNECTION
;
896 bytes
= vscsi
->cmd_q
.size
* PAGE_SIZE
;
897 rc
= h_reg_crq(vscsi
->dds
.unit_id
,
898 vscsi
->cmd_q
.crq_token
, bytes
);
899 if (rc
== H_CLOSED
|| rc
== H_SUCCESS
) {
900 rc
= ibmvscsis_establish_new_q(vscsi
);
903 if (rc
!= ADAPT_SUCCESS
) {
904 dev_dbg(&vscsi
->dev
, "reset_queue: reg_crq rc %ld\n",
907 vscsi
->state
= ERR_DISCONNECTED
;
908 vscsi
->flags
|= RESPONSE_Q_DOWN
;
909 ibmvscsis_free_command_q(vscsi
);
912 vscsi
->state
= ERR_DISCONNECTED
;
913 vscsi
->flags
|= RESPONSE_Q_DOWN
;
/**
 * ibmvscsis_free_cmd_resources() - Free command resources
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Command which is no longer in use
 *
 * Must be called with interrupt lock held.
 */
925 static void ibmvscsis_free_cmd_resources(struct scsi_info
*vscsi
,
926 struct ibmvscsis_cmd
*cmd
)
928 struct iu_entry
*iue
= cmd
->iue
;
931 case TASK_MANAGEMENT
:
934 * When the queue goes down this value is cleared, so it
935 * cannot be cleared in this general purpose function.
941 vscsi
->flags
&= ~PROCESSING_MAD
;
946 dev_err(&vscsi
->dev
, "free_cmd_resources unknown type %d\n",
952 list_add_tail(&cmd
->list
, &vscsi
->free_cmd
);
955 if (list_empty(&vscsi
->active_q
) && list_empty(&vscsi
->schedule_q
) &&
956 list_empty(&vscsi
->waiting_rsp
) && (vscsi
->flags
& WAIT_FOR_IDLE
)) {
957 vscsi
->flags
&= ~WAIT_FOR_IDLE
;
958 complete(&vscsi
->wait_idle
);
/**
 * ibmvscsis_ready_for_suspend() - Helper function to call VIOCTL
 * @vscsi:	Pointer to our adapter structure
 * @idle:	Indicates whether we were called from adapter_idle. This
 *		is important to know if we need to do a disconnect, since if
 *		we're called from adapter_idle, we're still processing the
 *		current disconnect, so we can't just call post_disconnect.
 *
 * This function is called when the adapter is idle when phyp has sent
 * us a Prepare for Suspend Transport Event.
 *
 * EXECUTION ENVIRONMENT:
 *	Process or interrupt environment called with interrupt lock held
 */
976 static long ibmvscsis_ready_for_suspend(struct scsi_info
*vscsi
, bool idle
)
979 struct viosrp_crq
*crq
;
981 /* See if there is a Resume event in the queue */
982 crq
= vscsi
->cmd_q
.base_addr
+ vscsi
->cmd_q
.index
;
984 dev_dbg(&vscsi
->dev
, "ready_suspend: flags 0x%x, state 0x%hx crq_valid:%x\n",
985 vscsi
->flags
, vscsi
->state
, (int)crq
->valid
);
987 if (!(vscsi
->flags
& PREP_FOR_SUSPEND_ABORTED
) && !(crq
->valid
)) {
988 rc
= h_vioctl(vscsi
->dds
.unit_id
, H_READY_FOR_SUSPEND
, 0, 0, 0,
991 dev_err(&vscsi
->dev
, "Ready for Suspend Vioctl failed: %ld\n",
995 } else if (((vscsi
->flags
& PREP_FOR_SUSPEND_OVERWRITE
) &&
996 (vscsi
->flags
& PREP_FOR_SUSPEND_ABORTED
)) ||
997 ((crq
->valid
) && ((crq
->valid
!= VALID_TRANS_EVENT
) ||
998 (crq
->format
!= RESUME_FROM_SUSP
)))) {
1000 vscsi
->state
= ERR_DISCONNECT_RECONNECT
;
1001 ibmvscsis_reset_queue(vscsi
);
1003 } else if (vscsi
->state
== CONNECTED
) {
1004 ibmvscsis_post_disconnect(vscsi
,
1005 ERR_DISCONNECT_RECONNECT
, 0);
1008 vscsi
->flags
&= ~PREP_FOR_SUSPEND_OVERWRITE
;
1010 if ((crq
->valid
) && ((crq
->valid
!= VALID_TRANS_EVENT
) ||
1011 (crq
->format
!= RESUME_FROM_SUSP
)))
1012 dev_err(&vscsi
->dev
, "Invalid element in CRQ after Prepare for Suspend");
1015 vscsi
->flags
&= ~(PREP_FOR_SUSPEND_PENDING
| PREP_FOR_SUSPEND_ABORTED
);
/**
 * ibmvscsis_trans_event() - Handle a Transport Event
 * @vscsi:	Pointer to our adapter structure
 * @crq:	Pointer to CRQ entry containing the Transport Event
 *
 * Do the logic to close the I_T nexus. This function may not
 * behave to specification.
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, interrupt lock held
 */
1031 static long ibmvscsis_trans_event(struct scsi_info
*vscsi
,
1032 struct viosrp_crq
*crq
)
1034 long rc
= ADAPT_SUCCESS
;
1036 dev_dbg(&vscsi
->dev
, "trans_event: format %d, flags 0x%x, state 0x%hx\n",
1037 (int)crq
->format
, vscsi
->flags
, vscsi
->state
);
1039 switch (crq
->format
) {
1041 case PARTNER_FAILED
:
1042 case PARTNER_DEREGISTER
:
1043 ibmvscsis_delete_client_info(vscsi
, true);
1044 if (crq
->format
== MIGRATED
)
1045 vscsi
->flags
&= ~PREP_FOR_SUSPEND_OVERWRITE
;
1046 switch (vscsi
->state
) {
1048 case ERR_DISCONNECTED
:
1053 vscsi
->flags
|= (RESPONSE_Q_DOWN
| TRANS_EVENT
);
1059 case WAIT_CONNECTION
:
1063 ibmvscsis_post_disconnect(vscsi
, WAIT_IDLE
,
1068 case SRP_PROCESSING
:
1069 if ((vscsi
->debit
> 0) ||
1070 !list_empty(&vscsi
->schedule_q
) ||
1071 !list_empty(&vscsi
->waiting_rsp
) ||
1072 !list_empty(&vscsi
->active_q
)) {
1073 dev_dbg(&vscsi
->dev
, "debit %d, sched %d, wait %d, active %d\n",
1075 (int)list_empty(&vscsi
->schedule_q
),
1076 (int)list_empty(&vscsi
->waiting_rsp
),
1077 (int)list_empty(&vscsi
->active_q
));
1078 dev_warn(&vscsi
->dev
, "connection lost with outstanding work\n");
1080 dev_dbg(&vscsi
->dev
, "trans_event: SRP Processing, but no outstanding work\n");
1083 ibmvscsis_post_disconnect(vscsi
, WAIT_IDLE
,
1088 case ERR_DISCONNECT
:
1089 case ERR_DISCONNECT_RECONNECT
:
1091 vscsi
->flags
|= (RESPONSE_Q_DOWN
| TRANS_EVENT
);
1096 case PREPARE_FOR_SUSPEND
:
1097 dev_dbg(&vscsi
->dev
, "Prep for Suspend, crq status = 0x%x\n",
1099 switch (vscsi
->state
) {
1100 case ERR_DISCONNECTED
:
1101 case WAIT_CONNECTION
:
1103 ibmvscsis_ready_for_suspend(vscsi
, false);
1105 case SRP_PROCESSING
:
1106 vscsi
->resume_state
= vscsi
->state
;
1107 vscsi
->flags
|= PREP_FOR_SUSPEND_PENDING
;
1108 if (crq
->status
== CRQ_ENTRY_OVERWRITTEN
)
1109 vscsi
->flags
|= PREP_FOR_SUSPEND_OVERWRITE
;
1110 ibmvscsis_post_disconnect(vscsi
, WAIT_IDLE
, 0);
1116 case ERR_DISCONNECT
:
1117 case ERR_DISCONNECT_RECONNECT
:
1119 dev_err(&vscsi
->dev
, "Invalid state for Prepare for Suspend Trans Event: 0x%x\n",
1125 case RESUME_FROM_SUSP
:
1126 dev_dbg(&vscsi
->dev
, "Resume from Suspend, crq status = 0x%x\n",
1128 if (vscsi
->flags
& PREP_FOR_SUSPEND_PENDING
) {
1129 vscsi
->flags
|= PREP_FOR_SUSPEND_ABORTED
;
1131 if ((crq
->status
== CRQ_ENTRY_OVERWRITTEN
) ||
1132 (vscsi
->flags
& PREP_FOR_SUSPEND_OVERWRITE
)) {
1133 ibmvscsis_post_disconnect(vscsi
,
1134 ERR_DISCONNECT_RECONNECT
,
1136 vscsi
->flags
&= ~PREP_FOR_SUSPEND_OVERWRITE
;
1143 dev_err(&vscsi
->dev
, "trans_event: invalid format %d\n",
1145 ibmvscsis_post_disconnect(vscsi
, ERR_DISCONNECT
,
1150 rc
= vscsi
->flags
& SCHEDULE_DISCONNECT
;
1152 dev_dbg(&vscsi
->dev
, "Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n",
1153 vscsi
->flags
, vscsi
->state
, rc
);
/**
 * ibmvscsis_poll_cmd_q() - Poll Command Queue
 * @vscsi:	Pointer to our adapter structure
 *
 * Called to handle command elements that may have arrived while
 * interrupts were disabled.
 *
 * EXECUTION ENVIRONMENT:
 *	intr_lock must be held
 */
1168 static void ibmvscsis_poll_cmd_q(struct scsi_info
*vscsi
)
1170 struct viosrp_crq
*crq
;
1175 dev_dbg(&vscsi
->dev
, "poll_cmd_q: flags 0x%x, state 0x%hx, q index %ud\n",
1176 vscsi
->flags
, vscsi
->state
, vscsi
->cmd_q
.index
);
1178 rc
= vscsi
->flags
& SCHEDULE_DISCONNECT
;
1179 crq
= vscsi
->cmd_q
.base_addr
+ vscsi
->cmd_q
.index
;
1185 vscsi
->cmd_q
.index
=
1186 (vscsi
->cmd_q
.index
+ 1) & vscsi
->cmd_q
.mask
;
1189 rc
= ibmvscsis_parse_command(vscsi
, crq
);
1191 if ((uint
)crq
->valid
== VALID_TRANS_EVENT
) {
1193 * must service the transport layer events even
1194 * in an error state, dont break out until all
1195 * the consecutive transport events have been
1198 rc
= ibmvscsis_trans_event(vscsi
, crq
);
1199 } else if (vscsi
->flags
& TRANS_EVENT
) {
1201 * if a tranport event has occurred leave
1202 * everything but transport events on the queue
1204 dev_dbg(&vscsi
->dev
, "poll_cmd_q, ignoring\n");
1207 * need to decrement the queue index so we can
1208 * look at the elment again
1210 if (vscsi
->cmd_q
.index
)
1211 vscsi
->cmd_q
.index
-= 1;
1214 * index is at 0 it just wrapped.
1215 * have it index last element in q
1217 vscsi
->cmd_q
.index
= vscsi
->cmd_q
.mask
;
1222 crq
->valid
= INVALIDATE_CMD_RESP_EL
;
1224 crq
= vscsi
->cmd_q
.base_addr
+ vscsi
->cmd_q
.index
;
1231 vio_enable_interrupts(vscsi
->dma_dev
);
1233 dev_dbg(&vscsi
->dev
, "poll_cmd_q, reenabling interrupts\n");
1241 dev_dbg(&vscsi
->dev
, "Leaving poll_cmd_q: rc %ld\n", rc
);
/**
 * ibmvscsis_free_cmd_qs() - Free elements in queue
 * @vscsi:	Pointer to our adapter structure
 *
 * Free all of the elements on all queues that are waiting for
 * whatever reason.
 *
 * PRECONDITION:
 *	Called with interrupt lock held
 */
static void ibmvscsis_free_cmd_qs(struct scsi_info *vscsi)
{
        struct ibmvscsis_cmd *cmd, *nxt;

        dev_dbg(&vscsi->dev, "free_cmd_qs: waiting_rsp empty %d, timer starter %d\n",
                (int)list_empty(&vscsi->waiting_rsp),
                vscsi->rsp_q_timer.started);

        list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
                list_del(&cmd->list);
                ibmvscsis_free_cmd_resources(vscsi, cmd);
        }
}
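/*
 * Added note, not in the original source: waiting_rsp holds commands whose
 * responses have not yet been handed back to the client. When the response
 * queue is declared down there is no way to deliver them, so they are simply
 * returned to the free list via ibmvscsis_free_cmd_resources() above.
 */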
/**
 * ibmvscsis_get_free_cmd() - Get free command from list
 * @vscsi:	Pointer to our adapter structure
 *
 * Must be called with interrupt lock held.
 */
static struct ibmvscsis_cmd *ibmvscsis_get_free_cmd(struct scsi_info *vscsi)
{
        struct ibmvscsis_cmd *cmd = NULL;
        struct iu_entry *iue;

        iue = srp_iu_get(&vscsi->target);
        if (iue) {
                cmd = list_first_entry_or_null(&vscsi->free_cmd,
                                               struct ibmvscsis_cmd, list);
                if (cmd) {
                        if (cmd->abort_cmd)
                                cmd->abort_cmd = NULL;
                        cmd->flags &= ~(DELAY_SEND);
                        list_del(&cmd->list);
                        cmd->iue = iue;
                        cmd->type = UNSET_TYPE;
                        memset(&cmd->se_cmd, 0, sizeof(cmd->se_cmd));
                } else {
                        srp_iu_put(iue);
                }
        }

        return cmd;
}
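/*
 * Illustrative sketch, not part of the original driver: every element taken
 * with ibmvscsis_get_free_cmd() is eventually handed back through
 * ibmvscsis_free_cmd_resources(), which releases the iu_entry and re-links
 * the command onto vscsi->free_cmd. A typical caller looks like:
 *
 *	cmd = ibmvscsis_get_free_cmd(vscsi);
 *	if (!cmd) {
 *		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
 *		return;
 *	}
 *	// ...use cmd, then on completion or error...
 *	ibmvscsis_free_cmd_resources(vscsi, cmd);
 */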
/**
 * ibmvscsis_adapter_idle() - Helper function to handle idle adapter
 * @vscsi:	Pointer to our adapter structure
 *
 * This function is called when the adapter is idle when the driver
 * is attempting to clear an error condition.
 * The adapter is considered busy if any of its cmd queues
 * are non-empty. This function can be invoked
 * from the off level disconnect function.
 *
 * EXECUTION ENVIRONMENT:
 *	Process environment called with interrupt lock held
 */
1312 static void ibmvscsis_adapter_idle(struct scsi_info
*vscsi
)
1314 int free_qs
= false;
1317 dev_dbg(&vscsi
->dev
, "adapter_idle: flags 0x%x, state 0x%hx\n",
1318 vscsi
->flags
, vscsi
->state
);
1320 /* Only need to free qs if we're disconnecting from client */
1321 if (vscsi
->state
!= WAIT_CONNECTION
|| vscsi
->flags
& TRANS_EVENT
)
1324 switch (vscsi
->state
) {
1326 ibmvscsis_free_command_q(vscsi
);
1329 if (vscsi
->flags
& CFG_SLEEPING
) {
1330 vscsi
->flags
&= ~CFG_SLEEPING
;
1331 complete(&vscsi
->unconfig
);
1334 case ERR_DISCONNECT_RECONNECT
:
1335 ibmvscsis_reset_queue(vscsi
);
1336 dev_dbg(&vscsi
->dev
, "adapter_idle, disc_rec: flags 0x%x\n",
1340 case ERR_DISCONNECT
:
1341 ibmvscsis_free_command_q(vscsi
);
1342 vscsi
->flags
&= ~(SCHEDULE_DISCONNECT
| DISCONNECT_SCHEDULED
);
1343 vscsi
->flags
|= RESPONSE_Q_DOWN
;
1344 if (vscsi
->tport
.enabled
)
1345 vscsi
->state
= ERR_DISCONNECTED
;
1347 vscsi
->state
= WAIT_ENABLED
;
1348 dev_dbg(&vscsi
->dev
, "adapter_idle, disc: flags 0x%x, state 0x%hx\n",
1349 vscsi
->flags
, vscsi
->state
);
1353 vscsi
->rsp_q_timer
.timer_pops
= 0;
1356 if (vscsi
->flags
& PREP_FOR_SUSPEND_PENDING
) {
1357 vscsi
->state
= vscsi
->resume_state
;
1358 vscsi
->resume_state
= 0;
1359 rc
= ibmvscsis_ready_for_suspend(vscsi
, true);
1360 vscsi
->flags
&= ~DISCONNECT_SCHEDULED
;
1363 } else if (vscsi
->flags
& TRANS_EVENT
) {
1364 vscsi
->state
= WAIT_CONNECTION
;
1365 vscsi
->flags
&= PRESERVE_FLAG_FIELDS
;
1367 vscsi
->state
= CONNECTED
;
1368 vscsi
->flags
&= ~DISCONNECT_SCHEDULED
;
1371 dev_dbg(&vscsi
->dev
, "adapter_idle, wait: flags 0x%x, state 0x%hx\n",
1372 vscsi
->flags
, vscsi
->state
);
1373 ibmvscsis_poll_cmd_q(vscsi
);
1376 case ERR_DISCONNECTED
:
1377 vscsi
->flags
&= ~DISCONNECT_SCHEDULED
;
1378 dev_dbg(&vscsi
->dev
, "adapter_idle, disconnected: flags 0x%x, state 0x%hx\n",
1379 vscsi
->flags
, vscsi
->state
);
1383 dev_err(&vscsi
->dev
, "adapter_idle: in invalid state %d\n",
1389 ibmvscsis_free_cmd_qs(vscsi
);
1392 * There is a timing window where we could lose a disconnect request.
1393 * The known path to this window occurs during the DISCONNECT_RECONNECT
1394 * case above: reset_queue calls free_command_q, which will release the
1395 * interrupt lock. During that time, a new post_disconnect call can be
1396 * made with a "more severe" state (DISCONNECT or UNCONFIGURING).
1397 * Because the DISCONNECT_SCHEDULED flag is already set, post_disconnect
1398 * will only set the new_state. Now free_command_q reacquires the intr
1399 * lock and clears the DISCONNECT_SCHEDULED flag (using PRESERVE_FLAG_
1400 * FIELDS), and the disconnect is lost. This is particularly bad when
1401 * the new disconnect was for UNCONFIGURING, since the unconfigure hangs
1403 * Fix is that free command queue sets acr state and acr flags if there
1404 * is a change under the lock
1405 * note free command queue writes to this state it clears it
1406 * before releasing the lock, different drivers call the free command
1407 * queue different times so dont initialize above
1409 if (vscsi
->phyp_acr_state
!= 0) {
1411 * set any bits in flags that may have been cleared by
1412 * a call to free command queue in switch statement
1415 vscsi
->flags
|= vscsi
->phyp_acr_flags
;
1416 ibmvscsis_post_disconnect(vscsi
, vscsi
->phyp_acr_state
, 0);
1417 vscsi
->phyp_acr_state
= 0;
1418 vscsi
->phyp_acr_flags
= 0;
1420 dev_dbg(&vscsi
->dev
, "adapter_idle: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
1421 vscsi
->flags
, vscsi
->state
, vscsi
->phyp_acr_flags
,
1422 vscsi
->phyp_acr_state
);
1425 dev_dbg(&vscsi
->dev
, "Leaving adapter_idle: flags 0x%x, state 0x%hx, new_state 0x%x\n",
1426 vscsi
->flags
, vscsi
->state
, vscsi
->new_state
);
/**
 * ibmvscsis_copy_crq_packet() - Copy CRQ Packet
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Pointer to command element to use to process the request
 * @crq:	Pointer to CRQ entry containing the request
 *
 * Copy the srp information unit from the hosted
 * partition using remote dma
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, interrupt lock held
 */
1441 static long ibmvscsis_copy_crq_packet(struct scsi_info
*vscsi
,
1442 struct ibmvscsis_cmd
*cmd
,
1443 struct viosrp_crq
*crq
)
1445 struct iu_entry
*iue
= cmd
->iue
;
1449 len
= be16_to_cpu(crq
->IU_length
);
1450 if ((len
> SRP_MAX_IU_LEN
) || (len
== 0)) {
1451 dev_err(&vscsi
->dev
, "copy_crq: Invalid len %d passed", len
);
1452 ibmvscsis_post_disconnect(vscsi
, ERR_DISCONNECT_RECONNECT
, 0);
1453 return SRP_VIOLATION
;
1456 rc
= h_copy_rdma(len
, vscsi
->dds
.window
[REMOTE
].liobn
,
1457 be64_to_cpu(crq
->IU_data_ptr
),
1458 vscsi
->dds
.window
[LOCAL
].liobn
, iue
->sbuf
->dma
);
1462 cmd
->init_time
= mftb();
1463 iue
->remote_token
= crq
->IU_data_ptr
;
1465 dev_dbg(&vscsi
->dev
, "copy_crq: ioba 0x%llx, init_time 0x%llx\n",
1466 be64_to_cpu(crq
->IU_data_ptr
), cmd
->init_time
);
1469 if (connection_broken(vscsi
))
1470 ibmvscsis_post_disconnect(vscsi
,
1471 ERR_DISCONNECT_RECONNECT
,
1475 ibmvscsis_post_disconnect(vscsi
,
1476 ERR_DISCONNECT_RECONNECT
, 0);
1478 dev_err(&vscsi
->dev
, "copy_crq: h_copy_rdma failed, rc %ld\n",
1484 dev_err(&vscsi
->dev
, "copy_crq: h_copy_rdma failed, rc %ld\n",
1486 ibmvscsis_post_disconnect(vscsi
, ERR_DISCONNECT_RECONNECT
, 0);
/**
 * ibmvscsis_adapter_info - Service an Adapter Info MAnagement Data gram
 * @vscsi:	Pointer to our adapter structure
 * @iue:	Information Unit containing the Adapter Info MAD request
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt adapter lock is held
 */
1501 static long ibmvscsis_adapter_info(struct scsi_info
*vscsi
,
1502 struct iu_entry
*iue
)
1504 struct viosrp_adapter_info
*mad
= &vio_iu(iue
)->mad
.adapter_info
;
1505 struct mad_adapter_info_data
*info
;
1510 mad
->common
.status
= cpu_to_be16(VIOSRP_MAD_SUCCESS
);
1512 if (be16_to_cpu(mad
->common
.length
) > sizeof(*info
)) {
1513 mad
->common
.status
= cpu_to_be16(VIOSRP_MAD_FAILED
);
1517 info
= dma_alloc_coherent(&vscsi
->dma_dev
->dev
, sizeof(*info
), &token
,
1520 dev_err(&vscsi
->dev
, "bad dma_alloc_coherent %p\n",
1522 mad
->common
.status
= cpu_to_be16(VIOSRP_MAD_FAILED
);
1526 /* Get remote info */
1527 rc
= h_copy_rdma(be16_to_cpu(mad
->common
.length
),
1528 vscsi
->dds
.window
[REMOTE
].liobn
,
1529 be64_to_cpu(mad
->buffer
),
1530 vscsi
->dds
.window
[LOCAL
].liobn
, token
);
1532 if (rc
!= H_SUCCESS
) {
1533 if (rc
== H_PERMISSION
) {
1534 if (connection_broken(vscsi
))
1535 flag_bits
= (RESPONSE_Q_DOWN
| CLIENT_FAILED
);
1537 dev_warn(&vscsi
->dev
, "adapter_info: h_copy_rdma from client failed, rc %ld\n",
1539 dev_dbg(&vscsi
->dev
, "adapter_info: ioba 0x%llx, flags 0x%x, flag_bits 0x%x\n",
1540 be64_to_cpu(mad
->buffer
), vscsi
->flags
, flag_bits
);
1541 ibmvscsis_post_disconnect(vscsi
, ERR_DISCONNECT_RECONNECT
,
1547 * Copy client info, but ignore partition number, which we
1548 * already got from phyp - unless we failed to get it from
1549 * phyp (e.g. if we're running on a p5 system).
1551 if (vscsi
->client_data
.partition_number
== 0)
1552 vscsi
->client_data
.partition_number
=
1553 be32_to_cpu(info
->partition_number
);
1554 strscpy(vscsi
->client_data
.srp_version
, info
->srp_version
,
1555 sizeof(vscsi
->client_data
.srp_version
));
1556 strscpy(vscsi
->client_data
.partition_name
, info
->partition_name
,
1557 sizeof(vscsi
->client_data
.partition_name
));
1558 vscsi
->client_data
.mad_version
= be32_to_cpu(info
->mad_version
);
1559 vscsi
->client_data
.os_type
= be32_to_cpu(info
->os_type
);
1562 strscpy_pad(info
->srp_version
, SRP_VERSION
,
1563 sizeof(info
->srp_version
));
1564 strscpy_pad(info
->partition_name
, vscsi
->dds
.partition_name
,
1565 sizeof(info
->partition_name
));
1566 info
->partition_number
= cpu_to_be32(vscsi
->dds
.partition_num
);
1567 info
->mad_version
= cpu_to_be32(MAD_VERSION_1
);
1568 info
->os_type
= cpu_to_be32(LINUX
);
1569 memset(&info
->port_max_txu
[0], 0, sizeof(info
->port_max_txu
));
1570 info
->port_max_txu
[0] = cpu_to_be32(MAX_TXU
);
1573 rc
= h_copy_rdma(sizeof(*info
), vscsi
->dds
.window
[LOCAL
].liobn
,
1574 token
, vscsi
->dds
.window
[REMOTE
].liobn
,
1575 be64_to_cpu(mad
->buffer
));
1583 if (connection_broken(vscsi
))
1584 flag_bits
= (RESPONSE_Q_DOWN
| CLIENT_FAILED
);
1587 dev_err(&vscsi
->dev
, "adapter_info: h_copy_rdma to client failed, rc %ld\n",
1589 ibmvscsis_post_disconnect(vscsi
,
1590 ERR_DISCONNECT_RECONNECT
,
1596 dma_free_coherent(&vscsi
->dma_dev
->dev
, sizeof(*info
), info
, token
);
1597 dev_dbg(&vscsi
->dev
, "Leaving adapter_info, rc %ld\n", rc
);
/**
 * ibmvscsis_cap_mad() - Service a Capabilities MAnagement Data gram
 * @vscsi:	Pointer to our adapter structure
 * @iue:	Information Unit containing the Capabilities MAD request
 *
 * NOTE: if you return an error from this routine you must be
 * disconnecting or you will cause a hang
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt called with adapter lock held
 */
1613 static int ibmvscsis_cap_mad(struct scsi_info
*vscsi
, struct iu_entry
*iue
)
1615 struct viosrp_capabilities
*mad
= &vio_iu(iue
)->mad
.capabilities
;
1616 struct capabilities
*cap
;
1617 struct mad_capability_common
*common
;
1619 u16 olen
, len
, status
, min_len
, cap_len
;
1624 olen
= be16_to_cpu(mad
->common
.length
);
1626 * struct capabilities hardcodes a couple capabilities after the
1627 * header, but the capabilities can actually be in any order.
1629 min_len
= offsetof(struct capabilities
, migration
);
1630 if ((olen
< min_len
) || (olen
> PAGE_SIZE
)) {
1631 dev_warn(&vscsi
->dev
, "cap_mad: invalid len %d\n", olen
);
1632 mad
->common
.status
= cpu_to_be16(VIOSRP_MAD_FAILED
);
1636 cap
= dma_alloc_coherent(&vscsi
->dma_dev
->dev
, olen
, &token
,
1639 dev_err(&vscsi
->dev
, "bad dma_alloc_coherent %p\n",
1641 mad
->common
.status
= cpu_to_be16(VIOSRP_MAD_FAILED
);
1644 rc
= h_copy_rdma(olen
, vscsi
->dds
.window
[REMOTE
].liobn
,
1645 be64_to_cpu(mad
->buffer
),
1646 vscsi
->dds
.window
[LOCAL
].liobn
, token
);
1647 if (rc
== H_SUCCESS
) {
1648 strscpy_pad(cap
->name
, dev_name(&vscsi
->dma_dev
->dev
),
1651 len
= olen
- min_len
;
1652 status
= VIOSRP_MAD_SUCCESS
;
1653 common
= (struct mad_capability_common
*)&cap
->migration
;
1655 while ((len
> 0) && (status
== VIOSRP_MAD_SUCCESS
) && !rc
) {
1656 dev_dbg(&vscsi
->dev
, "cap_mad: len left %hd, cap type %d, cap len %hd\n",
1657 len
, be32_to_cpu(common
->cap_type
),
1658 be16_to_cpu(common
->length
));
1660 cap_len
= be16_to_cpu(common
->length
);
1661 if (cap_len
> len
) {
1662 dev_err(&vscsi
->dev
, "cap_mad: cap len mismatch with total len\n");
1663 status
= VIOSRP_MAD_FAILED
;
1668 dev_err(&vscsi
->dev
, "cap_mad: cap len is 0\n");
1669 status
= VIOSRP_MAD_FAILED
;
1673 switch (common
->cap_type
) {
1675 dev_dbg(&vscsi
->dev
, "cap_mad: unsupported capability\n");
1676 common
->server_support
= 0;
1677 flag
= cpu_to_be32((u32
)CAP_LIST_SUPPORTED
);
1678 cap
->flags
&= ~flag
;
1682 len
= len
- cap_len
;
1683 common
= (struct mad_capability_common
*)
1684 ((char *)common
+ cap_len
);
1687 mad
->common
.status
= cpu_to_be16(status
);
1690 rc
= h_copy_rdma(olen
, vscsi
->dds
.window
[LOCAL
].liobn
, token
,
1691 vscsi
->dds
.window
[REMOTE
].liobn
,
1692 be64_to_cpu(mad
->buffer
));
1694 if (rc
!= H_SUCCESS
) {
1695 dev_dbg(&vscsi
->dev
, "cap_mad: failed to copy to client, rc %ld\n",
1698 if (rc
== H_PERMISSION
) {
1699 if (connection_broken(vscsi
))
1700 flag_bits
= (RESPONSE_Q_DOWN
|
1704 dev_warn(&vscsi
->dev
, "cap_mad: error copying data to client, rc %ld\n",
1706 ibmvscsis_post_disconnect(vscsi
,
1707 ERR_DISCONNECT_RECONNECT
,
1712 dma_free_coherent(&vscsi
->dma_dev
->dev
, olen
, cap
, token
);
1714 dev_dbg(&vscsi
->dev
, "Leaving cap_mad, rc %ld, client_cap 0x%x\n",
1715 rc
, vscsi
->client_cap
);
/**
 * ibmvscsis_process_mad() - Service a MAnagement Data gram
 * @vscsi:	Pointer to our adapter structure
 * @iue:	Information Unit containing the MAD request
 *
 * Must be called with interrupt lock held.
 */
1727 static long ibmvscsis_process_mad(struct scsi_info
*vscsi
, struct iu_entry
*iue
)
1729 struct mad_common
*mad
= (struct mad_common
*)&vio_iu(iue
)->mad
;
1730 struct viosrp_empty_iu
*empty
;
1731 long rc
= ADAPT_SUCCESS
;
1733 switch (be32_to_cpu(mad
->type
)) {
1734 case VIOSRP_EMPTY_IU_TYPE
:
1735 empty
= &vio_iu(iue
)->mad
.empty_iu
;
1736 vscsi
->empty_iu_id
= be64_to_cpu(empty
->buffer
);
1737 vscsi
->empty_iu_tag
= be64_to_cpu(empty
->common
.tag
);
1738 mad
->status
= cpu_to_be16(VIOSRP_MAD_SUCCESS
);
1740 case VIOSRP_ADAPTER_INFO_TYPE
:
1741 rc
= ibmvscsis_adapter_info(vscsi
, iue
);
1743 case VIOSRP_CAPABILITIES_TYPE
:
1744 rc
= ibmvscsis_cap_mad(vscsi
, iue
);
1746 case VIOSRP_ENABLE_FAST_FAIL
:
1747 if (vscsi
->state
== CONNECTED
) {
1748 vscsi
->fast_fail
= true;
1749 mad
->status
= cpu_to_be16(VIOSRP_MAD_SUCCESS
);
1751 dev_warn(&vscsi
->dev
, "fast fail mad sent after login\n");
1752 mad
->status
= cpu_to_be16(VIOSRP_MAD_FAILED
);
1756 mad
->status
= cpu_to_be16(VIOSRP_MAD_NOT_SUPPORTED
);
/**
 * srp_snd_msg_failed() - Handle an error when sending a response
 * @vscsi:	Pointer to our adapter structure
 * @rc:		The return code from the h_send_crq command
 *
 * Must be called with interrupt lock held.
 */
1770 static void srp_snd_msg_failed(struct scsi_info
*vscsi
, long rc
)
1774 if (rc
!= H_DROPPED
) {
1775 ibmvscsis_free_cmd_qs(vscsi
);
1778 vscsi
->flags
|= CLIENT_FAILED
;
1780 /* don't flag the same problem multiple times */
1781 if (!(vscsi
->flags
& RESPONSE_Q_DOWN
)) {
1782 vscsi
->flags
|= RESPONSE_Q_DOWN
;
1783 if (!(vscsi
->state
& (ERR_DISCONNECT
|
1784 ERR_DISCONNECT_RECONNECT
|
1785 ERR_DISCONNECTED
| UNDEFINED
))) {
1786 dev_err(&vscsi
->dev
, "snd_msg_failed: setting RESPONSE_Q_DOWN, state 0x%hx, flags 0x%x, rc %ld\n",
1787 vscsi
->state
, vscsi
->flags
, rc
);
1789 ibmvscsis_post_disconnect(vscsi
,
1790 ERR_DISCONNECT_RECONNECT
, 0);
1796 * The response queue is full.
1797 * If the server is processing SRP requests, i.e.
1798 * the client has successfully done an
1799 * SRP_LOGIN, then it will wait forever for room in
1800 * the queue. However if the system admin
1801 * is attempting to unconfigure the server then one
1802 * or more children will be in a state where
1803 * they are being removed. So if there is even one
1804 * child being removed then the driver assumes
1805 * the system admin is attempting to break the
1806 * connection with the client and MAX_TIMER_POPS
1809 if ((vscsi
->rsp_q_timer
.timer_pops
< MAX_TIMER_POPS
) ||
1810 (vscsi
->state
== SRP_PROCESSING
)) {
1811 dev_dbg(&vscsi
->dev
, "snd_msg_failed: response queue full, flags 0x%x, timer started %d, pops %d\n",
1812 vscsi
->flags
, (int)vscsi
->rsp_q_timer
.started
,
1813 vscsi
->rsp_q_timer
.timer_pops
);
1816 * Check if the timer is running; if it
1817 * is not then start it up.
1819 if (!vscsi
->rsp_q_timer
.started
) {
1820 if (vscsi
->rsp_q_timer
.timer_pops
<
1822 kt
= WAIT_NANO_SECONDS
;
1825 * slide the timeslice if the maximum
1826 * timer pops have already happened
1828 kt
= ktime_set(WAIT_SECONDS
, 0);
1831 vscsi
->rsp_q_timer
.started
= true;
1832 hrtimer_start(&vscsi
->rsp_q_timer
.timer
, kt
,
1837 * TBD: Do we need to worry about this? Need to get
1841 * waited a long time and it appears the system admin
1842 * is bring this driver down
1844 vscsi
->flags
|= RESPONSE_Q_DOWN
;
1845 ibmvscsis_free_cmd_qs(vscsi
);
1847 * if the driver is already attempting to disconnect
1848 * from the client and has already logged an error
1849 * trace this event but don't put it in the error log
1851 if (!(vscsi
->state
& (ERR_DISCONNECT
|
1852 ERR_DISCONNECT_RECONNECT
|
1853 ERR_DISCONNECTED
| UNDEFINED
))) {
1854 dev_err(&vscsi
->dev
, "client crq full too long\n");
1855 ibmvscsis_post_disconnect(vscsi
,
1856 ERR_DISCONNECT_RECONNECT
,
/**
 * ibmvscsis_send_messages() - Send a Response
 * @vscsi:	Pointer to our adapter structure
 *
 * Send a response, first checking the waiting queue. Responses are
 * sent in order they are received. If the response cannot be sent,
 * because the client queue is full, it stays on the waiting queue.
 *
 * PRECONDITION:
 *	Called with interrupt lock held
 */
1873 static void ibmvscsis_send_messages(struct scsi_info
*vscsi
)
1875 struct viosrp_crq empty_crq
= { };
1876 struct viosrp_crq
*crq
= &empty_crq
;
1877 struct ibmvscsis_cmd
*cmd
, *nxt
;
1878 long rc
= ADAPT_SUCCESS
;
1881 if (!(vscsi
->flags
& RESPONSE_Q_DOWN
)) {
1884 list_for_each_entry_safe(cmd
, nxt
, &vscsi
->waiting_rsp
,
1887 * Check to make sure abort cmd gets processed
1888 * prior to the abort tmr cmd
1890 if (cmd
->flags
& DELAY_SEND
)
1893 if (cmd
->abort_cmd
) {
1895 cmd
->abort_cmd
->flags
&= ~(DELAY_SEND
);
1896 cmd
->abort_cmd
= NULL
;
1900 * If CMD_T_ABORTED w/o CMD_T_TAS scenarios and
1901 * the case where LIO issued a
1902 * ABORT_TASK: Sending TMR_TASK_DOES_NOT_EXIST
1903 * case then we dont send a response, since it
1906 if (cmd
->se_cmd
.transport_state
& CMD_T_ABORTED
&&
1907 !(cmd
->se_cmd
.transport_state
& CMD_T_TAS
)) {
1908 list_del(&cmd
->list
);
1909 ibmvscsis_free_cmd_resources(vscsi
,
1912 * With a successfully aborted op
1913 * through LIO we want to increment the
1914 * the vscsi credit so that when we dont
1915 * send a rsp to the original scsi abort
1916 * op (h_send_crq), but the tm rsp to
1917 * the abort is sent, the credit is
1918 * correctly sent with the abort tm rsp.
1919 * We would need 1 for the abort tm rsp
1920 * and 1 credit for the aborted scsi op.
1921 * Thus we need to increment here.
1922 * Also we want to increment the credit
1923 * here because we want to make sure
1924 * cmd is actually released first
1925 * otherwise the client will think it
1926 * it can send a new cmd, and we could
1927 * find ourselves short of cmd elements.
1931 crq
->valid
= VALID_CMD_RESP_EL
;
1932 crq
->format
= cmd
->rsp
.format
;
1934 if (cmd
->flags
& CMD_FAST_FAIL
)
1935 crq
->status
= VIOSRP_ADAPTER_FAIL
;
1937 crq
->IU_length
= cpu_to_be16(cmd
->rsp
.len
);
1939 rc
= h_send_crq(vscsi
->dma_dev
->unit_address
,
1940 be64_to_cpu(crq
->high
),
1941 be64_to_cpu(cmd
->rsp
.tag
));
1943 dev_dbg(&vscsi
->dev
, "send_messages: cmd %p, tag 0x%llx, rc %ld\n",
1944 cmd
, be64_to_cpu(cmd
->rsp
.tag
),
1947 /* if all ok free up the command
1950 if (rc
== H_SUCCESS
) {
1951 /* some movement has occurred */
1952 vscsi
->rsp_q_timer
.timer_pops
= 0;
1953 list_del(&cmd
->list
);
1955 ibmvscsis_free_cmd_resources(vscsi
,
1958 srp_snd_msg_failed(vscsi
, rc
);
1967 * The timer could pop with the queue empty. If
1968 * this happens, rc will always indicate a
1969 * success; clear the pop count.
1971 vscsi
->rsp_q_timer
.timer_pops
= 0;
1974 ibmvscsis_free_cmd_qs(vscsi
);
1978 /* Called with intr lock held */
1979 static void ibmvscsis_send_mad_resp(struct scsi_info
*vscsi
,
1980 struct ibmvscsis_cmd
*cmd
,
1981 struct viosrp_crq
*crq
)
1983 struct iu_entry
*iue
= cmd
->iue
;
1984 struct mad_common
*mad
= (struct mad_common
*)&vio_iu(iue
)->mad
;
1989 rc
= h_copy_rdma(sizeof(struct mad_common
),
1990 vscsi
->dds
.window
[LOCAL
].liobn
, iue
->sbuf
->dma
,
1991 vscsi
->dds
.window
[REMOTE
].liobn
,
1992 be64_to_cpu(crq
->IU_data_ptr
));
1994 cmd
->rsp
.format
= VIOSRP_MAD_FORMAT
;
1995 cmd
->rsp
.len
= sizeof(struct mad_common
);
1996 cmd
->rsp
.tag
= mad
->tag
;
1997 list_add_tail(&cmd
->list
, &vscsi
->waiting_rsp
);
1998 ibmvscsis_send_messages(vscsi
);
2000 dev_dbg(&vscsi
->dev
, "Error sending mad response, rc %ld\n",
2002 if (rc
== H_PERMISSION
) {
2003 if (connection_broken(vscsi
))
2004 flag_bits
= (RESPONSE_Q_DOWN
| CLIENT_FAILED
);
2006 dev_err(&vscsi
->dev
, "mad: failed to copy to client, rc %ld\n",
2009 ibmvscsis_free_cmd_resources(vscsi
, cmd
);
2010 ibmvscsis_post_disconnect(vscsi
, ERR_DISCONNECT_RECONNECT
,
/**
 * ibmvscsis_mad() - Service a MAnagement Data gram.
 * @vscsi:	Pointer to our adapter structure
 * @crq:	Pointer to the CRQ entry containing the MAD request
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, called with adapter lock held
 */
2023 static long ibmvscsis_mad(struct scsi_info
*vscsi
, struct viosrp_crq
*crq
)
2025 struct iu_entry
*iue
;
2026 struct ibmvscsis_cmd
*cmd
;
2027 struct mad_common
*mad
;
2028 long rc
= ADAPT_SUCCESS
;
2030 switch (vscsi
->state
) {
2032 * We have not exchanged Init Msgs yet, so this MAD was sent
2033 * before the last Transport Event; client will not be
2034 * expecting a response.
2036 case WAIT_CONNECTION
:
2037 dev_dbg(&vscsi
->dev
, "mad: in Wait Connection state, ignoring MAD, flags %d\n",
2039 return ADAPT_SUCCESS
;
2041 case SRP_PROCESSING
:
2046 * We should never get here while we're in these states.
2047 * Just log an error and get out.
2051 case ERR_DISCONNECT
:
2052 case ERR_DISCONNECT_RECONNECT
:
2054 dev_err(&vscsi
->dev
, "mad: invalid adapter state %d for mad\n",
2056 return ADAPT_SUCCESS
;
2059 cmd
= ibmvscsis_get_free_cmd(vscsi
);
2061 dev_err(&vscsi
->dev
, "mad: failed to get cmd, debit %d\n",
2063 ibmvscsis_post_disconnect(vscsi
, ERR_DISCONNECT_RECONNECT
, 0);
2067 cmd
->type
= ADAPTER_MAD
;
2069 rc
= ibmvscsis_copy_crq_packet(vscsi
, cmd
, crq
);
2071 mad
= (struct mad_common
*)&vio_iu(iue
)->mad
;
2073 dev_dbg(&vscsi
->dev
, "mad: type %d\n", be32_to_cpu(mad
->type
));
2075 rc
= ibmvscsis_process_mad(vscsi
, iue
);
2077 dev_dbg(&vscsi
->dev
, "mad: status %hd, rc %ld\n",
2078 be16_to_cpu(mad
->status
), rc
);
2081 ibmvscsis_send_mad_resp(vscsi
, cmd
, crq
);
2083 ibmvscsis_free_cmd_resources(vscsi
, cmd
);
2086 dev_dbg(&vscsi
->dev
, "Leaving mad, rc %ld\n", rc
);
/**
 * ibmvscsis_login_rsp() - Create/copy a login response notice to the client
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Pointer to the command for the SRP Login request
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, interrupt lock held
 */
2098 static long ibmvscsis_login_rsp(struct scsi_info
*vscsi
,
2099 struct ibmvscsis_cmd
*cmd
)
2101 struct iu_entry
*iue
= cmd
->iue
;
2102 struct srp_login_rsp
*rsp
= &vio_iu(iue
)->srp
.login_rsp
;
2103 struct format_code
*fmt
;
2105 long rc
= ADAPT_SUCCESS
;
2107 memset(rsp
, 0, sizeof(struct srp_login_rsp
));
2109 rsp
->opcode
= SRP_LOGIN_RSP
;
2110 rsp
->req_lim_delta
= cpu_to_be32(vscsi
->request_limit
);
2111 rsp
->tag
= cmd
->rsp
.tag
;
2112 rsp
->max_it_iu_len
= cpu_to_be32(SRP_MAX_IU_LEN
);
2113 rsp
->max_ti_iu_len
= cpu_to_be32(SRP_MAX_IU_LEN
);
2114 fmt
= (struct format_code
*)&rsp
->buf_fmt
;
2115 fmt
->buffers
= SUPPORTED_FORMATS
;
2118 cmd
->rsp
.len
= sizeof(struct srp_login_rsp
);
2121 rc
= h_copy_rdma(cmd
->rsp
.len
, vscsi
->dds
.window
[LOCAL
].liobn
,
2122 iue
->sbuf
->dma
, vscsi
->dds
.window
[REMOTE
].liobn
,
2123 be64_to_cpu(iue
->remote_token
));
2130 if (connection_broken(vscsi
))
2131 flag_bits
= RESPONSE_Q_DOWN
| CLIENT_FAILED
;
2132 dev_err(&vscsi
->dev
, "login_rsp: error copying to client, rc %ld\n",
2134 ibmvscsis_post_disconnect(vscsi
, ERR_DISCONNECT_RECONNECT
,
2140 dev_err(&vscsi
->dev
, "login_rsp: error copying to client, rc %ld\n",
2142 ibmvscsis_post_disconnect(vscsi
, ERR_DISCONNECT_RECONNECT
, 0);
/**
 * ibmvscsis_srp_login_rej() - Create/copy a login rejection notice to client
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Pointer to the command for the SRP Login request
 * @reason:	The reason the SRP Login is being rejected, per SRP protocol
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, interrupt lock held
 */
2158 static long ibmvscsis_srp_login_rej(struct scsi_info
*vscsi
,
2159 struct ibmvscsis_cmd
*cmd
, u32 reason
)
2161 struct iu_entry
*iue
= cmd
->iue
;
2162 struct srp_login_rej
*rej
= &vio_iu(iue
)->srp
.login_rej
;
2163 struct format_code
*fmt
;
2165 long rc
= ADAPT_SUCCESS
;
2167 memset(rej
, 0, sizeof(*rej
));
2169 rej
->opcode
= SRP_LOGIN_REJ
;
2170 rej
->reason
= cpu_to_be32(reason
);
2171 rej
->tag
= cmd
->rsp
.tag
;
2172 fmt
= (struct format_code
*)&rej
->buf_fmt
;
2173 fmt
->buffers
= SUPPORTED_FORMATS
;
2175 cmd
->rsp
.len
= sizeof(*rej
);
2178 rc
= h_copy_rdma(cmd
->rsp
.len
, vscsi
->dds
.window
[LOCAL
].liobn
,
2179 iue
->sbuf
->dma
, vscsi
->dds
.window
[REMOTE
].liobn
,
2180 be64_to_cpu(iue
->remote_token
));
2186 if (connection_broken(vscsi
))
2187 flag_bits
= RESPONSE_Q_DOWN
| CLIENT_FAILED
;
2188 dev_err(&vscsi
->dev
, "login_rej: error copying to client, rc %ld\n",
2190 ibmvscsis_post_disconnect(vscsi
, ERR_DISCONNECT_RECONNECT
,
2196 dev_err(&vscsi
->dev
, "login_rej: error copying to client, rc %ld\n",
2198 ibmvscsis_post_disconnect(vscsi
, ERR_DISCONNECT_RECONNECT
, 0);
2205 static int ibmvscsis_make_nexus(struct ibmvscsis_tport
*tport
)
2207 char *name
= tport
->tport_name
;
2208 struct ibmvscsis_nexus
*nexus
;
2209 struct scsi_info
*vscsi
= container_of(tport
, struct scsi_info
, tport
);
2212 if (tport
->ibmv_nexus
) {
2213 dev_dbg(&vscsi
->dev
, "tport->ibmv_nexus already exists\n");
2217 nexus
= kzalloc(sizeof(*nexus
), GFP_KERNEL
);
2219 dev_err(&vscsi
->dev
, "Unable to allocate struct ibmvscsis_nexus\n");
2223 nexus
->se_sess
= target_setup_session(&tport
->se_tpg
, 0, 0,
2224 TARGET_PROT_NORMAL
, name
, nexus
,
2226 if (IS_ERR(nexus
->se_sess
)) {
2227 rc
= PTR_ERR(nexus
->se_sess
);
2228 goto transport_init_fail
;
2231 tport
->ibmv_nexus
= nexus
;
2235 transport_init_fail
:
2240 static int ibmvscsis_drop_nexus(struct ibmvscsis_tport
*tport
)
2242 struct se_session
*se_sess
;
2243 struct ibmvscsis_nexus
*nexus
;
2245 nexus
= tport
->ibmv_nexus
;
2249 se_sess
= nexus
->se_sess
;
2254 * Release the SCSI I_T Nexus to the emulated ibmvscsis Target Port
2256 target_remove_session(se_sess
);
2257 tport
->ibmv_nexus
= NULL
;
/**
 * ibmvscsis_srp_login() - Process an SRP Login Request
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Command element to use to process the SRP Login request
 * @crq:	Pointer to CRQ entry containing the SRP Login request
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, called with interrupt lock held
 */
2272 static long ibmvscsis_srp_login(struct scsi_info
*vscsi
,
2273 struct ibmvscsis_cmd
*cmd
,
2274 struct viosrp_crq
*crq
)
2276 struct iu_entry
*iue
= cmd
->iue
;
2277 struct srp_login_req
*req
= &vio_iu(iue
)->srp
.login_req
;
2279 __be64 id_extension
;
2282 struct format_code
*fmt
;
2284 long rc
= ADAPT_SUCCESS
;
2286 iport
= (struct port_id
*)req
->initiator_port_id
;
2287 tport
= (struct port_id
*)req
->target_port_id
;
2288 fmt
= (struct format_code
*)&req
->req_buf_fmt
;
2289 if (be32_to_cpu(req
->req_it_iu_len
) > SRP_MAX_IU_LEN
)
2290 reason
= SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE
;
2291 else if (be32_to_cpu(req
->req_it_iu_len
) < 64)
2292 reason
= SRP_LOGIN_REJ_UNABLE_ESTABLISH_CHANNEL
;
2293 else if ((be64_to_cpu(iport
->id_extension
) > (MAX_NUM_PORTS
- 1)) ||
2294 (be64_to_cpu(tport
->id_extension
) > (MAX_NUM_PORTS
- 1)))
2295 reason
= SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL
;
2296 else if (req
->req_flags
& SRP_MULTICHAN_MULTI
)
2297 reason
= SRP_LOGIN_REJ_MULTI_CHANNEL_UNSUPPORTED
;
2298 else if (fmt
->buffers
& (~SUPPORTED_FORMATS
))
2299 reason
= SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT
;
2300 else if ((fmt
->buffers
& SUPPORTED_FORMATS
) == 0)
2301 reason
= SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT
;
2303 if (vscsi
->state
== SRP_PROCESSING
)
2304 reason
= SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED
;
2306 rc
= ibmvscsis_make_nexus(&vscsi
->tport
);
2308 reason
= SRP_LOGIN_REJ_UNABLE_ESTABLISH_CHANNEL
;
2310 cmd
->rsp
.format
= VIOSRP_SRP_FORMAT
;
2311 cmd
->rsp
.tag
= req
->tag
;
2313 dev_dbg(&vscsi
->dev
, "srp_login: reason 0x%x\n", reason
);
2316 rc
= ibmvscsis_srp_login_rej(vscsi
, cmd
, reason
);
2318 rc
= ibmvscsis_login_rsp(vscsi
, cmd
);
2322 vscsi
->state
= SRP_PROCESSING
;
2324 list_add_tail(&cmd
->list
, &vscsi
->waiting_rsp
);
2325 ibmvscsis_send_messages(vscsi
);
2327 ibmvscsis_free_cmd_resources(vscsi
, cmd
);
2330 dev_dbg(&vscsi
->dev
, "Leaving srp_login, rc %ld\n", rc
);
/**
 * ibmvscsis_srp_i_logout() - Helper Function to close I_T Nexus
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Command element to use to process the Implicit Logout request
 * @crq:	Pointer to CRQ entry containing the Implicit Logout request
 *
 * Do the logic to close the I_T nexus.  This function may not
 * behave to specification.
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, interrupt lock held
 */
static long ibmvscsis_srp_i_logout(struct scsi_info *vscsi,
				   struct ibmvscsis_cmd *cmd,
				   struct viosrp_crq *crq)
{
	struct iu_entry *iue = cmd->iue;
	struct srp_i_logout *log_out = &vio_iu(iue)->srp.i_logout;

	if ((vscsi->debit > 0) || !list_empty(&vscsi->schedule_q) ||
	    !list_empty(&vscsi->waiting_rsp)) {
		dev_err(&vscsi->dev, "i_logout: outstanding work\n");
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
	} else {
		cmd->rsp.format = SRP_FORMAT;
		cmd->rsp.tag = log_out->tag;
		cmd->rsp.len = sizeof(struct mad_common);
		list_add_tail(&cmd->list, &vscsi->waiting_rsp);
		ibmvscsis_send_messages(vscsi);

		ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, 0);
	}

	return ADAPT_SUCCESS;
}
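/*
 * ibmvscsis_srp_cmd() - Dispatch a single SRP IU taken from the CRQ
 *
 * Checks the remaining request credit (request_limit - debit), pulls a free
 * command element, copies the CRQ payload into local memory, and then either
 * handles the IU inline (login, implicit logout) or queues it to the
 * adapter's work queue (SCSI CDBs and task management requests).
 */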
/* Called with intr lock held */
static void ibmvscsis_srp_cmd(struct scsi_info *vscsi, struct viosrp_crq *crq)
{
	struct ibmvscsis_cmd *cmd;
	struct iu_entry *iue;
	struct srp_cmd *srp;
	struct srp_tsk_mgmt *tsk;
	long rc;

	if (vscsi->request_limit - vscsi->debit <= 0) {
		/* Client has exceeded request limit */
		dev_err(&vscsi->dev, "Client exceeded the request limit (%d), debit %d\n",
			vscsi->request_limit, vscsi->debit);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		return;
	}

	cmd = ibmvscsis_get_free_cmd(vscsi);
	if (!cmd) {
		dev_err(&vscsi->dev, "srp_cmd failed to get cmd, debit %d\n",
			vscsi->debit);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		return;
	}
	iue = cmd->iue;
	srp = &vio_iu(iue)->srp.cmd;

	rc = ibmvscsis_copy_crq_packet(vscsi, cmd, crq);
	if (rc) {
		ibmvscsis_free_cmd_resources(vscsi, cmd);
		return;
	}

	if (vscsi->state == SRP_PROCESSING) {
		switch (srp->opcode) {
		case SRP_LOGIN_REQ:
			rc = ibmvscsis_srp_login(vscsi, cmd, crq);
			break;

		case SRP_TSK_MGMT:
			tsk = &vio_iu(iue)->srp.tsk_mgmt;
			dev_dbg(&vscsi->dev, "tsk_mgmt tag: %llu (0x%llx)\n",
				tsk->tag, tsk->tag);
			cmd->rsp.tag = tsk->tag;
			vscsi->debit += 1;
			cmd->type = TASK_MANAGEMENT;
			list_add_tail(&cmd->list, &vscsi->schedule_q);
			queue_work(vscsi->work_q, &cmd->work);
			break;

		case SRP_CMD:
			dev_dbg(&vscsi->dev, "srp_cmd tag: %llu (0x%llx)\n",
				srp->tag, srp->tag);
			cmd->rsp.tag = srp->tag;
			vscsi->debit += 1;
			cmd->type = SCSI_CDB;
			/*
			 * We want to keep track of work waiting for
			 * the workqueue.
			 */
			list_add_tail(&cmd->list, &vscsi->schedule_q);
			queue_work(vscsi->work_q, &cmd->work);
			break;

		case SRP_I_LOGOUT:
			rc = ibmvscsis_srp_i_logout(vscsi, cmd, crq);
			break;

		default:
			ibmvscsis_free_cmd_resources(vscsi, cmd);
			dev_err(&vscsi->dev, "invalid srp cmd, opcode %d\n",
				(uint)srp->opcode);
			ibmvscsis_post_disconnect(vscsi,
						  ERR_DISCONNECT_RECONNECT, 0);
			break;
		}
	} else if (srp->opcode == SRP_LOGIN_REQ && vscsi->state == CONNECTED) {
		rc = ibmvscsis_srp_login(vscsi, cmd, crq);
	} else {
		ibmvscsis_free_cmd_resources(vscsi, cmd);
		dev_err(&vscsi->dev, "Invalid state %d to handle srp cmd\n",
			vscsi->state);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
	}
}
/**
 * ibmvscsis_ping_response() - Respond to a ping request
 * @vscsi:	Pointer to our adapter structure
 *
 * Let the client know that the server is alive and waiting on
 * its native I/O stack.
 * If any type of error occurs from the call to queue a ping
 * response then the client is either not accepting or receiving
 * interrupts.  Disconnect with an error.
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, interrupt lock held
 */
static long ibmvscsis_ping_response(struct scsi_info *vscsi)
{
	struct viosrp_crq *crq;
	u64 buffer[2] = { 0, 0 };
	long rc;

	crq = (struct viosrp_crq *)&buffer;
	crq->valid = VALID_CMD_RESP_EL;
	crq->format = (u8)MESSAGE_IN_CRQ;
	crq->status = PING_RESPONSE;

	rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]),
			cpu_to_be64(buffer[MSG_LOW]));

	switch (rc) {
	case H_SUCCESS:
		break;
	case H_CLOSED:
		vscsi->flags |= CLIENT_FAILED;
		fallthrough;
	case H_DROPPED:
		vscsi->flags |= RESPONSE_Q_DOWN;
		fallthrough;
	case H_REMOTE_PARM:
		dev_err(&vscsi->dev, "ping_response: h_send_crq failed, rc %ld\n",
			rc);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		break;
	default:
		dev_err(&vscsi->dev, "ping_response: h_send_crq returned unknown rc %ld\n",
			rc);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
		break;
	}

	return rc;
}
/**
 * ibmvscsis_parse_command() - Parse an element taken from the cmd rsp queue.
 * @vscsi:	Pointer to our adapter structure
 * @crq:	Pointer to CRQ element containing the SRP request
 *
 * This function will return success if the command queue element is valid
 * and the srp iu or MAD request it pointed to was also valid.  That does
 * not mean that an error was not returned to the client.
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, intr lock held
 */
static long ibmvscsis_parse_command(struct scsi_info *vscsi,
				    struct viosrp_crq *crq)
{
	long rc = ADAPT_SUCCESS;

	switch (crq->valid) {
	case VALID_CMD_RESP_EL:
		switch (crq->format) {
		case OS400_FORMAT:
		case AIX_FORMAT:
		case LINUX_FORMAT:
		case MAD_FORMAT:
			if (vscsi->flags & PROCESSING_MAD) {
				rc = ERROR;
				dev_err(&vscsi->dev, "parse_command: already processing mad\n");
				ibmvscsis_post_disconnect(vscsi,
							  ERR_DISCONNECT_RECONNECT,
							  0);
			} else {
				vscsi->flags |= PROCESSING_MAD;
				rc = ibmvscsis_mad(vscsi, crq);
			}
			break;

		case SRP_FORMAT:
			ibmvscsis_srp_cmd(vscsi, crq);
			break;

		case MESSAGE_IN_CRQ:
			if (crq->status == PING)
				ibmvscsis_ping_response(vscsi);
			break;

		default:
			dev_err(&vscsi->dev, "parse_command: invalid format %d\n",
				(uint)crq->format);
			ibmvscsis_post_disconnect(vscsi,
						  ERR_DISCONNECT_RECONNECT, 0);
			break;
		}
		break;

	case VALID_TRANS_EVENT:
		rc = ibmvscsis_trans_event(vscsi, crq);
		break;

	case VALID_INIT_MSG:
		rc = ibmvscsis_init_msg(vscsi, crq);
		break;

	default:
		dev_err(&vscsi->dev, "parse_command: invalid valid field %d\n",
			(uint)crq->valid);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		break;
	}

	/*
	 * Return only what the interrupt handler cares
	 * about. Most errors we keep right on trucking.
	 */
	rc = vscsi->flags & SCHEDULE_DISCONNECT;

	return rc;
}
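/*
 * read_dma_window() - Pull the local and remote LIOBNs from the device tree
 *
 * The "ibm,my-dma-window" property holds two windows back to back, and the
 * first cell of each window is its LIOBN.  The "ibm,#dma-address-cells" and
 * "ibm,#dma-size-cells" properties give the number of cells to skip to reach
 * the second window; if either property is missing, one cell is skipped for
 * it instead.
 */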
static int read_dma_window(struct scsi_info *vscsi)
{
	struct vio_dev *vdev = vscsi->dma_dev;
	const __be32 *dma_window;
	const __be32 *prop;

	/* TODO Using of_parse_dma_window would be better, but it doesn't give
	 * a way to read multiple windows without already knowing the size of
	 * a window or the number of windows.
	 */
	dma_window = (const __be32 *)vio_get_attribute(vdev,
						       "ibm,my-dma-window",
						       NULL);
	if (!dma_window) {
		dev_err(&vscsi->dev, "Couldn't find ibm,my-dma-window property\n");
		return -1;
	}

	vscsi->dds.window[LOCAL].liobn = be32_to_cpu(*dma_window);
	dma_window++;

	prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells",
						 NULL);
	if (!prop) {
		dev_warn(&vscsi->dev, "Couldn't find ibm,#dma-address-cells property\n");
		dma_window++;
	} else {
		dma_window += be32_to_cpu(*prop);
	}

	prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells",
						 NULL);
	if (!prop) {
		dev_warn(&vscsi->dev, "Couldn't find ibm,#dma-size-cells property\n");
		dma_window++;
	} else {
		dma_window += be32_to_cpu(*prop);
	}

	/* dma_window should point to the second window now */
	vscsi->dds.window[REMOTE].liobn = be32_to_cpu(*dma_window);

	return 0;
}
static struct ibmvscsis_tport *ibmvscsis_lookup_port(const char *name)
{
	struct ibmvscsis_tport *tport = NULL;
	struct vio_dev *vdev;
	struct scsi_info *vscsi;

	spin_lock_bh(&ibmvscsis_dev_lock);
	list_for_each_entry(vscsi, &ibmvscsis_dev_list, list) {
		vdev = vscsi->dma_dev;
		if (!strcmp(dev_name(&vdev->dev), name)) {
			tport = &vscsi->tport;
			break;
		}
	}
	spin_unlock_bh(&ibmvscsis_dev_lock);

	return tport;
}
/**
 * ibmvscsis_parse_cmd() - Parse SRP Command
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Pointer to command element with SRP command
 *
 * Parse the srp command; if it is valid then submit it to tcm.
 * Note: The return code does not reflect the status of the SCSI CDB.
 *
 * EXECUTION ENVIRONMENT:
 *	Process level
 */
static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
				struct ibmvscsis_cmd *cmd)
{
	struct iu_entry *iue = cmd->iue;
	struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf;
	struct ibmvscsis_nexus *nexus;
	u64 data_len = 0;
	enum dma_data_direction dir;
	int attr = 0;

	nexus = vscsi->tport.ibmv_nexus;
	/*
	 * additional length in bytes.  Note that the SRP spec says that
	 * additional length is in 4-byte words, but technically the
	 * additional length field is only the upper 6 bits of the byte.
	 * The lower 2 bits are reserved.  If the lower 2 bits are 0 (as
	 * all reserved fields should be), then interpreting the byte as
	 * an int will yield the length in bytes.
	 */
	if (srp->add_cdb_len & 0x03) {
		dev_err(&vscsi->dev, "parse_cmd: reserved bits set in IU\n");
		spin_lock_bh(&vscsi->intr_lock);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		ibmvscsis_free_cmd_resources(vscsi, cmd);
		spin_unlock_bh(&vscsi->intr_lock);
		return;
	}

	if (srp_get_desc_table(srp, &dir, &data_len)) {
		dev_err(&vscsi->dev, "0x%llx: parsing SRP descriptor table failed.\n",
			srp->tag);
		goto fail;
	}

	cmd->rsp.sol_not = srp->sol_not;

	switch (srp->task_attr) {
	case SRP_SIMPLE_TASK:
		attr = TCM_SIMPLE_TAG;
		break;
	case SRP_ORDERED_TASK:
		attr = TCM_ORDERED_TAG;
		break;
	case SRP_HEAD_TASK:
		attr = TCM_HEAD_TAG;
		break;
	case SRP_ACA_TASK:
		attr = TCM_ACA_TAG;
		break;
	default:
		dev_err(&vscsi->dev, "Invalid task attribute %d\n",
			srp->task_attr);
		goto fail;
	}

	cmd->se_cmd.tag = be64_to_cpu(srp->tag);

	spin_lock_bh(&vscsi->intr_lock);
	list_add_tail(&cmd->list, &vscsi->active_q);
	spin_unlock_bh(&vscsi->intr_lock);

	srp->lun.scsi_lun[0] &= 0x3f;

	target_submit_cmd(&cmd->se_cmd, nexus->se_sess, srp->cdb,
			  cmd->sense_buf, scsilun_to_int(&srp->lun),
			  data_len, attr, dir, 0);
	return;

fail:
	spin_lock_bh(&vscsi->intr_lock);
	ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
	spin_unlock_bh(&vscsi->intr_lock);
}
/**
 * ibmvscsis_parse_task() - Parse SRP Task Management Request
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Pointer to command element with SRP task management request
 *
 * Parse the srp task management request; if it is valid then submit it to tcm.
 * Note: The return code does not reflect the status of the task management
 * request.
 *
 * EXECUTION ENVIRONMENT:
 *	Processor level
 */
static void ibmvscsis_parse_task(struct scsi_info *vscsi,
				 struct ibmvscsis_cmd *cmd)
{
	struct iu_entry *iue = cmd->iue;
	struct srp_tsk_mgmt *srp_tsk = &vio_iu(iue)->srp.tsk_mgmt;
	int tcm_type;
	u64 tag_to_abort = 0;
	int rc = 0;
	struct ibmvscsis_nexus *nexus;

	nexus = vscsi->tport.ibmv_nexus;

	cmd->rsp.sol_not = srp_tsk->sol_not;

	switch (srp_tsk->tsk_mgmt_func) {
	case SRP_TSK_ABORT_TASK:
		tcm_type = TMR_ABORT_TASK;
		tag_to_abort = be64_to_cpu(srp_tsk->task_tag);
		break;
	case SRP_TSK_ABORT_TASK_SET:
		tcm_type = TMR_ABORT_TASK_SET;
		break;
	case SRP_TSK_CLEAR_TASK_SET:
		tcm_type = TMR_CLEAR_TASK_SET;
		break;
	case SRP_TSK_LUN_RESET:
		tcm_type = TMR_LUN_RESET;
		break;
	case SRP_TSK_CLEAR_ACA:
		tcm_type = TMR_CLEAR_ACA;
		break;
	default:
		dev_err(&vscsi->dev, "unknown task mgmt func %d\n",
			srp_tsk->tsk_mgmt_func);
		cmd->se_cmd.se_tmr_req->response =
			TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
		rc = -1;
		break;
	}

	if (!rc) {
		cmd->se_cmd.tag = be64_to_cpu(srp_tsk->tag);

		spin_lock_bh(&vscsi->intr_lock);
		list_add_tail(&cmd->list, &vscsi->active_q);
		spin_unlock_bh(&vscsi->intr_lock);

		srp_tsk->lun.scsi_lun[0] &= 0x3f;

		dev_dbg(&vscsi->dev, "calling submit_tmr, func %d\n",
			srp_tsk->tsk_mgmt_func);
		rc = target_submit_tmr(&cmd->se_cmd, nexus->se_sess, NULL,
				       scsilun_to_int(&srp_tsk->lun), srp_tsk,
				       tcm_type, GFP_KERNEL, tag_to_abort, 0);
		if (rc) {
			dev_err(&vscsi->dev, "target_submit_tmr failed, rc %d\n",
				rc);
			spin_lock_bh(&vscsi->intr_lock);
			list_del(&cmd->list);
			spin_unlock_bh(&vscsi->intr_lock);
			cmd->se_cmd.se_tmr_req->response =
				TMR_FUNCTION_REJECTED;
		}
	}

	if (rc)
		transport_send_check_condition_and_sense(&cmd->se_cmd, 0, 0);
}
static void ibmvscsis_scheduler(struct work_struct *work)
{
	struct ibmvscsis_cmd *cmd = container_of(work, struct ibmvscsis_cmd,
						 work);
	struct scsi_info *vscsi = cmd->adapter;

	spin_lock_bh(&vscsi->intr_lock);

	/* Remove from schedule_q */
	list_del(&cmd->list);

	/* Don't submit cmd if we're disconnecting */
	if (vscsi->flags & (SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED)) {
		ibmvscsis_free_cmd_resources(vscsi, cmd);

		/* ibmvscsis_disconnect might be waiting for us */
		if (list_empty(&vscsi->active_q) &&
		    list_empty(&vscsi->schedule_q) &&
		    (vscsi->flags & WAIT_FOR_IDLE)) {
			vscsi->flags &= ~WAIT_FOR_IDLE;
			complete(&vscsi->wait_idle);
		}

		spin_unlock_bh(&vscsi->intr_lock);
		return;
	}

	spin_unlock_bh(&vscsi->intr_lock);

	switch (cmd->type) {
	case SCSI_CDB:
		ibmvscsis_parse_cmd(vscsi, cmd);
		break;
	case TASK_MANAGEMENT:
		ibmvscsis_parse_task(vscsi, cmd);
		break;
	default:
		dev_err(&vscsi->dev, "scheduler, invalid cmd type %d\n",
			(int)cmd->type);
		spin_lock_bh(&vscsi->intr_lock);
		ibmvscsis_free_cmd_resources(vscsi, cmd);
		spin_unlock_bh(&vscsi->intr_lock);
		break;
	}
}
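/*
 * ibmvscsis_alloc_cmds() - Allocate the adapter's command element pool
 *
 * All command elements are carved out of a single kcalloc'd pool and strung
 * on the free_cmd list; each element is tied to its adapter and to the
 * scheduler work function up front, so dispatch only has to queue the work.
 */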
static int ibmvscsis_alloc_cmds(struct scsi_info *vscsi, int num)
{
	struct ibmvscsis_cmd *cmd;
	int i;

	INIT_LIST_HEAD(&vscsi->free_cmd);
	vscsi->cmd_pool = kcalloc(num, sizeof(struct ibmvscsis_cmd),
				  GFP_KERNEL);
	if (!vscsi->cmd_pool)
		return -ENOMEM;

	for (i = 0, cmd = (struct ibmvscsis_cmd *)vscsi->cmd_pool; i < num;
	     i++, cmd++) {
		cmd->abort_cmd = NULL;
		cmd->adapter = vscsi;
		INIT_WORK(&cmd->work, ibmvscsis_scheduler);
		list_add_tail(&cmd->list, &vscsi->free_cmd);
	}

	return 0;
}

static void ibmvscsis_free_cmds(struct scsi_info *vscsi)
{
	kfree(vscsi->cmd_pool);
	vscsi->cmd_pool = NULL;
	INIT_LIST_HEAD(&vscsi->free_cmd);
}
/**
 * ibmvscsis_service_wait_q() - Service Waiting Queue
 * @timer:	Pointer to timer which has expired
 *
 * This routine is called when the timer pops to service the waiting
 * queue. Elements on the queue have completed, their responses have been
 * copied to the client, but the client's response queue was full so
 * the queue message could not be sent. The routine grabs the proper locks
 * and calls send messages.
 *
 * EXECUTION ENVIRONMENT:
 *	called at interrupt level
 */
static enum hrtimer_restart ibmvscsis_service_wait_q(struct hrtimer *timer)
{
	struct timer_cb *p_timer = container_of(timer, struct timer_cb, timer);
	struct scsi_info *vscsi = container_of(p_timer, struct scsi_info,
					       rsp_q_timer);

	spin_lock_bh(&vscsi->intr_lock);
	p_timer->timer_pops += 1;
	p_timer->started = false;
	ibmvscsis_send_messages(vscsi);
	spin_unlock_bh(&vscsi->intr_lock);

	return HRTIMER_NORESTART;
}
static long ibmvscsis_alloctimer(struct scsi_info *vscsi)
{
	struct timer_cb *p_timer;

	p_timer = &vscsi->rsp_q_timer;
	hrtimer_init(&p_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);

	p_timer->timer.function = ibmvscsis_service_wait_q;
	p_timer->started = false;
	p_timer->timer_pops = 0;

	return ADAPT_SUCCESS;
}

static void ibmvscsis_freetimer(struct scsi_info *vscsi)
{
	struct timer_cb *p_timer;

	p_timer = &vscsi->rsp_q_timer;

	(void)hrtimer_cancel(&p_timer->timer);

	p_timer->started = false;
	p_timer->timer_pops = 0;
}
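/*
 * ibmvscsis_interrupt() - Hardware interrupt handler
 *
 * The hard IRQ only disables further VIO interrupts and kicks the tasklet;
 * all CRQ processing is done in ibmvscsis_handle_crq() under intr_lock in
 * bottom-half context.
 */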
static irqreturn_t ibmvscsis_interrupt(int dummy, void *data)
{
	struct scsi_info *vscsi = data;

	vio_disable_interrupts(vscsi->dma_dev);
	tasklet_schedule(&vscsi->work_task);

	return IRQ_HANDLED;
}
/**
 * ibmvscsis_enable_change_state() - Set new state based on enabled status
 * @vscsi:	Pointer to our adapter structure
 *
 * This function determines our new state now that we are enabled.  This
 * may involve sending an Init Complete message to the client.
 *
 * Must be called with interrupt lock held.
 */
static long ibmvscsis_enable_change_state(struct scsi_info *vscsi)
{
	int bytes;
	long rc = ADAPT_SUCCESS;

	bytes = vscsi->cmd_q.size * PAGE_SIZE;
	rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, bytes);
	if (rc == H_CLOSED || rc == H_SUCCESS) {
		vscsi->state = WAIT_CONNECTION;
		rc = ibmvscsis_establish_new_q(vscsi);
	}

	if (rc != ADAPT_SUCCESS) {
		vscsi->state = ERR_DISCONNECTED;
		vscsi->flags |= RESPONSE_Q_DOWN;
	}

	return rc;
}
/**
 * ibmvscsis_create_command_q() - Create Command Queue
 * @vscsi:	Pointer to our adapter structure
 * @num_cmds:	Currently unused.  In the future, may be used to determine
 *		the size of the CRQ.
 *
 * Allocates memory for command queue, maps remote memory into an ioba, and
 * initializes the command response queue.
 *
 * EXECUTION ENVIRONMENT:
 *	Process level only
 */
static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds)
{
	int pages;
	struct vio_dev *vdev = vscsi->dma_dev;

	/* We might support multiple pages in the future, but just 1 for now */
	pages = 1;

	vscsi->cmd_q.size = pages;

	vscsi->cmd_q.base_addr =
		(struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
	if (!vscsi->cmd_q.base_addr)
		return -ENOMEM;

	vscsi->cmd_q.mask = ((uint)pages * CRQ_PER_PAGE) - 1;

	vscsi->cmd_q.crq_token = dma_map_single(&vdev->dev,
						vscsi->cmd_q.base_addr,
						PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&vdev->dev, vscsi->cmd_q.crq_token)) {
		free_page((unsigned long)vscsi->cmd_q.base_addr);
		return -ENOMEM;
	}

	return 0;
}
/**
 * ibmvscsis_destroy_command_q - Destroy Command Queue
 * @vscsi:	Pointer to our adapter structure
 *
 * Releases memory for command queue and unmaps mapped remote memory.
 *
 * EXECUTION ENVIRONMENT:
 *	Process level only
 */
static void ibmvscsis_destroy_command_q(struct scsi_info *vscsi)
{
	dma_unmap_single(&vscsi->dma_dev->dev, vscsi->cmd_q.crq_token,
			 PAGE_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)vscsi->cmd_q.base_addr);
	vscsi->cmd_q.base_addr = NULL;
	vscsi->state = NO_QUEUE;
}
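/*
 * ibmvscsis_fast_fail() - Adjust the SCSI status for fast-fail mode
 *
 * When the adapter's fast_fail setting is on and a READ/WRITE completed with
 * a HARDWARE ERROR sense key and no partial data transfer, the command is
 * flagged CMD_FAST_FAIL and NO_SENSE is reported in place of the original
 * status; otherwise the se_cmd SCSI status is passed through unchanged.
 */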
static u8 ibmvscsis_fast_fail(struct scsi_info *vscsi,
			      struct ibmvscsis_cmd *cmd)
{
	struct iu_entry *iue = cmd->iue;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf;
	struct scsi_sense_hdr sshdr;
	u8 rc = se_cmd->scsi_status;

	if (vscsi->fast_fail && (READ_CMD(srp->cdb) || WRITE_CMD(srp->cdb)))
		if (scsi_normalize_sense(se_cmd->sense_buffer,
					 se_cmd->scsi_sense_length, &sshdr))
			if (sshdr.sense_key == HARDWARE_ERROR &&
			    (se_cmd->residual_count == 0 ||
			     se_cmd->residual_count == se_cmd->data_length)) {
				rc = NO_SENSE;
				cmd->flags |= CMD_FAST_FAIL;
			}

	return rc;
}
/**
 * srp_build_response() - Build an SRP response buffer
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Pointer to command for which to send the response
 * @len_p:	Where to return the length of the IU response sent.  This
 *		is needed to construct the CRQ response.
 *
 * Build the SRP response buffer and copy it to the client's memory space.
 */
static long srp_build_response(struct scsi_info *vscsi,
			       struct ibmvscsis_cmd *cmd, uint *len_p)
{
	struct iu_entry *iue = cmd->iue;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct srp_rsp *rsp;
	uint len;
	u32 rsp_code;
	char *data;
	u32 *tsk_status;
	long rc = ADAPT_SUCCESS;

	spin_lock_bh(&vscsi->intr_lock);

	rsp = &vio_iu(iue)->srp.rsp;
	len = sizeof(*rsp);
	memset(rsp, 0, len);
	data = rsp->data;

	rsp->opcode = SRP_RSP;

	rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
	rsp->tag = cmd->rsp.tag;
	rsp->flags = 0;

	if (cmd->type == SCSI_CDB) {
		rsp->status = ibmvscsis_fast_fail(vscsi, cmd);
		if (rsp->status) {
			dev_dbg(&vscsi->dev, "build_resp: cmd %p, scsi status %d\n",
				cmd, (int)rsp->status);
			ibmvscsis_determine_resid(se_cmd, rsp);
			if (se_cmd->scsi_sense_length && se_cmd->sense_buffer) {
				rsp->sense_data_len =
					cpu_to_be32(se_cmd->scsi_sense_length);
				rsp->flags |= SRP_RSP_FLAG_SNSVALID;
				len += se_cmd->scsi_sense_length;
				memcpy(data, se_cmd->sense_buffer,
				       se_cmd->scsi_sense_length);
			}
			rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
				UCSOLNT_RESP_SHIFT;
		} else if (cmd->flags & CMD_FAST_FAIL) {
			dev_dbg(&vscsi->dev, "build_resp: cmd %p, fast fail\n",
				cmd);
			rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
				UCSOLNT_RESP_SHIFT;
		} else {
			rsp->sol_not = (cmd->rsp.sol_not & SCSOLNT) >>
				SCSOLNT_RESP_SHIFT;
		}
	} else {
		/* this is task management */
		rsp->status = 0;
		rsp->resp_data_len = cpu_to_be32(4);
		rsp->flags |= SRP_RSP_FLAG_RSPVALID;

		switch (se_cmd->se_tmr_req->response) {
		case TMR_FUNCTION_COMPLETE:
		case TMR_TASK_DOES_NOT_EXIST:
			rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_COMPLETE;
			rsp->sol_not = (cmd->rsp.sol_not & SCSOLNT) >>
				SCSOLNT_RESP_SHIFT;
			break;
		case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
		case TMR_LUN_DOES_NOT_EXIST:
			rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_NOT_SUPPORTED;
			rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
				UCSOLNT_RESP_SHIFT;
			break;
		case TMR_FUNCTION_FAILED:
		case TMR_FUNCTION_REJECTED:
		default:
			rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_FAILED;
			rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
				UCSOLNT_RESP_SHIFT;
			break;
		}

		tsk_status = (u32 *)data;
		*tsk_status = cpu_to_be32(rsp_code);
		data = (char *)(tsk_status + 1);
		len += 4;
	}

	rc = h_copy_rdma(len, vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma,
			 vscsi->dds.window[REMOTE].liobn,
			 be64_to_cpu(iue->remote_token));

	switch (rc) {
	case H_SUCCESS:
		vscsi->credit = 0;
		*len_p = len;
		break;
	case H_PERMISSION:
		if (connection_broken(vscsi))
			vscsi->flags |= RESPONSE_Q_DOWN | CLIENT_FAILED;

		dev_err(&vscsi->dev, "build_response: error copying to client, rc %ld, flags 0x%x, state 0x%hx\n",
			rc, vscsi->flags, vscsi->state);
		break;
	case H_SOURCE_PARM:
	case H_DEST_PARM:
	default:
		dev_err(&vscsi->dev, "build_response: error copying to client, rc %ld\n",
			rc);
		break;
	}

	spin_unlock_bh(&vscsi->intr_lock);

	return rc;
}
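/*
 * ibmvscsis_rdma() - Move data between client descriptors and our SG list
 *
 * Walks the client's direct memory descriptors and the local scatter/gather
 * list in parallel, issuing h_copy_rdma transfers of at most max_vdma_size
 * bytes each, with the copy direction chosen by whether this is a write to
 * the device (read from the client) or a read (write to the client).
 */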
static int ibmvscsis_rdma(struct ibmvscsis_cmd *cmd, struct scatterlist *sg,
			  int nsg, struct srp_direct_buf *md, int nmd,
			  enum dma_data_direction dir, unsigned int bytes)
{
	struct iu_entry *iue = cmd->iue;
	struct srp_target *target = iue->target;
	struct scsi_info *vscsi = target->ldata;
	struct scatterlist *sgp;
	dma_addr_t client_ioba, server_ioba;
	ulong buf_len;
	ulong client_len, server_len;
	int md_idx;
	long tx_len;
	long rc = 0;

	if (bytes == 0)
		return 0;

	sgp = sg;
	client_len = 0;
	server_len = 0;
	md_idx = 0;
	tx_len = bytes;

	do {
		if (client_len == 0) {
			if (md_idx >= nmd) {
				dev_err(&vscsi->dev, "rdma: ran out of client memory descriptors\n");
				rc = -EIO;
				break;
			}
			client_ioba = be64_to_cpu(md[md_idx].va);
			client_len = be32_to_cpu(md[md_idx].len);
		}
		if (server_len == 0) {
			if (!sgp) {
				dev_err(&vscsi->dev, "rdma: ran out of scatter/gather list\n");
				rc = -EIO;
				break;
			}
			server_ioba = sg_dma_address(sgp);
			server_len = sg_dma_len(sgp);
		}

		buf_len = tx_len;

		if (buf_len > client_len)
			buf_len = client_len;

		if (buf_len > server_len)
			buf_len = server_len;

		if (buf_len > max_vdma_size)
			buf_len = max_vdma_size;

		if (dir == DMA_TO_DEVICE) {
			/* read from client */
			rc = h_copy_rdma(buf_len,
					 vscsi->dds.window[REMOTE].liobn,
					 client_ioba,
					 vscsi->dds.window[LOCAL].liobn,
					 server_ioba);
		} else {
			/* The h_copy_rdma will cause phyp, running in another
			 * partition, to read memory, so we need to make sure
			 * the data has been written out, hence these syncs.
			 */
			/* ensure that everything is in memory */
			isync();
			/* ensure that memory has been made visible */
			dma_wmb();
			rc = h_copy_rdma(buf_len,
					 vscsi->dds.window[LOCAL].liobn,
					 server_ioba,
					 vscsi->dds.window[REMOTE].liobn,
					 client_ioba);
		}
		switch (rc) {
		case H_SUCCESS:
			break;
		case H_PERMISSION:
		case H_SOURCE_PARM:
		case H_DEST_PARM:
			if (connection_broken(vscsi)) {
				spin_lock_bh(&vscsi->intr_lock);
				vscsi->flags |=
					(RESPONSE_Q_DOWN | CLIENT_FAILED);
				spin_unlock_bh(&vscsi->intr_lock);
			}
			dev_err(&vscsi->dev, "rdma: h_copy_rdma failed, rc %ld\n",
				rc);
			break;
		default:
			dev_err(&vscsi->dev, "rdma: unknown error %ld from h_copy_rdma\n",
				rc);
			break;
		}

		if (!rc) {
			tx_len -= buf_len;
			if (tx_len == 0)
				break;

			client_len -= buf_len;
			if (client_len == 0)
				md_idx++;
			else
				client_ioba += buf_len;

			server_len -= buf_len;
			if (server_len == 0)
				sgp = sg_next(sgp);
			else
				server_ioba += buf_len;
		} else {
			break;
		}
	} while (!rc);

	return rc;
}
/**
 * ibmvscsis_handle_crq() - Handle CRQ
 * @data:	Pointer to our adapter structure
 *
 * Read the command elements from the command queue, copy the payloads
 * associated with the command elements to local memory, and execute the
 * SRP requests.
 *
 * Note: this is an edge triggered interrupt. It can not be shared.
 */
static void ibmvscsis_handle_crq(unsigned long data)
{
	struct scsi_info *vscsi = (struct scsi_info *)data;
	struct viosrp_crq *crq;
	long rc;
	bool ack = true;
	volatile u8 valid;

	spin_lock_bh(&vscsi->intr_lock);

	dev_dbg(&vscsi->dev, "got interrupt\n");

	/*
	 * if we are in a path where we are waiting for all pending commands
	 * to complete because we received a transport event and anything in
	 * the command queue is for a new connection, do nothing
	 */
	if (TARGET_STOP(vscsi)) {
		vio_enable_interrupts(vscsi->dma_dev);

		dev_dbg(&vscsi->dev, "handle_crq, don't process: flags 0x%x, state 0x%hx\n",
			vscsi->flags, vscsi->state);
		spin_unlock_bh(&vscsi->intr_lock);
		return;
	}

	rc = vscsi->flags & SCHEDULE_DISCONNECT;
	crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
	valid = crq->valid;
	dma_rmb();

	while (valid) {
		/*
		 * These are edge triggered interrupts. After dropping out of
		 * the while loop, the code must check for work since an
		 * interrupt could be lost, and an element be left on the
		 * queue, hence the label.
		 */
cmd_work:
		vscsi->cmd_q.index =
			(vscsi->cmd_q.index + 1) & vscsi->cmd_q.mask;

		if (!rc) {
			rc = ibmvscsis_parse_command(vscsi, crq);
		} else {
			if ((uint)crq->valid == VALID_TRANS_EVENT) {
				/*
				 * must service the transport layer events even
				 * in an error state, dont break out until all
				 * the consecutive transport events have been
				 * processed
				 */
				rc = ibmvscsis_trans_event(vscsi, crq);
			} else if (vscsi->flags & TRANS_EVENT) {
				/*
				 * if a transport event has occurred leave
				 * everything but transport events on the queue
				 *
				 * need to decrement the queue index so we can
				 * look at the element again
				 */
				if (vscsi->cmd_q.index)
					vscsi->cmd_q.index -= 1;
				else
					/*
					 * index is at 0 it just wrapped.
					 * have it index last element in q
					 */
					vscsi->cmd_q.index = vscsi->cmd_q.mask;
				break;
			}
		}

		crq->valid = INVALIDATE_CMD_RESP_EL;

		crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
		valid = crq->valid;
		dma_rmb();
	}

	if (!rc) {
		if (ack) {
			vio_enable_interrupts(vscsi->dma_dev);
			ack = false;
			dev_dbg(&vscsi->dev, "handle_crq, reenabling interrupts\n");
		}
		valid = crq->valid;
		dma_rmb();
		if (valid)
			goto cmd_work;
	} else {
		dev_dbg(&vscsi->dev, "handle_crq, error: flags 0x%x, state 0x%hx, crq index 0x%x\n",
			vscsi->flags, vscsi->state, vscsi->cmd_q.index);
	}

	dev_dbg(&vscsi->dev, "Leaving handle_crq: schedule_q empty %d, flags 0x%x, state 0x%hx\n",
		(int)list_empty(&vscsi->schedule_q), vscsi->flags,
		vscsi->state);

	spin_unlock_bh(&vscsi->intr_lock);
}
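/*
 * ibmvscsis_probe() - Set up a new v-scsi-host adapter
 *
 * Allocates the per-adapter scsi_info, reads the DMA windows, allocates the
 * SRP target, command pool, response-queue timer and CRQ page, queries the
 * partner partition with H_GET_PARTNER_INFO, and finally wires up the
 * tasklet, work queue and interrupt handler before leaving the adapter in
 * the WAIT_ENABLED state.
 */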
static int ibmvscsis_probe(struct vio_dev *vdev,
			   const struct vio_device_id *id)
{
	struct scsi_info *vscsi;
	int rc = 0;
	long hrc = 0;

	vscsi = kzalloc(sizeof(*vscsi), GFP_KERNEL);
	if (!vscsi) {
		rc = -ENOMEM;
		dev_err(&vdev->dev, "probe: allocation of adapter failed\n");
		return rc;
	}

	vscsi->dma_dev = vdev;
	vscsi->dev = vdev->dev;
	INIT_LIST_HEAD(&vscsi->schedule_q);
	INIT_LIST_HEAD(&vscsi->waiting_rsp);
	INIT_LIST_HEAD(&vscsi->active_q);

	snprintf(vscsi->tport.tport_name, IBMVSCSIS_NAMELEN, "%s",
		 dev_name(&vdev->dev));

	dev_dbg(&vscsi->dev, "probe tport_name: %s\n", vscsi->tport.tport_name);

	rc = read_dma_window(vscsi);
	if (rc)
		goto free_adapter;
	dev_dbg(&vscsi->dev, "Probe: liobn 0x%x, riobn 0x%x\n",
		vscsi->dds.window[LOCAL].liobn,
		vscsi->dds.window[REMOTE].liobn);

	snprintf(vscsi->eye, sizeof(vscsi->eye), "VSCSI %s", vdev->name);

	vscsi->dds.unit_id = vdev->unit_address;
	strscpy(vscsi->dds.partition_name, partition_name,
		sizeof(vscsi->dds.partition_name));
	vscsi->dds.partition_num = partition_number;

	spin_lock_bh(&ibmvscsis_dev_lock);
	list_add_tail(&vscsi->list, &ibmvscsis_dev_list);
	spin_unlock_bh(&ibmvscsis_dev_lock);

	/*
	 * TBD: How do we determine # of cmds to request?  Do we know how
	 * many "children" we have?
	 */
	vscsi->request_limit = INITIAL_SRP_LIMIT;
	rc = srp_target_alloc(&vscsi->target, &vdev->dev, vscsi->request_limit,
			      SRP_MAX_IU_LEN);
	if (rc)
		goto rem_list;

	vscsi->target.ldata = vscsi;

	rc = ibmvscsis_alloc_cmds(vscsi, vscsi->request_limit);
	if (rc) {
		dev_err(&vscsi->dev, "alloc_cmds failed, rc %d, num %d\n",
			rc, vscsi->request_limit);
		goto free_target;
	}

	/*
	 * Note: the lock is used in freeing timers, so must initialize
	 * first so that ordering in case of error is correct.
	 */
	spin_lock_init(&vscsi->intr_lock);

	rc = ibmvscsis_alloctimer(vscsi);
	if (rc) {
		dev_err(&vscsi->dev, "probe: alloctimer failed, rc %d\n", rc);
		goto free_cmds;
	}

	rc = ibmvscsis_create_command_q(vscsi, 256);
	if (rc) {
		dev_err(&vscsi->dev, "probe: create_command_q failed, rc %d\n",
			rc);
		goto free_timer;
	}

	vscsi->map_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!vscsi->map_buf) {
		rc = -ENOMEM;
		dev_err(&vscsi->dev, "probe: allocating cmd buffer failed\n");
		goto destroy_queue;
	}

	vscsi->map_ioba = dma_map_single(&vdev->dev, vscsi->map_buf, PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&vdev->dev, vscsi->map_ioba)) {
		rc = -ENOMEM;
		dev_err(&vscsi->dev, "probe: error mapping command buffer\n");
		goto free_buf;
	}

	hrc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
		       (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
		       0);
	if (hrc == H_SUCCESS)
		vscsi->client_data.partition_number =
			be64_to_cpu(*(u64 *)vscsi->map_buf);
	/*
	 * We expect the VIOCTL to fail if we're configured as "any
	 * client can connect" and the client isn't activated yet.
	 * We'll make the call again when he sends an init msg.
	 */
	dev_dbg(&vscsi->dev, "probe hrc %ld, client partition num %d\n",
		hrc, vscsi->client_data.partition_number);

	tasklet_init(&vscsi->work_task, ibmvscsis_handle_crq,
		     (unsigned long)vscsi);

	init_completion(&vscsi->wait_idle);
	init_completion(&vscsi->unconfig);

	vscsi->work_q = alloc_workqueue("ibmvscsis%s", WQ_MEM_RECLAIM, 1,
					dev_name(&vdev->dev));
	if (!vscsi->work_q) {
		rc = -ENOMEM;
		dev_err(&vscsi->dev, "create_workqueue failed\n");
		goto unmap_buf;
	}

	rc = request_irq(vdev->irq, ibmvscsis_interrupt, 0, "ibmvscsis", vscsi);
	if (rc) {
		rc = -EPERM;
		dev_err(&vscsi->dev, "probe: request_irq failed, rc %d\n", rc);
		goto destroy_WQ;
	}

	vscsi->state = WAIT_ENABLED;

	dev_set_drvdata(&vdev->dev, vscsi);

	return 0;

destroy_WQ:
	destroy_workqueue(vscsi->work_q);
unmap_buf:
	dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
free_buf:
	kfree(vscsi->map_buf);
destroy_queue:
	tasklet_kill(&vscsi->work_task);
	ibmvscsis_unregister_command_q(vscsi);
	ibmvscsis_destroy_command_q(vscsi);
free_timer:
	ibmvscsis_freetimer(vscsi);
free_cmds:
	ibmvscsis_free_cmds(vscsi);
free_target:
	srp_target_free(&vscsi->target);
rem_list:
	spin_lock_bh(&ibmvscsis_dev_lock);
	list_del(&vscsi->list);
	spin_unlock_bh(&ibmvscsis_dev_lock);
free_adapter:
	kfree(vscsi);

	return rc;
}
static void ibmvscsis_remove(struct vio_dev *vdev)
{
	struct scsi_info *vscsi = dev_get_drvdata(&vdev->dev);

	dev_dbg(&vscsi->dev, "remove (%s)\n", dev_name(&vscsi->dma_dev->dev));

	spin_lock_bh(&vscsi->intr_lock);
	ibmvscsis_post_disconnect(vscsi, UNCONFIGURING, 0);
	vscsi->flags |= CFG_SLEEPING;
	spin_unlock_bh(&vscsi->intr_lock);
	wait_for_completion(&vscsi->unconfig);

	vio_disable_interrupts(vdev);
	free_irq(vdev->irq, vscsi);
	destroy_workqueue(vscsi->work_q);
	dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	kfree(vscsi->map_buf);
	tasklet_kill(&vscsi->work_task);
	ibmvscsis_destroy_command_q(vscsi);
	ibmvscsis_freetimer(vscsi);
	ibmvscsis_free_cmds(vscsi);
	srp_target_free(&vscsi->target);
	spin_lock_bh(&ibmvscsis_dev_lock);
	list_del(&vscsi->list);
	spin_unlock_bh(&ibmvscsis_dev_lock);
	kfree(vscsi);
}
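/*
 * sysfs show routines for the read-only adapter attributes (system id,
 * partition number, VIO unit address) that are published through the
 * "ibmvscsis" device class declared further below.
 */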
static ssize_t partition_number_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%x\n", partition_number);
}

static ssize_t unit_address_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct scsi_info *vscsi = container_of(dev, struct scsi_info, dev);

	return sysfs_emit(buf, "%x\n", vscsi->dma_dev->unit_address);
}
static int ibmvscsis_get_system_info(void)
{
	struct device_node *rootdn, *vdevdn;
	const char *id, *model, *name;
	const __be32 *num;

	rootdn = of_find_node_by_path("/");
	if (!rootdn)
		return -ENOENT;

	model = of_get_property(rootdn, "model", NULL);
	id = of_get_property(rootdn, "system-id", NULL);
	if (model && id)
		snprintf(system_id, sizeof(system_id), "%s-%s", model, id);

	name = of_get_property(rootdn, "ibm,partition-name", NULL);
	if (name)
		strscpy(partition_name, name, sizeof(partition_name));

	num = of_get_property(rootdn, "ibm,partition-no", NULL);
	if (num)
		partition_number = of_read_number(num, 1);

	of_node_put(rootdn);

	vdevdn = of_find_node_by_path("/vdevice");
	if (vdevdn) {
		const uint *mvds;

		mvds = of_get_property(vdevdn, "ibm,max-virtual-dma-size",
				       NULL);
		if (mvds)
			max_vdma_size = *mvds;
		of_node_put(vdevdn);
	}

	return 0;
}
static char *ibmvscsis_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct ibmvscsis_tport *tport =
		container_of(se_tpg, struct ibmvscsis_tport, se_tpg);

	return tport->tport_name;
}

static u16 ibmvscsis_get_tag(struct se_portal_group *se_tpg)
{
	struct ibmvscsis_tport *tport =
		container_of(se_tpg, struct ibmvscsis_tport, se_tpg);

	return tport->tport_tpgt;
}

static u32 ibmvscsis_get_default_depth(struct se_portal_group *se_tpg)
{
	return 1;
}

static int ibmvscsis_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static int ibmvscsis_check_stop_free(struct se_cmd *se_cmd)
{
	return target_put_sess_cmd(se_cmd);
}

static void ibmvscsis_release_cmd(struct se_cmd *se_cmd)
{
	struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
						 se_cmd);
	struct scsi_info *vscsi = cmd->adapter;

	spin_lock_bh(&vscsi->intr_lock);
	/* Remove from active_q */
	list_move_tail(&cmd->list, &vscsi->waiting_rsp);
	ibmvscsis_send_messages(vscsi);
	spin_unlock_bh(&vscsi->intr_lock);
}
static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
{
	struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
						 se_cmd);
	struct scsi_info *vscsi = cmd->adapter;
	struct iu_entry *iue = cmd->iue;
	int rc;

	/*
	 * If CLIENT_FAILED OR RESPONSE_Q_DOWN, then just return success
	 * since LIO can't do anything about it, and we dont want to
	 * attempt an srp_transfer_data.
	 */
	if ((vscsi->flags & (CLIENT_FAILED | RESPONSE_Q_DOWN))) {
		dev_err(&vscsi->dev, "write_pending failed since: %d\n",
			vscsi->flags);
		return -EIO;
	}

	rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma,
			       1, 1);
	if (rc) {
		dev_err(&vscsi->dev, "srp_transfer_data() failed: %d\n", rc);
		return -EIO;
	}
	/*
	 * We now tell TCM to add this WRITE CDB directly into the TCM storage
	 * object execution queue.
	 */
	target_execute_cmd(se_cmd);
	return 0;
}
static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd)
{
	struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
						 se_cmd);
	struct iu_entry *iue = cmd->iue;
	struct scsi_info *vscsi = cmd->adapter;
	uint len = 0;
	int rc;

	rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, 1,
			       1);
	if (rc) {
		dev_err(&vscsi->dev, "srp_transfer_data failed: %d\n", rc);
		se_cmd->scsi_sense_length = 18;
		memset(se_cmd->sense_buffer, 0, se_cmd->scsi_sense_length);
		/* Logical Unit Communication Time-out asc/ascq = 0x0801 */
		scsi_build_sense_buffer(0, se_cmd->sense_buffer, MEDIUM_ERROR,
					0x08, 0x01);
	}

	srp_build_response(vscsi, cmd, &len);
	cmd->rsp.format = SRP_FORMAT;
	cmd->rsp.len = len;

	return 0;
}

static int ibmvscsis_queue_status(struct se_cmd *se_cmd)
{
	struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
						 se_cmd);
	struct scsi_info *vscsi = cmd->adapter;
	uint len;

	dev_dbg(&vscsi->dev, "queue_status %p\n", se_cmd);

	srp_build_response(vscsi, cmd, &len);
	cmd->rsp.format = SRP_FORMAT;
	cmd->rsp.len = len;

	return 0;
}
static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
						 se_cmd);
	struct scsi_info *vscsi = cmd->adapter;
	struct ibmvscsis_cmd *cmd_itr;
	struct iu_entry *iue = cmd->iue;
	struct srp_tsk_mgmt *srp_tsk = &vio_iu(iue)->srp.tsk_mgmt;
	u64 tag_to_abort = be64_to_cpu(srp_tsk->task_tag);
	uint len;

	dev_dbg(&vscsi->dev, "queue_tm_rsp %p, status %d\n",
		se_cmd, (int)se_cmd->se_tmr_req->response);

	if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK &&
	    cmd->se_cmd.se_tmr_req->response == TMR_TASK_DOES_NOT_EXIST) {
		spin_lock_bh(&vscsi->intr_lock);
		list_for_each_entry(cmd_itr, &vscsi->active_q, list) {
			if (tag_to_abort == cmd_itr->se_cmd.tag) {
				cmd_itr->abort_cmd = cmd;
				cmd->flags |= DELAY_SEND;
				break;
			}
		}
		spin_unlock_bh(&vscsi->intr_lock);
	}

	srp_build_response(vscsi, cmd, &len);
	cmd->rsp.format = SRP_FORMAT;
	cmd->rsp.len = len;
}
static void ibmvscsis_aborted_task(struct se_cmd *se_cmd)
{
	struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
						 se_cmd);
	struct scsi_info *vscsi = cmd->adapter;

	dev_dbg(&vscsi->dev, "ibmvscsis_aborted_task %p task_tag: %llu\n",
		se_cmd, se_cmd->tag);
}

static struct se_wwn *ibmvscsis_make_tport(struct target_fabric_configfs *tf,
					   struct config_group *group,
					   const char *name)
{
	struct ibmvscsis_tport *tport;
	struct scsi_info *vscsi;

	tport = ibmvscsis_lookup_port(name);
	if (tport) {
		vscsi = container_of(tport, struct scsi_info, tport);
		tport->tport_proto_id = SCSI_PROTOCOL_SRP;
		dev_dbg(&vscsi->dev, "make_tport(%s), pointer:%p, tport_id:%x\n",
			name, tport, tport->tport_proto_id);
		return &tport->tport_wwn;
	}

	return ERR_PTR(-EINVAL);
}
static void ibmvscsis_drop_tport(struct se_wwn *wwn)
{
	struct ibmvscsis_tport *tport = container_of(wwn,
						     struct ibmvscsis_tport,
						     tport_wwn);
	struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);

	dev_dbg(&vscsi->dev, "drop_tport(%s)\n",
		config_item_name(&tport->tport_wwn.wwn_group.cg_item));
}

static struct se_portal_group *ibmvscsis_make_tpg(struct se_wwn *wwn,
						  const char *name)
{
	struct ibmvscsis_tport *tport =
		container_of(wwn, struct ibmvscsis_tport, tport_wwn);
	u16 tpgt;
	int rc;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	rc = kstrtou16(name + 5, 0, &tpgt);
	if (rc)
		return ERR_PTR(rc);
	tport->tport_tpgt = tpgt;

	tport->releasing = false;

	rc = core_tpg_register(&tport->tport_wwn, &tport->se_tpg,
			       tport->tport_proto_id);
	if (rc)
		return ERR_PTR(rc);

	return &tport->se_tpg;
}
static void ibmvscsis_drop_tpg(struct se_portal_group *se_tpg)
{
	struct ibmvscsis_tport *tport = container_of(se_tpg,
						     struct ibmvscsis_tport,
						     se_tpg);

	tport->releasing = true;
	tport->enabled = false;

	/*
	 * Release the virtual I_T Nexus for this ibmvscsis TPG
	 */
	ibmvscsis_drop_nexus(tport);
	/*
	 * Deregister the se_tpg from TCM..
	 */
	core_tpg_deregister(se_tpg);
}

static ssize_t ibmvscsis_wwn_version_show(struct config_item *item,
					  char *page)
{
	return scnprintf(page, PAGE_SIZE, "%s\n", IBMVSCSIS_VERSION);
}
CONFIGFS_ATTR_RO(ibmvscsis_wwn_, version);

static struct configfs_attribute *ibmvscsis_wwn_attrs[] = {
	&ibmvscsis_wwn_attr_version,
	NULL,
};
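/*
 * ibmvscsis_enable_tpg() - React to the TPG being enabled or disabled
 *
 * Invoked by the target core when the TPG's "enable" attribute is toggled,
 * typically from user space via configfs.  Enabling registers the CRQ with
 * the hypervisor and moves the adapter toward WAIT_CONNECTION; disabling
 * posts an ERR_DISCONNECT, which looks to the client like the server going
 * away.
 */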
static int ibmvscsis_enable_tpg(struct se_portal_group *se_tpg, bool enable)
{
	struct ibmvscsis_tport *tport = container_of(se_tpg,
						     struct ibmvscsis_tport,
						     se_tpg);
	struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);
	long lrc;

	if (enable) {
		spin_lock_bh(&vscsi->intr_lock);
		tport->enabled = true;
		lrc = ibmvscsis_enable_change_state(vscsi);
		if (lrc)
			dev_err(&vscsi->dev, "enable_change_state failed, rc %ld state %d\n",
				lrc, vscsi->state);
		spin_unlock_bh(&vscsi->intr_lock);
	} else {
		spin_lock_bh(&vscsi->intr_lock);
		tport->enabled = false;
		/* This simulates the server going down */
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
		spin_unlock_bh(&vscsi->intr_lock);
	}

	return 0;
}
static const struct target_core_fabric_ops ibmvscsis_ops = {
	.module				= THIS_MODULE,
	.fabric_name			= "ibmvscsis",
	.max_data_sg_nents		= MAX_TXU / PAGE_SIZE,
	.tpg_get_wwn			= ibmvscsis_get_fabric_wwn,
	.tpg_get_tag			= ibmvscsis_get_tag,
	.tpg_get_default_depth		= ibmvscsis_get_default_depth,
	.tpg_check_demo_mode		= ibmvscsis_check_true,
	.tpg_check_demo_mode_cache	= ibmvscsis_check_true,
	.check_stop_free		= ibmvscsis_check_stop_free,
	.release_cmd			= ibmvscsis_release_cmd,
	.write_pending			= ibmvscsis_write_pending,
	.queue_data_in			= ibmvscsis_queue_data_in,
	.queue_status			= ibmvscsis_queue_status,
	.queue_tm_rsp			= ibmvscsis_queue_tm_rsp,
	.aborted_task			= ibmvscsis_aborted_task,
	/*
	 * Setup function pointers for logic in target_core_fabric_configfs.c
	 */
	.fabric_make_wwn		= ibmvscsis_make_tport,
	.fabric_drop_wwn		= ibmvscsis_drop_tport,
	.fabric_make_tpg		= ibmvscsis_make_tpg,
	.fabric_enable_tpg		= ibmvscsis_enable_tpg,
	.fabric_drop_tpg		= ibmvscsis_drop_tpg,

	.tfc_wwn_attrs			= ibmvscsis_wwn_attrs,

	.default_submit_type		= TARGET_DIRECT_SUBMIT,
	.direct_submit_supp		= 1,
};
static void ibmvscsis_dev_release(struct device *dev) {};

static DEVICE_STRING_ATTR_RO(system_id, S_IRUGO, system_id);

static struct device_attribute dev_attr_partition_number =
	__ATTR(partition_number, S_IRUGO, partition_number_show, NULL);

static struct device_attribute dev_attr_unit_address =
	__ATTR(unit_address, S_IRUGO, unit_address_show, NULL);

static struct attribute *ibmvscsis_dev_attrs[] = {
	&dev_attr_system_id.attr.attr,
	&dev_attr_partition_number.attr,
	&dev_attr_unit_address.attr,
	NULL,
};
ATTRIBUTE_GROUPS(ibmvscsis_dev);

static struct class ibmvscsis_class = {
	.name		= "ibmvscsis",
	.dev_release	= ibmvscsis_dev_release,
	.dev_groups	= ibmvscsis_dev_groups,
};

static const struct vio_device_id ibmvscsis_device_table[] = {
	{ "v-scsi-host", "IBM,v-scsi-host" },
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvscsis_device_table);
static struct vio_driver ibmvscsis_driver = {
	.name = "ibmvscsis",
	.id_table = ibmvscsis_device_table,
	.probe = ibmvscsis_probe,
	.remove = ibmvscsis_remove,
};

/*
 * ibmvscsis_init() - Kernel Module initialization
 *
 * Note: vio_register_driver() registers callback functions, and at least one
 * of those callback functions calls TCM - Linux IO Target Subsystem, thus
 * the SCSI Target template must be registered before vio_register_driver()
 * is called.
 */
static int __init ibmvscsis_init(void)
{
	int rc = 0;

	rc = ibmvscsis_get_system_info();
	if (rc) {
		pr_err("rc %d from get_system_info\n", rc);
		goto out;
	}

	rc = class_register(&ibmvscsis_class);
	if (rc) {
		pr_err("failed class register\n");
		goto out;
	}

	rc = target_register_template(&ibmvscsis_ops);
	if (rc) {
		pr_err("rc %d from target_register_template\n", rc);
		goto unregister_class;
	}

	rc = vio_register_driver(&ibmvscsis_driver);
	if (rc) {
		pr_err("rc %d from vio_register_driver\n", rc);
		goto unregister_target;
	}

	return 0;

unregister_target:
	target_unregister_template(&ibmvscsis_ops);
unregister_class:
	class_unregister(&ibmvscsis_class);
out:
	return rc;
}

static void __exit ibmvscsis_exit(void)
{
	pr_info("Unregister IBM virtual SCSI host driver\n");
	vio_unregister_driver(&ibmvscsis_driver);
	target_unregister_template(&ibmvscsis_ops);
	class_unregister(&ibmvscsis_class);
}

MODULE_DESCRIPTION("IBMVSCSIS fabric driver");
MODULE_AUTHOR("Bryant G. Ly and Michael Cyr");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVSCSIS_VERSION);
module_init(ibmvscsis_init);
module_exit(ibmvscsis_exit);