// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * IBM Virtual SCSI Target Driver
 * Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp.
 *                         Santiago Leon (santil@us.ibm.com) IBM Corp.
 *                         Linda Xie (lxie@us.ibm.com) IBM Corp.
 *
 * Copyright (C) 2005-2011 FUJITA Tomonori <tomof@acm.org>
 * Copyright (C) 2010 Nicholas A. Bellinger <nab@kernel.org>
 *
 * Authors: Bryant G. Ly <bryantly@linux.vnet.ibm.com>
 * Authors: Michael Cyr <mikecyr@linux.vnet.ibm.com>
 *
 ****************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/delay.h>

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include <asm/hvcall.h>

#include <scsi/viosrp.h>

#include "ibmvscsi_tgt.h"

#define IBMVSCSIS_VERSION	"v0.2"

#define INITIAL_SRP_LIMIT	800
#define DEFAULT_MAX_SECTORS	256
#define MAX_TXU			(1024 * 1024)
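/*
 * Largest single transfer size; advertised to the client in
 * port_max_txu[0] of the Adapter Info MAD response below.
 */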

static uint max_vdma_size = MAX_H_COPY_RDMA;

static char system_id[SYS_ID_NAME_LEN] = "";
static char partition_name[PARTITION_NAMELEN] = "UNKNOWN";
static uint partition_number = -1;

/* Adapter list and lock to control it */
static DEFINE_SPINLOCK(ibmvscsis_dev_lock);
static LIST_HEAD(ibmvscsis_dev_list);

static long ibmvscsis_parse_command(struct scsi_info *vscsi,
				    struct viosrp_crq *crq);

static void ibmvscsis_adapter_idle(struct scsi_info *vscsi);

static void ibmvscsis_determine_resid(struct se_cmd *se_cmd,
				      struct srp_rsp *rsp)
{
	u32 residual_count = se_cmd->residual_count;

	if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
		if (se_cmd->data_direction == DMA_TO_DEVICE) {
			/* residual data from an underflow write */
			rsp->flags = SRP_RSP_FLAG_DOUNDER;
			rsp->data_out_res_cnt = cpu_to_be32(residual_count);
		} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
			/* residual data from an underflow read */
			rsp->flags = SRP_RSP_FLAG_DIUNDER;
			rsp->data_in_res_cnt = cpu_to_be32(residual_count);
		}
	} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		if (se_cmd->data_direction == DMA_TO_DEVICE) {
			/* residual data from an overflow write */
			rsp->flags = SRP_RSP_FLAG_DOOVER;
			rsp->data_out_res_cnt = cpu_to_be32(residual_count);
		} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
			/* residual data from an overflow read */
			rsp->flags = SRP_RSP_FLAG_DIOVER;
			rsp->data_in_res_cnt = cpu_to_be32(residual_count);
		}
	}
}

/**
 * connection_broken() - Determine if the connection to the client is good
 * @vscsi:	Pointer to our adapter structure
 *
 * This function attempts to send a ping MAD to the client. If the call to
 * queue the request returns H_CLOSED then the connection has been broken
 * and the function returns TRUE.
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt or Process environment
 */
static bool connection_broken(struct scsi_info *vscsi)
{
	struct viosrp_crq *crq;
	u64 buffer[2] = { 0, 0 };

	/* create a PING crq */
	crq = (struct viosrp_crq *)&buffer;
	crq->valid = VALID_CMD_RESP_EL;
	crq->format = MESSAGE_IN_CRQ;

	h_return_code = h_send_crq(vscsi->dds.unit_id,
				   cpu_to_be64(buffer[MSG_HI]),
				   cpu_to_be64(buffer[MSG_LOW]));

	dev_dbg(&vscsi->dev, "Connection_broken: rc %ld\n", h_return_code);

	if (h_return_code == H_CLOSED)

/**
 * ibmvscsis_unregister_command_q() - Helper Function - Unregister Command Queue
 * @vscsi:	Pointer to our adapter structure
 *
 * This function calls h_free_crq and then frees the interrupt bit, etc.
 * It must release the lock before doing so because of the time it can take
 * for h_free_crq in PHYP.
 *
 * NOTE: the caller must make sure that state and/or flags will prevent the
 * interrupt handler from scheduling work.
 *
 * NOTE: anyone calling this function may need to set the CRQ_CLOSED flag;
 * we can't do it here, because we don't have the lock.
 *
 * EXECUTION ENVIRONMENT:
 */
static long ibmvscsis_unregister_command_q(struct scsi_info *vscsi)
{
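	/*
	 * h_free_crq can report a long-busy hint; back off for the hinted
	 * interval and retry, giving up after roughly 300 seconds.
	 */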
	long rc = ADAPT_SUCCESS;

	qrc = h_free_crq(vscsi->dds.unit_id);

	spin_lock_bh(&vscsi->intr_lock);
	vscsi->flags &= ~PREP_FOR_SUSPEND_FLAGS;
	spin_unlock_bh(&vscsi->intr_lock);

	dev_err(&vscsi->dev, "unregister_command_q: error from h_free_crq %ld\n",

	case H_LONG_BUSY_ORDER_1_MSEC:
		/* msleep not good for small values */
		usleep_range(1000, 2000);
	case H_LONG_BUSY_ORDER_10_MSEC:
		usleep_range(10000, 20000);
	case H_LONG_BUSY_ORDER_100_MSEC:
	case H_LONG_BUSY_ORDER_1_SEC:
	case H_LONG_BUSY_ORDER_10_SEC:
	case H_LONG_BUSY_ORDER_100_SEC:
		dev_err(&vscsi->dev, "unregister_command_q: unknown error %ld from h_free_crq\n",

	/*
	 * don't wait more than 300 seconds;
	 * ticks are in milliseconds, more or less
	 */
	if (ticks > 300000 && qrc != H_SUCCESS) {
		dev_err(&vscsi->dev, "Excessive wait for h_free_crq\n");
	}
	} while (qrc != H_SUCCESS && rc == ADAPT_SUCCESS);

	dev_dbg(&vscsi->dev, "Freeing CRQ: phyp rc %ld, rc %ld\n", qrc, rc);

/**
 * ibmvscsis_delete_client_info() - Helper function to Delete Client Info
 * @vscsi:		Pointer to our adapter structure
 * @client_closed:	True if client closed its queue
 *
 * Deletes information specific to the client when the client goes away.
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt or Process
 */
static void ibmvscsis_delete_client_info(struct scsi_info *vscsi,
					 bool client_closed)
{
	vscsi->client_cap = 0;

	/*
	 * Some things we don't want to clear if we're closing the queue,
	 * because some clients don't resend the host handshake when they
	 * get a transport event.
	 */
	vscsi->client_data.os_type = 0;

/**
 * ibmvscsis_free_command_q() - Free Command Queue
 * @vscsi:	Pointer to our adapter structure
 *
 * This function calls unregister_command_q, then clears interrupts and
 * any pending interrupt acknowledgments associated with the command q.
 * It also clears memory if there is no error.
 *
 * PHYP did not meet the PAPR architecture, so we must give up the
 * lock. This causes a timing hole regarding state change. To close the
 * hole this routine does accounting on any change that occurred during
 * the time the lock is not held.
 *
 * NOTE: must give up and then acquire the interrupt lock; the caller must
 * make sure that state and/or flags will prevent the interrupt handler from
 * scheduling work.
 *
 * EXECUTION ENVIRONMENT:
 *	Process level, interrupt lock is held
 */
static long ibmvscsis_free_command_q(struct scsi_info *vscsi)
{
	u32 flags_under_lock;
	u16 state_under_lock;
	long rc = ADAPT_SUCCESS;

	if (!(vscsi->flags & CRQ_CLOSED)) {
		vio_disable_interrupts(vscsi->dma_dev);
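
		/*
		 * Remember the state and flags before dropping the interrupt
		 * lock for h_free_crq, so any change made while the lock is
		 * released can be detected and accounted for afterwards.
		 */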
		state_under_lock = vscsi->new_state;
		flags_under_lock = vscsi->flags;
		vscsi->phyp_acr_state = 0;
		vscsi->phyp_acr_flags = 0;

		spin_unlock_bh(&vscsi->intr_lock);
		rc = ibmvscsis_unregister_command_q(vscsi);
		spin_lock_bh(&vscsi->intr_lock);

		if (state_under_lock != vscsi->new_state)
			vscsi->phyp_acr_state = vscsi->new_state;

		vscsi->phyp_acr_flags = ((~flags_under_lock) & vscsi->flags);

		if (rc == ADAPT_SUCCESS) {
			bytes = vscsi->cmd_q.size * PAGE_SIZE;
			memset(vscsi->cmd_q.base_addr, 0, bytes);
			vscsi->cmd_q.index = 0;
			vscsi->flags |= CRQ_CLOSED;

			ibmvscsis_delete_client_info(vscsi, false);
		}
	}

	dev_dbg(&vscsi->dev, "free_command_q: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
		vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
		vscsi->phyp_acr_state);

/**
 * ibmvscsis_cmd_q_dequeue() - Get valid Command element
 * @mask:		Mask to use in case index wraps
 * @current_index:	Current index into command queue
 * @base_addr:		Pointer to start of command queue
 *
 * Returns a pointer to a valid command element or NULL, if the command
 * queue is empty.
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt environment, interrupt lock held
 */
static struct viosrp_crq *ibmvscsis_cmd_q_dequeue(uint mask,
						  uint *current_index,
						  struct viosrp_crq *base_addr)
{
	struct viosrp_crq *ptr;

	ptr = base_addr + *current_index;

	*current_index = (*current_index + 1) & mask;

/**
 * ibmvscsis_send_init_message() - send initialize message to the client
 * @vscsi:	Pointer to our adapter structure
 * @format:	Which Init Message format to send
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt environment, interrupt lock held
 */
static long ibmvscsis_send_init_message(struct scsi_info *vscsi, u8 format)
{
	struct viosrp_crq *crq;
	u64 buffer[2] = { 0, 0 };
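
	/*
	 * Build the init message in a local buffer and hand it to phyp as
	 * the two 64-bit halves of a CRQ element via h_send_crq.
	 */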
	crq = (struct viosrp_crq *)&buffer;
	crq->valid = VALID_INIT_MSG;
	crq->format = format;
	rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]),
			cpu_to_be64(buffer[MSG_LOW]));

/**
 * ibmvscsis_check_init_msg() - Check init message valid
 * @vscsi:	Pointer to our adapter structure
 * @format:	Pointer to return format of Init Message, if any.
 *		Set to UNUSED_FORMAT if no Init Message in queue.
 *
 * Checks if an initialize message was queued by the initiator
 * after the queue was created and before the interrupt was enabled.
 *
 * EXECUTION ENVIRONMENT:
 *	Process level only, interrupt lock held
 */
static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format)
{
	struct viosrp_crq *crq;
	long rc = ADAPT_SUCCESS;

	crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask, &vscsi->cmd_q.index,
				      vscsi->cmd_q.base_addr);
	if (!crq) {
		*format = (uint)UNUSED_FORMAT;
	} else if (crq->valid == VALID_INIT_MSG && crq->format == INIT_MSG) {
		*format = (uint)INIT_MSG;
		crq->valid = INVALIDATE_CMD_RESP_EL;

		/*
		 * the caller has ensured no initialize message was
		 * sent after the queue was created, so there should be
		 * no other message on the queue.
		 */
		crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask,
					      vscsi->cmd_q.base_addr);
			*format = (uint)(crq->format);
			crq->valid = INVALIDATE_CMD_RESP_EL;

		*format = (uint)(crq->format);
		crq->valid = INVALIDATE_CMD_RESP_EL;

/**
 * ibmvscsis_disconnect() - Helper function to disconnect
 * @work:	Pointer to work_struct, gives access to our adapter structure
 *
 * An error has occurred or the driver received a Transport event,
 * and the driver is requesting that the command queue be de-registered
 * in a safe manner. If there is no outstanding I/O then we can stop the
 * queue. If we are restarting the queue it will be reflected in the
 * state of the adapter.
 *
 * EXECUTION ENVIRONMENT:
 *	Process environment
 */
static void ibmvscsis_disconnect(struct work_struct *work)
{
	struct scsi_info *vscsi = container_of(work, struct scsi_info,
					       proc_work);
	bool wait_idle = false;

	spin_lock_bh(&vscsi->intr_lock);
	new_state = vscsi->new_state;
	vscsi->new_state = 0;

	vscsi->flags |= DISCONNECT_SCHEDULED;
	vscsi->flags &= ~SCHEDULE_DISCONNECT;

	dev_dbg(&vscsi->dev, "disconnect: flags 0x%x, state 0x%hx\n",
		vscsi->flags, vscsi->state);

	/*
	 * check which state we are in and see if we
	 * should transition to the new state
	 */
	switch (vscsi->state) {
	/* Should never be called while in this state. */
	/*
	 * Can never transition from this state;
	 * ignore errors and logout.
	 */

	/* can transition from this state to UNCONFIGURING */
		if (new_state == UNCONFIGURING)
			vscsi->state = new_state;

	/*
	 * Can transition from this state to unconfiguring
	 */
	case ERR_DISCONNECT_RECONNECT:
			vscsi->state = new_state;

	/* can transition from this state to UNCONFIGURING */
	case ERR_DISCONNECTED:
		if (new_state == UNCONFIGURING)
			vscsi->state = new_state;

		vscsi->state = new_state;
		vscsi->flags |= RESPONSE_Q_DOWN;
		vscsi->flags &= ~(SCHEDULE_DISCONNECT |
				  DISCONNECT_SCHEDULED);

		if (vscsi->flags & CFG_SLEEPING) {
			vscsi->flags &= ~CFG_SLEEPING;
			complete(&vscsi->unconfig);

	/* should never happen */
	case ERR_DISCONNECT_RECONNECT:
		dev_err(&vscsi->dev, "disconnect: invalid state %d for WAIT_IDLE\n",

		vscsi->flags |= RESPONSE_Q_DOWN;
		vscsi->state = new_state;
		vscsi->flags &= ~(SCHEDULE_DISCONNECT |
				  DISCONNECT_SCHEDULED);
		ibmvscsis_free_command_q(vscsi);

	case ERR_DISCONNECT_RECONNECT:
		vscsi->state = new_state;

	/*
	 * Initiator has not done a successful srp login
	 * or has done a successful srp logout (adapter was not
	 * busy). In the first case there can be responses queued
	 * waiting for space on the initiator's response queue (MAD).
	 * In the second case the adapter is idle. Assume the worst case,
	 * i.e. the second case.
	 */
	case WAIT_CONNECTION:
		vscsi->state = new_state;

	/* can transition from this state to UNCONFIGURING */
		if (new_state == UNCONFIGURING)
			vscsi->state = new_state;

	dev_dbg(&vscsi->dev, "disconnect start wait, active %d, sched %d\n",
		(int)list_empty(&vscsi->active_q),
		(int)list_empty(&vscsi->schedule_q));
	if (!list_empty(&vscsi->active_q) ||
	    !list_empty(&vscsi->schedule_q)) {
		vscsi->flags |= WAIT_FOR_IDLE;
		dev_dbg(&vscsi->dev, "disconnect flags 0x%x\n",
		/*
		 * This routine cannot be called with the interrupt
		 * lock held.
		 */
		spin_unlock_bh(&vscsi->intr_lock);
		wait_for_completion(&vscsi->wait_idle);
		spin_lock_bh(&vscsi->intr_lock);

		dev_dbg(&vscsi->dev, "disconnect stop wait\n");

		ibmvscsis_adapter_idle(vscsi);

	spin_unlock_bh(&vscsi->intr_lock);

/**
 * ibmvscsis_post_disconnect() - Schedule the disconnect
 * @vscsi:	Pointer to our adapter structure
 * @new_state:	State to move to after disconnecting
 * @flag_bits:	Flags to turn on in adapter structure
 *
 * If it's already been scheduled, then see if we need to "upgrade"
 * the new state (if the one passed in is more "severe" than the
 * previous one).
 *
 *	interrupt lock is held
 */
static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
				      uint flag_bits)
{
	/* check the validity of the new state */
	case ERR_DISCONNECT_RECONNECT:
		dev_err(&vscsi->dev, "post_disconnect: Invalid new state %d\n",

	vscsi->flags |= flag_bits;

	dev_dbg(&vscsi->dev, "post_disconnect: new_state 0x%x, flag_bits 0x%x, vscsi->flags 0x%x, state %hx\n",
		new_state, flag_bits, vscsi->flags, vscsi->state);

	if (!(vscsi->flags & (DISCONNECT_SCHEDULED | SCHEDULE_DISCONNECT))) {
		vscsi->flags |= SCHEDULE_DISCONNECT;
		vscsi->new_state = new_state;

		INIT_WORK(&vscsi->proc_work, ibmvscsis_disconnect);
		(void)queue_work(vscsi->work_q, &vscsi->proc_work);

		if (vscsi->new_state)
			state = vscsi->new_state;
			state = vscsi->state;

		case ERR_DISCONNECTED:
			if (new_state == UNCONFIGURING)
				vscsi->new_state = new_state;
		case ERR_DISCONNECT_RECONNECT:
			vscsi->new_state = new_state;
		case WAIT_CONNECTION:
			vscsi->new_state = new_state;

	dev_dbg(&vscsi->dev, "Leaving post_disconnect: flags 0x%x, new_state 0x%x\n",
		vscsi->flags, vscsi->new_state);

/**
 * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message
 * @vscsi:	Pointer to our adapter structure
 *
 * Must be called with interrupt lock held.
 */
static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi)
{
	long rc = ADAPT_SUCCESS;

	switch (vscsi->state) {
	case ERR_DISCONNECT_RECONNECT:
	case ERR_DISCONNECTED:

	case WAIT_CONNECTION:
		vscsi->state = CONNECTED;

		dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n",
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);

/**
 * ibmvscsis_handle_init_msg() - Respond to an Init Message
 * @vscsi:	Pointer to our adapter structure
 *
 * Must be called with interrupt lock held.
 */
static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi)
{
	long rc = ADAPT_SUCCESS;

	switch (vscsi->state) {
	case WAIT_CONNECTION:
		rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
			vscsi->state = CONNECTED;

			dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
			ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);

			dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
			ibmvscsis_post_disconnect(vscsi,
						  ERR_DISCONNECT_RECONNECT, 0);

			dev_warn(&vscsi->dev, "init_msg: failed to send, rc %ld\n",

	case ERR_DISCONNECT_RECONNECT:
	case ERR_DISCONNECTED:
		dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n",
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);

/**
 * ibmvscsis_init_msg() - Respond to an init message
 * @vscsi:	Pointer to our adapter structure
 * @crq:	Pointer to CRQ element containing the Init Message
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, interrupt lock held
 */
static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
{
	long rc = ADAPT_SUCCESS;

	dev_dbg(&vscsi->dev, "init_msg: state 0x%hx\n", vscsi->state);

	rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
		      (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,

	if (rc == H_SUCCESS) {
		vscsi->client_data.partition_number =
			be64_to_cpu(*(u64 *)vscsi->map_buf);
		dev_dbg(&vscsi->dev, "init_msg, part num %d\n",
			vscsi->client_data.partition_number);
		dev_dbg(&vscsi->dev, "init_msg h_vioctl rc %ld\n", rc);
	}

	if (crq->format == INIT_MSG) {
		rc = ibmvscsis_handle_init_msg(vscsi);
	} else if (crq->format == INIT_COMPLETE_MSG) {
		rc = ibmvscsis_handle_init_compl_msg(vscsi);
		dev_err(&vscsi->dev, "init_msg: invalid format %d\n",
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);

/**
 * ibmvscsis_establish_new_q() - Establish new CRQ queue
 * @vscsi:	Pointer to our adapter structure
 *
 * Must be called with interrupt lock held.
 */
static long ibmvscsis_establish_new_q(struct scsi_info *vscsi)
{
	long rc = ADAPT_SUCCESS;

	rc = h_vioctl(vscsi->dds.unit_id, H_ENABLE_PREPARE_FOR_SUSPEND, 30000,
		vscsi->flags |= PREP_FOR_SUSPEND_ENABLED;
	else if (rc != H_NOT_FOUND)
		dev_err(&vscsi->dev, "Error from Enable Prepare for Suspend: %ld\n",

	vscsi->flags &= PRESERVE_FLAG_FIELDS;
	vscsi->rsp_q_timer.timer_pops = 0;

	rc = vio_enable_interrupts(vscsi->dma_dev);
		dev_warn(&vscsi->dev, "establish_new_q: failed to enable interrupts, rc %ld\n",

	rc = ibmvscsis_check_init_msg(vscsi, &format);
		dev_err(&vscsi->dev, "establish_new_q: check_init_msg failed, rc %ld\n",

	if (format == UNUSED_FORMAT) {
		rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
			vscsi->state = UNDEFINED;
	} else if (format == INIT_MSG) {
		rc = ibmvscsis_handle_init_msg(vscsi);

/**
 * ibmvscsis_reset_queue() - Reset CRQ Queue
 * @vscsi:	Pointer to our adapter structure
 *
 * This function calls h_free_q and then calls h_reg_q and does all
 * of the bookkeeping to get us back to where we can communicate.
 *
 * Actually, we don't always call h_free_crq. A problem was discovered
 * where one partition would close and reopen its queue, which would
 * cause its partner to get a transport event, which would cause it to
 * close and reopen its queue, which would cause the original partition
 * to get a transport event, etc., etc. To prevent this, we don't
 * actually close our queue if the client initiated the reset (i.e.
 * either we got a transport event or we have detected that the client's
 * queue is down).
 *
 * EXECUTION ENVIRONMENT:
 *	Process environment, called with interrupt lock held
 */
static void ibmvscsis_reset_queue(struct scsi_info *vscsi)
{
	long rc = ADAPT_SUCCESS;

	dev_dbg(&vscsi->dev, "reset_queue: flags 0x%x\n", vscsi->flags);

	/* don't reset, the client did it for us */
	if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) {
		vscsi->flags &= PRESERVE_FLAG_FIELDS;
		vscsi->rsp_q_timer.timer_pops = 0;
		vscsi->state = WAIT_CONNECTION;
		vio_enable_interrupts(vscsi->dma_dev);
	} else {
		rc = ibmvscsis_free_command_q(vscsi);
		if (rc == ADAPT_SUCCESS) {
			vscsi->state = WAIT_CONNECTION;

			bytes = vscsi->cmd_q.size * PAGE_SIZE;
			rc = h_reg_crq(vscsi->dds.unit_id,
				       vscsi->cmd_q.crq_token, bytes);
			if (rc == H_CLOSED || rc == H_SUCCESS) {
				rc = ibmvscsis_establish_new_q(vscsi);
			}

			if (rc != ADAPT_SUCCESS) {
				dev_dbg(&vscsi->dev, "reset_queue: reg_crq rc %ld\n",

				vscsi->state = ERR_DISCONNECTED;
				vscsi->flags |= RESPONSE_Q_DOWN;
				ibmvscsis_free_command_q(vscsi);
			}
		} else {
			vscsi->state = ERR_DISCONNECTED;
			vscsi->flags |= RESPONSE_Q_DOWN;

/**
 * ibmvscsis_free_cmd_resources() - Free command resources
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Command which is no longer in use
 *
 * Must be called with interrupt lock held.
 */
static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi,
					 struct ibmvscsis_cmd *cmd)
{
	struct iu_entry *iue = cmd->iue;

	case TASK_MANAGEMENT:
		/*
		 * When the queue goes down this value is cleared, so it
		 * cannot be cleared in this general purpose function.
		 */
		vscsi->flags &= ~PROCESSING_MAD;

		dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n",

	list_add_tail(&cmd->list, &vscsi->free_cmd);

	if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) &&
	    list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) {
		vscsi->flags &= ~WAIT_FOR_IDLE;
		complete(&vscsi->wait_idle);

/**
 * ibmvscsis_ready_for_suspend() - Helper function to call VIOCTL
 * @vscsi:	Pointer to our adapter structure
 * @idle:	Indicates whether we were called from adapter_idle. This
 *		is important to know if we need to do a disconnect, since if
 *		we're called from adapter_idle, we're still processing the
 *		current disconnect, so we can't just call post_disconnect.
 *
 * This function is called when the adapter is idle when phyp has sent
 * us a Prepare for Suspend Transport Event.
 *
 * EXECUTION ENVIRONMENT:
 *	Process or interrupt environment called with interrupt lock held
 */
static long ibmvscsis_ready_for_suspend(struct scsi_info *vscsi, bool idle)
{
	struct viosrp_crq *crq;

	/* See if there is a Resume event in the queue */
	crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;

	dev_dbg(&vscsi->dev, "ready_suspend: flags 0x%x, state 0x%hx crq_valid:%x\n",
		vscsi->flags, vscsi->state, (int)crq->valid);

	if (!(vscsi->flags & PREP_FOR_SUSPEND_ABORTED) && !(crq->valid)) {
		rc = h_vioctl(vscsi->dds.unit_id, H_READY_FOR_SUSPEND, 0, 0, 0,
			dev_err(&vscsi->dev, "Ready for Suspend Vioctl failed: %ld\n",
	} else if (((vscsi->flags & PREP_FOR_SUSPEND_OVERWRITE) &&
		    (vscsi->flags & PREP_FOR_SUSPEND_ABORTED)) ||
		   ((crq->valid) && ((crq->valid != VALID_TRANS_EVENT) ||
				     (crq->format != RESUME_FROM_SUSP)))) {
			vscsi->state = ERR_DISCONNECT_RECONNECT;
			ibmvscsis_reset_queue(vscsi);
		} else if (vscsi->state == CONNECTED) {
			ibmvscsis_post_disconnect(vscsi,
						  ERR_DISCONNECT_RECONNECT, 0);

		vscsi->flags &= ~PREP_FOR_SUSPEND_OVERWRITE;

		if ((crq->valid) && ((crq->valid != VALID_TRANS_EVENT) ||
				     (crq->format != RESUME_FROM_SUSP)))
			dev_err(&vscsi->dev, "Invalid element in CRQ after Prepare for Suspend");
	}

	vscsi->flags &= ~(PREP_FOR_SUSPEND_PENDING | PREP_FOR_SUSPEND_ABORTED);

/**
 * ibmvscsis_trans_event() - Handle a Transport Event
 * @vscsi:	Pointer to our adapter structure
 * @crq:	Pointer to CRQ entry containing the Transport Event
 *
 * Do the logic to close the I_T nexus. This function may not
 * behave to specification.
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, interrupt lock held
 */
static long ibmvscsis_trans_event(struct scsi_info *vscsi,
				  struct viosrp_crq *crq)
{
	long rc = ADAPT_SUCCESS;

	dev_dbg(&vscsi->dev, "trans_event: format %d, flags 0x%x, state 0x%hx\n",
		(int)crq->format, vscsi->flags, vscsi->state);

	switch (crq->format) {
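	/*
	 * MIGRATED, PARTNER_FAILED and PARTNER_DEREGISTER all mean the
	 * partner side of the connection is gone; tear down the nexus
	 * according to the current adapter state.
	 */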
	case PARTNER_FAILED:
	case PARTNER_DEREGISTER:
		ibmvscsis_delete_client_info(vscsi, true);
		if (crq->format == MIGRATED)
			vscsi->flags &= ~PREP_FOR_SUSPEND_OVERWRITE;
		switch (vscsi->state) {
		case ERR_DISCONNECTED:
			vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT);

		case WAIT_CONNECTION:
			ibmvscsis_post_disconnect(vscsi, WAIT_IDLE,

		case SRP_PROCESSING:
			if ((vscsi->debit > 0) ||
			    !list_empty(&vscsi->schedule_q) ||
			    !list_empty(&vscsi->waiting_rsp) ||
			    !list_empty(&vscsi->active_q)) {
				dev_dbg(&vscsi->dev, "debit %d, sched %d, wait %d, active %d\n",
					(int)list_empty(&vscsi->schedule_q),
					(int)list_empty(&vscsi->waiting_rsp),
					(int)list_empty(&vscsi->active_q));
				dev_warn(&vscsi->dev, "connection lost with outstanding work\n");
				dev_dbg(&vscsi->dev, "trans_event: SRP Processing, but no outstanding work\n");
			}

			ibmvscsis_post_disconnect(vscsi, WAIT_IDLE,

		case ERR_DISCONNECT:
		case ERR_DISCONNECT_RECONNECT:
			vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT);

	case PREPARE_FOR_SUSPEND:
		dev_dbg(&vscsi->dev, "Prep for Suspend, crq status = 0x%x\n",
		switch (vscsi->state) {
		case ERR_DISCONNECTED:
		case WAIT_CONNECTION:
			ibmvscsis_ready_for_suspend(vscsi, false);
		case SRP_PROCESSING:
			vscsi->resume_state = vscsi->state;
			vscsi->flags |= PREP_FOR_SUSPEND_PENDING;
			if (crq->status == CRQ_ENTRY_OVERWRITTEN)
				vscsi->flags |= PREP_FOR_SUSPEND_OVERWRITE;
			ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, 0);
		case ERR_DISCONNECT:
		case ERR_DISCONNECT_RECONNECT:
			dev_err(&vscsi->dev, "Invalid state for Prepare for Suspend Trans Event: 0x%x\n",

	case RESUME_FROM_SUSP:
		dev_dbg(&vscsi->dev, "Resume from Suspend, crq status = 0x%x\n",
		if (vscsi->flags & PREP_FOR_SUSPEND_PENDING) {
			vscsi->flags |= PREP_FOR_SUSPEND_ABORTED;
			if ((crq->status == CRQ_ENTRY_OVERWRITTEN) ||
			    (vscsi->flags & PREP_FOR_SUSPEND_OVERWRITE)) {
				ibmvscsis_post_disconnect(vscsi,
							  ERR_DISCONNECT_RECONNECT,
				vscsi->flags &= ~PREP_FOR_SUSPEND_OVERWRITE;

		dev_err(&vscsi->dev, "trans_event: invalid format %d\n",
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT,

	rc = vscsi->flags & SCHEDULE_DISCONNECT;

	dev_dbg(&vscsi->dev, "Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n",
		vscsi->flags, vscsi->state, rc);

/**
 * ibmvscsis_poll_cmd_q() - Poll Command Queue
 * @vscsi:	Pointer to our adapter structure
 *
 * Called to handle command elements that may have arrived while
 * interrupts were disabled.
 *
 * EXECUTION ENVIRONMENT:
 *	intr_lock must be held
 */
static void ibmvscsis_poll_cmd_q(struct scsi_info *vscsi)
{
	struct viosrp_crq *crq;

	dev_dbg(&vscsi->dev, "poll_cmd_q: flags 0x%x, state 0x%hx, q index %ud\n",
		vscsi->flags, vscsi->state, vscsi->cmd_q.index);

	rc = vscsi->flags & SCHEDULE_DISCONNECT;
	crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;

		vscsi->cmd_q.index =
			(vscsi->cmd_q.index + 1) & vscsi->cmd_q.mask;

			rc = ibmvscsis_parse_command(vscsi, crq);
		if ((uint)crq->valid == VALID_TRANS_EVENT) {
			/*
			 * must service the transport layer events even
			 * in an error state, don't break out until all
			 * the consecutive transport events have been
			 * processed
			 */
			rc = ibmvscsis_trans_event(vscsi, crq);
		} else if (vscsi->flags & TRANS_EVENT) {
			/*
			 * if a transport event has occurred, leave
			 * everything but transport events on the queue
			 */
			dev_dbg(&vscsi->dev, "poll_cmd_q, ignoring\n");

			/*
			 * need to decrement the queue index so we can
			 * look at the element again
			 */
			if (vscsi->cmd_q.index)
				vscsi->cmd_q.index -= 1;
				/*
				 * index is at 0, it just wrapped;
				 * have it index the last element in the q
				 */
				vscsi->cmd_q.index = vscsi->cmd_q.mask;

		crq->valid = INVALIDATE_CMD_RESP_EL;

		crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;

		vio_enable_interrupts(vscsi->dma_dev);
		dev_dbg(&vscsi->dev, "poll_cmd_q, reenabling interrupts\n");

	dev_dbg(&vscsi->dev, "Leaving poll_cmd_q: rc %ld\n", rc);

/**
 * ibmvscsis_free_cmd_qs() - Free elements in queue
 * @vscsi:	Pointer to our adapter structure
 *
 * Free all of the elements on all queues that are waiting for
 * a response to be sent.
 *
 * Called with interrupt lock held
 */
static void ibmvscsis_free_cmd_qs(struct scsi_info *vscsi)
{
	struct ibmvscsis_cmd *cmd, *nxt;

	dev_dbg(&vscsi->dev, "free_cmd_qs: waiting_rsp empty %d, timer started %d\n",
		(int)list_empty(&vscsi->waiting_rsp),
		vscsi->rsp_q_timer.started);

	list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
		list_del(&cmd->list);
		ibmvscsis_free_cmd_resources(vscsi, cmd);
	}
}

/**
 * ibmvscsis_get_free_cmd() - Get free command from list
 * @vscsi:	Pointer to our adapter structure
 *
 * Must be called with interrupt lock held.
 */
static struct ibmvscsis_cmd *ibmvscsis_get_free_cmd(struct scsi_info *vscsi)
{
	struct ibmvscsis_cmd *cmd = NULL;
	struct iu_entry *iue;

	iue = srp_iu_get(&vscsi->target);
		cmd = list_first_entry_or_null(&vscsi->free_cmd,
					       struct ibmvscsis_cmd, list);
			cmd->abort_cmd = NULL;
			cmd->flags &= ~(DELAY_SEND);
			list_del(&cmd->list);
			cmd->type = UNSET_TYPE;
			memset(&cmd->se_cmd, 0, sizeof(cmd->se_cmd));

/**
 * ibmvscsis_adapter_idle() - Helper function to handle idle adapter
 * @vscsi:	Pointer to our adapter structure
 *
 * This function is called when the adapter is idle when the driver
 * is attempting to clear an error condition.
 * The adapter is considered busy if any of its cmd queues
 * are non-empty. This function can be invoked
 * from the off level disconnect function.
 *
 * EXECUTION ENVIRONMENT:
 *	Process environment called with interrupt lock held
 */
static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
{
	int free_qs = false;

	dev_dbg(&vscsi->dev, "adapter_idle: flags 0x%x, state 0x%hx\n",
		vscsi->flags, vscsi->state);

	/* Only need to free qs if we're disconnecting from client */
	if (vscsi->state != WAIT_CONNECTION || vscsi->flags & TRANS_EVENT)

	switch (vscsi->state) {
		ibmvscsis_free_command_q(vscsi);
		if (vscsi->flags & CFG_SLEEPING) {
			vscsi->flags &= ~CFG_SLEEPING;
			complete(&vscsi->unconfig);
		}

	case ERR_DISCONNECT_RECONNECT:
		ibmvscsis_reset_queue(vscsi);
		dev_dbg(&vscsi->dev, "adapter_idle, disc_rec: flags 0x%x\n",

	case ERR_DISCONNECT:
		ibmvscsis_free_command_q(vscsi);
		vscsi->flags &= ~(SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED);
		vscsi->flags |= RESPONSE_Q_DOWN;
		if (vscsi->tport.enabled)
			vscsi->state = ERR_DISCONNECTED;
			vscsi->state = WAIT_ENABLED;
		dev_dbg(&vscsi->dev, "adapter_idle, disc: flags 0x%x, state 0x%hx\n",
			vscsi->flags, vscsi->state);

		vscsi->rsp_q_timer.timer_pops = 0;

		if (vscsi->flags & PREP_FOR_SUSPEND_PENDING) {
			vscsi->state = vscsi->resume_state;
			vscsi->resume_state = 0;
			rc = ibmvscsis_ready_for_suspend(vscsi, true);
			vscsi->flags &= ~DISCONNECT_SCHEDULED;
		} else if (vscsi->flags & TRANS_EVENT) {
			vscsi->state = WAIT_CONNECTION;
			vscsi->flags &= PRESERVE_FLAG_FIELDS;
			vscsi->state = CONNECTED;
			vscsi->flags &= ~DISCONNECT_SCHEDULED;
		}

		dev_dbg(&vscsi->dev, "adapter_idle, wait: flags 0x%x, state 0x%hx\n",
			vscsi->flags, vscsi->state);
		ibmvscsis_poll_cmd_q(vscsi);

	case ERR_DISCONNECTED:
		vscsi->flags &= ~DISCONNECT_SCHEDULED;
		dev_dbg(&vscsi->dev, "adapter_idle, disconnected: flags 0x%x, state 0x%hx\n",
			vscsi->flags, vscsi->state);

		dev_err(&vscsi->dev, "adapter_idle: in invalid state %d\n",

		ibmvscsis_free_cmd_qs(vscsi);

	/*
	 * There is a timing window where we could lose a disconnect request.
	 * The known path to this window occurs during the DISCONNECT_RECONNECT
	 * case above: reset_queue calls free_command_q, which will release the
	 * interrupt lock. During that time, a new post_disconnect call can be
	 * made with a "more severe" state (DISCONNECT or UNCONFIGURING).
	 * Because the DISCONNECT_SCHEDULED flag is already set, post_disconnect
	 * will only set the new_state. Now free_command_q reacquires the intr
	 * lock and clears the DISCONNECT_SCHEDULED flag (using PRESERVE_FLAG_
	 * FIELDS), and the disconnect is lost. This is particularly bad when
	 * the new disconnect was for UNCONFIGURING, since the unconfigure hangs.
	 *
	 * The fix is that free_command_q sets the acr state and acr flags if
	 * there is a change while the lock is dropped. Note that free_command_q
	 * writes this state and clears it before releasing the lock; callers
	 * reach free_command_q at different times, so don't initialize it above.
	 */
	if (vscsi->phyp_acr_state != 0) {
		/*
		 * set any bits in flags that may have been cleared by
		 * a call to free_command_q in the switch statement
		 */
		vscsi->flags |= vscsi->phyp_acr_flags;
		ibmvscsis_post_disconnect(vscsi, vscsi->phyp_acr_state, 0);
		vscsi->phyp_acr_state = 0;
		vscsi->phyp_acr_flags = 0;

		dev_dbg(&vscsi->dev, "adapter_idle: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
			vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
			vscsi->phyp_acr_state);
	}

	dev_dbg(&vscsi->dev, "Leaving adapter_idle: flags 0x%x, state 0x%hx, new_state 0x%x\n",
		vscsi->flags, vscsi->state, vscsi->new_state);

/**
 * ibmvscsis_copy_crq_packet() - Copy CRQ Packet
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Pointer to command element to use to process the request
 * @crq:	Pointer to CRQ entry containing the request
 *
 * Copy the srp information unit from the hosted
 * partition using remote dma
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, interrupt lock held
 */
static long ibmvscsis_copy_crq_packet(struct scsi_info *vscsi,
				      struct ibmvscsis_cmd *cmd,
				      struct viosrp_crq *crq)
{
	struct iu_entry *iue = cmd->iue;

	len = be16_to_cpu(crq->IU_length);
	if ((len > SRP_MAX_IU_LEN) || (len == 0)) {
		dev_err(&vscsi->dev, "copy_crq: Invalid len %d passed", len);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		return SRP_VIOLATION;
	}

	rc = h_copy_rdma(len, vscsi->dds.window[REMOTE].liobn,
			 be64_to_cpu(crq->IU_data_ptr),
			 vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma);

		cmd->init_time = mftb();
		iue->remote_token = crq->IU_data_ptr;

		dev_dbg(&vscsi->dev, "copy_crq: ioba 0x%llx, init_time 0x%llx\n",
			be64_to_cpu(crq->IU_data_ptr), cmd->init_time);

		if (connection_broken(vscsi))
			ibmvscsis_post_disconnect(vscsi,
						  ERR_DISCONNECT_RECONNECT,
			ibmvscsis_post_disconnect(vscsi,
						  ERR_DISCONNECT_RECONNECT, 0);
		dev_err(&vscsi->dev, "copy_crq: h_copy_rdma failed, rc %ld\n",

		dev_err(&vscsi->dev, "copy_crq: h_copy_rdma failed, rc %ld\n",
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);

/**
 * ibmvscsis_adapter_info() - Service an Adapter Info MAnagement Data gram
 * @vscsi:	Pointer to our adapter structure
 * @iue:	Information Unit containing the Adapter Info MAD request
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, adapter lock is held
 */
static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
				   struct iu_entry *iue)
{
	struct viosrp_adapter_info *mad = &vio_iu(iue)->mad.adapter_info;
	struct mad_adapter_info_data *info;

	mad->common.status = cpu_to_be16(VIOSRP_MAD_SUCCESS);

	if (be16_to_cpu(mad->common.length) > sizeof(*info)) {
		mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
	}

	info = dma_alloc_coherent(&vscsi->dma_dev->dev, sizeof(*info), &token,
		dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
		mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);

	/* Get remote info */
	rc = h_copy_rdma(be16_to_cpu(mad->common.length),
			 vscsi->dds.window[REMOTE].liobn,
			 be64_to_cpu(mad->buffer),
			 vscsi->dds.window[LOCAL].liobn, token);

	if (rc != H_SUCCESS) {
		if (rc == H_PERMISSION) {
			if (connection_broken(vscsi))
				flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
		}
		dev_warn(&vscsi->dev, "adapter_info: h_copy_rdma from client failed, rc %ld\n",
		dev_dbg(&vscsi->dev, "adapter_info: ioba 0x%llx, flags 0x%x, flag_bits 0x%x\n",
			be64_to_cpu(mad->buffer), vscsi->flags, flag_bits);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
	}

	/*
	 * Copy client info, but ignore partition number, which we
	 * already got from phyp - unless we failed to get it from
	 * phyp (e.g. if we're running on a p5 system).
	 */
	if (vscsi->client_data.partition_number == 0)
		vscsi->client_data.partition_number =
			be32_to_cpu(info->partition_number);
	strncpy(vscsi->client_data.srp_version, info->srp_version,
		sizeof(vscsi->client_data.srp_version));
	strncpy(vscsi->client_data.partition_name, info->partition_name,
		sizeof(vscsi->client_data.partition_name));
	vscsi->client_data.mad_version = be32_to_cpu(info->mad_version);
	vscsi->client_data.os_type = be32_to_cpu(info->os_type);

	strncpy(info->srp_version, SRP_VERSION,
		sizeof(info->srp_version));
	strncpy(info->partition_name, vscsi->dds.partition_name,
		sizeof(info->partition_name));
	info->partition_number = cpu_to_be32(vscsi->dds.partition_num);
	info->mad_version = cpu_to_be32(MAD_VERSION_1);
	info->os_type = cpu_to_be32(LINUX);
	memset(&info->port_max_txu[0], 0, sizeof(info->port_max_txu));
	info->port_max_txu[0] = cpu_to_be32(MAX_TXU);
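
	/* Copy our adapter info back to the client's MAD buffer. */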
	rc = h_copy_rdma(sizeof(*info), vscsi->dds.window[LOCAL].liobn,
			 token, vscsi->dds.window[REMOTE].liobn,
			 be64_to_cpu(mad->buffer));

		if (connection_broken(vscsi))
			flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);

		dev_err(&vscsi->dev, "adapter_info: h_copy_rdma to client failed, rc %ld\n",
		ibmvscsis_post_disconnect(vscsi,
					  ERR_DISCONNECT_RECONNECT,

	dma_free_coherent(&vscsi->dma_dev->dev, sizeof(*info), info, token);
	dev_dbg(&vscsi->dev, "Leaving adapter_info, rc %ld\n", rc);

/**
 * ibmvscsis_cap_mad() - Service a Capabilities MAnagement Data gram
 * @vscsi:	Pointer to our adapter structure
 * @iue:	Information Unit containing the Capabilities MAD request
 *
 * NOTE: if you return an error from this routine you must be
 * disconnecting or you will cause a hang
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, called with adapter lock held
 */
static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
{
	struct viosrp_capabilities *mad = &vio_iu(iue)->mad.capabilities;
	struct capabilities *cap;
	struct mad_capability_common *common;
	u16 olen, len, status, min_len, cap_len;

	olen = be16_to_cpu(mad->common.length);
	/*
	 * struct capabilities hardcodes a couple of capabilities after the
	 * header, but the capabilities can actually be in any order.
	 */
	min_len = offsetof(struct capabilities, migration);
	if ((olen < min_len) || (olen > PAGE_SIZE)) {
		dev_warn(&vscsi->dev, "cap_mad: invalid len %d\n", olen);
		mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
	}

	cap = dma_alloc_coherent(&vscsi->dma_dev->dev, olen, &token,
		dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
		mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);

	rc = h_copy_rdma(olen, vscsi->dds.window[REMOTE].liobn,
			 be64_to_cpu(mad->buffer),
			 vscsi->dds.window[LOCAL].liobn, token);
	if (rc == H_SUCCESS) {
		strncpy(cap->name, dev_name(&vscsi->dma_dev->dev),

		len = olen - min_len;
		status = VIOSRP_MAD_SUCCESS;
		common = (struct mad_capability_common *)&cap->migration;
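
		/*
		 * Walk each capability entry the client sent, validating its
		 * length and flagging any type we do not support.
		 */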
		while ((len > 0) && (status == VIOSRP_MAD_SUCCESS) && !rc) {
			dev_dbg(&vscsi->dev, "cap_mad: len left %hd, cap type %d, cap len %hd\n",
				len, be32_to_cpu(common->cap_type),
				be16_to_cpu(common->length));

			cap_len = be16_to_cpu(common->length);
			if (cap_len > len) {
				dev_err(&vscsi->dev, "cap_mad: cap len mismatch with total len\n");
				status = VIOSRP_MAD_FAILED;
			}

				dev_err(&vscsi->dev, "cap_mad: cap len is 0\n");
				status = VIOSRP_MAD_FAILED;

			switch (common->cap_type) {
				dev_dbg(&vscsi->dev, "cap_mad: unsupported capability\n");
				common->server_support = 0;
				flag = cpu_to_be32((u32)CAP_LIST_SUPPORTED);
				cap->flags &= ~flag;
			}

			len = len - cap_len;
			common = (struct mad_capability_common *)
				((char *)common + cap_len);
		}

		mad->common.status = cpu_to_be16(status);

		rc = h_copy_rdma(olen, vscsi->dds.window[LOCAL].liobn, token,
				 vscsi->dds.window[REMOTE].liobn,
				 be64_to_cpu(mad->buffer));

		if (rc != H_SUCCESS) {
			dev_dbg(&vscsi->dev, "cap_mad: failed to copy to client, rc %ld\n",

			if (rc == H_PERMISSION) {
				if (connection_broken(vscsi))
					flag_bits = (RESPONSE_Q_DOWN |

			dev_warn(&vscsi->dev, "cap_mad: error copying data to client, rc %ld\n",
			ibmvscsis_post_disconnect(vscsi,
						  ERR_DISCONNECT_RECONNECT,
		}
	}

	dma_free_coherent(&vscsi->dma_dev->dev, olen, cap, token);

	dev_dbg(&vscsi->dev, "Leaving cap_mad, rc %ld, client_cap 0x%x\n",
		rc, vscsi->client_cap);

/**
 * ibmvscsis_process_mad() - Service a MAnagement Data gram
 * @vscsi:	Pointer to our adapter structure
 * @iue:	Information Unit containing the MAD request
 *
 * Must be called with interrupt lock held.
 */
static long ibmvscsis_process_mad(struct scsi_info *vscsi, struct iu_entry *iue)
{
	struct mad_common *mad = (struct mad_common *)&vio_iu(iue)->mad;
	struct viosrp_empty_iu *empty;
	long rc = ADAPT_SUCCESS;

	switch (be32_to_cpu(mad->type)) {
	case VIOSRP_EMPTY_IU_TYPE:
		empty = &vio_iu(iue)->mad.empty_iu;
		vscsi->empty_iu_id = be64_to_cpu(empty->buffer);
		vscsi->empty_iu_tag = be64_to_cpu(empty->common.tag);
		mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
	case VIOSRP_ADAPTER_INFO_TYPE:
		rc = ibmvscsis_adapter_info(vscsi, iue);
	case VIOSRP_CAPABILITIES_TYPE:
		rc = ibmvscsis_cap_mad(vscsi, iue);
	case VIOSRP_ENABLE_FAST_FAIL:
		if (vscsi->state == CONNECTED) {
			vscsi->fast_fail = true;
			mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
			dev_warn(&vscsi->dev, "fast fail mad sent after login\n");
			mad->status = cpu_to_be16(VIOSRP_MAD_FAILED);
		}
		mad->status = cpu_to_be16(VIOSRP_MAD_NOT_SUPPORTED);

/**
 * srp_snd_msg_failed() - Handle an error when sending a response
 * @vscsi:	Pointer to our adapter structure
 * @rc:		The return code from the h_send_crq command
 *
 * Must be called with interrupt lock held.
 */
static void srp_snd_msg_failed(struct scsi_info *vscsi, long rc)
{
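	/*
	 * H_DROPPED means the client's response queue is full and the
	 * response will be retried later; any other failure marks the
	 * response queue as down.
	 */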
	if (rc != H_DROPPED) {
		ibmvscsis_free_cmd_qs(vscsi);

			vscsi->flags |= CLIENT_FAILED;

		/* don't flag the same problem multiple times */
		if (!(vscsi->flags & RESPONSE_Q_DOWN)) {
			vscsi->flags |= RESPONSE_Q_DOWN;
			if (!(vscsi->state & (ERR_DISCONNECT |
					      ERR_DISCONNECT_RECONNECT |
					      ERR_DISCONNECTED | UNDEFINED))) {
				dev_err(&vscsi->dev, "snd_msg_failed: setting RESPONSE_Q_DOWN, state 0x%hx, flags 0x%x, rc %ld\n",
					vscsi->state, vscsi->flags, rc);
				ibmvscsis_post_disconnect(vscsi,
							  ERR_DISCONNECT_RECONNECT, 0);
			}
		}
	}

	/*
	 * The response queue is full.
	 * If the server is processing SRP requests, i.e.
	 * the client has successfully done an
	 * SRP_LOGIN, then it will wait forever for room in
	 * the queue. However if the system admin
	 * is attempting to unconfigure the server then one
	 * or more children will be in a state where
	 * they are being removed. So if there is even one
	 * child being removed then the driver assumes
	 * the system admin is attempting to break the
	 * connection with the client and MAX_TIMER_POPS
	 */
	if ((vscsi->rsp_q_timer.timer_pops < MAX_TIMER_POPS) ||
	    (vscsi->state == SRP_PROCESSING)) {
		dev_dbg(&vscsi->dev, "snd_msg_failed: response queue full, flags 0x%x, timer started %d, pops %d\n",
			vscsi->flags, (int)vscsi->rsp_q_timer.started,
			vscsi->rsp_q_timer.timer_pops);

		/*
		 * Check if the timer is running; if it
		 * is not then start it up.
		 */
		if (!vscsi->rsp_q_timer.started) {
			if (vscsi->rsp_q_timer.timer_pops <
				kt = WAIT_NANO_SECONDS;
				/*
				 * slide the timeslice if the maximum
				 * timer pops have already happened
				 */
				kt = ktime_set(WAIT_SECONDS, 0);

			vscsi->rsp_q_timer.started = true;
			hrtimer_start(&vscsi->rsp_q_timer.timer, kt,

		/*
		 * TBD: Do we need to worry about this? Need to get
		 * waited a long time and it appears the system admin
		 * is bringing this driver down
		 */
		vscsi->flags |= RESPONSE_Q_DOWN;
		ibmvscsis_free_cmd_qs(vscsi);
		/*
		 * if the driver is already attempting to disconnect
		 * from the client and has already logged an error,
		 * trace this event but don't put it in the error log
		 */
		if (!(vscsi->state & (ERR_DISCONNECT |
				      ERR_DISCONNECT_RECONNECT |
				      ERR_DISCONNECTED | UNDEFINED))) {
			dev_err(&vscsi->dev, "client crq full too long\n");
			ibmvscsis_post_disconnect(vscsi,
						  ERR_DISCONNECT_RECONNECT,

/**
 * ibmvscsis_send_messages() - Send a Response
 * @vscsi:	Pointer to our adapter structure
 *
 * Send a response, first checking the waiting queue. Responses are
 * sent in the order they are received. If the response cannot be sent,
 * because the client queue is full, it stays on the waiting queue.
 *
 * Called with interrupt lock held
 */
static void ibmvscsis_send_messages(struct scsi_info *vscsi)
{
	/*
	 * note: do not attempt to access the IU_data_ptr with this pointer
	 */
	struct viosrp_crq *crq = (struct viosrp_crq *)&msg_hi;
	struct ibmvscsis_cmd *cmd, *nxt;
	long rc = ADAPT_SUCCESS;

	if (!(vscsi->flags & RESPONSE_Q_DOWN)) {
		list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp,
			/*
			 * Check to make sure the abort cmd gets processed
			 * prior to the abort tmr cmd
			 */
			if (cmd->flags & DELAY_SEND)

			if (cmd->abort_cmd) {
				cmd->abort_cmd->flags &= ~(DELAY_SEND);
				cmd->abort_cmd = NULL;
			}

			/*
			 * If CMD_T_ABORTED w/o CMD_T_TAS scenarios and
			 * the case where LIO issued a
			 * ABORT_TASK: Sending TMR_TASK_DOES_NOT_EXIST
			 * case then we don't send a response, since it
			 */
			if (cmd->se_cmd.transport_state & CMD_T_ABORTED &&
			    !(cmd->se_cmd.transport_state & CMD_T_TAS)) {
				list_del(&cmd->list);
				ibmvscsis_free_cmd_resources(vscsi,
				/*
				 * With a successfully aborted op
				 * through LIO we want to increment the
				 * vscsi credit so that when we don't
				 * send a rsp to the original scsi abort
				 * op (h_send_crq), but the tm rsp to
				 * the abort is sent, the credit is
				 * correctly sent with the abort tm rsp.
				 * We would need 1 for the abort tm rsp
				 * and 1 credit for the aborted scsi op.
				 * Thus we need to increment here.
				 * Also we want to increment the credit
				 * here because we want to make sure the
				 * cmd is actually released first,
				 * otherwise the client will think it
				 * can send a new cmd, and we could
				 * find ourselves short of cmd elements.
				 */
			}

			crq->valid = VALID_CMD_RESP_EL;
			crq->format = cmd->rsp.format;

			if (cmd->flags & CMD_FAST_FAIL)
				crq->status = VIOSRP_ADAPTER_FAIL;

			crq->IU_length = cpu_to_be16(cmd->rsp.len);

			rc = h_send_crq(vscsi->dma_dev->unit_address,
					be64_to_cpu(msg_hi),
					be64_to_cpu(cmd->rsp.tag));

			dev_dbg(&vscsi->dev, "send_messages: cmd %p, tag 0x%llx, rc %ld\n",
				cmd, be64_to_cpu(cmd->rsp.tag),

			/* if all ok, free up the command */
			if (rc == H_SUCCESS) {
				/* some movement has occurred */
				vscsi->rsp_q_timer.timer_pops = 0;
				list_del(&cmd->list);

				ibmvscsis_free_cmd_resources(vscsi,
				srp_snd_msg_failed(vscsi, rc);
			}
		}

		/*
		 * The timer could pop with the queue empty. If
		 * this happens, rc will always indicate a
		 * success; clear the pop count.
		 */
		vscsi->rsp_q_timer.timer_pops = 0;
	} else {
		ibmvscsis_free_cmd_qs(vscsi);
1981 static void ibmvscsis_send_mad_resp(struct scsi_info
*vscsi
,
1982 struct ibmvscsis_cmd
*cmd
,
1983 struct viosrp_crq
*crq
)
1985 struct iu_entry
*iue
= cmd
->iue
;
1986 struct mad_common
*mad
= (struct mad_common
*)&vio_iu(iue
)->mad
;
1991 rc
= h_copy_rdma(sizeof(struct mad_common
),
1992 vscsi
->dds
.window
[LOCAL
].liobn
, iue
->sbuf
->dma
,
1993 vscsi
->dds
.window
[REMOTE
].liobn
,
1994 be64_to_cpu(crq
->IU_data_ptr
));
1996 cmd
->rsp
.format
= VIOSRP_MAD_FORMAT
;
1997 cmd
->rsp
.len
= sizeof(struct mad_common
);
1998 cmd
->rsp
.tag
= mad
->tag
;
1999 list_add_tail(&cmd
->list
, &vscsi
->waiting_rsp
);
2000 ibmvscsis_send_messages(vscsi
);
2002 dev_dbg(&vscsi
->dev
, "Error sending mad response, rc %ld\n",
2004 if (rc
== H_PERMISSION
) {
2005 if (connection_broken(vscsi
))
2006 flag_bits
= (RESPONSE_Q_DOWN
| CLIENT_FAILED
);
2008 dev_err(&vscsi
->dev
, "mad: failed to copy to client, rc %ld\n",
2011 ibmvscsis_free_cmd_resources(vscsi
, cmd
);
2012 ibmvscsis_post_disconnect(vscsi
, ERR_DISCONNECT_RECONNECT
,
2018 * ibmvscsis_mad() - Service a MAnagement Data gram.
2019 * @vscsi: Pointer to our adapter structure
2020 * @crq: Pointer to the CRQ entry containing the MAD request
2022 * EXECUTION ENVIRONMENT:
2023 * Interrupt, called with adapter lock held
2025 static long ibmvscsis_mad(struct scsi_info
*vscsi
, struct viosrp_crq
*crq
)
2027 struct iu_entry
*iue
;
2028 struct ibmvscsis_cmd
*cmd
;
2029 struct mad_common
*mad
;
2030 long rc
= ADAPT_SUCCESS
;
2032 switch (vscsi
->state
) {
2034 * We have not exchanged Init Msgs yet, so this MAD was sent
2035 * before the last Transport Event; client will not be
2036 * expecting a response.
2038 case WAIT_CONNECTION
:
2039 dev_dbg(&vscsi
->dev
, "mad: in Wait Connection state, ignoring MAD, flags %d\n",
2041 return ADAPT_SUCCESS
;
2043 case SRP_PROCESSING
:
2048 * We should never get here while we're in these states.
2049 * Just log an error and get out.
2053 case ERR_DISCONNECT
:
2054 case ERR_DISCONNECT_RECONNECT
:
2056 dev_err(&vscsi
->dev
, "mad: invalid adapter state %d for mad\n",
2058 return ADAPT_SUCCESS
;
2061 cmd
= ibmvscsis_get_free_cmd(vscsi
);
2063 dev_err(&vscsi
->dev
, "mad: failed to get cmd, debit %d\n",
2065 ibmvscsis_post_disconnect(vscsi
, ERR_DISCONNECT_RECONNECT
, 0);
2069 cmd
->type
= ADAPTER_MAD
;
2071 rc
= ibmvscsis_copy_crq_packet(vscsi
, cmd
, crq
);
2073 mad
= (struct mad_common
*)&vio_iu(iue
)->mad
;
2075 dev_dbg(&vscsi
->dev
, "mad: type %d\n", be32_to_cpu(mad
->type
));
2077 rc
= ibmvscsis_process_mad(vscsi
, iue
);
2079 dev_dbg(&vscsi
->dev
, "mad: status %hd, rc %ld\n",
2080 be16_to_cpu(mad
->status
), rc
);
2083 ibmvscsis_send_mad_resp(vscsi
, cmd
, crq
);
2085 ibmvscsis_free_cmd_resources(vscsi
, cmd
);
2088 dev_dbg(&vscsi
->dev
, "Leaving mad, rc %ld\n", rc
);

/**
 * ibmvscsis_login_rsp() - Create/copy a login response notice to the client
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Pointer to the command for the SRP Login request
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, interrupt lock held
 */
static long ibmvscsis_login_rsp(struct scsi_info *vscsi,
				struct ibmvscsis_cmd *cmd)
{
	struct iu_entry *iue = cmd->iue;
	struct srp_login_rsp *rsp = &vio_iu(iue)->srp.login_rsp;
	struct format_code *fmt;
	long rc = ADAPT_SUCCESS;

	memset(rsp, 0, sizeof(struct srp_login_rsp));

	rsp->opcode = SRP_LOGIN_RSP;
	rsp->req_lim_delta = cpu_to_be32(vscsi->request_limit);
	rsp->tag = cmd->rsp.tag;
	rsp->max_it_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
	rsp->max_ti_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
	fmt = (struct format_code *)&rsp->buf_fmt;
	fmt->buffers = SUPPORTED_FORMATS;

	cmd->rsp.len = sizeof(struct srp_login_rsp);

	rc = h_copy_rdma(cmd->rsp.len, vscsi->dds.window[LOCAL].liobn,
			 iue->sbuf->dma, vscsi->dds.window[REMOTE].liobn,
			 be64_to_cpu(iue->remote_token));

		if (connection_broken(vscsi))
			flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
		dev_err(&vscsi->dev, "login_rsp: error copying to client, rc %ld\n",
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,

		dev_err(&vscsi->dev, "login_rsp: error copying to client, rc %ld\n",
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);

/**
 * ibmvscsis_srp_login_rej() - Create/copy a login rejection notice to client
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Pointer to the command for the SRP Login request
 * @reason:	The reason the SRP Login is being rejected, per SRP protocol
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, interrupt lock held
 */
static long ibmvscsis_srp_login_rej(struct scsi_info *vscsi,
				    struct ibmvscsis_cmd *cmd, u32 reason)
{
	struct iu_entry *iue = cmd->iue;
	struct srp_login_rej *rej = &vio_iu(iue)->srp.login_rej;
	struct format_code *fmt;
	long rc = ADAPT_SUCCESS;

	memset(rej, 0, sizeof(*rej));

	rej->opcode = SRP_LOGIN_REJ;
	rej->reason = cpu_to_be32(reason);
	rej->tag = cmd->rsp.tag;
	fmt = (struct format_code *)&rej->buf_fmt;
	fmt->buffers = SUPPORTED_FORMATS;

	cmd->rsp.len = sizeof(*rej);

	rc = h_copy_rdma(cmd->rsp.len, vscsi->dds.window[LOCAL].liobn,
			 iue->sbuf->dma, vscsi->dds.window[REMOTE].liobn,
			 be64_to_cpu(iue->remote_token));

		if (connection_broken(vscsi))
			flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
		dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n",
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,

		dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n",
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);

static int ibmvscsis_make_nexus(struct ibmvscsis_tport *tport)
{
	char *name = tport->tport_name;
	struct ibmvscsis_nexus *nexus;
	struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);

	if (tport->ibmv_nexus) {
		dev_dbg(&vscsi->dev, "tport->ibmv_nexus already exists\n");
	}

	nexus = kzalloc(sizeof(*nexus), GFP_KERNEL);
		dev_err(&vscsi->dev, "Unable to allocate struct ibmvscsis_nexus\n");

	nexus->se_sess = target_setup_session(&tport->se_tpg, 0, 0,
					      TARGET_PROT_NORMAL, name, nexus,
	if (IS_ERR(nexus->se_sess)) {
		rc = PTR_ERR(nexus->se_sess);
		goto transport_init_fail;
	}

	tport->ibmv_nexus = nexus;

transport_init_fail:

static int ibmvscsis_drop_nexus(struct ibmvscsis_tport *tport)
{
	struct se_session *se_sess;
	struct ibmvscsis_nexus *nexus;

	nexus = tport->ibmv_nexus;
	se_sess = nexus->se_sess;

	/*
	 * Release the SCSI I_T Nexus to the emulated ibmvscsis Target Port
	 */
	target_remove_session(se_sess);
	tport->ibmv_nexus = NULL;

/**
 * ibmvscsis_srp_login() - Process an SRP Login Request
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Command element to use to process the SRP Login request
 * @crq:	Pointer to CRQ entry containing the SRP Login request
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, called with interrupt lock held
 */
static long ibmvscsis_srp_login(struct scsi_info *vscsi,
				struct ibmvscsis_cmd *cmd,
				struct viosrp_crq *crq)
{
	struct iu_entry *iue = cmd->iue;
	struct srp_login_req *req = &vio_iu(iue)->srp.login_req;
	__be64 id_extension;
	struct format_code *fmt;
	long rc = ADAPT_SUCCESS;

	iport = (struct port_id *)req->initiator_port_id;
	tport = (struct port_id *)req->target_port_id;
	fmt = (struct format_code *)&req->req_buf_fmt;
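
	/*
	 * Validate the login request against the SRP limits before
	 * accepting it; any violation is reported back with a
	 * LOGIN_REJ reason code.
	 */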
2291 if (be32_to_cpu(req
->req_it_iu_len
) > SRP_MAX_IU_LEN
)
2292 reason
= SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE
;
2293 else if (be32_to_cpu(req
->req_it_iu_len
) < 64)
2294 reason
= SRP_LOGIN_REJ_UNABLE_ESTABLISH_CHANNEL
;
2295 else if ((be64_to_cpu(iport
->id_extension
) > (MAX_NUM_PORTS
- 1)) ||
2296 (be64_to_cpu(tport
->id_extension
) > (MAX_NUM_PORTS
- 1)))
2297 reason
= SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL
;
2298 else if (req
->req_flags
& SRP_MULTICHAN_MULTI
)
2299 reason
= SRP_LOGIN_REJ_MULTI_CHANNEL_UNSUPPORTED
;
2300 else if (fmt
->buffers
& (~SUPPORTED_FORMATS
))
2301 reason
= SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT
;
2302 else if ((fmt
->buffers
& SUPPORTED_FORMATS
) == 0)
2303 reason
= SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT
;
2305 if (vscsi
->state
== SRP_PROCESSING
)
2306 reason
= SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED
;
2308 rc
= ibmvscsis_make_nexus(&vscsi
->tport
);
2310 reason
= SRP_LOGIN_REJ_UNABLE_ESTABLISH_CHANNEL
;
2312 cmd
->rsp
.format
= VIOSRP_SRP_FORMAT
;
2313 cmd
->rsp
.tag
= req
->tag
;
2315 dev_dbg(&vscsi
->dev
, "srp_login: reason 0x%x\n", reason
);
2318 rc
= ibmvscsis_srp_login_rej(vscsi
, cmd
, reason
);
2320 rc
= ibmvscsis_login_rsp(vscsi
, cmd
);
2324 vscsi
->state
= SRP_PROCESSING
;
2326 list_add_tail(&cmd
->list
, &vscsi
->waiting_rsp
);
2327 ibmvscsis_send_messages(vscsi
);
2329 ibmvscsis_free_cmd_resources(vscsi
, cmd
);
2332 dev_dbg(&vscsi
->dev
, "Leaving srp_login, rc %ld\n", rc
);
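/*
 * Note on the login path above: "reason" selects the SRP LOGIN REJ code
 * reported to the client, while the returned rc only reflects whether the
 * response could be copied back to the client.
 */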
/**
 * ibmvscsis_srp_i_logout() - Helper Function to close I_T Nexus
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Command element to use to process the Implicit Logout request
 * @crq:	Pointer to CRQ entry containing the Implicit Logout request
 *
 * Do the logic to close the I_T nexus. This function may not
 * behave to specification.
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, interrupt lock held
 */
static long ibmvscsis_srp_i_logout(struct scsi_info *vscsi,
				   struct ibmvscsis_cmd *cmd,
				   struct viosrp_crq *crq)
{
	struct iu_entry *iue = cmd->iue;
	struct srp_i_logout *log_out = &vio_iu(iue)->srp.i_logout;

	if ((vscsi->debit > 0) || !list_empty(&vscsi->schedule_q) ||
	    !list_empty(&vscsi->waiting_rsp)) {
		dev_err(&vscsi->dev, "i_logout: outstanding work\n");
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
	} else {
		cmd->rsp.format = SRP_FORMAT;
		cmd->rsp.tag = log_out->tag;
		cmd->rsp.len = sizeof(struct mad_common);
		list_add_tail(&cmd->list, &vscsi->waiting_rsp);
		ibmvscsis_send_messages(vscsi);

		ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, 0);
	}

	return ADAPT_SUCCESS;
}
/* Called with intr lock held */
static void ibmvscsis_srp_cmd(struct scsi_info *vscsi, struct viosrp_crq *crq)
{
	struct ibmvscsis_cmd *cmd;
	struct iu_entry *iue;
	struct srp_cmd *srp;
	struct srp_tsk_mgmt *tsk;
	long rc;

	if (vscsi->request_limit - vscsi->debit <= 0) {
		/* Client has exceeded request limit */
		dev_err(&vscsi->dev, "Client exceeded the request limit (%d), debit %d\n",
			vscsi->request_limit, vscsi->debit);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		return;
	}

	cmd = ibmvscsis_get_free_cmd(vscsi);
	if (!cmd) {
		dev_err(&vscsi->dev, "srp_cmd failed to get cmd, debit %d\n",
			vscsi->debit);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		return;
	}

	iue = cmd->iue;
	srp = &vio_iu(iue)->srp.cmd;

	rc = ibmvscsis_copy_crq_packet(vscsi, cmd, crq);
	if (rc) {
		ibmvscsis_free_cmd_resources(vscsi, cmd);
		return;
	}

	if (vscsi->state == SRP_PROCESSING) {
		switch (srp->opcode) {
		case SRP_LOGIN_REQ:
			rc = ibmvscsis_srp_login(vscsi, cmd, crq);
			break;

		case SRP_TSK_MGMT:
			tsk = &vio_iu(iue)->srp.tsk_mgmt;
			dev_dbg(&vscsi->dev, "tsk_mgmt tag: %llu (0x%llx)\n",
				tsk->tag, tsk->tag);
			cmd->rsp.tag = tsk->tag;
			vscsi->debit += 1;
			cmd->type = TASK_MANAGEMENT;
			list_add_tail(&cmd->list, &vscsi->schedule_q);
			queue_work(vscsi->work_q, &cmd->work);
			break;

		case SRP_CMD:
			dev_dbg(&vscsi->dev, "srp_cmd tag: %llu (0x%llx)\n",
				srp->tag, srp->tag);
			cmd->rsp.tag = srp->tag;
			vscsi->debit += 1;
			cmd->type = SCSI_CDB;
			/*
			 * We want to keep track of work waiting for
			 * the workqueue.
			 */
			list_add_tail(&cmd->list, &vscsi->schedule_q);
			queue_work(vscsi->work_q, &cmd->work);
			break;

		case SRP_I_LOGOUT:
			rc = ibmvscsis_srp_i_logout(vscsi, cmd, crq);
			break;

		default:
			ibmvscsis_free_cmd_resources(vscsi, cmd);
			dev_err(&vscsi->dev, "invalid srp cmd, opcode %d\n",
				(uint)srp->opcode);
			ibmvscsis_post_disconnect(vscsi,
						  ERR_DISCONNECT_RECONNECT, 0);
			break;
		}
	} else if (srp->opcode == SRP_LOGIN_REQ && vscsi->state == CONNECTED) {
		rc = ibmvscsis_srp_login(vscsi, cmd, crq);
	} else {
		ibmvscsis_free_cmd_resources(vscsi, cmd);
		dev_err(&vscsi->dev, "Invalid state %d to handle srp cmd\n",
			vscsi->state);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
	}
}
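/*
 * Dispatch summary for ibmvscsis_srp_cmd(): logins and implicit logouts are
 * handled inline under the interrupt lock, while SCSI CDBs and task
 * management requests are queued to the adapter workqueue and submitted to
 * TCM from process context by ibmvscsis_scheduler().
 */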
/**
 * ibmvscsis_ping_response() - Respond to a ping request
 * @vscsi:	Pointer to our adapter structure
 *
 * Let the client know that the server is alive and waiting on
 * its native I/O stack.
 * If any type of error occurs from the call to queue a ping
 * response then the client is either not accepting or receiving
 * interrupts.  Disconnect with an error.
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, interrupt lock held
 */
static long ibmvscsis_ping_response(struct scsi_info *vscsi)
{
	struct viosrp_crq *crq;
	u64 buffer[2] = { 0, 0 };
	long rc;

	crq = (struct viosrp_crq *)&buffer;
	crq->valid = VALID_CMD_RESP_EL;
	crq->format = (u8)MESSAGE_IN_CRQ;
	crq->status = PING_RESPONSE;

	rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]),
			cpu_to_be64(buffer[MSG_LOW]));

	switch (rc) {
	case H_SUCCESS:
		break;
	case H_CLOSED:
		vscsi->flags |= CLIENT_FAILED;
		fallthrough;
	case H_DROPPED:
		vscsi->flags |= RESPONSE_Q_DOWN;
		fallthrough;
	case H_REMOTE_PARM:
		dev_err(&vscsi->dev, "ping_response: h_send_crq failed, rc %ld\n",
			rc);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		break;
	default:
		dev_err(&vscsi->dev, "ping_response: h_send_crq returned unknown rc %ld\n",
			rc);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
		break;
	}

	return rc;
}
/**
 * ibmvscsis_parse_command() - Parse an element taken from the cmd rsp queue.
 * @vscsi:	Pointer to our adapter structure
 * @crq:	Pointer to CRQ element containing the SRP request
 *
 * This function will return success if the command queue element is valid
 * and the srp iu or MAD request it pointed to was also valid.  That does
 * not mean that an error was not returned to the client.
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, intr lock held
 */
static long ibmvscsis_parse_command(struct scsi_info *vscsi,
				    struct viosrp_crq *crq)
{
	long rc = ADAPT_SUCCESS;

	switch (crq->valid) {
	case VALID_CMD_RESP_EL:
		switch (crq->format) {
		case OS400_FORMAT:
		case AIX_FORMAT:
		case LINUX_FORMAT:
		case MAD_FORMAT:
			if (vscsi->flags & PROCESSING_MAD) {
				rc = ERROR;
				dev_err(&vscsi->dev, "parse_command: already processing mad\n");
				ibmvscsis_post_disconnect(vscsi,
							  ERR_DISCONNECT_RECONNECT,
							  0);
			} else {
				vscsi->flags |= PROCESSING_MAD;
				rc = ibmvscsis_mad(vscsi, crq);
			}
			break;

		case SRP_FORMAT:
			ibmvscsis_srp_cmd(vscsi, crq);
			break;

		case MESSAGE_IN_CRQ:
			if (crq->status == PING)
				ibmvscsis_ping_response(vscsi);
			break;

		default:
			dev_err(&vscsi->dev, "parse_command: invalid format %d\n",
				(uint)crq->format);
			ibmvscsis_post_disconnect(vscsi,
						  ERR_DISCONNECT_RECONNECT, 0);
			break;
		}
		break;

	case VALID_TRANS_EVENT:
		rc = ibmvscsis_trans_event(vscsi, crq);
		break;

	case VALID_INIT_MSG:
		rc = ibmvscsis_init_msg(vscsi, crq);
		break;

	default:
		dev_err(&vscsi->dev, "parse_command: invalid valid field %d\n",
			(uint)crq->valid);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		break;
	}

	/*
	 * Return only what the interrupt handler cares
	 * about. Most errors we keep right on trucking.
	 */
	rc = vscsi->flags & SCHEDULE_DISCONNECT;

	return rc;
}
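/*
 * The CRQ "valid" field selects between command/response elements, transport
 * events and initialization messages; only a pending SCHEDULE_DISCONNECT is
 * propagated back to the interrupt handler.
 */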
static int read_dma_window(struct scsi_info *vscsi)
{
	struct vio_dev *vdev = vscsi->dma_dev;
	const __be32 *dma_window;
	const __be32 *prop;

	/* TODO Using of_parse_dma_window would be better, but it doesn't give
	 * a way to read multiple windows without already knowing the size of
	 * a window or the number of windows.
	 */
	dma_window = (const __be32 *)vio_get_attribute(vdev,
						       "ibm,my-dma-window",
						       NULL);
	if (!dma_window) {
		dev_err(&vscsi->dev, "Couldn't find ibm,my-dma-window property\n");
		return -1;
	}

	vscsi->dds.window[LOCAL].liobn = be32_to_cpu(*dma_window);
	dma_window++;

	prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells",
						 NULL);
	if (!prop) {
		dev_warn(&vscsi->dev, "Couldn't find ibm,#dma-address-cells property\n");
		dma_window++;
	} else {
		dma_window += be32_to_cpu(*prop);
	}

	prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells",
						 NULL);
	if (!prop) {
		dev_warn(&vscsi->dev, "Couldn't find ibm,#dma-size-cells property\n");
		dma_window++;
	} else {
		dma_window += be32_to_cpu(*prop);
	}

	/* dma_window should point to the second window now */
	vscsi->dds.window[REMOTE].liobn = be32_to_cpu(*dma_window);

	return 0;
}
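/*
 * The "ibm,my-dma-window" property is expected to hold two windows
 * (liobn, address, size); the address and size cells of the first window are
 * skipped using "ibm,#dma-address-cells" and "ibm,#dma-size-cells", falling
 * back to single-cell steps when those properties are absent.
 */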
static struct ibmvscsis_tport *ibmvscsis_lookup_port(const char *name)
{
	struct ibmvscsis_tport *tport = NULL;
	struct vio_dev *vdev;
	struct scsi_info *vscsi;

	spin_lock_bh(&ibmvscsis_dev_lock);
	list_for_each_entry(vscsi, &ibmvscsis_dev_list, list) {
		vdev = vscsi->dma_dev;
		if (!strcmp(dev_name(&vdev->dev), name)) {
			tport = &vscsi->tport;
			break;
		}
	}
	spin_unlock_bh(&ibmvscsis_dev_lock);

	return tport;
}
/**
 * ibmvscsis_parse_cmd() - Parse SRP Command
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Pointer to command element with SRP command
 *
 * Parse the srp command; if it is valid then submit it to tcm.
 * Note: The return code does not reflect the status of the SCSI CDB.
 *
 * EXECUTION ENVIRONMENT:
 *	Process level
 */
static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
				struct ibmvscsis_cmd *cmd)
{
	struct iu_entry *iue = cmd->iue;
	struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf;
	struct ibmvscsis_nexus *nexus;
	u64 data_len = 0;
	enum dma_data_direction dir;
	int attr = 0;
	int rc = 0;

	nexus = vscsi->tport.ibmv_nexus;
	/*
	 * additional length in bytes.  Note that the SRP spec says that
	 * additional length is in 4-byte words, but technically the
	 * additional length field is only the upper 6 bits of the byte.
	 * The lower 2 bits are reserved.  If the lower 2 bits are 0 (as
	 * all reserved fields should be), then interpreting the byte as
	 * an int will yield the length in bytes.
	 */
	if (srp->add_cdb_len & 0x03) {
		dev_err(&vscsi->dev, "parse_cmd: reserved bits set in IU\n");
		spin_lock_bh(&vscsi->intr_lock);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		ibmvscsis_free_cmd_resources(vscsi, cmd);
		spin_unlock_bh(&vscsi->intr_lock);
		return;
	}

	if (srp_get_desc_table(srp, &dir, &data_len)) {
		dev_err(&vscsi->dev, "0x%llx: parsing SRP descriptor table failed.\n",
			srp->tag);
		goto fail;
	}

	cmd->rsp.sol_not = srp->sol_not;

	switch (srp->task_attr) {
	case SRP_SIMPLE_TASK:
		attr = TCM_SIMPLE_TAG;
		break;
	case SRP_ORDERED_TASK:
		attr = TCM_ORDERED_TAG;
		break;
	case SRP_HEAD_TASK:
		attr = TCM_HEAD_TAG;
		break;
	case SRP_ACA_TASK:
		attr = TCM_ACA_TAG;
		break;
	default:
		dev_err(&vscsi->dev, "Invalid task attribute %d\n",
			srp->task_attr);
		goto fail;
	}

	cmd->se_cmd.tag = be64_to_cpu(srp->tag);

	spin_lock_bh(&vscsi->intr_lock);
	list_add_tail(&cmd->list, &vscsi->active_q);
	spin_unlock_bh(&vscsi->intr_lock);

	srp->lun.scsi_lun[0] &= 0x3f;

	rc = target_submit_cmd(&cmd->se_cmd, nexus->se_sess, srp->cdb,
			       cmd->sense_buf, scsilun_to_int(&srp->lun),
			       data_len, attr, dir, 0);
	if (rc) {
		dev_err(&vscsi->dev, "target_submit_cmd failed, rc %d\n", rc);
		spin_lock_bh(&vscsi->intr_lock);
		list_del(&cmd->list);
		ibmvscsis_free_cmd_resources(vscsi, cmd);
		spin_unlock_bh(&vscsi->intr_lock);
		goto fail;
	}
	return;

fail:
	spin_lock_bh(&vscsi->intr_lock);
	ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
	spin_unlock_bh(&vscsi->intr_lock);
}
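/*
 * srp->lun.scsi_lun[0] is masked with 0x3f above, which appears intended to
 * clear the SRP LUN addressing-method bits before scsilun_to_int() converts
 * the LUN for TCM.
 */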
/**
 * ibmvscsis_parse_task() - Parse SRP Task Management Request
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Pointer to command element with SRP task management request
 *
 * Parse the srp task management request; if it is valid then submit it to tcm.
 * Note: The return code does not reflect the status of the task management
 * request.
 *
 * EXECUTION ENVIRONMENT:
 *	Process level
 */
static void ibmvscsis_parse_task(struct scsi_info *vscsi,
				 struct ibmvscsis_cmd *cmd)
{
	struct iu_entry *iue = cmd->iue;
	struct srp_tsk_mgmt *srp_tsk = &vio_iu(iue)->srp.tsk_mgmt;
	int tcm_type;
	u64 tag_to_abort = 0;
	int rc = 0;
	struct ibmvscsis_nexus *nexus;

	nexus = vscsi->tport.ibmv_nexus;

	cmd->rsp.sol_not = srp_tsk->sol_not;

	switch (srp_tsk->tsk_mgmt_func) {
	case SRP_TSK_ABORT_TASK:
		tcm_type = TMR_ABORT_TASK;
		tag_to_abort = be64_to_cpu(srp_tsk->task_tag);
		break;
	case SRP_TSK_ABORT_TASK_SET:
		tcm_type = TMR_ABORT_TASK_SET;
		break;
	case SRP_TSK_CLEAR_TASK_SET:
		tcm_type = TMR_CLEAR_TASK_SET;
		break;
	case SRP_TSK_LUN_RESET:
		tcm_type = TMR_LUN_RESET;
		break;
	case SRP_TSK_CLEAR_ACA:
		tcm_type = TMR_CLEAR_ACA;
		break;
	default:
		dev_err(&vscsi->dev, "unknown task mgmt func %d\n",
			srp_tsk->tsk_mgmt_func);
		cmd->se_cmd.se_tmr_req->response =
			TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
		rc = -1;
		break;
	}

	if (!rc) {
		cmd->se_cmd.tag = be64_to_cpu(srp_tsk->tag);

		spin_lock_bh(&vscsi->intr_lock);
		list_add_tail(&cmd->list, &vscsi->active_q);
		spin_unlock_bh(&vscsi->intr_lock);

		srp_tsk->lun.scsi_lun[0] &= 0x3f;

		dev_dbg(&vscsi->dev, "calling submit_tmr, func %d\n",
			srp_tsk->tsk_mgmt_func);
		rc = target_submit_tmr(&cmd->se_cmd, nexus->se_sess, NULL,
				       scsilun_to_int(&srp_tsk->lun), srp_tsk,
				       tcm_type, GFP_KERNEL, tag_to_abort, 0);
		if (rc) {
			dev_err(&vscsi->dev, "target_submit_tmr failed, rc %d\n",
				rc);
			spin_lock_bh(&vscsi->intr_lock);
			list_del(&cmd->list);
			spin_unlock_bh(&vscsi->intr_lock);
			cmd->se_cmd.se_tmr_req->response =
				TMR_FUNCTION_REJECTED;
		}
	}

	if (rc)
		transport_send_check_condition_and_sense(&cmd->se_cmd, 0, 0);
}
static void ibmvscsis_scheduler(struct work_struct *work)
{
	struct ibmvscsis_cmd *cmd = container_of(work, struct ibmvscsis_cmd,
						 work);
	struct scsi_info *vscsi = cmd->adapter;

	spin_lock_bh(&vscsi->intr_lock);

	/* Remove from schedule_q */
	list_del(&cmd->list);

	/* Don't submit cmd if we're disconnecting */
	if (vscsi->flags & (SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED)) {
		ibmvscsis_free_cmd_resources(vscsi, cmd);

		/* ibmvscsis_disconnect might be waiting for us */
		if (list_empty(&vscsi->active_q) &&
		    list_empty(&vscsi->schedule_q) &&
		    (vscsi->flags & WAIT_FOR_IDLE)) {
			vscsi->flags &= ~WAIT_FOR_IDLE;
			complete(&vscsi->wait_idle);
		}

		spin_unlock_bh(&vscsi->intr_lock);
		return;
	}

	spin_unlock_bh(&vscsi->intr_lock);

	switch (cmd->type) {
	case SCSI_CDB:
		ibmvscsis_parse_cmd(vscsi, cmd);
		break;
	case TASK_MANAGEMENT:
		ibmvscsis_parse_task(vscsi, cmd);
		break;
	default:
		dev_err(&vscsi->dev, "scheduler, invalid cmd type %d\n",
			(int)cmd->type);
		spin_lock_bh(&vscsi->intr_lock);
		ibmvscsis_free_cmd_resources(vscsi, cmd);
		spin_unlock_bh(&vscsi->intr_lock);
		break;
	}
}
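/*
 * ibmvscsis_scheduler() runs from the adapter workqueue; commands are dropped
 * (and a waiter on wait_idle completed) when a disconnect is pending,
 * otherwise they are handed to the parse routines above.
 */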
static int ibmvscsis_alloc_cmds(struct scsi_info *vscsi, int num)
{
	struct ibmvscsis_cmd *cmd;
	int i;

	INIT_LIST_HEAD(&vscsi->free_cmd);
	vscsi->cmd_pool = kcalloc(num, sizeof(struct ibmvscsis_cmd),
				  GFP_KERNEL);
	if (!vscsi->cmd_pool)
		return -ENOMEM;

	for (i = 0, cmd = (struct ibmvscsis_cmd *)vscsi->cmd_pool; i < num;
	     i++, cmd++) {
		cmd->abort_cmd = NULL;
		cmd->adapter = vscsi;
		INIT_WORK(&cmd->work, ibmvscsis_scheduler);
		list_add_tail(&cmd->list, &vscsi->free_cmd);
	}

	return 0;
}

static void ibmvscsis_free_cmds(struct scsi_info *vscsi)
{
	kfree(vscsi->cmd_pool);
	vscsi->cmd_pool = NULL;
	INIT_LIST_HEAD(&vscsi->free_cmd);
}
/**
 * ibmvscsis_service_wait_q() - Service Waiting Queue
 * @timer:	Pointer to timer which has expired
 *
 * This routine is called when the timer pops to service the waiting
 * queue. Elements on the queue have completed, their responses have been
 * copied to the client, but the client's response queue was full so
 * the queue message could not be sent. The routine grabs the proper locks
 * and calls send messages.
 *
 * EXECUTION ENVIRONMENT:
 *	called at interrupt level
 */
static enum hrtimer_restart ibmvscsis_service_wait_q(struct hrtimer *timer)
{
	struct timer_cb *p_timer = container_of(timer, struct timer_cb, timer);
	struct scsi_info *vscsi = container_of(p_timer, struct scsi_info,
					       rsp_q_timer);

	spin_lock_bh(&vscsi->intr_lock);
	p_timer->timer_pops += 1;
	p_timer->started = false;
	ibmvscsis_send_messages(vscsi);
	spin_unlock_bh(&vscsi->intr_lock);

	return HRTIMER_NORESTART;
}

static long ibmvscsis_alloctimer(struct scsi_info *vscsi)
{
	struct timer_cb *p_timer;

	p_timer = &vscsi->rsp_q_timer;
	hrtimer_init(&p_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);

	p_timer->timer.function = ibmvscsis_service_wait_q;
	p_timer->started = false;
	p_timer->timer_pops = 0;

	return ADAPT_SUCCESS;
}

static void ibmvscsis_freetimer(struct scsi_info *vscsi)
{
	struct timer_cb *p_timer;

	p_timer = &vscsi->rsp_q_timer;

	(void)hrtimer_cancel(&p_timer->timer);

	p_timer->started = false;
	p_timer->timer_pops = 0;
}
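/*
 * The rsp_q_timer hrtimer exists only to retry ibmvscsis_send_messages()
 * after the client's response queue was found full; it is cancelled
 * unconditionally in ibmvscsis_freetimer().
 */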
static irqreturn_t ibmvscsis_interrupt(int dummy, void *data)
{
	struct scsi_info *vscsi = data;

	vio_disable_interrupts(vscsi->dma_dev);
	tasklet_schedule(&vscsi->work_task);

	return IRQ_HANDLED;
}
/**
 * ibmvscsis_enable_change_state() - Set new state based on enabled status
 * @vscsi:	Pointer to our adapter structure
 *
 * This function determines our new state now that we are enabled.  This
 * may involve sending an Init Complete message to the client.
 *
 * Must be called with interrupt lock held.
 */
static long ibmvscsis_enable_change_state(struct scsi_info *vscsi)
{
	int bytes;
	long rc = ADAPT_SUCCESS;

	bytes = vscsi->cmd_q.size * PAGE_SIZE;
	rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, bytes);
	if (rc == H_CLOSED || rc == H_SUCCESS) {
		vscsi->state = WAIT_CONNECTION;
		rc = ibmvscsis_establish_new_q(vscsi);
	}

	if (rc != ADAPT_SUCCESS) {
		vscsi->state = ERR_DISCONNECTED;
		vscsi->flags |= RESPONSE_Q_DOWN;
	}

	return rc;
}
/**
 * ibmvscsis_create_command_q() - Create Command Queue
 * @vscsi:	Pointer to our adapter structure
 * @num_cmds:	Currently unused.  In the future, may be used to determine
 *		the size of the CRQ.
 *
 * Allocates memory for the command queue, maps remote memory into an ioba,
 * and initializes the command response queue.
 *
 * EXECUTION ENVIRONMENT:
 *	Process level only
 */
static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds)
{
	int pages;
	struct vio_dev *vdev = vscsi->dma_dev;

	/* We might support multiple pages in the future, but just 1 for now */
	pages = 1;

	vscsi->cmd_q.size = pages;

	vscsi->cmd_q.base_addr =
		(struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
	if (!vscsi->cmd_q.base_addr)
		return -ENOMEM;

	vscsi->cmd_q.mask = ((uint)pages * CRQ_PER_PAGE) - 1;

	vscsi->cmd_q.crq_token = dma_map_single(&vdev->dev,
						vscsi->cmd_q.base_addr,
						PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&vdev->dev, vscsi->cmd_q.crq_token)) {
		free_page((unsigned long)vscsi->cmd_q.base_addr);
		return -ENOMEM;
	}

	return 0;
}

/**
 * ibmvscsis_destroy_command_q - Destroy Command Queue
 * @vscsi:	Pointer to our adapter structure
 *
 * Releases memory for command queue and unmaps mapped remote memory.
 *
 * EXECUTION ENVIRONMENT:
 *	Process level only
 */
static void ibmvscsis_destroy_command_q(struct scsi_info *vscsi)
{
	dma_unmap_single(&vscsi->dma_dev->dev, vscsi->cmd_q.crq_token,
			 PAGE_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)vscsi->cmd_q.base_addr);
	vscsi->cmd_q.base_addr = NULL;
	vscsi->state = NO_QUEUE;
}
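/*
 * The CRQ currently occupies a single zeroed page, so cmd_q.mask is
 * (CRQ_PER_PAGE - 1) and the queue index wraps with a simple AND in
 * ibmvscsis_handle_crq().
 */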
static u8 ibmvscsis_fast_fail(struct scsi_info *vscsi,
			      struct ibmvscsis_cmd *cmd)
{
	struct iu_entry *iue = cmd->iue;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf;
	struct scsi_sense_hdr sshdr;
	u8 rc = se_cmd->scsi_status;

	if (vscsi->fast_fail && (READ_CMD(srp->cdb) || WRITE_CMD(srp->cdb)))
		if (scsi_normalize_sense(se_cmd->sense_buffer,
					 se_cmd->scsi_sense_length, &sshdr))
			if (sshdr.sense_key == HARDWARE_ERROR &&
			    (se_cmd->residual_count == 0 ||
			     se_cmd->residual_count == se_cmd->data_length)) {
				rc = NO_SENSE;
				cmd->flags |= CMD_FAST_FAIL;
			}

	return rc;
}
/**
 * srp_build_response() - Build an SRP response buffer
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Pointer to command for which to send the response
 * @len_p:	Where to return the length of the IU response sent.  This
 *		is needed to construct the CRQ response.
 *
 * Build the SRP response buffer and copy it to the client's memory space.
 */
static long srp_build_response(struct scsi_info *vscsi,
			       struct ibmvscsis_cmd *cmd, uint *len_p)
{
	struct iu_entry *iue = cmd->iue;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct srp_rsp *rsp;
	uint len;
	u32 rsp_code;
	char *data;
	u32 *tsk_status;
	long rc = ADAPT_SUCCESS;

	spin_lock_bh(&vscsi->intr_lock);

	rsp = &vio_iu(iue)->srp.rsp;
	len = sizeof(*rsp);
	memset(rsp, 0, len);
	data = rsp->data;

	rsp->opcode = SRP_RSP;

	rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
	rsp->tag = cmd->rsp.tag;
	rsp->flags = 0;

	if (cmd->type == SCSI_CDB) {
		rsp->status = ibmvscsis_fast_fail(vscsi, cmd);
		if (rsp->status) {
			dev_dbg(&vscsi->dev, "build_resp: cmd %p, scsi status %d\n",
				cmd, (int)rsp->status);
			ibmvscsis_determine_resid(se_cmd, rsp);
			if (se_cmd->scsi_sense_length && se_cmd->sense_buffer) {
				rsp->sense_data_len =
					cpu_to_be32(se_cmd->scsi_sense_length);
				rsp->flags |= SRP_RSP_FLAG_SNSVALID;
				len += se_cmd->scsi_sense_length;
				memcpy(data, se_cmd->sense_buffer,
				       se_cmd->scsi_sense_length);
			}
			rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
				UCSOLNT_RESP_SHIFT;
		} else if (cmd->flags & CMD_FAST_FAIL) {
			dev_dbg(&vscsi->dev, "build_resp: cmd %p, fast fail\n",
				cmd);
			rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
				UCSOLNT_RESP_SHIFT;
		} else {
			rsp->sol_not = (cmd->rsp.sol_not & SCSOLNT) >>
				SCSOLNT_RESP_SHIFT;
		}
	} else {
		/* this is task management */
		rsp->status = 0;
		rsp->resp_data_len = cpu_to_be32(4);
		rsp->flags |= SRP_RSP_FLAG_RSPVALID;

		switch (se_cmd->se_tmr_req->response) {
		case TMR_FUNCTION_COMPLETE:
		case TMR_TASK_DOES_NOT_EXIST:
			rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_COMPLETE;
			rsp->sol_not = (cmd->rsp.sol_not & SCSOLNT) >>
				SCSOLNT_RESP_SHIFT;
			break;
		case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
		case TMR_LUN_DOES_NOT_EXIST:
			rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_NOT_SUPPORTED;
			rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
				UCSOLNT_RESP_SHIFT;
			break;
		case TMR_FUNCTION_FAILED:
		case TMR_FUNCTION_REJECTED:
		default:
			rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_FAILED;
			rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
				UCSOLNT_RESP_SHIFT;
			break;
		}

		tsk_status = (u32 *)data;
		*tsk_status = cpu_to_be32(rsp_code);
		data = (char *)(tsk_status + 1);
		len += 4;
	}

	rc = h_copy_rdma(len, vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma,
			 vscsi->dds.window[REMOTE].liobn,
			 be64_to_cpu(iue->remote_token));

	switch (rc) {
	case H_SUCCESS:
		vscsi->credit = 0;
		*len_p = len;
		break;
	case H_PERMISSION:
		if (connection_broken(vscsi))
			vscsi->flags |= RESPONSE_Q_DOWN | CLIENT_FAILED;

		dev_err(&vscsi->dev, "build_response: error copying to client, rc %ld, flags 0x%x, state 0x%hx\n",
			rc, vscsi->flags, vscsi->state);
		break;
	default:
		dev_err(&vscsi->dev, "build_response: error copying to client, rc %ld\n",
			rc);
		break;
	}

	spin_unlock_bh(&vscsi->intr_lock);

	return rc;
}
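/*
 * The SRP RSP carries req_lim_delta as 1 + vscsi->credit so the client can
 * replenish its request limit; sense data, when present, is appended directly
 * after the fixed-size response and included in the copied length.
 */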
static int ibmvscsis_rdma(struct ibmvscsis_cmd *cmd, struct scatterlist *sg,
			  int nsg, struct srp_direct_buf *md, int nmd,
			  enum dma_data_direction dir, unsigned int bytes)
{
	struct iu_entry *iue = cmd->iue;
	struct srp_target *target = iue->target;
	struct scsi_info *vscsi = target->ldata;
	struct scatterlist *sgp;
	dma_addr_t client_ioba, server_ioba;
	ulong buf_len;
	ulong client_len, server_len;
	int md_idx;
	long tx_len;
	long rc = 0;

	if (bytes == 0)
		return 0;

	sgp = sg;
	client_len = 0;
	server_len = 0;
	md_idx = 0;
	tx_len = bytes;

	do {
		if (client_len == 0) {
			if (md_idx >= nmd) {
				dev_err(&vscsi->dev, "rdma: ran out of client memory descriptors\n");
				rc = -EIO;
				break;
			}
			client_ioba = be64_to_cpu(md[md_idx].va);
			client_len = be32_to_cpu(md[md_idx].len);
		}
		if (server_len == 0) {
			if (!sgp) {
				dev_err(&vscsi->dev, "rdma: ran out of scatter/gather list\n");
				rc = -EIO;
				break;
			}
			server_ioba = sg_dma_address(sgp);
			server_len = sg_dma_len(sgp);
		}

		buf_len = tx_len;

		if (buf_len > client_len)
			buf_len = client_len;

		if (buf_len > server_len)
			buf_len = server_len;

		if (buf_len > max_vdma_size)
			buf_len = max_vdma_size;

		if (dir == DMA_TO_DEVICE) {
			/* read from client */
			rc = h_copy_rdma(buf_len,
					 vscsi->dds.window[REMOTE].liobn,
					 client_ioba,
					 vscsi->dds.window[LOCAL].liobn,
					 server_ioba);
		} else {
			/* The h_copy_rdma will cause phyp, running in another
			 * partition, to read memory, so we need to make sure
			 * the data has been written out, hence these syncs.
			 */
			/* ensure that everything is in memory */
			isync();
			/* ensure that memory has been made visible */
			dma_wmb();
			rc = h_copy_rdma(buf_len,
					 vscsi->dds.window[LOCAL].liobn,
					 server_ioba,
					 vscsi->dds.window[REMOTE].liobn,
					 client_ioba);
		}
		switch (rc) {
		case H_SUCCESS:
			break;
		case H_PERMISSION:
		case H_SOURCE_PARM:
		case H_DEST_PARM:
			if (connection_broken(vscsi)) {
				spin_lock_bh(&vscsi->intr_lock);
				vscsi->flags |=
					(RESPONSE_Q_DOWN | CLIENT_FAILED);
				spin_unlock_bh(&vscsi->intr_lock);
			}
			dev_err(&vscsi->dev, "rdma: h_copy_rdma failed, rc %ld\n",
				rc);
			break;
		default:
			dev_err(&vscsi->dev, "rdma: unknown error %ld from h_copy_rdma\n",
				rc);
			break;
		}

		if (!rc) {
			tx_len -= buf_len;
			if (tx_len == 0)
				break;

			client_len -= buf_len;
			if (client_len == 0)
				md_idx++;
			else
				client_ioba += buf_len;

			server_len -= buf_len;
			if (server_len == 0)
				sgp = sg_next(sgp);
			else
				server_ioba += buf_len;
		}
	} while (!rc);

	return rc;
}
/**
 * ibmvscsis_handle_crq() - Handle CRQ
 * @data:	Pointer to our adapter structure
 *
 * Read the command elements from the command queue and copy the payloads
 * associated with the command elements to local memory and execute the
 * SRP requests.
 *
 * Note: this is an edge triggered interrupt. It can not be shared.
 */
static void ibmvscsis_handle_crq(unsigned long data)
{
	struct scsi_info *vscsi = (struct scsi_info *)data;
	struct viosrp_crq *crq;
	long rc;
	bool ack = true;
	volatile u8 valid;

	spin_lock_bh(&vscsi->intr_lock);

	dev_dbg(&vscsi->dev, "got interrupt\n");

	/*
	 * if we are in a path where we are waiting for all pending commands
	 * to complete because we received a transport event and anything in
	 * the command queue is for a new connection, do nothing
	 */
	if (TARGET_STOP(vscsi)) {
		vio_enable_interrupts(vscsi->dma_dev);

		dev_dbg(&vscsi->dev, "handle_crq, don't process: flags 0x%x, state 0x%hx\n",
			vscsi->flags, vscsi->state);
		spin_unlock_bh(&vscsi->intr_lock);
		return;
	}

	rc = vscsi->flags & SCHEDULE_DISCONNECT;
	crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
	valid = crq->valid;

	while (valid) {
		/*
		 * These are edge triggered interrupts. After dropping out of
		 * the while loop, the code must check for work since an
		 * interrupt could be lost, and an element be left on the
		 * queue, hence the label.
		 */
cmd_work:
		vscsi->cmd_q.index =
			(vscsi->cmd_q.index + 1) & vscsi->cmd_q.mask;

		if (!rc) {
			rc = ibmvscsis_parse_command(vscsi, crq);
		} else {
			if ((uint)crq->valid == VALID_TRANS_EVENT) {
				/*
				 * must service the transport layer events even
				 * in an error state, dont break out until all
				 * the consecutive transport events have been
				 * processed
				 */
				rc = ibmvscsis_trans_event(vscsi, crq);
			} else if (vscsi->flags & TRANS_EVENT) {
				/*
				 * if a transport event has occurred leave
				 * everything but transport events on the queue
				 *
				 * need to decrement the queue index so we can
				 * look at the element again
				 */
				if (vscsi->cmd_q.index)
					vscsi->cmd_q.index -= 1;
				else
					/*
					 * index is at 0 it just wrapped.
					 * have it index last element in q
					 */
					vscsi->cmd_q.index = vscsi->cmd_q.mask;
				break;
			}
		}

		crq->valid = INVALIDATE_CMD_RESP_EL;

		crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
		valid = crq->valid;
	}

	if (!rc) {
		if (ack) {
			vio_enable_interrupts(vscsi->dma_dev);
			ack = false;
			dev_dbg(&vscsi->dev, "handle_crq, reenabling interrupts\n");
		}
		valid = crq->valid;
		if (valid)
			goto cmd_work;
	} else {
		dev_dbg(&vscsi->dev, "handle_crq, error: flags 0x%x, state 0x%hx, crq index 0x%x\n",
			vscsi->flags, vscsi->state, vscsi->cmd_q.index);
	}

	dev_dbg(&vscsi->dev, "Leaving handle_crq: schedule_q empty %d, flags 0x%x, state 0x%hx\n",
		(int)list_empty(&vscsi->schedule_q), vscsi->flags,
		vscsi->state);

	spin_unlock_bh(&vscsi->intr_lock);
}
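/*
 * Because the CRQ interrupt is edge triggered, the handler re-checks the
 * current element after re-enabling interrupts and loops back if another
 * valid entry is already present, so no element is left stranded.
 */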
static int ibmvscsis_probe(struct vio_dev *vdev,
			   const struct vio_device_id *id)
{
	struct scsi_info *vscsi;
	int rc = 0;
	long hrc = 0;
	char wq_name[24];

	vscsi = kzalloc(sizeof(*vscsi), GFP_KERNEL);
	if (!vscsi) {
		rc = -ENOMEM;
		dev_err(&vdev->dev, "probe: allocation of adapter failed\n");
		return rc;
	}

	vscsi->dma_dev = vdev;
	vscsi->dev = vdev->dev;
	INIT_LIST_HEAD(&vscsi->schedule_q);
	INIT_LIST_HEAD(&vscsi->waiting_rsp);
	INIT_LIST_HEAD(&vscsi->active_q);

	snprintf(vscsi->tport.tport_name, IBMVSCSIS_NAMELEN, "%s",
		 dev_name(&vdev->dev));

	dev_dbg(&vscsi->dev, "probe tport_name: %s\n", vscsi->tport.tport_name);

	rc = read_dma_window(vscsi);
	if (rc)
		goto free_adapter;
	dev_dbg(&vscsi->dev, "Probe: liobn 0x%x, riobn 0x%x\n",
		vscsi->dds.window[LOCAL].liobn,
		vscsi->dds.window[REMOTE].liobn);

	snprintf(vscsi->eye, sizeof(vscsi->eye), "VSCSI %s", vdev->name);

	vscsi->dds.unit_id = vdev->unit_address;
	strscpy(vscsi->dds.partition_name, partition_name,
		sizeof(vscsi->dds.partition_name));
	vscsi->dds.partition_num = partition_number;

	spin_lock_bh(&ibmvscsis_dev_lock);
	list_add_tail(&vscsi->list, &ibmvscsis_dev_list);
	spin_unlock_bh(&ibmvscsis_dev_lock);

	/*
	 * TBD: How do we determine # of cmds to request?  Do we know how
	 * many "children" we have?
	 */
	vscsi->request_limit = INITIAL_SRP_LIMIT;
	rc = srp_target_alloc(&vscsi->target, &vdev->dev,
			      vscsi->request_limit, SRP_MAX_IU_LEN);
	if (rc)
		goto rem_list;

	vscsi->target.ldata = vscsi;

	rc = ibmvscsis_alloc_cmds(vscsi, vscsi->request_limit);
	if (rc) {
		dev_err(&vscsi->dev, "alloc_cmds failed, rc %d, num %d\n",
			rc, vscsi->request_limit);
		goto free_target;
	}

	/*
	 * Note: the lock is used in freeing timers, so must initialize
	 * first so that ordering in case of error is correct.
	 */
	spin_lock_init(&vscsi->intr_lock);

	rc = ibmvscsis_alloctimer(vscsi);
	if (rc) {
		dev_err(&vscsi->dev, "probe: alloctimer failed, rc %d\n", rc);
		goto free_cmds;
	}

	rc = ibmvscsis_create_command_q(vscsi, 256);
	if (rc) {
		dev_err(&vscsi->dev, "probe: create_command_q failed, rc %d\n",
			rc);
		goto free_timer;
	}

	vscsi->map_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!vscsi->map_buf) {
		rc = -ENOMEM;
		dev_err(&vscsi->dev, "probe: allocating cmd buffer failed\n");
		goto destroy_queue;
	}

	vscsi->map_ioba = dma_map_single(&vdev->dev, vscsi->map_buf, PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&vdev->dev, vscsi->map_ioba)) {
		rc = -ENOMEM;
		dev_err(&vscsi->dev, "probe: error mapping command buffer\n");
		goto free_buf;
	}

	hrc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
		       (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
		       0);
	if (hrc == H_SUCCESS)
		vscsi->client_data.partition_number =
			be64_to_cpu(*(u64 *)vscsi->map_buf);
	/*
	 * We expect the VIOCTL to fail if we're configured as "any
	 * client can connect" and the client isn't activated yet.
	 * We'll make the call again when he sends an init msg.
	 */
	dev_dbg(&vscsi->dev, "probe hrc %ld, client partition num %d\n",
		hrc, vscsi->client_data.partition_number);

	tasklet_init(&vscsi->work_task, ibmvscsis_handle_crq,
		     (unsigned long)vscsi);

	init_completion(&vscsi->wait_idle);
	init_completion(&vscsi->unconfig);

	snprintf(wq_name, 24, "ibmvscsis%s", dev_name(&vdev->dev));
	vscsi->work_q = create_workqueue(wq_name);
	if (!vscsi->work_q) {
		rc = -ENOMEM;
		dev_err(&vscsi->dev, "create_workqueue failed\n");
		goto unmap_buf;
	}

	rc = request_irq(vdev->irq, ibmvscsis_interrupt, 0, "ibmvscsis", vscsi);
	if (rc) {
		dev_err(&vscsi->dev, "probe: request_irq failed, rc %d\n", rc);
		goto destroy_WQ;
	}

	vscsi->state = WAIT_ENABLED;

	dev_set_drvdata(&vdev->dev, vscsi);

	return 0;

destroy_WQ:
	destroy_workqueue(vscsi->work_q);
unmap_buf:
	dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
free_buf:
	kfree(vscsi->map_buf);
destroy_queue:
	tasklet_kill(&vscsi->work_task);
	ibmvscsis_unregister_command_q(vscsi);
	ibmvscsis_destroy_command_q(vscsi);
free_timer:
	ibmvscsis_freetimer(vscsi);
free_cmds:
	ibmvscsis_free_cmds(vscsi);
free_target:
	srp_target_free(&vscsi->target);
rem_list:
	spin_lock_bh(&ibmvscsis_dev_lock);
	list_del(&vscsi->list);
	spin_unlock_bh(&ibmvscsis_dev_lock);
free_adapter:
	kfree(vscsi);

	return rc;
}
static int ibmvscsis_remove(struct vio_dev *vdev)
{
	struct scsi_info *vscsi = dev_get_drvdata(&vdev->dev);

	dev_dbg(&vscsi->dev, "remove (%s)\n", dev_name(&vscsi->dma_dev->dev));

	spin_lock_bh(&vscsi->intr_lock);
	ibmvscsis_post_disconnect(vscsi, UNCONFIGURING, 0);
	vscsi->flags |= CFG_SLEEPING;
	spin_unlock_bh(&vscsi->intr_lock);
	wait_for_completion(&vscsi->unconfig);

	vio_disable_interrupts(vdev);
	free_irq(vdev->irq, vscsi);
	destroy_workqueue(vscsi->work_q);
	dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	kfree(vscsi->map_buf);
	tasklet_kill(&vscsi->work_task);
	ibmvscsis_destroy_command_q(vscsi);
	ibmvscsis_freetimer(vscsi);
	ibmvscsis_free_cmds(vscsi);
	srp_target_free(&vscsi->target);
	spin_lock_bh(&ibmvscsis_dev_lock);
	list_del(&vscsi->list);
	spin_unlock_bh(&ibmvscsis_dev_lock);
	kfree(vscsi);

	return 0;
}
static ssize_t system_id_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", system_id);
}

static ssize_t partition_number_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%x\n", partition_number);
}

static ssize_t unit_address_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct scsi_info *vscsi = container_of(dev, struct scsi_info, dev);

	return snprintf(buf, PAGE_SIZE, "%x\n", vscsi->dma_dev->unit_address);
}
static int ibmvscsis_get_system_info(void)
{
	struct device_node *rootdn, *vdevdn;
	const char *id, *model, *name;
	const uint *num;

	rootdn = of_find_node_by_path("/");
	if (!rootdn)
		return -ENOENT;

	model = of_get_property(rootdn, "model", NULL);
	id = of_get_property(rootdn, "system-id", NULL);
	if (model && id)
		snprintf(system_id, sizeof(system_id), "%s-%s", model, id);

	name = of_get_property(rootdn, "ibm,partition-name", NULL);
	if (name)
		strncpy(partition_name, name, sizeof(partition_name));

	num = of_get_property(rootdn, "ibm,partition-no", NULL);
	if (num)
		partition_number = of_read_number(num, 1);

	of_node_put(rootdn);

	vdevdn = of_find_node_by_path("/vdevice");
	if (vdevdn) {
		const uint *mvds;

		mvds = of_get_property(vdevdn, "ibm,max-virtual-dma-size",
				       NULL);
		if (mvds)
			max_vdma_size = *mvds;
		of_node_put(vdevdn);
	}

	return 0;
}
static char *ibmvscsis_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct ibmvscsis_tport *tport =
		container_of(se_tpg, struct ibmvscsis_tport, se_tpg);

	return tport->tport_name;
}

static u16 ibmvscsis_get_tag(struct se_portal_group *se_tpg)
{
	struct ibmvscsis_tport *tport =
		container_of(se_tpg, struct ibmvscsis_tport, se_tpg);

	return tport->tport_tpgt;
}

static u32 ibmvscsis_get_default_depth(struct se_portal_group *se_tpg)
{
	return 1;
}

static int ibmvscsis_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static int ibmvscsis_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}

static u32 ibmvscsis_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static int ibmvscsis_check_stop_free(struct se_cmd *se_cmd)
{
	return target_put_sess_cmd(se_cmd);
}

static void ibmvscsis_release_cmd(struct se_cmd *se_cmd)
{
	struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
						 se_cmd);
	struct scsi_info *vscsi = cmd->adapter;

	spin_lock_bh(&vscsi->intr_lock);
	/* Remove from active_q */
	list_move_tail(&cmd->list, &vscsi->waiting_rsp);
	ibmvscsis_send_messages(vscsi);
	spin_unlock_bh(&vscsi->intr_lock);
}
static u32 ibmvscsis_sess_get_index(struct se_session *se_sess)
{
	return 0;
}

static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
{
	struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
						 se_cmd);
	struct scsi_info *vscsi = cmd->adapter;
	struct iu_entry *iue = cmd->iue;
	int rc;

	/*
	 * If CLIENT_FAILED OR RESPONSE_Q_DOWN, then just return success
	 * since LIO can't do anything about it, and we dont want to
	 * attempt an srp_transfer_data.
	 */
	if ((vscsi->flags & (CLIENT_FAILED | RESPONSE_Q_DOWN))) {
		dev_err(&vscsi->dev, "write_pending failed since: %d\n",
			vscsi->flags);
		return -EIO;
	}

	rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma,
			       1, 1);
	if (rc) {
		dev_err(&vscsi->dev, "srp_transfer_data() failed: %d\n", rc);
		return -EIO;
	}
	/*
	 * We now tell TCM to add this WRITE CDB directly into the TCM storage
	 * object execution queue.
	 */
	target_execute_cmd(se_cmd);
	return 0;
}
static void ibmvscsis_set_default_node_attrs(struct se_node_acl *nacl)
{
}

static int ibmvscsis_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd)
{
	struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
						 se_cmd);
	struct iu_entry *iue = cmd->iue;
	struct scsi_info *vscsi = cmd->adapter;
	uint len = 0;
	int rc;

	rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, 1,
			       1);
	if (rc) {
		dev_err(&vscsi->dev, "srp_transfer_data failed: %d\n", rc);
		se_cmd->scsi_sense_length = 18;
		memset(se_cmd->sense_buffer, 0, se_cmd->scsi_sense_length);
		/* Logical Unit Communication Time-out asc/ascq = 0x0801 */
		scsi_build_sense_buffer(0, se_cmd->sense_buffer, MEDIUM_ERROR,
					0x08, 0x01);
	}

	srp_build_response(vscsi, cmd, &len);
	cmd->rsp.format = SRP_FORMAT;
	cmd->rsp.len = len;

	return 0;
}
static int ibmvscsis_queue_status(struct se_cmd *se_cmd)
{
	struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
						 se_cmd);
	struct scsi_info *vscsi = cmd->adapter;
	uint len;

	dev_dbg(&vscsi->dev, "queue_status %p\n", se_cmd);

	srp_build_response(vscsi, cmd, &len);
	cmd->rsp.format = SRP_FORMAT;
	cmd->rsp.len = len;

	return 0;
}

static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
						 se_cmd);
	struct scsi_info *vscsi = cmd->adapter;
	struct ibmvscsis_cmd *cmd_itr;
	struct iu_entry *iue = cmd->iue;
	struct srp_tsk_mgmt *srp_tsk = &vio_iu(iue)->srp.tsk_mgmt;
	u64 tag_to_abort = be64_to_cpu(srp_tsk->task_tag);
	uint len;

	dev_dbg(&vscsi->dev, "queue_tm_rsp %p, status %d\n",
		se_cmd, (int)se_cmd->se_tmr_req->response);

	if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK &&
	    cmd->se_cmd.se_tmr_req->response == TMR_TASK_DOES_NOT_EXIST) {
		spin_lock_bh(&vscsi->intr_lock);
		list_for_each_entry(cmd_itr, &vscsi->active_q, list) {
			if (tag_to_abort == cmd_itr->se_cmd.tag) {
				cmd_itr->abort_cmd = cmd;
				cmd->flags |= DELAY_SEND;
				break;
			}
		}
		spin_unlock_bh(&vscsi->intr_lock);
	}

	srp_build_response(vscsi, cmd, &len);
	cmd->rsp.format = SRP_FORMAT;
	cmd->rsp.len = len;
}

static void ibmvscsis_aborted_task(struct se_cmd *se_cmd)
{
	struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
						 se_cmd);
	struct scsi_info *vscsi = cmd->adapter;

	dev_dbg(&vscsi->dev, "ibmvscsis_aborted_task %p task_tag: %llu\n",
		se_cmd, se_cmd->tag);
}
static struct se_wwn *ibmvscsis_make_tport(struct target_fabric_configfs *tf,
					   struct config_group *group,
					   const char *name)
{
	struct ibmvscsis_tport *tport;
	struct scsi_info *vscsi;

	tport = ibmvscsis_lookup_port(name);
	if (tport) {
		vscsi = container_of(tport, struct scsi_info, tport);
		tport->tport_proto_id = SCSI_PROTOCOL_SRP;
		dev_dbg(&vscsi->dev, "make_tport(%s), pointer:%p, tport_id:%x\n",
			name, tport, tport->tport_proto_id);
		return &tport->tport_wwn;
	}

	return ERR_PTR(-EINVAL);
}

static void ibmvscsis_drop_tport(struct se_wwn *wwn)
{
	struct ibmvscsis_tport *tport = container_of(wwn,
						     struct ibmvscsis_tport,
						     tport_wwn);
	struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);

	dev_dbg(&vscsi->dev, "drop_tport(%s)\n",
		config_item_name(&tport->tport_wwn.wwn_group.cg_item));
}

static struct se_portal_group *ibmvscsis_make_tpg(struct se_wwn *wwn,
						  const char *name)
{
	struct ibmvscsis_tport *tport =
		container_of(wwn, struct ibmvscsis_tport, tport_wwn);
	u16 tpgt;
	int rc;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	rc = kstrtou16(name + 5, 0, &tpgt);
	if (rc)
		return ERR_PTR(rc);
	tport->tport_tpgt = tpgt;

	tport->releasing = false;

	rc = core_tpg_register(&tport->tport_wwn, &tport->se_tpg,
			       tport->tport_proto_id);
	if (rc)
		return ERR_PTR(rc);

	return &tport->se_tpg;
}

static void ibmvscsis_drop_tpg(struct se_portal_group *se_tpg)
{
	struct ibmvscsis_tport *tport = container_of(se_tpg,
						     struct ibmvscsis_tport,
						     se_tpg);

	tport->releasing = true;
	tport->enabled = false;

	/*
	 * Release the virtual I_T Nexus for this ibmvscsis TPG
	 */
	ibmvscsis_drop_nexus(tport);
	/*
	 * Deregister the se_tpg from TCM..
	 */
	core_tpg_deregister(se_tpg);
}
static ssize_t ibmvscsis_wwn_version_show(struct config_item *item,
					  char *page)
{
	return scnprintf(page, PAGE_SIZE, "%s\n", IBMVSCSIS_VERSION);
}
CONFIGFS_ATTR_RO(ibmvscsis_wwn_, version);

static struct configfs_attribute *ibmvscsis_wwn_attrs[] = {
	&ibmvscsis_wwn_attr_version,
	NULL,
};

static ssize_t ibmvscsis_tpg_enable_show(struct config_item *item,
					 char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct ibmvscsis_tport *tport = container_of(se_tpg,
						     struct ibmvscsis_tport,
						     se_tpg);

	return snprintf(page, PAGE_SIZE, "%d\n", (tport->enabled) ? 1 : 0);
}

static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
					  const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct ibmvscsis_tport *tport = container_of(se_tpg,
						     struct ibmvscsis_tport,
						     se_tpg);
	struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);
	unsigned long tmp;
	int rc;
	long lrc;

	rc = kstrtoul(page, 0, &tmp);
	if (rc < 0) {
		dev_err(&vscsi->dev, "Unable to extract srpt_tpg_store_enable\n");
		return -EINVAL;
	}

	if ((tmp != 0) && (tmp != 1)) {
		dev_err(&vscsi->dev, "Illegal value for srpt_tpg_store_enable\n");
		return -EINVAL;
	}

	if (tmp) {
		spin_lock_bh(&vscsi->intr_lock);
		tport->enabled = true;
		lrc = ibmvscsis_enable_change_state(vscsi);
		if (lrc)
			dev_err(&vscsi->dev, "enable_change_state failed, rc %ld state %d\n",
				lrc, vscsi->state);
		spin_unlock_bh(&vscsi->intr_lock);
	} else {
		spin_lock_bh(&vscsi->intr_lock);
		tport->enabled = false;
		/* This simulates the server going down */
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
		spin_unlock_bh(&vscsi->intr_lock);
	}

	dev_dbg(&vscsi->dev, "tpg_enable_store, tmp %ld, state %d\n", tmp,
		vscsi->state);

	return count;
}
CONFIGFS_ATTR(ibmvscsis_tpg_, enable);

static struct configfs_attribute *ibmvscsis_tpg_attrs[] = {
	&ibmvscsis_tpg_attr_enable,
	NULL,
};
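/*
 * Typical configfs usage (paths are illustrative and may vary by setup):
 *
 *   mkdir -p /sys/kernel/config/target/ibmvscsis/<vio device name>/tpgt_1
 *   echo 1 > /sys/kernel/config/target/ibmvscsis/<vio device name>/tpgt_1/enable
 *
 * Writing 1 registers the CRQ with the hypervisor via
 * ibmvscsis_enable_change_state(); writing 0 simulates the server going down
 * by posting an ERR_DISCONNECT.
 */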
static const struct target_core_fabric_ops ibmvscsis_ops = {
	.module				= THIS_MODULE,
	.fabric_name			= "ibmvscsis",
	.max_data_sg_nents		= MAX_TXU / PAGE_SIZE,
	.tpg_get_wwn			= ibmvscsis_get_fabric_wwn,
	.tpg_get_tag			= ibmvscsis_get_tag,
	.tpg_get_default_depth		= ibmvscsis_get_default_depth,
	.tpg_check_demo_mode		= ibmvscsis_check_true,
	.tpg_check_demo_mode_cache	= ibmvscsis_check_true,
	.tpg_check_demo_mode_write_protect = ibmvscsis_check_false,
	.tpg_check_prod_mode_write_protect = ibmvscsis_check_false,
	.tpg_get_inst_index		= ibmvscsis_tpg_get_inst_index,
	.check_stop_free		= ibmvscsis_check_stop_free,
	.release_cmd			= ibmvscsis_release_cmd,
	.sess_get_index			= ibmvscsis_sess_get_index,
	.write_pending			= ibmvscsis_write_pending,
	.set_default_node_attributes	= ibmvscsis_set_default_node_attrs,
	.get_cmd_state			= ibmvscsis_get_cmd_state,
	.queue_data_in			= ibmvscsis_queue_data_in,
	.queue_status			= ibmvscsis_queue_status,
	.queue_tm_rsp			= ibmvscsis_queue_tm_rsp,
	.aborted_task			= ibmvscsis_aborted_task,
	/*
	 * Setup function pointers for logic in target_core_fabric_configfs.c
	 */
	.fabric_make_wwn		= ibmvscsis_make_tport,
	.fabric_drop_wwn		= ibmvscsis_drop_tport,
	.fabric_make_tpg		= ibmvscsis_make_tpg,
	.fabric_drop_tpg		= ibmvscsis_drop_tpg,

	.tfc_wwn_attrs			= ibmvscsis_wwn_attrs,
	.tfc_tpg_base_attrs		= ibmvscsis_tpg_attrs,
};
static void ibmvscsis_dev_release(struct device *dev) {};

static struct device_attribute dev_attr_system_id =
	__ATTR(system_id, S_IRUGO, system_id_show, NULL);

static struct device_attribute dev_attr_partition_number =
	__ATTR(partition_number, S_IRUGO, partition_number_show, NULL);

static struct device_attribute dev_attr_unit_address =
	__ATTR(unit_address, S_IRUGO, unit_address_show, NULL);

static struct attribute *ibmvscsis_dev_attrs[] = {
	&dev_attr_system_id.attr,
	&dev_attr_partition_number.attr,
	&dev_attr_unit_address.attr,
	NULL,
};
ATTRIBUTE_GROUPS(ibmvscsis_dev);

static struct class ibmvscsis_class = {
	.name		= "ibmvscsis",
	.dev_release	= ibmvscsis_dev_release,
	.dev_groups	= ibmvscsis_dev_groups,
};

static const struct vio_device_id ibmvscsis_device_table[] = {
	{ "v-scsi-host", "IBM,v-scsi-host" },
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvscsis_device_table);

static struct vio_driver ibmvscsis_driver = {
	.name		= "ibmvscsis",
	.id_table	= ibmvscsis_device_table,
	.probe		= ibmvscsis_probe,
	.remove		= ibmvscsis_remove,
};
/**
 * ibmvscsis_init() - Kernel Module initialization
 *
 * Note: vio_register_driver() registers callback functions, and at least one
 * of those callback functions calls TCM - Linux IO Target Subsystem, thus
 * the SCSI Target template must be registered before vio_register_driver()
 * is called.
 */
static int __init ibmvscsis_init(void)
{
	int rc = 0;

	rc = ibmvscsis_get_system_info();
	if (rc) {
		pr_err("rc %d from get_system_info\n", rc);
		goto out;
	}

	rc = class_register(&ibmvscsis_class);
	if (rc) {
		pr_err("failed class register\n");
		goto out;
	}

	rc = target_register_template(&ibmvscsis_ops);
	if (rc) {
		pr_err("rc %d from target_register_template\n", rc);
		goto unregister_class;
	}

	rc = vio_register_driver(&ibmvscsis_driver);
	if (rc) {
		pr_err("rc %d from vio_register_driver\n", rc);
		goto unregister_target;
	}

	return 0;

unregister_target:
	target_unregister_template(&ibmvscsis_ops);
unregister_class:
	class_unregister(&ibmvscsis_class);
out:
	return rc;
}

static void __exit ibmvscsis_exit(void)
{
	pr_info("Unregister IBM virtual SCSI host driver\n");
	vio_unregister_driver(&ibmvscsis_driver);
	target_unregister_template(&ibmvscsis_ops);
	class_unregister(&ibmvscsis_class);
}

MODULE_DESCRIPTION("IBMVSCSIS fabric driver");
MODULE_AUTHOR("Bryant G. Ly and Michael Cyr");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVSCSIS_VERSION);
module_init(ibmvscsis_init);
module_exit(ibmvscsis_exit);