/*
 *  linux/drivers/scsi/esas2r/esas2r_int.c
 *      esas2r interrupt handling
 *
 *  Copyright (c) 2001-2013 ATTO Technology, Inc.
 *  (mailto:linuxdrivers@attotech.com)
 */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 *  CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 *  LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 *  MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.  Each Recipient is
 *  solely responsible for determining the appropriateness of using and
 *  distributing the Program and assumes all risks associated with its
 *  exercise of rights under this Agreement, including but not limited to
 *  the risks and costs of program errors, damage to or loss of data,
 *  programs or equipment, and unavailability or interruption of operations.
 *
 *  DISCLAIMER OF LIABILITY
 *  NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 *  DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 *  DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 *  USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 *  HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#include "esas2r.h"

/* Local function prototypes */
static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell);
static void esas2r_get_outbound_responses(struct esas2r_adapter *a);
static void esas2r_process_bus_reset(struct esas2r_adapter *a);
/*
 * Poll the adapter for interrupts and service them.
 * This function handles both legacy interrupts and MSI.
 */
void esas2r_polled_interrupt(struct esas2r_adapter *a)
{
	u32 intstat;
	u32 doorbell;

	esas2r_disable_chip_interrupts(a);

	intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);

	if (intstat & MU_INTSTAT_POST_OUT) {
		/* clear the interrupt */
		esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
					    MU_OLIS_INT);
		esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);

		esas2r_get_outbound_responses(a);
	}

	if (intstat & MU_INTSTAT_DRBL) {
		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
		if (doorbell != 0)
			esas2r_doorbell_interrupt(a, doorbell);
	}

	esas2r_enable_chip_interrupts(a);

	if (atomic_read(&a->disable_cnt) == 0)
		esas2r_do_deferred_processes(a);
}
/*
 * Legacy and MSI interrupt handlers.  Note that the legacy interrupt handler
 * schedules a TASKLET to process events, whereas the MSI handler just
 * processes interrupt events directly.
 */
irqreturn_t esas2r_interrupt(int irq, void *dev_id)
{
	struct esas2r_adapter *a = (struct esas2r_adapter *)dev_id;

	if (!esas2r_adapter_interrupt_pending(a))
		return IRQ_NONE;

	esas2r_lock_set_flags(&a->flags2, AF2_INT_PENDING);
	esas2r_schedule_tasklet(a);

	return IRQ_HANDLED;
}
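/*
 * The legacy handler above only records that an interrupt is pending and
 * defers the real work to the tasklet, which later calls
 * esas2r_adapter_interrupt() below with chip interrupts still masked.
 * Handler registration happens elsewhere in the driver; a minimal sketch of
 * the usual wiring (the probe-time variable names here are illustrative,
 * not taken from this file) would look like:
 *
 *	if (use_msi && !pci_enable_msi(pdev))
 *		err = request_irq(pdev->irq, esas2r_msi_interrupt, 0,
 *				  "esas2r", a);
 *	else
 *		err = request_irq(pdev->irq, esas2r_interrupt, IRQF_SHARED,
 *				  "esas2r", a);
 */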
void esas2r_adapter_interrupt(struct esas2r_adapter *a)
{
	u32 doorbell;

	if (likely(a->int_stat & MU_INTSTAT_POST_OUT)) {
		/* clear the interrupt */
		esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
					    MU_OLIS_INT);
		esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);
		esas2r_get_outbound_responses(a);
	}

	if (unlikely(a->int_stat & MU_INTSTAT_DRBL)) {
		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
		if (doorbell != 0)
			esas2r_doorbell_interrupt(a, doorbell);
	}

	a->int_mask = ESAS2R_INT_STS_MASK;

	esas2r_enable_chip_interrupts(a);

	if (likely(atomic_read(&a->disable_cnt) == 0))
		esas2r_do_deferred_processes(a);
}
irqreturn_t esas2r_msi_interrupt(int irq, void *dev_id)
{
	struct esas2r_adapter *a = (struct esas2r_adapter *)dev_id;
	u32 intstat;
	u32 doorbell;

	intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);

	if (likely(intstat & MU_INTSTAT_POST_OUT)) {
		/* clear the interrupt */
		esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
					    MU_OLIS_INT);
		esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);

		esas2r_get_outbound_responses(a);
	}

	if (unlikely(intstat & MU_INTSTAT_DRBL)) {
		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
		if (doorbell != 0)
			esas2r_doorbell_interrupt(a, doorbell);
	}

	/*
	 * Work around a chip bug and force a new MSI to be sent if one is
	 * still pending.
	 */
	esas2r_disable_chip_interrupts(a);
	esas2r_enable_chip_interrupts(a);

	if (likely(atomic_read(&a->disable_cnt) == 0))
		esas2r_do_deferred_processes(a);

	esas2r_do_tasklet_tasks(a);

	return IRQ_HANDLED;
}
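/*
 * Note on the workaround above: MSIs are message-style, edge-like events, so
 * if the device raises a new interrupt condition while the previous message
 * is still considered pending, no further message may be generated and the
 * event could be lost.  Toggling the chip interrupt enable is presumably what
 * forces the device to re-issue an MSI for any condition that is still set;
 * the exact chip behavior is not documented in this file.
 */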
/* Handle errors reported in the outbound response for a request. */
static void esas2r_handle_outbound_rsp_err(struct esas2r_adapter *a,
					   struct esas2r_request *rq,
					   struct atto_vda_ob_rsp *rsp)
{
	/*
	 * For I/O requests, only copy the response if an error
	 * occurred and setup a callback to do error processing.
	 */
	if (unlikely(rq->req_stat != RS_SUCCESS)) {
		memcpy(&rq->func_rsp, &rsp->func_rsp, sizeof(rsp->func_rsp));

		if (rq->req_stat == RS_ABORTED) {
			if (rq->timeout > RQ_MAX_TIMEOUT)
				rq->req_stat = RS_TIMEOUT;
		} else if (rq->req_stat == RS_SCSI_ERROR) {
			u8 scsistatus = rq->func_rsp.scsi_rsp.scsi_stat;

			esas2r_trace("scsistatus: %x", scsistatus);

			/* Any of these are a good result. */
			if (scsistatus == SAM_STAT_GOOD || scsistatus ==
			    SAM_STAT_CONDITION_MET || scsistatus ==
			    SAM_STAT_INTERMEDIATE || scsistatus ==
			    SAM_STAT_INTERMEDIATE_CONDITION_MET) {
				rq->req_stat = RS_SUCCESS;
				rq->func_rsp.scsi_rsp.scsi_stat =
					SAM_STAT_GOOD;
			}
		}
	}
}
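/*
 * The status collapsing above means that a request which failed at the VDA
 * layer but whose SCSI status is one of the "successful" SAM codes (GOOD,
 * CONDITION MET, INTERMEDIATE, INTERMEDIATE-CONDITION MET) is reported back
 * as a clean completion rather than as an error.
 */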
static void esas2r_get_outbound_responses(struct esas2r_adapter *a)
{
	struct atto_vda_ob_rsp *rsp;
	struct esas2r_request *rq;
	u32 rspput_ptr;
	u32 rspget_ptr;
	u32 handle;
	unsigned long flags;

	LIST_HEAD(comp_list);

	esas2r_trace_enter();

	spin_lock_irqsave(&a->queue_lock, flags);

	/* Get the outbound limit and pointers */
	rspput_ptr = le32_to_cpu(*a->outbound_copy) & MU_OLC_WRT_PTR;
	rspget_ptr = a->last_read;

	esas2r_trace("rspput_ptr: %x, rspget_ptr: %x", rspput_ptr, rspget_ptr);

	/* If we don't have anything to process, get out */
	if (unlikely(rspget_ptr == rspput_ptr)) {
		spin_unlock_irqrestore(&a->queue_lock, flags);
		esas2r_trace_exit();
		return;
	}

	/* Make sure the firmware is healthy */
	if (unlikely(rspput_ptr >= a->list_size)) {
		spin_unlock_irqrestore(&a->queue_lock, flags);
		esas2r_local_reset_adapter(a);
		esas2r_trace_exit();
		return;
	}

	do {
		/* Advance the get pointer, wrapping at the end of the list */
		rspget_ptr++;

		if (rspget_ptr >= a->list_size)
			rspget_ptr = 0;

		rsp = (struct atto_vda_ob_rsp *)a->outbound_list_md.virt_addr
		      + rspget_ptr;

		handle = rsp->handle;

		/* Verify the handle range */
		if (unlikely(LOWORD(handle) == 0
			     || LOWORD(handle) > num_requests +
			     num_ae_requests + 1))
			continue;

		/* Get the request for this handle */
		rq = a->req_table[LOWORD(handle)];

		if (unlikely(rq == NULL || rq->vrq->scsi.handle != handle))
			continue;

		list_del(&rq->req_list);

		/* Get the completion status */
		rq->req_stat = rsp->req_stat;

		esas2r_trace("handle: %x", handle);
		esas2r_trace("rq: %p", rq);
		esas2r_trace("req_status: %x", rq->req_stat);

		if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) {
			esas2r_handle_outbound_rsp_err(a, rq, rsp);
		} else {
			/*
			 * Copy the outbound completion struct for non-I/O
			 * requests.
			 */
			memcpy(&rq->func_rsp, &rsp->func_rsp,
			       sizeof(rsp->func_rsp));
		}

		/* Queue the request for completion. */
		list_add_tail(&rq->comp_list, &comp_list);

	} while (rspget_ptr != rspput_ptr);

	a->last_read = rspget_ptr;
	spin_unlock_irqrestore(&a->queue_lock, flags);

	esas2r_comp_list_drain(a, &comp_list);
	esas2r_trace_exit();
}
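/*
 * The outbound list is consumed as a ring.  The firmware publishes its write
 * (put) index through *a->outbound_copy and the driver remembers the last
 * entry it consumed in a->last_read.  For example, with list_size = 8,
 * last_read = 6 and a put index of 1, the loop above services entries 7, 0
 * and 1 and then stores 1 back into last_read.
 */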
/*
 * Perform all deferred processes for the adapter.  Deferred
 * processes can only be done while the current interrupt
 * disable_cnt for the adapter is zero.
 */
void esas2r_do_deferred_processes(struct esas2r_adapter *a)
{
	int startreqs = 2;
	struct esas2r_request *rq;
	unsigned long flags;

	/*
	 * startreqs is used to control starting requests
	 * that are on the deferred queue
	 *  = 0 - do not start any requests
	 *  = 1 - can start discovery requests
	 *  = 2 - can start any request
	 */
	if (a->flags & (AF_CHPRST_PENDING | AF_FLASHING))
		startreqs = 0;
	else if (a->flags & AF_DISC_PENDING)
		startreqs = 1;

	atomic_inc(&a->disable_cnt);

	/* Clear off the completed list to be processed later. */

	if (esas2r_is_tasklet_pending(a)) {
		esas2r_schedule_tasklet(a);

		startreqs = 0;
	}

	/*
	 * If we can start requests then traverse the defer queue
	 * looking for requests to start or complete
	 */
	if (startreqs && !list_empty(&a->defer_list)) {
		LIST_HEAD(comp_list);
		struct list_head *element, *next;

		spin_lock_irqsave(&a->queue_lock, flags);

		list_for_each_safe(element, next, &a->defer_list) {
			rq = list_entry(element, struct esas2r_request,
					req_list);

			if (rq->req_stat != RS_PENDING) {
				list_del(element);
				list_add_tail(&rq->comp_list, &comp_list);
			}
			/*
			 * Process discovery and OS requests separately.  We
			 * can't hold up discovery requests when discovery is
			 * pending.  In general, there may be different sets of
			 * conditions for starting different types of requests.
			 */
			else if (rq->req_type == RT_DISC_REQ) {
				list_del(element);
				esas2r_disc_local_start_request(a, rq);
			} else if (startreqs == 2) {
				list_del(element);
				esas2r_local_start_request(a, rq);

				/*
				 * Flashing could have been set by last local
				 * start
				 */
				if (a->flags & AF_FLASHING)
					break;
			}
		}

		spin_unlock_irqrestore(&a->queue_lock, flags);
		esas2r_comp_list_drain(a, &comp_list);
	}

	atomic_dec(&a->disable_cnt);
}
/*
 * Process an adapter reset (or one that is about to happen)
 * by making sure all outstanding requests are completed that
 * haven't been already.
 */
void esas2r_process_adapter_reset(struct esas2r_adapter *a)
{
	struct esas2r_request *rq = &a->general_req;
	struct esas2r_disc_context *dc;
	struct list_head *element;
	unsigned long flags;

	LIST_HEAD(comp_list);

	esas2r_trace_enter();

	spin_lock_irqsave(&a->queue_lock, flags);

	/* abort the active discovery, if any. */
	if (rq->interrupt_cx) {
		dc = (struct esas2r_disc_context *)rq->interrupt_cx;

		dc->disc_evt = 0;

		esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG);
	}

	/*
	 * just clear the interrupt callback for now.  it will be dequeued if
	 * and when we find it on the active queue and we don't want the
	 * callback called.  also set the dummy completion callback in case we
	 * were doing an I/O request.
	 */
	rq->interrupt_cx = NULL;
	rq->interrupt_cb = NULL;

	rq->comp_cb = esas2r_dummy_complete;

	/* Reset the read and write pointers */
	a->last_read = a->list_size - 1;

	esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE);

	/* Kill all the requests on the active list */
	list_for_each(element, &a->defer_list) {
		rq = list_entry(element, struct esas2r_request, req_list);

		if (rq->req_stat == RS_STARTED)
			if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
				list_add_tail(&rq->comp_list, &comp_list);
	}

	spin_unlock_irqrestore(&a->queue_lock, flags);
	esas2r_comp_list_drain(a, &comp_list);
	esas2r_process_bus_reset(a);

	esas2r_trace_exit();
}
static void esas2r_process_bus_reset(struct esas2r_adapter *a)
{
	struct esas2r_request *rq;
	struct list_head *element;
	unsigned long flags;

	LIST_HEAD(comp_list);

	esas2r_trace_enter();

	esas2r_hdebug("reset detected");

	spin_lock_irqsave(&a->queue_lock, flags);

	/* kill all the requests on the deferred queue */
	list_for_each(element, &a->defer_list) {
		rq = list_entry(element, struct esas2r_request, req_list);
		if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
			list_add_tail(&rq->comp_list, &comp_list);
	}

	spin_unlock_irqrestore(&a->queue_lock, flags);

	esas2r_comp_list_drain(a, &comp_list);

	if (atomic_read(&a->disable_cnt) == 0)
		esas2r_do_deferred_processes(a);

	esas2r_lock_clear_flags(&a->flags, AF_OS_RESET);

	esas2r_trace_exit();
}
static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a)
{
	esas2r_lock_clear_flags(&a->flags, AF_CHPRST_NEEDED);
	esas2r_lock_clear_flags(&a->flags, AF_BUSRST_NEEDED);
	esas2r_lock_clear_flags(&a->flags, AF_BUSRST_DETECTED);
	esas2r_lock_clear_flags(&a->flags, AF_BUSRST_PENDING);

	/*
	 * Make sure we don't attempt more than 3 resets
	 * when the uptime between resets does not exceed one
	 * minute.  This will stop any situation where there is
	 * really something wrong with the hardware.  The way
	 * this works is that we start with uptime ticks at 0.
	 * Each time we do a reset, we add 20 seconds worth to
	 * the count.  Each time a timer tick occurs, as long
	 * as a chip reset is not pending, we decrement the
	 * tick count.  If the uptime ticks ever gets to 60
	 * seconds worth, we disable the adapter from that
	 * point forward.  Three strikes, you're out.
	 */
	if (!esas2r_is_adapter_present(a) || (a->chip_uptime >=
					      ESAS2R_CHP_UPTIME_MAX)) {
		esas2r_hdebug("*** adapter disabled ***");

		/*
		 * Ok, some kind of hard failure.  Make sure we
		 * exit this loop with chip interrupts
		 * permanently disabled so we don't lock up the
		 * entire system.  Also flag degraded mode to
		 * prevent the heartbeat from trying to recover.
		 */
		esas2r_lock_set_flags(&a->flags, AF_DEGRADED_MODE);
		esas2r_lock_set_flags(&a->flags, AF_DISABLED);
		esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);
		esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING);

		esas2r_disable_chip_interrupts(a);

		esas2r_process_adapter_reset(a);

		esas2r_log(ESAS2R_LOG_CRIT,
			   "Adapter disabled because of hardware failure");
	} else {
		u32 flags =
			esas2r_lock_set_flags(&a->flags, AF_CHPRST_STARTED);

		if (!(flags & AF_CHPRST_STARTED))
			/*
			 * Only disable interrupts if this is
			 * the first reset attempt.
			 */
			esas2r_disable_chip_interrupts(a);

		if ((a->flags & AF_POWER_MGT) && !(a->flags & AF_FIRST_INIT) &&
		    !(flags & AF_CHPRST_STARTED)) {
			/*
			 * Don't reset the chip on the first
			 * deferred power up attempt.
			 */
		} else {
			esas2r_hdebug("*** resetting chip ***");
			esas2r_reset_chip(a);
		}

		/* Kick off the reinitialization */
		a->chip_uptime += ESAS2R_CHP_UPTIME_CNT;
		a->chip_init_time = jiffies_to_msecs(jiffies);

		if (!(a->flags & AF_POWER_MGT)) {
			esas2r_process_adapter_reset(a);

			if (!(flags & AF_CHPRST_STARTED)) {
				/* Remove devices now that I/O is cleaned up. */
				esas2r_targ_db_get_tgt_cnt(a);
				esas2r_targ_db_remove_all(a, false);
			}
		}
	}
}
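/*
 * Worked example of the uptime accounting described above, assuming
 * ESAS2R_CHP_UPTIME_CNT represents 20 seconds worth of ticks and
 * ESAS2R_CHP_UPTIME_MAX represents 60 seconds worth (the values the comment
 * implies): three back-to-back chip resets add 3 * 20s = 60s of "uptime
 * debt", so a fourth reset attempt before that debt drains finds chip_uptime
 * at the 60s ceiling and permanently disables the adapter.  If at least 20
 * seconds of timer ticks elapse between resets, the count drains back down
 * and the adapter keeps recovering normally.
 */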
static void esas2r_handle_chip_rst_during_tasklet(struct esas2r_adapter *a)
{
	while (a->flags & AF_CHPRST_DETECTED) {
		/*
		 * Balance the enable in esas2r_initadapter_hw.
		 * Esas2r_power_down already took care of it for power
		 * management.
		 */
		if (!(a->flags & AF_DEGRADED_MODE) && !(a->flags &
							AF_POWER_MGT))
			esas2r_disable_chip_interrupts(a);

		/* Reinitialize the chip. */
		esas2r_check_adapter(a);
		esas2r_init_adapter_hw(a, 0);

		if (a->flags & AF_CHPRST_NEEDED)
			break;

		if (a->flags & AF_POWER_MGT) {
			/* Recovery from power management. */
			if (a->flags & AF_FIRST_INIT) {
				/* Chip reset during normal power up */
				esas2r_log(ESAS2R_LOG_CRIT,
					   "The firmware was reset during a normal power-up sequence");
			} else {
				/* Deferred power up complete. */
				esas2r_lock_clear_flags(&a->flags,
							AF_POWER_MGT);
				esas2r_send_reset_ae(a, true);
			}
		} else {
			/* Recovery from online chip reset. */
			if (a->flags & AF_FIRST_INIT) {
				/* Chip reset during driver load */
			} else {
				/* Chip reset after driver load */
				esas2r_send_reset_ae(a, false);
			}

			esas2r_log(ESAS2R_LOG_CRIT,
				   "Recovering from a chip reset while the chip was online");
		}

		esas2r_lock_clear_flags(&a->flags, AF_CHPRST_STARTED);
		esas2r_enable_chip_interrupts(a);

		/*
		 * Clear this flag last!  this indicates that the chip has been
		 * reset already during initialization.
		 */
		esas2r_lock_clear_flags(&a->flags, AF_CHPRST_DETECTED);
	}
}
/* Perform deferred tasks when chip interrupts are disabled */
void esas2r_do_tasklet_tasks(struct esas2r_adapter *a)
{
	if (a->flags & (AF_CHPRST_NEEDED | AF_CHPRST_DETECTED)) {
		if (a->flags & AF_CHPRST_NEEDED)
			esas2r_chip_rst_needed_during_tasklet(a);

		esas2r_handle_chip_rst_during_tasklet(a);
	}

	if (a->flags & AF_BUSRST_NEEDED) {
		esas2r_hdebug("hard resetting bus");

		esas2r_lock_clear_flags(&a->flags, AF_BUSRST_NEEDED);

		if (a->flags & AF_FLASHING)
			esas2r_lock_set_flags(&a->flags, AF_BUSRST_DETECTED);
		else
			esas2r_write_register_dword(a, MU_DOORBELL_IN,
						    DRBL_RESET_BUS);
	}

	if (a->flags & AF_BUSRST_DETECTED) {
		esas2r_process_bus_reset(a);

		esas2r_log_dev(ESAS2R_LOG_WARN,
			       &(a->host->shost_gendev),
			       "scsi_report_bus_reset() called");

		scsi_report_bus_reset(a->host, 0);

		esas2r_lock_clear_flags(&a->flags, AF_BUSRST_DETECTED);
		esas2r_lock_clear_flags(&a->flags, AF_BUSRST_PENDING);

		esas2r_log(ESAS2R_LOG_WARN, "Bus reset complete");
	}

	if (a->flags & AF_PORT_CHANGE) {
		esas2r_lock_clear_flags(&a->flags, AF_PORT_CHANGE);

		esas2r_targ_db_report_changes(a);
	}

	if (atomic_read(&a->disable_cnt) == 0)
		esas2r_do_deferred_processes(a);
}
static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell)
{
	if (!(doorbell & DRBL_FORCE_INT)) {
		esas2r_trace_enter();
		esas2r_trace("doorbell: %x", doorbell);
	}

	/* First clear the doorbell bits */
	esas2r_write_register_dword(a, MU_DOORBELL_OUT, doorbell);

	if (doorbell & DRBL_RESET_BUS)
		esas2r_lock_set_flags(&a->flags, AF_BUSRST_DETECTED);

	if (doorbell & DRBL_FORCE_INT)
		esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT);

	if (doorbell & DRBL_PANIC_REASON_MASK) {
		esas2r_hdebug("*** Firmware Panic ***");
		esas2r_log(ESAS2R_LOG_CRIT, "The firmware has panicked");
	}

	if (doorbell & DRBL_FW_RESET) {
		esas2r_lock_set_flags(&a->flags2, AF2_COREDUMP_AVAIL);
		esas2r_local_reset_adapter(a);
	}

	if (!(doorbell & DRBL_FORCE_INT))
		esas2r_trace_exit();
}
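/*
 * Summary of the outbound doorbell bits as handled above: DRBL_RESET_BUS
 * flags a bus reset for the tasklet, DRBL_FORCE_INT acknowledges a
 * driver-forced interrupt (used by the heartbeat), any bit in
 * DRBL_PANIC_REASON_MASK means the firmware has panicked, and DRBL_FW_RESET
 * means the firmware reset itself and left a coredump, so the driver resets
 * the adapter locally.
 */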
void esas2r_force_interrupt(struct esas2r_adapter *a)
{
	esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_FORCE_INT |
				    DRBL_DRV_VER);
}
static void esas2r_lun_event(struct esas2r_adapter *a, union atto_vda_ae *ae,
			     u16 target, u32 length)
{
	struct esas2r_target *t = a->targetdb + target;
	u32 cplen = length;
	unsigned long flags;

	if (cplen > sizeof(t->lu_event))
		cplen = sizeof(t->lu_event);

	esas2r_trace("ae->lu.dwevent: %x", ae->lu.dwevent);
	esas2r_trace("ae->lu.bystate: %x", ae->lu.bystate);

	spin_lock_irqsave(&a->mem_lock, flags);

	t->new_target_state = TS_INVALID;

	if (ae->lu.dwevent & VDAAE_LU_LOST) {
		t->new_target_state = TS_NOT_PRESENT;
	} else {
		switch (ae->lu.bystate) {
		case VDAAE_LU_NOT_PRESENT:
		case VDAAE_LU_OFFLINE:
		case VDAAE_LU_DELETED:
		case VDAAE_LU_FACTORY_DISABLED:
			t->new_target_state = TS_NOT_PRESENT;
			break;

		case VDAAE_LU_ONLINE:
		case VDAAE_LU_DEGRADED:
			t->new_target_state = TS_PRESENT;
			break;
		}
	}

	if (t->new_target_state != TS_INVALID) {
		memcpy(&t->lu_event, &ae->lu, cplen);

		esas2r_disc_queue_event(a, DCDE_DEV_CHANGE);
	}

	spin_unlock_irqrestore(&a->mem_lock, flags);
}
void esas2r_ae_complete(struct esas2r_adapter *a, struct esas2r_request *rq)
{
	union atto_vda_ae *ae =
		(union atto_vda_ae *)rq->vda_rsp_data->ae_data.event_data;
	u32 length = le32_to_cpu(rq->func_rsp.ae_rsp.length);
	union atto_vda_ae *last =
		(union atto_vda_ae *)(rq->vda_rsp_data->ae_data.event_data
				      + length);
	u16 target;

	esas2r_trace_enter();
	esas2r_trace("length: %d", length);

	if (length > sizeof(struct atto_vda_ae_data)) {
		esas2r_log(ESAS2R_LOG_WARN,
			   "The AE request response length (%p) is too long: %d",
			   rq, length);

		esas2r_hdebug("aereq->length (0x%x) too long", length);

		/* Nothing sane to parse; drop the whole buffer. */
		last = ae;
	}

	while (ae < last) {
		esas2r_trace("ae: %p", ae);
		esas2r_trace("ae->hdr: %p", &(ae->hdr));

		length = ae->hdr.bylength;

		if (length > (u32)((u8 *)last - (u8 *)ae)) {
			esas2r_log(ESAS2R_LOG_CRIT,
				   "the async event length is invalid (%p): %d",
				   ae, length);

			esas2r_hdebug("ae->hdr.length (0x%x) invalid", length);
			break;
		}

		esas2r_nuxi_ae_data(ae);

		esas2r_queue_fw_event(a, fw_event_vda_ae, ae,
				      sizeof(union atto_vda_ae));

		switch (ae->hdr.bytype) {
		case VDAAE_HDR_TYPE_RAID:

			if (ae->raid.dwflags & (VDAAE_GROUP_STATE
						| VDAAE_RBLD_STATE)) {
				esas2r_log(ESAS2R_LOG_INFO,
					   "RAID event received - name:%s rebuild_state:%d group_state:%d",
					   ae->raid.acname,
					   ae->raid.byrebuild_state,
					   ae->raid.bygroup_state);
			}

			break;

		case VDAAE_HDR_TYPE_LU:
			esas2r_log(ESAS2R_LOG_INFO,
				   "LUN event received: event:%d target_id:%d LUN:%d state:%d",
				   ae->lu.dwevent,
				   ae->lu.id.tgtlun.wtarget_id,
				   ae->lu.id.tgtlun.bylun,
				   ae->lu.bystate);

			target = ae->lu.id.tgtlun.wtarget_id;

			if (target < ESAS2R_MAX_TARGETS)
				esas2r_lun_event(a, ae, target, length);

			break;

		case VDAAE_HDR_TYPE_DISK:
			esas2r_log(ESAS2R_LOG_INFO, "Disk event received");
			break;

		default:
			/* Silently ignore the rest and let the apps deal with
			 * them.
			 */
			break;
		}

		/* Advance to the next variable-length event in the buffer. */
		ae = (union atto_vda_ae *)((u8 *)ae + length);
	}

	/* Now requeue it. */
	esas2r_start_ae_request(a, rq);

	esas2r_trace_exit();
}
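/*
 * The AE response buffer holds a sequence of variable-length events packed
 * back to back; each event starts with an atto_vda_ae_hdr whose bylength
 * gives the size of that event, so the loop above simply steps ae forward by
 * bylength until it reaches the end of the data reported by the firmware.
 * Once the buffer has been consumed, the same request is immediately
 * restarted so the firmware always has an AE request available to post
 * events into.
 */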
/* Send an asynchronous event for a chip reset or power management. */
void esas2r_send_reset_ae(struct esas2r_adapter *a, bool pwr_mgt)
{
	struct atto_vda_ae_hdr ae;

	if (pwr_mgt)
		ae.bytype = VDAAE_HDR_TYPE_PWRMGT;
	else
		ae.bytype = VDAAE_HDR_TYPE_RESET;

	ae.byversion = VDAAE_HDR_VER_0;
	ae.bylength = (u8)sizeof(struct atto_vda_ae_hdr);

	if (pwr_mgt)
		esas2r_hdebug("*** sending power management AE ***");
	else
		esas2r_hdebug("*** sending reset AE ***");

	esas2r_queue_fw_event(a, fw_event_vda_ae, &ae,
			      sizeof(union atto_vda_ae));
}
void esas2r_dummy_complete(struct esas2r_adapter *a, struct esas2r_request *rq)
{
}
static void esas2r_check_req_rsp_sense(struct esas2r_adapter *a,
				       struct esas2r_request *rq)
{
	u8 snslen, snslen2;

	snslen = snslen2 = rq->func_rsp.scsi_rsp.sense_len;

	if (snslen > rq->sense_len)
		snslen = rq->sense_len;

	if (rq->sense_buf)
		memcpy(rq->sense_buf, rq->data_buf, snslen);
	else
		rq->sense_buf = (u8 *)rq->data_buf;

	/* See about possible sense data */
	if (snslen2 > 0x0c) {
		u8 *s = (u8 *)rq->data_buf;

		esas2r_trace_enter();

		/*
		 * Check the additional sense code and qualifier for 3Fh/0Eh:
		 * REPORTED LUNS DATA HAS CHANGED.
		 */
		if (s[0x0c] == 0x3f && s[0x0d] == 0x0E) {
			esas2r_trace("rq->target_id: %d",
				     rq->target_id);
			esas2r_target_state_changed(a, rq->target_id,
						    TS_LUN_CHANGE);
		}

		esas2r_trace("add_sense_key=%x", s[0x0c]);
		esas2r_trace("add_sense_qual=%x", s[0x0d]);
		esas2r_trace_exit();
	}

	rq->sense_len = snslen;
}
void esas2r_complete_request(struct esas2r_adapter *a,
			     struct esas2r_request *rq)
{
	if (rq->vrq->scsi.function == VDA_FUNC_FLASH
	    && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT)
		esas2r_lock_clear_flags(&a->flags, AF_FLASHING);

	/* See if we setup a callback to do special processing */
	if (rq->interrupt_cb) {
		(*rq->interrupt_cb)(a, rq);

		if (rq->req_stat == RS_PENDING) {
			esas2r_start_request(a, rq);
			return;
		}
	}

	if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)
	    && unlikely(rq->req_stat != RS_SUCCESS)) {
		esas2r_check_req_rsp_sense(a, rq);
		esas2r_log_request_failure(a, rq);
	}

	(*rq->comp_cb)(a, rq);
}