/*
 *  linux/drivers/scsi/esas2r/esas2r_disc.c
 *      esas2r device discovery routines
 *
 *  Copyright (c) 2001-2013 ATTO Technology, Inc.
 *  (mailto:linuxdrivers@attotech.com)
 */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  NO WARRANTY
 *  THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 *  CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 *  LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 *  MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 *  solely responsible for determining the appropriateness of using and
 *  distributing the Program and assumes all risks associated with its
 *  exercise of rights under this Agreement, including but not limited to
 *  the risks and costs of program errors, damage to or loss of data,
 *  programs or equipment, and unavailability or interruption of operations.
 *
 *  DISCLAIMER OF LIABILITY
 *  NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 *  DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 *  DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 *  USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 *  HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#include "esas2r.h"

/* Miscellaneous internal discovery routines */
static void esas2r_disc_abort(struct esas2r_adapter *a,
                              struct esas2r_request *rq);
static bool esas2r_disc_continue(struct esas2r_adapter *a,
                                 struct esas2r_request *rq);
static void esas2r_disc_fix_curr_requests(struct esas2r_adapter *a);
static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr);
static bool esas2r_disc_start_request(struct esas2r_adapter *a,
                                      struct esas2r_request *rq);

/* Internal discovery routines that process the states */
static bool esas2r_disc_block_dev_scan(struct esas2r_adapter *a,
                                       struct esas2r_request *rq);
static void esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a,
                                          struct esas2r_request *rq);
static bool esas2r_disc_dev_add(struct esas2r_adapter *a,
                                struct esas2r_request *rq);
static bool esas2r_disc_dev_remove(struct esas2r_adapter *a,
                                   struct esas2r_request *rq);
static bool esas2r_disc_part_info(struct esas2r_adapter *a,
                                  struct esas2r_request *rq);
static void esas2r_disc_part_info_cb(struct esas2r_adapter *a,
                                     struct esas2r_request *rq);
static bool esas2r_disc_passthru_dev_info(struct esas2r_adapter *a,
                                          struct esas2r_request *rq);
static void esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a,
                                             struct esas2r_request *rq);
static bool esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a,
                                          struct esas2r_request *rq);
static void esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a,
                                             struct esas2r_request *rq);
static bool esas2r_disc_raid_grp_info(struct esas2r_adapter *a,
                                      struct esas2r_request *rq);
static void esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a,
                                         struct esas2r_request *rq);
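/*
 * Reset the per-adapter discovery state and compute how long discovery
 * should wait for devices to arrive, based on the NVRAM dev_wait_time
 * and dev_wait_count settings.
 */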
void esas2r_disc_initialize(struct esas2r_adapter *a)
{
        struct esas2r_sas_nvram *nvr = a->nvram;

        esas2r_trace_enter();

        clear_bit(AF_DISC_IN_PROG, &a->flags);
        clear_bit(AF2_DEV_SCAN, &a->flags2);
        clear_bit(AF2_DEV_CNT_OK, &a->flags2);

        a->disc_start_time = jiffies_to_msecs(jiffies);
        a->disc_wait_time = nvr->dev_wait_time * 1000;
        a->disc_wait_cnt = nvr->dev_wait_count;

        if (a->disc_wait_cnt > ESAS2R_MAX_TARGETS)
                a->disc_wait_cnt = ESAS2R_MAX_TARGETS;

        /*
         * If we are doing chip reset or power management processing, always
         * wait for devices.  use the NVRAM device count if it is greater than
         * previously discovered devices.
         */

        esas2r_hdebug("starting discovery...");

        a->general_req.interrupt_cx = NULL;

        if (test_bit(AF_CHPRST_DETECTED, &a->flags) ||
            test_bit(AF_POWER_MGT, &a->flags)) {
                if (a->prev_dev_cnt == 0) {
                        /* Don't bother waiting if there is nothing to wait
                         * for.
                         */
                        a->disc_wait_time = 0;
                } else {
                        /*
                         * Set the device wait count to what was previously
                         * found.  We don't care if the user only configured
                         * a time because we know the exact count to wait for.
                         * There is no need to honor the user's wishes to
                         * always wait the full time.
                         */
                        a->disc_wait_cnt = a->prev_dev_cnt;

                        /*
                         * bump the minimum wait time to 15 seconds since the
                         * default is 3 (system boot or the boot driver usually
                         * buys us more time).
                         */
                        if (a->disc_wait_time < 15000)
                                a->disc_wait_time = 15000;
                }
        }

        esas2r_trace("disc wait count: %d", a->disc_wait_cnt);
        esas2r_trace("disc wait time: %d", a->disc_wait_time);

        if (a->disc_wait_time == 0)
                esas2r_disc_check_complete(a);

        esas2r_trace_exit();
}
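/*
 * Start a queued discovery event, if one is pending.  The disc_evt
 * check is done under mem_lock, matching the callers that queue events
 * via esas2r_disc_queue_event().
 */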
void esas2r_disc_start_waiting(struct esas2r_adapter *a)
{
        unsigned long flags;

        spin_lock_irqsave(&a->mem_lock, flags);

        if (a->disc_ctx.disc_evt)
                esas2r_disc_start_port(a);

        spin_unlock_irqrestore(&a->mem_lock, flags);
}
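/*
 * Drive discovery forward when running in polled mode: service pending
 * interrupts, start any queued event, and then wait on or continue the
 * active discovery request.
 */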
void esas2r_disc_check_for_work(struct esas2r_adapter *a)
{
        struct esas2r_request *rq = &a->general_req;

        /* service any pending interrupts first */

        esas2r_polled_interrupt(a);

        /*
         * now, interrupt processing may have queued up a discovery event.  go
         * see if we have one to start.  we couldn't start it in the ISR since
         * polled discovery would cause a deadlock.
         */

        esas2r_disc_start_waiting(a);

        if (rq->interrupt_cx == NULL)
                return;

        if (rq->req_stat == RS_STARTED
            && rq->timeout <= RQ_MAX_TIMEOUT) {
                /* wait for the current discovery request to complete. */
                esas2r_wait_request(a, rq);

                if (rq->req_stat == RS_TIMEOUT) {
                        esas2r_disc_abort(a, rq);
                        esas2r_local_reset_adapter(a);
                        return;
                }
        }

        if (rq->req_stat == RS_PENDING
            || rq->req_stat == RS_STARTED)
                return;

        esas2r_disc_continue(a, rq);
}
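/*
 * Decide whether the device wait phase is over.  Waiting ends when the
 * wait time is exhausted or the expected device count is reached;
 * interim scans are scheduled while waiting.
 */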
void esas2r_disc_check_complete(struct esas2r_adapter *a)
{
        unsigned long flags;

        esas2r_trace_enter();

        /* check to see if we should be waiting for devices */
        if (a->disc_wait_time) {
                u32 currtime = jiffies_to_msecs(jiffies);
                u32 time = currtime - a->disc_start_time;

                /*
                 * Wait until the device wait time is exhausted or the device
                 * wait count is satisfied.
                 */
                if (time < a->disc_wait_time
                    && (esas2r_targ_db_get_tgt_cnt(a) < a->disc_wait_cnt
                        || a->disc_wait_cnt == 0)) {
                        /* After three seconds of waiting, schedule a scan. */
                        if (time >= 3000
                            && !test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) {
                                spin_lock_irqsave(&a->mem_lock, flags);
                                esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
                                spin_unlock_irqrestore(&a->mem_lock, flags);
                        }

                        esas2r_trace_exit();
                        return;
                }

                /*
                 * We are done waiting...we think.  Adjust the wait time to
                 * consume events after the count is met.
                 */
                if (!test_and_set_bit(AF2_DEV_CNT_OK, &a->flags2))
                        a->disc_wait_time = time + 3000;

                /* If we haven't done a full scan yet, do it now. */
                if (!test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) {
                        spin_lock_irqsave(&a->mem_lock, flags);
                        esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
                        spin_unlock_irqrestore(&a->mem_lock, flags);
                        esas2r_trace_exit();
                        return;
                }

                /*
                 * Now, if there is still time left to consume events, continue
                 * waiting.
                 */
                if (time < a->disc_wait_time) {
                        esas2r_trace_exit();
                        return;
                }
        } else {
                if (!test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) {
                        spin_lock_irqsave(&a->mem_lock, flags);
                        esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
                        spin_unlock_irqrestore(&a->mem_lock, flags);
                }
        }

        /* We want to stop waiting for devices. */
        a->disc_wait_time = 0;

        if (test_bit(AF_DISC_POLLED, &a->flags) &&
            test_bit(AF_DISC_IN_PROG, &a->flags)) {
                /*
                 * Polled discovery is still pending so continue the active
                 * discovery until it is done.  At that point, we will stop
                 * polled discovery and transition to interrupt driven
                 * discovery.
                 */
        } else {
                /*
                 * Done waiting for devices.  Note that we get here immediately
                 * after deferred waiting completes because that is interrupt
                 * driven; i.e. There is no transition.
                 */
                esas2r_disc_fix_curr_requests(a);
                clear_bit(AF_DISC_PENDING, &a->flags);

                /*
                 * We have deferred target state changes until now because we
                 * don't want to report any removals (due to the first arrival)
                 * until the device wait time expires.
                 */
                set_bit(AF_PORT_CHANGE, &a->flags);
        }

        esas2r_trace_exit();
}
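/*
 * Record a discovery event (DCDE_DEV_SCAN or DCDE_DEV_CHANGE) in the
 * discovery context and start processing it, unless a chip reset is
 * pending or polled discovery is active.
 */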
void esas2r_disc_queue_event(struct esas2r_adapter *a, u8 disc_evt)
{
        struct esas2r_disc_context *dc = &a->disc_ctx;

        esas2r_trace_enter();

        esas2r_trace("disc_event: %d", disc_evt);

        /* Initialize the discovery context */
        dc->disc_evt |= disc_evt;

        /*
         * Don't start discovery before or during polled discovery.  if we did,
         * we would have a deadlock if we are in the ISR already.
         */
        if (!test_bit(AF_CHPRST_PENDING, &a->flags) &&
            !test_bit(AF_DISC_POLLED, &a->flags))
                esas2r_disc_start_port(a);

        esas2r_trace_exit();
}
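/*
 * Begin servicing the next queued discovery event.  The event decides
 * the initial state of the discovery state machine:
 *
 *   DCDE_DEV_SCAN   -> DCS_BLOCK_DEV_SCAN, then the RAID group,
 *                      partition, and pass through device states
 *   DCDE_DEV_CHANGE -> DCS_DEV_RMV, then DCS_DEV_ADD
 *
 * Both paths end in DCS_DISC_DONE.
 */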
bool esas2r_disc_start_port(struct esas2r_adapter *a)
{
        struct esas2r_request *rq = &a->general_req;
        struct esas2r_disc_context *dc = &a->disc_ctx;
        bool ret;

        esas2r_trace_enter();

        if (test_bit(AF_DISC_IN_PROG, &a->flags)) {
                esas2r_trace_exit();

                return false;
        }

        /* If there is a discovery waiting, process it. */
        if (dc->disc_evt) {
                if (test_bit(AF_DISC_POLLED, &a->flags)
                    && a->disc_wait_time == 0) {
                        /*
                         * We are doing polled discovery, but we no longer want
                         * to wait for devices.  Stop polled discovery and
                         * transition to interrupt driven discovery.
                         */
                        esas2r_trace_exit();

                        return false;
                }
        } else {
                /* Discovery is complete. */

                esas2r_hdebug("disc done");

                set_bit(AF_PORT_CHANGE, &a->flags);

                esas2r_trace_exit();

                return false;
        }

        /* Handle the discovery context */
        esas2r_trace("disc_evt: %d", dc->disc_evt);
        set_bit(AF_DISC_IN_PROG, &a->flags);
        dc->flags = 0;

        if (test_bit(AF_DISC_POLLED, &a->flags))
                dc->flags |= DCF_POLLED;

        rq->interrupt_cx = dc;
        rq->req_stat = RS_SUCCESS;

        /* Decode the event code */
        if (dc->disc_evt & DCDE_DEV_SCAN) {
                dc->disc_evt &= ~DCDE_DEV_SCAN;

                dc->flags |= DCF_DEV_SCAN;
                dc->state = DCS_BLOCK_DEV_SCAN;
        } else if (dc->disc_evt & DCDE_DEV_CHANGE) {
                dc->disc_evt &= ~DCDE_DEV_CHANGE;

                dc->flags |= DCF_DEV_CHANGE;
                dc->state = DCS_DEV_RMV;
        }

        /* Continue interrupt driven discovery */
        if (!test_bit(AF_DISC_POLLED, &a->flags))
                ret = esas2r_disc_continue(a, rq);
        else
                ret = true;

        esas2r_trace_exit();

        return ret;
}
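/*
 * The discovery state machine dispatcher.  Runs state handlers until
 * one of them issues a firmware request (returns true), or until all
 * work is consumed, at which point the next event is started.
 */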
static bool esas2r_disc_continue(struct esas2r_adapter *a,
                                 struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;
        bool rslt;

        /* Device discovery/removal */
        while (dc->flags & (DCF_DEV_CHANGE | DCF_DEV_SCAN)) {
                rslt = false;

                switch (dc->state) {
                case DCS_DEV_RMV:
                        rslt = esas2r_disc_dev_remove(a, rq);
                        break;

                case DCS_DEV_ADD:
                        rslt = esas2r_disc_dev_add(a, rq);
                        break;

                case DCS_BLOCK_DEV_SCAN:
                        rslt = esas2r_disc_block_dev_scan(a, rq);
                        break;

                case DCS_RAID_GRP_INFO:
                        rslt = esas2r_disc_raid_grp_info(a, rq);
                        break;

                case DCS_PART_INFO:
                        rslt = esas2r_disc_part_info(a, rq);
                        break;

                case DCS_PT_DEV_INFO:
                        rslt = esas2r_disc_passthru_dev_info(a, rq);
                        break;

                case DCS_PT_DEV_ADDR:
                        rslt = esas2r_disc_passthru_dev_addr(a, rq);
                        break;

                case DCS_DISC_DONE:
                        dc->flags &= ~(DCF_DEV_CHANGE | DCF_DEV_SCAN);
                        break;

                default:
                        dc->state = DCS_DISC_DONE;
                        break;
                }

                if (rslt)
                        return true;
        }

        /* Discovery is done...for now. */
        rq->interrupt_cx = NULL;

        if (!test_bit(AF_DISC_PENDING, &a->flags))
                esas2r_disc_fix_curr_requests(a);

        clear_bit(AF_DISC_IN_PROG, &a->flags);

        /* Start the next discovery. */
        return esas2r_disc_start_port(a);
}
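/*
 * Submit a discovery request to the firmware, deferring it if a chip
 * reset is pending or flash activity is in progress.
 */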
static bool esas2r_disc_start_request(struct esas2r_adapter *a,
                                      struct esas2r_request *rq)
{
        unsigned long flags;

        /* Set the timeout to a minimum value. */
        if (rq->timeout < ESAS2R_DEFAULT_TMO)
                rq->timeout = ESAS2R_DEFAULT_TMO;

        /*
         * Override the request type to distinguish discovery requests.  If we
         * end up deferring the request, esas2r_disc_local_start_request()
         * will be called to restart it.
         */
        rq->req_type = RT_DISC_REQ;

        spin_lock_irqsave(&a->queue_lock, flags);

        if (!test_bit(AF_CHPRST_PENDING, &a->flags) &&
            !test_bit(AF_FLASHING, &a->flags))
                esas2r_disc_local_start_request(a, rq);
        else
                list_add_tail(&rq->req_list, &a->defer_list);

        spin_unlock_irqrestore(&a->queue_lock, flags);

        return true;
}
void esas2r_disc_local_start_request(struct esas2r_adapter *a,
                                     struct esas2r_request *rq)
{
        esas2r_trace_enter();

        list_add_tail(&rq->req_list, &a->active_list);

        esas2r_start_vda_request(a, rq);

        esas2r_trace_exit();
}
static void esas2r_disc_abort(struct esas2r_adapter *a,
                              struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;

        esas2r_trace_enter();

        /* abort the current discovery */

        dc->state = DCS_DISC_DONE;

        esas2r_trace_exit();
}
static bool esas2r_disc_block_dev_scan(struct esas2r_adapter *a,
                                       struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;
        bool rslt;

        esas2r_trace_enter();

        esas2r_rq_init_request(rq, a);

        esas2r_build_mgt_req(a,
                             rq,
                             VDAMGT_DEV_SCAN,
                             0,
                             0,
                             0,
                             NULL);

        rq->comp_cb = esas2r_disc_block_dev_scan_cb;

        rq->timeout = 30000;
        rq->interrupt_cx = dc;

        rslt = esas2r_disc_start_request(a, rq);

        esas2r_trace_exit();

        return rslt;
}
static void esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a,
                                          struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;
        unsigned long flags;

        esas2r_trace_enter();

        spin_lock_irqsave(&a->mem_lock, flags);

        if (rq->req_stat == RS_SUCCESS)
                dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;

        dc->state = DCS_RAID_GRP_INFO;
        dc->raid_grp_ix = 0;

        esas2r_rq_destroy_request(rq, a);

        /* continue discovery if it's interrupt driven */

        if (!(dc->flags & DCF_POLLED))
                esas2r_disc_continue(a, rq);

        spin_unlock_irqrestore(&a->mem_lock, flags);

        esas2r_trace_exit();
}
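/*
 * DCS_RAID_GRP_INFO: fetch info for the next RAID group.  Only online
 * or degraded groups proceed to partition enumeration.
 */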
static bool esas2r_disc_raid_grp_info(struct esas2r_adapter *a,
                                      struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;
        bool rslt;
        struct atto_vda_grp_info *grpinfo;

        esas2r_trace_enter();

        esas2r_trace("raid_group_idx: %d", dc->raid_grp_ix);

        if (dc->raid_grp_ix >= VDA_MAX_RAID_GROUPS) {
                dc->state = DCS_DISC_DONE;

                esas2r_trace_exit();

                return false;
        }

        esas2r_rq_init_request(rq, a);

        grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info;

        memset(grpinfo, 0, sizeof(struct atto_vda_grp_info));

        esas2r_build_mgt_req(a,
                             rq,
                             VDAMGT_GRP_INFO,
                             dc->scan_gen,
                             0,
                             sizeof(struct atto_vda_grp_info),
                             NULL);

        grpinfo->grp_index = dc->raid_grp_ix;

        rq->comp_cb = esas2r_disc_raid_grp_info_cb;

        rq->interrupt_cx = dc;

        rslt = esas2r_disc_start_request(a, rq);

        esas2r_trace_exit();

        return rslt;
}
static void esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a,
                                         struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;
        unsigned long flags;
        struct atto_vda_grp_info *grpinfo;

        esas2r_trace_enter();

        spin_lock_irqsave(&a->mem_lock, flags);

        if (rq->req_stat == RS_SCAN_GEN) {
                dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
                dc->raid_grp_ix = 0;
                goto done;
        }

        if (rq->req_stat == RS_SUCCESS) {
                grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info;

                if (grpinfo->status != VDA_GRP_STAT_ONLINE
                    && grpinfo->status != VDA_GRP_STAT_DEGRADED) {
                        /* go to the next group. */

                        dc->raid_grp_ix++;
                } else {
                        memcpy(&dc->raid_grp_name[0],
                               &grpinfo->grp_name[0],
                               sizeof(grpinfo->grp_name));

                        dc->interleave = le32_to_cpu(grpinfo->interleave);
                        dc->block_size = le32_to_cpu(grpinfo->block_size);

                        dc->state = DCS_PART_INFO;
                        dc->part_num = 0;
                }
        } else {
                if (!(rq->req_stat == RS_GRP_INVALID)) {
                        esas2r_log(ESAS2R_LOG_WARN,
                                   "A request for RAID group info failed - "
                                   "returned with %x",
                                   rq->req_stat);
                }

                dc->dev_ix = 0;
                dc->state = DCS_PT_DEV_INFO;
        }

done:

        esas2r_rq_destroy_request(rq, a);

        /* continue discovery if it's interrupt driven */

        if (!(dc->flags & DCF_POLLED))
                esas2r_disc_continue(a, rq);

        spin_unlock_irqrestore(&a->mem_lock, flags);

        esas2r_trace_exit();
}
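/*
 * DCS_PART_INFO: enumerate the partitions of the current RAID group;
 * each one is added to the target database.
 */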
static bool esas2r_disc_part_info(struct esas2r_adapter *a,
                                  struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;
        bool rslt;
        struct atto_vdapart_info *partinfo;

        esas2r_trace_enter();

        esas2r_trace("part_num: %d", dc->part_num);

        if (dc->part_num >= VDA_MAX_PARTITIONS) {
                dc->state = DCS_RAID_GRP_INFO;
                dc->raid_grp_ix++;

                esas2r_trace_exit();

                return false;
        }

        esas2r_rq_init_request(rq, a);

        partinfo = &rq->vda_rsp_data->mgt_data.data.part_info;

        memset(partinfo, 0, sizeof(struct atto_vdapart_info));

        esas2r_build_mgt_req(a,
                             rq,
                             VDAMGT_PART_INFO,
                             dc->scan_gen,
                             0,
                             sizeof(struct atto_vdapart_info),
                             NULL);

        partinfo->part_no = dc->part_num;

        memcpy(&partinfo->grp_name[0],
               &dc->raid_grp_name[0],
               sizeof(partinfo->grp_name));

        rq->comp_cb = esas2r_disc_part_info_cb;

        rq->interrupt_cx = dc;

        rslt = esas2r_disc_start_request(a, rq);

        esas2r_trace_exit();

        return rslt;
}
static void esas2r_disc_part_info_cb(struct esas2r_adapter *a,
                                     struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;
        unsigned long flags;
        struct atto_vdapart_info *partinfo;

        esas2r_trace_enter();

        spin_lock_irqsave(&a->mem_lock, flags);

        if (rq->req_stat == RS_SCAN_GEN) {
                dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
                dc->raid_grp_ix = 0;
                dc->state = DCS_RAID_GRP_INFO;
        } else if (rq->req_stat == RS_SUCCESS) {
                partinfo = &rq->vda_rsp_data->mgt_data.data.part_info;

                dc->part_num = partinfo->part_no;

                dc->curr_virt_id = le16_to_cpu(partinfo->target_id);

                esas2r_targ_db_add_raid(a, dc);

                dc->part_num++;
        } else {
                if (!(rq->req_stat == RS_PART_LAST)) {
                        esas2r_log(ESAS2R_LOG_WARN,
                                   "A request for RAID group partition info "
                                   "failed - status:%d", rq->req_stat);
                }

                dc->state = DCS_RAID_GRP_INFO;
                dc->raid_grp_ix++;
        }

        esas2r_rq_destroy_request(rq, a);

        /* continue discovery if it's interrupt driven */

        if (!(dc->flags & DCF_POLLED))
                esas2r_disc_continue(a, rq);

        spin_unlock_irqrestore(&a->mem_lock, flags);

        esas2r_trace_exit();
}
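/*
 * DCS_PT_DEV_INFO: fetch info for the next pass through device,
 * starting at the current device index.
 */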
static bool esas2r_disc_passthru_dev_info(struct esas2r_adapter *a,
                                          struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;
        bool rslt;
        struct atto_vda_devinfo *devinfo;

        esas2r_trace_enter();

        esas2r_trace("dev_ix: %d", dc->dev_ix);

        esas2r_rq_init_request(rq, a);

        devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info;

        memset(devinfo, 0, sizeof(struct atto_vda_devinfo));

        esas2r_build_mgt_req(a,
                             rq,
                             VDAMGT_DEV_PT_INFO,
                             dc->scan_gen,
                             dc->dev_ix,
                             sizeof(struct atto_vda_devinfo),
                             NULL);

        rq->comp_cb = esas2r_disc_passthru_dev_info_cb;

        rq->interrupt_cx = dc;

        rslt = esas2r_disc_start_request(a, rq);

        esas2r_trace_exit();

        return rslt;
}
static void esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a,
                                             struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;
        unsigned long flags;
        struct atto_vda_devinfo *devinfo;

        esas2r_trace_enter();

        spin_lock_irqsave(&a->mem_lock, flags);

        if (rq->req_stat == RS_SCAN_GEN) {
                dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
                dc->dev_ix = 0;
                dc->state = DCS_PT_DEV_INFO;
        } else if (rq->req_stat == RS_SUCCESS) {
                devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info;

                dc->dev_ix = le16_to_cpu(rq->func_rsp.mgt_rsp.dev_index);

                dc->curr_virt_id = le16_to_cpu(devinfo->target_id);

                if (le16_to_cpu(devinfo->features) & VDADEVFEAT_PHYS_ID) {
                        dc->curr_phys_id =
                                le16_to_cpu(devinfo->phys_target_id);
                        dc->dev_addr_type = ATTO_GDA_AT_PORT;
                        dc->state = DCS_PT_DEV_ADDR;

                        esas2r_trace("curr_virt_id: %d", dc->curr_virt_id);
                        esas2r_trace("curr_phys_id: %d", dc->curr_phys_id);
                } else {
                        dc->dev_ix++;
                }
        } else {
                if (!(rq->req_stat == RS_DEV_INVALID)) {
                        esas2r_log(ESAS2R_LOG_WARN,
                                   "A request for device information failed - "
                                   "status:%d", rq->req_stat);
                }

                dc->state = DCS_DISC_DONE;
        }

        esas2r_rq_destroy_request(rq, a);

        /* continue discovery if it's interrupt driven */

        if (!(dc->flags & DCF_POLLED))
                esas2r_disc_continue(a, rq);

        spin_unlock_irqrestore(&a->mem_lock, flags);

        esas2r_trace_exit();
}
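/*
 * DCS_PT_DEV_ADDR: tunnel an ATTO_FUNC_GET_DEV_ADDR IOCTL to the back
 * end to retrieve the device's SAS address and unique identifier.
 */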
static bool esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a,
                                          struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;
        bool rslt;
        struct atto_ioctl *hi;
        struct esas2r_sg_context sgc;

        esas2r_trace_enter();

        esas2r_rq_init_request(rq, a);

        /* format the request. */

        sgc.cur_offset = NULL;
        sgc.get_phys_addr = (PGETPHYSADDR)esas2r_disc_get_phys_addr;
        sgc.length = offsetof(struct atto_ioctl, data)
                     + sizeof(struct atto_hba_get_device_address);

        esas2r_sgc_init(&sgc, a, rq, rq->vrq->ioctl.sge);

        esas2r_build_ioctl_req(a, rq, sgc.length, VDA_IOCTL_HBA);

        if (!esas2r_build_sg_list(a, rq, &sgc)) {
                esas2r_rq_destroy_request(rq, a);

                esas2r_trace_exit();

                return false;
        }

        rq->comp_cb = esas2r_disc_passthru_dev_addr_cb;

        rq->interrupt_cx = dc;

        /* format the IOCTL data. */

        hi = (struct atto_ioctl *)a->disc_buffer;

        memset(a->disc_buffer, 0, ESAS2R_DISC_BUF_LEN);

        hi->version = ATTO_VER_GET_DEV_ADDR0;
        hi->function = ATTO_FUNC_GET_DEV_ADDR;
        hi->flags = HBAF_TUNNEL;

        hi->data.get_dev_addr.target_id = le32_to_cpu(dc->curr_phys_id);
        hi->data.get_dev_addr.addr_type = dc->dev_addr_type;

        rslt = esas2r_disc_start_request(a, rq);

        esas2r_trace_exit();

        return rslt;
}
static void esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a,
                                             struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;
        struct esas2r_target *t = NULL;
        unsigned long flags;
        struct atto_ioctl *hi;
        u16 addrlen;

        esas2r_trace_enter();

        spin_lock_irqsave(&a->mem_lock, flags);

        hi = (struct atto_ioctl *)a->disc_buffer;

        if (rq->req_stat == RS_SUCCESS
            && hi->status == ATTO_STS_SUCCESS) {
                addrlen = le16_to_cpu(hi->data.get_dev_addr.addr_len);

                if (dc->dev_addr_type == ATTO_GDA_AT_PORT) {
                        if (addrlen == sizeof(u64))
                                memcpy(&dc->sas_addr,
                                       &hi->data.get_dev_addr.address[0],
                                       addrlen);
                        else
                                memset(&dc->sas_addr, 0, sizeof(dc->sas_addr));

                        /* Get the unique identifier. */
                        dc->dev_addr_type = ATTO_GDA_AT_UNIQUE;

                        goto next_dev_addr;
                } else {
                        /* Add the pass through target. */
                        if (HIBYTE(addrlen) == 0) {
                                t = esas2r_targ_db_add_pthru(a,
                                                             dc,
                                                             &hi->data.
                                                             get_dev_addr.
                                                             address[0],
                                                             (u8)addrlen);

                                if (t)
                                        memcpy(&t->sas_addr, &dc->sas_addr,
                                               sizeof(t->sas_addr));
                        } else {
                                /* getting the back end data failed */

                                esas2r_log(ESAS2R_LOG_WARN,
                                           "an error occurred retrieving the "
                                           "back end data (%s:%d)",
                                           __func__, __LINE__);
                        }
                }
        } else {
                /* getting the back end data failed */

                esas2r_log(ESAS2R_LOG_WARN,
                           "an error occurred retrieving the back end data - "
                           "rq->req_stat:%d hi->status:%d",
                           rq->req_stat, hi->status);
        }

        /* proceed to the next device. */

        if (dc->flags & DCF_DEV_SCAN) {
                dc->dev_ix++;
                dc->state = DCS_PT_DEV_INFO;
        } else if (dc->flags & DCF_DEV_CHANGE) {
                dc->curr_targ++;
                dc->state = DCS_DEV_ADD;
        }

next_dev_addr:
        esas2r_rq_destroy_request(rq, a);

        /* continue discovery if it's interrupt driven */

        if (!(dc->flags & DCF_POLLED))
                esas2r_disc_continue(a, rq);

        spin_unlock_irqrestore(&a->mem_lock, flags);

        esas2r_trace_exit();
}
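/*
 * S/G list callback for the device address IOCTL.  Returns the
 * physical address of disc_buffer within the adapter's uncached
 * memory region.
 */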
static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr)
{
        struct esas2r_adapter *a = sgc->adapter;

        if (sgc->length > ESAS2R_DISC_BUF_LEN)
                esas2r_bugon();

        *addr = a->uncached_phys
                + (u64)((u8 *)a->disc_buffer - a->uncached);

        return sgc->length;
}
static bool esas2r_disc_dev_remove(struct esas2r_adapter *a,
                                   struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;
        struct esas2r_target *t;
        struct esas2r_target *t2;

        esas2r_trace_enter();

        /* process removals. */

        for (t = a->targetdb; t < a->targetdb_end; t++) {
                if (t->new_target_state != TS_NOT_PRESENT)
                        continue;

                t->new_target_state = TS_INVALID;

                /* remove the right target! */

                t2 = esas2r_targ_db_find_by_virt_id(a,
                                                    esas2r_targ_get_id(t, a));

                if (t2)
                        esas2r_targ_db_remove(a, t2);
        }

        /* removals complete.  process arrivals. */

        dc->state = DCS_DEV_ADD;
        dc->curr_targ = a->targetdb;

        esas2r_trace_exit();

        return false;
}
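/*
 * DCS_DEV_ADD: process one target arrival per call, using the queued
 * LU event to decide whether the device is RAID or pass through (the
 * latter needs its address fetched via DCS_PT_DEV_ADDR first).
 */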
static bool esas2r_disc_dev_add(struct esas2r_adapter *a,
                                struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;
        struct esas2r_target *t = dc->curr_targ;

        if (t >= a->targetdb_end) {
                /* done processing state changes. */

                dc->state = DCS_DISC_DONE;
        } else if (t->new_target_state == TS_PRESENT) {
                struct atto_vda_ae_lu *luevt = &t->lu_event;

                esas2r_trace_enter();

                /* clear this now in case more events come in. */

                t->new_target_state = TS_INVALID;

                /* setup the discovery context for adding this device. */

                dc->curr_virt_id = esas2r_targ_get_id(t, a);

                if ((luevt->hdr.bylength >= offsetof(struct atto_vda_ae_lu, id)
                     + sizeof(struct atto_vda_ae_lu_tgt_lun_raid))
                    && !(luevt->dwevent & VDAAE_LU_PASSTHROUGH)) {
                        dc->block_size = luevt->id.tgtlun_raid.dwblock_size;
                        dc->interleave = luevt->id.tgtlun_raid.dwinterleave;
                } else {
                        dc->block_size = 0;
                        dc->interleave = 0;
                }

                /* determine the device type being added. */

                if (luevt->dwevent & VDAAE_LU_PASSTHROUGH) {
                        if (luevt->dwevent & VDAAE_LU_PHYS_ID) {
                                dc->state = DCS_PT_DEV_ADDR;
                                dc->dev_addr_type = ATTO_GDA_AT_PORT;
                                dc->curr_phys_id = luevt->wphys_target_id;
                        } else {
                                esas2r_log(ESAS2R_LOG_WARN,
                                           "luevt->dwevent does not have the "
                                           "VDAAE_LU_PHYS_ID bit set (%s:%d)",
                                           __func__, __LINE__);
                        }
                } else {
                        dc->raid_grp_name[0] = 0;

                        esas2r_targ_db_add_raid(a, dc);
                }

                esas2r_trace("curr_virt_id: %d", dc->curr_virt_id);
                esas2r_trace("curr_phys_id: %d", dc->curr_phys_id);
                esas2r_trace("dwevent: %d", luevt->dwevent);

                esas2r_trace_exit();
        }

        if (dc->state == DCS_DEV_ADD) {
                /* go to the next device. */

                dc->curr_targ++;
        }

        return false;
}
/*
 * When discovery is done, find all requests on defer queue and
 * test if they need to be modified.  If a target is no longer present
 * then complete the request with RS_SEL.  Otherwise, update the
 * target_id since after a hibernate it can be a different value.
 * VDA does not make passthrough target IDs persistent.
 */
static void esas2r_disc_fix_curr_requests(struct esas2r_adapter *a)
{
        unsigned long flags;
        struct esas2r_target *t;
        struct esas2r_request *rq;
        struct list_head *element;

        /* update virt_targ_id in any outstanding esas2r_requests */

        spin_lock_irqsave(&a->queue_lock, flags);

        list_for_each(element, &a->defer_list) {
                rq = list_entry(element, struct esas2r_request, req_list);
                if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
                        t = a->targetdb + rq->target_id;

                        if (t->target_state == TS_PRESENT)
                                rq->vrq->scsi.target_id =
                                        le16_to_cpu(t->virt_targ_id);
                        else
                                rq->req_stat = RS_SEL;
                }
        }

        spin_unlock_irqrestore(&a->queue_lock, flags);
}