/*
 * linux/drivers/scsi/esas2r/esas2r_ioctl.c
 *      For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
 *
 * Copyright (c) 2001-2013 ATTO Technology, Inc.
 * (mailto:linuxdrivers@attotech.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.
 *
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
 * USA.
 */

#include "esas2r.h"
/*
 * Buffered ioctl handlers.  A buffered ioctl is one which requires that we
 * allocate a DMA-able memory area to communicate with the firmware.  In
 * order to prevent continually allocating and freeing consistent memory,
 * we will allocate a global buffer the first time we need it and re-use
 * it for subsequent ioctl calls that require it.
 */
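/*
 * The shared buffer state below is deliberately global: all access is
 * serialized by buffered_ioctl_semaphore, and esas2r_buffered_ioctl_pcid
 * records the PCI device the buffer was allocated against (presumably so
 * the buffer can be freed against the correct device at unload time).
 */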
u8 *esas2r_buffered_ioctl;
dma_addr_t esas2r_buffered_ioctl_addr;
u32 esas2r_buffered_ioctl_size;
struct pci_dev *esas2r_buffered_ioctl_pcid;
static DEFINE_SEMAPHORE(buffered_ioctl_semaphore);
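/*
 * Contract for the issue callback below: return nonzero after starting
 * the request (the caller then sleeps until the completion callback sets
 * buffered_ioctl_done), or zero when the ioctl completed or failed inline
 * and there is nothing to wait for.
 */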
typedef int (*BUFFERED_IOCTL_CALLBACK)(struct esas2r_adapter *,
				       struct esas2r_request *,
				       struct esas2r_sg_context *,
				       void *);
typedef void (*BUFFERED_IOCTL_DONE_CALLBACK)(struct esas2r_adapter *,
					     struct esas2r_request *, void *);
struct esas2r_buffered_ioctl {
	struct esas2r_adapter *a;
	void *ioctl;
	u32 length;
	u32 control_code;
	u32 offset;
	BUFFERED_IOCTL_CALLBACK callback;
	void *context;
	BUFFERED_IOCTL_DONE_CALLBACK done_callback;
	void *done_context;
};
static void complete_fm_api_req(struct esas2r_adapter *a,
				struct esas2r_request *rq)
{
	a->fm_api_command_done = 1;
	wake_up_interruptible(&a->fm_api_waiter);
}
/* Callbacks for building scatter/gather lists for FM API requests */
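/*
 * Both callbacks follow the PGETPHYSADDR contract used throughout this
 * file: store in *addr the DMA address that corresponds to
 * sgc->cur_offset and return the number of contiguous bytes remaining in
 * the underlying region.
 */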
static u32 get_physaddr_fm_api(struct esas2r_sg_context *sgc, u64 *addr)
{
	struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
	int offset = sgc->cur_offset - a->save_offset;

	(*addr) = a->firmware.phys + offset;
	return a->firmware.orig_len - offset;
}
static u32 get_physaddr_fm_api_header(struct esas2r_sg_context *sgc, u64 *addr)
{
	struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
	int offset = sgc->cur_offset - a->save_offset;

	(*addr) = a->firmware.header_buff_phys + offset;
	return sizeof(struct esas2r_flash_img) - offset;
}
/* Handle EXPRESS_IOCTL_RW_FIRMWARE ioctl with img_type = FW_IMG_FM_API. */
static void do_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
{
	struct esas2r_request *rq;

	if (down_interruptible(&a->fm_api_semaphore)) {
		fi->status = FI_STAT_BUSY;
		return;
	}

	rq = esas2r_alloc_request(a);
	if (rq == NULL) {
		fi->status = FI_STAT_BUSY;
		goto free_sem;
	}

	if (fi == &a->firmware.header) {
		a->firmware.header_buff =
			dma_alloc_coherent(&a->pcid->dev,
					   (size_t)sizeof(struct esas2r_flash_img),
					   (dma_addr_t *)&a->firmware.header_buff_phys,
					   GFP_KERNEL);

		if (a->firmware.header_buff == NULL) {
			esas2r_debug("failed to allocate header buffer!");
			fi->status = FI_STAT_BUSY;
			goto free_req;
		}

		memcpy(a->firmware.header_buff, fi,
		       sizeof(struct esas2r_flash_img));
		a->save_offset = a->firmware.header_buff;
		a->fm_api_sgc.get_phys_addr =
			(PGETPHYSADDR)get_physaddr_fm_api_header;
	} else {
		a->save_offset = (u8 *)fi;
		a->fm_api_sgc.get_phys_addr =
			(PGETPHYSADDR)get_physaddr_fm_api;
	}

	rq->comp_cb = complete_fm_api_req;
	a->fm_api_command_done = 0;
	a->fm_api_sgc.cur_offset = a->save_offset;

	if (!esas2r_fm_api(a, (struct esas2r_flash_img *)a->save_offset, rq,
			   &a->fm_api_sgc))
		goto all_done;

	/* Now wait around for it to complete. */
	while (!a->fm_api_command_done)
		wait_event_interruptible(a->fm_api_waiter,
					 a->fm_api_command_done);

all_done:
	if (fi == &a->firmware.header) {
		memcpy(fi, a->firmware.header_buff,
		       sizeof(struct esas2r_flash_img));

		dma_free_coherent(&a->pcid->dev,
				  (size_t)sizeof(struct esas2r_flash_img),
				  a->firmware.header_buff,
				  (dma_addr_t)a->firmware.header_buff_phys);
	}

free_req:
	esas2r_free_request(a, (struct esas2r_request *)rq);

free_sem:
	up(&a->fm_api_semaphore);
}
static void complete_nvr_req(struct esas2r_adapter *a,
			     struct esas2r_request *rq)
{
	a->nvram_command_done = 1;
	wake_up_interruptible(&a->nvram_waiter);
}
/* Callback for building scatter/gather lists for buffered ioctls */
static u32 get_physaddr_buffered_ioctl(struct esas2r_sg_context *sgc,
				       u64 *addr)
{
	int offset = (u8 *)sgc->cur_offset - esas2r_buffered_ioctl;

	(*addr) = esas2r_buffered_ioctl_addr + offset;
	return esas2r_buffered_ioctl_size - offset;
}
static void complete_buffered_ioctl_req(struct esas2r_adapter *a,
					struct esas2r_request *rq)
{
	a->buffered_ioctl_done = 1;
	wake_up_interruptible(&a->buffered_ioctl_waiter);
}
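/*
 * Run a buffered ioctl: serialize on the global semaphore, (re)allocate
 * the shared DMA buffer if the current one is too small, copy the
 * caller's data in, hand the request to the issue callback, wait for
 * completion when required, then run the done callback and copy the
 * result back out.
 */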
static u8 handle_buffered_ioctl(struct esas2r_buffered_ioctl *bi)
{
	struct esas2r_adapter *a = bi->a;
	struct esas2r_request *rq;
	struct esas2r_sg_context sgc;
	u8 result = IOCTL_SUCCESS;

	if (down_interruptible(&buffered_ioctl_semaphore))
		return IOCTL_OUT_OF_RESOURCES;

	/* allocate a buffer or use the existing buffer. */
	if (esas2r_buffered_ioctl) {
		if (esas2r_buffered_ioctl_size < bi->length) {
			/* free the too-small buffer and get a new one */
			dma_free_coherent(&a->pcid->dev,
					  (size_t)esas2r_buffered_ioctl_size,
					  esas2r_buffered_ioctl,
					  esas2r_buffered_ioctl_addr);

			goto allocate_buffer;
		}
	} else {
allocate_buffer:
		esas2r_buffered_ioctl_size = bi->length;
		esas2r_buffered_ioctl_pcid = a->pcid;
		esas2r_buffered_ioctl =
			dma_alloc_coherent(&a->pcid->dev,
					   (size_t)esas2r_buffered_ioctl_size,
					   &esas2r_buffered_ioctl_addr,
					   GFP_KERNEL);
	}

	if (!esas2r_buffered_ioctl) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "could not allocate %d bytes of consistent memory "
			   "for a buffered ioctl!",
			   bi->length);

		esas2r_debug("buffered ioctl alloc failure");
		result = IOCTL_OUT_OF_RESOURCES;
		goto exit_cleanly;
	}

	memcpy(esas2r_buffered_ioctl, bi->ioctl, bi->length);

	rq = esas2r_alloc_request(a);
	if (rq == NULL) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "could not allocate an internal request");

		result = IOCTL_OUT_OF_RESOURCES;
		esas2r_debug("buffered ioctl - no requests");
		goto exit_cleanly;
	}

	a->buffered_ioctl_done = 0;
	rq->comp_cb = complete_buffered_ioctl_req;
	sgc.cur_offset = esas2r_buffered_ioctl + bi->offset;
	sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_buffered_ioctl;
	sgc.length = esas2r_buffered_ioctl_size;

	if (!(*bi->callback)(a, rq, &sgc, bi->context)) {
		/* completed immediately, no need to wait */
		a->buffered_ioctl_done = 0;
		goto free_andexit_cleanly;
	}

	/* now wait around for it to complete. */
	while (!a->buffered_ioctl_done)
		wait_event_interruptible(a->buffered_ioctl_waiter,
					 a->buffered_ioctl_done);

free_andexit_cleanly:
	if (result == IOCTL_SUCCESS && bi->done_callback)
		(*bi->done_callback)(a, rq, bi->done_context);

	esas2r_free_request(a, rq);

exit_cleanly:
	if (result == IOCTL_SUCCESS)
		memcpy(bi->ioctl, esas2r_buffered_ioctl, bi->length);

	up(&buffered_ioctl_semaphore);

	return result;
}
/* SMP ioctl support */
static int smp_ioctl_callback(struct esas2r_adapter *a,
			      struct esas2r_request *rq,
			      struct esas2r_sg_context *sgc, void *context)
{
	struct atto_ioctl_smp *si =
		(struct atto_ioctl_smp *)esas2r_buffered_ioctl;

	esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
	esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_SMP);

	if (!esas2r_build_sg_list(a, rq, sgc)) {
		si->status = ATTO_STS_OUT_OF_RSRC;
		return false;
	}

	esas2r_start_request(a, rq);

	return true;
}
static u8 handle_smp_ioctl(struct esas2r_adapter *a, struct atto_ioctl_smp *si)
{
	struct esas2r_buffered_ioctl bi;

	memset(&bi, 0, sizeof(bi));

	bi.a = a;
	bi.ioctl = si;
	bi.length = sizeof(struct atto_ioctl_smp)
		    + le32_to_cpu(si->req_length)
		    + le32_to_cpu(si->rsp_length);
	bi.offset = 0;
	bi.callback = smp_ioctl_callback;

	return handle_buffered_ioctl(&bi);
}
/* CSMI ioctl support */
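/*
 * Control codes that cannot be answered from driver state are tunneled to
 * the firmware; the tunnel completion callback below rewrites the target
 * ID and LUN from the firmware response before chaining to the original
 * completion callback.
 */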
static void esas2r_csmi_ioctl_tunnel_comp_cb(struct esas2r_adapter *a,
					     struct esas2r_request *rq)
{
	rq->target_id = le16_to_cpu(rq->func_rsp.ioctl_rsp.csmi.target_id);
	rq->vrq->scsi.flags |= cpu_to_le32(rq->func_rsp.ioctl_rsp.csmi.lun);

	/* Now call the original completion callback. */
	(*rq->aux_req_cb)(a, rq);
}
/* Tunnel a CSMI IOCTL to the back end driver for processing. */
static bool csmi_ioctl_tunnel(struct esas2r_adapter *a,
			      union atto_ioctl_csmi *ci,
			      struct esas2r_request *rq,
			      struct esas2r_sg_context *sgc,
			      u32 ctrl_code,
			      u16 target_id)
{
	struct atto_vda_ioctl_req *ioctl = &rq->vrq->ioctl;

	if (test_bit(AF_DEGRADED_MODE, &a->flags))
		return false;

	esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
	esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_CSMI);
	ioctl->csmi.ctrl_code = cpu_to_le32(ctrl_code);
	ioctl->csmi.target_id = cpu_to_le16(target_id);
	ioctl->csmi.lun = (u8)le32_to_cpu(rq->vrq->scsi.flags);

	/*
	 * Always usurp the completion callback since the interrupt callback
	 * mechanism may be used.
	 */
	rq->aux_req_cx = ci;
	rq->aux_req_cb = rq->comp_cb;
	rq->comp_cb = esas2r_csmi_ioctl_tunnel_comp_cb;

	if (!esas2r_build_sg_list(a, rq, sgc))
		return false;

	esas2r_start_request(a, rq);

	return true;
}
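/*
 * Returns true when the SCSI LUN is one this driver can address: every
 * byte of the single-level LUN except byte 1 (the LUN number itself) must
 * be zero.
 */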
static bool check_lun(struct scsi_lun lun)
{
	bool result;

	result = ((lun.scsi_lun[7] == 0) &&
		  (lun.scsi_lun[6] == 0) &&
		  (lun.scsi_lun[5] == 0) &&
		  (lun.scsi_lun[4] == 0) &&
		  (lun.scsi_lun[3] == 0) &&
		  (lun.scsi_lun[2] == 0) &&
		  /* Byte 1 is intentionally skipped */
		  (lun.scsi_lun[0] == 0));

	return result;
}
static int csmi_ioctl_callback(struct esas2r_adapter *a,
			       struct esas2r_request *rq,
			       struct esas2r_sg_context *sgc, void *context)
{
	struct atto_csmi *ci = (struct atto_csmi *)context;
	union atto_ioctl_csmi *ioctl_csmi =
		(union atto_ioctl_csmi *)esas2r_buffered_ioctl;
	u8 path = 0;
	u8 tid = 0;
	u8 lun = 0;
	u32 sts = CSMI_STS_SUCCESS;
	struct esas2r_target *t;
	unsigned long flags;

	if (ci->control_code == CSMI_CC_GET_DEV_ADDR) {
		struct atto_csmi_get_dev_addr *gda = &ci->data.dev_addr;

		path = gda->path_id;
		tid = gda->target_id;
		lun = gda->lun;
	} else if (ci->control_code == CSMI_CC_TASK_MGT) {
		struct atto_csmi_task_mgmt *tm = &ci->data.tsk_mgt;

		path = tm->path_id;
		tid = tm->target_id;
		lun = tm->lun;
	}

	if (path > 0) {
		rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(
			CSMI_STS_INV_PARAM);
		return false;
	}

	rq->target_id = tid;
	rq->vrq->scsi.flags |= cpu_to_le32(lun);
	switch (ci->control_code) {
	case CSMI_CC_GET_DRVR_INFO:
	{
		struct atto_csmi_get_driver_info *gdi = &ioctl_csmi->drvr_info;

		strcpy(gdi->description, esas2r_get_model_name(a));
		gdi->csmi_major_rev = CSMI_MAJOR_REV;
		gdi->csmi_minor_rev = CSMI_MINOR_REV;
		break;
	}
	case CSMI_CC_GET_CNTLR_CFG:
	{
		struct atto_csmi_get_cntlr_cfg *gcc = &ioctl_csmi->cntlr_cfg;

		gcc->base_io_addr = 0;
		pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_2,
				      &gcc->base_memaddr_lo);
		pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_3,
				      &gcc->base_memaddr_hi);
		gcc->board_id = MAKEDWORD(a->pcid->subsystem_device,
					  a->pcid->subsystem_vendor);
		gcc->slot_num = CSMI_SLOT_NUM_UNKNOWN;
		gcc->cntlr_class = CSMI_CNTLR_CLASS_HBA;
		gcc->io_bus_type = CSMI_BUS_TYPE_PCI;
		gcc->pci_addr.bus_num = a->pcid->bus->number;
		gcc->pci_addr.device_num = PCI_SLOT(a->pcid->devfn);
		gcc->pci_addr.function_num = PCI_FUNC(a->pcid->devfn);

		memset(gcc->serial_num, 0, sizeof(gcc->serial_num));

		gcc->major_rev = LOBYTE(LOWORD(a->fw_version));
		gcc->minor_rev = HIBYTE(LOWORD(a->fw_version));
		gcc->build_rev = LOBYTE(HIWORD(a->fw_version));
		gcc->release_rev = HIBYTE(HIWORD(a->fw_version));
		gcc->bios_major_rev = HIBYTE(HIWORD(a->flash_ver));
		gcc->bios_minor_rev = LOBYTE(HIWORD(a->flash_ver));
		gcc->bios_build_rev = LOWORD(a->flash_ver);

		if (test_bit(AF2_THUNDERLINK, &a->flags2))
			gcc->cntlr_flags = CSMI_CNTLRF_SAS_HBA
					   | CSMI_CNTLRF_SATA_HBA;
		else
			gcc->cntlr_flags = CSMI_CNTLRF_SAS_RAID
					   | CSMI_CNTLRF_SATA_RAID;

		gcc->rrom_major_rev = 0;
		gcc->rrom_minor_rev = 0;
		gcc->rrom_build_rev = 0;
		gcc->rrom_release_rev = 0;
		gcc->rrom_biosmajor_rev = 0;
		gcc->rrom_biosminor_rev = 0;
		gcc->rrom_biosbuild_rev = 0;
		gcc->rrom_biosrelease_rev = 0;
		break;
	}
	case CSMI_CC_GET_CNTLR_STS:
	{
		struct atto_csmi_get_cntlr_sts *gcs = &ioctl_csmi->cntlr_sts;

		if (test_bit(AF_DEGRADED_MODE, &a->flags))
			gcs->status = CSMI_CNTLR_STS_FAILED;
		else
			gcs->status = CSMI_CNTLR_STS_GOOD;

		gcs->offline_reason = CSMI_OFFLINE_NO_REASON;
		break;
	}
	case CSMI_CC_FW_DOWNLOAD:
	case CSMI_CC_GET_RAID_INFO:
	case CSMI_CC_GET_RAID_CFG:

		sts = CSMI_STS_BAD_CTRL_CODE;
		break;

	case CSMI_CC_SMP_PASSTHRU:
	case CSMI_CC_SSP_PASSTHRU:
	case CSMI_CC_STP_PASSTHRU:
	case CSMI_CC_GET_PHY_INFO:
	case CSMI_CC_SET_PHY_INFO:
	case CSMI_CC_GET_LINK_ERRORS:
	case CSMI_CC_GET_SATA_SIG:
	case CSMI_CC_GET_CONN_INFO:
	case CSMI_CC_PHY_CTRL:

		if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc,
				       ci->control_code,
				       ESAS2R_TARG_ID_INV)) {
			sts = CSMI_STS_FAILED;
			break;
		}

		return true;
	case CSMI_CC_GET_SCSI_ADDR:
	{
		struct atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr;

		struct scsi_lun lun;

		memcpy(&lun, gsa->sas_lun, sizeof(struct scsi_lun));

		if (!check_lun(lun)) {
			sts = CSMI_STS_NO_SCSI_ADDR;
			break;
		}

		/* make sure the device is present */
		spin_lock_irqsave(&a->mem_lock, flags);
		t = esas2r_targ_db_find_by_sas_addr(a, (u64 *)gsa->sas_addr);
		spin_unlock_irqrestore(&a->mem_lock, flags);

		if (t == NULL) {
			sts = CSMI_STS_NO_SCSI_ADDR;
			break;
		}

		gsa->host_index = 0xFF;
		gsa->lun = gsa->sas_lun[1];
		rq->target_id = esas2r_targ_get_id(t, a);
		break;
	}
	case CSMI_CC_GET_DEV_ADDR:
	{
		struct atto_csmi_get_dev_addr *gda = &ioctl_csmi->dev_addr;

		/* make sure the target is present */
		t = a->targetdb + rq->target_id;

		if (t >= a->targetdb_end
		    || t->target_state != TS_PRESENT
		    || t->sas_addr == 0) {
			sts = CSMI_STS_NO_DEV_ADDR;
			break;
		}

		/* fill in the result */
		*(u64 *)gda->sas_addr = t->sas_addr;
		memset(gda->sas_lun, 0, sizeof(gda->sas_lun));
		gda->sas_lun[1] = (u8)le32_to_cpu(rq->vrq->scsi.flags);
		break;
	}
	case CSMI_CC_TASK_MGT:

		/* make sure the target is present */
		t = a->targetdb + rq->target_id;

		if (t >= a->targetdb_end
		    || t->target_state != TS_PRESENT
		    || !(t->flags & TF_PASS_THRU)) {
			sts = CSMI_STS_NO_DEV_ADDR;
			break;
		}

		if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc,
				       ci->control_code,
				       t->phys_targ_id)) {
			sts = CSMI_STS_FAILED;
			break;
		}

		return true;

	default:

		sts = CSMI_STS_BAD_CTRL_CODE;
		break;
	}

	rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(sts);

	return false;
}
static void csmi_ioctl_done_callback(struct esas2r_adapter *a,
				     struct esas2r_request *rq, void *context)
{
	struct atto_csmi *ci = (struct atto_csmi *)context;
	union atto_ioctl_csmi *ioctl_csmi =
		(union atto_ioctl_csmi *)esas2r_buffered_ioctl;

	switch (ci->control_code) {
	case CSMI_CC_GET_DRVR_INFO:
	{
		struct atto_csmi_get_driver_info *gdi =
			&ioctl_csmi->drvr_info;

		strcpy(gdi->name, ESAS2R_VERSION_STR);

		gdi->major_rev = ESAS2R_MAJOR_REV;
		gdi->minor_rev = ESAS2R_MINOR_REV;
		gdi->build_rev = 0;
		gdi->release_rev = 0;
		break;
	}

	case CSMI_CC_GET_SCSI_ADDR:
	{
		struct atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr;

		if (le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status) ==
		    CSMI_STS_SUCCESS) {
			gsa->target_id = rq->target_id;
			gsa->path_id = 0;
		}

		break;
	}
	}

	ci->status = le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status);
}
static u8 handle_csmi_ioctl(struct esas2r_adapter *a, struct atto_csmi *ci)
{
	struct esas2r_buffered_ioctl bi;

	memset(&bi, 0, sizeof(bi));

	bi.a = a;
	bi.ioctl = &ci->data;
	bi.length = sizeof(union atto_ioctl_csmi);
	bi.offset = 0;
	bi.callback = csmi_ioctl_callback;
	bi.context = ci;
	bi.done_callback = csmi_ioctl_done_callback;
	bi.done_context = ci;

	return handle_buffered_ioctl(&bi);
}
/* ATTO HBA ioctl support */

/* Tunnel an ATTO HBA IOCTL to the back end driver for processing. */
static bool hba_ioctl_tunnel(struct esas2r_adapter *a,
			     struct atto_ioctl *hi,
			     struct esas2r_request *rq,
			     struct esas2r_sg_context *sgc)
{
	esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);

	esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_HBA);

	if (!esas2r_build_sg_list(a, rq, sgc)) {
		hi->status = ATTO_STS_OUT_OF_RSRC;
		return false;
	}

	esas2r_start_request(a, rq);

	return true;
}
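/*
 * Completion callback for SCSI pass-through requests: translate the
 * request status into an ATTO SPT status code, advance the caller's
 * target ID to the next present target, then hand off to the original
 * completion callback saved in aux_req_cb.
 */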
static void scsi_passthru_comp_cb(struct esas2r_adapter *a,
				  struct esas2r_request *rq)
{
	struct atto_ioctl *hi = (struct atto_ioctl *)rq->aux_req_cx;
	struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru;
	u8 sts = ATTO_SPT_RS_FAILED;

	spt->scsi_status = rq->func_rsp.scsi_rsp.scsi_stat;
	spt->sense_length = rq->sense_len;
	spt->residual_length =
		le32_to_cpu(rq->func_rsp.scsi_rsp.residual_length);

	switch (rq->req_stat) {
	case RS_SUCCESS:
	case RS_SCSI_ERROR:
		sts = ATTO_SPT_RS_SUCCESS;
		break;
	case RS_UNDERRUN:
		sts = ATTO_SPT_RS_UNDERRUN;
		break;
	case RS_OVERRUN:
		sts = ATTO_SPT_RS_OVERRUN;
		break;
	case RS_SEL:
	case RS_SEL2:
		sts = ATTO_SPT_RS_NO_DEVICE;
		break;
	case RS_NO_LUN:
		sts = ATTO_SPT_RS_NO_LUN;
		break;
	case RS_TIMEOUT:
		sts = ATTO_SPT_RS_TIMEOUT;
		break;
	case RS_DEGRADED:
		sts = ATTO_SPT_RS_DEGRADED;
		break;
	case RS_BUSY:
		sts = ATTO_SPT_RS_BUSY;
		break;
	case RS_ABORTED:
		sts = ATTO_SPT_RS_ABORTED;
		break;
	case RS_RESET:
		sts = ATTO_SPT_RS_BUS_RESET;
		break;
	}

	spt->req_status = sts;

	/* Update the target ID to the next one present. */
	spt->target_id =
		esas2r_targ_db_find_next_present(a, (u16)spt->target_id);

	/* Done, call the completion callback. */
	(*rq->aux_req_cb)(a, rq);
}
static int hba_ioctl_callback(struct esas2r_adapter *a,
			      struct esas2r_request *rq,
			      struct esas2r_sg_context *sgc,
			      void *context)
{
	struct atto_ioctl *hi = (struct atto_ioctl *)esas2r_buffered_ioctl;

	hi->status = ATTO_STS_SUCCESS;

	switch (hi->function) {
	case ATTO_FUNC_GET_ADAP_INFO:
	{
		u8 *class_code = (u8 *)&a->pcid->class;

		struct atto_hba_get_adapter_info *gai =
			&hi->data.get_adap_info;
		int pcie_cap_reg;

		if (hi->flags & HBAF_TUNNEL) {
			hi->status = ATTO_STS_UNSUPPORTED;
			break;
		}

		if (hi->version > ATTO_VER_GET_ADAP_INFO0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_GET_ADAP_INFO0;
			break;
		}

		memset(gai, 0, sizeof(*gai));

		gai->pci.vendor_id = a->pcid->vendor;
		gai->pci.device_id = a->pcid->device;
		gai->pci.ss_vendor_id = a->pcid->subsystem_vendor;
		gai->pci.ss_device_id = a->pcid->subsystem_device;
		gai->pci.class_code[0] = class_code[0];
		gai->pci.class_code[1] = class_code[1];
		gai->pci.class_code[2] = class_code[2];
		gai->pci.rev_id = a->pcid->revision;
		gai->pci.bus_num = a->pcid->bus->number;
		gai->pci.dev_num = PCI_SLOT(a->pcid->devfn);
		gai->pci.func_num = PCI_FUNC(a->pcid->devfn);

		pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
		if (pcie_cap_reg) {
			u16 stat;
			u32 caps;

			pci_read_config_word(a->pcid,
					     pcie_cap_reg + PCI_EXP_LNKSTA,
					     &stat);
			pci_read_config_dword(a->pcid,
					      pcie_cap_reg + PCI_EXP_LNKCAP,
					      &caps);

			gai->pci.link_speed_curr =
				(u8)(stat & PCI_EXP_LNKSTA_CLS);
			gai->pci.link_speed_max =
				(u8)(caps & PCI_EXP_LNKCAP_SLS);
			gai->pci.link_width_curr =
				(u8)((stat & PCI_EXP_LNKSTA_NLW)
				     >> PCI_EXP_LNKSTA_NLW_SHIFT);
			gai->pci.link_width_max =
				(u8)((caps & PCI_EXP_LNKCAP_MLW)
				     >> 4);
		}

		gai->pci.msi_vector_cnt = 1;

		if (a->pcid->msix_enabled)
			gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSIX;
		else if (a->pcid->msi_enabled)
			gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSI;
		else
			gai->pci.interrupt_mode = ATTO_GAI_PCIIM_LEGACY;

		gai->adap_type = ATTO_GAI_AT_ESASRAID2;

		if (test_bit(AF2_THUNDERLINK, &a->flags2))
			gai->adap_type = ATTO_GAI_AT_TLSASHBA;

		if (test_bit(AF_DEGRADED_MODE, &a->flags))
			gai->adap_flags |= ATTO_GAI_AF_DEGRADED;

		gai->adap_flags |= ATTO_GAI_AF_SPT_SUPP |
				   ATTO_GAI_AF_DEVADDR_SUPP;

		if (a->pcid->subsystem_device == ATTO_ESAS_R60F
		    || a->pcid->subsystem_device == ATTO_ESAS_R608
		    || a->pcid->subsystem_device == ATTO_ESAS_R644
		    || a->pcid->subsystem_device == ATTO_TSSC_3808E)
			gai->adap_flags |= ATTO_GAI_AF_VIRT_SES;

		gai->num_ports = ESAS2R_NUM_PHYS;
		gai->num_phys = ESAS2R_NUM_PHYS;

		strcpy(gai->firmware_rev, a->fw_rev);
		strcpy(gai->flash_rev, a->flash_rev);
		strcpy(gai->model_name_short, esas2r_get_model_name_short(a));
		strcpy(gai->model_name, esas2r_get_model_name(a));

		gai->num_targets = ESAS2R_MAX_TARGETS;

		gai->num_busses = 1;
		gai->num_targsper_bus = gai->num_targets;
		gai->num_lunsper_targ = 256;

		if (a->pcid->subsystem_device == ATTO_ESAS_R6F0
		    || a->pcid->subsystem_device == ATTO_ESAS_R60F)
			gai->num_connectors = 4;
		else
			gai->num_connectors = 2;

		gai->adap_flags2 |= ATTO_GAI_AF2_ADAP_CTRL_SUPP;

		gai->num_targets_backend = a->num_targets_backend;

		gai->tunnel_flags = a->ioctl_tunnel
				    & (ATTO_GAI_TF_MEM_RW
				       | ATTO_GAI_TF_TRACE
				       | ATTO_GAI_TF_SCSI_PASS_THRU
				       | ATTO_GAI_TF_GET_DEV_ADDR
				       | ATTO_GAI_TF_PHY_CTRL
				       | ATTO_GAI_TF_CONN_CTRL
				       | ATTO_GAI_TF_GET_DEV_INFO);
		break;
	}
	case ATTO_FUNC_GET_ADAP_ADDR:
	{
		struct atto_hba_get_adapter_address *gaa =
			&hi->data.get_adap_addr;

		if (hi->flags & HBAF_TUNNEL) {
			hi->status = ATTO_STS_UNSUPPORTED;
			break;
		}

		if (hi->version > ATTO_VER_GET_ADAP_ADDR0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_GET_ADAP_ADDR0;
		} else if (gaa->addr_type == ATTO_GAA_AT_PORT
			   || gaa->addr_type == ATTO_GAA_AT_NODE) {
			if (gaa->addr_type == ATTO_GAA_AT_PORT
			    && gaa->port_id >= ESAS2R_NUM_PHYS) {
				hi->status = ATTO_STS_NOT_APPL;
			} else {
				memcpy((u64 *)gaa->address,
				       &a->nvram->sas_addr[0], sizeof(u64));
				gaa->addr_len = sizeof(u64);
			}
		} else {
			hi->status = ATTO_STS_INV_PARAM;
		}

		break;
	}
	case ATTO_FUNC_MEM_RW:
	{
		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		hi->status = ATTO_STS_UNSUPPORTED;

		break;
	}
	case ATTO_FUNC_TRACE:
	{
		struct atto_hba_trace *trc = &hi->data.trace;

		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		if (hi->version > ATTO_VER_TRACE1) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_TRACE1;
			break;
		}

		if (trc->trace_type == ATTO_TRC_TT_FWCOREDUMP
		    && hi->version >= ATTO_VER_TRACE1) {
			if (trc->trace_func == ATTO_TRC_TF_UPLOAD) {
				u32 len = hi->data_length;
				u32 offset = trc->current_offset;
				u32 total_len = ESAS2R_FWCOREDUMP_SZ;

				/* Size is zero if a core dump isn't present */
				if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2))
					total_len = 0;

				if (len > total_len)
					len = total_len;

				if (offset >= total_len
				    || offset + len > total_len
				    || len == 0) {
					hi->status = ATTO_STS_INV_PARAM;
					break;
				}

				memcpy(trc + 1,
				       a->fw_coredump_buff + offset,
				       len);

				hi->data_length = len;
			} else if (trc->trace_func == ATTO_TRC_TF_RESET) {
				memset(a->fw_coredump_buff, 0,
				       ESAS2R_FWCOREDUMP_SZ);

				clear_bit(AF2_COREDUMP_SAVED, &a->flags2);
			} else if (trc->trace_func != ATTO_TRC_TF_GET_INFO) {
				hi->status = ATTO_STS_UNSUPPORTED;
				break;
			}

			/* Always return all the info we can. */
			trc->trace_mask = 0;
			trc->current_offset = 0;
			trc->total_length = ESAS2R_FWCOREDUMP_SZ;

			/* Return zero length buffer if core dump not present */
			if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2))
				trc->total_length = 0;
		} else {
			hi->status = ATTO_STS_UNSUPPORTED;
		}

		break;
	}
	case ATTO_FUNC_SCSI_PASS_THRU:
	{
		struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru;
		struct scsi_lun lun;

		memcpy(&lun, spt->lun, sizeof(struct scsi_lun));

		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		if (hi->version > ATTO_VER_SCSI_PASS_THRU0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_SCSI_PASS_THRU0;
			break;
		}

		if (spt->target_id >= ESAS2R_MAX_TARGETS || !check_lun(lun)) {
			hi->status = ATTO_STS_INV_PARAM;
			break;
		}

		esas2r_sgc_init(sgc, a, rq, NULL);

		sgc->length = hi->data_length;
		sgc->cur_offset += offsetof(struct atto_ioctl, data.byte)
				   + sizeof(struct atto_hba_scsi_pass_thru);

		/* Finish request initialization */
		rq->target_id = (u16)spt->target_id;
		rq->vrq->scsi.flags |= cpu_to_le32(spt->lun[1]);
		memcpy(rq->vrq->scsi.cdb, spt->cdb, 16);
		rq->vrq->scsi.length = cpu_to_le32(hi->data_length);
		rq->sense_len = spt->sense_length;
		rq->sense_buf = (u8 *)spt->sense_data;
		/* NOTE: we ignore spt->timeout */

		/*
		 * always usurp the completion callback since the interrupt
		 * callback mechanism may be used.
		 */
		rq->aux_req_cx = hi;
		rq->aux_req_cb = rq->comp_cb;
		rq->comp_cb = scsi_passthru_comp_cb;

		if (spt->flags & ATTO_SPTF_DATA_IN) {
			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_RDD);
		} else if (spt->flags & ATTO_SPTF_DATA_OUT) {
			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD);
		} else {
			if (sgc->length) {
				hi->status = ATTO_STS_INV_PARAM;
				break;
			}
		}

		if (spt->flags & ATTO_SPTF_ORDERED_Q)
			rq->vrq->scsi.flags |=
				cpu_to_le32(FCP_CMND_TA_ORDRD_Q);
		else if (spt->flags & ATTO_SPTF_HEAD_OF_Q)
			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_TA_HEAD_Q);

		if (!esas2r_build_sg_list(a, rq, sgc)) {
			hi->status = ATTO_STS_OUT_OF_RSRC;
			break;
		}

		esas2r_start_request(a, rq);

		return true;
	}
	case ATTO_FUNC_GET_DEV_ADDR:
	{
		struct atto_hba_get_device_address *gda =
			&hi->data.get_dev_addr;
		struct esas2r_target *t;

		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		if (hi->version > ATTO_VER_GET_DEV_ADDR0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_GET_DEV_ADDR0;
			break;
		}

		if (gda->target_id >= ESAS2R_MAX_TARGETS) {
			hi->status = ATTO_STS_INV_PARAM;
			break;
		}

		t = a->targetdb + (u16)gda->target_id;

		if (t->target_state != TS_PRESENT) {
			hi->status = ATTO_STS_FAILED;
		} else if (gda->addr_type == ATTO_GDA_AT_PORT) {
			if (t->sas_addr == 0) {
				hi->status = ATTO_STS_UNSUPPORTED;
			} else {
				*(u64 *)gda->address = t->sas_addr;
				gda->addr_len = sizeof(u64);
			}
		} else if (gda->addr_type == ATTO_GDA_AT_NODE) {
			hi->status = ATTO_STS_NOT_APPL;
		} else {
			hi->status = ATTO_STS_INV_PARAM;
		}

		/* update the target ID to the next one present. */
		gda->target_id =
			esas2r_targ_db_find_next_present(a,
							 (u16)gda->target_id);
		break;
	}
	case ATTO_FUNC_PHY_CTRL:
	case ATTO_FUNC_CONN_CTRL:
	{
		if (hba_ioctl_tunnel(a, hi, rq, sgc))
			return true;

		break;
	}
	case ATTO_FUNC_ADAP_CTRL:
	{
		struct atto_hba_adap_ctrl *ac = &hi->data.adap_ctrl;

		if (hi->flags & HBAF_TUNNEL) {
			hi->status = ATTO_STS_UNSUPPORTED;
			break;
		}

		if (hi->version > ATTO_VER_ADAP_CTRL0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_ADAP_CTRL0;
			break;
		}

		if (ac->adap_func == ATTO_AC_AF_HARD_RST) {
			esas2r_reset_adapter(a);
		} else if (ac->adap_func != ATTO_AC_AF_GET_STATE) {
			hi->status = ATTO_STS_UNSUPPORTED;
			break;
		}

		if (test_bit(AF_CHPRST_NEEDED, &a->flags))
			ac->adap_state = ATTO_AC_AS_RST_SCHED;
		else if (test_bit(AF_CHPRST_PENDING, &a->flags))
			ac->adap_state = ATTO_AC_AS_RST_IN_PROG;
		else if (test_bit(AF_DISC_PENDING, &a->flags))
			ac->adap_state = ATTO_AC_AS_RST_DISC;
		else if (test_bit(AF_DISABLED, &a->flags))
			ac->adap_state = ATTO_AC_AS_DISABLED;
		else if (test_bit(AF_DEGRADED_MODE, &a->flags))
			ac->adap_state = ATTO_AC_AS_DEGRADED;
		else
			ac->adap_state = ATTO_AC_AS_OK;

		break;
	}
	case ATTO_FUNC_GET_DEV_INFO:
	{
		struct atto_hba_get_device_info *gdi = &hi->data.get_dev_info;
		struct esas2r_target *t;

		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		if (hi->version > ATTO_VER_GET_DEV_INFO0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_GET_DEV_INFO0;
			break;
		}

		if (gdi->target_id >= ESAS2R_MAX_TARGETS) {
			hi->status = ATTO_STS_INV_PARAM;
			break;
		}

		t = a->targetdb + (u16)gdi->target_id;

		/* update the target ID to the next one present. */
		gdi->target_id =
			esas2r_targ_db_find_next_present(a,
							 (u16)gdi->target_id);

		if (t->target_state != TS_PRESENT) {
			hi->status = ATTO_STS_FAILED;
			break;
		}

		hi->status = ATTO_STS_UNSUPPORTED;
		break;
	}

	default:

		hi->status = ATTO_STS_INV_FUNC;
		break;
	}

	return false;
}
static void hba_ioctl_done_callback(struct esas2r_adapter *a,
				    struct esas2r_request *rq, void *context)
{
	struct atto_ioctl *ioctl_hba =
		(struct atto_ioctl *)esas2r_buffered_ioctl;

	esas2r_debug("hba_ioctl_done_callback %d", a->index);

	if (ioctl_hba->function == ATTO_FUNC_GET_ADAP_INFO) {
		struct atto_hba_get_adapter_info *gai =
			&ioctl_hba->data.get_adap_info;

		esas2r_debug("ATTO_FUNC_GET_ADAP_INFO");

		gai->drvr_rev_major = ESAS2R_MAJOR_REV;
		gai->drvr_rev_minor = ESAS2R_MINOR_REV;

		strcpy(gai->drvr_rev_ascii, ESAS2R_VERSION_STR);
		strcpy(gai->drvr_name, ESAS2R_DRVR_NAME);

		gai->num_busses = 1;
		gai->num_targsper_bus = ESAS2R_MAX_ID + 1;
		gai->num_lunsper_targ = 1;
	}
}
u8 handle_hba_ioctl(struct esas2r_adapter *a,
		    struct atto_ioctl *ioctl_hba)
{
	struct esas2r_buffered_ioctl bi;

	memset(&bi, 0, sizeof(bi));

	bi.a = a;
	bi.ioctl = ioctl_hba;
	bi.length = sizeof(struct atto_ioctl) + ioctl_hba->data_length;
	bi.callback = hba_ioctl_callback;
	bi.context = NULL;
	bi.done_callback = hba_ioctl_done_callback;
	bi.done_context = NULL;
	bi.offset = 0;

	return handle_buffered_ioctl(&bi);
}
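/*
 * Write the NVRAM parameter block.  Returns 1 on success and 0 on
 * failure; the caller stores the result directly in ioctl->data.prw.code
 * for the EXPRESS_IOCTL_WRITE_PARAMS ioctl.
 */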
int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq,
			struct esas2r_sas_nvram *data)
{
	int result = 0;

	a->nvram_command_done = 0;
	rq->comp_cb = complete_nvr_req;

	if (esas2r_nvram_write(a, rq, data)) {
		/* now wait around for it to complete. */
		while (!a->nvram_command_done)
			wait_event_interruptible(a->nvram_waiter,
						 a->nvram_command_done);

		/* done, check the status. */
		if (rq->req_stat == RS_SUCCESS)
			result = 1;
	}

	return result;
}
/* This function only cares about ATTO-specific ioctls (atto_express_ioctl) */
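/*
 * A minimal sketch of how user space is expected to reach this handler.
 * The open()ed device node and file descriptor are hypothetical; the
 * signature, channel, and return_code fields are as defined for struct
 * atto_express_ioctl:
 *
 *	struct atto_express_ioctl ioc;
 *
 *	memset(&ioc, 0, sizeof(ioc));
 *	memcpy(ioc.header.signature, EXPRESS_IOCTL_SIGNATURE,
 *	       EXPRESS_IOCTL_SIGNATURE_SIZE);
 *	ioc.header.channel = 0xFF;
 *	ioctl(fd, EXPRESS_IOCTL_CHAN_INFO, &ioc);
 *
 * A channel of 0xFF selects the adapter the file descriptor was opened
 * against; on return, ioc.header.return_code holds IOCTL_SUCCESS or an
 * IOCTL_* error code.
 */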
int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg)
{
	struct atto_express_ioctl *ioctl = NULL;
	struct esas2r_adapter *a;
	struct esas2r_request *rq;
	u16 code;
	int err;

	esas2r_log(ESAS2R_LOG_DEBG, "ioctl (%p, %x, %p)", hostdata, cmd, arg);

	if ((arg == NULL)
	    || (cmd < EXPRESS_IOCTL_MIN)
	    || (cmd > EXPRESS_IOCTL_MAX))
		return -ENOTSUPP;

	if (!access_ok(VERIFY_WRITE, arg, sizeof(struct atto_express_ioctl))) {
		esas2r_log(ESAS2R_LOG_WARN,
			   "ioctl_handler access_ok failed for cmd %d, "
			   "address %p", cmd,
			   arg);
		return -EFAULT;
	}

	/* allocate a kernel memory buffer for the IOCTL data */
	ioctl = kzalloc(sizeof(struct atto_express_ioctl), GFP_KERNEL);
	if (ioctl == NULL) {
		esas2r_log(ESAS2R_LOG_WARN,
			   "ioctl_handler kzalloc failed for %zu bytes",
			   sizeof(struct atto_express_ioctl));
		return -ENOMEM;
	}

	err = __copy_from_user(ioctl, arg, sizeof(struct atto_express_ioctl));
	if (err != 0) {
		esas2r_log(ESAS2R_LOG_WARN,
			   "copy_from_user didn't copy everything (err %d, cmd %d)",
			   err, cmd);
		kfree(ioctl);

		return -EFAULT;
	}

	/* verify the signature */

	if (memcmp(ioctl->header.signature,
		   EXPRESS_IOCTL_SIGNATURE,
		   EXPRESS_IOCTL_SIGNATURE_SIZE) != 0) {
		esas2r_log(ESAS2R_LOG_WARN, "invalid signature");
		kfree(ioctl);

		return -ENOTSUPP;
	}

	/* assume success */

	ioctl->header.return_code = IOCTL_SUCCESS;
	err = 0;

	/*
	 * handle EXPRESS_IOCTL_GET_CHANNELS
	 * without paying attention to channel
	 */

	if (cmd == EXPRESS_IOCTL_GET_CHANNELS) {
		int i = 0, k = 0;

		ioctl->data.chanlist.num_channels = 0;

		while (i < MAX_ADAPTERS) {
			if (esas2r_adapters[i]) {
				ioctl->data.chanlist.num_channels++;
				ioctl->data.chanlist.channel[k] = i;
				k++;
			}
			i++;
		}

		goto ioctl_done;
	}

	/* get the channel */

	if (ioctl->header.channel == 0xFF) {
		a = (struct esas2r_adapter *)hostdata;
	} else {
		/* validate the channel before using it as an array index */
		if (ioctl->header.channel >= MAX_ADAPTERS
		    || esas2r_adapters[ioctl->header.channel] == NULL) {
			ioctl->header.return_code = IOCTL_BAD_CHANNEL;
			esas2r_log(ESAS2R_LOG_WARN, "bad channel value");
			kfree(ioctl);

			return -ENOTSUPP;
		}
		a = esas2r_adapters[ioctl->header.channel];
	}

	switch (cmd) {
	case EXPRESS_IOCTL_RW_FIRMWARE:

		if (ioctl->data.fwrw.img_type == FW_IMG_FM_API) {
			err = esas2r_write_fw(a,
					      (char *)ioctl->data.fwrw.image,
					      0,
					      sizeof(struct atto_express_ioctl));

			if (err >= 0) {
				err = esas2r_read_fw(a,
						     (char *)ioctl->data.fwrw.image,
						     0,
						     sizeof(struct atto_express_ioctl));
			}
		} else if (ioctl->data.fwrw.img_type == FW_IMG_FS_API) {
			err = esas2r_write_fs(a,
					      (char *)ioctl->data.fwrw.image,
					      0,
					      sizeof(struct atto_express_ioctl));

			if (err >= 0) {
				err = esas2r_read_fs(a,
						     (char *)ioctl->data.fwrw.image,
						     0,
						     sizeof(struct atto_express_ioctl));
			}
		} else {
			ioctl->header.return_code = IOCTL_BAD_FLASH_IMGTYPE;
		}

		break;
	case EXPRESS_IOCTL_READ_PARAMS:

		memcpy(ioctl->data.prw.data_buffer, a->nvram,
		       sizeof(struct esas2r_sas_nvram));
		ioctl->data.prw.code = 1;
		break;

	case EXPRESS_IOCTL_WRITE_PARAMS:

		rq = esas2r_alloc_request(a);
		if (rq == NULL) {
			kfree(ioctl);
			esas2r_log(ESAS2R_LOG_WARN,
				   "could not allocate an internal request");
			return -ENOMEM;
		}

		code = esas2r_write_params(a, rq,
					   (struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer);
		ioctl->data.prw.code = code;

		esas2r_free_request(a, rq);

		break;

	case EXPRESS_IOCTL_DEFAULT_PARAMS:

		esas2r_nvram_get_defaults(a,
					  (struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer);
		ioctl->data.prw.code = 1;
		break;
	case EXPRESS_IOCTL_CHAN_INFO:

		ioctl->data.chaninfo.major_rev = ESAS2R_MAJOR_REV;
		ioctl->data.chaninfo.minor_rev = ESAS2R_MINOR_REV;
		ioctl->data.chaninfo.IRQ = a->pcid->irq;
		ioctl->data.chaninfo.device_id = a->pcid->device;
		ioctl->data.chaninfo.vendor_id = a->pcid->vendor;
		ioctl->data.chaninfo.ven_dev_id = a->pcid->subsystem_device;
		ioctl->data.chaninfo.revision_id = a->pcid->revision;
		ioctl->data.chaninfo.pci_bus = a->pcid->bus->number;
		ioctl->data.chaninfo.pci_dev_func = a->pcid->devfn;
		ioctl->data.chaninfo.core_rev = 0;
		ioctl->data.chaninfo.host_no = a->host->host_no;
		ioctl->data.chaninfo.hbaapi_rev = 0;
		break;
	case EXPRESS_IOCTL_SMP:
		ioctl->header.return_code = handle_smp_ioctl(a,
							     &ioctl->data.ioctl_smp);
		break;

	case EXPRESS_CSMI:
		ioctl->header.return_code =
			handle_csmi_ioctl(a, &ioctl->data.csmi);
		break;

	case EXPRESS_IOCTL_HBA:
		ioctl->header.return_code = handle_hba_ioctl(a,
							     &ioctl->data.ioctl_hba);
		break;

	case EXPRESS_IOCTL_VDA:
		err = esas2r_write_vda(a,
				       (char *)&ioctl->data.ioctl_vda,
				       0,
				       sizeof(struct atto_ioctl_vda) +
				       ioctl->data.ioctl_vda.data_length);

		if (err >= 0) {
			err = esas2r_read_vda(a,
					      (char *)&ioctl->data.ioctl_vda,
					      0,
					      sizeof(struct atto_ioctl_vda) +
					      ioctl->data.ioctl_vda.data_length);
		}

		break;

	case EXPRESS_IOCTL_GET_MOD_INFO:

		ioctl->data.modinfo.adapter = a;
		ioctl->data.modinfo.pci_dev = a->pcid;
		ioctl->data.modinfo.scsi_host = a->host;
		ioctl->data.modinfo.host_no = a->host->host_no;

		break;

	default:
		esas2r_debug("esas2r_ioctl invalid cmd %d!", cmd);
		ioctl->header.return_code = IOCTL_ERR_INVCMD;
	}

ioctl_done:
	if (err < 0) {
		esas2r_log(ESAS2R_LOG_WARN, "err %d on ioctl cmd %d", err,
			   cmd);

		switch (err) {
		case -ENOMEM:
		case -EBUSY:
			ioctl->header.return_code = IOCTL_OUT_OF_RESOURCES;
			break;

		case -ENOSYS:
		case -EINVAL:
			ioctl->header.return_code = IOCTL_INVALID_PARAM;
			break;

		default:
			ioctl->header.return_code = IOCTL_GENERAL_ERROR;
			break;
		}
	}

	/* Always copy the buffer back, if only to pick up the status */
	err = __copy_to_user(arg, ioctl, sizeof(struct atto_express_ioctl));
	if (err != 0) {
		esas2r_log(ESAS2R_LOG_WARN,
			   "ioctl_handler copy_to_user didn't copy "
			   "everything (err %d, cmd %d)", err,
			   cmd);
		kfree(ioctl);

		return -EFAULT;
	}

	kfree(ioctl);

	return 0;
}
int esas2r_ioctl(struct scsi_device *sd, int cmd, void __user *arg)
{
	return esas2r_ioctl_handler(sd->host->hostdata, cmd, arg);
}
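/*
 * Firmware (FM API) read/write flow used by the entry points below: a
 * write of a struct esas2r_flash_img header puts the interface in
 * FW_COMMAND_ST.  For FI_ACT_DOWN the data chunks that follow are cached
 * in a DMA buffer and the FM API command is issued when the last chunk
 * arrives; for FI_ACT_UP/FI_ACT_UPSZ the command is issued on the
 * subsequent read, which returns the result and leaves the state in
 * FW_STATUS_ST.
 */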
static void free_fw_buffers(struct esas2r_adapter *a)
{
	if (a->firmware.data) {
		dma_free_coherent(&a->pcid->dev,
				  (size_t)a->firmware.orig_len,
				  a->firmware.data,
				  (dma_addr_t)a->firmware.phys);

		a->firmware.data = NULL;
	}
}
static int allocate_fw_buffers(struct esas2r_adapter *a, u32 length)
{
	free_fw_buffers(a);

	a->firmware.orig_len = length;

	a->firmware.data = (u8 *)dma_alloc_coherent(&a->pcid->dev,
						    (size_t)length,
						    (dma_addr_t *)&a->firmware.phys,
						    GFP_KERNEL);

	if (!a->firmware.data) {
		esas2r_debug("buffer alloc failed!");
		return 0;
	}

	return 1;
}
/* Handle a call to read firmware. */
int esas2r_read_fw(struct esas2r_adapter *a, char *buf, long off, int count)
{
	esas2r_trace_enter();

	/* if the cached header is a status, simply copy it over and return. */
	if (a->firmware.state == FW_STATUS_ST) {
		int size = min_t(int, count, sizeof(a->firmware.header));

		esas2r_trace_exit();
		memcpy(buf, &a->firmware.header, size);
		esas2r_debug("esas2r_read_fw: STATUS size %d", size);
		return size;
	}

	/*
	 * if the cached header is a command, do it if at
	 * offset 0, otherwise copy the pieces.
	 */
	if (a->firmware.state == FW_COMMAND_ST) {
		u32 length = a->firmware.header.length;

		esas2r_trace_exit();

		esas2r_debug("esas2r_read_fw: COMMAND length %d off %d",
			     length,
			     off);

		if (off == 0) {
			if (a->firmware.header.action == FI_ACT_UP) {
				if (!allocate_fw_buffers(a, length))
					return -ENOMEM;

				/* copy header over */
				memcpy(a->firmware.data,
				       &a->firmware.header,
				       sizeof(a->firmware.header));

				do_fm_api(a,
					  (struct esas2r_flash_img *)a->firmware.data);
			} else if (a->firmware.header.action == FI_ACT_UPSZ) {
				int size =
					min((int)count,
					    (int)sizeof(a->firmware.header));

				do_fm_api(a, &a->firmware.header);
				memcpy(buf, &a->firmware.header, size);
				esas2r_debug("FI_ACT_UPSZ size %d", size);
				return size;
			} else {
				esas2r_debug("invalid action %d",
					     a->firmware.header.action);
				return -ENOSYS;
			}
		}

		if (count + off > length)
			count = length - off;

		if (count < 0)
			return 0;

		if (!a->firmware.data) {
			esas2r_debug(
				"read: nonzero offset but no buffer available!");
			return -ENOMEM;
		}

		esas2r_debug("esas2r_read_fw: off %d count %d length %d", off,
			     count,
			     length);

		memcpy(buf, &a->firmware.data[off], count);

		/* when done, release the buffer */
		if (length <= off + count) {
			esas2r_debug("esas2r_read_fw: freeing buffer!");

			free_fw_buffers(a);
		}

		return count;
	}

	esas2r_trace_exit();
	esas2r_debug("esas2r_read_fw: invalid firmware state %d",
		     a->firmware.state);

	return -EINVAL;
}
/* Handle a call to write firmware. */
int esas2r_write_fw(struct esas2r_adapter *a, const char *buf, long off,
		    int count)
{
	u32 length;

	if (off == 0) {
		struct esas2r_flash_img *header =
			(struct esas2r_flash_img *)buf;

		/* assume version 0 flash image */
		int min_size = sizeof(struct esas2r_flash_img_v0);

		a->firmware.state = FW_INVALID_ST;

		/* validate the version field first */
		if (count < 4
		    || header->fi_version > FI_VERSION_1) {
			esas2r_debug(
				"esas2r_write_fw: short header or invalid version");
			return -EINVAL;
		}

		/* See if it's a version 1 flash image */
		if (header->fi_version == FI_VERSION_1)
			min_size = sizeof(struct esas2r_flash_img);

		/* If this is the start, the header must be full and valid. */
		if (count < min_size) {
			esas2r_debug("esas2r_write_fw: short header, aborting");
			return -EINVAL;
		}

		/* Make sure the size is reasonable. */
		length = header->length;

		if (length > 1024 * 1024) {
			esas2r_debug(
				"esas2r_write_fw: hosed, length %d fi_version %d",
				length, header->fi_version);
			return -EINVAL;
		}

		/*
		 * If this is a write command, allocate memory because
		 * we have to cache everything.  otherwise, just cache
		 * the header, because the read op will do the command.
		 */
		if (header->action == FI_ACT_DOWN) {
			if (!allocate_fw_buffers(a, length))
				return -ENOMEM;

			/*
			 * Store the command, so there is context on subsequent
			 * calls.
			 */
			memcpy(&a->firmware.header,
			       buf,
			       sizeof(*header));
		} else if (header->action == FI_ACT_UP
			   || header->action == FI_ACT_UPSZ) {
			/* Save the command, result will be picked up on read */
			memcpy(&a->firmware.header,
			       buf,
			       sizeof(*header));

			a->firmware.state = FW_COMMAND_ST;

			esas2r_debug(
				"esas2r_write_fw: COMMAND, count %d, action %d",
				count, header->action);

			/*
			 * Pretend we took the whole buffer,
			 * so we don't get bothered again.
			 */
			return count;
		} else {
			esas2r_debug("esas2r_write_fw: invalid action %d",
				     a->firmware.header.action);
			return -ENOSYS;
		}
	} else {
		length = a->firmware.header.length;
	}

	/*
	 * We only get here on a download command, regardless of offset.
	 * the chunks written by the system need to be cached, and when
	 * the final one arrives, issue the fmapi command.
	 */

	if (off + count > length)
		count = length - off;

	if (count > 0) {
		esas2r_debug("esas2r_write_fw: off %d count %d length %d", off,
			     count,
			     length);

		/*
		 * On a full upload, the system tries sending the whole buffer.
		 * there's nothing to do with it, so just drop it here, before
		 * trying to copy over into unallocated memory!
		 */
		if (a->firmware.header.action == FI_ACT_UP)
			return count;

		if (!a->firmware.data) {
			esas2r_debug(
				"write: nonzero offset but no buffer available!");
			return -ENOMEM;
		}

		memcpy(&a->firmware.data[off], buf, count);

		if (length == off + count) {
			do_fm_api(a,
				  (struct esas2r_flash_img *)a->firmware.data);

			/*
			 * Now copy the header result to be picked up by the
			 * next read
			 */
			memcpy(&a->firmware.header,
			       a->firmware.data,
			       sizeof(a->firmware.header));

			a->firmware.state = FW_STATUS_ST;

			esas2r_debug("write completed");

			/*
			 * Since the system has the data buffered, the only
			 * way this can leak is if a root user writes a
			 * program that writes a shorter buffer than it
			 * claims, and the copyout fails.
			 */
			free_fw_buffers(a);
		}
	}

	return count;
}
/* Callback for the completion of a VDA request. */
static void vda_complete_req(struct esas2r_adapter *a,
			     struct esas2r_request *rq)
{
	a->vda_command_done = 1;
	wake_up_interruptible(&a->vda_waiter);
}
/* Scatter/gather callback for VDA requests */
static u32 get_physaddr_vda(struct esas2r_sg_context *sgc, u64 *addr)
{
	struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
	int offset = (u8 *)sgc->cur_offset - (u8 *)a->vda_buffer;

	(*addr) = a->ppvda_buffer + offset;
	return VDA_MAX_BUFFER_SIZE - offset;
}
/* Handle a call to read a VDA command. */
int esas2r_read_vda(struct esas2r_adapter *a, char *buf, long off, int count)
{
	if (!a->vda_buffer)
		return -ENOMEM;

	if (off == 0) {
		struct esas2r_request *rq;
		struct atto_ioctl_vda *vi =
			(struct atto_ioctl_vda *)a->vda_buffer;
		struct esas2r_sg_context sgc;
		bool wait_for_completion;

		/*
		 * Presumably, someone has already written to the vda_buffer,
		 * and now they are reading back the response, so now we will
		 * actually issue the request to the chip and reply.
		 */

		/* allocate a request */
		rq = esas2r_alloc_request(a);
		if (rq == NULL) {
			esas2r_debug("esas2r_read_vda: out of requests");
			return -EBUSY;
		}

		rq->comp_cb = vda_complete_req;

		sgc.first_req = rq;
		sgc.adapter = a;
		sgc.cur_offset = a->vda_buffer + VDA_BUFFER_HEADER_SZ;
		sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_vda;

		a->vda_command_done = 0;

		wait_for_completion =
			esas2r_process_vda_ioctl(a, vi, rq, &sgc);

		if (wait_for_completion) {
			/* now wait around for it to complete. */
			while (!a->vda_command_done)
				wait_event_interruptible(a->vda_waiter,
							 a->vda_command_done);
		}

		esas2r_free_request(a, (struct esas2r_request *)rq);
	}

	if (off > VDA_MAX_BUFFER_SIZE)
		return 0;

	if (count + off > VDA_MAX_BUFFER_SIZE)
		count = VDA_MAX_BUFFER_SIZE - off;

	if (count < 0)
		return 0;

	memcpy(buf, a->vda_buffer + off, count);

	return count;
}
/* Handle a call to write a VDA command. */
int esas2r_write_vda(struct esas2r_adapter *a, const char *buf, long off,
		     int count)
{
	/*
	 * allocate memory for it, if not already done.  once allocated,
	 * we will keep it around until the driver is unloaded.
	 */
	if (!a->vda_buffer) {
		dma_addr_t dma_addr;

		a->vda_buffer = (u8 *)dma_alloc_coherent(&a->pcid->dev,
							 (size_t)VDA_MAX_BUFFER_SIZE,
							 &dma_addr,
							 GFP_KERNEL);

		a->ppvda_buffer = dma_addr;
	}

	if (!a->vda_buffer)
		return -ENOMEM;

	if (off > VDA_MAX_BUFFER_SIZE)
		return 0;

	if (count + off > VDA_MAX_BUFFER_SIZE)
		count = VDA_MAX_BUFFER_SIZE - off;

	if (count < 0)
		return 0;

	memcpy(a->vda_buffer + off, buf, count);

	return count;
}
/* Callback for the completion of an FS_API request. */
static void fs_api_complete_req(struct esas2r_adapter *a,
				struct esas2r_request *rq)
{
	a->fs_api_command_done = 1;

	wake_up_interruptible(&a->fs_api_waiter);
}
/* Scatter/gather callback for FS_API requests */
static u32 get_physaddr_fs_api(struct esas2r_sg_context *sgc, u64 *addr)
{
	struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
	struct esas2r_ioctl_fs *fs =
		(struct esas2r_ioctl_fs *)a->fs_api_buffer;
	u32 offset = (u8 *)sgc->cur_offset - (u8 *)fs;

	(*addr) = a->ppfs_api_buffer + offset;

	return a->fs_api_buffer_size - offset;
}
/* Handle a call to read firmware via FS_API. */
int esas2r_read_fs(struct esas2r_adapter *a, char *buf, long off, int count)
{
	if (!a->fs_api_buffer)
		return -ENOMEM;

	if (off == 0) {
		struct esas2r_request *rq;
		struct esas2r_sg_context sgc;
		struct esas2r_ioctl_fs *fs =
			(struct esas2r_ioctl_fs *)a->fs_api_buffer;

		/* If another flash request is already in progress, return. */
		if (down_interruptible(&a->fs_api_semaphore)) {
busy:
			fs->status = ATTO_STS_OUT_OF_RSRC;
			return -EBUSY;
		}

		/*
		 * Presumably, someone has already written to the
		 * fs_api_buffer, and now they are reading back the response,
		 * so now we will actually issue the request to the chip and
		 * reply.  Allocate a request.
		 */
		rq = esas2r_alloc_request(a);
		if (rq == NULL) {
			esas2r_debug("esas2r_read_fs: out of requests");
			up(&a->fs_api_semaphore);
			goto busy;
		}

		rq->comp_cb = fs_api_complete_req;

		/* Set up the SGCONTEXT to build the s/g table */
		sgc.cur_offset = fs->data;
		sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_fs_api;

		a->fs_api_command_done = 0;

		if (!esas2r_process_fs_ioctl(a, fs, rq, &sgc)) {
			if (fs->status == ATTO_STS_OUT_OF_RSRC)
				count = -EBUSY;

			goto dont_wait;
		}

		/* Now wait around for it to complete. */
		while (!a->fs_api_command_done)
			wait_event_interruptible(a->fs_api_waiter,
						 a->fs_api_command_done);

dont_wait:
		/* Free the request and keep going */
		up(&a->fs_api_semaphore);
		esas2r_free_request(a, (struct esas2r_request *)rq);

		/* Pick up possible error code from above */
		if (count < 0)
			return count;
	}

	if (off > a->fs_api_buffer_size)
		return 0;

	if (count + off > a->fs_api_buffer_size)
		count = a->fs_api_buffer_size - off;

	if (count < 0)
		return 0;

	memcpy(buf, a->fs_api_buffer + off, count);

	return count;
}
/* Handle a call to write firmware via FS_API. */
int esas2r_write_fs(struct esas2r_adapter *a, const char *buf, long off,
		    int count)
{
	if (off == 0) {
		struct esas2r_ioctl_fs *fs = (struct esas2r_ioctl_fs *)buf;
		u32 length = fs->command.length + offsetof(
			struct esas2r_ioctl_fs,
			data);

		/*
		 * Special case, for BEGIN commands, the length field
		 * is lying to us, so just get enough for the header.
		 */
		if (fs->command.command == ESAS2R_FS_CMD_BEGINW)
			length = offsetof(struct esas2r_ioctl_fs, data);

		/*
		 * Beginning a command.  We assume we'll get at least
		 * enough in the first write so we can look at the
		 * header and see how much we need to alloc.
		 */
		if (count < offsetof(struct esas2r_ioctl_fs, data))
			return -EINVAL;

		/* Allocate a buffer or use the existing buffer. */
		if (a->fs_api_buffer) {
			if (a->fs_api_buffer_size < length) {
				/* Free too-small buffer and get a new one */
				dma_free_coherent(&a->pcid->dev,
						  (size_t)a->fs_api_buffer_size,
						  a->fs_api_buffer,
						  (dma_addr_t)a->ppfs_api_buffer);

				goto re_allocate_buffer;
			}
		} else {
re_allocate_buffer:
			a->fs_api_buffer_size = length;

			a->fs_api_buffer = (u8 *)dma_alloc_coherent(
				&a->pcid->dev,
				(size_t)a->fs_api_buffer_size,
				(dma_addr_t *)&a->ppfs_api_buffer,
				GFP_KERNEL);
		}
	}

	if (!a->fs_api_buffer)
		return -ENOMEM;

	if (off > a->fs_api_buffer_size)
		return 0;

	if (count + off > a->fs_api_buffer_size)
		count = a->fs_api_buffer_size - off;

	if (count < 0)
		return 0;

	memcpy(a->fs_api_buffer + off, buf, count);

	return count;
}