/*
 *  linux/drivers/scsi/esas2r/esas2r_ioctl.c
 *      For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
 *
 *  Copyright (c) 2001-2013 ATTO Technology, Inc.
 *  (mailto:linuxdrivers@attotech.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.
 *
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
 * USA.
 */

#include "esas2r.h"
/*
 * Buffered ioctl handlers.  A buffered ioctl is one which requires that we
 * allocate a DMA-able memory area to communicate with the firmware.  In
 * order to prevent continually allocating and freeing consistent memory,
 * we will allocate a global buffer the first time we need it and re-use
 * it for subsequent ioctl calls that require it.
 */
u8 *esas2r_buffered_ioctl;
dma_addr_t esas2r_buffered_ioctl_addr;
u32 esas2r_buffered_ioctl_size;
struct pci_dev *esas2r_buffered_ioctl_pcid;

static DEFINE_SEMAPHORE(buffered_ioctl_semaphore);
typedef int (*BUFFERED_IOCTL_CALLBACK)(struct esas2r_adapter *,
				       struct esas2r_request *,
				       struct esas2r_sg_context *,
				       void *);
typedef void (*BUFFERED_IOCTL_DONE_CALLBACK)(struct esas2r_adapter *,
					     struct esas2r_request *, void *);

struct esas2r_buffered_ioctl {
	struct esas2r_adapter *a;
	void *ioctl;
	u32 length;
	u32 control_code;
	u32 offset;
	BUFFERED_IOCTL_CALLBACK callback;
	void *context;
	BUFFERED_IOCTL_DONE_CALLBACK done_callback;
	void *done_context;
};
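/*
 * Illustrative sketch (mine, not part of the original driver): this is
 * roughly how the handlers further down wire up a buffered ioctl.  The
 * names my_ioctl_buf, my_ioctl_len and my_callback are hypothetical
 * placeholders.
 *
 *	struct esas2r_buffered_ioctl bi;
 *
 *	memset(&bi, 0, sizeof(bi));
 *	bi.a = a;                    // adapter that will service the request
 *	bi.ioctl = my_ioctl_buf;     // caller buffer mirrored into DMA memory
 *	bi.length = my_ioctl_len;    // bytes to copy in and back out
 *	bi.callback = my_callback;   // builds the VDA request and starts it
 *	return handle_buffered_ioctl(&bi);
 */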
static void complete_fm_api_req(struct esas2r_adapter *a,
				struct esas2r_request *rq)
{
	a->fm_api_command_done = 1;
	wake_up_interruptible(&a->fm_api_waiter);
}
/* Callbacks for building scatter/gather lists for FM API requests */
static u32 get_physaddr_fm_api(struct esas2r_sg_context *sgc, u64 *addr)
{
	struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
	int offset = sgc->cur_offset - a->save_offset;

	(*addr) = a->firmware.phys + offset;
	return a->firmware.orig_len - offset;
}
static u32 get_physaddr_fm_api_header(struct esas2r_sg_context *sgc, u64 *addr)
{
	struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
	int offset = sgc->cur_offset - a->save_offset;

	(*addr) = a->firmware.header_buff_phys + offset;
	return sizeof(struct esas2r_flash_img) - offset;
}
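/*
 * A note on the PGETPHYSADDR contract (my reading of the callbacks above,
 * stated as an assumption rather than documented API): given the current
 * source offset in sgc->cur_offset, the callback stores the matching bus
 * address in *addr and returns how many contiguous bytes remain valid at
 * that address.  A scatter/gather builder can then consume the region in
 * chunks, along these lines:
 *
 *	while (sgc->length) {
 *		u64 addr;
 *		u32 avail = (*sgc->get_phys_addr)(sgc, &addr);
 *		u32 chunk = min(avail, sgc->length);
 *
 *		// emit an S/G element {addr, chunk}, then advance
 *		sgc->cur_offset += chunk;
 *		sgc->length -= chunk;
 *	}
 */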
/* Handle EXPRESS_IOCTL_RW_FIRMWARE ioctl with img_type = FW_IMG_FM_API. */
static void do_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
{
	struct esas2r_request *rq;

	if (mutex_lock_interruptible(&a->fm_api_mutex)) {
		fi->status = FI_STAT_BUSY;
		return;
	}

	rq = esas2r_alloc_request(a);
	if (rq == NULL) {
		fi->status = FI_STAT_BUSY;
		goto free_sem;
	}

	if (fi == &a->firmware.header) {
		a->firmware.header_buff =
			dma_alloc_coherent(&a->pcid->dev,
					   sizeof(struct esas2r_flash_img),
					   (dma_addr_t *)&a->firmware.header_buff_phys,
					   GFP_KERNEL);

		if (a->firmware.header_buff == NULL) {
			esas2r_debug("failed to allocate header buffer!");
			fi->status = FI_STAT_BUSY;
			goto free_req;
		}

		memcpy(a->firmware.header_buff, fi,
		       sizeof(struct esas2r_flash_img));
		a->save_offset = a->firmware.header_buff;
		a->fm_api_sgc.get_phys_addr =
			(PGETPHYSADDR)get_physaddr_fm_api_header;
	} else {
		a->save_offset = (u8 *)fi;
		a->fm_api_sgc.get_phys_addr =
			(PGETPHYSADDR)get_physaddr_fm_api;
	}

	rq->comp_cb = complete_fm_api_req;
	a->fm_api_command_done = 0;
	a->fm_api_sgc.cur_offset = a->save_offset;

	if (!esas2r_fm_api(a, (struct esas2r_flash_img *)a->save_offset, rq,
			   &a->fm_api_sgc))
		goto all_done;

	/* Now wait around for it to complete. */
	while (!a->fm_api_command_done)
		wait_event_interruptible(a->fm_api_waiter,
					 a->fm_api_command_done);

all_done:
	if (fi == &a->firmware.header) {
		memcpy(fi, a->firmware.header_buff,
		       sizeof(struct esas2r_flash_img));

		dma_free_coherent(&a->pcid->dev,
				  (size_t)sizeof(struct esas2r_flash_img),
				  a->firmware.header_buff,
				  (dma_addr_t)a->firmware.header_buff_phys);
	}

free_req:
	esas2r_free_request(a, (struct esas2r_request *)rq);
free_sem:
	mutex_unlock(&a->fm_api_mutex);
}
static void complete_nvr_req(struct esas2r_adapter *a,
			     struct esas2r_request *rq)
{
	a->nvram_command_done = 1;
	wake_up_interruptible(&a->nvram_waiter);
}
/* Callback for building scatter/gather lists for buffered ioctls */
static u32 get_physaddr_buffered_ioctl(struct esas2r_sg_context *sgc,
				       u64 *addr)
{
	int offset = (u8 *)sgc->cur_offset - esas2r_buffered_ioctl;

	(*addr) = esas2r_buffered_ioctl_addr + offset;
	return esas2r_buffered_ioctl_size - offset;
}
static void complete_buffered_ioctl_req(struct esas2r_adapter *a,
					struct esas2r_request *rq)
{
	a->buffered_ioctl_done = 1;
	wake_up_interruptible(&a->buffered_ioctl_waiter);
}
static u8 handle_buffered_ioctl(struct esas2r_buffered_ioctl *bi)
{
	struct esas2r_adapter *a = bi->a;
	struct esas2r_request *rq;
	struct esas2r_sg_context sgc;
	u8 result = IOCTL_SUCCESS;

	if (down_interruptible(&buffered_ioctl_semaphore))
		return IOCTL_OUT_OF_RESOURCES;

	/* allocate a buffer or use the existing buffer. */
	if (esas2r_buffered_ioctl) {
		if (esas2r_buffered_ioctl_size < bi->length) {
			/* free the too-small buffer and get a new one */
			dma_free_coherent(&a->pcid->dev,
					  (size_t)esas2r_buffered_ioctl_size,
					  esas2r_buffered_ioctl,
					  esas2r_buffered_ioctl_addr);

			goto allocate_buffer;
		}
	} else {
allocate_buffer:
		esas2r_buffered_ioctl_size = bi->length;
		esas2r_buffered_ioctl_pcid = a->pcid;
		esas2r_buffered_ioctl = dma_alloc_coherent(&a->pcid->dev,
							   (size_t)esas2r_buffered_ioctl_size,
							   &esas2r_buffered_ioctl_addr,
							   GFP_KERNEL);
	}

	if (!esas2r_buffered_ioctl) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "could not allocate %d bytes of consistent memory "
			   "for a buffered ioctl!",
			   bi->length);

		esas2r_debug("buffered ioctl alloc failure");
		result = IOCTL_OUT_OF_RESOURCES;
		goto exit_cleanly;
	}

	memcpy(esas2r_buffered_ioctl, bi->ioctl, bi->length);

	rq = esas2r_alloc_request(a);
	if (rq == NULL) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "could not allocate an internal request");

		result = IOCTL_OUT_OF_RESOURCES;
		esas2r_debug("buffered ioctl - no requests");
		goto exit_cleanly;
	}

	a->buffered_ioctl_done = 0;
	rq->comp_cb = complete_buffered_ioctl_req;
	sgc.cur_offset = esas2r_buffered_ioctl + bi->offset;
	sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_buffered_ioctl;
	sgc.length = esas2r_buffered_ioctl_size;

	if (!(*bi->callback)(a, rq, &sgc, bi->context)) {
		/* completed immediately, no need to wait */
		a->buffered_ioctl_done = 0;
		goto free_andexit_cleanly;
	}

	/* now wait around for it to complete. */
	while (!a->buffered_ioctl_done)
		wait_event_interruptible(a->buffered_ioctl_waiter,
					 a->buffered_ioctl_done);

free_andexit_cleanly:
	if (result == IOCTL_SUCCESS && bi->done_callback)
		(*bi->done_callback)(a, rq, bi->done_context);

	esas2r_free_request(a, rq);

exit_cleanly:
	if (result == IOCTL_SUCCESS)
		memcpy(bi->ioctl, esas2r_buffered_ioctl, bi->length);

	up(&buffered_ioctl_semaphore);
	return result;
}
/* SMP ioctl support */
static int smp_ioctl_callback(struct esas2r_adapter *a,
			      struct esas2r_request *rq,
			      struct esas2r_sg_context *sgc, void *context)
{
	struct atto_ioctl_smp *si =
		(struct atto_ioctl_smp *)esas2r_buffered_ioctl;

	esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
	esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_SMP);

	if (!esas2r_build_sg_list(a, rq, sgc)) {
		si->status = ATTO_STS_OUT_OF_RSRC;
		return false;
	}

	esas2r_start_request(a, rq);
	return true;
}
static u8 handle_smp_ioctl(struct esas2r_adapter *a, struct atto_ioctl_smp *si)
{
	struct esas2r_buffered_ioctl bi;

	memset(&bi, 0, sizeof(bi));

	bi.a = a;
	bi.ioctl = si;
	bi.length = sizeof(struct atto_ioctl_smp)
		    + le32_to_cpu(si->req_length)
		    + le32_to_cpu(si->rsp_length);
	bi.offset = 0;
	bi.callback = smp_ioctl_callback;
	return handle_buffered_ioctl(&bi);
}
/* CSMI ioctl support */
static void esas2r_csmi_ioctl_tunnel_comp_cb(struct esas2r_adapter *a,
					     struct esas2r_request *rq)
{
	rq->target_id = le16_to_cpu(rq->func_rsp.ioctl_rsp.csmi.target_id);
	rq->vrq->scsi.flags |= cpu_to_le32(rq->func_rsp.ioctl_rsp.csmi.lun);

	/* Now call the original completion callback. */
	(*rq->aux_req_cb)(a, rq);
}
/* Tunnel a CSMI IOCTL to the back end driver for processing. */
static bool csmi_ioctl_tunnel(struct esas2r_adapter *a,
			      union atto_ioctl_csmi *ci,
			      struct esas2r_request *rq,
			      struct esas2r_sg_context *sgc,
			      u32 ctrl_code,
			      u16 target_id)
{
	struct atto_vda_ioctl_req *ioctl = &rq->vrq->ioctl;

	if (test_bit(AF_DEGRADED_MODE, &a->flags))
		return false;

	esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
	esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_CSMI);
	ioctl->csmi.ctrl_code = cpu_to_le32(ctrl_code);
	ioctl->csmi.target_id = cpu_to_le16(target_id);
	ioctl->csmi.lun = (u8)le32_to_cpu(rq->vrq->scsi.flags);

	/*
	 * Always usurp the completion callback since the interrupt callback
	 * mechanism may be used.
	 */
	rq->aux_req_cx = ci;
	rq->aux_req_cb = rq->comp_cb;
	rq->comp_cb = esas2r_csmi_ioctl_tunnel_comp_cb;

	if (!esas2r_build_sg_list(a, rq, sgc))
		return false;

	esas2r_start_request(a, rq);
	return true;
}
static bool check_lun(struct scsi_lun lun)
{
	bool result;

	result = ((lun.scsi_lun[7] == 0) &&
		  (lun.scsi_lun[6] == 0) &&
		  (lun.scsi_lun[5] == 0) &&
		  (lun.scsi_lun[4] == 0) &&
		  (lun.scsi_lun[3] == 0) &&
		  (lun.scsi_lun[2] == 0) &&
		  /* Byte 1 is intentionally skipped */
		  (lun.scsi_lun[0] == 0));

	return result;
}
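/*
 * Worked example of the check above (my summary, not from the original
 * sources): SCSI LUNs are 8-byte structures, but this driver only supports
 * single-level LUNs whose value fits in byte 1 of the address.  For LUN 5,
 * scsi_lun[] = { 0x00, 0x05, 0, 0, 0, 0, 0, 0 }: byte 1 carries the LUN and
 * is deliberately not tested, while every other byte must be zero for
 * check_lun() to return true.
 */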
static int csmi_ioctl_callback(struct esas2r_adapter *a,
			       struct esas2r_request *rq,
			       struct esas2r_sg_context *sgc, void *context)
{
	struct atto_csmi *ci = (struct atto_csmi *)context;
	union atto_ioctl_csmi *ioctl_csmi =
		(union atto_ioctl_csmi *)esas2r_buffered_ioctl;
	u8 path = 0;
	u8 tid = 0;
	u8 lun = 0;
	u32 sts = CSMI_STS_SUCCESS;
	struct esas2r_target *t;
	unsigned long flags;

	if (ci->control_code == CSMI_CC_GET_DEV_ADDR) {
		struct atto_csmi_get_dev_addr *gda = &ci->data.dev_addr;

		path = gda->path_id;
		tid = gda->target_id;
		lun = gda->lun;
	} else if (ci->control_code == CSMI_CC_TASK_MGT) {
		struct atto_csmi_task_mgmt *tm = &ci->data.tsk_mgt;

		path = tm->path_id;
		tid = tm->target_id;
		lun = tm->lun;
	}

	if (path > 0) {
		rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(
			CSMI_STS_INV_PARAM);
		return false;
	}

	rq->target_id = tid;
	rq->vrq->scsi.flags |= cpu_to_le32(lun);

	switch (ci->control_code) {
	case CSMI_CC_GET_DRVR_INFO:
	{
		struct atto_csmi_get_driver_info *gdi = &ioctl_csmi->drvr_info;

		strcpy(gdi->description, esas2r_get_model_name(a));
		gdi->csmi_major_rev = CSMI_MAJOR_REV;
		gdi->csmi_minor_rev = CSMI_MINOR_REV;
		break;
	}

	case CSMI_CC_GET_CNTLR_CFG:
	{
		struct atto_csmi_get_cntlr_cfg *gcc = &ioctl_csmi->cntlr_cfg;

		gcc->base_io_addr = 0;
		pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_2,
				      &gcc->base_memaddr_lo);
		pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_3,
				      &gcc->base_memaddr_hi);
		gcc->board_id = MAKEDWORD(a->pcid->subsystem_device,
					  a->pcid->subsystem_vendor);
		gcc->slot_num = CSMI_SLOT_NUM_UNKNOWN;
		gcc->cntlr_class = CSMI_CNTLR_CLASS_HBA;
		gcc->io_bus_type = CSMI_BUS_TYPE_PCI;
		gcc->pci_addr.bus_num = a->pcid->bus->number;
		gcc->pci_addr.device_num = PCI_SLOT(a->pcid->devfn);
		gcc->pci_addr.function_num = PCI_FUNC(a->pcid->devfn);

		memset(gcc->serial_num, 0, sizeof(gcc->serial_num));

		gcc->major_rev = LOBYTE(LOWORD(a->fw_version));
		gcc->minor_rev = HIBYTE(LOWORD(a->fw_version));
		gcc->build_rev = LOBYTE(HIWORD(a->fw_version));
		gcc->release_rev = HIBYTE(HIWORD(a->fw_version));
		gcc->bios_major_rev = HIBYTE(HIWORD(a->flash_ver));
		gcc->bios_minor_rev = LOBYTE(HIWORD(a->flash_ver));
		gcc->bios_build_rev = LOWORD(a->flash_ver);

		if (test_bit(AF2_THUNDERLINK, &a->flags2))
			gcc->cntlr_flags = CSMI_CNTLRF_SAS_HBA
					   | CSMI_CNTLRF_SATA_HBA;
		else
			gcc->cntlr_flags = CSMI_CNTLRF_SAS_RAID
					   | CSMI_CNTLRF_SATA_RAID;

		gcc->rrom_major_rev = 0;
		gcc->rrom_minor_rev = 0;
		gcc->rrom_build_rev = 0;
		gcc->rrom_release_rev = 0;
		gcc->rrom_biosmajor_rev = 0;
		gcc->rrom_biosminor_rev = 0;
		gcc->rrom_biosbuild_rev = 0;
		gcc->rrom_biosrelease_rev = 0;
		break;
	}

	case CSMI_CC_GET_CNTLR_STS:
	{
		struct atto_csmi_get_cntlr_sts *gcs = &ioctl_csmi->cntlr_sts;

		if (test_bit(AF_DEGRADED_MODE, &a->flags))
			gcs->status = CSMI_CNTLR_STS_FAILED;
		else
			gcs->status = CSMI_CNTLR_STS_GOOD;

		gcs->offline_reason = CSMI_OFFLINE_NO_REASON;
		break;
	}

	case CSMI_CC_FW_DOWNLOAD:
	case CSMI_CC_GET_RAID_INFO:
	case CSMI_CC_GET_RAID_CFG:

		sts = CSMI_STS_BAD_CTRL_CODE;
		break;

	case CSMI_CC_SMP_PASSTHRU:
	case CSMI_CC_SSP_PASSTHRU:
	case CSMI_CC_STP_PASSTHRU:
	case CSMI_CC_GET_PHY_INFO:
	case CSMI_CC_SET_PHY_INFO:
	case CSMI_CC_GET_LINK_ERRORS:
	case CSMI_CC_GET_SATA_SIG:
	case CSMI_CC_GET_CONN_INFO:
	case CSMI_CC_PHY_CTRL:

		if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc,
				       ci->control_code,
				       ESAS2R_TARG_ID_INV)) {
			sts = CSMI_STS_FAILED;
			break;
		}

		return true;

	case CSMI_CC_GET_SCSI_ADDR:
	{
		struct atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr;

		struct scsi_lun lun;

		memcpy(&lun, gsa->sas_lun, sizeof(struct scsi_lun));

		if (!check_lun(lun)) {
			sts = CSMI_STS_NO_SCSI_ADDR;
			break;
		}

		/* make sure the device is present */
		spin_lock_irqsave(&a->mem_lock, flags);
		t = esas2r_targ_db_find_by_sas_addr(a, (u64 *)gsa->sas_addr);
		spin_unlock_irqrestore(&a->mem_lock, flags);

		if (t == NULL) {
			sts = CSMI_STS_NO_SCSI_ADDR;
			break;
		}

		gsa->host_index = 0xFF;
		gsa->lun = gsa->sas_lun[1];
		rq->target_id = esas2r_targ_get_id(t, a);
		break;
	}

	case CSMI_CC_GET_DEV_ADDR:
	{
		struct atto_csmi_get_dev_addr *gda = &ioctl_csmi->dev_addr;

		/* make sure the target is present */
		t = a->targetdb + rq->target_id;

		if (t >= a->targetdb_end
		    || t->target_state != TS_PRESENT
		    || t->sas_addr == 0) {
			sts = CSMI_STS_NO_DEV_ADDR;
			break;
		}

		/* fill in the result */
		*(u64 *)gda->sas_addr = t->sas_addr;
		memset(gda->sas_lun, 0, sizeof(gda->sas_lun));
		gda->sas_lun[1] = (u8)le32_to_cpu(rq->vrq->scsi.flags);
		break;
	}

	case CSMI_CC_TASK_MGT:

		/* make sure the target is present */
		t = a->targetdb + rq->target_id;

		if (t >= a->targetdb_end
		    || t->target_state != TS_PRESENT
		    || !(t->flags & TF_PASS_THRU)) {
			sts = CSMI_STS_NO_DEV_ADDR;
			break;
		}

		if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc,
				       ci->control_code,
				       t->phys_targ_id)) {
			sts = CSMI_STS_FAILED;
			break;
		}

		return true;

	default:

		sts = CSMI_STS_BAD_CTRL_CODE;
		break;
	}

	rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(sts);

	return false;
}
static void csmi_ioctl_done_callback(struct esas2r_adapter *a,
				     struct esas2r_request *rq, void *context)
{
	struct atto_csmi *ci = (struct atto_csmi *)context;
	union atto_ioctl_csmi *ioctl_csmi =
		(union atto_ioctl_csmi *)esas2r_buffered_ioctl;

	switch (ci->control_code) {
	case CSMI_CC_GET_DRVR_INFO:
	{
		struct atto_csmi_get_driver_info *gdi =
			&ioctl_csmi->drvr_info;

		strcpy(gdi->name, ESAS2R_VERSION_STR);

		gdi->major_rev = ESAS2R_MAJOR_REV;
		gdi->minor_rev = ESAS2R_MINOR_REV;
		gdi->build_rev = 0;
		gdi->release_rev = 0;
		break;
	}

	case CSMI_CC_GET_SCSI_ADDR:
	{
		struct atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr;

		if (le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status) ==
		    CSMI_STS_SUCCESS) {
			gsa->target_id = rq->target_id;
			gsa->path_id = 0;
		}

		break;
	}
	}

	ci->status = le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status);
}
static u8 handle_csmi_ioctl(struct esas2r_adapter *a, struct atto_csmi *ci)
{
	struct esas2r_buffered_ioctl bi;

	memset(&bi, 0, sizeof(bi));

	bi.a = a;
	bi.ioctl = &ci->data;
	bi.length = sizeof(union atto_ioctl_csmi);
	bi.offset = 0;
	bi.callback = csmi_ioctl_callback;
	bi.context = ci;
	bi.done_callback = csmi_ioctl_done_callback;
	bi.done_context = ci;

	return handle_buffered_ioctl(&bi);
}
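/*
 * Sketch of the CSMI round trip implemented above (my summary of the code,
 * not authoritative documentation):
 *
 *	handle_csmi_ioctl()
 *	  -> handle_buffered_ioctl()      copy ci->data into the DMA buffer
 *	    -> csmi_ioctl_callback()      service locally or tunnel to FW
 *	    ... wait for completion ...
 *	    -> csmi_ioctl_done_callback() post-process, set ci->status
 *	  <- copy the DMA buffer back into ci->data
 */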
/* ATTO HBA ioctl support */

/* Tunnel an ATTO HBA IOCTL to the back end driver for processing. */
static bool hba_ioctl_tunnel(struct esas2r_adapter *a,
			     struct atto_ioctl *hi,
			     struct esas2r_request *rq,
			     struct esas2r_sg_context *sgc)
{
	esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);

	esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_HBA);

	if (!esas2r_build_sg_list(a, rq, sgc)) {
		hi->status = ATTO_STS_OUT_OF_RSRC;
		return false;
	}

	esas2r_start_request(a, rq);

	return true;
}
static void scsi_passthru_comp_cb(struct esas2r_adapter *a,
				  struct esas2r_request *rq)
{
	struct atto_ioctl *hi = (struct atto_ioctl *)rq->aux_req_cx;
	struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru;
	u8 sts = ATTO_SPT_RS_FAILED;

	spt->scsi_status = rq->func_rsp.scsi_rsp.scsi_stat;
	spt->sense_length = rq->sense_len;
	spt->residual_length =
		le32_to_cpu(rq->func_rsp.scsi_rsp.residual_length);

	switch (rq->req_stat) {
	case RS_SUCCESS:
	case RS_SCSI_ERROR:
		sts = ATTO_SPT_RS_SUCCESS;
		break;
	case RS_UNDERRUN:
		sts = ATTO_SPT_RS_UNDERRUN;
		break;
	case RS_OVERRUN:
		sts = ATTO_SPT_RS_OVERRUN;
		break;
	case RS_SEL:
	case RS_SEL2:
		sts = ATTO_SPT_RS_NO_DEVICE;
		break;
	case RS_NO_LUN:
		sts = ATTO_SPT_RS_NO_LUN;
		break;
	case RS_TIMEOUT:
		sts = ATTO_SPT_RS_TIMEOUT;
		break;
	case RS_DEGRADED:
		sts = ATTO_SPT_RS_DEGRADED;
		break;
	case RS_BUSY:
		sts = ATTO_SPT_RS_BUSY;
		break;
	case RS_ABORTED:
		sts = ATTO_SPT_RS_ABORTED;
		break;
	case RS_RESET:
		sts = ATTO_SPT_RS_BUS_RESET;
		break;
	}

	spt->req_status = sts;

	/* Update the target ID to the next one present. */
	spt->target_id =
		esas2r_targ_db_find_next_present(a, (u16)spt->target_id);

	/* Done, call the completion callback. */
	(*rq->aux_req_cb)(a, rq);
}
static int hba_ioctl_callback(struct esas2r_adapter *a,
			      struct esas2r_request *rq,
			      struct esas2r_sg_context *sgc,
			      void *context)
{
	struct atto_ioctl *hi = (struct atto_ioctl *)esas2r_buffered_ioctl;

	hi->status = ATTO_STS_SUCCESS;

	switch (hi->function) {
	case ATTO_FUNC_GET_ADAP_INFO:
	{
		u8 *class_code = (u8 *)&a->pcid->class;

		struct atto_hba_get_adapter_info *gai =
			&hi->data.get_adap_info;

		if (hi->flags & HBAF_TUNNEL) {
			hi->status = ATTO_STS_UNSUPPORTED;
			break;
		}

		if (hi->version > ATTO_VER_GET_ADAP_INFO0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_GET_ADAP_INFO0;
			break;
		}

		memset(gai, 0, sizeof(*gai));

		gai->pci.vendor_id = a->pcid->vendor;
		gai->pci.device_id = a->pcid->device;
		gai->pci.ss_vendor_id = a->pcid->subsystem_vendor;
		gai->pci.ss_device_id = a->pcid->subsystem_device;
		gai->pci.class_code[0] = class_code[0];
		gai->pci.class_code[1] = class_code[1];
		gai->pci.class_code[2] = class_code[2];
		gai->pci.rev_id = a->pcid->revision;
		gai->pci.bus_num = a->pcid->bus->number;
		gai->pci.dev_num = PCI_SLOT(a->pcid->devfn);
		gai->pci.func_num = PCI_FUNC(a->pcid->devfn);

		if (pci_is_pcie(a->pcid)) {
			u16 stat;
			u32 caps;

			pcie_capability_read_word(a->pcid, PCI_EXP_LNKSTA,
						  &stat);
			pcie_capability_read_dword(a->pcid, PCI_EXP_LNKCAP,
						   &caps);

			gai->pci.link_speed_curr =
				(u8)(stat & PCI_EXP_LNKSTA_CLS);
			gai->pci.link_speed_max =
				(u8)(caps & PCI_EXP_LNKCAP_SLS);
			gai->pci.link_width_curr =
				(u8)((stat & PCI_EXP_LNKSTA_NLW)
				     >> PCI_EXP_LNKSTA_NLW_SHIFT);
			gai->pci.link_width_max =
				(u8)((caps & PCI_EXP_LNKCAP_MLW)
				     >> 4);
		}

		gai->pci.msi_vector_cnt = 1;

		if (a->pcid->msix_enabled)
			gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSIX;
		else if (a->pcid->msi_enabled)
			gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSI;
		else
			gai->pci.interrupt_mode = ATTO_GAI_PCIIM_LEGACY;

		gai->adap_type = ATTO_GAI_AT_ESASRAID2;

		if (test_bit(AF2_THUNDERLINK, &a->flags2))
			gai->adap_type = ATTO_GAI_AT_TLSASHBA;

		if (test_bit(AF_DEGRADED_MODE, &a->flags))
			gai->adap_flags |= ATTO_GAI_AF_DEGRADED;

		gai->adap_flags |= ATTO_GAI_AF_SPT_SUPP |
				   ATTO_GAI_AF_DEVADDR_SUPP;

		if (a->pcid->subsystem_device == ATTO_ESAS_R60F
		    || a->pcid->subsystem_device == ATTO_ESAS_R608
		    || a->pcid->subsystem_device == ATTO_ESAS_R644
		    || a->pcid->subsystem_device == ATTO_TSSC_3808E)
			gai->adap_flags |= ATTO_GAI_AF_VIRT_SES;

		gai->num_ports = ESAS2R_NUM_PHYS;
		gai->num_phys = ESAS2R_NUM_PHYS;

		strcpy(gai->firmware_rev, a->fw_rev);
		strcpy(gai->flash_rev, a->flash_rev);
		strcpy(gai->model_name_short, esas2r_get_model_name_short(a));
		strcpy(gai->model_name, esas2r_get_model_name(a));

		gai->num_targets = ESAS2R_MAX_TARGETS;

		gai->num_busses = 1;
		gai->num_targsper_bus = gai->num_targets;
		gai->num_lunsper_targ = 256;

		if (a->pcid->subsystem_device == ATTO_ESAS_R6F0
		    || a->pcid->subsystem_device == ATTO_ESAS_R60F)
			gai->num_connectors = 4;
		else
			gai->num_connectors = 2;

		gai->adap_flags2 |= ATTO_GAI_AF2_ADAP_CTRL_SUPP;

		gai->num_targets_backend = a->num_targets_backend;

		gai->tunnel_flags = a->ioctl_tunnel
				    & (ATTO_GAI_TF_MEM_RW
				       | ATTO_GAI_TF_TRACE
				       | ATTO_GAI_TF_SCSI_PASS_THRU
				       | ATTO_GAI_TF_GET_DEV_ADDR
				       | ATTO_GAI_TF_PHY_CTRL
				       | ATTO_GAI_TF_CONN_CTRL
				       | ATTO_GAI_TF_GET_DEV_INFO);
		break;
	}
	case ATTO_FUNC_GET_ADAP_ADDR:
	{
		struct atto_hba_get_adapter_address *gaa =
			&hi->data.get_adap_addr;

		if (hi->flags & HBAF_TUNNEL) {
			hi->status = ATTO_STS_UNSUPPORTED;
			break;
		}

		if (hi->version > ATTO_VER_GET_ADAP_ADDR0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_GET_ADAP_ADDR0;
		} else if (gaa->addr_type == ATTO_GAA_AT_PORT
			   || gaa->addr_type == ATTO_GAA_AT_NODE) {
			if (gaa->addr_type == ATTO_GAA_AT_PORT
			    && gaa->port_id >= ESAS2R_NUM_PHYS) {
				hi->status = ATTO_STS_NOT_APPL;
			} else {
				memcpy((u64 *)gaa->address,
				       &a->nvram->sas_addr[0], sizeof(u64));
				gaa->addr_len = sizeof(u64);
			}
		} else {
			hi->status = ATTO_STS_INV_PARAM;
		}

		break;
	}

	case ATTO_FUNC_MEM_RW:
	{
		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		hi->status = ATTO_STS_UNSUPPORTED;

		break;
	}
	case ATTO_FUNC_TRACE:
	{
		struct atto_hba_trace *trc = &hi->data.trace;

		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		if (hi->version > ATTO_VER_TRACE1) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_TRACE1;
			break;
		}

		if (trc->trace_type == ATTO_TRC_TT_FWCOREDUMP
		    && hi->version >= ATTO_VER_TRACE1) {
			if (trc->trace_func == ATTO_TRC_TF_UPLOAD) {
				u32 len = hi->data_length;
				u32 offset = trc->current_offset;
				u32 total_len = ESAS2R_FWCOREDUMP_SZ;

				/* Size is zero if a core dump isn't present */
				if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2))
					total_len = 0;

				if (len > total_len)
					len = total_len;

				if (offset >= total_len
				    || offset + len > total_len
				    || len == 0) {
					hi->status = ATTO_STS_INV_PARAM;
					break;
				}

				memcpy(trc + 1,
				       a->fw_coredump_buff + offset,
				       len);

				hi->data_length = len;
			} else if (trc->trace_func == ATTO_TRC_TF_RESET) {
				memset(a->fw_coredump_buff, 0,
				       ESAS2R_FWCOREDUMP_SZ);

				clear_bit(AF2_COREDUMP_SAVED, &a->flags2);
			} else if (trc->trace_func != ATTO_TRC_TF_GET_INFO) {
				hi->status = ATTO_STS_UNSUPPORTED;
				break;
			}

			/* Always return all the info we can. */
			trc->trace_mask = 0;
			trc->current_offset = 0;
			trc->total_length = ESAS2R_FWCOREDUMP_SZ;

			/* Return zero length buffer if core dump not present */
			if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2))
				trc->total_length = 0;
		} else {
			hi->status = ATTO_STS_UNSUPPORTED;
		}

		break;
	}
	case ATTO_FUNC_SCSI_PASS_THRU:
	{
		struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru;
		struct scsi_lun lun;

		memcpy(&lun, spt->lun, sizeof(struct scsi_lun));

		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		if (hi->version > ATTO_VER_SCSI_PASS_THRU0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_SCSI_PASS_THRU0;
			break;
		}

		if (spt->target_id >= ESAS2R_MAX_TARGETS || !check_lun(lun)) {
			hi->status = ATTO_STS_INV_PARAM;
			break;
		}

		esas2r_sgc_init(sgc, a, rq, NULL);

		sgc->length = hi->data_length;
		sgc->cur_offset += offsetof(struct atto_ioctl, data.byte)
				   + sizeof(struct atto_hba_scsi_pass_thru);

		/* Finish request initialization */
		rq->target_id = (u16)spt->target_id;
		rq->vrq->scsi.flags |= cpu_to_le32(spt->lun[1]);
		memcpy(rq->vrq->scsi.cdb, spt->cdb, 16);
		rq->vrq->scsi.length = cpu_to_le32(hi->data_length);
		rq->sense_len = spt->sense_length;
		rq->sense_buf = (u8 *)spt->sense_data;
		/* NOTE: we ignore spt->timeout */

		/*
		 * always usurp the completion callback since the interrupt
		 * callback mechanism may be used.
		 */
		rq->aux_req_cx = hi;
		rq->aux_req_cb = rq->comp_cb;
		rq->comp_cb = scsi_passthru_comp_cb;

		if (spt->flags & ATTO_SPTF_DATA_IN) {
			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_RDD);
		} else if (spt->flags & ATTO_SPTF_DATA_OUT) {
			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD);
		} else {
			if (sgc->length) {
				hi->status = ATTO_STS_INV_PARAM;
				break;
			}
		}

		if (spt->flags & ATTO_SPTF_ORDERED_Q)
			rq->vrq->scsi.flags |=
				cpu_to_le32(FCP_CMND_TA_ORDRD_Q);
		else if (spt->flags & ATTO_SPTF_HEAD_OF_Q)
			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_TA_HEAD_Q);

		if (!esas2r_build_sg_list(a, rq, sgc)) {
			hi->status = ATTO_STS_OUT_OF_RSRC;
			break;
		}

		esas2r_start_request(a, rq);

		return true;
	}
	case ATTO_FUNC_GET_DEV_ADDR:
	{
		struct atto_hba_get_device_address *gda =
			&hi->data.get_dev_addr;
		struct esas2r_target *t;

		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		if (hi->version > ATTO_VER_GET_DEV_ADDR0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_GET_DEV_ADDR0;
			break;
		}

		if (gda->target_id >= ESAS2R_MAX_TARGETS) {
			hi->status = ATTO_STS_INV_PARAM;
			break;
		}

		t = a->targetdb + (u16)gda->target_id;

		if (t->target_state != TS_PRESENT) {
			hi->status = ATTO_STS_FAILED;
		} else if (gda->addr_type == ATTO_GDA_AT_PORT) {
			if (t->sas_addr == 0) {
				hi->status = ATTO_STS_UNSUPPORTED;
			} else {
				*(u64 *)gda->address = t->sas_addr;
				gda->addr_len = sizeof(u64);
			}
		} else if (gda->addr_type == ATTO_GDA_AT_NODE) {
			hi->status = ATTO_STS_NOT_APPL;
		} else {
			hi->status = ATTO_STS_INV_PARAM;
		}

		/* update the target ID to the next one present. */
		gda->target_id =
			esas2r_targ_db_find_next_present(a,
							 (u16)gda->target_id);
		break;
	}
	case ATTO_FUNC_PHY_CTRL:
	case ATTO_FUNC_CONN_CTRL:
	{
		if (hba_ioctl_tunnel(a, hi, rq, sgc))
			return true;

		break;
	}
	case ATTO_FUNC_ADAP_CTRL:
	{
		struct atto_hba_adap_ctrl *ac = &hi->data.adap_ctrl;

		if (hi->flags & HBAF_TUNNEL) {
			hi->status = ATTO_STS_UNSUPPORTED;
			break;
		}

		if (hi->version > ATTO_VER_ADAP_CTRL0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_ADAP_CTRL0;
			break;
		}

		if (ac->adap_func == ATTO_AC_AF_HARD_RST) {
			esas2r_reset_adapter(a);
		} else if (ac->adap_func != ATTO_AC_AF_GET_STATE) {
			hi->status = ATTO_STS_UNSUPPORTED;
			break;
		}

		if (test_bit(AF_CHPRST_NEEDED, &a->flags))
			ac->adap_state = ATTO_AC_AS_RST_SCHED;
		else if (test_bit(AF_CHPRST_PENDING, &a->flags))
			ac->adap_state = ATTO_AC_AS_RST_IN_PROG;
		else if (test_bit(AF_DISC_PENDING, &a->flags))
			ac->adap_state = ATTO_AC_AS_RST_DISC;
		else if (test_bit(AF_DISABLED, &a->flags))
			ac->adap_state = ATTO_AC_AS_DISABLED;
		else if (test_bit(AF_DEGRADED_MODE, &a->flags))
			ac->adap_state = ATTO_AC_AS_DEGRADED;
		else
			ac->adap_state = ATTO_AC_AS_OK;

		break;
	}
	case ATTO_FUNC_GET_DEV_INFO:
	{
		struct atto_hba_get_device_info *gdi = &hi->data.get_dev_info;
		struct esas2r_target *t;

		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		if (hi->version > ATTO_VER_GET_DEV_INFO0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_GET_DEV_INFO0;
			break;
		}

		if (gdi->target_id >= ESAS2R_MAX_TARGETS) {
			hi->status = ATTO_STS_INV_PARAM;
			break;
		}

		t = a->targetdb + (u16)gdi->target_id;

		/* update the target ID to the next one present. */
		gdi->target_id =
			esas2r_targ_db_find_next_present(a,
							 (u16)gdi->target_id);

		if (t->target_state != TS_PRESENT) {
			hi->status = ATTO_STS_FAILED;
			break;
		}

		hi->status = ATTO_STS_UNSUPPORTED;
		break;
	}

	default:

		hi->status = ATTO_STS_INV_FUNC;
		break;
	}

	return false;
}
static void hba_ioctl_done_callback(struct esas2r_adapter *a,
				    struct esas2r_request *rq, void *context)
{
	struct atto_ioctl *ioctl_hba =
		(struct atto_ioctl *)esas2r_buffered_ioctl;

	esas2r_debug("hba_ioctl_done_callback %d", a->index);

	if (ioctl_hba->function == ATTO_FUNC_GET_ADAP_INFO) {
		struct atto_hba_get_adapter_info *gai =
			&ioctl_hba->data.get_adap_info;

		esas2r_debug("ATTO_FUNC_GET_ADAP_INFO");

		gai->drvr_rev_major = ESAS2R_MAJOR_REV;
		gai->drvr_rev_minor = ESAS2R_MINOR_REV;

		strcpy(gai->drvr_rev_ascii, ESAS2R_VERSION_STR);
		strcpy(gai->drvr_name, ESAS2R_DRVR_NAME);

		gai->num_busses = 1;
		gai->num_targsper_bus = ESAS2R_MAX_ID + 1;
		gai->num_lunsper_targ = 1;
	}
}
u8 handle_hba_ioctl(struct esas2r_adapter *a,
		    struct atto_ioctl *ioctl_hba)
{
	struct esas2r_buffered_ioctl bi;

	memset(&bi, 0, sizeof(bi));

	bi.a = a;
	bi.ioctl = ioctl_hba;
	bi.length = sizeof(struct atto_ioctl) + ioctl_hba->data_length;
	bi.callback = hba_ioctl_callback;
	bi.context = NULL;
	bi.done_callback = hba_ioctl_done_callback;
	bi.done_context = NULL;
	bi.offset = 0;

	return handle_buffered_ioctl(&bi);
}
int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq,
			struct esas2r_sas_nvram *data)
{
	int result = 0;

	a->nvram_command_done = 0;
	rq->comp_cb = complete_nvr_req;

	if (esas2r_nvram_write(a, rq, data)) {
		/* now wait around for it to complete. */
		while (!a->nvram_command_done)
			wait_event_interruptible(a->nvram_waiter,
						 a->nvram_command_done);

		/* done, check the status. */
		if (rq->req_stat == RS_SUCCESS)
			result = 1;
	}

	return result;
}
/* This function only cares about ATTO-specific ioctls (atto_express_ioctl) */
int esas2r_ioctl_handler(void *hostdata, unsigned int cmd, void __user *arg)
{
	struct atto_express_ioctl *ioctl = NULL;
	struct esas2r_adapter *a;
	struct esas2r_request *rq;
	u16 code;
	int err = 0;

	esas2r_log(ESAS2R_LOG_DEBG, "ioctl (%p, %x, %p)", hostdata, cmd, arg);

	if ((arg == NULL)
	    || (cmd < EXPRESS_IOCTL_MIN)
	    || (cmd > EXPRESS_IOCTL_MAX))
		return -ENOTSUPP;

	ioctl = memdup_user(arg, sizeof(struct atto_express_ioctl));
	if (IS_ERR(ioctl)) {
		esas2r_log(ESAS2R_LOG_WARN,
			   "ioctl_handler access_ok failed for cmd %u, address %p",
			   cmd, arg);
		return PTR_ERR(ioctl);
	}

	/* verify the signature */

	if (memcmp(ioctl->header.signature,
		   EXPRESS_IOCTL_SIGNATURE,
		   EXPRESS_IOCTL_SIGNATURE_SIZE) != 0) {
		esas2r_log(ESAS2R_LOG_WARN, "invalid signature");
		kfree(ioctl);

		return -ENOTSUPP;
	}

	/* assume success */

	ioctl->header.return_code = IOCTL_SUCCESS;
	/*
	 * handle EXPRESS_IOCTL_GET_CHANNELS
	 * without paying attention to channel
	 */

	if (cmd == EXPRESS_IOCTL_GET_CHANNELS) {
		int i = 0, k = 0;

		ioctl->data.chanlist.num_channels = 0;

		while (i < MAX_ADAPTERS) {
			if (esas2r_adapters[i]) {
				ioctl->data.chanlist.num_channels++;
				ioctl->data.chanlist.channel[k] = i;
				k++;
			}
			i++;
		}

		goto ioctl_done;
	}
	/* get the channel */

	if (ioctl->header.channel == 0xFF) {
		a = (struct esas2r_adapter *)hostdata;
	} else {
		if (ioctl->header.channel >= MAX_ADAPTERS ||
		    esas2r_adapters[ioctl->header.channel] == NULL) {
			ioctl->header.return_code = IOCTL_BAD_CHANNEL;
			esas2r_log(ESAS2R_LOG_WARN, "bad channel value");
			kfree(ioctl);

			return -ENOTSUPP;
		}
		a = esas2r_adapters[ioctl->header.channel];
	}
	switch (cmd) {
	case EXPRESS_IOCTL_RW_FIRMWARE:

		if (ioctl->data.fwrw.img_type == FW_IMG_FM_API) {
			err = esas2r_write_fw(a,
					      (char *)ioctl->data.fwrw.image,
					      0,
					      sizeof(struct
						     atto_express_ioctl));

			if (err >= 0) {
				err = esas2r_read_fw(a,
						     (char *)ioctl->data.fwrw.image,
						     0,
						     sizeof(struct
							    atto_express_ioctl));
			}
		} else if (ioctl->data.fwrw.img_type == FW_IMG_FS_API) {
			err = esas2r_write_fs(a,
					      (char *)ioctl->data.fwrw.image,
					      0,
					      sizeof(struct
						     atto_express_ioctl));

			if (err >= 0) {
				err = esas2r_read_fs(a,
						     (char *)ioctl->data.fwrw.image,
						     0,
						     sizeof(struct
							    atto_express_ioctl));
			}
		} else {
			ioctl->header.return_code = IOCTL_BAD_FLASH_IMGTYPE;
		}

		break;
	case EXPRESS_IOCTL_READ_PARAMS:

		memcpy(ioctl->data.prw.data_buffer, a->nvram,
		       sizeof(struct esas2r_sas_nvram));
		ioctl->data.prw.code = 1;
		break;
	case EXPRESS_IOCTL_WRITE_PARAMS:

		rq = esas2r_alloc_request(a);
		if (rq == NULL) {
			kfree(ioctl);
			esas2r_log(ESAS2R_LOG_WARN,
				   "could not allocate an internal request");
			return -ENOMEM;
		}

		code = esas2r_write_params(a, rq,
					   (struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer);
		ioctl->data.prw.code = code;

		esas2r_free_request(a, rq);

		break;
	case EXPRESS_IOCTL_DEFAULT_PARAMS:

		esas2r_nvram_get_defaults(a,
					  (struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer);
		ioctl->data.prw.code = 1;
		break;
	case EXPRESS_IOCTL_CHAN_INFO:

		ioctl->data.chaninfo.major_rev = ESAS2R_MAJOR_REV;
		ioctl->data.chaninfo.minor_rev = ESAS2R_MINOR_REV;
		ioctl->data.chaninfo.IRQ = a->pcid->irq;
		ioctl->data.chaninfo.device_id = a->pcid->device;
		ioctl->data.chaninfo.vendor_id = a->pcid->vendor;
		ioctl->data.chaninfo.ven_dev_id = a->pcid->subsystem_device;
		ioctl->data.chaninfo.revision_id = a->pcid->revision;
		ioctl->data.chaninfo.pci_bus = a->pcid->bus->number;
		ioctl->data.chaninfo.pci_dev_func = a->pcid->devfn;
		ioctl->data.chaninfo.core_rev = 0;
		ioctl->data.chaninfo.host_no = a->host->host_no;
		ioctl->data.chaninfo.hbaapi_rev = 0;
		break;
	case EXPRESS_IOCTL_SMP:
		ioctl->header.return_code =
			handle_smp_ioctl(a, &ioctl->data.ioctl_smp);
		break;

	case EXPRESS_CSMI:
		ioctl->header.return_code =
			handle_csmi_ioctl(a, &ioctl->data.csmi);
		break;

	case EXPRESS_IOCTL_HBA:
		ioctl->header.return_code =
			handle_hba_ioctl(a, &ioctl->data.ioctl_hba);
		break;
	case EXPRESS_IOCTL_VDA:
		err = esas2r_write_vda(a,
				       (char *)&ioctl->data.ioctl_vda,
				       0,
				       sizeof(struct atto_ioctl_vda) +
				       ioctl->data.ioctl_vda.data_length);

		if (err >= 0) {
			err = esas2r_read_vda(a,
					      (char *)&ioctl->data.ioctl_vda,
					      0,
					      sizeof(struct atto_ioctl_vda) +
					      ioctl->data.ioctl_vda.data_length);
		}

		break;
	case EXPRESS_IOCTL_GET_MOD_INFO:

		ioctl->data.modinfo.adapter = a;
		ioctl->data.modinfo.pci_dev = a->pcid;
		ioctl->data.modinfo.scsi_host = a->host;
		ioctl->data.modinfo.host_no = a->host->host_no;

		break;
	default:
		esas2r_debug("esas2r_ioctl invalid cmd %u!", cmd);
		ioctl->header.return_code = IOCTL_ERR_INVCMD;
	}

ioctl_done:

	if (err < 0) {
		esas2r_log(ESAS2R_LOG_WARN, "err %d on ioctl cmd %u", err,
			   cmd);

		switch (err) {
		case -ENOMEM:
		case -EBUSY:
			ioctl->header.return_code = IOCTL_OUT_OF_RESOURCES;
			break;

		case -ENOSYS:
		case -EINVAL:
			ioctl->header.return_code = IOCTL_INVALID_PARAM;
			break;

		default:
			ioctl->header.return_code = IOCTL_GENERAL_ERROR;
			break;
		}
	}

	/* Always copy the buffer back, if only to pick up the status */
	err = __copy_to_user(arg, ioctl, sizeof(struct atto_express_ioctl));
	if (err != 0) {
		esas2r_log(ESAS2R_LOG_WARN,
			   "ioctl_handler copy_to_user didn't copy everything (err %d, cmd %u)",
			   err, cmd);
		kfree(ioctl);

		return -EFAULT;
	}

	kfree(ioctl);

	return 0;
}
int esas2r_ioctl(struct scsi_device *sd, unsigned int cmd, void __user *arg)
{
	return esas2r_ioctl_handler(sd->host->hostdata, cmd, arg);
}
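/*
 * Example (illustrative sketch, not from the original sources): a user-space
 * tool drives this handler through the SCSI host's ioctl entry point.  The
 * device path below is a placeholder, and the header layout is assumed to
 * carry the signature, version, channel, and return_code fields checked
 * above.
 *
 *	struct atto_express_ioctl ei;
 *
 *	int fd = open("/dev/sgN", O_RDWR);    // placeholder node
 *
 *	memset(&ei, 0, sizeof(ei));
 *	memcpy(ei.header.signature, EXPRESS_IOCTL_SIGNATURE,
 *	       EXPRESS_IOCTL_SIGNATURE_SIZE);
 *	ei.header.channel = 0xFF;             // use the adapter backing fd
 *
 *	if (ioctl(fd, EXPRESS_IOCTL_CHAN_INFO, &ei) == 0 &&
 *	    ei.header.return_code == IOCTL_SUCCESS)
 *		printf("device id %04x\n", ei.data.chaninfo.device_id);
 */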
static void free_fw_buffers(struct esas2r_adapter *a)
{
	if (a->firmware.data) {
		dma_free_coherent(&a->pcid->dev,
				  (size_t)a->firmware.orig_len,
				  a->firmware.data,
				  (dma_addr_t)a->firmware.phys);

		a->firmware.data = NULL;
	}
}
static int allocate_fw_buffers(struct esas2r_adapter *a, u32 length)
{
	free_fw_buffers(a);

	a->firmware.orig_len = length;

	a->firmware.data = (u8 *)dma_alloc_coherent(&a->pcid->dev,
						    (size_t)length,
						    (dma_addr_t *)&a->firmware.phys,
						    GFP_KERNEL);

	if (!a->firmware.data) {
		esas2r_debug("buffer alloc failed!");
		return 0;
	}

	return 1;
}
/* Handle a call to read firmware. */
int esas2r_read_fw(struct esas2r_adapter *a, char *buf, long off, int count)
{
	esas2r_trace_enter();
	/* if the cached header is a status, simply copy it over and return. */
	if (a->firmware.state == FW_STATUS_ST) {
		int size = min_t(int, count, sizeof(a->firmware.header));
		esas2r_trace_exit();
		memcpy(buf, &a->firmware.header, size);
		esas2r_debug("esas2r_read_fw: STATUS size %d", size);
		return size;
	}

	/*
	 * if the cached header is a command, do it if at
	 * offset 0, otherwise copy the pieces.
	 */

	if (a->firmware.state == FW_COMMAND_ST) {
		u32 length = a->firmware.header.length;
		esas2r_trace_exit();

		esas2r_debug("esas2r_read_fw: COMMAND length %d off %d",
			     length,
			     off);

		if (off == 0) {
			if (a->firmware.header.action == FI_ACT_UP) {
				if (!allocate_fw_buffers(a, length))
					return -ENOMEM;

				/* copy header over */

				memcpy(a->firmware.data,
				       &a->firmware.header,
				       sizeof(a->firmware.header));

				do_fm_api(a,
					  (struct esas2r_flash_img *)a->firmware.data);
			} else if (a->firmware.header.action == FI_ACT_UPSZ) {
				int size =
					min((int)count,
					    (int)sizeof(a->firmware.header));
				do_fm_api(a, &a->firmware.header);
				memcpy(buf, &a->firmware.header, size);
				esas2r_debug("FI_ACT_UPSZ size %d", size);
				return size;
			} else {
				esas2r_debug("invalid action %d",
					     a->firmware.header.action);
				return -ENOSYS;
			}
		}

		if (count + off > length)
			count = length - off;

		if (count < 0)
			return 0;

		if (!a->firmware.data) {
			esas2r_debug(
				"read: nonzero offset but no buffer available!");
			return -ENOMEM;
		}

		esas2r_debug("esas2r_read_fw: off %d count %d length %d ", off,
			     count,
			     length);

		memcpy(buf, &a->firmware.data[off], count);

		/* when done, release the buffer */

		if (length <= off + count) {
			esas2r_debug("esas2r_read_fw: freeing buffer!");

			free_fw_buffers(a);
		}

		return count;
	}

	esas2r_trace_exit();
	esas2r_debug("esas2r_read_fw: invalid firmware state %d",
		     a->firmware.state);

	return -EINVAL;
}
/* Handle a call to write firmware. */
int esas2r_write_fw(struct esas2r_adapter *a, const char *buf, long off,
		    int count)
{
	u32 length;

	if (off == 0) {
		struct esas2r_flash_img *header =
			(struct esas2r_flash_img *)buf;

		/* assume version 0 flash image */

		int min_size = sizeof(struct esas2r_flash_img_v0);

		a->firmware.state = FW_INVALID_ST;

		/* validate the version field first */

		if (count < 4
		    || header->fi_version > FI_VERSION_1) {
			esas2r_debug(
				"esas2r_write_fw: short header or invalid version");
			return -EINVAL;
		}

		/* See if its a version 1 flash image */

		if (header->fi_version == FI_VERSION_1)
			min_size = sizeof(struct esas2r_flash_img);

		/* If this is the start, the header must be full and valid. */
		if (count < min_size) {
			esas2r_debug("esas2r_write_fw: short header, aborting");
			return -EINVAL;
		}

		/* Make sure the size is reasonable. */
		length = header->length;

		if (length > 1024 * 1024) {
			esas2r_debug(
				"esas2r_write_fw: hosed, length %d fi_version %d",
				length, header->fi_version);
			return -EINVAL;
		}

		/*
		 * If this is a write command, allocate memory because
		 * we have to cache everything. otherwise, just cache
		 * the header, because the read op will do the command.
		 */

		if (header->action == FI_ACT_DOWN) {
			if (!allocate_fw_buffers(a, length))
				return -ENOMEM;

			/*
			 * Store the command, so there is context on subsequent
			 * calls.
			 */
			memcpy(&a->firmware.header,
			       buf,
			       sizeof(*header));
		} else if (header->action == FI_ACT_UP
			   || header->action == FI_ACT_UPSZ) {
			/* Save the command, result will be picked up on read */
			memcpy(&a->firmware.header,
			       buf,
			       sizeof(*header));

			a->firmware.state = FW_COMMAND_ST;

			esas2r_debug(
				"esas2r_write_fw: COMMAND, count %d, action %d ",
				count, header->action);

			/*
			 * Pretend we took the whole buffer,
			 * so we don't get bothered again.
			 */

			return count;
		} else {
			esas2r_debug("esas2r_write_fw: invalid action %d ",
				     a->firmware.header.action);
			return -ENOSYS;
		}
	} else {
		length = a->firmware.header.length;
	}

	/*
	 * We only get here on a download command, regardless of offset.
	 * the chunks written by the system need to be cached, and when
	 * the final one arrives, issue the fmapi command.
	 */

	if (off + count > length)
		count = length - off;

	if (count > 0) {
		esas2r_debug("esas2r_write_fw: off %d count %d length %d", off,
			     count,
			     length);

		/*
		 * On a full upload, the system tries sending the whole buffer.
		 * there's nothing to do with it, so just drop it here, before
		 * trying to copy over into unallocated memory!
		 */
		if (a->firmware.header.action == FI_ACT_UP)
			return count;

		if (!a->firmware.data) {
			esas2r_debug(
				"write: nonzero offset but no buffer available!");
			return -ENOMEM;
		}

		memcpy(&a->firmware.data[off], buf, count);

		if (length == off + count) {
			do_fm_api(a,
				  (struct esas2r_flash_img *)a->firmware.data);

			/*
			 * Now copy the header result to be picked up by the
			 * next read
			 */
			memcpy(&a->firmware.header,
			       a->firmware.data,
			       sizeof(a->firmware.header));

			a->firmware.state = FW_STATUS_ST;

			esas2r_debug("write completed");

			/*
			 * Since the system has the data buffered, the only way
			 * this can leak is if a root user writes a program
			 * that writes a shorter buffer than it claims, and the
			 * copyout fails.
			 */
			free_fw_buffers(a);
		}
	}

	return count;
}
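/*
 * Usage sketch (my summary of the write/read pairing above, stated as an
 * assumption rather than documented contract): EXPRESS_IOCTL_RW_FIRMWARE
 * drives flash access as a write-then-read transaction on the same
 * esas2r_flash_img buffer:
 *
 *	err = esas2r_write_fw(a, image, 0, len);      // cache command/data
 *	if (err >= 0)
 *		err = esas2r_read_fw(a, image, 0, len); // run it, get status
 *
 * For FI_ACT_DOWN the write side buffers the image and the final chunk
 * triggers do_fm_api(); for FI_ACT_UP/FI_ACT_UPSZ the write side only
 * caches the header and the read side performs the command and returns the
 * result.
 */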
/* Callback for the completion of a VDA request. */
static void vda_complete_req(struct esas2r_adapter *a,
			     struct esas2r_request *rq)
{
	a->vda_command_done = 1;
	wake_up_interruptible(&a->vda_waiter);
}
/* Scatter/gather callback for VDA requests */
static u32 get_physaddr_vda(struct esas2r_sg_context *sgc, u64 *addr)
{
	struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
	int offset = (u8 *)sgc->cur_offset - (u8 *)a->vda_buffer;

	(*addr) = a->ppvda_buffer + offset;
	return VDA_MAX_BUFFER_SIZE - offset;
}
/* Handle a call to read a VDA command. */
int esas2r_read_vda(struct esas2r_adapter *a, char *buf, long off, int count)
{
	if (!a->vda_buffer)
		return -ENOMEM;

	if (off == 0) {
		struct esas2r_request *rq;
		struct atto_ioctl_vda *vi =
			(struct atto_ioctl_vda *)a->vda_buffer;
		struct esas2r_sg_context sgc;
		bool wait_for_completion;

		/*
		 * Presumably, someone has already written to the vda_buffer,
		 * and now they are reading the response, so now we will
		 * actually issue the request to the chip and reply.
		 */

		/* allocate a request */
		rq = esas2r_alloc_request(a);
		if (rq == NULL) {
			esas2r_debug("esas2r_read_vda: out of requests");
			return -EBUSY;
		}

		rq->comp_cb = vda_complete_req;

		sgc.first_req = rq;
		sgc.adapter = a;
		sgc.cur_offset = a->vda_buffer + VDA_BUFFER_HEADER_SZ;
		sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_vda;

		a->vda_command_done = 0;

		wait_for_completion =
			esas2r_process_vda_ioctl(a, vi, rq, &sgc);

		if (wait_for_completion) {
			/* now wait around for it to complete. */

			while (!a->vda_command_done)
				wait_event_interruptible(a->vda_waiter,
							 a->vda_command_done);
		}

		esas2r_free_request(a, (struct esas2r_request *)rq);
	}

	if (off > VDA_MAX_BUFFER_SIZE)
		return 0;

	if (count + off > VDA_MAX_BUFFER_SIZE)
		count = VDA_MAX_BUFFER_SIZE - off;

	if (count < 0)
		return 0;

	memcpy(buf, a->vda_buffer + off, count);

	return count;
}
/* Handle a call to write a VDA command. */
int esas2r_write_vda(struct esas2r_adapter *a, const char *buf, long off,
		     int count)
{
	/*
	 * allocate memory for it, if not already done.  once allocated,
	 * we will keep it around until the driver is unloaded.
	 */

	if (!a->vda_buffer) {
		dma_addr_t dma_addr;
		a->vda_buffer = (u8 *)dma_alloc_coherent(&a->pcid->dev,
							 (size_t)VDA_MAX_BUFFER_SIZE,
							 &dma_addr,
							 GFP_KERNEL);

		a->ppvda_buffer = dma_addr;
	}

	if (!a->vda_buffer)
		return -ENOMEM;

	if (off > VDA_MAX_BUFFER_SIZE)
		return 0;

	if (count + off > VDA_MAX_BUFFER_SIZE)
		count = VDA_MAX_BUFFER_SIZE - off;

	if (count < 0)
		return 0;

	memcpy(a->vda_buffer + off, buf, count);

	return count;
}
/* Callback for the completion of an FS_API request. */
static void fs_api_complete_req(struct esas2r_adapter *a,
				struct esas2r_request *rq)
{
	a->fs_api_command_done = 1;

	wake_up_interruptible(&a->fs_api_waiter);
}
/* Scatter/gather callback for FS_API requests */
static u32 get_physaddr_fs_api(struct esas2r_sg_context *sgc, u64 *addr)
{
	struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
	struct esas2r_ioctl_fs *fs =
		(struct esas2r_ioctl_fs *)a->fs_api_buffer;
	u32 offset = (u8 *)sgc->cur_offset - (u8 *)fs;

	(*addr) = a->ppfs_api_buffer + offset;

	return a->fs_api_buffer_size - offset;
}
/* Handle a call to read firmware via FS_API. */
int esas2r_read_fs(struct esas2r_adapter *a, char *buf, long off, int count)
{
	if (!a->fs_api_buffer)
		return -ENOMEM;

	if (off == 0) {
		struct esas2r_request *rq;
		struct esas2r_sg_context sgc;
		struct esas2r_ioctl_fs *fs =
			(struct esas2r_ioctl_fs *)a->fs_api_buffer;

		/* If another flash request is already in progress, return. */
		if (mutex_lock_interruptible(&a->fs_api_mutex)) {
busy:
			fs->status = ATTO_STS_OUT_OF_RSRC;
			return -EBUSY;
		}

		/*
		 * Presumably, someone has already written to the
		 * fs_api_buffer, and now they are reading the response,
		 * so now we will actually issue the request to the
		 * chip and reply. Allocate a request
		 */

		rq = esas2r_alloc_request(a);
		if (rq == NULL) {
			esas2r_debug("esas2r_read_fs: out of requests");
			mutex_unlock(&a->fs_api_mutex);
			goto busy;
		}

		rq->comp_cb = fs_api_complete_req;

		/* Set up the SGCONTEXT to build the s/g table */

		sgc.cur_offset = fs->data;
		sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_fs_api;

		a->fs_api_command_done = 0;

		if (!esas2r_process_fs_ioctl(a, fs, rq, &sgc)) {
			if (fs->status == ATTO_STS_OUT_OF_RSRC)
				count = -EBUSY;

			goto dont_wait;
		}

		/* Now wait around for it to complete. */

		while (!a->fs_api_command_done)
			wait_event_interruptible(a->fs_api_waiter,
						 a->fs_api_command_done);

dont_wait:
		/* Free the request and keep going */
		mutex_unlock(&a->fs_api_mutex);
		esas2r_free_request(a, (struct esas2r_request *)rq);

		/* Pick up possible error code from above */
		if (count < 0)
			return count;
	}

	if (off > a->fs_api_buffer_size)
		return 0;

	if (count + off > a->fs_api_buffer_size)
		count = a->fs_api_buffer_size - off;

	if (count < 0)
		return 0;

	memcpy(buf, a->fs_api_buffer + off, count);

	return count;
}
/* Handle a call to write firmware via FS_API. */
int esas2r_write_fs(struct esas2r_adapter *a, const char *buf, long off,
		    int count)
{
	if (off == 0) {
		struct esas2r_ioctl_fs *fs = (struct esas2r_ioctl_fs *)buf;
		u32 length = fs->command.length + offsetof(
			struct esas2r_ioctl_fs,
			data);

		/*
		 * Special case, for BEGIN commands, the length field
		 * is lying to us, so just get enough for the header.
		 */

		if (fs->command.command == ESAS2R_FS_CMD_BEGINW)
			length = offsetof(struct esas2r_ioctl_fs, data);

		/*
		 * Beginning a command.  We assume we'll get at least
		 * enough in the first write so we can look at the
		 * header and see how much we need to alloc.
		 */

		if (count < offsetof(struct esas2r_ioctl_fs, data))
			return -EINVAL;

		/* Allocate a buffer or use the existing buffer. */
		if (a->fs_api_buffer) {
			if (a->fs_api_buffer_size < length) {
				/* Free too-small buffer and get a new one */
				dma_free_coherent(&a->pcid->dev,
						  (size_t)a->fs_api_buffer_size,
						  a->fs_api_buffer,
						  (dma_addr_t)a->ppfs_api_buffer);

				goto re_allocate_buffer;
			}
		} else {
re_allocate_buffer:
			a->fs_api_buffer_size = length;

			a->fs_api_buffer = (u8 *)dma_alloc_coherent(
				&a->pcid->dev,
				(size_t)a->fs_api_buffer_size,
				(dma_addr_t *)&a->ppfs_api_buffer,
				GFP_KERNEL);
		}
	}

	if (!a->fs_api_buffer)
		return -ENOMEM;

	if (off > a->fs_api_buffer_size)
		return 0;

	if (count + off > a->fs_api_buffer_size)
		count = a->fs_api_buffer_size - off;

	if (count < 0)
		return 0;

	memcpy(a->fs_api_buffer + off, buf, count);

	return count;
}