/*
 * linux/drivers/scsi/esas2r/esas2r_ioctl.c
 *      For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
 *
 * Copyright (c) 2001-2013 ATTO Technology, Inc.
 * (mailto:linuxdrivers@attotech.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.
 *
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
 * USA.
 */

#include "esas2r.h"
/*
 * Buffered ioctl handlers.  A buffered ioctl is one which requires that we
 * allocate a DMA-able memory area to communicate with the firmware.  In
 * order to prevent continually allocating and freeing consistent memory,
 * we will allocate a global buffer the first time we need it and re-use
 * it for subsequent ioctl calls that require it.
 */
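
/*
 * Illustrative sketch (not part of the driver): the reuse scheme described
 * above amounts to a grow-only lazy allocation.  Assuming the same DMA API
 * used below, the caller-side shape is roughly:
 *
 *	if (buf && buf_size < needed)	// too small: free and reallocate
 *		dma_free_coherent(dev, buf_size, buf, buf_addr);
 *	if (!buf)
 *		buf = dma_alloc_coherent(dev, needed, &buf_addr, GFP_KERNEL);
 *	// buf is then reused by every subsequent buffered ioctl
 *
 * handle_buffered_ioctl() below implements exactly this, serialized by
 * buffered_ioctl_semaphore.
 */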
u8 *esas2r_buffered_ioctl;
dma_addr_t esas2r_buffered_ioctl_addr;
u32 esas2r_buffered_ioctl_size;
struct pci_dev *esas2r_buffered_ioctl_pcid;

static DEFINE_SEMAPHORE(buffered_ioctl_semaphore);
typedef int (*BUFFERED_IOCTL_CALLBACK)(struct esas2r_adapter *,
				       struct esas2r_request *,
				       struct esas2r_sg_context *,
				       void *);
typedef void (*BUFFERED_IOCTL_DONE_CALLBACK)(struct esas2r_adapter *,
					     struct esas2r_request *, void *);
struct esas2r_buffered_ioctl {
	struct esas2r_adapter *a;
	void *ioctl;
	u32 length;
	u32 control_code;
	u32 offset;
	BUFFERED_IOCTL_CALLBACK callback;
	void *context;
	BUFFERED_IOCTL_DONE_CALLBACK done_callback;
	void *done_context;
};
static void complete_fm_api_req(struct esas2r_adapter *a,
				struct esas2r_request *rq)
{
	a->fm_api_command_done = 1;
	wake_up_interruptible(&a->fm_api_waiter);
}
/* Callbacks for building scatter/gather lists for FM API requests */
static u32 get_physaddr_fm_api(struct esas2r_sg_context *sgc, u64 *addr)
{
	struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
	int offset = sgc->cur_offset - a->save_offset;

	(*addr) = a->firmware.phys + offset;
	return a->firmware.orig_len - offset;
}
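
/*
 * Note on the PGETPHYSADDR contract (a sketch, inferred from the callbacks
 * in this file): given the current offset into a virtually contiguous
 * buffer, the callback stores the matching bus address in *addr and returns
 * how many contiguous bytes are valid at that address.  The S/G list
 * builder can therefore consume a region in chunks, conceptually:
 *
 *	while (len) {
 *		u64 phys;
 *		u32 avail = sgc->get_phys_addr(sgc, &phys);
 *		u32 chunk = (len < avail) ? len : avail;
 *		// emit an S/G element of size 'chunk' at 'phys'
 *		sgc->cur_offset += chunk;
 *		len -= chunk;
 *	}
 */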
static u32 get_physaddr_fm_api_header(struct esas2r_sg_context *sgc, u64 *addr)
{
	struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
	int offset = sgc->cur_offset - a->save_offset;

	(*addr) = a->firmware.header_buff_phys + offset;
	return sizeof(struct esas2r_flash_img) - offset;
}
/* Handle EXPRESS_IOCTL_RW_FIRMWARE ioctl with img_type = FW_IMG_FM_API. */
static void do_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
{
	struct esas2r_request *rq;

	if (mutex_lock_interruptible(&a->fm_api_mutex)) {
		fi->status = FI_STAT_BUSY;
		return;
	}

	rq = esas2r_alloc_request(a);
	if (rq == NULL) {
		fi->status = FI_STAT_BUSY;
		goto free_sem;
	}

	if (fi == &a->firmware.header) {
		a->firmware.header_buff =
			dma_alloc_coherent(&a->pcid->dev,
					   sizeof(struct esas2r_flash_img),
					   (dma_addr_t *)&a->firmware.
					   header_buff_phys,
					   GFP_KERNEL);

		if (a->firmware.header_buff == NULL) {
			esas2r_debug("failed to allocate header buffer!");
			fi->status = FI_STAT_BUSY;
			goto free_req;
		}

		memcpy(a->firmware.header_buff, fi,
		       sizeof(struct esas2r_flash_img));
		a->save_offset = a->firmware.header_buff;
		a->fm_api_sgc.get_phys_addr =
			(PGETPHYSADDR)get_physaddr_fm_api_header;
	} else {
		a->save_offset = (u8 *)fi;
		a->fm_api_sgc.get_phys_addr =
			(PGETPHYSADDR)get_physaddr_fm_api;
	}

	rq->comp_cb = complete_fm_api_req;
	a->fm_api_command_done = 0;
	a->fm_api_sgc.cur_offset = a->save_offset;

	if (!esas2r_fm_api(a, (struct esas2r_flash_img *)a->save_offset, rq,
			   &a->fm_api_sgc))
		goto all_done;

	/* Now wait around for it to complete. */
	while (!a->fm_api_command_done)
		wait_event_interruptible(a->fm_api_waiter,
					 a->fm_api_command_done);
all_done:
	if (fi == &a->firmware.header) {
		memcpy(fi, a->firmware.header_buff,
		       sizeof(struct esas2r_flash_img));

		dma_free_coherent(&a->pcid->dev,
				  (size_t)sizeof(struct esas2r_flash_img),
				  a->firmware.header_buff,
				  (dma_addr_t)a->firmware.header_buff_phys);
	}
free_req:
	esas2r_free_request(a, (struct esas2r_request *)rq);
free_sem:
	mutex_unlock(&a->fm_api_mutex);
}
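
/*
 * The wait pattern used throughout this file: a "command done" flag is
 * cleared before the request is issued, the request's completion callback
 * sets it and wakes the waiter, and the issuing thread blocks on
 * wait_event_interruptible().  A minimal sketch of the two halves, with
 * "xxx" standing in for the particular subsystem (fm_api, nvram, vda, ...):
 *
 *	// completion side (called from the request completion path)
 *	a->xxx_command_done = 1;
 *	wake_up_interruptible(&a->xxx_waiter);
 *
 *	// issuing side (process context)
 *	a->xxx_command_done = 0;
 *	... start the request ...
 *	while (!a->xxx_command_done)
 *		wait_event_interruptible(a->xxx_waiter,
 *					 a->xxx_command_done);
 *
 * The while loop re-checks the flag because the sleep can be interrupted
 * by a signal before the command actually completes.
 */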
static void complete_nvr_req(struct esas2r_adapter *a,
			     struct esas2r_request *rq)
{
	a->nvram_command_done = 1;
	wake_up_interruptible(&a->nvram_waiter);
}
/* Callback for building scatter/gather lists for buffered ioctls */
static u32 get_physaddr_buffered_ioctl(struct esas2r_sg_context *sgc,
				       u64 *addr)
{
	int offset = (u8 *)sgc->cur_offset - esas2r_buffered_ioctl;

	(*addr) = esas2r_buffered_ioctl_addr + offset;
	return esas2r_buffered_ioctl_size - offset;
}
static void complete_buffered_ioctl_req(struct esas2r_adapter *a,
					struct esas2r_request *rq)
{
	a->buffered_ioctl_done = 1;
	wake_up_interruptible(&a->buffered_ioctl_waiter);
}
static u8 handle_buffered_ioctl(struct esas2r_buffered_ioctl *bi)
{
	struct esas2r_adapter *a = bi->a;
	struct esas2r_request *rq;
	struct esas2r_sg_context sgc;
	u8 result = IOCTL_SUCCESS;

	if (down_interruptible(&buffered_ioctl_semaphore))
		return IOCTL_OUT_OF_RESOURCES;

	/* allocate a buffer or use the existing buffer. */
	if (esas2r_buffered_ioctl) {
		if (esas2r_buffered_ioctl_size < bi->length) {
			/* free the too-small buffer and get a new one */
			dma_free_coherent(&a->pcid->dev,
					  (size_t)esas2r_buffered_ioctl_size,
					  esas2r_buffered_ioctl,
					  esas2r_buffered_ioctl_addr);

			goto allocate_buffer;
		}
	} else {
allocate_buffer:
		esas2r_buffered_ioctl_size = bi->length;
		esas2r_buffered_ioctl_pcid = a->pcid;
		esas2r_buffered_ioctl =
			dma_alloc_coherent(&a->pcid->dev,
					   (size_t)esas2r_buffered_ioctl_size,
					   &esas2r_buffered_ioctl_addr,
					   GFP_KERNEL);
	}

	if (!esas2r_buffered_ioctl) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "could not allocate %d bytes of consistent memory "
			   "for a buffered ioctl!",
			   bi->length);

		esas2r_debug("buffered ioctl alloc failure");
		result = IOCTL_OUT_OF_RESOURCES;
		goto exit_cleanly;
	}

	memcpy(esas2r_buffered_ioctl, bi->ioctl, bi->length);

	rq = esas2r_alloc_request(a);
	if (rq == NULL) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "could not allocate an internal request");

		result = IOCTL_OUT_OF_RESOURCES;
		esas2r_debug("buffered ioctl - no requests");
		goto exit_cleanly;
	}

	a->buffered_ioctl_done = 0;
	rq->comp_cb = complete_buffered_ioctl_req;
	sgc.cur_offset = esas2r_buffered_ioctl + bi->offset;
	sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_buffered_ioctl;
	sgc.length = esas2r_buffered_ioctl_size;

	if (!(*bi->callback)(a, rq, &sgc, bi->context)) {
		/* completed immediately, no need to wait */
		a->buffered_ioctl_done = 0;
		goto free_andexit_cleanly;
	}

	/* now wait around for it to complete. */
	while (!a->buffered_ioctl_done)
		wait_event_interruptible(a->buffered_ioctl_waiter,
					 a->buffered_ioctl_done);

free_andexit_cleanly:
	if (result == IOCTL_SUCCESS && bi->done_callback)
		(*bi->done_callback)(a, rq, bi->done_context);

	esas2r_free_request(a, rq);

exit_cleanly:
	if (result == IOCTL_SUCCESS)
		memcpy(bi->ioctl, esas2r_buffered_ioctl, bi->length);

	up(&buffered_ioctl_semaphore);
	return result;
}
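
/*
 * Callers fill in a struct esas2r_buffered_ioctl on the stack and hand it
 * to handle_buffered_ioctl(); handle_smp_ioctl() below is the smallest
 * example of the pattern.  Only 'a', 'ioctl', 'length' and 'callback' are
 * strictly required (the memset zeroes the rest); 'done_callback', when
 * set, runs after completion while the response is still in the shared
 * DMA buffer, before it is copied back into the caller's 'ioctl' area.
 */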
/* SMP ioctl support */
static int smp_ioctl_callback(struct esas2r_adapter *a,
			      struct esas2r_request *rq,
			      struct esas2r_sg_context *sgc, void *context)
{
	struct atto_ioctl_smp *si =
		(struct atto_ioctl_smp *)esas2r_buffered_ioctl;

	esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
	esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_SMP);

	if (!esas2r_build_sg_list(a, rq, sgc)) {
		si->status = ATTO_STS_OUT_OF_RSRC;
		return false;
	}

	esas2r_start_request(a, rq);
	return true;
}

static u8 handle_smp_ioctl(struct esas2r_adapter *a, struct atto_ioctl_smp *si)
{
	struct esas2r_buffered_ioctl bi;

	memset(&bi, 0, sizeof(bi));

	bi.a = a;
	bi.ioctl = si;
	bi.length = sizeof(struct atto_ioctl_smp)
		    + le32_to_cpu(si->req_length)
		    + le32_to_cpu(si->rsp_length);
	bi.offset = 0;
	bi.callback = smp_ioctl_callback;
	return handle_buffered_ioctl(&bi);
}
/* CSMI ioctl support */
static void esas2r_csmi_ioctl_tunnel_comp_cb(struct esas2r_adapter *a,
					     struct esas2r_request *rq)
{
	rq->target_id = le16_to_cpu(rq->func_rsp.ioctl_rsp.csmi.target_id);
	rq->vrq->scsi.flags |= cpu_to_le32(rq->func_rsp.ioctl_rsp.csmi.lun);

	/* Now call the original completion callback. */
	(*rq->aux_req_cb)(a, rq);
}
/* Tunnel a CSMI IOCTL to the back end driver for processing. */
static bool csmi_ioctl_tunnel(struct esas2r_adapter *a,
			      union atto_ioctl_csmi *ci,
			      struct esas2r_request *rq,
			      struct esas2r_sg_context *sgc,
			      u32 ctrl_code,
			      u16 target_id)
{
	struct atto_vda_ioctl_req *ioctl = &rq->vrq->ioctl;

	if (test_bit(AF_DEGRADED_MODE, &a->flags))
		return false;

	esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
	esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_CSMI);
	ioctl->csmi.ctrl_code = cpu_to_le32(ctrl_code);
	ioctl->csmi.target_id = cpu_to_le16(target_id);
	ioctl->csmi.lun = (u8)le32_to_cpu(rq->vrq->scsi.flags);

	/*
	 * Always usurp the completion callback since the interrupt callback
	 * mechanism may be used.
	 */
	rq->aux_req_cx = ci;
	rq->aux_req_cb = rq->comp_cb;
	rq->comp_cb = esas2r_csmi_ioctl_tunnel_comp_cb;

	if (!esas2r_build_sg_list(a, rq, sgc))
		return false;

	esas2r_start_request(a, rq);
	return true;
}
static bool check_lun(struct scsi_lun lun)
{
	bool result;

	result = ((lun.scsi_lun[7] == 0) &&
		  (lun.scsi_lun[6] == 0) &&
		  (lun.scsi_lun[5] == 0) &&
		  (lun.scsi_lun[4] == 0) &&
		  (lun.scsi_lun[3] == 0) &&
		  (lun.scsi_lun[2] == 0) &&
		  /* Byte 1 is intentionally skipped */
		  (lun.scsi_lun[0] == 0));

	return result;
}
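
/*
 * check_lun() accepts only single-level LUN addressing: every byte of the
 * 8-byte SCSI LUN structure must be zero except byte 1, which carries the
 * actual LUN number.  For example, LUN 5 is the array
 * { 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, which is why the
 * callers below copy scsi_lun[1] into the single-byte 'lun' fields.
 */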
static int csmi_ioctl_callback(struct esas2r_adapter *a,
			       struct esas2r_request *rq,
			       struct esas2r_sg_context *sgc, void *context)
{
	struct atto_csmi *ci = (struct atto_csmi *)context;
	union atto_ioctl_csmi *ioctl_csmi =
		(union atto_ioctl_csmi *)esas2r_buffered_ioctl;
	u8 path = 0;
	u8 tid = 0;
	u8 lun = 0;
	u32 sts = CSMI_STS_SUCCESS;
	struct esas2r_target *t;
	unsigned long flags;

	if (ci->control_code == CSMI_CC_GET_DEV_ADDR) {
		struct atto_csmi_get_dev_addr *gda = &ci->data.dev_addr;

		path = gda->path_id;
		tid = gda->target_id;
		lun = gda->lun;
	} else if (ci->control_code == CSMI_CC_TASK_MGT) {
		struct atto_csmi_task_mgmt *tm = &ci->data.tsk_mgt;

		path = tm->path_id;
		tid = tm->target_id;
		lun = tm->lun;
	}

	if (path > 0) {
		rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(
			CSMI_STS_INV_PARAM);
		return false;
	}

	rq->target_id = tid;
	rq->vrq->scsi.flags |= cpu_to_le32(lun);

	switch (ci->control_code) {
	case CSMI_CC_GET_DRVR_INFO:
	{
		struct atto_csmi_get_driver_info *gdi = &ioctl_csmi->drvr_info;

		strcpy(gdi->description, esas2r_get_model_name(a));
		gdi->csmi_major_rev = CSMI_MAJOR_REV;
		gdi->csmi_minor_rev = CSMI_MINOR_REV;
		break;
	}

	case CSMI_CC_GET_CNTLR_CFG:
	{
		struct atto_csmi_get_cntlr_cfg *gcc = &ioctl_csmi->cntlr_cfg;

		gcc->base_io_addr = 0;
		pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_2,
				      &gcc->base_memaddr_lo);
		pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_3,
				      &gcc->base_memaddr_hi);
		gcc->board_id = MAKEDWORD(a->pcid->subsystem_device,
					  a->pcid->subsystem_vendor);
		gcc->slot_num = CSMI_SLOT_NUM_UNKNOWN;
		gcc->cntlr_class = CSMI_CNTLR_CLASS_HBA;
		gcc->io_bus_type = CSMI_BUS_TYPE_PCI;
		gcc->pci_addr.bus_num = a->pcid->bus->number;
		gcc->pci_addr.device_num = PCI_SLOT(a->pcid->devfn);
		gcc->pci_addr.function_num = PCI_FUNC(a->pcid->devfn);

		memset(gcc->serial_num, 0, sizeof(gcc->serial_num));

		gcc->major_rev = LOBYTE(LOWORD(a->fw_version));
		gcc->minor_rev = HIBYTE(LOWORD(a->fw_version));
		gcc->build_rev = LOBYTE(HIWORD(a->fw_version));
		gcc->release_rev = HIBYTE(HIWORD(a->fw_version));
		gcc->bios_major_rev = HIBYTE(HIWORD(a->flash_ver));
		gcc->bios_minor_rev = LOBYTE(HIWORD(a->flash_ver));
		gcc->bios_build_rev = LOWORD(a->flash_ver);

		if (test_bit(AF2_THUNDERLINK, &a->flags2))
			gcc->cntlr_flags = CSMI_CNTLRF_SAS_HBA
					   | CSMI_CNTLRF_SATA_HBA;
		else
			gcc->cntlr_flags = CSMI_CNTLRF_SAS_RAID
					   | CSMI_CNTLRF_SATA_RAID;

		gcc->rrom_major_rev = 0;
		gcc->rrom_minor_rev = 0;
		gcc->rrom_build_rev = 0;
		gcc->rrom_release_rev = 0;
		gcc->rrom_biosmajor_rev = 0;
		gcc->rrom_biosminor_rev = 0;
		gcc->rrom_biosbuild_rev = 0;
		gcc->rrom_biosrelease_rev = 0;
		break;
	}

	case CSMI_CC_GET_CNTLR_STS:
	{
		struct atto_csmi_get_cntlr_sts *gcs = &ioctl_csmi->cntlr_sts;

		if (test_bit(AF_DEGRADED_MODE, &a->flags))
			gcs->status = CSMI_CNTLR_STS_FAILED;
		else
			gcs->status = CSMI_CNTLR_STS_GOOD;

		gcs->offline_reason = CSMI_OFFLINE_NO_REASON;
		break;
	}

	case CSMI_CC_FW_DOWNLOAD:
	case CSMI_CC_GET_RAID_INFO:
	case CSMI_CC_GET_RAID_CFG:

		sts = CSMI_STS_BAD_CTRL_CODE;
		break;

	case CSMI_CC_SMP_PASSTHRU:
	case CSMI_CC_SSP_PASSTHRU:
	case CSMI_CC_STP_PASSTHRU:
	case CSMI_CC_GET_PHY_INFO:
	case CSMI_CC_SET_PHY_INFO:
	case CSMI_CC_GET_LINK_ERRORS:
	case CSMI_CC_GET_SATA_SIG:
	case CSMI_CC_GET_CONN_INFO:
	case CSMI_CC_PHY_CTRL:

		if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc,
				       ci->control_code,
				       ESAS2R_TARG_ID_INV)) {
			sts = CSMI_STS_FAILED;
			break;
		}

		return true;

	case CSMI_CC_GET_SCSI_ADDR:
	{
		struct atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr;

		struct scsi_lun lun;

		memcpy(&lun, gsa->sas_lun, sizeof(struct scsi_lun));

		if (!check_lun(lun)) {
			sts = CSMI_STS_NO_SCSI_ADDR;
			break;
		}

		/* make sure the device is present */
		spin_lock_irqsave(&a->mem_lock, flags);
		t = esas2r_targ_db_find_by_sas_addr(a, (u64 *)gsa->sas_addr);
		spin_unlock_irqrestore(&a->mem_lock, flags);

		if (t == NULL) {
			sts = CSMI_STS_NO_SCSI_ADDR;
			break;
		}

		gsa->host_index = 0xFF;
		gsa->lun = gsa->sas_lun[1];
		rq->target_id = esas2r_targ_get_id(t, a);
		break;
	}

	case CSMI_CC_GET_DEV_ADDR:
	{
		struct atto_csmi_get_dev_addr *gda = &ioctl_csmi->dev_addr;

		/* make sure the target is present */
		t = a->targetdb + rq->target_id;

		if (t >= a->targetdb_end
		    || t->target_state != TS_PRESENT
		    || t->sas_addr == 0) {
			sts = CSMI_STS_NO_DEV_ADDR;
			break;
		}

		/* fill in the result */
		*(u64 *)gda->sas_addr = t->sas_addr;
		memset(gda->sas_lun, 0, sizeof(gda->sas_lun));
		gda->sas_lun[1] = (u8)le32_to_cpu(rq->vrq->scsi.flags);
		break;
	}

	case CSMI_CC_TASK_MGT:

		/* make sure the target is present */
		t = a->targetdb + rq->target_id;

		if (t >= a->targetdb_end
		    || t->target_state != TS_PRESENT
		    || !(t->flags & TF_PASS_THRU)) {
			sts = CSMI_STS_NO_DEV_ADDR;
			break;
		}

		if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc,
				       ci->control_code,
				       t->phys_targ_id)) {
			sts = CSMI_STS_FAILED;
			break;
		}

		return true;

	default:

		sts = CSMI_STS_BAD_CTRL_CODE;
		break;
	}

	rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(sts);

	return false;
}
static void csmi_ioctl_done_callback(struct esas2r_adapter *a,
				     struct esas2r_request *rq, void *context)
{
	struct atto_csmi *ci = (struct atto_csmi *)context;
	union atto_ioctl_csmi *ioctl_csmi =
		(union atto_ioctl_csmi *)esas2r_buffered_ioctl;

	switch (ci->control_code) {
	case CSMI_CC_GET_DRVR_INFO:
	{
		struct atto_csmi_get_driver_info *gdi =
			&ioctl_csmi->drvr_info;

		strcpy(gdi->name, ESAS2R_VERSION_STR);

		gdi->major_rev = ESAS2R_MAJOR_REV;
		gdi->minor_rev = ESAS2R_MINOR_REV;
		gdi->build_rev = 0;
		gdi->release_rev = 0;
		break;
	}

	case CSMI_CC_GET_SCSI_ADDR:
	{
		struct atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr;

		if (le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status) ==
		    CSMI_STS_SUCCESS) {
			gsa->target_id = rq->target_id;
			gsa->path_id = 0;
		}

		break;
	}
	}

	ci->status = le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status);
}
static u8 handle_csmi_ioctl(struct esas2r_adapter *a, struct atto_csmi *ci)
{
	struct esas2r_buffered_ioctl bi;

	memset(&bi, 0, sizeof(bi));

	bi.a = a;
	bi.ioctl = &ci->data;
	bi.length = sizeof(union atto_ioctl_csmi);
	bi.offset = 0;
	bi.callback = csmi_ioctl_callback;
	bi.context = ci;
	bi.done_callback = csmi_ioctl_done_callback;
	bi.done_context = ci;

	return handle_buffered_ioctl(&bi);
}
/* ATTO HBA ioctl support */

/* Tunnel an ATTO HBA IOCTL to the back end driver for processing. */
static bool hba_ioctl_tunnel(struct esas2r_adapter *a,
			     struct atto_ioctl *hi,
			     struct esas2r_request *rq,
			     struct esas2r_sg_context *sgc)
{
	esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);

	esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_HBA);

	if (!esas2r_build_sg_list(a, rq, sgc)) {
		hi->status = ATTO_STS_OUT_OF_RSRC;
		return false;
	}

	esas2r_start_request(a, rq);

	return true;
}
static void scsi_passthru_comp_cb(struct esas2r_adapter *a,
				  struct esas2r_request *rq)
{
	struct atto_ioctl *hi = (struct atto_ioctl *)rq->aux_req_cx;
	struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru;
	u8 sts = ATTO_SPT_RS_FAILED;

	spt->scsi_status = rq->func_rsp.scsi_rsp.scsi_stat;
	spt->sense_length = rq->sense_len;
	spt->residual_length =
		le32_to_cpu(rq->func_rsp.scsi_rsp.residual_length);

	switch (rq->req_stat) {
	case RS_SUCCESS:
	case RS_SCSI_ERROR:
		sts = ATTO_SPT_RS_SUCCESS;
		break;
	case RS_UNDERRUN:
		sts = ATTO_SPT_RS_UNDERRUN;
		break;
	case RS_OVERRUN:
		sts = ATTO_SPT_RS_OVERRUN;
		break;
	case RS_SEL:
	case RS_SEL2:
		sts = ATTO_SPT_RS_NO_DEVICE;
		break;
	case RS_NO_LUN:
		sts = ATTO_SPT_RS_NO_LUN;
		break;
	case RS_TIMEOUT:
		sts = ATTO_SPT_RS_TIMEOUT;
		break;
	case RS_DEGRADED:
		sts = ATTO_SPT_RS_DEGRADED;
		break;
	case RS_BUSY:
		sts = ATTO_SPT_RS_BUSY;
		break;
	case RS_ABORTED:
		sts = ATTO_SPT_RS_ABORTED;
		break;
	case RS_RESET:
		sts = ATTO_SPT_RS_BUS_RESET;
		break;
	}

	spt->req_status = sts;

	/* Update the target ID to the next one present. */
	spt->target_id =
		esas2r_targ_db_find_next_present(a, (u16)spt->target_id);

	/* Done, call the completion callback. */
	(*rq->aux_req_cb)(a, rq);
}
static int hba_ioctl_callback(struct esas2r_adapter *a,
			      struct esas2r_request *rq,
			      struct esas2r_sg_context *sgc,
			      void *context)
{
	struct atto_ioctl *hi = (struct atto_ioctl *)esas2r_buffered_ioctl;

	hi->status = ATTO_STS_SUCCESS;

	switch (hi->function) {
	case ATTO_FUNC_GET_ADAP_INFO:
	{
		u8 *class_code = (u8 *)&a->pcid->class;

		struct atto_hba_get_adapter_info *gai =
			&hi->data.get_adap_info;
		int pcie_cap_reg;

		if (hi->flags & HBAF_TUNNEL) {
			hi->status = ATTO_STS_UNSUPPORTED;
			break;
		}

		if (hi->version > ATTO_VER_GET_ADAP_INFO0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_GET_ADAP_INFO0;
			break;
		}

		memset(gai, 0, sizeof(*gai));

		gai->pci.vendor_id = a->pcid->vendor;
		gai->pci.device_id = a->pcid->device;
		gai->pci.ss_vendor_id = a->pcid->subsystem_vendor;
		gai->pci.ss_device_id = a->pcid->subsystem_device;
		gai->pci.class_code[0] = class_code[0];
		gai->pci.class_code[1] = class_code[1];
		gai->pci.class_code[2] = class_code[2];
		gai->pci.rev_id = a->pcid->revision;
		gai->pci.bus_num = a->pcid->bus->number;
		gai->pci.dev_num = PCI_SLOT(a->pcid->devfn);
		gai->pci.func_num = PCI_FUNC(a->pcid->devfn);

		pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
		if (pcie_cap_reg) {
			u16 stat;
			u32 caps;

			pci_read_config_word(a->pcid,
					     pcie_cap_reg + PCI_EXP_LNKSTA,
					     &stat);
			pci_read_config_dword(a->pcid,
					      pcie_cap_reg + PCI_EXP_LNKCAP,
					      &caps);

			gai->pci.link_speed_curr =
				(u8)(stat & PCI_EXP_LNKSTA_CLS);
			gai->pci.link_speed_max =
				(u8)(caps & PCI_EXP_LNKCAP_SLS);
			gai->pci.link_width_curr =
				(u8)((stat & PCI_EXP_LNKSTA_NLW)
				     >> PCI_EXP_LNKSTA_NLW_SHIFT);
			gai->pci.link_width_max =
				(u8)((caps & PCI_EXP_LNKCAP_MLW)
				     >> 4);
		}

		gai->pci.msi_vector_cnt = 1;

		if (a->pcid->msix_enabled)
			gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSIX;
		else if (a->pcid->msi_enabled)
			gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSI;
		else
			gai->pci.interrupt_mode = ATTO_GAI_PCIIM_LEGACY;

		gai->adap_type = ATTO_GAI_AT_ESASRAID2;

		if (test_bit(AF2_THUNDERLINK, &a->flags2))
			gai->adap_type = ATTO_GAI_AT_TLSASHBA;

		if (test_bit(AF_DEGRADED_MODE, &a->flags))
			gai->adap_flags |= ATTO_GAI_AF_DEGRADED;

		gai->adap_flags |= ATTO_GAI_AF_SPT_SUPP |
				   ATTO_GAI_AF_DEVADDR_SUPP;

		if (a->pcid->subsystem_device == ATTO_ESAS_R60F
		    || a->pcid->subsystem_device == ATTO_ESAS_R608
		    || a->pcid->subsystem_device == ATTO_ESAS_R644
		    || a->pcid->subsystem_device == ATTO_TSSC_3808E)
			gai->adap_flags |= ATTO_GAI_AF_VIRT_SES;

		gai->num_ports = ESAS2R_NUM_PHYS;
		gai->num_phys = ESAS2R_NUM_PHYS;

		strcpy(gai->firmware_rev, a->fw_rev);
		strcpy(gai->flash_rev, a->flash_rev);
		strcpy(gai->model_name_short, esas2r_get_model_name_short(a));
		strcpy(gai->model_name, esas2r_get_model_name(a));

		gai->num_targets = ESAS2R_MAX_TARGETS;

		gai->num_busses = 1;
		gai->num_targsper_bus = gai->num_targets;
		gai->num_lunsper_targ = 256;

		if (a->pcid->subsystem_device == ATTO_ESAS_R6F0
		    || a->pcid->subsystem_device == ATTO_ESAS_R60F)
			gai->num_connectors = 4;
		else
			gai->num_connectors = 2;

		gai->adap_flags2 |= ATTO_GAI_AF2_ADAP_CTRL_SUPP;

		gai->num_targets_backend = a->num_targets_backend;

		gai->tunnel_flags = a->ioctl_tunnel
				    & (ATTO_GAI_TF_MEM_RW
				       | ATTO_GAI_TF_TRACE
				       | ATTO_GAI_TF_SCSI_PASS_THRU
				       | ATTO_GAI_TF_GET_DEV_ADDR
				       | ATTO_GAI_TF_PHY_CTRL
				       | ATTO_GAI_TF_CONN_CTRL
				       | ATTO_GAI_TF_GET_DEV_INFO);
		break;
	}

	case ATTO_FUNC_GET_ADAP_ADDR:
	{
		struct atto_hba_get_adapter_address *gaa =
			&hi->data.get_adap_addr;

		if (hi->flags & HBAF_TUNNEL) {
			hi->status = ATTO_STS_UNSUPPORTED;
			break;
		}

		if (hi->version > ATTO_VER_GET_ADAP_ADDR0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_GET_ADAP_ADDR0;
		} else if (gaa->addr_type == ATTO_GAA_AT_PORT
			   || gaa->addr_type == ATTO_GAA_AT_NODE) {
			if (gaa->addr_type == ATTO_GAA_AT_PORT
			    && gaa->port_id >= ESAS2R_NUM_PHYS) {
				hi->status = ATTO_STS_NOT_APPL;
			} else {
				memcpy((u64 *)gaa->address,
				       &a->nvram->sas_addr[0], sizeof(u64));
				gaa->addr_len = sizeof(u64);
			}
		} else {
			hi->status = ATTO_STS_INV_PARAM;
		}

		break;
	}

	case ATTO_FUNC_MEM_RW:
	{
		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		hi->status = ATTO_STS_UNSUPPORTED;

		break;
	}

	case ATTO_FUNC_TRACE:
	{
		struct atto_hba_trace *trc = &hi->data.trace;

		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		if (hi->version > ATTO_VER_TRACE1) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_TRACE1;
			break;
		}

		if (trc->trace_type == ATTO_TRC_TT_FWCOREDUMP
		    && hi->version >= ATTO_VER_TRACE1) {
			if (trc->trace_func == ATTO_TRC_TF_UPLOAD) {
				u32 len = hi->data_length;
				u32 offset = trc->current_offset;
				u32 total_len = ESAS2R_FWCOREDUMP_SZ;

				/* Size is zero if a core dump isn't present */
				if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2))
					total_len = 0;

				if (len > total_len)
					len = total_len;

				if (offset >= total_len
				    || offset + len > total_len
				    || len == 0) {
					hi->status = ATTO_STS_INV_PARAM;
					break;
				}

				memcpy(trc + 1,
				       a->fw_coredump_buff + offset,
				       len);

				hi->data_length = len;
			} else if (trc->trace_func == ATTO_TRC_TF_RESET) {
				memset(a->fw_coredump_buff, 0,
				       ESAS2R_FWCOREDUMP_SZ);

				clear_bit(AF2_COREDUMP_SAVED, &a->flags2);
			} else if (trc->trace_func != ATTO_TRC_TF_GET_INFO) {
				hi->status = ATTO_STS_UNSUPPORTED;
				break;
			}

			/* Always return all the info we can. */
			trc->trace_mask = 0;
			trc->current_offset = 0;
			trc->total_length = ESAS2R_FWCOREDUMP_SZ;

			/* Return zero length buffer if core dump not present */
			if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2))
				trc->total_length = 0;
		} else {
			hi->status = ATTO_STS_UNSUPPORTED;
		}

		break;
	}

	case ATTO_FUNC_SCSI_PASS_THRU:
	{
		struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru;
		struct scsi_lun lun;

		memcpy(&lun, spt->lun, sizeof(struct scsi_lun));

		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		if (hi->version > ATTO_VER_SCSI_PASS_THRU0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_SCSI_PASS_THRU0;
			break;
		}

		if (spt->target_id >= ESAS2R_MAX_TARGETS || !check_lun(lun)) {
			hi->status = ATTO_STS_INV_PARAM;
			break;
		}

		esas2r_sgc_init(sgc, a, rq, NULL);

		sgc->length = hi->data_length;
		sgc->cur_offset += offsetof(struct atto_ioctl, data.byte)
				   + sizeof(struct atto_hba_scsi_pass_thru);

		/* Finish request initialization */
		rq->target_id = (u16)spt->target_id;
		rq->vrq->scsi.flags |= cpu_to_le32(spt->lun[1]);
		memcpy(rq->vrq->scsi.cdb, spt->cdb, 16);
		rq->vrq->scsi.length = cpu_to_le32(hi->data_length);
		rq->sense_len = spt->sense_length;
		rq->sense_buf = (u8 *)spt->sense_data;
		/* NOTE: we ignore spt->timeout */

		/*
		 * always usurp the completion callback since the interrupt
		 * callback mechanism may be used.
		 */
		rq->aux_req_cx = hi;
		rq->aux_req_cb = rq->comp_cb;
		rq->comp_cb = scsi_passthru_comp_cb;

		if (spt->flags & ATTO_SPTF_DATA_IN) {
			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_RDD);
		} else if (spt->flags & ATTO_SPTF_DATA_OUT) {
			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD);
		} else {
			if (sgc->length) {
				hi->status = ATTO_STS_INV_PARAM;
				break;
			}
		}

		if (spt->flags & ATTO_SPTF_ORDERED_Q)
			rq->vrq->scsi.flags |=
				cpu_to_le32(FCP_CMND_TA_ORDRD_Q);
		else if (spt->flags & ATTO_SPTF_HEAD_OF_Q)
			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_TA_HEAD_Q);

		if (!esas2r_build_sg_list(a, rq, sgc)) {
			hi->status = ATTO_STS_OUT_OF_RSRC;
			break;
		}

		esas2r_start_request(a, rq);

		return true;
	}

	case ATTO_FUNC_GET_DEV_ADDR:
	{
		struct atto_hba_get_device_address *gda =
			&hi->data.get_dev_addr;
		struct esas2r_target *t;

		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		if (hi->version > ATTO_VER_GET_DEV_ADDR0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_GET_DEV_ADDR0;
			break;
		}

		if (gda->target_id >= ESAS2R_MAX_TARGETS) {
			hi->status = ATTO_STS_INV_PARAM;
			break;
		}

		t = a->targetdb + (u16)gda->target_id;

		if (t->target_state != TS_PRESENT) {
			hi->status = ATTO_STS_FAILED;
		} else if (gda->addr_type == ATTO_GDA_AT_PORT) {
			if (t->sas_addr == 0) {
				hi->status = ATTO_STS_UNSUPPORTED;
			} else {
				*(u64 *)gda->address = t->sas_addr;

				gda->addr_len = sizeof(u64);
			}
		} else if (gda->addr_type == ATTO_GDA_AT_NODE) {
			hi->status = ATTO_STS_NOT_APPL;
		} else {
			hi->status = ATTO_STS_INV_PARAM;
		}

		/* update the target ID to the next one present. */
		gda->target_id =
			esas2r_targ_db_find_next_present(a,
							 (u16)gda->target_id);
		break;
	}

	case ATTO_FUNC_PHY_CTRL:
	case ATTO_FUNC_CONN_CTRL:
	{
		if (hba_ioctl_tunnel(a, hi, rq, sgc))
			return true;

		break;
	}

	case ATTO_FUNC_ADAP_CTRL:
	{
		struct atto_hba_adap_ctrl *ac = &hi->data.adap_ctrl;

		if (hi->flags & HBAF_TUNNEL) {
			hi->status = ATTO_STS_UNSUPPORTED;
			break;
		}

		if (hi->version > ATTO_VER_ADAP_CTRL0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_ADAP_CTRL0;
			break;
		}

		if (ac->adap_func == ATTO_AC_AF_HARD_RST) {
			esas2r_reset_adapter(a);
		} else if (ac->adap_func != ATTO_AC_AF_GET_STATE) {
			hi->status = ATTO_STS_UNSUPPORTED;
			break;
		}

		if (test_bit(AF_CHPRST_NEEDED, &a->flags))
			ac->adap_state = ATTO_AC_AS_RST_SCHED;
		else if (test_bit(AF_CHPRST_PENDING, &a->flags))
			ac->adap_state = ATTO_AC_AS_RST_IN_PROG;
		else if (test_bit(AF_DISC_PENDING, &a->flags))
			ac->adap_state = ATTO_AC_AS_RST_DISC;
		else if (test_bit(AF_DISABLED, &a->flags))
			ac->adap_state = ATTO_AC_AS_DISABLED;
		else if (test_bit(AF_DEGRADED_MODE, &a->flags))
			ac->adap_state = ATTO_AC_AS_DEGRADED;
		else
			ac->adap_state = ATTO_AC_AS_OK;

		break;
	}

	case ATTO_FUNC_GET_DEV_INFO:
	{
		struct atto_hba_get_device_info *gdi = &hi->data.get_dev_info;
		struct esas2r_target *t;

		if (hi->flags & HBAF_TUNNEL) {
			if (hba_ioctl_tunnel(a, hi, rq, sgc))
				return true;

			break;
		}

		if (hi->version > ATTO_VER_GET_DEV_INFO0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_GET_DEV_INFO0;
			break;
		}

		if (gdi->target_id >= ESAS2R_MAX_TARGETS) {
			hi->status = ATTO_STS_INV_PARAM;
			break;
		}

		t = a->targetdb + (u16)gdi->target_id;

		/* update the target ID to the next one present. */
		gdi->target_id =
			esas2r_targ_db_find_next_present(a,
							 (u16)gdi->target_id);

		if (t->target_state != TS_PRESENT) {
			hi->status = ATTO_STS_FAILED;
			break;
		}

		hi->status = ATTO_STS_UNSUPPORTED;
		break;
	}

	default:

		hi->status = ATTO_STS_INV_FUNC;
		break;
	}

	return false;
}
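
/*
 * Note the return convention shared by the buffered-ioctl callbacks above:
 * returning true means a request was started and the caller must wait for
 * the completion callback; returning false means the ioctl finished (or
 * failed) inline and the status fields are already valid.
 * handle_buffered_ioctl() relies on this to decide whether to sleep.
 */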
static void hba_ioctl_done_callback(struct esas2r_adapter *a,
				    struct esas2r_request *rq, void *context)
{
	struct atto_ioctl *ioctl_hba =
		(struct atto_ioctl *)esas2r_buffered_ioctl;

	esas2r_debug("hba_ioctl_done_callback %d", a->index);

	if (ioctl_hba->function == ATTO_FUNC_GET_ADAP_INFO) {
		struct atto_hba_get_adapter_info *gai =
			&ioctl_hba->data.get_adap_info;

		esas2r_debug("ATTO_FUNC_GET_ADAP_INFO");

		gai->drvr_rev_major = ESAS2R_MAJOR_REV;
		gai->drvr_rev_minor = ESAS2R_MINOR_REV;

		strcpy(gai->drvr_rev_ascii, ESAS2R_VERSION_STR);
		strcpy(gai->drvr_name, ESAS2R_DRVR_NAME);

		gai->num_busses = 1;
		gai->num_targsper_bus = ESAS2R_MAX_ID + 1;
		gai->num_lunsper_targ = 1;
	}
}
u8 handle_hba_ioctl(struct esas2r_adapter *a,
		    struct atto_ioctl *ioctl_hba)
{
	struct esas2r_buffered_ioctl bi;

	memset(&bi, 0, sizeof(bi));

	bi.a = a;
	bi.ioctl = ioctl_hba;
	bi.length = sizeof(struct atto_ioctl) + ioctl_hba->data_length;
	bi.callback = hba_ioctl_callback;
	bi.context = NULL;
	bi.done_callback = hba_ioctl_done_callback;
	bi.done_context = NULL;
	bi.offset = 0;

	return handle_buffered_ioctl(&bi);
}
int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq,
			struct esas2r_sas_nvram *data)
{
	int result = 0;

	a->nvram_command_done = 0;
	rq->comp_cb = complete_nvr_req;

	if (esas2r_nvram_write(a, rq, data)) {
		/* now wait around for it to complete. */
		while (!a->nvram_command_done)
			wait_event_interruptible(a->nvram_waiter,
						 a->nvram_command_done);

		/* done, check the status. */
		if (rq->req_stat == RS_SUCCESS)
			result = 1;
	}
	return result;
}
/* This function only cares about ATTO-specific ioctls (atto_express_ioctl) */
int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg)
{
	struct atto_express_ioctl *ioctl = NULL;
	struct esas2r_adapter *a;
	struct esas2r_request *rq;
	u16 code;
	int err;

	esas2r_log(ESAS2R_LOG_DEBG, "ioctl (%p, %x, %p)", hostdata, cmd, arg);

	if ((arg == NULL)
	    || (cmd < EXPRESS_IOCTL_MIN)
	    || (cmd > EXPRESS_IOCTL_MAX))
		return -ENOTSUPP;

	ioctl = memdup_user(arg, sizeof(struct atto_express_ioctl));
	if (IS_ERR(ioctl)) {
		esas2r_log(ESAS2R_LOG_WARN,
			   "ioctl_handler access_ok failed for cmd %d, "
			   "address %p", cmd,
			   arg);
		return PTR_ERR(ioctl);
	}

	/* verify the signature */

	if (memcmp(ioctl->header.signature,
		   EXPRESS_IOCTL_SIGNATURE,
		   EXPRESS_IOCTL_SIGNATURE_SIZE) != 0) {
		esas2r_log(ESAS2R_LOG_WARN, "invalid signature");
		kfree(ioctl);

		return -ENOTSUPP;
	}

	/* assume success */

	ioctl->header.return_code = IOCTL_SUCCESS;
	err = 0;

	/*
	 * handle EXPRESS_IOCTL_GET_CHANNELS
	 * without paying attention to channel
	 */

	if (cmd == EXPRESS_IOCTL_GET_CHANNELS) {
		int i = 0, k = 0;

		ioctl->data.chanlist.num_channels = 0;

		while (i < MAX_ADAPTERS) {
			if (esas2r_adapters[i]) {
				ioctl->data.chanlist.num_channels++;
				ioctl->data.chanlist.channel[k] = i;
				k++;
			}
			i++;
		}

		goto ioctl_done;
	}

	/* get the channel */

	if (ioctl->header.channel == 0xFF) {
		a = (struct esas2r_adapter *)hostdata;
	} else {
		if (ioctl->header.channel >= MAX_ADAPTERS ||
		    esas2r_adapters[ioctl->header.channel] == NULL) {
			ioctl->header.return_code = IOCTL_BAD_CHANNEL;
			esas2r_log(ESAS2R_LOG_WARN, "bad channel value");
			kfree(ioctl);

			return -ENOTSUPP;
		}
		a = esas2r_adapters[ioctl->header.channel];
	}

	switch (cmd) {
	case EXPRESS_IOCTL_RW_FIRMWARE:

		if (ioctl->data.fwrw.img_type == FW_IMG_FM_API) {
			err = esas2r_write_fw(a,
					      (char *)ioctl->data.fwrw.image,
					      0,
					      sizeof(struct
						     atto_express_ioctl));

			if (err >= 0) {
				err = esas2r_read_fw(a,
						     (char *)ioctl->data.fwrw.
						     image,
						     0,
						     sizeof(struct
							    atto_express_ioctl));
			}
		} else if (ioctl->data.fwrw.img_type == FW_IMG_FS_API) {
			err = esas2r_write_fs(a,
					      (char *)ioctl->data.fwrw.image,
					      0,
					      sizeof(struct
						     atto_express_ioctl));

			if (err >= 0) {
				err = esas2r_read_fs(a,
						     (char *)ioctl->data.fwrw.
						     image,
						     0,
						     sizeof(struct
							    atto_express_ioctl));
			}
		} else {
			ioctl->header.return_code = IOCTL_BAD_FLASH_IMGTYPE;
		}

		break;

	case EXPRESS_IOCTL_READ_PARAMS:

		memcpy(ioctl->data.prw.data_buffer, a->nvram,
		       sizeof(struct esas2r_sas_nvram));
		ioctl->data.prw.code = 1;
		break;

	case EXPRESS_IOCTL_WRITE_PARAMS:

		rq = esas2r_alloc_request(a);
		if (rq == NULL) {
			kfree(ioctl);
			esas2r_log(ESAS2R_LOG_WARN,
				   "could not allocate an internal request");
			return -ENOMEM;
		}

		code = esas2r_write_params(a, rq,
					   (struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer);
		ioctl->data.prw.code = code;

		esas2r_free_request(a, rq);

		break;

	case EXPRESS_IOCTL_DEFAULT_PARAMS:

		esas2r_nvram_get_defaults(a,
					  (struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer);
		ioctl->data.prw.code = 1;
		break;

	case EXPRESS_IOCTL_CHAN_INFO:

		ioctl->data.chaninfo.major_rev = ESAS2R_MAJOR_REV;
		ioctl->data.chaninfo.minor_rev = ESAS2R_MINOR_REV;
		ioctl->data.chaninfo.IRQ = a->pcid->irq;
		ioctl->data.chaninfo.device_id = a->pcid->device;
		ioctl->data.chaninfo.vendor_id = a->pcid->vendor;
		ioctl->data.chaninfo.ven_dev_id = a->pcid->subsystem_device;
		ioctl->data.chaninfo.revision_id = a->pcid->revision;
		ioctl->data.chaninfo.pci_bus = a->pcid->bus->number;
		ioctl->data.chaninfo.pci_dev_func = a->pcid->devfn;
		ioctl->data.chaninfo.core_rev = 0;
		ioctl->data.chaninfo.host_no = a->host->host_no;
		ioctl->data.chaninfo.hbaapi_rev = 0;
		break;

	case EXPRESS_IOCTL_SMP:
		ioctl->header.return_code = handle_smp_ioctl(a,
							     &ioctl->data.
							     ioctl_smp);
		break;

	case EXPRESS_CSMI:
		ioctl->header.return_code =
			handle_csmi_ioctl(a, &ioctl->data.csmi);
		break;

	case EXPRESS_IOCTL_HBA:
		ioctl->header.return_code = handle_hba_ioctl(a,
							     &ioctl->data.
							     ioctl_hba);
		break;

	case EXPRESS_IOCTL_VDA:
		err = esas2r_write_vda(a,
				       (char *)&ioctl->data.ioctl_vda,
				       0,
				       sizeof(struct atto_ioctl_vda) +
				       ioctl->data.ioctl_vda.data_length);

		if (err >= 0) {
			err = esas2r_read_vda(a,
					      (char *)&ioctl->data.ioctl_vda,
					      0,
					      sizeof(struct atto_ioctl_vda) +
					      ioctl->data.ioctl_vda.data_length);
		}

		break;

	case EXPRESS_IOCTL_GET_MOD_INFO:

		ioctl->data.modinfo.adapter = a;
		ioctl->data.modinfo.pci_dev = a->pcid;
		ioctl->data.modinfo.scsi_host = a->host;
		ioctl->data.modinfo.host_no = a->host->host_no;

		break;

	default:
		esas2r_debug("esas2r_ioctl invalid cmd %d!", cmd);
		ioctl->header.return_code = IOCTL_ERR_INVCMD;
	}

ioctl_done:

	if (err < 0) {
		esas2r_log(ESAS2R_LOG_WARN, "err %d on ioctl cmd %d", err,
			   cmd);

		switch (err) {
		case -ENOMEM:
		case -EBUSY:
			ioctl->header.return_code = IOCTL_OUT_OF_RESOURCES;
			break;

		case -ENOSYS:
		case -EINVAL:
			ioctl->header.return_code = IOCTL_INVALID_PARAM;
			break;

		default:
			ioctl->header.return_code = IOCTL_GENERAL_ERROR;
			break;
		}
	}

	/* Always copy the buffer back, if only to pick up the status */
	err = __copy_to_user(arg, ioctl, sizeof(struct atto_express_ioctl));
	if (err != 0) {
		esas2r_log(ESAS2R_LOG_WARN,
			   "ioctl_handler copy_to_user didn't copy "
			   "everything (err %d, cmd %d)", err,
			   cmd);
		kfree(ioctl);

		return -EFAULT;
	}

	kfree(ioctl);

	return 0;
}
*sd
, int cmd
, void __user
*arg
)
1536 return esas2r_ioctl_handler(sd
->host
->hostdata
, cmd
, arg
);
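
/*
 * For reference, a user-space caller reaches this entry point through the
 * SCSI host's ioctl path.  A minimal sketch (the device node name is only
 * an example; any SCSI node bound to this driver should work):
 *
 *	struct atto_express_ioctl ei = { 0 };
 *	memcpy(ei.header.signature, EXPRESS_IOCTL_SIGNATURE,
 *	       EXPRESS_IOCTL_SIGNATURE_SIZE);
 *	ei.header.channel = 0xFF;		// use the adapter we opened
 *	int fd = open("/dev/sg0", O_RDWR);	// hypothetical node
 *	ioctl(fd, EXPRESS_IOCTL_CHAN_INFO, &ei);
 *	// ei.header.return_code and ei.data.chaninfo are now filled in
 */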
static void free_fw_buffers(struct esas2r_adapter *a)
{
	if (a->firmware.data) {
		dma_free_coherent(&a->pcid->dev,
				  (size_t)a->firmware.orig_len,
				  a->firmware.data,
				  (dma_addr_t)a->firmware.phys);

		a->firmware.data = NULL;
	}
}
static int allocate_fw_buffers(struct esas2r_adapter *a, u32 length)
{
	free_fw_buffers(a);

	a->firmware.orig_len = length;

	a->firmware.data = (u8 *)dma_alloc_coherent(&a->pcid->dev,
						    (size_t)length,
						    (dma_addr_t *)&a->firmware.
						    phys,
						    GFP_KERNEL);

	if (!a->firmware.data) {
		esas2r_debug("buffer alloc failed!");
		return 0;
	}

	return 1;
}
/* Handle a call to read firmware. */
int esas2r_read_fw(struct esas2r_adapter *a, char *buf, long off, int count)
{
	esas2r_trace_enter();
	/* if the cached header is a status, simply copy it over and return. */
	if (a->firmware.state == FW_STATUS_ST) {
		int size = min_t(int, count, sizeof(a->firmware.header));
		esas2r_trace_exit();
		memcpy(buf, &a->firmware.header, size);
		esas2r_debug("esas2r_read_fw: STATUS size %d", size);
		return size;
	}

	/*
	 * if the cached header is a command, do it if at
	 * offset 0, otherwise copy the pieces.
	 */

	if (a->firmware.state == FW_COMMAND_ST) {
		u32 length = a->firmware.header.length;
		esas2r_trace_exit();

		esas2r_debug("esas2r_read_fw: COMMAND length %d off %d",
			     length,
			     off);

		if (off == 0) {
			if (a->firmware.header.action == FI_ACT_UP) {
				if (!allocate_fw_buffers(a, length))
					return -ENOMEM;

				/* copy header over */

				memcpy(a->firmware.data,
				       &a->firmware.header,
				       sizeof(a->firmware.header));

				do_fm_api(a,
					  (struct esas2r_flash_img *)a->firmware.data);
			} else if (a->firmware.header.action == FI_ACT_UPSZ) {
				int size =
					min((int)count,
					    (int)sizeof(a->firmware.header));
				do_fm_api(a, &a->firmware.header);
				memcpy(buf, &a->firmware.header, size);
				esas2r_debug("FI_ACT_UPSZ size %d", size);
				return size;
			} else {
				esas2r_debug("invalid action %d",
					     a->firmware.header.action);
				return -ENOSYS;
			}
		}

		if (count + off > length)
			count = length - off;

		if (count < 0)
			return 0;

		if (!a->firmware.data) {
			esas2r_debug(
				"read: nonzero offset but no buffer available!");
			return -ENOMEM;
		}

		esas2r_debug("esas2r_read_fw: off %d count %d length %d ", off,
			     count,
			     length);

		memcpy(buf, &a->firmware.data[off], count);

		/* when done, release the buffer */

		if (length <= off + count) {
			esas2r_debug("esas2r_read_fw: freeing buffer!");

			free_fw_buffers(a);
		}

		return count;
	}

	esas2r_trace_exit();
	esas2r_debug("esas2r_read_fw: invalid firmware state %d",
		     a->firmware.state);

	return -EINVAL;
}
/* Handle a call to write firmware. */
int esas2r_write_fw(struct esas2r_adapter *a, const char *buf, long off,
		    int count)
{
	u32 length;

	if (off == 0) {
		struct esas2r_flash_img *header =
			(struct esas2r_flash_img *)buf;

		/* assume version 0 flash image */

		int min_size = sizeof(struct esas2r_flash_img_v0);

		a->firmware.state = FW_INVALID_ST;

		/* validate the version field first */

		if (count < 4
		    || header->fi_version > FI_VERSION_1) {
			esas2r_debug(
				"esas2r_write_fw: short header or invalid version");
			return -EINVAL;
		}

		/* See if its a version 1 flash image */

		if (header->fi_version == FI_VERSION_1)
			min_size = sizeof(struct esas2r_flash_img);

		/* If this is the start, the header must be full and valid. */
		if (count < min_size) {
			esas2r_debug("esas2r_write_fw: short header, aborting");
			return -EINVAL;
		}

		/* Make sure the size is reasonable. */
		length = header->length;

		if (length > 1024 * 1024) {
			esas2r_debug(
				"esas2r_write_fw: hosed, length %d fi_version %d",
				length, header->fi_version);
			return -EINVAL;
		}

		/*
		 * If this is a write command, allocate memory because
		 * we have to cache everything. otherwise, just cache
		 * the header, because the read op will do the command.
		 */

		if (header->action == FI_ACT_DOWN) {
			if (!allocate_fw_buffers(a, length))
				return -ENOMEM;

			/*
			 * Store the command, so there is context on subsequent
			 * calls.
			 */
			memcpy(&a->firmware.header,
			       buf,
			       sizeof(*header));
		} else if (header->action == FI_ACT_UP
			   || header->action == FI_ACT_UPSZ) {
			/* Save the command, result will be picked up on read */
			memcpy(&a->firmware.header,
			       buf,
			       sizeof(*header));

			a->firmware.state = FW_COMMAND_ST;

			esas2r_debug(
				"esas2r_write_fw: COMMAND, count %d, action %d ",
				count, header->action);

			/*
			 * Pretend we took the whole buffer,
			 * so we don't get bothered again.
			 */

			return count;
		} else {
			esas2r_debug("esas2r_write_fw: invalid action %d ",
				     a->firmware.header.action);
			return -ENOSYS;
		}
	} else {
		length = a->firmware.header.length;
	}

	/*
	 * We only get here on a download command, regardless of offset.
	 * the chunks written by the system need to be cached, and when
	 * the final one arrives, issue the fmapi command.
	 */

	if (off + count > length)
		count = length - off;

	if (count > 0) {
		esas2r_debug("esas2r_write_fw: off %d count %d length %d", off,
			     count,
			     length);

		/*
		 * On a full upload, the system tries sending the whole buffer.
		 * there's nothing to do with it, so just drop it here, before
		 * trying to copy over into unallocated memory!
		 */
		if (a->firmware.header.action == FI_ACT_UP)
			return count;

		if (!a->firmware.data) {
			esas2r_debug(
				"write: nonzero offset but no buffer available!");
			return -ENOMEM;
		}

		memcpy(&a->firmware.data[off], buf, count);

		if (length == off + count) {
			do_fm_api(a,
				  (struct esas2r_flash_img *)a->firmware.data);

			/*
			 * Now copy the header result to be picked up by the
			 * next read
			 */
			memcpy(&a->firmware.header,
			       a->firmware.data,
			       sizeof(a->firmware.header));

			a->firmware.state = FW_STATUS_ST;

			esas2r_debug("write completed");

			/*
			 * Since the system has the data buffered, the only way
			 * this can leak is if a root user writes a program
			 * that writes a shorter buffer than it claims, and the
			 * copyout fails.
			 */
			free_fw_buffers(a);
		}
	}

	return count;
}
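
/*
 * The firmware up/download protocol implemented by esas2r_write_fw() /
 * esas2r_read_fw() is a write-then-read exchange over the same buffer.
 * For a download (FI_ACT_DOWN), based on the code above, the sequence is:
 *
 *	esas2r_write_fw(a, hdr_and_data, 0, chunk0);	// header cached
 *	esas2r_write_fw(a, more_data, off, chunkN);	// chunks accumulated
 *	// ...when off + count == header->length, do_fm_api() fires and the
 *	// result header is cached with state == FW_STATUS_ST
 *	esas2r_read_fw(a, status_buf, 0, sizeof(struct esas2r_flash_img));
 *
 * Uploads (FI_ACT_UP / FI_ACT_UPSZ) invert this: the write only caches the
 * command header, and the first read triggers the actual FM API call.
 */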
/* Callback for the completion of a VDA request. */
static void vda_complete_req(struct esas2r_adapter *a,
			     struct esas2r_request *rq)
{
	a->vda_command_done = 1;
	wake_up_interruptible(&a->vda_waiter);
}
/* Scatter/gather callback for VDA requests */
static u32 get_physaddr_vda(struct esas2r_sg_context *sgc, u64 *addr)
{
	struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
	int offset = (u8 *)sgc->cur_offset - (u8 *)a->vda_buffer;

	(*addr) = a->ppvda_buffer + offset;
	return VDA_MAX_BUFFER_SIZE - offset;
}
/* Handle a call to read a VDA command. */
int esas2r_read_vda(struct esas2r_adapter *a, char *buf, long off, int count)
{
	if (!a->vda_buffer)
		return -ENOMEM;

	if (off == 0) {
		struct esas2r_request *rq;
		struct atto_ioctl_vda *vi =
			(struct atto_ioctl_vda *)a->vda_buffer;
		struct esas2r_sg_context sgc;
		bool wait_for_completion;

		/*
		 * Presumably, someone has already written to the vda_buffer,
		 * and now they are reading the response, so now we will
		 * actually issue the request to the chip and reply.
		 */

		/* allocate a request */
		rq = esas2r_alloc_request(a);
		if (rq == NULL) {
			esas2r_debug("esas2r_read_vda: out of requests");
			return -EBUSY;
		}

		rq->comp_cb = vda_complete_req;

		sgc.first_req = rq;
		sgc.adapter = a;
		sgc.cur_offset = a->vda_buffer + VDA_BUFFER_HEADER_SZ;
		sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_vda;

		a->vda_command_done = 0;

		wait_for_completion =
			esas2r_process_vda_ioctl(a, vi, rq, &sgc);

		if (wait_for_completion) {
			/* now wait around for it to complete. */
			while (!a->vda_command_done)
				wait_event_interruptible(a->vda_waiter,
							 a->vda_command_done);
		}

		esas2r_free_request(a, (struct esas2r_request *)rq);
	}

	if (off > VDA_MAX_BUFFER_SIZE)
		return 0;

	if (count + off > VDA_MAX_BUFFER_SIZE)
		count = VDA_MAX_BUFFER_SIZE - off;

	if (count < 0)
		return 0;

	memcpy(buf, a->vda_buffer + off, count);

	return count;
}
/* Handle a call to write a VDA command. */
int esas2r_write_vda(struct esas2r_adapter *a, const char *buf, long off,
		     int count)
{
	/*
	 * allocate memory for it, if not already done.  once allocated,
	 * we will keep it around until the driver is unloaded.
	 */

	if (!a->vda_buffer) {
		dma_addr_t dma_addr;
		a->vda_buffer = (u8 *)dma_alloc_coherent(&a->pcid->dev,
							 (size_t)
							 VDA_MAX_BUFFER_SIZE,
							 &dma_addr,
							 GFP_KERNEL);

		a->ppvda_buffer = dma_addr;
	}

	if (!a->vda_buffer)
		return -ENOMEM;

	if (off > VDA_MAX_BUFFER_SIZE)
		return 0;

	if (count + off > VDA_MAX_BUFFER_SIZE)
		count = VDA_MAX_BUFFER_SIZE - off;

	if (count < 1)
		return 0;

	memcpy(a->vda_buffer + off, buf, count);

	return count;
}
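
/*
 * VDA ioctls follow the same two-step convention as the firmware path:
 * esas2r_write_vda() only copies the command into the persistent DMA
 * buffer, and the subsequent esas2r_read_vda() at offset 0 is what
 * actually submits it to the chip and blocks for completion.
 * esas2r_ioctl_handler() above pairs the two calls for EXPRESS_IOCTL_VDA.
 */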
/* Callback for the completion of an FS_API request. */
static void fs_api_complete_req(struct esas2r_adapter *a,
				struct esas2r_request *rq)
{
	a->fs_api_command_done = 1;

	wake_up_interruptible(&a->fs_api_waiter);
}
/* Scatter/gather callback for FS_API requests */
static u32 get_physaddr_fs_api(struct esas2r_sg_context *sgc, u64 *addr)
{
	struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
	struct esas2r_ioctl_fs *fs =
		(struct esas2r_ioctl_fs *)a->fs_api_buffer;
	u32 offset = (u8 *)sgc->cur_offset - (u8 *)fs;

	(*addr) = a->ppfs_api_buffer + offset;

	return a->fs_api_buffer_size - offset;
}
1953 int esas2r_read_fs(struct esas2r_adapter
*a
, char *buf
, long off
, int count
)
1955 if (!a
->fs_api_buffer
)
1959 struct esas2r_request
*rq
;
1960 struct esas2r_sg_context sgc
;
1961 struct esas2r_ioctl_fs
*fs
=
1962 (struct esas2r_ioctl_fs
*)a
->fs_api_buffer
;
1964 /* If another flash request is already in progress, return. */
1965 if (mutex_lock_interruptible(&a
->fs_api_mutex
)) {
1967 fs
->status
= ATTO_STS_OUT_OF_RSRC
;
1972 * Presumeably, someone has already written to the
1973 * fs_api_buffer, and now they are reading the node the
1974 * response, so now we will actually issue the request to the
1975 * chip and reply. Allocate a request
1978 rq
= esas2r_alloc_request(a
);
1980 esas2r_debug("esas2r_read_fs: out of requests");
1981 mutex_unlock(&a
->fs_api_mutex
);
1985 rq
->comp_cb
= fs_api_complete_req
;
1987 /* Set up the SGCONTEXT for to build the s/g table */
1989 sgc
.cur_offset
= fs
->data
;
1990 sgc
.get_phys_addr
= (PGETPHYSADDR
)get_physaddr_fs_api
;
1992 a
->fs_api_command_done
= 0;
1994 if (!esas2r_process_fs_ioctl(a
, fs
, rq
, &sgc
)) {
1995 if (fs
->status
== ATTO_STS_OUT_OF_RSRC
)
2001 /* Now wait around for it to complete. */
2003 while (!a
->fs_api_command_done
)
2004 wait_event_interruptible(a
->fs_api_waiter
,
2005 a
->fs_api_command_done
);
2008 /* Free the request and keep going */
2009 mutex_unlock(&a
->fs_api_mutex
);
2010 esas2r_free_request(a
, (struct esas2r_request
*)rq
);
2012 /* Pick up possible error code from above */
2017 if (off
> a
->fs_api_buffer_size
)
2020 if (count
+ off
> a
->fs_api_buffer_size
)
2021 count
= a
->fs_api_buffer_size
- off
;
2026 memcpy(buf
, a
->fs_api_buffer
+ off
, count
);
2031 /* Handle a call to write firmware via FS_API. */
2032 int esas2r_write_fs(struct esas2r_adapter
*a
, const char *buf
, long off
,
2036 struct esas2r_ioctl_fs
*fs
= (struct esas2r_ioctl_fs
*)buf
;
2037 u32 length
= fs
->command
.length
+ offsetof(
2038 struct esas2r_ioctl_fs
,
2042 * Special case, for BEGIN commands, the length field
2043 * is lying to us, so just get enough for the header.
2046 if (fs
->command
.command
== ESAS2R_FS_CMD_BEGINW
)
2047 length
= offsetof(struct esas2r_ioctl_fs
, data
);
2050 * Beginning a command. We assume we'll get at least
2051 * enough in the first write so we can look at the
2052 * header and see how much we need to alloc.
2055 if (count
< offsetof(struct esas2r_ioctl_fs
, data
))
2058 /* Allocate a buffer or use the existing buffer. */
2059 if (a
->fs_api_buffer
) {
2060 if (a
->fs_api_buffer_size
< length
) {
2061 /* Free too-small buffer and get a new one */
2062 dma_free_coherent(&a
->pcid
->dev
,
2063 (size_t)a
->fs_api_buffer_size
,
2065 (dma_addr_t
)a
->ppfs_api_buffer
);
2067 goto re_allocate_buffer
;
2071 a
->fs_api_buffer_size
= length
;
2073 a
->fs_api_buffer
= (u8
*)dma_alloc_coherent(
2075 (size_t)a
->fs_api_buffer_size
,
2076 (dma_addr_t
*)&a
->ppfs_api_buffer
,
2081 if (!a
->fs_api_buffer
)
2084 if (off
> a
->fs_api_buffer_size
)
2087 if (count
+ off
> a
->fs_api_buffer_size
)
2088 count
= a
->fs_api_buffer_size
- off
;
2093 memcpy(a
->fs_api_buffer
+ off
, buf
, count
);