/*
 * Copyright (C) 2000-2002	   Michael Cornwell <cornwell@acm.org>
 * Copyright (C) 2000-2002	   Andre Hedrick <andre@linux-ide.org>
 * Copyright (C) 2001-2002	   Klaus Smolin
 *				   IBM Storage Technology Division
 * Copyright (C) 2003-2004, 2007   Bartlomiej Zolnierkiewicz
 *
 *  The big the bad and the ugly.
 */
11 #include <linux/types.h>
12 #include <linux/string.h>
13 #include <linux/kernel.h>
14 #include <linux/export.h>
15 #include <linux/sched.h>
16 #include <linux/interrupt.h>
17 #include <linux/errno.h>
18 #include <linux/slab.h>
19 #include <linux/delay.h>
20 #include <linux/hdreg.h>
21 #include <linux/ide.h>
22 #include <linux/nmi.h>
23 #include <linux/scatterlist.h>
24 #include <linux/uaccess.h>
28 void ide_tf_readback(ide_drive_t
*drive
, struct ide_cmd
*cmd
)
30 ide_hwif_t
*hwif
= drive
->hwif
;
31 const struct ide_tp_ops
*tp_ops
= hwif
->tp_ops
;
33 /* Be sure we're looking at the low order bytes */
34 tp_ops
->write_devctl(hwif
, ATA_DEVCTL_OBS
);
36 tp_ops
->tf_read(drive
, &cmd
->tf
, cmd
->valid
.in
.tf
);
38 if (cmd
->tf_flags
& IDE_TFLAG_LBA48
) {
39 tp_ops
->write_devctl(hwif
, ATA_HOB
| ATA_DEVCTL_OBS
);
41 tp_ops
->tf_read(drive
, &cmd
->hob
, cmd
->valid
.in
.hob
);
45 void ide_tf_dump(const char *s
, struct ide_cmd
*cmd
)
48 printk("%s: tf: feat 0x%02x nsect 0x%02x lbal 0x%02x "
49 "lbam 0x%02x lbah 0x%02x dev 0x%02x cmd 0x%02x\n",
50 s
, cmd
->tf
.feature
, cmd
->tf
.nsect
,
51 cmd
->tf
.lbal
, cmd
->tf
.lbam
, cmd
->tf
.lbah
,
52 cmd
->tf
.device
, cmd
->tf
.command
);
53 printk("%s: hob: nsect 0x%02x lbal 0x%02x lbam 0x%02x lbah 0x%02x\n",
54 s
, cmd
->hob
.nsect
, cmd
->hob
.lbal
, cmd
->hob
.lbam
, cmd
->hob
.lbah
);
58 int taskfile_lib_get_identify(ide_drive_t
*drive
, u8
*buf
)
62 memset(&cmd
, 0, sizeof(cmd
));
64 if (drive
->media
== ide_disk
)
65 cmd
.tf
.command
= ATA_CMD_ID_ATA
;
67 cmd
.tf
.command
= ATA_CMD_ID_ATAPI
;
68 cmd
.valid
.out
.tf
= IDE_VALID_OUT_TF
| IDE_VALID_DEVICE
;
69 cmd
.valid
.in
.tf
= IDE_VALID_IN_TF
| IDE_VALID_DEVICE
;
70 cmd
.protocol
= ATA_PROT_PIO
;
72 return ide_raw_taskfile(drive
, &cmd
, buf
, 1);
75 static ide_startstop_t
task_no_data_intr(ide_drive_t
*);
76 static ide_startstop_t
pre_task_out_intr(ide_drive_t
*, struct ide_cmd
*);
77 static ide_startstop_t
task_pio_intr(ide_drive_t
*);
79 ide_startstop_t
do_rw_taskfile(ide_drive_t
*drive
, struct ide_cmd
*orig_cmd
)
81 ide_hwif_t
*hwif
= drive
->hwif
;
82 struct ide_cmd
*cmd
= &hwif
->cmd
;
83 struct ide_taskfile
*tf
= &cmd
->tf
;
84 ide_handler_t
*handler
= NULL
;
85 const struct ide_tp_ops
*tp_ops
= hwif
->tp_ops
;
86 const struct ide_dma_ops
*dma_ops
= hwif
->dma_ops
;
88 if (orig_cmd
->protocol
== ATA_PROT_PIO
&&
89 (orig_cmd
->tf_flags
& IDE_TFLAG_MULTI_PIO
) &&
90 drive
->mult_count
== 0) {
91 pr_err("%s: multimode not set!\n", drive
->name
);
95 if (orig_cmd
->ftf_flags
& IDE_FTFLAG_FLAGGED
)
96 orig_cmd
->ftf_flags
|= IDE_FTFLAG_SET_IN_FLAGS
;
98 memcpy(cmd
, orig_cmd
, sizeof(*cmd
));
100 if ((cmd
->tf_flags
& IDE_TFLAG_DMA_PIO_FALLBACK
) == 0) {
101 ide_tf_dump(drive
->name
, cmd
);
102 tp_ops
->write_devctl(hwif
, ATA_DEVCTL_OBS
);
104 if (cmd
->ftf_flags
& IDE_FTFLAG_OUT_DATA
) {
105 u8 data
[2] = { cmd
->tf
.data
, cmd
->hob
.data
};
107 tp_ops
->output_data(drive
, cmd
, data
, 2);
110 if (cmd
->valid
.out
.tf
& IDE_VALID_DEVICE
) {
111 u8 HIHI
= (cmd
->tf_flags
& IDE_TFLAG_LBA48
) ?
114 if (!(cmd
->ftf_flags
& IDE_FTFLAG_FLAGGED
))
115 cmd
->tf
.device
&= HIHI
;
116 cmd
->tf
.device
|= drive
->select
;
119 tp_ops
->tf_load(drive
, &cmd
->hob
, cmd
->valid
.out
.hob
);
120 tp_ops
->tf_load(drive
, &cmd
->tf
, cmd
->valid
.out
.tf
);
123 switch (cmd
->protocol
) {
125 if (cmd
->tf_flags
& IDE_TFLAG_WRITE
) {
126 tp_ops
->exec_command(hwif
, tf
->command
);
127 ndelay(400); /* FIXME */
128 return pre_task_out_intr(drive
, cmd
);
130 handler
= task_pio_intr
;
132 case ATA_PROT_NODATA
:
134 handler
= task_no_data_intr
;
135 ide_execute_command(drive
, cmd
, handler
, WAIT_WORSTCASE
);
138 if (ide_dma_prepare(drive
, cmd
))
140 hwif
->expiry
= dma_ops
->dma_timer_expiry
;
141 ide_execute_command(drive
, cmd
, ide_dma_intr
, 2 * WAIT_CMD
);
142 dma_ops
->dma_start(drive
);
147 EXPORT_SYMBOL_GPL(do_rw_taskfile
);
149 static ide_startstop_t
task_no_data_intr(ide_drive_t
*drive
)
151 ide_hwif_t
*hwif
= drive
->hwif
;
152 struct ide_cmd
*cmd
= &hwif
->cmd
;
153 struct ide_taskfile
*tf
= &cmd
->tf
;
154 int custom
= (cmd
->tf_flags
& IDE_TFLAG_CUSTOM_HANDLER
) ? 1 : 0;
155 int retries
= (custom
&& tf
->command
== ATA_CMD_INIT_DEV_PARAMS
) ? 5 : 1;
158 local_irq_enable_in_hardirq();
161 stat
= hwif
->tp_ops
->read_status(hwif
);
162 if ((stat
& ATA_BUSY
) == 0 || retries
-- == 0)
167 if (!OK_STAT(stat
, ATA_DRDY
, BAD_STAT
)) {
168 if (custom
&& tf
->command
== ATA_CMD_SET_MULTI
) {
169 drive
->mult_req
= drive
->mult_count
= 0;
170 drive
->special_flags
|= IDE_SFLAG_RECALIBRATE
;
171 (void)ide_dump_status(drive
, __func__
, stat
);
173 } else if (custom
&& tf
->command
== ATA_CMD_INIT_DEV_PARAMS
) {
174 if ((stat
& (ATA_ERR
| ATA_DRQ
)) == 0) {
175 ide_set_handler(drive
, &task_no_data_intr
,
180 return ide_error(drive
, "task_no_data_intr", stat
);
183 if (custom
&& tf
->command
== ATA_CMD_SET_MULTI
)
184 drive
->mult_count
= drive
->mult_req
;
186 if (custom
== 0 || tf
->command
== ATA_CMD_IDLEIMMEDIATE
||
187 tf
->command
== ATA_CMD_CHK_POWER
) {
188 struct request
*rq
= hwif
->rq
;
190 if (ata_pm_request(rq
))
191 ide_complete_pm_rq(drive
, rq
);
193 ide_finish_cmd(drive
, cmd
, stat
);
199 static u8
wait_drive_not_busy(ide_drive_t
*drive
)
201 ide_hwif_t
*hwif
= drive
->hwif
;
206 * Last sector was transferred, wait until device is ready. This can
207 * take up to 6 ms on some ATAPI devices, so we will wait max 10 ms.
209 for (retries
= 0; retries
< 1000; retries
++) {
210 stat
= hwif
->tp_ops
->read_status(hwif
);
219 pr_err("%s: drive still BUSY!\n", drive
->name
);
224 void ide_pio_bytes(ide_drive_t
*drive
, struct ide_cmd
*cmd
,
225 unsigned int write
, unsigned int len
)
227 ide_hwif_t
*hwif
= drive
->hwif
;
228 struct scatterlist
*sg
= hwif
->sg_table
;
229 struct scatterlist
*cursg
= cmd
->cursg
;
230 unsigned long uninitialized_var(flags
);
237 cursg
= cmd
->cursg
= sg
;
240 unsigned nr_bytes
= min(len
, cursg
->length
- cmd
->cursg_ofs
);
243 page
= sg_page(cursg
);
244 offset
= cursg
->offset
+ cmd
->cursg_ofs
;
246 /* get the current page and offset */
247 page
= nth_page(page
, (offset
>> PAGE_SHIFT
));
250 nr_bytes
= min_t(unsigned, nr_bytes
, (PAGE_SIZE
- offset
));
252 page_is_high
= PageHighMem(page
);
254 local_irq_save(flags
);
256 buf
= kmap_atomic(page
) + offset
;
258 cmd
->nleft
-= nr_bytes
;
259 cmd
->cursg_ofs
+= nr_bytes
;
261 if (cmd
->cursg_ofs
== cursg
->length
) {
262 cursg
= cmd
->cursg
= sg_next(cmd
->cursg
);
266 /* do the actual data transfer */
268 hwif
->tp_ops
->output_data(drive
, cmd
, buf
, nr_bytes
);
270 hwif
->tp_ops
->input_data(drive
, cmd
, buf
, nr_bytes
);
275 local_irq_restore(flags
);
280 EXPORT_SYMBOL_GPL(ide_pio_bytes
);
282 static void ide_pio_datablock(ide_drive_t
*drive
, struct ide_cmd
*cmd
,
285 unsigned int nr_bytes
;
287 u8 saved_io_32bit
= drive
->io_32bit
;
289 if (cmd
->tf_flags
& IDE_TFLAG_FS
)
290 scsi_req(cmd
->rq
)->result
= 0;
292 if (cmd
->tf_flags
& IDE_TFLAG_IO_16BIT
)
295 touch_softlockup_watchdog();
297 if (cmd
->tf_flags
& IDE_TFLAG_MULTI_PIO
)
298 nr_bytes
= min_t(unsigned, cmd
->nleft
, drive
->mult_count
<< 9);
300 nr_bytes
= SECTOR_SIZE
;
302 ide_pio_bytes(drive
, cmd
, write
, nr_bytes
);
304 drive
->io_32bit
= saved_io_32bit
;
307 static void ide_error_cmd(ide_drive_t
*drive
, struct ide_cmd
*cmd
)
309 if (cmd
->tf_flags
& IDE_TFLAG_FS
) {
310 int nr_bytes
= cmd
->nbytes
- cmd
->nleft
;
312 if (cmd
->protocol
== ATA_PROT_PIO
&&
313 ((cmd
->tf_flags
& IDE_TFLAG_WRITE
) || cmd
->nleft
== 0)) {
314 if (cmd
->tf_flags
& IDE_TFLAG_MULTI_PIO
)
315 nr_bytes
-= drive
->mult_count
<< 9;
317 nr_bytes
-= SECTOR_SIZE
;
321 ide_complete_rq(drive
, BLK_STS_OK
, nr_bytes
);
325 void ide_finish_cmd(ide_drive_t
*drive
, struct ide_cmd
*cmd
, u8 stat
)
327 struct request
*rq
= drive
->hwif
->rq
;
328 u8 err
= ide_read_error(drive
), nsect
= cmd
->tf
.nsect
;
329 u8 set_xfer
= !!(cmd
->tf_flags
& IDE_TFLAG_SET_XFER
);
331 ide_complete_cmd(drive
, cmd
, stat
, err
);
332 scsi_req(rq
)->result
= err
;
334 if (err
== 0 && set_xfer
) {
335 ide_set_xfer_rate(drive
, nsect
);
336 ide_driveid_update(drive
);
339 ide_complete_rq(drive
, err
? BLK_STS_IOERR
: BLK_STS_OK
, blk_rq_bytes(rq
));
343 * Handler for command with PIO data phase.
345 static ide_startstop_t
task_pio_intr(ide_drive_t
*drive
)
347 ide_hwif_t
*hwif
= drive
->hwif
;
348 struct ide_cmd
*cmd
= &drive
->hwif
->cmd
;
349 u8 stat
= hwif
->tp_ops
->read_status(hwif
);
350 u8 write
= !!(cmd
->tf_flags
& IDE_TFLAG_WRITE
);
357 /* Didn't want any data? Odd. */
358 if ((stat
& ATA_DRQ
) == 0) {
359 /* Command all done? */
360 if (OK_STAT(stat
, ATA_DRDY
, ATA_BUSY
))
363 /* Assume it was a spurious irq */
367 if (!OK_STAT(stat
, DRIVE_READY
, drive
->bad_wstat
))
370 /* Deal with unexpected ATA data phase. */
371 if (((stat
& ATA_DRQ
) == 0) ^ (cmd
->nleft
== 0))
375 if (write
&& cmd
->nleft
== 0)
378 /* Still data left to transfer. */
379 ide_pio_datablock(drive
, cmd
, write
);
381 /* Are we done? Check status and finish transfer. */
382 if (write
== 0 && cmd
->nleft
== 0) {
383 stat
= wait_drive_not_busy(drive
);
384 if (!OK_STAT(stat
, 0, BAD_STAT
))
390 /* Still data left to transfer. */
391 ide_set_handler(drive
, &task_pio_intr
, WAIT_WORSTCASE
);
394 if ((cmd
->tf_flags
& IDE_TFLAG_FS
) == 0)
395 ide_finish_cmd(drive
, cmd
, stat
);
397 ide_complete_rq(drive
, BLK_STS_OK
, blk_rq_sectors(cmd
->rq
) << 9);
400 ide_error_cmd(drive
, cmd
);
401 return ide_error(drive
, __func__
, stat
);
404 static ide_startstop_t
pre_task_out_intr(ide_drive_t
*drive
,
407 ide_startstop_t startstop
;
409 if (ide_wait_stat(&startstop
, drive
, ATA_DRQ
,
410 drive
->bad_wstat
, WAIT_DRQ
)) {
411 pr_err("%s: no DRQ after issuing %sWRITE%s\n", drive
->name
,
412 (cmd
->tf_flags
& IDE_TFLAG_MULTI_PIO
) ? "MULT" : "",
413 (drive
->dev_flags
& IDE_DFLAG_LBA48
) ? "_EXT" : "");
417 if ((drive
->dev_flags
& IDE_DFLAG_UNMASK
) == 0)
420 ide_set_handler(drive
, &task_pio_intr
, WAIT_WORSTCASE
);
422 ide_pio_datablock(drive
, cmd
, 1);
427 int ide_raw_taskfile(ide_drive_t
*drive
, struct ide_cmd
*cmd
, u8
*buf
,
433 rq
= blk_get_request(drive
->queue
,
434 (cmd
->tf_flags
& IDE_TFLAG_WRITE
) ?
435 REQ_OP_DRV_OUT
: REQ_OP_DRV_IN
, __GFP_RECLAIM
);
436 ide_req(rq
)->type
= ATA_PRIV_TASKFILE
;
439 * (ks) We transfer currently only whole sectors.
440 * This is suffient for now. But, it would be great,
441 * if we would find a solution to transfer any size.
442 * To support special commands like READ LONG.
445 error
= blk_rq_map_kern(drive
->queue
, rq
, buf
,
446 nsect
* SECTOR_SIZE
, __GFP_RECLAIM
);
454 blk_execute_rq(drive
->queue
, NULL
, rq
, 0);
455 error
= scsi_req(rq
)->result
? -EIO
: 0;
460 EXPORT_SYMBOL(ide_raw_taskfile
);
462 int ide_no_data_taskfile(ide_drive_t
*drive
, struct ide_cmd
*cmd
)
464 cmd
->protocol
= ATA_PROT_NODATA
;
466 return ide_raw_taskfile(drive
, cmd
, NULL
, 0);
468 EXPORT_SYMBOL_GPL(ide_no_data_taskfile
);
470 #ifdef CONFIG_IDE_TASK_IOCTL
471 int ide_taskfile_ioctl(ide_drive_t
*drive
, unsigned long arg
)
473 ide_task_request_t
*req_task
;
479 int tasksize
= sizeof(struct ide_task_request_s
);
480 unsigned int taskin
= 0;
481 unsigned int taskout
= 0;
483 char __user
*buf
= (char __user
*)arg
;
485 req_task
= memdup_user(buf
, tasksize
);
486 if (IS_ERR(req_task
))
487 return PTR_ERR(req_task
);
489 taskout
= req_task
->out_size
;
490 taskin
= req_task
->in_size
;
492 if (taskin
> 65536 || taskout
> 65536) {
498 int outtotal
= tasksize
;
499 outbuf
= kzalloc(taskout
, GFP_KERNEL
);
500 if (outbuf
== NULL
) {
504 if (copy_from_user(outbuf
, buf
+ outtotal
, taskout
)) {
511 int intotal
= tasksize
+ taskout
;
512 inbuf
= kzalloc(taskin
, GFP_KERNEL
);
517 if (copy_from_user(inbuf
, buf
+ intotal
, taskin
)) {
523 memset(&cmd
, 0, sizeof(cmd
));
525 memcpy(&cmd
.hob
, req_task
->hob_ports
, HDIO_DRIVE_HOB_HDR_SIZE
- 2);
526 memcpy(&cmd
.tf
, req_task
->io_ports
, HDIO_DRIVE_TASK_HDR_SIZE
);
528 cmd
.valid
.out
.tf
= IDE_VALID_DEVICE
;
529 cmd
.valid
.in
.tf
= IDE_VALID_DEVICE
| IDE_VALID_IN_TF
;
530 cmd
.tf_flags
= IDE_TFLAG_IO_16BIT
;
532 if (drive
->dev_flags
& IDE_DFLAG_LBA48
) {
533 cmd
.tf_flags
|= IDE_TFLAG_LBA48
;
534 cmd
.valid
.in
.hob
= IDE_VALID_IN_HOB
;
537 if (req_task
->out_flags
.all
) {
538 cmd
.ftf_flags
|= IDE_FTFLAG_FLAGGED
;
540 if (req_task
->out_flags
.b
.data
)
541 cmd
.ftf_flags
|= IDE_FTFLAG_OUT_DATA
;
543 if (req_task
->out_flags
.b
.nsector_hob
)
544 cmd
.valid
.out
.hob
|= IDE_VALID_NSECT
;
545 if (req_task
->out_flags
.b
.sector_hob
)
546 cmd
.valid
.out
.hob
|= IDE_VALID_LBAL
;
547 if (req_task
->out_flags
.b
.lcyl_hob
)
548 cmd
.valid
.out
.hob
|= IDE_VALID_LBAM
;
549 if (req_task
->out_flags
.b
.hcyl_hob
)
550 cmd
.valid
.out
.hob
|= IDE_VALID_LBAH
;
552 if (req_task
->out_flags
.b
.error_feature
)
553 cmd
.valid
.out
.tf
|= IDE_VALID_FEATURE
;
554 if (req_task
->out_flags
.b
.nsector
)
555 cmd
.valid
.out
.tf
|= IDE_VALID_NSECT
;
556 if (req_task
->out_flags
.b
.sector
)
557 cmd
.valid
.out
.tf
|= IDE_VALID_LBAL
;
558 if (req_task
->out_flags
.b
.lcyl
)
559 cmd
.valid
.out
.tf
|= IDE_VALID_LBAM
;
560 if (req_task
->out_flags
.b
.hcyl
)
561 cmd
.valid
.out
.tf
|= IDE_VALID_LBAH
;
563 cmd
.valid
.out
.tf
|= IDE_VALID_OUT_TF
;
564 if (cmd
.tf_flags
& IDE_TFLAG_LBA48
)
565 cmd
.valid
.out
.hob
|= IDE_VALID_OUT_HOB
;
568 if (req_task
->in_flags
.b
.data
)
569 cmd
.ftf_flags
|= IDE_FTFLAG_IN_DATA
;
571 if (req_task
->req_cmd
== IDE_DRIVE_TASK_RAW_WRITE
) {
572 /* fixup data phase if needed */
573 if (req_task
->data_phase
== TASKFILE_IN_DMAQ
||
574 req_task
->data_phase
== TASKFILE_IN_DMA
)
575 cmd
.tf_flags
|= IDE_TFLAG_WRITE
;
578 cmd
.protocol
= ATA_PROT_DMA
;
580 switch (req_task
->data_phase
) {
581 case TASKFILE_MULTI_OUT
:
582 if (!drive
->mult_count
) {
583 /* (hs): give up if multcount is not set */
584 pr_err("%s: %s Multimode Write multcount is not set\n",
585 drive
->name
, __func__
);
589 cmd
.tf_flags
|= IDE_TFLAG_MULTI_PIO
;
592 cmd
.protocol
= ATA_PROT_PIO
;
594 case TASKFILE_OUT_DMAQ
:
595 case TASKFILE_OUT_DMA
:
596 cmd
.tf_flags
|= IDE_TFLAG_WRITE
;
597 nsect
= taskout
/ SECTOR_SIZE
;
600 case TASKFILE_MULTI_IN
:
601 if (!drive
->mult_count
) {
602 /* (hs): give up if multcount is not set */
603 pr_err("%s: %s Multimode Read multcount is not set\n",
604 drive
->name
, __func__
);
608 cmd
.tf_flags
|= IDE_TFLAG_MULTI_PIO
;
611 cmd
.protocol
= ATA_PROT_PIO
;
613 case TASKFILE_IN_DMAQ
:
614 case TASKFILE_IN_DMA
:
615 nsect
= taskin
/ SECTOR_SIZE
;
618 case TASKFILE_NO_DATA
:
619 cmd
.protocol
= ATA_PROT_NODATA
;
626 if (req_task
->req_cmd
== IDE_DRIVE_TASK_NO_DATA
)
629 nsect
= (cmd
.hob
.nsect
<< 8) | cmd
.tf
.nsect
;
632 pr_err("%s: in/out command without data\n",
639 err
= ide_raw_taskfile(drive
, &cmd
, data_buf
, nsect
);
641 memcpy(req_task
->hob_ports
, &cmd
.hob
, HDIO_DRIVE_HOB_HDR_SIZE
- 2);
642 memcpy(req_task
->io_ports
, &cmd
.tf
, HDIO_DRIVE_TASK_HDR_SIZE
);
644 if ((cmd
.ftf_flags
& IDE_FTFLAG_SET_IN_FLAGS
) &&
645 req_task
->in_flags
.all
== 0) {
646 req_task
->in_flags
.all
= IDE_TASKFILE_STD_IN_FLAGS
;
647 if (drive
->dev_flags
& IDE_DFLAG_LBA48
)
648 req_task
->in_flags
.all
|= (IDE_HOB_STD_IN_FLAGS
<< 8);
651 if (copy_to_user(buf
, req_task
, tasksize
)) {
656 int outtotal
= tasksize
;
657 if (copy_to_user(buf
+ outtotal
, outbuf
, taskout
)) {
663 int intotal
= tasksize
+ taskout
;
664 if (copy_to_user(buf
+ intotal
, inbuf
, taskin
)) {