// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/ide.h>
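/**
 *	generic_ide_suspend - suspend an IDE device
 *	@dev: device to suspend
 *	@mesg: power management event message
 *
 *	Issues an ATA_PRIV_PM_SUSPEND request on the drive's queue and,
 *	when the port has ACPI support, invokes the firmware timing and
 *	power-state methods once per device pair.
 */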
int generic_ide_suspend(struct device *dev, pm_message_t mesg)
{
	ide_drive_t *drive = to_ide_device(dev);
	ide_drive_t *pair = ide_get_pair_dev(drive);
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq;
	struct ide_pm_state rqpm;
	int ret;

	if (ide_port_acpi(hwif)) {
		/* call ACPI _GTM only once */
		if ((drive->dn & 1) == 0 || pair == NULL)
			ide_acpi_get_timing(hwif);
	}

	memset(&rqpm, 0, sizeof(rqpm));
	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
	ide_req(rq)->type = ATA_PRIV_PM_SUSPEND;
	ide_req(rq)->special = &rqpm;
	rqpm.pm_step = IDE_PM_START_SUSPEND;
	if (mesg.event == PM_EVENT_PRETHAW)
		mesg.event = PM_EVENT_FREEZE;
	rqpm.pm_state = mesg.event;

	blk_execute_rq(drive->queue, NULL, rq, 0);
	ret = scsi_req(rq)->result ? -EIO : 0;
	blk_put_request(rq);

	if (ret == 0 && ide_port_acpi(hwif)) {
		/* call ACPI _PS3 only after both devices are suspended */
		if ((drive->dn & 1) || pair == NULL)
			ide_acpi_set_state(hwif, 0);
	}

	return ret;
}
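/*
 * Execute a PM request on its queue; if the queue is already dying
 * (e.g. the device is being torn down), fail the request up front
 * with -ENXIO instead of queueing it.
 */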
static int ide_pm_execute_rq(struct request *rq)
{
	struct request_queue *q = rq->q;

	if (unlikely(blk_queue_dying(q))) {
		rq->rq_flags |= RQF_QUIET;
		scsi_req(rq)->result = -ENXIO;
		blk_mq_end_request(rq, BLK_STS_OK);
		return -ENXIO;
	}
	blk_execute_rq(q, NULL, rq, true);

	return scsi_req(rq)->result ? -EIO : 0;
}
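/**
 *	generic_ide_resume - resume an IDE device
 *	@dev: device to resume
 *
 *	Restarts the stopped hardware queues, replays ACPI timing and
 *	power-state settings where available, issues an ATA_PRIV_PM_RESUME
 *	request and finally gives the attached ide_driver a chance to run
 *	its own resume handler.
 */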
int generic_ide_resume(struct device *dev)
{
	ide_drive_t *drive = to_ide_device(dev);
	ide_drive_t *pair = ide_get_pair_dev(drive);
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq;
	struct ide_pm_state rqpm;
	int err;

	blk_mq_start_stopped_hw_queues(drive->queue, true);

	if (ide_port_acpi(hwif)) {
		/* call ACPI _PS0 / _STM only once */
		if ((drive->dn & 1) == 0 || pair == NULL) {
			ide_acpi_set_state(hwif, 1);
			ide_acpi_push_timing(hwif);
		}

		ide_acpi_exec_tfs(drive);
	}

	memset(&rqpm, 0, sizeof(rqpm));
	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_PREEMPT);
	ide_req(rq)->type = ATA_PRIV_PM_RESUME;
	ide_req(rq)->special = &rqpm;
	rqpm.pm_step = IDE_PM_START_RESUME;
	rqpm.pm_state = PM_EVENT_ON;

	err = ide_pm_execute_rq(rq);
	blk_put_request(rq);

	if (err == 0 && dev->driver) {
		struct ide_driver *drv = to_ide_driver(dev->driver);

		if (drv->resume)
			drv->resume(drive);
	}

	return err;
}
void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
{
	struct ide_pm_state *pm = ide_req(rq)->special;

#ifdef DEBUG_PM
	printk(KERN_INFO "%s: complete_power_step(step: %d)\n",
		drive->name, pm->pm_step);
#endif
	if (drive->media != ide_disk)
		return;

	switch (pm->pm_step) {
	case IDE_PM_FLUSH_CACHE:	/* Suspend step 1 (flush cache) */
		if (pm->pm_state == PM_EVENT_FREEZE)
			pm->pm_step = IDE_PM_COMPLETED;
		else
			pm->pm_step = IDE_PM_STANDBY;
		break;
	case IDE_PM_STANDBY:		/* Suspend step 2 (standby) */
		pm->pm_step = IDE_PM_COMPLETED;
		break;
	case IDE_PM_RESTORE_PIO:	/* Resume step 1 (restore PIO) */
		pm->pm_step = IDE_PM_IDLE;
		break;
	case IDE_PM_IDLE:		/* Resume step 2 (idle) */
		pm->pm_step = IDE_PM_RESTORE_DMA;
		break;
	}
}
ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
{
	struct ide_pm_state *pm = ide_req(rq)->special;
	struct ide_cmd cmd = { };

	switch (pm->pm_step) {
	case IDE_PM_FLUSH_CACHE:	/* Suspend step 1 (flush cache) */
		if (drive->media != ide_disk)
			break;
		/* Not supported? Switch to next step now. */
		if (ata_id_flush_enabled(drive->id) == 0 ||
		    (drive->dev_flags & IDE_DFLAG_WCACHE) == 0) {
			ide_complete_power_step(drive, rq);
			return ide_stopped;
		}
		if (ata_id_flush_ext_enabled(drive->id))
			cmd.tf.command = ATA_CMD_FLUSH_EXT;
		else
			cmd.tf.command = ATA_CMD_FLUSH;
		goto out_do_tf;
	case IDE_PM_STANDBY:		/* Suspend step 2 (standby) */
		cmd.tf.command = ATA_CMD_STANDBYNOW1;
		goto out_do_tf;
	case IDE_PM_RESTORE_PIO:	/* Resume step 1 (restore PIO) */
		ide_set_max_pio(drive);
		/*
		 * skip IDE_PM_IDLE for ATAPI devices
		 */
		if (drive->media != ide_disk)
			pm->pm_step = IDE_PM_RESTORE_DMA;
		else
			ide_complete_power_step(drive, rq);
		return ide_stopped;
	case IDE_PM_IDLE:		/* Resume step 2 (idle) */
		cmd.tf.command = ATA_CMD_IDLEIMMEDIATE;
		goto out_do_tf;
	case IDE_PM_RESTORE_DMA:	/* Resume step 3 (restore DMA) */
		/*
		 * Right now, all we do is call ide_set_dma(drive),
		 * we could be smarter and check for current xfer_speed
		 * in struct drive etc...
		 */
		if (drive->hwif->dma_ops == NULL)
			break;
		/*
		 * TODO: respect IDE_DFLAG_USING_DMA
		 */
		ide_set_dma(drive);
		break;
	}

	pm->pm_step = IDE_PM_COMPLETED;

	return ide_stopped;

out_do_tf:
	cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
	cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;
	cmd.protocol = ATA_PROT_NODATA;

	return do_rw_taskfile(drive, &cmd);
}
/**
 *	ide_complete_pm_rq - end the current Power Management request
 *	@drive: target drive
 *	@rq: request
 *
 *	This function cleans up the current PM request and stops the queue
 *	if necessary.
 */
void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
{
	struct request_queue *q = drive->queue;
	struct ide_pm_state *pm = ide_req(rq)->special;

	ide_complete_power_step(drive, rq);
	if (pm->pm_step != IDE_PM_COMPLETED)
		return;

#ifdef DEBUG_PM
	printk("%s: completing PM request, %s\n", drive->name,
	       (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND) ? "suspend" : "resume");
#endif
	if (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND)
		blk_mq_stop_hw_queues(q);
	else
		drive->dev_flags &= ~IDE_DFLAG_BLOCKED;

	drive->hwif->rq = NULL;

	blk_mq_end_request(rq, BLK_STS_OK);
}
void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
	struct ide_pm_state *pm = ide_req(rq)->special;

	if (blk_rq_is_private(rq) &&
	    ide_req(rq)->type == ATA_PRIV_PM_SUSPEND &&
	    pm->pm_step == IDE_PM_START_SUSPEND)
		/* Mark drive blocked when starting the suspend sequence. */
		drive->dev_flags |= IDE_DFLAG_BLOCKED;
	else if (blk_rq_is_private(rq) &&
		 ide_req(rq)->type == ATA_PRIV_PM_RESUME &&
		 pm->pm_step == IDE_PM_START_RESUME) {
		/*
		 * The first thing we do on wakeup is to wait for BSY bit to
		 * go away (with a looong timeout) as a drive on this hwif may
		 * just be POSTing itself.
		 * We do that before even selecting as the "other" device on
		 * the bus may be broken enough to walk on our toes at this
		 * point.
		 */
		ide_hwif_t *hwif = drive->hwif;
		const struct ide_tp_ops *tp_ops = hwif->tp_ops;
		struct request_queue *q = drive->queue;
		int rc;
#ifdef DEBUG_PM
		printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
#endif
		rc = ide_wait_not_busy(hwif, 35000);
		if (rc)
			printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
		tp_ops->dev_select(drive);
		tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
		rc = ide_wait_not_busy(hwif, 100000);
		if (rc)
			printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);

		blk_mq_start_hw_queues(q);
	}
}