#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/ide.h>
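
/*
 * Suspend entry point for IDE devices using the generic PM callbacks.
 * Issues a REQ_TYPE_ATA_PM_SUSPEND request through the block layer and
 * handles the ACPI side (_GTM before, _PS3 after) once per port.
 */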
int generic_ide_suspend(struct device *dev, pm_message_t mesg)
{
	ide_drive_t *drive = to_ide_device(dev);
	ide_drive_t *pair = ide_get_pair_dev(drive);
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq;
	struct ide_pm_state rqpm;
	int ret;

	if (ide_port_acpi(hwif)) {
		/* call ACPI _GTM only once */
		if ((drive->dn & 1) == 0 || pair == NULL)
			ide_acpi_get_timing(hwif);
	}

	memset(&rqpm, 0, sizeof(rqpm));
	rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
	rq->cmd_type = REQ_TYPE_ATA_PM_SUSPEND;
	rq->special = &rqpm;
	rqpm.pm_step = IDE_PM_START_SUSPEND;
	if (mesg.event == PM_EVENT_PRETHAW)
		mesg.event = PM_EVENT_FREEZE;
	rqpm.pm_state = mesg.event;

	ret = blk_execute_rq(drive->queue, NULL, rq, 0);
	blk_put_request(rq);

	if (ret == 0 && ide_port_acpi(hwif)) {
		/* call ACPI _PS3 only after both devices are suspended */
		if ((drive->dn & 1) || pair == NULL)
			ide_acpi_set_state(hwif, 0);
	}

	return ret;
}
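
/* Completion callback: wake up the task sleeping in ide_pm_execute_rq(). */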
static void ide_end_sync_rq(struct request *rq, int error)
{
	complete(rq->end_io_data);
}
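
/*
 * Insert a PM request at the head of the queue and wait for it to
 * complete. The queue is run unconditionally, since at resume time it
 * is still stopped from the preceding suspend.
 */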
static int ide_pm_execute_rq(struct request *rq)
{
	struct request_queue *q = rq->q;
	DECLARE_COMPLETION_ONSTACK(wait);

	rq->end_io_data = &wait;
	rq->end_io = ide_end_sync_rq;

	spin_lock_irq(q->queue_lock);
	if (unlikely(blk_queue_dying(q))) {
		rq->rq_flags |= RQF_QUIET;
		rq->errors = -ENXIO;
		__blk_end_request_all(rq, rq->errors);
		spin_unlock_irq(q->queue_lock);
		return -ENXIO;
	}
	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT);
	__blk_run_queue_uncond(q);
	spin_unlock_irq(q->queue_lock);

	wait_for_completion_io(&wait);

	return rq->errors ? -EIO : 0;
}
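
/*
 * Resume entry point: restore the ACPI state and timings, then issue a
 * REQ_TYPE_ATA_PM_RESUME request. RQF_PREEMPT lets the request run
 * while the drive is still marked IDE_DFLAG_BLOCKED.
 */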
int generic_ide_resume(struct device *dev)
{
	ide_drive_t *drive = to_ide_device(dev);
	ide_drive_t *pair = ide_get_pair_dev(drive);
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq;
	struct ide_pm_state rqpm;
	int err;

	if (ide_port_acpi(hwif)) {
		/* call ACPI _PS0 / _STM only once */
		if ((drive->dn & 1) == 0 || pair == NULL) {
			ide_acpi_set_state(hwif, 1);
			ide_acpi_push_timing(hwif);
		}

		ide_acpi_exec_tfs(drive);
	}

	memset(&rqpm, 0, sizeof(rqpm));
	rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
	rq->cmd_type = REQ_TYPE_ATA_PM_RESUME;
	rq->rq_flags |= RQF_PREEMPT;
	rq->special = &rqpm;
	rqpm.pm_step = IDE_PM_START_RESUME;
	rqpm.pm_state = PM_EVENT_ON;

	err = ide_pm_execute_rq(rq);
	blk_put_request(rq);

	if (err == 0 && dev->driver) {
		struct ide_driver *drv = to_ide_driver(dev->driver);

		if (drv->resume)
			drv->resume(drive);
	}

	return err;
}
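
/*
 * Advance pm_step to the next state in the suspend or resume sequence;
 * a freeze stops after the cache flush, a full suspend goes on to
 * standby.
 */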
void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
{
	struct ide_pm_state *pm = rq->special;

#ifdef DEBUG_PM
	printk(KERN_INFO "%s: complete_power_step(step: %d)\n",
		drive->name, pm->pm_step);
#endif
	if (drive->media != ide_disk)
		return;

	switch (pm->pm_step) {
	case IDE_PM_FLUSH_CACHE:	/* Suspend step 1 (flush cache) */
		if (pm->pm_state == PM_EVENT_FREEZE)
			pm->pm_step = IDE_PM_COMPLETED;
		else
			pm->pm_step = IDE_PM_STANDBY;
		break;
	case IDE_PM_STANDBY:		/* Suspend step 2 (standby) */
		pm->pm_step = IDE_PM_COMPLETED;
		break;
	case IDE_PM_RESTORE_PIO:	/* Resume step 1 (restore PIO) */
		pm->pm_step = IDE_PM_IDLE;
		break;
	case IDE_PM_IDLE:		/* Resume step 2 (idle) */
		pm->pm_step = IDE_PM_RESTORE_DMA;
		break;
	}
}
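
/*
 * Issue the ATA command for the current pm_step, skipping steps that do
 * not apply to this device. Returns ide_stopped when no command needs
 * to be sent.
 */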
ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
{
	struct ide_pm_state *pm = rq->special;
	struct ide_cmd cmd = { };

	switch (pm->pm_step) {
	case IDE_PM_FLUSH_CACHE:	/* Suspend step 1 (flush cache) */
		if (drive->media != ide_disk)
			break;
		/* Not supported? Switch to next step now. */
		if (ata_id_flush_enabled(drive->id) == 0 ||
		    (drive->dev_flags & IDE_DFLAG_WCACHE) == 0) {
			ide_complete_power_step(drive, rq);
			return ide_stopped;
		}
		if (ata_id_flush_ext_enabled(drive->id))
			cmd.tf.command = ATA_CMD_FLUSH_EXT;
		else
			cmd.tf.command = ATA_CMD_FLUSH;
		goto out_do_tf;
	case IDE_PM_STANDBY:		/* Suspend step 2 (standby) */
		cmd.tf.command = ATA_CMD_STANDBYNOW1;
		goto out_do_tf;
	case IDE_PM_RESTORE_PIO:	/* Resume step 1 (restore PIO) */
		ide_set_max_pio(drive);
		/*
		 * skip IDE_PM_IDLE for ATAPI devices
		 */
		if (drive->media != ide_disk)
			pm->pm_step = IDE_PM_RESTORE_DMA;
		else
			ide_complete_power_step(drive, rq);
		return ide_stopped;
	case IDE_PM_IDLE:		/* Resume step 2 (idle) */
		cmd.tf.command = ATA_CMD_IDLEIMMEDIATE;
		goto out_do_tf;
	case IDE_PM_RESTORE_DMA:	/* Resume step 3 (restore DMA) */
		/*
		 * Right now, all we do is call ide_set_dma(drive);
		 * we could be smarter and check for the current xfer_speed
		 * in struct drive etc...
		 */
		if (drive->hwif->dma_ops == NULL)
			break;
		/*
		 * TODO: respect IDE_DFLAG_USING_DMA
		 */
		ide_set_dma(drive);
		break;
	}

	pm->pm_step = IDE_PM_COMPLETED;

	return ide_stopped;

out_do_tf:
	cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
	cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;
	cmd.protocol = ATA_PROT_NODATA;

	return do_rw_taskfile(drive, &cmd);
}
/**
 *	ide_complete_pm_rq - end the current Power Management request
 *	@drive: target drive
 *	@rq: request
 *
 *	This function cleans up the current PM request and stops the queue
 *	if necessary.
 */
void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
{
	struct request_queue *q = drive->queue;
	struct ide_pm_state *pm = rq->special;
	unsigned long flags;

	ide_complete_power_step(drive, rq);
	if (pm->pm_step != IDE_PM_COMPLETED)
		return;

#ifdef DEBUG_PM
	printk("%s: completing PM request, %s\n", drive->name,
	       (rq->cmd_type == REQ_TYPE_ATA_PM_SUSPEND) ? "suspend" : "resume");
#endif
	spin_lock_irqsave(q->queue_lock, flags);
	if (rq->cmd_type == REQ_TYPE_ATA_PM_SUSPEND)
		blk_stop_queue(q);
	else
		drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
	spin_unlock_irqrestore(q->queue_lock, flags);

	drive->hwif->rq = NULL;

	if (blk_end_request(rq, 0, 0))
		BUG();
}
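
/*
 * Called when the request handler sees a PM request: mark the drive
 * blocked at the start of a suspend, and at the start of a resume wait
 * for the port to come out of BSY before restarting the queue.
 */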
void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
	struct ide_pm_state *pm = rq->special;

	if (rq->cmd_type == REQ_TYPE_ATA_PM_SUSPEND &&
	    pm->pm_step == IDE_PM_START_SUSPEND)
		/* Mark drive blocked when starting the suspend sequence. */
		drive->dev_flags |= IDE_DFLAG_BLOCKED;
	else if (rq->cmd_type == REQ_TYPE_ATA_PM_RESUME &&
		 pm->pm_step == IDE_PM_START_RESUME) {
		/*
		 * The first thing we do on wakeup is to wait for the BSY bit
		 * to go away (with a looong timeout) as a drive on this hwif
		 * may just be POSTing itself.
		 * We do that before even selecting, as the "other" device on
		 * the bus may be broken enough to walk on our toes at this
		 * point.
		 */
		ide_hwif_t *hwif = drive->hwif;
		const struct ide_tp_ops *tp_ops = hwif->tp_ops;
		struct request_queue *q = drive->queue;
		unsigned long flags;
		int rc;
#ifdef DEBUG_PM
		printk("%s: Wakeup request inited, waiting for !BSY...\n",
		       drive->name);
#endif
		rc = ide_wait_not_busy(hwif, 35000);
		if (rc)
			printk(KERN_WARNING "%s: bus not ready on wakeup\n",
			       drive->name);
		tp_ops->dev_select(drive);
		tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
		rc = ide_wait_not_busy(hwif, 100000);
		if (rc)
			printk(KERN_WARNING "%s: drive not ready on wakeup\n",
			       drive->name);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}