/***************************************************************************
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *
 * Copyright (C) 2002 by Alan Korr
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include "ata_idle_notify.h"
#include "ata-target.h"
#define SECTOR_SIZE     (512)

#define ATA_FEATURE     ATA_ERROR

#define ATA_STATUS      ATA_COMMAND
#define ATA_ALT_STATUS  ATA_CONTROL
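
/* The feature/error and command/status registers share the same task-file
 * addresses; which one you get depends on whether you write or read them,
 * so the aliases above simply give both views a name. */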

#define SELECT_DEVICE1  0x10
#define SELECT_LBA      0x40

#define CONTROL_nIEN    0x02
#define CONTROL_SRST    0x04

#define CMD_READ_SECTORS           0x20
#define CMD_WRITE_SECTORS          0x30
#define CMD_WRITE_SECTORS_EXT      0x34
#define CMD_READ_MULTIPLE          0xC4
#define CMD_READ_MULTIPLE_EXT      0x29
#define CMD_WRITE_MULTIPLE         0xC5
#define CMD_SET_MULTIPLE_MODE      0xC6
#define CMD_STANDBY_IMMEDIATE      0xE0
#define CMD_STANDBY                0xE2
#define CMD_IDENTIFY               0xEC
#define CMD_SLEEP                  0xE6
#define CMD_SET_FEATURES           0xEF
#define CMD_SECURITY_FREEZE_LOCK   0xF5

#define CMD_READ_DMA               0xC8
#define CMD_READ_DMA_EXT           0x25
#define CMD_WRITE_DMA              0xCA
#define CMD_WRITE_DMA_EXT          0x35
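
/* The *_EXT opcodes are the 48-bit LBA variants of the commands above; they
 * are only issued when lba48 is set below (drives needing more than 28-bit
 * sector addressing). */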

/* Should all be < 0x100 (which are reserved for control messages) */

#define READ_TIMEOUT 5*HZ

#ifdef HAVE_ATA_POWER_OFF
#define ATA_POWER_OFF_TIMEOUT 2*HZ

#ifdef ATA_DRIVER_CLOSE
static unsigned int ata_thread_id = 0;

#if defined(MAX_PHYS_SECTOR_SIZE) && MEM == 64
/* Hack - what's the deal with 5g? */
    struct thread_entry *thread;
    volatile unsigned char locked;
    IF_COP( struct corelock cl; )

static void ata_lock_init(struct ata_lock *l)
    corelock_init(&l->cl);

static void ata_lock_lock(struct ata_lock *l)
    struct thread_entry * const current =
        thread_id_entry(THREAD_ID_CURRENT);

    if (current == l->thread)

    corelock_lock(&l->cl);

    IF_PRIO( current->skip_count = -1; )

    while (l->locked != 0)
        corelock_unlock(&l->cl);
        corelock_lock(&l->cl);

    corelock_unlock(&l->cl);

static void ata_lock_unlock(struct ata_lock *l)
    corelock_lock(&l->cl);

    IF_PRIO( l->thread->skip_count = 0; )

    corelock_unlock(&l->cl);

#define mutex          ata_lock
#define mutex_init     ata_lock_init
#define mutex_lock     ata_lock_lock
#define mutex_unlock   ata_lock_unlock
#endif /* MAX_PHYS_SECTOR_SIZE */
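
/* With the defines above, the rest of this file keeps using the generic
 * mutex_*() names, but on these targets they resolve to the ata_lock
 * implementation just defined rather than the kernel mutex. */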

#if defined(HAVE_USBSTACK) && defined(USE_ROCKBOX_USB)
#define ALLOW_USB_SPINDOWN

static struct mutex ata_mtx SHAREDBSS_ATTR;
static int ata_device; /* device 0 (master) or 1 (slave) */

static int spinup_time = 0;
#if (CONFIG_LED == LED_REAL)
static bool ata_led_enabled = true;
static bool ata_led_on = false;

static bool spinup = false;
static bool sleeping = true;
static bool poweroff = false;
static long sleep_timeout = 5*HZ;

static bool lba48 = false; /* set for 48 bit addressing */

static long ata_stack[(DEFAULT_STACK_SIZE*3)/sizeof(long)];
static const char ata_thread_name[] = "ata";
static struct event_queue ata_queue SHAREDBSS_ATTR;
static bool initialized = false;

static long last_user_activity = -1;
static long last_disk_activity = -1;

static unsigned long total_sectors;
static int multisectors; /* number of supported multisectors */
static unsigned short identify_info[SECTOR_SIZE/2];
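
/* identify_info holds the 256 16-bit words returned by the ATA IDENTIFY
 * DEVICE command; the rest of the driver indexes it by word number as given
 * in the ATA specification. */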

#ifdef MAX_PHYS_SECTOR_SIZE

struct sector_cache_entry {
    unsigned long sectornum;  /* logical sector */
    unsigned char data[MAX_PHYS_SECTOR_SIZE];

/* buffer for reading and writing large physical sectors */
static struct sector_cache_entry sector_cache;
static int phys_sector_mult = 1;

static int dma_mode = 0;

static int ata_power_on(void);
static int perform_soft_reset(void);
static int set_multiple_mode(int sectors);
static int set_features(void);

STATICIRAM ICODE_ATTR int wait_for_bsy(void)
    long timeout = current_tick + HZ*30;

        if (!(ATA_STATUS & STATUS_BSY))
        last_disk_activity = current_tick;
    } while (TIME_BEFORE(current_tick, timeout));

    return 0; /* timeout */

STATICIRAM ICODE_ATTR int wait_for_rdy(void)
    timeout = current_tick + HZ*10;

        if (ATA_ALT_STATUS & STATUS_RDY)
        last_disk_activity = current_tick;
    } while (TIME_BEFORE(current_tick, timeout));

    return 0; /* timeout */

STATICIRAM ICODE_ATTR int wait_for_start_of_transfer(void)
    return (ATA_ALT_STATUS & (STATUS_BSY|STATUS_DRQ)) == STATUS_DRQ;

STATICIRAM ICODE_ATTR int wait_for_end_of_transfer(void)
    return (ATA_ALT_STATUS & (STATUS_RDY|STATUS_DRQ)) == STATUS_RDY;
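
/* Status bit semantics used by the helpers above: BSY means the device still
 * owns the task file, DRQ means it is ready to transfer a data word, RDY means
 * it can accept a new command. A PIO transfer starts once DRQ is set with BSY
 * clear and is over once RDY is set with DRQ clear, which is exactly what the
 * two functions test. */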

#if (CONFIG_LED == LED_REAL)
/* Conditionally block LED access for the ATA driver, so the LED can be
 * (mis)used for other purposes */
static void ata_led(bool on)

#define ata_led(on) led(on)

#ifndef ATA_OPTIMIZED_READING
STATICIRAM ICODE_ATTR void copy_read_sectors(unsigned char* buf, int wordcount)
    unsigned short tmp = 0;

    if ( (unsigned long)buf & 1)
    {   /* not 16-bit aligned, copy byte by byte */
        unsigned char* bufend = buf + wordcount*2;
#if defined(SWAP_WORDS) || defined(ROCKBOX_LITTLE_ENDIAN)
            *buf++ = tmp & 0xff;    /* I assume big endian */
            *buf++ = tmp >> 8;      /*  and don't use the SWAB16 macro */
        } while (buf < bufend); /* tail loop is faster */
    {   /* 16-bit aligned, can do faster copy */
        unsigned short* wbuf = (unsigned short*)buf;
        unsigned short* wbufend = wbuf + wordcount;
            *wbuf = swap16(ATA_DATA);
        } while (++wbuf < wbufend); /* tail loop is faster */
#endif /* !ATA_OPTIMIZED_READING */
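
/* Whether the aligned path needs swap16() depends on how the 16-bit ATA data
 * bus is wired relative to the CPU byte order; SWAP_WORDS is defined by the
 * target configuration on targets where the bytes arrive swapped. */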

#ifdef MAX_PHYS_SECTOR_SIZE
static int _read_sectors(unsigned long start,

int ata_read_sectors(IF_MD2(int drive,)

#ifndef MAX_PHYS_SECTOR_SIZE
#ifdef HAVE_MULTIDRIVE
    (void)drive; /* unused for now */

    mutex_lock(&ata_mtx);

    if (start + incount > total_sectors) {

    last_disk_activity = current_tick;
    spinup_start = current_tick;

        if (ata_power_on()) {

            if (perform_soft_reset()) {

        timeout = current_tick + READ_TIMEOUT;

        SET_REG(ATA_SELECT, ata_device);

        while (TIME_BEFORE(current_tick, timeout)) {

            last_disk_activity = current_tick;

        /* If DMA is supported and parameters are ok for DMA, use it */
        if (dma_mode && ata_dma_setup(inbuf, incount * SECTOR_SIZE, false))

            SET_REG(ATA_NSECTOR, count >> 8);
            SET_REG(ATA_NSECTOR, count & 0xff);
            SET_REG(ATA_SECTOR, (start >> 24) & 0xff); /* 31:24 */
            SET_REG(ATA_SECTOR, start & 0xff); /* 7:0 */
            SET_REG(ATA_LCYL, 0); /* 39:32 */
            SET_REG(ATA_LCYL, (start >> 8) & 0xff); /* 15:8 */
            SET_REG(ATA_HCYL, 0); /* 47:40 */
            SET_REG(ATA_HCYL, (start >> 16) & 0xff); /* 23:16 */
            SET_REG(ATA_SELECT, SELECT_LBA | ata_device);
            SET_REG(ATA_COMMAND, usedma ? CMD_READ_DMA_EXT : CMD_READ_MULTIPLE_EXT);
            SET_REG(ATA_COMMAND, CMD_READ_MULTIPLE_EXT);
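
            /* 48-bit (lba48) commands load each task-file register twice: the
             * first write latches the high-order ("previous") byte and the
             * second the low-order ("current") byte, which is why NSECTOR,
             * SECTOR, LCYL and HCYL are all written twice above. The two
             * ATA_COMMAND writes are the DMA-capable and PIO-only build
             * variants of the same step. */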

            SET_REG(ATA_NSECTOR, count & 0xff); /* 0 means 256 sectors */
            SET_REG(ATA_SECTOR, start & 0xff);
            SET_REG(ATA_LCYL, (start >> 8) & 0xff);
            SET_REG(ATA_HCYL, (start >> 16) & 0xff);
            SET_REG(ATA_SELECT, ((start >> 24) & 0xf) | SELECT_LBA | ata_device);
            SET_REG(ATA_COMMAND, usedma ? CMD_READ_DMA : CMD_READ_MULTIPLE);
            SET_REG(ATA_COMMAND, CMD_READ_MULTIPLE);

            /* wait at least 400ns between writing command and reading status */
            __asm__ volatile ("nop");
            __asm__ volatile ("nop");
            __asm__ volatile ("nop");
            __asm__ volatile ("nop");
            __asm__ volatile ("nop");

                if (!ata_dma_finish())
                    perform_soft_reset();

                spinup_time = current_tick - spinup_start;
#endif /* HAVE_ATA_DMA */

            if (!wait_for_start_of_transfer()) {
                /* We have timed out waiting for RDY and/or DRQ, possibly
                   because the hard drive is shaking and has problems
                   reading the data. We have two options:
                   2) Perform a soft reset and try again.

                   We choose alternative 2. */
                perform_soft_reset();

                spinup_time = current_tick - spinup_start;

            /* read the status register exactly once per loop */

            if (count >= multisectors)
                sectors = multisectors;

            wordcount = sectors * SECTOR_SIZE / 2;

            copy_read_sectors(buf, wordcount);

            /* "Device errors encountered during READ MULTIPLE commands
                are posted at the beginning of the block or partial block
                transfer, but the DRQ bit is still set to one and the data
                transfer shall take place, including transfer of corrupted" */

            if ( status & (STATUS_BSY | STATUS_ERR | STATUS_DF) ) {
                perform_soft_reset();

            buf += sectors * SECTOR_SIZE; /* Advance one chunk of sectors */

            last_disk_activity = current_tick;

        if(!ret && !wait_for_end_of_transfer()) {
            perform_soft_reset();

#ifndef MAX_PHYS_SECTOR_SIZE
    mutex_unlock(&ata_mtx);

#ifndef ATA_OPTIMIZED_WRITING
STATICIRAM ICODE_ATTR void copy_write_sectors(const unsigned char* buf,

    if ( (unsigned long)buf & 1)
    {   /* not 16-bit aligned, copy byte by byte */
        unsigned short tmp = 0;
        const unsigned char* bufend = buf + wordcount*2;
#if defined(SWAP_WORDS) || defined(ROCKBOX_LITTLE_ENDIAN)
            tmp = (unsigned short) *buf++;
            tmp |= (unsigned short) *buf++ << 8;
            SET_16BITREG(ATA_DATA, tmp);
            tmp = (unsigned short) *buf++ << 8;
            tmp |= (unsigned short) *buf++;
            SET_16BITREG(ATA_DATA, tmp);
        } while (buf < bufend); /* tail loop is faster */
    {   /* 16-bit aligned, can do faster copy */
        unsigned short* wbuf = (unsigned short*)buf;
        unsigned short* wbufend = wbuf + wordcount;
            SET_16BITREG(ATA_DATA, swap16(*wbuf));
            SET_16BITREG(ATA_DATA, *wbuf);
        } while (++wbuf < wbufend); /* tail loop is faster */
#endif /* !ATA_OPTIMIZED_WRITING */

#ifdef MAX_PHYS_SECTOR_SIZE
static int _write_sectors(unsigned long start,

int ata_write_sectors(IF_MD2(int drive,)

#ifndef MAX_PHYS_SECTOR_SIZE
#ifdef HAVE_MULTIDRIVE
    (void)drive; /* unused for now */

    mutex_lock(&ata_mtx);

    if (start + count > total_sectors)
        panicf("Writing past end of disk");

    last_disk_activity = current_tick;
    spinup_start = current_tick;

        if (ata_power_on()) {

            if (perform_soft_reset()) {

        SET_REG(ATA_SELECT, ata_device);

        /* If DMA is supported and parameters are ok for DMA, use it */
        if (dma_mode && ata_dma_setup((void *)buf, count * SECTOR_SIZE, true))

            SET_REG(ATA_NSECTOR, count >> 8);
            SET_REG(ATA_NSECTOR, count & 0xff);
            SET_REG(ATA_SECTOR, (start >> 24) & 0xff); /* 31:24 */
            SET_REG(ATA_SECTOR, start & 0xff); /* 7:0 */
            SET_REG(ATA_LCYL, 0); /* 39:32 */
            SET_REG(ATA_LCYL, (start >> 8) & 0xff); /* 15:8 */
            SET_REG(ATA_HCYL, 0); /* 47:40 */
            SET_REG(ATA_HCYL, (start >> 16) & 0xff); /* 23:16 */
            SET_REG(ATA_SELECT, SELECT_LBA | ata_device);
            SET_REG(ATA_COMMAND, usedma ? CMD_WRITE_DMA_EXT : CMD_WRITE_SECTORS_EXT);
            SET_REG(ATA_COMMAND, CMD_WRITE_SECTORS_EXT);

            SET_REG(ATA_NSECTOR, count & 0xff); /* 0 means 256 sectors */
            SET_REG(ATA_SECTOR, start & 0xff);
            SET_REG(ATA_LCYL, (start >> 8) & 0xff);
            SET_REG(ATA_HCYL, (start >> 16) & 0xff);
            SET_REG(ATA_SELECT, ((start >> 24) & 0xf) | SELECT_LBA | ata_device);
            SET_REG(ATA_COMMAND, usedma ? CMD_WRITE_DMA : CMD_WRITE_SECTORS);
            SET_REG(ATA_COMMAND, CMD_WRITE_SECTORS);

            if (!ata_dma_finish())

                spinup_time = current_tick - spinup_start;
#endif /* HAVE_ATA_DMA */

        for (i=0; i<count; i++) {

            if (!wait_for_start_of_transfer()) {

                spinup_time = current_tick - spinup_start;

            copy_write_sectors(buf, SECTOR_SIZE/2);

            /* reading the status register clears the interrupt */

            last_disk_activity = current_tick;

    if(!ret && !wait_for_end_of_transfer()) {
        DEBUGF("End of transfer failed. -- jyp");

#ifndef MAX_PHYS_SECTOR_SIZE
    mutex_unlock(&ata_mtx);

#ifdef MAX_PHYS_SECTOR_SIZE
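
/* Some drives use a physical sector that is a power-of-two multiple of the
 * 512-byte logical sector and can only be accessed whole. The single-entry
 * cache below holds one physical sector so partial reads and writes can be
 * done as read-modify-write; phys_sector_mult is the logical-per-physical
 * ratio determined from the identify data in ata_init(). */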

static int cache_sector(unsigned long sector)
    sector &= ~(phys_sector_mult - 1);
              /* round down to physical sector boundary */

    /* check whether the sector is already cached */
    if (sector_cache.inuse && (sector_cache.sectornum == sector))

    /* not found: read the sector */
    sector_cache.inuse = false;
    rc = _read_sectors(sector, phys_sector_mult, sector_cache.data);

        sector_cache.sectornum = sector;
        sector_cache.inuse = true;

static inline int flush_current_sector(void)
    return _write_sectors(sector_cache.sectornum, phys_sector_mult,

int ata_read_sectors(IF_MD2(int drive,)

#ifdef HAVE_MULTIDRIVE
    (void)drive; /* unused for now */

    mutex_lock(&ata_mtx);

    offset = start & (phys_sector_mult - 1);

    if (offset) /* first partial sector */
        int partcount = MIN(incount, phys_sector_mult - offset);

        rc = cache_sector(start);

        memcpy(inbuf, sector_cache.data + offset * SECTOR_SIZE,
               partcount * SECTOR_SIZE);

        inbuf += partcount * SECTOR_SIZE;
        incount -= partcount;

    offset = incount & (phys_sector_mult - 1);

        rc = _read_sectors(start, incount, inbuf);

        inbuf += incount * SECTOR_SIZE;

        rc = cache_sector(start);

        memcpy(inbuf, sector_cache.data, offset * SECTOR_SIZE);

    mutex_unlock(&ata_mtx);

int ata_write_sectors(IF_MD2(int drive,)

#ifdef HAVE_MULTIDRIVE
    (void)drive; /* unused for now */

    mutex_lock(&ata_mtx);

    offset = start & (phys_sector_mult - 1);

    if (offset) /* first partial sector */
        int partcount = MIN(count, phys_sector_mult - offset);

        rc = cache_sector(start);

        memcpy(sector_cache.data + offset * SECTOR_SIZE, buf,
               partcount * SECTOR_SIZE);
        rc = flush_current_sector();

        buf += partcount * SECTOR_SIZE;

    offset = count & (phys_sector_mult - 1);

        rc = _write_sectors(start, count, buf);

        buf += count * SECTOR_SIZE;

        rc = cache_sector(start);

        memcpy(sector_cache.data, buf, offset * SECTOR_SIZE);
        rc = flush_current_sector();

    mutex_unlock(&ata_mtx);

#endif /* MAX_PHYS_SECTOR_SIZE */

static int check_registers(void)
    if ( ATA_STATUS & STATUS_BSY )

    for (i = 0; i<64; i++) {
        SET_REG(ATA_NSECTOR, WRITE_PATTERN1);
        SET_REG(ATA_SECTOR,  WRITE_PATTERN2);
        SET_REG(ATA_LCYL,    WRITE_PATTERN3);
        SET_REG(ATA_HCYL,    WRITE_PATTERN4);

        if (((ATA_NSECTOR & READ_PATTERN1_MASK) == READ_PATTERN1) &&
            ((ATA_SECTOR  & READ_PATTERN2_MASK) == READ_PATTERN2) &&
            ((ATA_LCYL    & READ_PATTERN3_MASK) == READ_PATTERN3) &&
            ((ATA_HCYL    & READ_PATTERN4_MASK) == READ_PATTERN4))
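
        /* When no command is active the task-file registers behave as simple
         * read/write scratch registers, so getting the test patterns back is
         * taken as evidence that a device is actually responding on the bus. */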

static int freeze_lock(void)
    /* does the disk support Security Mode feature set? */
    if (identify_info[82] & 2)
        SET_REG(ATA_SELECT, ata_device);

        SET_REG(ATA_COMMAND, CMD_SECURITY_FREEZE_LOCK);

void ata_spindown(int seconds)
    sleep_timeout = seconds * HZ;

bool ata_disk_is_active(void)

static int ata_perform_sleep(void)
    mutex_lock(&ata_mtx);

    SET_REG(ATA_SELECT, ata_device);

    if(!wait_for_rdy()) {
        DEBUGF("ata_perform_sleep() - not RDY\n");
        mutex_unlock(&ata_mtx);

    SET_REG(ATA_COMMAND, CMD_SLEEP);

        DEBUGF("ata_perform_sleep() - CMD failed\n");
        mutex_unlock(&ata_mtx);

    mutex_unlock(&ata_mtx);

    queue_post(&ata_queue, Q_SLEEP, 0);

void ata_sleepnow(void)
    if (!spinup && !sleeping && !ata_mtx.locked && initialized)
        call_storage_idle_notifys(false);

    last_user_activity = current_tick;

static void ata_thread(void)
    static long last_sleep = 0;
    struct queue_event ev;
    static long last_seen_mtx_unlock = 0;
#ifdef ALLOW_USB_SPINDOWN
    static bool usb_mode = false;

        queue_wait_w_tmo(&ata_queue, &ev, HZ/2);

            if (!spinup && !sleeping)

                    if (!last_seen_mtx_unlock)
                        last_seen_mtx_unlock = current_tick;
                    if (TIME_AFTER(current_tick, last_seen_mtx_unlock+(HZ*2)))
#ifdef ALLOW_USB_SPINDOWN
                        call_storage_idle_notifys(false);

                        last_seen_mtx_unlock = 0;

            if ( sleep_timeout &&
                 TIME_AFTER( current_tick,
                             last_user_activity + sleep_timeout ) &&
                 TIME_AFTER( current_tick,
                             last_disk_activity + sleep_timeout ) )
#ifdef ALLOW_USB_SPINDOWN
                call_storage_idle_notifys(true);

                ata_perform_sleep();
                last_sleep = current_tick;

#ifdef HAVE_ATA_POWER_OFF
            if ( !spinup && sleeping && !poweroff &&
                 TIME_AFTER( current_tick, last_sleep + ATA_POWER_OFF_TIMEOUT ))
                mutex_lock(&ata_mtx);
                ide_power_enable(false);
                mutex_unlock(&ata_mtx);

            case SYS_USB_CONNECTED:
                /* Tell the USB thread that we are safe */
                DEBUGF("ata_thread got SYS_USB_CONNECTED\n");
#ifdef ALLOW_USB_SPINDOWN
                usb_acknowledge(SYS_USB_CONNECTED_ACK);
                /* There is no need to force ATA power on */

                mutex_lock(&ata_mtx);
                perform_soft_reset();
                mutex_unlock(&ata_mtx);

                /* Wait until the USB cable is extracted again */
                usb_acknowledge(SYS_USB_CONNECTED_ACK);
                usb_wait_for_disconnect(&ata_queue);

#ifdef ALLOW_USB_SPINDOWN
            case SYS_USB_DISCONNECTED:
                /* Tell the USB thread that we are ready again */
                DEBUGF("ata_thread got SYS_USB_DISCONNECTED\n");
                usb_acknowledge(SYS_USB_DISCONNECTED_ACK);
#endif /* USB_NONE */

#ifdef ALLOW_USB_SPINDOWN
                call_storage_idle_notifys(false);

                last_disk_activity = current_tick - sleep_timeout + (HZ/2);

#ifdef ATA_DRIVER_CLOSE

/* Hardware reset protocol as specified in chapter 9.1, ATA spec draft v5 */
static int ata_hard_reset(void)
    mutex_lock(&ata_mtx);

    SET_REG(ATA_SELECT, ata_device); /* select the right device */
    ret = wait_for_bsy();

    /* Massage the return code so it is 0 on success and -1 on failure */

    mutex_unlock(&ata_mtx);

static int perform_soft_reset(void)
/* If this code is allowed to run on a Nano, the next reads from the flash will
 * time out, so we disable it. It shouldn't be necessary anyway, since the
 * ATA -> Flash interface automatically sleeps almost immediately after the
 * last command. */

    SET_REG(ATA_SELECT, SELECT_LBA | ata_device);
    SET_REG(ATA_CONTROL, CONTROL_nIEN|CONTROL_SRST);
    sleep(1); /* >= 5us */

    /* DMA requires INTRQ be enabled */
    SET_REG(ATA_CONTROL, 0);
    SET_REG(ATA_CONTROL, CONTROL_nIEN);

    sleep(1); /* >2ms */

    /* This little sucker can take up to 30 seconds */
        ret = wait_for_rdy();
    } while(!ret && retry_count--);

    if (set_multiple_mode(multisectors))

int ata_soft_reset(void)
    mutex_lock(&ata_mtx);

    ret = perform_soft_reset();

    mutex_unlock(&ata_mtx);

static int ata_power_on(void)
    ide_power_enable(true);
    sleep(HZ/4); /* allow voltage to build up */

    /* Accessing the PP IDE controller too early after powering up the disk
     * makes the core hang for a short time, causing an audio dropout. This
     * also depends on the disk; iPod Mini G2 needs at least HZ/5 to get rid
     * of the dropout. Since this time isn't additive (the wait_for_bsy() in
     * ata_hard_reset() will be shortened by the same amount), it's a good idea
     * to do this on all HDD based targets. */

    if( ata_hard_reset() )

    rc = set_features();

    if (set_multiple_mode(multisectors))

static int master_slave_detect(void)
    SET_REG(ATA_SELECT, 0);
    if ( ATA_STATUS & (STATUS_RDY|STATUS_BSY) ) {
        DEBUGF("Found master harddisk\n");

    SET_REG(ATA_SELECT, SELECT_DEVICE1);
    if ( ATA_STATUS & (STATUS_RDY|STATUS_BSY) ) {
        ata_device = SELECT_DEVICE1;
        DEBUGF("Found slave harddisk\n");

static int identify(void)
    SET_REG(ATA_SELECT, ata_device);

    if(!wait_for_rdy()) {
        DEBUGF("identify() - not RDY\n");

    SET_REG(ATA_COMMAND, CMD_IDENTIFY);

    if (!wait_for_start_of_transfer())
        DEBUGF("identify() - CMD failed\n");

    for (i=0; i<SECTOR_SIZE/2; i++) {
        /* the IDENTIFY words are already swapped, so we need to treat
           this info differently than normal sector data */
#if defined(ROCKBOX_BIG_ENDIAN) && !defined(SWAP_WORDS)
        identify_info[i] = swap16(ATA_DATA);
        identify_info[i] = ATA_DATA;

static int set_multiple_mode(int sectors)
    SET_REG(ATA_SELECT, ata_device);

    if(!wait_for_rdy()) {
        DEBUGF("set_multiple_mode() - not RDY\n");

    SET_REG(ATA_NSECTOR, sectors);
    SET_REG(ATA_COMMAND, CMD_SET_MULTIPLE_MODE);

    if (!wait_for_rdy())
        DEBUGF("set_multiple_mode() - CMD failed\n");

static int get_best_mode(unsigned short identword, int max, int modetype)
    unsigned short testbit = BIT_N(max);

        if (identword & testbit)
            return max | modetype;
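
    /* get_best_mode() picks the highest transfer mode that is both advertised
     * in identword and no higher than max, the driver's build-time limit. The
     * result is OR'd with modetype, the SET FEATURES transfer-type selector
     * (0x40 for UDMA and 0x20 for multi-word DMA at the call sites in
     * set_features() below). */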

static int set_features(void)
        unsigned char id_word;
        unsigned char id_bit;
        unsigned char subcommand;
        unsigned char parameter;

        { 83, 14, 0x03, 0 },    /* force PIO mode */
        { 83, 3, 0x05, 0x80 },  /* adv. power management: lowest w/o standby */
        { 83, 9, 0x42, 0x80 },  /* acoustic management: lowest noise */
        { 82, 6, 0xaa, 0 },     /* enable read look-ahead */
        { 0, 0, 0x03, 0 },      /* DMA mode */
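
        /* Each row gives the identify word and bit that advertise a feature,
         * plus the SET FEATURES subcommand and parameter (the parameter goes
         * into the sector count register). Rows whose bit is not set in
         * identify_info are skipped by the loop below; the last row is patched
         * at run time with the PIO/DMA transfer mode selected further down. */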

    /* Find out the highest supported PIO mode */
    if(identify_info[64] & 2)
    if(identify_info[64] & 1)

    /* Update the table: set highest supported pio mode that we also support */
    features[0].parameter = 8 + pio_mode;

    if (identify_info[53] & (1<<2))
        /* Ultra DMA mode info present, find a mode */
        dma_mode = get_best_mode(identify_info[88], ATA_MAX_UDMA, 0x40);

        /* No UDMA mode found, try to find a multi-word DMA mode */
        dma_mode = get_best_mode(identify_info[63], ATA_MAX_MWDMA, 0x20);
        features[4].id_word = 63;

        features[4].id_word = 88;

    features[4].id_bit = dma_mode & 7;
    features[4].parameter = dma_mode;
#endif /* HAVE_ATA_DMA */

    SET_REG(ATA_SELECT, ata_device);

    if (!wait_for_rdy()) {
        DEBUGF("set_features() - not RDY\n");

    for (i=0; i < (int)(sizeof(features)/sizeof(features[0])); i++) {
        if (identify_info[features[i].id_word] & BIT_N(features[i].id_bit)) {
            SET_REG(ATA_FEATURE, features[i].subcommand);
            SET_REG(ATA_NSECTOR, features[i].parameter);
            SET_REG(ATA_COMMAND, CMD_SET_FEATURES);

            if (!wait_for_rdy()) {
                DEBUGF("set_features() - CMD failed\n");

            if((ATA_ALT_STATUS & STATUS_ERR) && (i != 1)) {
                /* some CF cards don't like advanced powermanagement
                   even if they mark it as supported - go figure... */
                if(ATA_ERROR & ERROR_ABRT) {

#ifdef ATA_SET_DEVICE_FEATURES
    ata_set_pio_timings(pio_mode);

    ata_dma_set_mode(dma_mode);

unsigned short* ata_get_identify(void)
    return identify_info;

static int init_and_check(bool hard_reset)
        /* This should reset both master and slave, we don't yet know what's in */
        if (ata_hard_reset())

    rc = master_slave_detect();

    /* symptom fix: else check_registers() below may fail */
    if (hard_reset && !wait_for_bsy())

    rc = check_registers();

    if ( !initialized ) {
        mutex_init(&ata_mtx);
        queue_init(&ata_queue, true);

    mutex_lock(&ata_mtx);

    /* must be called before ata_device_init() */
    coldstart = ata_is_coldstart();

#ifdef MAX_PHYS_SECTOR_SIZE
    memset(&sector_cache, 0, sizeof(sector_cache));

    if ( !initialized ) {
        /* First call won't have multiple thread contention - this
         * may return at any point without having to unlock */
        mutex_unlock(&ata_mtx);

        if (!ide_powered()) /* somebody has switched it off */
            ide_power_enable(true);
            sleep(HZ/4); /* allow voltage to build up */

        /* DMA requires INTRQ be enabled */
        SET_REG(ATA_CONTROL, 0);

        /* first try, hard reset at cold start only */
        rc = init_and_check(coldstart);

        {   /* failed? -> second try, always with hard reset */
            DEBUGF("ata: init failed, retrying...\n");
            rc = init_and_check(true);

        multisectors = identify_info[47] & 0xff;
        if (multisectors == 0) /* Invalid multisector info, try with 16 */

        DEBUGF("ata: %d sectors per ata request\n",multisectors);
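
        /* identify word 47, low byte: the maximum number of sectors the drive
         * transfers per interrupt on READ/WRITE MULTIPLE, which caps the chunk
         * size used by the read/write loops above. */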

#ifdef MAX_PHYS_SECTOR_SIZE
        /* Find out the physical sector size */
        if((identify_info[106] & 0xe000) == 0x6000)
            phys_sector_mult = BIT_N(identify_info[106] & 0x000f);
            phys_sector_mult = 1;
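
        /* identify word 106: bit 14 set with bit 15 clear marks the word as
         * valid, bit 13 means there are multiple logical sectors per physical
         * sector, and bits 3:0 give that ratio as a power of two - hence the
         * 0x6000 signature check and the BIT_N() above. */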

        DEBUGF("ata: %d logical sectors per phys sector", phys_sector_mult);

        if (phys_sector_mult > (MAX_PHYS_SECTOR_SIZE/SECTOR_SIZE))
            panicf("Unsupported physical sector size: %d",
                   phys_sector_mult * SECTOR_SIZE);

        total_sectors = identify_info[60] | (identify_info[61] << 16);

        if (identify_info[83] & 0x0400       /* 48 bit address support */
            && total_sectors == 0x0FFFFFFF)  /* and disk size >= 128 GiB */
        {   /* (needs BigLBA addressing) */
            if (identify_info[102] || identify_info[103])
                panicf("Unsupported disk size: >= 2^32 sectors");

            total_sectors = identify_info[100] | (identify_info[101] << 16);
            lba48 = true; /* use BigLBA */
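
            /* Words 100-103 hold the full 48-bit sector count; only the low
             * 32 bits (words 100-101) are used here, so drives with 2^32 or
             * more sectors (2 TiB at 512 bytes per sector) are rejected by
             * the panicf above. */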

        rc = set_features();

    mutex_lock(&ata_mtx); /* Balance unlock below */

    last_disk_activity = current_tick;
#ifdef ATA_DRIVER_CLOSE
        create_thread(ata_thread, ata_stack,
                      sizeof(ata_stack), 0, ata_thread_name
                      IF_PRIO(, PRIORITY_USER_INTERFACE)

    rc = set_multiple_mode(multisectors);

    mutex_unlock(&ata_mtx);

#ifdef ATA_DRIVER_CLOSE
void ata_close(void)
    unsigned int thread_id = ata_thread_id;

    queue_post(&ata_queue, Q_CLOSE, 0);
    thread_wait(thread_id);
#endif /* ATA_DRIVER_CLOSE */

#if (CONFIG_LED == LED_REAL)
void ata_set_led_enabled(bool enabled)
    ata_led_enabled = enabled;
    if (ata_led_enabled)

long ata_last_disk_activity(void)
    return last_disk_activity;

int ata_spinup_time(void)

#ifdef STORAGE_GET_INFO
void ata_get_info(IF_MD2(int drive,)struct storage_info *info)
    unsigned short *src,*dest;
    static char vendor[8];
    static char product[16];
    static char revision[4];
#ifdef HAVE_MULTIDRIVE
    (void)drive; /* unused for now */

    info->sector_size = SECTOR_SIZE;
    info->num_sectors = total_sectors;
= (unsigned short*)&identify_info
[27];
1569 dest
= (unsigned short*)vendor
;
1571 dest
[i
] = htobe16(src
[i
]);
1572 info
->vendor
=vendor
;
1574 src
= (unsigned short*)&identify_info
[31];
1575 dest
= (unsigned short*)product
;
1577 dest
[i
] = htobe16(src
[i
]);
1578 info
->product
=product
;
1580 src
= (unsigned short*)&identify_info
[23];
1581 dest
= (unsigned short*)revision
;
1583 dest
[i
] = htobe16(src
[i
]);
1584 info
->revision
=revision
;

/* Returns last DMA mode as set by set_features() */
int ata_get_dma_mode(void)

/* Needed to allow updating while waiting for DMA to complete */
void ata_keep_active(void)
    last_disk_activity = current_tick;

#ifdef CONFIG_STORAGE_MULTI
int ata_num_drives(int first_drive)
    /* We don't care which logical drive number(s) we have been assigned */