x86/PCI: use host bridge _CRS info on ASUS M2V-MX SE
[linux-btrfs-devel.git] / drivers / staging / spectra / lld_nand.c
blob60a14ff26c7f834eb7d2938fa0b70d3503b69976
1 /*
2 * NAND Flash Controller Device Driver
3 * Copyright (c) 2009, Intel Corporation and its suppliers.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20 #include "lld.h"
21 #include "lld_nand.h"
22 #include "lld_cdma.h"
24 #include "spectraswconfig.h"
25 #include "flash.h"
26 #include "ffsdefs.h"
28 #include <linux/interrupt.h>
29 #include <linux/delay.h>
30 #include <linux/wait.h>
31 #include <linux/mutex.h>
33 #include "nand_regs.h"
35 #define SPECTRA_NAND_NAME "nd"
37 #define CEIL_DIV(X, Y) (((X)%(Y)) ? ((X)/(Y)+1) : ((X)/(Y)))
38 #define MAX_PAGES_PER_RW 128
40 #define INT_IDLE_STATE 0
41 #define INT_READ_PAGE_MAIN 0x01
42 #define INT_WRITE_PAGE_MAIN 0x02
43 #define INT_PIPELINE_READ_AHEAD 0x04
44 #define INT_PIPELINE_WRITE_AHEAD 0x08
45 #define INT_MULTI_PLANE_READ 0x10
46 #define INT_MULTI_PLANE_WRITE 0x11
/* Nonzero when hardware ECC correction is in use (affects spare layout). */
static u32 enable_ecc;

/* Driver-wide state shared with the CDMA/interrupt layers. */
struct mrst_nand_info info;

/* Number of banks that answered with a usable device ID. */
int totalUsedBanks;
/* Per-bank flag: 1 if the bank holds a usable device, else 0. */
u32 GLOB_valid_banks[LLD_MAX_FLASH_BANKS];

/* ioremap'ed base of the controller register block. */
void __iomem *FlashReg;
/* ioremap'ed base of the controller's indexed-access data window. */
void __iomem *FlashMem;
58 u16 conf_parameters[] = {
59 0x0000,
60 0x0000,
61 0x01F4,
62 0x01F4,
63 0x01F4,
64 0x01F4,
65 0x0000,
66 0x0000,
67 0x0001,
68 0x0000,
69 0x0000,
70 0x0000,
71 0x0000,
72 0x0040,
73 0x0001,
74 0x000A,
75 0x000A,
76 0x000A,
77 0x0000,
78 0x0000,
79 0x0005,
80 0x0012,
81 0x000C
84 u16 NAND_Get_Bad_Block(u32 block)
86 u32 status = PASS;
87 u32 flag_bytes = 0;
88 u32 skip_bytes = DeviceInfo.wSpareSkipBytes;
89 u32 page, i;
90 u8 *pReadSpareBuf = buf_get_bad_block;
92 if (enable_ecc)
93 flag_bytes = DeviceInfo.wNumPageSpareFlag;
95 for (page = 0; page < 2; page++) {
96 status = NAND_Read_Page_Spare(pReadSpareBuf, block, page, 1);
97 if (status != PASS)
98 return READ_ERROR;
99 for (i = flag_bytes; i < (flag_bytes + skip_bytes); i++)
100 if (pReadSpareBuf[i] != 0xff)
101 return DEFECTIVE_BLOCK;
104 for (page = 1; page < 3; page++) {
105 status = NAND_Read_Page_Spare(pReadSpareBuf, block,
106 DeviceInfo.wPagesPerBlock - page , 1);
107 if (status != PASS)
108 return READ_ERROR;
109 for (i = flag_bytes; i < (flag_bytes + skip_bytes); i++)
110 if (pReadSpareBuf[i] != 0xff)
111 return DEFECTIVE_BLOCK;
114 return GOOD_BLOCK;
118 u16 NAND_Flash_Reset(void)
120 u32 i;
121 u32 intr_status_rst_comp[4] = {INTR_STATUS0__RST_COMP,
122 INTR_STATUS1__RST_COMP,
123 INTR_STATUS2__RST_COMP,
124 INTR_STATUS3__RST_COMP};
125 u32 intr_status_time_out[4] = {INTR_STATUS0__TIME_OUT,
126 INTR_STATUS1__TIME_OUT,
127 INTR_STATUS2__TIME_OUT,
128 INTR_STATUS3__TIME_OUT};
129 u32 intr_status[4] = {INTR_STATUS0, INTR_STATUS1,
130 INTR_STATUS2, INTR_STATUS3};
131 u32 device_reset_banks[4] = {DEVICE_RESET__BANK0,
132 DEVICE_RESET__BANK1,
133 DEVICE_RESET__BANK2,
134 DEVICE_RESET__BANK3};
136 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
137 __FILE__, __LINE__, __func__);
139 for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++)
140 iowrite32(intr_status_rst_comp[i] | intr_status_time_out[i],
141 FlashReg + intr_status[i]);
143 for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) {
144 iowrite32(device_reset_banks[i], FlashReg + DEVICE_RESET);
145 while (!(ioread32(FlashReg + intr_status[i]) &
146 (intr_status_rst_comp[i] | intr_status_time_out[i])))
148 if (ioread32(FlashReg + intr_status[i]) &
149 intr_status_time_out[i])
150 nand_dbg_print(NAND_DBG_WARN,
151 "NAND Reset operation timed out on bank %d\n", i);
154 for (i = 0; i < LLD_MAX_FLASH_BANKS; i++)
155 iowrite32(intr_status_rst_comp[i] | intr_status_time_out[i],
156 FlashReg + intr_status[i]);
158 return PASS;
161 static void NAND_ONFi_Timing_Mode(u16 mode)
163 u16 Trea[6] = {40, 30, 25, 20, 20, 16};
164 u16 Trp[6] = {50, 25, 17, 15, 12, 10};
165 u16 Treh[6] = {30, 15, 15, 10, 10, 7};
166 u16 Trc[6] = {100, 50, 35, 30, 25, 20};
167 u16 Trhoh[6] = {0, 15, 15, 15, 15, 15};
168 u16 Trloh[6] = {0, 0, 0, 0, 5, 5};
169 u16 Tcea[6] = {100, 45, 30, 25, 25, 25};
170 u16 Tadl[6] = {200, 100, 100, 100, 70, 70};
171 u16 Trhw[6] = {200, 100, 100, 100, 100, 100};
172 u16 Trhz[6] = {200, 100, 100, 100, 100, 100};
173 u16 Twhr[6] = {120, 80, 80, 60, 60, 60};
174 u16 Tcs[6] = {70, 35, 25, 25, 20, 15};
176 u16 TclsRising = 1;
177 u16 data_invalid_rhoh, data_invalid_rloh, data_invalid;
178 u16 dv_window = 0;
179 u16 en_lo, en_hi;
180 u16 acc_clks;
181 u16 addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;
183 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
184 __FILE__, __LINE__, __func__);
186 en_lo = CEIL_DIV(Trp[mode], CLK_X);
187 en_hi = CEIL_DIV(Treh[mode], CLK_X);
189 #if ONFI_BLOOM_TIME
190 if ((en_hi * CLK_X) < (Treh[mode] + 2))
191 en_hi++;
192 #endif
194 if ((en_lo + en_hi) * CLK_X < Trc[mode])
195 en_lo += CEIL_DIV((Trc[mode] - (en_lo + en_hi) * CLK_X), CLK_X);
197 if ((en_lo + en_hi) < CLK_MULTI)
198 en_lo += CLK_MULTI - en_lo - en_hi;
200 while (dv_window < 8) {
201 data_invalid_rhoh = en_lo * CLK_X + Trhoh[mode];
203 data_invalid_rloh = (en_lo + en_hi) * CLK_X + Trloh[mode];
205 data_invalid =
206 data_invalid_rhoh <
207 data_invalid_rloh ? data_invalid_rhoh : data_invalid_rloh;
209 dv_window = data_invalid - Trea[mode];
211 if (dv_window < 8)
212 en_lo++;
215 acc_clks = CEIL_DIV(Trea[mode], CLK_X);
217 while (((acc_clks * CLK_X) - Trea[mode]) < 3)
218 acc_clks++;
220 if ((data_invalid - acc_clks * CLK_X) < 2)
221 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d: Warning!\n",
222 __FILE__, __LINE__);
224 addr_2_data = CEIL_DIV(Tadl[mode], CLK_X);
225 re_2_we = CEIL_DIV(Trhw[mode], CLK_X);
226 re_2_re = CEIL_DIV(Trhz[mode], CLK_X);
227 we_2_re = CEIL_DIV(Twhr[mode], CLK_X);
228 cs_cnt = CEIL_DIV((Tcs[mode] - Trp[mode]), CLK_X);
229 if (!TclsRising)
230 cs_cnt = CEIL_DIV(Tcs[mode], CLK_X);
231 if (cs_cnt == 0)
232 cs_cnt = 1;
234 if (Tcea[mode]) {
235 while (((cs_cnt * CLK_X) + Trea[mode]) < Tcea[mode])
236 cs_cnt++;
239 #if MODE5_WORKAROUND
240 if (mode == 5)
241 acc_clks = 5;
242 #endif
244 /* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */
245 if ((ioread32(FlashReg + MANUFACTURER_ID) == 0) &&
246 (ioread32(FlashReg + DEVICE_ID) == 0x88))
247 acc_clks = 6;
249 iowrite32(acc_clks, FlashReg + ACC_CLKS);
250 iowrite32(re_2_we, FlashReg + RE_2_WE);
251 iowrite32(re_2_re, FlashReg + RE_2_RE);
252 iowrite32(we_2_re, FlashReg + WE_2_RE);
253 iowrite32(addr_2_data, FlashReg + ADDR_2_DATA);
254 iowrite32(en_lo, FlashReg + RDWR_EN_LO_CNT);
255 iowrite32(en_hi, FlashReg + RDWR_EN_HI_CNT);
256 iowrite32(cs_cnt, FlashReg + CS_SETUP_CNT);
259 static void index_addr(u32 address, u32 data)
261 iowrite32(address, FlashMem);
262 iowrite32(data, FlashMem + 0x10);
265 static void index_addr_read_data(u32 address, u32 *pdata)
267 iowrite32(address, FlashMem);
268 *pdata = ioread32(FlashMem + 0x10);
271 static void set_ecc_config(void)
273 #if SUPPORT_8BITECC
274 if ((ioread32(FlashReg + DEVICE_MAIN_AREA_SIZE) < 4096) ||
275 (ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE) <= 128))
276 iowrite32(8, FlashReg + ECC_CORRECTION);
277 #endif
279 if ((ioread32(FlashReg + ECC_CORRECTION) & ECC_CORRECTION__VALUE)
280 == 1) {
281 DeviceInfo.wECCBytesPerSector = 4;
282 DeviceInfo.wECCBytesPerSector *= DeviceInfo.wDevicesConnected;
283 DeviceInfo.wNumPageSpareFlag =
284 DeviceInfo.wPageSpareSize -
285 DeviceInfo.wPageDataSize /
286 (ECC_SECTOR_SIZE * DeviceInfo.wDevicesConnected) *
287 DeviceInfo.wECCBytesPerSector
288 - DeviceInfo.wSpareSkipBytes;
289 } else {
290 DeviceInfo.wECCBytesPerSector =
291 (ioread32(FlashReg + ECC_CORRECTION) &
292 ECC_CORRECTION__VALUE) * 13 / 8;
293 if ((DeviceInfo.wECCBytesPerSector) % 2 == 0)
294 DeviceInfo.wECCBytesPerSector += 2;
295 else
296 DeviceInfo.wECCBytesPerSector += 1;
298 DeviceInfo.wECCBytesPerSector *= DeviceInfo.wDevicesConnected;
299 DeviceInfo.wNumPageSpareFlag = DeviceInfo.wPageSpareSize -
300 DeviceInfo.wPageDataSize /
301 (ECC_SECTOR_SIZE * DeviceInfo.wDevicesConnected) *
302 DeviceInfo.wECCBytesPerSector
303 - DeviceInfo.wSpareSkipBytes;
307 static u16 get_onfi_nand_para(void)
309 int i;
310 u16 blks_lun_l, blks_lun_h, n_of_luns;
311 u32 blockperlun, id;
313 iowrite32(DEVICE_RESET__BANK0, FlashReg + DEVICE_RESET);
315 while (!((ioread32(FlashReg + INTR_STATUS0) &
316 INTR_STATUS0__RST_COMP) |
317 (ioread32(FlashReg + INTR_STATUS0) &
318 INTR_STATUS0__TIME_OUT)))
321 if (ioread32(FlashReg + INTR_STATUS0) & INTR_STATUS0__RST_COMP) {
322 iowrite32(DEVICE_RESET__BANK1, FlashReg + DEVICE_RESET);
323 while (!((ioread32(FlashReg + INTR_STATUS1) &
324 INTR_STATUS1__RST_COMP) |
325 (ioread32(FlashReg + INTR_STATUS1) &
326 INTR_STATUS1__TIME_OUT)))
329 if (ioread32(FlashReg + INTR_STATUS1) &
330 INTR_STATUS1__RST_COMP) {
331 iowrite32(DEVICE_RESET__BANK2,
332 FlashReg + DEVICE_RESET);
333 while (!((ioread32(FlashReg + INTR_STATUS2) &
334 INTR_STATUS2__RST_COMP) |
335 (ioread32(FlashReg + INTR_STATUS2) &
336 INTR_STATUS2__TIME_OUT)))
339 if (ioread32(FlashReg + INTR_STATUS2) &
340 INTR_STATUS2__RST_COMP) {
341 iowrite32(DEVICE_RESET__BANK3,
342 FlashReg + DEVICE_RESET);
343 while (!((ioread32(FlashReg + INTR_STATUS3) &
344 INTR_STATUS3__RST_COMP) |
345 (ioread32(FlashReg + INTR_STATUS3) &
346 INTR_STATUS3__TIME_OUT)))
348 } else {
349 printk(KERN_ERR "Getting a time out for bank 2!\n");
351 } else {
352 printk(KERN_ERR "Getting a time out for bank 1!\n");
356 iowrite32(INTR_STATUS0__TIME_OUT, FlashReg + INTR_STATUS0);
357 iowrite32(INTR_STATUS1__TIME_OUT, FlashReg + INTR_STATUS1);
358 iowrite32(INTR_STATUS2__TIME_OUT, FlashReg + INTR_STATUS2);
359 iowrite32(INTR_STATUS3__TIME_OUT, FlashReg + INTR_STATUS3);
361 DeviceInfo.wONFIDevFeatures =
362 ioread32(FlashReg + ONFI_DEVICE_FEATURES);
363 DeviceInfo.wONFIOptCommands =
364 ioread32(FlashReg + ONFI_OPTIONAL_COMMANDS);
365 DeviceInfo.wONFITimingMode =
366 ioread32(FlashReg + ONFI_TIMING_MODE);
367 DeviceInfo.wONFIPgmCacheTimingMode =
368 ioread32(FlashReg + ONFI_PGM_CACHE_TIMING_MODE);
370 n_of_luns = ioread32(FlashReg + ONFI_DEVICE_NO_OF_LUNS) &
371 ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS;
372 blks_lun_l = ioread32(FlashReg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L);
373 blks_lun_h = ioread32(FlashReg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U);
375 blockperlun = (blks_lun_h << 16) | blks_lun_l;
377 DeviceInfo.wTotalBlocks = n_of_luns * blockperlun;
379 if (!(ioread32(FlashReg + ONFI_TIMING_MODE) &
380 ONFI_TIMING_MODE__VALUE))
381 return FAIL;
383 for (i = 5; i > 0; i--) {
384 if (ioread32(FlashReg + ONFI_TIMING_MODE) & (0x01 << i))
385 break;
388 NAND_ONFi_Timing_Mode(i);
390 index_addr(MODE_11 | 0, 0x90);
391 index_addr(MODE_11 | 1, 0);
393 for (i = 0; i < 3; i++)
394 index_addr_read_data(MODE_11 | 2, &id);
396 nand_dbg_print(NAND_DBG_DEBUG, "3rd ID: 0x%x\n", id);
398 DeviceInfo.MLCDevice = id & 0x0C;
400 /* By now, all the ONFI devices we know support the page cache */
401 /* rw feature. So here we enable the pipeline_rw_ahead feature */
402 /* iowrite32(1, FlashReg + CACHE_WRITE_ENABLE); */
403 /* iowrite32(1, FlashReg + CACHE_READ_ENABLE); */
405 return PASS;
408 static void get_samsung_nand_para(void)
410 u8 no_of_planes;
411 u32 blk_size;
412 u64 plane_size, capacity;
413 u32 id_bytes[5];
414 int i;
416 index_addr((u32)(MODE_11 | 0), 0x90);
417 index_addr((u32)(MODE_11 | 1), 0);
418 for (i = 0; i < 5; i++)
419 index_addr_read_data((u32)(MODE_11 | 2), &id_bytes[i]);
421 nand_dbg_print(NAND_DBG_DEBUG,
422 "ID bytes: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n",
423 id_bytes[0], id_bytes[1], id_bytes[2],
424 id_bytes[3], id_bytes[4]);
426 if ((id_bytes[1] & 0xff) == 0xd3) { /* Samsung K9WAG08U1A */
427 /* Set timing register values according to datasheet */
428 iowrite32(5, FlashReg + ACC_CLKS);
429 iowrite32(20, FlashReg + RE_2_WE);
430 iowrite32(12, FlashReg + WE_2_RE);
431 iowrite32(14, FlashReg + ADDR_2_DATA);
432 iowrite32(3, FlashReg + RDWR_EN_LO_CNT);
433 iowrite32(2, FlashReg + RDWR_EN_HI_CNT);
434 iowrite32(2, FlashReg + CS_SETUP_CNT);
437 no_of_planes = 1 << ((id_bytes[4] & 0x0c) >> 2);
438 plane_size = (u64)64 << ((id_bytes[4] & 0x70) >> 4);
439 blk_size = 64 << ((ioread32(FlashReg + DEVICE_PARAM_1) & 0x30) >> 4);
440 capacity = (u64)128 * plane_size * no_of_planes;
442 DeviceInfo.wTotalBlocks = (u32)GLOB_u64_Div(capacity, blk_size);
445 static void get_toshiba_nand_para(void)
447 void __iomem *scratch_reg;
448 u32 tmp;
450 /* Workaround to fix a controller bug which reports a wrong */
451 /* spare area size for some kind of Toshiba NAND device */
452 if ((ioread32(FlashReg + DEVICE_MAIN_AREA_SIZE) == 4096) &&
453 (ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE) == 64)) {
454 iowrite32(216, FlashReg + DEVICE_SPARE_AREA_SIZE);
455 tmp = ioread32(FlashReg + DEVICES_CONNECTED) *
456 ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE);
457 iowrite32(tmp, FlashReg + LOGICAL_PAGE_SPARE_SIZE);
458 #if SUPPORT_15BITECC
459 iowrite32(15, FlashReg + ECC_CORRECTION);
460 #elif SUPPORT_8BITECC
461 iowrite32(8, FlashReg + ECC_CORRECTION);
462 #endif
465 /* As Toshiba NAND can not provide it's block number, */
466 /* so here we need user to provide the correct block */
467 /* number in a scratch register before the Linux NAND */
468 /* driver is loaded. If no valid value found in the scratch */
469 /* register, then we use default block number value */
470 scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
471 if (!scratch_reg) {
472 printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d",
473 __FILE__, __LINE__);
474 DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
475 } else {
476 nand_dbg_print(NAND_DBG_WARN,
477 "Spectra: ioremap reg address: 0x%p\n", scratch_reg);
478 DeviceInfo.wTotalBlocks = 1 << ioread8(scratch_reg);
479 if (DeviceInfo.wTotalBlocks < 512)
480 DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
481 iounmap(scratch_reg);
485 static void get_hynix_nand_para(void)
487 void __iomem *scratch_reg;
488 u32 main_size, spare_size;
490 switch (DeviceInfo.wDeviceID) {
491 case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */
492 case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */
493 iowrite32(128, FlashReg + PAGES_PER_BLOCK);
494 iowrite32(4096, FlashReg + DEVICE_MAIN_AREA_SIZE);
495 iowrite32(224, FlashReg + DEVICE_SPARE_AREA_SIZE);
496 main_size = 4096 * ioread32(FlashReg + DEVICES_CONNECTED);
497 spare_size = 224 * ioread32(FlashReg + DEVICES_CONNECTED);
498 iowrite32(main_size, FlashReg + LOGICAL_PAGE_DATA_SIZE);
499 iowrite32(spare_size, FlashReg + LOGICAL_PAGE_SPARE_SIZE);
500 iowrite32(0, FlashReg + DEVICE_WIDTH);
501 #if SUPPORT_15BITECC
502 iowrite32(15, FlashReg + ECC_CORRECTION);
503 #elif SUPPORT_8BITECC
504 iowrite32(8, FlashReg + ECC_CORRECTION);
505 #endif
506 DeviceInfo.MLCDevice = 1;
507 break;
508 default:
509 nand_dbg_print(NAND_DBG_WARN,
510 "Spectra: Unknown Hynix NAND (Device ID: 0x%x)."
511 "Will use default parameter values instead.\n",
512 DeviceInfo.wDeviceID);
515 scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
516 if (!scratch_reg) {
517 printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d",
518 __FILE__, __LINE__);
519 DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
520 } else {
521 nand_dbg_print(NAND_DBG_WARN,
522 "Spectra: ioremap reg address: 0x%p\n", scratch_reg);
523 DeviceInfo.wTotalBlocks = 1 << ioread8(scratch_reg);
524 if (DeviceInfo.wTotalBlocks < 512)
525 DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
526 iounmap(scratch_reg);
530 static void find_valid_banks(void)
532 u32 id[LLD_MAX_FLASH_BANKS];
533 int i;
535 totalUsedBanks = 0;
536 for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) {
537 index_addr((u32)(MODE_11 | (i << 24) | 0), 0x90);
538 index_addr((u32)(MODE_11 | (i << 24) | 1), 0);
539 index_addr_read_data((u32)(MODE_11 | (i << 24) | 2), &id[i]);
541 nand_dbg_print(NAND_DBG_DEBUG,
542 "Return 1st ID for bank[%d]: %x\n", i, id[i]);
544 if (i == 0) {
545 if (id[i] & 0x0ff)
546 GLOB_valid_banks[i] = 1;
547 } else {
548 if ((id[i] & 0x0ff) == (id[0] & 0x0ff))
549 GLOB_valid_banks[i] = 1;
552 totalUsedBanks += GLOB_valid_banks[i];
555 nand_dbg_print(NAND_DBG_DEBUG,
556 "totalUsedBanks: %d\n", totalUsedBanks);
559 static void detect_partition_feature(void)
561 if (ioread32(FlashReg + FEATURES) & FEATURES__PARTITION) {
562 if ((ioread32(FlashReg + PERM_SRC_ID_1) &
563 PERM_SRC_ID_1__SRCID) == SPECTRA_PARTITION_ID) {
564 DeviceInfo.wSpectraStartBlock =
565 ((ioread32(FlashReg + MIN_MAX_BANK_1) &
566 MIN_MAX_BANK_1__MIN_VALUE) *
567 DeviceInfo.wTotalBlocks)
569 (ioread32(FlashReg + MIN_BLK_ADDR_1) &
570 MIN_BLK_ADDR_1__VALUE);
572 DeviceInfo.wSpectraEndBlock =
573 (((ioread32(FlashReg + MIN_MAX_BANK_1) &
574 MIN_MAX_BANK_1__MAX_VALUE) >> 2) *
575 DeviceInfo.wTotalBlocks)
577 (ioread32(FlashReg + MAX_BLK_ADDR_1) &
578 MAX_BLK_ADDR_1__VALUE);
580 DeviceInfo.wTotalBlocks *= totalUsedBanks;
582 if (DeviceInfo.wSpectraEndBlock >=
583 DeviceInfo.wTotalBlocks) {
584 DeviceInfo.wSpectraEndBlock =
585 DeviceInfo.wTotalBlocks - 1;
588 DeviceInfo.wDataBlockNum =
589 DeviceInfo.wSpectraEndBlock -
590 DeviceInfo.wSpectraStartBlock + 1;
591 } else {
592 DeviceInfo.wTotalBlocks *= totalUsedBanks;
593 DeviceInfo.wSpectraStartBlock = SPECTRA_START_BLOCK;
594 DeviceInfo.wSpectraEndBlock =
595 DeviceInfo.wTotalBlocks - 1;
596 DeviceInfo.wDataBlockNum =
597 DeviceInfo.wSpectraEndBlock -
598 DeviceInfo.wSpectraStartBlock + 1;
600 } else {
601 DeviceInfo.wTotalBlocks *= totalUsedBanks;
602 DeviceInfo.wSpectraStartBlock = SPECTRA_START_BLOCK;
603 DeviceInfo.wSpectraEndBlock = DeviceInfo.wTotalBlocks - 1;
604 DeviceInfo.wDataBlockNum =
605 DeviceInfo.wSpectraEndBlock -
606 DeviceInfo.wSpectraStartBlock + 1;
610 static void dump_device_info(void)
612 nand_dbg_print(NAND_DBG_DEBUG, "DeviceInfo:\n");
613 nand_dbg_print(NAND_DBG_DEBUG, "DeviceMaker: 0x%x\n",
614 DeviceInfo.wDeviceMaker);
615 nand_dbg_print(NAND_DBG_DEBUG, "DeviceID: 0x%x\n",
616 DeviceInfo.wDeviceID);
617 nand_dbg_print(NAND_DBG_DEBUG, "DeviceType: 0x%x\n",
618 DeviceInfo.wDeviceType);
619 nand_dbg_print(NAND_DBG_DEBUG, "SpectraStartBlock: %d\n",
620 DeviceInfo.wSpectraStartBlock);
621 nand_dbg_print(NAND_DBG_DEBUG, "SpectraEndBlock: %d\n",
622 DeviceInfo.wSpectraEndBlock);
623 nand_dbg_print(NAND_DBG_DEBUG, "TotalBlocks: %d\n",
624 DeviceInfo.wTotalBlocks);
625 nand_dbg_print(NAND_DBG_DEBUG, "PagesPerBlock: %d\n",
626 DeviceInfo.wPagesPerBlock);
627 nand_dbg_print(NAND_DBG_DEBUG, "PageSize: %d\n",
628 DeviceInfo.wPageSize);
629 nand_dbg_print(NAND_DBG_DEBUG, "PageDataSize: %d\n",
630 DeviceInfo.wPageDataSize);
631 nand_dbg_print(NAND_DBG_DEBUG, "PageSpareSize: %d\n",
632 DeviceInfo.wPageSpareSize);
633 nand_dbg_print(NAND_DBG_DEBUG, "NumPageSpareFlag: %d\n",
634 DeviceInfo.wNumPageSpareFlag);
635 nand_dbg_print(NAND_DBG_DEBUG, "ECCBytesPerSector: %d\n",
636 DeviceInfo.wECCBytesPerSector);
637 nand_dbg_print(NAND_DBG_DEBUG, "BlockSize: %d\n",
638 DeviceInfo.wBlockSize);
639 nand_dbg_print(NAND_DBG_DEBUG, "BlockDataSize: %d\n",
640 DeviceInfo.wBlockDataSize);
641 nand_dbg_print(NAND_DBG_DEBUG, "DataBlockNum: %d\n",
642 DeviceInfo.wDataBlockNum);
643 nand_dbg_print(NAND_DBG_DEBUG, "PlaneNum: %d\n",
644 DeviceInfo.bPlaneNum);
645 nand_dbg_print(NAND_DBG_DEBUG, "DeviceMainAreaSize: %d\n",
646 DeviceInfo.wDeviceMainAreaSize);
647 nand_dbg_print(NAND_DBG_DEBUG, "DeviceSpareAreaSize: %d\n",
648 DeviceInfo.wDeviceSpareAreaSize);
649 nand_dbg_print(NAND_DBG_DEBUG, "DevicesConnected: %d\n",
650 DeviceInfo.wDevicesConnected);
651 nand_dbg_print(NAND_DBG_DEBUG, "DeviceWidth: %d\n",
652 DeviceInfo.wDeviceWidth);
653 nand_dbg_print(NAND_DBG_DEBUG, "HWRevision: 0x%x\n",
654 DeviceInfo.wHWRevision);
655 nand_dbg_print(NAND_DBG_DEBUG, "HWFeatures: 0x%x\n",
656 DeviceInfo.wHWFeatures);
657 nand_dbg_print(NAND_DBG_DEBUG, "ONFIDevFeatures: 0x%x\n",
658 DeviceInfo.wONFIDevFeatures);
659 nand_dbg_print(NAND_DBG_DEBUG, "ONFIOptCommands: 0x%x\n",
660 DeviceInfo.wONFIOptCommands);
661 nand_dbg_print(NAND_DBG_DEBUG, "ONFITimingMode: 0x%x\n",
662 DeviceInfo.wONFITimingMode);
663 nand_dbg_print(NAND_DBG_DEBUG, "ONFIPgmCacheTimingMode: 0x%x\n",
664 DeviceInfo.wONFIPgmCacheTimingMode);
665 nand_dbg_print(NAND_DBG_DEBUG, "MLCDevice: %s\n",
666 DeviceInfo.MLCDevice ? "Yes" : "No");
667 nand_dbg_print(NAND_DBG_DEBUG, "SpareSkipBytes: %d\n",
668 DeviceInfo.wSpareSkipBytes);
669 nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageNumber: %d\n",
670 DeviceInfo.nBitsInPageNumber);
671 nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageDataSize: %d\n",
672 DeviceInfo.nBitsInPageDataSize);
673 nand_dbg_print(NAND_DBG_DEBUG, "BitsInBlockDataSize: %d\n",
674 DeviceInfo.nBitsInBlockDataSize);
677 u16 NAND_Read_Device_ID(void)
679 u16 status = PASS;
680 u8 no_of_planes;
682 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
683 __FILE__, __LINE__, __func__);
685 iowrite32(0x02, FlashReg + SPARE_AREA_SKIP_BYTES);
686 iowrite32(0xffff, FlashReg + SPARE_AREA_MARKER);
687 DeviceInfo.wDeviceMaker = ioread32(FlashReg + MANUFACTURER_ID);
688 DeviceInfo.wDeviceID = ioread32(FlashReg + DEVICE_ID);
689 DeviceInfo.MLCDevice = ioread32(FlashReg + DEVICE_PARAM_0) & 0x0c;
691 if (ioread32(FlashReg + ONFI_DEVICE_NO_OF_LUNS) &
692 ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */
693 if (FAIL == get_onfi_nand_para())
694 return FAIL;
695 } else if (DeviceInfo.wDeviceMaker == 0xEC) { /* Samsung NAND */
696 get_samsung_nand_para();
697 } else if (DeviceInfo.wDeviceMaker == 0x98) { /* Toshiba NAND */
698 get_toshiba_nand_para();
699 } else if (DeviceInfo.wDeviceMaker == 0xAD) { /* Hynix NAND */
700 get_hynix_nand_para();
701 } else {
702 DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
705 nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
706 "acc_clks: %d, re_2_we: %d, we_2_re: %d,"
707 "addr_2_data: %d, rdwr_en_lo_cnt: %d, "
708 "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
709 ioread32(FlashReg + ACC_CLKS),
710 ioread32(FlashReg + RE_2_WE),
711 ioread32(FlashReg + WE_2_RE),
712 ioread32(FlashReg + ADDR_2_DATA),
713 ioread32(FlashReg + RDWR_EN_LO_CNT),
714 ioread32(FlashReg + RDWR_EN_HI_CNT),
715 ioread32(FlashReg + CS_SETUP_CNT));
717 DeviceInfo.wHWRevision = ioread32(FlashReg + REVISION);
718 DeviceInfo.wHWFeatures = ioread32(FlashReg + FEATURES);
720 DeviceInfo.wDeviceMainAreaSize =
721 ioread32(FlashReg + DEVICE_MAIN_AREA_SIZE);
722 DeviceInfo.wDeviceSpareAreaSize =
723 ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE);
725 DeviceInfo.wPageDataSize =
726 ioread32(FlashReg + LOGICAL_PAGE_DATA_SIZE);
728 /* Note: When using the Micon 4K NAND device, the controller will report
729 * Page Spare Size as 216 bytes. But Micron's Spec say it's 218 bytes.
730 * And if force set it to 218 bytes, the controller can not work
731 * correctly. So just let it be. But keep in mind that this bug may
732 * cause
733 * other problems in future. - Yunpeng 2008-10-10
735 DeviceInfo.wPageSpareSize =
736 ioread32(FlashReg + LOGICAL_PAGE_SPARE_SIZE);
738 DeviceInfo.wPagesPerBlock = ioread32(FlashReg + PAGES_PER_BLOCK);
740 DeviceInfo.wPageSize =
741 DeviceInfo.wPageDataSize + DeviceInfo.wPageSpareSize;
742 DeviceInfo.wBlockSize =
743 DeviceInfo.wPageSize * DeviceInfo.wPagesPerBlock;
744 DeviceInfo.wBlockDataSize =
745 DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize;
747 DeviceInfo.wDeviceWidth = ioread32(FlashReg + DEVICE_WIDTH);
748 DeviceInfo.wDeviceType =
749 ((ioread32(FlashReg + DEVICE_WIDTH) > 0) ? 16 : 8);
751 DeviceInfo.wDevicesConnected = ioread32(FlashReg + DEVICES_CONNECTED);
753 DeviceInfo.wSpareSkipBytes =
754 ioread32(FlashReg + SPARE_AREA_SKIP_BYTES) *
755 DeviceInfo.wDevicesConnected;
757 DeviceInfo.nBitsInPageNumber =
758 (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPagesPerBlock);
759 DeviceInfo.nBitsInPageDataSize =
760 (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPageDataSize);
761 DeviceInfo.nBitsInBlockDataSize =
762 (u8)GLOB_Calc_Used_Bits(DeviceInfo.wBlockDataSize);
764 set_ecc_config();
766 no_of_planes = ioread32(FlashReg + NUMBER_OF_PLANES) &
767 NUMBER_OF_PLANES__VALUE;
769 switch (no_of_planes) {
770 case 0:
771 case 1:
772 case 3:
773 case 7:
774 DeviceInfo.bPlaneNum = no_of_planes + 1;
775 break;
776 default:
777 status = FAIL;
778 break;
781 find_valid_banks();
783 detect_partition_feature();
785 dump_device_info();
787 return status;
790 u16 NAND_UnlockArrayAll(void)
792 u64 start_addr, end_addr;
794 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
795 __FILE__, __LINE__, __func__);
797 start_addr = 0;
798 end_addr = ((u64)DeviceInfo.wBlockSize *
799 (DeviceInfo.wTotalBlocks - 1)) >>
800 DeviceInfo.nBitsInPageDataSize;
802 index_addr((u32)(MODE_10 | (u32)start_addr), 0x10);
803 index_addr((u32)(MODE_10 | (u32)end_addr), 0x11);
805 return PASS;
808 void NAND_LLD_Enable_Disable_Interrupts(u16 INT_ENABLE)
810 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
811 __FILE__, __LINE__, __func__);
813 if (INT_ENABLE)
814 iowrite32(1, FlashReg + GLOBAL_INT_ENABLE);
815 else
816 iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
819 u16 NAND_Erase_Block(u32 block)
821 u16 status = PASS;
822 u64 flash_add;
823 u16 flash_bank;
824 u32 intr_status = 0;
825 u32 intr_status_addresses[4] = {INTR_STATUS0,
826 INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
828 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
829 __FILE__, __LINE__, __func__);
831 flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
832 * DeviceInfo.wBlockDataSize;
834 flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
836 if (block >= DeviceInfo.wTotalBlocks)
837 status = FAIL;
839 if (status == PASS) {
840 intr_status = intr_status_addresses[flash_bank];
842 iowrite32(INTR_STATUS0__ERASE_COMP | INTR_STATUS0__ERASE_FAIL,
843 FlashReg + intr_status);
845 index_addr((u32)(MODE_10 | (flash_bank << 24) |
846 (flash_add >> DeviceInfo.nBitsInPageDataSize)), 1);
848 while (!(ioread32(FlashReg + intr_status) &
849 (INTR_STATUS0__ERASE_COMP | INTR_STATUS0__ERASE_FAIL)))
852 if (ioread32(FlashReg + intr_status) &
853 INTR_STATUS0__ERASE_FAIL)
854 status = FAIL;
856 iowrite32(INTR_STATUS0__ERASE_COMP | INTR_STATUS0__ERASE_FAIL,
857 FlashReg + intr_status);
860 return status;
863 static u32 Boundary_Check_Block_Page(u32 block, u16 page,
864 u16 page_count)
866 u32 status = PASS;
868 if (block >= DeviceInfo.wTotalBlocks)
869 status = FAIL;
871 if (page + page_count > DeviceInfo.wPagesPerBlock)
872 status = FAIL;
874 return status;
877 u16 NAND_Read_Page_Spare(u8 *read_data, u32 block, u16 page,
878 u16 page_count)
880 u32 status = PASS;
881 u32 i;
882 u64 flash_add;
883 u32 PageSpareSize = DeviceInfo.wPageSpareSize;
884 u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
885 u32 flash_bank;
886 u32 intr_status = 0;
887 u32 intr_status_addresses[4] = {INTR_STATUS0,
888 INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
889 u8 *page_spare = buf_read_page_spare;
891 if (block >= DeviceInfo.wTotalBlocks) {
892 printk(KERN_ERR "block too big: %d\n", (int)block);
893 status = FAIL;
896 if (page >= DeviceInfo.wPagesPerBlock) {
897 printk(KERN_ERR "page too big: %d\n", page);
898 status = FAIL;
901 if (page_count > 1) {
902 printk(KERN_ERR "page count too big: %d\n", page_count);
903 status = FAIL;
906 flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
907 * DeviceInfo.wBlockDataSize +
908 (u64)page * DeviceInfo.wPageDataSize;
910 flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
912 if (status == PASS) {
913 intr_status = intr_status_addresses[flash_bank];
914 iowrite32(ioread32(FlashReg + intr_status),
915 FlashReg + intr_status);
917 index_addr((u32)(MODE_10 | (flash_bank << 24) |
918 (flash_add >> DeviceInfo.nBitsInPageDataSize)),
919 0x41);
920 index_addr((u32)(MODE_10 | (flash_bank << 24) |
921 (flash_add >> DeviceInfo.nBitsInPageDataSize)),
922 0x2000 | page_count);
923 while (!(ioread32(FlashReg + intr_status) &
924 INTR_STATUS0__LOAD_COMP))
927 iowrite32((u32)(MODE_01 | (flash_bank << 24) |
928 (flash_add >> DeviceInfo.nBitsInPageDataSize)),
929 FlashMem);
931 for (i = 0; i < (PageSpareSize / 4); i++)
932 *((u32 *)page_spare + i) =
933 ioread32(FlashMem + 0x10);
935 if (enable_ecc) {
936 for (i = 0; i < spareFlagBytes; i++)
937 read_data[i] =
938 page_spare[PageSpareSize -
939 spareFlagBytes + i];
940 for (i = 0; i < (PageSpareSize - spareFlagBytes); i++)
941 read_data[spareFlagBytes + i] =
942 page_spare[i];
943 } else {
944 for (i = 0; i < PageSpareSize; i++)
945 read_data[i] = page_spare[i];
948 index_addr((u32)(MODE_10 | (flash_bank << 24) |
949 (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
952 return status;
955 /* No use function. Should be removed later */
956 u16 NAND_Write_Page_Spare(u8 *write_data, u32 block, u16 page,
957 u16 page_count)
959 printk(KERN_ERR
960 "Error! This function (NAND_Write_Page_Spare) should never"
961 " be called!\n");
962 return ERR;
965 /* op value: 0 - DDMA read; 1 - DDMA write */
966 static void ddma_trans(u8 *data, u64 flash_add,
967 u32 flash_bank, int op, u32 numPages)
969 u32 data_addr;
971 /* Map virtual address to bus address for DDMA */
972 data_addr = virt_to_bus(data);
974 index_addr((u32)(MODE_10 | (flash_bank << 24) |
975 (flash_add >> DeviceInfo.nBitsInPageDataSize)),
976 (u16)(2 << 12) | (op << 8) | numPages);
978 index_addr((u32)(MODE_10 | (flash_bank << 24) |
979 ((u16)(0x0FFFF & (data_addr >> 16)) << 8)),
980 (u16)(2 << 12) | (2 << 8) | 0);
982 index_addr((u32)(MODE_10 | (flash_bank << 24) |
983 ((u16)(0x0FFFF & data_addr) << 8)),
984 (u16)(2 << 12) | (3 << 8) | 0);
986 index_addr((u32)(MODE_10 | (flash_bank << 24) |
987 (1 << 16) | (0x40 << 8)),
988 (u16)(2 << 12) | (4 << 8) | 0);
991 /* If data in buf are all 0xff, then return 1; otherwise return 0 */
992 static int check_all_1(u8 *buf)
994 int i, j, cnt;
996 for (i = 0; i < DeviceInfo.wPageDataSize; i++) {
997 if (buf[i] != 0xff) {
998 cnt = 0;
999 nand_dbg_print(NAND_DBG_WARN,
1000 "the first non-0xff data byte is: %d\n", i);
1001 for (j = i; j < DeviceInfo.wPageDataSize; j++) {
1002 nand_dbg_print(NAND_DBG_WARN, "0x%x ", buf[j]);
1003 cnt++;
1004 if (cnt > 8)
1005 break;
1007 nand_dbg_print(NAND_DBG_WARN, "\n");
1008 return 0;
1012 return 1;
1015 static int do_ecc_new(unsigned long bank, u8 *buf,
1016 u32 block, u16 page)
1018 int status = PASS;
1019 u16 err_page = 0;
1020 u16 err_byte;
1021 u8 err_sect;
1022 u8 err_dev;
1023 u16 err_fix_info;
1024 u16 err_addr;
1025 u32 ecc_sect_size;
1026 u8 *err_pos;
1027 u32 err_page_addr[4] = {ERR_PAGE_ADDR0,
1028 ERR_PAGE_ADDR1, ERR_PAGE_ADDR2, ERR_PAGE_ADDR3};
1030 ecc_sect_size = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
1032 do {
1033 err_page = ioread32(FlashReg + err_page_addr[bank]);
1034 err_addr = ioread32(FlashReg + ECC_ERROR_ADDRESS);
1035 err_byte = err_addr & ECC_ERROR_ADDRESS__OFFSET;
1036 err_sect = ((err_addr & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12);
1037 err_fix_info = ioread32(FlashReg + ERR_CORRECTION_INFO);
1038 err_dev = ((err_fix_info & ERR_CORRECTION_INFO__DEVICE_NR)
1039 >> 8);
1040 if (err_fix_info & ERR_CORRECTION_INFO__ERROR_TYPE) {
1041 nand_dbg_print(NAND_DBG_WARN,
1042 "%s, Line %d Uncorrectable ECC error "
1043 "when read block %d page %d."
1044 "PTN_INTR register: 0x%x "
1045 "err_page: %d, err_sect: %d, err_byte: %d, "
1046 "err_dev: %d, ecc_sect_size: %d, "
1047 "err_fix_info: 0x%x\n",
1048 __FILE__, __LINE__, block, page,
1049 ioread32(FlashReg + PTN_INTR),
1050 err_page, err_sect, err_byte, err_dev,
1051 ecc_sect_size, (u32)err_fix_info);
1053 if (check_all_1(buf))
1054 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d"
1055 "All 0xff!\n",
1056 __FILE__, __LINE__);
1057 else
1058 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d"
1059 "Not all 0xff!\n",
1060 __FILE__, __LINE__);
1061 status = FAIL;
1062 } else {
1063 nand_dbg_print(NAND_DBG_WARN,
1064 "%s, Line %d Found ECC error "
1065 "when read block %d page %d."
1066 "err_page: %d, err_sect: %d, err_byte: %d, "
1067 "err_dev: %d, ecc_sect_size: %d, "
1068 "err_fix_info: 0x%x\n",
1069 __FILE__, __LINE__, block, page,
1070 err_page, err_sect, err_byte, err_dev,
1071 ecc_sect_size, (u32)err_fix_info);
1072 if (err_byte < ECC_SECTOR_SIZE) {
1073 err_pos = buf +
1074 (err_page - page) *
1075 DeviceInfo.wPageDataSize +
1076 err_sect * ecc_sect_size +
1077 err_byte *
1078 DeviceInfo.wDevicesConnected +
1079 err_dev;
1081 *err_pos ^= err_fix_info &
1082 ERR_CORRECTION_INFO__BYTEMASK;
1085 } while (!(err_fix_info & ERR_CORRECTION_INFO__LAST_ERR_INFO));
1087 return status;
1090 u16 NAND_Read_Page_Main_Polling(u8 *read_data,
1091 u32 block, u16 page, u16 page_count)
1093 u32 status = PASS;
1094 u64 flash_add;
1095 u32 intr_status = 0;
1096 u32 flash_bank;
1097 u32 intr_status_addresses[4] = {INTR_STATUS0,
1098 INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
1099 u8 *read_data_l;
1101 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
1102 __FILE__, __LINE__, __func__);
1104 status = Boundary_Check_Block_Page(block, page, page_count);
1105 if (status != PASS)
1106 return status;
1108 flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
1109 * DeviceInfo.wBlockDataSize +
1110 (u64)page * DeviceInfo.wPageDataSize;
1111 flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
1113 iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
1115 intr_status = intr_status_addresses[flash_bank];
1116 iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
1118 if (page_count > 1) {
1119 read_data_l = read_data;
1120 while (page_count > MAX_PAGES_PER_RW) {
1121 if (ioread32(FlashReg + MULTIPLANE_OPERATION))
1122 status = NAND_Multiplane_Read(read_data_l,
1123 block, page, MAX_PAGES_PER_RW);
1124 else
1125 status = NAND_Pipeline_Read_Ahead_Polling(
1126 read_data_l, block, page,
1127 MAX_PAGES_PER_RW);
1129 if (status == FAIL)
1130 return status;
1132 read_data_l += DeviceInfo.wPageDataSize *
1133 MAX_PAGES_PER_RW;
1134 page_count -= MAX_PAGES_PER_RW;
1135 page += MAX_PAGES_PER_RW;
1137 if (ioread32(FlashReg + MULTIPLANE_OPERATION))
1138 status = NAND_Multiplane_Read(read_data_l,
1139 block, page, page_count);
1140 else
1141 status = NAND_Pipeline_Read_Ahead_Polling(
1142 read_data_l, block, page, page_count);
1144 return status;
1147 iowrite32(1, FlashReg + DMA_ENABLE);
1148 while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
1151 iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
1152 iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
1154 ddma_trans(read_data, flash_add, flash_bank, 0, 1);
1156 if (enable_ecc) {
1157 while (!(ioread32(FlashReg + intr_status) &
1158 (INTR_STATUS0__ECC_TRANSACTION_DONE |
1159 INTR_STATUS0__ECC_ERR)))
1162 if (ioread32(FlashReg + intr_status) &
1163 INTR_STATUS0__ECC_ERR) {
1164 iowrite32(INTR_STATUS0__ECC_ERR,
1165 FlashReg + intr_status);
1166 status = do_ecc_new(flash_bank, read_data,
1167 block, page);
1170 if (ioread32(FlashReg + intr_status) &
1171 INTR_STATUS0__ECC_TRANSACTION_DONE &
1172 INTR_STATUS0__ECC_ERR)
1173 iowrite32(INTR_STATUS0__ECC_TRANSACTION_DONE |
1174 INTR_STATUS0__ECC_ERR,
1175 FlashReg + intr_status);
1176 else if (ioread32(FlashReg + intr_status) &
1177 INTR_STATUS0__ECC_TRANSACTION_DONE)
1178 iowrite32(INTR_STATUS0__ECC_TRANSACTION_DONE,
1179 FlashReg + intr_status);
1180 else if (ioread32(FlashReg + intr_status) &
1181 INTR_STATUS0__ECC_ERR)
1182 iowrite32(INTR_STATUS0__ECC_ERR,
1183 FlashReg + intr_status);
1184 } else {
1185 while (!(ioread32(FlashReg + intr_status) &
1186 INTR_STATUS0__DMA_CMD_COMP))
1188 iowrite32(INTR_STATUS0__DMA_CMD_COMP, FlashReg + intr_status);
1191 iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
1193 iowrite32(0, FlashReg + DMA_ENABLE);
1194 while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
1197 return status;
1200 u16 NAND_Pipeline_Read_Ahead_Polling(u8 *read_data,
1201 u32 block, u16 page, u16 page_count)
1203 u32 status = PASS;
1204 u32 NumPages = page_count;
1205 u64 flash_add;
1206 u32 flash_bank;
1207 u32 intr_status = 0;
1208 u32 intr_status_addresses[4] = {INTR_STATUS0,
1209 INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
1210 u32 ecc_done_OR_dma_comp;
1212 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
1213 __FILE__, __LINE__, __func__);
1215 status = Boundary_Check_Block_Page(block, page, page_count);
1217 if (page_count < 2)
1218 status = FAIL;
1220 flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
1221 *DeviceInfo.wBlockDataSize +
1222 (u64)page * DeviceInfo.wPageDataSize;
1224 flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
1226 if (status == PASS) {
1227 intr_status = intr_status_addresses[flash_bank];
1228 iowrite32(ioread32(FlashReg + intr_status),
1229 FlashReg + intr_status);
1231 iowrite32(1, FlashReg + DMA_ENABLE);
1232 while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
1235 iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
1237 index_addr((u32)(MODE_10 | (flash_bank << 24) |
1238 (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
1239 ddma_trans(read_data, flash_add, flash_bank, 0, NumPages);
1241 ecc_done_OR_dma_comp = 0;
1242 while (1) {
1243 if (enable_ecc) {
1244 while (!ioread32(FlashReg + intr_status))
1247 if (ioread32(FlashReg + intr_status) &
1248 INTR_STATUS0__ECC_ERR) {
1249 iowrite32(INTR_STATUS0__ECC_ERR,
1250 FlashReg + intr_status);
1251 status = do_ecc_new(flash_bank,
1252 read_data, block, page);
1253 } else if (ioread32(FlashReg + intr_status) &
1254 INTR_STATUS0__DMA_CMD_COMP) {
1255 iowrite32(INTR_STATUS0__DMA_CMD_COMP,
1256 FlashReg + intr_status);
1258 if (1 == ecc_done_OR_dma_comp)
1259 break;
1261 ecc_done_OR_dma_comp = 1;
1262 } else if (ioread32(FlashReg + intr_status) &
1263 INTR_STATUS0__ECC_TRANSACTION_DONE) {
1264 iowrite32(
1265 INTR_STATUS0__ECC_TRANSACTION_DONE,
1266 FlashReg + intr_status);
1268 if (1 == ecc_done_OR_dma_comp)
1269 break;
1271 ecc_done_OR_dma_comp = 1;
1273 } else {
1274 while (!(ioread32(FlashReg + intr_status) &
1275 INTR_STATUS0__DMA_CMD_COMP))
1278 iowrite32(INTR_STATUS0__DMA_CMD_COMP,
1279 FlashReg + intr_status);
1280 break;
1283 iowrite32((~INTR_STATUS0__ECC_ERR) &
1284 (~INTR_STATUS0__ECC_TRANSACTION_DONE) &
1285 (~INTR_STATUS0__DMA_CMD_COMP),
1286 FlashReg + intr_status);
1290 iowrite32(ioread32(FlashReg + intr_status),
1291 FlashReg + intr_status);
1293 iowrite32(0, FlashReg + DMA_ENABLE);
1295 while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
1298 return status;
1301 u16 NAND_Read_Page_Main(u8 *read_data, u32 block, u16 page,
1302 u16 page_count)
1304 u32 status = PASS;
1305 u64 flash_add;
1306 u32 intr_status = 0;
1307 u32 flash_bank;
1308 u32 intr_status_addresses[4] = {INTR_STATUS0,
1309 INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
1310 int ret;
1311 u8 *read_data_l;
1313 nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
1314 __FILE__, __LINE__, __func__);
1316 status = Boundary_Check_Block_Page(block, page, page_count);
1317 if (status != PASS)
1318 return status;
1320 flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
1321 * DeviceInfo.wBlockDataSize +
1322 (u64)page * DeviceInfo.wPageDataSize;
1323 flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
1325 iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
1327 intr_status = intr_status_addresses[flash_bank];
1328 iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
1330 if (page_count > 1) {
1331 read_data_l = read_data;
1332 while (page_count > MAX_PAGES_PER_RW) {
1333 if (ioread32(FlashReg + MULTIPLANE_OPERATION))
1334 status = NAND_Multiplane_Read(read_data_l,
1335 block, page, MAX_PAGES_PER_RW);
1336 else
1337 status = NAND_Pipeline_Read_Ahead(
1338 read_data_l, block, page,
1339 MAX_PAGES_PER_RW);
1341 if (status == FAIL)
1342 return status;
1344 read_data_l += DeviceInfo.wPageDataSize *
1345 MAX_PAGES_PER_RW;
1346 page_count -= MAX_PAGES_PER_RW;
1347 page += MAX_PAGES_PER_RW;
1349 if (ioread32(FlashReg + MULTIPLANE_OPERATION))
1350 status = NAND_Multiplane_Read(read_data_l,
1351 block, page, page_count);
1352 else
1353 status = NAND_Pipeline_Read_Ahead(
1354 read_data_l, block, page, page_count);
1356 return status;
1359 iowrite32(1, FlashReg + DMA_ENABLE);
1360 while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
1363 iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
1364 iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
1366 /* Fill the mrst_nand_info structure */
1367 info.state = INT_READ_PAGE_MAIN;
1368 info.read_data = read_data;
1369 info.flash_bank = flash_bank;
1370 info.block = block;
1371 info.page = page;
1372 info.ret = PASS;
1374 ddma_trans(read_data, flash_add, flash_bank, 0, 1);
1376 iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable Interrupt */
1378 ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
1379 if (!ret) {
1380 printk(KERN_ERR "Wait for completion timeout "
1381 "in %s, Line %d\n", __FILE__, __LINE__);
1382 status = ERR;
1383 } else {
1384 status = info.ret;
1387 iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
1389 iowrite32(0, FlashReg + DMA_ENABLE);
1390 while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
1393 return status;
1396 void Conv_Spare_Data_Log2Phy_Format(u8 *data)
1398 int i;
1399 const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
1400 const u32 PageSpareSize = DeviceInfo.wPageSpareSize;
1402 if (enable_ecc) {
1403 for (i = spareFlagBytes - 1; i >= 0; i--)
1404 data[PageSpareSize - spareFlagBytes + i] = data[i];
1408 void Conv_Spare_Data_Phy2Log_Format(u8 *data)
1410 int i;
1411 const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
1412 const u32 PageSpareSize = DeviceInfo.wPageSpareSize;
1414 if (enable_ecc) {
1415 for (i = 0; i < spareFlagBytes; i++)
1416 data[i] = data[PageSpareSize - spareFlagBytes + i];
1421 void Conv_Main_Spare_Data_Log2Phy_Format(u8 *data, u16 page_count)
1423 const u32 PageSize = DeviceInfo.wPageSize;
1424 const u32 PageDataSize = DeviceInfo.wPageDataSize;
1425 const u32 eccBytes = DeviceInfo.wECCBytesPerSector;
1426 const u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
1427 const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
1428 u32 eccSectorSize;
1429 u32 page_offset;
1430 int i, j;
1432 eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
1433 if (enable_ecc) {
1434 while (page_count > 0) {
1435 page_offset = (page_count - 1) * PageSize;
1436 j = (DeviceInfo.wPageDataSize / eccSectorSize);
1437 for (i = spareFlagBytes - 1; i >= 0; i--)
1438 data[page_offset +
1439 (eccSectorSize + eccBytes) * j + i] =
1440 data[page_offset + PageDataSize + i];
1441 for (j--; j >= 1; j--) {
1442 for (i = eccSectorSize - 1; i >= 0; i--)
1443 data[page_offset +
1444 (eccSectorSize + eccBytes) * j + i] =
1445 data[page_offset +
1446 eccSectorSize * j + i];
1448 for (i = (PageSize - spareSkipBytes) - 1;
1449 i >= PageDataSize; i--)
1450 data[page_offset + i + spareSkipBytes] =
1451 data[page_offset + i];
1452 page_count--;
1457 void Conv_Main_Spare_Data_Phy2Log_Format(u8 *data, u16 page_count)
1459 const u32 PageSize = DeviceInfo.wPageSize;
1460 const u32 PageDataSize = DeviceInfo.wPageDataSize;
1461 const u32 eccBytes = DeviceInfo.wECCBytesPerSector;
1462 const u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
1463 const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
1464 u32 eccSectorSize;
1465 u32 page_offset;
1466 int i, j;
1468 eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
1469 if (enable_ecc) {
1470 while (page_count > 0) {
1471 page_offset = (page_count - 1) * PageSize;
1472 for (i = PageDataSize;
1473 i < PageSize - spareSkipBytes;
1474 i++)
1475 data[page_offset + i] =
1476 data[page_offset + i +
1477 spareSkipBytes];
1478 for (j = 1;
1479 j < DeviceInfo.wPageDataSize / eccSectorSize;
1480 j++) {
1481 for (i = 0; i < eccSectorSize; i++)
1482 data[page_offset +
1483 eccSectorSize * j + i] =
1484 data[page_offset +
1485 (eccSectorSize + eccBytes) * j
1486 + i];
1488 for (i = 0; i < spareFlagBytes; i++)
1489 data[page_offset + PageDataSize + i] =
1490 data[page_offset +
1491 (eccSectorSize + eccBytes) * j + i];
1492 page_count--;
1497 /* Un-tested function */
1498 u16 NAND_Multiplane_Read(u8 *read_data, u32 block, u16 page,
1499 u16 page_count)
1501 u32 status = PASS;
1502 u32 NumPages = page_count;
1503 u64 flash_add;
1504 u32 flash_bank;
1505 u32 intr_status = 0;
1506 u32 intr_status_addresses[4] = {INTR_STATUS0,
1507 INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
1508 u32 ecc_done_OR_dma_comp;
1510 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
1511 __FILE__, __LINE__, __func__);
1513 status = Boundary_Check_Block_Page(block, page, page_count);
1515 flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
1516 * DeviceInfo.wBlockDataSize +
1517 (u64)page * DeviceInfo.wPageDataSize;
1519 flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
1521 if (status == PASS) {
1522 intr_status = intr_status_addresses[flash_bank];
1523 iowrite32(ioread32(FlashReg + intr_status),
1524 FlashReg + intr_status);
1526 iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
1527 iowrite32(0x01, FlashReg + MULTIPLANE_OPERATION);
1529 iowrite32(1, FlashReg + DMA_ENABLE);
1530 while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
1532 index_addr((u32)(MODE_10 | (flash_bank << 24) |
1533 (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
1534 ddma_trans(read_data, flash_add, flash_bank, 0, NumPages);
1536 ecc_done_OR_dma_comp = 0;
1537 while (1) {
1538 if (enable_ecc) {
1539 while (!ioread32(FlashReg + intr_status))
1542 if (ioread32(FlashReg + intr_status) &
1543 INTR_STATUS0__ECC_ERR) {
1544 iowrite32(INTR_STATUS0__ECC_ERR,
1545 FlashReg + intr_status);
1546 status = do_ecc_new(flash_bank,
1547 read_data, block, page);
1548 } else if (ioread32(FlashReg + intr_status) &
1549 INTR_STATUS0__DMA_CMD_COMP) {
1550 iowrite32(INTR_STATUS0__DMA_CMD_COMP,
1551 FlashReg + intr_status);
1553 if (1 == ecc_done_OR_dma_comp)
1554 break;
1556 ecc_done_OR_dma_comp = 1;
1557 } else if (ioread32(FlashReg + intr_status) &
1558 INTR_STATUS0__ECC_TRANSACTION_DONE) {
1559 iowrite32(
1560 INTR_STATUS0__ECC_TRANSACTION_DONE,
1561 FlashReg + intr_status);
1563 if (1 == ecc_done_OR_dma_comp)
1564 break;
1566 ecc_done_OR_dma_comp = 1;
1568 } else {
1569 while (!(ioread32(FlashReg + intr_status) &
1570 INTR_STATUS0__DMA_CMD_COMP))
1572 iowrite32(INTR_STATUS0__DMA_CMD_COMP,
1573 FlashReg + intr_status);
1574 break;
1577 iowrite32((~INTR_STATUS0__ECC_ERR) &
1578 (~INTR_STATUS0__ECC_TRANSACTION_DONE) &
1579 (~INTR_STATUS0__DMA_CMD_COMP),
1580 FlashReg + intr_status);
1584 iowrite32(ioread32(FlashReg + intr_status),
1585 FlashReg + intr_status);
1587 iowrite32(0, FlashReg + DMA_ENABLE);
1589 while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
1592 iowrite32(0, FlashReg + MULTIPLANE_OPERATION);
1595 return status;
1598 u16 NAND_Pipeline_Read_Ahead(u8 *read_data, u32 block,
1599 u16 page, u16 page_count)
1601 u32 status = PASS;
1602 u32 NumPages = page_count;
1603 u64 flash_add;
1604 u32 flash_bank;
1605 u32 intr_status = 0;
1606 u32 intr_status_addresses[4] = {INTR_STATUS0,
1607 INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
1608 int ret;
1610 nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
1611 __FILE__, __LINE__, __func__);
1613 status = Boundary_Check_Block_Page(block, page, page_count);
1615 if (page_count < 2)
1616 status = FAIL;
1618 if (status != PASS)
1619 return status;
1621 flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
1622 *DeviceInfo.wBlockDataSize +
1623 (u64)page * DeviceInfo.wPageDataSize;
1625 flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
1627 intr_status = intr_status_addresses[flash_bank];
1628 iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
1630 iowrite32(1, FlashReg + DMA_ENABLE);
1631 while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
1634 iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
1636 /* Fill the mrst_nand_info structure */
1637 info.state = INT_PIPELINE_READ_AHEAD;
1638 info.read_data = read_data;
1639 info.flash_bank = flash_bank;
1640 info.block = block;
1641 info.page = page;
1642 info.ret = PASS;
1644 index_addr((u32)(MODE_10 | (flash_bank << 24) |
1645 (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
1647 ddma_trans(read_data, flash_add, flash_bank, 0, NumPages);
1649 iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable Interrupt */
1651 ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
1652 if (!ret) {
1653 printk(KERN_ERR "Wait for completion timeout "
1654 "in %s, Line %d\n", __FILE__, __LINE__);
1655 status = ERR;
1656 } else {
1657 status = info.ret;
1660 iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
1662 iowrite32(0, FlashReg + DMA_ENABLE);
1664 while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
1667 return status;
1671 u16 NAND_Write_Page_Main(u8 *write_data, u32 block, u16 page,
1672 u16 page_count)
1674 u32 status = PASS;
1675 u64 flash_add;
1676 u32 intr_status = 0;
1677 u32 flash_bank;
1678 u32 intr_status_addresses[4] = {INTR_STATUS0,
1679 INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
1680 int ret;
1681 u8 *write_data_l;
1683 nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
1684 __FILE__, __LINE__, __func__);
1686 status = Boundary_Check_Block_Page(block, page, page_count);
1687 if (status != PASS)
1688 return status;
1690 flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
1691 * DeviceInfo.wBlockDataSize +
1692 (u64)page * DeviceInfo.wPageDataSize;
1694 flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
1696 intr_status = intr_status_addresses[flash_bank];
1698 iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
1700 iowrite32(INTR_STATUS0__PROGRAM_COMP |
1701 INTR_STATUS0__PROGRAM_FAIL, FlashReg + intr_status);
1703 if (page_count > 1) {
1704 write_data_l = write_data;
1705 while (page_count > MAX_PAGES_PER_RW) {
1706 if (ioread32(FlashReg + MULTIPLANE_OPERATION))
1707 status = NAND_Multiplane_Write(write_data_l,
1708 block, page, MAX_PAGES_PER_RW);
1709 else
1710 status = NAND_Pipeline_Write_Ahead(
1711 write_data_l, block, page,
1712 MAX_PAGES_PER_RW);
1713 if (status == FAIL)
1714 return status;
1716 write_data_l += DeviceInfo.wPageDataSize *
1717 MAX_PAGES_PER_RW;
1718 page_count -= MAX_PAGES_PER_RW;
1719 page += MAX_PAGES_PER_RW;
1721 if (ioread32(FlashReg + MULTIPLANE_OPERATION))
1722 status = NAND_Multiplane_Write(write_data_l,
1723 block, page, page_count);
1724 else
1725 status = NAND_Pipeline_Write_Ahead(write_data_l,
1726 block, page, page_count);
1728 return status;
1731 iowrite32(1, FlashReg + DMA_ENABLE);
1732 while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
1735 iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
1737 iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
1739 /* Fill the mrst_nand_info structure */
1740 info.state = INT_WRITE_PAGE_MAIN;
1741 info.write_data = write_data;
1742 info.flash_bank = flash_bank;
1743 info.block = block;
1744 info.page = page;
1745 info.ret = PASS;
1747 ddma_trans(write_data, flash_add, flash_bank, 1, 1);
1749 iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable interrupt */
1751 ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
1752 if (!ret) {
1753 printk(KERN_ERR "Wait for completion timeout "
1754 "in %s, Line %d\n", __FILE__, __LINE__);
1755 status = ERR;
1756 } else {
1757 status = info.ret;
1760 iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
1762 iowrite32(0, FlashReg + DMA_ENABLE);
1763 while (ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG)
1766 return status;
1769 void NAND_ECC_Ctrl(int enable)
1771 if (enable) {
1772 nand_dbg_print(NAND_DBG_WARN,
1773 "Will enable ECC in %s, Line %d, Function: %s\n",
1774 __FILE__, __LINE__, __func__);
1775 iowrite32(1, FlashReg + ECC_ENABLE);
1776 enable_ecc = 1;
1777 } else {
1778 nand_dbg_print(NAND_DBG_WARN,
1779 "Will disable ECC in %s, Line %d, Function: %s\n",
1780 __FILE__, __LINE__, __func__);
1781 iowrite32(0, FlashReg + ECC_ENABLE);
1782 enable_ecc = 0;
1786 u16 NAND_Write_Page_Main_Spare(u8 *write_data, u32 block,
1787 u16 page, u16 page_count)
1789 u32 status = PASS;
1790 u32 i, j, page_num = 0;
1791 u32 PageSize = DeviceInfo.wPageSize;
1792 u32 PageDataSize = DeviceInfo.wPageDataSize;
1793 u32 eccBytes = DeviceInfo.wECCBytesPerSector;
1794 u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
1795 u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
1796 u64 flash_add;
1797 u32 eccSectorSize;
1798 u32 flash_bank;
1799 u32 intr_status = 0;
1800 u32 intr_status_addresses[4] = {INTR_STATUS0,
1801 INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
1802 u8 *page_main_spare = buf_write_page_main_spare;
1804 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
1805 __FILE__, __LINE__, __func__);
1807 eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
1809 status = Boundary_Check_Block_Page(block, page, page_count);
1811 flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
1813 if (status == PASS) {
1814 intr_status = intr_status_addresses[flash_bank];
1816 iowrite32(1, FlashReg + TRANSFER_SPARE_REG);
1818 while ((status != FAIL) && (page_count > 0)) {
1819 flash_add = (u64)(block %
1820 (DeviceInfo.wTotalBlocks / totalUsedBanks)) *
1821 DeviceInfo.wBlockDataSize +
1822 (u64)page * DeviceInfo.wPageDataSize;
1824 iowrite32(ioread32(FlashReg + intr_status),
1825 FlashReg + intr_status);
1827 iowrite32((u32)(MODE_01 | (flash_bank << 24) |
1828 (flash_add >>
1829 DeviceInfo.nBitsInPageDataSize)),
1830 FlashMem);
1832 if (enable_ecc) {
1833 for (j = 0;
1835 DeviceInfo.wPageDataSize / eccSectorSize;
1836 j++) {
1837 for (i = 0; i < eccSectorSize; i++)
1838 page_main_spare[(eccSectorSize +
1839 eccBytes) * j +
1840 i] =
1841 write_data[eccSectorSize *
1842 j + i];
1844 for (i = 0; i < eccBytes; i++)
1845 page_main_spare[(eccSectorSize +
1846 eccBytes) * j +
1847 eccSectorSize +
1848 i] =
1849 write_data[PageDataSize +
1850 spareFlagBytes +
1851 eccBytes * j +
1855 for (i = 0; i < spareFlagBytes; i++)
1856 page_main_spare[(eccSectorSize +
1857 eccBytes) * j + i] =
1858 write_data[PageDataSize + i];
1860 for (i = PageSize - 1; i >= PageDataSize +
1861 spareSkipBytes; i--)
1862 page_main_spare[i] = page_main_spare[i -
1863 spareSkipBytes];
1865 for (i = PageDataSize; i < PageDataSize +
1866 spareSkipBytes; i++)
1867 page_main_spare[i] = 0xff;
1869 for (i = 0; i < PageSize / 4; i++)
1870 iowrite32(
1871 *((u32 *)page_main_spare + i),
1872 FlashMem + 0x10);
1873 } else {
1875 for (i = 0; i < PageSize / 4; i++)
1876 iowrite32(*((u32 *)write_data + i),
1877 FlashMem + 0x10);
1880 while (!(ioread32(FlashReg + intr_status) &
1881 (INTR_STATUS0__PROGRAM_COMP |
1882 INTR_STATUS0__PROGRAM_FAIL)))
1885 if (ioread32(FlashReg + intr_status) &
1886 INTR_STATUS0__PROGRAM_FAIL)
1887 status = FAIL;
1889 iowrite32(ioread32(FlashReg + intr_status),
1890 FlashReg + intr_status);
1892 page_num++;
1893 page_count--;
1894 write_data += PageSize;
1897 iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
1900 return status;
1903 u16 NAND_Read_Page_Main_Spare(u8 *read_data, u32 block, u16 page,
1904 u16 page_count)
1906 u32 status = PASS;
1907 u32 i, j;
1908 u64 flash_add = 0;
1909 u32 PageSize = DeviceInfo.wPageSize;
1910 u32 PageDataSize = DeviceInfo.wPageDataSize;
1911 u32 PageSpareSize = DeviceInfo.wPageSpareSize;
1912 u32 eccBytes = DeviceInfo.wECCBytesPerSector;
1913 u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
1914 u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
1915 u32 eccSectorSize;
1916 u32 flash_bank;
1917 u32 intr_status = 0;
1918 u8 *read_data_l = read_data;
1919 u32 intr_status_addresses[4] = {INTR_STATUS0,
1920 INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
1921 u8 *page_main_spare = buf_read_page_main_spare;
1923 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
1924 __FILE__, __LINE__, __func__);
1926 eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
1928 status = Boundary_Check_Block_Page(block, page, page_count);
1930 flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
1932 if (status == PASS) {
1933 intr_status = intr_status_addresses[flash_bank];
1935 iowrite32(1, FlashReg + TRANSFER_SPARE_REG);
1937 iowrite32(ioread32(FlashReg + intr_status),
1938 FlashReg + intr_status);
1940 while ((status != FAIL) && (page_count > 0)) {
1941 flash_add = (u64)(block %
1942 (DeviceInfo.wTotalBlocks / totalUsedBanks))
1943 * DeviceInfo.wBlockDataSize +
1944 (u64)page * DeviceInfo.wPageDataSize;
1946 index_addr((u32)(MODE_10 | (flash_bank << 24) |
1947 (flash_add >> DeviceInfo.nBitsInPageDataSize)),
1948 0x43);
1949 index_addr((u32)(MODE_10 | (flash_bank << 24) |
1950 (flash_add >> DeviceInfo.nBitsInPageDataSize)),
1951 0x2000 | page_count);
1953 while (!(ioread32(FlashReg + intr_status) &
1954 INTR_STATUS0__LOAD_COMP))
1957 iowrite32((u32)(MODE_01 | (flash_bank << 24) |
1958 (flash_add >>
1959 DeviceInfo.nBitsInPageDataSize)),
1960 FlashMem);
1962 for (i = 0; i < PageSize / 4; i++)
1963 *(((u32 *)page_main_spare) + i) =
1964 ioread32(FlashMem + 0x10);
1966 if (enable_ecc) {
1967 for (i = PageDataSize; i < PageSize -
1968 spareSkipBytes; i++)
1969 page_main_spare[i] = page_main_spare[i +
1970 spareSkipBytes];
1972 for (j = 0;
1973 j < DeviceInfo.wPageDataSize / eccSectorSize;
1974 j++) {
1976 for (i = 0; i < eccSectorSize; i++)
1977 read_data_l[eccSectorSize * j +
1978 i] =
1979 page_main_spare[
1980 (eccSectorSize +
1981 eccBytes) * j + i];
1983 for (i = 0; i < eccBytes; i++)
1984 read_data_l[PageDataSize +
1985 spareFlagBytes +
1986 eccBytes * j + i] =
1987 page_main_spare[
1988 (eccSectorSize +
1989 eccBytes) * j +
1990 eccSectorSize + i];
1993 for (i = 0; i < spareFlagBytes; i++)
1994 read_data_l[PageDataSize + i] =
1995 page_main_spare[(eccSectorSize +
1996 eccBytes) * j + i];
1997 } else {
1998 for (i = 0; i < (PageDataSize + PageSpareSize);
1999 i++)
2000 read_data_l[i] = page_main_spare[i];
2004 if (enable_ecc) {
2005 while (!(ioread32(FlashReg + intr_status) &
2006 (INTR_STATUS0__ECC_TRANSACTION_DONE |
2007 INTR_STATUS0__ECC_ERR)))
2010 if (ioread32(FlashReg + intr_status) &
2011 INTR_STATUS0__ECC_ERR) {
2012 iowrite32(INTR_STATUS0__ECC_ERR,
2013 FlashReg + intr_status);
2014 status = do_ecc_new(flash_bank,
2015 read_data, block, page);
2018 if (ioread32(FlashReg + intr_status) &
2019 INTR_STATUS0__ECC_TRANSACTION_DONE &
2020 INTR_STATUS0__ECC_ERR) {
2021 iowrite32(INTR_STATUS0__ECC_ERR |
2022 INTR_STATUS0__ECC_TRANSACTION_DONE,
2023 FlashReg + intr_status);
2024 } else if (ioread32(FlashReg + intr_status) &
2025 INTR_STATUS0__ECC_TRANSACTION_DONE) {
2026 iowrite32(
2027 INTR_STATUS0__ECC_TRANSACTION_DONE,
2028 FlashReg + intr_status);
2029 } else if (ioread32(FlashReg + intr_status) &
2030 INTR_STATUS0__ECC_ERR) {
2031 iowrite32(INTR_STATUS0__ECC_ERR,
2032 FlashReg + intr_status);
2036 page++;
2037 page_count--;
2038 read_data_l += PageSize;
2042 iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
2044 index_addr((u32)(MODE_10 | (flash_bank << 24) |
2045 (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
2047 return status;
2050 u16 NAND_Pipeline_Write_Ahead(u8 *write_data, u32 block,
2051 u16 page, u16 page_count)
2053 u16 status = PASS;
2054 u32 NumPages = page_count;
2055 u64 flash_add;
2056 u32 flash_bank;
2057 u32 intr_status = 0;
2058 u32 intr_status_addresses[4] = {INTR_STATUS0,
2059 INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
2060 int ret;
2062 nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
2063 __FILE__, __LINE__, __func__);
2065 status = Boundary_Check_Block_Page(block, page, page_count);
2067 if (page_count < 2)
2068 status = FAIL;
2070 if (status != PASS)
2071 return status;
2073 flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
2074 * DeviceInfo.wBlockDataSize +
2075 (u64)page * DeviceInfo.wPageDataSize;
2077 flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
2079 intr_status = intr_status_addresses[flash_bank];
2080 iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
2082 iowrite32(1, FlashReg + DMA_ENABLE);
2083 while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
2086 iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
2088 /* Fill the mrst_nand_info structure */
2089 info.state = INT_PIPELINE_WRITE_AHEAD;
2090 info.write_data = write_data;
2091 info.flash_bank = flash_bank;
2092 info.block = block;
2093 info.page = page;
2094 info.ret = PASS;
2096 index_addr((u32)(MODE_10 | (flash_bank << 24) |
2097 (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
2099 ddma_trans(write_data, flash_add, flash_bank, 1, NumPages);
2101 iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable interrupt */
2103 ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
2104 if (!ret) {
2105 printk(KERN_ERR "Wait for completion timeout "
2106 "in %s, Line %d\n", __FILE__, __LINE__);
2107 status = ERR;
2108 } else {
2109 status = info.ret;
2112 iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
2114 iowrite32(0, FlashReg + DMA_ENABLE);
2115 while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
2118 return status;
2121 /* Un-tested function */
2122 u16 NAND_Multiplane_Write(u8 *write_data, u32 block, u16 page,
2123 u16 page_count)
2125 u16 status = PASS;
2126 u32 NumPages = page_count;
2127 u64 flash_add;
2128 u32 flash_bank;
2129 u32 intr_status = 0;
2130 u32 intr_status_addresses[4] = {INTR_STATUS0,
2131 INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
2132 u16 status2 = PASS;
2133 u32 t;
2135 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
2136 __FILE__, __LINE__, __func__);
2138 status = Boundary_Check_Block_Page(block, page, page_count);
2139 if (status != PASS)
2140 return status;
2142 flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
2143 * DeviceInfo.wBlockDataSize +
2144 (u64)page * DeviceInfo.wPageDataSize;
2146 flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
2148 intr_status = intr_status_addresses[flash_bank];
2149 iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
2151 iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
2152 iowrite32(0x01, FlashReg + MULTIPLANE_OPERATION);
2154 iowrite32(1, FlashReg + DMA_ENABLE);
2155 while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
2158 iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
2160 index_addr((u32)(MODE_10 | (flash_bank << 24) |
2161 (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
2163 ddma_trans(write_data, flash_add, flash_bank, 1, NumPages);
2165 while (1) {
2166 while (!ioread32(FlashReg + intr_status))
2169 if (ioread32(FlashReg + intr_status) &
2170 INTR_STATUS0__DMA_CMD_COMP) {
2171 iowrite32(INTR_STATUS0__DMA_CMD_COMP,
2172 FlashReg + intr_status);
2173 status = PASS;
2174 if (status2 == FAIL)
2175 status = FAIL;
2176 break;
2177 } else if (ioread32(FlashReg + intr_status) &
2178 INTR_STATUS0__PROGRAM_FAIL) {
2179 status2 = FAIL;
2180 status = FAIL;
2181 t = ioread32(FlashReg + intr_status) &
2182 INTR_STATUS0__PROGRAM_FAIL;
2183 iowrite32(t, FlashReg + intr_status);
2184 } else {
2185 iowrite32((~INTR_STATUS0__PROGRAM_FAIL) &
2186 (~INTR_STATUS0__DMA_CMD_COMP),
2187 FlashReg + intr_status);
2191 iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
2193 iowrite32(0, FlashReg + DMA_ENABLE);
2195 while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
2198 iowrite32(0, FlashReg + MULTIPLANE_OPERATION);
2200 return status;
2204 #if CMD_DMA
/*
 * cdma_isr - interrupt handler for CMD_DMA mode.
 * Checks that the interrupt really came from the CDMA engine, masks
 * further controller interrupts, collects the event status from the FTL
 * layer, and wakes the task blocked on dev->complete.
 */
static irqreturn_t cdma_isr(int irq, void *dev_id)
{
	struct mrst_nand_info *dev = dev_id;
	int first_failed_cmd;

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
			__FILE__, __LINE__, __func__);

	/* Shared IRQ line: bail out if this is not our interrupt */
	if (!is_cdma_interrupt())
		return IRQ_NONE;

	/* Disable controller interrupts */
	iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
	/* Harvest command status (index of first failed command, if any) */
	GLOB_FTL_Event_Status(&first_failed_cmd);
	complete(&dev->complete);

	return IRQ_HANDLED;
}
2223 #else
/*
 * handle_nand_int_read - service a page-read completion for direct DMA mode.
 * Polls the per-bank interrupt status register until the read is finished.
 * With ECC enabled a read is complete only after BOTH the DMA-complete and
 * the ECC-transaction-done events have been seen (in either order), which
 * is what the ecc_done_OR_dma_comp flag tracks; an ECC error triggers
 * software correction via do_ecc_new(). Result is reported in dev->ret.
 */
static void handle_nand_int_read(struct mrst_nand_info *dev)
{
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	u32 intr_status;
	u32 ecc_done_OR_dma_comp = 0;

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
			__FILE__, __LINE__, __func__);

	dev->ret = PASS;
	/* Each flash bank has its own interrupt status register */
	intr_status = intr_status_addresses[dev->flash_bank];

	while (1) {
		if (enable_ecc) {
			if (ioread32(FlashReg + intr_status) &
				INTR_STATUS0__ECC_ERR) {
				/* W1C: write the bit back to acknowledge it */
				iowrite32(INTR_STATUS0__ECC_ERR,
					FlashReg + intr_status);
				/* Attempt software ECC correction */
				dev->ret = do_ecc_new(dev->flash_bank,
						dev->read_data,
						dev->block, dev->page);
			} else if (ioread32(FlashReg + intr_status) &
				INTR_STATUS0__DMA_CMD_COMP) {
				iowrite32(INTR_STATUS0__DMA_CMD_COMP,
					FlashReg + intr_status);
				/* Done only if ECC already finished too */
				if (1 == ecc_done_OR_dma_comp)
					break;
				ecc_done_OR_dma_comp = 1;
			} else if (ioread32(FlashReg + intr_status) &
				INTR_STATUS0__ECC_TRANSACTION_DONE) {
				iowrite32(INTR_STATUS0__ECC_TRANSACTION_DONE,
					FlashReg + intr_status);
				/* Done only if DMA already finished too */
				if (1 == ecc_done_OR_dma_comp)
					break;
				ecc_done_OR_dma_comp = 1;
			}
		} else {
			/* ECC disabled: DMA completion alone ends the read */
			if (ioread32(FlashReg + intr_status) &
				INTR_STATUS0__DMA_CMD_COMP) {
				iowrite32(INTR_STATUS0__DMA_CMD_COMP,
					FlashReg + intr_status);
				break;
			} else {
				printk(KERN_ERR "Illegal INTS "
					"(offset addr 0x%x) value: 0x%x\n",
					intr_status,
					ioread32(FlashReg + intr_status));
			}
		}

		/* Clear any status bits other than the three we handle */
		iowrite32((~INTR_STATUS0__ECC_ERR) &
			(~INTR_STATUS0__ECC_TRANSACTION_DONE) &
			(~INTR_STATUS0__DMA_CMD_COMP),
			FlashReg + intr_status);
	}
}
/*
 * handle_nand_int_write - service a page-program completion for direct
 * DMA mode. Busy-waits on the per-bank interrupt status register until
 * DMA command completion, remembering any PROGRAM_FAIL seen along the
 * way so that dev->ret reflects the overall outcome.
 */
static void handle_nand_int_write(struct mrst_nand_info *dev)
{
	u32 intr_status;
	u32 intr[4] = {INTR_STATUS0, INTR_STATUS1,
		INTR_STATUS2, INTR_STATUS3};
	int status = PASS;

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
			__FILE__, __LINE__, __func__);

	dev->ret = PASS;
	/* Each flash bank has its own interrupt status register */
	intr_status = intr[dev->flash_bank];

	while (1) {
		/* Spin until some status bit is raised */
		while (!ioread32(FlashReg + intr_status))
			;

		if (ioread32(FlashReg + intr_status) &
			INTR_STATUS0__DMA_CMD_COMP) {
			/* W1C: acknowledge DMA completion */
			iowrite32(INTR_STATUS0__DMA_CMD_COMP,
				FlashReg + intr_status);
			/* Propagate any earlier program failure */
			if (FAIL == status)
				dev->ret = FAIL;
			break;
		} else if (ioread32(FlashReg + intr_status) &
			INTR_STATUS0__PROGRAM_FAIL) {
			status = FAIL;
			iowrite32(INTR_STATUS0__PROGRAM_FAIL,
				FlashReg + intr_status);
		} else {
			/* Clear anything else so we don't spin on it */
			iowrite32((~INTR_STATUS0__PROGRAM_FAIL) &
				(~INTR_STATUS0__DMA_CMD_COMP),
				FlashReg + intr_status);
		}
	}
}
/*
 * ddma_isr - interrupt handler for direct DMA (non-CMD_DMA) mode.
 * Verifies that the interrupt belongs to the bank the driver is
 * currently operating on, dispatches on dev->state to the read/write
 * completion handler, then wakes the task waiting on dev->complete.
 */
static irqreturn_t ddma_isr(int irq, void *dev_id)
{
	struct mrst_nand_info *dev = dev_id;
	u32 int_mask, ints0, ints1, ints2, ints3, ints_offset;
	u32 intr[4] = {INTR_STATUS0, INTR_STATUS1,
		INTR_STATUS2, INTR_STATUS3};

	/* The set of interrupt sources this driver reacts to */
	int_mask = INTR_STATUS0__DMA_CMD_COMP |
		INTR_STATUS0__ECC_TRANSACTION_DONE |
		INTR_STATUS0__ECC_ERR |
		INTR_STATUS0__PROGRAM_FAIL |
		INTR_STATUS0__ERASE_FAIL;

	ints0 = ioread32(FlashReg + INTR_STATUS0);
	ints1 = ioread32(FlashReg + INTR_STATUS1);
	ints2 = ioread32(FlashReg + INTR_STATUS2);
	ints3 = ioread32(FlashReg + INTR_STATUS3);

	/* Status register of the bank currently being worked on */
	ints_offset = intr[dev->flash_bank];

	nand_dbg_print(NAND_DBG_DEBUG,
			"INTR0: 0x%x, INTR1: 0x%x, INTR2: 0x%x, INTR3: 0x%x, "
			"DMA_INTR: 0x%x, "
			"dev->state: 0x%x, dev->flash_bank: %d\n",
			ints0, ints1, ints2, ints3,
			ioread32(FlashReg + DMA_INTR),
			dev->state, dev->flash_bank);

	/* Not for the active bank: acknowledge everything and ignore */
	if (!(ioread32(FlashReg + ints_offset) & int_mask)) {
		iowrite32(ints0, FlashReg + INTR_STATUS0);
		iowrite32(ints1, FlashReg + INTR_STATUS1);
		iowrite32(ints2, FlashReg + INTR_STATUS2);
		iowrite32(ints3, FlashReg + INTR_STATUS3);
		nand_dbg_print(NAND_DBG_WARN,
			"ddma_isr: Invalid interrupt for NAND controller. "
			"Ignore it\n");
		return IRQ_NONE;
	}

	switch (dev->state) {
	case INT_READ_PAGE_MAIN:
	case INT_PIPELINE_READ_AHEAD:
		/* Disable controller interrupts */
		iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
		handle_nand_int_read(dev);
		break;
	case INT_WRITE_PAGE_MAIN:
	case INT_PIPELINE_WRITE_AHEAD:
		iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
		handle_nand_int_write(dev);
		break;
	default:
		printk(KERN_ERR "ddma_isr - Illegal state: 0x%x\n",
				dev->state);
		return IRQ_NONE;
	}

	dev->state = INT_IDLE_STATE;
	complete(&dev->complete);
	return IRQ_HANDLED;
}
2380 #endif
/* PCI match table: Intel (0x8086) Moorestown NAND controller, device
 * 0x0809; any subsystem IDs. Terminated by the all-zero sentinel. */
static const struct pci_device_id nand_pci_ids[] = {
	{
	 .vendor = 0x8086,
	 .device = 0x0809,
	 .subvendor = PCI_ANY_ID,
	 .subdevice = PCI_ANY_ID,
	 },
	{ /* end: all zeroes */ }
};
2392 static int nand_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
2394 int ret = -ENODEV;
2395 unsigned long csr_base;
2396 unsigned long csr_len;
2397 struct mrst_nand_info *pndev = &info;
2398 u32 int_mask;
2400 ret = pci_enable_device(dev);
2401 if (ret) {
2402 printk(KERN_ERR "Spectra: pci_enable_device failed.\n");
2403 return ret;
2406 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
2407 __FILE__, __LINE__, __func__);
2409 FlashReg = ioremap_nocache(GLOB_HWCTL_REG_BASE,
2410 GLOB_HWCTL_REG_SIZE);
2411 if (!FlashReg) {
2412 printk(KERN_ERR "Spectra: ioremap_nocache failed!");
2413 goto failed_disable;
2415 nand_dbg_print(NAND_DBG_WARN,
2416 "Spectra: Remapped reg base address: "
2417 "0x%p, len: %d\n",
2418 FlashReg, GLOB_HWCTL_REG_SIZE);
2420 FlashMem = ioremap_nocache(GLOB_HWCTL_MEM_BASE,
2421 GLOB_HWCTL_MEM_SIZE);
2422 if (!FlashMem) {
2423 printk(KERN_ERR "Spectra: ioremap_nocache failed!");
2424 iounmap(FlashReg);
2425 goto failed_disable;
2427 nand_dbg_print(NAND_DBG_WARN,
2428 "Spectra: Remapped flash base address: "
2429 "0x%p, len: %d\n",
2430 (void *)FlashMem, GLOB_HWCTL_MEM_SIZE);
2432 nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
2433 "acc_clks: %d, re_2_we: %d, we_2_re: %d,"
2434 "addr_2_data: %d, rdwr_en_lo_cnt: %d, "
2435 "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
2436 ioread32(FlashReg + ACC_CLKS),
2437 ioread32(FlashReg + RE_2_WE),
2438 ioread32(FlashReg + WE_2_RE),
2439 ioread32(FlashReg + ADDR_2_DATA),
2440 ioread32(FlashReg + RDWR_EN_LO_CNT),
2441 ioread32(FlashReg + RDWR_EN_HI_CNT),
2442 ioread32(FlashReg + CS_SETUP_CNT));
2444 NAND_Flash_Reset();
2446 iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
2448 #if CMD_DMA
2449 info.pcmds_num = 0;
2450 info.flash_bank = 0;
2451 info.cdma_num = 0;
2452 int_mask = (DMA_INTR__DESC_COMP_CHANNEL0 |
2453 DMA_INTR__DESC_COMP_CHANNEL1 |
2454 DMA_INTR__DESC_COMP_CHANNEL2 |
2455 DMA_INTR__DESC_COMP_CHANNEL3 |
2456 DMA_INTR__MEMCOPY_DESC_COMP);
2457 iowrite32(int_mask, FlashReg + DMA_INTR_EN);
2458 iowrite32(0xFFFF, FlashReg + DMA_INTR);
2460 int_mask = (INTR_STATUS0__ECC_ERR |
2461 INTR_STATUS0__PROGRAM_FAIL |
2462 INTR_STATUS0__ERASE_FAIL);
2463 #else
2464 int_mask = INTR_STATUS0__DMA_CMD_COMP |
2465 INTR_STATUS0__ECC_TRANSACTION_DONE |
2466 INTR_STATUS0__ECC_ERR |
2467 INTR_STATUS0__PROGRAM_FAIL |
2468 INTR_STATUS0__ERASE_FAIL;
2469 #endif
2470 iowrite32(int_mask, FlashReg + INTR_EN0);
2471 iowrite32(int_mask, FlashReg + INTR_EN1);
2472 iowrite32(int_mask, FlashReg + INTR_EN2);
2473 iowrite32(int_mask, FlashReg + INTR_EN3);
2475 /* Clear all status bits */
2476 iowrite32(0xFFFF, FlashReg + INTR_STATUS0);
2477 iowrite32(0xFFFF, FlashReg + INTR_STATUS1);
2478 iowrite32(0xFFFF, FlashReg + INTR_STATUS2);
2479 iowrite32(0xFFFF, FlashReg + INTR_STATUS3);
2481 iowrite32(0x0F, FlashReg + RB_PIN_ENABLED);
2482 iowrite32(CHIP_EN_DONT_CARE__FLAG, FlashReg + CHIP_ENABLE_DONT_CARE);
2484 /* Should set value for these registers when init */
2485 iowrite32(0, FlashReg + TWO_ROW_ADDR_CYCLES);
2486 iowrite32(1, FlashReg + ECC_ENABLE);
2487 enable_ecc = 1;
2489 pci_set_master(dev);
2490 pndev->dev = dev;
2492 csr_base = pci_resource_start(dev, 0);
2493 if (!csr_base) {
2494 printk(KERN_ERR "Spectra: pci_resource_start failed!\n");
2495 ret = -ENODEV;
2496 goto failed_req_csr;
2499 csr_len = pci_resource_len(dev, 0);
2500 if (!csr_len) {
2501 printk(KERN_ERR "Spectra: pci_resource_len failed!\n");
2502 ret = -ENODEV;
2503 goto failed_req_csr;
2506 ret = pci_request_regions(dev, SPECTRA_NAND_NAME);
2507 if (ret) {
2508 printk(KERN_ERR "Spectra: Unable to request "
2509 "memory region\n");
2510 goto failed_req_csr;
2513 pndev->ioaddr = ioremap_nocache(csr_base, csr_len);
2514 if (!pndev->ioaddr) {
2515 printk(KERN_ERR "Spectra: Unable to remap memory region\n");
2516 ret = -ENOMEM;
2517 goto failed_remap_csr;
2519 nand_dbg_print(NAND_DBG_DEBUG, "Spectra: CSR 0x%08lx -> 0x%p (0x%lx)\n",
2520 csr_base, pndev->ioaddr, csr_len);
2522 init_completion(&pndev->complete);
2523 nand_dbg_print(NAND_DBG_DEBUG, "Spectra: IRQ %d\n", dev->irq);
2525 #if CMD_DMA
2526 if (request_irq(dev->irq, cdma_isr, IRQF_SHARED,
2527 SPECTRA_NAND_NAME, &info)) {
2528 printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
2529 ret = -ENODEV;
2530 iounmap(pndev->ioaddr);
2531 goto failed_remap_csr;
2533 #else
2534 if (request_irq(dev->irq, ddma_isr, IRQF_SHARED,
2535 SPECTRA_NAND_NAME, &info)) {
2536 printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
2537 ret = -ENODEV;
2538 iounmap(pndev->ioaddr);
2539 goto failed_remap_csr;
2541 #endif
2543 pci_set_drvdata(dev, pndev);
2545 ret = GLOB_LLD_Read_Device_ID();
2546 if (ret) {
2547 iounmap(pndev->ioaddr);
2548 goto failed_remap_csr;
2551 ret = register_spectra_ftl();
2552 if (ret) {
2553 iounmap(pndev->ioaddr);
2554 goto failed_remap_csr;
2557 return 0;
2559 failed_remap_csr:
2560 pci_release_regions(dev);
2561 failed_req_csr:
2562 iounmap(FlashMem);
2563 iounmap(FlashReg);
2564 failed_disable:
2565 pci_disable_device(dev);
2567 return ret;
2570 static void nand_pci_remove(struct pci_dev *dev)
2572 struct mrst_nand_info *pndev = pci_get_drvdata(dev);
2574 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
2575 __FILE__, __LINE__, __func__);
2577 #if CMD_DMA
2578 free_irq(dev->irq, pndev);
2579 #endif
2580 iounmap(pndev->ioaddr);
2581 pci_release_regions(dev);
2582 pci_disable_device(dev);
2585 MODULE_DEVICE_TABLE(pci, nand_pci_ids);
/* PCI driver glue for the Spectra NAND controller */
static struct pci_driver nand_pci_driver = {
	.name = SPECTRA_NAND_NAME,
	.id_table = nand_pci_ids,
	.probe = nand_pci_probe,
	.remove = nand_pci_remove,
};
2594 int NAND_Flash_Init(void)
2596 int retval;
2598 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2599 __FILE__, __LINE__, __func__);
2601 retval = pci_register_driver(&nand_pci_driver);
2602 if (retval)
2603 return -ENOMEM;
2605 return PASS;
2608 /* Free memory */
2609 int nand_release_spectra(void)
2611 pci_unregister_driver(&nand_pci_driver);
2612 iounmap(FlashMem);
2613 iounmap(FlashReg);
2615 return 0;