/*
 * linux/arch/arm/mach-omap2/gpmc-onenand.c
 *
 * Copyright (C) 2006 - 2009 Nokia Corporation
 * Contacts:	Juha Yrjola
 *		Tony Lindgren
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/mtd/onenand_regs.h>
#include <linux/io.h>

#include <asm/mach/flash.h>

#include <plat/onenand.h>
#include <plat/board.h>
#include <plat/gpmc.h>
static struct omap_onenand_platform_data *gpmc_onenand_data;

static struct platform_device gpmc_onenand_device = {
	.name		= "omap2-onenand",
	.id		= -1,
};

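/*
 * Program fixed asynchronous timings on the given GPMC chip select and make
 * sure the OneNAND's own synchronous read and write modes are disabled, so
 * the chip can be accessed before its operating frequency is known.
 */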
static int omap2_onenand_set_async_mode(int cs, void __iomem *onenand_base)
{
	struct gpmc_timings t;
	u32 reg;
	int err;

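	/*
	 * Asynchronous OneNAND interface timing parameters; all values are
	 * in nanoseconds and are rounded to GPMC ticks below.
	 */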
	const int t_cer  = 15;
	const int t_avdp = 12;
	const int t_aavdh = 7;
	const int t_ce   = 76;
	const int t_aa   = 76;
	const int t_oe   = 20;
	const int t_cez  = 20; /* max of t_cez, t_oez */
	const int t_ds   = 30;
	const int t_wpl  = 40;
	const int t_wph  = 30;

	/* Ensure sync read and sync write are disabled */
	reg = readw(onenand_base + ONENAND_REG_SYS_CFG1);
	reg &= ~ONENAND_SYS_CFG1_SYNC_READ & ~ONENAND_SYS_CFG1_SYNC_WRITE;
	writew(reg, onenand_base + ONENAND_REG_SYS_CFG1);

	memset(&t, 0, sizeof(t));
	t.sync_clk = 0;
	t.cs_on = 0;
	t.adv_on = 0;

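	/*
	 * The asynchronous read access time has to cover the slowest of the
	 * address-valid (t_aa), chip-enable (t_ce) and output-enable (t_oe)
	 * paths, hence the max_t() chain below.
	 */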
	/* Read */
	t.adv_rd_off = gpmc_round_ns_to_ticks(max_t(int, t_avdp, t_cer));
	t.oe_on  = t.adv_rd_off + gpmc_round_ns_to_ticks(t_aavdh);
	t.access = t.adv_on + gpmc_round_ns_to_ticks(t_aa);
	t.access = max_t(int, t.access, t.cs_on + gpmc_round_ns_to_ticks(t_ce));
	t.access = max_t(int, t.access, t.oe_on + gpmc_round_ns_to_ticks(t_oe));
	t.oe_off = t.access + gpmc_round_ns_to_ticks(1);
	t.cs_rd_off = t.oe_off;
	t.rd_cycle  = t.cs_rd_off + gpmc_round_ns_to_ticks(t_cez);

	/* Write */
	t.adv_wr_off = t.adv_rd_off;
	t.we_on  = t.oe_on;
	if (cpu_is_omap34xx()) {
		t.wr_data_mux_bus = t.we_on;
		t.wr_access = t.we_on + gpmc_round_ns_to_ticks(t_ds);
	}
	t.we_off = t.we_on + gpmc_round_ns_to_ticks(t_wpl);
	t.cs_wr_off = t.we_off + gpmc_round_ns_to_ticks(t_wph);
	t.wr_cycle  = t.cs_wr_off + gpmc_round_ns_to_ticks(t_cez);

	/* Configure GPMC for asynchronous read */
	gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1,
			  GPMC_CONFIG1_DEVICESIZE_16 |
			  GPMC_CONFIG1_MUXADDDATA);

	err = gpmc_cs_set_timings(cs, &t);
	if (err)
		return err;

	/* Ensure sync read and sync write are disabled */
	reg = readw(onenand_base + ONENAND_REG_SYS_CFG1);
	reg &= ~ONENAND_SYS_CFG1_SYNC_READ & ~ONENAND_SYS_CFG1_SYNC_WRITE;
	writew(reg, onenand_base + ONENAND_REG_SYS_CFG1);

	return 0;
}

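/*
 * Rewrite the OneNAND SYS_CFG1 register: burst read latency, 16-word burst
 * length, the synchronous read/write enables and the HF/VHF high-frequency
 * flags, leaving the remaining bits untouched.
 */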
static void set_onenand_cfg(void __iomem *onenand_base, int latency,
				int sync_read, int sync_write, int hf, int vhf)
{
	u32 reg;

	reg = readw(onenand_base + ONENAND_REG_SYS_CFG1);
	reg &= ~((0x7 << ONENAND_SYS_CFG1_BRL_SHIFT) | (0x7 << 9));
	reg |=	(latency << ONENAND_SYS_CFG1_BRL_SHIFT) |
		ONENAND_SYS_CFG1_BL_16;
	if (sync_read)
		reg |= ONENAND_SYS_CFG1_SYNC_READ;
	else
		reg &= ~ONENAND_SYS_CFG1_SYNC_READ;
	if (sync_write)
		reg |= ONENAND_SYS_CFG1_SYNC_WRITE;
	else
		reg &= ~ONENAND_SYS_CFG1_SYNC_WRITE;
	if (hf)
		reg |= ONENAND_SYS_CFG1_HF;
	else
		reg &= ~ONENAND_SYS_CFG1_HF;
	if (vhf)
		reg |= ONENAND_SYS_CFG1_VHF;
	else
		reg &= ~ONENAND_SYS_CFG1_VHF;
	writew(reg, onenand_base + ONENAND_REG_SYS_CFG1);
}

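/*
 * Determine the maximum synchronous clock rate (in MHz) the chip supports.
 * A board-supplied get_freq() callback is tried first and may also flag
 * clock-dependent timings; otherwise the rate is decoded from the OneNAND
 * version ID register.
 */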
static int omap2_onenand_get_freq(struct omap_onenand_platform_data *cfg,
				  void __iomem *onenand_base, bool *clk_dep)
{
	u16 ver = readw(onenand_base + ONENAND_REG_VERSION_ID);
	int freq = 0;

	if (cfg->get_freq) {
		struct onenand_freq_info fi;

		fi.maf_id = readw(onenand_base + ONENAND_REG_MANUFACTURER_ID);
		fi.dev_id = readw(onenand_base + ONENAND_REG_DEVICE_ID);
		fi.ver_id = ver;
		freq = cfg->get_freq(&fi, clk_dep);
		if (freq)
			return freq;
	}

	switch ((ver >> 4) & 0xf) {
	case 0:
		freq = 40;
		break;
	case 1:
		freq = 54;
		break;
	case 2:
		freq = 66;
		break;
	case 3:
		freq = 83;
		break;
	case 4:
		freq = 104;
		break;
	default:
		freq = 54;
		break;
	}

	return freq;
}

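/*
 * Switch the GPMC chip select and the OneNAND to synchronous (burst)
 * operation.  On the first call the device is probed in asynchronous mode
 * to find its frequency; the GPMC clock divider, burst latency and the
 * read/write timings are then derived from that frequency.
 */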
static int omap2_onenand_set_sync_mode(struct omap_onenand_platform_data *cfg,
					void __iomem *onenand_base,
					int *freq_ptr)
{
	struct gpmc_timings t;
	const int t_cer  = 15;
	const int t_avdp = 12;
	const int t_cez  = 20; /* max of t_cez, t_oez */
	const int t_ds   = 30;
	const int t_wpl  = 40;
	const int t_wph  = 30;
	int min_gpmc_clk_period, t_ces, t_avds, t_avdh, t_ach, t_aavdh, t_rdyo;
	int tick_ns, div, fclk_offset_ns, fclk_offset, gpmc_clk_ns, latency;
	int first_time = 0, hf = 0, vhf = 0, sync_read = 0, sync_write = 0;
	int err, ticks_cez;
	int cs = cfg->cs, freq = *freq_ptr;
	u32 reg;
	bool clk_dep = false;

	if (cfg->flags & ONENAND_SYNC_READ) {
		sync_read = 1;
	} else if (cfg->flags & ONENAND_SYNC_READWRITE) {
		sync_read = 1;
		sync_write = 1;
	} else
		return omap2_onenand_set_async_mode(cs, onenand_base);

	if (!freq) {
		/* Very first call, freq is not yet known */
		err = omap2_onenand_set_async_mode(cs, onenand_base);
		if (err)
			return err;
		freq = omap2_onenand_get_freq(cfg, onenand_base, &clk_dep);
		first_time = 1;
	}

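	/*
	 * Per-frequency synchronous interface parameters: min_gpmc_clk_period
	 * is in picoseconds, the t_* values are in nanoseconds.
	 */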
	switch (freq) {
	case 104:
		min_gpmc_clk_period = 9600; /* 104 MHz */
		t_ces   = 3;
		t_avds  = 4;
		t_avdh  = 2;
		t_ach   = 3;
		t_aavdh = 6;
		t_rdyo  = 6;
		break;
	case 83:
		min_gpmc_clk_period = 12000; /* 83 MHz */
		t_ces   = 5;
		t_avds  = 4;
		t_avdh  = 2;
		t_ach   = 6;
		t_aavdh = 6;
		t_rdyo  = 9;
		break;
	case 66:
		min_gpmc_clk_period = 15000; /* 66 MHz */
		t_ces   = 6;
		t_avds  = 5;
		t_avdh  = 2;
		t_ach   = 6;
		t_aavdh = 6;
		t_rdyo  = 11;
		break;
	default:
		min_gpmc_clk_period = 18500; /* 54 MHz */
		t_ces   = 7;
		t_avds  = 7;
		t_avdh  = 7;
		t_ach   = 9;
		t_aavdh = 7;
		t_rdyo  = 15;
		sync_write = 0;
		break;
	}

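	/*
	 * Pick the smallest GPMC clock divider that still satisfies the
	 * minimum clock period, then choose the OneNAND burst read latency
	 * (in clock cycles) that matches the resulting clock rate.
	 */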
	tick_ns = gpmc_ticks_to_ns(1);
	div = gpmc_cs_calc_divider(cs, min_gpmc_clk_period);
	gpmc_clk_ns = gpmc_ticks_to_ns(div);
	if (gpmc_clk_ns < 15) /* > 66 MHz */
		hf = 1;
	if (gpmc_clk_ns < 12) /* > 83 MHz */
		vhf = 1;
	if (vhf)
		latency = 8;
	else if (hf)
		latency = 6;
	else if (gpmc_clk_ns >= 25) /* 40 MHz */
		latency = 3;
	else
		latency = 4;

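	/*
	 * If the board's get_freq() callback reported clock-dependent
	 * timings, re-derive t_ces and t_avds from the GPMC clock period
	 * actually in use instead of the fixed per-frequency table above.
	 */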
	if (clk_dep) {
		if (gpmc_clk_ns < 12) {		/* > 83 MHz */
			t_ces   = 3;
			t_avds  = 4;
		} else if (gpmc_clk_ns < 15) {	/* > 66 MHz */
			t_ces   = 5;
			t_avds  = 4;
		} else if (gpmc_clk_ns < 25) {	/* > 40 MHz */
			t_ces   = 6;
			t_avds  = 5;
		} else {
			t_ces   = 7;
			t_avds  = 7;
		}
	}

	if (first_time)
		set_onenand_cfg(onenand_base, latency,
				sync_read, sync_write, hf, vhf);

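	/*
	 * GPMC extra-delay bits: with the GPMC clock running at the full
	 * functional clock rate (div == 1), delay CS, ADV, OE and WE by an
	 * extra half cycle (bit 7 of CONFIG2/CONFIG3, bits 7 and 23 of
	 * CONFIG4); with a divided clock, clear those bits again.
	 */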
	if (div == 1) {
		reg = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG2);
		reg |= (1 << 7);
		gpmc_cs_write_reg(cs, GPMC_CS_CONFIG2, reg);
		reg = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG3);
		reg |= (1 << 7);
		gpmc_cs_write_reg(cs, GPMC_CS_CONFIG3, reg);
		reg = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG4);
		reg |= (1 << 7);
		reg |= (1 << 23);
		gpmc_cs_write_reg(cs, GPMC_CS_CONFIG4, reg);
	} else {
		reg = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG2);
		reg &= ~(1 << 7);
		gpmc_cs_write_reg(cs, GPMC_CS_CONFIG2, reg);
		reg = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG3);
		reg &= ~(1 << 7);
		gpmc_cs_write_reg(cs, GPMC_CS_CONFIG3, reg);
		reg = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG4);
		reg &= ~(1 << 7);
		reg &= ~(1 << 23);
		gpmc_cs_write_reg(cs, GPMC_CS_CONFIG4, reg);
	}

	/* Set synchronous read timings */
	memset(&t, 0, sizeof(t));
	t.sync_clk = min_gpmc_clk_period;
	t.cs_on = 0;
	t.adv_on = 0;
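	/*
	 * fclk_offset is the number of GPMC functional-clock cycles before
	 * the GPMC clock is activated; it has to cover the larger of the CE
	 * and AVD setup times and is programmed below through
	 * GPMC_CONFIG1_CLKACTIVATIONTIME().
	 */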
	fclk_offset_ns = gpmc_round_ns_to_ticks(max_t(int, t_ces, t_avds));
	fclk_offset = gpmc_ns_to_ticks(fclk_offset_ns);
	t.page_burst_access = gpmc_clk_ns;

	/* Read */
	t.adv_rd_off = gpmc_ticks_to_ns(fclk_offset + gpmc_ns_to_ticks(t_avdh));
	t.oe_on = gpmc_ticks_to_ns(fclk_offset + gpmc_ns_to_ticks(t_ach));
	/* Force at least 1 clk between AVD High to OE Low */
	if (t.oe_on <= t.adv_rd_off)
		t.oe_on = t.adv_rd_off + gpmc_round_ns_to_ticks(1);
	t.access = gpmc_ticks_to_ns(fclk_offset + (latency + 1) * div);
	t.oe_off = t.access + gpmc_round_ns_to_ticks(1);
	t.cs_rd_off = t.oe_off;
	ticks_cez = ((gpmc_ns_to_ticks(t_cez) + div - 1) / div) * div;
	t.rd_cycle = gpmc_ticks_to_ns(fclk_offset + (latency + 1) * div +
		     ticks_cez);

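	/*
	 * Write timings: synchronous writes reuse the read cycle and burst
	 * latency computed above, while the asynchronous fallback (the
	 * default 54 MHz case forces sync_write = 0) uses the same values
	 * as omap2_onenand_set_async_mode().
	 */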
	/* Write */
	if (sync_write) {
		t.adv_wr_off = t.adv_rd_off;
		t.we_on  = 0;
		t.we_off = t.cs_rd_off;
		t.cs_wr_off = t.cs_rd_off;
		t.wr_cycle  = t.rd_cycle;
		if (cpu_is_omap34xx()) {
			t.wr_data_mux_bus = gpmc_ticks_to_ns(fclk_offset +
					gpmc_ps_to_ticks(min_gpmc_clk_period +
					t_rdyo * 1000));
			t.wr_access = t.access;
		}
	} else {
		t.adv_wr_off = gpmc_round_ns_to_ticks(max_t(int,
							t_avdp, t_cer));
		t.we_on  = t.adv_wr_off + gpmc_round_ns_to_ticks(t_aavdh);
		t.we_off = t.we_on + gpmc_round_ns_to_ticks(t_wpl);
		t.cs_wr_off = t.we_off + gpmc_round_ns_to_ticks(t_wph);
		t.wr_cycle  = t.cs_wr_off + gpmc_round_ns_to_ticks(t_cez);
		if (cpu_is_omap34xx()) {
			t.wr_data_mux_bus = t.we_on;
			t.wr_access = t.we_on + gpmc_round_ns_to_ticks(t_ds);
		}
	}

	/* Configure GPMC for synchronous read */
	gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1,
			  GPMC_CONFIG1_WRAPBURST_SUPP |
			  GPMC_CONFIG1_READMULTIPLE_SUPP |
			  (sync_read ? GPMC_CONFIG1_READTYPE_SYNC : 0) |
			  (sync_write ? GPMC_CONFIG1_WRITEMULTIPLE_SUPP : 0) |
			  (sync_write ? GPMC_CONFIG1_WRITETYPE_SYNC : 0) |
			  GPMC_CONFIG1_CLKACTIVATIONTIME(fclk_offset) |
			  GPMC_CONFIG1_PAGE_LEN(2) |
			  (cpu_is_omap34xx() ? 0 :
				(GPMC_CONFIG1_WAIT_READ_MON |
				 GPMC_CONFIG1_WAIT_PIN_SEL(0))) |
			  GPMC_CONFIG1_DEVICESIZE_16 |
			  GPMC_CONFIG1_DEVICETYPE_NOR |
			  GPMC_CONFIG1_MUXADDDATA);

	err = gpmc_cs_set_timings(cs, &t);
	if (err)
		return err;

	set_onenand_cfg(onenand_base, latency, sync_read, sync_write, hf, vhf);

	*freq_ptr = freq;

	return 0;
}

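/*
 * Installed below as the onenand_setup() hook in the platform data; the
 * OneNAND driver calls it with the mapped chip base address so that the
 * GPMC timings can be (re)configured.
 */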
static int gpmc_onenand_setup(void __iomem *onenand_base, int *freq_ptr)
{
	struct device *dev = &gpmc_onenand_device.dev;

	/* Set sync timings in GPMC */
	if (omap2_onenand_set_sync_mode(gpmc_onenand_data, onenand_base,
			freq_ptr) < 0) {
		dev_err(dev, "Unable to set synchronous mode\n");
		return -EINVAL;
	}

	return 0;
}

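/*
 * Entry point for board files: install the setup hook, register the
 * "omap2-onenand" platform device and, on OMAP2 (24xx), downgrade
 * ONENAND_SYNC_READWRITE to ONENAND_SYNC_READ since only synchronous
 * reads are used there.
 */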
void __init gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
{
	gpmc_onenand_data = _onenand_data;
	gpmc_onenand_data->onenand_setup = gpmc_onenand_setup;
	gpmc_onenand_device.dev.platform_data = gpmc_onenand_data;

	if (cpu_is_omap24xx() &&
			(gpmc_onenand_data->flags & ONENAND_SYNC_READWRITE)) {
		printk(KERN_ERR "Onenand using only SYNC_READ on 24xx\n");
		gpmc_onenand_data->flags &= ~ONENAND_SYNC_READWRITE;
		gpmc_onenand_data->flags |= ONENAND_SYNC_READ;
	}

	if (platform_device_register(&gpmc_onenand_device) < 0) {
		printk(KERN_ERR "Unable to register OneNAND device\n");
		return;
	}
}