hw/misc/omap_gpmc.c
/*
 * TI OMAP general purpose memory controller emulation.
 *
 * Copyright (C) 2007-2009 Nokia Corporation
 * Original code written by Andrzej Zaborowski <andrew@openedhand.com>
 * Enhancements for OMAP3 and NAND support written by Juha Riihimäki
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 or
 * (at your option) any later version of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "hw/hw.h"
#include "hw/block/flash.h"
#include "hw/arm/omap.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"

/* General-Purpose Memory Controller */
struct omap_gpmc_s {
    qemu_irq irq;
    qemu_irq drq;
    MemoryRegion iomem;
    int accept_256;

    uint8_t revision;
    uint8_t sysconfig;
    uint16_t irqst;
    uint16_t irqen;
    uint16_t lastirq;
    uint16_t timeout;
    uint16_t config;
    struct omap_gpmc_cs_file_s {
        uint32_t config[7];
        MemoryRegion *iomem;
        MemoryRegion container;
        MemoryRegion nandiomem;
        DeviceState *dev;
    } cs_file[8];
    int ecc_cs;
    int ecc_ptr;
    uint32_t ecc_cfg;
    ECCState ecc[9];
    struct prefetch {
        uint32_t config1; /* GPMC_PREFETCH_CONFIG1 */
        uint32_t transfercount; /* GPMC_PREFETCH_CONFIG2:TRANSFERCOUNT */
        int startengine; /* GPMC_PREFETCH_CONTROL:STARTENGINE */
        int fifopointer; /* GPMC_PREFETCH_STATUS:FIFOPOINTER */
        int count; /* GPMC_PREFETCH_STATUS:COUNTVALUE */
        MemoryRegion iomem;
        uint8_t fifo[64];
    } prefetch;
};

#define OMAP_GPMC_8BIT 0
#define OMAP_GPMC_16BIT 1
#define OMAP_GPMC_NOR 0
#define OMAP_GPMC_NAND 2
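
/* The helpers below decode the DEVICETYPE (bits 11:10) and DEVICESIZE
 * (bits 13:12) fields of GPMC_CONFIG1_i.  For example, a 16-bit NAND
 * attached to a chipselect has
 *   (config[0] >> 10) & 3 == OMAP_GPMC_NAND  and
 *   (config[0] >> 12) & 1 == OMAP_GPMC_16BIT.
 * (Field names are taken from the OMAP TRM; the bit positions are the
 * ones used by the code below.)
 */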

static int omap_gpmc_devtype(struct omap_gpmc_cs_file_s *f)
{
    return (f->config[0] >> 10) & 3;
}

static int omap_gpmc_devsize(struct omap_gpmc_cs_file_s *f)
{
    /* devsize field is really 2 bits but we ignore the high
     * bit to ensure consistent behaviour if the guest sets
     * it (values 2 and 3 are reserved in the TRM)
     */
    return (f->config[0] >> 12) & 1;
}

/* Extract the chip-select value from the prefetch config1 register */
static int prefetch_cs(uint32_t config1)
{
    return (config1 >> 24) & 7;
}

static int prefetch_threshold(uint32_t config1)
{
    return (config1 >> 8) & 0x7f;
}

static void omap_gpmc_int_update(struct omap_gpmc_s *s)
{
    /* The TRM is a bit unclear, but it seems to say that
     * the TERMINALCOUNTSTATUS bit is set only on the
     * transition when the prefetch engine goes from
     * active to inactive, whereas the FIFOEVENTSTATUS
     * bit is held high as long as the fifo has at
     * least THRESHOLD bytes available.
     * So we do the latter here, but TERMINALCOUNTSTATUS
     * is set elsewhere.
     */
    if (s->prefetch.fifopointer >= prefetch_threshold(s->prefetch.config1)) {
        s->irqst |= 1;
    }
    if ((s->irqen & s->irqst) != s->lastirq) {
        s->lastirq = s->irqen & s->irqst;
        qemu_set_irq(s->irq, s->lastirq);
    }
}
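
/* Assert or deassert the prefetch DMA request line.  This only has an
 * effect if the guest enabled DMA signalling via bit 2 of
 * GPMC_PREFETCH_CONFIG1 (presumably the TRM's DMAMODE field); otherwise
 * FIFO availability is reported through the FIFOEVENT interrupt alone.
 */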
static void omap_gpmc_dma_update(struct omap_gpmc_s *s, int value)
{
    if (s->prefetch.config1 & 4) {
        qemu_set_irq(s->drq, value);
    }
}

/* Access functions for when a NAND-like device is mapped into memory:
 * all addresses in the region behave like accesses to the relevant
 * GPMC_NAND_DATA_i register (which is actually implemented to call these)
 */
static uint64_t omap_nand_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    struct omap_gpmc_cs_file_s *f = (struct omap_gpmc_cs_file_s *)opaque;
    uint64_t v;
    nand_setpins(f->dev, 0, 0, 0, 1, 0);
    switch (omap_gpmc_devsize(f)) {
    case OMAP_GPMC_8BIT:
        v = nand_getio(f->dev);
        if (size == 1) {
            return v;
        }
        v |= (nand_getio(f->dev) << 8);
        if (size == 2) {
            return v;
        }
        v |= (nand_getio(f->dev) << 16);
        v |= (nand_getio(f->dev) << 24);
        return v;
    case OMAP_GPMC_16BIT:
        v = nand_getio(f->dev);
        if (size == 1) {
            /* 8 bit read from 16 bit device : probably a guest bug */
            return v & 0xff;
        }
        if (size == 2) {
            return v;
        }
        v |= (nand_getio(f->dev) << 16);
        return v;
    default:
        abort();
    }
}
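
/* For example, a 4-byte read from an 8-bit NAND via omap_nand_read() above
 * issues four nand_getio() calls and assembles the bytes little-endian:
 *   result = b0 | (b1 << 8) | (b2 << 16) | (b3 << 24)
 * whereas a 16-bit NAND needs only two nand_getio() calls for the same
 * access width.
 */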

static void omap_nand_setio(DeviceState *dev, uint64_t value,
                            int nandsize, int size)
{
    /* Write the specified value to the NAND device, respecting
     * both size of the NAND device and size of the write access.
     */
    switch (nandsize) {
    case OMAP_GPMC_8BIT:
        switch (size) {
        case 1:
            nand_setio(dev, value & 0xff);
            break;
        case 2:
            nand_setio(dev, value & 0xff);
            nand_setio(dev, (value >> 8) & 0xff);
            break;
        case 4:
        default:
            nand_setio(dev, value & 0xff);
            nand_setio(dev, (value >> 8) & 0xff);
            nand_setio(dev, (value >> 16) & 0xff);
            nand_setio(dev, (value >> 24) & 0xff);
            break;
        }
        break;
    case OMAP_GPMC_16BIT:
        switch (size) {
        case 1:
            /* writing to a 16bit device with 8bit access is probably a guest
             * bug; pass the value through anyway.
             */
        case 2:
            nand_setio(dev, value & 0xffff);
            break;
        case 4:
        default:
            nand_setio(dev, value & 0xffff);
            nand_setio(dev, (value >> 16) & 0xffff);
            break;
        }
        break;
    }
}

static void omap_nand_write(void *opaque, hwaddr addr,
                            uint64_t value, unsigned size)
{
    struct omap_gpmc_cs_file_s *f = (struct omap_gpmc_cs_file_s *)opaque;
    nand_setpins(f->dev, 0, 0, 0, 1, 0);
    omap_nand_setio(f->dev, value, omap_gpmc_devsize(f), size);
}

static const MemoryRegionOps omap_nand_ops = {
    .read = omap_nand_read,
    .write = omap_nand_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void fill_prefetch_fifo(struct omap_gpmc_s *s)
{
    /* Fill the prefetch FIFO by reading data from NAND.
     * We do this synchronously, unlike the hardware which
     * will do this asynchronously. We refill when the
     * FIFO has THRESHOLD bytes free, and we always refill
     * as much data as possible starting at the top end
     * of the FIFO.
     * (We have to refill at THRESHOLD rather than waiting
     * for the FIFO to empty to allow for the case where
     * the FIFO size isn't an exact multiple of THRESHOLD
     * and we're doing DMA transfers.)
     * This means we never need to handle wrap-around in
     * the fifo-reading code, and the next byte of data
     * to read is always fifo[63 - fifopointer].
     */
    int fptr;
    int cs = prefetch_cs(s->prefetch.config1);
    int is16bit = (((s->cs_file[cs].config[0] >> 12) & 3) != 0);
    int bytes;
    /* Don't believe the bit of the OMAP TRM that says that COUNTVALUE
     * and TRANSFERCOUNT are in units of 16 bit words for 16 bit NAND.
     * Instead believe the bit that says it is always a byte count.
     */
    bytes = 64 - s->prefetch.fifopointer;
    if (bytes > s->prefetch.count) {
        bytes = s->prefetch.count;
    }
    if (is16bit) {
        bytes &= ~1;
    }

    s->prefetch.count -= bytes;
    s->prefetch.fifopointer += bytes;
    fptr = 64 - s->prefetch.fifopointer;
    /* Move the existing data in the FIFO so it sits just
     * before what we're about to read in
     */
    while (fptr < (64 - bytes)) {
        s->prefetch.fifo[fptr] = s->prefetch.fifo[fptr + bytes];
        fptr++;
    }
    while (fptr < 64) {
        if (is16bit) {
            uint32_t v = omap_nand_read(&s->cs_file[cs], 0, 2);
            s->prefetch.fifo[fptr++] = v & 0xff;
            s->prefetch.fifo[fptr++] = (v >> 8) & 0xff;
        } else {
            s->prefetch.fifo[fptr++] = omap_nand_read(&s->cs_file[cs], 0, 1);
        }
    }
    if (s->prefetch.startengine && (s->prefetch.count == 0)) {
        /* This was the final transfer: raise TERMINALCOUNTSTATUS */
        s->irqst |= 2;
        s->prefetch.startengine = 0;
    }
    /* If there are any bytes in the FIFO at this point then
     * we must raise a DMA request (either this is a final part
     * transfer, or we filled the FIFO in which case we certainly
     * have THRESHOLD bytes available)
     */
    if (s->prefetch.fifopointer != 0) {
        omap_gpmc_dma_update(s, 1);
    }
    omap_gpmc_int_update(s);
}
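
/* Worked example of the refill indexing above: if 16 unread bytes remain
 * (fifopointer == 16) and plenty of data is left to transfer, then
 * bytes == 48, the 16 old bytes move from fifo[48..63] down to fifo[0..15],
 * 48 new bytes are read into fifo[16..63], and fifopointer becomes 64.
 * The next guest read then returns fifo[63 - 63] == fifo[0], the oldest
 * unread byte, so FIFO ordering is preserved.
 */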

/* Access functions for a NAND-like device when the prefetch/postwrite
 * engine is enabled -- all addresses in the region behave alike:
 * data is read or written to the FIFO.
 */
static uint64_t omap_gpmc_prefetch_read(void *opaque, hwaddr addr,
                                        unsigned size)
{
    struct omap_gpmc_s *s = (struct omap_gpmc_s *) opaque;
    uint32_t data;
    if (s->prefetch.config1 & 1) {
        /* The TRM doesn't define the behaviour if you read from the
         * FIFO when the prefetch engine is in write mode. We choose
         * to always return zero.
         */
        return 0;
    }
    /* Note that trying to read an empty fifo repeats the last byte */
    if (s->prefetch.fifopointer) {
        s->prefetch.fifopointer--;
    }
    data = s->prefetch.fifo[63 - s->prefetch.fifopointer];
    if (s->prefetch.fifopointer ==
        (64 - prefetch_threshold(s->prefetch.config1))) {
        /* We've drained THRESHOLD bytes now. So deassert the
         * DMA request, then refill the FIFO (which will probably
         * assert it again.)
         */
        omap_gpmc_dma_update(s, 0);
        fill_prefetch_fifo(s);
    }
    omap_gpmc_int_update(s);
    return data;
}

static void omap_gpmc_prefetch_write(void *opaque, hwaddr addr,
                                     uint64_t value, unsigned size)
{
    struct omap_gpmc_s *s = (struct omap_gpmc_s *) opaque;
    int cs = prefetch_cs(s->prefetch.config1);
    if ((s->prefetch.config1 & 1) == 0) {
        /* The TRM doesn't define the behaviour of writing to the
         * FIFO when the prefetch engine is in read mode. We
         * choose to ignore the write.
         */
        return;
    }
    if (s->prefetch.count == 0) {
        /* The TRM doesn't define the behaviour of writing to the
         * FIFO if the transfer is complete. We choose to ignore.
         */
        return;
    }
    /* The only reason we do any data buffering in postwrite
     * mode is if we are talking to a 16 bit NAND device, in
     * which case we need to buffer the first byte of the
     * 16 bit word until the other byte arrives.
     */
    int is16bit = (((s->cs_file[cs].config[0] >> 12) & 3) != 0);
    if (is16bit) {
        /* fifopointer alternates between 64 (waiting for first
         * byte of word) and 63 (waiting for second byte)
         */
        if (s->prefetch.fifopointer == 64) {
            s->prefetch.fifo[0] = value;
            s->prefetch.fifopointer--;
        } else {
            value = (value << 8) | s->prefetch.fifo[0];
            omap_nand_write(&s->cs_file[cs], 0, value, 2);
            s->prefetch.count--;
            s->prefetch.fifopointer = 64;
        }
    } else {
        /* Just write the byte : fifopointer remains 64 at all times */
        omap_nand_write(&s->cs_file[cs], 0, value, 1);
        s->prefetch.count--;
    }
    if (s->prefetch.count == 0) {
        /* Final transfer: raise TERMINALCOUNTSTATUS */
        s->irqst |= 2;
        s->prefetch.startengine = 0;
    }
    omap_gpmc_int_update(s);
}

static const MemoryRegionOps omap_prefetch_ops = {
    .read = omap_gpmc_prefetch_read,
    .write = omap_gpmc_prefetch_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .impl.min_access_size = 1,
    .impl.max_access_size = 1,
};
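
/* With .impl.max_access_size set to 1, the memory core splits any wider
 * guest access to the FIFO region into byte-sized calls, so the handlers
 * above only ever see one byte at a time regardless of the access width
 * the guest actually used.
 */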

static MemoryRegion *omap_gpmc_cs_memregion(struct omap_gpmc_s *s, int cs)
{
    /* Return the MemoryRegion* to map/unmap for this chipselect */
    struct omap_gpmc_cs_file_s *f = &s->cs_file[cs];
    if (omap_gpmc_devtype(f) == OMAP_GPMC_NOR) {
        return f->iomem;
    }
    if ((s->prefetch.config1 & 0x80) &&
        (prefetch_cs(s->prefetch.config1) == cs)) {
        /* The prefetch engine is enabled for this CS: map the FIFO */
        return &s->prefetch.iomem;
    }
    return &f->nandiomem;
}

static void omap_gpmc_cs_map(struct omap_gpmc_s *s, int cs)
{
    struct omap_gpmc_cs_file_s *f = &s->cs_file[cs];
    uint32_t mask = (f->config[6] >> 8) & 0xf;
    uint32_t base = f->config[6] & 0x3f;
    uint32_t size;

    if (!f->iomem && !f->dev) {
        return;
    }

    if (!(f->config[6] & (1 << 6))) {
        /* Do nothing unless CSVALID */
        return;
    }

    /* TODO: check for overlapping regions and report access errors */
    if (mask != 0x8 && mask != 0xc && mask != 0xe && mask != 0xf
        && !(s->accept_256 && !mask)) {
        fprintf(stderr, "%s: invalid chip-select mask address (0x%x)\n",
                __func__, mask);
    }

    base <<= 24;
    size = (0x0fffffff & ~(mask << 24)) + 1;
    /* TODO: rather than setting the size of the mapping (which should be
     * constant), the mask should cause wrapping of the address space, so
     * that the same memory becomes accessible at every <i>size</i> bytes
     * starting from <i>base</i>. */
    memory_region_init(&f->container, NULL, "omap-gpmc-file", size);
    memory_region_add_subregion(&f->container, 0,
                                omap_gpmc_cs_memregion(s, cs));
    memory_region_add_subregion(get_system_memory(), base,
                                &f->container);
}
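
/* Example of the GPMC_CONFIG7 decode above: with CONFIG7 == 0x00000f48,
 * the base field is 0x08 and the mask field is 0xf, so the chipselect is
 * mapped at 0x08000000 with size (0x0fffffff & ~0x0f000000) + 1 == 16MB.
 * A mask of 0x8 would give the maximum 128MB window instead.
 */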

static void omap_gpmc_cs_unmap(struct omap_gpmc_s *s, int cs)
{
    struct omap_gpmc_cs_file_s *f = &s->cs_file[cs];
    if (!(f->config[6] & (1 << 6))) {
        /* Do nothing unless CSVALID */
        return;
    }
    if (!f->iomem && !f->dev) {
        return;
    }
    memory_region_del_subregion(get_system_memory(), &f->container);
    memory_region_del_subregion(&f->container, omap_gpmc_cs_memregion(s, cs));
    object_unparent(OBJECT(&f->container));
}

void omap_gpmc_reset(struct omap_gpmc_s *s)
{
    int i;

    s->sysconfig = 0;
    s->irqst = 0;
    s->irqen = 0;
    omap_gpmc_int_update(s);
    for (i = 0; i < 8; i++) {
        /* This has to happen before we change any of the config
         * used to determine which memory regions are mapped or unmapped.
         */
        omap_gpmc_cs_unmap(s, i);
    }
    s->timeout = 0;
    s->config = 0xa00;
    s->prefetch.config1 = 0x00004000;
    s->prefetch.transfercount = 0x00000000;
    s->prefetch.startengine = 0;
    s->prefetch.fifopointer = 0;
    s->prefetch.count = 0;
    for (i = 0; i < 8; i ++) {
        s->cs_file[i].config[1] = 0x101001;
        s->cs_file[i].config[2] = 0x020201;
        s->cs_file[i].config[3] = 0x10031003;
        s->cs_file[i].config[4] = 0x10f1111;
        s->cs_file[i].config[5] = 0;
        s->cs_file[i].config[6] = 0xf00;
        /* In theory we could probe attached devices for some CFG1
         * bits here, but we just retain them across resets as they
         * were set initially by omap_gpmc_attach().
         */
        if (i == 0) {
            s->cs_file[i].config[0] &= 0x00433e00;
            s->cs_file[i].config[6] |= 1 << 6; /* CSVALID */
            omap_gpmc_cs_map(s, i);
        } else {
            s->cs_file[i].config[0] &= 0x00403c00;
        }
    }
    s->ecc_cs = 0;
    s->ecc_ptr = 0;
    s->ecc_cfg = 0x3fcff000;
    for (i = 0; i < 9; i ++)
        ecc_reset(&s->ecc[i]);
}

static int gpmc_wordaccess_only(hwaddr addr)
{
    /* Return true if the register offset is to a register that
     * only permits word width accesses.
     * Non-word accesses are only OK for GPMC_NAND_DATA/ADDRESS/COMMAND
     * for any chipselect.
     */
    if (addr >= 0x60 && addr <= 0x1d4) {
        int cs = (addr - 0x60) / 0x30;
        addr -= cs * 0x30;
        if (addr >= 0x7c && addr < 0x88) {
            /* GPMC_NAND_COMMAND, GPMC_NAND_ADDRESS, GPMC_NAND_DATA */
            return 0;
        }
    }
    return 1;
}
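
/* For example, offset 0xb4 is chipselect 1's GPMC_NAND_DATA register:
 * cs == (0xb4 - 0x60) / 0x30 == 1, and 0xb4 - 0x30 == 0x84 falls inside
 * the 0x7c..0x87 window, so byte and halfword accesses are permitted.
 */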

static uint64_t omap_gpmc_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    struct omap_gpmc_s *s = (struct omap_gpmc_s *) opaque;
    int cs;
    struct omap_gpmc_cs_file_s *f;

    if (size != 4 && gpmc_wordaccess_only(addr)) {
        return omap_badwidth_read32(opaque, addr);
    }

    switch (addr) {
    case 0x000: /* GPMC_REVISION */
        return s->revision;

    case 0x010: /* GPMC_SYSCONFIG */
        return s->sysconfig;

    case 0x014: /* GPMC_SYSSTATUS */
        return 1; /* RESETDONE */

    case 0x018: /* GPMC_IRQSTATUS */
        return s->irqst;

    case 0x01c: /* GPMC_IRQENABLE */
        return s->irqen;

    case 0x040: /* GPMC_TIMEOUT_CONTROL */
        return s->timeout;

    case 0x044: /* GPMC_ERR_ADDRESS */
    case 0x048: /* GPMC_ERR_TYPE */
        return 0;

    case 0x050: /* GPMC_CONFIG */
        return s->config;

    case 0x054: /* GPMC_STATUS */
        return 0x001;

    case 0x060 ... 0x1d4:
        cs = (addr - 0x060) / 0x30;
        addr -= cs * 0x30;
        f = s->cs_file + cs;
        switch (addr) {
        case 0x60:      /* GPMC_CONFIG1 */
            return f->config[0];
        case 0x64:      /* GPMC_CONFIG2 */
            return f->config[1];
        case 0x68:      /* GPMC_CONFIG3 */
            return f->config[2];
        case 0x6c:      /* GPMC_CONFIG4 */
            return f->config[3];
        case 0x70:      /* GPMC_CONFIG5 */
            return f->config[4];
        case 0x74:      /* GPMC_CONFIG6 */
            return f->config[5];
        case 0x78:      /* GPMC_CONFIG7 */
            return f->config[6];
        case 0x84 ... 0x87: /* GPMC_NAND_DATA */
            if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) {
                return omap_nand_read(f, 0, size);
            }
            return 0;
        }
        break;

    case 0x1e0: /* GPMC_PREFETCH_CONFIG1 */
        return s->prefetch.config1;
    case 0x1e4: /* GPMC_PREFETCH_CONFIG2 */
        return s->prefetch.transfercount;
    case 0x1ec: /* GPMC_PREFETCH_CONTROL */
        return s->prefetch.startengine;
    case 0x1f0: /* GPMC_PREFETCH_STATUS */
        /* NB: The OMAP3 TRM is inconsistent about whether the GPMC
         * FIFOTHRESHOLDSTATUS bit should be set when
         * FIFOPOINTER > FIFOTHRESHOLD or when it is >= FIFOTHRESHOLD.
         * Apparently the underlying functional spec from which the TRM was
         * created states that the behaviour is ">=", and this also
         * makes more conceptual sense.
         */
        return (s->prefetch.fifopointer << 24) |
                ((s->prefetch.fifopointer >=
                  ((s->prefetch.config1 >> 8) & 0x7f) ? 1 : 0) << 16) |
                s->prefetch.count;
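        /* e.g. a freshly filled FIFO with the reset-default threshold of
         * 0x40 reads back as 0x40010000: FIFOPOINTER (64) in the high
         * byte, FIFOTHRESHOLDSTATUS set in bit 16, COUNTVALUE in the
         * low bits.
         */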

    case 0x1f4: /* GPMC_ECC_CONFIG */
        return s->ecc_cs;
    case 0x1f8: /* GPMC_ECC_CONTROL */
        return s->ecc_ptr;
    case 0x1fc: /* GPMC_ECC_SIZE_CONFIG */
        return s->ecc_cfg;
    case 0x200 ... 0x220: /* GPMC_ECC_RESULT */
        cs = (addr & 0x1f) >> 2;
        /* TODO: check correctness */
        return
                ((s->ecc[cs].cp    &  0x07) <<  0) |
                ((s->ecc[cs].cp    &  0x38) << 13) |
                ((s->ecc[cs].lp[0] & 0x1ff) <<  3) |
                ((s->ecc[cs].lp[1] & 0x1ff) << 19);

    case 0x230: /* GPMC_TESTMODE_CTRL */
        return 0;
    case 0x234: /* GPMC_PSA_LSB */
    case 0x238: /* GPMC_PSA_MSB */
        return 0x00000000;
    }

    OMAP_BAD_REG(addr);
    return 0;
}

static void omap_gpmc_write(void *opaque, hwaddr addr,
                            uint64_t value, unsigned size)
{
    struct omap_gpmc_s *s = (struct omap_gpmc_s *) opaque;
    int cs;
    struct omap_gpmc_cs_file_s *f;

    if (size != 4 && gpmc_wordaccess_only(addr)) {
        omap_badwidth_write32(opaque, addr, value);
        return;
    }

    switch (addr) {
    case 0x000: /* GPMC_REVISION */
    case 0x014: /* GPMC_SYSSTATUS */
    case 0x054: /* GPMC_STATUS */
    case 0x1f0: /* GPMC_PREFETCH_STATUS */
    case 0x200 ... 0x220: /* GPMC_ECC_RESULT */
    case 0x234: /* GPMC_PSA_LSB */
    case 0x238: /* GPMC_PSA_MSB */
        OMAP_RO_REG(addr);
        break;

    case 0x010: /* GPMC_SYSCONFIG */
        if ((value >> 3) == 0x3)
            fprintf(stderr, "%s: bad SDRAM idle mode %"PRIi64"\n",
                    __FUNCTION__, value >> 3);
        if (value & 2)
            omap_gpmc_reset(s);
        s->sysconfig = value & 0x19;
        break;

    case 0x018: /* GPMC_IRQSTATUS */
        s->irqst &= ~value;
        omap_gpmc_int_update(s);
        break;

    case 0x01c: /* GPMC_IRQENABLE */
        s->irqen = value & 0xf03;
        omap_gpmc_int_update(s);
        break;

    case 0x040: /* GPMC_TIMEOUT_CONTROL */
        s->timeout = value & 0x1ff1;
        break;

    case 0x044: /* GPMC_ERR_ADDRESS */
    case 0x048: /* GPMC_ERR_TYPE */
        break;

    case 0x050: /* GPMC_CONFIG */
        s->config = value & 0xf13;
        break;

    case 0x060 ... 0x1d4:
        cs = (addr - 0x060) / 0x30;
        addr -= cs * 0x30;
        f = s->cs_file + cs;
        switch (addr) {
        case 0x60:      /* GPMC_CONFIG1 */
            f->config[0] = value & 0xffef3e13;
            break;
        case 0x64:      /* GPMC_CONFIG2 */
            f->config[1] = value & 0x001f1f8f;
            break;
        case 0x68:      /* GPMC_CONFIG3 */
            f->config[2] = value & 0x001f1f8f;
            break;
        case 0x6c:      /* GPMC_CONFIG4 */
            f->config[3] = value & 0x1f8f1f8f;
            break;
        case 0x70:      /* GPMC_CONFIG5 */
            f->config[4] = value & 0x0f1f1f1f;
            break;
        case 0x74:      /* GPMC_CONFIG6 */
            f->config[5] = value & 0x00000fcf;
            break;
        case 0x78:      /* GPMC_CONFIG7 */
            if ((f->config[6] ^ value) & 0xf7f) {
                omap_gpmc_cs_unmap(s, cs);
                f->config[6] = value & 0x00000f7f;
                omap_gpmc_cs_map(s, cs);
            }
            break;
        case 0x7c ... 0x7f: /* GPMC_NAND_COMMAND */
            if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) {
                nand_setpins(f->dev, 1, 0, 0, 1, 0); /* CLE */
                omap_nand_setio(f->dev, value, omap_gpmc_devsize(f), size);
            }
            break;
        case 0x80 ... 0x83: /* GPMC_NAND_ADDRESS */
            if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) {
                nand_setpins(f->dev, 0, 1, 0, 1, 0); /* ALE */
                omap_nand_setio(f->dev, value, omap_gpmc_devsize(f), size);
            }
            break;
        case 0x84 ... 0x87: /* GPMC_NAND_DATA */
            if (omap_gpmc_devtype(f) == OMAP_GPMC_NAND) {
                omap_nand_write(f, 0, value, size);
            }
            break;
        default:
            goto bad_reg;
        }
        break;

    case 0x1e0: /* GPMC_PREFETCH_CONFIG1 */
        if (!s->prefetch.startengine) {
            uint32_t newconfig1 = value & 0x7f8f7fbf;
            uint32_t changed;
            changed = newconfig1 ^ s->prefetch.config1;
            if (changed & (0x80 | 0x7000000)) {
                /* Turning the engine on or off, or mapping it somewhere else.
                 * cs_map() and cs_unmap() check the prefetch config and
                 * overall CSVALID bits, so it is sufficient to unmap-and-map
                 * both the old cs and the new one. Note that we adhere to
                 * the "unmap/change config/map" order (and not unmap twice
                 * if newcs == oldcs), otherwise we'll try to delete the wrong
                 * memory region.
                 */
                int oldcs = prefetch_cs(s->prefetch.config1);
                int newcs = prefetch_cs(newconfig1);
                omap_gpmc_cs_unmap(s, oldcs);
                if (oldcs != newcs) {
                    omap_gpmc_cs_unmap(s, newcs);
                }
                s->prefetch.config1 = newconfig1;
                omap_gpmc_cs_map(s, oldcs);
                if (oldcs != newcs) {
                    omap_gpmc_cs_map(s, newcs);
                }
            } else {
                s->prefetch.config1 = newconfig1;
            }
        }
        break;

    case 0x1e4: /* GPMC_PREFETCH_CONFIG2 */
        if (!s->prefetch.startengine) {
            s->prefetch.transfercount = value & 0x3fff;
        }
        break;

    case 0x1ec: /* GPMC_PREFETCH_CONTROL */
        if (s->prefetch.startengine != (value & 1)) {
            s->prefetch.startengine = value & 1;
            if (s->prefetch.startengine) {
                /* Prefetch engine start */
                s->prefetch.count = s->prefetch.transfercount;
                if (s->prefetch.config1 & 1) {
                    /* Write */
                    s->prefetch.fifopointer = 64;
                } else {
                    /* Read */
                    s->prefetch.fifopointer = 0;
                    fill_prefetch_fifo(s);
                }
            } else {
                /* Prefetch engine forcibly stopped. The TRM
                 * doesn't define the behaviour if you do this.
                 * We clear the prefetch count, which means that
                 * we permit no more writes, and don't read any
                 * more data from NAND. The CPU can still drain
                 * the FIFO of unread data.
                 */
                s->prefetch.count = 0;
            }
            omap_gpmc_int_update(s);
        }
        break;
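
        /* The three prefetch cases above implement the usual guest
         * sequence: program GPMC_PREFETCH_CONFIG1 (chipselect, FIFO
         * threshold, the engine-enable bit 7 so the FIFO replaces the
         * NAND data window), set the byte count in GPMC_PREFETCH_CONFIG2,
         * write 1 to GPMC_PREFETCH_CONTROL, then drain data from the
         * chipselect region while watching FIFOEVENT/TERMINALCOUNT in
         * GPMC_IRQSTATUS.
         */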

    case 0x1f4: /* GPMC_ECC_CONFIG */
        s->ecc_cs = 0x8f;
        break;
    case 0x1f8: /* GPMC_ECC_CONTROL */
        if (value & (1 << 8))
            for (cs = 0; cs < 9; cs ++)
                ecc_reset(&s->ecc[cs]);
        s->ecc_ptr = value & 0xf;
        if (s->ecc_ptr == 0 || s->ecc_ptr > 9) {
            s->ecc_ptr = 0;
            s->ecc_cs &= ~1;
        }
        break;
    case 0x1fc: /* GPMC_ECC_SIZE_CONFIG */
        s->ecc_cfg = value & 0x3fcff1ff;
        break;
    case 0x230: /* GPMC_TESTMODE_CTRL */
        if (value & 7)
            fprintf(stderr, "%s: test mode enable attempt\n", __FUNCTION__);
        break;

    default:
    bad_reg:
        OMAP_BAD_REG(addr);
        return;
    }
}

static const MemoryRegionOps omap_gpmc_ops = {
    .read = omap_gpmc_read,
    .write = omap_gpmc_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

struct omap_gpmc_s *omap_gpmc_init(struct omap_mpu_state_s *mpu,
                                   hwaddr base,
                                   qemu_irq irq, qemu_irq drq)
{
    int cs;
    struct omap_gpmc_s *s = (struct omap_gpmc_s *)
            g_malloc0(sizeof(struct omap_gpmc_s));

    memory_region_init_io(&s->iomem, NULL, &omap_gpmc_ops, s, "omap-gpmc", 0x1000);
    memory_region_add_subregion(get_system_memory(), base, &s->iomem);

    s->irq = irq;
    s->drq = drq;
    s->accept_256 = cpu_is_omap3630(mpu);
    s->revision = cpu_class_omap3(mpu) ? 0x50 : 0x20;
    s->lastirq = 0;
    omap_gpmc_reset(s);

    /* We have to register a different IO memory handler for each
     * chip select region in case a NAND device is mapped there. We
     * make the region the worst-case size of 256MB and rely on the
     * container memory region in cs_map to chop it down to the actual
     * guest-requested size.
     */
    for (cs = 0; cs < 8; cs++) {
        memory_region_init_io(&s->cs_file[cs].nandiomem, NULL,
                              &omap_nand_ops,
                              &s->cs_file[cs],
                              "omap-nand",
                              256 * 1024 * 1024);
    }

    memory_region_init_io(&s->prefetch.iomem, NULL, &omap_prefetch_ops, s,
                          "omap-gpmc-prefetch", 256 * 1024 * 1024);
    return s;
}

void omap_gpmc_attach(struct omap_gpmc_s *s, int cs, MemoryRegion *iomem)
{
    struct omap_gpmc_cs_file_s *f;
    assert(iomem);

    if (cs < 0 || cs >= 8) {
        fprintf(stderr, "%s: bad chip-select %i\n", __FUNCTION__, cs);
        exit(-1);
    }
    f = &s->cs_file[cs];

    omap_gpmc_cs_unmap(s, cs);
    f->config[0] &= ~(0xf << 10);
    f->iomem = iomem;
    omap_gpmc_cs_map(s, cs);
}

void omap_gpmc_attach_nand(struct omap_gpmc_s *s, int cs, DeviceState *nand)
{
    struct omap_gpmc_cs_file_s *f;
    assert(nand);

    if (cs < 0 || cs >= 8) {
        fprintf(stderr, "%s: bad chip-select %i\n", __func__, cs);
        exit(-1);
    }
    f = &s->cs_file[cs];

    omap_gpmc_cs_unmap(s, cs);
    f->config[0] &= ~(0xf << 10);
    f->config[0] |= (OMAP_GPMC_NAND << 10);
    f->dev = nand;
    if (nand_getbuswidth(f->dev) == 16) {
        f->config[0] |= OMAP_GPMC_16BIT << 12;
    }
    omap_gpmc_cs_map(s, cs);
}
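
/* Board-model usage sketch (illustrative only: "gpmc_base", "gpmc_irq",
 * "gpmc_drq", "nand" and "nor_region" are placeholders for whatever the
 * board actually wires up):
 *
 *   struct omap_gpmc_s *gpmc = omap_gpmc_init(mpu, gpmc_base,
 *                                             gpmc_irq, gpmc_drq);
 *   omap_gpmc_attach_nand(gpmc, 0, nand);      // NAND flash on CS0
 *   omap_gpmc_attach(gpmc, 1, nor_region);     // NOR/SRAM-like region on CS1
 *
 * omap_gpmc_attach() takes a MemoryRegion for NOR-like devices, while
 * omap_gpmc_attach_nand() takes a NAND DeviceState and configures the
 * chipselect as an 8- or 16-bit NAND device.
 */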