/*
 * DMM IOMMU driver support functions for TI OMAP processors.
 *
 * Author: Rob Clark <rob@ti.com>
 *         Andy Gross <andy.gross@ti.com>
 *
 * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h> /* platform_device() */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/time.h>
#include <linux/list.h>

#include "omap_dmm_tiler.h"
#include "omap_dmm_priv.h"
#define DMM_DRIVER_NAME "dmm"

/* mappings for associating views to luts */
static struct tcm *containers[TILFMT_NFORMATS];
static struct dmm *omap_dmm;

/* global spinlock for protecting lists */
static DEFINE_SPINLOCK(list_lock);
/* Geometry table */
#define GEOM(xshift, yshift, bytes_per_pixel) { \
		.x_shft = (xshift), \
		.y_shft = (yshift), \
		.cpp    = (bytes_per_pixel), \
		.slot_w = 1 << (SLOT_WIDTH_BITS - (xshift)), \
		.slot_h = 1 << (SLOT_HEIGHT_BITS - (yshift)), \
	}

static const struct {
	uint32_t x_shft;	/* unused X-bits (as part of bpp) */
	uint32_t y_shft;	/* unused Y-bits (as part of bpp) */
	uint32_t cpp;		/* bytes/chars per pixel */
	uint32_t slot_w;	/* width of each slot (in pixels) */
	uint32_t slot_h;	/* height of each slot (in pixels) */
} geom[TILFMT_NFORMATS] = {
	[TILFMT_8BIT]  = GEOM(0, 0, 1),
	[TILFMT_16BIT] = GEOM(0, 1, 2),
	[TILFMT_32BIT] = GEOM(1, 1, 4),
	[TILFMT_PAGE]  = GEOM(SLOT_WIDTH_BITS, SLOT_HEIGHT_BITS, 1),
};
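/*
 * Illustrative note (added comment, not from the original source): if
 * SLOT_WIDTH_BITS and SLOT_HEIGHT_BITS in omap_dmm_priv.h are both 6, so that
 * every slot covers one 4KiB page, the GEOM() entries above expand to:
 *
 *	TILFMT_8BIT:  cpp = 1, slot = 64 x 64 pixels (64 * 64 * 1 = 4096 bytes)
 *	TILFMT_16BIT: cpp = 2, slot = 64 x 32 pixels (64 * 32 * 2 = 4096 bytes)
 *	TILFMT_32BIT: cpp = 4, slot = 32 x 32 pixels (32 * 32 * 4 = 4096 bytes)
 *
 * i.e. x_shft/y_shft trade pixel dimensions for bytes per pixel while keeping
 * each slot the size of one PAT page entry.
 */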
/* lookup table for registers w/ per-engine instances */
static const uint32_t reg[][4] = {
	[PAT_STATUS] = {DMM_PAT_STATUS__0, DMM_PAT_STATUS__1,
			DMM_PAT_STATUS__2, DMM_PAT_STATUS__3},
	[PAT_DESCR]  = {DMM_PAT_DESCR__0, DMM_PAT_DESCR__1,
			DMM_PAT_DESCR__2, DMM_PAT_DESCR__3},
};
/* simple allocator to grab next 16 byte aligned memory from txn */
static void *alloc_dma(struct dmm_txn *txn, size_t sz, dma_addr_t *pa)
{
	void *ptr;
	struct refill_engine *engine = txn->engine_handle;

	/* dmm programming requires 16 byte aligned addresses */
	txn->current_pa = round_up(txn->current_pa, 16);
	txn->current_va = (void *)round_up((long)txn->current_va, 16);

	ptr = txn->current_va;
	*pa = txn->current_pa;

	txn->current_pa += sz;
	txn->current_va += sz;

	BUG_ON((txn->current_va - engine->refill_va) > REFILL_BUFFER_SIZE);

	return ptr;
}
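/*
 * Sketch (added comment, for illustration only): within one transaction the
 * engine's refill buffer is consumed linearly, e.g.
 *
 *	pat  = alloc_dma(txn, sizeof(struct pat), &pat_pa);	// descriptor
 *	data = alloc_dma(txn, 4 * nslots, &pat->data_pa);	// page-address array
 *
 * (nslots is a hypothetical name for the slot count of one region.)  Both
 * allocations come out of the same per-engine REFILL_BUFFER_SIZE area that
 * was dma_alloc_writecombine()'d at probe time, so the hardware can walk
 * descriptors and data using physical addresses alone.
 */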
/* check status and spin until wait_mask comes true */
static int wait_status(struct refill_engine *engine, uint32_t wait_mask)
{
	struct dmm *dmm = engine->dmm;
	uint32_t r = 0, err, i;

	i = DMM_FIXED_RETRY_COUNT;
	while (true) {
		r = readl(dmm->base + reg[PAT_STATUS][engine->id]);
		err = r & DMM_PATSTATUS_ERR;
		if (err)
			return -EFAULT;

		if ((r & wait_mask) == wait_mask)
			break;

		if (--i == 0)
			return -ETIMEDOUT;

		udelay(1);
	}

	return 0;
}
static void release_engine(struct refill_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);
	list_add(&engine->idle_node, &omap_dmm->idle_head);
	spin_unlock_irqrestore(&list_lock, flags);

	atomic_inc(&omap_dmm->engine_counter);
	wake_up_interruptible(&omap_dmm->engine_queue);
}
static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
{
	struct dmm *dmm = arg;
	uint32_t status = readl(dmm->base + DMM_PAT_IRQSTATUS);
	int i;

	/* ack IRQ */
	writel(status, dmm->base + DMM_PAT_IRQSTATUS);

	for (i = 0; i < dmm->num_engines; i++) {
		if (status & DMM_IRQSTAT_LST) {
			wake_up_interruptible(&dmm->engines[i].wait_for_refill);

			if (dmm->engines[i].async)
				release_engine(&dmm->engines[i]);
		}

		status >>= 8;
	}

	return IRQ_HANDLED;
}
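/*
 * Note added for clarity: DMM_PAT_IRQSTATUS packs one byte of status flags
 * per refill engine, which is why the loop above checks DMM_IRQSTAT_LST in
 * the low byte and then shifts the status word right by 8 to move on to the
 * next engine.
 */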
/**
 * Get a handle for a DMM transaction
 */
static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
{
	struct dmm_txn *txn = NULL;
	struct refill_engine *engine = NULL;
	int ret;
	unsigned long flags;

	/* wait until an engine is available */
	ret = wait_event_interruptible(omap_dmm->engine_queue,
		atomic_add_unless(&omap_dmm->engine_counter, -1, 0));
	if (ret)
		return ERR_PTR(ret);

	/* grab an idle engine */
	spin_lock_irqsave(&list_lock, flags);
	if (!list_empty(&dmm->idle_head)) {
		engine = list_entry(dmm->idle_head.next, struct refill_engine,
					idle_node);
		list_del(&engine->idle_node);
	}
	spin_unlock_irqrestore(&list_lock, flags);

	BUG_ON(!engine);

	txn = &engine->txn;
	engine->tcm = tcm;
	txn->engine_handle = engine;
	txn->last_pat = NULL;
	txn->current_va = engine->refill_va;
	txn->current_pa = engine->refill_pa;

	return txn;
}
/**
 * Add region to DMM transaction.  If pages or pages[i] is NULL, then the
 * corresponding slot is cleared (ie. dummy_pa is programmed)
 */
static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
		struct page **pages, uint32_t npages, uint32_t roll)
{
	dma_addr_t pat_pa = 0;
	uint32_t *data;
	struct pat *pat;
	struct refill_engine *engine = txn->engine_handle;
	int columns = (1 + area->x1 - area->x0);
	int rows = (1 + area->y1 - area->y0);
	int i = columns * rows;

	pat = alloc_dma(txn, sizeof(struct pat), &pat_pa);

	if (txn->last_pat)
		txn->last_pat->next_pa = (uint32_t)pat_pa;

	pat->area = *area;

	/* adjust Y coordinates based off of container parameters */
	pat->area.y0 += engine->tcm->y_offset;
	pat->area.y1 += engine->tcm->y_offset;

	pat->ctrl = (struct pat_ctrl){
		.start = 1,
		.lut_id = engine->tcm->lut_id,
	};

	data = alloc_dma(txn, 4 * i, &pat->data_pa);

	while (i--) {
		int n = i + roll;
		if (n >= npages)
			n -= npages;
		data[i] = (pages && pages[n]) ?
			page_to_phys(pages[n]) : engine->dmm->dummy_pa;
	}

	txn->last_pat = pat;

	return;
}
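/*
 * Note added for illustration: the descriptors built above form a singly
 * linked list in engine-local DMA memory.  Each struct pat holds the physical
 * address of its successor in next_pa, dmm_txn_append() patches the previous
 * descriptor to point at the new one, and dmm_txn_commit() below terminates
 * the chain with next_pa = 0 before handing the head address
 * (engine->refill_pa) to the PAT_DESCR register.
 */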
/**
 * Commit the DMM transaction.
 */
static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
{
	int ret = 0;
	struct refill_engine *engine = txn->engine_handle;
	struct dmm *dmm = engine->dmm;

	if (!txn->last_pat) {
		dev_err(engine->dmm->dev, "need at least one txn\n");
		ret = -EINVAL;
		goto cleanup;
	}

	txn->last_pat->next_pa = 0;

	/* write to PAT_DESCR to clear out any pending transaction */
	writel(0x0, dmm->base + reg[PAT_DESCR][engine->id]);

	/* wait for engine ready: */
	ret = wait_status(engine, DMM_PATSTATUS_READY);
	if (ret) {
		ret = -EFAULT;
		goto cleanup;
	}

	/* mark whether it is async to denote list management in IRQ handler */
	engine->async = wait ? false : true;

	/* kick reload */
	writel(engine->refill_pa,
		dmm->base + reg[PAT_DESCR][engine->id]);

	if (wait) {
		if (wait_event_interruptible_timeout(engine->wait_for_refill,
				wait_status(engine, DMM_PATSTATUS_READY) == 0,
				msecs_to_jiffies(1)) <= 0) {
			dev_err(dmm->dev, "timed out waiting for done\n");
			ret = -ETIMEDOUT;
		}
	}

cleanup:
	/* only place engine back on list if we are done with it */
	if (ret || wait)
		release_engine(engine);

	return ret;
}
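/*
 * Summary (added comment, derived from the code above): a synchronous commit
 * (wait == true) blocks on engine->wait_for_refill until the LST interrupt
 * fires and releases the engine here; an asynchronous commit returns as soon
 * as PAT_DESCR has been written and relies on omap_dmm_irq_handler() to call
 * release_engine() once the refill completes.
 */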
/*
 * DMM programming
 */
static int fill(struct tcm_area *area, struct page **pages,
		uint32_t npages, uint32_t roll, bool wait)
{
	int ret = 0;
	struct tcm_area slice, area_s;
	struct dmm_txn *txn;

	txn = dmm_txn_init(omap_dmm, area->tcm);
	if (IS_ERR_OR_NULL(txn))
		return -ENOMEM;

	tcm_for_each_slice(slice, *area, area_s) {
		struct pat_area p_area = {
				.x0 = slice.p0.x,  .y0 = slice.p0.y,
				.x1 = slice.p1.x,  .y1 = slice.p1.y,
		};

		dmm_txn_append(txn, &p_area, pages, npages, roll);

		roll += tcm_sizeof(slice);
	}

	ret = dmm_txn_commit(txn, wait);

	return ret;
}
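/*
 * Illustrative note: the tcm_area handed to fill() may describe a 1D range
 * that wraps across several container rows; tcm_for_each_slice() splits it
 * into row-aligned slices, and 'roll' advances through pages[] (wrapping at
 * npages) so that consecutive slices are programmed with successive windows
 * of the page array, which is what allows callers to roll a buffer within
 * its pinned area.
 */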
/*
 * Pin/unpin
 */

/* note: slots for which pages[i] == NULL are filled w/ dummy page
 */
int tiler_pin(struct tiler_block *block, struct page **pages,
		uint32_t npages, uint32_t roll, bool wait)
{
	int ret;

	ret = fill(&block->area, pages, npages, roll, wait);

	if (ret)
		tiler_unpin(block);

	return ret;
}

int tiler_unpin(struct tiler_block *block)
{
	return fill(&block->area, NULL, 0, 0, false);
}
/*
 * Reserve/release
 */
struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
		uint16_t h, uint16_t align)
{
	struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
	u32 min_align = 128;
	int ret;
	unsigned long flags;

	/* guard against allocation failure before touching the block */
	if (!block)
		return ERR_PTR(-ENOMEM);

	BUG_ON(!validfmt(fmt));

	/* convert width/height to slots */
	w = DIV_ROUND_UP(w, geom[fmt].slot_w);
	h = DIV_ROUND_UP(h, geom[fmt].slot_h);

	/* convert alignment to slots */
	min_align = max(min_align, (geom[fmt].slot_w * geom[fmt].cpp));
	align = ALIGN(align, min_align);
	align /= geom[fmt].slot_w * geom[fmt].cpp;

	block->fmt = fmt;

	ret = tcm_reserve_2d(containers[fmt], w, h, align, &block->area);
	if (ret) {
		kfree(block);
		return ERR_PTR(-ENOMEM);
	}

	/* add to allocation list */
	spin_lock_irqsave(&list_lock, flags);
	list_add(&block->alloc_node, &omap_dmm->alloc_head);
	spin_unlock_irqrestore(&list_lock, flags);

	return block;
}
struct tiler_block *tiler_reserve_1d(size_t size)
{
	struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
	int num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long flags;

	if (!block)
		return ERR_PTR(-ENOMEM);

	block->fmt = TILFMT_PAGE;

	if (tcm_reserve_1d(containers[TILFMT_PAGE], num_pages,
				&block->area)) {
		kfree(block);
		return ERR_PTR(-ENOMEM);
	}

	spin_lock_irqsave(&list_lock, flags);
	list_add(&block->alloc_node, &omap_dmm->alloc_head);
	spin_unlock_irqrestore(&list_lock, flags);

	return block;
}
/* note: if you have pin'd pages, you should have already unpin'd first! */
int tiler_release(struct tiler_block *block)
{
	int ret = tcm_free(&block->area);
	unsigned long flags;

	if (block->area.tcm)
		dev_err(omap_dmm->dev, "failed to release block\n");

	spin_lock_irqsave(&list_lock, flags);
	list_del(&block->alloc_node);
	spin_unlock_irqrestore(&list_lock, flags);

	kfree(block);
	return ret;
}
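/*
 * Usage sketch (added for illustration; not part of the original file): a
 * caller such as omap_gem typically drives the API above in this order:
 *
 *	block = tiler_reserve_2d(TILFMT_16BIT, width, height, 0);
 *	if (IS_ERR(block))
 *		return PTR_ERR(block);
 *	ret = tiler_pin(block, pages, npages, 0, true);	// program the PAT
 *	...
 *	tiler_unpin(block);	// point the slots back at the dummy page
 *	tiler_release(block);	// free the tcm area and the block
 */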
/*
 * Utils
 */

/* calculate the tiler space address of a pixel in a view orientation...
 * below description copied from the display subsystem section of TRM:
 *
 * When the TILER is addressed, the bits:
 *   [28:27] = 0x0 for 8-bit tiled
 *             0x1 for 16-bit tiled
 *             0x2 for 32-bit tiled
 *             0x3 for page mode
 *   [31:29] = 0x0 for 0-degree view
 *             0x1 for 180-degree view + mirroring
 *             0x2 for 0-degree view + mirroring
 *             0x3 for 180-degree view
 *             0x4 for 270-degree view + mirroring
 *             0x5 for 270-degree view
 *             0x6 for 90-degree view
 *             0x7 for 90-degree view + mirroring
 * Otherwise the bits indicate the corresponding bit address to access
 * the SDRAM.
 */
static u32 tiler_get_address(enum tiler_fmt fmt, u32 orient, u32 x, u32 y)
{
	u32 x_bits, y_bits, tmp, x_mask, y_mask, alignment;

	x_bits = CONT_WIDTH_BITS - geom[fmt].x_shft;
	y_bits = CONT_HEIGHT_BITS - geom[fmt].y_shft;
	alignment = geom[fmt].x_shft + geom[fmt].y_shft;

	/* validate coordinate */
	x_mask = MASK(x_bits);
	y_mask = MASK(y_bits);

	if (x < 0 || x > x_mask || y < 0 || y > y_mask) {
		DBG("invalid coords: %u < 0 || %u > %u || %u < 0 || %u > %u",
				x, x, x_mask, y, y, y_mask);
		return 0;
	}

	/* account for mirroring */
	if (orient & MASK_X_INVERT)
		x ^= x_mask;
	if (orient & MASK_Y_INVERT)
		y ^= y_mask;

	/* get coordinate address */
	if (orient & MASK_XY_FLIP)
		tmp = ((x << y_bits) + y);
	else
		tmp = ((y << x_bits) + x);

	return TIL_ADDR((tmp << alignment), orient, fmt);
}
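/*
 * Worked example (added comment; the constants live in omap_dmm_priv.h and
 * omap_dmm_tiler.h): for TILFMT_16BIT (x_shft = 0, y_shft = 1) with a
 * 0-degree, non-mirrored view the function reduces to
 *
 *	tmp  = (y << CONT_WIDTH_BITS) + x;
 *	addr = TIL_ADDR(tmp << 1, 0, TILFMT_16BIT);	// alignment = 1
 *
 * i.e. a row-major pixel offset scaled by the 2-byte pixel size, with
 * TIL_ADDR() folding in the orientation and access-mode bits that the TRM
 * excerpt above describes.
 */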
dma_addr_t tiler_ssptr(struct tiler_block *block)
{
	BUG_ON(!validfmt(block->fmt));

	return TILVIEW_8BIT + tiler_get_address(block->fmt, 0,
			block->area.p0.x * geom[block->fmt].slot_w,
			block->area.p0.y * geom[block->fmt].slot_h);
}

dma_addr_t tiler_tsptr(struct tiler_block *block, uint32_t orient,
		uint32_t x, uint32_t y)
{
	struct tcm_pt *p = &block->area.p0;
	BUG_ON(!validfmt(block->fmt));

	return tiler_get_address(block->fmt, orient,
			(p->x * geom[block->fmt].slot_w) + x,
			(p->y * geom[block->fmt].slot_h) + y);
}

void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h)
{
	BUG_ON(!validfmt(fmt));
	*w = round_up(*w, geom[fmt].slot_w);
	*h = round_up(*h, geom[fmt].slot_h);
}

uint32_t tiler_stride(enum tiler_fmt fmt, uint32_t orient)
{
	BUG_ON(!validfmt(fmt));

	if (orient & MASK_XY_FLIP)
		return 1 << (CONT_HEIGHT_BITS + geom[fmt].x_shft);
	else
		return 1 << (CONT_WIDTH_BITS + geom[fmt].y_shft);
}

size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h)
{
	tiler_align(fmt, &w, &h);
	return geom[fmt].cpp * w * h;
}

size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h)
{
	BUG_ON(!validfmt(fmt));
	return round_up(geom[fmt].cpp * w, PAGE_SIZE) * h;
}
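/*
 * Note added for clarity: tiler_size() is the physical footprint after
 * rounding the dimensions up to whole slots, while tiler_vsize() is the size
 * of the virtual (page-aligned) view where every row is padded to a full
 * page; e.g. a 100-pixel-wide 16-bit buffer uses round_up(200, PAGE_SIZE) =
 * 4096 bytes of virtual stride per line on a 4KiB-page system.
 */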
bool dmm_is_available(void)
{
	return omap_dmm ? true : false;
}
static int omap_dmm_remove(struct platform_device *dev)
{
	struct tiler_block *block, *_block;
	int i;
	unsigned long flags;

	if (omap_dmm) {
		/* free all area regions */
		spin_lock_irqsave(&list_lock, flags);
		list_for_each_entry_safe(block, _block, &omap_dmm->alloc_head,
					alloc_node) {
			list_del(&block->alloc_node);
			kfree(block);
		}
		spin_unlock_irqrestore(&list_lock, flags);

		for (i = 0; i < omap_dmm->num_lut; i++)
			if (omap_dmm->tcm && omap_dmm->tcm[i])
				omap_dmm->tcm[i]->deinit(omap_dmm->tcm[i]);
		kfree(omap_dmm->tcm);

		kfree(omap_dmm->engines);
		if (omap_dmm->refill_va)
			dma_free_writecombine(omap_dmm->dev,
				REFILL_BUFFER_SIZE * omap_dmm->num_engines,
				omap_dmm->refill_va,
				omap_dmm->refill_pa);
		if (omap_dmm->dummy_page)
			__free_page(omap_dmm->dummy_page);

		if (omap_dmm->irq > 0)
			free_irq(omap_dmm->irq, omap_dmm);

		iounmap(omap_dmm->base);
		kfree(omap_dmm);
		omap_dmm = NULL;
	}

	return 0;
}
static int omap_dmm_probe(struct platform_device *dev)
{
	int ret = -EFAULT, i;
	struct tcm_area area = {0};
	u32 hwinfo, pat_geom;
	struct resource *mem;

	omap_dmm = kzalloc(sizeof(*omap_dmm), GFP_KERNEL);
	if (!omap_dmm)
		goto fail;

	/* initialize lists */
	INIT_LIST_HEAD(&omap_dmm->alloc_head);
	INIT_LIST_HEAD(&omap_dmm->idle_head);

	init_waitqueue_head(&omap_dmm->engine_queue);

	/* lookup hwmod data - base address and irq */
	mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(&dev->dev, "failed to get base address resource\n");
		goto fail;
	}

	omap_dmm->base = ioremap(mem->start, SZ_2K);

	if (!omap_dmm->base) {
		dev_err(&dev->dev, "failed to get dmm base address\n");
		goto fail;
	}

	omap_dmm->irq = platform_get_irq(dev, 0);
	if (omap_dmm->irq < 0) {
		dev_err(&dev->dev, "failed to get IRQ resource\n");
		goto fail;
	}
	omap_dmm->dev = &dev->dev;

	hwinfo = readl(omap_dmm->base + DMM_PAT_HWINFO);
	omap_dmm->num_engines = (hwinfo >> 24) & 0x1F;
	omap_dmm->num_lut = (hwinfo >> 16) & 0x1F;
	omap_dmm->container_width = 256;
	omap_dmm->container_height = 128;

	atomic_set(&omap_dmm->engine_counter, omap_dmm->num_engines);

	/* read out actual LUT width and height */
	pat_geom = readl(omap_dmm->base + DMM_PAT_GEOMETRY);
	omap_dmm->lut_width = ((pat_geom >> 16) & 0xF) << 5;
	omap_dmm->lut_height = ((pat_geom >> 24) & 0xF) << 5;

	/* increment LUT by one if on OMAP5 */
	/* LUT has twice the height, and is split into a separate container */
	if (omap_dmm->lut_height != omap_dmm->container_height)
		omap_dmm->num_lut++;

	/* initialize DMM registers */
	writel(0x88888888, omap_dmm->base + DMM_PAT_VIEW__0);
	writel(0x88888888, omap_dmm->base + DMM_PAT_VIEW__1);
	writel(0x80808080, omap_dmm->base + DMM_PAT_VIEW_MAP__0);
	writel(0x80000000, omap_dmm->base + DMM_PAT_VIEW_MAP_BASE);
	writel(0x88888888, omap_dmm->base + DMM_TILER_OR__0);
	writel(0x88888888, omap_dmm->base + DMM_TILER_OR__1);

	ret = request_irq(omap_dmm->irq, omap_dmm_irq_handler, IRQF_SHARED,
				"omap_dmm_irq_handler", omap_dmm);

	if (ret) {
		dev_err(&dev->dev, "couldn't register IRQ %d, error %d\n",
			omap_dmm->irq, ret);
		omap_dmm->irq = -1;
		goto fail;
	}
	/* Enable all interrupts for each refill engine except
	 * ERR_LUT_MISS<n> (which is just advisory, and we don't care
	 * about because we want to be able to refill live scanout
	 * buffers for accelerated pan/scroll) and FILL_DSC<n> which
	 * we just generally don't care about.
	 */
	writel(0x7e7e7e7e, omap_dmm->base + DMM_PAT_IRQENABLE_SET);

	omap_dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!omap_dmm->dummy_page) {
		dev_err(&dev->dev, "could not allocate dummy page\n");
		ret = -ENOMEM;
		goto fail;
	}

	/* set dma mask for device */
	ret = dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32));
	if (ret)
		goto fail;
	omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);

	/* alloc refill memory */
	omap_dmm->refill_va = dma_alloc_writecombine(&dev->dev,
				REFILL_BUFFER_SIZE * omap_dmm->num_engines,
				&omap_dmm->refill_pa, GFP_KERNEL);
	if (!omap_dmm->refill_va) {
		dev_err(&dev->dev, "could not allocate refill memory\n");
		goto fail;
	}

	/* alloc engines */
	omap_dmm->engines = kcalloc(omap_dmm->num_engines,
				    sizeof(struct refill_engine), GFP_KERNEL);
	if (!omap_dmm->engines) {
		ret = -ENOMEM;
		goto fail;
	}

	for (i = 0; i < omap_dmm->num_engines; i++) {
		omap_dmm->engines[i].id = i;
		omap_dmm->engines[i].dmm = omap_dmm;
		omap_dmm->engines[i].refill_va = omap_dmm->refill_va +
						(REFILL_BUFFER_SIZE * i);
		omap_dmm->engines[i].refill_pa = omap_dmm->refill_pa +
						(REFILL_BUFFER_SIZE * i);
		init_waitqueue_head(&omap_dmm->engines[i].wait_for_refill);

		list_add(&omap_dmm->engines[i].idle_node,
				&omap_dmm->idle_head);
	}

	omap_dmm->tcm = kcalloc(omap_dmm->num_lut, sizeof(*omap_dmm->tcm),
				GFP_KERNEL);
	if (!omap_dmm->tcm) {
		ret = -ENOMEM;
		goto fail;
	}
	/* init containers */
	/* Each LUT is associated with a TCM (container manager).  The TCM's
	   lut_id identifies the LUT that must be programmed during refill
	   operations. */
	for (i = 0; i < omap_dmm->num_lut; i++) {
		omap_dmm->tcm[i] = sita_init(omap_dmm->container_width,
						omap_dmm->container_height,
						NULL);

		if (!omap_dmm->tcm[i]) {
			dev_err(&dev->dev, "failed to allocate container\n");
			ret = -ENOMEM;
			goto fail;
		}

		omap_dmm->tcm[i]->lut_id = i;
	}
	/* assign access mode containers to applicable tcm container */
	/* OMAP 4 has 1 container for all 4 views */
	/* OMAP 5 has 2 containers, 1 for 2D and 1 for 1D */
	containers[TILFMT_8BIT] = omap_dmm->tcm[0];
	containers[TILFMT_16BIT] = omap_dmm->tcm[0];
	containers[TILFMT_32BIT] = omap_dmm->tcm[0];

	if (omap_dmm->container_height != omap_dmm->lut_height) {
		/* second LUT is used for PAGE mode.  Programming must use
		   y offset that is added to all y coordinates.  LUT id is
		   still 0, because it is the same LUT, just the upper 128
		   lines */
		containers[TILFMT_PAGE] = omap_dmm->tcm[1];
		omap_dmm->tcm[1]->y_offset = OMAP5_LUT_OFFSET;
		omap_dmm->tcm[1]->lut_id = 0;
	} else {
		containers[TILFMT_PAGE] = omap_dmm->tcm[0];
	}
	area = (struct tcm_area) {
		.tcm = NULL,
		.p1.x = omap_dmm->container_width - 1,
		.p1.y = omap_dmm->container_height - 1,
	};

	/* initialize all LUTs to dummy page entries */
	for (i = 0; i < omap_dmm->num_lut; i++) {
		area.tcm = omap_dmm->tcm[i];
		if (fill(&area, NULL, 0, 0, true))
			dev_err(omap_dmm->dev, "refill failed");
	}

	dev_info(omap_dmm->dev, "initialized all PAT entries\n");

	return 0;

fail:
	if (omap_dmm_remove(dev))
		dev_err(&dev->dev, "cleanup failed\n");
	return ret;
}
/*
 * debugfs support
 */

#ifdef CONFIG_DEBUG_FS

static const char *alphabet = "abcdefghijklmnopqrstuvwxyz"
				"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
static const char *special = ".,:;'\"`~!^-+";
static void fill_map(char **map, int xdiv, int ydiv, struct tcm_area *a,
							char c, bool ovw)
{
	int x, y;
	for (y = a->p0.y / ydiv; y <= a->p1.y / ydiv; y++)
		for (x = a->p0.x / xdiv; x <= a->p1.x / xdiv; x++)
			if (map[y][x] == ' ' || ovw)
				map[y][x] = c;
}

static void fill_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p,
									char c)
{
	map[p->y / ydiv][p->x / xdiv] = c;
}

static char read_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p)
{
	return map[p->y / ydiv][p->x / xdiv];
}
static int map_width(int xdiv, int x0, int x1)
{
	return (x1 / xdiv) - (x0 / xdiv) + 1;
}

static void text_map(char **map, int xdiv, char *nice, int yd, int x0, int x1)
{
	char *p = map[yd] + (x0 / xdiv);
	int w = (map_width(xdiv, x0, x1) - strlen(nice)) / 2;
	if (w >= 0) {
		p += w;
		while (*nice)
			*p++ = *nice++;
	}
}
static void map_1d_info(char **map, int xdiv, int ydiv, char *nice,
							struct tcm_area *a)
{
	sprintf(nice, "%dK", tcm_sizeof(*a) * 4);
	if (a->p0.y + 1 < a->p1.y) {
		text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv, 0,
							256 - 1);
	} else if (a->p0.y < a->p1.y) {
		if (strlen(nice) < map_width(xdiv, a->p0.x, 256 - 1))
			text_map(map, xdiv, nice, a->p0.y / ydiv,
					a->p0.x + xdiv, 256 - 1);
		else if (strlen(nice) < map_width(xdiv, 0, a->p1.x))
			text_map(map, xdiv, nice, a->p1.y / ydiv,
					0, a->p1.y - xdiv);
	} else if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x)) {
		text_map(map, xdiv, nice, a->p0.y / ydiv, a->p0.x, a->p1.x);
	}
}
static void map_2d_info(char **map, int xdiv, int ydiv, char *nice,
							struct tcm_area *a)
{
	sprintf(nice, "(%d*%d)", tcm_awidth(*a), tcm_aheight(*a));
	if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x))
		text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv,
							a->p0.x, a->p1.x);
}
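/*
 * Legend (added comment, derived from the helpers above): in the dump that
 * tiler_map_show() produces, 2D blocks are drawn with letters from 'alphabet'
 * and annotated "(w*h)" in slots, while 1D blocks are drawn with '=' between
 * a '<' start marker and a '>' end marker ('X' when an end slot is shared)
 * and annotated with their size in KB (4K per slot).
 */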
int tiler_map_show(struct seq_file *s, void *arg)
{
	int xdiv = 2, ydiv = 1;
	char **map = NULL, *global_map;
	struct tiler_block *block;
	struct tcm_area a, p;
	int i;
	const char *m2d = alphabet;
	const char *a2d = special;
	const char *m2dp = m2d, *a2dp = a2d;
	char nice[128];
	int h_adj;
	int w_adj;
	unsigned long flags;
	int lut_idx;

	if (!omap_dmm) {
		/* early return if dmm/tiler device is not initialized */
		return 0;
	}

	h_adj = omap_dmm->container_height / ydiv;
	w_adj = omap_dmm->container_width / xdiv;

	map = kmalloc(h_adj * sizeof(*map), GFP_KERNEL);
	global_map = kmalloc((w_adj + 1) * h_adj, GFP_KERNEL);

	if (!map || !global_map)
		goto error;
	for (lut_idx = 0; lut_idx < omap_dmm->num_lut; lut_idx++) {
		memset(map, 0, h_adj * sizeof(*map));
		memset(global_map, ' ', (w_adj + 1) * h_adj);

		for (i = 0; i < omap_dmm->container_height; i++) {
			map[i] = global_map + i * (w_adj + 1);
			map[i][w_adj] = 0;
		}

		spin_lock_irqsave(&list_lock, flags);

		list_for_each_entry(block, &omap_dmm->alloc_head, alloc_node) {
			if (block->area.tcm == omap_dmm->tcm[lut_idx]) {
				if (block->fmt != TILFMT_PAGE) {
					fill_map(map, xdiv, ydiv, &block->area,
						*m2dp, true);
					if (!*++a2dp)
						a2dp = a2d;
					if (!*++m2dp)
						m2dp = m2d;
					map_2d_info(map, xdiv, ydiv, nice,
							&block->area);
				} else {
					bool start = read_map_pt(map, xdiv,
						ydiv, &block->area.p0) == ' ';
					bool end = read_map_pt(map, xdiv, ydiv,
							&block->area.p1) == ' ';

					tcm_for_each_slice(a, block->area, p)
						fill_map(map, xdiv, ydiv, &a,
							'=', true);
					fill_map_pt(map, xdiv, ydiv,
							&block->area.p0,
							start ? '<' : 'X');
					fill_map_pt(map, xdiv, ydiv,
							&block->area.p1,
							end ? '>' : 'X');
					map_1d_info(map, xdiv, ydiv, nice,
							&block->area);
				}
			}
		}

		spin_unlock_irqrestore(&list_lock, flags);
		if (s) {
			seq_printf(s, "CONTAINER %d DUMP BEGIN\n", lut_idx);
			for (i = 0; i < 128; i++)
				seq_printf(s, "%03d:%s\n", i, map[i]);
			seq_printf(s, "CONTAINER %d DUMP END\n", lut_idx);
		} else {
			dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP BEGIN\n",
				lut_idx);
			for (i = 0; i < 128; i++)
				dev_dbg(omap_dmm->dev, "%03d:%s\n", i, map[i]);
			dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP END\n",
				lut_idx);
		}
	}

error:
	kfree(map);
	kfree(global_map);

	return 0;
}
#endif
#ifdef CONFIG_PM
static int omap_dmm_resume(struct device *dev)
{
	struct tcm_area area;
	int i;

	if (!omap_dmm)
		return -ENODEV;

	area = (struct tcm_area) {
		.tcm = NULL,
		.p1.x = omap_dmm->container_width - 1,
		.p1.y = omap_dmm->container_height - 1,
	};

	/* initialize all LUTs to dummy page entries */
	for (i = 0; i < omap_dmm->num_lut; i++) {
		area.tcm = omap_dmm->tcm[i];
		if (fill(&area, NULL, 0, 0, true))
			dev_err(dev, "refill failed");
	}

	return 0;
}

static const struct dev_pm_ops omap_dmm_pm_ops = {
	.resume = omap_dmm_resume,
};
#endif
#if defined(CONFIG_OF)
static const struct of_device_id dmm_of_match[] = {
	{ .compatible = "ti,omap4-dmm", },
	{ .compatible = "ti,omap5-dmm", },
	{},
};
#endif

struct platform_driver omap_dmm_driver = {
	.probe = omap_dmm_probe,
	.remove = omap_dmm_remove,
	.driver = {
		.owner = THIS_MODULE,
		.name = DMM_DRIVER_NAME,
		.of_match_table = of_match_ptr(dmm_of_match),
#ifdef CONFIG_PM
		.pm = &omap_dmm_pm_ops,
#endif
	},
};

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Andy Gross <andy.gross@ti.com>");
MODULE_DESCRIPTION("OMAP DMM/Tiler Driver");
MODULE_ALIAS("platform:" DMM_DRIVER_NAME);