Adding support for MOXA ART SoC. Testing port of linux-2.6.32.60-moxart.
[linux-3.6.7-moxart.git] / drivers/gpu/drm/nouveau/nv10_fence.c
blob 8a1b75009185dcc735af23265b8065e163ba19c1
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs <bskeggs@redhat.com>
 */

#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_ramht.h"
#include "nouveau_fence.h"

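/*
 * Per-channel and per-device fence state.  The buffer object in
 * nv10_fence_priv is only allocated for chipset >= 0x17 and backs the
 * NvSema semaphore used for cross-channel synchronisation; the spinlock
 * protects the shared semaphore sequence counter.
 */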
struct nv10_fence_chan {
        struct nouveau_fence_chan base;
};

struct nv10_fence_priv {
        struct nouveau_fence_priv base;
        struct nouveau_bo *bo;
        spinlock_t lock;
        u32 sequence;
};

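/*
 * Emit a fence by writing its sequence number to the channel via the
 * NV10_SUBCHAN_REF_CNT method; presumably this is the same counter that
 * nv10_fence_read() polls back at channel offset 0x0048.
 */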
static int
nv10_fence_emit(struct nouveau_fence *fence)
{
        struct nouveau_channel *chan = fence->channel;
        int ret = RING_SPACE(chan, 2);
        if (ret == 0) {
                BEGIN_NV04(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
                OUT_RING (chan, fence->sequence);
                FIRE_RING (chan);
        }
        return ret;
}

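/*
 * Before nv17 there is no semaphore buffer, so cross-channel sync is
 * simply not supported on this path.
 */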
static int
nv10_fence_sync(struct nouveau_fence *fence,
                struct nouveau_channel *prev, struct nouveau_channel *chan)
{
        return -ENODEV;
}

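/*
 * Cross-channel sync via the shared NvSema semaphore: grab an even
 * sequence value, make the previous channel acquire 'value' then release
 * 'value + 1', and make the target channel acquire 'value + 1' then
 * release 'value + 2'.  The four data words following
 * NV11_SUBCHAN_DMA_SEMAPHORE appear to be ctxdma handle, offset, acquire
 * value and release value.  If the previous channel's mutex is contended
 * we bail out with -EBUSY rather than block.
 */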
static int
nv17_fence_sync(struct nouveau_fence *fence,
                struct nouveau_channel *prev, struct nouveau_channel *chan)
{
        struct nv10_fence_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_FENCE);
        u32 value;
        int ret;

        if (!mutex_trylock(&prev->mutex))
                return -EBUSY;

        spin_lock(&priv->lock);
        value = priv->sequence;
        priv->sequence += 2;
        spin_unlock(&priv->lock);

        ret = RING_SPACE(prev, 5);
        if (!ret) {
                BEGIN_NV04(prev, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
                OUT_RING (prev, NvSema);
                OUT_RING (prev, 0);
                OUT_RING (prev, value + 0);
                OUT_RING (prev, value + 1);
                FIRE_RING (prev);
        }

        if (!ret && !(ret = RING_SPACE(chan, 5))) {
                BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
                OUT_RING (chan, NvSema);
                OUT_RING (chan, 0);
                OUT_RING (chan, value + 1);
                OUT_RING (chan, value + 2);
                FIRE_RING (chan);
        }

        mutex_unlock(&prev->mutex);
        return 0;
}

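/* Read the current fence value back from the channel's register window. */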
static u32
nv10_fence_read(struct nouveau_channel *chan)
{
        return nvchan_rd32(chan, 0x0048);
}

static void
nv10_fence_context_del(struct nouveau_channel *chan, int engine)
{
        struct nv10_fence_chan *fctx = chan->engctx[engine];
        nouveau_fence_context_del(&fctx->base);
        chan->engctx[engine] = NULL;
        kfree(fctx);
}

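/*
 * Per-channel setup: if the semaphore buffer exists, wrap it in a DMA
 * object and insert it into the channel's RAMHT under the NvSema handle
 * so nv17_fence_sync() can reference it by name.
 */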
static int
nv10_fence_context_new(struct nouveau_channel *chan, int engine)
{
        struct nv10_fence_priv *priv = nv_engine(chan->dev, engine);
        struct nv10_fence_chan *fctx;
        struct nouveau_gpuobj *obj;
        int ret = 0;

        fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
        if (!fctx)
                return -ENOMEM;

        nouveau_fence_context_new(&fctx->base);

        if (priv->bo) {
                struct ttm_mem_reg *mem = &priv->bo->bo.mem;

                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
                                             mem->start * PAGE_SIZE, mem->size,
                                             NV_MEM_ACCESS_RW,
                                             NV_MEM_TARGET_VRAM, &obj);
                if (!ret) {
                        ret = nouveau_ramht_insert(chan, NvSema, obj);
                        nouveau_gpuobj_ref(NULL, &obj);
                }
        }

        if (ret)
                nv10_fence_context_del(chan, engine);
        return ret;
}

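/* No per-device state needs saving or restoring: init and fini are no-ops. */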
static int
nv10_fence_fini(struct drm_device *dev, int engine, bool suspend)
{
        return 0;
}

static int
nv10_fence_init(struct drm_device *dev, int engine)
{
        return 0;
}

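/* Tear down: drop the semaphore buffer reference and unhook the engine. */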
static void
nv10_fence_destroy(struct drm_device *dev, int engine)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv10_fence_priv *priv = nv_engine(dev, engine);

        nouveau_bo_ref(NULL, &priv->bo);
        dev_priv->eng[engine] = NULL;
        kfree(priv);
}

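/*
 * Register the software fence "engine".  On chipset >= 0x17 a 4 KiB VRAM
 * buffer is allocated, pinned and mapped to hold the semaphore, its first
 * word is zeroed (presumably so the first acquire succeeds), and the sync
 * hook is upgraded to nv17_fence_sync().
 */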
int
nv10_fence_create(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv10_fence_priv *priv;
        int ret = 0;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->base.engine.destroy = nv10_fence_destroy;
        priv->base.engine.init = nv10_fence_init;
        priv->base.engine.fini = nv10_fence_fini;
        priv->base.engine.context_new = nv10_fence_context_new;
        priv->base.engine.context_del = nv10_fence_context_del;
        priv->base.emit = nv10_fence_emit;
        priv->base.read = nv10_fence_read;
        priv->base.sync = nv10_fence_sync;
        dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
        spin_lock_init(&priv->lock);

        if (dev_priv->chipset >= 0x17) {
                ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
                                     0, 0x0000, NULL, &priv->bo);
                if (!ret) {
                        ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
                        if (!ret)
                                ret = nouveau_bo_map(priv->bo);
                        if (ret)
                                nouveau_bo_ref(NULL, &priv->bo);
                }

                if (ret == 0) {
                        nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
                        priv->base.sync = nv17_fence_sync;
                }
        }

        if (ret)
                nv10_fence_destroy(dev, NVOBJ_ENGINE_FENCE);
        return ret;
}