/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs <bskeggs@redhat.com>
 */
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_ramht.h"
#include "nouveau_fence.h"
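/*
 * Fence support for NV10-family chipsets.  A fence is emitted by writing its
 * sequence number through the channel's reference counter; completion is
 * detected by reading that counter back.  On NV17 and newer chipsets a small
 * VRAM buffer is additionally used as a semaphore so that one channel can
 * wait on another (see nv17_fence_sync below).
 */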
struct nv10_fence_chan {
        struct nouveau_fence_chan base;
};
struct nv10_fence_priv {
        struct nouveau_fence_priv base;
        struct nouveau_bo *bo;          /* shared semaphore buffer (NV17+) */
        spinlock_t lock;                /* protects sequence */
        u32 sequence;                   /* semaphore counter for nv17_fence_sync */
};
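/*
 * Emit a fence by writing its sequence number into the channel's reference
 * counter via the NV10_SUBCHAN_REF_CNT method.
 */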
static int
nv10_fence_emit(struct nouveau_fence *fence)
{
        struct nouveau_channel *chan = fence->channel;
        int ret = RING_SPACE(chan, 2);
        if (ret == 0) {
                BEGIN_NV04(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
                OUT_RING  (chan, fence->sequence);
                FIRE_RING (chan);
        }
        return ret;
}
static int
nv10_fence_sync(struct nouveau_fence *fence,
                struct nouveau_channel *prev, struct nouveau_channel *chan)
{
        /* Plain NV10 has no semaphores, so inter-channel sync is unsupported. */
        return -ENODEV;
}
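/*
 * NV17+ inter-channel sync using the shared semaphore buffer, mapped into
 * each channel as the NvSema DMA object: the previous channel releases
 * value + 1 once it reaches the sync point, the new channel acquires
 * value + 1 before continuing and then releases value + 2, keeping the
 * semaphore in step with priv->sequence.
 */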
static int
nv17_fence_sync(struct nouveau_fence *fence,
                struct nouveau_channel *prev, struct nouveau_channel *chan)
{
        struct nv10_fence_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_FENCE);
        u32 value;
        int ret;

        if (!mutex_trylock(&prev->mutex))
                return -EBUSY;

        spin_lock(&priv->lock);
        value = priv->sequence;
        priv->sequence += 2;
        spin_unlock(&priv->lock);

        ret = RING_SPACE(prev, 5);
        if (!ret) {
                BEGIN_NV04(prev, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
                OUT_RING  (prev, NvSema);       /* semaphore DMA object */
                OUT_RING  (prev, 0);            /* offset */
                OUT_RING  (prev, value + 0);    /* acquire */
                OUT_RING  (prev, value + 1);    /* release */
                FIRE_RING (prev);
        }

        if (!ret && !(ret = RING_SPACE(chan, 5))) {
                BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
                OUT_RING  (chan, NvSema);
                OUT_RING  (chan, 0);
                OUT_RING  (chan, value + 1);
                OUT_RING  (chan, value + 2);
                FIRE_RING (chan);
        }

        mutex_unlock(&prev->mutex);
        return 0;
}
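/*
 * Fence completion check: read the last emitted sequence number back from
 * offset 0x0048 of the channel's register space (the reference counter
 * written by nv10_fence_emit).
 */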
static u32
nv10_fence_read(struct nouveau_channel *chan)
{
        return nvchan_rd32(chan, 0x0048);
}
static void
nv10_fence_context_del(struct nouveau_channel *chan, int engine)
{
        struct nv10_fence_chan *fctx = chan->engctx[engine];
        nouveau_fence_context_del(&fctx->base);
        chan->engctx[engine] = NULL;
        kfree(fctx);
}
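/*
 * Per-channel fence context setup.  If the shared semaphore buffer exists
 * (NV17+), wrap it in a DMA object and insert that object into the channel's
 * RAMHT under the NvSema handle so nv17_fence_sync can reference it from
 * the pushbuffer.
 */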
static int
nv10_fence_context_new(struct nouveau_channel *chan, int engine)
{
        struct nv10_fence_priv *priv = nv_engine(chan->dev, engine);
        struct nv10_fence_chan *fctx;
        struct nouveau_gpuobj *obj;
        int ret = 0;

        fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
        if (!fctx)
                return -ENOMEM;

        nouveau_fence_context_new(&fctx->base);

        if (priv->bo) {
                struct ttm_mem_reg *mem = &priv->bo->bo.mem;

                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
                                             mem->start * PAGE_SIZE, mem->size,
                                             NV_MEM_ACCESS_RW,
                                             NV_MEM_TARGET_VRAM, &obj);
                if (!ret) {
                        ret = nouveau_ramht_insert(chan, NvSema, obj);
                        nouveau_gpuobj_ref(NULL, &obj);
                }
        }

        if (ret)
                nv10_fence_context_del(chan, engine);
        return ret;
}
static int
nv10_fence_fini(struct drm_device *dev, int engine, bool suspend)
{
        return 0;
}

static int
nv10_fence_init(struct drm_device *dev, int engine)
{
        return 0;
}
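/* Engine teardown: drop the semaphore buffer and unhook the engine. */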
static void
nv10_fence_destroy(struct drm_device *dev, int engine)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv10_fence_priv *priv = nv_engine(dev, engine);

        nouveau_bo_ref(NULL, &priv->bo);
        dev_priv->eng[engine] = NULL;
        kfree(priv);
}
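/*
 * Engine creation: register the fence engine vfuncs and, on chipsets >= 0x17,
 * allocate, pin and map a page of VRAM as the shared semaphore buffer,
 * switching the sync hook over to nv17_fence_sync.
 */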
int
nv10_fence_create(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv10_fence_priv *priv;
        int ret = 0;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->base.engine.destroy = nv10_fence_destroy;
        priv->base.engine.init = nv10_fence_init;
        priv->base.engine.fini = nv10_fence_fini;
        priv->base.engine.context_new = nv10_fence_context_new;
        priv->base.engine.context_del = nv10_fence_context_del;
        priv->base.emit = nv10_fence_emit;
        priv->base.read = nv10_fence_read;
        priv->base.sync = nv10_fence_sync;
        dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
        spin_lock_init(&priv->lock);

        if (dev_priv->chipset >= 0x17) {
                ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
                                     0, 0x0000, NULL, &priv->bo);
                if (!ret) {
                        ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
                        if (!ret)
                                ret = nouveau_bo_map(priv->bo);
                        if (ret)
                                nouveau_bo_ref(NULL, &priv->bo);
                }

                if (ret == 0) {
                        nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
                        priv->base.sync = nv17_fence_sync;
                }
        }

        if (ret)
                nv10_fence_destroy(dev, NVOBJ_ENGINE_FENCE);
        return ret;
}