/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
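
/*
 * Set up the driver's own DMA channel: allocate a push buffer and a FIFO
 * channel, map the push buffer, and bind an NV_MEMORY_TO_MEMORY_FORMAT
 * object (plus its notifier) for in-kernel buffer moves.
 */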
int
nouveau_dma_channel_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_drm_channel *dchan = &dev_priv->channel;
	struct nouveau_gpuobj *gpuobj = NULL;
	struct mem_block *pushbuf;
	int grclass, ret, i;

	pushbuf = nouveau_mem_alloc(dev, 0, 0x8000,
				    NOUVEAU_MEM_FB | NOUVEAU_MEM_MAPPED,
				    (struct drm_file *)-2);
	if (!pushbuf) {
		DRM_ERROR("Failed to allocate DMA push buffer\n");
		return -ENOMEM;
	}

	/* Allocate channel */
	ret = nouveau_fifo_alloc(dev, &dchan->chan, (struct drm_file *)-2,
				 pushbuf, NvDmaFB, NvDmaTT);
	if (ret) {
		DRM_ERROR("Error allocating GPU channel: %d\n", ret);
		return ret;
	}
	DRM_DEBUG("Using FIFO channel %d\n", dchan->chan->id);

	/* Map push buffer */
	drm_core_ioremap(dchan->chan->pushbuf_mem->map, dev);
	if (!dchan->chan->pushbuf_mem->map->handle) {
		DRM_ERROR("Failed to ioremap push buffer\n");
		return -EINVAL;
	}
	dchan->pushbuf = (void *)dchan->chan->pushbuf_mem->map->handle;

	/* Initialise DMA vars */
	dchan->max  = (dchan->chan->pushbuf_mem->size >> 2) - 2;
	dchan->put  = dchan->chan->pushbuf_base >> 2;
	dchan->cur  = dchan->put;
	dchan->free = dchan->max - dchan->cur;

	/* Insert NOPS for NOUVEAU_DMA_SKIPS */
	dchan->free -= NOUVEAU_DMA_SKIPS;
	dchan->push_free = NOUVEAU_DMA_SKIPS;
	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
		OUT_RING(0);

	/* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier */
	if ((ret = nouveau_notifier_alloc(dchan->chan, NvNotify0, 1,
					  &dchan->notify0_offset))) {
		DRM_ERROR("Error allocating NvNotify0: %d\n", ret);
		return ret;
	}

	/* We use NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
	if (dev_priv->card_type < NV_50) grclass = NV_MEMORY_TO_MEMORY_FORMAT;
	else                             grclass = NV50_MEMORY_TO_MEMORY_FORMAT;
	if ((ret = nouveau_gpuobj_gr_new(dchan->chan, grclass, &gpuobj))) {
		DRM_ERROR("Error creating NvM2MF: %d\n", ret);
		return ret;
	}

	if ((ret = nouveau_gpuobj_ref_add(dev, dchan->chan, NvM2MF,
					  gpuobj, NULL))) {
		DRM_ERROR("Error referencing NvM2MF: %d\n", ret);
		return ret;
	}
	dchan->m2mf_dma_source = NvDmaFB;
	dchan->m2mf_dma_destin = NvDmaFB;

	BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
	OUT_RING  (NvM2MF);
	BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_NOTIFY, 1);
	OUT_RING  (NvNotify0);
	BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_SOURCE, 2);
	OUT_RING  (dchan->m2mf_dma_source);
	OUT_RING  (dchan->m2mf_dma_destin);
	FIRE_RING();

	return 0;
}
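
/*
 * Tear down the channel created by nouveau_dma_channel_init(): unmap the
 * push buffer and release the FIFO channel, if one was allocated.
 */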
void
nouveau_dma_channel_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_drm_channel *dchan = &dev_priv->channel;

	if (dchan->chan) {
		drm_core_ioremapfree(dchan->chan->pushbuf_mem->map, dev);
		nouveau_fifo_free(dchan->chan);
		dchan->chan = NULL;
	}
}
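
/*
 * READ_GET()/WRITE_PUT() translate between the channel's hardware GET/PUT
 * registers (byte offsets) and 32-bit word indices relative to
 * pushbuf_base, which is the unit used by the dchan->cur/put/free
 * bookkeeping below.
 */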
#define READ_GET() ((NV_READ(dchan->chan->get) -		\
		     dchan->chan->pushbuf_base) >> 2)
#define WRITE_PUT(val) do {					\
	NV_WRITE(dchan->chan->put,				\
		 ((val) << 2) + dchan->chan->pushbuf_base);	\
} while (0)
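
/*
 * Wait until at least 'size' words are free in the push buffer, following
 * the GPU's GET pointer.  If PUT would run past the end of the buffer, a
 * jump command (0x20000000 | pushbuf_base) is emitted so the GPU wraps back
 * to the start, and cur/put are reset to just after the NOUVEAU_DMA_SKIPS
 * no-ops.
 */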
int
nouveau_dma_wait(struct drm_device *dev, int size)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_drm_channel *dchan = &dev_priv->channel;
	uint32_t get;

	while (dchan->free < size) {
		get = READ_GET();

		if (dchan->put >= get) {
			dchan->free = dchan->max - dchan->cur;

			if (dchan->free < size) {
				dchan->push_free = 1;
				OUT_RING(0x20000000|dchan->chan->pushbuf_base);
				if (get <= NOUVEAU_DMA_SKIPS) {
					/* corner case - will be idle */
					if (dchan->put <= NOUVEAU_DMA_SKIPS)
						WRITE_PUT(NOUVEAU_DMA_SKIPS + 1);

					do {
						get = READ_GET();
					} while (get <= NOUVEAU_DMA_SKIPS);
				}

				WRITE_PUT(NOUVEAU_DMA_SKIPS);
				dchan->cur  = dchan->put = NOUVEAU_DMA_SKIPS;
				dchan->free = get - (NOUVEAU_DMA_SKIPS + 1);
			}
		} else {
			dchan->free = get - dchan->cur - 1;
		}
	}

	return 0;
}
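
/*
 * Hypothetical usage sketch, not part of the original file: other code in
 * the driver is assumed to submit commands the same way
 * nouveau_dma_channel_init() does above -- BEGIN_RING() opens a packet for
 * a method on a subchannel, one OUT_RING() per argument fills it, and
 * FIRE_RING() updates PUT so the GPU starts fetching.  The 0x0100 method
 * offset and the function itself are illustrative only and unused.
 */
static void __attribute__((unused))
nouveau_dma_example_nop(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_drm_channel *dchan = &dev_priv->channel;

	BEGIN_RING(NvSubM2MF, 0x0100 /* illustrative method offset */, 1);
	OUT_RING  (0);
	FIRE_RING();
}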