#include "drm.h"
#include "nouveau_drv.h"
#include "nv50_chan.h"
#include "pscnv_chan.h"
#include "nv50_vm.h"
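
/* NV50-family channel engine for pscnv: builds each channel's instance block
 * (page directory copy, RAMHT, RAMFC/PFIFO cache), sub-allocates instance and
 * DMA objects inside it, and registers the engine vtable with the driver. */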

static int nv50_chan_new (struct pscnv_chan *ch) {
        struct pscnv_vspace *vs = ch->vspace;
        struct drm_nouveau_private *dev_priv = ch->dev->dev_private;
        uint64_t size;
        uint32_t chan_pd;
        unsigned long flags;
        int i;
        /* determine size of the underlying VO... for normal channels, allocate
         * 64 KiB since they have to store the object heap. for the BAR fake
         * channel, we'll only need two objects, so keep it minimal. */
        if (ch->cid >= 0)
                size = 0x10000;
        else if (dev_priv->chipset == 0x50)
                size = 0x6000;
        else
                size = 0x5000;
        ch->bo = pscnv_mem_alloc(vs->dev, size, PSCNV_GEM_CONTIG,
                        0, (ch->cid == -1 ? 0xc5a2ba7 : 0xc5a2f1f0));
        if (!ch->bo)
                return -ENOMEM;
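
        /* ch->handle is the instance block's VRAM address in 4 KiB units; it
         * is published under the chan lock so channel lookups see a consistent
         * value.  map_kernel() makes the BO accessible for the nv_wv32()
         * writes below. */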
        spin_lock_irqsave(&dev_priv->chan->ch_lock, flags);
        ch->handle = ch->bo->start >> 12;
        spin_unlock_irqrestore(&dev_priv->chan->ch_lock, flags);

        dev_priv->vm->map_kernel(ch->bo);
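
        /* Mirror the vspace's page directory into the channel's instance
         * block: one 8-byte PDE per page table, with the low bits 0x3 set on
         * present entries and absent slots zeroed.  The vspace lock also
         * covers adding the channel to chan_list, so later page-table changes
         * can be propagated to every channel sharing the vspace. */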
        mutex_lock(&vs->lock);
        list_add(&ch->vspace_list, &nv50_vs(vs)->chan_list);
        if (dev_priv->chipset == 0x50)
                chan_pd = NV50_CHAN_PD;
        else
                chan_pd = NV84_CHAN_PD;
        for (i = 0; i < NV50_VM_PDE_COUNT; i++) {
                if (nv50_vs(vs)->pt[i]) {
                        nv_wv32(ch->bo, chan_pd + i * 8 + 4, nv50_vs(vs)->pt[i]->start >> 32);
                        nv_wv32(ch->bo, chan_pd + i * 8, nv50_vs(vs)->pt[i]->start | 0x3);
                } else {
                        nv_wv32(ch->bo, chan_pd + i * 8, 0);
                }
        }
        mutex_unlock(&vs->lock);

        ch->instpos = chan_pd + NV50_VM_PDE_COUNT * 8;
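
        /* Only real channels (cid >= 0) get a RAMHT for object handles and, on
         * NV84+, a RAMFC plus a separate PFIFO cache BO; the BAR fake channel
         * skips all of this. */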
        if (ch->cid >= 0) {
                ch->ramht.bo = ch->bo;
                ch->ramht.bits = 9;
                ch->ramht.offset = nv50_chan_iobj_new(ch, 8 << ch->ramht.bits);
                for (i = 0; i < (8 << ch->ramht.bits); i += 8)
                        nv_wv32(ch->ramht.bo, ch->ramht.offset + i + 4, 0);

                if (dev_priv->chipset == 0x50) {
                        ch->ramfc = 0;
                } else {
                        /* actually, addresses of these two are NOT relative to
                         * the channel struct on NV84+, and can be anywhere in
                         * VRAM, but we stuff them inside the channel struct
                         * anyway for simplicity. */
                        ch->ramfc = nv50_chan_iobj_new(ch, 0x100);
                        ch->cache = pscnv_mem_alloc(vs->dev, 0x1000, PSCNV_GEM_CONTIG,
                                        0, 0xf1f0cace);
                        if (!ch->cache) {
                                spin_lock_irqsave(&dev_priv->chan->ch_lock, flags);
                                ch->handle = 0;
                                spin_unlock_irqrestore(&dev_priv->chan->ch_lock, flags);
                                pscnv_mem_free(ch->bo);
                                return -ENOMEM;
                        }
                }
                if (dev_priv->chipset != 0x50) {
                        nv_wr32(vs->dev, 0x2600 + ch->cid * 4, (ch->bo->start + ch->ramfc) >> 8);
                } else {
                        nv_wr32(vs->dev, 0x2600 + ch->cid * 4, ch->bo->start >> 12);
                }
        }
        dev_priv->vm->bar_flush(vs->dev);
        return 0;
}
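
/* Bump-allocate size bytes of instance memory inside the channel's BO and
 * return the offset, or 0 if the BO is exhausted.  Individual allocations are
 * never freed; see the comment below. */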
int
nv50_chan_iobj_new(struct pscnv_chan *ch, uint32_t size) {
        /* XXX: maybe do this "properly" one day?
         *
         * Why we don't implement _del for instance objects:
         *  - Usually, a bounded, constant number of them is allocated
         *    for any given channel, and the used set doesn't change
         *    much during the channel's lifetime.
         *  - Since instance objects are stored inside the main
         *    VO of the channel, the storage will be freed on channel
         *    close anyway.
         *  - We cannot easily tell what objects are currently in use
         *    by PGRAPH and maybe other execution engines -- the user
         *    could cheat us.  Caching doesn't help either.
         */
        int res;
        /* keep instance objects 16-byte aligned */
        size += 0xf;
        size &= ~0xf;
        spin_lock(&ch->instlock);
        if (ch->instpos + size > ch->bo->size) {
                spin_unlock(&ch->instlock);
                return 0;
        }
        res = ch->instpos;
        ch->instpos += size;
        spin_unlock(&ch->instlock);
        return res;
}
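
/* Write a DMA object (ctxdma) into instance memory: six dwords describing the
 * linear range [start, start + size - 1], with the caller-supplied type in the
 * first dword.  The meaning of the trailing 0x80000 word isn't fully known. */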
/* XXX: we'll possibly want to break down type and/or add mysterious flags5
 * when we know more. */
int
nv50_chan_dmaobj_new(struct pscnv_chan *ch, uint32_t type, uint64_t start, uint64_t size) {
        struct drm_device *dev = ch->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        uint64_t end = start + size - 1;
        int res = nv50_chan_iobj_new(ch, 0x18);
        if (!res)
                return 0;

        nv_wv32(ch->bo, res + 0x00, type);
        nv_wv32(ch->bo, res + 0x04, end);
        nv_wv32(ch->bo, res + 0x08, start);
        nv_wv32(ch->bo, res + 0x0c, (end >> 32) << 24 | (start >> 32));
        nv_wv32(ch->bo, res + 0x10, 0);
        nv_wv32(ch->bo, res + 0x14, 0x80000);
        dev_priv->vm->bar_flush(dev);

        return res;
}
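
/* Channel teardown: unpublish the handle under the chan lock, free the
 * instance BO and, when present, the PFIFO cache BO, then unlink the channel
 * from its vspace's chan_list. */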
static void nv50_chan_free(struct pscnv_chan *ch) {
        struct drm_nouveau_private *dev_priv = ch->dev->dev_private;
        unsigned long flags;
        spin_lock_irqsave(&dev_priv->chan->ch_lock, flags);
        ch->handle = 0;
        spin_unlock_irqrestore(&dev_priv->chan->ch_lock, flags);
        pscnv_mem_free(ch->bo);
        if (ch->cache)
                pscnv_mem_free(ch->cache);
        mutex_lock(&ch->vspace->lock);
        list_del(&ch->vspace_list);
        mutex_unlock(&ch->vspace->lock);
}
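
/* Engine takedown: free the nv50_chan_engine wrapper allocated by
 * nv50_chan_init and detach it from dev_priv. */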
static void
nv50_chan_takedown(struct drm_device *dev) {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv50_chan_engine *che = nv50_ch(dev_priv->chan);
        kfree(che);
        dev_priv->chan = NULL;
}
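
/* Engine init: allocate the wrapper, point the pscnv_chan_engine vtable at the
 * NV50 implementations above, and expose channel IDs 1..126 to the channel
 * allocator via ch_min/ch_max. */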
int
nv50_chan_init(struct drm_device *dev) {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv50_chan_engine *che = kzalloc(sizeof *che, GFP_KERNEL);
        if (!che) {
                NV_ERROR(dev, "CH: Couldn't alloc engine\n");
                return -ENOMEM;
        }
        che->base.takedown = nv50_chan_takedown;
        che->base.do_chan_new = nv50_chan_new;
        che->base.do_chan_free = nv50_chan_free;
        dev_priv->chan = &che->base;
        spin_lock_init(&dev_priv->chan->ch_lock);
        dev_priv->chan->ch_min = 1;
        dev_priv->chan->ch_max = 126;
        return 0;
}