[pscnv.git] / pscnv / nv50_chan.c
#include "drm.h"
#include "nouveau_drv.h"
#include "nv50_chan.h"
#include "pscnv_chan.h"
#include "nv50_vm.h"

static int nv50_chan_new (struct pscnv_chan *ch) {
        struct pscnv_vspace *vs = ch->vspace;
        struct drm_nouveau_private *dev_priv = ch->dev->dev_private;
        uint64_t size;
        uint32_t chan_pd;
        unsigned long flags;
        int i;
        /* determine size of underlying VO... for normal channels,
         * allocate 64kiB since they have to store the objects
         * heap. for the BAR fake channel, we'll only need two objects,
         * so keep it minimal */
        if (ch->cid >= 0)
                size = 0x10000;
        else if (dev_priv->chipset == 0x50)
                size = 0x6000;
        else
                size = 0x5000;
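        /* the last argument is an allocation cookie; the values look like
         * hex-speak tags telling BAR and FIFO channel blocks apart,
         * apparently only used to identify the allocation when debugging. */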
        ch->bo = pscnv_mem_alloc(vs->dev, size, PSCNV_GEM_CONTIG,
                        0, (ch->cid == -1 ? 0xc5a2ba7 : 0xc5a2f1f0));
        if (!ch->bo)
                return -ENOMEM;
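
        /* the channel handle is the VRAM address of the channel block in
         * 4 KiB units; take ch_lock so that interrupt-time handle lookups
         * never see a half-updated value. */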
        spin_lock_irqsave(&dev_priv->chan->ch_lock, flags);
        ch->handle = ch->bo->start >> 12;
        spin_unlock_irqrestore(&dev_priv->chan->ch_lock, flags);
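
        /* vid == -1 apparently marks the fake vspace backing the BAR
         * channel; only channels in real vspaces get a kernel mapping of
         * their channel block here. */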
        if (vs->vid != -1)
                dev_priv->vm->map_kernel(ch->bo);

        mutex_lock(&vs->lock);
        list_add(&ch->vspace_list, &nv50_vs(vs)->chan_list);
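        /* the page directory sits at a chipset-dependent offset inside the
         * channel block; fill it with PDEs pointing at the vspace's page
         * tables. */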
        if (dev_priv->chipset == 0x50)
                chan_pd = NV50_CHAN_PD;
        else
                chan_pd = NV84_CHAN_PD;
        for (i = 0; i < NV50_VM_PDE_COUNT; i++) {
                if (nv50_vs(vs)->pt[i]) {
                        nv_wv32(ch->bo, chan_pd + i * 8 + 4, nv50_vs(vs)->pt[i]->start >> 32);
                        nv_wv32(ch->bo, chan_pd + i * 8, nv50_vs(vs)->pt[i]->start | 0x3);
                } else {
                        nv_wv32(ch->bo, chan_pd + i * 8, 0);
                }
        }
        mutex_unlock(&vs->lock);
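
        /* instance objects are carved out of the channel block right after
         * the page directory */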
        ch->instpos = chan_pd + NV50_VM_PDE_COUNT * 8;

        if (ch->cid >= 0) {
                int i;
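                /* RAMHT: 2^9 = 512 entries of 8 bytes each, carved out of
                 * the channel block; clearing the context word at +4
                 * presumably marks each entry as unused. */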
                ch->ramht.bo = ch->bo;
                ch->ramht.bits = 9;
                ch->ramht.offset = nv50_chan_iobj_new(ch, 8 << ch->ramht.bits);
                for (i = 0; i < (8 << ch->ramht.bits); i += 8)
                        nv_wv32(ch->ramht.bo, ch->ramht.offset + i + 4, 0);
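
                /* on the original NV50, RAMFC is fixed at offset 0 of the
                 * channel block, so there is nothing to allocate here. */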
                if (dev_priv->chipset == 0x50) {
                        ch->ramfc = 0;
                } else {
                        /* actually, addresses of these two are NOT relative
                         * to the channel struct on NV84+, and can be anywhere
                         * in VRAM, but we stuff them inside the channel
                         * struct anyway for simplicity. */
                        ch->ramfc = nv50_chan_iobj_new(ch, 0x100);
                        ch->cache = pscnv_mem_alloc(vs->dev, 0x1000, PSCNV_GEM_CONTIG,
                                        0, 0xf1f0cace);
                        if (!ch->cache) {
                                spin_lock_irqsave(&dev_priv->chan->ch_lock, flags);
                                ch->handle = 0;
                                spin_unlock_irqrestore(&dev_priv->chan->ch_lock, flags);
                                pscnv_mem_free(ch->bo);
                                return -ENOMEM;
                        }
                }
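
                /* point PFIFO's channel table at the new channel: the RAMFC
                 * address in 256-byte units on NV84+, or the channel block
                 * address in 4 KiB units on the original NV50. */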
                if (dev_priv->chipset != 0x50) {
                        nv_wr32(vs->dev, 0x2600 + ch->cid * 4, (ch->bo->start + ch->ramfc) >> 8);
                } else {
                        nv_wr32(vs->dev, 0x2600 + ch->cid * 4, ch->bo->start >> 12);
                }
        }
        dev_priv->vm->bar_flush(vs->dev);
        return 0;
}
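
/* Bump allocator for instance objects inside the channel block: returns a
 * 16-byte-aligned offset into the block, or 0 when the block is full.
 * Allocations are never freed individually -- see the comment below. */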
int
nv50_chan_iobj_new(struct pscnv_chan *ch, uint32_t size) {
        /* XXX: maybe do this "properly" one day?
         *
         * Why we don't implement _del for instance objects:
         *  - Usually, a bounded, constant number of them is allocated
         *    for any given channel, and the used set doesn't change
         *    much during the channel's lifetime
         *  - Since instance objects are stored inside the main
         *    VO of the channel, the storage will be freed on channel
         *    close anyway
         *  - We cannot easily tell what objects are currently in use
         *    by PGRAPH and maybe other execution engines -- the user
         *    could cheat us. Caching doesn't help either.
         */
        int res;
        size += 0xf;
        size &= ~0xf;
        spin_lock(&ch->instlock);
        if (ch->instpos + size > ch->bo->size) {
                spin_unlock(&ch->instlock);
                return 0;
        }
        res = ch->instpos;
        ch->instpos += size;
        spin_unlock(&ch->instlock);
        return res;
}

/* XXX: we'll possibly want to break down type and/or add mysterious flags5
 * when we know more. */
int
nv50_chan_dmaobj_new(struct pscnv_chan *ch, uint32_t type, uint64_t start, uint64_t size) {
        struct drm_device *dev = ch->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        uint64_t end = start + size - 1;
        int res = nv50_chan_iobj_new (ch, 0x18);
        if (!res)
                return 0;
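
        /* 0x18-byte DMA object: word 0x00 takes the class/type bits, 0x04
         * and 0x08 the low 32 bits of the limit and base, and 0x0c packs
         * the high bits of both; the last two words look like fixed flags. */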
        nv_wv32(ch->bo, res + 0x00, type);
        nv_wv32(ch->bo, res + 0x04, end);
        nv_wv32(ch->bo, res + 0x08, start);
        nv_wv32(ch->bo, res + 0x0c, (end >> 32) << 24 | (start >> 32));
        nv_wv32(ch->bo, res + 0x10, 0);
        nv_wv32(ch->bo, res + 0x14, 0x80000);
        dev_priv->vm->bar_flush(dev);

        return res;
}

static void nv50_chan_free(struct pscnv_chan *ch) {
        struct drm_nouveau_private *dev_priv = ch->dev->dev_private;
        unsigned long flags;
        spin_lock_irqsave(&dev_priv->chan->ch_lock, flags);
        ch->handle = 0;
        spin_unlock_irqrestore(&dev_priv->chan->ch_lock, flags);
        pscnv_mem_free(ch->bo);
        if (ch->cache)
                pscnv_mem_free(ch->cache);
        mutex_lock(&ch->vspace->lock);
        list_del(&ch->vspace_list);
        mutex_unlock(&ch->vspace->lock);
}

static void
nv50_chan_takedown(struct drm_device *dev) {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv50_chan_engine *che = nv50_ch(dev_priv->chan);
        kfree(che);
}

int
nv50_chan_init(struct drm_device *dev) {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv50_chan_engine *che = kzalloc(sizeof *che, GFP_KERNEL);
        if (!che) {
                NV_ERROR(dev, "CH: Couldn't alloc engine\n");
                return -ENOMEM;
        }
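        /* hook the NV50 implementation into the generic channel engine */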
        che->base.takedown = nv50_chan_takedown;
        che->base.do_chan_new = nv50_chan_new;
        che->base.do_chan_free = nv50_chan_free;
        dev_priv->chan = &che->base;
        spin_lock_init(&dev_priv->chan->ch_lock);
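        /* leave channel ids 0 and 127 of PFIFO's 128 slots unused */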
        dev_priv->chan->ch_min = 1;
        dev_priv->chan->ch_max = 126;
        return 0;
}