drivers/gpu/drm/nouveau/nv04_instmem.c
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_ramht.h"

/* returns the size of a single channel's fifo context */
static int
nouveau_fifo_ctx_size(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->chipset >= 0x40)
		return 128;
	else if (dev_priv->chipset >= 0x17)
		return 64;

	return 32;
}

int nv04_instmem_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramht = NULL;
	u32 offset, length;
	int ret;

	/* RAMIN always available */
	dev_priv->ramin_available = true;

	/* Reserve space at end of VRAM for PRAMIN */
	if (dev_priv->card_type >= NV_40) {
		u32 vs = hweight8((nv_rd32(dev, 0x001540) & 0x0000ff00) >> 8);
		u32 rsvd;

		/* estimate grctx size, the magics come from nv40_grctx.c */
		if (dev_priv->chipset == 0x40)
			rsvd = 0x6aa0 * vs;
		else if (dev_priv->chipset < 0x43)
			rsvd = 0x4f00 * vs;
		else if (nv44_graph_class(dev))
			rsvd = 0x4980 * vs;
		else
			rsvd = 0x4a40 * vs;
		rsvd += 16 * 1024;
		rsvd *= dev_priv->engine.fifo.channels;

		/* pciegart table */
		if (pci_is_pcie(dev->pdev))
			rsvd += 512 * 1024;

		/* object storage */
		rsvd += 512 * 1024;

		dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096);
	} else {
		dev_priv->ramin_rsvd_vram = 512 * 1024;
	}

	/* Setup shared RAMHT */
	ret = nouveau_gpuobj_new_fake(dev, 0x10000, ~0, 4096,
				      NVOBJ_FLAG_ZERO_ALLOC, &ramht);
	if (ret)
		return ret;

	ret = nouveau_ramht_new(dev, ramht, &dev_priv->ramht);
	nouveau_gpuobj_ref(NULL, &ramht);
	if (ret)
		return ret;

	/* And RAMRO */
	ret = nouveau_gpuobj_new_fake(dev, 0x11200, ~0, 512,
				      NVOBJ_FLAG_ZERO_ALLOC, &dev_priv->ramro);
	if (ret)
		return ret;

	/* And RAMFC */
	length = dev_priv->engine.fifo.channels * nouveau_fifo_ctx_size(dev);
	switch (dev_priv->card_type) {
	case NV_40:
		offset = 0x20000;
		break;
	default:
		offset = 0x11400;
		break;
	}

	ret = nouveau_gpuobj_new_fake(dev, offset, ~0, length,
				      NVOBJ_FLAG_ZERO_ALLOC, &dev_priv->ramfc);
	if (ret)
		return ret;

	/* Only allow space after RAMFC to be used for object allocation */
	offset += length;

	/* It appears RAMRO (or something?) is controlled by 0x2220/0x2230
	 * on certain NV4x chipsets as well as RAMFC.  When 0x2230 == 0
	 * ("new style" control) the upper 16 bits of 0x2220 point at this
	 * other mysterious table that's clobbering important things.
	 *
	 * We're now pointing this at RAMIN+0x30000 to avoid RAMFC getting
	 * smashed to pieces on us, so reserve 0x30000-0x40000 too.
	 */
	if (dev_priv->card_type >= NV_40) {
		if (offset < 0x40000)
			offset = 0x40000;
	}

	ret = drm_mm_init(&dev_priv->ramin_heap, offset,
			  dev_priv->ramin_rsvd_vram - offset);
	if (ret) {
		NV_ERROR(dev, "Failed to init RAMIN heap: %d\n", ret);
		return ret;
	}

	return 0;
}

void
nv04_instmem_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	nouveau_ramht_ref(NULL, &dev_priv->ramht, NULL);
	nouveau_gpuobj_ref(NULL, &dev_priv->ramro);
	nouveau_gpuobj_ref(NULL, &dev_priv->ramfc);

	if (drm_mm_initialized(&dev_priv->ramin_heap))
		drm_mm_takedown(&dev_priv->ramin_heap);
}

int
nv04_instmem_suspend(struct drm_device *dev)
{
	return 0;
}

void
nv04_instmem_resume(struct drm_device *dev)
{
}

int
nv04_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan,
		 u32 size, u32 align)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_mm_node *ramin = NULL;

	/* carve a block out of the PRAMIN heap; retry if the atomic
	 * allocation fails
	 */
	do {
		if (drm_mm_pre_get(&dev_priv->ramin_heap))
			return -ENOMEM;

		spin_lock(&dev_priv->ramin_lock);
		ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0);
		if (ramin == NULL) {
			spin_unlock(&dev_priv->ramin_lock);
			return -ENOMEM;
		}

		ramin = drm_mm_get_block_atomic(ramin, size, align);
		spin_unlock(&dev_priv->ramin_lock);
	} while (ramin == NULL);

	gpuobj->node = ramin;
	gpuobj->vinst = ramin->start;
	return 0;
}

void
nv04_instmem_put(struct nouveau_gpuobj *gpuobj)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;

	/* return the object's block to the PRAMIN heap */
	spin_lock(&dev_priv->ramin_lock);
	drm_mm_put_block(gpuobj->node);
	gpuobj->node = NULL;
	spin_unlock(&dev_priv->ramin_lock);
}

int
nv04_instmem_map(struct nouveau_gpuobj *gpuobj)
{
	gpuobj->pinst = gpuobj->vinst;
	return 0;
}

void
nv04_instmem_unmap(struct nouveau_gpuobj *gpuobj)
{
}

void
nv04_instmem_flush(struct drm_device *dev)
{
}