/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
27 #include "nouveau_drv.h"
28 #include "nouveau_vm.h"
/*
 * Private instmem state for NVC0 (Fermi): the page directories and the
 * pseudo-channels whose instance memory backs the BAR1/BAR3 apertures,
 * plus the page directory shared by the per-channel VM.
 *
 * (Fix: restore the closing brace/semicolon lost in the mangled source.)
 */
struct nvc0_instmem_priv {
	struct nouveau_gpuobj  *bar1_pgd;	/* page directory for the BAR1 vm */
	struct nouveau_channel *bar1;		/* channel backing the BAR1 aperture */
	struct nouveau_gpuobj  *bar3_pgd;	/* page directory for the BAR3 vm */
	struct nouveau_channel *bar3;		/* channel backing the BAR3 aperture */
	struct nouveau_gpuobj  *chan_pgd;	/* page directory for the shared channel vm */
};
39 nvc0_instmem_suspend(struct drm_device
*dev
)
41 struct drm_nouveau_private
*dev_priv
= dev
->dev_private
;
43 dev_priv
->ramin_available
= false;
48 nvc0_instmem_resume(struct drm_device
*dev
)
50 struct drm_nouveau_private
*dev_priv
= dev
->dev_private
;
51 struct nvc0_instmem_priv
*priv
= dev_priv
->engine
.instmem
.priv
;
53 nv_mask(dev
, 0x100c80, 0x00000001, 0x00000000);
54 nv_wr32(dev
, 0x001704, 0x80000000 | priv
->bar1
->ramin
->vinst
>> 12);
55 nv_wr32(dev
, 0x001714, 0xc0000000 | priv
->bar3
->ramin
->vinst
>> 12);
56 dev_priv
->ramin_available
= true;
60 nvc0_channel_del(struct nouveau_channel
**pchan
)
62 struct nouveau_channel
*chan
;
69 nouveau_vm_ref(NULL
, &chan
->vm
, NULL
);
70 if (drm_mm_initialized(&chan
->ramin_heap
))
71 drm_mm_takedown(&chan
->ramin_heap
);
72 nouveau_gpuobj_ref(NULL
, &chan
->ramin
);
77 nvc0_channel_new(struct drm_device
*dev
, u32 size
, struct nouveau_vm
*vm
,
78 struct nouveau_channel
**pchan
,
79 struct nouveau_gpuobj
*pgd
, u64 vm_size
)
81 struct nouveau_channel
*chan
;
84 chan
= kzalloc(sizeof(*chan
), GFP_KERNEL
);
89 ret
= nouveau_gpuobj_new(dev
, NULL
, size
, 0x1000, 0, &chan
->ramin
);
91 nvc0_channel_del(&chan
);
95 ret
= drm_mm_init(&chan
->ramin_heap
, 0x1000, size
- 0x1000);
97 nvc0_channel_del(&chan
);
101 ret
= nouveau_vm_ref(vm
, &chan
->vm
, NULL
);
103 nvc0_channel_del(&chan
);
107 nv_wo32(chan
->ramin
, 0x0200, lower_32_bits(pgd
->vinst
));
108 nv_wo32(chan
->ramin
, 0x0204, upper_32_bits(pgd
->vinst
));
109 nv_wo32(chan
->ramin
, 0x0208, lower_32_bits(vm_size
- 1));
110 nv_wo32(chan
->ramin
, 0x020c, upper_32_bits(vm_size
- 1));
117 nvc0_instmem_init(struct drm_device
*dev
)
119 struct drm_nouveau_private
*dev_priv
= dev
->dev_private
;
120 struct nouveau_instmem_engine
*pinstmem
= &dev_priv
->engine
.instmem
;
121 struct pci_dev
*pdev
= dev
->pdev
;
122 struct nvc0_instmem_priv
*priv
;
123 struct nouveau_vm
*vm
= NULL
;
126 priv
= kzalloc(sizeof(*priv
), GFP_KERNEL
);
129 pinstmem
->priv
= priv
;
132 ret
= nouveau_vm_new(dev
, 0, pci_resource_len(pdev
, 3), 0,
137 ret
= nouveau_gpuobj_new(dev
, NULL
,
138 (pci_resource_len(pdev
, 3) >> 12) * 8, 0,
139 NVOBJ_FLAG_DONT_MAP
|
140 NVOBJ_FLAG_ZERO_ALLOC
,
141 &dev_priv
->bar3_vm
->pgt
[0].obj
[0]);
144 dev_priv
->bar3_vm
->pgt
[0].refcount
[0] = 1;
146 nv50_instmem_map(dev_priv
->bar3_vm
->pgt
[0].obj
[0]);
148 ret
= nouveau_gpuobj_new(dev
, NULL
, 0x8000, 4096,
149 NVOBJ_FLAG_ZERO_ALLOC
, &priv
->bar3_pgd
);
153 ret
= nouveau_vm_ref(dev_priv
->bar3_vm
, &vm
, priv
->bar3_pgd
);
156 nouveau_vm_ref(NULL
, &vm
, NULL
);
158 ret
= nvc0_channel_new(dev
, 8192, dev_priv
->bar3_vm
, &priv
->bar3
,
159 priv
->bar3_pgd
, pci_resource_len(dev
->pdev
, 3));
164 ret
= nouveau_vm_new(dev
, 0, pci_resource_len(pdev
, 1), 0, &vm
);
168 ret
= nouveau_gpuobj_new(dev
, NULL
, 0x8000, 4096,
169 NVOBJ_FLAG_ZERO_ALLOC
, &priv
->bar1_pgd
);
173 ret
= nouveau_vm_ref(vm
, &dev_priv
->bar1_vm
, priv
->bar1_pgd
);
176 nouveau_vm_ref(NULL
, &vm
, NULL
);
178 ret
= nvc0_channel_new(dev
, 8192, dev_priv
->bar1_vm
, &priv
->bar1
,
179 priv
->bar1_pgd
, pci_resource_len(dev
->pdev
, 1));
184 ret
= nouveau_vm_new(dev
, 0, (1ULL << 40), 0x0008000000ULL
, &vm
);
188 ret
= nouveau_gpuobj_new(dev
, NULL
, 0x8000, 4096, 0, &priv
->chan_pgd
);
192 nouveau_vm_ref(vm
, &dev_priv
->chan_vm
, priv
->chan_pgd
);
193 nouveau_vm_ref(NULL
, &vm
, NULL
);
195 nvc0_instmem_resume(dev
);
198 nvc0_instmem_takedown(dev
);
203 nvc0_instmem_takedown(struct drm_device
*dev
)
205 struct drm_nouveau_private
*dev_priv
= dev
->dev_private
;
206 struct nvc0_instmem_priv
*priv
= dev_priv
->engine
.instmem
.priv
;
207 struct nouveau_vm
*vm
= NULL
;
209 nvc0_instmem_suspend(dev
);
211 nv_wr32(dev
, 0x1704, 0x00000000);
212 nv_wr32(dev
, 0x1714, 0x00000000);
214 nouveau_vm_ref(NULL
, &dev_priv
->chan_vm
, priv
->chan_pgd
);
215 nouveau_gpuobj_ref(NULL
, &priv
->chan_pgd
);
217 nvc0_channel_del(&priv
->bar1
);
218 nouveau_vm_ref(NULL
, &dev_priv
->bar1_vm
, priv
->bar1_pgd
);
219 nouveau_gpuobj_ref(NULL
, &priv
->bar1_pgd
);
221 nvc0_channel_del(&priv
->bar3
);
222 nouveau_vm_ref(dev_priv
->bar3_vm
, &vm
, NULL
);
223 nouveau_vm_ref(NULL
, &vm
, priv
->bar3_pgd
);
224 nouveau_gpuobj_ref(NULL
, &priv
->bar3_pgd
);
225 nouveau_gpuobj_ref(NULL
, &dev_priv
->bar3_vm
->pgt
[0].obj
[0]);
226 nouveau_vm_ref(NULL
, &dev_priv
->bar3_vm
, NULL
);
228 dev_priv
->engine
.instmem
.priv
= NULL
;