/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_drv.h"
#include "nouveau_mm.h"
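
/*
 * Memory-type lookup table, indexed by the low 7 bits of a tile-flags /
 * memtype value.  A zero entry means the type cannot be used for VRAM
 * allocations; the non-zero value is passed to nouveau_mm_get() as the
 * allocation type.  (The distinction between 1 and 2 is inferred from how
 * the table is consumed below, not from documentation.)
 */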
static int types[0x80] = {
	1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 0, 0,
	0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2,
	1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0
};
bool
nv50_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
{
	int type = (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) >> 8;

	if (likely(type < ARRAY_SIZE(types) && types[type]))
		return true;
	return false;
}
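
/*
 * Release a VRAM allocation: hand every region on mem->regions back to the
 * VRAM allocator, release the compression tag block (if one was reserved)
 * and free the nouveau_mem itself.
 */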
void
nv50_vram_del(struct drm_device *dev, struct nouveau_mem **pmem)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_mm *mm = dev_priv->engine.vram.mm;
	struct nouveau_mm_node *this;
	struct nouveau_mem *mem;

	mem = *pmem;
	*pmem = NULL;
	if (unlikely(mem == NULL))
		return;

	mutex_lock(&mm->mutex);
	while (!list_empty(&mem->regions)) {
		this = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);

		list_del(&this->rl_entry);
		nouveau_mm_put(mm, this);
	}

	if (mem->tag) {
		drm_mm_put_block(mem->tag);
		mem->tag = NULL;
	}
	mutex_unlock(&mm->mutex);

	kfree(mem);
}
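
/*
 * Allocate VRAM.  'memtype' packs a compression selector in bits 9:8 and
 * the surface type in bits 6:0, matching the decode of 'comp' and 'type'
 * below.  If compression is requested, a block of compression tags is
 * reserved from the PFB tag heap; if that fails, the allocation falls
 * back to uncompressed.
 */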
int
nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
	      u32 memtype, struct nouveau_mem **pmem)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_mm *mm = dev_priv->engine.vram.mm;
	struct nouveau_mm_node *r;
	struct nouveau_mem *mem;
	int comp = (memtype & 0x300) >> 8;
	int type = (memtype & 0x07f);
	int ret;

	if (!types[type])
		return -EINVAL;
	size >>= 12;
	align >>= 12;
	size_nc >>= 12;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mutex_lock(&mm->mutex);
	if (comp) {
		if (align == 16) {
			struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
			int n = (size >> 4) * comp;

			mem->tag = drm_mm_search_free(&pfb->tag_heap, n, 0, 0);
			if (mem->tag)
				mem->tag = drm_mm_get_block(mem->tag, n, 0);
		}

		if (unlikely(!mem->tag))
			comp = 0;
	}

	INIT_LIST_HEAD(&mem->regions);
	mem->dev = dev_priv->dev;
	mem->memtype = (comp << 7) | type;
	mem->size = size;

	do {
		ret = nouveau_mm_get(mm, types[type], size, size_nc, align, &r);
		if (ret) {
			mutex_unlock(&mm->mutex);
			nv50_vram_del(dev, &mem);
			return ret;
		}

		list_add_tail(&r->rl_entry, &mem->regions);
		size -= r->length;
	} while (size);
	mutex_unlock(&mm->mutex);

	r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
	mem->offset = (u64)r->offset << 12;
	*pmem = mem;
	return 0;
}
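
/*
 * Work out the VRAM row block size from the memory controller config
 * registers: the enabled partition count, bank count and column bits give
 * the row size, which is then cross-checked against the reported VRAM
 * size.  The register fields are interpreted as the decode below implies.
 */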
static u32
nv50_vram_rblock(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int i, parts, colbits, rowbitsa, rowbitsb, banks;
	u64 rowsize, predicted;
	u32 r0, r4, rt, ru, rblock_size;

	r0 = nv_rd32(dev, 0x100200);
	r4 = nv_rd32(dev, 0x100204);
	rt = nv_rd32(dev, 0x100250);
	ru = nv_rd32(dev, 0x001540);
	NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);

	for (i = 0, parts = 0; i < 8; i++) {
		if (ru & (0x00010000 << i))
			parts++;
	}

	colbits  =  (r4 & 0x0000f000) >> 12;
	rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
	rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
	banks    = ((r4 & 0x01000000) ? 8 : 4);

	rowsize = parts * banks * (1 << colbits) * 8;
	predicted = rowsize << rowbitsa;
	if (r0 & 0x00000004)
		predicted += rowsize << rowbitsb;

	if (predicted != dev_priv->vram_size) {
		NV_WARN(dev, "memory controller reports %dMiB VRAM\n",
			(u32)(dev_priv->vram_size >> 20));
		NV_WARN(dev, "we calculated %dMiB VRAM\n",
			(u32)(predicted >> 20));
	}

	rblock_size = rowsize;
	if (rt & 1)
		rblock_size *= 3;

	NV_DEBUG(dev, "rblock %d bytes\n", rblock_size);
	return rblock_size;
}
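
/*
 * Set up the VRAM mm.  Register 0x10020c reports the amount of VRAM; its
 * low byte appears to carry bits 39:32 of the size, so it is folded into
 * the upper bits and then masked off.  The first 256KiB (VGA memory) and
 * the last 1MiB (VBIOS etc.) are kept out of the allocator.
 */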
int
nv50_vram_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
	const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
	const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
	u32 rblock, length;

	dev_priv->vram_size  = nv_rd32(dev, 0x10020c);
	dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
	dev_priv->vram_size &= 0xffffffff00ULL;

	/* IGPs, no funky reordering happens here, they don't have VRAM */
	if (dev_priv->chipset == 0xaa ||
	    dev_priv->chipset == 0xac ||
	    dev_priv->chipset == 0xaf) {
		dev_priv->vram_sys_base = (u64)nv_rd32(dev, 0x100e10) << 12;
		rblock = 4096 >> 12;
	} else {
		rblock = nv50_vram_rblock(dev) >> 12;
	}

	length = (dev_priv->vram_size >> 12) - rsvd_head - rsvd_tail;

	return nouveau_mm_init(&vram->mm, rsvd_head, length, rblock);
}
void
nv50_vram_fini(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;

	nouveau_mm_fini(&vram->mm);
}