/*
 * Copyright (c) 2007, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
 *          Alan Cox <alan@linux.intel.com>
 */

#include <drm/drmP.h>
#include "psb_drv.h"
/*
 *      GTT resource allocator - manage page mappings in GTT space
 */
/**
 *      psb_gtt_mask_pte        -       generate GTT pte entry
 *      @pfn: page number to encode
 *      @type: type of memory in the GTT
 *
 *      Set the GTT entry for the appropriate memory type.
 */
static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
{
        uint32_t mask = PSB_PTE_VALID;

        if (type & PSB_MMU_CACHED_MEMORY)
                mask |= PSB_PTE_CACHED;
        if (type & PSB_MMU_RO_MEMORY)
                mask |= PSB_PTE_RO;
        if (type & PSB_MMU_WO_MEMORY)
                mask |= PSB_PTE_WO;

        return (pfn << PAGE_SHIFT) | mask;
}
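
/*
 * Worked example (illustrative only): a cached mapping of pfn 0x1234
 * yields (0x1234 << PAGE_SHIFT) | PSB_PTE_CACHED | PSB_PTE_VALID, i.e.
 * the page's physical address with the attribute and validity flags
 * packed into the low bits of the entry.
 */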
/**
 *      psb_gtt_entry           -       find the GTT entries for a gtt_range
 *      @dev: our DRM device
 *      @r: our GTT range
 *
 *      Given a gtt_range object return the GTT offset of the page table
 *      entries for this gtt_range.
 */
u32 *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
{
        struct drm_psb_private *dev_priv = dev->dev_private;
        unsigned long offset;

        offset = r->resource.start - dev_priv->gtt_mem->start;

        return dev_priv->gtt_map + (offset >> PAGE_SHIFT);
}
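
/*
 * Note: gtt_map points at 32-bit PTE slots, so shifting the byte offset
 * down by PAGE_SHIFT advances the returned pointer by exactly one slot
 * per mapped page.
 */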
/**
 *      psb_gtt_insert          -       put an object into the GTT
 *      @dev: our DRM device
 *      @r: our GTT range
 *
 *      Take our preallocated GTT range and insert the GEM object into
 *      the GTT. The caller must hold the gtt mutex.
 */
static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
{
        u32 *gtt_slot, pte;
        struct page **pages;
        int i;

        if (r->pages == NULL) {
                WARN_ON(1);
                return -EINVAL;
        }

        WARN_ON(r->stolen);     /* refcount these maybe ? */

        gtt_slot = psb_gtt_entry(dev, r);
        pages = r->pages;

        /* Make sure changes are visible to the GPU */
        set_pages_array_uc(pages, r->npage);

        /* Write our page entries into the GTT itself */
        for (i = 0; i < r->npage; i++) {
                pte = psb_gtt_mask_pte(page_to_pfn(*pages++), 0/*type*/);
                iowrite32(pte, gtt_slot++);
        }
        /* Make sure all the entries are set before we return */
        ioread32(gtt_slot - 1);

        return 0;
}
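
/*
 * Design note: the backing pages are flipped to uncached here and back
 * to write-back in psb_gtt_remove(), so the CPU and GPU never see the
 * range with conflicting cache attributes.
 */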
/**
 *      psb_gtt_remove          -       remove an object from the GTT
 *      @dev: our DRM device
 *      @r: our GTT range
 *
 *      Remove a preallocated GTT range from the GTT. Overwrite all the
 *      page table entries with the dummy page. The caller must hold the
 *      gtt mutex.
 */
static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
{
        struct drm_psb_private *dev_priv = dev->dev_private;
        u32 *gtt_slot, pte;
        int i;

        WARN_ON(r->stolen);

        gtt_slot = psb_gtt_entry(dev, r);
        pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page), 0);

        for (i = 0; i < r->npage; i++)
                iowrite32(pte, gtt_slot++);
        ioread32(gtt_slot - 1);
        set_pages_array_wb(r->pages, r->npage);
}
/**
 *      psb_gtt_attach_pages    -       attach and pin GEM pages
 *      @gt: the gtt range
 *
 *      Pin and build an in kernel list of the pages that back our GEM object.
 *      While we hold this the pages cannot be swapped out.
 */
static int psb_gtt_attach_pages(struct gtt_range *gt)
{
        struct inode *inode;
        struct address_space *mapping;
        int i;
        struct page *p;
        int pages = gt->gem.size / PAGE_SIZE;

        WARN_ON(gt->pages);

        /* This is the shared memory object that backs the GEM resource */
        inode = gt->gem.filp->f_path.dentry->d_inode;
        mapping = inode->i_mapping;

        gt->pages = kmalloc(pages * sizeof(struct page *), GFP_KERNEL);
        if (gt->pages == NULL)
                return -ENOMEM;
        gt->npage = pages;

        for (i = 0; i < pages; i++) {
                /* FIXME: review flags later */
                p = read_cache_page_gfp(mapping, i,
                                        __GFP_COLD | GFP_KERNEL);
                if (IS_ERR(p))
                        goto err;
                gt->pages[i] = p;
        }
        return 0;

err:
        while (i--)
                page_cache_release(gt->pages[i]);
        kfree(gt->pages);
        gt->pages = NULL;
        return PTR_ERR(p);
}
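
/*
 * While attached, the shmem pages are held by the references taken
 * above and cannot be reclaimed; psb_gtt_detach_pages() drops them
 * again.
 */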
/**
 *      psb_gtt_detach_pages    -       release and unpin GEM pages
 *      @gt: the gtt range
 *
 *      Undo the effect of psb_gtt_attach_pages. At this point the pages
 *      must have been removed from the GTT as they could now be paged out
 *      and change bus address.
 */
static void psb_gtt_detach_pages(struct gtt_range *gt)
{
        int i;

        for (i = 0; i < gt->npage; i++) {
                /* FIXME: do we need to force dirty */
                set_page_dirty(gt->pages[i]);
                page_cache_release(gt->pages[i]);
        }
        kfree(gt->pages);
        gt->pages = NULL;
}
/**
 *      psb_gtt_pin             -       pin pages into the GTT
 *      @gt: range to pin
 *
 *      Pin a set of pages into the GTT. The pins are refcounted so that
 *      multiple pins need multiple unpins to undo.
 *
 *      Non GEM backed objects treat this as a no-op as they are always GTT
 *      backed objects.
 */
int psb_gtt_pin(struct gtt_range *gt)
{
        int ret = 0;
        struct drm_device *dev = gt->gem.dev;
        struct drm_psb_private *dev_priv = dev->dev_private;

        mutex_lock(&dev_priv->gtt_mutex);

        if (gt->in_gart == 0 && gt->stolen == 0) {
                ret = psb_gtt_attach_pages(gt);
                if (ret < 0)
                        goto out;
                ret = psb_gtt_insert(dev, gt);
                if (ret < 0) {
                        psb_gtt_detach_pages(gt);
                        goto out;
                }
        }
        gt->in_gart++;
out:
        mutex_unlock(&dev_priv->gtt_mutex);
        return ret;
}
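
/*
 * Usage sketch (hypothetical caller; program_fb_base() is a made-up
 * helper, not part of this driver):
 *
 *      ret = psb_gtt_pin(gt);
 *      if (ret == 0) {
 *              program_fb_base(gt->offset);
 *              ...
 *              psb_gtt_unpin(gt);
 *      }
 */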
/**
 *      psb_gtt_unpin           -       Drop a GTT pin requirement
 *      @gt: range to unpin
 *
 *      Undoes the effect of psb_gtt_pin. On the last drop the GEM object
 *      will be removed from the GTT, which will also drop the page references
 *      and allow the VM to reclaim or page out the memory.
 *
 *      Non GEM backed objects treat this as a no-op as they are always GTT
 *      backed objects.
 */
void psb_gtt_unpin(struct gtt_range *gt)
{
        struct drm_device *dev = gt->gem.dev;
        struct drm_psb_private *dev_priv = dev->dev_private;

        mutex_lock(&dev_priv->gtt_mutex);

        WARN_ON(!gt->in_gart);

        gt->in_gart--;
        if (gt->in_gart == 0 && gt->stolen == 0) {
                psb_gtt_remove(dev, gt);
                psb_gtt_detach_pages(gt);
        }
        mutex_unlock(&dev_priv->gtt_mutex);
}
/*
 *      GTT resource allocator - allocate and manage GTT address space
 */
/**
 *      psb_gtt_alloc_range     -       allocate GTT address space
 *      @dev: Our DRM device
 *      @len: length (bytes) of address space required
 *      @name: resource name
 *      @backed: resource should be backed by stolen pages
 *
 *      Ask the kernel core to find us a suitable range of addresses
 *      to use for a GTT mapping.
 *
 *      Returns a gtt_range structure describing the object, or NULL on
 *      error. On successful return the resource is both allocated and marked
 *      as in use.
 */
struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
                                        const char *name, int backed)
{
        struct drm_psb_private *dev_priv = dev->dev_private;
        struct gtt_range *gt;
        struct resource *r = dev_priv->gtt_mem;
        int ret;
        unsigned long start, end;

        if (backed) {
                /* The start of the GTT is the stolen pages */
                start = r->start;
                end = r->start + dev_priv->gtt.stolen_size - 1;
        } else {
                /* The rest we will use for GEM backed objects */
                start = r->start + dev_priv->gtt.stolen_size;
                end = r->end;
        }

        gt = kzalloc(sizeof(struct gtt_range), GFP_KERNEL);
        if (gt == NULL)
                return NULL;
        gt->resource.name = name;
        gt->stolen = backed;
        gt->in_gart = backed;
        /* Ensure this is set for non GEM objects */
        gt->gem.dev = dev;
        ret = allocate_resource(dev_priv->gtt_mem, &gt->resource,
                                len, start, end, PAGE_SIZE, NULL, NULL);
        if (ret == 0) {
                gt->offset = gt->resource.start - r->start;
                return gt;
        }
        kfree(gt);
        return NULL;
}
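
/*
 * Usage sketch (illustrative): a 64K framebuffer carved out of the
 * stolen area would be requested as
 *
 *      gt = psb_gtt_alloc_range(dev, 64 * 1024, "fb", 1);
 *
 * and handed back with psb_gtt_free_range(dev, gt) when finished.
 */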
/**
 *      psb_gtt_free_range      -       release GTT address space
 *      @dev: our DRM device
 *      @gt: a mapping created with psb_gtt_alloc_range
 *
 *      Release a resource that was allocated with psb_gtt_alloc_range. If the
 *      object has been pinned by mmap users we clean this up here currently.
 */
void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt)
{
        /* Undo the mmap pin if we are destroying the object */
        if (gt->mmapping) {
                psb_gtt_unpin(gt);
                gt->mmapping = 0;
        }
        WARN_ON(gt->in_gart && !gt->stolen);
        release_resource(&gt->resource);
        kfree(gt);
}
void psb_gtt_alloc(struct drm_device *dev)
{
        struct drm_psb_private *dev_priv = dev->dev_private;

        init_rwsem(&dev_priv->gtt.sem);
}
void psb_gtt_takedown(struct drm_device *dev)
{
        struct drm_psb_private *dev_priv = dev->dev_private;

        if (dev_priv->gtt_map) {
                iounmap(dev_priv->gtt_map);
                dev_priv->gtt_map = NULL;
        }
        if (dev_priv->gtt_initialized) {
                pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
                                      dev_priv->gmch_ctrl);
                PSB_WVDC32(dev_priv->pge_ctl, PSB_PGETBL_CTL);
                (void) PSB_RVDC32(PSB_PGETBL_CTL);
        }
        if (dev_priv->vram_addr)
                iounmap(dev_priv->vram_addr);
}
int psb_gtt_init(struct drm_device *dev, int resume)
{
        struct drm_psb_private *dev_priv = dev->dev_private;
        struct psb_gtt *pg;
        unsigned gtt_pages;
        unsigned long stolen_size, vram_stolen_size;
        unsigned i, num_pages;
        unsigned pfn_base;
        uint32_t vram_pages;
        uint32_t dvmt_mode = 0;
        uint32_t pte;
        int ret = 0;

        mutex_init(&dev_priv->gtt_mutex);

        psb_gtt_alloc(dev);
        pg = &dev_priv->gtt;
        pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
        pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
                              dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);

        dev_priv->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
        PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
        (void) PSB_RVDC32(PSB_PGETBL_CTL);
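
        /*
         * The dummy PSB_RVDC32() read above flushes the posted write to
         * PSB_PGETBL_CTL, making sure the GTT is enabled before we size
         * and map it.
         */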
        /* The root resource we allocate address space from */
        dev_priv->gtt_initialized = 1;

        pg->gtt_phys_start = dev_priv->pge_ctl & PAGE_MASK;

        /*
         *      FIXME: the video MMU has a hardware bug when accessing
         *      0x0D0000000, so make the GATT start at 0xE0000000.
         */
        pg->mmu_gatt_start = 0xE0000000;
        pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
        gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE)
                                                        >> PAGE_SHIFT;

        if (pg->gtt_start == 0 || gtt_pages == 0) {
                dev_err(dev->dev, "GTT PCI BAR not initialized.\n");
                gtt_pages = 64;
                pg->gtt_start = dev_priv->pge_ctl;
        }
        pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
        pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
                                                        >> PAGE_SHIFT;

        dev_priv->gtt_mem = &dev->pdev->resource[PSB_GATT_RESOURCE];
        if (pg->gatt_pages == 0 || pg->gatt_start == 0) {
                static struct resource fudge;   /* Preferably peppermint */

                /* This can occur on CDV SDV systems. Fudge it in this case.
                   We really don't care what imaginary space is being allocated
                   at this point. */
                dev_err(dev->dev, "GATT PCI BAR not initialized.\n");
                pg->gatt_start = 0x40000000;
                pg->gatt_pages = (128 * 1024 * 1024) >> PAGE_SHIFT;
                fudge.start = 0x40000000;
                fudge.end = 0x40000000 + 128 * 1024 * 1024 - 1;
                fudge.name = "fudge";
                fudge.flags = IORESOURCE_MEM;
                dev_priv->gtt_mem = &fudge;
        }
        pci_read_config_dword(dev->pdev, PSB_BSM, &dev_priv->stolen_base);
        vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base
                                                        - PAGE_SIZE;

        stolen_size = vram_stolen_size;
        printk(KERN_INFO "Stolen memory information\n");
        printk(KERN_INFO "      base in RAM: 0x%x\n", dev_priv->stolen_base);
        printk(KERN_INFO "      size: %luK, calculated by (GTT RAM base) - (Stolen base), seems wrong\n",
                vram_stolen_size/1024);
        dvmt_mode = (dev_priv->gmch_ctrl >> 4) & 0x7;
        printk(KERN_INFO "      the correct size should be: %dM(dvmt mode=%d)\n",
                (dvmt_mode == 1) ? 1 : (2 << (dvmt_mode - 1)), dvmt_mode);
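
        /*
         * Worked example of the decode above (illustrative): a DVMT
         * mode field of 3 gives 2 << (3 - 1) = 8MB of stolen memory.
         */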
        if (resume && (gtt_pages != pg->gtt_pages) &&
            (stolen_size != pg->stolen_size)) {
                dev_err(dev->dev, "GTT resume error.\n");
                ret = -EINVAL;
                goto out_err;
        }

        pg->gtt_pages = gtt_pages;
        pg->stolen_size = stolen_size;
        dev_priv->vram_stolen_size = vram_stolen_size;
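
        /*
         * Note: on a resume pass the sizes recomputed above are checked
         * against the saved ones; if both the GTT page count and the
         * stolen size have changed, the hardware no longer matches the
         * GTT we would rebuild and the init is refused.
         */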
        /*
         *      Map the GTT and the stolen memory area
         */
        dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start,
                                                gtt_pages << PAGE_SHIFT);
        if (!dev_priv->gtt_map) {
                dev_err(dev->dev, "Failure to map gtt.\n");
                ret = -ENOMEM;
                goto out_err;
        }

        dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base, stolen_size);
        if (!dev_priv->vram_addr) {
                dev_err(dev->dev, "Failure to map stolen base.\n");
                ret = -ENOMEM;
                goto out_err;
        }
        /*
         *      Insert vram stolen pages into the GTT
         */
        pfn_base = dev_priv->stolen_base >> PAGE_SHIFT;
        vram_pages = num_pages = vram_stolen_size >> PAGE_SHIFT;
        printk(KERN_INFO "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
                num_pages, pfn_base << PAGE_SHIFT, 0);
        for (i = 0; i < num_pages; ++i) {
                pte = psb_gtt_mask_pte(pfn_base + i, 0);
                iowrite32(pte, dev_priv->gtt_map + i);
        }
        /*
         *      Init rest of GTT to the scratch page to avoid accidents
         *      or scribbles
         */
        pfn_base = page_to_pfn(dev_priv->scratch_page);
        pte = psb_gtt_mask_pte(pfn_base, 0);
        for (; i < gtt_pages; ++i)
                iowrite32(pte, dev_priv->gtt_map + i);

        (void) ioread32(dev_priv->gtt_map + i - 1);
        return 0;
out_err:
        psb_gtt_takedown(dev);
        return ret;
}