drivers/gpu/drm/ast/ast_ttm.c
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors: Dave Airlie <airlied@redhat.com>
 */
#include <drm/drmP.h>
#include "ast_drv.h"
#include <ttm/ttm_page_alloc.h>
static inline struct ast_private *
ast_bdev(struct ttm_bo_device *bd)
{
	return container_of(bd, struct ast_private, ttm.bdev);
}

static int
ast_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void
ast_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}
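
/*
 * Take references on the global TTM memory-accounting object and the
 * global BO state for this device; both are dropped again in
 * ast_ttm_global_release().
 */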
static int ast_ttm_global_init(struct ast_private *ast)
{
	struct drm_global_reference *global_ref;
	int r;

	global_ref = &ast->ttm.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &ast_ttm_mem_global_init;
	global_ref->release = &ast_ttm_mem_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM memory accounting subsystem.\n");
		return r;
	}

	ast->ttm.bo_global_ref.mem_glob =
		ast->ttm.mem_global_ref.object;
	global_ref = &ast->ttm.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		drm_global_item_unref(&ast->ttm.mem_global_ref);
		return r;
	}
	return 0;
}
static void
ast_ttm_global_release(struct ast_private *ast)
{
	if (ast->ttm.mem_global_ref.release == NULL)
		return;

	drm_global_item_unref(&ast->ttm.bo_global_ref.ref);
	drm_global_item_unref(&ast->ttm.mem_global_ref);
	ast->ttm.mem_global_ref.release = NULL;
}

static void ast_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
	struct ast_bo *bo;

	bo = container_of(tbo, struct ast_bo, bo);

	drm_gem_object_release(&bo->gem);
	kfree(bo);
}

static bool ast_ttm_bo_is_ast_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &ast_bo_ttm_destroy)
		return true;
	return false;
}
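
/*
 * Describe the memory domains a BO may be placed in: cacheable system
 * pages and fixed, mappable VRAM that defaults to write-combined caching.
 */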
static int
ast_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
		     struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}
static void
ast_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct ast_bo *astbo = ast_bo(bo);

	if (!ast_ttm_bo_is_ast_bo(bo))
		return;

	ast_ttm_placement(astbo, TTM_PL_FLAG_SYSTEM);
	*pl = astbo->placement;
}

static int ast_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct ast_bo *astbo = ast_bo(bo);

	return drm_vma_node_verify_access(&astbo->gem.vma_node, filp);
}
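
/*
 * Tell TTM how a memory region is reached by the CPU: system memory needs
 * no I/O mapping, VRAM is an offset into the PCI BAR 0 aperture.
 */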
static int ast_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
				  struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct ast_private *ast = ast_bdev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = pci_resource_start(ast->dev->pdev, 0);
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void ast_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}
static int ast_bo_move(struct ttm_buffer_object *bo,
		       bool evict, bool interruptible,
		       bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
}

static void ast_ttm_backend_destroy(struct ttm_tt *tt)
{
	ttm_tt_fini(tt);
	kfree(tt);
}

static struct ttm_backend_func ast_tt_backend_func = {
	.destroy = &ast_ttm_backend_destroy,
};
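
/*
 * The ast ttm_tt backend keeps no per-device state; backing pages come
 * straight from the common TTM page pool.
 */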
static struct ttm_tt *ast_ttm_tt_create(struct ttm_bo_device *bdev,
					unsigned long size, uint32_t page_flags,
					struct page *dummy_read_page)
{
	struct ttm_tt *tt;

	tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
	if (tt == NULL)
		return NULL;
	tt->func = &ast_tt_backend_func;
	if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
		kfree(tt);
		return NULL;
	}
	return tt;
}

static int ast_ttm_tt_populate(struct ttm_tt *ttm)
{
	return ttm_pool_populate(ttm);
}

static void ast_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	ttm_pool_unpopulate(ttm);
}
struct ttm_bo_driver ast_bo_driver = {
	.ttm_tt_create = ast_ttm_tt_create,
	.ttm_tt_populate = ast_ttm_tt_populate,
	.ttm_tt_unpopulate = ast_ttm_tt_unpopulate,
	.init_mem_type = ast_bo_init_mem_type,
	.evict_flags = ast_bo_evict_flags,
	.move = ast_bo_move,
	.verify_access = ast_bo_verify_access,
	.io_mem_reserve = &ast_ttm_io_mem_reserve,
	.io_mem_free = &ast_ttm_io_mem_free,
};
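
/*
 * Bring up the TTM memory manager for the device: global state, the BO
 * device itself, a VRAM range sized to the aperture, and write-combining
 * for the framebuffer BAR.
 */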
int ast_mm_init(struct ast_private *ast)
{
	int ret;
	struct drm_device *dev = ast->dev;
	struct ttm_bo_device *bdev = &ast->ttm.bdev;

	ret = ast_ttm_global_init(ast);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&ast->ttm.bdev,
				 ast->ttm.bo_global_ref.ref.object,
				 &ast_bo_driver, DRM_FILE_PAGE_OFFSET,
				 true);
	if (ret) {
		DRM_ERROR("Error initialising bo driver; %d\n", ret);
		return ret;
	}

	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
			     ast->vram_size >> PAGE_SHIFT);
	if (ret) {
		DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
		return ret;
	}

	ast->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
					pci_resource_len(dev->pdev, 0));

	return 0;
}

void ast_mm_fini(struct ast_private *ast)
{
	ttm_bo_device_release(&ast->ttm.bdev);

	ast_ttm_global_release(ast);

	arch_phys_wc_del(ast->fb_mtrr);
}
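
/*
 * Build bo->placements/bo->placement from the requested domain mask:
 * VRAM is write-combined, system memory accepts any caching mode, and an
 * empty mask falls back to system memory.
 */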
void ast_ttm_placement(struct ast_bo *bo, int domain)
{
	u32 c = 0;

	bo->placement.fpfn = 0;
	bo->placement.lpfn = 0;
	bo->placement.placement = bo->placements;
	bo->placement.busy_placement = bo->placements;
	if (domain & TTM_PL_FLAG_VRAM)
		bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
	if (domain & TTM_PL_FLAG_SYSTEM)
		bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	if (!c)
		bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	bo->placement.num_placement = c;
	bo->placement.num_busy_placement = c;
}
int ast_bo_create(struct drm_device *dev, int size, int align,
		  uint32_t flags, struct ast_bo **pastbo)
{
	struct ast_private *ast = dev->dev_private;
	struct ast_bo *astbo;
	size_t acc_size;
	int ret;

	astbo = kzalloc(sizeof(struct ast_bo), GFP_KERNEL);
	if (!astbo)
		return -ENOMEM;

	ret = drm_gem_object_init(dev, &astbo->gem, size);
	if (ret) {
		kfree(astbo);
		return ret;
	}

	astbo->bo.bdev = &ast->ttm.bdev;
	astbo->bo.bdev->dev_mapping = dev->dev_mapping;

	ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);

	acc_size = ttm_bo_dma_acc_size(&ast->ttm.bdev, size,
				       sizeof(struct ast_bo));

	ret = ttm_bo_init(&ast->ttm.bdev, &astbo->bo, size,
			  ttm_bo_type_device, &astbo->placement,
			  align >> PAGE_SHIFT, false, NULL, acc_size,
			  NULL, ast_bo_ttm_destroy);
	if (ret)
		return ret;

	*pastbo = astbo;
	return 0;
}
static inline u64 ast_bo_gpu_offset(struct ast_bo *bo)
{
	return bo->bo.offset;
}
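
/*
 * Pin a BO into the requested domain so it cannot be evicted, and return
 * its GPU offset. Pins are reference counted; only the first pin moves
 * and validates the buffer.
 */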
int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
{
	int i, ret;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = ast_bo_gpu_offset(bo);
		return 0;
	}

	ast_ttm_placement(bo, pl_flag);
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
	if (ret)
		return ret;

	bo->pin_count = 1;
	if (gpu_addr)
		*gpu_addr = ast_bo_gpu_offset(bo);
	return 0;
}
int ast_bo_unpin(struct ast_bo *bo)
{
	int i, ret;

	if (!bo->pin_count) {
		DRM_ERROR("unpin bad %p\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;

	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
	if (ret)
		return ret;

	return 0;
}
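
/*
 * Drop the last pin reference and force the BO back into system memory,
 * releasing any kernel mapping first.
 */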
int ast_bo_push_sysram(struct ast_bo *bo)
{
	int i, ret;

	if (!bo->pin_count) {
		DRM_ERROR("push_sysram on unpinned bo %p\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;

	if (bo->kmap.virtual)
		ttm_bo_kunmap(&bo->kmap);

	ast_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;

	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
	if (ret) {
		DRM_ERROR("pushing to system memory failed\n");
		return ret;
	}
	return 0;
}
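
/*
 * mmap entry point: offsets below DRM_FILE_PAGE_OFFSET are legacy DRM
 * maps, everything above is a TTM buffer object.
 */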
int ast_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct ast_private *ast;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return drm_mmap(filp, vma);

	file_priv = filp->private_data;
	ast = file_priv->minor->dev->dev_private;
	return ttm_bo_mmap(filp, vma, &ast->ttm.bdev);
}