drivers/gpu/drm/qxl/qxl_object.h
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */
#ifndef QXL_OBJECT_H
#define QXL_OBJECT_H

#include "qxl_drv.h"

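/* Take the TTM reservation lock on a qxl buffer object; failures other
 * than an interrupted wait (-ERESTARTSYS) are logged. */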
static inline int qxl_bo_reserve(struct qxl_bo *bo, bool no_wait)
{
        int r;

        r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
        if (unlikely(r != 0)) {
                if (r != -ERESTARTSYS) {
                        struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
                        dev_err(qdev->dev, "%p reserve failed\n", bo);
                }
                return r;
        }
        return 0;
}

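/* Drop the reservation taken by qxl_bo_reserve(). */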
static inline void qxl_bo_unreserve(struct qxl_bo *bo)
{
        ttm_bo_unreserve(&bo->tbo);
}

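/* GPU offset of the buffer object within its current TTM placement. */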
static inline u64 qxl_bo_gpu_offset(struct qxl_bo *bo)
{
        return bo->tbo.offset;
}

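/* Size of the buffer object in bytes, derived from its TTM page count. */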
static inline unsigned long qxl_bo_size(struct qxl_bo *bo)
{
        return bo->tbo.num_pages << PAGE_SHIFT;
}

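/* Fake offset handed to userspace for mmap()ing the buffer object. */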
static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
{
        return drm_vma_node_offset_addr(&bo->tbo.vma_node);
}

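/* Reserve the buffer object, wait on its pending fence (if any), and
 * optionally report the memory type it currently resides in. */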
static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
                              bool no_wait)
{
        int r;

        r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
        if (unlikely(r != 0)) {
                if (r != -ERESTARTSYS) {
                        struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
                        dev_err(qdev->dev, "%p reserve failed for wait\n",
                                bo);
                }
                return r;
        }
        spin_lock(&bo->tbo.bdev->fence_lock);
        if (mem_type)
                *mem_type = bo->tbo.mem.mem_type;
        if (bo->tbo.sync_obj)
                r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
        spin_unlock(&bo->tbo.bdev->fence_lock);
        ttm_bo_unreserve(&bo->tbo);
        return r;
}

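/* Prototypes for the qxl buffer-object helpers. */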
extern int qxl_bo_create(struct qxl_device *qdev,
                         unsigned long size,
                         bool kernel, bool pinned, u32 domain,
                         struct qxl_surface *surf,
                         struct qxl_bo **bo_ptr);
extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
extern void qxl_bo_kunmap(struct qxl_bo *bo);
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int page_offset);
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *map);
extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo);
extern void qxl_bo_unref(struct qxl_bo **bo);
extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr);
extern int qxl_bo_unpin(struct qxl_bo *bo);
extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned);
extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo);

#endif