/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include "qxl_drv.h"
#include "qxl_object.h"
/*
 * TODO: allocating a new gem(in qxl_bo) for each request.
 * This is wasteful since bo's are page aligned.
 */
static int qxl_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct qxl_device *qdev = dev->dev_private;
	struct drm_qxl_alloc *qxl_alloc = data;
	struct qxl_bo *qobj;
	uint32_t handle;
	u32 domain = QXL_GEM_DOMAIN_VRAM;
	int ret;

	if (qxl_alloc->size == 0) {
		DRM_ERROR("invalid size %d\n", qxl_alloc->size);
		return -EINVAL;
	}
	ret = qxl_gem_object_create_with_handle(qdev, file_priv, domain,
						qxl_alloc->size, NULL,
						&qobj, &handle);
	if (ret) {
		DRM_ERROR("%s: failed to create gem ret=%d\n", __func__, ret);
		return -ENOMEM;
	}
	qxl_alloc->handle = handle;
	return 0;
}
static int qxl_map_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct qxl_device *qdev = dev->dev_private;
	struct drm_qxl_map *qxl_map = data;

	return qxl_mode_dumb_mmap(file_priv, qdev->ddev, qxl_map->handle,
				  &qxl_map->offset);
}
struct qxl_reloc_info {
	int type;
	struct qxl_bo *dst_bo;
	uint32_t dst_offset;
	struct qxl_bo *src_bo;
	int src_offset;
};
/*
 * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's
 * are on vram).
 * *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
 */
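/*
 * info->dst_bo is either the command bo itself or a bo named by the reloc's
 * dst_handle; see how reloc_info[] is filled in qxl_process_single_command().
 */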
static void
apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
{
	void *reloc_page;

	reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
	*(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) =
		qxl_bo_physical_address(qdev, info->src_bo, info->src_offset);
	qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
}
static void
apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
{
	uint32_t id = 0;
	void *reloc_page;

	if (info->src_bo && !info->src_bo->is_primary)
		id = info->src_bo->surface_id;

	reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
	*(uint32_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = id;
	qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
}
/* return holding the reference to this object */
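/*
 * The reference taken here is dropped by the caller; see the reloc cleanup
 * loop at the end of qxl_process_single_command().
 */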
static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
					 struct drm_file *file_priv, uint64_t handle,
					 struct qxl_release *release)
{
	struct drm_gem_object *gobj;
	struct qxl_bo *qobj;
	int ret;

	gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle);
	if (!gobj)
		return NULL;

	qobj = gem_to_qxl_bo(gobj);

	ret = qxl_release_list_add(release, qobj);
	if (ret)
		return NULL;

	return qobj;
}
/*
 * Usage of execbuffer:
 * Relocations need to take into account the full QXLDrawable size.
 * However, the command as passed from user space must *not* contain the initial
 * QXLReleaseInfo struct (first XXX bytes)
 */
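/*
 * A rough sketch of the expected userspace call, using only the fields this
 * ioctl reads below; drmIoctl() and DRM_IOCTL_QXL_EXECBUFFER are the usual
 * libdrm spellings and are shown purely for illustration:
 *
 *	// drawable_body points at a QXLDrawable with the leading
 *	// QXLReleaseInfo stripped; reloc_array is a struct drm_qxl_reloc array
 *	struct drm_qxl_command cmd = {
 *		.type         = QXL_CMD_DRAW,
 *		.command      = (uintptr_t)drawable_body,
 *		.command_size = drawable_body_size,
 *		.relocs       = (uintptr_t)reloc_array,
 *		.relocs_num   = nr_relocs,
 *	};
 *	struct drm_qxl_execbuffer eb = {
 *		.commands_num = 1,
 *		.commands     = (uintptr_t)&cmd,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_QXL_EXECBUFFER, &eb);
 *
 * Reloc offsets into the command are relative to the start of the full
 * QXLDrawable; a reloc with dst_handle == 0 is rebased onto the command bo
 * by adding release->release_offset below.
 */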
static int qxl_process_single_command(struct qxl_device *qdev,
				      struct drm_qxl_command *cmd,
				      struct drm_file *file_priv)
{
	struct qxl_reloc_info *reloc_info;
	int release_type;
	struct qxl_release *release;
	struct qxl_bo *cmd_bo;
	void *fb_cmd;
	int i, j, ret, num_relocs;
	int unwritten;
	switch (cmd->type) {
	case QXL_CMD_DRAW:
		release_type = QXL_RELEASE_DRAWABLE;
		break;
	case QXL_CMD_SURFACE:
	default:
		DRM_DEBUG("Only draw commands in execbuffers\n");
		return -EINVAL;
	}

	if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info))
		return -EINVAL;

	if (!access_ok(VERIFY_READ,
		       (void *)(unsigned long)cmd->command,
		       cmd->command_size))
		return -EFAULT;
	reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num,
			     GFP_KERNEL);
	if (!reloc_info)
		return -ENOMEM;

	ret = qxl_alloc_release_reserved(qdev,
					 sizeof(union qxl_release_info) +
					 cmd->command_size,
					 release_type,
					 &release,
					 &cmd_bo);
	if (ret)
		goto out_free_reloc;
	/* TODO copy slow path code from i915 */
	fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo,
					 (release->release_offset & PAGE_SIZE));
	unwritten = __copy_from_user_inatomic_nocache(fb_cmd +
						      sizeof(union qxl_release_info) +
						      (release->release_offset & ~PAGE_SIZE),
						      (void *)(unsigned long)cmd->command,
						      cmd->command_size);
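	/*
	 * The copy runs under an atomic kmap, so a fault cannot be serviced
	 * here; any bytes left uncopied are reported in 'unwritten' and the
	 * command is rejected further down.  Before unmapping, stamp the
	 * drawable with the current device mm clock.
	 */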
	{
		struct qxl_drawable *draw = fb_cmd;

		draw->mm_time = qdev->rom->mm_clock;
	}

	qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
	if (unwritten) {
		DRM_ERROR("got unwritten %d\n", unwritten);
		ret = -EFAULT;
		goto out_free_release;
	}
	/* fill out reloc info structs */
	num_relocs = 0;
	for (i = 0; i < cmd->relocs_num; ++i) {
		struct drm_qxl_reloc reloc;

		if (DRM_COPY_FROM_USER(&reloc,
				       &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
				       sizeof(reloc))) {
			ret = -EFAULT;
			goto out_free_bos;
		}

		/* add the bos to the list of bos to validate -
		   need to validate first then process relocs? */
		if (reloc.reloc_type != QXL_RELOC_TYPE_BO &&
		    reloc.reloc_type != QXL_RELOC_TYPE_SURF) {
			DRM_DEBUG("unknown reloc type %d\n", reloc.reloc_type);
			ret = -EINVAL;
			goto out_free_bos;
		}
		reloc_info[i].type = reloc.reloc_type;
		if (reloc.dst_handle) {
			reloc_info[i].dst_bo = qxlhw_handle_to_bo(qdev, file_priv,
								  reloc.dst_handle, release);
			if (!reloc_info[i].dst_bo) {
				ret = -EINVAL;
				reloc_info[i].src_bo = NULL;
				goto out_free_bos;
			}
			reloc_info[i].dst_offset = reloc.dst_offset;
		} else {
			reloc_info[i].dst_bo = cmd_bo;
			reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset;
		}
		num_relocs++;
		/* look up the reloc src bo, if any */
		if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) {
			reloc_info[i].src_bo =
				qxlhw_handle_to_bo(qdev, file_priv,
						   reloc.src_handle, release);
			if (!reloc_info[i].src_bo) {
				if (reloc_info[i].dst_bo != cmd_bo)
					drm_gem_object_unreference_unlocked(&reloc_info[i].dst_bo->gem_base);
				ret = -EINVAL;
				goto out_free_bos;
			}
			reloc_info[i].src_offset = reloc.src_offset;
		} else {
			reloc_info[i].src_bo = NULL;
			reloc_info[i].src_offset = 0;
		}
	}
	/* validate all buffers */
	ret = qxl_release_reserve_list(release, false);
	if (ret)
		goto out_free_bos;
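	/*
	 * Reserving the release list locks and validates every bo collected
	 * via qxlhw_handle_to_bo() above, so the addresses patched in by
	 * apply_reloc() below stay valid until the release is fenced (or
	 * backed off on error).
	 */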
	for (i = 0; i < cmd->relocs_num; ++i) {
		if (reloc_info[i].type == QXL_RELOC_TYPE_BO)
			apply_reloc(qdev, &reloc_info[i]);
		else if (reloc_info[i].type == QXL_RELOC_TYPE_SURF)
			apply_surf_reloc(qdev, &reloc_info[i]);
	}

	ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
	if (ret)
		qxl_release_backoff_reserve_list(release);
	else
		qxl_release_fence_buffer_objects(release);
out_free_bos:
	for (j = 0; j < num_relocs; j++) {
		if (reloc_info[j].dst_bo != cmd_bo)
			drm_gem_object_unreference_unlocked(&reloc_info[j].dst_bo->gem_base);
		if (reloc_info[j].src_bo && reloc_info[j].src_bo != cmd_bo)
			drm_gem_object_unreference_unlocked(&reloc_info[j].src_bo->gem_base);
	}
out_free_release:
	if (ret)
		qxl_release_free(qdev, release);
out_free_reloc:
	kfree(reloc_info);
	return ret;
}
static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct qxl_device *qdev = dev->dev_private;
	struct drm_qxl_execbuffer *execbuffer = data;
	struct drm_qxl_command user_cmd;
	int cmd_num, ret;

	for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
		struct drm_qxl_command *commands =
			(struct drm_qxl_command *)(uintptr_t)execbuffer->commands;

		if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
				       sizeof(user_cmd)))
			return -EFAULT;

		ret = qxl_process_single_command(qdev, &user_cmd, file_priv);
		if (ret)
			return ret;
	}
	return 0;
}
static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct qxl_device *qdev = dev->dev_private;
	struct drm_qxl_update_area *update_area = data;
	struct qxl_rect area = {.left = update_area->left,
				.top = update_area->top,
				.right = update_area->right,
				.bottom = update_area->bottom};
	int ret;
	struct drm_gem_object *gobj = NULL;
	struct qxl_bo *qobj = NULL;

	if (update_area->left >= update_area->right ||
	    update_area->top >= update_area->bottom)
		return -EINVAL;

	gobj = drm_gem_object_lookup(dev, file, update_area->handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_qxl_bo(gobj);

	ret = qxl_bo_reserve(qobj, false);
	if (ret)
		goto out;

	if (!qobj->pin_count) {
		qxl_ttm_placement_from_domain(qobj, qobj->type, false);
		ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
				      true, false);
		if (unlikely(ret))
			goto out;
	}

	ret = qxl_bo_check_id(qdev, qobj);
	if (ret)
		goto out2;
	if (!qobj->surface_id)
		DRM_ERROR("got update area for surface with no id %d\n",
			  update_area->handle);
	ret = qxl_io_update_area(qdev, qobj, &area);

out2:
	qxl_bo_unreserve(qobj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return ret;
}
static int qxl_getparam_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct qxl_device *qdev = dev->dev_private;
	struct drm_qxl_getparam *param = data;

	switch (param->param) {
	case QXL_PARAM_NUM_SURFACES:
		param->value = qdev->rom->n_surfaces;
		break;
	case QXL_PARAM_MAX_RELOCS:
		param->value = QXL_MAX_RES;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv)
{
	struct qxl_device *qdev = dev->dev_private;
	struct drm_qxl_clientcap *param = data;
	int byte, idx;

	byte = param->index / 8;
	idx = param->index % 8;
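	/*
	 * client_capabilities is a bitmap stored as a byte array in the rom:
	 * capability 'index' is tested below as bit (index % 8) of
	 * byte (index / 8).
	 */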
	if (qdev->pdev->revision < 4)
		return -ENOSYS;

	if (byte >= sizeof(qdev->rom->client_capabilities))
		return -ENOSYS;

	if (qdev->rom->client_capabilities[byte] & (1 << idx))
		return 0;
	return -ENOSYS;
}
static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct qxl_device *qdev = dev->dev_private;
	struct drm_qxl_alloc_surf *param = data;
	struct qxl_bo *qobj;
	uint32_t handle;
	int ret;
	int size, actual_stride;
	struct qxl_surface surf;

	/* work out the size, then allocate a bo with a handle */
	actual_stride = param->stride < 0 ? -param->stride : param->stride;
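	/*
	 * A negative stride (presumably a bottom-up surface) only contributes
	 * its magnitude to the allocation size; one extra line of slack is
	 * added below.
	 */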
	size = actual_stride * param->height + actual_stride;
	surf.format = param->format;
	surf.width = param->width;
	surf.height = param->height;
	surf.stride = param->stride;

	ret = qxl_gem_object_create_with_handle(qdev, file,
						QXL_GEM_DOMAIN_SURFACE,
						size,
						&surf,
						&qobj, &handle);
	if (ret) {
		DRM_ERROR("%s: failed to create gem ret=%d\n",
			  __func__, ret);
		return -ENOMEM;
	}
	param->handle = handle;
	return ret;
}
const struct drm_ioctl_desc qxl_ioctls[] = {
	DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH|DRM_UNLOCKED),

	DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH|DRM_UNLOCKED),

	DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl,
			  DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl,
			  DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl,
			  DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl,
			  DRM_AUTH|DRM_UNLOCKED),

	DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl,
			  DRM_AUTH|DRM_UNLOCKED),
};
int qxl_max_ioctls = DRM_ARRAY_SIZE(qxl_ioctls);