/*
 * Copyright 2017 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *
 * Contributors:
 *    Tina Zhang <tina.zhang@intel.com>
 */
#include <linux/dma-buf.h>
#include <linux/vfio.h>

#include "i915_drv.h"
#include "gvt.h"

#define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))
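
/*
 * Thin wrappers around the hypervisor DMA pin/unpin hooks. Guest
 * framebuffer pages have to stay pinned for as long as the exported
 * GEM object references them.
 */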
static int vgpu_pin_dma_address(struct intel_vgpu *vgpu,
				unsigned long size,
				dma_addr_t dma_addr)
{
	int ret = 0;

	if (intel_gvt_hypervisor_dma_pin_guest_page(vgpu, dma_addr))
		ret = -EINVAL;

	return ret;
}

static void vgpu_unpin_dma_address(struct intel_vgpu *vgpu,
				   dma_addr_t dma_addr)
{
	intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, dma_addr);
}
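
/*
 * Build the backing sg_table for a vGPU framebuffer object by reading the
 * guest's GGTT entries and pinning each decoded page. On failure, every
 * page pinned so far is unpinned again before returning.
 */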
static int vgpu_gem_get_pages(
		struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_vgpu *vgpu;
	struct sg_table *st;
	struct scatterlist *sg;
	int i, j, ret;
	gen8_pte_t __iomem *gtt_entries;
	struct intel_vgpu_fb_info *fb_info;
	u32 page_num;

	fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
	if (drm_WARN_ON(&dev_priv->drm, !fb_info))
		return -ENODEV;

	vgpu = fb_info->obj->vgpu;
	if (drm_WARN_ON(&dev_priv->drm, !vgpu))
		return -ENODEV;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (unlikely(!st))
		return -ENOMEM;

	page_num = obj->base.size >> PAGE_SHIFT;
	ret = sg_alloc_table(st, page_num, GFP_KERNEL);
	if (ret) {
		kfree(st);
		return ret;
	}
	gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
		(fb_info->start >> PAGE_SHIFT);
	for_each_sg(st->sgl, sg, page_num, i) {
		dma_addr_t dma_addr =
			GEN8_DECODE_PTE(readq(&gtt_entries[i]));
		if (vgpu_pin_dma_address(vgpu, PAGE_SIZE, dma_addr)) {
			ret = -EINVAL;
			goto out;
		}

		sg->offset = 0;
		sg->length = PAGE_SIZE;
		sg_dma_len(sg) = PAGE_SIZE;
		sg_dma_address(sg) = dma_addr;
	}

	__i915_gem_object_set_pages(obj, st, PAGE_SIZE);
out:
	if (ret) {
		dma_addr_t dma_addr;

		for_each_sg(st->sgl, sg, i, j) {
			dma_addr = sg_dma_address(sg);
			if (dma_addr)
				vgpu_unpin_dma_address(vgpu, dma_addr);
		}
		sg_free_table(st);
		kfree(st);
	}

	return ret;
}
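
/* Undo vgpu_gem_get_pages(): unpin every page and free the sg_table. */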
static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
		struct sg_table *pages)
{
	struct scatterlist *sg;

	if (obj->base.dma_buf) {
		struct intel_vgpu_fb_info *fb_info = obj->gvt_info;
		struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
		struct intel_vgpu *vgpu = obj->vgpu;
		int i;

		for_each_sg(pages->sgl, sg, fb_info->size, i)
			vgpu_unpin_dma_address(vgpu,
					       sg_dma_address(sg));
	}

	sg_free_table(pages);
	kfree(pages);
}
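
/*
 * kref release callback: drop the dmabuf_obj from the vGPU's list and IDR
 * while the vGPU is still active, otherwise just free the orphan object.
 */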
static void dmabuf_gem_object_free(struct kref *kref)
{
	struct intel_vgpu_dmabuf_obj *obj =
		container_of(kref, struct intel_vgpu_dmabuf_obj, kref);
	struct intel_vgpu *vgpu = obj->vgpu;
	struct list_head *pos;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;

	if (vgpu && vgpu->active && !list_empty(&vgpu->dmabuf_obj_list_head)) {
		list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
			dmabuf_obj = container_of(pos,
					struct intel_vgpu_dmabuf_obj, list);
			if (dmabuf_obj == obj) {
				list_del(pos);
				intel_gvt_hypervisor_put_vfio_device(vgpu);
				idr_remove(&vgpu->object_idr,
					   dmabuf_obj->dmabuf_id);
				kfree(dmabuf_obj->info);
				kfree(dmabuf_obj);
				break;
			}
		}
	} else {
		/* Free the orphan dmabuf_objs here */
		kfree(obj->info);
		kfree(obj);
	}
}
static inline void dmabuf_obj_get(struct intel_vgpu_dmabuf_obj *obj)
{
	kref_get(&obj->kref);
}

static inline void dmabuf_obj_put(struct intel_vgpu_dmabuf_obj *obj)
{
	kref_put(&obj->kref, dmabuf_gem_object_free);
}
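
/* GEM release hook: drop the reference held by the exported dma-buf. */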
static void vgpu_gem_release(struct drm_i915_gem_object *gem_obj)
{
	struct intel_vgpu_fb_info *fb_info = gem_obj->gvt_info;
	struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
	struct intel_vgpu *vgpu = obj->vgpu;

	if (vgpu) {
		mutex_lock(&vgpu->dmabuf_lock);
		gem_obj->base.dma_buf = NULL;
		dmabuf_obj_put(obj);
		mutex_unlock(&vgpu->dmabuf_lock);
	} else {
		/* vgpu is NULL, as it has been removed already */
		gem_obj->base.dma_buf = NULL;
		dmabuf_obj_put(obj);
	}
}
static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = {
	.name = "i915_gem_object_vgpu",
	.flags = I915_GEM_OBJECT_IS_PROXY,
	.get_pages = vgpu_gem_get_pages,
	.put_pages = vgpu_gem_put_pages,
	.release = vgpu_gem_release,
};
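
/*
 * Create a proxy GEM object wrapping the guest framebuffer described by
 * @info. Tiling is derived from the plane's DRM format modifier.
 */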
static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
		struct intel_vgpu_fb_info *info)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_alloc();
	if (obj == NULL)
		return NULL;

	drm_gem_private_object_init(dev, &obj->base,
		roundup(info->size, PAGE_SIZE));
	i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class);
	i915_gem_object_set_readonly(obj);

	obj->read_domains = I915_GEM_DOMAIN_GTT;
	obj->write_domain = 0;
	if (INTEL_GEN(dev_priv) >= 9) {
		unsigned int tiling_mode = 0;
		unsigned int stride = 0;

		switch (info->drm_format_mod) {
		case DRM_FORMAT_MOD_LINEAR:
			tiling_mode = I915_TILING_NONE;
			break;
		case I915_FORMAT_MOD_X_TILED:
			tiling_mode = I915_TILING_X;
			stride = info->stride;
			break;
		case I915_FORMAT_MOD_Y_TILED:
		case I915_FORMAT_MOD_Yf_TILED:
			tiling_mode = I915_TILING_Y;
			stride = info->stride;
			break;
		default:
			gvt_dbg_core("invalid drm_format_mod %llx for tiling\n",
				     info->drm_format_mod);
		}
		obj->tiling_and_stride = tiling_mode | stride;
	} else {
		obj->tiling_and_stride = info->drm_format_mod ?
					I915_TILING_X : 0;
	}

	return obj;
}
static bool validate_hotspot(struct intel_vgpu_cursor_plane_format *c)
{
	if (c && c->x_hot <= c->width && c->y_hot <= c->height)
		return true;

	return false;
}
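
/*
 * Decode the vGPU's current primary or cursor plane into @info and
 * sanity-check the resulting framebuffer (size, alignment, GGTT range).
 */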
static int vgpu_get_plane_info(struct drm_device *dev,
		struct intel_vgpu *vgpu,
		struct intel_vgpu_fb_info *info,
		int plane_id)
{
	struct intel_vgpu_primary_plane_format p;
	struct intel_vgpu_cursor_plane_format c;
	int ret, tile_height = 1;

	memset(info, 0, sizeof(*info));

	if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
		ret = intel_vgpu_decode_primary_plane(vgpu, &p);
		if (ret)
			return ret;
		info->start = p.base;
		info->start_gpa = p.base_gpa;
		info->width = p.width;
		info->height = p.height;
		info->stride = p.stride;
		info->drm_format = p.drm_format;

		switch (p.tiled) {
		case PLANE_CTL_TILED_LINEAR:
			info->drm_format_mod = DRM_FORMAT_MOD_LINEAR;
			break;
		case PLANE_CTL_TILED_X:
			info->drm_format_mod = I915_FORMAT_MOD_X_TILED;
			tile_height = 8;
			break;
		case PLANE_CTL_TILED_Y:
			info->drm_format_mod = I915_FORMAT_MOD_Y_TILED;
			tile_height = 32;
			break;
		case PLANE_CTL_TILED_YF:
			info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED;
			tile_height = 32;
			break;
		default:
			gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
		}
	} else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
		ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
		if (ret)
			return ret;
		info->start = c.base;
		info->start_gpa = c.base_gpa;
		info->width = c.width;
		info->height = c.height;
		info->stride = c.width * (c.bpp / 8);
		info->drm_format = c.drm_format;
		info->drm_format_mod = 0;
		info->x_pos = c.x_pos;
		info->y_pos = c.y_pos;

		if (validate_hotspot(&c)) {
			info->x_hot = c.x_hot;
			info->y_hot = c.y_hot;
		} else {
			info->x_hot = UINT_MAX;
			info->y_hot = UINT_MAX;
		}
	} else {
		gvt_vgpu_err("invalid plane id:%d\n", plane_id);
		return -EINVAL;
	}

	info->size = info->stride * roundup(info->height, tile_height);
	if (info->size == 0) {
		gvt_vgpu_err("fb size is zero\n");
		return -EINVAL;
	}

	if (info->start & (PAGE_SIZE - 1)) {
		gvt_vgpu_err("Not aligned fb address:0x%llx\n", info->start);
		return -EFAULT;
	}

	if (!intel_gvt_ggtt_validate_range(vgpu, info->start, info->size)) {
		gvt_vgpu_err("invalid gma addr\n");
		return -EFAULT;
	}

	return 0;
}
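
/*
 * Look up an already exposed dmabuf_obj whose decoded plane parameters
 * match @latest_info, so the same buffer is not exported twice.
 */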
static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_info(struct intel_vgpu *vgpu,
		    struct intel_vgpu_fb_info *latest_info)
{
	struct list_head *pos;
	struct intel_vgpu_fb_info *fb_info;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
	struct intel_vgpu_dmabuf_obj *ret = NULL;

	list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
						list);
		if ((dmabuf_obj == NULL) ||
		    (dmabuf_obj->info == NULL))
			continue;

		fb_info = (struct intel_vgpu_fb_info *)dmabuf_obj->info;
		if ((fb_info->start == latest_info->start) &&
		    (fb_info->start_gpa == latest_info->start_gpa) &&
		    (fb_info->size == latest_info->size) &&
		    (fb_info->drm_format_mod == latest_info->drm_format_mod) &&
		    (fb_info->drm_format == latest_info->drm_format) &&
		    (fb_info->width == latest_info->width) &&
		    (fb_info->height == latest_info->height)) {
			ret = dmabuf_obj;
			break;
		}
	}

	return ret;
}
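
/* Look up an exposed dmabuf_obj by its dmabuf id. */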
static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_num(struct intel_vgpu *vgpu, u32 id)
{
	struct list_head *pos;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
	struct intel_vgpu_dmabuf_obj *ret = NULL;

	list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
						list);
		if (!dmabuf_obj)
			continue;

		if (dmabuf_obj->dmabuf_id == id) {
			ret = dmabuf_obj;
			break;
		}
	}

	return ret;
}
static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
		      struct intel_vgpu_fb_info *fb_info)
{
	gvt_dmabuf->drm_format = fb_info->drm_format;
	gvt_dmabuf->drm_format_mod = fb_info->drm_format_mod;
	gvt_dmabuf->width = fb_info->width;
	gvt_dmabuf->height = fb_info->height;
	gvt_dmabuf->stride = fb_info->stride;
	gvt_dmabuf->size = fb_info->size;
	gvt_dmabuf->x_pos = fb_info->x_pos;
	gvt_dmabuf->y_pos = fb_info->y_pos;
	gvt_dmabuf->x_hot = fb_info->x_hot;
	gvt_dmabuf->y_hot = fb_info->y_hot;
}
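
/*
 * Back end for the VFIO_DEVICE_QUERY_GFX_PLANE ioctl: report the current
 * plane to user space, reusing an existing dmabuf_obj when possible and
 * allocating a new one otherwise.
 */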
int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
{
	struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
	struct vfio_device_gfx_plane_info *gfx_plane_info = args;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;
	struct intel_vgpu_fb_info fb_info;
	int ret = 0;

	if (gfx_plane_info->flags == (VFIO_GFX_PLANE_TYPE_DMABUF |
				      VFIO_GFX_PLANE_TYPE_PROBE))
		return ret;
	else if ((gfx_plane_info->flags & ~VFIO_GFX_PLANE_TYPE_DMABUF) ||
			(!gfx_plane_info->flags))
		return -EINVAL;

	ret = vgpu_get_plane_info(dev, vgpu, &fb_info,
				  gfx_plane_info->drm_plane_type);
	if (ret != 0)
		goto out;

	mutex_lock(&vgpu->dmabuf_lock);
	/* If exists, pick up the exposed dmabuf_obj */
	dmabuf_obj = pick_dmabuf_by_info(vgpu, &fb_info);
	if (dmabuf_obj) {
		update_fb_info(gfx_plane_info, &fb_info);
		gfx_plane_info->dmabuf_id = dmabuf_obj->dmabuf_id;

		/* This buffer may be released between query_plane ioctl and
		 * get_dmabuf ioctl. Add the refcount to make sure it won't
		 * be released between the two ioctls.
		 */
		if (!dmabuf_obj->initref) {
			dmabuf_obj->initref = true;
			dmabuf_obj_get(dmabuf_obj);
		}
		ret = 0;
		gvt_dbg_dpy("vgpu%d: re-use dmabuf_obj ref %d, id %d\n",
			    vgpu->id, kref_read(&dmabuf_obj->kref),
			    gfx_plane_info->dmabuf_id);
		mutex_unlock(&vgpu->dmabuf_lock);
		goto out;
	}

	mutex_unlock(&vgpu->dmabuf_lock);

	/* Need to allocate a new one */
	dmabuf_obj = kmalloc(sizeof(struct intel_vgpu_dmabuf_obj), GFP_KERNEL);
	if (unlikely(!dmabuf_obj)) {
		gvt_vgpu_err("alloc dmabuf_obj failed\n");
		ret = -ENOMEM;
		goto out;
	}

	dmabuf_obj->info = kmalloc(sizeof(struct intel_vgpu_fb_info),
				   GFP_KERNEL);
	if (unlikely(!dmabuf_obj->info)) {
		gvt_vgpu_err("allocate intel vgpu fb info failed\n");
		ret = -ENOMEM;
		goto out_free_dmabuf;
	}
	memcpy(dmabuf_obj->info, &fb_info, sizeof(struct intel_vgpu_fb_info));

	((struct intel_vgpu_fb_info *)dmabuf_obj->info)->obj = dmabuf_obj;

	dmabuf_obj->vgpu = vgpu;

	ret = idr_alloc(&vgpu->object_idr, dmabuf_obj, 1, 0, GFP_NOWAIT);
	if (ret < 0)
		goto out_free_info;
	gfx_plane_info->dmabuf_id = ret;
	dmabuf_obj->dmabuf_id = ret;

	dmabuf_obj->initref = true;

	kref_init(&dmabuf_obj->kref);

	mutex_lock(&vgpu->dmabuf_lock);
	if (intel_gvt_hypervisor_get_vfio_device(vgpu)) {
		gvt_vgpu_err("get vfio device failed\n");
		mutex_unlock(&vgpu->dmabuf_lock);
		goto out_free_info;
	}
	mutex_unlock(&vgpu->dmabuf_lock);

	update_fb_info(gfx_plane_info, &fb_info);

	INIT_LIST_HEAD(&dmabuf_obj->list);
	mutex_lock(&vgpu->dmabuf_lock);
	list_add_tail(&dmabuf_obj->list, &vgpu->dmabuf_obj_list_head);
	mutex_unlock(&vgpu->dmabuf_lock);

	gvt_dbg_dpy("vgpu%d: %s new dmabuf_obj ref %d, id %d\n", vgpu->id,
		    __func__, kref_read(&dmabuf_obj->kref), ret);

	return 0;

out_free_info:
	kfree(dmabuf_obj->info);
out_free_dmabuf:
	kfree(dmabuf_obj);
out:
	/* ENODEV means plane isn't ready, which might be a normal case. */
	return (ret == -ENODEV) ? 0 : ret;
}
/* To associate an exposed dmabuf with the dmabuf_obj */
int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
{
	struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;
	struct drm_i915_gem_object *obj;
	struct dma_buf *dmabuf;
	int dmabuf_fd;
	int ret = 0;

	mutex_lock(&vgpu->dmabuf_lock);

	dmabuf_obj = pick_dmabuf_by_num(vgpu, dmabuf_id);
	if (dmabuf_obj == NULL) {
		gvt_vgpu_err("invalid dmabuf id:%d\n", dmabuf_id);
		ret = -EINVAL;
		goto out;
	}

	obj = vgpu_create_gem(dev, dmabuf_obj->info);
	if (obj == NULL) {
		gvt_vgpu_err("create gvt gem obj failed\n");
		ret = -ENOMEM;
		goto out;
	}

	obj->gvt_info = dmabuf_obj->info;

	dmabuf = i915_gem_prime_export(&obj->base, DRM_CLOEXEC | DRM_RDWR);
	if (IS_ERR(dmabuf)) {
		gvt_vgpu_err("export dma-buf failed\n");
		ret = PTR_ERR(dmabuf);
		goto out_free_gem;
	}

	ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR);
	if (ret < 0) {
		gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret);
		goto out_free_dmabuf;
	}
	dmabuf_fd = ret;

	dmabuf_obj_get(dmabuf_obj);

	if (dmabuf_obj->initref) {
		dmabuf_obj->initref = false;
		dmabuf_obj_put(dmabuf_obj);
	}

	mutex_unlock(&vgpu->dmabuf_lock);

	gvt_dbg_dpy("vgpu%d: dmabuf:%d, dmabuf ref %d, fd:%d\n"
		    "        file count: %ld, GEM ref: %d\n",
		    vgpu->id, dmabuf_obj->dmabuf_id,
		    kref_read(&dmabuf_obj->kref),
		    dmabuf_fd,
		    file_count(dmabuf->file),
		    kref_read(&obj->base.refcount));

	i915_gem_object_put(obj);

	return dmabuf_fd;

out_free_dmabuf:
	dma_buf_put(dmabuf);
out_free_gem:
	i915_gem_object_put(obj);
out:
	mutex_unlock(&vgpu->dmabuf_lock);
	return ret;
}
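
/*
 * Called on vGPU teardown: detach every dmabuf_obj from the vGPU and drop
 * the initial reference taken in intel_vgpu_query_plane().
 */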
void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;

	mutex_lock(&vgpu->dmabuf_lock);
	list_for_each_safe(pos, n, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
						list);
		dmabuf_obj->vgpu = NULL;

		idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id);
		intel_gvt_hypervisor_put_vfio_device(vgpu);
		list_del(pos);

		/* dmabuf_obj might be freed in dmabuf_obj_put */
		if (dmabuf_obj->initref) {
			dmabuf_obj->initref = false;
			dmabuf_obj_put(dmabuf_obj);
		}
	}
	mutex_unlock(&vgpu->dmabuf_lock);
}