/*
 * Copyright 2017 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *
 * Contributors:
 *    Tina Zhang <tina.zhang@intel.com>
 */

#include <linux/dma-buf.h>
#include <linux/vfio.h>
#include "i915_drv.h"
#include "gvt.h"

#define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))

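/*
 * Pin/unpin helpers: ask the hypervisor backend to keep a guest page
 * mapped (or release it) for a DMA address decoded from a guest GGTT
 * entry. (The middle "size" parameter below is reconstructed from the
 * PAGE_SIZE argument at the call site in vgpu_gem_get_pages().)
 */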
static int vgpu_pin_dma_address(struct intel_vgpu *vgpu,
                                unsigned long size,
                                dma_addr_t dma_addr)
{
        int ret = 0;

        if (intel_gvt_hypervisor_dma_pin_guest_page(vgpu, dma_addr))
                ret = -EINVAL;

        return ret;
}

static void vgpu_unpin_dma_address(struct intel_vgpu *vgpu,
                                   dma_addr_t dma_addr)
{
        intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, dma_addr);
}

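/*
 * Back the proxy GEM object with the guest framebuffer pages: walk the
 * guest's GGTT entries for the framebuffer range, decode and pin each
 * DMA address, and fill the scatter-gather table one PAGE_SIZE entry
 * per page. On failure, unpin whatever was already pinned.
 */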
static int vgpu_gem_get_pages(
                struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct intel_vgpu *vgpu;
        struct sg_table *st;
        struct scatterlist *sg;
        int i, j, ret;
        gen8_pte_t __iomem *gtt_entries;
        struct intel_vgpu_fb_info *fb_info;
        u32 page_num;

        fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
        if (WARN_ON(!fb_info))
                return -ENODEV;

        vgpu = fb_info->obj->vgpu;
        if (WARN_ON(!vgpu))
                return -ENODEV;

        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (unlikely(!st))
                return -ENOMEM;

        page_num = obj->base.size >> PAGE_SHIFT;
        ret = sg_alloc_table(st, page_num, GFP_KERNEL);
        if (ret) {
                kfree(st);
                return ret;
        }

        gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
                (fb_info->start >> PAGE_SHIFT);
        for_each_sg(st->sgl, sg, page_num, i) {
                dma_addr_t dma_addr =
                        GEN8_DECODE_PTE(readq(&gtt_entries[i]));
                if (vgpu_pin_dma_address(vgpu, PAGE_SIZE, dma_addr)) {
                        ret = -EINVAL;
                        goto out;
                }

                sg->offset = 0;
                sg->length = PAGE_SIZE;
                sg_dma_len(sg) = PAGE_SIZE;
                sg_dma_address(sg) = dma_addr;
        }

        __i915_gem_object_set_pages(obj, st, PAGE_SIZE);
out:
        if (ret) {
                dma_addr_t dma_addr;

                for_each_sg(st->sgl, sg, i, j) {
                        dma_addr = sg_dma_address(sg);
                        if (dma_addr)
                                vgpu_unpin_dma_address(vgpu, dma_addr);
                }
                sg_free_table(st);
                kfree(st);
        }

        return ret;
}

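/*
 * Drop the pins taken in vgpu_gem_get_pages() and release the
 * scatter-gather table once the proxy object no longer needs the pages.
 */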
static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
                struct sg_table *pages)
{
        struct scatterlist *sg;

        if (obj->base.dma_buf) {
                struct intel_vgpu_fb_info *fb_info = obj->gvt_info;
                struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
                struct intel_vgpu *vgpu = obj->vgpu;
                int i;

                for_each_sg(pages->sgl, sg, fb_info->size, i)
                        vgpu_unpin_dma_address(vgpu,
                                               sg_dma_address(sg));
        }

        sg_free_table(pages);
        kfree(pages);
}

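/*
 * kref release callback: drop the vGPU bookkeeping (vfio device
 * reference, idr entry, list membership) before freeing the
 * dmabuf_obj itself.
 */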
static void dmabuf_gem_object_free(struct kref *kref)
{
        struct intel_vgpu_dmabuf_obj *obj =
                container_of(kref, struct intel_vgpu_dmabuf_obj, kref);
        struct intel_vgpu *vgpu = obj->vgpu;
        struct list_head *pos;
        struct intel_vgpu_dmabuf_obj *dmabuf_obj;

        if (vgpu && vgpu->active && !list_empty(&vgpu->dmabuf_obj_list_head)) {
                list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
                        dmabuf_obj = container_of(pos,
                                        struct intel_vgpu_dmabuf_obj, list);
                        if (dmabuf_obj == obj) {
                                intel_gvt_hypervisor_put_vfio_device(vgpu);
                                idr_remove(&vgpu->object_idr,
                                           dmabuf_obj->dmabuf_id);
                                kfree(dmabuf_obj->info);
                                kfree(dmabuf_obj);
                                list_del(pos);
                                break;
                        }
                }
        } else {
                /* Free the orphan dmabuf_objs here */
                kfree(obj->info);
                kfree(obj);
        }
}

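/* Reference counting wrappers around dmabuf_obj->kref. */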
static inline void dmabuf_obj_get(struct intel_vgpu_dmabuf_obj *obj)
{
        kref_get(&obj->kref);
}

static inline void dmabuf_obj_put(struct intel_vgpu_dmabuf_obj *obj)
{
        kref_put(&obj->kref, dmabuf_gem_object_free);
}

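/*
 * GEM release hook: detach the dma-buf pointer and drop the dmabuf_obj
 * reference, taking the vGPU lock only if the vGPU still exists.
 */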
static void vgpu_gem_release(struct drm_i915_gem_object *gem_obj)
{
        struct intel_vgpu_fb_info *fb_info = gem_obj->gvt_info;
        struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
        struct intel_vgpu *vgpu = obj->vgpu;

        if (vgpu) {
                mutex_lock(&vgpu->dmabuf_lock);
                gem_obj->base.dma_buf = NULL;
                dmabuf_obj_put(obj);
                mutex_unlock(&vgpu->dmabuf_lock);
        } else {
                /* vgpu is NULL, as it has been removed already */
                gem_obj->base.dma_buf = NULL;
                dmabuf_obj_put(obj);
        }
}

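/* GEM object ops for the proxy object that wraps a guest framebuffer. */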
static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = {
        .flags = I915_GEM_OBJECT_IS_PROXY,
        .get_pages = vgpu_gem_get_pages,
        .put_pages = vgpu_gem_put_pages,
        .release = vgpu_gem_release,
};

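/*
 * Create the proxy GEM object for a decoded plane: size it to the
 * framebuffer, mark it read-only, and derive tiling/stride from the
 * DRM format modifier.
 */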
static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
                struct intel_vgpu_fb_info *info)
{
        static struct lock_class_key lock_class;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj;

        obj = i915_gem_object_alloc();
        if (obj == NULL)
                return NULL;

        drm_gem_private_object_init(dev, &obj->base,
                roundup(info->size, PAGE_SIZE));
        i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class);
        i915_gem_object_set_readonly(obj);

        obj->read_domains = I915_GEM_DOMAIN_GTT;
        obj->write_domain = 0;
        if (INTEL_GEN(dev_priv) >= 9) {
                unsigned int tiling_mode = 0;
                unsigned int stride = 0;

                switch (info->drm_format_mod) {
                case DRM_FORMAT_MOD_LINEAR:
                        tiling_mode = I915_TILING_NONE;
                        break;
                case I915_FORMAT_MOD_X_TILED:
                        tiling_mode = I915_TILING_X;
                        stride = info->stride;
                        break;
                case I915_FORMAT_MOD_Y_TILED:
                case I915_FORMAT_MOD_Yf_TILED:
                        tiling_mode = I915_TILING_Y;
                        stride = info->stride;
                        break;
                default:
                        gvt_dbg_core("invalid drm_format_mod %llx for tiling\n",
                                     info->drm_format_mod);
                }
                obj->tiling_and_stride = tiling_mode | stride;
        } else {
                obj->tiling_and_stride = info->drm_format_mod ?
                                        I915_TILING_X : 0;
        }

        return obj;
}

static bool validate_hotspot(struct intel_vgpu_cursor_plane_format *c)
{
        if (c && c->x_hot <= c->width && c->y_hot <= c->height)
                return true;

        return false;
}

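/*
 * Decode the requested plane (primary or cursor) into an
 * intel_vgpu_fb_info and sanity-check the resulting size, alignment
 * and GGTT range.
 */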
static int vgpu_get_plane_info(struct drm_device *dev,
                struct intel_vgpu *vgpu,
                struct intel_vgpu_fb_info *info,
                int plane_id)
{
        struct intel_vgpu_primary_plane_format p;
        struct intel_vgpu_cursor_plane_format c;
        int ret, tile_height = 1;

        memset(info, 0, sizeof(*info));

        if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
                ret = intel_vgpu_decode_primary_plane(vgpu, &p);
                if (ret)
                        return ret;
                info->start = p.base;
                info->start_gpa = p.base_gpa;
                info->width = p.width;
                info->height = p.height;
                info->stride = p.stride;
                info->drm_format = p.drm_format;

                switch (p.tiled) {
                case PLANE_CTL_TILED_LINEAR:
                        info->drm_format_mod = DRM_FORMAT_MOD_LINEAR;
                        break;
                case PLANE_CTL_TILED_X:
                        info->drm_format_mod = I915_FORMAT_MOD_X_TILED;
                        tile_height = 8;
                        break;
                case PLANE_CTL_TILED_Y:
                        info->drm_format_mod = I915_FORMAT_MOD_Y_TILED;
                        tile_height = 32;
                        break;
                case PLANE_CTL_TILED_YF:
                        info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED;
                        tile_height = 32;
                        break;
                default:
                        gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
                }
        } else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
                ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
                if (ret)
                        return ret;
                info->start = c.base;
                info->start_gpa = c.base_gpa;
                info->width = c.width;
                info->height = c.height;
                info->stride = c.width * (c.bpp / 8);
                info->drm_format = c.drm_format;
                info->drm_format_mod = 0;
                info->x_pos = c.x_pos;
                info->y_pos = c.y_pos;

                if (validate_hotspot(&c)) {
                        info->x_hot = c.x_hot;
                        info->y_hot = c.y_hot;
                } else {
                        info->x_hot = UINT_MAX;
                        info->y_hot = UINT_MAX;
                }
        } else {
                gvt_vgpu_err("invalid plane id:%d\n", plane_id);
                return -EINVAL;
        }

        info->size = info->stride * roundup(info->height, tile_height);
        if (info->size == 0) {
                gvt_vgpu_err("fb size is zero\n");
                return -EINVAL;
        }

        if (info->start & (PAGE_SIZE - 1)) {
                gvt_vgpu_err("Not aligned fb address:0x%llx\n", info->start);
                return -EFAULT;
        }

        if (!intel_gvt_ggtt_validate_range(vgpu, info->start, info->size)) {
                gvt_vgpu_err("invalid gma addr\n");
                return -EFAULT;
        }

        return 0;
}

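/*
 * Look up an already-exposed dmabuf_obj whose framebuffer description
 * matches the newly decoded plane, so that it can be re-used.
 */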
static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_info(struct intel_vgpu *vgpu,
                    struct intel_vgpu_fb_info *latest_info)
{
        struct list_head *pos;
        struct intel_vgpu_fb_info *fb_info;
        struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
        struct intel_vgpu_dmabuf_obj *ret = NULL;

        list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
                dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
                                          list);
                if ((dmabuf_obj == NULL) ||
                    (dmabuf_obj->info == NULL))
                        continue;

                fb_info = (struct intel_vgpu_fb_info *)dmabuf_obj->info;
                if ((fb_info->start == latest_info->start) &&
                    (fb_info->start_gpa == latest_info->start_gpa) &&
                    (fb_info->size == latest_info->size) &&
                    (fb_info->drm_format_mod == latest_info->drm_format_mod) &&
                    (fb_info->drm_format == latest_info->drm_format) &&
                    (fb_info->width == latest_info->width) &&
                    (fb_info->height == latest_info->height)) {
                        ret = dmabuf_obj;
                        break;
                }
        }

        return ret;
}

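/* Look up an already-exposed dmabuf_obj by its dmabuf id. */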
static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_num(struct intel_vgpu *vgpu, u32 id)
{
        struct list_head *pos;
        struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
        struct intel_vgpu_dmabuf_obj *ret = NULL;

        list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
                dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
                                          list);
                if (!dmabuf_obj)
                        continue;

                if (dmabuf_obj->dmabuf_id == id) {
                        ret = dmabuf_obj;
                        break;
                }
        }

        return ret;
}

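/* Copy the decoded framebuffer description into the VFIO plane info. */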
static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
                      struct intel_vgpu_fb_info *fb_info)
{
        gvt_dmabuf->drm_format = fb_info->drm_format;
        gvt_dmabuf->drm_format_mod = fb_info->drm_format_mod;
        gvt_dmabuf->width = fb_info->width;
        gvt_dmabuf->height = fb_info->height;
        gvt_dmabuf->stride = fb_info->stride;
        gvt_dmabuf->size = fb_info->size;
        gvt_dmabuf->x_pos = fb_info->x_pos;
        gvt_dmabuf->y_pos = fb_info->y_pos;
        gvt_dmabuf->x_hot = fb_info->x_hot;
        gvt_dmabuf->y_hot = fb_info->y_hot;
}

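/*
 * VFIO_GFX_PLANE_TYPE_DMABUF query: decode the current plane, re-use a
 * matching dmabuf_obj if one is already exposed, otherwise allocate and
 * register a new one and report its dmabuf id back to userspace.
 */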
int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
{
        struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
        struct vfio_device_gfx_plane_info *gfx_plane_info = args;
        struct intel_vgpu_dmabuf_obj *dmabuf_obj;
        struct intel_vgpu_fb_info fb_info;
        int ret = 0;

        if (gfx_plane_info->flags == (VFIO_GFX_PLANE_TYPE_DMABUF |
                                      VFIO_GFX_PLANE_TYPE_PROBE))
                return 0;
        else if ((gfx_plane_info->flags & ~VFIO_GFX_PLANE_TYPE_DMABUF) ||
                        (!gfx_plane_info->flags))
                return -EINVAL;

        ret = vgpu_get_plane_info(dev, vgpu, &fb_info,
                                  gfx_plane_info->drm_plane_type);
        if (ret != 0)
                goto out;

        mutex_lock(&vgpu->dmabuf_lock);
        /* If exists, pick up the exposed dmabuf_obj */
        dmabuf_obj = pick_dmabuf_by_info(vgpu, &fb_info);
        if (dmabuf_obj) {
                update_fb_info(gfx_plane_info, &fb_info);
                gfx_plane_info->dmabuf_id = dmabuf_obj->dmabuf_id;

                /* This buffer may be released between query_plane ioctl and
                 * get_dmabuf ioctl. Add the refcount to make sure it won't
                 * be released between the two ioctls.
                 */
                if (!dmabuf_obj->initref) {
                        dmabuf_obj->initref = true;
                        dmabuf_obj_get(dmabuf_obj);
                }
                ret = 0;
                gvt_dbg_dpy("vgpu%d: re-use dmabuf_obj ref %d, id %d\n",
                            vgpu->id, kref_read(&dmabuf_obj->kref),
                            gfx_plane_info->dmabuf_id);
                mutex_unlock(&vgpu->dmabuf_lock);
                goto out;
        }

        mutex_unlock(&vgpu->dmabuf_lock);

        /* Need to allocate a new one */
        dmabuf_obj = kmalloc(sizeof(struct intel_vgpu_dmabuf_obj), GFP_KERNEL);
        if (unlikely(!dmabuf_obj)) {
                gvt_vgpu_err("alloc dmabuf_obj failed\n");
                ret = -ENOMEM;
                goto out;
        }

        dmabuf_obj->info = kmalloc(sizeof(struct intel_vgpu_fb_info),
                                   GFP_KERNEL);
        if (unlikely(!dmabuf_obj->info)) {
                gvt_vgpu_err("allocate intel vgpu fb info failed\n");
                ret = -ENOMEM;
                goto out_free_dmabuf;
        }
        memcpy(dmabuf_obj->info, &fb_info, sizeof(struct intel_vgpu_fb_info));

        ((struct intel_vgpu_fb_info *)dmabuf_obj->info)->obj = dmabuf_obj;

        dmabuf_obj->vgpu = vgpu;

        ret = idr_alloc(&vgpu->object_idr, dmabuf_obj, 1, 0, GFP_NOWAIT);
        if (ret < 0)
                goto out_free_info;
        gfx_plane_info->dmabuf_id = ret;
        dmabuf_obj->dmabuf_id = ret;

        dmabuf_obj->initref = true;

        kref_init(&dmabuf_obj->kref);

        mutex_lock(&vgpu->dmabuf_lock);
        if (intel_gvt_hypervisor_get_vfio_device(vgpu)) {
                gvt_vgpu_err("get vfio device failed\n");
                mutex_unlock(&vgpu->dmabuf_lock);
                goto out_free_info;
        }
        mutex_unlock(&vgpu->dmabuf_lock);

        update_fb_info(gfx_plane_info, &fb_info);

        INIT_LIST_HEAD(&dmabuf_obj->list);
        mutex_lock(&vgpu->dmabuf_lock);
        list_add_tail(&dmabuf_obj->list, &vgpu->dmabuf_obj_list_head);
        mutex_unlock(&vgpu->dmabuf_lock);

        gvt_dbg_dpy("vgpu%d: %s new dmabuf_obj ref %d, id %d\n", vgpu->id,
                    __func__, kref_read(&dmabuf_obj->kref), ret);

        return 0;

out_free_info:
        kfree(dmabuf_obj->info);
out_free_dmabuf:
        kfree(dmabuf_obj);
out:
        /* ENODEV means plane isn't ready, which might be a normal case. */
        return (ret == -ENODEV) ? 0 : ret;
}

/* To associate an exposed dmabuf with the dmabuf_obj */
int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
{
        struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
        struct intel_vgpu_dmabuf_obj *dmabuf_obj;
        struct drm_i915_gem_object *obj;
        struct dma_buf *dmabuf;
        int dmabuf_fd;
        int ret = 0;

        mutex_lock(&vgpu->dmabuf_lock);

        dmabuf_obj = pick_dmabuf_by_num(vgpu, dmabuf_id);
        if (dmabuf_obj == NULL) {
                gvt_vgpu_err("invalid dmabuf id:%d\n", dmabuf_id);
                ret = -EINVAL;
                goto out;
        }

        obj = vgpu_create_gem(dev, dmabuf_obj->info);
        if (obj == NULL) {
                gvt_vgpu_err("create gvt gem obj failed\n");
                ret = -ENOMEM;
                goto out;
        }

        obj->gvt_info = dmabuf_obj->info;

        dmabuf = i915_gem_prime_export(&obj->base, DRM_CLOEXEC | DRM_RDWR);
        if (IS_ERR(dmabuf)) {
                gvt_vgpu_err("export dma-buf failed\n");
                ret = PTR_ERR(dmabuf);
                goto out_free_gem;
        }

        ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR);
        if (ret < 0) {
                gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret);
                goto out_free_dmabuf;
        }
        dmabuf_fd = ret;

        dmabuf_obj_get(dmabuf_obj);

        if (dmabuf_obj->initref) {
                dmabuf_obj->initref = false;
                dmabuf_obj_put(dmabuf_obj);
        }

        mutex_unlock(&vgpu->dmabuf_lock);

        gvt_dbg_dpy("vgpu%d: dmabuf:%d, dmabuf ref %d, fd:%d\n"
                    "        file count: %ld, GEM ref: %d\n",
                    vgpu->id, dmabuf_obj->dmabuf_id,
                    kref_read(&dmabuf_obj->kref),
                    dmabuf_fd,
                    file_count(dmabuf->file),
                    kref_read(&obj->base.refcount));

        i915_gem_object_put(obj);

        return dmabuf_fd;

out_free_dmabuf:
        dma_buf_put(dmabuf);
out_free_gem:
        i915_gem_object_put(obj);
out:
        mutex_unlock(&vgpu->dmabuf_lock);
        return ret;
}

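/*
 * Called on vGPU teardown: detach every dmabuf_obj from the vGPU and
 * drop the initial references so the objects can go away once userspace
 * closes its dma-buf fds.
 */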
void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu)
{
        struct list_head *pos, *n;
        struct intel_vgpu_dmabuf_obj *dmabuf_obj;

        mutex_lock(&vgpu->dmabuf_lock);
        list_for_each_safe(pos, n, &vgpu->dmabuf_obj_list_head) {
                dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
                                          list);
                dmabuf_obj->vgpu = NULL;

                idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id);
                intel_gvt_hypervisor_put_vfio_device(vgpu);
                list_del(pos);

                /* dmabuf_obj might be freed in dmabuf_obj_put */
                if (dmabuf_obj->initref) {
                        dmabuf_obj->initref = false;
                        dmabuf_obj_put(dmabuf_obj);
                }
        }
        mutex_unlock(&vgpu->dmabuf_lock);
}