/*
 * Copyright 2017 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *
 * Contributors:
 *    Tina Zhang <tina.zhang@intel.com>
 */

#include <linux/dma-buf.h>
#include <linux/vfio.h>

#include "i915_drv.h"
#include "gvt.h"
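
/* A GEN8 GGTT PTE carries the page address in bits 63:12; mask off the low
 * flag bits to recover the DMA address of the backing page.
 */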
#define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))
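
/* Populate the proxy GEM object's sg_table by reading the guest
 * framebuffer's PTEs straight out of the global GTT, one 4KiB page per
 * scatterlist entry.
 */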
static int vgpu_gem_get_pages(
		struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	int i, ret;
	gen8_pte_t __iomem *gtt_entries;
	struct intel_vgpu_fb_info *fb_info;

	fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
	if (WARN_ON(!fb_info))
		return -ENODEV;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (unlikely(!st))
		return -ENOMEM;

	ret = sg_alloc_table(st, fb_info->size, GFP_KERNEL);
	if (ret) {
		kfree(st);
		return ret;
	}

	gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
		(fb_info->start >> PAGE_SHIFT);
	for_each_sg(st->sgl, sg, fb_info->size, i) {
		sg->offset = 0;
		sg->length = PAGE_SIZE;
		sg_dma_address(sg) =
			GEN8_DECODE_PTE(readq(&gtt_entries[i]));
		sg_dma_len(sg) = PAGE_SIZE;
	}

	__i915_gem_object_set_pages(obj, st, PAGE_SIZE);

	return 0;
}

static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
		struct sg_table *pages)
{
	sg_free_table(pages);
	kfree(pages);
}
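
/* kref release callback: drop the dmabuf_obj from the vGPU's list and idr
 * while the vGPU is still alive, otherwise just free the orphaned object.
 */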
static void dmabuf_gem_object_free(struct kref *kref)
{
	struct intel_vgpu_dmabuf_obj *obj =
		container_of(kref, struct intel_vgpu_dmabuf_obj, kref);
	struct intel_vgpu *vgpu = obj->vgpu;
	struct list_head *pos;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;

	if (vgpu && vgpu->active && !list_empty(&vgpu->dmabuf_obj_list_head)) {
		list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
			dmabuf_obj = container_of(pos,
					struct intel_vgpu_dmabuf_obj, list);
			if (dmabuf_obj == obj) {
				intel_gvt_hypervisor_put_vfio_device(vgpu);
				idr_remove(&vgpu->object_idr,
					   dmabuf_obj->dmabuf_id);
				kfree(dmabuf_obj->info);
				list_del(pos);
				kfree(dmabuf_obj);
				break;
			}
		}
	} else {
		/* Free the orphan dmabuf_objs here */
		kfree(obj->info);
		kfree(obj);
	}
}

static inline void dmabuf_obj_get(struct intel_vgpu_dmabuf_obj *obj)
{
	kref_get(&obj->kref);
}

static inline void dmabuf_obj_put(struct intel_vgpu_dmabuf_obj *obj)
{
	kref_put(&obj->kref, dmabuf_gem_object_free);
}

static void vgpu_gem_release(struct drm_i915_gem_object *gem_obj)
{
	struct intel_vgpu_fb_info *fb_info = gem_obj->gvt_info;
	struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
	struct intel_vgpu *vgpu = obj->vgpu;

	if (vgpu) {
		mutex_lock(&vgpu->dmabuf_lock);
		gem_obj->base.dma_buf = NULL;
		dmabuf_obj_put(obj);
		mutex_unlock(&vgpu->dmabuf_lock);
	} else {
		/* vgpu is NULL, as it has been removed already */
		gem_obj->base.dma_buf = NULL;
		dmabuf_obj_put(obj);
	}
}

static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = {
	.flags = I915_GEM_OBJECT_IS_PROXY,
	.get_pages = vgpu_gem_get_pages,
	.put_pages = vgpu_gem_put_pages,
	.release = vgpu_gem_release,
};
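
/* Wrap the guest framebuffer in a proxy GEM object so it can be exported
 * as a dma-buf; tiling and stride are derived from the decoded plane state.
 */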
static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
		struct intel_vgpu_fb_info *info)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_alloc(dev_priv);
	if (obj == NULL)
		return NULL;

	drm_gem_private_object_init(dev, &obj->base,
		info->size << PAGE_SHIFT);
	i915_gem_object_init(obj, &intel_vgpu_gem_ops);

	obj->base.read_domains = I915_GEM_DOMAIN_GTT;
	obj->base.write_domain = 0;
	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		unsigned int tiling_mode = 0;
		unsigned int stride = 0;

		switch (info->drm_format_mod << 10) {
		case PLANE_CTL_TILED_LINEAR:
			tiling_mode = I915_TILING_NONE;
			break;
		case PLANE_CTL_TILED_X:
			tiling_mode = I915_TILING_X;
			stride = info->stride;
			break;
		case PLANE_CTL_TILED_Y:
			tiling_mode = I915_TILING_Y;
			stride = info->stride;
			break;
		default:
			gvt_dbg_core("not supported tiling mode\n");
			break;
		}
		obj->tiling_and_stride = tiling_mode | stride;
	} else {
		obj->tiling_and_stride = info->drm_format_mod ?
					I915_TILING_X : 0;
	}

	return obj;
}

static int vgpu_get_plane_info(struct drm_device *dev,
		struct intel_vgpu *vgpu,
		struct intel_vgpu_fb_info *info,
		int plane_id)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_vgpu_primary_plane_format p;
	struct intel_vgpu_cursor_plane_format c;
	int ret;

	if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
		ret = intel_vgpu_decode_primary_plane(vgpu, &p);
		if (ret)
			return ret;
		info->start = p.base;
		info->start_gpa = p.base_gpa;
		info->width = p.width;
		info->height = p.height;
		info->stride = p.stride;
		info->drm_format = p.drm_format;
		info->drm_format_mod = p.tiled;
		info->size = (((p.stride * p.height * p.bpp) / 8) +
				(PAGE_SIZE - 1)) >> PAGE_SHIFT;
	} else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
		ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
		if (ret)
			return ret;
		info->start = c.base;
		info->start_gpa = c.base_gpa;
		info->width = c.width;
		info->height = c.height;
		info->stride = c.width * (c.bpp / 8);
		info->drm_format = c.drm_format;
		info->drm_format_mod = 0;
		info->x_pos = c.x_pos;
		info->y_pos = c.y_pos;

		/* Deliver an invalid cursor hotspot value to the host until
		 * we find a way to get the real hotspot info from the guest.
		 */
		info->x_hot = UINT_MAX;
		info->y_hot = UINT_MAX;
		info->size = (((info->stride * c.height * c.bpp) / 8)
				+ (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	} else {
		gvt_vgpu_err("invalid plane id:%d\n", plane_id);
		return -EINVAL;
	}

	if (info->size == 0) {
		gvt_vgpu_err("fb size is zero\n");
		return -EINVAL;
	}

	if (info->start & (PAGE_SIZE - 1)) {
		gvt_vgpu_err("Not aligned fb address:0x%llx\n", info->start);
		return -EFAULT;
	}
	if (((info->start >> PAGE_SHIFT) + info->size) >
		ggtt_total_entries(&dev_priv->ggtt)) {
		gvt_vgpu_err("Invalid GTT offset or size\n");
		return -EFAULT;
	}

	if (!intel_gvt_ggtt_validate_range(vgpu, info->start, info->size)) {
		gvt_vgpu_err("invalid gma addr\n");
		return -EFAULT;
	}

	return 0;
}
static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_info(struct intel_vgpu *vgpu,
		    struct intel_vgpu_fb_info *latest_info)
{
	struct list_head *pos;
	struct intel_vgpu_fb_info *fb_info;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
	struct intel_vgpu_dmabuf_obj *ret = NULL;

	list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
						list);
		if ((dmabuf_obj == NULL) ||
		    (dmabuf_obj->info == NULL))
			continue;

		fb_info = (struct intel_vgpu_fb_info *)dmabuf_obj->info;
		if ((fb_info->start == latest_info->start) &&
		    (fb_info->start_gpa == latest_info->start_gpa) &&
		    (fb_info->size == latest_info->size) &&
		    (fb_info->drm_format_mod == latest_info->drm_format_mod) &&
		    (fb_info->drm_format == latest_info->drm_format) &&
		    (fb_info->width == latest_info->width) &&
		    (fb_info->height == latest_info->height)) {
			ret = dmabuf_obj;
			break;
		}
	}

	return ret;
}

static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_num(struct intel_vgpu *vgpu, u32 id)
{
	struct list_head *pos;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
	struct intel_vgpu_dmabuf_obj *ret = NULL;

	list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
						list);
		if (!dmabuf_obj)
			continue;

		if (dmabuf_obj->dmabuf_id == id) {
			ret = dmabuf_obj;
			break;
		}
	}

	return ret;
}

static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
		      struct intel_vgpu_fb_info *fb_info)
{
	gvt_dmabuf->drm_format = fb_info->drm_format;
	gvt_dmabuf->width = fb_info->width;
	gvt_dmabuf->height = fb_info->height;
	gvt_dmabuf->stride = fb_info->stride;
	gvt_dmabuf->size = fb_info->size;
	gvt_dmabuf->x_pos = fb_info->x_pos;
	gvt_dmabuf->y_pos = fb_info->y_pos;
	gvt_dmabuf->x_hot = fb_info->x_hot;
	gvt_dmabuf->y_hot = fb_info->y_hot;
}
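
/* Back the VFIO_DEVICE_QUERY_GFX_PLANE ioctl: decode the requested guest
 * plane, reuse a matching exposed dmabuf_obj if one exists, otherwise
 * allocate and register a new one and report its dmabuf_id to userspace.
 */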
int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
{
	struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
	struct vfio_device_gfx_plane_info *gfx_plane_info = args;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;
	struct intel_vgpu_fb_info fb_info;
	int ret = 0;

	if (gfx_plane_info->flags == (VFIO_GFX_PLANE_TYPE_DMABUF |
				       VFIO_GFX_PLANE_TYPE_PROBE))
		return ret;
	else if ((gfx_plane_info->flags & ~VFIO_GFX_PLANE_TYPE_DMABUF) ||
			(!gfx_plane_info->flags))
		return -EINVAL;

	ret = vgpu_get_plane_info(dev, vgpu, &fb_info,
					gfx_plane_info->drm_plane_type);
	if (ret != 0)
		goto out;

	mutex_lock(&vgpu->dmabuf_lock);
	/* If exists, pick up the exposed dmabuf_obj */
	dmabuf_obj = pick_dmabuf_by_info(vgpu, &fb_info);
	if (dmabuf_obj) {
		update_fb_info(gfx_plane_info, &fb_info);
		gfx_plane_info->dmabuf_id = dmabuf_obj->dmabuf_id;

		/* This buffer may be released between the query_plane ioctl
		 * and the get_dmabuf ioctl. Take a reference to make sure it
		 * won't be released in between.
		 */
		if (!dmabuf_obj->initref) {
			dmabuf_obj->initref = true;
			dmabuf_obj_get(dmabuf_obj);
		}
		ret = 0;
		gvt_dbg_dpy("vgpu%d: re-use dmabuf_obj ref %d, id %d\n",
			    vgpu->id, kref_read(&dmabuf_obj->kref),
			    gfx_plane_info->dmabuf_id);
		mutex_unlock(&vgpu->dmabuf_lock);
		goto out;
	}

	mutex_unlock(&vgpu->dmabuf_lock);

	/* Need to allocate a new one */
	dmabuf_obj = kmalloc(sizeof(struct intel_vgpu_dmabuf_obj), GFP_KERNEL);
	if (unlikely(!dmabuf_obj)) {
		gvt_vgpu_err("alloc dmabuf_obj failed\n");
		ret = -ENOMEM;
		goto out;
	}

	dmabuf_obj->info = kmalloc(sizeof(struct intel_vgpu_fb_info),
				   GFP_KERNEL);
	if (unlikely(!dmabuf_obj->info)) {
		gvt_vgpu_err("allocate intel vgpu fb info failed\n");
		ret = -ENOMEM;
		goto out_free_dmabuf;
	}
	memcpy(dmabuf_obj->info, &fb_info, sizeof(struct intel_vgpu_fb_info));

	((struct intel_vgpu_fb_info *)dmabuf_obj->info)->obj = dmabuf_obj;

	dmabuf_obj->vgpu = vgpu;

	ret = idr_alloc(&vgpu->object_idr, dmabuf_obj, 1, 0, GFP_NOWAIT);
	if (ret < 0)
		goto out_free_info;
	gfx_plane_info->dmabuf_id = ret;
	dmabuf_obj->dmabuf_id = ret;

	dmabuf_obj->initref = true;

	kref_init(&dmabuf_obj->kref);

	mutex_lock(&vgpu->dmabuf_lock);
	if (intel_gvt_hypervisor_get_vfio_device(vgpu)) {
		gvt_vgpu_err("get vfio device failed\n");
		mutex_unlock(&vgpu->dmabuf_lock);
		goto out_free_info;
	}
	mutex_unlock(&vgpu->dmabuf_lock);

	update_fb_info(gfx_plane_info, &fb_info);

	INIT_LIST_HEAD(&dmabuf_obj->list);
	mutex_lock(&vgpu->dmabuf_lock);
	list_add_tail(&dmabuf_obj->list, &vgpu->dmabuf_obj_list_head);
	mutex_unlock(&vgpu->dmabuf_lock);

	gvt_dbg_dpy("vgpu%d: %s new dmabuf_obj ref %d, id %d\n", vgpu->id,
		    __func__, kref_read(&dmabuf_obj->kref), ret);

	return 0;

out_free_info:
	kfree(dmabuf_obj->info);
out_free_dmabuf:
	kfree(dmabuf_obj);
out:
	/* ENODEV means the plane isn't ready, which might be a normal case. */
	return (ret == -ENODEV) ? 0 : ret;
}

/* To associate an exposed dmabuf with the dmabuf_obj */
int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
{
	struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;
	struct drm_i915_gem_object *obj;
	struct dma_buf *dmabuf;
	int dmabuf_fd;
	int ret = 0;

	mutex_lock(&vgpu->dmabuf_lock);

	dmabuf_obj = pick_dmabuf_by_num(vgpu, dmabuf_id);
	if (dmabuf_obj == NULL) {
		gvt_vgpu_err("invalid dmabuf id:%d\n", dmabuf_id);
		ret = -EINVAL;
		goto out;
	}

	obj = vgpu_create_gem(dev, dmabuf_obj->info);
	if (obj == NULL) {
		gvt_vgpu_err("create gvt gem obj failed:%d\n", vgpu->id);
		ret = -ENOMEM;
		goto out;
	}

	obj->gvt_info = dmabuf_obj->info;

	dmabuf = i915_gem_prime_export(dev, &obj->base, DRM_CLOEXEC | DRM_RDWR);
	if (IS_ERR(dmabuf)) {
		gvt_vgpu_err("export dma-buf failed\n");
		ret = PTR_ERR(dmabuf);
		goto out_free_gem;
	}
	obj->base.dma_buf = dmabuf;

	i915_gem_object_put(obj);

	ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR);
	if (ret < 0) {
		gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret);
		goto out_free_dmabuf;
	}
	dmabuf_fd = ret;

	dmabuf_obj_get(dmabuf_obj);

	if (dmabuf_obj->initref) {
		dmabuf_obj->initref = false;
		dmabuf_obj_put(dmabuf_obj);
	}

	mutex_unlock(&vgpu->dmabuf_lock);

	gvt_dbg_dpy("vgpu%d: dmabuf:%d, dmabuf ref %d, fd:%d\n"
		    "        file count: %ld, GEM ref: %d\n",
		    vgpu->id, dmabuf_obj->dmabuf_id,
		    kref_read(&dmabuf_obj->kref),
		    dmabuf_fd,
		    file_count(dmabuf->file),
		    kref_read(&obj->base.refcount));

	return dmabuf_fd;

out_free_dmabuf:
	dma_buf_put(dmabuf);
out_free_gem:
	i915_gem_object_put(obj);
out:
	mutex_unlock(&vgpu->dmabuf_lock);
	return ret;
}
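
/* Tear down all dmabuf_objs a vGPU still owns when the vGPU is destroyed;
 * buffers that are still referenced by userspace are freed later by the
 * kref release callback.
 */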
void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;

	mutex_lock(&vgpu->dmabuf_lock);
	list_for_each_safe(pos, n, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
						list);
		dmabuf_obj->vgpu = NULL;

		idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id);
		intel_gvt_hypervisor_put_vfio_device(vgpu);
		list_del(pos);

		/* dmabuf_obj might be freed in dmabuf_obj_put */
		if (dmabuf_obj->initref) {
			dmabuf_obj->initref = false;
			dmabuf_obj_put(dmabuf_obj);
		}
	}
	mutex_unlock(&vgpu->dmabuf_lock);
}