// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 *  Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <drm/drmP.h>

#if defined(CONFIG_X86)
#include <drm/drm_cache.h>
#endif
#include <linux/errno.h>
#include <linux/mm.h>

#include <asm/xen/hypervisor.h>
#include <xen/balloon.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/interface/io/ring.h>
#include <xen/interface/io/displif.h>

#include "xen_drm_front.h"
#include "xen_drm_front_shbuf.h"
struct xen_drm_front_shbuf_ops {
	/*
	 * Calculate number of grefs required to handle this buffer,
	 * e.g. if grefs are required for the page directory only or for
	 * the buffer pages as well.
	 */
	void (*calc_num_grefs)(struct xen_drm_front_shbuf *buf);
	/* Fill page directory according to para-virtual display protocol. */
	void (*fill_page_dir)(struct xen_drm_front_shbuf *buf);
	/* Claim grant references for the pages of the buffer. */
	int (*grant_refs_for_buffer)(struct xen_drm_front_shbuf *buf,
				     grant_ref_t *priv_gref_head,
				     int gref_idx);
	/* Map grant references of the buffer. */
	int (*map)(struct xen_drm_front_shbuf *buf);
	/* Unmap grant references of the buffer. */
	int (*unmap)(struct xen_drm_front_shbuf *buf);
};
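/*
 * Two implementations of these ops are provided at the end of this file:
 * backend_ops, for buffers allocated by the backend and mapped by the
 * frontend, and local_ops, for buffers allocated by the frontend and
 * granted to the backend.
 */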
grant_ref_t xen_drm_front_shbuf_get_dir_start(struct xen_drm_front_shbuf *buf)
{
	if (!buf->grefs)
		return GRANT_INVALID_REF;

	return buf->grefs[0];
}
int xen_drm_front_shbuf_map(struct xen_drm_front_shbuf *buf)
{
	if (buf->ops->map)
		return buf->ops->map(buf);

	/* no need to map own grant references */
	return 0;
}
int xen_drm_front_shbuf_unmap(struct xen_drm_front_shbuf *buf)
{
	if (buf->ops->unmap)
		return buf->ops->unmap(buf);

	/* no need to unmap own grant references */
	return 0;
}
void xen_drm_front_shbuf_flush(struct xen_drm_front_shbuf *buf)
{
#if defined(CONFIG_X86)
	drm_clflush_pages(buf->pages, buf->num_pages);
#endif
}
void xen_drm_front_shbuf_free(struct xen_drm_front_shbuf *buf)
{
	if (buf->grefs) {
		int i;

		for (i = 0; i < buf->num_grefs; i++)
			if (buf->grefs[i] != GRANT_INVALID_REF)
				gnttab_end_foreign_access(buf->grefs[i],
							  0, 0UL);
	}
	kfree(buf->grefs);
	kfree(buf->directory);
	kfree(buf);
}
/*
 * number of grefs a page can hold with respect to the
 * struct xendispl_page_directory header
 */
#define XEN_DRM_NUM_GREFS_PER_PAGE ((PAGE_SIZE - \
		offsetof(struct xendispl_page_directory, gref)) / \
		sizeof(grant_ref_t))
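/*
 * For example, assuming 4 KiB pages and 32-bit grant references, the
 * directory header is a single grant_ref_t (gref_dir_next_page, 4 bytes),
 * so one directory page holds (4096 - 4) / 4 = 1023 grant references.
 */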
static int get_num_pages_dir(struct xen_drm_front_shbuf *buf)
{
	/* number of pages the page directory consumes itself */
	return DIV_ROUND_UP(buf->num_pages, XEN_DRM_NUM_GREFS_PER_PAGE);
}
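/*
 * E.g. with 4 KiB pages, a 1 MiB buffer spans 256 pages, which fits into
 * a single page directory page (256 <= 1023).
 */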
static void backend_calc_num_grefs(struct xen_drm_front_shbuf *buf)
{
	/* only for pages the page directory consumes itself */
	buf->num_grefs = get_num_pages_dir(buf);
}
static void guest_calc_num_grefs(struct xen_drm_front_shbuf *buf)
{
	/*
	 * number of pages the page directory consumes itself
	 * plus grefs for the buffer pages
	 */
	buf->num_grefs = get_num_pages_dir(buf) + buf->num_pages;
}
#define xen_page_to_vaddr(page) \
		((uintptr_t)pfn_to_kaddr(page_to_xen_pfn(page)))
static int backend_unmap(struct xen_drm_front_shbuf *buf)
{
	struct gnttab_unmap_grant_ref *unmap_ops;
	int i, ret;

	if (!buf->pages || !buf->backend_map_handles || !buf->grefs)
		return 0;

	unmap_ops = kcalloc(buf->num_pages, sizeof(*unmap_ops),
			    GFP_KERNEL);
	if (!unmap_ops) {
		DRM_ERROR("Failed to get memory while unmapping\n");
		return -ENOMEM;
	}

	for (i = 0; i < buf->num_pages; i++) {
		phys_addr_t addr;

		addr = xen_page_to_vaddr(buf->pages[i]);
		gnttab_set_unmap_op(&unmap_ops[i], addr, GNTMAP_host_map,
				    buf->backend_map_handles[i]);
	}

	ret = gnttab_unmap_refs(unmap_ops, NULL, buf->pages,
				buf->num_pages);

	for (i = 0; i < buf->num_pages; i++) {
		if (unlikely(unmap_ops[i].status != GNTST_okay))
			DRM_ERROR("Failed to unmap page %d: %d\n",
				  i, unmap_ops[i].status);
	}

	if (ret)
		DRM_ERROR("Failed to unmap grant references, ret %d", ret);

	kfree(unmap_ops);
	kfree(buf->backend_map_handles);
	buf->backend_map_handles = NULL;
	return ret;
}
static int backend_map(struct xen_drm_front_shbuf *buf)
{
	struct gnttab_map_grant_ref *map_ops = NULL;
	unsigned char *ptr;
	int ret, cur_gref, cur_dir_page, cur_page, grefs_left;

	map_ops = kcalloc(buf->num_pages, sizeof(*map_ops), GFP_KERNEL);
	if (!map_ops)
		return -ENOMEM;

	buf->backend_map_handles = kcalloc(buf->num_pages,
					   sizeof(*buf->backend_map_handles),
					   GFP_KERNEL);
	if (!buf->backend_map_handles) {
		kfree(map_ops);
		return -ENOMEM;
	}

	/*
	 * read page directory to get grefs from the backend: for an external
	 * buffer we only allocate buf->grefs for the page directory,
	 * so buf->num_grefs holds the number of pages in the page directory
	 * itself
	 */
	ptr = buf->directory;
	grefs_left = buf->num_pages;
	cur_page = 0;
	for (cur_dir_page = 0; cur_dir_page < buf->num_grefs; cur_dir_page++) {
		struct xendispl_page_directory *page_dir =
				(struct xendispl_page_directory *)ptr;
		int to_copy = XEN_DRM_NUM_GREFS_PER_PAGE;

		if (to_copy > grefs_left)
			to_copy = grefs_left;

		for (cur_gref = 0; cur_gref < to_copy; cur_gref++) {
			phys_addr_t addr;

			addr = xen_page_to_vaddr(buf->pages[cur_page]);
			gnttab_set_map_op(&map_ops[cur_page], addr,
					  GNTMAP_host_map,
					  page_dir->gref[cur_gref],
					  buf->xb_dev->otherend_id);
			cur_page++;
		}

		grefs_left -= to_copy;
		ptr += PAGE_SIZE;
	}
	ret = gnttab_map_refs(map_ops, NULL, buf->pages, buf->num_pages);

	/* save handles even if error, so we can unmap */
	for (cur_page = 0; cur_page < buf->num_pages; cur_page++) {
		buf->backend_map_handles[cur_page] = map_ops[cur_page].handle;
		if (unlikely(map_ops[cur_page].status != GNTST_okay))
			DRM_ERROR("Failed to map page %d: %d\n",
				  cur_page, map_ops[cur_page].status);
	}

	if (ret) {
		DRM_ERROR("Failed to map grant references, ret %d", ret);
		backend_unmap(buf);
	}

	kfree(map_ops);
	return ret;
}
static void backend_fill_page_dir(struct xen_drm_front_shbuf *buf)
{
	struct xendispl_page_directory *page_dir;
	unsigned char *ptr;
	int i, num_pages_dir;

	ptr = buf->directory;
	num_pages_dir = get_num_pages_dir(buf);

	/* fill only grefs for the page directory itself */
	for (i = 0; i < num_pages_dir - 1; i++) {
		page_dir = (struct xendispl_page_directory *)ptr;

		page_dir->gref_dir_next_page = buf->grefs[i + 1];
		ptr += PAGE_SIZE;
	}
	/* the last page must say there are no more pages */
	page_dir = (struct xendispl_page_directory *)ptr;
	page_dir->gref_dir_next_page = GRANT_INVALID_REF;
}
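/*
 * For backend-allocated buffers only the gref_dir_next_page chain links
 * are written here; the backend fills the gref[] arrays of the directory
 * pages with grants for its own buffer pages, which backend_map() then
 * reads back and maps.
 */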
static void guest_fill_page_dir(struct xen_drm_front_shbuf *buf)
{
	unsigned char *ptr;
	int cur_gref, grefs_left, to_copy, i, num_pages_dir;

	ptr = buf->directory;
	num_pages_dir = get_num_pages_dir(buf);

	/*
	 * while copying, skip grefs at start; they are for pages
	 * granted for the page directory itself
	 */
	cur_gref = num_pages_dir;
	grefs_left = buf->num_pages;
	for (i = 0; i < num_pages_dir; i++) {
		struct xendispl_page_directory *page_dir =
				(struct xendispl_page_directory *)ptr;

		if (grefs_left <= XEN_DRM_NUM_GREFS_PER_PAGE) {
			to_copy = grefs_left;
			page_dir->gref_dir_next_page = GRANT_INVALID_REF;
		} else {
			to_copy = XEN_DRM_NUM_GREFS_PER_PAGE;
			page_dir->gref_dir_next_page = buf->grefs[i + 1];
		}
		memcpy(&page_dir->gref, &buf->grefs[cur_gref],
		       to_copy * sizeof(grant_ref_t));
		ptr += PAGE_SIZE;
		grefs_left -= to_copy;
		cur_gref += to_copy;
	}
}
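/*
 * Resulting layout for a front-allocated buffer: buf->grefs[0 ..
 * num_pages_dir - 1] reference the directory pages themselves, and
 * buf->grefs[num_pages_dir ..] reference the buffer pages; the latter
 * are copied into the gref[] arrays of the chained directory pages above.
 */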
static int guest_grant_refs_for_buffer(struct xen_drm_front_shbuf *buf,
				       grant_ref_t *priv_gref_head,
				       int gref_idx)
{
	int i, cur_ref, otherend_id;

	otherend_id = buf->xb_dev->otherend_id;
	for (i = 0; i < buf->num_pages; i++) {
		cur_ref = gnttab_claim_grant_reference(priv_gref_head);
		if (cur_ref < 0)
			return cur_ref;

		gnttab_grant_foreign_access_ref(cur_ref, otherend_id,
						xen_page_to_gfn(buf->pages[i]),
						0);
		buf->grefs[gref_idx++] = cur_ref;
	}
	return 0;
}
static int grant_references(struct xen_drm_front_shbuf *buf)
{
	grant_ref_t priv_gref_head;
	int ret, i, j, cur_ref;
	int otherend_id, num_pages_dir;

	ret = gnttab_alloc_grant_references(buf->num_grefs, &priv_gref_head);
	if (ret < 0) {
		DRM_ERROR("Cannot allocate grant references\n");
		return ret;
	}

	otherend_id = buf->xb_dev->otherend_id;
	j = 0;
	num_pages_dir = get_num_pages_dir(buf);
	for (i = 0; i < num_pages_dir; i++) {
		unsigned long frame;

		cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
		if (cur_ref < 0)
			return cur_ref;

		frame = xen_page_to_gfn(virt_to_page(buf->directory +
					PAGE_SIZE * i));
		gnttab_grant_foreign_access_ref(cur_ref, otherend_id, frame, 0);
		buf->grefs[j++] = cur_ref;
	}

	if (buf->ops->grant_refs_for_buffer) {
		ret = buf->ops->grant_refs_for_buffer(buf, &priv_gref_head, j);
		if (ret)
			return ret;
	}

	gnttab_free_grant_references(priv_gref_head);

	return 0;
}
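/*
 * All buf->num_grefs references are claimed from a single pool
 * (priv_gref_head): the directory pages are granted here first, then
 * grant_refs_for_buffer() (local_ops only) continues at index j with
 * grants for the buffer pages.
 */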
static int alloc_storage(struct xen_drm_front_shbuf *buf)
{
	buf->grefs = kcalloc(buf->num_grefs, sizeof(*buf->grefs), GFP_KERNEL);
	if (!buf->grefs)
		return -ENOMEM;

	buf->directory = kcalloc(get_num_pages_dir(buf), PAGE_SIZE, GFP_KERNEL);
	if (!buf->directory)
		return -ENOMEM;

	return 0;
}
/*
 * For backend-allocated buffers we don't need grant_refs_for_buffer, as
 * those grant references are allocated at the backend side.
 */
static const struct xen_drm_front_shbuf_ops backend_ops = {
	.calc_num_grefs = backend_calc_num_grefs,
	.fill_page_dir = backend_fill_page_dir,
	.map = backend_map,
	.unmap = backend_unmap
};
/* For locally granted references we do not need to map/unmap the references. */
static const struct xen_drm_front_shbuf_ops local_ops = {
	.calc_num_grefs = guest_calc_num_grefs,
	.fill_page_dir = guest_fill_page_dir,
	.grant_refs_for_buffer = guest_grant_refs_for_buffer,
};
struct xen_drm_front_shbuf *
xen_drm_front_shbuf_alloc(struct xen_drm_front_shbuf_cfg *cfg)
{
	struct xen_drm_front_shbuf *buf;
	int ret;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	if (cfg->be_alloc)
		buf->ops = &backend_ops;
	else
		buf->ops = &local_ops;

	buf->xb_dev = cfg->xb_dev;
	buf->num_pages = DIV_ROUND_UP(cfg->size, PAGE_SIZE);
	buf->pages = cfg->pages;

	buf->ops->calc_num_grefs(buf);

	ret = alloc_storage(buf);
	if (ret)
		goto fail;

	ret = grant_references(buf);
	if (ret)
		goto fail;

	buf->ops->fill_page_dir(buf);

	return buf;

fail:
	xen_drm_front_shbuf_free(buf);
	return ERR_PTR(ret);
}
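/*
 * Typical usage, as a sketch only (the names front_info, pages and size
 * stand in for the caller's context; the actual call sites live in
 * xen_drm_front.c):
 *
 *	struct xen_drm_front_shbuf_cfg buf_cfg = {
 *		.xb_dev = front_info->xb_dev,
 *		.pages = pages,
 *		.size = size,
 *		.be_alloc = front_info->cfg.be_alloc,
 *	};
 *	struct xen_drm_front_shbuf *shbuf;
 *
 *	shbuf = xen_drm_front_shbuf_alloc(&buf_cfg);
 *	if (IS_ERR(shbuf))
 *		return PTR_ERR(shbuf);
 *
 * The gref returned by xen_drm_front_shbuf_get_dir_start() is then passed
 * to the backend and, for backend-allocated buffers,
 * xen_drm_front_shbuf_map() maps the grants published by the backend.
 */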