// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 *  Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem.h>

#include <linux/of_device.h>

#include <xen/platform_pci.h>
#include <xen/xen.h>
#include <xen/xenbus.h>

#include <xen/interface/io/displif.h>

#include "xen_drm_front.h"
#include "xen_drm_front_cfg.h"
#include "xen_drm_front_evtchnl.h"
#include "xen_drm_front_gem.h"
#include "xen_drm_front_kms.h"
#include "xen_drm_front_shbuf.h"
struct xen_drm_front_dbuf {
	struct list_head list;
	u64 dbuf_cookie;
	u64 fb_cookie;

	struct xen_drm_front_shbuf *shbuf;
};
static int dbuf_add_to_list(struct xen_drm_front_info *front_info,
			    struct xen_drm_front_shbuf *shbuf, u64 dbuf_cookie)
{
	struct xen_drm_front_dbuf *dbuf;

	dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL);
	if (!dbuf)
		return -ENOMEM;

	dbuf->dbuf_cookie = dbuf_cookie;
	dbuf->shbuf = shbuf;
	list_add(&dbuf->list, &front_info->dbuf_list);
	return 0;
}
static struct xen_drm_front_dbuf *dbuf_get(struct list_head *dbuf_list,
					   u64 dbuf_cookie)
{
	struct xen_drm_front_dbuf *buf, *q;

	list_for_each_entry_safe(buf, q, dbuf_list, list)
		if (buf->dbuf_cookie == dbuf_cookie)
			return buf;

	return NULL;
}
static void dbuf_flush_fb(struct list_head *dbuf_list, u64 fb_cookie)
{
	struct xen_drm_front_dbuf *buf, *q;

	list_for_each_entry_safe(buf, q, dbuf_list, list)
		if (buf->fb_cookie == fb_cookie)
			xen_drm_front_shbuf_flush(buf->shbuf);
}
static void dbuf_free(struct list_head *dbuf_list, u64 dbuf_cookie)
{
	struct xen_drm_front_dbuf *buf, *q;

	list_for_each_entry_safe(buf, q, dbuf_list, list)
		if (buf->dbuf_cookie == dbuf_cookie) {
			list_del(&buf->list);
			xen_drm_front_shbuf_unmap(buf->shbuf);
			xen_drm_front_shbuf_free(buf->shbuf);
			kfree(buf);
			break;
		}
}
static void dbuf_free_all(struct list_head *dbuf_list)
{
	struct xen_drm_front_dbuf *buf, *q;

	list_for_each_entry_safe(buf, q, dbuf_list, list) {
		list_del(&buf->list);
		xen_drm_front_shbuf_unmap(buf->shbuf);
		xen_drm_front_shbuf_free(buf->shbuf);
		kfree(buf);
	}
}
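/*
 * Request helpers: be_prepare_req() claims the next slot on the shared
 * ring and must be called with front_info->io_lock held, as all callers
 * below do; be_stream_do_io() pushes the request to the backend and
 * be_stream_wait_io() waits for its response with a time-out.
 */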
static struct xendispl_req *
be_prepare_req(struct xen_drm_front_evtchnl *evtchnl, u8 operation)
{
	struct xendispl_req *req;

	req = RING_GET_REQUEST(&evtchnl->u.req.ring,
			       evtchnl->u.req.ring.req_prod_pvt);
	req->operation = operation;
	req->id = evtchnl->evt_next_id++;
	evtchnl->evt_id = req->id;
	return req;
}
static int be_stream_do_io(struct xen_drm_front_evtchnl *evtchnl,
			   struct xendispl_req *req)
{
	reinit_completion(&evtchnl->u.req.completion);
	if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
		return -EIO;

	xen_drm_front_evtchnl_flush(evtchnl);
	return 0;
}
static int be_stream_wait_io(struct xen_drm_front_evtchnl *evtchnl)
{
	if (wait_for_completion_timeout(&evtchnl->u.req.completion,
			msecs_to_jiffies(XEN_DRM_FRONT_WAIT_BACK_MS)) <= 0)
		return -ETIMEDOUT;

	return evtchnl->u.req.resp_status;
}
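/*
 * All backend operations below follow the same pattern: serialize
 * requests on the channel with req_io_lock, fill and push the request
 * under io_lock (which protects the shared ring), then drop the spin
 * lock and wait for the backend's response.
 */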
int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
			   u32 x, u32 y, u32 width, u32 height,
			   u32 bpp, u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_info *front_info;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	front_info = pipeline->drm_info->front_info;
	evtchnl = &front_info->evt_pairs[pipeline->index].req;
	if (unlikely(!evtchnl))
		return -EIO;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_SET_CONFIG);
	req->op.set_config.x = x;
	req->op.set_config.y = y;
	req->op.set_config.width = width;
	req->op.set_config.height = height;
	req->op.set_config.bpp = bpp;
	req->op.set_config.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}
int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
			      u64 dbuf_cookie, u32 width, u32 height,
			      u32 bpp, u64 size, struct page **pages)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_shbuf *shbuf;
	struct xendispl_req *req;
	struct xen_drm_front_shbuf_cfg buf_cfg;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	memset(&buf_cfg, 0, sizeof(buf_cfg));
	buf_cfg.xb_dev = front_info->xb_dev;
	buf_cfg.pages = pages;
	buf_cfg.size = size;
	buf_cfg.be_alloc = front_info->cfg.be_alloc;

	shbuf = xen_drm_front_shbuf_alloc(&buf_cfg);
	if (IS_ERR(shbuf))
		return PTR_ERR(shbuf);

	ret = dbuf_add_to_list(front_info, shbuf, dbuf_cookie);
	if (ret < 0) {
		xen_drm_front_shbuf_free(shbuf);
		return ret;
	}

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_CREATE);
	req->op.dbuf_create.gref_directory =
			xen_drm_front_shbuf_get_dir_start(shbuf);
	req->op.dbuf_create.buffer_sz = size;
	req->op.dbuf_create.dbuf_cookie = dbuf_cookie;
	req->op.dbuf_create.width = width;
	req->op.dbuf_create.height = height;
	req->op.dbuf_create.bpp = bpp;
	if (buf_cfg.be_alloc)
		req->op.dbuf_create.flags |= XENDISPL_DBUF_FLG_REQ_ALLOC;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret < 0)
		goto fail;

	ret = be_stream_wait_io(evtchnl);
	if (ret < 0)
		goto fail;

	ret = xen_drm_front_shbuf_map(shbuf);
	if (ret < 0)
		goto fail;

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return 0;

fail:
	mutex_unlock(&evtchnl->u.req.req_io_lock);
	dbuf_free(&front_info->dbuf_list, dbuf_cookie);
	return ret;
}
static int xen_drm_front_dbuf_destroy(struct xen_drm_front_info *front_info,
				      u64 dbuf_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	bool be_alloc;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	be_alloc = front_info->cfg.be_alloc;

	/*
	 * For the backend allocated buffer release references now, so backend
	 * can free the buffer.
	 */
	if (be_alloc)
		dbuf_free(&front_info->dbuf_list, dbuf_cookie);

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_DESTROY);
	req->op.dbuf_destroy.dbuf_cookie = dbuf_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	/*
	 * Do this regardless of communication status with the backend:
	 * if we cannot remove remote resources remove what we can locally.
	 */
	if (!be_alloc)
		dbuf_free(&front_info->dbuf_list, dbuf_cookie);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}
int xen_drm_front_fb_attach(struct xen_drm_front_info *front_info,
			    u64 dbuf_cookie, u64 fb_cookie, u32 width,
			    u32 height, u32 pixel_format)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_dbuf *buf;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	buf = dbuf_get(&front_info->dbuf_list, dbuf_cookie);
	if (!buf)
		return -EINVAL;

	buf->fb_cookie = fb_cookie;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_FB_ATTACH);
	req->op.fb_attach.dbuf_cookie = dbuf_cookie;
	req->op.fb_attach.fb_cookie = fb_cookie;
	req->op.fb_attach.width = width;
	req->op.fb_attach.height = height;
	req->op.fb_attach.pixel_format = pixel_format;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}
int xen_drm_front_fb_detach(struct xen_drm_front_info *front_info,
			    u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_FB_DETACH);
	req->op.fb_detach.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}
int xen_drm_front_page_flip(struct xen_drm_front_info *front_info,
			    int conn_idx, u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	if (unlikely(conn_idx >= front_info->num_evt_pairs))
		return -EINVAL;

	dbuf_flush_fb(&front_info->dbuf_list, fb_cookie);
	evtchnl = &front_info->evt_pairs[conn_idx].req;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_PG_FLIP);
	req->op.pg_flip.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}
void xen_drm_front_on_frame_done(struct xen_drm_front_info *front_info,
				 int conn_idx, u64 fb_cookie)
{
	struct xen_drm_front_drm_info *drm_info = front_info->drm_info;

	if (unlikely(conn_idx >= front_info->cfg.num_connectors))
		return;

	xen_drm_front_kms_on_frame_done(&drm_info->pipeline[conn_idx],
					fb_cookie);
}
static int xen_drm_drv_dumb_create(struct drm_file *filp,
				   struct drm_device *dev,
				   struct drm_mode_create_dumb *args)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct drm_gem_object *obj;
	int ret;

	/*
	 * Dumb creation is a two stage process: first we create a fully
	 * constructed GEM object which is communicated to the backend, and
	 * only after that we can create GEM's handle. This is done this way
	 * because of possible races: once a handle is created it becomes
	 * immediately visible to user-space, which may try to access the
	 * object before its pages are set up.
	 * For details also see drm_gem_handle_create.
	 */
	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	args->size = args->pitch * args->height;

	obj = xen_drm_front_gem_create(dev, args->size);
	if (IS_ERR_OR_NULL(obj)) {
		ret = PTR_ERR(obj);
		goto fail;
	}

	ret = xen_drm_front_dbuf_create(drm_info->front_info,
					xen_drm_front_dbuf_to_cookie(obj),
					args->width, args->height, args->bpp,
					args->size,
					xen_drm_front_gem_get_pages(obj));
	if (ret)
		goto fail_backend;

	/* This is the tail of GEM object creation */
	ret = drm_gem_handle_create(filp, obj, &args->handle);
	if (ret)
		goto fail_handle;

	/* Drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(obj);
	return 0;

fail_handle:
	xen_drm_front_dbuf_destroy(drm_info->front_info,
				   xen_drm_front_dbuf_to_cookie(obj));
fail_backend:
	/* drop reference from allocate */
	drm_gem_object_put_unlocked(obj);
fail:
	DRM_ERROR("Failed to create dumb buffer: %d\n", ret);
	return ret;
}
static void xen_drm_drv_free_object_unlocked(struct drm_gem_object *obj)
{
	struct xen_drm_front_drm_info *drm_info = obj->dev->dev_private;
	int idx;

	if (drm_dev_enter(obj->dev, &idx)) {
		xen_drm_front_dbuf_destroy(drm_info->front_info,
					   xen_drm_front_dbuf_to_cookie(obj));
		drm_dev_exit(idx);
	} else {
		dbuf_free(&drm_info->front_info->dbuf_list,
			  xen_drm_front_dbuf_to_cookie(obj));
	}

	xen_drm_front_gem_free_object_unlocked(obj);
}
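/*
 * Called when the last reference to the DRM device is dropped. Only now,
 * with all buffers gone, is it safe to let a buffer-allocating backend
 * reinitialize: xen_drm_drv_fini() defers that state switch to here.
 */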
static void xen_drm_drv_release(struct drm_device *dev)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_drm_front_info *front_info = drm_info->front_info;

	xen_drm_front_kms_fini(drm_info);

	drm_atomic_helper_shutdown(dev);
	drm_mode_config_cleanup(dev);

	drm_dev_fini(dev);
	kfree(dev);

	if (front_info->cfg.be_alloc)
		xenbus_switch_state(front_info->xb_dev,
				    XenbusStateInitialising);

	kfree(drm_info);
}
static const struct file_operations xen_drm_dev_fops = {
	.owner          = THIS_MODULE,
	.open           = drm_open,
	.release        = drm_release,
	.unlocked_ioctl = drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = drm_compat_ioctl,
#endif
	.poll           = drm_poll,
	.read           = drm_read,
	.llseek         = no_llseek,
	.mmap           = xen_drm_front_gem_mmap,
};
static const struct vm_operations_struct xen_drm_drv_vm_ops = {
	.open           = drm_gem_vm_open,
	.close          = drm_gem_vm_close,
};
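/*
 * GEM and PRIME callbacks are delegated to the xen_drm_front_gem_*
 * helpers; the generic drm_gem_prime_* helpers route buffer sharing
 * through them.
 */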
static struct drm_driver xen_drm_driver = {
	.driver_features           = DRIVER_GEM | DRIVER_MODESET |
				     DRIVER_PRIME | DRIVER_ATOMIC,
	.release                   = xen_drm_drv_release,
	.gem_vm_ops                = &xen_drm_drv_vm_ops,
	.gem_free_object_unlocked  = xen_drm_drv_free_object_unlocked,
	.prime_handle_to_fd        = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle        = drm_gem_prime_fd_to_handle,
	.gem_prime_import          = drm_gem_prime_import,
	.gem_prime_export          = drm_gem_prime_export,
	.gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table,
	.gem_prime_get_sg_table    = xen_drm_front_gem_get_sg_table,
	.gem_prime_vmap            = xen_drm_front_gem_prime_vmap,
	.gem_prime_vunmap          = xen_drm_front_gem_prime_vunmap,
	.gem_prime_mmap            = xen_drm_front_gem_prime_mmap,
	.dumb_create               = xen_drm_drv_dumb_create,
	.fops                      = &xen_drm_dev_fops,
	.name                      = "xendrm-du",
	.desc                      = "Xen PV DRM Display Unit",
	.date                      = "20180221",
	.major                     = 1,
	.minor                     = 0,
};
static int xen_drm_drv_init(struct xen_drm_front_info *front_info)
{
	struct device *dev = &front_info->xb_dev->dev;
	struct xen_drm_front_drm_info *drm_info;
	struct drm_device *drm_dev;
	int ret;

	DRM_INFO("Creating %s\n", xen_drm_driver.desc);

	drm_info = kzalloc(sizeof(*drm_info), GFP_KERNEL);
	if (!drm_info) {
		ret = -ENOMEM;
		goto fail;
	}

	drm_info->front_info = front_info;
	front_info->drm_info = drm_info;

	drm_dev = drm_dev_alloc(&xen_drm_driver, dev);
	if (IS_ERR(drm_dev)) {
		ret = PTR_ERR(drm_dev);
		goto fail;
	}

	drm_info->drm_dev = drm_dev;

	drm_dev->dev_private = drm_info;

	ret = xen_drm_front_kms_init(drm_info);
	if (ret) {
		DRM_ERROR("Failed to initialize DRM/KMS, ret %d\n", ret);
		goto fail_modeset;
	}

	ret = drm_dev_register(drm_dev, 0);
	if (ret)
		goto fail_register;

	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
		 xen_drm_driver.name, xen_drm_driver.major,
		 xen_drm_driver.minor, xen_drm_driver.patchlevel,
		 xen_drm_driver.date, drm_dev->primary->index);

	return 0;

fail_register:
	drm_dev_unregister(drm_dev);
fail_modeset:
	drm_kms_helper_poll_fini(drm_dev);
	drm_mode_config_cleanup(drm_dev);
fail:
	kfree(drm_info);
	return ret;
}
static void xen_drm_drv_fini(struct xen_drm_front_info *front_info)
{
	struct xen_drm_front_drm_info *drm_info = front_info->drm_info;
	struct drm_device *dev;

	if (!drm_info)
		return;

	dev = drm_info->drm_dev;
	if (!dev)
		return;

	/* Nothing to do if device is already unplugged */
	if (drm_dev_is_unplugged(dev))
		return;

	drm_kms_helper_poll_fini(dev);
	drm_dev_unplug(dev);

	front_info->drm_info = NULL;

	xen_drm_front_evtchnl_free_all(front_info);
	dbuf_free_all(&front_info->dbuf_list);

	/*
	 * If we are not using backend allocated buffers, then tell the
	 * backend we are ready to (re)initialize. Otherwise, wait for
	 * drm_driver.release.
	 */
	if (!front_info->cfg.be_alloc)
		xenbus_switch_state(front_info->xb_dev,
				    XenbusStateInitialising);
}
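/*
 * XenBus handshake: once the backend reaches InitWait we read the
 * configuration, create and publish the event channels, and report
 * Initialised; when the backend connects we bring up the DRM device.
 */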
static int displback_initwait(struct xen_drm_front_info *front_info)
{
	struct xen_drm_front_cfg *cfg = &front_info->cfg;
	int ret;

	cfg->front_info = front_info;
	ret = xen_drm_front_cfg_card(front_info, cfg);
	if (ret < 0)
		return ret;

	DRM_INFO("Have %d connector(s)\n", cfg->num_connectors);
	/* Create event channels for all connectors and publish */
	ret = xen_drm_front_evtchnl_create_all(front_info);
	if (ret < 0)
		return ret;

	return xen_drm_front_evtchnl_publish_all(front_info);
}
static int displback_connect(struct xen_drm_front_info *front_info)
{
	xen_drm_front_evtchnl_set_state(front_info, EVTCHNL_STATE_CONNECTED);
	return xen_drm_drv_init(front_info);
}
static void displback_disconnect(struct xen_drm_front_info *front_info)
{
	if (!front_info->drm_info)
		return;

	/* Tell the backend to wait until we release the DRM driver. */
	xenbus_switch_state(front_info->xb_dev, XenbusStateReconfiguring);

	xen_drm_drv_fini(front_info);
}
static void displback_changed(struct xenbus_device *xb_dev,
			      enum xenbus_state backend_state)
{
	struct xen_drm_front_info *front_info = dev_get_drvdata(&xb_dev->dev);
	int ret;

	DRM_DEBUG("Backend state is %s, front is %s\n",
		  xenbus_strstate(backend_state),
		  xenbus_strstate(xb_dev->state));

	switch (backend_state) {
	case XenbusStateReconfiguring:
		/* fall through */
	case XenbusStateReconfigured:
		/* fall through */
	case XenbusStateInitialised:
		break;

	case XenbusStateInitialising:
		if (xb_dev->state == XenbusStateReconfiguring)
			break;

		/* recovering after backend unexpected closure */
		displback_disconnect(front_info);
		break;

	case XenbusStateInitWait:
		if (xb_dev->state == XenbusStateReconfiguring)
			break;

		/* recovering after backend unexpected closure */
		displback_disconnect(front_info);
		if (xb_dev->state != XenbusStateInitialising)
			break;

		ret = displback_initwait(front_info);
		if (ret < 0)
			xenbus_dev_fatal(xb_dev, ret, "initializing frontend");
		else
			xenbus_switch_state(xb_dev, XenbusStateInitialised);
		break;

	case XenbusStateConnected:
		if (xb_dev->state != XenbusStateInitialised)
			break;

		ret = displback_connect(front_info);
		if (ret < 0) {
			displback_disconnect(front_info);
			xenbus_dev_fatal(xb_dev, ret, "connecting backend");
		} else {
			xenbus_switch_state(xb_dev, XenbusStateConnected);
		}
		break;

	case XenbusStateClosing:
		/*
		 * In this state the backend starts freeing resources,
		 * so let it go into closed state, so we can also
		 * remove ours.
		 */
		break;

	case XenbusStateUnknown:
		/* fall through */
	case XenbusStateClosed:
		if (xb_dev->state == XenbusStateClosed)
			break;

		displback_disconnect(front_info);
		break;
	}
}
static int xen_drv_probe(struct xenbus_device *xb_dev,
			 const struct xenbus_device_id *id)
{
	struct xen_drm_front_info *front_info;
	struct device *dev = &xb_dev->dev;
	int ret;

	/*
	 * The device is not spawned from a device tree, so arch_setup_dma_ops
	 * is not called, thus leaving the device with dummy DMA ops.
	 * This makes the device return error on PRIME buffer import, which
	 * is not correct: to fix this call of_dma_configure() with a NULL
	 * node to set default DMA ops.
	 */
	dev->coherent_dma_mask = DMA_BIT_MASK(32);
	ret = of_dma_configure(dev, NULL, true);
	if (ret < 0) {
		DRM_ERROR("Cannot setup DMA ops, ret %d", ret);
		return ret;
	}

	front_info = devm_kzalloc(&xb_dev->dev,
				  sizeof(*front_info), GFP_KERNEL);
	if (!front_info)
		return -ENOMEM;

	front_info->xb_dev = xb_dev;
	spin_lock_init(&front_info->io_lock);
	INIT_LIST_HEAD(&front_info->dbuf_list);
	dev_set_drvdata(&xb_dev->dev, front_info);

	return xenbus_switch_state(xb_dev, XenbusStateInitialising);
}
static int xen_drv_remove(struct xenbus_device *dev)
{
	struct xen_drm_front_info *front_info = dev_get_drvdata(&dev->dev);
	int to = 100;

	xenbus_switch_state(dev, XenbusStateClosing);

	/*
	 * On driver removal it is disconnected from XenBus,
	 * so no backend state change events come via .otherend_changed
	 * callback. This prevents us from exiting gracefully, e.g.
	 * signaling the backend to free event channels, waiting for its
	 * state to change to XenbusStateClosed and cleaning at our end.
	 * Normally when front driver removed backend will finally go into
	 * XenbusStateInitWait state.
	 *
	 * Workaround: read backend's state manually and wait with time-out.
	 */
	while ((xenbus_read_unsigned(front_info->xb_dev->otherend, "state",
				     XenbusStateUnknown) != XenbusStateInitWait) &&
				     --to)
		msleep(10);

	if (!to) {
		unsigned int state;

		state = xenbus_read_unsigned(front_info->xb_dev->otherend,
					     "state", XenbusStateUnknown);
		DRM_ERROR("Backend state is %s while removing driver\n",
			  xenbus_strstate(state));
	}

	xen_drm_drv_fini(front_info);
	xenbus_frontend_closed(dev);
	return 0;
}
static const struct xenbus_device_id xen_driver_ids[] = {
	{ XENDISPL_DRIVER_NAME },
	{ "" }
};

static struct xenbus_driver xen_driver = {
	.ids = xen_driver_ids,
	.probe = xen_drv_probe,
	.remove = xen_drv_remove,
	.otherend_changed = displback_changed,
};
static int __init xen_drv_init(void)
{
	/* At the moment we only support case with XEN_PAGE_SIZE == PAGE_SIZE */
	if (XEN_PAGE_SIZE != PAGE_SIZE) {
		DRM_ERROR(XENDISPL_DRIVER_NAME ": different kernel and Xen page sizes are not supported: XEN_PAGE_SIZE (%lu) != PAGE_SIZE (%lu)\n",
			  XEN_PAGE_SIZE, PAGE_SIZE);
		return -ENODEV;
	}

	if (!xen_domain())
		return -ENODEV;

	if (!xen_has_pv_devices())
		return -ENODEV;

	DRM_INFO("Registering XEN PV " XENDISPL_DRIVER_NAME "\n");
	return xenbus_register_frontend(&xen_driver);
}
static void __exit xen_drv_fini(void)
{
	DRM_INFO("Unregistering XEN PV " XENDISPL_DRIVER_NAME "\n");
	xenbus_unregister_driver(&xen_driver);
}
module_init(xen_drv_init);
module_exit(xen_drv_fini);

MODULE_DESCRIPTION("Xen para-virtualized display device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:" XENDISPL_DRIVER_NAME);