// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 *  Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_gem.h>

#include <linux/of_device.h>

#include <xen/platform_pci.h>
#include <xen/xen.h>
#include <xen/xenbus.h>

#include <xen/xen-front-pgdir-shbuf.h>
#include <xen/interface/io/displif.h>

#include "xen_drm_front.h"
#include "xen_drm_front_cfg.h"
#include "xen_drm_front_evtchnl.h"
#include "xen_drm_front_gem.h"
#include "xen_drm_front_kms.h"

struct xen_drm_front_dbuf {
	struct list_head list;
	u64 dbuf_cookie;
	u64 fb_cookie;

	struct xen_front_pgdir_shbuf shbuf;
};
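
/*
 * Display buffers (dbufs) shared with the backend are tracked on
 * front_info->dbuf_list and are looked up by the cookie assigned to
 * each buffer: the cookie is what both ends use in displif requests
 * to refer to the same buffer.
 */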
static void dbuf_add_to_list(struct xen_drm_front_info *front_info,
			     struct xen_drm_front_dbuf *dbuf, u64 dbuf_cookie)
{
	dbuf->dbuf_cookie = dbuf_cookie;
	list_add(&dbuf->list, &front_info->dbuf_list);
}

static struct xen_drm_front_dbuf *dbuf_get(struct list_head *dbuf_list,
					   u64 dbuf_cookie)
{
	struct xen_drm_front_dbuf *buf, *q;

	list_for_each_entry_safe(buf, q, dbuf_list, list)
		if (buf->dbuf_cookie == dbuf_cookie)
			return buf;

	return NULL;
}

static void dbuf_free(struct list_head *dbuf_list, u64 dbuf_cookie)
{
	struct xen_drm_front_dbuf *buf, *q;

	list_for_each_entry_safe(buf, q, dbuf_list, list)
		if (buf->dbuf_cookie == dbuf_cookie) {
			list_del(&buf->list);
			xen_front_pgdir_shbuf_unmap(&buf->shbuf);
			xen_front_pgdir_shbuf_free(&buf->shbuf);
			kfree(buf);
			break;
		}
}

static void dbuf_free_all(struct list_head *dbuf_list)
{
	struct xen_drm_front_dbuf *buf, *q;

	list_for_each_entry_safe(buf, q, dbuf_list, list) {
		list_del(&buf->list);
		xen_front_pgdir_shbuf_unmap(&buf->shbuf);
		xen_front_pgdir_shbuf_free(&buf->shbuf);
		kfree(buf);
	}
}
static struct xendispl_req *
be_prepare_req(struct xen_drm_front_evtchnl *evtchnl, u8 operation)
{
	struct xendispl_req *req;

	req = RING_GET_REQUEST(&evtchnl->u.req.ring,
			       evtchnl->u.req.ring.req_prod_pvt);
	req->operation = operation;
	req->id = evtchnl->evt_next_id++;
	evtchnl->evt_id = req->id;
	return req;
}

static int be_stream_do_io(struct xen_drm_front_evtchnl *evtchnl,
			   struct xendispl_req *req)
{
	reinit_completion(&evtchnl->u.req.completion);
	if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
		return -EIO;

	xen_drm_front_evtchnl_flush(evtchnl);
	return 0;
}

static int be_stream_wait_io(struct xen_drm_front_evtchnl *evtchnl)
{
	if (wait_for_completion_timeout(&evtchnl->u.req.completion,
			msecs_to_jiffies(XEN_DRM_FRONT_WAIT_BACK_MS)) <= 0)
		return -ETIMEDOUT;

	return evtchnl->u.req.resp_status;
}
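
/*
 * Send XENDISPL_OP_SET_CONFIG for the given pipeline: the backend is
 * told which framebuffer (fb_cookie) to show and with what geometry.
 */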
int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
			   u32 x, u32 y, u32 width, u32 height,
			   u32 bpp, u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_info *front_info;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	front_info = pipeline->drm_info->front_info;
	evtchnl = &front_info->evt_pairs[pipeline->index].req;
	if (unlikely(!evtchnl))
		return -EIO;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_SET_CONFIG);
	req->op.set_config.x = x;
	req->op.set_config.y = y;
	req->op.set_config.width = width;
	req->op.set_config.height = height;
	req->op.set_config.bpp = bpp;
	req->op.set_config.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}
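
/*
 * Create a display buffer shared with the backend: set up a shared
 * page directory (shbuf) for it, send XENDISPL_OP_DBUF_CREATE and,
 * once the backend has acknowledged the request, map the buffer.
 * On any failure the buffer is removed from the dbuf list again.
 */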
int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
			      u64 dbuf_cookie, u32 width, u32 height,
			      u32 bpp, u64 size, struct page **pages)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_dbuf *dbuf;
	struct xendispl_req *req;
	struct xen_front_pgdir_shbuf_cfg buf_cfg;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL);
	if (!dbuf)
		return -ENOMEM;

	dbuf_add_to_list(front_info, dbuf, dbuf_cookie);

	memset(&buf_cfg, 0, sizeof(buf_cfg));
	buf_cfg.xb_dev = front_info->xb_dev;
	buf_cfg.num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
	buf_cfg.pages = pages;
	buf_cfg.pgdir = &dbuf->shbuf;
	buf_cfg.be_alloc = front_info->cfg.be_alloc;

	ret = xen_front_pgdir_shbuf_alloc(&buf_cfg);
	if (ret < 0)
		goto fail_shbuf_alloc;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_CREATE);
	req->op.dbuf_create.gref_directory =
			xen_front_pgdir_shbuf_get_dir_start(&dbuf->shbuf);
	req->op.dbuf_create.buffer_sz = size;
	req->op.dbuf_create.dbuf_cookie = dbuf_cookie;
	req->op.dbuf_create.width = width;
	req->op.dbuf_create.height = height;
	req->op.dbuf_create.bpp = bpp;
	if (buf_cfg.be_alloc)
		req->op.dbuf_create.flags |= XENDISPL_DBUF_FLG_REQ_ALLOC;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret < 0)
		goto fail;

	ret = be_stream_wait_io(evtchnl);
	if (ret < 0)
		goto fail;

	ret = xen_front_pgdir_shbuf_map(&dbuf->shbuf);
	if (ret < 0)
		goto fail;

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return 0;

fail:
	mutex_unlock(&evtchnl->u.req.req_io_lock);
fail_shbuf_alloc:
	dbuf_free(&front_info->dbuf_list, dbuf_cookie);
	return ret;
}

static int xen_drm_front_dbuf_destroy(struct xen_drm_front_info *front_info,
				      u64 dbuf_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	bool be_alloc;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	be_alloc = front_info->cfg.be_alloc;

	/*
	 * For the backend allocated buffer release references now, so backend
	 * can free the buffer.
	 */
	if (be_alloc)
		dbuf_free(&front_info->dbuf_list, dbuf_cookie);

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_DESTROY);
	req->op.dbuf_destroy.dbuf_cookie = dbuf_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	/*
	 * Do this regardless of communication status with the backend:
	 * if we cannot remove remote resources remove what we can locally.
	 */
	if (!be_alloc)
		dbuf_free(&front_info->dbuf_list, dbuf_cookie);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}
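
/*
 * Attach a DRM framebuffer (identified by fb_cookie) to a previously
 * created display buffer and let the backend know its geometry and
 * pixel format via XENDISPL_OP_FB_ATTACH.
 */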
int xen_drm_front_fb_attach(struct xen_drm_front_info *front_info,
			    u64 dbuf_cookie, u64 fb_cookie, u32 width,
			    u32 height, u32 pixel_format)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_dbuf *buf;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	buf = dbuf_get(&front_info->dbuf_list, dbuf_cookie);
	if (!buf)
		return -EINVAL;

	buf->fb_cookie = fb_cookie;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_FB_ATTACH);
	req->op.fb_attach.dbuf_cookie = dbuf_cookie;
	req->op.fb_attach.fb_cookie = fb_cookie;
	req->op.fb_attach.width = width;
	req->op.fb_attach.height = height;
	req->op.fb_attach.pixel_format = pixel_format;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

int xen_drm_front_fb_detach(struct xen_drm_front_info *front_info,
			    u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_FB_DETACH);
	req->op.fb_detach.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

int xen_drm_front_page_flip(struct xen_drm_front_info *front_info,
			    int conn_idx, u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	if (unlikely(conn_idx >= front_info->num_evt_pairs))
		return -EINVAL;

	evtchnl = &front_info->evt_pairs[conn_idx].req;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_PG_FLIP);
	req->op.pg_flip.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}
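
/*
 * Called when the backend signals that the frame for fb_cookie on
 * connector conn_idx has been displayed: forward the event to KMS so
 * the pending page flip can be completed.
 */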
void xen_drm_front_on_frame_done(struct xen_drm_front_info *front_info,
				 int conn_idx, u64 fb_cookie)
{
	struct xen_drm_front_drm_info *drm_info = front_info->drm_info;

	if (unlikely(conn_idx >= front_info->cfg.num_connectors))
		return;

	xen_drm_front_kms_on_frame_done(&drm_info->pipeline[conn_idx],
					fb_cookie);
}

static int xen_drm_drv_dumb_create(struct drm_file *filp,
				   struct drm_device *dev,
				   struct drm_mode_create_dumb *args)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct drm_gem_object *obj;
	int ret;

	/*
	 * Dumb creation is a two stage process: first we create a fully
	 * constructed GEM object which is communicated to the backend, and
	 * only after that we can create GEM's handle. This is done so,
	 * because of the possible races: once you create a handle it becomes
	 * immediately visible to user-space, so the latter can try accessing
	 * object without pages etc.
	 * For details also see drm_gem_handle_create
	 */
	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	args->size = args->pitch * args->height;

	obj = xen_drm_front_gem_create(dev, args->size);
	if (IS_ERR_OR_NULL(obj)) {
		ret = PTR_ERR(obj);
		goto fail;
	}

	ret = xen_drm_front_dbuf_create(drm_info->front_info,
					xen_drm_front_dbuf_to_cookie(obj),
					args->width, args->height, args->bpp,
					args->size,
					xen_drm_front_gem_get_pages(obj));
	if (ret)
		goto fail_backend;

	/* This is the tail of GEM object creation */
	ret = drm_gem_handle_create(filp, obj, &args->handle);
	if (ret)
		goto fail_handle;

	/* Drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(obj);
	return 0;

fail_handle:
	xen_drm_front_dbuf_destroy(drm_info->front_info,
				   xen_drm_front_dbuf_to_cookie(obj));
fail_backend:
	/* drop reference from allocate */
	drm_gem_object_put_unlocked(obj);
fail:
	DRM_ERROR("Failed to create dumb buffer: %d\n", ret);
	return ret;
}

static void xen_drm_drv_free_object_unlocked(struct drm_gem_object *obj)
{
	struct xen_drm_front_drm_info *drm_info = obj->dev->dev_private;
	int idx;

	if (drm_dev_enter(obj->dev, &idx)) {
		xen_drm_front_dbuf_destroy(drm_info->front_info,
					   xen_drm_front_dbuf_to_cookie(obj));
		drm_dev_exit(idx);
	} else {
		dbuf_free(&drm_info->front_info->dbuf_list,
			  xen_drm_front_dbuf_to_cookie(obj));
	}

	xen_drm_front_gem_free_object_unlocked(obj);
}
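
/*
 * .release runs when the last reference to the DRM device is dropped,
 * which may happen well after the Xen device itself has gone away.
 * Only at this point can a backend that allocates buffers itself be
 * told, via XenbusStateInitialising, that it is safe to (re)initialize.
 */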
static void xen_drm_drv_release(struct drm_device *dev)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_drm_front_info *front_info = drm_info->front_info;

	xen_drm_front_kms_fini(drm_info);

	drm_atomic_helper_shutdown(dev);
	drm_mode_config_cleanup(dev);

	drm_dev_fini(dev);
	kfree(dev);

	if (front_info->cfg.be_alloc)
		xenbus_switch_state(front_info->xb_dev,
				    XenbusStateInitialising);

	kfree(drm_info);
}

static const struct file_operations xen_drm_dev_fops = {
	.owner          = THIS_MODULE,
	.open           = drm_open,
	.release        = drm_release,
	.unlocked_ioctl = drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = drm_compat_ioctl,
#endif
	.poll           = drm_poll,
	.read           = drm_read,
	.llseek         = no_llseek,
	.mmap           = xen_drm_front_gem_mmap,
};

static const struct vm_operations_struct xen_drm_drv_vm_ops = {
	.open           = drm_gem_vm_open,
	.close          = drm_gem_vm_close,
};

static struct drm_driver xen_drm_driver = {
	.driver_features           = DRIVER_GEM | DRIVER_MODESET |
				     DRIVER_PRIME | DRIVER_ATOMIC,
	.release                   = xen_drm_drv_release,
	.gem_vm_ops                = &xen_drm_drv_vm_ops,
	.gem_free_object_unlocked  = xen_drm_drv_free_object_unlocked,
	.prime_handle_to_fd        = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle        = drm_gem_prime_fd_to_handle,
	.gem_prime_import          = drm_gem_prime_import,
	.gem_prime_export          = drm_gem_prime_export,
	.gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table,
	.gem_prime_get_sg_table    = xen_drm_front_gem_get_sg_table,
	.gem_prime_vmap            = xen_drm_front_gem_prime_vmap,
	.gem_prime_vunmap          = xen_drm_front_gem_prime_vunmap,
	.gem_prime_mmap            = xen_drm_front_gem_prime_mmap,
	.dumb_create               = xen_drm_drv_dumb_create,
	.fops                      = &xen_drm_dev_fops,
	.name                      = "xendrm-du",
	.desc                      = "Xen PV DRM Display Unit",
	.date                      = "20180221",
	.major                     = 1,
	.minor                     = 0,
};

static int xen_drm_drv_init(struct xen_drm_front_info *front_info)
{
	struct device *dev = &front_info->xb_dev->dev;
	struct xen_drm_front_drm_info *drm_info;
	struct drm_device *drm_dev;
	int ret;

	DRM_INFO("Creating %s\n", xen_drm_driver.desc);

	drm_info = kzalloc(sizeof(*drm_info), GFP_KERNEL);
	if (!drm_info) {
		ret = -ENOMEM;
		goto fail;
	}

	drm_info->front_info = front_info;
	front_info->drm_info = drm_info;

	drm_dev = drm_dev_alloc(&xen_drm_driver, dev);
	if (IS_ERR(drm_dev)) {
		ret = PTR_ERR(drm_dev);
		goto fail;
	}

	drm_info->drm_dev = drm_dev;

	drm_dev->dev_private = drm_info;

	ret = xen_drm_front_kms_init(drm_info);
	if (ret) {
		DRM_ERROR("Failed to initialize DRM/KMS, ret %d\n", ret);
		goto fail_modeset;
	}

	ret = drm_dev_register(drm_dev, 0);
	if (ret)
		goto fail_register;

	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
		 xen_drm_driver.name, xen_drm_driver.major,
		 xen_drm_driver.minor, xen_drm_driver.patchlevel,
		 xen_drm_driver.date, drm_dev->primary->index);

	return 0;

fail_register:
	drm_dev_unregister(drm_dev);
fail_modeset:
	drm_kms_helper_poll_fini(drm_dev);
	drm_mode_config_cleanup(drm_dev);
fail:
	kfree(drm_info);
	return ret;
}

static void xen_drm_drv_fini(struct xen_drm_front_info *front_info)
{
	struct xen_drm_front_drm_info *drm_info = front_info->drm_info;
	struct drm_device *dev;

	if (!drm_info)
		return;

	dev = drm_info->drm_dev;
	if (!dev)
		return;

	/* Nothing to do if device is already unplugged */
	if (drm_dev_is_unplugged(dev))
		return;

	drm_kms_helper_poll_fini(dev);
	drm_dev_unplug(dev);
	drm_dev_put(dev);

	front_info->drm_info = NULL;

	xen_drm_front_evtchnl_free_all(front_info);
	dbuf_free_all(&front_info->dbuf_list);

	/*
	 * If we are not using backend allocated buffers, then tell the
	 * backend we are ready to (re)initialize. Otherwise, wait for
	 * drm_driver.release.
	 */
	if (!front_info->cfg.be_alloc)
		xenbus_switch_state(front_info->xb_dev,
				    XenbusStateInitialising);
}

static int displback_initwait(struct xen_drm_front_info *front_info)
{
	struct xen_drm_front_cfg *cfg = &front_info->cfg;
	int ret;

	cfg->front_info = front_info;
	ret = xen_drm_front_cfg_card(front_info, cfg);
	if (ret < 0)
		return ret;

	DRM_INFO("Have %d connector(s)\n", cfg->num_connectors);
	/* Create event channels for all connectors and publish */
	ret = xen_drm_front_evtchnl_create_all(front_info);
	if (ret < 0)
		return ret;

	return xen_drm_front_evtchnl_publish_all(front_info);
}

static int displback_connect(struct xen_drm_front_info *front_info)
{
	xen_drm_front_evtchnl_set_state(front_info, EVTCHNL_STATE_CONNECTED);
	return xen_drm_drv_init(front_info);
}

static void displback_disconnect(struct xen_drm_front_info *front_info)
{
	if (!front_info->drm_info)
		return;

	/* Tell the backend to wait until we release the DRM driver. */
	xenbus_switch_state(front_info->xb_dev, XenbusStateReconfiguring);

	xen_drm_drv_fini(front_info);
}
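
/*
 * XenBus state machine: react to backend state transitions reported
 * via .otherend_changed. The interesting cases are InitWait (read the
 * configuration and publish event channels), Connected (bring up the
 * DRM device) and the various closing/initialising states where the
 * frontend has to tear the DRM device down first.
 */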
static void displback_changed(struct xenbus_device *xb_dev,
			      enum xenbus_state backend_state)
{
	struct xen_drm_front_info *front_info = dev_get_drvdata(&xb_dev->dev);
	int ret;

	DRM_DEBUG("Backend state is %s, front is %s\n",
		  xenbus_strstate(backend_state),
		  xenbus_strstate(xb_dev->state));

	switch (backend_state) {
	case XenbusStateReconfiguring:
		/* fall through */
	case XenbusStateReconfigured:
		/* fall through */
	case XenbusStateInitialised:
		break;

	case XenbusStateInitialising:
		if (xb_dev->state == XenbusStateReconfiguring)
			break;

		/* recovering after backend unexpected closure */
		displback_disconnect(front_info);
		break;

	case XenbusStateInitWait:
		if (xb_dev->state == XenbusStateReconfiguring)
			break;

		/* recovering after backend unexpected closure */
		displback_disconnect(front_info);
		if (xb_dev->state != XenbusStateInitialising)
			break;

		ret = displback_initwait(front_info);
		if (ret < 0)
			xenbus_dev_fatal(xb_dev, ret, "initializing frontend");
		else
			xenbus_switch_state(xb_dev, XenbusStateInitialised);
		break;

	case XenbusStateConnected:
		if (xb_dev->state != XenbusStateInitialised)
			break;

		ret = displback_connect(front_info);
		if (ret < 0) {
			displback_disconnect(front_info);
			xenbus_dev_fatal(xb_dev, ret, "connecting backend");
		} else {
			xenbus_switch_state(xb_dev, XenbusStateConnected);
		}
		break;

	case XenbusStateClosing:
		/*
		 * In this state the backend starts freeing resources,
		 * so let it go into the closed state, so we can also
		 * remove ours.
		 */
		break;

	case XenbusStateUnknown:
		/* fall through */
	case XenbusStateClosed:
		if (xb_dev->state == XenbusStateClosed)
			break;

		displback_disconnect(front_info);
		break;
	}
}
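
/*
 * XenBus frontend probe: set up default DMA ops, allocate and
 * initialize the per-device front_info and announce ourselves to the
 * backend by switching to XenbusStateInitialising.
 */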
static int xen_drv_probe(struct xenbus_device *xb_dev,
			 const struct xenbus_device_id *id)
{
	struct xen_drm_front_info *front_info;
	struct device *dev = &xb_dev->dev;
	int ret;

	/*
	 * The device is not spawned from a device tree, so arch_setup_dma_ops
	 * is not called, thus leaving the device with dummy DMA ops.
	 * This makes the device return an error on PRIME buffer import, which
	 * is not correct: to fix this, call of_dma_configure() with a NULL
	 * node to set default DMA ops.
	 */
	dev->coherent_dma_mask = DMA_BIT_MASK(32);
	ret = of_dma_configure(dev, NULL, true);
	if (ret < 0) {
		DRM_ERROR("Cannot setup DMA ops, ret %d", ret);
		return ret;
	}

	front_info = devm_kzalloc(&xb_dev->dev,
				  sizeof(*front_info), GFP_KERNEL);
	if (!front_info)
		return -ENOMEM;

	front_info->xb_dev = xb_dev;
	spin_lock_init(&front_info->io_lock);
	INIT_LIST_HEAD(&front_info->dbuf_list);
	dev_set_drvdata(&xb_dev->dev, front_info);

	return xenbus_switch_state(xb_dev, XenbusStateInitialising);
}

static int xen_drv_remove(struct xenbus_device *dev)
{
	struct xen_drm_front_info *front_info = dev_get_drvdata(&dev->dev);
	int to = 100;

	xenbus_switch_state(dev, XenbusStateClosing);

	/*
	 * On driver removal it is disconnected from XenBus,
	 * so no backend state change events come via the .otherend_changed
	 * callback. This prevents us from exiting gracefully, e.g.
	 * signaling the backend to free event channels, waiting for its
	 * state to change to XenbusStateClosed and cleaning up at our end.
	 * Normally, when the front driver is removed, the backend will
	 * finally go into the XenbusStateInitWait state.
	 *
	 * Workaround: read the backend's state manually and wait with a
	 * time-out.
	 */
	while ((xenbus_read_unsigned(front_info->xb_dev->otherend, "state",
				     XenbusStateUnknown) != XenbusStateInitWait) &&
				     --to)
		msleep(10);

	if (!to) {
		unsigned int state;

		state = xenbus_read_unsigned(front_info->xb_dev->otherend,
					     "state", XenbusStateUnknown);
		DRM_ERROR("Backend state is %s while removing driver\n",
			  xenbus_strstate(state));
	}

	xen_drm_drv_fini(front_info);
	xenbus_frontend_closed(dev);
	return 0;
}

static const struct xenbus_device_id xen_driver_ids[] = {
	{ XENDISPL_DRIVER_NAME },
	{ "" }
};

static struct xenbus_driver xen_driver = {
	.ids = xen_driver_ids,
	.probe = xen_drv_probe,
	.remove = xen_drv_remove,
	.otherend_changed = displback_changed,
};

static int __init xen_drv_init(void)
{
	/* At the moment we only support the case with XEN_PAGE_SIZE == PAGE_SIZE */
	if (XEN_PAGE_SIZE != PAGE_SIZE) {
		DRM_ERROR(XENDISPL_DRIVER_NAME ": different kernel and Xen page sizes are not supported: XEN_PAGE_SIZE (%lu) != PAGE_SIZE (%lu)\n",
			  XEN_PAGE_SIZE, PAGE_SIZE);
		return -ENODEV;
	}

	if (!xen_domain())
		return -ENODEV;

	if (!xen_has_pv_devices())
		return -ENODEV;

	DRM_INFO("Registering XEN PV " XENDISPL_DRIVER_NAME "\n");
	return xenbus_register_frontend(&xen_driver);
}

static void __exit xen_drv_fini(void)
{
	DRM_INFO("Unregistering XEN PV " XENDISPL_DRIVER_NAME "\n");
	xenbus_unregister_driver(&xen_driver);
}

module_init(xen_drv_init);
module_exit(xen_drv_fini);

MODULE_DESCRIPTION("Xen para-virtualized display device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:" XENDISPL_DRIVER_NAME);