// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <linux/errno.h>
#include <linux/irq.h>

#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/grant_table.h>

#include "xen_drm_front.h"
#include "xen_drm_front_evtchnl.h"
23 static irqreturn_t
evtchnl_interrupt_ctrl(int irq
, void *dev_id
)
25 struct xen_drm_front_evtchnl
*evtchnl
= dev_id
;
26 struct xen_drm_front_info
*front_info
= evtchnl
->front_info
;
27 struct xendispl_resp
*resp
;
31 if (unlikely(evtchnl
->state
!= EVTCHNL_STATE_CONNECTED
))
34 spin_lock_irqsave(&front_info
->io_lock
, flags
);
37 rp
= evtchnl
->u
.req
.ring
.sring
->rsp_prod
;
38 /* ensure we see queued responses up to rp */
41 for (i
= evtchnl
->u
.req
.ring
.rsp_cons
; i
!= rp
; i
++) {
42 resp
= RING_GET_RESPONSE(&evtchnl
->u
.req
.ring
, i
);
43 if (unlikely(resp
->id
!= evtchnl
->evt_id
))
46 switch (resp
->operation
) {
47 case XENDISPL_OP_PG_FLIP
:
48 case XENDISPL_OP_FB_ATTACH
:
49 case XENDISPL_OP_FB_DETACH
:
50 case XENDISPL_OP_DBUF_CREATE
:
51 case XENDISPL_OP_DBUF_DESTROY
:
52 case XENDISPL_OP_SET_CONFIG
:
53 evtchnl
->u
.req
.resp_status
= resp
->status
;
54 complete(&evtchnl
->u
.req
.completion
);
58 DRM_ERROR("Operation %d is not supported\n",
64 evtchnl
->u
.req
.ring
.rsp_cons
= i
;
66 if (i
!= evtchnl
->u
.req
.ring
.req_prod_pvt
) {
69 RING_FINAL_CHECK_FOR_RESPONSES(&evtchnl
->u
.req
.ring
,
74 evtchnl
->u
.req
.ring
.sring
->rsp_event
= i
+ 1;
77 spin_unlock_irqrestore(&front_info
->io_lock
, flags
);
81 static irqreturn_t
evtchnl_interrupt_evt(int irq
, void *dev_id
)
83 struct xen_drm_front_evtchnl
*evtchnl
= dev_id
;
84 struct xen_drm_front_info
*front_info
= evtchnl
->front_info
;
85 struct xendispl_event_page
*page
= evtchnl
->u
.evt
.page
;
89 if (unlikely(evtchnl
->state
!= EVTCHNL_STATE_CONNECTED
))
92 spin_lock_irqsave(&front_info
->io_lock
, flags
);
95 /* ensure we see ring contents up to prod */
97 if (prod
== page
->in_cons
)
100 for (cons
= page
->in_cons
; cons
!= prod
; cons
++) {
101 struct xendispl_evt
*event
;
103 event
= &XENDISPL_IN_RING_REF(page
, cons
);
104 if (unlikely(event
->id
!= evtchnl
->evt_id
++))
107 switch (event
->type
) {
108 case XENDISPL_EVT_PG_FLIP
:
109 xen_drm_front_on_frame_done(front_info
, evtchnl
->index
,
110 event
->op
.pg_flip
.fb_cookie
);
114 page
->in_cons
= cons
;
115 /* ensure ring contents */
119 spin_unlock_irqrestore(&front_info
->io_lock
, flags
);
123 static void evtchnl_free(struct xen_drm_front_info
*front_info
,
124 struct xen_drm_front_evtchnl
*evtchnl
)
126 unsigned long page
= 0;
128 if (evtchnl
->type
== EVTCHNL_TYPE_REQ
)
129 page
= (unsigned long)evtchnl
->u
.req
.ring
.sring
;
130 else if (evtchnl
->type
== EVTCHNL_TYPE_EVT
)
131 page
= (unsigned long)evtchnl
->u
.evt
.page
;
135 evtchnl
->state
= EVTCHNL_STATE_DISCONNECTED
;
137 if (evtchnl
->type
== EVTCHNL_TYPE_REQ
) {
138 /* release all who still waits for response if any */
139 evtchnl
->u
.req
.resp_status
= -EIO
;
140 complete_all(&evtchnl
->u
.req
.completion
);
144 unbind_from_irqhandler(evtchnl
->irq
, evtchnl
);
147 xenbus_free_evtchn(front_info
->xb_dev
, evtchnl
->port
);
149 /* end access and free the page */
150 if (evtchnl
->gref
!= GRANT_INVALID_REF
)
151 gnttab_end_foreign_access(evtchnl
->gref
, 0, page
);
153 memset(evtchnl
, 0, sizeof(*evtchnl
));
156 static int evtchnl_alloc(struct xen_drm_front_info
*front_info
, int index
,
157 struct xen_drm_front_evtchnl
*evtchnl
,
158 enum xen_drm_front_evtchnl_type type
)
160 struct xenbus_device
*xb_dev
= front_info
->xb_dev
;
163 irq_handler_t handler
;
166 memset(evtchnl
, 0, sizeof(*evtchnl
));
167 evtchnl
->type
= type
;
168 evtchnl
->index
= index
;
169 evtchnl
->front_info
= front_info
;
170 evtchnl
->state
= EVTCHNL_STATE_DISCONNECTED
;
171 evtchnl
->gref
= GRANT_INVALID_REF
;
173 page
= get_zeroed_page(GFP_NOIO
| __GFP_HIGH
);
179 if (type
== EVTCHNL_TYPE_REQ
) {
180 struct xen_displif_sring
*sring
;
182 init_completion(&evtchnl
->u
.req
.completion
);
183 mutex_init(&evtchnl
->u
.req
.req_io_lock
);
184 sring
= (struct xen_displif_sring
*)page
;
185 SHARED_RING_INIT(sring
);
186 FRONT_RING_INIT(&evtchnl
->u
.req
.ring
, sring
, XEN_PAGE_SIZE
);
188 ret
= xenbus_grant_ring(xb_dev
, sring
, 1, &gref
);
190 evtchnl
->u
.req
.ring
.sring
= NULL
;
195 handler
= evtchnl_interrupt_ctrl
;
197 ret
= gnttab_grant_foreign_access(xb_dev
->otherend_id
,
198 virt_to_gfn((void *)page
), 0);
204 evtchnl
->u
.evt
.page
= (struct xendispl_event_page
*)page
;
206 handler
= evtchnl_interrupt_evt
;
208 evtchnl
->gref
= gref
;
210 ret
= xenbus_alloc_evtchn(xb_dev
, &evtchnl
->port
);
214 ret
= bind_evtchn_to_irqhandler(evtchnl
->port
,
215 handler
, 0, xb_dev
->devicetype
,
224 DRM_ERROR("Failed to allocate ring: %d\n", ret
);
228 int xen_drm_front_evtchnl_create_all(struct xen_drm_front_info
*front_info
)
230 struct xen_drm_front_cfg
*cfg
;
233 cfg
= &front_info
->cfg
;
235 front_info
->evt_pairs
=
236 kcalloc(cfg
->num_connectors
,
237 sizeof(struct xen_drm_front_evtchnl_pair
),
239 if (!front_info
->evt_pairs
) {
244 for (conn
= 0; conn
< cfg
->num_connectors
; conn
++) {
245 ret
= evtchnl_alloc(front_info
, conn
,
246 &front_info
->evt_pairs
[conn
].req
,
249 DRM_ERROR("Error allocating control channel\n");
253 ret
= evtchnl_alloc(front_info
, conn
,
254 &front_info
->evt_pairs
[conn
].evt
,
257 DRM_ERROR("Error allocating in-event channel\n");
261 front_info
->num_evt_pairs
= cfg
->num_connectors
;
265 xen_drm_front_evtchnl_free_all(front_info
);
269 static int evtchnl_publish(struct xenbus_transaction xbt
,
270 struct xen_drm_front_evtchnl
*evtchnl
,
271 const char *path
, const char *node_ring
,
272 const char *node_chnl
)
274 struct xenbus_device
*xb_dev
= evtchnl
->front_info
->xb_dev
;
277 /* write control channel ring reference */
278 ret
= xenbus_printf(xbt
, path
, node_ring
, "%u", evtchnl
->gref
);
280 xenbus_dev_error(xb_dev
, ret
, "writing ring-ref");
284 /* write event channel ring reference */
285 ret
= xenbus_printf(xbt
, path
, node_chnl
, "%u", evtchnl
->port
);
287 xenbus_dev_error(xb_dev
, ret
, "writing event channel");
294 int xen_drm_front_evtchnl_publish_all(struct xen_drm_front_info
*front_info
)
296 struct xenbus_transaction xbt
;
297 struct xen_drm_front_cfg
*plat_data
;
300 plat_data
= &front_info
->cfg
;
303 ret
= xenbus_transaction_start(&xbt
);
305 xenbus_dev_fatal(front_info
->xb_dev
, ret
,
306 "starting transaction");
310 for (conn
= 0; conn
< plat_data
->num_connectors
; conn
++) {
311 ret
= evtchnl_publish(xbt
, &front_info
->evt_pairs
[conn
].req
,
312 plat_data
->connectors
[conn
].xenstore_path
,
313 XENDISPL_FIELD_REQ_RING_REF
,
314 XENDISPL_FIELD_REQ_CHANNEL
);
318 ret
= evtchnl_publish(xbt
, &front_info
->evt_pairs
[conn
].evt
,
319 plat_data
->connectors
[conn
].xenstore_path
,
320 XENDISPL_FIELD_EVT_RING_REF
,
321 XENDISPL_FIELD_EVT_CHANNEL
);
326 ret
= xenbus_transaction_end(xbt
, 0);
331 xenbus_dev_fatal(front_info
->xb_dev
, ret
,
332 "completing transaction");
339 xenbus_transaction_end(xbt
, 1);
342 xenbus_dev_fatal(front_info
->xb_dev
, ret
, "writing Xen store");
346 void xen_drm_front_evtchnl_flush(struct xen_drm_front_evtchnl
*evtchnl
)
350 evtchnl
->u
.req
.ring
.req_prod_pvt
++;
351 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&evtchnl
->u
.req
.ring
, notify
);
353 notify_remote_via_irq(evtchnl
->irq
);
356 void xen_drm_front_evtchnl_set_state(struct xen_drm_front_info
*front_info
,
357 enum xen_drm_front_evtchnl_state state
)
362 if (!front_info
->evt_pairs
)
365 spin_lock_irqsave(&front_info
->io_lock
, flags
);
366 for (i
= 0; i
< front_info
->num_evt_pairs
; i
++) {
367 front_info
->evt_pairs
[i
].req
.state
= state
;
368 front_info
->evt_pairs
[i
].evt
.state
= state
;
370 spin_unlock_irqrestore(&front_info
->io_lock
, flags
);
373 void xen_drm_front_evtchnl_free_all(struct xen_drm_front_info
*front_info
)
377 if (!front_info
->evt_pairs
)
380 for (i
= 0; i
< front_info
->num_evt_pairs
; i
++) {
381 evtchnl_free(front_info
, &front_info
->evt_pairs
[i
].req
);
382 evtchnl_free(front_info
, &front_info
->evt_pairs
[i
].evt
);
385 kfree(front_info
->evt_pairs
);
386 front_info
->evt_pairs
= NULL
;