// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual sound device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>

#include "xen_snd_front.h"
#include "xen_snd_front_alsa.h"
#include "xen_snd_front_cfg.h"
#include "xen_snd_front_evtchnl.h"
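
/*
 * Interrupt handler for the request channel: consumes responses queued by
 * the backend on the shared ring, stores the response status and completes
 * the waiter for the matching request id.
 */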
static irqreturn_t evtchnl_interrupt_req(int irq, void *dev_id)
{
	struct xen_snd_front_evtchnl *channel = dev_id;
	struct xen_snd_front_info *front_info = channel->front_info;
	struct xensnd_resp *resp;
	RING_IDX i, rp;

	if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED))
		return IRQ_HANDLED;

	mutex_lock(&channel->ring_io_lock);

again:
	rp = channel->u.req.ring.sring->rsp_prod;
	/* Ensure we see queued responses up to rp. */
	virt_rmb();

	/*
	 * Assume that the backend is trusted to always write sane values
	 * to the ring counters, so no overflow checks on frontend side
	 * are required.
	 */
	for (i = channel->u.req.ring.rsp_cons; i != rp; i++) {
		resp = RING_GET_RESPONSE(&channel->u.req.ring, i);
		if (resp->id != channel->evt_id)
			continue;
		switch (resp->operation) {
		case XENSND_OP_OPEN:
		case XENSND_OP_CLOSE:
		case XENSND_OP_READ:
		case XENSND_OP_WRITE:
		case XENSND_OP_TRIGGER:
			channel->u.req.resp_status = resp->status;
			complete(&channel->u.req.completion);
			break;
		case XENSND_OP_HW_PARAM_QUERY:
			channel->u.req.resp_status = resp->status;
			channel->u.req.resp.hw_param =
					resp->resp.hw_param;
			complete(&channel->u.req.completion);
			break;

		default:
			dev_err(&front_info->xb_dev->dev,
				"Operation %d is not supported\n",
				resp->operation);
			break;
		}
	}

	channel->u.req.ring.rsp_cons = i;
	if (i != channel->u.req.ring.req_prod_pvt) {
		int more_to_do;

		RING_FINAL_CHECK_FOR_RESPONSES(&channel->u.req.ring,
					       more_to_do);
		if (more_to_do)
			goto again;
	} else {
		channel->u.req.ring.sring->rsp_event = i + 1;
	}

	mutex_unlock(&channel->ring_io_lock);
	return IRQ_HANDLED;
}
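
/*
 * Interrupt handler for the event channel: consumes in-events published by
 * the backend (e.g. current stream position) from the shared event page and
 * forwards them to the ALSA frontend.
 */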
static irqreturn_t evtchnl_interrupt_evt(int irq, void *dev_id)
{
	struct xen_snd_front_evtchnl *channel = dev_id;
	struct xensnd_event_page *page = channel->u.evt.page;
	u32 cons, prod;

	if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED))
		return IRQ_HANDLED;

	mutex_lock(&channel->ring_io_lock);

	prod = page->in_prod;
	/* Ensure we see ring contents up to prod. */
	virt_rmb();
	if (prod == page->in_cons)
		goto out;

	/*
	 * Assume that the backend is trusted to always write sane values
	 * to the ring counters, so no overflow checks on frontend side
	 * are required.
	 */
	for (cons = page->in_cons; cons != prod; cons++) {
		struct xensnd_evt *event;

		event = &XENSND_IN_RING_REF(page, cons);
		if (unlikely(event->id != channel->evt_id++))
			continue;

		switch (event->type) {
		case XENSND_EVT_CUR_POS:
			xen_snd_front_alsa_handle_cur_pos(channel,
							  event->op.cur_pos.position);
			break;
		}
	}

	page->in_cons = cons;
	/* Ensure ring contents. */
	virt_wmb();

out:
	mutex_unlock(&channel->ring_io_lock);
	return IRQ_HANDLED;
}
void xen_snd_front_evtchnl_flush(struct xen_snd_front_evtchnl *channel)
{
	int notify;

	channel->u.req.ring.req_prod_pvt++;
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&channel->u.req.ring, notify);
	if (notify)
		notify_remote_via_irq(channel->irq);
}
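
/*
 * Tear down a single channel: wake up any waiters with -EIO, unbind the IRQ,
 * free the Xen event channel and release the shared page.
 */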
static void evtchnl_free(struct xen_snd_front_info *front_info,
			 struct xen_snd_front_evtchnl *channel)
{
	unsigned long page = 0;

	if (channel->type == EVTCHNL_TYPE_REQ)
		page = (unsigned long)channel->u.req.ring.sring;
	else if (channel->type == EVTCHNL_TYPE_EVT)
		page = (unsigned long)channel->u.evt.page;

	if (!page)
		return;

	channel->state = EVTCHNL_STATE_DISCONNECTED;
	if (channel->type == EVTCHNL_TYPE_REQ) {
		/* Release all who still wait for a response, if any. */
		channel->u.req.resp_status = -EIO;
		complete_all(&channel->u.req.completion);
	}

	if (channel->irq)
		unbind_from_irqhandler(channel->irq, channel);

	if (channel->port)
		xenbus_free_evtchn(front_info->xb_dev, channel->port);

	/* End access and free the page. */
	if (channel->gref != GRANT_INVALID_REF)
		gnttab_end_foreign_access(channel->gref, 0, page);
	else
		free_page(page);

	memset(channel, 0, sizeof(*channel));
}

void xen_snd_front_evtchnl_free_all(struct xen_snd_front_info *front_info)
{
	int i;

	if (!front_info->evt_pairs)
		return;

	for (i = 0; i < front_info->num_evt_pairs; i++) {
		evtchnl_free(front_info, &front_info->evt_pairs[i].req);
		evtchnl_free(front_info, &front_info->evt_pairs[i].evt);
	}

	kfree(front_info->evt_pairs);
	front_info->evt_pairs = NULL;
}
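
/*
 * Allocate and initialize a single channel of the given type: a zeroed
 * shared page (request ring or event page) granted to the backend, a Xen
 * event channel and a threaded IRQ bound to it.
 */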
static int evtchnl_alloc(struct xen_snd_front_info *front_info, int index,
			 struct xen_snd_front_evtchnl *channel,
			 enum xen_snd_front_evtchnl_type type)
{
	struct xenbus_device *xb_dev = front_info->xb_dev;
	unsigned long page;
	grant_ref_t gref;
	irq_handler_t handler;
	char *handler_name = NULL;
	int ret;

	memset(channel, 0, sizeof(*channel));
	channel->type = type;
	channel->index = index;
	channel->front_info = front_info;
	channel->state = EVTCHNL_STATE_DISCONNECTED;
	channel->gref = GRANT_INVALID_REF;
	page = get_zeroed_page(GFP_KERNEL);
	if (!page) {
		ret = -ENOMEM;
		goto fail;
	}

	handler_name = kasprintf(GFP_KERNEL, "%s-%s", XENSND_DRIVER_NAME,
				 type == EVTCHNL_TYPE_REQ ?
				 XENSND_FIELD_RING_REF :
				 XENSND_FIELD_EVT_RING_REF);
	if (!handler_name) {
		ret = -ENOMEM;
		goto fail;
	}

	mutex_init(&channel->ring_io_lock);

	if (type == EVTCHNL_TYPE_REQ) {
		struct xen_sndif_sring *sring = (struct xen_sndif_sring *)page;

		init_completion(&channel->u.req.completion);
		mutex_init(&channel->u.req.req_io_lock);
		SHARED_RING_INIT(sring);
		FRONT_RING_INIT(&channel->u.req.ring, sring, XEN_PAGE_SIZE);

		ret = xenbus_grant_ring(xb_dev, sring, 1, &gref);
		if (ret < 0) {
			channel->u.req.ring.sring = NULL;
			goto fail;
		}

		handler = evtchnl_interrupt_req;
	} else {
		ret = gnttab_grant_foreign_access(xb_dev->otherend_id,
						  virt_to_gfn((void *)page), 0);
		if (ret < 0)
			goto fail;

		channel->u.evt.page = (struct xensnd_event_page *)page;
		gref = ret;
		handler = evtchnl_interrupt_evt;
	}

	channel->gref = gref;

	ret = xenbus_alloc_evtchn(xb_dev, &channel->port);
	if (ret < 0)
		goto fail;

	ret = bind_evtchn_to_irq(channel->port);
	if (ret < 0) {
		dev_err(&xb_dev->dev,
			"Failed to bind IRQ for domid %d port %d: %d\n",
			front_info->xb_dev->otherend_id, channel->port, ret);
		goto fail;
	}

	channel->irq = ret;

	ret = request_threaded_irq(channel->irq, NULL, handler,
				   IRQF_ONESHOT, handler_name, channel);
	if (ret < 0) {
		dev_err(&xb_dev->dev, "Failed to request IRQ %d: %d\n",
			channel->irq, ret);
		goto fail;
	}

	kfree(handler_name);

	channel->state = EVTCHNL_STATE_DISCONNECTED;
	return 0;

fail:
	if (page)
		free_page(page);
	kfree(handler_name);
	dev_err(&xb_dev->dev, "Failed to allocate ring: %d\n", ret);
	return ret;
}
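
/*
 * Create a request/event channel pair for every configured playback and
 * capture stream.
 */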
int xen_snd_front_evtchnl_create_all(struct xen_snd_front_info *front_info,
				     int num_streams)
{
	struct xen_front_cfg_card *cfg = &front_info->cfg;
	struct device *dev = &front_info->xb_dev->dev;
	int d, ret = 0;

	front_info->evt_pairs =
			kcalloc(num_streams,
				sizeof(struct xen_snd_front_evtchnl_pair),
				GFP_KERNEL);
	if (!front_info->evt_pairs)
		return -ENOMEM;

	/* Iterate over devices and their streams and create event channels. */
	for (d = 0; d < cfg->num_pcm_instances; d++) {
		struct xen_front_cfg_pcm_instance *pcm_instance;
		int s, index;

		pcm_instance = &cfg->pcm_instances[d];

		for (s = 0; s < pcm_instance->num_streams_pb; s++) {
			index = pcm_instance->streams_pb[s].index;

			ret = evtchnl_alloc(front_info, index,
					    &front_info->evt_pairs[index].req,
					    EVTCHNL_TYPE_REQ);
			if (ret < 0) {
				dev_err(dev, "Error allocating control channel\n");
				goto fail;
			}

			ret = evtchnl_alloc(front_info, index,
					    &front_info->evt_pairs[index].evt,
					    EVTCHNL_TYPE_EVT);
			if (ret < 0) {
				dev_err(dev, "Error allocating in-event channel\n");
				goto fail;
			}
		}

		for (s = 0; s < pcm_instance->num_streams_cap; s++) {
			index = pcm_instance->streams_cap[s].index;

			ret = evtchnl_alloc(front_info, index,
					    &front_info->evt_pairs[index].req,
					    EVTCHNL_TYPE_REQ);
			if (ret < 0) {
				dev_err(dev, "Error allocating control channel\n");
				goto fail;
			}

			ret = evtchnl_alloc(front_info, index,
					    &front_info->evt_pairs[index].evt,
					    EVTCHNL_TYPE_EVT);
			if (ret < 0) {
				dev_err(dev, "Error allocating in-event channel\n");
				goto fail;
			}
		}
	}

	front_info->num_evt_pairs = num_streams;
	return 0;

fail:
	xen_snd_front_evtchnl_free_all(front_info);
	return ret;
}
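
/*
 * Publish a channel's grant reference and event channel port under the
 * given XenStore path so the backend can connect to it.
 */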
static int evtchnl_publish(struct xenbus_transaction xbt,
			   struct xen_snd_front_evtchnl *channel,
			   const char *path, const char *node_ring,
			   const char *node_chnl)
{
	struct xenbus_device *xb_dev = channel->front_info->xb_dev;
	int ret;

	/* Write control channel ring reference. */
	ret = xenbus_printf(xbt, path, node_ring, "%u", channel->gref);
	if (ret < 0) {
		dev_err(&xb_dev->dev, "Error writing ring-ref: %d\n", ret);
		return ret;
	}

	/* Write event channel ring reference. */
	ret = xenbus_printf(xbt, path, node_chnl, "%u", channel->port);
	if (ret < 0) {
		dev_err(&xb_dev->dev, "Error writing event channel: %d\n", ret);
		return ret;
	}

	return 0;
}
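
/*
 * Publish all channels of all streams in XenStore within a single
 * transaction, restarting the transaction on -EAGAIN.
 */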
int xen_snd_front_evtchnl_publish_all(struct xen_snd_front_info *front_info)
{
	struct xen_front_cfg_card *cfg = &front_info->cfg;
	struct xenbus_transaction xbt;
	int ret, d;

again:
	ret = xenbus_transaction_start(&xbt);
	if (ret < 0) {
		xenbus_dev_fatal(front_info->xb_dev, ret,
				 "starting transaction");
		return ret;
	}

	for (d = 0; d < cfg->num_pcm_instances; d++) {
		struct xen_front_cfg_pcm_instance *pcm_instance;
		int s, index;

		pcm_instance = &cfg->pcm_instances[d];

		for (s = 0; s < pcm_instance->num_streams_pb; s++) {
			index = pcm_instance->streams_pb[s].index;

			ret = evtchnl_publish(xbt,
					      &front_info->evt_pairs[index].req,
					      pcm_instance->streams_pb[s].xenstore_path,
					      XENSND_FIELD_RING_REF,
					      XENSND_FIELD_EVT_CHNL);
			if (ret < 0)
				goto fail;

			ret = evtchnl_publish(xbt,
					      &front_info->evt_pairs[index].evt,
					      pcm_instance->streams_pb[s].xenstore_path,
					      XENSND_FIELD_EVT_RING_REF,
					      XENSND_FIELD_EVT_EVT_CHNL);
			if (ret < 0)
				goto fail;
		}

		for (s = 0; s < pcm_instance->num_streams_cap; s++) {
			index = pcm_instance->streams_cap[s].index;

			ret = evtchnl_publish(xbt,
					      &front_info->evt_pairs[index].req,
					      pcm_instance->streams_cap[s].xenstore_path,
					      XENSND_FIELD_RING_REF,
					      XENSND_FIELD_EVT_CHNL);
			if (ret < 0)
				goto fail;

			ret = evtchnl_publish(xbt,
					      &front_info->evt_pairs[index].evt,
					      pcm_instance->streams_cap[s].xenstore_path,
					      XENSND_FIELD_EVT_RING_REF,
					      XENSND_FIELD_EVT_EVT_CHNL);
			if (ret < 0)
				goto fail;
		}
	}

	ret = xenbus_transaction_end(xbt, 0);
	if (ret < 0) {
		if (ret == -EAGAIN)
			goto again;

		xenbus_dev_fatal(front_info->xb_dev, ret,
				 "completing transaction");
		goto fail_to_end;
	}

	return 0;

fail:
	xenbus_transaction_end(xbt, 1);

fail_to_end:
	xenbus_dev_fatal(front_info->xb_dev, ret, "writing XenStore");
	return ret;
}

void xen_snd_front_evtchnl_pair_set_connected(struct xen_snd_front_evtchnl_pair *evt_pair,
					      bool is_connected)
{
	enum xen_snd_front_evtchnl_state state;

	if (is_connected)
		state = EVTCHNL_STATE_CONNECTED;
	else
		state = EVTCHNL_STATE_DISCONNECTED;

	mutex_lock(&evt_pair->req.ring_io_lock);
	evt_pair->req.state = state;
	mutex_unlock(&evt_pair->req.ring_io_lock);

	mutex_lock(&evt_pair->evt.ring_io_lock);
	evt_pair->evt.state = state;
	mutex_unlock(&evt_pair->evt.ring_io_lock);
}

void xen_snd_front_evtchnl_pair_clear(struct xen_snd_front_evtchnl_pair *evt_pair)
{
	mutex_lock(&evt_pair->req.ring_io_lock);
	evt_pair->req.evt_next_id = 0;
	mutex_unlock(&evt_pair->req.ring_io_lock);

	mutex_lock(&evt_pair->evt.ring_io_lock);
	evt_pair->evt.evt_next_id = 0;
	mutex_unlock(&evt_pair->evt.ring_io_lock);
}