// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual sound device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <linux/delay.h>
#include <linux/module.h>

#include <xen/page.h>
#include <xen/platform_pci.h>
#include <xen/xenbus.h>

#include <xen/xen-front-pgdir-shbuf.h>
#include <xen/interface/io/sndif.h>

#include "xen_snd_front.h"
#include "xen_snd_front_alsa.h"
#include "xen_snd_front_evtchnl.h"
/* Prepare a new request on the shared ring; caller holds evtchnl->ring_io_lock. */
static struct xensnd_req *
be_stream_prepare_req(struct xen_snd_front_evtchnl *evtchnl, u8 operation)
{
	struct xensnd_req *req;

	req = RING_GET_REQUEST(&evtchnl->u.req.ring,
			       evtchnl->u.req.ring.req_prod_pvt);
	req->operation = operation;
	req->id = evtchnl->evt_next_id++;
	evtchnl->evt_id = req->id;
	return req;
}
static int be_stream_do_io(struct xen_snd_front_evtchnl *evtchnl)
{
	if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
		return -EIO;

	reinit_completion(&evtchnl->u.req.completion);
	xen_snd_front_evtchnl_flush(evtchnl);
	return 0;
}
static int be_stream_wait_io(struct xen_snd_front_evtchnl *evtchnl)
{
	if (wait_for_completion_timeout(&evtchnl->u.req.completion,
					msecs_to_jiffies(VSND_WAIT_BACK_MS)) <= 0)
		return -ETIMEDOUT;

	return evtchnl->u.req.resp_status;
}
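
/*
 * All of the stream operations below follow the same pattern: the request
 * is placed on the shared ring under ring_io_lock, be_stream_do_io() pushes
 * it to the backend, and be_stream_wait_io() waits for the matching
 * response, which the event channel handler signals through
 * u.req.completion. req_io_lock serializes whole request/response
 * transactions on a channel.
 */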
int xen_snd_front_stream_query_hw_param(struct xen_snd_front_evtchnl *evtchnl,
					struct xensnd_query_hw_param *hw_param_req,
					struct xensnd_query_hw_param *hw_param_resp)
{
	struct xensnd_req *req;
	int ret;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	mutex_lock(&evtchnl->ring_io_lock);
	req = be_stream_prepare_req(evtchnl, XENSND_OP_HW_PARAM_QUERY);
	req->op.hw_param = *hw_param_req;
	mutex_unlock(&evtchnl->ring_io_lock);

	ret = be_stream_do_io(evtchnl);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	if (ret == 0)
		*hw_param_resp = evtchnl->u.req.resp.hw_param;

	mutex_unlock(&evtchnl->u.req.req_io_lock);

	return ret;
}
int xen_snd_front_stream_prepare(struct xen_snd_front_evtchnl *evtchnl,
				 struct xen_front_pgdir_shbuf *shbuf,
				 u8 format, unsigned int channels,
				 unsigned int rate, u32 buffer_sz,
				 u32 period_sz)
{
	struct xensnd_req *req;
	int ret;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	mutex_lock(&evtchnl->ring_io_lock);
	req = be_stream_prepare_req(evtchnl, XENSND_OP_OPEN);
	req->op.open.pcm_format = format;
	req->op.open.pcm_channels = channels;
	req->op.open.pcm_rate = rate;
	req->op.open.buffer_sz = buffer_sz;
	req->op.open.period_sz = period_sz;
	req->op.open.gref_directory =
		xen_front_pgdir_shbuf_get_dir_start(shbuf);
	mutex_unlock(&evtchnl->ring_io_lock);

	ret = be_stream_do_io(evtchnl);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);

	return ret;
}
int xen_snd_front_stream_close(struct xen_snd_front_evtchnl *evtchnl)
{
	__always_unused struct xensnd_req *req;
	int ret;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	mutex_lock(&evtchnl->ring_io_lock);
	req = be_stream_prepare_req(evtchnl, XENSND_OP_CLOSE);
	mutex_unlock(&evtchnl->ring_io_lock);

	ret = be_stream_do_io(evtchnl);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);

	return ret;
}
int xen_snd_front_stream_write(struct xen_snd_front_evtchnl *evtchnl,
			       unsigned long pos, unsigned long count)
{
	struct xensnd_req *req;
	int ret;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	mutex_lock(&evtchnl->ring_io_lock);
	req = be_stream_prepare_req(evtchnl, XENSND_OP_WRITE);
	req->op.rw.length = count;
	req->op.rw.offset = pos;
	mutex_unlock(&evtchnl->ring_io_lock);

	ret = be_stream_do_io(evtchnl);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);

	return ret;
}
int xen_snd_front_stream_read(struct xen_snd_front_evtchnl *evtchnl,
			      unsigned long pos, unsigned long count)
{
	struct xensnd_req *req;
	int ret;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	mutex_lock(&evtchnl->ring_io_lock);
	req = be_stream_prepare_req(evtchnl, XENSND_OP_READ);
	req->op.rw.length = count;
	req->op.rw.offset = pos;
	mutex_unlock(&evtchnl->ring_io_lock);

	ret = be_stream_do_io(evtchnl);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);

	return ret;
}
int xen_snd_front_stream_trigger(struct xen_snd_front_evtchnl *evtchnl,
				 int type)
{
	struct xensnd_req *req;
	int ret;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	mutex_lock(&evtchnl->ring_io_lock);
	req = be_stream_prepare_req(evtchnl, XENSND_OP_TRIGGER);
	req->op.trigger.type = type;
	mutex_unlock(&evtchnl->ring_io_lock);

	ret = be_stream_do_io(evtchnl);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);

	return ret;
}
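
/*
 * Note: the stream helpers above are not entry points of their own: the
 * ALSA part of the frontend (xen_snd_front_alsa.c) calls them from its
 * PCM callbacks, roughly in the order prepare -> trigger(start) ->
 * write/read -> trigger(stop) -> close.
 */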
static void xen_snd_drv_fini(struct xen_snd_front_info *front_info)
{
	xen_snd_front_alsa_fini(front_info);
	xen_snd_front_evtchnl_free_all(front_info);
}
static int sndback_initwait(struct xen_snd_front_info *front_info)
{
	int num_streams;
	int ret;

	ret = xen_snd_front_cfg_card(front_info, &num_streams);
	if (ret < 0)
		return ret;

	/* Create event channels for all streams and publish them. */
	ret = xen_snd_front_evtchnl_create_all(front_info, num_streams);
	if (ret < 0)
		return ret;

	return xen_snd_front_evtchnl_publish_all(front_info);
}
static int sndback_connect(struct xen_snd_front_info *front_info)
{
	return xen_snd_front_alsa_init(front_info);
}
static void sndback_disconnect(struct xen_snd_front_info *front_info)
{
	xen_snd_drv_fini(front_info);
	xenbus_switch_state(front_info->xb_dev, XenbusStateInitialising);
}
static void sndback_changed(struct xenbus_device *xb_dev,
			    enum xenbus_state backend_state)
{
	struct xen_snd_front_info *front_info = dev_get_drvdata(&xb_dev->dev);
	int ret;

	dev_dbg(&xb_dev->dev, "Backend state is %s, front is %s\n",
		xenbus_strstate(backend_state),
		xenbus_strstate(xb_dev->state));

	switch (backend_state) {
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateInitialised:
		break;

	case XenbusStateInitialising:
		/* Recovering after an unexpected backend closure. */
		sndback_disconnect(front_info);
		break;

	case XenbusStateInitWait:
		/* Recovering after an unexpected backend closure. */
		sndback_disconnect(front_info);

		ret = sndback_initwait(front_info);
		if (ret < 0)
			xenbus_dev_fatal(xb_dev, ret, "initializing frontend");
		else
			xenbus_switch_state(xb_dev, XenbusStateInitialised);
		break;

	case XenbusStateConnected:
		if (xb_dev->state != XenbusStateInitialised)
			break;

		ret = sndback_connect(front_info);
		if (ret < 0)
			xenbus_dev_fatal(xb_dev, ret, "initializing frontend");
		else
			xenbus_switch_state(xb_dev, XenbusStateConnected);
		break;

	case XenbusStateClosing:
		/*
		 * In this state the backend starts freeing resources, so let
		 * it go into the closed state first, then we can also remove
		 * ours.
		 */
		break;

	case XenbusStateUnknown:
	case XenbusStateClosed:
		if (xb_dev->state == XenbusStateClosed)
			break;

		sndback_disconnect(front_info);
		break;
	}
}
static int xen_drv_probe(struct xenbus_device *xb_dev,
			 const struct xenbus_device_id *id)
{
	struct xen_snd_front_info *front_info;

	front_info = devm_kzalloc(&xb_dev->dev,
				  sizeof(*front_info), GFP_KERNEL);
	if (!front_info)
		return -ENOMEM;

	front_info->xb_dev = xb_dev;
	dev_set_drvdata(&xb_dev->dev, front_info);

	return xenbus_switch_state(xb_dev, XenbusStateInitialising);
}
static int xen_drv_remove(struct xenbus_device *dev)
{
	struct xen_snd_front_info *front_info = dev_get_drvdata(&dev->dev);
	int to = 100;

	xenbus_switch_state(dev, XenbusStateClosing);

	/*
	 * On driver removal it is disconnected from XenBus, so no backend
	 * state change events come via the .otherend_changed callback.
	 * This prevents us from exiting gracefully, e.g. signaling the
	 * backend to free event channels, waiting for its state to change
	 * to XenbusStateClosed and cleaning up at our end. Normally, when
	 * the front driver is removed, the backend will finally go into the
	 * XenbusStateInitWait state.
	 *
	 * Workaround: read the backend's state manually and wait with a
	 * time-out.
	 */
	while ((xenbus_read_unsigned(front_info->xb_dev->otherend, "state",
				     XenbusStateUnknown) != XenbusStateInitWait) &&
	       --to)
		msleep(10);

	if (!to) {
		unsigned int state;

		state = xenbus_read_unsigned(front_info->xb_dev->otherend,
					     "state", XenbusStateUnknown);
		pr_err("Backend state is %s while removing driver\n",
		       xenbus_strstate(state));
	}

	xen_snd_drv_fini(front_info);
	xenbus_frontend_closed(dev);

	return 0;
}
static const struct xenbus_device_id xen_drv_ids[] = {
	{ XENSND_DRIVER_NAME },
	{ "" }
};

static struct xenbus_driver xen_driver = {
	.ids = xen_drv_ids,
	.probe = xen_drv_probe,
	.remove = xen_drv_remove,
	.otherend_changed = sndback_changed,
};
static int __init xen_drv_init(void)
{
	if (!xen_has_pv_devices())
		return -ENODEV;

	/* At the moment we only support the case of XEN_PAGE_SIZE == PAGE_SIZE. */
	if (XEN_PAGE_SIZE != PAGE_SIZE) {
		pr_err(XENSND_DRIVER_NAME ": different kernel and Xen page sizes are not supported: XEN_PAGE_SIZE (%lu) != PAGE_SIZE (%lu)\n",
		       XEN_PAGE_SIZE, PAGE_SIZE);
		return -ENODEV;
	}

	pr_info("Initialising Xen " XENSND_DRIVER_NAME " frontend driver\n");

	return xenbus_register_frontend(&xen_driver);
}
static void __exit xen_drv_fini(void)
{
	pr_info("Unregistering Xen " XENSND_DRIVER_NAME " frontend driver\n");
	xenbus_unregister_driver(&xen_driver);
}
module_init(xen_drv_init);
module_exit(xen_drv_fini);

MODULE_DESCRIPTION("Xen virtual sound device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:" XENSND_DRIVER_NAME);
MODULE_SUPPORTED_DEVICE("{{ALSA,Virtual soundcard}}");