// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel SST generic IPC Support
 *
 * Copyright (C) 2015, Intel Corporation. All rights reserved.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <sound/asound.h>

#include "sst-dsp.h"
#include "sst-dsp-priv.h"
#include "sst-ipc.h"

/* IPC message timeout (msecs) */
#define IPC_TIMEOUT_MSECS	300

#define IPC_EMPTY_LIST_SIZE	8

/* locks held by caller */
static struct ipc_message *msg_get_empty(struct sst_generic_ipc *ipc)
{
	struct ipc_message *msg = NULL;

	if (!list_empty(&ipc->empty_list)) {
		msg = list_first_entry(&ipc->empty_list, struct ipc_message,
			list);
		list_del(&msg->list);
	}

	return msg;
}
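
/*
 * Wait for the DSP to mark @msg complete, copy any reply payload back to
 * the caller and recycle the message onto the empty list. Returns the
 * message status, or -ETIMEDOUT if no reply arrives within
 * IPC_TIMEOUT_MSECS.
 */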
static int tx_wait_done(struct sst_generic_ipc *ipc,
	struct ipc_message *msg, struct sst_ipc_message *reply)
{
	unsigned long flags;
	int ret = 0;

	/* wait for DSP completion (in all cases atm inc pending) */
	ret = wait_event_timeout(msg->waitq, msg->complete,
		msecs_to_jiffies(IPC_TIMEOUT_MSECS));

	spin_lock_irqsave(&ipc->dsp->spinlock, flags);
	if (ret == 0) {
		if (ipc->ops.shim_dbg != NULL)
			ipc->ops.shim_dbg(ipc, "message timeout");

		list_del(&msg->list);
		ret = -ETIMEDOUT;
	} else {

		/* copy the data returned from DSP */
		if (reply) {
			reply->header = msg->rx.header;
			if (reply->data)
				memcpy(reply->data, msg->rx.data, msg->rx.size);
		}
		ret = msg->errno;
	}

	list_add_tail(&msg->list, &ipc->empty_list);
	spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
	return ret;
}
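
/*
 * Queue a request on the TX list and kick the worker. With @wait set the
 * call blocks in tx_wait_done() until the DSP replies; otherwise it
 * returns immediately after scheduling. Returns -EBUSY when no free
 * message slot is available.
 */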
static int ipc_tx_message(struct sst_generic_ipc *ipc,
	struct sst_ipc_message request,
	struct sst_ipc_message *reply, int wait)
{
	struct ipc_message *msg;
	unsigned long flags;

	spin_lock_irqsave(&ipc->dsp->spinlock, flags);

	msg = msg_get_empty(ipc);
	if (msg == NULL) {
		spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
		return -EBUSY;
	}

	msg->tx.header = request.header;
	msg->tx.size = request.size;
	msg->rx.header = 0;
	msg->rx.size = reply ? reply->size : 0;
	msg->wait = wait;
	msg->errno = 0;
	msg->pending = false;
	msg->complete = false;

	if ((request.size) && (ipc->ops.tx_data_copy != NULL))
		ipc->ops.tx_data_copy(msg, request.data, request.size);

	list_add_tail(&msg->list, &ipc->tx_list);
	schedule_work(&ipc->kwork);
	spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);

	if (wait)
		return tx_wait_done(ipc, msg, reply);
	else
		return 0;
}
static int msg_empty_list_init(struct sst_generic_ipc *ipc)
{
	int i;

	ipc->msg = kcalloc(IPC_EMPTY_LIST_SIZE, sizeof(struct ipc_message),
			   GFP_KERNEL);
	if (ipc->msg == NULL)
		return -ENOMEM;

	for (i = 0; i < IPC_EMPTY_LIST_SIZE; i++) {
		ipc->msg[i].tx.data = kzalloc(ipc->tx_data_max_size, GFP_KERNEL);
		if (ipc->msg[i].tx.data == NULL)
			goto free_mem;

		ipc->msg[i].rx.data = kzalloc(ipc->rx_data_max_size, GFP_KERNEL);
		if (ipc->msg[i].rx.data == NULL) {
			kfree(ipc->msg[i].tx.data);
			goto free_mem;
		}

		init_waitqueue_head(&ipc->msg[i].waitq);
		list_add(&ipc->msg[i].list, &ipc->empty_list);
	}

	return 0;

free_mem:
	while (i > 0) {
		kfree(ipc->msg[i-1].tx.data);
		kfree(ipc->msg[i-1].rx.data);
		--i;
	}
	kfree(ipc->msg);

	return -ENOMEM;
}
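
/*
 * Work function: drain the TX list, moving each message to the RX list
 * (where it waits for its reply) and handing it to the owner's tx_msg
 * callback. Transmission stops early while the DSP is busy or a
 * completion is pending; it resumes from the IRQ path.
 */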
static void ipc_tx_msgs(struct work_struct *work)
{
	struct sst_generic_ipc *ipc =
		container_of(work, struct sst_generic_ipc, kwork);
	struct ipc_message *msg;

	spin_lock_irq(&ipc->dsp->spinlock);

	while (!list_empty(&ipc->tx_list) && !ipc->pending) {
		/* if the DSP is busy, we will TX messages after IRQ.
		 * also postpone if we are in the middle of processing
		 * completion irq
		 */
		if (ipc->ops.is_dsp_busy && ipc->ops.is_dsp_busy(ipc->dsp)) {
			dev_dbg(ipc->dev, "ipc_tx_msgs dsp busy\n");
			break;
		}

		msg = list_first_entry(&ipc->tx_list, struct ipc_message, list);
		list_move(&msg->list, &ipc->rx_list);

		if (ipc->ops.tx_msg != NULL)
			ipc->ops.tx_msg(ipc, msg);
	}

	spin_unlock_irq(&ipc->dsp->spinlock);
}
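
/*
 * Send a message and block until the DSP replies, bracketing the
 * transaction with the optional low-power wake/release callback.
 */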
int sst_ipc_tx_message_wait(struct sst_generic_ipc *ipc,
	struct sst_ipc_message request, struct sst_ipc_message *reply)
{
	int ret;

	/*
	 * The DSP may be in a low-power active state, so check whether it
	 * supports the DSP lp-on method and, if so, invoke it before
	 * sending the IPC.
	 */
	if (ipc->ops.check_dsp_lp_on)
		if (ipc->ops.check_dsp_lp_on(ipc->dsp, true))
			return -EIO;

	ret = ipc_tx_message(ipc, request, reply, 1);

	if (ipc->ops.check_dsp_lp_on)
		if (ipc->ops.check_dsp_lp_on(ipc->dsp, false))
			return -EIO;

	return ret;
}
EXPORT_SYMBOL_GPL(sst_ipc_tx_message_wait);
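
/*
 * Fire-and-forget variant: the message is queued for transmission and no
 * reply is waited for.
 */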
int sst_ipc_tx_message_nowait(struct sst_generic_ipc *ipc,
	struct sst_ipc_message request)
{
	return ipc_tx_message(ipc, request, NULL, 0);
}
EXPORT_SYMBOL_GPL(sst_ipc_tx_message_nowait);
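
/*
 * Blocking variant without the low-power (PM) bracketing done by
 * sst_ipc_tx_message_wait().
 */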
int sst_ipc_tx_message_nopm(struct sst_generic_ipc *ipc,
	struct sst_ipc_message request, struct sst_ipc_message *reply)
{
	return ipc_tx_message(ipc, request, reply, 1);
}
EXPORT_SYMBOL_GPL(sst_ipc_tx_message_nopm);
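
/*
 * Match an incoming reply header against the messages pending on the RX
 * list, using the owner's reply_msg_match callback (when provided) to
 * derive the header and compare mask. Returns NULL if nothing matches.
 */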
struct ipc_message *sst_ipc_reply_find_msg(struct sst_generic_ipc *ipc,
	u64 header)
{
	struct ipc_message *msg;
	u64 mask;

	if (ipc->ops.reply_msg_match != NULL)
		header = ipc->ops.reply_msg_match(header, &mask);
	else
		mask = (u64)-1;

	if (list_empty(&ipc->rx_list)) {
		dev_err(ipc->dev, "error: rx list empty but received 0x%llx\n",
			header);
		return NULL;
	}

	list_for_each_entry(msg, &ipc->rx_list, list) {
		if ((msg->tx.header & mask) == header)
			return msg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(sst_ipc_reply_find_msg);
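
/*
 * Complete a message found by sst_ipc_reply_find_msg(): nowait messages
 * are recycled directly, waited-on messages get their waiter woken.
 */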
/* locks held by caller */
void sst_ipc_tx_msg_reply_complete(struct sst_generic_ipc *ipc,
	struct ipc_message *msg)
{
	msg->complete = true;

	if (!msg->wait)
		list_add_tail(&msg->list, &ipc->empty_list);
	else
		wake_up(&msg->waitq);
}
EXPORT_SYMBOL_GPL(sst_ipc_tx_msg_reply_complete);
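
/*
 * Flush both queues back onto the empty list, e.g. before stalling and
 * resetting the DSP. Waiters are not woken here; blocked senders will
 * time out in tx_wait_done().
 */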
void sst_ipc_drop_all(struct sst_generic_ipc *ipc)
{
	struct ipc_message *msg, *tmp;
	unsigned long flags;
	int tx_drop_cnt = 0, rx_drop_cnt = 0;

	/* drop all TX and RX messages before we stall + reset DSP */
	spin_lock_irqsave(&ipc->dsp->spinlock, flags);

	list_for_each_entry_safe(msg, tmp, &ipc->tx_list, list) {
		list_move(&msg->list, &ipc->empty_list);
		tx_drop_cnt++;
	}

	list_for_each_entry_safe(msg, tmp, &ipc->rx_list, list) {
		list_move(&msg->list, &ipc->empty_list);
		rx_drop_cnt++;
	}

	spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);

	if (tx_drop_cnt || rx_drop_cnt)
		dev_err(ipc->dev, "dropped IPC msg TX=%d, RX=%d\n",
			tx_drop_cnt, rx_drop_cnt);
}
EXPORT_SYMBOL_GPL(sst_ipc_drop_all);
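
/*
 * Initialise the queues, the message pool and the TX worker. The caller
 * must have set tx_data_max_size/rx_data_max_size beforehand, as the
 * pool buffers are sized from them.
 */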
int sst_ipc_init(struct sst_generic_ipc *ipc)
{
	int ret;

	INIT_LIST_HEAD(&ipc->tx_list);
	INIT_LIST_HEAD(&ipc->rx_list);
	INIT_LIST_HEAD(&ipc->empty_list);
	init_waitqueue_head(&ipc->wait_txq);

	ret = msg_empty_list_init(ipc);
	if (ret < 0)
		return -ENOMEM;

	INIT_WORK(&ipc->kwork, ipc_tx_msgs);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_ipc_init);
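
/*
 * Tear down: flush the TX worker and free the preallocated message pool.
 */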
void sst_ipc_fini(struct sst_generic_ipc *ipc)
{
	int i;

	cancel_work_sync(&ipc->kwork);

	if (ipc->msg) {
		for (i = 0; i < IPC_EMPTY_LIST_SIZE; i++) {
			kfree(ipc->msg[i].tx.data);
			kfree(ipc->msg[i].rx.data);
		}
		kfree(ipc->msg);
	}
}
EXPORT_SYMBOL_GPL(sst_ipc_fini);
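
/*
 * Typical usage (a minimal sketch, not taken from this file): an owner
 * driver embeds struct sst_generic_ipc, fills in dev, dsp, the ops
 * callbacks and the maximum data sizes, then calls sst_ipc_init(). The
 * names my_tx_msg and my_is_dsp_busy below are hypothetical.
 *
 *	ipc->dev = dev;
 *	ipc->dsp = dsp;
 *	ipc->tx_data_max_size = SZ_4K;
 *	ipc->rx_data_max_size = SZ_4K;
 *	ipc->ops.tx_msg = my_tx_msg;
 *	ipc->ops.is_dsp_busy = my_is_dsp_busy;
 *	ret = sst_ipc_init(ipc);
 *
 * Replies arriving in the owner's IRQ handler are matched with
 * sst_ipc_reply_find_msg() and finished with
 * sst_ipc_tx_msg_reply_complete(); sst_ipc_fini() tears everything down.
 */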

/* Module information */
MODULE_AUTHOR("Jin Yao");
MODULE_DESCRIPTION("Intel SST IPC generic");
MODULE_LICENSE("GPL v2");