/*
 * Intel SST generic IPC Support
 *
 * Copyright (C) 2015, Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/kthread.h>
#include <sound/asound.h>

#include "sst-dsp.h"
#include "sst-dsp-priv.h"
#include "sst-ipc.h"

/* IPC message timeout (msecs) */
#define IPC_TIMEOUT_MSECS	300

#define IPC_EMPTY_LIST_SIZE	8

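/*
 * Message lifecycle: free messages sit on empty_list; a TX request takes
 * one, fills it and queues it on tx_list; the kthread worker moves it to
 * rx_list while it awaits the DSP reply; completion (or timeout/drop)
 * returns it to empty_list.
 */
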
/* locks held by caller */
static struct ipc_message *msg_get_empty(struct sst_generic_ipc *ipc)
{
	struct ipc_message *msg = NULL;

	if (!list_empty(&ipc->empty_list)) {
		msg = list_first_entry(&ipc->empty_list, struct ipc_message,
			list);
		list_del(&msg->list);
	}

	return msg;
}

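/*
 * Block the caller until the DSP completes @msg or the IPC timeout
 * expires, then recycle the message back onto empty_list.
 */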
static int tx_wait_done(struct sst_generic_ipc *ipc,
	struct ipc_message *msg, void *rx_data)
{
	unsigned long flags;
	int ret;

	/* wait for DSP completion (in all cases atm inc pending) */
	ret = wait_event_timeout(msg->waitq, msg->complete,
		msecs_to_jiffies(IPC_TIMEOUT_MSECS));

	spin_lock_irqsave(&ipc->dsp->spinlock, flags);
	if (ret == 0) {
		if (ipc->ops.shim_dbg != NULL)
			ipc->ops.shim_dbg(ipc, "message timeout");

		list_del(&msg->list);
		ret = -ETIMEDOUT;
	} else {

		/* copy the data returned from DSP */
		if (msg->rx_size)
			memcpy(rx_data, msg->rx_data, msg->rx_size);
		ret = msg->errno;
	}

	list_add_tail(&msg->list, &ipc->empty_list);
	spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
	return ret;
}

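/*
 * Take a free message, fill in the header and payload, queue it on
 * tx_list and kick the kthread worker; optionally wait for the reply.
 */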
static int ipc_tx_message(struct sst_generic_ipc *ipc, u64 header,
	void *tx_data, size_t tx_bytes, void *rx_data,
	size_t rx_bytes, int wait)
{
	struct ipc_message *msg;
	unsigned long flags;

	spin_lock_irqsave(&ipc->dsp->spinlock, flags);

	msg = msg_get_empty(ipc);
	if (msg == NULL) {
		spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
		return -EBUSY;
	}

	msg->header = header;
	msg->tx_size = tx_bytes;
	msg->rx_size = rx_bytes;
	msg->wait = wait;
	msg->errno = 0;
	msg->pending = false;
	msg->complete = false;

	if ((tx_bytes) && (ipc->ops.tx_data_copy != NULL))
		ipc->ops.tx_data_copy(msg, tx_data, tx_bytes);

	list_add_tail(&msg->list, &ipc->tx_list);
	spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);

	queue_kthread_work(&ipc->kworker, &ipc->kwork);

	if (wait)
		return tx_wait_done(ipc, msg, rx_data);
	else
		return 0;
}

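/* preallocate the message pool and park every message on empty_list */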
static int msg_empty_list_init(struct sst_generic_ipc *ipc)
{
	int i;

	ipc->msg = kzalloc(sizeof(struct ipc_message) *
		IPC_EMPTY_LIST_SIZE, GFP_KERNEL);
	if (ipc->msg == NULL)
		return -ENOMEM;

	for (i = 0; i < IPC_EMPTY_LIST_SIZE; i++) {
		ipc->msg[i].tx_data = kzalloc(ipc->tx_data_max_size, GFP_KERNEL);
		if (ipc->msg[i].tx_data == NULL)
			goto free_mem;

		ipc->msg[i].rx_data = kzalloc(ipc->rx_data_max_size, GFP_KERNEL);
		if (ipc->msg[i].rx_data == NULL) {
			kfree(ipc->msg[i].tx_data);
			goto free_mem;
		}

		init_waitqueue_head(&ipc->msg[i].waitq);
		list_add(&ipc->msg[i].list, &ipc->empty_list);
	}

	return 0;

free_mem:
	while (i > 0) {
		kfree(ipc->msg[i-1].tx_data);
		kfree(ipc->msg[i-1].rx_data);
		--i;
	}
	kfree(ipc->msg);

	return -ENOMEM;
}

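/* kthread worker: send the next queued message once the DSP can take it */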
static void ipc_tx_msgs(struct kthread_work *work)
{
	struct sst_generic_ipc *ipc =
		container_of(work, struct sst_generic_ipc, kwork);
	struct ipc_message *msg;
	unsigned long flags;

	spin_lock_irqsave(&ipc->dsp->spinlock, flags);

	if (list_empty(&ipc->tx_list) || ipc->pending) {
		spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
		return;
	}

	/* if the DSP is busy, we will TX messages after IRQ.
	 * also postpone if we are in the middle of processing completion irq
	 */
	if (ipc->ops.is_dsp_busy && ipc->ops.is_dsp_busy(ipc->dsp)) {
		dev_dbg(ipc->dev, "ipc_tx_msgs dsp busy\n");
		spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
		return;
	}

	msg = list_first_entry(&ipc->tx_list, struct ipc_message, list);
	list_move(&msg->list, &ipc->rx_list);

	if (ipc->ops.tx_msg != NULL)
		ipc->ops.tx_msg(ipc, msg);

	spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
}

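/*
 * Send an IPC message and block until the DSP replies or the timeout
 * fires; any reply payload is copied into @rx_data.
 */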
int sst_ipc_tx_message_wait(struct sst_generic_ipc *ipc, u64 header,
	void *tx_data, size_t tx_bytes, void *rx_data, size_t rx_bytes)
{
	return ipc_tx_message(ipc, header, tx_data, tx_bytes,
		rx_data, rx_bytes, 1);
}
EXPORT_SYMBOL_GPL(sst_ipc_tx_message_wait);

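/* fire-and-forget variant: queue the message and return immediately */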
int sst_ipc_tx_message_nowait(struct sst_generic_ipc *ipc, u64 header,
	void *tx_data, size_t tx_bytes)
{
	return ipc_tx_message(ipc, header, tx_data, tx_bytes,
		NULL, 0, 0);
}
EXPORT_SYMBOL_GPL(sst_ipc_tx_message_nowait);

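/*
 * Match an incoming reply header against the messages awaiting replies
 * on rx_list; returns NULL if nothing matches.
 */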
struct ipc_message *sst_ipc_reply_find_msg(struct sst_generic_ipc *ipc,
	u64 header)
{
	struct ipc_message *msg;
	u64 mask;

	if (ipc->ops.reply_msg_match != NULL)
		header = ipc->ops.reply_msg_match(header, &mask);

	if (list_empty(&ipc->rx_list)) {
		dev_err(ipc->dev, "error: rx list empty but received 0x%llx\n",
			header);
		return NULL;
	}

	list_for_each_entry(msg, &ipc->rx_list, list) {
		if ((msg->header & mask) == header)
			return msg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(sst_ipc_reply_find_msg);

/* locks held by caller */
void sst_ipc_tx_msg_reply_complete(struct sst_generic_ipc *ipc,
	struct ipc_message *msg)
{
	msg->complete = true;

	if (!msg->wait)
		list_add_tail(&msg->list, &ipc->empty_list);
	else
		wake_up(&msg->waitq);
}
EXPORT_SYMBOL_GPL(sst_ipc_tx_msg_reply_complete);

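/* flush every in-flight message, e.g. ahead of a DSP stall + reset */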
void sst_ipc_drop_all(struct sst_generic_ipc *ipc)
{
	struct ipc_message *msg, *tmp;
	unsigned long flags;
	int tx_drop_cnt = 0, rx_drop_cnt = 0;

	/* drop all TX and Rx messages before we stall + reset DSP */
	spin_lock_irqsave(&ipc->dsp->spinlock, flags);

	list_for_each_entry_safe(msg, tmp, &ipc->tx_list, list) {
		list_move(&msg->list, &ipc->empty_list);
		tx_drop_cnt++;
	}

	list_for_each_entry_safe(msg, tmp, &ipc->rx_list, list) {
		list_move(&msg->list, &ipc->empty_list);
		rx_drop_cnt++;
	}

	spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);

	if (tx_drop_cnt || rx_drop_cnt)
		dev_err(ipc->dev, "dropped IPC msg TX=%d, RX=%d\n",
			tx_drop_cnt, rx_drop_cnt);
}
EXPORT_SYMBOL_GPL(sst_ipc_drop_all);

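/* set up the lists, preallocate the message pool and start the TX thread */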
int sst_ipc_init(struct sst_generic_ipc *ipc)
{
	int ret;

	INIT_LIST_HEAD(&ipc->tx_list);
	INIT_LIST_HEAD(&ipc->rx_list);
	INIT_LIST_HEAD(&ipc->empty_list);
	init_waitqueue_head(&ipc->wait_txq);

	ret = msg_empty_list_init(ipc);
	if (ret < 0)
		return -ENOMEM;

	/* start the IPC message thread */
	init_kthread_worker(&ipc->kworker);
	ipc->tx_thread = kthread_run(kthread_worker_fn,
					&ipc->kworker, "%s",
					dev_name(ipc->dev));
	if (IS_ERR(ipc->tx_thread)) {
		dev_err(ipc->dev, "error: failed to create message TX task\n");
		ret = PTR_ERR(ipc->tx_thread);
		kfree(ipc->msg);
		return ret;
	}

	init_kthread_work(&ipc->kwork, ipc_tx_msgs);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_ipc_init);

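/* stop the TX thread and free the preallocated message pool */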
void sst_ipc_fini(struct sst_generic_ipc *ipc)
{
	int i;

	if (ipc->tx_thread)
		kthread_stop(ipc->tx_thread);

	if (ipc->msg) {
		for (i = 0; i < IPC_EMPTY_LIST_SIZE; i++) {
			kfree(ipc->msg[i].tx_data);
			kfree(ipc->msg[i].rx_data);
		}
		kfree(ipc->msg);
	}
}
EXPORT_SYMBOL_GPL(sst_ipc_fini);

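/*
 * Illustrative usage from a platform driver -- a minimal sketch, not part
 * of this API. The callback names (my_tx_msg, my_is_dsp_busy), the message
 * ID MY_BOOT_MSG and the buffer sizes are assumptions; a real driver
 * supplies its own before calling sst_ipc_init().
 *
 *	static int my_ipc_probe(struct sst_generic_ipc *ipc,
 *		struct sst_dsp *dsp, struct device *dev)
 *	{
 *		int ret;
 *
 *		ipc->dev = dev;
 *		ipc->dsp = dsp;
 *		ipc->tx_data_max_size = PAGE_SIZE;
 *		ipc->rx_data_max_size = PAGE_SIZE;
 *		ipc->ops.tx_msg = my_tx_msg;
 *		ipc->ops.is_dsp_busy = my_is_dsp_busy;
 *
 *		ret = sst_ipc_init(ipc);
 *		if (ret < 0)
 *			return ret;
 *
 *		(fire-and-forget; use sst_ipc_tx_message_wait to block)
 *		return sst_ipc_tx_message_nowait(ipc, MY_BOOT_MSG, NULL, 0);
 *	}
 */
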
/* Module information */
MODULE_AUTHOR("Jin Yao");
MODULE_DESCRIPTION("Intel SST IPC generic");
MODULE_LICENSE("GPL v2");