/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * Copyright(c) 2018 Intel Corporation
 *
 * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
 */

#ifndef __SOUND_SOC_SOF_IO_H
#define __SOUND_SOC_SOF_IO_H

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <sound/pcm.h>
#include "sof-priv.h"
#define sof_ops(sdev) \
	((sdev)->pdata->desc->ops)

static inline int sof_ops_init(struct snd_sof_dev *sdev)
{
	if (sdev->pdata->desc->ops_init)
		return sdev->pdata->desc->ops_init(sdev);

	return 0;
}

static inline void sof_ops_free(struct snd_sof_dev *sdev)
{
	if (sdev->pdata->desc->ops_free)
		sdev->pdata->desc->ops_free(sdev);
}

/* Mandatory operations are verified during probing */
static inline int snd_sof_probe_early(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->probe_early)
		return sof_ops(sdev)->probe_early(sdev);

	return 0;
}

static inline int snd_sof_probe(struct snd_sof_dev *sdev)
{
	return sof_ops(sdev)->probe(sdev);
}

static inline void snd_sof_remove(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->remove)
		sof_ops(sdev)->remove(sdev);
}

static inline void snd_sof_remove_late(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->remove_late)
		sof_ops(sdev)->remove_late(sdev);
}

static inline int snd_sof_shutdown(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->shutdown)
		return sof_ops(sdev)->shutdown(sdev);

	return 0;
}

/*
 * snd_sof_dsp_run returns the core mask of the cores that are available
 * after successful fw boot
 */
static inline int snd_sof_dsp_run(struct snd_sof_dev *sdev)
{
	return sof_ops(sdev)->run(sdev);
}

static inline int snd_sof_dsp_stall(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	if (sof_ops(sdev)->stall)
		return sof_ops(sdev)->stall(sdev, core_mask);

	return 0;
}

static inline int snd_sof_dsp_reset(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->reset)
		return sof_ops(sdev)->reset(sdev);

	return 0;
}
/* dsp core get/put */
static inline int snd_sof_dsp_core_get(struct snd_sof_dev *sdev, int core)
{
	if (core > sdev->num_cores - 1) {
		dev_err(sdev->dev, "invalid core id: %d for num_cores: %d\n", core,
			sdev->num_cores);
		return -EINVAL;
	}

	if (sof_ops(sdev)->core_get) {
		int ret;

		/* if current ref_count is > 0, increment it and return */
		if (sdev->dsp_core_ref_count[core] > 0) {
			sdev->dsp_core_ref_count[core]++;
			return 0;
		}

		/* power up the core */
		ret = sof_ops(sdev)->core_get(sdev, core);
		if (ret < 0)
			return ret;

		/* increment ref_count */
		sdev->dsp_core_ref_count[core]++;

		/* and update enabled_cores_mask */
		sdev->enabled_cores_mask |= BIT(core);

		dev_dbg(sdev->dev, "Core %d powered up\n", core);
	}

	return 0;
}

static inline int snd_sof_dsp_core_put(struct snd_sof_dev *sdev, int core)
{
	if (core > sdev->num_cores - 1) {
		dev_err(sdev->dev, "invalid core id: %d for num_cores: %d\n", core,
			sdev->num_cores);
		return -EINVAL;
	}

	if (sof_ops(sdev)->core_put) {
		int ret;

		/* decrement ref_count and return if it is > 0 */
		if (--(sdev->dsp_core_ref_count[core]) > 0)
			return 0;

		/* power down the core */
		ret = sof_ops(sdev)->core_put(sdev, core);
		if (ret < 0)
			return ret;

		/* and update enabled_cores_mask */
		sdev->enabled_cores_mask &= ~BIT(core);

		dev_dbg(sdev->dev, "Core %d powered down\n", core);
	}

	return 0;
}
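/*
 * Usage sketch (illustrative, not part of this header): core power is
 * reference counted, so callers are expected to balance each successful
 * snd_sof_dsp_core_get() with a matching snd_sof_dsp_core_put(). The core
 * index used below is hypothetical.
 *
 *	ret = snd_sof_dsp_core_get(sdev, 1);
 *	if (ret < 0)
 *		return ret;
 *
 *	... run work on the secondary core ...
 *
 *	snd_sof_dsp_core_put(sdev, 1);
 */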
/* pre/post fw load */
static inline int snd_sof_dsp_pre_fw_run(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->pre_fw_run)
		return sof_ops(sdev)->pre_fw_run(sdev);

	return 0;
}

static inline int snd_sof_dsp_post_fw_run(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->post_fw_run)
		return sof_ops(sdev)->post_fw_run(sdev);

	return 0;
}

/* parse platform specific extended manifest */
static inline int snd_sof_dsp_parse_platform_ext_manifest(struct snd_sof_dev *sdev,
							   const struct sof_ext_man_elem_header *hdr)
{
	if (sof_ops(sdev)->parse_platform_ext_manifest)
		return sof_ops(sdev)->parse_platform_ext_manifest(sdev, hdr);

	return 0;
}
/**
 * snd_sof_dsp_get_bar_index - Maps a section type with a BAR index
 *
 * @sdev: sof device
 * @type: section type as described by snd_sof_fw_blk_type
 *
 * Returns the corresponding BAR index (a positive integer) or -EINVAL
 * in case there is no mapping
 */
static inline int snd_sof_dsp_get_bar_index(struct snd_sof_dev *sdev, u32 type)
{
	if (sof_ops(sdev)->get_bar_index)
		return sof_ops(sdev)->get_bar_index(sdev, type);

	return sdev->mmio_bar;
}
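/*
 * Usage sketch (illustrative, not part of this header): a caller that
 * wants to access a firmware section through MMIO can first map the
 * section type to a BAR and then use the register helpers below.
 * SOF_FW_BLK_TYPE_SRAM and the offset are assumed/representative values.
 *
 *	int bar = snd_sof_dsp_get_bar_index(sdev, SOF_FW_BLK_TYPE_SRAM);
 *	u32 reg;
 *
 *	if (bar < 0)
 *		return bar;
 *
 *	reg = snd_sof_dsp_read(sdev, bar, offset);
 */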
static inline int snd_sof_dsp_get_mailbox_offset(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->get_mailbox_offset)
		return sof_ops(sdev)->get_mailbox_offset(sdev);

	dev_err(sdev->dev, "error: %s not defined\n", __func__);
	return -EOPNOTSUPP;
}

static inline int snd_sof_dsp_get_window_offset(struct snd_sof_dev *sdev,
						u32 id)
{
	if (sof_ops(sdev)->get_window_offset)
		return sof_ops(sdev)->get_window_offset(sdev, id);

	dev_err(sdev->dev, "error: %s not defined\n", __func__);
	return -EOPNOTSUPP;
}
/* power management */
static inline int snd_sof_dsp_resume(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->resume)
		return sof_ops(sdev)->resume(sdev);

	return 0;
}

static inline int snd_sof_dsp_suspend(struct snd_sof_dev *sdev,
				      u32 target_state)
{
	if (sof_ops(sdev)->suspend)
		return sof_ops(sdev)->suspend(sdev, target_state);

	return 0;
}

static inline int snd_sof_dsp_runtime_resume(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->runtime_resume)
		return sof_ops(sdev)->runtime_resume(sdev);

	return 0;
}

static inline int snd_sof_dsp_runtime_suspend(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->runtime_suspend)
		return sof_ops(sdev)->runtime_suspend(sdev);

	return 0;
}

static inline int snd_sof_dsp_runtime_idle(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->runtime_idle)
		return sof_ops(sdev)->runtime_idle(sdev);

	return 0;
}

static inline int snd_sof_dsp_hw_params_upon_resume(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->set_hw_params_upon_resume)
		return sof_ops(sdev)->set_hw_params_upon_resume(sdev);

	return 0;
}

static inline int snd_sof_dsp_set_clk(struct snd_sof_dev *sdev, u32 freq)
{
	if (sof_ops(sdev)->set_clk)
		return sof_ops(sdev)->set_clk(sdev, freq);

	return 0;
}

static inline int
snd_sof_dsp_set_power_state(struct snd_sof_dev *sdev,
			    const struct sof_dsp_power_state *target_state)
{
	int ret = 0;

	mutex_lock(&sdev->power_state_access);

	if (sof_ops(sdev)->set_power_state)
		ret = sof_ops(sdev)->set_power_state(sdev, target_state);

	mutex_unlock(&sdev->power_state_access);

	return ret;
}
void snd_sof_dsp_dbg_dump(struct snd_sof_dev *sdev, const char *msg, u32 flags);

static inline int snd_sof_debugfs_add_region_item(struct snd_sof_dev *sdev,
		enum snd_sof_fw_blk_type blk_type, u32 offset, size_t size,
		const char *name, enum sof_debugfs_access_type access_type)
{
	if (sof_ops(sdev) && sof_ops(sdev)->debugfs_add_region_item)
		return sof_ops(sdev)->debugfs_add_region_item(sdev, blk_type, offset,
							      size, name, access_type);

	return 0;
}
static inline void snd_sof_dsp_write8(struct snd_sof_dev *sdev, u32 bar,
				      u32 offset, u8 value)
{
	if (sof_ops(sdev)->write8)
		sof_ops(sdev)->write8(sdev, sdev->bar[bar] + offset, value);
	else
		writeb(value, sdev->bar[bar] + offset);
}

static inline void snd_sof_dsp_write(struct snd_sof_dev *sdev, u32 bar,
				     u32 offset, u32 value)
{
	if (sof_ops(sdev)->write)
		sof_ops(sdev)->write(sdev, sdev->bar[bar] + offset, value);
	else
		writel(value, sdev->bar[bar] + offset);
}

static inline void snd_sof_dsp_write64(struct snd_sof_dev *sdev, u32 bar,
				       u32 offset, u64 value)
{
	if (sof_ops(sdev)->write64)
		sof_ops(sdev)->write64(sdev, sdev->bar[bar] + offset, value);
	else
		writeq(value, sdev->bar[bar] + offset);
}

static inline u8 snd_sof_dsp_read8(struct snd_sof_dev *sdev, u32 bar,
				   u32 offset)
{
	if (sof_ops(sdev)->read8)
		return sof_ops(sdev)->read8(sdev, sdev->bar[bar] + offset);

	return readb(sdev->bar[bar] + offset);
}

static inline u32 snd_sof_dsp_read(struct snd_sof_dev *sdev, u32 bar,
				   u32 offset)
{
	if (sof_ops(sdev)->read)
		return sof_ops(sdev)->read(sdev, sdev->bar[bar] + offset);

	return readl(sdev->bar[bar] + offset);
}

static inline u64 snd_sof_dsp_read64(struct snd_sof_dev *sdev, u32 bar,
				     u32 offset)
{
	if (sof_ops(sdev)->read64)
		return sof_ops(sdev)->read64(sdev, sdev->bar[bar] + offset);

	return readq(sdev->bar[bar] + offset);
}

static inline void snd_sof_dsp_update8(struct snd_sof_dev *sdev, u32 bar,
				       u32 offset, u8 mask, u8 value)
{
	u8 reg;

	reg = snd_sof_dsp_read8(sdev, bar, offset);
	reg &= ~mask;
	reg |= value;
	snd_sof_dsp_write8(sdev, bar, offset, reg);
}
static inline int snd_sof_dsp_block_read(struct snd_sof_dev *sdev,
					 enum snd_sof_fw_blk_type blk_type,
					 u32 offset, void *dest, size_t bytes)
{
	return sof_ops(sdev)->block_read(sdev, blk_type, offset, dest, bytes);
}

static inline int snd_sof_dsp_block_write(struct snd_sof_dev *sdev,
					  enum snd_sof_fw_blk_type blk_type,
					  u32 offset, void *src, size_t bytes)
{
	return sof_ops(sdev)->block_write(sdev, blk_type, offset, src, bytes);
}

static inline void snd_sof_dsp_mailbox_read(struct snd_sof_dev *sdev,
					    u32 offset, void *dest, size_t bytes)
{
	if (sof_ops(sdev)->mailbox_read)
		sof_ops(sdev)->mailbox_read(sdev, offset, dest, bytes);
}

static inline void snd_sof_dsp_mailbox_write(struct snd_sof_dev *sdev,
					     u32 offset, void *src, size_t bytes)
{
	if (sof_ops(sdev)->mailbox_write)
		sof_ops(sdev)->mailbox_write(sdev, offset, src, bytes);
}

static inline int snd_sof_dsp_send_msg(struct snd_sof_dev *sdev,
				       struct snd_sof_ipc_msg *msg)
{
	return sof_ops(sdev)->send_msg(sdev, msg);
}
static inline int
snd_sof_pcm_platform_open(struct snd_sof_dev *sdev,
			  struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_open)
		return sof_ops(sdev)->pcm_open(sdev, substream);

	return 0;
}

/* disconnect pcm substream to a host stream */
static inline int
snd_sof_pcm_platform_close(struct snd_sof_dev *sdev,
			   struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_close)
		return sof_ops(sdev)->pcm_close(sdev, substream);

	return 0;
}

/* host stream hw params */
static inline int
snd_sof_pcm_platform_hw_params(struct snd_sof_dev *sdev,
			       struct snd_pcm_substream *substream,
			       struct snd_pcm_hw_params *params,
			       struct snd_sof_platform_stream_params *platform_params)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_hw_params)
		return sof_ops(sdev)->pcm_hw_params(sdev, substream, params,
						    platform_params);

	return 0;
}

/* host stream hw free */
static inline int
snd_sof_pcm_platform_hw_free(struct snd_sof_dev *sdev,
			     struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_hw_free)
		return sof_ops(sdev)->pcm_hw_free(sdev, substream);

	return 0;
}

/* host stream trigger */
static inline int
snd_sof_pcm_platform_trigger(struct snd_sof_dev *sdev,
			     struct snd_pcm_substream *substream, int cmd)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_trigger)
		return sof_ops(sdev)->pcm_trigger(sdev, substream, cmd);

	return 0;
}

/* Firmware loading */
static inline int snd_sof_load_firmware(struct snd_sof_dev *sdev)
{
	dev_dbg(sdev->dev, "loading firmware\n");

	return sof_ops(sdev)->load_firmware(sdev);
}
/* host DSP message data */
static inline int snd_sof_ipc_msg_data(struct snd_sof_dev *sdev,
				       struct snd_sof_pcm_stream *sps,
				       void *p, size_t sz)
{
	return sof_ops(sdev)->ipc_msg_data(sdev, sps, p, sz);
}

/* host side configuration of the stream's data offset in stream mailbox area */
static inline int
snd_sof_set_stream_data_offset(struct snd_sof_dev *sdev,
			       struct snd_sof_pcm_stream *sps,
			       size_t posn_offset)
{
	if (sof_ops(sdev) && sof_ops(sdev)->set_stream_data_offset)
		return sof_ops(sdev)->set_stream_data_offset(sdev, sps,
							     posn_offset);

	return 0;
}

/* host stream pointer */
static inline snd_pcm_uframes_t
snd_sof_pcm_platform_pointer(struct snd_sof_dev *sdev,
			     struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_pointer)
		return sof_ops(sdev)->pcm_pointer(sdev, substream);

	return 0;
}

static inline int snd_sof_pcm_platform_ack(struct snd_sof_dev *sdev,
					   struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_ack)
		return sof_ops(sdev)->pcm_ack(sdev, substream);

	return 0;
}
static inline u64
snd_sof_pcm_get_dai_frame_counter(struct snd_sof_dev *sdev,
				  struct snd_soc_component *component,
				  struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->get_dai_frame_counter)
		return sof_ops(sdev)->get_dai_frame_counter(sdev, component,
							    substream);

	return 0;
}

static inline u64
snd_sof_pcm_get_host_byte_counter(struct snd_sof_dev *sdev,
				  struct snd_soc_component *component,
				  struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->get_host_byte_counter)
		return sof_ops(sdev)->get_host_byte_counter(sdev, component,
							    substream);

	return 0;
}
static inline int
snd_sof_machine_register(struct snd_sof_dev *sdev, void *pdata)
{
	if (sof_ops(sdev) && sof_ops(sdev)->machine_register)
		return sof_ops(sdev)->machine_register(sdev, pdata);

	return 0;
}

static inline void
snd_sof_machine_unregister(struct snd_sof_dev *sdev, void *pdata)
{
	if (sof_ops(sdev) && sof_ops(sdev)->machine_unregister)
		sof_ops(sdev)->machine_unregister(sdev, pdata);
}

static inline struct snd_soc_acpi_mach *
snd_sof_machine_select(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev) && sof_ops(sdev)->machine_select)
		return sof_ops(sdev)->machine_select(sdev);

	return NULL;
}

static inline void
snd_sof_set_mach_params(struct snd_soc_acpi_mach *mach,
			struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev) && sof_ops(sdev)->set_mach_params)
		sof_ops(sdev)->set_mach_params(mach, sdev);
}

static inline bool
snd_sof_is_chain_dma_supported(struct snd_sof_dev *sdev, u32 dai_type)
{
	if (sof_ops(sdev) && sof_ops(sdev)->is_chain_dma_supported)
		return sof_ops(sdev)->is_chain_dma_supported(sdev, dai_type);

	return false;
}
/**
 * snd_sof_dsp_read_poll_timeout - Periodically poll a register until a
 * condition is met or a timeout occurs
 * @sdev: sof device
 * @bar: BAR index of the register to poll
 * @offset: Register offset to poll
 * @val: Variable to read the value into
 * @cond: Break condition (usually involving @val)
 * @sleep_us: Maximum time to sleep between reads in us (0 tight-loops). Please
 *            read usleep_range() function description for details and
 *            limitations.
 * @timeout_us: Timeout in us, 0 means never timeout
 *
 * Returns: 0 on success and -ETIMEDOUT upon a timeout. In either
 * case, the last read value is stored in @val. Must not
 * be called from atomic context if sleep_us or timeout_us are used.
 *
 * This is modelled after the readx_poll_timeout macros in linux/iopoll.h.
 */
#define snd_sof_dsp_read_poll_timeout(sdev, bar, offset, val, cond, sleep_us, timeout_us) \
({ \
	u64 __timeout_us = (timeout_us); \
	unsigned long __sleep_us = (sleep_us); \
	ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
	might_sleep_if((__sleep_us) != 0); \
	for (;;) { \
		(val) = snd_sof_dsp_read(sdev, bar, offset); \
		if (cond) { \
			dev_dbg(sdev->dev, \
				"FW Poll Status: reg[%#x]=%#x successful\n", \
				(offset), (val)); \
			break; \
		} \
		if (__timeout_us && \
		    ktime_compare(ktime_get(), __timeout) > 0) { \
			(val) = snd_sof_dsp_read(sdev, bar, offset); \
			dev_dbg(sdev->dev, \
				"FW Poll Status: reg[%#x]=%#x timedout\n", \
				(offset), (val)); \
			break; \
		} \
		if (__sleep_us) \
			usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
	} \
	(cond) ? 0 : -ETIMEDOUT; \
})
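/*
 * Usage sketch (illustrative, not part of this header): waiting for a
 * "ready" bit to be set in a status register. The bar, offset, bit mask
 * and timing constants below are hypothetical, not defined here.
 *
 *	u32 status;
 *	int ret;
 *
 *	ret = snd_sof_dsp_read_poll_timeout(sdev, MY_BAR, MY_STATUS_OFFSET,
 *					    status, (status & MY_READY_BIT),
 *					    MY_POLL_INTERVAL_US,
 *					    MY_POLL_TIMEOUT_US);
 *	if (ret < 0)
 *		dev_err(sdev->dev, "timed out waiting for DSP ready\n");
 */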
/* This is for registers bits with attribute RWC */
bool snd_sof_pci_update_bits(struct snd_sof_dev *sdev, u32 offset,
			     u32 mask, u32 value);

bool snd_sof_dsp_update_bits_unlocked(struct snd_sof_dev *sdev, u32 bar,
				      u32 offset, u32 mask, u32 value);

bool snd_sof_dsp_update_bits64_unlocked(struct snd_sof_dev *sdev, u32 bar,
					u32 offset, u64 mask, u64 value);

bool snd_sof_dsp_update_bits(struct snd_sof_dev *sdev, u32 bar, u32 offset,
			     u32 mask, u32 value);

bool snd_sof_dsp_update_bits64(struct snd_sof_dev *sdev, u32 bar,
			       u32 offset, u64 mask, u64 value);

void snd_sof_dsp_update_bits_forced(struct snd_sof_dev *sdev, u32 bar,
				    u32 offset, u32 mask, u32 value);

int snd_sof_dsp_register_poll(struct snd_sof_dev *sdev, u32 bar, u32 offset,
			      u32 mask, u32 target, u32 timeout_ms,
			      u32 interval_us);

void snd_sof_dsp_panic(struct snd_sof_dev *sdev, u32 offset, bool non_recoverable);

#endif