/* OMAP SSI port driver.
 *
 * Copyright (C) 2010 Nokia Corporation. All rights reserved.
 * Copyright (C) 2014 Sebastian Reichel <sre@kernel.org>
 *
 * Contact: Carlos Chinea <carlos.chinea@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>

#include <linux/of_gpio.h>
#include <linux/debugfs.h>

#include "omap_ssi_regs.h"
#include "omap_ssi.h"
static inline int hsi_dummy_msg(struct hsi_msg *msg __maybe_unused)
{
	return 0;
}

static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused)
{
	return 0;
}
static inline unsigned int ssi_wakein(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	return gpio_get_value(omap_port->wake_gpio);
}
#ifdef CONFIG_DEBUG_FS
static void ssi_debug_remove_port(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	debugfs_remove_recursive(omap_port->dir);
}
static int ssi_debug_port_show(struct seq_file *m, void *p __maybe_unused)
{
	struct hsi_port *port = m->private;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *base = omap_ssi->sys;
	unsigned int ch;

	pm_runtime_get_sync(omap_port->pdev);
	if (omap_port->wake_irq > 0)
		seq_printf(m, "CAWAKE\t\t: %d\n", ssi_wakein(port));
	seq_printf(m, "WAKE\t\t: 0x%08x\n",
				readl(base + SSI_WAKE_REG(port->num)));
	seq_printf(m, "MPU_ENABLE_IRQ%d\t: 0x%08x\n", 0,
			readl(base + SSI_MPU_ENABLE_REG(port->num, 0)));
	seq_printf(m, "MPU_STATUS_IRQ%d\t: 0x%08x\n", 0,
			readl(base + SSI_MPU_STATUS_REG(port->num, 0)));
	/* SST */
	base = omap_port->sst_base;
	seq_puts(m, "\nSST\n===\n");
	seq_printf(m, "ID SST\t\t: 0x%08x\n",
				readl(base + SSI_SST_ID_REG));
	seq_printf(m, "MODE\t\t: 0x%08x\n",
				readl(base + SSI_SST_MODE_REG));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
				readl(base + SSI_SST_FRAMESIZE_REG));
	seq_printf(m, "DIVISOR\t\t: 0x%08x\n",
				readl(base + SSI_SST_DIVISOR_REG));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
				readl(base + SSI_SST_CHANNELS_REG));
	seq_printf(m, "ARBMODE\t\t: 0x%08x\n",
				readl(base + SSI_SST_ARBMODE_REG));
	seq_printf(m, "TXSTATE\t\t: 0x%08x\n",
				readl(base + SSI_SST_TXSTATE_REG));
	seq_printf(m, "BUFSTATE\t: 0x%08x\n",
				readl(base + SSI_SST_BUFSTATE_REG));
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
				readl(base + SSI_SST_BREAK_REG));
	for (ch = 0; ch < omap_port->channels; ch++) {
		seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				readl(base + SSI_SST_BUFFER_CH_REG(ch)));
	}
	/* SSR */
	base = omap_port->ssr_base;
	seq_puts(m, "\nSSR\n===\n");
	seq_printf(m, "ID SSR\t\t: 0x%08x\n",
				readl(base + SSI_SSR_ID_REG));
	seq_printf(m, "MODE\t\t: 0x%08x\n",
				readl(base + SSI_SSR_MODE_REG));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
				readl(base + SSI_SSR_FRAMESIZE_REG));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
				readl(base + SSI_SSR_CHANNELS_REG));
	seq_printf(m, "TIMEOUT\t\t: 0x%08x\n",
				readl(base + SSI_SSR_TIMEOUT_REG));
	seq_printf(m, "RXSTATE\t\t: 0x%08x\n",
				readl(base + SSI_SSR_RXSTATE_REG));
	seq_printf(m, "BUFSTATE\t: 0x%08x\n",
				readl(base + SSI_SSR_BUFSTATE_REG));
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
				readl(base + SSI_SSR_BREAK_REG));
	seq_printf(m, "ERROR\t\t: 0x%08x\n",
				readl(base + SSI_SSR_ERROR_REG));
	seq_printf(m, "ERRORACK\t: 0x%08x\n",
				readl(base + SSI_SSR_ERRORACK_REG));
	for (ch = 0; ch < omap_port->channels; ch++) {
		seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				readl(base + SSI_SSR_BUFFER_CH_REG(ch)));
	}
	pm_runtime_put_sync(omap_port->pdev);

	return 0;
}
static int ssi_port_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, ssi_debug_port_show, inode->i_private);
}
static const struct file_operations ssi_port_regs_fops = {
	.open		= ssi_port_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int ssi_div_get(void *data, u64 *val)
{
	struct hsi_port *port = data;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	pm_runtime_get_sync(omap_port->pdev);
	*val = readl(omap_port->sst_base + SSI_SST_DIVISOR_REG);
	pm_runtime_put_sync(omap_port->pdev);

	return 0;
}
static int ssi_div_set(void *data, u64 val)
{
	struct hsi_port *port = data;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	if (val > 127)
		return -EINVAL;

	pm_runtime_get_sync(omap_port->pdev);
	writel(val, omap_port->sst_base + SSI_SST_DIVISOR_REG);
	omap_port->sst.divisor = val;
	pm_runtime_put_sync(omap_port->pdev);

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(ssi_sst_div_fops, ssi_div_get, ssi_div_set, "%llu\n");
static int __init ssi_debug_add_port(struct omap_ssi_port *omap_port,
				     struct dentry *dir)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);

	dir = debugfs_create_dir(dev_name(omap_port->dev), dir);
	if (!dir)
		return -ENOMEM;
	omap_port->dir = dir;
	debugfs_create_file("regs", S_IRUGO, dir, port, &ssi_port_regs_fops);
	dir = debugfs_create_dir("sst", dir);
	if (!dir)
		return -ENOMEM;
	debugfs_create_file("divisor", S_IRUGO | S_IWUSR, dir, port,
			    &ssi_sst_div_fops);

	return 0;
}
#endif /* CONFIG_DEBUG_FS */
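
/*
 * Claim a free GDD (DMA) logical channel for @msg. Returns the claimed
 * channel number, or -EBUSY when all SSI_MAX_GDD_LCH channels are in use.
 */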
static int ssi_claim_lch(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	int lch;

	for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++)
		if (!omap_ssi->gdd_trn[lch].msg) {
			omap_ssi->gdd_trn[lch].msg = msg;
			omap_ssi->gdd_trn[lch].sg = msg->sgt.sgl;
			return lch;
		}

	return -EBUSY;
}
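
/*
 * Program GDD logical channel @lch for @msg: map the scatterlist, set up
 * the source/destination addresses, burst and hardware sync parameters,
 * unmask the channel interrupt and start the transfer. A runtime PM
 * reference is held for the whole duration of the transfer.
 */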
static int ssi_start_dma(struct hsi_msg *msg, int lch)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *gdd = omap_ssi->gdd;
	int err;
	u16 csdp;
	u16 ccr;
	u32 s_addr;
	u32 d_addr;
	u32 tmp;

	if (msg->ttype == HSI_MSG_READ) {
		err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
							DMA_FROM_DEVICE);
		if (err < 0) {
			dev_dbg(&ssi->device, "DMA map SG failed!\n");
			return err;
		}
		csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT |
			SSI_SRC_SINGLE_ACCESS0 | SSI_SRC_PERIPHERAL_PORT |
			SSI_DATA_TYPE_S32;
		ccr = msg->channel + 0x10 + (port->num * 8); /* Sync */
		ccr |= SSI_DST_AMODE_POSTINC | SSI_SRC_AMODE_CONST |
			SSI_CCR_ENABLE;
		s_addr = omap_port->ssr_dma +
					SSI_SSR_BUFFER_CH_REG(msg->channel);
		d_addr = sg_dma_address(msg->sgt.sgl);
	} else {
		err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
							DMA_TO_DEVICE);
		if (err < 0) {
			dev_dbg(&ssi->device, "DMA map SG failed!\n");
			return err;
		}
		csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT |
			SSI_DST_SINGLE_ACCESS0 | SSI_DST_PERIPHERAL_PORT |
			SSI_DATA_TYPE_S32;
		ccr = (msg->channel + 1 + (port->num * 8)) & 0xf; /* Sync */
		ccr |= SSI_SRC_AMODE_POSTINC | SSI_DST_AMODE_CONST |
			SSI_CCR_ENABLE;
		s_addr = sg_dma_address(msg->sgt.sgl);
		d_addr = omap_port->sst_dma +
					SSI_SST_BUFFER_CH_REG(msg->channel);
	}
	dev_dbg(&ssi->device, "lch %d csdp %08x ccr %04x s_addr %08x d_addr %08x\n",
		lch, csdp, ccr, s_addr, d_addr);

	/* Hold clocks during the transfer */
	pm_runtime_get_sync(omap_port->pdev);

	writew_relaxed(csdp, gdd + SSI_GDD_CSDP_REG(lch));
	writew_relaxed(SSI_BLOCK_IE | SSI_TOUT_IE, gdd + SSI_GDD_CICR_REG(lch));
	writel_relaxed(d_addr, gdd + SSI_GDD_CDSA_REG(lch));
	writel_relaxed(s_addr, gdd + SSI_GDD_CSSA_REG(lch));
	writew_relaxed(SSI_BYTES_TO_FRAMES(msg->sgt.sgl->length),
						gdd + SSI_GDD_CEN_REG(lch));

	spin_lock_bh(&omap_ssi->lock);
	tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp |= SSI_GDD_LCH(lch);
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	spin_unlock_bh(&omap_ssi->lock);
	writew(ccr, gdd + SSI_GDD_CCR_REG(lch));
	msg->status = HSI_STATUS_PROCEEDING;

	return 0;
}
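
/*
 * Start a PIO transfer by arming the DATAACCEPT (write) or DATAAVAILABLE
 * (read) interrupt for the message channel. The data itself is moved one
 * frame at a time from the interrupt path (see ssi_pio_complete()).
 */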
static int ssi_start_pio(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	u32 val;

	pm_runtime_get_sync(omap_port->pdev);
	if (msg->ttype == HSI_MSG_WRITE) {
		val = SSI_DATAACCEPT(msg->channel);
		/* Hold clocks for pio writes */
		pm_runtime_get_sync(omap_port->pdev);
	} else {
		val = SSI_DATAAVAILABLE(msg->channel) | SSI_ERROROCCURED;
	}
	dev_dbg(&port->device, "Single %s transfer\n",
						msg->ttype ? "write" : "read");
	val |= readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	pm_runtime_put_sync(omap_port->pdev);
	msg->actual_len = 0;
	msg->status = HSI_STATUS_PROCEEDING;

	return 0;
}
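
/*
 * Start the message at the head of @queue, using DMA for transfers larger
 * than one word when a GDD channel can be claimed, and falling back to PIO
 * otherwise. Called with the port lock held.
 */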
static int ssi_start_transfer(struct list_head *queue)
{
	struct hsi_msg *msg;
	int lch = -1;

	if (list_empty(queue))
		return 0;
	msg = list_first_entry(queue, struct hsi_msg, link);
	if (msg->status != HSI_STATUS_QUEUED)
		return 0;
	if ((msg->sgt.nents) && (msg->sgt.sgl->length > sizeof(u32)))
		lch = ssi_claim_lch(msg);
	if (lch >= 0)
		return ssi_start_dma(msg, lch);
	else
		return ssi_start_pio(msg);
}
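
/*
 * Handle a break frame request: send a break on the transmitter, or arm
 * break detection and queue the message on the receiver side. Both
 * directions require the port to be in FRAME mode.
 */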
static int ssi_async_break(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	int err = 0;
	u32 tmp;

	pm_runtime_get_sync(omap_port->pdev);
	if (msg->ttype == HSI_MSG_WRITE) {
		if (omap_port->sst.mode != SSI_MODE_FRAME) {
			err = -EINVAL;
			goto out;
		}
		writel(1, omap_port->sst_base + SSI_SST_BREAK_REG);
		msg->status = HSI_STATUS_COMPLETED;
		msg->complete(msg);
	} else {
		if (omap_port->ssr.mode != SSI_MODE_FRAME) {
			err = -EINVAL;
			goto out;
		}
		spin_lock_bh(&omap_port->lock);
		tmp = readl(omap_ssi->sys +
					SSI_MPU_ENABLE_REG(port->num, 0));
		writel(tmp | SSI_BREAKDETECTED,
			omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
		msg->status = HSI_STATUS_PROCEEDING;
		list_add_tail(&msg->link, &omap_port->brkqueue);
		spin_unlock_bh(&omap_port->lock);
	}
out:
	pm_runtime_put_sync(omap_port->pdev);

	return err;
}
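
/*
 * Entry point for asynchronous transfers: queue @msg on the tx/rx queue of
 * its channel and try to start it immediately. Break frames are handled
 * separately; scatterlists with more than one entry are not supported yet.
 */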
static int ssi_async(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct list_head *queue;
	int err = 0;

	BUG_ON(!msg);

	if (msg->sgt.nents > 1)
		return -ENOSYS; /* TODO: Add sg support */

	if (msg->break_frame)
		return ssi_async_break(msg);

	if (msg->ttype) {
		BUG_ON(msg->channel >= omap_port->sst.channels);
		queue = &omap_port->txqueue[msg->channel];
	} else {
		BUG_ON(msg->channel >= omap_port->ssr.channels);
		queue = &omap_port->rxqueue[msg->channel];
	}
	msg->status = HSI_STATUS_QUEUED;
	spin_lock_bh(&omap_port->lock);
	list_add_tail(&msg->link, queue);
	err = ssi_start_transfer(queue);
	if (err < 0) {
		list_del(&msg->link);
		msg->status = HSI_STATUS_ERROR;
	}
	spin_unlock_bh(&omap_port->lock);
	dev_dbg(&port->device, "msg status %d ttype %d ch %d\n",
				msg->status, msg->ttype, msg->channel);

	return err;
}
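
/*
 * Compute the SST divisor for the current max_speed. The TX base clock is
 * half the SSI functional clock, and the rate is pre-decremented so that
 * exact multiples of max_speed round down.
 */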
static u32 ssi_calculate_div(struct hsi_controller *ssi)
{
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	u32 tx_fckrate = (u32) omap_ssi->fck_rate;

	/* / 2 : SSI TX clock is always half of the SSI functional clock */
	tx_fckrate >>= 1;
	/* Round down when tx_fckrate % omap_ssi->max_speed == 0 */
	tx_fckrate--;
	dev_dbg(&ssi->device, "TX div %d for fck_rate %lu kHz speed %d Kb/s\n",
		tx_fckrate / omap_ssi->max_speed, omap_ssi->fck_rate,
		omap_ssi->max_speed);

	return tx_fckrate / omap_ssi->max_speed;
}
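
/*
 * Remove messages from @queue and hand them to their destructor. If @cl is
 * non-NULL, only messages belonging to that client are flushed.
 */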
static void ssi_flush_queue(struct list_head *queue, struct hsi_client *cl)
{
	struct list_head *node, *tmp;
	struct hsi_msg *msg;

	list_for_each_safe(node, tmp, queue) {
		msg = list_entry(node, struct hsi_msg, link);
		if ((cl) && (cl != msg->cl))
			continue;
		list_del(node);
		pr_debug("flush queue: ch %d, msg %p len %d type %d ctxt %p\n",
			msg->channel, msg, msg->sgt.sgl->length,
					msg->ttype, msg->context);
		if (msg->destructor)
			msg->destructor(msg);
		else
			hsi_free_msg(msg);
	}
}
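
/*
 * Apply the client TX/RX configuration: both modules are put to sleep
 * while the frame size, divisor, channel count and arbitration mode are
 * updated, and the new values are mirrored in the shadow registers used to
 * restore the context after OFF mode.
 */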
static int ssi_setup(struct hsi_client *cl)
{
	struct hsi_port *port = to_hsi_port(cl->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *sst = omap_port->sst_base;
	void __iomem *ssr = omap_port->ssr_base;
	u32 div;
	u32 val;
	int err = 0;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	if (cl->tx_cfg.speed)
		omap_ssi->max_speed = cl->tx_cfg.speed;
	div = ssi_calculate_div(ssi);
	if (div > SSI_MAX_DIVISOR) {
		dev_err(&cl->device, "Invalid TX speed %d Mb/s (div %d)\n",
						cl->tx_cfg.speed, div);
		err = -EINVAL;
		goto out;
	}
	/* Set TX/RX module to sleep to stop TX/RX during cfg update */
	writel_relaxed(SSI_MODE_SLEEP, sst + SSI_SST_MODE_REG);
	writel_relaxed(SSI_MODE_SLEEP, ssr + SSI_SSR_MODE_REG);
	/* Flush posted write */
	val = readl(ssr + SSI_SSR_MODE_REG);
	/* TX */
	writel_relaxed(31, sst + SSI_SST_FRAMESIZE_REG);
	writel_relaxed(div, sst + SSI_SST_DIVISOR_REG);
	writel_relaxed(cl->tx_cfg.num_hw_channels, sst + SSI_SST_CHANNELS_REG);
	writel_relaxed(cl->tx_cfg.arb_mode, sst + SSI_SST_ARBMODE_REG);
	writel_relaxed(cl->tx_cfg.mode, sst + SSI_SST_MODE_REG);
	/* RX */
	writel_relaxed(31, ssr + SSI_SSR_FRAMESIZE_REG);
	writel_relaxed(cl->rx_cfg.num_hw_channels, ssr + SSI_SSR_CHANNELS_REG);
	writel_relaxed(0, ssr + SSI_SSR_TIMEOUT_REG);
	/* Cleanup the break queue if we leave FRAME mode */
	if ((omap_port->ssr.mode == SSI_MODE_FRAME) &&
		(cl->rx_cfg.mode != SSI_MODE_FRAME))
		ssi_flush_queue(&omap_port->brkqueue, cl);
	writel_relaxed(cl->rx_cfg.mode, ssr + SSI_SSR_MODE_REG);
	omap_port->channels = max(cl->rx_cfg.num_hw_channels,
				  cl->tx_cfg.num_hw_channels);
	/* Shadow registering for OFF mode */
	/* SST */
	omap_port->sst.divisor = div;
	omap_port->sst.frame_size = 31;
	omap_port->sst.channels = cl->tx_cfg.num_hw_channels;
	omap_port->sst.arb_mode = cl->tx_cfg.arb_mode;
	omap_port->sst.mode = cl->tx_cfg.mode;
	/* SSR */
	omap_port->ssr.frame_size = 31;
	omap_port->ssr.timeout = 0;
	omap_port->ssr.channels = cl->rx_cfg.num_hw_channels;
	omap_port->ssr.mode = cl->rx_cfg.mode;
out:
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_put_sync(omap_port->pdev);

	return err;
}
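
/*
 * Abort all ongoing activity on the port: stop DMA transfers, drain the
 * SST/SSR buffers, ack pending errors and interrupts, and dequeue every
 * queued request.
 */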
static int ssi_flush(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	void __iomem *sst = omap_port->sst_base;
	void __iomem *ssr = omap_port->ssr_base;
	unsigned int i;
	u32 err;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	/* Stop all DMA transfers */
	for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if (!msg || (port != hsi_get_port(msg->cl)))
			continue;
		writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
		if (msg->ttype == HSI_MSG_READ)
			pm_runtime_put_sync(omap_port->pdev);
		omap_ssi->gdd_trn[i].msg = NULL;
	}
	/* Flush all SST buffers */
	writel_relaxed(0, sst + SSI_SST_BUFSTATE_REG);
	writel_relaxed(0, sst + SSI_SST_TXSTATE_REG);
	/* Flush all SSR buffers */
	writel_relaxed(0, ssr + SSI_SSR_RXSTATE_REG);
	writel_relaxed(0, ssr + SSI_SSR_BUFSTATE_REG);
	/* Flush all errors */
	err = readl(ssr + SSI_SSR_ERROR_REG);
	writel_relaxed(err, ssr + SSI_SSR_ERRORACK_REG);
	/* Flush break */
	writel_relaxed(0, ssr + SSI_SSR_BREAK_REG);
	/* Clear interrupts */
	writel_relaxed(0, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(0xffffff00,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	writel_relaxed(0, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	writel(0xff, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
	/* Dequeue all pending requests */
	for (i = 0; i < omap_port->channels; i++) {
		/* Release write clocks */
		if (!list_empty(&omap_port->txqueue[i]))
			pm_runtime_put_sync(omap_port->pdev);
		ssi_flush_queue(&omap_port->txqueue[i], NULL);
		ssi_flush_queue(&omap_port->rxqueue[i], NULL);
	}
	ssi_flush_queue(&omap_port->brkqueue, NULL);
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_put_sync(omap_port->pdev);

	return 0;
}
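
/*
 * Raise the wake line for TX. The wake line and the matching runtime PM
 * reference are refcounted (wk_refcount), so only the first ssi_start_tx()
 * and the last ssi_stop_tx() of a nested sequence touch the hardware.
 */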
static int ssi_start_tx(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(&port->device, "Wake out high %d\n", omap_port->wk_refcount);

	spin_lock_bh(&omap_port->wk_lock);
	if (omap_port->wk_refcount++) {
		spin_unlock_bh(&omap_port->wk_lock);
		return 0;
	}
	pm_runtime_get_sync(omap_port->pdev); /* Grab clocks */
	writel(SSI_WAKE(0), omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
	spin_unlock_bh(&omap_port->wk_lock);

	return 0;
}
static int ssi_stop_tx(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(&port->device, "Wake out low %d\n", omap_port->wk_refcount);

	spin_lock_bh(&omap_port->wk_lock);
	BUG_ON(!omap_port->wk_refcount);
	if (--omap_port->wk_refcount) {
		spin_unlock_bh(&omap_port->wk_lock);
		return 0;
	}
	writel(SSI_WAKE(0), omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
	pm_runtime_put_sync(omap_port->pdev); /* Release clocks */
	spin_unlock_bh(&omap_port->wk_lock);

	return 0;
}
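
/*
 * Restart transfers on @queue, completing failed messages with
 * HSI_STATUS_ERROR until one transfer starts successfully or the queue is
 * empty.
 */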
static void ssi_transfer(struct omap_ssi_port *omap_port,
			 struct list_head *queue)
{
	struct hsi_msg *msg;
	int err = -1;

	spin_lock_bh(&omap_port->lock);
	while (err < 0) {
		err = ssi_start_transfer(queue);
		if (err < 0) {
			msg = list_first_entry(queue, struct hsi_msg, link);
			msg->status = HSI_STATUS_ERROR;
			msg->actual_len = 0;
			list_del(&msg->link);
			spin_unlock_bh(&omap_port->lock);
			msg->complete(msg);
			spin_lock_bh(&omap_port->lock);
		}
	}
	spin_unlock_bh(&omap_port->lock);
}
static void ssi_cleanup_queues(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	unsigned int i;
	u32 rxbufstate = 0;
	u32 txbufstate = 0;
	u32 status = SSI_ERROROCCURED;
	u32 tmp;

	ssi_flush_queue(&omap_port->brkqueue, cl);
	if (list_empty(&omap_port->brkqueue))
		status |= SSI_BREAKDETECTED;

	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->txqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->txqueue[i], struct hsi_msg,
									link);
		if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
			txbufstate |= (1 << i);
			status |= SSI_DATAACCEPT(i);
			/* Release the clock references for writes, GDD ones included */
			pm_runtime_put_sync(omap_port->pdev);
		}
		ssi_flush_queue(&omap_port->txqueue[i], cl);
	}
	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->rxqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
									link);
		if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
			rxbufstate |= (1 << i);
			status |= SSI_DATAAVAILABLE(i);
		}
		ssi_flush_queue(&omap_port->rxqueue[i], cl);
		/* Check if we keep the error detection interrupt armed */
		if (!list_empty(&omap_port->rxqueue[i]))
			status &= ~SSI_ERROROCCURED;
	}
	/* Cleanup write buffers */
	tmp = readl(omap_port->sst_base + SSI_SST_BUFSTATE_REG);
	tmp &= ~txbufstate;
	writel_relaxed(tmp, omap_port->sst_base + SSI_SST_BUFSTATE_REG);
	/* Cleanup read buffers */
	tmp = readl(omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
	tmp &= ~rxbufstate;
	writel_relaxed(tmp, omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
	/* Disarm and ack pending interrupts */
	tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	tmp &= ~status;
	writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(status, omap_ssi->sys +
		SSI_MPU_STATUS_REG(port->num, 0));
}
static void ssi_cleanup_gdd(struct hsi_controller *ssi, struct hsi_client *cl)
{
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_msg *msg;
	unsigned int i;
	u32 val = 0;
	u32 tmp;

	for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if ((!msg) || (msg->cl != cl))
			continue;
		writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
		val |= (1 << i);
		/*
		 * Clock references for write will be handled in
		 * ssi_cleanup_queues().
		 */
		if (msg->ttype == HSI_MSG_READ)
			pm_runtime_put_sync(omap_port->pdev);
		omap_ssi->gdd_trn[i].msg = NULL;
	}
	tmp = readl_relaxed(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp &= ~val;
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	writel(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
}
static int ssi_set_port_mode(struct omap_ssi_port *omap_port, u32 mode)
{
	writel(mode, omap_port->sst_base + SSI_SST_MODE_REG);
	writel(mode, omap_port->ssr_base + SSI_SSR_MODE_REG);
	/* OCP barrier */
	mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

	return 0;
}
static int ssi_release(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);

	spin_lock_bh(&omap_port->lock);
	pm_runtime_get_sync(omap_port->pdev);
	/* Stop all the pending DMA requests for that client */
	ssi_cleanup_gdd(ssi, cl);
	/* Now cleanup all the queues */
	ssi_cleanup_queues(cl);
	pm_runtime_put_sync(omap_port->pdev);
	/* If it is the last client of the port, do extra checks and cleanup */
	if (port->claimed <= 1) {
		/*
		 * Drop the clock reference for the incoming wake line
		 * if it is still kept high by the other side.
		 */
		if (omap_port->wkin_cken) {
			pm_runtime_put_sync(omap_port->pdev);
			omap_port->wkin_cken = 0;
		}
		pm_runtime_get_sync(omap_port->pdev);
		/* Stop any SSI TX/RX without a client */
		ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
		omap_port->sst.mode = SSI_MODE_SLEEP;
		omap_port->ssr.mode = SSI_MODE_SLEEP;
		pm_runtime_put_sync(omap_port->pdev);
		WARN_ON(omap_port->wk_refcount != 0);
	}
	spin_unlock_bh(&omap_port->lock);

	return 0;
}
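
/*
 * Handle an SSR error interrupt: cancel all GDD and PIO read transfers,
 * ack the error, and complete the pending read requests with
 * HSI_STATUS_ERROR before restarting any queued reads.
 */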
static void ssi_error(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	unsigned int i;
	u32 err;
	u32 val;
	u32 tmp;

	/* ACK error */
	err = readl(omap_port->ssr_base + SSI_SSR_ERROR_REG);
	dev_err(&port->device, "SSI error: 0x%02x\n", err);
	if (!err) {
		dev_dbg(&port->device, "spurious SSI error ignored!\n");
		return;
	}
	spin_lock(&omap_ssi->lock);
	/* Cancel all GDD read transfers */
	for (i = 0, val = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if ((msg) && (msg->ttype == HSI_MSG_READ)) {
			writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
			val |= (1 << i);
			omap_ssi->gdd_trn[i].msg = NULL;
		}
	}
	tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp &= ~val;
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	spin_unlock(&omap_ssi->lock);
	/* Cancel all PIO read transfers */
	spin_lock(&omap_port->lock);
	tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	tmp &= 0xfeff00ff; /* Disable error & all dataavailable interrupts */
	writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	/* ACK error */
	writel_relaxed(err, omap_port->ssr_base + SSI_SSR_ERRORACK_REG);
	writel_relaxed(SSI_ERROROCCURED,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	/* Signal the error to all current pending read requests */
	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->rxqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
									link);
		list_del(&msg->link);
		msg->status = HSI_STATUS_ERROR;
		spin_unlock(&omap_port->lock);
		msg->complete(msg);
		/* Now restart queued reads if any */
		ssi_transfer(omap_port, &omap_port->rxqueue[i]);
		spin_lock(&omap_port->lock);
	}
	spin_unlock(&omap_port->lock);
}
static void ssi_break_complete(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	struct hsi_msg *tmp;
	u32 val;

	dev_dbg(&port->device, "HWBREAK received\n");

	spin_lock(&omap_port->lock);
	val = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	val &= ~SSI_BREAKDETECTED;
	writel_relaxed(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(0, omap_port->ssr_base + SSI_SSR_BREAK_REG);
	writel(SSI_BREAKDETECTED,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	spin_unlock(&omap_port->lock);

	list_for_each_entry_safe(msg, tmp, &omap_port->brkqueue, link) {
		msg->status = HSI_STATUS_COMPLETED;
		spin_lock(&omap_port->lock);
		list_del(&msg->link);
		spin_unlock(&omap_port->lock);
		msg->complete(msg);
	}
}
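
/*
 * Move the next 32-bit frame of the head message of @queue, and complete
 * the message once its whole buffer has been transferred. For writes,
 * completion is deferred until the last frame has really been sent.
 */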
static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue)
{
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_msg *msg;
	u32 *buf;
	u32 reg;
	u32 val;

	spin_lock(&omap_port->lock);
	msg = list_first_entry(queue, struct hsi_msg, link);
	if ((!msg->sgt.nents) || (!msg->sgt.sgl->length)) {
		msg->actual_len = 0;
		msg->status = HSI_STATUS_PENDING;
	}
	if (msg->ttype == HSI_MSG_WRITE)
		val = SSI_DATAACCEPT(msg->channel);
	else
		val = SSI_DATAAVAILABLE(msg->channel);
	if (msg->status == HSI_STATUS_PROCEEDING) {
		buf = sg_virt(msg->sgt.sgl) + msg->actual_len;
		if (msg->ttype == HSI_MSG_WRITE)
			writel(*buf, omap_port->sst_base +
					SSI_SST_BUFFER_CH_REG(msg->channel));
		else
			*buf = readl(omap_port->ssr_base +
					SSI_SSR_BUFFER_CH_REG(msg->channel));
		dev_dbg(&port->device, "ch %d ttype %d 0x%08x\n", msg->channel,
							msg->ttype, *buf);
		msg->actual_len += sizeof(*buf);
		if (msg->actual_len >= msg->sgt.sgl->length)
			msg->status = HSI_STATUS_COMPLETED;
		/*
		 * Wait for the last written frame to be really sent before
		 * we call the complete callback
		 */
		if ((msg->status == HSI_STATUS_PROCEEDING) ||
				((msg->status == HSI_STATUS_COMPLETED) &&
					(msg->ttype == HSI_MSG_WRITE))) {
			writel(val, omap_ssi->sys +
					SSI_MPU_STATUS_REG(port->num, 0));
			spin_unlock(&omap_port->lock);

			return;
		}
	}
	/* Transfer completed at this point */
	reg = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	if (msg->ttype == HSI_MSG_WRITE) {
		/* Release clocks for write transfer */
		pm_runtime_put_sync(omap_port->pdev);
	}
	reg &= ~val;
	writel_relaxed(reg, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(val, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	list_del(&msg->link);
	spin_unlock(&omap_port->lock);
	msg->complete(msg);
	ssi_transfer(omap_port, queue);
}
static void ssi_pio_tasklet(unsigned long ssi_port)
{
	struct hsi_port *port = (struct hsi_port *)ssi_port;
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *sys = omap_ssi->sys;
	unsigned int ch;
	u32 status_reg;

	pm_runtime_get_sync(omap_port->pdev);
	status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
	status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));

	for (ch = 0; ch < omap_port->channels; ch++) {
		if (status_reg & SSI_DATAACCEPT(ch))
			ssi_pio_complete(port, &omap_port->txqueue[ch]);
		if (status_reg & SSI_DATAAVAILABLE(ch))
			ssi_pio_complete(port, &omap_port->rxqueue[ch]);
	}
	if (status_reg & SSI_BREAKDETECTED)
		ssi_break_complete(port);
	if (status_reg & SSI_ERROROCCURED)
		ssi_error(port);

	status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
	status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));
	pm_runtime_put_sync(omap_port->pdev);

	if (status_reg)
		tasklet_hi_schedule(&omap_port->pio_tasklet);
	else
		enable_irq(omap_port->irq);
}
static irqreturn_t ssi_pio_isr(int irq, void *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	tasklet_hi_schedule(&omap_port->pio_tasklet);
	disable_irq_nosync(irq);

	return IRQ_HANDLED;
}
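
/*
 * Bottom half for the CAWAKE GPIO interrupt: translates edges on the
 * incoming wake line into HSI_EVENT_START_RX/HSI_EVENT_STOP_RX events and
 * keeps a clock reference while the line is high.
 */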
static void ssi_wake_tasklet(unsigned long ssi_port)
{
	struct hsi_port *port = (struct hsi_port *)ssi_port;
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	if (ssi_wakein(port)) {
		/*
		 * We can have a quick High-Low-High transition in the line.
		 * In such a case, if we have long interrupt latencies,
		 * we can miss the low event or get a high event twice.
		 * This workaround avoids breaking the clock reference
		 * count when such a situation occurs.
		 */
		spin_lock(&omap_port->lock);
		if (!omap_port->wkin_cken) {
			omap_port->wkin_cken = 1;
			pm_runtime_get_sync(omap_port->pdev);
		}
		spin_unlock(&omap_port->lock);
		dev_dbg(&ssi->device, "Wake in high\n");
		if (omap_port->wktest) { /* FIXME: HACK! To be removed */
			writel(SSI_WAKE(0),
				omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
		}
		hsi_event(port, HSI_EVENT_START_RX);
	} else {
		dev_dbg(&ssi->device, "Wake in low\n");
		if (omap_port->wktest) { /* FIXME: HACK! To be removed */
			writel(SSI_WAKE(0),
				omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
		}
		hsi_event(port, HSI_EVENT_STOP_RX);
		spin_lock(&omap_port->lock);
		if (omap_port->wkin_cken) {
			pm_runtime_put_sync(omap_port->pdev);
			omap_port->wkin_cken = 0;
		}
		spin_unlock(&omap_port->lock);
	}
}
static irqreturn_t ssi_wake_isr(int irq __maybe_unused, void *ssi_port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(ssi_port);

	tasklet_hi_schedule(&omap_port->wake_tasklet);

	return IRQ_HANDLED;
}
static int __init ssi_port_irq(struct hsi_port *port,
			       struct platform_device *pd)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	int err;

	err = platform_get_irq(pd, 0);
	if (err < 0) {
		dev_err(&port->device, "Port IRQ resource missing\n");
		return err;
	}
	omap_port->irq = err;
	tasklet_init(&omap_port->pio_tasklet, ssi_pio_tasklet,
							(unsigned long)port);
	err = devm_request_irq(&port->device, omap_port->irq, ssi_pio_isr,
						0, "mpu_irq0", port);
	if (err < 0)
		dev_err(&port->device, "Request IRQ %d failed (%d)\n",
							omap_port->irq, err);
	return err;
}
static int __init ssi_wake_irq(struct hsi_port *port,
			       struct platform_device *pd)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	int cawake_irq;
	int err;

	if (omap_port->wake_gpio == -1) {
		omap_port->wake_irq = -1;
		return 0;
	}

	cawake_irq = gpio_to_irq(omap_port->wake_gpio);

	omap_port->wake_irq = cawake_irq;
	tasklet_init(&omap_port->wake_tasklet, ssi_wake_tasklet,
							(unsigned long)port);
	err = devm_request_irq(&port->device, cawake_irq, ssi_wake_isr,
		IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
							"cawake", port);
	if (err < 0)
		dev_err(&port->device, "Request Wake in IRQ %d failed %d\n",
						cawake_irq, err);
	err = enable_irq_wake(cawake_irq);
	if (err < 0)
		dev_err(&port->device, "Enable wake on the wakeline in irq %d failed %d\n",
			cawake_irq, err);

	return err;
}
static void __init ssi_queues_init(struct omap_ssi_port *omap_port)
{
	unsigned int ch;

	for (ch = 0; ch < SSI_MAX_CHANNELS; ch++) {
		INIT_LIST_HEAD(&omap_port->txqueue[ch]);
		INIT_LIST_HEAD(&omap_port->rxqueue[ch]);
	}
	INIT_LIST_HEAD(&omap_port->brkqueue);
}
static int __init ssi_port_get_iomem(struct platform_device *pd,
		const char *name, void __iomem **pbase, dma_addr_t *phy)
{
	struct hsi_port *port = platform_get_drvdata(pd);
	struct resource *mem;
	struct resource *ioarea;
	void __iomem *base;

	mem = platform_get_resource_byname(pd, IORESOURCE_MEM, name);
	if (!mem) {
		dev_err(&pd->dev, "IO memory region missing (%s)\n", name);
		return -ENXIO;
	}
	ioarea = devm_request_mem_region(&port->device, mem->start,
					resource_size(mem), dev_name(&pd->dev));
	if (!ioarea) {
		dev_err(&pd->dev, "%s IO memory region request failed\n",
								mem->name);
		return -ENXIO;
	}
	base = devm_ioremap(&port->device, mem->start, resource_size(mem));
	if (!base) {
		dev_err(&pd->dev, "%s IO remap failed\n", mem->name);
		return -ENXIO;
	}
	*pbase = base;

	if (phy)
		*phy = mem->start;

	return 0;
}
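
/*
 * Probe one SSI port: look up the first uninitialized port slot of the
 * parent controller, request the CAWAKE GPIO, map the SST/SSR register
 * regions, install the hsi_port callbacks and register the IRQ handlers.
 */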
static int __init ssi_port_probe(struct platform_device *pd)
{
	struct device_node *np = pd->dev.of_node;
	struct hsi_port *port;
	struct omap_ssi_port *omap_port;
	struct hsi_controller *ssi = dev_get_drvdata(pd->dev.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	int cawake_gpio = 0;
	u32 port_id;
	int err;

	dev_dbg(&pd->dev, "init ssi port...\n");

	if (!try_module_get(ssi->owner)) {
		dev_err(&pd->dev, "could not increment parent module refcount\n");
		return -ENODEV;
	}

	if (!ssi->port || !omap_ssi->port) {
		dev_err(&pd->dev, "ssi controller not initialized!\n");
		err = -ENODEV;
		goto error;
	}

	/* get id of first uninitialized port in controller */
	for (port_id = 0; port_id < ssi->num_ports && omap_ssi->port[port_id];
		port_id++)
		;

	if (port_id >= ssi->num_ports) {
		dev_err(&pd->dev, "port id out of range!\n");
		err = -ENODEV;
		goto error;
	}

	port = ssi->port[port_id];

	if (!np) {
		dev_err(&pd->dev, "missing device tree data\n");
		err = -EINVAL;
		goto error;
	}

	cawake_gpio = of_get_named_gpio(np, "ti,ssi-cawake-gpio", 0);
	if (cawake_gpio < 0) {
		dev_err(&pd->dev, "DT data is missing cawake gpio (err=%d)\n",
			cawake_gpio);
		err = -ENODEV;
		goto error;
	}

	err = devm_gpio_request_one(&port->device, cawake_gpio, GPIOF_DIR_IN,
		"cawake");
	if (err) {
		dev_err(&pd->dev, "could not request cawake gpio (err=%d)!\n",
			err);
		err = -ENXIO;
		goto error;
	}

	omap_port = devm_kzalloc(&port->device, sizeof(*omap_port), GFP_KERNEL);
	if (!omap_port) {
		err = -ENOMEM;
		goto error;
	}
	omap_port->wake_gpio = cawake_gpio;
	omap_port->pdev = &pd->dev;
	omap_port->port_id = port_id;

	/* initialize HSI port */
	port->async	= ssi_async;
	port->setup	= ssi_setup;
	port->flush	= ssi_flush;
	port->start_tx	= ssi_start_tx;
	port->stop_tx	= ssi_stop_tx;
	port->release	= ssi_release;
	hsi_port_set_drvdata(port, omap_port);
	omap_ssi->port[port_id] = omap_port;

	platform_set_drvdata(pd, port);

	err = ssi_port_get_iomem(pd, "tx", &omap_port->sst_base,
		&omap_port->sst_dma);
	if (err < 0)
		goto error;
	err = ssi_port_get_iomem(pd, "rx", &omap_port->ssr_base,
		&omap_port->ssr_dma);
	if (err < 0)
		goto error;

	err = ssi_port_irq(port, pd);
	if (err < 0)
		goto error;
	err = ssi_wake_irq(port, pd);
	if (err < 0)
		goto error;

	ssi_queues_init(omap_port);
	spin_lock_init(&omap_port->lock);
	spin_lock_init(&omap_port->wk_lock);
	omap_port->dev = &port->device;

	pm_runtime_irq_safe(omap_port->pdev);
	pm_runtime_enable(omap_port->pdev);

#ifdef CONFIG_DEBUG_FS
	err = ssi_debug_add_port(omap_port, omap_ssi->dir);
	if (err < 0) {
		pm_runtime_disable(omap_port->pdev);
		goto error;
	}
#endif

	hsi_add_clients_from_dt(port, np);

	dev_info(&pd->dev, "ssi port %u successfully initialized (cawake=%d)\n",
		port_id, cawake_gpio);

	return 0;

error:
	return err;
}
static int __exit ssi_port_remove(struct platform_device *pd)
{
	struct hsi_port *port = platform_get_drvdata(pd);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

#ifdef CONFIG_DEBUG_FS
	ssi_debug_remove_port(port);
#endif

	hsi_port_unregister_clients(port);

	tasklet_kill(&omap_port->wake_tasklet);
	tasklet_kill(&omap_port->pio_tasklet);

	port->async	= hsi_dummy_msg;
	port->setup	= hsi_dummy_cl;
	port->flush	= hsi_dummy_cl;
	port->start_tx	= hsi_dummy_cl;
	port->stop_tx	= hsi_dummy_cl;
	port->release	= hsi_dummy_cl;

	omap_ssi->port[omap_port->port_id] = NULL;
	platform_set_drvdata(pd, NULL);
	module_put(ssi->owner);
	pm_runtime_disable(&pd->dev);

	return 0;
}
static int ssi_save_port_ctx(struct omap_ssi_port *omap_port)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	omap_port->sys_mpu_enable = readl(omap_ssi->sys +
					SSI_MPU_ENABLE_REG(port->num, 0));

	return 0;
}
static int ssi_restore_port_ctx(struct omap_ssi_port *omap_port)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *base;

	writel_relaxed(omap_port->sys_mpu_enable,
			omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));

	/* SST context */
	base = omap_port->sst_base;
	writel_relaxed(omap_port->sst.frame_size, base + SSI_SST_FRAMESIZE_REG);
	writel_relaxed(omap_port->sst.channels, base + SSI_SST_CHANNELS_REG);
	writel_relaxed(omap_port->sst.arb_mode, base + SSI_SST_ARBMODE_REG);

	/* SSR context */
	base = omap_port->ssr_base;
	writel_relaxed(omap_port->ssr.frame_size, base + SSI_SSR_FRAMESIZE_REG);
	writel_relaxed(omap_port->ssr.channels, base + SSI_SSR_CHANNELS_REG);
	writel_relaxed(omap_port->ssr.timeout, base + SSI_SSR_TIMEOUT_REG);

	return 0;
}
static int ssi_restore_port_mode(struct omap_ssi_port *omap_port)
{
	u32 mode;

	writel_relaxed(omap_port->sst.mode,
				omap_port->sst_base + SSI_SST_MODE_REG);
	writel_relaxed(omap_port->ssr.mode,
				omap_port->ssr_base + SSI_SSR_MODE_REG);
	/* OCP barrier */
	mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

	return 0;
}
static int ssi_restore_divisor(struct omap_ssi_port *omap_port)
{
	writel_relaxed(omap_port->sst.divisor,
				omap_port->sst_base + SSI_SST_DIVISOR_REG);

	return 0;
}
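
/*
 * Runtime PM: the port context (MPU enable mask, frame size, channels,
 * arbitration, timeout) is saved across suspend and only restored when a
 * context loss is reported; the mode and TX divisor are always restored.
 */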
#ifdef CONFIG_PM_RUNTIME
static int omap_ssi_port_runtime_suspend(struct device *dev)
{
	struct hsi_port *port = dev_get_drvdata(dev);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(dev, "port runtime suspend!\n");

	ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
	if (omap_ssi->get_loss)
		omap_port->loss_count =
				omap_ssi->get_loss(ssi->device.parent);
	ssi_save_port_ctx(omap_port);

	return 0;
}
static int omap_ssi_port_runtime_resume(struct device *dev)
{
	struct hsi_port *port = dev_get_drvdata(dev);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(dev, "port runtime resume!\n");

	if ((omap_ssi->get_loss) && (omap_port->loss_count ==
				omap_ssi->get_loss(ssi->device.parent)))
		goto mode; /* We always need to restore the mode & TX divisor */

	ssi_restore_port_ctx(omap_port);

mode:
	ssi_restore_divisor(omap_port);
	ssi_restore_port_mode(omap_port);

	return 0;
}
static const struct dev_pm_ops omap_ssi_port_pm_ops = {
	SET_RUNTIME_PM_OPS(omap_ssi_port_runtime_suspend,
		omap_ssi_port_runtime_resume, NULL)
};

#define DEV_PM_OPS	(&omap_ssi_port_pm_ops)
#else
#define DEV_PM_OPS	NULL
#endif
#ifdef CONFIG_OF
static const struct of_device_id omap_ssi_port_of_match[] = {
	{ .compatible = "ti,omap3-ssi-port", },
	{},
};
MODULE_DEVICE_TABLE(of, omap_ssi_port_of_match);
#else
#define omap_ssi_port_of_match NULL
#endif
static struct platform_driver ssi_port_pdriver = {
	.remove	= __exit_p(ssi_port_remove),
	.driver	= {
		.name	= "omap_ssi_port",
		.of_match_table = omap_ssi_port_of_match,
		.pm	= DEV_PM_OPS,
	},
};

module_platform_driver_probe(ssi_port_pdriver, ssi_port_probe);
MODULE_ALIAS("platform:omap_ssi_port");
MODULE_AUTHOR("Carlos Chinea <carlos.chinea@nokia.com>");
MODULE_AUTHOR("Sebastian Reichel <sre@kernel.org>");
MODULE_DESCRIPTION("Synchronous Serial Interface Port Driver");
MODULE_LICENSE("GPL v2");