/*
 * hsi_driver_if.c
 *
 * Implements HSI hardware driver interfaces for the upper layers.
 *
 * Copyright (C) 2007-2008 Nokia Corporation. All rights reserved.
 * Copyright (C) 2009 Texas Instruments, Inc.
 *
 * Author: Carlos Chinea <carlos.chinea@nokia.com>
 * Author: Sebastien JAN <s-jan@ti.com>
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

#include "hsi_driver.h"

#define NOT_SET (-1)
/* Manage HSR divisor update
 * A special divisor value allows switching to auto-divisor mode in Rx
 * (but with error counters deactivated). This function implements
 * the transitions to/from this mode.
 */
int hsi_set_rx_divisor(struct hsi_port *sport, struct hsr_ctx *cfg)
{
	struct hsi_dev *hsi_ctrl = sport->hsi_controller;
	void __iomem *base = hsi_ctrl->base;
	int port = sport->port_number;
	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);

	if (cfg->divisor == NOT_SET)
		return 0;

	if (hsi_driver_device_is_hsi(pdev)) {
		if (cfg->divisor == HSI_HSR_DIVISOR_AUTO &&
		    sport->counters_on) {
			/* auto mode: deactivate counters + set divisor = 0 */
			sport->reg_counters = hsi_inl(base,
						HSI_HSR_COUNTERS_REG(port));
			sport->counters_on = 0;
			hsi_outl(0, base, HSI_HSR_COUNTERS_REG(port));
			hsi_outl(0, base, HSI_HSR_DIVISOR_REG(port));
			dev_dbg(hsi_ctrl->dev, "Switched to HSR auto mode\n");
		} else if (cfg->divisor != HSI_HSR_DIVISOR_AUTO) {
			/* Divisor set mode: use counters */
			/* Leave auto mode: use new counters values */
			sport->reg_counters = cfg->counters;
			sport->counters_on = 1;
			hsi_outl(cfg->counters, base,
				 HSI_HSR_COUNTERS_REG(port));
			hsi_outl(cfg->divisor, base, HSI_HSR_DIVISOR_REG(port));
			dev_dbg(hsi_ctrl->dev, "Left HSR auto mode. "
				"Counters=0x%08x, Divisor=0x%08x\n",
				cfg->counters, cfg->divisor);
		}
	} else {
		if (cfg->divisor == HSI_HSR_DIVISOR_AUTO &&
		    sport->counters_on) {
			/* auto mode: deactivate timeout */
			sport->reg_counters = hsi_inl(base,
						SSI_TIMEOUT_REG(port));
			sport->counters_on = 0;
			hsi_outl(0, base, SSI_TIMEOUT_REG(port));
			dev_dbg(hsi_ctrl->dev, "Deactivated SSR timeout\n");
		} else if (cfg->divisor == HSI_SSR_DIVISOR_USE_TIMEOUT) {
			/* Leave auto mode: use new counters values */
			sport->reg_counters = cfg->counters;
			sport->counters_on = 1;
			hsi_outl(cfg->counters, base, SSI_TIMEOUT_REG(port));
			dev_dbg(hsi_ctrl->dev, "Left SSR auto mode. "
				"Timeout=0x%08x\n", cfg->counters);
		}
	}

	return 0;
}
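/*
 * Illustrative sketch (editorial addition, not part of the original driver):
 * how a caller might fill a struct hsr_ctx to exercise the two divisor modes
 * handled above. Field names follow the hsr_ctx usage in this file; the
 * numeric values are placeholders chosen for the example only.
 *
 *	struct hsr_ctx rx_cfg = { .mode = NOT_SET, .flow = NOT_SET,
 *				  .frame_size = NOT_SET, .channels = NOT_SET };
 *
 *	rx_cfg.divisor = HSI_HSR_DIVISOR_AUTO;	// auto mode, counters off
 *	hsi_set_rx_divisor(sport, &rx_cfg);
 *
 *	rx_cfg.divisor = 1;			// fixed divisor, counters on
 *	rx_cfg.counters = 0xFFFFF;		// hypothetical counter value
 *	hsi_set_rx_divisor(sport, &rx_cfg);
 */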
int hsi_set_rx(struct hsi_port *sport, struct hsr_ctx *cfg)
{
	struct hsi_dev *hsi_ctrl = sport->hsi_controller;
	void __iomem *base = hsi_ctrl->base;
	int port = sport->port_number;
	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);

	if (((cfg->mode & HSI_MODE_VAL_MASK) != HSI_MODE_STREAM) &&
	    ((cfg->mode & HSI_MODE_VAL_MASK) != HSI_MODE_FRAME) &&
	    ((cfg->mode & HSI_MODE_VAL_MASK) != HSI_MODE_SLEEP) &&
	    (cfg->mode != NOT_SET))
		return -EINVAL;

	if (hsi_driver_device_is_hsi(pdev)) {
		if (((cfg->flow & HSI_FLOW_VAL_MASK) != HSI_FLOW_SYNCHRONIZED)
		    && ((cfg->flow & HSI_FLOW_VAL_MASK) != HSI_FLOW_PIPELINED)
		    && (cfg->flow != NOT_SET))
			return -EINVAL;
		/* HSI only supports payload size of 32bits */
		if ((cfg->frame_size != HSI_FRAMESIZE_MAX) &&
		    (cfg->frame_size != NOT_SET))
			return -EINVAL;
	} else {
		if (((cfg->flow & HSI_FLOW_VAL_MASK) != HSI_FLOW_SYNCHRONIZED)
		    && (cfg->flow != NOT_SET))
			return -EINVAL;
		/* HSI only supports payload size of 32bits */
		if ((cfg->frame_size != HSI_FRAMESIZE_MAX) &&
		    (cfg->frame_size != NOT_SET))
			return -EINVAL;
	}

	if ((cfg->channels == 0) ||
	    ((cfg->channels > sport->max_ch) && (cfg->channels != NOT_SET)))
		return -EINVAL;

	if (hsi_driver_device_is_hsi(pdev)) {
		if ((cfg->divisor > HSI_MAX_RX_DIVISOR) &&
		    (cfg->divisor != HSI_HSR_DIVISOR_AUTO) &&
		    (cfg->divisor != NOT_SET))
			return -EINVAL;
	}

	if ((cfg->mode != NOT_SET) && (cfg->flow != NOT_SET))
		hsi_outl(cfg->mode | ((cfg->flow & HSI_FLOW_VAL_MASK)
				      << HSI_FLOW_OFFSET), base,
			 HSI_HSR_MODE_REG(port));

	if (cfg->frame_size != NOT_SET)
		hsi_outl(cfg->frame_size, base, HSI_HSR_FRAMESIZE_REG(port));

	if (cfg->channels != NOT_SET) {
		if ((cfg->channels & (-cfg->channels)) ^ cfg->channels)
			return -EINVAL;
		else
			hsi_outl(cfg->channels, base,
				 HSI_HSR_CHANNELS_REG(port));
	}

	return hsi_set_rx_divisor(sport, cfg);
}
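/*
 * Editorial note: the (channels & -channels) ^ channels test above rejects any
 * cfg->channels value that is not a power of two (x & -x isolates the lowest
 * set bit, so the XOR is zero only when a single bit is set). A minimal sketch
 * of an accepted RX configuration follows; the values are placeholders, not
 * taken from the original driver:
 *
 *	struct hsr_ctx rx_cfg = {
 *		.mode		= HSI_MODE_FRAME,
 *		.flow		= HSI_FLOW_SYNCHRONIZED,
 *		.frame_size	= HSI_FRAMESIZE_MAX,
 *		.channels	= 4,			// power of two
 *		.divisor	= NOT_SET,		// keep current divisor
 *	};
 *	err = hsi_set_rx(sport, &rx_cfg);
 */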
void hsi_get_rx(struct hsi_port *sport, struct hsr_ctx *cfg)
{
	struct hsi_dev *hsi_ctrl = sport->hsi_controller;
	void __iomem *base = hsi_ctrl->base;
	int port = sport->port_number;
	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);

	cfg->mode = hsi_inl(base, HSI_HSR_MODE_REG(port)) & HSI_MODE_VAL_MASK;
	cfg->flow = (hsi_inl(base, HSI_HSR_MODE_REG(port)) & HSI_FLOW_VAL_MASK)
	    >> HSI_FLOW_OFFSET;
	cfg->frame_size = hsi_inl(base, HSI_HSR_FRAMESIZE_REG(port));
	cfg->channels = hsi_inl(base, HSI_HSR_CHANNELS_REG(port));
	if (hsi_driver_device_is_hsi(pdev)) {
		cfg->divisor = hsi_inl(base, HSI_HSR_DIVISOR_REG(port));
		cfg->counters = hsi_inl(base, HSI_HSR_COUNTERS_REG(port));
	} else {
		cfg->counters = hsi_inl(base, SSI_TIMEOUT_REG(port));
	}
}
int hsi_set_tx(struct hsi_port *sport, struct hst_ctx *cfg)
{
	struct hsi_dev *hsi_ctrl = sport->hsi_controller;
	void __iomem *base = hsi_ctrl->base;
	int port = sport->port_number;
	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
	unsigned int max_divisor = hsi_driver_device_is_hsi(pdev) ?
	    HSI_MAX_TX_DIVISOR : HSI_SSI_MAX_TX_DIVISOR;

	if (((cfg->mode & HSI_MODE_VAL_MASK) != HSI_MODE_STREAM) &&
	    ((cfg->mode & HSI_MODE_VAL_MASK) != HSI_MODE_FRAME) &&
	    (cfg->mode != NOT_SET))
		return -EINVAL;

	if (hsi_driver_device_is_hsi(pdev)) {
		if (((cfg->flow & HSI_FLOW_VAL_MASK) != HSI_FLOW_SYNCHRONIZED)
		    && ((cfg->flow & HSI_FLOW_VAL_MASK) != HSI_FLOW_PIPELINED)
		    && (cfg->flow != NOT_SET))
			return -EINVAL;
		/* HSI only supports payload size of 32bits */
		if ((cfg->frame_size != HSI_FRAMESIZE_MAX) &&
		    (cfg->frame_size != NOT_SET))
			return -EINVAL;
	} else {
		if (((cfg->flow & HSI_FLOW_VAL_MASK) != HSI_FLOW_SYNCHRONIZED)
		    && (cfg->flow != NOT_SET))
			return -EINVAL;

		if ((cfg->frame_size > HSI_FRAMESIZE_MAX) &&
		    (cfg->frame_size != NOT_SET))
			return -EINVAL;
	}

	if ((cfg->channels == 0) ||
	    ((cfg->channels > sport->max_ch) && (cfg->channels != NOT_SET)))
		return -EINVAL;

	if ((cfg->divisor > max_divisor) && (cfg->divisor != NOT_SET))
		return -EINVAL;

	if ((cfg->arb_mode != HSI_ARBMODE_ROUNDROBIN) &&
	    (cfg->arb_mode != HSI_ARBMODE_PRIORITY) && (cfg->mode != NOT_SET))
		return -EINVAL;

	if ((cfg->mode != NOT_SET) && (cfg->flow != NOT_SET))
		hsi_outl(cfg->mode | ((cfg->flow & HSI_FLOW_VAL_MASK) <<
				      HSI_FLOW_OFFSET) |
			 HSI_HST_MODE_WAKE_CTRL_SW, base,
			 HSI_HST_MODE_REG(port));

	if (cfg->frame_size != NOT_SET)
		hsi_outl(cfg->frame_size, base, HSI_HST_FRAMESIZE_REG(port));

	if (cfg->channels != NOT_SET) {
		if ((cfg->channels & (-cfg->channels)) ^ cfg->channels)
			return -EINVAL;
		else
			hsi_outl(cfg->channels, base,
				 HSI_HST_CHANNELS_REG(port));
	}

	if (cfg->divisor != NOT_SET)
		hsi_outl(cfg->divisor, base, HSI_HST_DIVISOR_REG(port));

	if (cfg->arb_mode != NOT_SET)
		hsi_outl(cfg->arb_mode, base, HSI_HST_ARBMODE_REG(port));

	return 0;
}
void hsi_get_tx(struct hsi_port *sport, struct hst_ctx *cfg)
{
	struct hsi_dev *hsi_ctrl = sport->hsi_controller;
	void __iomem *base = hsi_ctrl->base;
	int port = sport->port_number;

	cfg->mode = hsi_inl(base, HSI_HST_MODE_REG(port)) & HSI_MODE_VAL_MASK;
	cfg->flow = (hsi_inl(base, HSI_HST_MODE_REG(port)) & HSI_FLOW_VAL_MASK)
	    >> HSI_FLOW_OFFSET;
	cfg->frame_size = hsi_inl(base, HSI_HST_FRAMESIZE_REG(port));
	cfg->channels = hsi_inl(base, HSI_HST_CHANNELS_REG(port));
	cfg->divisor = hsi_inl(base, HSI_HST_DIVISOR_REG(port));
	cfg->arb_mode = hsi_inl(base, HSI_HST_ARBMODE_REG(port));
}
/**
 * hsi_open - open a hsi device channel.
 * @dev - Reference to the hsi device channel to be opened.
 *
 * Returns 0 on success, -EINVAL on bad parameters, -EBUSY if it is already
 * opened.
 */
int hsi_open(struct hsi_device *dev)
{
	struct hsi_channel *ch;
	struct hsi_port *port;
	struct hsi_dev *hsi_ctrl;
	int err;

	if (!dev || !dev->ch) {
		pr_err(LOG_NAME "Wrong HSI device %p\n", dev);
		return -EINVAL;
	}
	dev_dbg(dev->device.parent, "%s ch %d\n", __func__, dev->n_ch);

	ch = dev->ch;
	if (!ch->read_done || !ch->write_done) {
		dev_err(dev->device.parent,
			"Trying to open with no (read/write) callbacks "
			"registered\n");
		return -EINVAL;
	}
	if (ch->flags & HSI_CH_OPEN) {
		dev_err(dev->device.parent,
			"Port %d Channel %d already OPENED\n",
			dev->n_p, dev->n_ch);
		return -EBUSY;
	}

	port = ch->hsi_port;
	hsi_ctrl = port->hsi_controller;
	if (!hsi_ctrl) {
		dev_err(dev->device.parent,
			"%s: Port %d Channel %d has no hsi controller?\n",
			__func__, dev->n_p, dev->n_ch);
		return -EINVAL;
	}

	if (hsi_ctrl->clock_rate == 0) {
		struct hsi_platform_data *pdata;

		pdata = dev_get_platdata(hsi_ctrl->dev);
		if (!pdata) {
			dev_err(dev->device.parent,
				"%s: Port %d Channel %d has no pdata\n",
				__func__, dev->n_p, dev->n_ch);
			return -EINVAL;
		}
		if (!pdata->device_scale) {
			dev_err(dev->device.parent,
				"%s: Undefined platform device_scale function\n",
				__func__);
			return -ENXIO;
		}

		/* Retry to set the HSI FCLK to default. */
		err = pdata->device_scale(hsi_ctrl->dev, hsi_ctrl->dev,
					  pdata->default_hsi_fclk);
		if (err) {
			dev_err(dev->device.parent,
				"%s: Error %d setting HSI FClk to %ld. "
				"Will retry on next open\n",
				__func__, err, pdata->default_hsi_fclk);
			return err;
		} else {
			dev_info(dev->device.parent, "HSI clock is now %ld\n",
				 pdata->default_hsi_fclk);
			hsi_ctrl->clock_rate = pdata->default_hsi_fclk;
		}
	}
	spin_lock_bh(&hsi_ctrl->lock);
	hsi_clocks_enable_channel(dev->device.parent, ch->channel_number,
				__func__);

	/* Restart with flags cleaned up */
	ch->flags = HSI_CH_OPEN;

	if (port->wake_rx_3_wires_mode)
		hsi_driver_enable_interrupt(port, HSI_ERROROCCURED
						| HSI_BREAKDETECTED);
	else
		hsi_driver_enable_interrupt(port, HSI_CAWAKEDETECTED
						| HSI_ERROROCCURED
						| HSI_BREAKDETECTED);

	/* NOTE: error and break are port events and do not need to be
	 * enabled for HSI extended enable register */

	hsi_clocks_disable_channel(dev->device.parent, ch->channel_number,
				__func__);
	spin_unlock_bh(&hsi_ctrl->lock);

	return 0;
}
EXPORT_SYMBOL(hsi_open);
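/*
 * Usage sketch (editorial illustration, not part of the original file): a
 * client driver must register its completion callbacks before calling
 * hsi_open(), otherwise the "no (read/write) callbacks registered" check
 * above fails. "my_read_done" and "my_write_done" are hypothetical client
 * functions.
 *
 *	hsi_set_read_cb(dev, my_read_done);
 *	hsi_set_write_cb(dev, my_write_done);
 *	err = hsi_open(dev);
 *	if (err)
 *		dev_err(dev->device.parent, "open failed: %d\n", err);
 */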
/**
 * hsi_write - write data into the hsi device channel
 * @dev - reference to the hsi device channel to write into.
 * @addr - pointer to a 32-bit word data to be written.
 * @size - number of 32-bit words to be written.
 *
 * Return 0 on success, a negative value on failure.
 * A success value only indicates that the request has been accepted.
 * Transfer is only completed when the write_done callback is called.
 *
 */
int hsi_write(struct hsi_device *dev, u32 *addr, unsigned int size)
{
	struct hsi_channel *ch;
	int err;

	if (unlikely(!dev)) {
		pr_err(LOG_NAME "Null dev pointer in hsi_write\n");
		return -EINVAL;
	}

	if (unlikely(!dev->ch || !addr || (size <= 0))) {
		dev_err(dev->device.parent,
			"Wrong parameters hsi_device %p data %p count %d",
			dev, addr, size);
		return -EINVAL;
	}
	dev_dbg(dev->device.parent, "%s ch %d, @%x, size %d u32\n", __func__,
		dev->n_ch, (u32) addr, size);

	if (unlikely(!(dev->ch->flags & HSI_CH_OPEN))) {
		dev_err(dev->device.parent, "HSI device NOT open\n");
		return -EINVAL;
	}

	ch = dev->ch;
	if (ch->write_data.addr != NULL) {
		dev_err(dev->device.parent, "# Invalid request - Write "
			"operation pending port %d channel %d\n",
			ch->hsi_port->port_number,
			ch->channel_number);
		return -EINVAL;
	}

	spin_lock_bh(&ch->hsi_port->hsi_controller->lock);
	if (pm_runtime_suspended(dev->device.parent) ||
	    !ch->hsi_port->hsi_controller->clock_enabled)
		dev_dbg(dev->device.parent,
			"hsi_write with HSI clocks OFF, clock_enabled = %d\n",
			ch->hsi_port->hsi_controller->clock_enabled);

	hsi_clocks_enable_channel(dev->device.parent,
				  ch->channel_number, __func__);

	ch->write_data.addr = addr;
	ch->write_data.size = size;
	ch->write_data.lch = -1;

	if (size == 1)
		err = hsi_driver_enable_write_interrupt(ch, addr);
	else
		err = hsi_driver_write_dma(ch, addr, size);

	if (unlikely(err < 0)) {
		ch->write_data.addr = NULL;
		ch->write_data.size = 0;
		dev_err(dev->device.parent, "Failed to program write\n");
	}

	spin_unlock_bh(&ch->hsi_port->hsi_controller->lock);

	/* Leave clocks enabled until transfer is complete (write callback
	 * is called) */
	return err;
}
EXPORT_SYMBOL(hsi_write);
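/*
 * Usage sketch (editorial illustration): a single-word write is programmed
 * through the TX interrupt path, anything larger goes through DMA. The buffer
 * must stay valid until the registered write_done() callback runs. "tx_buf"
 * is a hypothetical DMA-safe buffer owned by the client.
 *
 *	static u32 tx_buf[16];
 *
 *	err = hsi_write(dev, tx_buf, ARRAY_SIZE(tx_buf));	// DMA path
 *	if (err < 0)
 *		return err;
 *	// completion is signalled later via write_done(dev, size)
 */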
/**
 * hsi_read - read data from the hsi device channel
 * @dev - hsi device channel reference to read data from.
 * @addr - pointer to a 32-bit word data to store the data.
 * @size - number of 32-bit words to be stored.
 *
 * Return 0 on success, a negative value on failure.
 * A success value only indicates that the request has been accepted.
 * Data is only available in the buffer when the read_done callback is called.
 *
 */
int hsi_read(struct hsi_device *dev, u32 *addr, unsigned int size)
{
	struct hsi_channel *ch;
	int err;

	if (unlikely(!dev)) {
		pr_err(LOG_NAME "Null dev pointer in hsi_read\n");
		return -EINVAL;
	}

	if (unlikely(!dev->ch || !addr || (size <= 0))) {
		dev_err(dev->device.parent, "Wrong parameters "
			"hsi_device %p data %p count %d", dev, addr, size);
		return -EINVAL;
	}
#if 0
	if (dev->n_ch == 0)
		dev_info(dev->device.parent, "%s ch %d, @%x, size %d u32\n",
			 __func__, dev->n_ch, (u32) addr, size);
#endif
	if (unlikely(!(dev->ch->flags & HSI_CH_OPEN))) {
		dev_err(dev->device.parent, "HSI device NOT open\n");
		return -EINVAL;
	}

	ch = dev->ch;

	spin_lock_bh(&ch->hsi_port->hsi_controller->lock);
	if (pm_runtime_suspended(dev->device.parent) ||
	    !ch->hsi_port->hsi_controller->clock_enabled)
		dev_dbg(dev->device.parent,
			"hsi_read with HSI clocks OFF, clock_enabled = %d\n",
			ch->hsi_port->hsi_controller->clock_enabled);

	hsi_clocks_enable_channel(dev->device.parent, ch->channel_number,
				__func__);

	if (ch->read_data.addr != NULL) {
		dev_err(dev->device.parent, "# Invalid request - Read "
			"operation pending port %d channel %d\n",
			ch->hsi_port->port_number,
			ch->channel_number);
		err = -EINVAL;
		goto done;
	}

	ch->read_data.addr = addr;
	ch->read_data.size = size;
	ch->read_data.lch = -1;

	if (size == 1)
		err = hsi_driver_enable_read_interrupt(ch, addr);
	else
		err = hsi_driver_read_dma(ch, addr, size);

	if (unlikely(err < 0)) {
		ch->read_data.addr = NULL;
		ch->read_data.size = 0;
		dev_err(dev->device.parent, "Failed to program read\n");
	}

done:
	hsi_clocks_disable_channel(dev->device.parent, ch->channel_number,
				__func__);
	spin_unlock_bh(&ch->hsi_port->hsi_controller->lock);

	return err;
}
EXPORT_SYMBOL(hsi_read);
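/*
 * Usage sketch (editorial illustration): hsi_read() only queues the request;
 * the data is valid in "rx_buf" (a hypothetical client buffer) once the
 * registered read_done() callback fires. A pending read can be aborted with
 * hsi_read_cancel(), in which case read_done() is not called.
 *
 *	static u32 rx_buf[16];
 *
 *	err = hsi_read(dev, rx_buf, ARRAY_SIZE(rx_buf));
 *	if (err < 0)
 *		return err;
 */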
int __hsi_write_cancel(struct hsi_channel *ch)
{
	int err = -ENODATA;
	if (ch->write_data.size == 1)
		err = hsi_driver_cancel_write_interrupt(ch);
	else if (ch->write_data.size > 1)
		err = hsi_driver_cancel_write_dma(ch);
	else
		dev_dbg(ch->dev->device.parent, "%s : Nothing to cancel %d\n",
			__func__, ch->write_data.size);
	dev_err(ch->dev->device.parent, "%s : %d\n", __func__, err);
	return err;
}
/**
 * hsi_write_cancel - Cancel pending write request.
 * @dev - hsi device channel where to cancel the pending write.
 *
 * write_done() callback will not be called after success of this function.
 *
 * Return: -ENXIO : No DMA channel found for specified HSI channel
 *	   -ECANCELED : write cancel success, data not transferred to TX FIFO
 *	   0 : transfer is already over, data already transferred to TX FIFO
 *
 * Note: whatever the returned value, the write callback will not be called
 * after write cancel.
 */
int hsi_write_cancel(struct hsi_device *dev)
{
	int err;
	if (unlikely(!dev || !dev->ch)) {
		pr_err(LOG_NAME "Wrong HSI device %p\n", dev);
		return -ENODEV;
	}
	dev_err(dev->device.parent, "%s ch %d\n", __func__, dev->n_ch);

	if (unlikely(!(dev->ch->flags & HSI_CH_OPEN))) {
		dev_err(dev->device.parent, "HSI device NOT open\n");
		return -ENODEV;
	}

	spin_lock_bh(&dev->ch->hsi_port->hsi_controller->lock);
	hsi_clocks_enable_channel(dev->device.parent, dev->ch->channel_number,
				__func__);

	err = __hsi_write_cancel(dev->ch);

	hsi_clocks_disable_channel(dev->device.parent, dev->ch->channel_number,
				__func__);
	spin_unlock_bh(&dev->ch->hsi_port->hsi_controller->lock);
	return err;
}
EXPORT_SYMBOL(hsi_write_cancel);
int __hsi_read_cancel(struct hsi_channel *ch)
{
	int err = -ENODATA;
	if (ch->read_data.size == 1)
		err = hsi_driver_cancel_read_interrupt(ch);
	else if (ch->read_data.size > 1)
		err = hsi_driver_cancel_read_dma(ch);
	else
		dev_dbg(ch->dev->device.parent, "%s : Nothing to cancel %d\n",
			__func__, ch->read_data.size);

	dev_err(ch->dev->device.parent, "%s : %d\n", __func__, err);
	return err;
}
/**
 * hsi_read_cancel - Cancel pending read request.
 * @dev - hsi device channel where to cancel the pending read.
 *
 * read_done() callback will not be called after success of this function.
 *
 * Return: -ENXIO : No DMA channel found for specified HSI channel
 *	   -ECANCELED : read cancel success, data not available at expected
 *			address.
 *	   0 : transfer is already over, data already available at expected
 *	       address.
 *
 * Note: whatever the returned value, the read callback will not be called
 * after cancel.
 */
int hsi_read_cancel(struct hsi_device *dev)
{
	int err;
	if (unlikely(!dev || !dev->ch)) {
		pr_err(LOG_NAME "Wrong HSI device %p\n", dev);
		return -ENODEV;
	}
	dev_err(dev->device.parent, "%s ch %d\n", __func__, dev->n_ch);

	if (unlikely(!(dev->ch->flags & HSI_CH_OPEN))) {
		dev_err(dev->device.parent, "HSI device NOT open\n");
		return -ENODEV;
	}

	spin_lock_bh(&dev->ch->hsi_port->hsi_controller->lock);
	hsi_clocks_enable_channel(dev->device.parent, dev->ch->channel_number,
				__func__);

	err = __hsi_read_cancel(dev->ch);

	hsi_clocks_disable_channel(dev->device.parent, dev->ch->channel_number,
				__func__);
	spin_unlock_bh(&dev->ch->hsi_port->hsi_controller->lock);
	return err;
}
EXPORT_SYMBOL(hsi_read_cancel);
/**
 * hsi_poll - HSI poll feature, enables data interrupt on frame reception
 * @dev - hsi device channel reference to apply the I/O control
 *	  (or port associated to it)
 *
 * Return 0 on success, a negative value on failure.
 *
 */
int hsi_poll(struct hsi_device *dev)
{
	struct hsi_channel *ch;
	struct hsi_dev *hsi_ctrl;
	int err;

	if (unlikely(!dev || !dev->ch))
		return -EINVAL;
	dev_dbg(dev->device.parent, "%s ch %d\n", __func__, dev->n_ch);

	if (unlikely(!(dev->ch->flags & HSI_CH_OPEN))) {
		dev_err(dev->device.parent, "HSI device NOT open\n");
		return -EINVAL;
	}

	ch = dev->ch;
	hsi_ctrl = ch->hsi_port->hsi_controller;

	spin_lock_bh(&hsi_ctrl->lock);
	hsi_clocks_enable_channel(dev->device.parent, dev->ch->channel_number,
				__func__);

	ch->flags |= HSI_CH_RX_POLL;

	err = hsi_driver_enable_read_interrupt(ch, NULL);

	hsi_clocks_disable_channel(dev->device.parent, dev->ch->channel_number,
				__func__);
	spin_unlock_bh(&hsi_ctrl->lock);

	return err;
}
EXPORT_SYMBOL(hsi_poll);
/**
 * hsi_unpoll - HSI poll feature, disables data interrupt on frame reception
 * @dev - hsi device channel reference to apply the I/O control
 *	  (or port associated to it)
 *
 * Return 0 on success, a negative value on failure.
 *
 */
int hsi_unpoll(struct hsi_device *dev)
{
	struct hsi_channel *ch;
	struct hsi_dev *hsi_ctrl;

	if (unlikely(!dev || !dev->ch))
		return -EINVAL;
	dev_dbg(dev->device.parent, "%s ch %d\n", __func__, dev->n_ch);

	if (unlikely(!(dev->ch->flags & HSI_CH_OPEN))) {
		dev_err(dev->device.parent, "HSI device NOT open\n");
		return -EINVAL;
	}

	ch = dev->ch;
	hsi_ctrl = ch->hsi_port->hsi_controller;

	spin_lock_bh(&hsi_ctrl->lock);
	hsi_clocks_enable_channel(dev->device.parent, dev->ch->channel_number,
				__func__);

	ch->flags &= ~HSI_CH_RX_POLL;

	hsi_driver_disable_read_interrupt(ch);

	hsi_clocks_disable_channel(dev->device.parent, dev->ch->channel_number,
				__func__);
	spin_unlock_bh(&hsi_ctrl->lock);

	return 0;
}
EXPORT_SYMBOL(hsi_unpoll);
/**
 * hsi_ioctl - HSI I/O control
 * @dev - hsi device channel reference to apply the I/O control
 *	  (or port associated to it)
 * @command - HSI I/O control command
 * @arg - parameter associated to the control command. NULL, if no parameter.
 *
 * Return 0 on success, a negative value on failure.
 *
 */
int hsi_ioctl(struct hsi_device *dev, unsigned int command, void *arg)
{
	struct hsi_channel *ch;
	struct hsi_dev *hsi_ctrl;
	struct hsi_port *pport;
	void __iomem *base;
	unsigned int port, channel;
	u32 acwake;
	int err = 0;
	int fifo = 0;

	if (unlikely((!dev) ||
		     (!dev->ch) ||
		     (!dev->ch->hsi_port) ||
		     (!dev->ch->hsi_port->hsi_controller)) ||
	    (!(dev->ch->flags & HSI_CH_OPEN))) {
		pr_err(LOG_NAME "HSI IOCTL Invalid parameter\n");
		return -EINVAL;
	}

	ch = dev->ch;
	pport = ch->hsi_port;
	hsi_ctrl = ch->hsi_port->hsi_controller;
	port = ch->hsi_port->port_number;
	channel = ch->channel_number;
	base = hsi_ctrl->base;

	dev_dbg(dev->device.parent, "IOCTL: ch %d, command %d\n",
		channel, command);

	spin_lock_bh(&hsi_ctrl->lock);
	hsi_clocks_enable_channel(dev->device.parent, channel, __func__);
	switch (command) {
	case HSI_IOCTL_ACWAKE_UP:
		/* Wake up request to Modem (typically OMAP initiated) */
		/* Symmetrical disable will be done in HSI_IOCTL_ACWAKE_DOWN */
		if (ch->flags & HSI_CH_ACWAKE) {
			dev_dbg(dev->device.parent, "Duplicate ACWAKE UP\n");
			err = -EPERM;
			goto out;
		}

		ch->flags |= HSI_CH_ACWAKE;
		pport->acwake_status |= BIT(channel);

		/* We only claim the wake line once per channel */
		acwake = hsi_inl(base, HSI_SYS_WAKE_REG(port));
		if (!(acwake & HSI_WAKE(channel))) {
			hsi_outl(HSI_SET_WAKE(channel), base,
				 HSI_SYS_SET_WAKE_REG(port));
		}

		goto out;
		break;
	case HSI_IOCTL_ACWAKE_DOWN:
		/* Low power request initiation (OMAP initiated, typically */
		/* following inactivity timeout) */
		/* ACPU HSI block shall still be capable of receiving */
		if (!(ch->flags & HSI_CH_ACWAKE)) {
			dev_dbg(dev->device.parent, "Duplicate ACWAKE DOWN\n");
			err = -EPERM;
			goto out;
		}

		acwake = hsi_inl(base, HSI_SYS_WAKE_REG(port));
		if (unlikely(pport->acwake_status !=
				(acwake & HSI_WAKE_MASK))) {
			dev_warn(dev->device.parent,
				"ACWAKE shadow register mismatch"
				" acwake_status: 0x%x, HSI_SYS_WAKE_REG: 0x%x",
				pport->acwake_status, acwake);
			pport->acwake_status = acwake & HSI_WAKE_MASK;
		}
		/* SSI_TODO: add safety check for SSI also */

		ch->flags &= ~HSI_CH_ACWAKE;
		pport->acwake_status &= ~BIT(channel);

		/* Release the wake line per channel */
		if ((acwake & HSI_WAKE(channel))) {
			hsi_outl(HSI_CLEAR_WAKE(channel), base,
				 HSI_SYS_CLEAR_WAKE_REG(port));
		}

		goto out;
		break;
	case HSI_IOCTL_SEND_BREAK:
		hsi_outl(1, base, HSI_HST_BREAK_REG(port));
		/* HSI_TODO: need to deactivate clock after BREAK frames sent */
		/* Use interrupt? (if TX BREAK INT exists) */
		break;
	case HSI_IOCTL_GET_ACWAKE:
		if (!arg) {
			err = -EINVAL;
			goto out;
		}
		*(u32 *)arg = hsi_inl(base, HSI_SYS_WAKE_REG(port));
		break;
	case HSI_IOCTL_FLUSH_RX:
		hsi_outl(0, base, HSI_HSR_RXSTATE_REG(port));
		break;
	case HSI_IOCTL_FLUSH_TX:
		hsi_outl(0, base, HSI_HST_TXSTATE_REG(port));
		break;
	case HSI_IOCTL_GET_CAWAKE:
		if (!arg) {
			err = -EINVAL;
			goto out;
		}
		err = hsi_get_cawake(dev->ch->hsi_port);
		if (err < 0) {
			err = -ENODEV;
			goto out;
		}
		*(u32 *)arg = err;
		break;
	case HSI_IOCTL_SET_RX:
		if (!arg) {
			err = -EINVAL;
			goto out;
		}
		err = hsi_set_rx(dev->ch->hsi_port, (struct hsr_ctx *)arg);
		break;
	case HSI_IOCTL_GET_RX:
		if (!arg) {
			err = -EINVAL;
			goto out;
		}
		hsi_get_rx(dev->ch->hsi_port, (struct hsr_ctx *)arg);
		break;
	case HSI_IOCTL_SET_TX:
		if (!arg) {
			err = -EINVAL;
			goto out;
		}
		err = hsi_set_tx(dev->ch->hsi_port, (struct hst_ctx *)arg);
		break;
	case HSI_IOCTL_GET_TX:
		if (!arg) {
			err = -EINVAL;
			goto out;
		}
		hsi_get_tx(dev->ch->hsi_port, (struct hst_ctx *)arg);
		break;
	case HSI_IOCTL_SW_RESET:
		dev_info(dev->device.parent, "SW Reset\n");
		err = hsi_softreset(hsi_ctrl);

		/* Reset HSI config to default */
		hsi_softreset_driver(hsi_ctrl);
		break;
	case HSI_IOCTL_GET_FIFO_OCCUPANCY:
		if (!arg) {
			err = -EINVAL;
			goto out;
		}
		fifo = hsi_fifo_get_id(hsi_ctrl, channel, port);
		if (unlikely(fifo < 0)) {
			dev_err(hsi_ctrl->dev, "No valid FIFO id found for "
				"channel %d.\n", channel);
			err = -EFAULT;
			goto out;
		}
		*(size_t *)arg = hsi_get_rx_fifo_occupancy(hsi_ctrl, fifo);
		break;
	case HSI_IOCTL_SET_WAKE_RX_3WIRES_MODE:
		dev_info(dev->device.parent,
			 "Entering RX wakeup in 3 wires mode (no CAWAKE)\n");
		pport->wake_rx_3_wires_mode = 1;

		/* HSI-C1BUG00085: ixxx: HSI wakeup issue in 3 wires mode
		 * HSI will NOT generate the Swakeup for 2nd frame if it entered
		 * IDLE after 1st received frame */
		if (is_hsi_errata(hsi_ctrl, HSI_ERRATUM_ixxx_3WIRES_NO_SWAKEUP))
			if (hsi_driver_device_is_hsi(to_platform_device
							(hsi_ctrl->dev)))
				hsi_set_pm_force_hsi_on(hsi_ctrl);

		/* When WAKE is not available, ACREADY must be set to 1 at
		 * reset else remote will never have a chance to transmit. */
		hsi_outl_or(HSI_SET_WAKE_3_WIRES | HSI_SET_WAKE_READY_LVL_1,
			    base, HSI_SYS_SET_WAKE_REG(port));
		hsi_driver_disable_interrupt(pport, HSI_CAWAKEDETECTED);
		break;
	case HSI_IOCTL_SET_WAKE_RX_4WIRES_MODE:
		dev_info(dev->device.parent,
			 "Entering RX wakeup in 4 wires mode\n");
		pport->wake_rx_3_wires_mode = 0;

		/* HSI-C1BUG00085: ixxx: HSI wakeup issue in 3 wires mode
		 * HSI will NOT generate the Swakeup for 2nd frame if it entered
		 * IDLE after 1st received frame */
		if (is_hsi_errata(hsi_ctrl, HSI_ERRATUM_ixxx_3WIRES_NO_SWAKEUP))
			if (hsi_driver_device_is_hsi(to_platform_device
							(hsi_ctrl->dev)))
				hsi_set_pm_default(hsi_ctrl);

		/* Clean CA_WAKE status */
		pport->cawake_status = -1;
		hsi_outl(HSI_CAWAKEDETECTED, base,
			 HSI_SYS_MPU_STATUS_REG(port, pport->n_irq));
		hsi_driver_enable_interrupt(pport, HSI_CAWAKEDETECTED);
		hsi_outl_and(HSI_SET_WAKE_3_WIRES_MASK, base,
			     HSI_SYS_SET_WAKE_REG(port));
		break;
	default:
		err = -ENOIOCTLCMD;
		break;
	}
out:
	/* All IOCTLs end by disabling the clocks, except ACWAKE high. */
	hsi_clocks_disable_channel(dev->device.parent, channel, __func__);

	spin_unlock_bh(&hsi_ctrl->lock);

	return err;
}
EXPORT_SYMBOL(hsi_ioctl);
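/*
 * Usage sketch (editorial illustration): a typical transmit sequence brackets
 * the data phase with the ACWAKE ioctls so the wake line stays asserted for
 * the duration of the transfer. "tx_buf" is the same hypothetical client
 * buffer as in the hsi_write sketch above; error handling is simplified.
 *
 *	err = hsi_ioctl(dev, HSI_IOCTL_ACWAKE_UP, NULL);
 *	if (!err)
 *		err = hsi_write(dev, tx_buf, ARRAY_SIZE(tx_buf));
 *	// ... wait for write_done() ...
 *	hsi_ioctl(dev, HSI_IOCTL_ACWAKE_DOWN, NULL);
 */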
/**
 * hsi_close - close given hsi device channel
 * @dev - reference to hsi device channel.
 */
void hsi_close(struct hsi_device *dev)
{
	struct hsi_dev *hsi_ctrl;

	if (!dev || !dev->ch) {
		pr_err(LOG_NAME "Trying to close wrong HSI device %p\n", dev);
		return;
	}
	dev_dbg(dev->device.parent, "%s ch %d\n", __func__, dev->n_ch);

	hsi_ctrl = dev->ch->hsi_port->hsi_controller;

	spin_lock_bh(&hsi_ctrl->lock);
	hsi_clocks_enable_channel(dev->device.parent, dev->ch->channel_number,
				__func__);

	if (dev->ch->flags & HSI_CH_OPEN) {
		dev->ch->flags &= ~HSI_CH_OPEN;
		__hsi_write_cancel(dev->ch);
		__hsi_read_cancel(dev->ch);
	}

	hsi_clocks_disable_channel(dev->device.parent, dev->ch->channel_number,
				__func__);
	spin_unlock_bh(&hsi_ctrl->lock);
}
EXPORT_SYMBOL(hsi_close);
/**
 * hsi_set_read_cb - register read_done() callback.
 * @dev - reference to hsi device channel where the callback is associated to.
 * @read_cb - callback to signal read transfer completed.
 *	      size is expressed in number of 32-bit words.
 *
 * NOTE: The read callback must only be set when the channel is not open!
 */
void hsi_set_read_cb(struct hsi_device *dev,
		     void (*read_cb) (struct hsi_device *dev,
				      unsigned int size))
{
	dev_dbg(dev->device.parent, "%s ch %d\n", __func__, dev->n_ch);

	dev->ch->read_done = read_cb;
}
EXPORT_SYMBOL(hsi_set_read_cb);
/**
 * hsi_set_write_cb - register write_done() callback.
 * @dev - reference to hsi device channel where the callback is associated to.
 * @write_cb - callback to signal write transfer completed.
 *	       size is expressed in number of 32-bit words.
 *
 * NOTE: The write callback must only be set when the channel is not open!
 */
void hsi_set_write_cb(struct hsi_device *dev,
		      void (*write_cb) (struct hsi_device *dev,
					unsigned int size))
{
	dev_dbg(dev->device.parent, "%s ch %d\n", __func__, dev->n_ch);

	dev->ch->write_done = write_cb;
}
EXPORT_SYMBOL(hsi_set_write_cb);
/**
 * hsi_set_port_event_cb - register port_event callback.
 * @dev - reference to hsi device channel where the callback is associated to.
 * @port_event_cb - callback to signal events from the channel port.
 */
void hsi_set_port_event_cb(struct hsi_device *dev,
			   void (*port_event_cb) (struct hsi_device *dev,
						  unsigned int event,
						  void *arg))
{
	struct hsi_port *port = dev->ch->hsi_port;
	struct hsi_dev *hsi_ctrl = port->hsi_controller;

	dev_dbg(dev->device.parent, "%s ch %d\n", __func__, dev->n_ch);

	write_lock_bh(&dev->ch->rw_lock);
	dev->ch->port_event = port_event_cb;
	write_unlock_bh(&dev->ch->rw_lock);

	/* Since we now have a callback registered for events, we can now */
	/* enable the CAWAKE, ERROR and BREAK interrupts */
	spin_lock_bh(&hsi_ctrl->lock);
	hsi_clocks_enable_channel(dev->device.parent, dev->ch->channel_number,
				__func__);
	if (port->wake_rx_3_wires_mode)
		hsi_driver_enable_interrupt(port, HSI_ERROROCCURED
						| HSI_BREAKDETECTED);
	else
		hsi_driver_enable_interrupt(port, HSI_CAWAKEDETECTED
						| HSI_ERROROCCURED
						| HSI_BREAKDETECTED);
	hsi_clocks_disable_channel(dev->device.parent, dev->ch->channel_number,
				__func__);
	spin_unlock_bh(&hsi_ctrl->lock);
}
EXPORT_SYMBOL(hsi_set_port_event_cb);
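/*
 * Usage sketch (editorial illustration): a hypothetical port event handler as
 * registered through hsi_set_port_event_cb() above. The events delivered are
 * the ones this driver reports (CAWAKE, error, break detection); the handler
 * body here is a placeholder.
 *
 *	static void my_port_event(struct hsi_device *dev, unsigned int event,
 *				  void *arg)
 *	{
 *		dev_dbg(dev->device.parent, "port event %u\n", event);
 *	}
 *
 *	hsi_set_port_event_cb(dev, my_port_event);
 */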