/*
 * hsi-if.c
 *
 * Part of the HSI character driver, implements the HSI interface.
 *
 * Copyright (C) 2009 Nokia Corporation. All rights reserved.
 * Copyright (C) 2009 Texas Instruments, Inc.
 *
 * Author: Andras Domokos <andras.domokos@nokia.com>
 * Author: Sebastien JAN <s-jan@ti.com>
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <asm/mach-types.h>
#include <linux/ioctl.h>
#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/bitmap.h>

#include <linux/hsi_driver_if.h>
#include <linux/hsi_char.h>

#include "hsi-char.h"
#include "hsi-if.h"

#define HSI_CHANNEL_STATE_UNAVAIL	(1 << 0)
#define HSI_CHANNEL_STATE_READING	(1 << 1)
#define HSI_CHANNEL_STATE_WRITING	(1 << 2)

#define PORT1	0
#define PORT2	1

#define RXCONV(dst, src) \
	do { \
		(dst)->mode = (src)->mode; \
		(dst)->flow = (src)->flow; \
		(dst)->frame_size = (src)->frame_size; \
		(dst)->channels = (src)->channels; \
		(dst)->divisor = (src)->divisor; \
		(dst)->counters = (src)->counters; \
	} while (0)

#define TXCONV(dst, src) \
	do { \
		(dst)->mode = (src)->mode; \
		(dst)->flow = (src)->flow; \
		(dst)->frame_size = (src)->frame_size; \
		(dst)->channels = (src)->channels; \
		(dst)->divisor = (src)->divisor; \
		(dst)->arb_mode = (src)->arb_mode; \
	} while (0)

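/*
 * RXCONV()/TXCONV() copy the fields shared between the public hsi_char
 * configuration structures (struct hsi_rx_config / struct hsi_tx_config)
 * and the controller context structures (struct hsr_ctx / struct hst_ctx).
 * The direction depends on the argument order, so each macro serves both
 * the SET and GET ioctl paths below.
 */
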
struct if_hsi_channel {
	struct hsi_device *dev;
	unsigned int channel_id;
	u32 *tx_data;
	unsigned int tx_count;	/* Number of bytes to be written */
	u32 *rx_data;
	unsigned int rx_count;	/* Number of bytes to be read */
	unsigned int opened;
	unsigned int state;
	spinlock_t lock;	/* Serializes access to channel data */
};

struct if_hsi_iface {
	struct if_hsi_channel channels[HSI_MAX_CHAR_DEVS];
	int bootstrap;
	unsigned long init_chan_map;
	spinlock_t lock;	/* Serializes access to HSI functional interface */
};

static void if_hsi_port_event(struct hsi_device *dev, unsigned int event,
			      void *arg);
static int __devinit if_hsi_probe(struct hsi_device *dev);
static int __devexit if_hsi_remove(struct hsi_device *dev);

static struct hsi_device_driver if_hsi_char_driver = {
	.ctrl_mask = ANY_HSI_CONTROLLER,
	.probe = if_hsi_probe,
	.remove = __devexit_p(if_hsi_remove),
	.driver = {
		   .name = "hsi_char"},
};

static struct if_hsi_iface hsi_iface;

static int if_hsi_read_on(int ch, u32 *data, unsigned int count)
{
	struct if_hsi_channel *channel;
	int ret;

	channel = &hsi_iface.channels[ch];
	dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);

	spin_lock(&channel->lock);
	if (channel->state & HSI_CHANNEL_STATE_READING) {
		pr_err("Read still pending on channel %d\n", ch);
		spin_unlock(&channel->lock);
		return -EBUSY;
	}
	channel->state |= HSI_CHANNEL_STATE_READING;
	channel->rx_data = data;
	channel->rx_count = count;
	spin_unlock(&channel->lock);

	ret = hsi_read(channel->dev, data, count / 4);
	dev_dbg(&channel->dev->device, "%s, ch = %d, ret = %d\n", __func__, ch,
		ret);

	return ret;
}

/* HSI char driver read done callback */
static void if_hsi_read_done(struct hsi_device *dev, unsigned int size)
{
	struct if_hsi_channel *channel;
	struct hsi_event ev;

	channel = &hsi_iface.channels[dev->n_ch];
	dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, dev->n_ch);
	spin_lock(&channel->lock);
	channel->state &= ~HSI_CHANNEL_STATE_READING;
	ev.event = HSI_EV_IN;
	ev.data = channel->rx_data;
	ev.count = 4 * size;	/* Convert size from u32 words to bytes */
	spin_unlock(&channel->lock);
	if_hsi_notify(dev->n_ch, &ev);
}

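/*
 * Note on the read path: if_hsi_read_on() only arms the transfer and
 * returns; completion is signalled asynchronously through the callback
 * above, which clears the READING flag and forwards an HSI_EV_IN event
 * (carrying the buffer pointer and byte count) to the character device
 * layer via if_hsi_notify().
 */
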
int if_hsi_read(int ch, u32 *data, unsigned int count)
{
	int ret = 0;
	struct if_hsi_channel *channel;

	channel = &hsi_iface.channels[ch];
	dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
	ret = if_hsi_read_on(ch, data, count);
	return ret;
}

int if_hsi_poll(int ch)
{
	struct if_hsi_channel *channel;
	int ret = 0;

	channel = &hsi_iface.channels[ch];
	dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
	ret = hsi_poll(channel->dev);
	return ret;
}

static int if_hsi_write_on(int ch, u32 *address, unsigned int count)
{
	struct if_hsi_channel *channel;
	int ret;

	channel = &hsi_iface.channels[ch];

	spin_lock(&channel->lock);
	if (channel->state & HSI_CHANNEL_STATE_WRITING) {
		pr_err("Write still pending on channel %d\n", ch);
		spin_unlock(&channel->lock);
		return -EBUSY;
	}

	channel->tx_data = address;
	channel->tx_count = count;
	channel->state |= HSI_CHANNEL_STATE_WRITING;
	spin_unlock(&channel->lock);
	dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
	ret = hsi_write(channel->dev, address, count / 4);
	return ret;
}

/* HSI char driver write done callback */
static void if_hsi_write_done(struct hsi_device *dev, unsigned int size)
{
	struct if_hsi_channel *channel;
	struct hsi_event ev;

	channel = &hsi_iface.channels[dev->n_ch];
	dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, dev->n_ch);

	spin_lock(&channel->lock);
	channel->state &= ~HSI_CHANNEL_STATE_WRITING;
	ev.event = HSI_EV_OUT;
	ev.data = channel->tx_data;
	ev.count = 4 * size;	/* Convert size from u32 words to bytes */
	spin_unlock(&channel->lock);
	if_hsi_notify(dev->n_ch, &ev);
}

int if_hsi_write(int ch, u32 *data, unsigned int count)
{
	int ret = 0;
	struct if_hsi_channel *channel;

	channel = &hsi_iface.channels[ch];
	dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
	ret = if_hsi_write_on(ch, data, count);
	return ret;
}

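/*
 * Illustrative usage (not part of this file): callers must treat both
 * entry points as asynchronous and keep the buffer alive until the
 * matching completion event arrives, e.g.:
 *
 *	static u32 buf[16];
 *
 *	if (if_hsi_write(ch, buf, sizeof(buf)) == 0)
 *		;	// wait for HSI_EV_OUT before reusing buf
 *
 * Counts are in bytes at this interface; they are divided by 4 before
 * reaching hsi_read()/hsi_write(), which operate on 32-bit words.
 */
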
void if_hsi_send_break(int ch)
{
	struct if_hsi_channel *channel;

	channel = &hsi_iface.channels[ch];
	dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
	hsi_ioctl(channel->dev, HSI_IOCTL_SEND_BREAK, NULL);
}

void if_hsi_flush_rx(int ch)
{
	struct if_hsi_channel *channel;

	channel = &hsi_iface.channels[ch];
	hsi_ioctl(channel->dev, HSI_IOCTL_FLUSH_RX, NULL);
}

void if_hsi_flush_ch(int ch)
{
	/* FIXME - Check the purpose of this function */
	struct if_hsi_channel *channel;

	channel = &hsi_iface.channels[ch];
}

void if_hsi_flush_tx(int ch)
{
	struct if_hsi_channel *channel;

	channel = &hsi_iface.channels[ch];
	hsi_ioctl(channel->dev, HSI_IOCTL_FLUSH_TX, NULL);
}

void if_hsi_get_acwakeline(int ch, unsigned int *state)
{
	struct if_hsi_channel *channel;

	channel = &hsi_iface.channels[ch];
	hsi_ioctl(channel->dev, HSI_IOCTL_GET_ACWAKE, state);
}

void if_hsi_set_acwakeline(int ch, unsigned int state)
{
	struct if_hsi_channel *channel;

	channel = &hsi_iface.channels[ch];
	hsi_ioctl(channel->dev,
		  state ? HSI_IOCTL_ACWAKE_UP : HSI_IOCTL_ACWAKE_DOWN, NULL);
}

void if_hsi_get_cawakeline(int ch, unsigned int *state)
{
	struct if_hsi_channel *channel;

	channel = &hsi_iface.channels[ch];
	hsi_ioctl(channel->dev, HSI_IOCTL_GET_CAWAKE, state);
}

void if_hsi_set_wake_rx_3wires_mode(int ch, unsigned int state)
{
	struct if_hsi_channel *channel;

	channel = &hsi_iface.channels[ch];
	hsi_ioctl(channel->dev,
		  state ? HSI_IOCTL_SET_WAKE_RX_3WIRES_MODE :
			  HSI_IOCTL_SET_WAKE_RX_4WIRES_MODE, NULL);
}

int if_hsi_set_rx(int ch, struct hsi_rx_config *cfg)
{
	int ret;
	struct if_hsi_channel *channel;
	struct hsr_ctx ctx;

	channel = &hsi_iface.channels[ch];
	dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
	RXCONV(&ctx, cfg);
	ret = hsi_ioctl(channel->dev, HSI_IOCTL_SET_RX, &ctx);
	return ret;
}

void if_hsi_get_rx(int ch, struct hsi_rx_config *cfg)
{
	struct if_hsi_channel *channel;
	struct hsr_ctx ctx;

	channel = &hsi_iface.channels[ch];
	dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
	hsi_ioctl(channel->dev, HSI_IOCTL_GET_RX, &ctx);
	RXCONV(cfg, &ctx);
}

int if_hsi_set_tx(int ch, struct hsi_tx_config *cfg)
{
	int ret;
	struct if_hsi_channel *channel;
	struct hst_ctx ctx;

	channel = &hsi_iface.channels[ch];
	dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
	TXCONV(&ctx, cfg);
	ret = hsi_ioctl(channel->dev, HSI_IOCTL_SET_TX, &ctx);
	return ret;
}

void if_hsi_get_tx(int ch, struct hsi_tx_config *cfg)
{
	struct if_hsi_channel *channel;
	struct hst_ctx ctx;

	channel = &hsi_iface.channels[ch];
	dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
	hsi_ioctl(channel->dev, HSI_IOCTL_GET_TX, &ctx);
	TXCONV(cfg, &ctx);
}

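/*
 * Sketch of a TX setup through this wrapper (field values are made up for
 * illustration; the real ones come from the protocol configuration):
 *
 *	struct hsi_tx_config tx_cfg = {
 *		.mode = 2,		// hypothetical: frame transmission mode
 *		.channels = 4,
 *		.divisor = 1,
 *	};
 *
 *	if (if_hsi_set_tx(ch, &tx_cfg) < 0)
 *		pr_err("TX configuration failed\n");
 *
 * TXCONV() translates the structure into the struct hst_ctx consumed by
 * HSI_IOCTL_SET_TX.
 */
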
void if_hsi_sw_reset(int ch)
{
	struct if_hsi_channel *channel;
	int i;

	channel = &hsi_iface.channels[ch];
	hsi_ioctl(channel->dev, HSI_IOCTL_SW_RESET, NULL);

	spin_lock_bh(&hsi_iface.lock);
	/* Reset HSI channel states */
	for (i = 0; i < HSI_MAX_PORTS; i++)
		if_hsi_char_driver.ch_mask[i] = 0;

	for (i = 0; i < HSI_MAX_CHAR_DEVS; i++) {
		channel = &hsi_iface.channels[i];
		channel->opened = 0;
		channel->state = HSI_CHANNEL_STATE_UNAVAIL;
	}
	spin_unlock_bh(&hsi_iface.lock);
}

void if_hsi_get_fifo_occupancy(int ch, size_t *occ)
{
	struct if_hsi_channel *channel;

	channel = &hsi_iface.channels[ch];
	hsi_ioctl(channel->dev, HSI_IOCTL_GET_FIFO_OCCUPANCY, occ);
}

void if_hsi_cancel_read(int ch)
{
	struct if_hsi_channel *channel;

	channel = &hsi_iface.channels[ch];
	dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
	if (channel->state & HSI_CHANNEL_STATE_READING)
		hsi_read_cancel(channel->dev);
	spin_lock(&channel->lock);
	channel->state &= ~HSI_CHANNEL_STATE_READING;
	spin_unlock(&channel->lock);
}

void if_hsi_cancel_write(int ch)
{
	struct if_hsi_channel *channel;

	channel = &hsi_iface.channels[ch];
	dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);
	if (channel->state & HSI_CHANNEL_STATE_WRITING)
		hsi_write_cancel(channel->dev);
	spin_lock(&channel->lock);
	channel->state &= ~HSI_CHANNEL_STATE_WRITING;
	spin_unlock(&channel->lock);
}

static int if_hsi_openchannel(struct if_hsi_channel *channel)
{
	int ret = 0;

	dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__,
		channel->channel_id);
	spin_lock(&channel->lock);

	if (channel->state == HSI_CHANNEL_STATE_UNAVAIL) {
		pr_err("Channel %d is not available\n", channel->channel_id);
		ret = -ENODEV;
		goto leave;
	}

	if (channel->opened) {
		pr_err("Channel %d is busy\n", channel->channel_id);
		ret = -EBUSY;
		goto leave;
	}

	if (!channel->dev) {
		pr_err("Channel %d is not ready??\n", channel->channel_id);
		ret = -ENODEV;
		goto leave;
	}
	spin_unlock(&channel->lock);

	ret = hsi_open(channel->dev);

	spin_lock(&channel->lock);
	if (ret < 0) {
		pr_err("Could not open channel %d\n", channel->channel_id);
		goto leave;
	}

	channel->opened = 1;

leave:
	spin_unlock(&channel->lock);
	return ret;
}

static int if_hsi_closechannel(struct if_hsi_channel *channel)
{
	int ret = 0;

	dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__,
		channel->channel_id);
	spin_lock(&channel->lock);

	if (!channel->opened)
		goto leave;

	if (!channel->dev) {
		pr_err("Channel %d is not ready??\n", channel->channel_id);
		ret = -ENODEV;
		goto leave;
	}

	/* Stop any pending read/write */
	if (channel->state & HSI_CHANNEL_STATE_READING) {
		channel->state &= ~HSI_CHANNEL_STATE_READING;
		spin_unlock(&channel->lock);
		hsi_read_cancel(channel->dev);
		spin_lock(&channel->lock);
	}

	if (channel->state & HSI_CHANNEL_STATE_WRITING) {
		channel->state &= ~HSI_CHANNEL_STATE_WRITING;
		spin_unlock(&channel->lock);
		hsi_write_cancel(channel->dev);
	} else
		spin_unlock(&channel->lock);

	hsi_close(channel->dev);

	spin_lock(&channel->lock);
	channel->opened = 0;
leave:
	spin_unlock(&channel->lock);
	return ret;
}

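/*
 * Note the unlock/relock dance around hsi_read_cancel()/hsi_write_cancel()
 * above: presumably the cancel calls can invoke the completion callbacks,
 * which take channel->lock themselves, so calling them with the lock held
 * could deadlock. (Rationale inferred from the locking pattern; it is not
 * documented in the original source.)
 */
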
int if_hsi_start(int ch)
{
	struct if_hsi_channel *channel;
	int ret = 0;

	channel = &hsi_iface.channels[ch];
	dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);

	spin_lock_bh(&channel->lock);
	channel->state = 0;
	spin_unlock_bh(&channel->lock);

	ret = if_hsi_openchannel(channel);
	if (ret < 0) {
		pr_err("Could not open channel %d\n", ch);
		goto error;
	}

	if_hsi_poll(ch);
error:
	return ret;
}

void if_hsi_stop(int ch)
{
	struct if_hsi_channel *channel;

	channel = &hsi_iface.channels[ch];
	dev_dbg(&channel->dev->device, "%s, ch = %d\n", __func__, ch);

	if_hsi_closechannel(channel);
}

static int __devinit if_hsi_probe(struct hsi_device *dev)
{
	struct if_hsi_channel *channel;
	unsigned long *address;
	int ret = -ENXIO, port;

	dev_dbg(&dev->device, "%s, port = %d, ch = %d\n", __func__, dev->n_p,
		dev->n_ch);

	for (port = 0; port < HSI_MAX_PORTS; port++) {
		if (if_hsi_char_driver.ch_mask[port])
			break;
	}

	if (port == HSI_MAX_PORTS)
		return -ENXIO;

	if (dev->n_ch >= HSI_MAX_CHAR_DEV_ID) {
		pr_err("HSI char driver cannot handle channel %d\n", dev->n_ch);
		return -ENXIO;
	}

	address = &if_hsi_char_driver.ch_mask[port];

	spin_lock_bh(&hsi_iface.lock);
	if (test_bit(dev->n_ch, address) && (dev->n_p == port)) {
		hsi_set_read_cb(dev, if_hsi_read_done);
		hsi_set_write_cb(dev, if_hsi_write_done);
		hsi_set_port_event_cb(dev, if_hsi_port_event);
		channel = &hsi_iface.channels[dev->n_ch];
		channel->dev = dev;
		channel->state = 0;
		ret = 0;
		hsi_iface.init_chan_map ^= (1 << dev->n_ch);
	}
	spin_unlock_bh(&hsi_iface.lock);

	return ret;
}

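/*
 * Channel bookkeeping: init_chan_map starts as a copy of the channel mask
 * built in if_hsi_init(); each successful probe XORs its channel bit away,
 * so a non-zero map after hsi_register_driver() returns means at least one
 * requested channel never probed (checked at the end of if_hsi_init()).
 */
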
static int __devexit if_hsi_remove(struct hsi_device *dev)
{
	struct if_hsi_channel *channel;
	unsigned long *address;
	int ret = -ENXIO, port;

	dev_dbg(&dev->device, "%s, port = %d, ch = %d\n", __func__, dev->n_p,
		dev->n_ch);

	for (port = 0; port < HSI_MAX_PORTS; port++) {
		if (if_hsi_char_driver.ch_mask[port])
			break;
	}

	if (port == HSI_MAX_PORTS)
		return -ENXIO;

	address = &if_hsi_char_driver.ch_mask[port];

	spin_lock_bh(&hsi_iface.lock);
	if (test_bit(dev->n_ch, address) && (dev->n_p == port)) {
		hsi_set_read_cb(dev, NULL);
		hsi_set_write_cb(dev, NULL);
		hsi_set_port_event_cb(dev, NULL);
		channel = &hsi_iface.channels[dev->n_ch];
		channel->dev = NULL;
		channel->state = HSI_CHANNEL_STATE_UNAVAIL;
		ret = 0;
	}
	spin_unlock_bh(&hsi_iface.lock);

	return ret;
}

static void if_hsi_port_event(struct hsi_device *dev, unsigned int event,
			      void *arg)
{
	struct hsi_event ev;
	int i;

	ev.event = HSI_EV_EXCEP;
	ev.data = (u32 *) 0;
	ev.count = 0;

	switch (event) {
	case HSI_EVENT_BREAK_DETECTED:
		pr_debug("%s, HWBREAK detected\n", __func__);
		ev.data = (u32 *) HSI_HWBREAK;
		for (i = 0; i < HSI_MAX_CHAR_DEVS; i++) {
			if (hsi_iface.channels[i].opened)
				if_hsi_notify(i, &ev);
		}
		break;
	case HSI_EVENT_HSR_DATAAVAILABLE:
		i = (int)arg;
		pr_debug("%s, HSI_EVENT_HSR_DATAAVAILABLE channel = %d\n",
			 __func__, i);
		ev.event = HSI_EV_AVAIL;
		if (hsi_iface.channels[i].opened)
			if_hsi_notify(i, &ev);
		break;
	case HSI_EVENT_CAWAKE_UP:
		pr_debug("%s, CAWAKE up\n", __func__);
		break;
	case HSI_EVENT_CAWAKE_DOWN:
		pr_debug("%s, CAWAKE down\n", __func__);
		break;
	case HSI_EVENT_ERROR:
		pr_debug("%s, HSI ERROR occurred\n", __func__);
		break;
	default:
		pr_warning("%s, Unknown event (%d)\n", __func__, event);
		break;
	}
}

int __init if_hsi_init(unsigned int port, unsigned int *channels_map,
		       unsigned int num_channels)
{
	struct if_hsi_channel *channel;
	int i, ret = 0;

	pr_debug("%s, port = %d\n", __func__, port);

	port -= 1;
	if (port >= HSI_MAX_PORTS)
		return -EINVAL;

	hsi_iface.bootstrap = 1;
	spin_lock_init(&hsi_iface.lock);

	for (i = 0; i < HSI_MAX_PORTS; i++)
		if_hsi_char_driver.ch_mask[i] = 0;

	for (i = 0; i < HSI_MAX_CHAR_DEVS; i++) {
		channel = &hsi_iface.channels[i];
		channel->dev = NULL;
		channel->opened = 0;
		channel->state = HSI_CHANNEL_STATE_UNAVAIL;
		channel->channel_id = i;
		spin_lock_init(&channel->lock);
	}

	for (i = 0; (i < num_channels) && channels_map[i]; i++) {
		pr_debug("%s, port = %d, channels_map[i] = %d\n", __func__,
			 port, channels_map[i]);
		if ((channels_map[i] - 1) < HSI_MAX_CHAR_DEV_ID) {
			if_hsi_char_driver.ch_mask[port] |=
			    (1 << (channels_map[i] - 1));
		} else {
			pr_err("Channel %d cannot be handled by the HSI driver.\n",
			       channels_map[i]);
			return -EINVAL;
		}
	}

	hsi_iface.init_chan_map = if_hsi_char_driver.ch_mask[port];

	ret = hsi_register_driver(&if_hsi_char_driver);
	if (ret)
		pr_err("Error while registering HSI driver: %d\n", ret);

	if (hsi_iface.init_chan_map) {
		ret = -ENXIO;
		pr_err("HSI: Some channels could not be registered (out of range or already registered?)\n");
	}
	return ret;
}

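/*
 * Illustrative call (hypothetical values): the port number and the channel
 * numbers in the map are 1-based at this interface, and the map is
 * terminated by a 0 entry:
 *
 *	static unsigned int channels_map[] = { 1, 2, 3, 4, 0 };
 *
 *	int err = if_hsi_init(1, channels_map, ARRAY_SIZE(channels_map));
 *	if (err)
 *		pr_err("hsi_char: interface init failed (%d)\n", err);
 *
 * Internally this selects port 0 and sets bits 0-3 in ch_mask[0].
 */
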
int __devexit if_hsi_exit(void)
{
	struct if_hsi_channel *channel;
	unsigned long *address;
	int i, port;

	pr_debug("%s\n", __func__);

	for (port = 0; port < HSI_MAX_PORTS; port++) {
		if (if_hsi_char_driver.ch_mask[port])
			break;
	}

	if (port == HSI_MAX_PORTS)
		return -ENXIO;

	address = &if_hsi_char_driver.ch_mask[port];

	for (i = 0; i < HSI_MAX_CHAR_DEVS; i++) {
		channel = &hsi_iface.channels[i];
		if (channel->opened) {
			/* state 0 drives ACWAKE down; the original passed the
			 * HSI_IOCTL_ACWAKE_DOWN ioctl code as the state flag */
			if_hsi_set_acwakeline(i, 0);
			if_hsi_closechannel(channel);
		}
	}
	hsi_unregister_driver(&if_hsi_char_driver);
	return 0;
}