drivers/hsi/clients/hsi_char.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * HSI character device driver, implements the character device
 * interface.
 *
 * Copyright (C) 2010 Nokia Corporation. All rights reserved.
 *
 * Contact: Andras Domokos <andras.domokos@nokia.com>
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <linux/ioctl.h>
#include <linux/wait.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/scatterlist.h>
#include <linux/stat.h>
#include <linux/hsi/hsi.h>
#include <linux/hsi/hsi_char.h>

#define HSC_DEVS		16 /* Num of channels */
#define HSC_MSGS		4

#define HSC_RXBREAK		0

#define HSC_ID_BITS		6
#define HSC_PORT_ID_BITS	4
#define HSC_ID_MASK		3
#define HSC_PORT_ID_MASK	3
#define HSC_CH_MASK		0xf

/*
 * We support up to 4 controllers that can have up to 4
 * ports, which should currently be more than enough.
 */
#define HSC_BASEMINOR(id, port_id) \
		((((id) & HSC_ID_MASK) << HSC_ID_BITS) | \
		(((port_id) & HSC_PORT_ID_MASK) << HSC_PORT_ID_BITS))
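
/*
 * Worked example (illustrative): controller id 1, port id 2 gives a base
 * minor of ((1 & HSC_ID_MASK) << HSC_ID_BITS) |
 * ((2 & HSC_PORT_ID_MASK) << HSC_PORT_ID_BITS) = 0x40 | 0x20 = 0x60, so
 * that port's channels occupy minors 0x60..0x6f and HSC_CH_MASK selects
 * the channel within the port.
 */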

enum {
	HSC_CH_OPEN,
	HSC_CH_READ,
	HSC_CH_WRITE,
	HSC_CH_WLINE,
};

enum {
	HSC_RX,
	HSC_TX,
};

struct hsc_client_data;

/**
 * struct hsc_channel - hsi_char internal channel data
 * @ch: channel number
 * @flags: Keeps state of the channel (open/close, reading, writing)
 * @free_msgs_list: List of free HSI messages/requests
 * @rx_msgs_queue: List of pending RX requests
 * @tx_msgs_queue: List of pending TX requests
 * @lock: Serialize access to the lists
 * @cl: reference to the associated hsi_client
 * @cl_data: reference to the client data that this channel belongs to
 * @rx_wait: RX requests wait queue
 * @tx_wait: TX requests wait queue
 */
struct hsc_channel {
	unsigned int		ch;
	unsigned long		flags;
	struct list_head	free_msgs_list;
	struct list_head	rx_msgs_queue;
	struct list_head	tx_msgs_queue;
	spinlock_t		lock;
	struct hsi_client	*cl;
	struct hsc_client_data	*cl_data;
	wait_queue_head_t	rx_wait;
	wait_queue_head_t	tx_wait;
};

/**
 * struct hsc_client_data - hsi_char internal client data
 * @cdev: Character device associated to the hsi_client
 * @lock: Lock to serialize open/close access
 * @flags: Keeps track of port state (rx hwbreak armed)
 * @usecnt: Use count for claiming the HSI port (mutex protected)
 * @cl: Reference to the HSI client
 * @channels: Array of channels accessible by the client
 */
struct hsc_client_data {
	struct cdev		cdev;
	struct mutex		lock;
	unsigned long		flags;
	unsigned int		usecnt;
	struct hsi_client	*cl;
	struct hsc_channel	channels[HSC_DEVS];
};

/* Stores the major number dynamically allocated for hsi_char */
static unsigned int hsc_major;
/* Maximum buffer size that hsi_char will accept from userspace */
static unsigned int max_data_size = 0x1000;
module_param(max_data_size, uint, 0);
MODULE_PARM_DESC(max_data_size, "max read/write data size [4,8..65536] (^2)");
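
/*
 * For example (illustrative), a larger buffer can be requested at load
 * time with "modprobe hsi_char max_data_size=8192"; the value must be a
 * power of two in the range validated by hsc_init() below.
 */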

static void hsc_add_tail(struct hsc_channel *channel, struct hsi_msg *msg,
			struct list_head *queue)
{
	unsigned long flags;

	spin_lock_irqsave(&channel->lock, flags);
	list_add_tail(&msg->link, queue);
	spin_unlock_irqrestore(&channel->lock, flags);
}

static struct hsi_msg *hsc_get_first_msg(struct hsc_channel *channel,
					struct list_head *queue)
{
	struct hsi_msg *msg = NULL;
	unsigned long flags;

	spin_lock_irqsave(&channel->lock, flags);

	if (list_empty(queue))
		goto out;

	msg = list_first_entry(queue, struct hsi_msg, link);
	list_del(&msg->link);
out:
	spin_unlock_irqrestore(&channel->lock, flags);

	return msg;
}

static inline void hsc_msg_free(struct hsi_msg *msg)
{
	kfree(sg_virt(msg->sgt.sgl));
	hsi_free_msg(msg);
}

static void hsc_free_list(struct list_head *list)
{
	struct hsi_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, list, link) {
		list_del(&msg->link);
		hsc_msg_free(msg);
	}
}

static void hsc_reset_list(struct hsc_channel *channel, struct list_head *l)
{
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&channel->lock, flags);
	list_splice_init(l, &list);
	spin_unlock_irqrestore(&channel->lock, flags);

	hsc_free_list(&list);
}

static inline struct hsi_msg *hsc_msg_alloc(unsigned int alloc_size)
{
	struct hsi_msg *msg;
	void *buf;

	msg = hsi_alloc_msg(1, GFP_KERNEL);
	if (!msg)
		goto out;
	buf = kmalloc(alloc_size, GFP_KERNEL);
	if (!buf) {
		hsi_free_msg(msg);
		goto out;
	}
	sg_init_one(msg->sgt.sgl, buf, alloc_size);
	/* Ignore false positive, due to sg pointer handling */
	kmemleak_ignore(buf);

	return msg;
out:
	return NULL;
}

static inline int hsc_msgs_alloc(struct hsc_channel *channel)
{
	struct hsi_msg *msg;
	int i;

	for (i = 0; i < HSC_MSGS; i++) {
		msg = hsc_msg_alloc(max_data_size);
		if (!msg)
			goto out;
		msg->channel = channel->ch;
		list_add_tail(&msg->link, &channel->free_msgs_list);
	}

	return 0;
out:
	hsc_free_list(&channel->free_msgs_list);

	return -ENOMEM;
}
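
/*
 * Each channel keeps a private pool of HSC_MSGS preallocated messages of
 * max_data_size bytes each: read/write borrow a message from
 * free_msgs_list and it is returned there once the transfer has been
 * consumed or aborted.
 */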

static inline unsigned int hsc_msg_len_get(struct hsi_msg *msg)
{
	return msg->sgt.sgl->length;
}

static inline void hsc_msg_len_set(struct hsi_msg *msg, unsigned int len)
{
	msg->sgt.sgl->length = len;
}

static void hsc_rx_completed(struct hsi_msg *msg)
{
	struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl);
	struct hsc_channel *channel = cl_data->channels + msg->channel;

	if (test_bit(HSC_CH_READ, &channel->flags)) {
		hsc_add_tail(channel, msg, &channel->rx_msgs_queue);
		wake_up(&channel->rx_wait);
	} else {
		hsc_add_tail(channel, msg, &channel->free_msgs_list);
	}
}

static void hsc_rx_msg_destructor(struct hsi_msg *msg)
{
	msg->status = HSI_STATUS_ERROR;
	hsc_msg_len_set(msg, 0);
	hsc_rx_completed(msg);
}

static void hsc_tx_completed(struct hsi_msg *msg)
{
	struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl);
	struct hsc_channel *channel = cl_data->channels + msg->channel;

	if (test_bit(HSC_CH_WRITE, &channel->flags)) {
		hsc_add_tail(channel, msg, &channel->tx_msgs_queue);
		wake_up(&channel->tx_wait);
	} else {
		hsc_add_tail(channel, msg, &channel->free_msgs_list);
	}
}

static void hsc_tx_msg_destructor(struct hsi_msg *msg)
{
	msg->status = HSI_STATUS_ERROR;
	hsc_msg_len_set(msg, 0);
	hsc_tx_completed(msg);
}
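
/*
 * The completion and destructor callbacks above route finished messages:
 * they are queued on rx_msgs_queue/tx_msgs_queue to wake a blocked
 * reader/writer, or returned straight to free_msgs_list when nobody is
 * currently waiting on that channel.
 */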

static void hsc_break_req_destructor(struct hsi_msg *msg)
{
	struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl);

	hsi_free_msg(msg);
	clear_bit(HSC_RXBREAK, &cl_data->flags);
}

static void hsc_break_received(struct hsi_msg *msg)
{
	struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl);
	struct hsc_channel *channel = cl_data->channels;
	int i, ret;

	/* Broadcast HWBREAK on all channels */
	for (i = 0; i < HSC_DEVS; i++, channel++) {
		struct hsi_msg *msg2;

		if (!test_bit(HSC_CH_READ, &channel->flags))
			continue;
		msg2 = hsc_get_first_msg(channel, &channel->free_msgs_list);
		if (!msg2)
			continue;
		clear_bit(HSC_CH_READ, &channel->flags);
		hsc_msg_len_set(msg2, 0);
		msg2->status = HSI_STATUS_COMPLETED;
		hsc_add_tail(channel, msg2, &channel->rx_msgs_queue);
		wake_up(&channel->rx_wait);
	}
	hsi_flush(msg->cl);
	ret = hsi_async_read(msg->cl, msg);
	if (ret < 0)
		hsc_break_req_destructor(msg);
}
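
/*
 * Once armed (see hsc_break_request() below), the zero-length break_frame
 * request stays outstanding: a received HWBREAK is reported as a
 * zero-length, HSI_STATUS_COMPLETED read on every channel currently
 * reading, and the break request is re-queued after flushing the port.
 */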

static int hsc_break_request(struct hsi_client *cl)
{
	struct hsc_client_data *cl_data = hsi_client_drvdata(cl);
	struct hsi_msg *msg;
	int ret;

	if (test_and_set_bit(HSC_RXBREAK, &cl_data->flags))
		return -EBUSY;

	msg = hsi_alloc_msg(0, GFP_KERNEL);
	if (!msg) {
		clear_bit(HSC_RXBREAK, &cl_data->flags);
		return -ENOMEM;
	}
	msg->break_frame = 1;
	msg->complete = hsc_break_received;
	msg->destructor = hsc_break_req_destructor;
	ret = hsi_async_read(cl, msg);
	if (ret < 0)
		hsc_break_req_destructor(msg);

	return ret;
}

static int hsc_break_send(struct hsi_client *cl)
{
	struct hsi_msg *msg;
	int ret;

	msg = hsi_alloc_msg(0, GFP_ATOMIC);
	if (!msg)
		return -ENOMEM;
	msg->break_frame = 1;
	msg->complete = hsi_free_msg;
	msg->destructor = hsi_free_msg;
	ret = hsi_async_write(cl, msg);
	if (ret < 0)
		hsi_free_msg(msg);

	return ret;
}

static int hsc_rx_set(struct hsi_client *cl, struct hsc_rx_config *rxc)
{
	struct hsi_config tmp;
	int ret;

	if ((rxc->mode != HSI_MODE_STREAM) && (rxc->mode != HSI_MODE_FRAME))
		return -EINVAL;
	if ((rxc->channels == 0) || (rxc->channels > HSC_DEVS))
		return -EINVAL;
	if (rxc->channels & (rxc->channels - 1))
		return -EINVAL;
	if ((rxc->flow != HSI_FLOW_SYNC) && (rxc->flow != HSI_FLOW_PIPE))
		return -EINVAL;
	tmp = cl->rx_cfg;
	cl->rx_cfg.mode = rxc->mode;
	cl->rx_cfg.num_hw_channels = rxc->channels;
	cl->rx_cfg.flow = rxc->flow;
	ret = hsi_setup(cl);
	if (ret < 0) {
		cl->rx_cfg = tmp;
		return ret;
	}
	if (rxc->mode == HSI_MODE_FRAME)
		hsc_break_request(cl);

	return ret;
}

static inline void hsc_rx_get(struct hsi_client *cl, struct hsc_rx_config *rxc)
{
	rxc->mode = cl->rx_cfg.mode;
	rxc->channels = cl->rx_cfg.num_hw_channels;
	rxc->flow = cl->rx_cfg.flow;
}

static int hsc_tx_set(struct hsi_client *cl, struct hsc_tx_config *txc)
{
	struct hsi_config tmp;
	int ret;

	if ((txc->mode != HSI_MODE_STREAM) && (txc->mode != HSI_MODE_FRAME))
		return -EINVAL;
	if ((txc->channels == 0) || (txc->channels > HSC_DEVS))
		return -EINVAL;
	if (txc->channels & (txc->channels - 1))
		return -EINVAL;
	if ((txc->arb_mode != HSI_ARB_RR) && (txc->arb_mode != HSI_ARB_PRIO))
		return -EINVAL;
	tmp = cl->tx_cfg;
	cl->tx_cfg.mode = txc->mode;
	cl->tx_cfg.num_hw_channels = txc->channels;
	cl->tx_cfg.speed = txc->speed;
	cl->tx_cfg.arb_mode = txc->arb_mode;
	ret = hsi_setup(cl);
	if (ret < 0) {
		cl->tx_cfg = tmp;
		return ret;
	}

	return ret;
}

static inline void hsc_tx_get(struct hsi_client *cl, struct hsc_tx_config *txc)
{
	txc->mode = cl->tx_cfg.mode;
	txc->channels = cl->tx_cfg.num_hw_channels;
	txc->speed = cl->tx_cfg.speed;
	txc->arb_mode = cl->tx_cfg.arb_mode;
}
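
/*
 * read() semantics: one outstanding RX transfer per channel. The requested
 * length must be a multiple of 4 bytes and is capped at max_data_size; the
 * call blocks until the transfer completes, a HWBREAK is reported (returned
 * as a zero-length read), or a signal interrupts the wait (-EINTR).
 */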

static ssize_t hsc_read(struct file *file, char __user *buf, size_t len,
						loff_t *ppos __maybe_unused)
{
	struct hsc_channel *channel = file->private_data;
	struct hsi_msg *msg;
	ssize_t ret;

	if (len == 0)
		return 0;
	if (!IS_ALIGNED(len, sizeof(u32)))
		return -EINVAL;
	if (len > max_data_size)
		len = max_data_size;
	if (channel->ch >= channel->cl->rx_cfg.num_hw_channels)
		return -ECHRNG;
	if (test_and_set_bit(HSC_CH_READ, &channel->flags))
		return -EBUSY;
	msg = hsc_get_first_msg(channel, &channel->free_msgs_list);
	if (!msg) {
		ret = -ENOSPC;
		goto out;
	}
	hsc_msg_len_set(msg, len);
	msg->complete = hsc_rx_completed;
	msg->destructor = hsc_rx_msg_destructor;
	ret = hsi_async_read(channel->cl, msg);
	if (ret < 0) {
		hsc_add_tail(channel, msg, &channel->free_msgs_list);
		goto out;
	}

	ret = wait_event_interruptible(channel->rx_wait,
				!list_empty(&channel->rx_msgs_queue));
	if (ret < 0) {
		clear_bit(HSC_CH_READ, &channel->flags);
		hsi_flush(channel->cl);
		return -EINTR;
	}

	msg = hsc_get_first_msg(channel, &channel->rx_msgs_queue);
	if (msg) {
		if (msg->status != HSI_STATUS_ERROR) {
			ret = copy_to_user((void __user *)buf,
				sg_virt(msg->sgt.sgl), hsc_msg_len_get(msg));
			if (ret)
				ret = -EFAULT;
			else
				ret = hsc_msg_len_get(msg);
		} else {
			ret = -EIO;
		}
		hsc_add_tail(channel, msg, &channel->free_msgs_list);
	}
out:
	clear_bit(HSC_CH_READ, &channel->flags);

	return ret;
}
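
/*
 * write() mirrors read(): one outstanding TX transfer per channel, length a
 * multiple of 4 bytes and capped at max_data_size. The call blocks until
 * the controller completes the transfer or a signal interrupts the wait, in
 * which case the port is flushed and -EINTR is returned.
 */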

static ssize_t hsc_write(struct file *file, const char __user *buf, size_t len,
						loff_t *ppos __maybe_unused)
{
	struct hsc_channel *channel = file->private_data;
	struct hsi_msg *msg;
	ssize_t ret;

	if ((len == 0) || !IS_ALIGNED(len, sizeof(u32)))
		return -EINVAL;
	if (len > max_data_size)
		len = max_data_size;
	if (channel->ch >= channel->cl->tx_cfg.num_hw_channels)
		return -ECHRNG;
	if (test_and_set_bit(HSC_CH_WRITE, &channel->flags))
		return -EBUSY;
	msg = hsc_get_first_msg(channel, &channel->free_msgs_list);
	if (!msg) {
		clear_bit(HSC_CH_WRITE, &channel->flags);
		return -ENOSPC;
	}
	if (copy_from_user(sg_virt(msg->sgt.sgl), (void __user *)buf, len)) {
		ret = -EFAULT;
		goto out;
	}
	hsc_msg_len_set(msg, len);
	msg->complete = hsc_tx_completed;
	msg->destructor = hsc_tx_msg_destructor;
	ret = hsi_async_write(channel->cl, msg);
	if (ret < 0)
		goto out;

	ret = wait_event_interruptible(channel->tx_wait,
				!list_empty(&channel->tx_msgs_queue));
	if (ret < 0) {
		clear_bit(HSC_CH_WRITE, &channel->flags);
		hsi_flush(channel->cl);
		return -EINTR;
	}

	msg = hsc_get_first_msg(channel, &channel->tx_msgs_queue);
	if (msg) {
		if (msg->status == HSI_STATUS_ERROR)
			ret = -EIO;
		else
			ret = hsc_msg_len_get(msg);

		hsc_add_tail(channel, msg, &channel->free_msgs_list);
	}
out:
	clear_bit(HSC_CH_WRITE, &channel->flags);

	return ret;
}
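
/*
 * Illustrative userspace sketch (the device node name is hypothetical;
 * this driver only registers the character devices and does not create
 * /dev nodes itself):
 *
 *	int fd = open("/dev/hsi_char0", O_RDWR);
 *	struct hsc_tx_config txc;
 *
 *	ioctl(fd, HSC_GET_TX, &txc);	read back the current TX config,
 *	txc.speed = 96000;		tweak it (value is illustrative),
 *	ioctl(fd, HSC_SET_TX, &txc);	and apply it again
 *	write(fd, buf, 256);		length must be a multiple of 4
 */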

static long hsc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct hsc_channel *channel = file->private_data;
	unsigned int state;
	struct hsc_rx_config rxc;
	struct hsc_tx_config txc;
	long ret = 0;

	switch (cmd) {
	case HSC_RESET:
		hsi_flush(channel->cl);
		break;
	case HSC_SET_PM:
		if (copy_from_user(&state, (void __user *)arg, sizeof(state)))
			return -EFAULT;
		if (state == HSC_PM_DISABLE) {
			if (test_and_set_bit(HSC_CH_WLINE, &channel->flags))
				return -EINVAL;
			ret = hsi_start_tx(channel->cl);
		} else if (state == HSC_PM_ENABLE) {
			if (!test_and_clear_bit(HSC_CH_WLINE, &channel->flags))
				return -EINVAL;
			ret = hsi_stop_tx(channel->cl);
		} else {
			ret = -EINVAL;
		}
		break;
	case HSC_SEND_BREAK:
		return hsc_break_send(channel->cl);
	case HSC_SET_RX:
		if (copy_from_user(&rxc, (void __user *)arg, sizeof(rxc)))
			return -EFAULT;
		return hsc_rx_set(channel->cl, &rxc);
	case HSC_GET_RX:
		hsc_rx_get(channel->cl, &rxc);
		if (copy_to_user((void __user *)arg, &rxc, sizeof(rxc)))
			return -EFAULT;
		break;
	case HSC_SET_TX:
		if (copy_from_user(&txc, (void __user *)arg, sizeof(txc)))
			return -EFAULT;
		return hsc_tx_set(channel->cl, &txc);
	case HSC_GET_TX:
		hsc_tx_get(channel->cl, &txc);
		if (copy_to_user((void __user *)arg, &txc, sizeof(txc)))
			return -EFAULT;
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return ret;
}

static inline void __hsc_port_release(struct hsc_client_data *cl_data)
{
	BUG_ON(cl_data->usecnt == 0);

	if (--cl_data->usecnt == 0) {
		hsi_flush(cl_data->cl);
		hsi_release_port(cl_data->cl);
	}
}
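
/*
 * Each channel node is exclusive-open (HSC_CH_OPEN). The first open on a
 * port claims the HSI port and runs hsi_setup(); subsequent opens only bump
 * usecnt, and __hsc_port_release() above releases the port again once the
 * last channel is closed.
 */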

static int hsc_open(struct inode *inode, struct file *file)
{
	struct hsc_client_data *cl_data;
	struct hsc_channel *channel;
	int ret = 0;

	pr_debug("open, minor = %d\n", iminor(inode));

	cl_data = container_of(inode->i_cdev, struct hsc_client_data, cdev);
	mutex_lock(&cl_data->lock);
	channel = cl_data->channels + (iminor(inode) & HSC_CH_MASK);

	if (test_and_set_bit(HSC_CH_OPEN, &channel->flags)) {
		ret = -EBUSY;
		goto out;
	}
	/*
	 * Check if we have already claimed the port associated to the HSI
	 * client. If not then try to claim it, else increase its refcount
	 */
	if (cl_data->usecnt == 0) {
		ret = hsi_claim_port(cl_data->cl, 0);
		if (ret < 0)
			goto out;
		hsi_setup(cl_data->cl);
	}
	cl_data->usecnt++;

	ret = hsc_msgs_alloc(channel);
	if (ret < 0) {
		__hsc_port_release(cl_data);
		goto out;
	}

	file->private_data = channel;
	mutex_unlock(&cl_data->lock);

	return ret;
out:
	mutex_unlock(&cl_data->lock);

	return ret;
}

static int hsc_release(struct inode *inode __maybe_unused, struct file *file)
{
	struct hsc_channel *channel = file->private_data;
	struct hsc_client_data *cl_data = channel->cl_data;

	mutex_lock(&cl_data->lock);
	file->private_data = NULL;
	if (test_and_clear_bit(HSC_CH_WLINE, &channel->flags))
		hsi_stop_tx(channel->cl);
	__hsc_port_release(cl_data);
	hsc_reset_list(channel, &channel->rx_msgs_queue);
	hsc_reset_list(channel, &channel->tx_msgs_queue);
	hsc_reset_list(channel, &channel->free_msgs_list);
	clear_bit(HSC_CH_READ, &channel->flags);
	clear_bit(HSC_CH_WRITE, &channel->flags);
	clear_bit(HSC_CH_OPEN, &channel->flags);
	wake_up(&channel->rx_wait);
	wake_up(&channel->tx_wait);
	mutex_unlock(&cl_data->lock);

	return 0;
}

static const struct file_operations hsc_fops = {
	.owner		= THIS_MODULE,
	.read		= hsc_read,
	.write		= hsc_write,
	.unlocked_ioctl	= hsc_ioctl,
	.open		= hsc_open,
	.release	= hsc_release,
};

static void hsc_channel_init(struct hsc_channel *channel)
{
	init_waitqueue_head(&channel->rx_wait);
	init_waitqueue_head(&channel->tx_wait);
	spin_lock_init(&channel->lock);
	INIT_LIST_HEAD(&channel->free_msgs_list);
	INIT_LIST_HEAD(&channel->rx_msgs_queue);
	INIT_LIST_HEAD(&channel->tx_msgs_queue);
}

static int hsc_probe(struct device *dev)
{
	const char devname[] = "hsi_char";
	struct hsc_client_data *cl_data;
	struct hsc_channel *channel;
	struct hsi_client *cl = to_hsi_client(dev);
	unsigned int hsc_baseminor;
	dev_t hsc_dev;
	int ret;
	int i;

	cl_data = kzalloc(sizeof(*cl_data), GFP_KERNEL);
	if (!cl_data)
		return -ENOMEM;

	hsc_baseminor = HSC_BASEMINOR(hsi_id(cl), hsi_port_id(cl));
	if (!hsc_major) {
		ret = alloc_chrdev_region(&hsc_dev, hsc_baseminor,
						HSC_DEVS, devname);
		if (ret == 0)
			hsc_major = MAJOR(hsc_dev);
	} else {
		hsc_dev = MKDEV(hsc_major, hsc_baseminor);
		ret = register_chrdev_region(hsc_dev, HSC_DEVS, devname);
	}
	if (ret < 0) {
		dev_err(dev, "Device %s allocation failed %d\n",
			hsc_major ? "minor" : "major", ret);
		goto out1;
	}
	mutex_init(&cl_data->lock);
	hsi_client_set_drvdata(cl, cl_data);
	cdev_init(&cl_data->cdev, &hsc_fops);
	cl_data->cdev.owner = THIS_MODULE;
	cl_data->cl = cl;
	for (i = 0, channel = cl_data->channels; i < HSC_DEVS; i++, channel++) {
		hsc_channel_init(channel);
		channel->ch = i;
		channel->cl = cl;
		channel->cl_data = cl_data;
	}

	/* 1 hsi client -> N char devices (one for each channel) */
	ret = cdev_add(&cl_data->cdev, hsc_dev, HSC_DEVS);
	if (ret) {
		dev_err(dev, "Could not add char device %d\n", ret);
		goto out2;
	}

	return 0;
out2:
	unregister_chrdev_region(hsc_dev, HSC_DEVS);
out1:
	kfree(cl_data);

	return ret;
}
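
/*
 * Note: probe registers HSC_DEVS minors starting at the base minor derived
 * from the controller and port ids, and the first probed port allocates the
 * shared major number. Creating the matching /dev nodes is left to
 * userspace.
 */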

static int hsc_remove(struct device *dev)
{
	struct hsi_client *cl = to_hsi_client(dev);
	struct hsc_client_data *cl_data = hsi_client_drvdata(cl);
	dev_t hsc_dev = cl_data->cdev.dev;

	cdev_del(&cl_data->cdev);
	unregister_chrdev_region(hsc_dev, HSC_DEVS);
	hsi_client_set_drvdata(cl, NULL);
	kfree(cl_data);

	return 0;
}

static struct hsi_client_driver hsc_driver = {
	.driver = {
		.name	= "hsi_char",
		.owner	= THIS_MODULE,
		.probe	= hsc_probe,
		.remove	= hsc_remove,
	},
};

static int __init hsc_init(void)
{
	int ret;

	if ((max_data_size < 4) || (max_data_size > 0x10000) ||
		(max_data_size & (max_data_size - 1))) {
		pr_err("Invalid max read/write data size\n");
		return -EINVAL;
	}

	ret = hsi_register_client_driver(&hsc_driver);
	if (ret) {
		pr_err("Error while registering HSI/SSI driver %d\n", ret);
		return ret;
	}

	pr_info("HSI/SSI char device loaded\n");

	return 0;
}
module_init(hsc_init);

static void __exit hsc_exit(void)
{
	hsi_unregister_client_driver(&hsc_driver);
	pr_info("HSI char device removed\n");
}
module_exit(hsc_exit);

MODULE_AUTHOR("Andras Domokos <andras.domokos@nokia.com>");
MODULE_ALIAS("hsi:hsi_char");
MODULE_DESCRIPTION("HSI character device");
MODULE_LICENSE("GPL v2");