drivers/net/caif/caif_hsi.c
/*
 * Copyright (C) ST-Ericsson AB 2010
 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
 * Author:  Daniel Martensson / daniel.martensson@stericsson.com
 *	    Dmitry.Tarnyagin  / dmitry.tarnyagin@stericsson.com
 * License terms: GNU General Public License (GPL) version 2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/if_arp.h>
#include <linux/timer.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_hsi.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>");
MODULE_DESCRIPTION("CAIF HSI driver");

/* Returns the number of padding bytes for alignment. */
#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
				(((pow)-((x)&((pow)-1)))))
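
/*
 * Worked example of the macro above: with x = 5 and pow = 4,
 * x & (pow - 1) == 1, so PAD_POW2(5, 4) == 4 - 1 == 3 padding bytes,
 * aligning 5 up to 8. If x is already aligned (e.g. x = 8), the
 * result is 0.
 */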

static int inactivity_timeout = 1000;
module_param(inactivity_timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(inactivity_timeout, "Inactivity timeout on HSI, ms.");

static int aggregation_timeout = 1;
module_param(aggregation_timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(aggregation_timeout, "Aggregation timeout on HSI, ms.");
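
/*
 * Note on tuning: the two timeouts above are declared with S_IWUSR, so
 * root can change them at runtime through sysfs (assuming the usual
 * module name caif_hsi), e.g. via
 * /sys/module/caif_hsi/parameters/inactivity_timeout. The alignment
 * and threshold parameters below are S_IRUGO only and are therefore
 * fixed after module load.
 */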

/*
 * HSI padding options.
 * Warning: must be a power of 2 (the & operation is used) and cannot be zero!
 */
static int hsi_head_align = 4;
module_param(hsi_head_align, int, S_IRUGO);
MODULE_PARM_DESC(hsi_head_align, "HSI head alignment.");

static int hsi_tail_align = 4;
module_param(hsi_tail_align, int, S_IRUGO);
MODULE_PARM_DESC(hsi_tail_align, "HSI tail alignment.");

/*
 * HSI link layer flow-control thresholds.
 * Warning: A high threshold value might increase throughput but it will at
 * the same time prevent channel prioritization and increase the risk of
 * flooding the modem. The high threshold should be above the low.
 */
static int hsi_high_threshold = 100;
module_param(hsi_high_threshold, int, S_IRUGO);
MODULE_PARM_DESC(hsi_high_threshold, "HSI high threshold (FLOW OFF).");

static int hsi_low_threshold = 50;
module_param(hsi_low_threshold, int, S_IRUGO);
MODULE_PARM_DESC(hsi_low_threshold, "HSI low threshold (FLOW ON).");

#define ON 1
#define OFF 0

/*
 * Threshold values for the HSI packet queue. Flow control will be asserted
 * when the number of packets exceeds HIGH_WATER_MARK. It will not be
 * de-asserted before the number of packets drops below LOW_WATER_MARK.
 */
#define LOW_WATER_MARK   hsi_low_threshold
#define HIGH_WATER_MARK  hsi_high_threshold
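
/*
 * Example with the default thresholds: flow is turned OFF once more
 * than 100 packets are queued for TX, and turned back ON only when the
 * queue has drained to 50 packets or fewer (see cfhsi_xmit() and
 * cfhsi_tx_done() below).
 */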

static LIST_HEAD(cfhsi_list);
static spinlock_t cfhsi_list_lock;

static void cfhsi_inactivity_tout(unsigned long arg)
{
	struct cfhsi *cfhsi = (struct cfhsi *)arg;

	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	/* Schedule power down work queue. */
	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		queue_work(cfhsi->wq, &cfhsi->wake_down_work);
}

static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi,
					   const struct sk_buff *skb,
					   int direction)
{
	struct caif_payload_info *info;
	int hpad, tpad, len;

	info = (struct caif_payload_info *)&skb->cb;
	hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
	tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);
	len = skb->len + hpad + tpad;

	if (direction > 0)
		cfhsi->aggregation_len += len;
	else if (direction < 0)
		cfhsi->aggregation_len -= len;
}

static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi)
{
	int i;

	if (cfhsi->aggregation_timeout < 0)
		return true;

	for (i = 0; i < CFHSI_PRIO_BEBK; ++i) {
		if (cfhsi->qhead[i].qlen)
			return true;
	}

	/* TODO: Use aggregation_len instead */
	if (cfhsi->qhead[CFHSI_PRIO_BEBK].qlen >= CFHSI_MAX_PKTS)
		return true;

	return false;
}

static struct sk_buff *cfhsi_dequeue(struct cfhsi *cfhsi)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < CFHSI_PRIO_LAST; ++i) {
		skb = skb_dequeue(&cfhsi->qhead[i]);
		if (skb)
			break;
	}

	return skb;
}

static int cfhsi_tx_queue_len(struct cfhsi *cfhsi)
{
	int i, len = 0;
	for (i = 0; i < CFHSI_PRIO_LAST; ++i)
		len += skb_queue_len(&cfhsi->qhead[i]);
	return len;
}

static void cfhsi_abort_tx(struct cfhsi *cfhsi)
{
	struct sk_buff *skb;

	for (;;) {
		spin_lock_bh(&cfhsi->lock);
		skb = cfhsi_dequeue(cfhsi);
		if (!skb)
			break;

		cfhsi->ndev->stats.tx_errors++;
		cfhsi->ndev->stats.tx_dropped++;
		cfhsi_update_aggregation_stats(cfhsi, skb, -1);
		spin_unlock_bh(&cfhsi->lock);
		kfree_skb(skb);
	}
	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		mod_timer(&cfhsi->inactivity_timer,
			  jiffies + cfhsi->inactivity_timeout);
	spin_unlock_bh(&cfhsi->lock);
}

static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
{
	char buffer[32]; /* Any reasonable value */
	size_t fifo_occupancy;
	int ret;

	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	do {
		ret = cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
				&fifo_occupancy);
		if (ret) {
			dev_warn(&cfhsi->ndev->dev,
				"%s: can't get FIFO occupancy: %d.\n",
				__func__, ret);
			break;
		} else if (!fifo_occupancy)
			/* No more data, exiting normally */
			break;

		fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
		set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
		ret = cfhsi->dev->cfhsi_rx(buffer, fifo_occupancy,
				cfhsi->dev);
		if (ret) {
			clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
			dev_warn(&cfhsi->ndev->dev,
				"%s: can't read data: %d.\n",
				__func__, ret);
			break;
		}

		ret = 5 * HZ;
		ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
			 !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);

		if (ret < 0) {
			dev_warn(&cfhsi->ndev->dev,
				"%s: can't wait for flush complete: %d.\n",
				__func__, ret);
			break;
		} else if (!ret) {
			ret = -ETIMEDOUT;
			dev_warn(&cfhsi->ndev->dev,
				"%s: timeout waiting for flush complete.\n",
				__func__);
			break;
		}
	} while (1);

	return ret;
}

static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int nfrms = 0;
	int pld_len = 0;
	struct sk_buff *skb;
	u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;

	skb = cfhsi_dequeue(cfhsi);
	if (!skb)
		return 0;

	/* Clear offset. */
	desc->offset = 0;

	/* Check if we can embed a CAIF frame. */
	if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
		struct caif_payload_info *info;
		int hpad = 0;
		int tpad = 0;

		/* Calculate needed head alignment and tail alignment. */
		info = (struct caif_payload_info *)&skb->cb;

		hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
		tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);

		/* Check if frame still fits with added alignment. */
		if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
			u8 *pemb = desc->emb_frm;
			desc->offset = CFHSI_DESC_SHORT_SZ;
			*pemb = (u8)(hpad - 1);
			pemb += hpad;

			/* Update network statistics. */
			spin_lock_bh(&cfhsi->lock);
			cfhsi->ndev->stats.tx_packets++;
			cfhsi->ndev->stats.tx_bytes += skb->len;
			cfhsi_update_aggregation_stats(cfhsi, skb, -1);
			spin_unlock_bh(&cfhsi->lock);

			/* Copy in embedded CAIF frame. */
			skb_copy_bits(skb, 0, pemb, skb->len);

			/* Consume the SKB */
			consume_skb(skb);
			skb = NULL;
		}
	}

	/* Create payload CAIF frames. */
	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
	while (nfrms < CFHSI_MAX_PKTS) {
		struct caif_payload_info *info;
		int hpad = 0;
		int tpad = 0;

		if (!skb)
			skb = cfhsi_dequeue(cfhsi);

		if (!skb)
			break;

		/* Calculate needed head alignment and tail alignment. */
		info = (struct caif_payload_info *)&skb->cb;

		hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
		tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);

		/* Fill in CAIF frame length in descriptor. */
		desc->cffrm_len[nfrms] = hpad + skb->len + tpad;

		/* Fill head padding information. */
		*pfrm = (u8)(hpad - 1);
		pfrm += hpad;

		/* Update network statistics. */
		spin_lock_bh(&cfhsi->lock);
		cfhsi->ndev->stats.tx_packets++;
		cfhsi->ndev->stats.tx_bytes += skb->len;
		cfhsi_update_aggregation_stats(cfhsi, skb, -1);
		spin_unlock_bh(&cfhsi->lock);

		/* Copy in CAIF frame. */
		skb_copy_bits(skb, 0, pfrm, skb->len);

		/* Update payload length. */
		pld_len += desc->cffrm_len[nfrms];

		/* Update frame pointer. */
		pfrm += skb->len + tpad;

		/* Consume the SKB */
		consume_skb(skb);
		skb = NULL;

		/* Update number of frames. */
		nfrms++;
	}

	/* Unused length fields should be zero-filled (according to SPEC). */
	while (nfrms < CFHSI_MAX_PKTS) {
		desc->cffrm_len[nfrms] = 0x0000;
		nfrms++;
	}

	/* Check if we can piggy-back another descriptor. */
	if (cfhsi_can_send_aggregate(cfhsi))
		desc->header |= CFHSI_PIGGY_DESC;
	else
		desc->header &= ~CFHSI_PIGGY_DESC;

	return CFHSI_DESC_SZ + pld_len;
}
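
/*
 * Rough sketch of the buffer cfhsi_tx_frm() assembles, derived from the
 * code above and assuming the cfhsi_desc layout defined in caif_hsi.h:
 *
 *   descriptor: [header | offset | cffrm_len[CFHSI_MAX_PKTS] | emb_frm area]
 *   payload:    [pad byte (hpad - 1) | CAIF frame | tail padding] * nfrms
 *
 * Each embedded or payload frame is preceded by a single byte holding
 * the remaining head padding, so the receive side can skip it again
 * (see "Remove offset padding" in cfhsi_rx_desc() below).
 */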

static void cfhsi_start_tx(struct cfhsi *cfhsi)
{
	struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
	int len, res;

	dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	do {
		/* Create HSI frame. */
		len = cfhsi_tx_frm(desc, cfhsi);
		if (!len) {
			spin_lock_bh(&cfhsi->lock);
			if (unlikely(cfhsi_tx_queue_len(cfhsi))) {
				spin_unlock_bh(&cfhsi->lock);
				res = -EAGAIN;
				continue;
			}
			cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
			/* Start inactivity timer. */
			mod_timer(&cfhsi->inactivity_timer,
				  jiffies + cfhsi->inactivity_timeout);
			spin_unlock_bh(&cfhsi->lock);
			break;
		}

		/* Set up new transfer. */
		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
		if (WARN_ON(res < 0))
			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
				__func__, res);
	} while (res < 0);
}

static void cfhsi_tx_done(struct cfhsi *cfhsi)
{
	dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/*
	 * Send flow on if flow off has been previously signalled
	 * and number of packets is below low water mark.
	 */
	spin_lock_bh(&cfhsi->lock);
	if (cfhsi->flow_off_sent &&
			cfhsi_tx_queue_len(cfhsi) <= cfhsi->q_low_mark &&
			cfhsi->cfdev.flowctrl) {

		cfhsi->flow_off_sent = 0;
		cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
	}

	if (cfhsi_can_send_aggregate(cfhsi)) {
		spin_unlock_bh(&cfhsi->lock);
		cfhsi_start_tx(cfhsi);
	} else {
		mod_timer(&cfhsi->aggregation_timer,
			  jiffies + cfhsi->aggregation_timeout);
		spin_unlock_bh(&cfhsi->lock);
	}

	return;
}

static void cfhsi_tx_done_cb(struct cfhsi_drv *drv)
{
	struct cfhsi *cfhsi;

	cfhsi = container_of(drv, struct cfhsi, drv);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;
	cfhsi_tx_done(cfhsi);
}

static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int xfer_sz = 0;
	int nfrms = 0;
	u16 *plen = NULL;
	u8 *pfrm = NULL;

	if ((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
		dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
			__func__);
		return -EPROTO;
	}

	/* Check for embedded CAIF frame. */
	if (desc->offset) {
		struct sk_buff *skb;
		u8 *dst = NULL;
		int len = 0;
		pfrm = ((u8 *)desc) + desc->offset;

		/* Remove offset padding. */
		pfrm += *pfrm + 1;

		/* Read length of CAIF frame (little endian). */
		len = *pfrm;
		len |= ((*(pfrm + 1)) << 8) & 0xFF00;
		len += 2;	/* Add FCS fields. */
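
		/*
		 * Example: an on-wire length of 0x012A (bytes 0x2A, 0x01
		 * in little-endian order) yields len = 298 + 2 = 300
		 * bytes including the FCS.
		 */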

		/* Sanity check length of CAIF frame. */
		if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
			dev_err(&cfhsi->ndev->dev, "%s: Invalid length.\n",
				__func__);
			return -EPROTO;
		}

		/* Allocate SKB (OK even in IRQ context). */
		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb) {
			dev_err(&cfhsi->ndev->dev, "%s: Out of memory !\n",
				__func__);
			return -ENOMEM;
		}
		caif_assert(skb != NULL);

		dst = skb_put(skb, len);
		memcpy(dst, pfrm, len);

		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfhsi->ndev;

		/*
		 * We are called from an arch-specific platform device.
		 * Unfortunately we don't know what context we're
		 * running in.
		 */
		if (in_interrupt())
			netif_rx(skb);
		else
			netif_rx_ni(skb);

		/* Update network statistics. */
		cfhsi->ndev->stats.rx_packets++;
		cfhsi->ndev->stats.rx_bytes += len;
	}

	/* Calculate transfer length. */
	plen = desc->cffrm_len;
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		xfer_sz += *plen;
		plen++;
		nfrms++;
	}

	/* Check for piggy-backed descriptor. */
	if (desc->header & CFHSI_PIGGY_DESC)
		xfer_sz += CFHSI_DESC_SZ;

	if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX - CFHSI_DESC_SZ))) {
		dev_err(&cfhsi->ndev->dev,
			"%s: Invalid payload len: %d, ignored.\n",
			__func__, xfer_sz);
		return -EPROTO;
	}
	return xfer_sz;
}

static int cfhsi_rx_desc_len(struct cfhsi_desc *desc)
{
	int xfer_sz = 0;
	int nfrms = 0;
	u16 *plen;

	if ((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {

		pr_err("Invalid descriptor. %x %x\n", desc->header,
				desc->offset);
		return -EPROTO;
	}

	/* Calculate transfer length. */
	plen = desc->cffrm_len;
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		xfer_sz += *plen;
		plen++;
		nfrms++;
	}

	if (xfer_sz % 4) {
		pr_err("Invalid payload len: %d, ignored.\n", xfer_sz);
		return -EPROTO;
	}
	return xfer_sz;
}

static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int rx_sz = 0;
	int nfrms = 0;
	u16 *plen = NULL;
	u8 *pfrm = NULL;

	/* Sanity check header and offset. */
	if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
		dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
			__func__);
		return -EPROTO;
	}

	/* Set frame pointer to start of payload. */
	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
	plen = desc->cffrm_len;

	/* Skip already processed frames. */
	while (nfrms < cfhsi->rx_state.nfrms) {
		pfrm += *plen;
		rx_sz += *plen;
		plen++;
		nfrms++;
	}

	/* Parse payload. */
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		struct sk_buff *skb;
		u8 *dst = NULL;
		u8 *pcffrm = NULL;
		int len = 0;

		/* CAIF frame starts after head padding. */
		pcffrm = pfrm + *pfrm + 1;

		/* Read length of CAIF frame (little endian). */
		len = *pcffrm;
		len |= ((*(pcffrm + 1)) << 8) & 0xFF00;
		len += 2;	/* Add FCS fields. */

		/* Sanity check length of CAIF frames. */
		if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
			dev_err(&cfhsi->ndev->dev, "%s: Invalid length.\n",
				__func__);
			return -EPROTO;
		}

		/* Allocate SKB (OK even in IRQ context). */
		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb) {
			dev_err(&cfhsi->ndev->dev, "%s: Out of memory !\n",
				__func__);
			cfhsi->rx_state.nfrms = nfrms;
			return -ENOMEM;
		}
		caif_assert(skb != NULL);

		dst = skb_put(skb, len);
		memcpy(dst, pcffrm, len);

		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfhsi->ndev;

		/*
		 * We're called from a platform device,
		 * and don't know the context we're running in.
		 */
		if (in_interrupt())
			netif_rx(skb);
		else
			netif_rx_ni(skb);

		/* Update network statistics. */
		cfhsi->ndev->stats.rx_packets++;
		cfhsi->ndev->stats.rx_bytes += len;

		pfrm += *plen;
		rx_sz += *plen;
		plen++;
		nfrms++;
	}

	return rx_sz;
}

static void cfhsi_rx_done(struct cfhsi *cfhsi)
{
	int res;
	int desc_pld_len = 0, rx_len, rx_state;
	struct cfhsi_desc *desc = NULL;
	u8 *rx_ptr, *rx_buf;
	struct cfhsi_desc *piggy_desc = NULL;

	desc = (struct cfhsi_desc *)cfhsi->rx_buf;

	dev_dbg(&cfhsi->ndev->dev, "%s\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Update inactivity timer if pending. */
	spin_lock_bh(&cfhsi->lock);
	mod_timer_pending(&cfhsi->inactivity_timer,
			jiffies + cfhsi->inactivity_timeout);
	spin_unlock_bh(&cfhsi->lock);

	if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
		desc_pld_len = cfhsi_rx_desc_len(desc);

		if (desc_pld_len < 0)
			goto out_of_sync;

		rx_buf = cfhsi->rx_buf;
		rx_len = desc_pld_len;
		if (desc_pld_len > 0 && (desc->header & CFHSI_PIGGY_DESC))
			rx_len += CFHSI_DESC_SZ;
		if (desc_pld_len == 0)
			rx_buf = cfhsi->rx_flip_buf;
	} else {
		rx_buf = cfhsi->rx_flip_buf;

		rx_len = CFHSI_DESC_SZ;
		if (cfhsi->rx_state.pld_len > 0 &&
				(desc->header & CFHSI_PIGGY_DESC)) {

			piggy_desc = (struct cfhsi_desc *)
				(desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ +
						cfhsi->rx_state.pld_len);

			cfhsi->rx_state.piggy_desc = true;

			/* Extract payload len from piggy-backed descriptor. */
			desc_pld_len = cfhsi_rx_desc_len(piggy_desc);
			if (desc_pld_len < 0)
				goto out_of_sync;

			if (desc_pld_len > 0)
				rx_len = desc_pld_len;

			if (desc_pld_len > 0 &&
					(piggy_desc->header & CFHSI_PIGGY_DESC))
				rx_len += CFHSI_DESC_SZ;

			/*
			 * Copy needed information from the piggy-backed
			 * descriptor to the descriptor in the start.
			 */
			memcpy(rx_buf, (u8 *)piggy_desc,
					CFHSI_DESC_SHORT_SZ);
			/* Mark no embedded frame here */
			piggy_desc->offset = 0;
			if (desc_pld_len == -EPROTO)
				goto out_of_sync;
		}
	}

	if (desc_pld_len) {
		rx_state = CFHSI_RX_STATE_PAYLOAD;
		rx_ptr = rx_buf + CFHSI_DESC_SZ;
	} else {
		rx_state = CFHSI_RX_STATE_DESC;
		rx_ptr = rx_buf;
		rx_len = CFHSI_DESC_SZ;
	}

	/* Initiate next read */
	if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
		/* Set up new transfer. */
		dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n",
				__func__);

		res = cfhsi->dev->cfhsi_rx(rx_ptr, rx_len,
				cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: RX error %d.\n",
				__func__, res);
			cfhsi->ndev->stats.rx_errors++;
			cfhsi->ndev->stats.rx_dropped++;
		}
	}

	if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
		/* Extract payload from descriptor */
		if (cfhsi_rx_desc(desc, cfhsi) < 0)
			goto out_of_sync;
	} else {
		/* Extract payload */
		if (cfhsi_rx_pld(desc, cfhsi) < 0)
			goto out_of_sync;
		if (piggy_desc) {
			/* Extract any payload in piggyback descriptor. */
			if (cfhsi_rx_desc(piggy_desc, cfhsi) < 0)
				goto out_of_sync;
		}
	}

	/* Update state info */
	memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state));
	cfhsi->rx_state.state = rx_state;
	cfhsi->rx_ptr = rx_ptr;
	cfhsi->rx_len = rx_len;
	cfhsi->rx_state.pld_len = desc_pld_len;
	cfhsi->rx_state.piggy_desc = desc->header & CFHSI_PIGGY_DESC;

	if (rx_buf != cfhsi->rx_buf)
		swap(cfhsi->rx_buf, cfhsi->rx_flip_buf);
	return;

out_of_sync:
	dev_err(&cfhsi->ndev->dev, "%s: Out of sync.\n", __func__);
	print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE,
			cfhsi->rx_buf, CFHSI_DESC_SZ);
	schedule_work(&cfhsi->out_of_sync_work);
}

static void cfhsi_rx_slowpath(unsigned long arg)
{
	struct cfhsi *cfhsi = (struct cfhsi *)arg;

	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	cfhsi_rx_done(cfhsi);
}

static void cfhsi_rx_done_cb(struct cfhsi_drv *drv)
{
	struct cfhsi *cfhsi;

	cfhsi = container_of(drv, struct cfhsi, drv);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits))
		wake_up_interruptible(&cfhsi->flush_fifo_wait);
	else
		cfhsi_rx_done(cfhsi);
}

static void cfhsi_wake_up(struct work_struct *work)
{
	struct cfhsi *cfhsi = NULL;
	int res;
	int len;
	long ret;

	cfhsi = container_of(work, struct cfhsi, wake_up_work);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	if (unlikely(test_bit(CFHSI_AWAKE, &cfhsi->bits))) {
		/* It happens when wakeup is requested by
		 * both ends at the same time. */
		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
		return;
	}

	/* Activate wake line. */
	cfhsi->dev->cfhsi_wake_up(cfhsi->dev);

	dev_dbg(&cfhsi->ndev->dev, "%s: Start waiting.\n",
		__func__);

	/* Wait for acknowledge. */
	ret = CFHSI_WAKE_TOUT;
	ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait,
					test_and_clear_bit(CFHSI_WAKE_UP_ACK,
							&cfhsi->bits), ret);
	if (unlikely(ret < 0)) {
		/* Interrupted by signal. */
		dev_err(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
			__func__, ret);

		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
		return;
	} else if (!ret) {
		bool ca_wake = false;
		size_t fifo_occupancy = 0;

		/* Wakeup timeout */
		dev_dbg(&cfhsi->ndev->dev, "%s: Timeout.\n",
			__func__);

		/* Check FIFO to check if modem has sent something. */
		WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
					&fifo_occupancy));

		dev_dbg(&cfhsi->ndev->dev, "%s: Bytes in FIFO: %u.\n",
			__func__, (unsigned) fifo_occupancy);

		/* Check if we missed the interrupt. */
		WARN_ON(cfhsi->dev->cfhsi_get_peer_wake(cfhsi->dev,
							&ca_wake));

		if (ca_wake) {
			dev_err(&cfhsi->ndev->dev, "%s: CA Wake missed !.\n",
				__func__);

			/* Clear the CFHSI_WAKE_UP_ACK bit to prevent race. */
			clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);

			/* Continue execution. */
			goto wake_ack;
		}

		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
		return;
	}
wake_ack:
	dev_dbg(&cfhsi->ndev->dev, "%s: Woken.\n",
		__func__);

	/* Clear power up bit. */
	set_bit(CFHSI_AWAKE, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);

	/* Resume read operation. */
	dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n", __func__);
	res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->dev);

	if (WARN_ON(res < 0))
		dev_err(&cfhsi->ndev->dev, "%s: RX err %d.\n", __func__, res);

	/* Clear power up acknowledgement. */
	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);

	spin_lock_bh(&cfhsi->lock);

	/* Resume transmit if queues are not empty. */
	if (!cfhsi_tx_queue_len(cfhsi)) {
		dev_dbg(&cfhsi->ndev->dev, "%s: Peer wake, start timer.\n",
			__func__);
		/* Start inactivity timer. */
		mod_timer(&cfhsi->inactivity_timer,
				jiffies + cfhsi->inactivity_timeout);
		spin_unlock_bh(&cfhsi->lock);
		return;
	}

	dev_dbg(&cfhsi->ndev->dev, "%s: Host wake.\n",
		__func__);

	spin_unlock_bh(&cfhsi->lock);

	/* Create HSI frame. */
	len = cfhsi_tx_frm((struct cfhsi_desc *)cfhsi->tx_buf, cfhsi);

	if (likely(len > 0)) {
		/* Set up new transfer. */
		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
				__func__, res);
			cfhsi_abort_tx(cfhsi);
		}
	} else {
		dev_err(&cfhsi->ndev->dev,
			"%s: Failed to create HSI frame: %d.\n",
			__func__, len);
	}
}

static void cfhsi_wake_down(struct work_struct *work)
{
	long ret;
	struct cfhsi *cfhsi = NULL;
	size_t fifo_occupancy = 0;
	int retry = CFHSI_WAKE_TOUT;

	cfhsi = container_of(work, struct cfhsi, wake_down_work);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Deactivate wake line. */
	cfhsi->dev->cfhsi_wake_down(cfhsi->dev);

	/* Wait for acknowledge. */
	ret = CFHSI_WAKE_TOUT;
	ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait,
					test_and_clear_bit(CFHSI_WAKE_DOWN_ACK,
							&cfhsi->bits), ret);
	if (ret < 0) {
		/* Interrupted by signal. */
		dev_err(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
			__func__, ret);
		return;
	} else if (!ret) {
		bool ca_wake = true;

		/* Timeout */
		dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n", __func__);

		/* Check if we missed the interrupt. */
		WARN_ON(cfhsi->dev->cfhsi_get_peer_wake(cfhsi->dev,
							&ca_wake));
		if (!ca_wake)
			dev_err(&cfhsi->ndev->dev, "%s: CA Wake missed !.\n",
				__func__);
	}

	/* Check FIFO occupancy. */
	while (retry) {
		WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
							&fifo_occupancy));

		if (!fifo_occupancy)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(1);
		retry--;
	}

	if (!retry)
		dev_err(&cfhsi->ndev->dev, "%s: FIFO Timeout.\n", __func__);

	/* Clear AWAKE condition. */
	clear_bit(CFHSI_AWAKE, &cfhsi->bits);

	/* Cancel pending RX requests. */
	cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);
}

static void cfhsi_out_of_sync(struct work_struct *work)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(work, struct cfhsi, out_of_sync_work);

	rtnl_lock();
	dev_close(cfhsi->ndev);
	rtnl_unlock();
}

static void cfhsi_wake_up_cb(struct cfhsi_drv *drv)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(drv, struct cfhsi, drv);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
	wake_up_interruptible(&cfhsi->wake_up_wait);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Schedule wake up work queue if the peer initiates. */
	if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
		queue_work(cfhsi->wq, &cfhsi->wake_up_work);
}

static void cfhsi_wake_down_cb(struct cfhsi_drv *drv)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(drv, struct cfhsi, drv);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	/* Initiating low power is only permitted by the host (us). */
	set_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
	wake_up_interruptible(&cfhsi->wake_down_wait);
}

static void cfhsi_aggregation_tout(unsigned long arg)
{
	struct cfhsi *cfhsi = (struct cfhsi *)arg;

	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	cfhsi_start_tx(cfhsi);
}

static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct cfhsi *cfhsi = NULL;
	int start_xfer = 0;
	int timer_active;
	int prio;

	if (!dev)
		return -EINVAL;

	cfhsi = netdev_priv(dev);

	switch (skb->priority) {
	case TC_PRIO_BESTEFFORT:
	case TC_PRIO_FILLER:
	case TC_PRIO_BULK:
		prio = CFHSI_PRIO_BEBK;
		break;
	case TC_PRIO_INTERACTIVE_BULK:
		prio = CFHSI_PRIO_VI;
		break;
	case TC_PRIO_INTERACTIVE:
		prio = CFHSI_PRIO_VO;
		break;
	case TC_PRIO_CONTROL:
	default:
		prio = CFHSI_PRIO_CTL;
		break;
	}

	spin_lock_bh(&cfhsi->lock);

	/* Update aggregation statistics */
	cfhsi_update_aggregation_stats(cfhsi, skb, 1);

	/* Queue the SKB */
	skb_queue_tail(&cfhsi->qhead[prio], skb);

	/* Sanity check; xmit should not be called after unregister_netdev */
	if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
		spin_unlock_bh(&cfhsi->lock);
		cfhsi_abort_tx(cfhsi);
		return -EINVAL;
	}

	/* Send flow off if number of packets is above high water mark. */
	if (!cfhsi->flow_off_sent &&
		cfhsi_tx_queue_len(cfhsi) > cfhsi->q_high_mark &&
		cfhsi->cfdev.flowctrl) {
		cfhsi->flow_off_sent = 1;
		cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
	}

	if (cfhsi->tx_state == CFHSI_TX_STATE_IDLE) {
		cfhsi->tx_state = CFHSI_TX_STATE_XFER;
		start_xfer = 1;
	}

	if (!start_xfer) {
		/* Send aggregate if it is possible */
		bool aggregate_ready =
			cfhsi_can_send_aggregate(cfhsi) &&
			del_timer(&cfhsi->aggregation_timer) > 0;
		spin_unlock_bh(&cfhsi->lock);
		if (aggregate_ready)
			cfhsi_start_tx(cfhsi);
		return 0;
	}

	/* Delete inactivity timer if started. */
	timer_active = del_timer_sync(&cfhsi->inactivity_timer);

	spin_unlock_bh(&cfhsi->lock);

	if (timer_active) {
		struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
		int len;
		int res;

		/* Create HSI frame. */
		len = cfhsi_tx_frm(desc, cfhsi);
		WARN_ON(!len);

		/* Set up new transfer. */
		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
				__func__, res);
			cfhsi_abort_tx(cfhsi);
		}
	} else {
		/* Schedule wake up work queue if we initiate. */
		if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
			queue_work(cfhsi->wq, &cfhsi->wake_up_work);
	}

	return 0;
}

static const struct net_device_ops cfhsi_ops;

static void cfhsi_setup(struct net_device *dev)
{
	int i;
	struct cfhsi *cfhsi = netdev_priv(dev);
	dev->features = 0;
	dev->netdev_ops = &cfhsi_ops;
	dev->type = ARPHRD_CAIF;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
	dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
	dev->tx_queue_len = 0;
	dev->destructor = free_netdev;
	for (i = 0; i < CFHSI_PRIO_LAST; ++i)
		skb_queue_head_init(&cfhsi->qhead[i]);
	cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
	cfhsi->cfdev.use_frag = false;
	cfhsi->cfdev.use_stx = false;
	cfhsi->cfdev.use_fcs = false;
	cfhsi->ndev = dev;
}

int cfhsi_probe(struct platform_device *pdev)
{
	struct cfhsi *cfhsi = NULL;
	struct net_device *ndev;

	int res;

	ndev = alloc_netdev(sizeof(struct cfhsi), "cfhsi%d", cfhsi_setup);
	if (!ndev)
		return -ENODEV;

	cfhsi = netdev_priv(ndev);
	cfhsi->ndev = ndev;
	cfhsi->pdev = pdev;

	/* Assign the HSI device. */
	cfhsi->dev = pdev->dev.platform_data;

	/* Assign the driver to this HSI device. */
	cfhsi->dev->drv = &cfhsi->drv;

	/* Register network device. */
	res = register_netdev(ndev);
	if (res) {
		dev_err(&ndev->dev, "%s: Registration error: %d.\n",
			__func__, res);
		free_netdev(ndev);
		/* Bail out here: the netdev (and cfhsi) is freed, so it
		 * must not be added to the device list below. */
		return res;
	}

	/* Add CAIF HSI device to list. */
	spin_lock(&cfhsi_list_lock);
	list_add_tail(&cfhsi->list, &cfhsi_list);
	spin_unlock(&cfhsi_list_lock);

	return res;
}

static int cfhsi_open(struct net_device *ndev)
{
	struct cfhsi *cfhsi = netdev_priv(ndev);
	int res;

	clear_bit(CFHSI_SHUTDOWN, &cfhsi->bits);

	/* Initialize state variables. */
	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
	cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;

	/* Set flow info */
	cfhsi->flow_off_sent = 0;
	cfhsi->q_low_mark = LOW_WATER_MARK;
	cfhsi->q_high_mark = HIGH_WATER_MARK;

	/*
	 * Allocate a TX buffer with the size of an HSI packet descriptor
	 * and the necessary room for CAIF payload frames.
	 */
	cfhsi->tx_buf = kzalloc(CFHSI_BUF_SZ_TX, GFP_KERNEL);
	if (!cfhsi->tx_buf) {
		res = -ENODEV;
		goto err_alloc_tx;
	}

	/*
	 * Allocate a RX buffer with the size of two HSI packet descriptors and
	 * the necessary room for CAIF payload frames.
	 */
	cfhsi->rx_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
	if (!cfhsi->rx_buf) {
		res = -ENODEV;
		goto err_alloc_rx;
	}

	cfhsi->rx_flip_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
	if (!cfhsi->rx_flip_buf) {
		res = -ENODEV;
		goto err_alloc_rx_flip;
	}

	/* Pre-calculate inactivity timeout. */
	if (inactivity_timeout != -1) {
		cfhsi->inactivity_timeout =
				inactivity_timeout * HZ / 1000;
		if (!cfhsi->inactivity_timeout)
			cfhsi->inactivity_timeout = 1;
		else if (cfhsi->inactivity_timeout > NEXT_TIMER_MAX_DELTA)
			cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
	} else {
		cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
	}
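
	/*
	 * Example of the conversion above: with the default
	 * inactivity_timeout of 1000 ms and HZ == 100, the pre-calculated
	 * value is 1000 * 100 / 1000 == 100 jiffies (one second); values
	 * below one tick are rounded up to 1 jiffy so the timer always
	 * fires.
	 */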

	/* Initialize aggregation timeout */
	cfhsi->aggregation_timeout = aggregation_timeout;

	/* Initialize receive variables. */
	cfhsi->rx_ptr = cfhsi->rx_buf;
	cfhsi->rx_len = CFHSI_DESC_SZ;

	/* Initialize spin locks. */
	spin_lock_init(&cfhsi->lock);

	/* Set up the driver. */
	cfhsi->drv.tx_done_cb = cfhsi_tx_done_cb;
	cfhsi->drv.rx_done_cb = cfhsi_rx_done_cb;
	cfhsi->drv.wake_up_cb = cfhsi_wake_up_cb;
	cfhsi->drv.wake_down_cb = cfhsi_wake_down_cb;

	/* Initialize the work queues. */
	INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
	INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down);
	INIT_WORK(&cfhsi->out_of_sync_work, cfhsi_out_of_sync);

	/* Clear all bit fields. */
	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
	clear_bit(CFHSI_AWAKE, &cfhsi->bits);

	/* Create work thread. */
	cfhsi->wq = create_singlethread_workqueue(cfhsi->pdev->name);
	if (!cfhsi->wq) {
		dev_err(&cfhsi->ndev->dev, "%s: Failed to create work queue.\n",
			__func__);
		res = -ENODEV;
		goto err_create_wq;
	}

	/* Initialize wait queues. */
	init_waitqueue_head(&cfhsi->wake_up_wait);
	init_waitqueue_head(&cfhsi->wake_down_wait);
	init_waitqueue_head(&cfhsi->flush_fifo_wait);

	/* Setup the inactivity timer. */
	init_timer(&cfhsi->inactivity_timer);
	cfhsi->inactivity_timer.data = (unsigned long)cfhsi;
	cfhsi->inactivity_timer.function = cfhsi_inactivity_tout;
	/* Setup the slowpath RX timer. */
	init_timer(&cfhsi->rx_slowpath_timer);
	cfhsi->rx_slowpath_timer.data = (unsigned long)cfhsi;
	cfhsi->rx_slowpath_timer.function = cfhsi_rx_slowpath;
	/* Setup the aggregation timer. */
	init_timer(&cfhsi->aggregation_timer);
	cfhsi->aggregation_timer.data = (unsigned long)cfhsi;
	cfhsi->aggregation_timer.function = cfhsi_aggregation_tout;

	/* Activate HSI interface. */
	res = cfhsi->dev->cfhsi_up(cfhsi->dev);
	if (res) {
		dev_err(&cfhsi->ndev->dev,
			"%s: can't activate HSI interface: %d.\n",
			__func__, res);
		goto err_activate;
	}

	/* Flush FIFO */
	res = cfhsi_flush_fifo(cfhsi);
	if (res) {
		dev_err(&cfhsi->ndev->dev, "%s: Can't flush FIFO: %d.\n",
			__func__, res);
		goto err_net_reg;
	}
	return res;

 err_net_reg:
	cfhsi->dev->cfhsi_down(cfhsi->dev);
 err_activate:
	destroy_workqueue(cfhsi->wq);
 err_create_wq:
	kfree(cfhsi->rx_flip_buf);
 err_alloc_rx_flip:
	kfree(cfhsi->rx_buf);
 err_alloc_rx:
	kfree(cfhsi->tx_buf);
 err_alloc_tx:
	return res;
}

static int cfhsi_close(struct net_device *ndev)
{
	struct cfhsi *cfhsi = netdev_priv(ndev);
	u8 *tx_buf, *rx_buf, *flip_buf;

	/* going to shutdown driver */
	set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);

	/* Flush workqueue */
	flush_workqueue(cfhsi->wq);

	/* Delete timers if pending */
	del_timer_sync(&cfhsi->inactivity_timer);
	del_timer_sync(&cfhsi->rx_slowpath_timer);
	del_timer_sync(&cfhsi->aggregation_timer);

	/* Cancel pending RX request (if any) */
	cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);

	/* Destroy workqueue */
	destroy_workqueue(cfhsi->wq);

	/* Store buffers: will be freed later. */
	tx_buf = cfhsi->tx_buf;
	rx_buf = cfhsi->rx_buf;
	flip_buf = cfhsi->rx_flip_buf;
	/* Flush transmit queues. */
	cfhsi_abort_tx(cfhsi);

	/* Deactivate interface */
	cfhsi->dev->cfhsi_down(cfhsi->dev);

	/* Free buffers. */
	kfree(tx_buf);
	kfree(rx_buf);
	kfree(flip_buf);
	return 0;
}

static const struct net_device_ops cfhsi_ops = {
	.ndo_open = cfhsi_open,
	.ndo_stop = cfhsi_close,
	.ndo_start_xmit = cfhsi_xmit
};

int cfhsi_remove(struct platform_device *pdev)
{
	struct list_head *list_node;
	struct list_head *n;
	struct cfhsi *cfhsi = NULL;
	struct cfhsi_dev *dev;

	dev = (struct cfhsi_dev *)pdev->dev.platform_data;
	spin_lock(&cfhsi_list_lock);
	list_for_each_safe(list_node, n, &cfhsi_list) {
		cfhsi = list_entry(list_node, struct cfhsi, list);
		/* Find the corresponding device. */
		if (cfhsi->dev == dev) {
			/* Remove from list. */
			list_del(list_node);
			spin_unlock(&cfhsi_list_lock);
			return 0;
		}
	}
	spin_unlock(&cfhsi_list_lock);
	return -ENODEV;
}

struct platform_driver cfhsi_plat_drv = {
	.probe = cfhsi_probe,
	.remove = cfhsi_remove,
	.driver = {
		.name = "cfhsi",
		.owner = THIS_MODULE,
	},
};

static void __exit cfhsi_exit_module(void)
{
	struct list_head *list_node;
	struct list_head *n;
	struct cfhsi *cfhsi = NULL;

	spin_lock(&cfhsi_list_lock);
	list_for_each_safe(list_node, n, &cfhsi_list) {
		cfhsi = list_entry(list_node, struct cfhsi, list);

		/* Remove from list. */
		list_del(list_node);
		spin_unlock(&cfhsi_list_lock);

		unregister_netdevice(cfhsi->ndev);

		spin_lock(&cfhsi_list_lock);
	}
	spin_unlock(&cfhsi_list_lock);

	/* Unregister platform driver. */
	platform_driver_unregister(&cfhsi_plat_drv);
}

static int __init cfhsi_init_module(void)
{
	int result;

	/* Initialize spin lock. */
	spin_lock_init(&cfhsi_list_lock);

	/* Register platform driver. */
	result = platform_driver_register(&cfhsi_plat_drv);
	if (result) {
		printk(KERN_ERR "Could not register platform HSI driver: %d.\n",
			result);
		goto err_dev_register;
	}

 err_dev_register:
	return result;
}

module_init(cfhsi_init_module);
module_exit(cfhsi_exit_module);