/*
 * Copyright (C) ST-Ericsson AB 2010
 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
 * Authors: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com,
 *          Daniel Martensson / daniel.martensson@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 */
#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/io.h>

#include <net/caif/caif_device.h>
#include <net/caif/caif_shm.h>
#define TX_BUF_SZ		0x2000
#define RX_BUF_SZ		0x2000

#define CAIF_NEEDED_HEADROOM	32

#define CAIF_FLOW_ON		1
#define CAIF_FLOW_OFF		0
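
/*
 * Flow-control thresholds, counted in free Tx buffers: transmission is
 * blocked (CAIF_FLOW_OFF) when fewer than LOW_WATERMARK Tx buffers remain
 * empty, and re-enabled (CAIF_FLOW_ON) once more than HIGH_WATERMARK
 * buffers have been reported empty again.
 */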
#define LOW_WATERMARK		3
#define HIGH_WATERMARK		4
/* Maximum number of CAIF buffers per shared memory buffer. */
#define SHM_MAX_FRMS_PER_BUF	10
/*
 * Size in bytes of the descriptor area
 * (with end of descriptor signalling).
 */
#define SHM_CAIF_DESC_SIZE	((SHM_MAX_FRMS_PER_BUF + 1) * \
					sizeof(struct shm_pck_desc))
/*
 * Offset to the first CAIF frame within a shared memory buffer.
 * Aligned on 32 bytes.
 */
#define SHM_CAIF_FRM_OFS	(SHM_CAIF_DESC_SIZE + (SHM_CAIF_DESC_SIZE % 32))
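
/*
 * Layout of a shared memory buffer: a descriptor area holding up to
 * SHM_MAX_FRMS_PER_BUF packet descriptors plus one terminating descriptor
 * (frm_ofs == 0 marks the end), followed by the CAIF frame area starting
 * at SHM_CAIF_FRM_OFS.
 */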
/* Number of bytes for CAIF shared memory header. */
#define SHM_HDR_LEN		1

/* Number of padding bytes for the complete CAIF frame. */
#define SHM_FRM_PAD_LEN		4

#define CAIF_MAX_MTU		4096
#define SHM_SET_FULL(x)		(((x + 1) & 0x0F) << 0)
#define SHM_GET_FULL(x)		(((x >> 0) & 0x0F) - 1)

#define SHM_SET_EMPTY(x)	(((x + 1) & 0x0F) << 4)
#define SHM_GET_EMPTY(x)	(((x >> 4) & 0x0F) - 1)

#define SHM_FULL_MASK		(0x0F << 0)
#define SHM_EMPTY_MASK		(0x0F << 4)
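
/*
 * Mailbox message layout: the low nibble carries the index (plus one) of a
 * buffer that has been filled, the high nibble the index (plus one) of a
 * buffer that has been emptied. A zero nibble means "no buffer".
 */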
struct shm_pck_desc {
	/*
	 * Offset from start of shared memory area to start of
	 * shared memory CAIF frame.
	 */
	u32 frm_ofs;
	u32 frm_len;
};

struct buf_list {
	unsigned char *desc_vptr;
	u32 phy_addr;
	u32 index;
	u32 len;
	u32 frames;
	u32 frm_ofs;
	struct list_head list;
};

struct shm_caif_frm {
	/* Number of bytes of padding before the CAIF frame. */
	u8 hdr_ofs;
};

struct shmdrv_layer {
	/* caif_dev_common must always be first in the structure. */
	struct caif_dev_common cfdev;

	u32 shm_tx_addr;
	u32 shm_rx_addr;
	u32 shm_base_addr;
	u32 tx_empty_available;
	spinlock_t lock;

	struct list_head tx_empty_list;
	struct list_head tx_pend_list;
	struct list_head tx_full_list;
	struct list_head rx_empty_list;
	struct list_head rx_pend_list;
	struct list_head rx_full_list;

	struct workqueue_struct *pshm_tx_workqueue;
	struct workqueue_struct *pshm_rx_workqueue;

	struct work_struct shm_tx_work;
	struct work_struct shm_rx_work;

	struct sk_buff_head sk_qhead;
	struct shmdev_layer *pshm_dev;
};
static int shm_netdev_open(struct net_device *shm_netdev)
{
	netif_wake_queue(shm_netdev);
	return 0;
}
static int shm_netdev_close(struct net_device *shm_netdev)
{
	netif_stop_queue(shm_netdev);
	return 0;
}
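
/*
 * Mailbox interrupt callback: a "full" notification moves the next empty
 * Rx buffer to rx_full_list and schedules the Rx work queue; an "empty"
 * notification returns the oldest full Tx buffer to tx_empty_list and, if
 * enough buffers are free again, turns transmit flow back on.
 */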
int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
{
	struct buf_list *pbuf;
	struct shmdrv_layer *pshm_drv;
	struct list_head *pos;
	u32 avail_emptybuff = 0;
	unsigned long flags = 0;

	pshm_drv = priv;

	/* Check for received buffers. */
	if (mbx_msg & SHM_FULL_MASK) {
		int idx;

		spin_lock_irqsave(&pshm_drv->lock, flags);

		/* Check whether we have any outstanding buffers. */
		if (list_empty(&pshm_drv->rx_empty_list)) {

			/* Release spin lock. */
			spin_unlock_irqrestore(&pshm_drv->lock, flags);

			/* We print even in IRQ context... */
			pr_warn("No empty Rx buffers to fill: "
					"mbx_msg:%x\n", mbx_msg);

			return -ENOMEM;
		}

		pbuf = list_entry(pshm_drv->rx_empty_list.next,
				struct buf_list, list);
		idx = pbuf->index;

		/* Check buffer synchronization. */
		if (idx != SHM_GET_FULL(mbx_msg)) {

			/* We print even in IRQ context... */
			pr_warn("phyif_shm_mbx_msg_cb: RX full out of sync:"
				" idx:%d, msg:%x SHM_GET_FULL(mbx_msg):%x\n",
				idx, mbx_msg, SHM_GET_FULL(mbx_msg));

			spin_unlock_irqrestore(&pshm_drv->lock, flags);

			return -EIO;
		}

		list_del_init(&pbuf->list);
		list_add_tail(&pbuf->list, &pshm_drv->rx_full_list);

		spin_unlock_irqrestore(&pshm_drv->lock, flags);

		/* Schedule RX work queue. */
		if (!work_pending(&pshm_drv->shm_rx_work))
			queue_work(pshm_drv->pshm_rx_workqueue,
					&pshm_drv->shm_rx_work);
	}

	/* Check for emptied buffers. */
	if (mbx_msg & SHM_EMPTY_MASK) {
		int idx;

		spin_lock_irqsave(&pshm_drv->lock, flags);

		/* Check whether we have any outstanding buffers. */
		if (list_empty(&pshm_drv->tx_full_list)) {

			/* We print even in IRQ context... */
			pr_warn("No TX to empty: msg:%x\n", mbx_msg);

			spin_unlock_irqrestore(&pshm_drv->lock, flags);

			return -ENOMEM;
		}

		pbuf = list_entry(pshm_drv->tx_full_list.next,
				struct buf_list, list);
		idx = pbuf->index;

		/* Check buffer synchronization. */
		if (idx != SHM_GET_EMPTY(mbx_msg)) {

			spin_unlock_irqrestore(&pshm_drv->lock, flags);

			/* We print even in IRQ context... */
			pr_warn("phyif_shm_mbx_msg_cb: TX empty "
				"out of sync:idx:%d, msg:%x\n", idx, mbx_msg);

			return -EIO;
		}

		list_del_init(&pbuf->list);

		/* Reset buffer parameters. */
		pbuf->frames = 0;
		pbuf->frm_ofs = SHM_CAIF_FRM_OFS;

		list_add_tail(&pbuf->list, &pshm_drv->tx_empty_list);

		/* Check the available no. of buffers in the empty list. */
		list_for_each(pos, &pshm_drv->tx_empty_list)
			avail_emptybuff++;

		/* Check whether we have to wake up the transmitter. */
		if ((avail_emptybuff > HIGH_WATERMARK) &&
				(!pshm_drv->tx_empty_available)) {
			pshm_drv->tx_empty_available = 1;
			pshm_drv->cfdev.flowctrl
					(pshm_drv->pshm_dev->pshm_netdev,
					 CAIF_FLOW_ON);

			spin_unlock_irqrestore(&pshm_drv->lock, flags);

			/* Schedule the Tx work queue, if required. */
			if (!work_pending(&pshm_drv->shm_tx_work))
				queue_work(pshm_drv->pshm_tx_workqueue,
						&pshm_drv->shm_tx_work);
		} else
			spin_unlock_irqrestore(&pshm_drv->lock, flags);
	}

	return 0;
}
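
/*
 * Rx work function: for each buffer on rx_full_list, walk its descriptor
 * area, copy every CAIF frame into a fresh skb and hand it to the network
 * stack, then park the buffer on rx_pend_list so the Tx work function can
 * report it empty to the modem.
 */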
static void shm_rx_work_func(struct work_struct *rx_work)
{
	struct shmdrv_layer *pshm_drv;
	struct buf_list *pbuf;
	unsigned long flags = 0;
	struct sk_buff *skb;
	unsigned char *p;
	int ret;

	pshm_drv = container_of(rx_work, struct shmdrv_layer, shm_rx_work);

	while (1) {

		struct shm_pck_desc *pck_desc;

		spin_lock_irqsave(&pshm_drv->lock, flags);

		/* Check for received buffers. */
		if (list_empty(&pshm_drv->rx_full_list)) {
			spin_unlock_irqrestore(&pshm_drv->lock, flags);
			break;
		}

		pbuf = list_entry(pshm_drv->rx_full_list.next,
				struct buf_list, list);
		list_del_init(&pbuf->list);
		spin_unlock_irqrestore(&pshm_drv->lock, flags);

		/* Retrieve pointer to start of the packet descriptor area. */
		pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr;

		/*
		 * Check whether descriptor contains a CAIF shared memory
		 * frame.
		 */
		while (pck_desc->frm_ofs) {
			unsigned int frm_buf_ofs;
			unsigned int frm_pck_ofs;
			unsigned int frm_pck_len;

			/*
			 * Check whether offset is within buffer limits
			 * (lower bound).
			 */
			if (pck_desc->frm_ofs <
				(pbuf->phy_addr - pshm_drv->shm_base_addr))
				break;

			/*
			 * Check whether offset is within buffer limits
			 * (upper bound).
			 */
			if (pck_desc->frm_ofs >
				((pbuf->phy_addr - pshm_drv->shm_base_addr) +
					pbuf->len))
				break;

			/* Calculate offset from start of buffer. */
			frm_buf_ofs =
				pck_desc->frm_ofs - (pbuf->phy_addr -
						pshm_drv->shm_base_addr);

			/*
			 * Calculate offset and length of CAIF packet while
			 * taking care of the shared memory header.
			 */
			frm_pck_ofs =
				frm_buf_ofs + SHM_HDR_LEN +
					(*(pbuf->desc_vptr + frm_buf_ofs));
			frm_pck_len =
				(pck_desc->frm_len - SHM_HDR_LEN -
					(*(pbuf->desc_vptr + frm_buf_ofs)));

			/* Check whether CAIF packet is within buffer limits. */
			if ((frm_pck_ofs + pck_desc->frm_len) > pbuf->len)
				break;

			/* Get a suitable CAIF packet and copy in data. */
			skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev,
					frm_pck_len + 1);
			if (skb == NULL)
				break;

			p = skb_put(skb, frm_pck_len);
			memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len);

			skb->protocol = htons(ETH_P_CAIF);
			skb_reset_mac_header(skb);
			skb->dev = pshm_drv->pshm_dev->pshm_netdev;

			/* Push received packet up the stack. */
			ret = netif_rx_ni(skb);

			if (!ret) {
				pshm_drv->pshm_dev->pshm_netdev->stats.
						rx_packets++;
				pshm_drv->pshm_dev->pshm_netdev->stats.
						rx_bytes += pck_desc->frm_len;
			} else
				++pshm_drv->pshm_dev->pshm_netdev->stats.
						rx_dropped;

			/* Move to next packet descriptor. */
			pck_desc++;
		}

		spin_lock_irqsave(&pshm_drv->lock, flags);
		list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list);
		spin_unlock_irqrestore(&pshm_drv->lock, flags);
	}

	/* Schedule the Tx work queue, if required. */
	if (!work_pending(&pshm_drv->shm_tx_work))
		queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
}
static void shm_tx_work_func(struct work_struct *tx_work)
{
	u32 mbox_msg;
	unsigned int frmlen, avail_emptybuff, append = 0;
	unsigned long flags = 0;
	struct buf_list *pbuf = NULL;
	struct shmdrv_layer *pshm_drv;
	struct shm_caif_frm *frm;
	struct sk_buff *skb;
	struct shm_pck_desc *pck_desc;
	struct list_head *pos;

	pshm_drv = container_of(tx_work, struct shmdrv_layer, shm_tx_work);

	do {
		/* Initialize mailbox message. */
		mbox_msg = 0x00;
		avail_emptybuff = 0;

		spin_lock_irqsave(&pshm_drv->lock, flags);

		/* Check for pending receive buffers. */
		if (!list_empty(&pshm_drv->rx_pend_list)) {
			pbuf = list_entry(pshm_drv->rx_pend_list.next,
					struct buf_list, list);

			list_del_init(&pbuf->list);
			list_add_tail(&pbuf->list, &pshm_drv->rx_empty_list);

			/*
			 * Value index is never changed,
			 * so read access should be safe.
			 */
			mbox_msg |= SHM_SET_EMPTY(pbuf->index);
		}

		skb = skb_peek(&pshm_drv->sk_qhead);
		if (skb == NULL)
			goto send_msg;

		/* Check the available no. of buffers in the empty list. */
		list_for_each(pos, &pshm_drv->tx_empty_list)
			avail_emptybuff++;

		if ((avail_emptybuff < LOW_WATERMARK) &&
				pshm_drv->tx_empty_available) {
			/* Update blocking condition. */
			pshm_drv->tx_empty_available = 0;
			pshm_drv->cfdev.flowctrl
					(pshm_drv->pshm_dev->pshm_netdev,
					 CAIF_FLOW_OFF);
		}

		/*
		 * We simply return back to the caller if we do not have space
		 * either in Tx pending list or Tx empty list. In this case,
		 * we hold the received skb in the skb list, waiting to
		 * be transmitted once Tx buffers become available.
		 */
		if (list_empty(&pshm_drv->tx_empty_list))
			goto send_msg;

		/* Get the first free Tx buffer. */
		pbuf = list_entry(pshm_drv->tx_empty_list.next,
				struct buf_list, list);

		do {
			skb = skb_peek(&pshm_drv->sk_qhead);
			if (skb == NULL)
				break;

			frm = (struct shm_caif_frm *)
					(pbuf->desc_vptr + pbuf->frm_ofs);

			frm->hdr_ofs = 0;
			frmlen = 0;
			frmlen += SHM_HDR_LEN + frm->hdr_ofs + skb->len;

			/* Add tail padding if needed. */
			if (frmlen % SHM_FRM_PAD_LEN)
				frmlen += SHM_FRM_PAD_LEN -
						(frmlen % SHM_FRM_PAD_LEN);

			/*
			 * Verify that packet, header and additional padding
			 * can fit within the buffer frame area.
			 */
			if (frmlen >= (pbuf->len - pbuf->frm_ofs))
				break;

			if (!append) {
				list_del_init(&pbuf->list);
				append = 1;
			}

			skb = skb_dequeue(&pshm_drv->sk_qhead);
			if (skb == NULL)
				break;

			/* Copy in CAIF frame. */
			skb_copy_bits(skb, 0, pbuf->desc_vptr +
					pbuf->frm_ofs + SHM_HDR_LEN +
					frm->hdr_ofs, skb->len);

			pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++;
			pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes +=
					frmlen;
			dev_kfree_skb_irq(skb);

			/* Fill in the shared memory packet descriptor area. */
			pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr);
			/* Forward to current frame. */
			pck_desc += pbuf->frames;
			pck_desc->frm_ofs = (pbuf->phy_addr -
					pshm_drv->shm_base_addr) +
					pbuf->frm_ofs;
			pck_desc->frm_len = frmlen;
			/* Terminate packet descriptor area. */
			pck_desc++;
			pck_desc->frm_ofs = 0;
			/* Update buffer parameters. */
			pbuf->frames++;
			pbuf->frm_ofs += frmlen + (frmlen % 32);

		} while (pbuf->frames < SHM_MAX_FRMS_PER_BUF);

		/* Assign buffer as full. */
		list_add_tail(&pbuf->list, &pshm_drv->tx_full_list);
		append = 0;
		mbox_msg |= SHM_SET_FULL(pbuf->index);
send_msg:
		spin_unlock_irqrestore(&pshm_drv->lock, flags);

		if (mbox_msg)
			pshm_drv->pshm_dev->pshmdev_mbxsend
					(pshm_drv->pshm_dev->shm_id, mbox_msg);
	} while (mbox_msg);
}
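
/*
 * Transmit entry point: the skb is only queued here; copying it into
 * shared memory is deferred to the Tx work queue.
 */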
static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev)
{
	struct shmdrv_layer *pshm_drv;
	unsigned long flags = 0;

	pshm_drv = netdev_priv(shm_netdev);

	spin_lock_irqsave(&pshm_drv->lock, flags);

	skb_queue_tail(&pshm_drv->sk_qhead, skb);

	spin_unlock_irqrestore(&pshm_drv->lock, flags);

	/* Schedule the Tx work queue for deferred processing of skbs. */
	if (!work_pending(&pshm_drv->shm_tx_work))
		queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);

	return 0;
}
static const struct net_device_ops netdev_ops = {
	.ndo_open = shm_netdev_open,
	.ndo_stop = shm_netdev_close,
	.ndo_start_xmit = shm_netdev_tx,
};
static void shm_netdev_setup(struct net_device *pshm_netdev)
{
	struct shmdrv_layer *pshm_drv;

	pshm_netdev->netdev_ops = &netdev_ops;
	pshm_netdev->mtu = CAIF_MAX_MTU;
	pshm_netdev->type = ARPHRD_CAIF;
	pshm_netdev->hard_header_len = CAIF_NEEDED_HEADROOM;
	pshm_netdev->tx_queue_len = 0;
	pshm_netdev->destructor = free_netdev;

	pshm_drv = netdev_priv(pshm_netdev);

	/* Initialize structures in a clean state. */
	memset(pshm_drv, 0, sizeof(struct shmdrv_layer));

	pshm_drv->cfdev.link_select = CAIF_LINK_LOW_LATENCY;
}
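
/*
 * Probe: allocate the CAIF SHM network device, register the mailbox
 * callback, carve the shared memory area into Tx and Rx buffers and
 * register the netdevice.
 */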
int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
{
	int result, j;
	struct shmdrv_layer *pshm_drv = NULL;

	pshm_dev->pshm_netdev = alloc_netdev(sizeof(struct shmdrv_layer),
						"cfshm%d", shm_netdev_setup);
	if (!pshm_dev->pshm_netdev)
		return -ENOMEM;

	pshm_drv = netdev_priv(pshm_dev->pshm_netdev);
	pshm_drv->pshm_dev = pshm_dev;

	/*
	 * Initialization starts with the verification of the
	 * availability of the MBX driver by calling its setup function.
	 * The MBX driver must be available by this time for proper
	 * functioning of the SHM driver.
	 */
	if ((pshm_dev->pshmdev_mbxsetup
			(caif_shmdrv_rx_cb, pshm_dev, pshm_drv)) != 0) {
		pr_warn("Could not configure SHM mailbox, bailing out\n");
		free_netdev(pshm_dev->pshm_netdev);
		return -ENODEV;
	}

	skb_queue_head_init(&pshm_drv->sk_qhead);

	pr_info("SHM DEVICE[%d] PROBED BY DRIVER, NEW SHM DRIVER"
			" INSTANCE AT pshm_drv=0x%p\n",
			pshm_drv->pshm_dev->shm_id, pshm_drv);

	if (pshm_dev->shm_total_sz <
			(NR_TX_BUF * TX_BUF_SZ + NR_RX_BUF * RX_BUF_SZ)) {
		pr_warn("ERROR, available phys. SHM cannot accommodate the "
				"current SHM driver configuration, bailing out\n");
		free_netdev(pshm_dev->pshm_netdev);
		return -ENOMEM;
	}

	pshm_drv->shm_base_addr = pshm_dev->shm_base_addr;
	pshm_drv->shm_tx_addr = pshm_drv->shm_base_addr;

	if (pshm_dev->shm_loopback)
		pshm_drv->shm_rx_addr = pshm_drv->shm_tx_addr;
	else
		pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr +
						(NR_TX_BUF * TX_BUF_SZ);

	spin_lock_init(&pshm_drv->lock);

	INIT_LIST_HEAD(&pshm_drv->tx_empty_list);
	INIT_LIST_HEAD(&pshm_drv->tx_pend_list);
	INIT_LIST_HEAD(&pshm_drv->tx_full_list);

	INIT_LIST_HEAD(&pshm_drv->rx_empty_list);
	INIT_LIST_HEAD(&pshm_drv->rx_pend_list);
	INIT_LIST_HEAD(&pshm_drv->rx_full_list);

	INIT_WORK(&pshm_drv->shm_tx_work, shm_tx_work_func);
	INIT_WORK(&pshm_drv->shm_rx_work, shm_rx_work_func);

	pshm_drv->pshm_tx_workqueue =
			create_singlethread_workqueue("shm_tx_work");
	pshm_drv->pshm_rx_workqueue =
			create_singlethread_workqueue("shm_rx_work");

	for (j = 0; j < NR_TX_BUF; j++) {
		struct buf_list *tx_buf =
				kmalloc(sizeof(struct buf_list), GFP_KERNEL);

		if (tx_buf == NULL) {
			pr_warn("ERROR, could not allocate tx_buf, bailing out\n");
			free_netdev(pshm_dev->pshm_netdev);
			return -ENOMEM;
		}

		tx_buf->index = j;
		tx_buf->phy_addr = pshm_drv->shm_tx_addr + (TX_BUF_SZ * j);
		tx_buf->len = TX_BUF_SZ;
		tx_buf->frames = 0;
		tx_buf->frm_ofs = SHM_CAIF_FRM_OFS;

		if (pshm_dev->shm_loopback)
			tx_buf->desc_vptr = (char *)tx_buf->phy_addr;
		else
			tx_buf->desc_vptr =
					ioremap(tx_buf->phy_addr, TX_BUF_SZ);

		list_add_tail(&tx_buf->list, &pshm_drv->tx_empty_list);
	}

	for (j = 0; j < NR_RX_BUF; j++) {
		struct buf_list *rx_buf =
				kmalloc(sizeof(struct buf_list), GFP_KERNEL);

		if (rx_buf == NULL) {
			pr_warn("ERROR, could not allocate rx_buf, bailing out\n");
			free_netdev(pshm_dev->pshm_netdev);
			return -ENOMEM;
		}

		rx_buf->index = j;
		rx_buf->phy_addr = pshm_drv->shm_rx_addr + (RX_BUF_SZ * j);
		rx_buf->len = RX_BUF_SZ;

		if (pshm_dev->shm_loopback)
			rx_buf->desc_vptr = (char *)rx_buf->phy_addr;
		else
			rx_buf->desc_vptr =
					ioremap(rx_buf->phy_addr, RX_BUF_SZ);

		list_add_tail(&rx_buf->list, &pshm_drv->rx_empty_list);
	}

	pshm_drv->tx_empty_available = 1;

	result = register_netdev(pshm_dev->pshm_netdev);
	if (result)
		pr_warn("ERROR[%d], SHM could not register with the network "
				"framework, bailing out\n", result);

	return result;
}
void caif_shmcore_remove(struct net_device *pshm_netdev)
{
	struct buf_list *pbuf;
	struct shmdrv_layer *pshm_drv = NULL;

	pshm_drv = netdev_priv(pshm_netdev);

	while (!(list_empty(&pshm_drv->tx_pend_list))) {
		pbuf = list_entry(pshm_drv->tx_pend_list.next,
				struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->tx_full_list))) {
		pbuf = list_entry(pshm_drv->tx_full_list.next,
				struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->tx_empty_list))) {
		pbuf = list_entry(pshm_drv->tx_empty_list.next,
				struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->rx_full_list))) {
		pbuf = list_entry(pshm_drv->rx_full_list.next,
				struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->rx_pend_list))) {
		pbuf = list_entry(pshm_drv->rx_pend_list.next,
				struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->rx_empty_list))) {
		pbuf = list_entry(pshm_drv->rx_empty_list.next,
				struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	/* Destroy work queues. */
	destroy_workqueue(pshm_drv->pshm_tx_workqueue);
	destroy_workqueue(pshm_drv->pshm_rx_workqueue);

	unregister_netdev(pshm_netdev);
}