/*
 * Copyright (C) ST-Ericsson AB 2010
 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
 * Authors: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com,
 *          Daniel Martensson / daniel.martensson@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt

#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>

#include <net/caif/caif_device.h>
#include <net/caif/caif_shm.h>

/* Number of shared memory buffers per direction. */
#define NR_TX_BUF		6
#define NR_RX_BUF		6

#define TX_BUF_SZ		0x2000
#define RX_BUF_SZ		0x2000

#define CAIF_NEEDED_HEADROOM	32

#define CAIF_FLOW_ON		1
#define CAIF_FLOW_OFF		0

#define LOW_WATERMARK		3
#define HIGH_WATERMARK		4

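/*
 * The watermarks above are hysteresis thresholds on the number of free TX
 * buffers: the mailbox callback switches CAIF flow control back on
 * (CAIF_FLOW_ON) once more than HIGH_WATERMARK TX buffers are empty, while
 * the TX work function switches it off (CAIF_FLOW_OFF) when fewer than
 * LOW_WATERMARK remain.
 */
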
/* Maximum number of CAIF buffers per shared memory buffer. */
#define SHM_MAX_FRMS_PER_BUF	10

/*
 * Size in bytes of the descriptor area
 * (With end of descriptor signalling)
 */
#define SHM_CAIF_DESC_SIZE	((SHM_MAX_FRMS_PER_BUF + 1) * \
					sizeof(struct shm_pck_desc))

/*
 * Offset to the first CAIF frame within a shared memory buffer.
 * Aligned on 32 bytes.
 */
#define SHM_CAIF_FRM_OFS	(SHM_CAIF_DESC_SIZE + (SHM_CAIF_DESC_SIZE % 32))

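/*
 * Worked example, assuming two u32 fields per descriptor (8 bytes each):
 * SHM_CAIF_DESC_SIZE = 11 * 8 = 88 bytes, 88 % 32 = 24, so
 * SHM_CAIF_FRM_OFS = 112.  Note that x + (x % 32) is only 32-byte aligned
 * for some descriptor sizes; 112 is aligned on 16 bytes.
 */
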
/* Number of bytes for CAIF shared memory header. */
#define SHM_HDR_LEN		1

/* Number of padding bytes for the complete CAIF frame. */
#define SHM_FRM_PAD_LEN		4

#define CAIF_MAX_MTU		4096

#define SHM_SET_FULL(x)		(((x+1) & 0x0F) << 0)
#define SHM_GET_FULL(x)		(((x >> 0) & 0x0F) - 1)

#define SHM_SET_EMPTY(x)	(((x+1) & 0x0F) << 4)
#define SHM_GET_EMPTY(x)	(((x >> 4) & 0x0F) - 1)

#define SHM_FULL_MASK		(0x0F << 0)
#define SHM_EMPTY_MASK		(0x0F << 4)

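/*
 * A mailbox message packs two buffer indices into one u32: bits 0-3 carry
 * (index + 1) of a buffer that has been filled, bits 4-7 carry (index + 1)
 * of a buffer that has been emptied.  A zero nibble means "no buffer".
 * For example, SHM_SET_FULL(2) evaluates to 0x03 and SHM_GET_FULL(0x03)
 * gives back 2.
 */
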
struct shm_pck_desc {
	/*
	 * Offset from start of shared memory area to start of
	 * shared memory CAIF frame.
	 */
	u32 frm_ofs;
	u32 frm_len;
};

struct buf_list {
	unsigned char *desc_vptr;
	u32 phy_addr;
	u32 index;
	u32 len;
	u32 frames;
	u32 frm_ofs;
	struct list_head list;
};

struct shm_caif_frm {
	/* Number of bytes of padding before the CAIF frame. */
	u8 hdr_ofs;
};

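/*
 * Layout of a single frame inside a shared memory buffer, as used by the
 * RX and TX work functions below: one SHM_HDR_LEN byte holding hdr_ofs,
 * followed by hdr_ofs padding bytes, the CAIF frame itself, and tail
 * padding up to a SHM_FRM_PAD_LEN boundary.
 */
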
struct shmdrv_layer {
	/* caif_dev_common must always be first in the structure. */
	struct caif_dev_common cfdev;

	u32 shm_base_addr;
	u32 shm_tx_addr;
	u32 shm_rx_addr;
	u32 tx_empty_available;
	spinlock_t lock;

	struct list_head tx_empty_list;
	struct list_head tx_pend_list;
	struct list_head tx_full_list;
	struct list_head rx_empty_list;
	struct list_head rx_pend_list;
	struct list_head rx_full_list;

	struct workqueue_struct *pshm_tx_workqueue;
	struct workqueue_struct *pshm_rx_workqueue;

	struct work_struct shm_tx_work;
	struct work_struct shm_rx_work;

	struct sk_buff_head sk_qhead;
	struct shmdev_layer *pshm_dev;
};

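/*
 * Buffer life cycle: TX buffers move from tx_empty_list to tx_full_list as
 * frames are appended, and back to tx_empty_list when the modem signals
 * them emptied via the mailbox.  RX buffers move from rx_empty_list to
 * rx_full_list when the mailbox reports them filled, to rx_pend_list once
 * their frames have been pushed up the stack, and back to rx_empty_list
 * when the modem is told they may be reused.
 */
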
static int shm_netdev_open(struct net_device *shm_netdev)
{
	netif_wake_queue(shm_netdev);
	return 0;
}

static int shm_netdev_close(struct net_device *shm_netdev)
{
	netif_stop_queue(shm_netdev);
	return 0;
}

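/*
 * Mailbox interrupt callback.  The mailbox message reports buffers that the
 * modem has filled (RX) or emptied (TX); the callback only re-queues the
 * affected buffers and defers all processing to the work queues.
 */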
int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
{
	struct buf_list *pbuf;
	struct shmdrv_layer *pshm_drv;
	struct list_head *pos;
	u32 avail_emptybuff = 0;
	unsigned long flags = 0;

	pshm_drv = priv;

	/* Check for received buffers. */
	if (mbx_msg & SHM_FULL_MASK) {
		int idx;

		spin_lock_irqsave(&pshm_drv->lock, flags);

		/* Check whether we have any outstanding buffers. */
		if (list_empty(&pshm_drv->rx_empty_list)) {

			/* Release spin lock. */
			spin_unlock_irqrestore(&pshm_drv->lock, flags);

			/* We print even in IRQ context... */
			pr_warn("No empty Rx buffers to fill: "
					"mbx_msg:%x\n", mbx_msg);

			/* Bail out. */
			goto err_sync;
		}

		pbuf =
			list_entry(pshm_drv->rx_empty_list.next,
					struct buf_list, list);
		idx = pbuf->index;

		/* Check buffer synchronization. */
		if (idx != SHM_GET_FULL(mbx_msg)) {

			/* We print even in IRQ context... */
			pr_warn("phyif_shm_mbx_msg_cb: RX full out of sync:"
				" idx:%d, msg:%x SHM_GET_FULL(mbx_msg):%x\n",
					idx, mbx_msg, SHM_GET_FULL(mbx_msg));

			spin_unlock_irqrestore(&pshm_drv->lock, flags);

			/* Bail out. */
			goto err_sync;
		}

		list_del_init(&pbuf->list);
		list_add_tail(&pbuf->list, &pshm_drv->rx_full_list);

		spin_unlock_irqrestore(&pshm_drv->lock, flags);

		/* Schedule RX work queue. */
		if (!work_pending(&pshm_drv->shm_rx_work))
			queue_work(pshm_drv->pshm_rx_workqueue,
					&pshm_drv->shm_rx_work);
	}

	/* Check for emptied buffers. */
	if (mbx_msg & SHM_EMPTY_MASK) {
		int idx;

		spin_lock_irqsave(&pshm_drv->lock, flags);

		/* Check whether we have any outstanding buffers. */
		if (list_empty(&pshm_drv->tx_full_list)) {

			/* We print even in IRQ context... */
			pr_warn("No TX to empty: msg:%x\n", mbx_msg);

			spin_unlock_irqrestore(&pshm_drv->lock, flags);

			/* Bail out. */
			goto err_sync;
		}

		pbuf =
			list_entry(pshm_drv->tx_full_list.next,
					struct buf_list, list);
		idx = pbuf->index;

		/* Check buffer synchronization. */
		if (idx != SHM_GET_EMPTY(mbx_msg)) {

			spin_unlock_irqrestore(&pshm_drv->lock, flags);

			/* We print even in IRQ context... */
			pr_warn("phyif_shm_mbx_msg_cb: TX empty "
					"out of sync:idx:%d, msg:%x\n",
					idx, mbx_msg);

			/* Bail out. */
			goto err_sync;
		}
		list_del_init(&pbuf->list);

		/* Reset buffer parameters. */
		pbuf->frames = 0;
		pbuf->frm_ofs = SHM_CAIF_FRM_OFS;

		list_add_tail(&pbuf->list, &pshm_drv->tx_empty_list);

		/* Check the available no. of buffers in the empty list. */
		list_for_each(pos, &pshm_drv->tx_empty_list)
			avail_emptybuff++;

		/* Check whether we have to wake up the transmitter. */
		if ((avail_emptybuff > HIGH_WATERMARK) &&
				(!pshm_drv->tx_empty_available)) {
			pshm_drv->tx_empty_available = 1;
			spin_unlock_irqrestore(&pshm_drv->lock, flags);
			pshm_drv->cfdev.flowctrl
					(pshm_drv->pshm_dev->pshm_netdev,
					CAIF_FLOW_ON);

			/* Schedule the work queue if required. */
			if (!work_pending(&pshm_drv->shm_tx_work))
				queue_work(pshm_drv->pshm_tx_workqueue,
						&pshm_drv->shm_tx_work);
		} else
			spin_unlock_irqrestore(&pshm_drv->lock, flags);
	}

	return 0;

err_sync:
	return -EIO;
}

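/*
 * RX work function: walks the descriptor area of each filled RX buffer,
 * copies every CAIF frame into a freshly allocated skb and hands it to the
 * network stack, then parks the buffer on rx_pend_list until the TX work
 * function reports it empty to the modem.
 */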
static void shm_rx_work_func(struct work_struct *rx_work)
{
	struct shmdrv_layer *pshm_drv;
	struct buf_list *pbuf;
	unsigned long flags = 0;
	struct sk_buff *skb;
	char *p;
	int ret;

	pshm_drv = container_of(rx_work, struct shmdrv_layer, shm_rx_work);

	while (1) {

		struct shm_pck_desc *pck_desc;

		spin_lock_irqsave(&pshm_drv->lock, flags);

		/* Check for received buffers. */
		if (list_empty(&pshm_drv->rx_full_list)) {
			spin_unlock_irqrestore(&pshm_drv->lock, flags);
			break;
		}

		pbuf =
			list_entry(pshm_drv->rx_full_list.next, struct buf_list,
					list);
		list_del_init(&pbuf->list);
		spin_unlock_irqrestore(&pshm_drv->lock, flags);

		/* Retrieve pointer to start of the packet descriptor area. */
		pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr;

		/*
		 * Check whether descriptor contains a CAIF shared memory
		 * frame.
		 */
		while (pck_desc->frm_ofs) {
			unsigned int frm_buf_ofs;
			unsigned int frm_pck_ofs;
			unsigned int frm_pck_len;

			/*
			 * Check whether offset is within buffer limits
			 * (lower bound).
			 */
			if (pck_desc->frm_ofs <
				(pbuf->phy_addr - pshm_drv->shm_base_addr))
				break;
			/*
			 * Check whether offset is within buffer limits
			 * (upper bound).
			 */
			if (pck_desc->frm_ofs >
				((pbuf->phy_addr - pshm_drv->shm_base_addr) +
					pbuf->len))
				break;

			/* Calculate offset from start of buffer. */
			frm_buf_ofs =
				pck_desc->frm_ofs - (pbuf->phy_addr -
						pshm_drv->shm_base_addr);

			/*
			 * Calculate offset and length of CAIF packet while
			 * taking care of the shared memory header.
			 */
			frm_pck_ofs =
				frm_buf_ofs + SHM_HDR_LEN +
				(*(pbuf->desc_vptr + frm_buf_ofs));
			frm_pck_len =
				(pck_desc->frm_len - SHM_HDR_LEN -
				(*(pbuf->desc_vptr + frm_buf_ofs)));

			/* Check whether CAIF packet is within buffer limits. */
			if ((frm_pck_ofs + pck_desc->frm_len) > pbuf->len)
				break;

			/* Get a suitable CAIF packet and copy in data. */
			skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev,
							frm_pck_len + 1);
			if (skb == NULL) {
				pr_info("OOM: Try next frame in descriptor\n");
				break;
			}

			p = skb_put(skb, frm_pck_len);
			memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len);

			skb->protocol = htons(ETH_P_CAIF);
			skb_reset_mac_header(skb);
			skb->dev = pshm_drv->pshm_dev->pshm_netdev;

			/* Push received packet up the stack. */
			ret = netif_rx_ni(skb);

			if (!ret) {
				pshm_drv->pshm_dev->pshm_netdev->stats.
								rx_packets++;
				pshm_drv->pshm_dev->pshm_netdev->stats.
						rx_bytes += pck_desc->frm_len;
			} else
				++pshm_drv->pshm_dev->pshm_netdev->stats.
								rx_dropped;
			/* Move to next packet descriptor. */
			pck_desc++;
		}

		spin_lock_irqsave(&pshm_drv->lock, flags);
		list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list);

		spin_unlock_irqrestore(&pshm_drv->lock, flags);
	}

	/* Schedule the work queue if required. */
	if (!work_pending(&pshm_drv->shm_tx_work))
		queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
}

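/*
 * TX work function: returns emptied RX buffers to the modem, applies flow
 * control against the watermarks, packs as many queued skbs as fit into
 * the first free TX buffer (up to SHM_MAX_FRMS_PER_BUF frames) and signals
 * the result through the mailbox.
 */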
static void shm_tx_work_func(struct work_struct *tx_work)
{
	u32 mbox_msg;
	unsigned int frmlen, avail_emptybuff, append = 0;
	unsigned long flags = 0;
	struct buf_list *pbuf = NULL;
	struct shmdrv_layer *pshm_drv;
	struct shm_caif_frm *frm;
	struct sk_buff *skb;
	struct shm_pck_desc *pck_desc;
	struct list_head *pos;

	pshm_drv = container_of(tx_work, struct shmdrv_layer, shm_tx_work);

	do {
		/* Initialize mailbox message. */
		mbox_msg = 0x00;
		avail_emptybuff = 0;

		spin_lock_irqsave(&pshm_drv->lock, flags);

		/* Check for pending receive buffers. */
		if (!list_empty(&pshm_drv->rx_pend_list)) {

			pbuf = list_entry(pshm_drv->rx_pend_list.next,
						struct buf_list, list);

			list_del_init(&pbuf->list);
			list_add_tail(&pbuf->list, &pshm_drv->rx_empty_list);
			/*
			 * Value index is never changed,
			 * so read access should be safe.
			 */
			mbox_msg |= SHM_SET_EMPTY(pbuf->index);
		}

		skb = skb_peek(&pshm_drv->sk_qhead);
		if (skb == NULL)
			goto send_msg;

		/* Check the available no. of buffers in the empty list. */
		list_for_each(pos, &pshm_drv->tx_empty_list)
			avail_emptybuff++;

		if ((avail_emptybuff < LOW_WATERMARK) &&
					pshm_drv->tx_empty_available) {
			/* Update blocking condition. */
			pshm_drv->tx_empty_available = 0;
			spin_unlock_irqrestore(&pshm_drv->lock, flags);
			pshm_drv->cfdev.flowctrl
					(pshm_drv->pshm_dev->pshm_netdev,
					CAIF_FLOW_OFF);
			spin_lock_irqsave(&pshm_drv->lock, flags);
		}
		/*
		 * We simply return back to the caller if we do not have space
		 * either in Tx pending list or Tx empty list. In this case,
		 * we hold the received skb in the skb list, waiting to
		 * be transmitted once Tx buffers become available.
		 */
		if (list_empty(&pshm_drv->tx_empty_list))
			goto send_msg;

		/* Get the first free Tx buffer. */
		pbuf = list_entry(pshm_drv->tx_empty_list.next,
						struct buf_list, list);
		do {
			if (append) {
				skb = skb_peek(&pshm_drv->sk_qhead);
				if (skb == NULL)
					break;
			}

			frm = (struct shm_caif_frm *)
					(pbuf->desc_vptr + pbuf->frm_ofs);

			frm->hdr_ofs = 0;
			frmlen = 0;
			frmlen += SHM_HDR_LEN + frm->hdr_ofs + skb->len;

			/* Add tail padding if needed. */
			if (frmlen % SHM_FRM_PAD_LEN)
				frmlen += SHM_FRM_PAD_LEN -
						(frmlen % SHM_FRM_PAD_LEN);

			/*
			 * Verify that packet, header and additional padding
			 * can fit within the buffer frame area.
			 */
			if (frmlen >= (pbuf->len - pbuf->frm_ofs))
				break;

			if (!append) {
				list_del_init(&pbuf->list);
				append = 1;
			}

			skb = skb_dequeue(&pshm_drv->sk_qhead);
			if (skb == NULL)
				break;

			/* Copy in CAIF frame. */
			skb_copy_bits(skb, 0, pbuf->desc_vptr +
					pbuf->frm_ofs + SHM_HDR_LEN +
					frm->hdr_ofs, skb->len);

			pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++;
			pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes +=
									frmlen;
			dev_kfree_skb_irq(skb);

			/* Fill in the shared memory packet descriptor area. */
			pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr);
			/* Forward to current frame. */
			pck_desc += pbuf->frames;

			pck_desc->frm_ofs = (pbuf->phy_addr -
						pshm_drv->shm_base_addr) +
							pbuf->frm_ofs;
			pck_desc->frm_len = frmlen;
			/* Terminate packet descriptor area. */
			pck_desc++;
			pck_desc->frm_ofs = 0;
			/* Update buffer parameters. */
			pbuf->frames++;
			pbuf->frm_ofs += frmlen + (frmlen % 32);

		} while (pbuf->frames < SHM_MAX_FRMS_PER_BUF);

		/* Assign buffer as full. */
		list_add_tail(&pbuf->list, &pshm_drv->tx_full_list);
		append = 0;
		mbox_msg |= SHM_SET_FULL(pbuf->index);
send_msg:
		spin_unlock_irqrestore(&pshm_drv->lock, flags);

		if (mbox_msg)
			pshm_drv->pshm_dev->pshmdev_mbxsend
					(pshm_drv->pshm_dev->shm_id, mbox_msg);
	} while (mbox_msg);
}

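/*
 * Transmit entry point: the skb is only queued here; the actual copy into
 * shared memory is deferred to the TX work queue.
 */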
static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev)
{
	struct shmdrv_layer *pshm_drv;

	pshm_drv = netdev_priv(shm_netdev);

	skb_queue_tail(&pshm_drv->sk_qhead, skb);

	/* Schedule Tx work queue for deferred processing of skbs. */
	if (!work_pending(&pshm_drv->shm_tx_work))
		queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);

	return 0;
}

static const struct net_device_ops netdev_ops = {
	.ndo_open = shm_netdev_open,
	.ndo_stop = shm_netdev_close,
	.ndo_start_xmit = shm_netdev_tx,
};

static void shm_netdev_setup(struct net_device *pshm_netdev)
{
	struct shmdrv_layer *pshm_drv;

	pshm_netdev->netdev_ops = &netdev_ops;
	pshm_netdev->mtu = CAIF_MAX_MTU;
	pshm_netdev->type = ARPHRD_CAIF;
	pshm_netdev->hard_header_len = CAIF_NEEDED_HEADROOM;
	pshm_netdev->tx_queue_len = 0;
	pshm_netdev->destructor = free_netdev;

	pshm_drv = netdev_priv(pshm_netdev);

	/* Initialize structures in a clean state. */
	memset(pshm_drv, 0, sizeof(struct shmdrv_layer));

	pshm_drv->cfdev.link_select = CAIF_LINK_LOW_LATENCY;
}

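/*
 * Probe: allocates and registers the CAIF network device, hooks up the
 * mailbox callback, and carves the shared memory area into NR_TX_BUF TX
 * buffers followed by NR_RX_BUF RX buffers (RX reuses the TX area in
 * loopback mode).
 */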
int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
{
	int result, j;
	struct shmdrv_layer *pshm_drv = NULL;

	pshm_dev->pshm_netdev = alloc_netdev(sizeof(struct shmdrv_layer),
						"cfshm%d", shm_netdev_setup);
	if (!pshm_dev->pshm_netdev)
		return -ENOMEM;

	pshm_drv = netdev_priv(pshm_dev->pshm_netdev);
	pshm_drv->pshm_dev = pshm_dev;

	/*
	 * Initialization starts with the verification of the
	 * availability of MBX driver by calling its setup function.
	 * MBX driver must be available by this time for proper
	 * functioning of SHM driver.
	 */
	if ((pshm_dev->pshmdev_mbxsetup
			(caif_shmdrv_rx_cb, pshm_dev, pshm_drv)) != 0) {
		pr_warn("Could not config. SHM Mailbox,"
				" Bailing out.....\n");
		free_netdev(pshm_dev->pshm_netdev);
		return -ENODEV;
	}

	skb_queue_head_init(&pshm_drv->sk_qhead);

	pr_info("SHM DEVICE[%d] PROBED BY DRIVER, NEW SHM DRIVER"
			" INSTANCE AT pshm_drv =0x%p\n",
			pshm_drv->pshm_dev->shm_id, pshm_drv);

	if (pshm_dev->shm_total_sz <
			(NR_TX_BUF * TX_BUF_SZ + NR_RX_BUF * RX_BUF_SZ)) {

		pr_warn("ERROR, Amount of available"
				" Phys. SHM cannot accommodate current SHM "
				"driver configuration, Bailing out ...\n");
		free_netdev(pshm_dev->pshm_netdev);
		return -ENOMEM;
	}

	pshm_drv->shm_base_addr = pshm_dev->shm_base_addr;
	pshm_drv->shm_tx_addr = pshm_drv->shm_base_addr;

	if (pshm_dev->shm_loopback)
		pshm_drv->shm_rx_addr = pshm_drv->shm_tx_addr;
	else
		pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr +
						(NR_TX_BUF * TX_BUF_SZ);

	spin_lock_init(&pshm_drv->lock);
	INIT_LIST_HEAD(&pshm_drv->tx_empty_list);
	INIT_LIST_HEAD(&pshm_drv->tx_pend_list);
	INIT_LIST_HEAD(&pshm_drv->tx_full_list);

	INIT_LIST_HEAD(&pshm_drv->rx_empty_list);
	INIT_LIST_HEAD(&pshm_drv->rx_pend_list);
	INIT_LIST_HEAD(&pshm_drv->rx_full_list);

	INIT_WORK(&pshm_drv->shm_tx_work, shm_tx_work_func);
	INIT_WORK(&pshm_drv->shm_rx_work, shm_rx_work_func);

	pshm_drv->pshm_tx_workqueue =
			create_singlethread_workqueue("shm_tx_work");
	pshm_drv->pshm_rx_workqueue =
			create_singlethread_workqueue("shm_rx_work");

	for (j = 0; j < NR_TX_BUF; j++) {
		struct buf_list *tx_buf =
				kmalloc(sizeof(struct buf_list), GFP_KERNEL);

		if (tx_buf == NULL) {
			pr_warn("ERROR, Could not"
					" allocate dynamic mem. for tx_buf,"
					" Bailing out ...\n");
			free_netdev(pshm_dev->pshm_netdev);
			return -ENOMEM;
		}
		tx_buf->index = j;
		tx_buf->phy_addr = pshm_drv->shm_tx_addr + (TX_BUF_SZ * j);
		tx_buf->len = TX_BUF_SZ;
		tx_buf->frames = 0;
		tx_buf->frm_ofs = SHM_CAIF_FRM_OFS;

		if (pshm_dev->shm_loopback)
			tx_buf->desc_vptr = (unsigned char *)tx_buf->phy_addr;
		else
			tx_buf->desc_vptr =
					ioremap(tx_buf->phy_addr, TX_BUF_SZ);

		list_add_tail(&tx_buf->list, &pshm_drv->tx_empty_list);
	}

	for (j = 0; j < NR_RX_BUF; j++) {
		struct buf_list *rx_buf =
				kmalloc(sizeof(struct buf_list), GFP_KERNEL);

		if (rx_buf == NULL) {
			pr_warn("ERROR, Could not"
					" allocate dynamic mem. for rx_buf,"
					" Bailing out ...\n");
			free_netdev(pshm_dev->pshm_netdev);
			return -ENOMEM;
		}
		rx_buf->index = j;
		rx_buf->phy_addr = pshm_drv->shm_rx_addr + (RX_BUF_SZ * j);
		rx_buf->len = RX_BUF_SZ;

		if (pshm_dev->shm_loopback)
			rx_buf->desc_vptr = (unsigned char *)rx_buf->phy_addr;
		else
			rx_buf->desc_vptr =
					ioremap(rx_buf->phy_addr, RX_BUF_SZ);

		list_add_tail(&rx_buf->list, &pshm_drv->rx_empty_list);
	}

	pshm_drv->tx_empty_available = 1;
	result = register_netdev(pshm_dev->pshm_netdev);
	if (result)
		pr_warn("ERROR[%d], SHM could not, "
			"register with NW FRMWK Bailing out ...\n", result);

	return result;
}

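/*
 * Remove: drains all six buffer lists, freeing the buf_list entries, then
 * destroys the work queues and unregisters the network device.
 */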
void caif_shmcore_remove(struct net_device *pshm_netdev)
{
	struct buf_list *pbuf;
	struct shmdrv_layer *pshm_drv = NULL;

	pshm_drv = netdev_priv(pshm_netdev);

	while (!(list_empty(&pshm_drv->tx_pend_list))) {
		pbuf =
			list_entry(pshm_drv->tx_pend_list.next,
					struct buf_list, list);

		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->tx_full_list))) {
		pbuf =
			list_entry(pshm_drv->tx_full_list.next,
					struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->tx_empty_list))) {
		pbuf =
			list_entry(pshm_drv->tx_empty_list.next,
					struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->rx_full_list))) {
		pbuf =
			list_entry(pshm_drv->rx_full_list.next,
					struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->rx_pend_list))) {
		pbuf =
			list_entry(pshm_drv->rx_pend_list.next,
					struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->rx_empty_list))) {
		pbuf =
			list_entry(pshm_drv->rx_empty_list.next,
					struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	/* Destroy work queues. */
	destroy_workqueue(pshm_drv->pshm_tx_workqueue);
	destroy_workqueue(pshm_drv->pshm_rx_workqueue);

	unregister_netdev(pshm_netdev);
}