/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <net/cfg80211.h>
#include <net/rtnetlink.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>

#include "fwil_types.h"
#include "wl_cfg80211.h"

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver.");
MODULE_LICENSE("Dual BSD/GPL");

#define MAX_WAIT_FOR_8021X_TX		50	/* msecs */

/* AMPDU rx reordering definitions */
#define BRCMF_RXREORDER_FLOWID_OFFSET		0
#define BRCMF_RXREORDER_MAXIDX_OFFSET		2
#define BRCMF_RXREORDER_FLAGS_OFFSET		4
#define BRCMF_RXREORDER_CURIDX_OFFSET		6
#define BRCMF_RXREORDER_EXPIDX_OFFSET		8

#define BRCMF_RXREORDER_DEL_FLOW		0x01
#define BRCMF_RXREORDER_FLUSH_ALL		0x02
#define BRCMF_RXREORDER_CURIDX_VALID		0x04
#define BRCMF_RXREORDER_EXPIDX_VALID		0x08
#define BRCMF_RXREORDER_NEW_HOLE		0x10
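/* The offsets above index fields in the per-packet reorder metadata that
 * firmware attaches to received frames (see reorder_data[] usage in
 * brcmf_rxreorder_process_info() below); the flag bits select which of
 * those fields are valid and which reordering action to take.
 */
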
module_param_named(debug, brcmf_msg_level, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(debug, "level of debug output");

static int brcmf_p2p_enable;
module_param_named(p2pon, brcmf_p2p_enable, int, 0);
MODULE_PARM_DESC(p2pon, "enable p2p management functionality");

char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx)
{
        if (ifidx < 0 || ifidx >= BRCMF_MAX_IFS) {
                brcmf_err("ifidx %d out of range\n", ifidx);
                return "<if_bad>";
        }

        if (drvr->iflist[ifidx] == NULL) {
                brcmf_err("null i/f %d\n", ifidx);
                return "<if_null>";
        }

        if (drvr->iflist[ifidx]->ndev)
                return drvr->iflist[ifidx]->ndev->name;

        return "<if_none>";
}

static void _brcmf_set_multicast_list(struct work_struct *work)
{
        struct brcmf_if *ifp;
        struct net_device *ndev;
        struct netdev_hw_addr *ha;
        u32 cmd_value, cnt;
        __le32 cnt_le;
        char *buf, *bufp;
        u32 buflen;
        s32 err;

        ifp = container_of(work, struct brcmf_if, multicast_work);

        brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

        ndev = ifp->ndev;

        /* Determine initial value of allmulti flag */
        cmd_value = (ndev->flags & IFF_ALLMULTI) ? true : false;

        /* Send down the multicast list first. */
        cnt = netdev_mc_count(ndev);
        buflen = sizeof(cnt) + (cnt * ETH_ALEN);
        buf = kmalloc(buflen, GFP_ATOMIC);
        if (!buf)
                return;
        bufp = buf;

        cnt_le = cpu_to_le32(cnt);
        memcpy(bufp, &cnt_le, sizeof(cnt_le));
        bufp += sizeof(cnt_le);

        netdev_for_each_mc_addr(ha, ndev) {
                if (!cnt)
                        break;
                memcpy(bufp, ha->addr, ETH_ALEN);
                bufp += ETH_ALEN;
                cnt--;
        }

        err = brcmf_fil_iovar_data_set(ifp, "mcast_list", buf, buflen);
        if (err < 0) {
                brcmf_err("Setting mcast_list failed, %d\n", err);
                cmd_value = cnt ? true : cmd_value;
        }

        kfree(buf);

        /*
         * Now send the allmulti setting. This is based on the setting in the
         * net_device flags, but might be modified above to be turned on if we
         * were trying to set some addresses and dongle rejected it...
         */
        err = brcmf_fil_iovar_int_set(ifp, "allmulti", cmd_value);
        if (err < 0)
                brcmf_err("Setting allmulti failed, %d\n", err);

        /* Finally, pick up the PROMISC flag */
        cmd_value = (ndev->flags & IFF_PROMISC) ? true : false;
        err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PROMISC, cmd_value);
        if (err < 0)
                brcmf_err("Setting BRCMF_C_SET_PROMISC failed, %d\n", err);
}

static void _brcmf_set_mac_address(struct work_struct *work)
{
        struct brcmf_if *ifp;
        s32 err;

        ifp = container_of(work, struct brcmf_if, setmacaddr_work);

        brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

        err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", ifp->mac_addr,
                                       ETH_ALEN);
        if (err < 0) {
                brcmf_err("Setting cur_etheraddr failed, %d\n", err);
        } else {
                brcmf_dbg(TRACE, "MAC address updated to %pM\n",
                          ifp->mac_addr);
                memcpy(ifp->ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
        }
}

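/* The two ndo callbacks below only record the requested change and schedule
 * a work item; the actual "cur_etheraddr"/"mcast_list" firmware updates are
 * issued from the workers above, which run in process context and may sleep.
 */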
static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr)
{
        struct brcmf_if *ifp = netdev_priv(ndev);
        struct sockaddr *sa = (struct sockaddr *)addr;

        memcpy(&ifp->mac_addr, sa->sa_data, ETH_ALEN);
        schedule_work(&ifp->setmacaddr_work);

        return 0;
}

static void brcmf_netdev_set_multicast_list(struct net_device *ndev)
{
        struct brcmf_if *ifp = netdev_priv(ndev);

        schedule_work(&ifp->multicast_work);
}

static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
                                           struct net_device *ndev)
{
        int ret;
        struct brcmf_if *ifp = netdev_priv(ndev);
        struct brcmf_pub *drvr = ifp->drvr;
        struct ethhdr *eh;

        brcmf_dbg(DATA, "Enter, idx=%d\n", ifp->bssidx);

        /* Can the device send data? */
        if (drvr->bus_if->state != BRCMF_BUS_DATA) {
                brcmf_err("xmit rejected state=%d\n", drvr->bus_if->state);
                netif_stop_queue(ndev);
                dev_kfree_skb(skb);
                ret = -ENODEV;
                goto done;
        }

        if (!drvr->iflist[ifp->bssidx]) {
                brcmf_err("bad ifidx %d\n", ifp->bssidx);
                netif_stop_queue(ndev);
                dev_kfree_skb(skb);
                ret = -ENODEV;
                goto done;
        }

        /* Make sure there's enough room for any header */
        if (skb_headroom(skb) < drvr->hdrlen) {
                struct sk_buff *skb2;

                brcmf_dbg(INFO, "%s: insufficient headroom\n",
                          brcmf_ifname(drvr, ifp->bssidx));
                drvr->bus_if->tx_realloc++;
                skb2 = skb_realloc_headroom(skb, drvr->hdrlen);
                dev_kfree_skb(skb);
                skb = skb2;
                if (skb == NULL) {
                        brcmf_err("%s: skb_realloc_headroom failed\n",
                                  brcmf_ifname(drvr, ifp->bssidx));
                        ret = -ENOMEM;
                        goto done;
                }
        }

        /* validate length for ether packet */
        if (skb->len < sizeof(*eh)) {
                ret = -EINVAL;
                dev_kfree_skb(skb);
                goto done;
        }

        ret = brcmf_fws_process_skb(ifp, skb);

done:
        if (ret) {
                ifp->stats.tx_dropped++;
        } else {
                ifp->stats.tx_packets++;
                ifp->stats.tx_bytes += skb->len;
        }

        /* Return ok: we always eat the packet */
        return NETDEV_TX_OK;
}

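/* Flow-control helper: ifp->netif_stop is a bitmask of
 * brcmf_netif_stop_reason values. The netdev queue is stopped as soon as the
 * first reason bit gets set and is only woken again once the last reason has
 * been cleared.
 */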
void brcmf_txflowblock_if(struct brcmf_if *ifp,
                          enum brcmf_netif_stop_reason reason, bool state)
{
        unsigned long flags;

        if (!ifp || !ifp->ndev)
                return;

        brcmf_dbg(TRACE, "enter: idx=%d stop=0x%X reason=%d state=%d\n",
                  ifp->bssidx, ifp->netif_stop, reason, state);

        spin_lock_irqsave(&ifp->netif_stop_lock, flags);
        if (state) {
                if (!ifp->netif_stop)
                        netif_stop_queue(ifp->ndev);
                ifp->netif_stop |= reason;
        } else {
                ifp->netif_stop &= ~reason;
                if (!ifp->netif_stop)
                        netif_wake_queue(ifp->ndev);
        }
        spin_unlock_irqrestore(&ifp->netif_stop_lock, flags);
}

void brcmf_txflowblock(struct device *dev, bool state)
{
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_pub *drvr = bus_if->drvr;

        brcmf_dbg(TRACE, "Enter\n");

        brcmf_fws_bus_blocked(drvr, state);
}

static void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
{
        skb->dev = ifp->ndev;
        skb->protocol = eth_type_trans(skb, skb->dev);

        if (skb->pkt_type == PACKET_MULTICAST)
                ifp->stats.multicast++;

        /* Process special event packets */
        brcmf_fweh_process_skb(ifp->drvr, skb);

        if (!(ifp->ndev->flags & IFF_UP)) {
                brcmu_pkt_buf_free_skb(skb);
                return;
        }

        ifp->stats.rx_bytes += skb->len;
        ifp->stats.rx_packets++;

        brcmf_dbg(DATA, "rx proto=0x%X\n", ntohs(skb->protocol));

        /* If the receive is not processed inside an ISR,
         * the softirqd must be woken explicitly to service
         * the NET_RX_SOFTIRQ. This is handled by netif_rx_ni().
         */
        if (in_interrupt())
                netif_rx(skb);
        else
                netif_rx_ni(skb);
}

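/* Collect pending packets from the reorder slots, starting at slot 'start'
 * and walking (with wrap-around at max_idx) until 'end' is reached, onto
 * 'skb_list'; the flow's pend_pkts count is reduced accordingly.
 */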
static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
                                         u8 start, u8 end,
                                         struct sk_buff_head *skb_list)
{
        /* initialize return list */
        __skb_queue_head_init(skb_list);

        if (rfi->pend_pkts == 0) {
                brcmf_dbg(INFO, "no packets in reorder queue\n");
                return;
        }

        do {
                if (rfi->pktslots[start]) {
                        __skb_queue_tail(skb_list, rfi->pktslots[start]);
                        rfi->pktslots[start] = NULL;
                }
                start++;
                if (start > rfi->max_idx)
                        start = 0;
        } while (start != end);
        rfi->pend_pkts -= skb_queue_len(skb_list);
}

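/* AMPDU rx reordering: firmware tags each received packet with metadata
 * (reorder_data) naming a flow id, the current/expected sequence slots and
 * action flags. Out-of-order packets are parked in the flow's pktslots[]
 * until the hole is filled, then released in order via brcmf_netif_rx().
 */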
static void brcmf_rxreorder_process_info(struct brcmf_if *ifp, u8 *reorder_data,
                                         struct sk_buff *pkt)
{
        u8 flow_id, max_idx, cur_idx, exp_idx, end_idx;
        struct brcmf_ampdu_rx_reorder *rfi;
        struct sk_buff_head reorder_list;
        struct sk_buff *pnext;
        u8 flags;
        u32 buf_size;

        flow_id = reorder_data[BRCMF_RXREORDER_FLOWID_OFFSET];
        flags = reorder_data[BRCMF_RXREORDER_FLAGS_OFFSET];

        /* validate flags and flow id */
        if (flags == 0xFF) {
                brcmf_err("invalid flags...so ignore this packet\n");
                brcmf_netif_rx(ifp, pkt);
                return;
        }

        rfi = ifp->drvr->reorder_flows[flow_id];
        if (flags & BRCMF_RXREORDER_DEL_FLOW) {
                brcmf_dbg(INFO, "flow-%d: delete\n", flow_id);

                if (rfi == NULL) {
                        brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
                                  flow_id);
                        brcmf_netif_rx(ifp, pkt);
                        return;
                }

                brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, rfi->exp_idx,
                                             &reorder_list);
                /* add the last packet */
                __skb_queue_tail(&reorder_list, pkt);
                kfree(rfi);
                ifp->drvr->reorder_flows[flow_id] = NULL;
                goto netif_rx;
        }
        /* from here on we need a flow reorder instance */
        if (rfi == NULL) {
                buf_size = sizeof(*rfi);
                max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];

                buf_size += (max_idx + 1) * sizeof(pkt);

                /* allocate space for flow reorder info */
                brcmf_dbg(INFO, "flow-%d: start, maxidx %d\n",
                          flow_id, max_idx);
                rfi = kzalloc(buf_size, GFP_ATOMIC);
                if (rfi == NULL) {
                        brcmf_err("failed to alloc buffer\n");
                        brcmf_netif_rx(ifp, pkt);
                        return;
                }

                ifp->drvr->reorder_flows[flow_id] = rfi;
                rfi->pktslots = (struct sk_buff **)(rfi + 1);
                rfi->max_idx = max_idx;
        }
        if (flags & BRCMF_RXREORDER_NEW_HOLE) {
                if (rfi->pend_pkts) {
                        brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx,
                                                     rfi->exp_idx,
                                                     &reorder_list);
                        WARN_ON(rfi->pend_pkts);
                } else {
                        __skb_queue_head_init(&reorder_list);
                }
                rfi->cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
                rfi->exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
                rfi->max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
                rfi->pktslots[rfi->cur_idx] = pkt;
                rfi->pend_pkts++;
                brcmf_dbg(DATA, "flow-%d: new hole %d (%d), pending %d\n",
                          flow_id, rfi->cur_idx, rfi->exp_idx, rfi->pend_pkts);
        } else if (flags & BRCMF_RXREORDER_CURIDX_VALID) {
                cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
                exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];

                if ((exp_idx == rfi->exp_idx) && (cur_idx != rfi->exp_idx)) {
                        /* still in the current hole */
                        /* enqueue the current on the buffer chain */
                        if (rfi->pktslots[cur_idx] != NULL) {
                                brcmf_dbg(INFO, "HOLE: ERROR buffer pending..free it\n");
                                brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
                                rfi->pktslots[cur_idx] = NULL;
                        }
                        rfi->pktslots[cur_idx] = pkt;
                        rfi->pend_pkts++;
                        rfi->cur_idx = cur_idx;
                        brcmf_dbg(DATA, "flow-%d: store pkt %d (%d), pending %d\n",
                                  flow_id, cur_idx, exp_idx, rfi->pend_pkts);

                        /* can return now as there is no reorder
                         * list to process.
                         */
                        return;
                }
                if (rfi->exp_idx == cur_idx) {
                        if (rfi->pktslots[cur_idx] != NULL) {
                                brcmf_dbg(INFO, "error buffer pending..free it\n");
                                brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
                                rfi->pktslots[cur_idx] = NULL;
                        }
                        rfi->pktslots[cur_idx] = pkt;
                        rfi->pend_pkts++;

                        /* got the expected one. flush from current to expected
                         * and update expected
                         */
                        brcmf_dbg(DATA, "flow-%d: expected %d (%d), pending %d\n",
                                  flow_id, cur_idx, exp_idx, rfi->pend_pkts);

                        rfi->cur_idx = cur_idx;
                        rfi->exp_idx = exp_idx;

                        brcmf_rxreorder_get_skb_list(rfi, cur_idx, exp_idx,
                                                     &reorder_list);
                        brcmf_dbg(DATA, "flow-%d: freeing buffers %d, pending %d\n",
                                  flow_id, skb_queue_len(&reorder_list),
                                  rfi->pend_pkts);
                } else {
                        brcmf_dbg(DATA, "flow-%d (0x%x): both moved, old %d/%d, new %d/%d\n",
                                  flow_id, flags, rfi->cur_idx, rfi->exp_idx,
                                  cur_idx, exp_idx);
                        if (flags & BRCMF_RXREORDER_FLUSH_ALL)
                                end_idx = rfi->exp_idx;
                        else
                                end_idx = exp_idx;

                        /* flush pkts first */
                        brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
                                                     &reorder_list);

                        if (exp_idx == ((cur_idx + 1) % (rfi->max_idx + 1))) {
                                __skb_queue_tail(&reorder_list, pkt);
                        } else {
                                rfi->pktslots[cur_idx] = pkt;
                                rfi->pend_pkts++;
                        }
                        rfi->exp_idx = exp_idx;
                        rfi->cur_idx = cur_idx;
                }
        } else {
                /* explicit window move updating the expected index */
                exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];

                brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n",
                          flow_id, flags, rfi->exp_idx, exp_idx);
                if (flags & BRCMF_RXREORDER_FLUSH_ALL)
                        end_idx = rfi->exp_idx;
                else
                        end_idx = exp_idx;

                brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
                                             &reorder_list);
                __skb_queue_tail(&reorder_list, pkt);
                /* set the new expected idx */
                rfi->exp_idx = exp_idx;
        }
netif_rx:
        skb_queue_walk_safe(&reorder_list, pkt, pnext) {
                __skb_unlink(pkt, &reorder_list);
                brcmf_netif_rx(ifp, pkt);
        }
}

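/* Receive path entry from the bus layer: strip the protocol header to find
 * the target interface, then either hand the packet to the AMPDU reorderer
 * (when reorder metadata is present in skb->cb) or deliver it directly.
 */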
void brcmf_rx_frame(struct device *dev, struct sk_buff *skb)
{
        struct brcmf_if *ifp;
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_pub *drvr = bus_if->drvr;
        struct brcmf_skb_reorder_data *rd;
        u8 ifidx;
        int ret;

        brcmf_dbg(DATA, "Enter: %s: rxp=%p\n", dev_name(dev), skb);

        /* process and remove protocol-specific header */
        ret = brcmf_proto_hdrpull(drvr, true, &ifidx, skb);
        ifp = drvr->iflist[ifidx];

        if (ret || !ifp || !ifp->ndev) {
                if ((ret != -ENODATA) && ifp)
                        ifp->stats.rx_errors++;
                brcmu_pkt_buf_free_skb(skb);
                return;
        }

        rd = (struct brcmf_skb_reorder_data *)skb->cb;
        if (rd->reorder)
                brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
        else
                brcmf_netif_rx(ifp, skb);
}

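/* Transmit completion: strip the protocol header, decrement the pending
 * 802.1X (EAPOL) counter when applicable so brcmf_netdev_wait_pend8021x()
 * can make progress, account errors and free the packet.
 */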
void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
                      bool success)
{
        struct brcmf_if *ifp;
        struct ethhdr *eh;
        u8 ifidx;
        u16 type;
        int res;

        res = brcmf_proto_hdrpull(drvr, false, &ifidx, txp);

        ifp = drvr->iflist[ifidx];
        if (!ifp)
                goto done;

        if (res == 0) {
                eh = (struct ethhdr *)(txp->data);
                type = ntohs(eh->h_proto);

                if (type == ETH_P_PAE) {
                        atomic_dec(&ifp->pend_8021x_cnt);
                        if (waitqueue_active(&ifp->pend_8021x_wait))
                                wake_up(&ifp->pend_8021x_wait);
                }
        }
        if (!success)
                ifp->stats.tx_errors++;
done:
        brcmu_pkt_buf_free_skb(txp);
}

void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
{
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_pub *drvr = bus_if->drvr;

        /* await txstatus signal for firmware if active */
        if (brcmf_fws_fc_active(drvr->fws)) {
                if (!success)
                        brcmf_fws_bustxfail(drvr->fws, txp);
        } else {
                brcmf_txfinalize(drvr, txp, success);
        }
}

static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
{
        struct brcmf_if *ifp = netdev_priv(ndev);

        brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

        return &ifp->stats;
}

static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
                                      struct ethtool_drvinfo *info)
{
        struct brcmf_if *ifp = netdev_priv(ndev);
        struct brcmf_pub *drvr = ifp->drvr;

        strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
        snprintf(info->version, sizeof(info->version), "n/a");
        strlcpy(info->fw_version, drvr->fwver, sizeof(info->fw_version));
        strlcpy(info->bus_info, dev_name(drvr->bus_if->dev),
                sizeof(info->bus_info));
}

static const struct ethtool_ops brcmf_ethtool_ops = {
        .get_drvinfo = brcmf_ethtool_get_drvinfo,
};

static int brcmf_netdev_stop(struct net_device *ndev)
{
        struct brcmf_if *ifp = netdev_priv(ndev);

        brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

        brcmf_cfg80211_down(ndev);

        /* Set state and stop OS transmissions */
        netif_stop_queue(ndev);

        return 0;
}

static int brcmf_netdev_open(struct net_device *ndev)
{
        struct brcmf_if *ifp = netdev_priv(ndev);
        struct brcmf_pub *drvr = ifp->drvr;
        struct brcmf_bus *bus_if = drvr->bus_if;
        u32 toe_ol;

        brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);

        /* If bus is not ready, can't continue */
        if (bus_if->state != BRCMF_BUS_DATA) {
                brcmf_err("failed bus is not ready\n");
                return -EAGAIN;
        }

        atomic_set(&ifp->pend_8021x_cnt, 0);

        /* Get current TOE mode from dongle */
        if (brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_ol) >= 0
            && (toe_ol & TOE_TX_CSUM_OL) != 0)
                ndev->features |= NETIF_F_IP_CSUM;
        else
                ndev->features &= ~NETIF_F_IP_CSUM;

        if (brcmf_cfg80211_up(ndev)) {
                brcmf_err("failed to bring up cfg80211\n");
                return -EIO;
        }

        /* Allow transmit calls */
        netif_start_queue(ndev);
        return 0;
}

static const struct net_device_ops brcmf_netdev_ops_pri = {
        .ndo_open = brcmf_netdev_open,
        .ndo_stop = brcmf_netdev_stop,
        .ndo_get_stats = brcmf_netdev_get_stats,
        .ndo_start_xmit = brcmf_netdev_start_xmit,
        .ndo_set_mac_address = brcmf_netdev_set_mac_address,
        .ndo_set_rx_mode = brcmf_netdev_set_multicast_list
};

int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
{
        struct brcmf_pub *drvr = ifp->drvr;
        struct net_device *ndev;
        s32 err;

        brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
                  ifp->mac_addr);
        ndev = ifp->ndev;

        /* set appropriate operations */
        ndev->netdev_ops = &brcmf_netdev_ops_pri;

        ndev->hard_header_len += drvr->hdrlen;
        ndev->ethtool_ops = &brcmf_ethtool_ops;

        drvr->rxsz = ndev->mtu + ndev->hard_header_len +
                     drvr->hdrlen;

        /* set the mac address */
        memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);

        INIT_WORK(&ifp->setmacaddr_work, _brcmf_set_mac_address);
        INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list);

        if (rtnl_locked)
                err = register_netdevice(ndev);
        else
                err = register_netdev(ndev);
        if (err != 0) {
                brcmf_err("couldn't register the net device\n");
                goto fail;
        }

        brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);

        ndev->destructor = brcmf_cfg80211_free_netdev;
        return 0;

fail:
        drvr->iflist[ifp->bssidx] = NULL;
        ndev->netdev_ops = NULL;
        free_netdev(ndev);
        return -EBADE;
}

static int brcmf_net_p2p_open(struct net_device *ndev)
{
        brcmf_dbg(TRACE, "Enter\n");

        return brcmf_cfg80211_up(ndev);
}

static int brcmf_net_p2p_stop(struct net_device *ndev)
{
        brcmf_dbg(TRACE, "Enter\n");

        return brcmf_cfg80211_down(ndev);
}

static netdev_tx_t brcmf_net_p2p_start_xmit(struct sk_buff *skb,
                                            struct net_device *ndev)
{
        if (skb)
                dev_kfree_skb_any(skb);

        return NETDEV_TX_OK;
}

static const struct net_device_ops brcmf_netdev_ops_p2p = {
        .ndo_open = brcmf_net_p2p_open,
        .ndo_stop = brcmf_net_p2p_stop,
        .ndo_start_xmit = brcmf_net_p2p_start_xmit
};

static int brcmf_net_p2p_attach(struct brcmf_if *ifp)
{
        struct net_device *ndev;

        brcmf_dbg(TRACE, "Enter, idx=%d mac=%pM\n", ifp->bssidx,
                  ifp->mac_addr);
        ndev = ifp->ndev;

        ndev->netdev_ops = &brcmf_netdev_ops_p2p;

        /* set the mac address */
        memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);

        if (register_netdev(ndev) != 0) {
                brcmf_err("couldn't register the p2p net device\n");
                goto fail;
        }

        brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);

        return 0;

fail:
        ifp->drvr->iflist[ifp->bssidx] = NULL;
        ndev->netdev_ops = NULL;
        free_netdev(ndev);
        return -EBADE;
}

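/* Create the driver interface for a firmware bsscfg: either a full netdev
 * (registered later via brcmf_net_attach()) or, for the P2P_DEVICE bsscfg
 * when the p2pon parameter is not set, a bare brcmf_if without a netdev.
 */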
struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
                              char *name, u8 *mac_addr)
{
        struct brcmf_if *ifp;
        struct net_device *ndev;

        brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifidx);

        ifp = drvr->iflist[bssidx];
        /*
         * Delete the existing interface before overwriting it
         * in case we missed the BRCMF_E_IF_DEL event.
         */
        if (ifp) {
                brcmf_err("ERROR: netdev:%s already exists\n",
                          ifp->ndev->name);
                if (ifidx) {
                        netif_stop_queue(ifp->ndev);
                        unregister_netdev(ifp->ndev);
                        free_netdev(ifp->ndev);
                        drvr->iflist[bssidx] = NULL;
                } else {
                        brcmf_err("ignore IF event\n");
                        return ERR_PTR(-EINVAL);
                }
        }

        if (!brcmf_p2p_enable && bssidx == 1) {
                /* this is P2P_DEVICE interface */
                brcmf_dbg(INFO, "allocate non-netdev interface\n");
                ifp = kzalloc(sizeof(*ifp), GFP_KERNEL);
                if (!ifp)
                        return ERR_PTR(-ENOMEM);
        } else {
                brcmf_dbg(INFO, "allocate netdev interface\n");
                /* Allocate netdev, including space for private structure */
                ndev = alloc_netdev(sizeof(*ifp), name, ether_setup);
                if (!ndev)
                        return ERR_PTR(-ENOMEM);

                ifp = netdev_priv(ndev);
                ifp->ndev = ndev;
        }

        ifp->drvr = drvr;
        drvr->iflist[bssidx] = ifp;
        ifp->ifidx = ifidx;
        ifp->bssidx = bssidx;

        init_waitqueue_head(&ifp->pend_8021x_wait);
        spin_lock_init(&ifp->netif_stop_lock);

        if (mac_addr != NULL)
                memcpy(ifp->mac_addr, mac_addr, ETH_ALEN);

        brcmf_dbg(TRACE, " ==== pid:%x, if:%s (%pM) created ===\n",
                  current->pid, name, ifp->mac_addr);

        return ifp;
}

void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx)
{
        struct brcmf_if *ifp;

        ifp = drvr->iflist[bssidx];
        drvr->iflist[bssidx] = NULL;
        if (!ifp) {
                brcmf_err("Null interface, idx=%d\n", bssidx);
                return;
        }
        brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifp->ifidx);
        if (ifp->ndev) {
                if (bssidx == 0) {
                        if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
                                rtnl_lock();
                                brcmf_netdev_stop(ifp->ndev);
                                rtnl_unlock();
                        }
                } else {
                        netif_stop_queue(ifp->ndev);
                }

                if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
                        cancel_work_sync(&ifp->setmacaddr_work);
                        cancel_work_sync(&ifp->multicast_work);
                }
                /* unregister will take care of freeing it */
                unregister_netdev(ifp->ndev);
        } else {
                kfree(ifp);
        }
}

int brcmf_attach(struct device *dev)
{
        struct brcmf_pub *drvr = NULL;
        int ret = 0;

        brcmf_dbg(TRACE, "Enter\n");

        /* Allocate primary brcmf_info */
        drvr = kzalloc(sizeof(struct brcmf_pub), GFP_ATOMIC);
        if (!drvr)
                return -ENOMEM;

        mutex_init(&drvr->proto_block);

        /* Link to bus module */
        drvr->hdrlen = 0;
        drvr->bus_if = dev_get_drvdata(dev);
        drvr->bus_if->drvr = drvr;

        /* create device debugfs folder */
        brcmf_debugfs_attach(drvr);

        /* Attach and link in the protocol */
        ret = brcmf_proto_attach(drvr);
        if (ret != 0) {
                brcmf_err("brcmf_prot_attach failed\n");
                goto fail;
        }

        /* attach firmware event handler */
        brcmf_fweh_attach(drvr);

        return ret;

fail:
        brcmf_detach(dev);

        return ret;
}

int brcmf_bus_start(struct device *dev)
{
        int ret = -1;
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_pub *drvr = bus_if->drvr;
        struct brcmf_if *ifp;
        struct brcmf_if *p2p_ifp;

        brcmf_dbg(TRACE, "\n");

        /* Bring up the bus */
        ret = brcmf_bus_init(bus_if);
        if (ret != 0) {
                brcmf_err("brcmf_sdbrcm_bus_init failed %d\n", ret);
                return ret;
        }

        /* add primary networking interface */
        ifp = brcmf_add_if(drvr, 0, 0, "wlan%d", NULL);
        if (IS_ERR(ifp))
                return PTR_ERR(ifp);

        if (brcmf_p2p_enable)
                p2p_ifp = brcmf_add_if(drvr, 1, 0, "p2p%d", NULL);
        else
                p2p_ifp = NULL;
        if (IS_ERR(p2p_ifp))
                p2p_ifp = NULL;

        /* signal bus ready */
        brcmf_bus_change_state(bus_if, BRCMF_BUS_DATA);

        /* Bus is ready, do any initialization */
        ret = brcmf_c_preinit_dcmds(ifp);
        if (ret < 0)
                goto fail;

        ret = brcmf_fws_init(drvr);
        if (ret < 0)
                goto fail;

        brcmf_fws_add_interface(ifp);

        drvr->config = brcmf_cfg80211_attach(drvr, bus_if->dev);
        if (drvr->config == NULL) {
                ret = -ENOMEM;
                goto fail;
        }

        ret = brcmf_fweh_activate_events(ifp);
        if (ret < 0)
                goto fail;

        ret = brcmf_net_attach(ifp, false);
fail:
        if (ret < 0) {
                brcmf_err("failed: %d\n", ret);
                brcmf_cfg80211_detach(drvr->config);
                if (drvr->fws) {
                        brcmf_fws_del_interface(ifp);
                        brcmf_fws_deinit(drvr);
                }
                if (drvr->iflist[0]) {
                        free_netdev(ifp->ndev);
                        drvr->iflist[0] = NULL;
                }
                if (p2p_ifp) {
                        free_netdev(p2p_ifp->ndev);
                        drvr->iflist[1] = NULL;
                }
                return ret;
        }
        if ((brcmf_p2p_enable) && (p2p_ifp))
                if (brcmf_net_p2p_attach(p2p_ifp) < 0)
                        brcmf_p2p_enable = 0;

        return 0;
}

void brcmf_bus_add_txhdrlen(struct device *dev, uint len)
{
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_pub *drvr = bus_if->drvr;

        if (drvr) {
                drvr->hdrlen += len;
        }
}

static void brcmf_bus_detach(struct brcmf_pub *drvr)
{
        brcmf_dbg(TRACE, "Enter\n");

        if (drvr) {
                /* Stop the bus module */
                brcmf_bus_stop(drvr->bus_if);
        }
}

void brcmf_dev_reset(struct device *dev)
{
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_pub *drvr = bus_if->drvr;

        if (drvr == NULL)
                return;

        if (drvr->iflist[0])
                brcmf_fil_cmd_int_set(drvr->iflist[0], BRCMF_C_TERMINATED, 1);
}

void brcmf_detach(struct device *dev)
{
        s32 i;
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_pub *drvr = bus_if->drvr;

        brcmf_dbg(TRACE, "Enter\n");

        if (drvr == NULL)
                return;

        /* stop firmware event handling */
        brcmf_fweh_detach(drvr);

        brcmf_bus_change_state(bus_if, BRCMF_BUS_DOWN);

        /* make sure primary interface removed last */
        for (i = BRCMF_MAX_IFS - 1; i > -1; i--)
                if (drvr->iflist[i]) {
                        brcmf_fws_del_interface(drvr->iflist[i]);
                        brcmf_del_if(drvr, i);
                }

        brcmf_cfg80211_detach(drvr->config);

        brcmf_bus_detach(drvr);

        brcmf_proto_detach(drvr);

        brcmf_fws_deinit(drvr);

        brcmf_debugfs_detach(drvr);
        bus_if->drvr = NULL;
        kfree(drvr);
}

s32 brcmf_iovar_data_set(struct device *dev, char *name, void *data, u32 len)
{
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_if *ifp = bus_if->drvr->iflist[0];

        return brcmf_fil_iovar_data_set(ifp, name, data, len);
}

static int brcmf_get_pend_8021x_cnt(struct brcmf_if *ifp)
{
        return atomic_read(&ifp->pend_8021x_cnt);
}

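/* Wait up to MAX_WAIT_FOR_8021X_TX msecs for all pending 802.1X frames on
 * this interface to be completed by brcmf_txfinalize(); note that
 * wait_event_timeout() returns 0 when the timeout elapses.
 */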
int brcmf_netdev_wait_pend8021x(struct net_device *ndev)
{
        struct brcmf_if *ifp = netdev_priv(ndev);
        int err;

        err = wait_event_timeout(ifp->pend_8021x_wait,
                                 !brcmf_get_pend_8021x_cnt(ifp),
                                 msecs_to_jiffies(MAX_WAIT_FOR_8021X_TX));

        WARN_ON(!err);

        return !err;
}

/*
 * return chip id and rev of the device encoded in u32.
 */
u32 brcmf_get_chip_info(struct brcmf_if *ifp)
{
        struct brcmf_bus *bus = ifp->drvr->bus_if;

        return bus->chip << 4 | bus->chiprev;
}
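/* For example, a returned value of 0x4330a decodes as chip 0x4330, rev 10:
 *	chip = brcmf_get_chip_info(ifp) >> 4;
 *	rev  = brcmf_get_chip_info(ifp) & 0xf;
 */
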
static void brcmf_driver_register(struct work_struct *work)
{
#ifdef CONFIG_BRCMFMAC_SDIO
        brcmf_sdio_register();
#endif
#ifdef CONFIG_BRCMFMAC_USB
        brcmf_usb_register();
#endif
}
static DECLARE_WORK(brcmf_driver_work, brcmf_driver_register);

static int __init brcmfmac_module_init(void)
{
        brcmf_debugfs_init();
#ifdef CONFIG_BRCMFMAC_SDIO
        brcmf_sdio_init();
#endif
        if (!schedule_work(&brcmf_driver_work))
                return -EBUSY;

        return 0;
}

static void __exit brcmfmac_module_exit(void)
{
        cancel_work_sync(&brcmf_driver_work);

#ifdef CONFIG_BRCMFMAC_SDIO
        brcmf_sdio_exit();
#endif
#ifdef CONFIG_BRCMFMAC_USB
        brcmf_usb_exit();
#endif
        brcmf_debugfs_exit();
}

module_init(brcmfmac_module_init);
module_exit(brcmfmac_module_exit);