// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2014 Broadcom Corporation
 */
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <brcmu_utils.h>

#include "core.h"
#include "debug.h"
#include "bus.h"
#include "proto.h"
#include "flowring.h"
#include "msgbuf.h"

#define BRCMF_FLOWRING_HIGH		1024
#define BRCMF_FLOWRING_LOW		(BRCMF_FLOWRING_HIGH - 256)
#define BRCMF_FLOWRING_INVALID_IFIDX	0xff

#define BRCMF_FLOWRING_HASH_AP(da, fifo, ifidx) (da[5] * 2 + fifo + ifidx * 16)
#define BRCMF_FLOWRING_HASH_STA(fifo, ifidx) (fifo + ifidx * 16)
static const u8 brcmf_flowring_prio2fifo[] = {
        /* 802.1d priority to FIFO mapping; table entries elided */
};

static const u8 ALLFFMAC[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
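/* Returns true when @mac is a registered TDLS peer. Traffic to a TDLS peer
 * gets its own per-address flowring even on an interface that otherwise
 * uses STA (indirect) addressing.
 */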
static bool
brcmf_flowring_is_tdls_mac(struct brcmf_flowring *flow, u8 mac[ETH_ALEN])
{
        struct brcmf_flowring_tdls_entry *search;

        search = flow->tdls_entry;

        while (search) {
                if (memcmp(search->mac, mac, ETH_ALEN) == 0)
                        return true;
                search = search->next;
        }

        return false;
}
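/* Find the flowring used for (@da, @prio, @ifidx). Returns the flowring id
 * or BRCMF_FLOWRING_INVALID_ID when no matching hash entry exists.
 */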
u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
                          u8 prio, u8 ifidx)
{
        struct brcmf_flowring_hash *hash;
        u16 hash_idx;
        u32 i;
        bool found;
        bool sta;
        u8 fifo;
        u8 *mac;

        fifo = brcmf_flowring_prio2fifo[prio];
        sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT);
        mac = da;
        if ((!sta) && (is_multicast_ether_addr(da))) {
                mac = (u8 *)ALLFFMAC;
                fifo = 0;
        }
        if ((sta) && (flow->tdls_active) &&
            (brcmf_flowring_is_tdls_mac(flow, da))) {
                sta = false;
        }
        hash_idx = sta ? BRCMF_FLOWRING_HASH_STA(fifo, ifidx) :
                         BRCMF_FLOWRING_HASH_AP(mac, fifo, ifidx);
        hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
        found = false;
        hash = flow->hash;
        for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
                if ((sta || (memcmp(hash[hash_idx].mac, mac, ETH_ALEN) == 0)) &&
                    (hash[hash_idx].fifo == fifo) &&
                    (hash[hash_idx].ifidx == ifidx)) {
                        found = true;
                        break;
                }
                hash_idx++;
                hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
        }
        if (found)
                return hash[hash_idx].flowid;

        return BRCMF_FLOWRING_INVALID_ID;
}
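/* Claim a free hash slot and allocate a ring for (@da, @prio, @ifidx). The
 * new ring starts out RING_CLOSED. Returns the flowring id, or
 * BRCMF_FLOWRING_INVALID_ID when no slot or memory is available.
 */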
u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
                          u8 prio, u8 ifidx)
{
        struct brcmf_flowring_ring *ring;
        struct brcmf_flowring_hash *hash;
        u16 hash_idx;
        u32 i;
        bool found;
        u8 fifo;
        bool sta;
        u8 *mac;

        fifo = brcmf_flowring_prio2fifo[prio];
        sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT);
        mac = da;
        if ((!sta) && (is_multicast_ether_addr(da))) {
                mac = (u8 *)ALLFFMAC;
                fifo = 0;
        }
        if ((sta) && (flow->tdls_active) &&
            (brcmf_flowring_is_tdls_mac(flow, da))) {
                sta = false;
        }
        hash_idx = sta ? BRCMF_FLOWRING_HASH_STA(fifo, ifidx) :
                         BRCMF_FLOWRING_HASH_AP(mac, fifo, ifidx);
        hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
        found = false;
        hash = flow->hash;
        for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
                if ((hash[hash_idx].ifidx == BRCMF_FLOWRING_INVALID_IFIDX) &&
                    (is_zero_ether_addr(hash[hash_idx].mac))) {
                        found = true;
                        break;
                }
                hash_idx++;
                hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
        }
        if (found) {
                for (i = 0; i < flow->nrofrings; i++) {
                        if (flow->rings[i] == NULL)
                                break;
                }
                if (i == flow->nrofrings)
                        return BRCMF_FLOWRING_INVALID_ID;

                ring = kzalloc(sizeof(*ring), GFP_ATOMIC);
                if (!ring)
                        return BRCMF_FLOWRING_INVALID_ID;

                memcpy(hash[hash_idx].mac, mac, ETH_ALEN);
                hash[hash_idx].fifo = fifo;
                hash[hash_idx].ifidx = ifidx;
                hash[hash_idx].flowid = i;
                ring->hash_id = hash_idx;
                ring->status = RING_CLOSED;
                skb_queue_head_init(&ring->skblist);
                flow->rings[i] = ring;

                return i;
        }
        return BRCMF_FLOWRING_INVALID_ID;
}
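/* Return the FIFO (traffic class) associated with @flowid. */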
u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u16 flowid)
{
        struct brcmf_flowring_ring *ring;

        ring = flow->rings[flowid];

        return flow->hash[ring->hash_id].fifo;
}
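/* Update the blocked state of @flowid and propagate flow control to the
 * netif queues, but only when no other open ring on the same interface is
 * already blocked.
 */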
static void brcmf_flowring_block(struct brcmf_flowring *flow, u16 flowid,
                                 bool blocked)
{
        struct brcmf_flowring_ring *ring;
        struct brcmf_bus *bus_if;
        struct brcmf_pub *drvr;
        struct brcmf_if *ifp;
        bool currently_blocked;
        int i;
        u8 ifidx;
        unsigned long flags;

        spin_lock_irqsave(&flow->block_lock, flags);

        ring = flow->rings[flowid];
        if (ring->blocked == blocked) {
                spin_unlock_irqrestore(&flow->block_lock, flags);
                return;
        }
        ifidx = brcmf_flowring_ifidx_get(flow, flowid);

        currently_blocked = false;
        for (i = 0; i < flow->nrofrings; i++) {
                if ((flow->rings[i]) && (i != flowid)) {
                        ring = flow->rings[i];
                        if ((ring->status == RING_OPEN) &&
                            (brcmf_flowring_ifidx_get(flow, i) == ifidx)) {
                                if (ring->blocked) {
                                        currently_blocked = true;
                                        break;
                                }
                        }
                }
        }
        flow->rings[flowid]->blocked = blocked;
        if (currently_blocked) {
                spin_unlock_irqrestore(&flow->block_lock, flags);
                return;
        }

        bus_if = dev_get_drvdata(flow->dev);
        drvr = bus_if->drvr;
        ifp = brcmf_get_ifp(drvr, ifidx);
        brcmf_txflowblock_if(ifp, BRCMF_NETIF_STOP_REASON_FLOW, blocked);

        spin_unlock_irqrestore(&flow->block_lock, flags);
}
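/* Remove @flowid completely: release its hash slot, and complete any
 * packets still queued on it as not transmitted.
 */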
void brcmf_flowring_delete(struct brcmf_flowring *flow, u16 flowid)
{
        struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
        struct brcmf_flowring_ring *ring;
        struct brcmf_if *ifp;
        u16 hash_idx;
        u8 ifidx;
        struct sk_buff *skb;

        ring = flow->rings[flowid];
        if (!ring)
                return;

        ifidx = brcmf_flowring_ifidx_get(flow, flowid);
        ifp = brcmf_get_ifp(bus_if->drvr, ifidx);

        brcmf_flowring_block(flow, flowid, false);
        hash_idx = ring->hash_id;
        flow->hash[hash_idx].ifidx = BRCMF_FLOWRING_INVALID_IFIDX;
        eth_zero_addr(flow->hash[hash_idx].mac);
        flow->rings[flowid] = NULL;

        skb = skb_dequeue(&ring->skblist);
        while (skb) {
                brcmf_txfinalize(ifp, skb, false);
                skb = skb_dequeue(&ring->skblist);
        }

        kfree(ring);
}
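/* Queue @skb on @flowid and apply the high/low watermark based flow
 * control. Returns the new queue length.
 */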
u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u16 flowid,
                           struct sk_buff *skb)
{
        struct brcmf_flowring_ring *ring;

        ring = flow->rings[flowid];

        skb_queue_tail(&ring->skblist, skb);

        if (!ring->blocked &&
            (skb_queue_len(&ring->skblist) > BRCMF_FLOWRING_HIGH)) {
                brcmf_flowring_block(flow, flowid, true);
                brcmf_dbg(MSGBUF, "Flowcontrol: BLOCK for ring %d\n", flowid);
                /* To prevent (work around) possible race condition, check
                 * queue len again. It is also possible to use locking to
                 * protect, but that is undesirable for every enqueue and
                 * dequeue. This simple check will solve a possible race
                 * condition if it occurs.
                 */
                if (skb_queue_len(&ring->skblist) < BRCMF_FLOWRING_LOW)
                        brcmf_flowring_block(flow, flowid, false);
        }
        return skb_queue_len(&ring->skblist);
}
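/* Pull the next skb from an open ring, releasing flow control once the
 * queue drops below the low watermark. Returns NULL when the ring is not
 * open or is empty.
 */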
struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u16 flowid)
{
        struct brcmf_flowring_ring *ring;
        struct sk_buff *skb;

        ring = flow->rings[flowid];
        if (ring->status != RING_OPEN)
                return NULL;

        skb = skb_dequeue(&ring->skblist);

        if (ring->blocked &&
            (skb_queue_len(&ring->skblist) < BRCMF_FLOWRING_LOW)) {
                brcmf_flowring_block(flow, flowid, false);
                brcmf_dbg(MSGBUF, "Flowcontrol: OPEN for ring %d\n", flowid);
        }

        return skb;
}
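/* Put @skb back at the head of the ring so it is the first one dequeued
 * next time.
 */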
void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u16 flowid,
                             struct sk_buff *skb)
{
        struct brcmf_flowring_ring *ring;

        ring = flow->rings[flowid];

        skb_queue_head(&ring->skblist, skb);
}
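/* Number of skbs currently queued on @flowid; 0 when the ring does not
 * exist or is not open.
 */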
u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u16 flowid)
{
        struct brcmf_flowring_ring *ring;

        ring = flow->rings[flowid];
        if (!ring)
                return 0;

        if (ring->status != RING_OPEN)
                return 0;

        return skb_queue_len(&ring->skblist);
}
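/* Mark @flowid as ready for transmission. */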
void brcmf_flowring_open(struct brcmf_flowring *flow, u16 flowid)
{
        struct brcmf_flowring_ring *ring;

        ring = flow->rings[flowid];
        if (!ring) {
                brcmf_err("Ring NULL, for flowid %d\n", flowid);
                return;
        }

        ring->status = RING_OPEN;
}
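/* Return the interface index that @flowid belongs to. */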
u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u16 flowid)
{
        struct brcmf_flowring_ring *ring;
        u16 hash_idx;

        ring = flow->rings[flowid];
        hash_idx = ring->hash_id;

        return flow->hash[hash_idx].ifidx;
}
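/* Allocate and initialise the flowring bookkeeping for @nrofrings rings. */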
struct brcmf_flowring *brcmf_flowring_attach(struct device *dev, u16 nrofrings)
{
        struct brcmf_flowring *flow;
        u32 i;

        flow = kzalloc(sizeof(*flow), GFP_KERNEL);
        if (flow) {
                flow->dev = dev;
                flow->nrofrings = nrofrings;
                spin_lock_init(&flow->block_lock);
                for (i = 0; i < ARRAY_SIZE(flow->addr_mode); i++)
                        flow->addr_mode[i] = ADDR_INDIRECT;
                for (i = 0; i < ARRAY_SIZE(flow->hash); i++)
                        flow->hash[i].ifidx = BRCMF_FLOWRING_INVALID_IFIDX;
                flow->rings = kcalloc(nrofrings, sizeof(*flow->rings),
                                      GFP_KERNEL);
                if (!flow->rings) {
                        kfree(flow);
                        flow = NULL;
                }
        }

        return flow;
}
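/* Tear down all rings and TDLS entries and free the flowring state. */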
void brcmf_flowring_detach(struct brcmf_flowring *flow)
{
        struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
        struct brcmf_pub *drvr = bus_if->drvr;
        struct brcmf_flowring_tdls_entry *search;
        struct brcmf_flowring_tdls_entry *remove;
        u16 flowid;

        for (flowid = 0; flowid < flow->nrofrings; flowid++) {
                if (flow->rings[flowid])
                        brcmf_msgbuf_delete_flowring(drvr, flowid);
        }

        search = flow->tdls_entry;
        while (search) {
                remove = search;
                search = search->next;
                kfree(remove);
        }
        kfree(flow->rings);
        kfree(flow);
}
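/* Changing the addressing mode of an interface invalidates its existing
 * rings: mark open rings as closing and ask msgbuf to delete them.
 */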
void brcmf_flowring_configure_addr_mode(struct brcmf_flowring *flow, int ifidx,
                                        enum proto_addr_mode addr_mode)
{
        struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
        struct brcmf_pub *drvr = bus_if->drvr;
        u32 i;
        u16 flowid;

        if (flow->addr_mode[ifidx] != addr_mode) {
                for (i = 0; i < ARRAY_SIZE(flow->hash); i++) {
                        if (flow->hash[i].ifidx == ifidx) {
                                flowid = flow->hash[i].flowid;
                                if (flow->rings[flowid]->status != RING_OPEN)
                                        continue;
                                flow->rings[flowid]->status = RING_CLOSING;
                                brcmf_msgbuf_delete_flowring(drvr, flowid);
                        }
                }
                flow->addr_mode[ifidx] = addr_mode;
        }
}
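/* Close all rings carrying traffic to @peer on @ifidx and remove the peer
 * from the TDLS list when present.
 */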
void brcmf_flowring_delete_peer(struct brcmf_flowring *flow, int ifidx,
                                u8 peer[ETH_ALEN])
{
        struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
        struct brcmf_pub *drvr = bus_if->drvr;
        struct brcmf_flowring_hash *hash;
        struct brcmf_flowring_tdls_entry *prev;
        struct brcmf_flowring_tdls_entry *search;
        u32 i;
        u16 flowid;
        bool sta;

        sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT);

        search = flow->tdls_entry;
        prev = NULL;
        while (search) {
                if (memcmp(search->mac, peer, ETH_ALEN) == 0) {
                        sta = false;
                        break;
                }
                prev = search;
                search = search->next;
        }

        hash = flow->hash;
        for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
                if ((sta || (memcmp(hash[i].mac, peer, ETH_ALEN) == 0)) &&
                    (hash[i].ifidx == ifidx)) {
                        flowid = flow->hash[i].flowid;
                        if (flow->rings[flowid]->status == RING_OPEN) {
                                flow->rings[flowid]->status = RING_CLOSING;
                                brcmf_msgbuf_delete_flowring(drvr, flowid);
                        }
                }
        }

        if (search) {
                if (prev)
                        prev->next = search->next;
                else
                        flow->tdls_entry = search->next;
                kfree(search);
                if (flow->tdls_entry == NULL)
                        flow->tdls_active = false;
        }
}
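/* Register @peer as a TDLS peer so that its traffic is mapped to a
 * dedicated flowring; duplicates are ignored.
 */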
void brcmf_flowring_add_tdls_peer(struct brcmf_flowring *flow, int ifidx,
                                  u8 peer[ETH_ALEN])
{
        struct brcmf_flowring_tdls_entry *tdls_entry;
        struct brcmf_flowring_tdls_entry *search;

        tdls_entry = kzalloc(sizeof(*tdls_entry), GFP_ATOMIC);
        if (tdls_entry == NULL)
                return;

        memcpy(tdls_entry->mac, peer, ETH_ALEN);
        tdls_entry->next = NULL;
        if (flow->tdls_entry == NULL) {
                flow->tdls_entry = tdls_entry;
        } else {
                search = flow->tdls_entry;
                if (memcmp(search->mac, peer, ETH_ALEN) == 0)
                        goto free_entry;
                while (search->next) {
                        search = search->next;
                        if (memcmp(search->mac, peer, ETH_ALEN) == 0)
                                goto free_entry;
                }
                search->next = tdls_entry;
        }

        flow->tdls_active = true;
        return;

free_entry:
        kfree(tdls_entry);
}