/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/leds.h>
#include <net/mac80211.h>

#define MT_TX_RING_SIZE 256
#define MT_MCU_RING_SIZE 32
#define MT_RX_BUF_SIZE 2048

struct mt76_bus_ops {
	u32 (*rr)(struct mt76_dev *dev, u32 offset);
	void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
	u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
	void (*copy)(struct mt76_dev *dev, u32 offset, const void *data,
		     int len);
};

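/*
 * A bus backend fills in these accessors. As an illustrative sketch only
 * (the function and field names here are assumptions, not the upstream
 * implementation), an MMIO read callback could boil down to:
 *
 *	static u32 mt76_mmio_rr(struct mt76_dev *dev, u32 offset)
 *	{
 *		return readl(dev->regs + offset);
 *	}
 */
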
enum mt76_txq_id {
	MT_TXQ_VO = IEEE80211_AC_VO,
	MT_TXQ_VI = IEEE80211_AC_VI,
	MT_TXQ_BE = IEEE80211_AC_BE,
	MT_TXQ_BK = IEEE80211_AC_BK,
	MT_TXQ_PSD,
	MT_TXQ_MCU,
	__MT_TXQ_MAX
};

enum mt76_rxq_id {
	MT_RXQ_MAIN,
	MT_RXQ_MCU,
	__MT_RXQ_MAX
};

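/*
 * The first four TX queue IDs alias the mac80211 AC indices, so the AC
 * assigned to an skb can index the hardware queue array directly, e.g.
 * dev->q_tx[skb_get_queue_mapping(skb)].
 */
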
struct mt76_queue_buf {
	dma_addr_t addr;
	int len;
};

struct mt76_queue_entry {
	struct mt76_txwi_cache *txwi;
};

struct mt76_queue_regs {
	u32 desc_base;
	u32 ring_size;
	u32 cpu_idx;
	u32 dma_idx;
} __packed __aligned(4);

struct mt76_queue {
	struct mt76_queue_regs __iomem *regs;

	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;

	struct sk_buff *rx_head;
};

struct mt76_queue_ops {
	int (*init)(struct mt76_dev *dev);

	int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q);

	int (*add_buf)(struct mt76_dev *dev, struct mt76_queue *q,
		       struct mt76_queue_buf *buf, int nbufs, u32 info,
		       struct sk_buff *skb, void *txwi);

	void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
			 int *len, u32 *info, bool *more);

	void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);

	void (*tx_cleanup)(struct mt76_dev *dev, enum mt76_txq_id qid,
			   bool flush);

	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
};

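/*
 * Rough queue lifecycle (sketch): alloc() sets up a descriptor ring,
 * add_buf() fills one or more descriptors, and kick() hands the new
 * entries to the hardware DMA engine, e.g.:
 *
 *	mt76_queue_add_buf(dev, q, &qbuf, 1, tx_info, skb, txwi);
 *	mt76_queue_kick(dev, q);
 *
 * Completed TX entries are reaped with tx_cleanup(); received buffers
 * are pulled out with dequeue(). The wrapper macros appear further down.
 */
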
enum mt76_wcid_flags {
	MT_WCID_FLAG_CHECK_PS,
	MT_WCID_FLAG_PS,
};

struct mt76_wcid {
	struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];

	struct work_struct aggr_work;

	u8 sta:1;

	u8 rx_key_pn[IEEE80211_NUM_TIDS][6];
};

struct mt76_txq {
	struct list_head list;
	struct mt76_queue *hwq;
	struct mt76_wcid *wcid;

	struct sk_buff_head retry_q;
};

struct mt76_txwi_cache {
	struct list_head list;
};

struct mt76_rx_tid {
	struct rcu_head rcu_head;

	struct mt76_dev *dev;

	struct delayed_work reorder_work;

	u8 started:1, stopped:1, timer_pending:1;

	struct sk_buff *reorder_buf[];
};

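/*
 * reorder_buf is a flexible array member, one slot per frame in the
 * block-ack window; the aggregation code presumably sizes the allocation
 * along the lines of:
 *
 *	tid = kzalloc(sizeof(*tid) + size * sizeof(tid->reorder_buf[0]),
 *		      GFP_KERNEL);
 */
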
enum {
	MT76_STATE_INITIALIZED,
};

struct mt76_driver_ops {
	void (*update_survey)(struct mt76_dev *dev);

	int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
			      struct sk_buff *skb, struct mt76_queue *q,
			      struct mt76_wcid *wcid,
			      struct ieee80211_sta *sta, u32 *tx_info);

	void (*tx_complete_skb)(struct mt76_dev *dev, struct mt76_queue *q,
				struct mt76_queue_entry *e, bool flush);

	void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
		       struct sk_buff *skb);

	void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);

	void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
		       bool ps);
};

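/*
 * These hooks are the chip-specific half of the stack: the core calls
 * tx_prepare_skb()/tx_complete_skb() around descriptor DMA, delivers
 * received frames through rx_skb(), and signals a station's powersave
 * transitions via sta_ps().
 */
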
struct mt76_channel_state {
	u64 cc_active;
	u64 cc_busy;
};

struct mt76_sband {
	struct ieee80211_supported_band sband;
	struct mt76_channel_state *chan;
};

struct mt76_dev {
	struct ieee80211_hw *hw;
	struct cfg80211_chan_def chandef;
	struct ieee80211_channel *main_chan;

	const struct mt76_bus_ops *bus;
	const struct mt76_driver_ops *drv;

	struct net_device napi_dev;
	struct napi_struct napi[__MT_RXQ_MAX];
	struct sk_buff_head rx_skb[__MT_RXQ_MAX];

	struct list_head txwi_cache;
	struct mt76_queue q_tx[__MT_TXQ_MAX];
	struct mt76_queue q_rx[__MT_RXQ_MAX];
	const struct mt76_queue_ops *queue_ops;

	u8 macaddr[ETH_ALEN];
	u32 rev;

	struct mt76_sband sband_2g;
	struct mt76_sband sband_5g;
	struct debugfs_blob_wrapper eeprom;
	struct debugfs_blob_wrapper otp;
	struct mt76_hw_cap cap;

	struct led_classdev led_cdev;
};

struct mt76_rate_power {
	union {
		struct {
			s8 cck[4];
			s8 ofdm[8];
			s8 ht[16];
			s8 vht[10];
		};
		s8 all[38];
	};
};

struct mt76_rx_status {
	struct mt76_wcid *wcid;

	unsigned long reorder_time;

	s8 chain_signal[IEEE80211_MAX_CHAINS];
};

#define mt76_rr(dev, ...) (dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr(dev, ...) (dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
#define mt76_rmw(dev, ...) (dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_copy(dev, ...) (dev)->mt76.bus->copy(&((dev)->mt76), __VA_ARGS__)

#define mt76_set(dev, offset, val) mt76_rmw(dev, offset, 0, val)
#define mt76_clear(dev, offset, val) mt76_rmw(dev, offset, val, 0)

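/*
 * For example, mt76_set(dev, reg, BIT(0)) expands to
 * mt76_rmw(dev, reg, 0, BIT(0)): with the usual (read & ~mask) | val
 * read-modify-write semantics the empty mask ORs the bit in, while
 * mt76_clear() passes the bits as the mask with a zero value to knock
 * them out.
 */
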
#define mt76_get_field(_dev, _reg, _field)		\
	FIELD_GET(_field, mt76_rr(_dev, _reg))

#define mt76_rmw_field(_dev, _reg, _field, _val)	\
	mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

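/*
 * Illustrative use, with a hypothetical register MT_FOO carrying an
 * 8-bit field MT_FOO_BAR defined as GENMASK(7, 0):
 *
 *	u32 bar = mt76_get_field(dev, MT_FOO, MT_FOO_BAR);
 *	mt76_rmw_field(dev, MT_FOO, MT_FOO_BAR, bar + 1);
 *
 * FIELD_PREP() shifts the value into the field's bit position and
 * FIELD_GET() extracts it back out, both keyed off the mask alone.
 */
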
#define mt76_hw(dev) (dev)->mt76.hw

bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		 int timeout);

#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)

bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		      int timeout);

#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)

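/*
 * Both helpers poll until (rr(offset) & mask) == val or the timeout
 * expires, returning false on timeout. A typical caller (sketch, with a
 * hypothetical DMA busy bit from the chip's register map):
 *
 *	if (!mt76_poll(dev, MT_DMA_CTRL, MT_DMA_CTRL_TX_BUSY, 0, 1000))
 *		return -ETIMEDOUT;
 */
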
void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);

static inline u16 mt76_chip(struct mt76_dev *dev)
{
	return dev->rev >> 16;
}

static inline u16 mt76_rev(struct mt76_dev *dev)
{
	return dev->rev & 0xffff;
}

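/*
 * dev->rev packs the chip ID into the upper 16 bits and the hardware
 * revision into the lower 16, so e.g. a rev word of 0x76120044 gives
 * mt76_chip() == 0x7612 and mt76_rev() == 0x0044.
 */
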
#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))

#define mt76_init_queues(dev) (dev)->mt76.queue_ops->init(&((dev)->mt76))
#define mt76_queue_alloc(dev, ...) (dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_add_buf(dev, ...) (dev)->mt76.queue_ops->add_buf(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...) (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...) (dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)

static inline struct mt76_channel_state *
mt76_channel_state(struct mt76_dev *dev, struct ieee80211_channel *c)
{
	struct mt76_sband *msband;
	int idx;

	if (c->band == NL80211_BAND_2GHZ)
		msband = &dev->sband_2g;
	else
		msband = &dev->sband_5g;

	idx = c - &msband->sband.channels[0];
	return &msband->chan[idx];
}

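/*
 * The per-channel state is looked up by pointer arithmetic against the
 * band's channel array, so `c` must point into msband->sband.channels[];
 * chan[] is a parallel array indexed the same way.
 */
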
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);

struct dentry *mt76_register_debugfs(struct mt76_dev *dev);

int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_dev *dev);

static inline struct ieee80211_txq *
mtxq_to_txq(struct mt76_txq *mtxq)
{
	void *ptr = mtxq;

	return container_of(ptr, struct ieee80211_txq, drv_priv);
}

static inline struct ieee80211_sta *
wcid_to_sta(struct mt76_wcid *wcid)
{
	void *ptr = wcid;

	if (!wcid || !wcid->sta)
		return NULL;

	return container_of(ptr, struct ieee80211_sta, drv_priv);
}

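/*
 * Both helpers rely on the driver-private area being embedded at
 * drv_priv inside the mac80211 objects: a mt76_txq lives in
 * ieee80211_txq->drv_priv and a mt76_wcid in ieee80211_sta->drv_priv,
 * so container_of() on the private pointer recovers the outer struct.
 */
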
int mt76_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		      struct sk_buff *skb, struct mt76_wcid *wcid,
		      struct ieee80211_sta *sta);

void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
	     struct mt76_wcid *wcid, struct sk_buff *skb);
void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
			 bool send_bar);
void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq);
void mt76_txq_schedule_all(struct mt76_dev *dev);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
				  struct ieee80211_sta *sta,
				  u16 tids, int nframes,
				  enum ieee80211_frame_release_type reason,
				  bool more_data);
void mt76_set_channel(struct mt76_dev *dev);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey);

int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
		       u16 ssn, u8 size);
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);
void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key);

void mt76_tx_free(struct mt76_dev *dev);
void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      int queue);
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
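
/*
 * RX flow (sketch): drivers feed frames into mt76_rx() from their
 * interrupt path; mt76_rx_poll_complete() then passes each queued skb
 * through mt76_rx_aggr_reorder() and hands the resulting in-order batch
 * to mt76_rx_complete() for delivery to mac80211.
 */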