/*
 * Marvell Wireless LAN device driver: 802.11n Aggregation
 *
 * Copyright (C) 2011, Marvell International Ltd.
 *
 * This software file (the "File") is distributed by Marvell International
 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
 * (the "License").  You may use, redistribute and/or modify this File in
 * accordance with the terms and conditions of the License, a copy of which
 * is available by writing to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
 *
 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
 * ARE EXPRESSLY DISCLAIMED.  The License provides additional details about
 * this warranty disclaimer.
 */
/*
 * Creates an AMSDU subframe for aggregation into one AMSDU packet.
 *
 * The resultant AMSDU subframe format is -
 *
 * +---- ~ -----+---- ~ ------+---- ~ -----+----- ~ -----+---- ~ -----+
 * |     DA     |     SA      |   Length   | SNAP header |   MSDU     |
 * | data[0..5] | data[6..11] |            |             | data[14..] |
 * +---- ~ -----+---- ~ ------+---- ~ -----+----- ~ -----+---- ~ -----+
 * <--6-bytes--> <--6-bytes--> <--2-bytes--><--8-bytes--> <--n-bytes-->
 *
 * This function also computes the amount of padding required to make the
 * buffer length a multiple of 4 bytes.
 *
 * Data => |DA|SA|SNAP-TYPE|........    .|
 * MSDU => |DA|SA|Length|SNAP|......   ..|
 */
47 mwifiex_11n_form_amsdu_pkt(struct sk_buff
*skb_aggr
,
48 struct sk_buff
*skb_src
, int *pad
)
52 struct rfc_1042_hdr snap
= {
56 {0x00, 0x00, 0x00}, /* SNAP OUI */
57 0x0000 /* SNAP type */
59 * This field will be overwritten
60 * later with ethertype
63 struct tx_packet_hdr
*tx_header
;
65 skb_put(skb_aggr
, sizeof(*tx_header
));
67 tx_header
= (struct tx_packet_hdr
*) skb_aggr
->data
;
70 dt_offset
= 2 * ETH_ALEN
;
71 memcpy(&tx_header
->eth803_hdr
, skb_src
->data
, dt_offset
);
73 /* Copy SNAP header */
74 snap
.snap_type
= *(u16
*) ((u8
*)skb_src
->data
+ dt_offset
);
75 dt_offset
+= sizeof(u16
);
77 memcpy(&tx_header
->rfc1042_hdr
, &snap
, sizeof(struct rfc_1042_hdr
));
79 skb_pull(skb_src
, dt_offset
);
81 /* Update Length field */
82 tx_header
->eth803_hdr
.h_proto
= htons(skb_src
->len
+ LLC_SNAP_LEN
);
85 skb_put(skb_aggr
, skb_src
->len
);
86 memcpy(skb_aggr
->data
+ sizeof(*tx_header
), skb_src
->data
,
88 *pad
= (((skb_src
->len
+ LLC_SNAP_LEN
) & 3)) ? (4 - (((skb_src
->len
+
89 LLC_SNAP_LEN
)) & 3)) : 0;
90 skb_put(skb_aggr
, *pad
);
92 return skb_aggr
->len
+ *pad
;
/*
 * Adds TxPD to AMSDU header.
 *
 * Each AMSDU packet will contain one TxPD at the beginning,
 * followed by multiple AMSDU subframes.
 */
102 mwifiex_11n_form_amsdu_txpd(struct mwifiex_private
*priv
,
105 struct txpd
*local_tx_pd
;
107 skb_push(skb
, sizeof(*local_tx_pd
));
109 local_tx_pd
= (struct txpd
*) skb
->data
;
110 memset(local_tx_pd
, 0, sizeof(struct txpd
));
112 /* Original priority has been overwritten */
113 local_tx_pd
->priority
= (u8
) skb
->priority
;
114 local_tx_pd
->pkt_delay_2ms
=
115 mwifiex_wmm_compute_drv_pkt_delay(priv
, skb
);
116 local_tx_pd
->bss_num
= priv
->bss_num
;
117 local_tx_pd
->bss_type
= priv
->bss_type
;
118 /* Always zero as the data is followed by struct txpd */
119 local_tx_pd
->tx_pkt_offset
= cpu_to_le16(sizeof(struct txpd
));
120 local_tx_pd
->tx_pkt_type
= cpu_to_le16(PKT_TYPE_AMSDU
);
121 local_tx_pd
->tx_pkt_length
= cpu_to_le16(skb
->len
-
122 sizeof(*local_tx_pd
));
124 if (local_tx_pd
->tx_control
== 0)
125 /* TxCtrl set by user or default */
126 local_tx_pd
->tx_control
= cpu_to_le32(priv
->pkt_tx_ctrl
);
128 if ((GET_BSS_ROLE(priv
) == MWIFIEX_BSS_ROLE_STA
) &&
129 (priv
->adapter
->pps_uapsd_mode
)) {
130 if (true == mwifiex_check_last_packet_indication(priv
)) {
131 priv
->adapter
->tx_lock_flag
= true;
133 MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET
;
/*
 * Create aggregated packet.
 *
 * This function creates an aggregated MSDU packet, by combining buffers
 * from the RA list. Each individual buffer is encapsulated as an AMSDU
 * subframe and all such subframes are concatenated together to form the
 * AMSDU packet.
 *
 * A TxPD is also added to the front of the resultant AMSDU packets for
 * transmission. The resultant packets format is -
 *
 * +---- ~ ----+------ ~ ------+------ ~ ------+-..-+------ ~ ------+
 * |    TxPD   |AMSDU sub-frame|AMSDU sub-frame| .. |AMSDU sub-frame|
 * |           |       1       |       2       | .. |       n       |
 * +---- ~ ----+------ ~ ------+------ ~ ------+ .. +------ ~ ------+
 */
155 mwifiex_11n_aggregate_pkt(struct mwifiex_private
*priv
,
156 struct mwifiex_ra_list_tbl
*pra_list
, int headroom
,
157 int ptrindex
, unsigned long ra_list_flags
)
158 __releases(&priv
->wmm
.ra_list_spinlock
)
160 struct mwifiex_adapter
*adapter
= priv
->adapter
;
161 struct sk_buff
*skb_aggr
, *skb_src
;
162 struct mwifiex_txinfo
*tx_info_aggr
, *tx_info_src
;
164 struct mwifiex_tx_param tx_param
;
165 struct txpd
*ptx_pd
= NULL
;
167 skb_src
= skb_peek(&pra_list
->skb_head
);
169 spin_unlock_irqrestore(&priv
->wmm
.ra_list_spinlock
,
174 tx_info_src
= MWIFIEX_SKB_TXCB(skb_src
);
175 skb_aggr
= dev_alloc_skb(adapter
->tx_buf_size
);
177 dev_err(adapter
->dev
, "%s: alloc skb_aggr\n", __func__
);
178 spin_unlock_irqrestore(&priv
->wmm
.ra_list_spinlock
,
182 skb_reserve(skb_aggr
, headroom
+ sizeof(struct txpd
));
183 tx_info_aggr
= MWIFIEX_SKB_TXCB(skb_aggr
);
185 tx_info_aggr
->bss_index
= tx_info_src
->bss_index
;
186 skb_aggr
->priority
= skb_src
->priority
;
189 /* Check if AMSDU can accommodate this MSDU */
190 if (skb_tailroom(skb_aggr
) < (skb_src
->len
+ LLC_SNAP_LEN
))
193 skb_src
= skb_dequeue(&pra_list
->skb_head
);
195 pra_list
->total_pkts_size
-= skb_src
->len
;
196 pra_list
->total_pkts
--;
198 atomic_dec(&priv
->wmm
.tx_pkts_queued
);
200 spin_unlock_irqrestore(&priv
->wmm
.ra_list_spinlock
,
202 mwifiex_11n_form_amsdu_pkt(skb_aggr
, skb_src
, &pad
);
204 mwifiex_write_data_complete(adapter
, skb_src
, 0);
206 spin_lock_irqsave(&priv
->wmm
.ra_list_spinlock
, ra_list_flags
);
208 if (!mwifiex_is_ralist_valid(priv
, pra_list
, ptrindex
)) {
209 spin_unlock_irqrestore(&priv
->wmm
.ra_list_spinlock
,
214 if (skb_tailroom(skb_aggr
) < pad
) {
218 skb_put(skb_aggr
, pad
);
220 skb_src
= skb_peek(&pra_list
->skb_head
);
224 spin_unlock_irqrestore(&priv
->wmm
.ra_list_spinlock
, ra_list_flags
);
226 /* Last AMSDU packet does not need padding */
227 skb_trim(skb_aggr
, skb_aggr
->len
- pad
);
230 mwifiex_11n_form_amsdu_txpd(priv
, skb_aggr
);
231 if (GET_BSS_ROLE(priv
) == MWIFIEX_BSS_ROLE_STA
)
232 ptx_pd
= (struct txpd
*)skb_aggr
->data
;
234 skb_push(skb_aggr
, headroom
);
237 * Padding per MSDU will affect the length of next
238 * packet and hence the exact length of next packet
241 * Also, aggregation of transmission buffer, while
242 * downloading the data to the card, wont gain much
243 * on the AMSDU packets as the AMSDU packets utilizes
244 * the transmission buffer space to the maximum
245 * (adapter->tx_buf_size).
247 tx_param
.next_pkt_len
= 0;
249 ret
= adapter
->if_ops
.host_to_card(adapter
, MWIFIEX_TYPE_DATA
,
251 skb_aggr
->len
, &tx_param
);
254 spin_lock_irqsave(&priv
->wmm
.ra_list_spinlock
, ra_list_flags
);
255 if (!mwifiex_is_ralist_valid(priv
, pra_list
, ptrindex
)) {
256 spin_unlock_irqrestore(&priv
->wmm
.ra_list_spinlock
,
258 mwifiex_write_data_complete(adapter
, skb_aggr
, -1);
261 if ((GET_BSS_ROLE(priv
) == MWIFIEX_BSS_ROLE_STA
) &&
262 (adapter
->pps_uapsd_mode
) &&
263 (adapter
->tx_lock_flag
)) {
264 priv
->adapter
->tx_lock_flag
= false;
269 skb_queue_tail(&pra_list
->skb_head
, skb_aggr
);
271 pra_list
->total_pkts_size
+= skb_aggr
->len
;
272 pra_list
->total_pkts
++;
274 atomic_inc(&priv
->wmm
.tx_pkts_queued
);
276 tx_info_aggr
->flags
|= MWIFIEX_BUF_FLAG_REQUEUED_PKT
;
277 spin_unlock_irqrestore(&priv
->wmm
.ra_list_spinlock
,
279 dev_dbg(adapter
->dev
, "data: -EBUSY is returned\n");
282 adapter
->data_sent
= false;
283 dev_err(adapter
->dev
, "%s: host_to_card failed: %#x\n",
285 adapter
->dbg
.num_tx_host_to_card_failure
++;
286 mwifiex_write_data_complete(adapter
, skb_aggr
, ret
);
289 adapter
->data_sent
= false;
292 mwifiex_write_data_complete(adapter
, skb_aggr
, ret
);
298 spin_lock_irqsave(&priv
->wmm
.ra_list_spinlock
, ra_list_flags
);
299 if (mwifiex_is_ralist_valid(priv
, pra_list
, ptrindex
)) {
300 priv
->wmm
.packets_out
[ptrindex
]++;
301 priv
->wmm
.tid_tbl_ptr
[ptrindex
].ra_list_curr
= pra_list
;
303 /* Now bss_prio_cur pointer points to next node */
304 adapter
->bss_prio_tbl
[priv
->bss_priority
].bss_prio_cur
=
306 &adapter
->bss_prio_tbl
[priv
->bss_priority
]
308 struct mwifiex_bss_prio_node
, list
);
309 spin_unlock_irqrestore(&priv
->wmm
.ra_list_spinlock
,