/******************************************************************************
 *
 * Copyright(c) 2009-2012  Realtek Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * wlanfae <wlanfae@realtek.com>
 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
 * Hsinchu 300, Taiwan.
 *
 * Larry Finger <Larry.Finger@lwfinger.net>
 *
 *****************************************************************************/
#include "wifi.h"
#include "core.h"
#include "pci.h"
#include "base.h"
#include "ps.h"
#include "efuse.h"
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/module.h>

MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>");
MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PCI basic driver for rtlwifi");
static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = {
	PCI_VENDOR_ID_INTEL,
	PCI_VENDOR_ID_ATI,
	PCI_VENDOR_ID_AMD,
	PCI_VENDOR_ID_SI
};

static const u8 ac_to_hwq[] = {
	VO_QUEUE,
	VI_QUEUE,
	BE_QUEUE,
	BK_QUEUE
};
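
/*
 * Map a tx skb to the hardware queue it should be placed on: beacons and
 * management/control frames get their dedicated queues, and data frames
 * are mapped through ac_to_hwq[] from their mac80211 queue mapping.
 */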
static u8 _rtl_mac_to_hwqueue(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	__le16 fc = rtl_get_fc(skb);
	u8 queue_index = skb_get_queue_mapping(skb);

	if (unlikely(ieee80211_is_beacon(fc)))
		return BEACON_QUEUE;
	if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc))
		return MGNT_QUEUE;
	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
		if (ieee80211_is_nullfunc(fc))
			return HIGH_QUEUE;

	return ac_to_hwq[queue_index];
}
/* Update PCI dependent default settings */
static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
	u8 init_aspm;

	ppsc->reg_rfps_level = 0;
	ppsc->support_aspm = false;

	/*Update PCI ASPM setting */
	ppsc->const_amdpci_aspm = rtlpci->const_amdpci_aspm;
	switch (rtlpci->const_pci_aspm) {
	case 0:
		/*No ASPM */
		break;

	case 1:
		/*ASPM dynamically enabled/disabled. */
		ppsc->reg_rfps_level |= RT_RF_LPS_LEVEL_ASPM;
		break;

	case 2:
		/*ASPM with Clock Req dynamically enabled/disabled. */
		ppsc->reg_rfps_level |= (RT_RF_LPS_LEVEL_ASPM |
					 RT_RF_OFF_LEVL_CLK_REQ);
		break;

	case 3:
		/*
		 * Always enable ASPM and Clock Req
		 * from initialization to halt.
		 */
		ppsc->reg_rfps_level &= ~(RT_RF_LPS_LEVEL_ASPM);
		ppsc->reg_rfps_level |= (RT_RF_PS_LEVEL_ALWAYS_ASPM |
					 RT_RF_OFF_LEVL_CLK_REQ);
		break;

	case 4:
		/*
		 * Always enable ASPM without Clock Req
		 * from initialization to halt.
		 */
		ppsc->reg_rfps_level &= ~(RT_RF_LPS_LEVEL_ASPM |
					  RT_RF_OFF_LEVL_CLK_REQ);
		ppsc->reg_rfps_level |= RT_RF_PS_LEVEL_ALWAYS_ASPM;
		break;
	}

	ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_HALT_NIC;

	/*Update Radio OFF setting */
	switch (rtlpci->const_hwsw_rfoff_d3) {
	case 1:
		if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM)
			ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_ASPM;
		break;

	case 2:
		if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM)
			ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_ASPM;
		ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_HALT_NIC;
		break;

	case 3:
		ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_PCI_D3;
		break;
	}

	/*Set HW definition to determine if it supports ASPM. */
	switch (rtlpci->const_support_pciaspm) {
	case 0: {
		/*Does not support ASPM. */
		bool support_aspm = false;
		ppsc->support_aspm = support_aspm;
		break;
	}
	case 1: {
		/*Supports ASPM. */
		bool support_aspm = true;
		bool support_backdoor = true;
		ppsc->support_aspm = support_aspm;

		/*if (priv->oem_id == RT_CID_TOSHIBA &&
		   !priv->ndis_adapter.amd_l1_patch)
		   support_backdoor = false; */

		ppsc->support_backdoor = support_backdoor;
		break;
	}
	case 2:
		/*ASPM value set by chipset. */
		if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL) {
			bool support_aspm = true;
			ppsc->support_aspm = support_aspm;
		}
		break;
	default:
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "switch case not processed\n");
		break;
	}

	/* Toshiba ASPM issue: Toshiba platforms set ASPM by themselves,
	 * so we should not set ASPM in the driver. */
	pci_read_config_byte(rtlpci->pdev, 0x80, &init_aspm);
	if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8192SE &&
	    init_aspm == 0x43)
		ppsc->support_aspm = false;
}
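
/*
 * Write the device ASPM setting byte at PCI config offset 0x80; the
 * RTL8192SE is handled slightly differently from the other chips.
 */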
static bool _rtl_pci_platform_switch_device_pci_aspm(
			struct ieee80211_hw *hw, u8 value)
{
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));

	if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE)
		value |= 0x40;

	pci_write_config_byte(rtlpci->pdev, 0x80, value);

	return false;
}
/* Write 0x01 to enable the clock request, 0x0 to disable it. */
static void _rtl_pci_switch_clk_req(struct ieee80211_hw *hw, u8 value)
{
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));

	pci_write_config_byte(rtlpci->pdev, 0x81, value);

	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
		udelay(100);
}
/*Disable RTL8192SE ASPM & Disable Pci Bridge ASPM*/
static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
	u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
	/*Retrieve original configuration settings. */
	u8 linkctrl_reg = pcipriv->ndis_adapter.linkctrl_reg;
	u16 pcibridge_linkctrlreg = pcipriv->ndis_adapter.pcibridge_linkctrlreg;
	u16 aspmlevel = 0;
	u8 tmp_u1b = 0;

	if (!ppsc->support_aspm)
		return;

	if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
		RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
			 "PCI(Bridge) UNKNOWN\n");
		return;
	}

	if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) {
		RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ);
		_rtl_pci_switch_clk_req(hw, 0x0);
	}

	/* dummy read to make sure the device is in L0 state after the I/O. */
	pci_read_config_byte(rtlpci->pdev, 0x80, &tmp_u1b);

	/*Set corresponding value. */
	aspmlevel |= BIT(0) | BIT(1);
	linkctrl_reg &= ~aspmlevel;
	pcibridge_linkctrlreg &= ~(BIT(0) | BIT(1));

	_rtl_pci_platform_switch_device_pci_aspm(hw, linkctrl_reg);
	udelay(50);

	/*4 Disable Pci Bridge ASPM */
	pci_write_config_byte(rtlpci->pdev, (num4bytes << 2),
			      pcibridge_linkctrlreg);

	udelay(50);
}
/*
 * Enable RTL8192SE ASPM & enable PCI bridge ASPM for power saving.
 * We must follow the sequence: enable the RTL8192SE first, then enable
 * the PCI bridge ASPM, or the system will show a bluescreen.
 */
static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
	u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
	u16 aspmlevel;
	u8 u_pcibridge_aspmsetting;
	u8 u_device_aspmsetting;

	if (!ppsc->support_aspm)
		return;

	if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
		RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
			 "PCI(Bridge) UNKNOWN\n");
		return;
	}

	/*4 Enable Pci Bridge ASPM */
	u_pcibridge_aspmsetting =
	    pcipriv->ndis_adapter.pcibridge_linkctrlreg |
	    rtlpci->const_hostpci_aspm_setting;

	if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL)
		u_pcibridge_aspmsetting &= ~BIT(0);

	pci_write_config_byte(rtlpci->pdev, (num4bytes << 2),
			      u_pcibridge_aspmsetting);

	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
		 "PlatformEnableASPM(): Write reg[%x] = %x\n",
		 (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10),
		 u_pcibridge_aspmsetting);

	udelay(50);

	/*Get ASPM level (with/without Clock Req) */
	aspmlevel = rtlpci->const_devicepci_aspm_setting;
	u_device_aspmsetting = pcipriv->ndis_adapter.linkctrl_reg;

	/*_rtl_pci_platform_switch_device_pci_aspm(dev,*/
	/*(priv->ndis_adapter.linkctrl_reg | ASPMLevel)); */

	u_device_aspmsetting |= aspmlevel;

	_rtl_pci_platform_switch_device_pci_aspm(hw, u_device_aspmsetting);

	if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) {
		_rtl_pci_switch_clk_req(hw, (ppsc->reg_rfps_level &
					     RT_RF_OFF_LEVL_CLK_REQ) ? 1 : 0);
		RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ);
	}
	udelay(100);
}
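
/*
 * Probe the AMD PCI bridge (config offsets 0xe0/0xe4) to determine
 * whether the AMD L1 workaround ("amd_l1_patch") is in effect.
 */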
static bool rtl_pci_get_amd_l1_patch(struct ieee80211_hw *hw)
{
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	bool status = false;
	u8 offset_e0;
	unsigned int offset_e4;

	pci_write_config_byte(rtlpci->pdev, 0xe0, 0xa0);

	pci_read_config_byte(rtlpci->pdev, 0xe0, &offset_e0);

	if (offset_e0 == 0xA0) {
		pci_read_config_dword(rtlpci->pdev, 0xe4, &offset_e4);
		if (offset_e4 & BIT(23))
			status = true;
	}

	return status;
}
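
/*
 * Look through the global list of rtlwifi devices for a "buddy" on the
 * same PCI bus/device but a different function (the second MAC of a
 * dual-MAC adapter) and report it through *buddy_priv.
 */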
static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
				     struct rtl_priv **buddy_priv)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
	bool find_buddy_priv = false;
	struct rtl_priv *tpriv = NULL;
	struct rtl_pci_priv *tpcipriv = NULL;

	if (!list_empty(&rtlpriv->glb_var->glb_priv_list)) {
		list_for_each_entry(tpriv, &rtlpriv->glb_var->glb_priv_list,
				    list) {
			if (tpriv) {
				tpcipriv = (struct rtl_pci_priv *)tpriv->priv;
				RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
					 "pcipriv->ndis_adapter.funcnumber %x\n",
					 pcipriv->ndis_adapter.funcnumber);
				RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
					 "tpcipriv->ndis_adapter.funcnumber %x\n",
					 tpcipriv->ndis_adapter.funcnumber);

				if ((pcipriv->ndis_adapter.busnumber ==
				     tpcipriv->ndis_adapter.busnumber) &&
				    (pcipriv->ndis_adapter.devnumber ==
				     tpcipriv->ndis_adapter.devnumber) &&
				    (pcipriv->ndis_adapter.funcnumber !=
				     tpcipriv->ndis_adapter.funcnumber)) {
					find_buddy_priv = true;
					break;
				}
			}
		}
	}

	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
		 "find_buddy_priv %d\n", find_buddy_priv);

	if (find_buddy_priv)
		*buddy_priv = tpriv;

	return find_buddy_priv;
}
static void rtl_pci_get_linkcontrol_field(struct ieee80211_hw *hw)
{
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
	u8 capabilityoffset = pcipriv->ndis_adapter.pcibridge_pciehdr_offset;
	u8 linkctrl_reg;
	u8 num4bbytes;

	num4bbytes = (capabilityoffset + 0x10) / 4;

	/*Read Link Control Register */
	pci_read_config_byte(rtlpci->pdev, (num4bbytes << 2), &linkctrl_reg);

	pcipriv->ndis_adapter.pcibridge_linkctrlreg = linkctrl_reg;
}
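
/*
 * Cache the PCIe Link Control register of the device itself and tweak a
 * few vendor-specific configuration bytes.
 */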
static void rtl_pci_parse_configuration(struct pci_dev *pdev,
					struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
	u8 tmp;
	u16 linkctrl_reg;

	/*Link Control Register */
	pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &linkctrl_reg);
	pcipriv->ndis_adapter.linkctrl_reg = (u8)linkctrl_reg;

	RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Link Control Register =%x\n",
		 pcipriv->ndis_adapter.linkctrl_reg);

	pci_read_config_byte(pdev, 0x98, &tmp);
	tmp |= BIT(4);
	pci_write_config_byte(pdev, 0x98, tmp);

	tmp = 0x17;
	pci_write_config_byte(pdev, 0x70f, tmp);
}
static void rtl_pci_init_aspm(struct ieee80211_hw *hw)
{
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));

	_rtl_pci_update_default_setting(hw);

	if (ppsc->reg_rfps_level & RT_RF_PS_LEVEL_ALWAYS_ASPM) {
		/*Always enable ASPM & Clock Req. */
		rtl_pci_enable_aspm(hw);
		RT_SET_PS_LEVEL(ppsc, RT_RF_PS_LEVEL_ALWAYS_ASPM);
	}
}
static void _rtl_pci_io_handler_init(struct device *dev,
				     struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtlpriv->io.dev = dev;

	rtlpriv->io.write8_async = pci_write8_async;
	rtlpriv->io.write16_async = pci_write16_async;
	rtlpriv->io.write32_async = pci_write32_async;

	rtlpriv->io.read8_sync = pci_read8_sync;
	rtlpriv->io.read16_sync = pci_read16_sync;
	rtlpriv->io.read32_sync = pci_read32_sync;
}
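
/*
 * For early-mode aggregation: walk the per-TID wait queue and record the
 * length of each queued AMPDU sub-frame (plus FCS/ICV overhead) in the
 * tcb_desc so the descriptor filler can announce them in advance.
 */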
static bool _rtl_update_earlymode_info(struct ieee80211_hw *hw,
		struct sk_buff *skb, struct rtl_tcb_desc *tcb_desc, u8 tid)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct sk_buff *next_skb;
	u8 additionlen = FCS_LEN;

	/* additional length: open is 4, wep/tkip is 8, aes is 12 */
	if (info->control.hw_key)
		additionlen += info->control.hw_key->icv_len;

	/* At most 6 skbs are aggregated */
	tcb_desc->empkt_num = 0;
	spin_lock_bh(&rtlpriv->locks.waitq_lock);
	skb_queue_walk(&rtlpriv->mac80211.skb_waitq[tid], next_skb) {
		struct ieee80211_tx_info *next_info;

		next_info = IEEE80211_SKB_CB(next_skb);
		if (next_info->flags & IEEE80211_TX_CTL_AMPDU) {
			tcb_desc->empkt_len[tcb_desc->empkt_num] =
				next_skb->len + additionlen;
			tcb_desc->empkt_num++;
		} else {
			break;
		}

		if (skb_queue_is_last(&rtlpriv->mac80211.skb_waitq[tid],
				      next_skb))
			break;

		if (tcb_desc->empkt_num >= rtlhal->max_earlymode_num)
			break;
	}
	spin_unlock_bh(&rtlpriv->locks.waitq_lock);

	return true;
}
/* just for early mode now */
static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct sk_buff *skb = NULL;
	struct ieee80211_tx_info *info = NULL;
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	int tid;

	if (!rtlpriv->rtlhal.earlymode_enable)
		return;

	if (rtlpriv->dm.supp_phymode_switch &&
	    (rtlpriv->easy_concurrent_ctl.switch_in_process ||
	    (rtlpriv->buddy_priv &&
	    rtlpriv->buddy_priv->easy_concurrent_ctl.switch_in_process)))
		return;

	/* we just use early mode for BE/BK/VI/VO */
	for (tid = 7; tid >= 0; tid--) {
		u8 hw_queue = ac_to_hwq[rtl_tid_to_ac(tid)];
		struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
		while (!mac->act_scanning &&
		       rtlpriv->psc.rfpwr_state == ERFON) {
			struct rtl_tcb_desc tcb_desc;
			memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));

			spin_lock_bh(&rtlpriv->locks.waitq_lock);
			if (!skb_queue_empty(&mac->skb_waitq[tid]) &&
			    (ring->entries - skb_queue_len(&ring->queue) >
			     rtlhal->max_earlymode_num)) {
				skb = skb_dequeue(&mac->skb_waitq[tid]);
			} else {
				spin_unlock_bh(&rtlpriv->locks.waitq_lock);
				break;
			}
			spin_unlock_bh(&rtlpriv->locks.waitq_lock);

			/* Some frames cannot use early mode, e.g.
			 * multicast/broadcast/no-QoS data */
			info = IEEE80211_SKB_CB(skb);
			if (info->flags & IEEE80211_TX_CTL_AMPDU)
				_rtl_update_earlymode_info(hw, skb,
							   &tcb_desc, tid);

			rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc);
		}
	}
}
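
/*
 * Reclaim completed tx descriptors on the given ring: unmap the DMA
 * buffer, report tx status to mac80211 and wake the queue again once
 * enough descriptors have been freed.
 */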
static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));

	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];

	while (skb_queue_len(&ring->queue)) {
		struct rtl_tx_desc *entry = &ring->desc[ring->idx];
		struct sk_buff *skb;
		struct ieee80211_tx_info *info;
		__le16 fc;
		u8 tid;

		u8 own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) entry, true,
							  HW_DESC_OWN);

		/*beacon packet will only use the first
		 *descriptor by default, and the own bit may not
		 *be cleared by the hardware
		 */
		if (own)
			return;
		ring->idx = (ring->idx + 1) % ring->entries;

		skb = __skb_dequeue(&ring->queue);
		pci_unmap_single(rtlpci->pdev,
				 rtlpriv->cfg->ops->
					 get_desc((u8 *) entry, true,
						  HW_DESC_TXBUFF_ADDR),
				 skb->len, PCI_DMA_TODEVICE);

		/* remove early mode header */
		if (rtlpriv->rtlhal.earlymode_enable)
			skb_pull(skb, EM_HDR_LEN);

		RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_TRACE,
			 "new ring->idx:%d, free: skb_queue_len:%d, free: seq:%x\n",
			 ring->idx,
			 skb_queue_len(&ring->queue),
			 *(u16 *) (skb->data + 22));

		if (prio == TXCMD_QUEUE) {
			dev_kfree_skb(skb);
			goto tx_status_ok;
		}

		/* for sw LPS, just after the NULL skb is sent out we can be
		 * sure the AP knows we are sleeping, so we should not let
		 * the rf sleep
		 */
		fc = rtl_get_fc(skb);
		if (ieee80211_is_nullfunc(fc)) {
			if (ieee80211_has_pm(fc)) {
				rtlpriv->mac80211.offchan_delay = true;
				rtlpriv->psc.state_inap = true;
			} else {
				rtlpriv->psc.state_inap = false;
			}
		}
		if (ieee80211_is_action(fc)) {
			struct ieee80211_mgmt *action_frame =
				(struct ieee80211_mgmt *)skb->data;
			if (action_frame->u.action.u.ht_smps.action ==
			    WLAN_HT_ACTION_SMPS) {
				dev_kfree_skb(skb);
				goto tx_status_ok;
			}
		}

		/* update tid tx pkt num */
		tid = rtl_get_tid(skb);
		if (tid <= 7)
			rtlpriv->link_info.tidtx_inperiod[tid]++;

		info = IEEE80211_SKB_CB(skb);
		ieee80211_tx_info_clear_status(info);

		info->flags |= IEEE80211_TX_STAT_ACK;
		/*info->status.rates[0].count = 1; */

		ieee80211_tx_status_irqsafe(hw, skb);

		if ((ring->entries - skb_queue_len(&ring->queue)) == 2) {
			RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
				 "more desc left, wake skb_queue@%d, ring->idx = %d, skb_queue_len = 0x%d\n",
				 prio, ring->idx,
				 skb_queue_len(&ring->queue));

			ieee80211_wake_queue(hw,
					     skb_get_queue_mapping(skb));
		}
tx_status_ok:
		skb = NULL;
	}

	if (((rtlpriv->link_info.num_rx_inperiod +
	      rtlpriv->link_info.num_tx_inperiod) > 8) ||
	      (rtlpriv->link_info.num_rx_inperiod > 2)) {
		rtlpriv->enter_ps = false;
		schedule_work(&rtlpriv->works.lps_change_work);
	}
}
static void _rtl_receive_one(struct ieee80211_hw *hw, struct sk_buff *skb,
			     struct ieee80211_rx_status rx_status)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
	__le16 fc = rtl_get_fc(skb);
	bool unicast = false;
	struct sk_buff *uskb = NULL;
	u8 *pdata;

	memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));

	if (is_broadcast_ether_addr(hdr->addr1)) {
		;/*TODO*/
	} else if (is_multicast_ether_addr(hdr->addr1)) {
		;/*TODO*/
	} else {
		unicast = true;
		rtlpriv->stats.rxbytesunicast += skb->len;
	}

	if (ieee80211_is_data(fc)) {
		rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);

		if (unicast)
			rtlpriv->link_info.num_rx_inperiod++;
	}

	/* static bcn for roaming */
	rtl_beacon_statistic(hw, skb);
	rtl_p2p_info(hw, (void *)skb->data, skb->len);

	/* for sw lps */
	rtl_swlps_beacon(hw, (void *)skb->data, skb->len);
	rtl_recognize_peer(hw, (void *)skb->data, skb->len);
	if ((rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP) &&
	    (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G) &&
	    (ieee80211_is_beacon(fc) || ieee80211_is_probe_resp(fc)))
		return;

	if (unlikely(!rtl_action_proc(hw, skb, false)))
		return;

	uskb = dev_alloc_skb(skb->len + 128);
	if (!uskb)
		return;		/* exit if allocation failed */
	memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status, sizeof(rx_status));
	pdata = (u8 *)skb_put(uskb, skb->len);
	memcpy(pdata, skb->data, skb->len);

	ieee80211_rx_irqsafe(hw, uskb);
}
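
/*
 * Drain the rx ring: hand every completed buffer to _rtl_receive_one(),
 * attach a fresh skb to the descriptor and give ownership back to the
 * hardware.
 */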
static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	int rx_queue_idx = RTL_PCI_RX_MPDU_QUEUE;

	struct ieee80211_rx_status rx_status = { 0 };
	unsigned int count = rtlpci->rxringcount;
	u8 own;
	u8 tmp_one;
	u32 bufferaddress;

	struct rtl_stats stats = {
		.signal = 0,
		.rate = 0,
	};
	int index = rtlpci->rx_ring[rx_queue_idx].idx;

	if (rtlpci->driver_is_goingto_unload)
		return;
	/*RX NORMAL PKT */
	while (count--) {
		/*rx descriptor */
		struct rtl_rx_desc *pdesc = &rtlpci->rx_ring[rx_queue_idx].desc[
				index];
		/*rx pkt */
		struct sk_buff *skb = rtlpci->rx_ring[rx_queue_idx].rx_buf[
				index];
		struct sk_buff *new_skb = NULL;

		own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
						       false, HW_DESC_OWN);

		/*wait data to be filled by hardware */
		if (own)
			break;

		rtlpriv->cfg->ops->query_rx_desc(hw, &stats,
						 &rx_status,
						 (u8 *) pdesc, skb);

		if (stats.crc || stats.hwerror)
			goto done;

		new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
		if (unlikely(!new_skb)) {
			RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV), DBG_DMESG,
				 "can't alloc skb for rx\n");
			goto done;
		}
		kmemleak_not_leak(new_skb);

		pci_unmap_single(rtlpci->pdev,
				 *((dma_addr_t *) skb->cb),
				 rtlpci->rxbuffersize,
				 PCI_DMA_FROMDEVICE);

		skb_put(skb, rtlpriv->cfg->ops->get_desc((u8 *) pdesc, false,
							 HW_DESC_RXPKT_LEN));
		skb_reserve(skb, stats.rx_drvinfo_size + stats.rx_bufshift);

		/*
		 * NOTICE: the FCS must not be stripped here
		 * (skb_trim(skb, skb->len - 4)); that is done in the
		 * mac80211 code, and doing it here makes secured DHCP fail.
		 */

		_rtl_receive_one(hw, skb, rx_status);

		if (((rtlpriv->link_info.num_rx_inperiod +
		      rtlpriv->link_info.num_tx_inperiod) > 8) ||
		      (rtlpriv->link_info.num_rx_inperiod > 2)) {
			rtlpriv->enter_ps = false;
			schedule_work(&rtlpriv->works.lps_change_work);
		}

		dev_kfree_skb_any(skb);
		skb = new_skb;

		rtlpci->rx_ring[rx_queue_idx].rx_buf[index] = skb;
		*((dma_addr_t *) skb->cb) =
		    pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
				   rtlpci->rxbuffersize,
				   PCI_DMA_FROMDEVICE);

done:
		bufferaddress = (*((dma_addr_t *)skb->cb));
		if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress))
			return;
		tmp_one = 1;
		rtlpriv->cfg->ops->set_desc((u8 *) pdesc, false,
					    HW_DESC_RXBUFF_ADDR,
					    (u8 *)&bufferaddress);
		rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false,
					    HW_DESC_RXPKT_LEN,
					    (u8 *)&rtlpci->rxbuffersize);

		if (index == rtlpci->rxringcount - 1)
			rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false,
						    HW_DESC_RXERO,
						    (u8 *)&tmp_one);

		rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false, HW_DESC_RXOWN,
					    (u8 *)&tmp_one);

		index = (index + 1) % rtlpci->rxringcount;
	}

	rtlpci->rx_ring[rx_queue_idx].idx = index;
}
static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
{
	struct ieee80211_hw *hw = dev_id;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	unsigned long flags;
	u32 inta = 0;
	u32 intb = 0;
	irqreturn_t ret = IRQ_HANDLED;

	spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);

	/*read ISR: 4/8bytes */
	rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb);

	/*Shared IRQ or HW disappeared */
	if (!inta || inta == 0xffff) {
		ret = IRQ_NONE;
		goto done;
	}

	/*<1> beacon related */
	if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK]) {
		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 "beacon ok interrupt!\n");
	}

	if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TBDER])) {
		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 "beacon err interrupt!\n");
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_BDOK]) {
		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "beacon interrupt!\n");
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_BCNINT]) {
		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 "prepare beacon for interrupt!\n");
		tasklet_schedule(&rtlpriv->works.irq_prepare_bcn_tasklet);
	}

	if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TXFOVW]))
		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "IMR_TXFOVW!\n");

	if (inta & rtlpriv->cfg->maps[RTL_IMR_MGNTDOK]) {
		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 "Manage ok interrupt!\n");
		_rtl_pci_tx_isr(hw, MGNT_QUEUE);
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_HIGHDOK]) {
		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 "HIGH_QUEUE ok interrupt!\n");
		_rtl_pci_tx_isr(hw, HIGH_QUEUE);
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_BKDOK]) {
		rtlpriv->link_info.num_tx_inperiod++;

		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 "BK Tx OK interrupt!\n");
		_rtl_pci_tx_isr(hw, BK_QUEUE);
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_BEDOK]) {
		rtlpriv->link_info.num_tx_inperiod++;

		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 "BE TX OK interrupt!\n");
		_rtl_pci_tx_isr(hw, BE_QUEUE);
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_VIDOK]) {
		rtlpriv->link_info.num_tx_inperiod++;

		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 "VI TX OK interrupt!\n");
		_rtl_pci_tx_isr(hw, VI_QUEUE);
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_VODOK]) {
		rtlpriv->link_info.num_tx_inperiod++;

		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 "Vo TX OK interrupt!\n");
		_rtl_pci_tx_isr(hw, VO_QUEUE);
	}

	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) {
		if (inta & rtlpriv->cfg->maps[RTL_IMR_COMDOK]) {
			rtlpriv->link_info.num_tx_inperiod++;

			RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
				 "CMD TX OK interrupt!\n");
			_rtl_pci_tx_isr(hw, TXCMD_QUEUE);
		}
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) {
		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "Rx ok interrupt!\n");
		_rtl_pci_rx_interrupt(hw);
	}

	if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
			 "rx descriptor unavailable!\n");
		_rtl_pci_rx_interrupt(hw);
	}

	if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "rx overflow !\n");
		_rtl_pci_rx_interrupt(hw);
	}

	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723AE) {
		if (inta & rtlpriv->cfg->maps[RTL_IMR_C2HCMD]) {
			RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
				 "firmware interrupt!\n");
			queue_delayed_work(rtlpriv->works.rtl_wq,
					   &rtlpriv->works.fwevt_wq, 0);
		}
	}

	if (rtlpriv->rtlhal.earlymode_enable)
		tasklet_schedule(&rtlpriv->works.irq_tasklet);

done:
	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
	return ret;
}
static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
{
	_rtl_pci_tx_chk_waitq(hw);
}
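
/*
 * Beacon tasklet: free the previously queued beacon, fetch a new one from
 * mac80211 and load it into the first descriptor of the beacon ring.
 */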
static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl8192_tx_ring *ring = NULL;
	struct ieee80211_hdr *hdr = NULL;
	struct ieee80211_tx_info *info = NULL;
	struct sk_buff *pskb = NULL;
	struct rtl_tx_desc *pdesc = NULL;
	struct rtl_tcb_desc tcb_desc;
	u8 temp_one = 1;

	memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
	ring = &rtlpci->tx_ring[BEACON_QUEUE];
	pskb = __skb_dequeue(&ring->queue);
	if (pskb) {
		struct rtl_tx_desc *entry = &ring->desc[ring->idx];
		pci_unmap_single(rtlpci->pdev, rtlpriv->cfg->ops->get_desc(
				 (u8 *) entry, true, HW_DESC_TXBUFF_ADDR),
				 pskb->len, PCI_DMA_TODEVICE);
		kfree_skb(pskb);
	}

	/*NB: the beacon data buffer must be 32-bit aligned. */
	pskb = ieee80211_beacon_get(hw, mac->vif);
	if (pskb == NULL)
		return;
	hdr = rtl_get_hdr(pskb);
	info = IEEE80211_SKB_CB(pskb);
	pdesc = &ring->desc[0];
	rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
					info, NULL, pskb,
					BEACON_QUEUE, &tcb_desc);

	__skb_queue_tail(&ring->queue, pskb);

	rtlpriv->cfg->ops->set_desc((u8 *) pdesc, true, HW_DESC_OWN,
				    &temp_one);
}
static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw)
{
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	u8 i;

	for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
		rtlpci->txringcount[i] = RT_TXDESC_NUM;

	/*
	 *we just alloc 2 desc for beacon queue,
	 *because we just need first desc in hw beacon.
	 */
	rtlpci->txringcount[BEACON_QUEUE] = 2;

	/*
	 *The BE queue needs more descriptors for performance;
	 *otherwise a "no more tx desc" condition can occur and
	 *may cause a mac80211 memory leak.
	 */
	rtlpci->txringcount[BE_QUEUE] = RT_TXDESC_NUM_BE_QUEUE;

	rtlpci->rxbuffersize = 9100;	/*2048/1024; */
	rtlpci->rxringcount = RTL_PCI_MAX_RX_COUNT;	/*64; */
}
static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
				 struct pci_dev *pdev)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));

	rtlpci->up_first_time = true;
	rtlpci->being_init_adapter = false;

	rtlhal->hw = hw;
	rtlpci->pdev = pdev;

	/*Tx/Rx related var */
	_rtl_pci_init_trx_var(hw);

	/*IBSS*/
	mac->beacon_interval = 100;

	mac->min_space_cfg = 0;
	mac->max_mss_density = 0;
	/*set sane AMPDU defaults */
	mac->current_ampdu_density = 7;
	mac->current_ampdu_factor = 3;

	rtlpci->acm_method = eAcmWay2_SW;

	tasklet_init(&rtlpriv->works.irq_tasklet,
		     (void (*)(unsigned long))_rtl_pci_irq_tasklet,
		     (unsigned long)hw);
	tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet,
		     (void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet,
		     (unsigned long)hw);
	INIT_WORK(&rtlpriv->works.lps_change_work,
		  rtl_lps_change_work_callback);
}
static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
				 unsigned int prio, unsigned int entries)
{
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_tx_desc *ring;
	dma_addr_t dma;
	u32 nextdescaddress;
	int i;

	ring = pci_alloc_consistent(rtlpci->pdev,
				    sizeof(*ring) * entries, &dma);
	if (!ring || (unsigned long)ring & 0xFF) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "Cannot allocate TX ring (prio = %d)\n", prio);
		return -ENOMEM;
	}

	memset(ring, 0, sizeof(*ring) * entries);
	rtlpci->tx_ring[prio].desc = ring;
	rtlpci->tx_ring[prio].dma = dma;
	rtlpci->tx_ring[prio].idx = 0;
	rtlpci->tx_ring[prio].entries = entries;
	skb_queue_head_init(&rtlpci->tx_ring[prio].queue);

	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "queue:%d, ring_addr:%p\n",
		 prio, ring);

	for (i = 0; i < entries; i++) {
		nextdescaddress = (u32) dma +
				  ((i + 1) % entries) *
				  sizeof(*ring);

		rtlpriv->cfg->ops->set_desc((u8 *)&(ring[i]),
					    true, HW_DESC_TX_NEXTDESC_ADDR,
					    (u8 *)&nextdescaddress);
	}

	return 0;
}
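
/*
 * Allocate the rx descriptor rings, pre-allocate one receive skb per
 * descriptor and map it for DMA; the mapping address is kept in skb->cb
 * so it can be retrieved later for pci_unmap_single().
 */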
static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
{
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_rx_desc *entry = NULL;
	int i, rx_queue_idx;
	u8 tmp_one = 1;

	/*
	 *rx_queue_idx 0:RX_MPDU_QUEUE
	 *rx_queue_idx 1:RX_CMD_QUEUE
	 */
	for (rx_queue_idx = 0; rx_queue_idx < RTL_PCI_MAX_RX_QUEUE;
	     rx_queue_idx++) {
		rtlpci->rx_ring[rx_queue_idx].desc =
		    pci_alloc_consistent(rtlpci->pdev,
					 sizeof(*rtlpci->rx_ring[rx_queue_idx].
						desc) * rtlpci->rxringcount,
					 &rtlpci->rx_ring[rx_queue_idx].dma);

		if (!rtlpci->rx_ring[rx_queue_idx].desc ||
		    (unsigned long)rtlpci->rx_ring[rx_queue_idx].desc & 0xFF) {
			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
				 "Cannot allocate RX ring\n");
			return -ENOMEM;
		}

		memset(rtlpci->rx_ring[rx_queue_idx].desc, 0,
		       sizeof(*rtlpci->rx_ring[rx_queue_idx].desc) *
		       rtlpci->rxringcount);

		rtlpci->rx_ring[rx_queue_idx].idx = 0;

		/* If amsdu_8k is disabled, set buffersize to 4096. This
		 * change will reduce memory fragmentation.
		 */
		if (rtlpci->rxbuffersize > 4096 &&
		    rtlpriv->rtlhal.disable_amsdu_8k)
			rtlpci->rxbuffersize = 4096;

		for (i = 0; i < rtlpci->rxringcount; i++) {
			struct sk_buff *skb =
			    dev_alloc_skb(rtlpci->rxbuffersize);
			u32 bufferaddress;

			if (!skb)
				return 0;
			kmemleak_not_leak(skb);
			entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];

			/*skb->dev = dev; */

			rtlpci->rx_ring[rx_queue_idx].rx_buf[i] = skb;

			/*
			 *just set skb->cb to mapping addr
			 *for pci_unmap_single use
			 */
			*((dma_addr_t *) skb->cb) =
			    pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
					   rtlpci->rxbuffersize,
					   PCI_DMA_FROMDEVICE);

			bufferaddress = (*((dma_addr_t *)skb->cb));
			if (pci_dma_mapping_error(rtlpci->pdev,
						  bufferaddress)) {
				dev_kfree_skb_any(skb);
				return 1;
			}
			rtlpriv->cfg->ops->set_desc((u8 *)entry, false,
						    HW_DESC_RXBUFF_ADDR,
						    (u8 *)&bufferaddress);
			rtlpriv->cfg->ops->set_desc((u8 *)entry, false,
						    HW_DESC_RXPKT_LEN,
						    (u8 *)&rtlpci->
						    rxbuffersize);
			rtlpriv->cfg->ops->set_desc((u8 *) entry, false,
						    HW_DESC_RXOWN,
						    (u8 *)&tmp_one);
		}

		rtlpriv->cfg->ops->set_desc((u8 *) entry, false,
					    HW_DESC_RXERO, &tmp_one);
	}
	return 0;
}
static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw,
				  unsigned int prio)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];

	while (skb_queue_len(&ring->queue)) {
		struct rtl_tx_desc *entry = &ring->desc[ring->idx];
		struct sk_buff *skb = __skb_dequeue(&ring->queue);

		pci_unmap_single(rtlpci->pdev,
				 rtlpriv->cfg->
					 ops->get_desc((u8 *) entry, true,
						       HW_DESC_TXBUFF_ADDR),
				 skb->len, PCI_DMA_TODEVICE);
		kfree_skb(skb);
		ring->idx = (ring->idx + 1) % ring->entries;
	}

	pci_free_consistent(rtlpci->pdev,
			    sizeof(*ring->desc) * ring->entries,
			    ring->desc, ring->dma);
	ring->desc = NULL;
}
static void _rtl_pci_free_rx_ring(struct rtl_pci *rtlpci)
{
	int i, rx_queue_idx;

	/*rx_queue_idx 0:RX_MPDU_QUEUE */
	/*rx_queue_idx 1:RX_CMD_QUEUE */
	for (rx_queue_idx = 0; rx_queue_idx < RTL_PCI_MAX_RX_QUEUE;
	     rx_queue_idx++) {
		for (i = 0; i < rtlpci->rxringcount; i++) {
			struct sk_buff *skb =
			    rtlpci->rx_ring[rx_queue_idx].rx_buf[i];
			if (!skb)
				continue;

			pci_unmap_single(rtlpci->pdev,
					 *((dma_addr_t *) skb->cb),
					 rtlpci->rxbuffersize,
					 PCI_DMA_FROMDEVICE);
			kfree_skb(skb);
		}

		if (rtlpci->rx_ring[rx_queue_idx].desc) {
			pci_free_consistent(rtlpci->pdev,
					    sizeof(*rtlpci->
						   rx_ring[rx_queue_idx].desc) *
					    rtlpci->rxringcount,
					    rtlpci->rx_ring[rx_queue_idx].desc,
					    rtlpci->rx_ring[rx_queue_idx].dma);
			rtlpci->rx_ring[rx_queue_idx].desc = NULL;
		}
	}
}
static int _rtl_pci_init_trx_ring(struct ieee80211_hw *hw)
{
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	int ret;
	int i;

	ret = _rtl_pci_init_rx_ring(hw);
	if (ret)
		return ret;

	for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
		ret = _rtl_pci_init_tx_ring(hw, i,
					    rtlpci->txringcount[i]);
		if (ret)
			goto err_free_rings;
	}

	return 0;

err_free_rings:
	_rtl_pci_free_rx_ring(rtlpci);

	for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
		if (rtlpci->tx_ring[i].desc)
			_rtl_pci_free_tx_ring(hw, i);

	return 1;
}
static int _rtl_pci_deinit_trx_ring(struct ieee80211_hw *hw)
{
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	u32 i;

	/*free rx rings */
	_rtl_pci_free_rx_ring(rtlpci);

	/*free tx rings */
	for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
		_rtl_pci_free_tx_ring(hw, i);

	return 0;
}
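
/*
 * Called around hw resets: give all rx descriptors back to the hardware,
 * drop any tx frames still pending in the rings and rewind the ring
 * indices to the first descriptor.
 */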
int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	int i, rx_queue_idx;
	unsigned long flags;
	u8 tmp_one = 1;

	/*rx_queue_idx 0:RX_MPDU_QUEUE */
	/*rx_queue_idx 1:RX_CMD_QUEUE */
	for (rx_queue_idx = 0; rx_queue_idx < RTL_PCI_MAX_RX_QUEUE;
	     rx_queue_idx++) {
		/*
		 *force the rx_ring[RX_MPDU_QUEUE/
		 *RX_CMD_QUEUE].idx to the first one
		 */
		if (rtlpci->rx_ring[rx_queue_idx].desc) {
			struct rtl_rx_desc *entry = NULL;

			for (i = 0; i < rtlpci->rxringcount; i++) {
				entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];
				rtlpriv->cfg->ops->set_desc((u8 *) entry,
							    false,
							    HW_DESC_RXOWN,
							    (u8 *)&tmp_one);
			}
			rtlpci->rx_ring[rx_queue_idx].idx = 0;
		}
	}

	/*
	 *after reset, release previous pending packet,
	 *and force the tx idx to the first one
	 */
	for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
		if (rtlpci->tx_ring[i].desc) {
			struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[i];

			while (skb_queue_len(&ring->queue)) {
				struct rtl_tx_desc *entry;
				struct sk_buff *skb;

				spin_lock_irqsave(&rtlpriv->locks.irq_th_lock,
						  flags);
				entry = &ring->desc[ring->idx];
				skb = __skb_dequeue(&ring->queue);
				pci_unmap_single(rtlpci->pdev,
						 rtlpriv->cfg->ops->
							 get_desc((u8 *)entry,
							 true,
							 HW_DESC_TXBUFF_ADDR),
						 skb->len, PCI_DMA_TODEVICE);
				ring->idx = (ring->idx + 1) % ring->entries;
				spin_unlock_irqrestore(&rtlpriv->locks.
						       irq_th_lock, flags);
				kfree_skb(skb);
			}
			ring->idx = 0;
		}
	}

	return 0;
}
*hw
,
1379 struct ieee80211_sta
*sta
,
1380 struct sk_buff
*skb
)
1382 struct rtl_priv
*rtlpriv
= rtl_priv(hw
);
1383 struct rtl_sta_info
*sta_entry
= NULL
;
1384 u8 tid
= rtl_get_tid(skb
);
1385 __le16 fc
= rtl_get_fc(skb
);
1389 sta_entry
= (struct rtl_sta_info
*)sta
->drv_priv
;
1391 if (!rtlpriv
->rtlhal
.earlymode_enable
)
1393 if (ieee80211_is_nullfunc(fc
))
1395 if (ieee80211_is_qos_nullfunc(fc
))
1397 if (ieee80211_is_pspoll(fc
))
1399 if (sta_entry
->tids
[tid
].agg
.agg_state
!= RTL_AGG_OPERATIONAL
)
1401 if (_rtl_mac_to_hwqueue(hw
, skb
) > VO_QUEUE
)
1406 /* maybe every tid should be checked */
1407 if (!rtlpriv
->link_info
.higher_busytxtraffic
[tid
])
1410 spin_lock_bh(&rtlpriv
->locks
.waitq_lock
);
1411 skb_queue_tail(&rtlpriv
->mac80211
.skb_waitq
[tid
], skb
);
1412 spin_unlock_bh(&rtlpriv
->locks
.waitq_lock
);
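
/*
 * Main tx entry point used by the core driver: pick the hardware queue,
 * fill the next free descriptor, hand the skb to the ring and kick the
 * hardware via tx_polling(); the queue is stopped when it is nearly full.
 */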
static int rtl_pci_tx(struct ieee80211_hw *hw,
		      struct ieee80211_sta *sta,
		      struct sk_buff *skb,
		      struct rtl_tcb_desc *ptcb_desc)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_sta_info *sta_entry = NULL;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct rtl8192_tx_ring *ring;
	struct rtl_tx_desc *pdesc;
	u8 idx;
	u8 hw_queue = _rtl_mac_to_hwqueue(hw, skb);
	unsigned long flags;
	struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
	__le16 fc = rtl_get_fc(skb);
	u8 *pda_addr = hdr->addr1;
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	u8 own;
	u8 temp_one = 1;
	u8 tid = 0;
	u16 seq_number = 0;

	if (ieee80211_is_mgmt(fc))
		rtl_tx_mgmt_proc(hw, skb);

	if (rtlpriv->psc.sw_ps_enabled) {
		if (ieee80211_is_data(fc) && !ieee80211_is_nullfunc(fc) &&
		    !ieee80211_has_pm(fc))
			hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
	}

	rtl_action_proc(hw, skb, true);

	if (is_multicast_ether_addr(pda_addr))
		rtlpriv->stats.txbytesmulticast += skb->len;
	else if (is_broadcast_ether_addr(pda_addr))
		rtlpriv->stats.txbytesbroadcast += skb->len;
	else
		rtlpriv->stats.txbytesunicast += skb->len;

	spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
	ring = &rtlpci->tx_ring[hw_queue];
	if (hw_queue != BEACON_QUEUE)
		idx = (ring->idx + skb_queue_len(&ring->queue)) %
		      ring->entries;
	else
		idx = 0;

	pdesc = &ring->desc[idx];
	own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
					       true, HW_DESC_OWN);

	if ((own == 1) && (hw_queue != BEACON_QUEUE)) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
			 "No more TX desc@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%d\n",
			 hw_queue, ring->idx, idx,
			 skb_queue_len(&ring->queue));

		spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
		return skb->len;
	}

	if (ieee80211_is_data_qos(fc)) {
		tid = rtl_get_tid(skb);
		if (sta) {
			sta_entry = (struct rtl_sta_info *)sta->drv_priv;
			seq_number = (le16_to_cpu(hdr->seq_ctrl) &
				      IEEE80211_SCTL_SEQ) >> 4;
			seq_number += 1;

			if (!ieee80211_has_morefrags(hdr->frame_control))
				sta_entry->tids[tid].seq_number = seq_number;
		}
	}

	if (ieee80211_is_data(fc))
		rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);

	rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc,
					info, sta, skb, hw_queue, ptcb_desc);

	__skb_queue_tail(&ring->queue, skb);

	rtlpriv->cfg->ops->set_desc((u8 *)pdesc, true,
				    HW_DESC_OWN, &temp_one);

	if ((ring->entries - skb_queue_len(&ring->queue)) < 2 &&
	    hw_queue != BEACON_QUEUE) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
			 "less desc left, stop skb_queue@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%d\n",
			 hw_queue, ring->idx, idx,
			 skb_queue_len(&ring->queue));

		ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
	}

	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);

	rtlpriv->cfg->ops->tx_polling(hw, hw_queue);

	return 0;
}
static void rtl_pci_flush(struct ieee80211_hw *hw, bool drop)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	u16 i = 0;
	int queue_id;
	struct rtl8192_tx_ring *ring;

	if (mac->skip_scan)
		return;

	for (queue_id = RTL_PCI_MAX_TX_QUEUE_COUNT - 1; queue_id >= 0;) {
		u32 queue_len;
		ring = &pcipriv->dev.tx_ring[queue_id];
		queue_len = skb_queue_len(&ring->queue);
		if (queue_len == 0 || queue_id == BEACON_QUEUE ||
		    queue_id == TXCMD_QUEUE) {
			queue_id--;
			continue;
		} else {
			msleep(20);
			i++;
		}

		/* we just wait 1s for all queues */
		if (rtlpriv->psc.rfpwr_state == ERFOFF ||
		    is_hal_stop(rtlhal) || i >= 200)
			return;
	}
}
static void rtl_pci_deinit(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));

	_rtl_pci_deinit_trx_ring(hw);

	synchronize_irq(rtlpci->pdev->irq);
	tasklet_kill(&rtlpriv->works.irq_tasklet);
	cancel_work_sync(&rtlpriv->works.lps_change_work);

	flush_workqueue(rtlpriv->works.rtl_wq);
	destroy_workqueue(rtlpriv->works.rtl_wq);
}
static int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	int err;

	_rtl_pci_init_struct(hw, pdev);

	err = _rtl_pci_init_trx_ring(hw);
	if (err) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "tx ring initialization failed\n");
		return err;
	}

	return 0;
}
static int rtl_pci_start(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	int err;

	rtl_pci_reset_trx_ring(hw);

	rtlpci->driver_is_goingto_unload = false;
	err = rtlpriv->cfg->ops->hw_init(hw);
	if (err) {
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 "Failed to config hardware!\n");
		return err;
	}

	rtlpriv->cfg->ops->enable_interrupt(hw);
	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "enable_interrupt OK\n");

	rtl_init_rx_config(hw);

	/*should be after adapter start and interrupt enable. */
	set_hal_start(rtlhal);

	RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);

	rtlpci->up_first_time = false;

	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "OK\n");
	return 0;
}
static void rtl_pci_stop(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	unsigned long flags;
	u8 RFInProgressTimeOut = 0;

	/*
	 *should be before disabling interrupt & adapter
	 *and will do it immediately.
	 */
	set_hal_stop(rtlhal);

	rtlpci->driver_is_goingto_unload = true;
	rtlpriv->cfg->ops->disable_interrupt(hw);
	cancel_work_sync(&rtlpriv->works.lps_change_work);

	spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
	while (ppsc->rfchange_inprogress) {
		spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
		if (RFInProgressTimeOut > 100) {
			spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
			break;
		}
		mdelay(1);
		RFInProgressTimeOut++;
		spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
	}
	ppsc->rfchange_inprogress = true;
	spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);

	rtlpriv->cfg->ops->hw_disable(hw);
	/* some things are not needed if firmware not available */
	if (!rtlpriv->max_fw_size)
		return;
	rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);

	spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
	ppsc->rfchange_inprogress = false;
	spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);

	rtl_pci_enable_aspm(hw);
}
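
/*
 * Identify the chip from the PCI device/revision IDs, record the position
 * of the device and of its PCI bridge, and cache the bridge's PCIe
 * capability information used later by the ASPM code.
 */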
static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
				  struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct pci_dev *bridge_pdev = pdev->bus->self;
	u16 venderid;
	u16 deviceid;
	u8 revisionid;
	u16 irqline;
	u8 tmp;

	pcipriv->ndis_adapter.pcibridge_vendor = PCI_BRIDGE_VENDOR_UNKNOWN;
	venderid = pdev->vendor;
	deviceid = pdev->device;
	pci_read_config_byte(pdev, 0x8, &revisionid);
	pci_read_config_word(pdev, 0x3C, &irqline);

	/* PCI ID 0x10ec:0x8192 occurs for both RTL8192E, which uses
	 * r8192e_pci, and RTL8192SE, which uses this driver. If the
	 * revision ID is RTL_PCI_REVISION_ID_8192PCIE (0x01), then
	 * the correct driver is r8192e_pci, thus this routine should
	 * return false.
	 */
	if (deviceid == RTL_PCI_8192SE_DID &&
	    revisionid == RTL_PCI_REVISION_ID_8192PCIE)
		return false;

	if (deviceid == RTL_PCI_8192_DID ||
	    deviceid == RTL_PCI_0044_DID ||
	    deviceid == RTL_PCI_0047_DID ||
	    deviceid == RTL_PCI_8192SE_DID ||
	    deviceid == RTL_PCI_8174_DID ||
	    deviceid == RTL_PCI_8173_DID ||
	    deviceid == RTL_PCI_8172_DID ||
	    deviceid == RTL_PCI_8171_DID) {
		switch (revisionid) {
		case RTL_PCI_REVISION_ID_8192PCIE:
			RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
				 "8192 PCI-E is found - vid/did=%x/%x\n",
				 venderid, deviceid);
			rtlhal->hw_type = HARDWARE_TYPE_RTL8192E;
			return false;
		case RTL_PCI_REVISION_ID_8192SE:
			RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
				 "8192SE is found - vid/did=%x/%x\n",
				 venderid, deviceid);
			rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
			break;
		default:
			RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
				 "Err: Unknown device - vid/did=%x/%x\n",
				 venderid, deviceid);
			rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
			break;
		}
	} else if (deviceid == RTL_PCI_8723AE_DID) {
		rtlhal->hw_type = HARDWARE_TYPE_RTL8723AE;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 "8723AE PCI-E is found - vid/did=%x/%x\n",
			 venderid, deviceid);
	} else if (deviceid == RTL_PCI_8192CET_DID ||
		   deviceid == RTL_PCI_8192CE_DID ||
		   deviceid == RTL_PCI_8191CE_DID ||
		   deviceid == RTL_PCI_8188CE_DID) {
		rtlhal->hw_type = HARDWARE_TYPE_RTL8192CE;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 "8192C PCI-E is found - vid/did=%x/%x\n",
			 venderid, deviceid);
	} else if (deviceid == RTL_PCI_8192DE_DID ||
		   deviceid == RTL_PCI_8192DE_DID2) {
		rtlhal->hw_type = HARDWARE_TYPE_RTL8192DE;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 "8192D PCI-E is found - vid/did=%x/%x\n",
			 venderid, deviceid);
	} else if (deviceid == RTL_PCI_8188EE_DID) {
		rtlhal->hw_type = HARDWARE_TYPE_RTL8188EE;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
			 "Find adapter, Hardware type is 8188EE\n");
	} else {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
			 "Err: Unknown device - vid/did=%x/%x\n",
			 venderid, deviceid);
		rtlhal->hw_type = RTL_DEFAULT_HARDWARE_TYPE;
	}

	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE) {
		if (revisionid == 0 || revisionid == 1) {
			if (revisionid == 0) {
				RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
					 "Find 92DE MAC0\n");
				rtlhal->interfaceindex = 0;
			} else if (revisionid == 1) {
				RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
					 "Find 92DE MAC1\n");
				rtlhal->interfaceindex = 1;
			}
		} else {
			RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
				 "Unknown device - VendorID/DeviceID=%x/%x, Revision=%x\n",
				 venderid, deviceid, revisionid);
			rtlhal->interfaceindex = 0;
		}
	}

	/*find bus info */
	pcipriv->ndis_adapter.busnumber = pdev->bus->number;
	pcipriv->ndis_adapter.devnumber = PCI_SLOT(pdev->devfn);
	pcipriv->ndis_adapter.funcnumber = PCI_FUNC(pdev->devfn);

	/* some ARM have no bridge_pdev and will crash here
	 * so we should check if bridge_pdev is NULL
	 */
	if (bridge_pdev) {
		/*find bridge info if available */
		pcipriv->ndis_adapter.pcibridge_vendorid = bridge_pdev->vendor;
		for (tmp = 0; tmp < PCI_BRIDGE_VENDOR_MAX; tmp++) {
			if (bridge_pdev->vendor == pcibridge_vendors[tmp]) {
				pcipriv->ndis_adapter.pcibridge_vendor = tmp;
				RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
					 "Pci Bridge Vendor is found index: %d\n",
					 tmp);
				break;
			}
		}
	}

	if (pcipriv->ndis_adapter.pcibridge_vendor !=
	    PCI_BRIDGE_VENDOR_UNKNOWN) {
		pcipriv->ndis_adapter.pcibridge_busnum =
		    bridge_pdev->bus->number;
		pcipriv->ndis_adapter.pcibridge_devnum =
		    PCI_SLOT(bridge_pdev->devfn);
		pcipriv->ndis_adapter.pcibridge_funcnum =
		    PCI_FUNC(bridge_pdev->devfn);
		pcipriv->ndis_adapter.pcibridge_pciehdr_offset =
		    pci_pcie_cap(bridge_pdev);
		pcipriv->ndis_adapter.num4bytes =
		    (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10) / 4;

		rtl_pci_get_linkcontrol_field(hw);

		if (pcipriv->ndis_adapter.pcibridge_vendor ==
		    PCI_BRIDGE_VENDOR_AMD) {
			pcipriv->ndis_adapter.amd_l1_patch =
			    rtl_pci_get_amd_l1_patch(hw);
		}
	}

	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
		 "pcidev busnumber:devnumber:funcnumber:vendor:link_ctl %d:%d:%d:%x:%x\n",
		 pcipriv->ndis_adapter.busnumber,
		 pcipriv->ndis_adapter.devnumber,
		 pcipriv->ndis_adapter.funcnumber,
		 pdev->vendor, pcipriv->ndis_adapter.linkctrl_reg);

	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
		 "pci_bridge busnumber:devnumber:funcnumber:vendor:pcie_cap:link_ctl_reg:amd %d:%d:%d:%x:%x:%x:%x\n",
		 pcipriv->ndis_adapter.pcibridge_busnum,
		 pcipriv->ndis_adapter.pcibridge_devnum,
		 pcipriv->ndis_adapter.pcibridge_funcnum,
		 pcibridge_vendors[pcipriv->ndis_adapter.pcibridge_vendor],
		 pcipriv->ndis_adapter.pcibridge_pciehdr_offset,
		 pcipriv->ndis_adapter.pcibridge_linkctrlreg,
		 pcipriv->ndis_adapter.amd_l1_patch);

	rtl_pci_parse_configuration(pdev, hw);
	list_add_tail(&rtlpriv->list, &rtlpriv->glb_var->glb_priv_list);

	return true;
}
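
/*
 * Standard PCI probe: enable and map the device, identify the adapter,
 * initialize the core/mac80211 state and the trx rings, then register
 * the interrupt handler.
 */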
int rtl_pci_probe(struct pci_dev *pdev,
		  const struct pci_device_id *id)
{
	struct ieee80211_hw *hw = NULL;

	struct rtl_priv *rtlpriv = NULL;
	struct rtl_pci_priv *pcipriv = NULL;
	struct rtl_pci *rtlpci;
	unsigned long pmem_start, pmem_len, pmem_flags;
	int err;

	err = pci_enable_device(pdev);
	if (err) {
		RT_ASSERT(false, "%s : Cannot enable new PCI device\n",
			  pci_name(pdev));
		return err;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
			RT_ASSERT(false,
				  "Unable to obtain 32bit DMA for consistent allocations\n");
			err = -ENOMEM;
			goto fail1;
		}
	}

	pci_set_master(pdev);

	hw = ieee80211_alloc_hw(sizeof(struct rtl_pci_priv) +
				sizeof(struct rtl_priv), &rtl_ops);
	if (!hw) {
		RT_ASSERT(false,
			  "%s : ieee80211 alloc failed\n", pci_name(pdev));
		err = -ENOMEM;
		goto fail1;
	}

	SET_IEEE80211_DEV(hw, &pdev->dev);
	pci_set_drvdata(pdev, hw);

	rtlpriv = hw->priv;
	pcipriv = (void *)rtlpriv->priv;
	pcipriv->dev.pdev = pdev;
	init_completion(&rtlpriv->firmware_loading_complete);

	/* init cfg & intf_ops */
	rtlpriv->rtlhal.interface = INTF_PCI;
	rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data);
	rtlpriv->intf_ops = &rtl_pci_ops;
	rtlpriv->glb_var = &rtl_global_var;

	/*
	 *init dbgp flags before all
	 *other functions, because we will
	 *use it in other functions like
	 *RT_TRACE/RT_PRINT/RTL_PRINT_DATA;
	 *you can not use these macros
	 *before this
	 */
	rtl_dbgp_flag_init(hw);

	/* MEM map */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		RT_ASSERT(false, "Can't obtain PCI resources\n");
		goto fail1;
	}

	pmem_start = pci_resource_start(pdev, rtlpriv->cfg->bar_id);
	pmem_len = pci_resource_len(pdev, rtlpriv->cfg->bar_id);
	pmem_flags = pci_resource_flags(pdev, rtlpriv->cfg->bar_id);

	/*shared mem start */
	rtlpriv->io.pci_mem_start =
			(unsigned long)pci_iomap(pdev,
						 rtlpriv->cfg->bar_id,
						 pmem_len);
	if (rtlpriv->io.pci_mem_start == 0) {
		RT_ASSERT(false, "Can't map PCI mem\n");
		err = -ENOMEM;
		goto fail2;
	}

	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
		 "mem mapped space: start: 0x%08lx len:%08lx flags:%08lx, after map:0x%08lx\n",
		 pmem_start, pmem_len, pmem_flags,
		 rtlpriv->io.pci_mem_start);

	/* Disable Clk Request */
	pci_write_config_byte(pdev, 0x81, 0);
	/* leave D3 mode */
	pci_write_config_byte(pdev, 0x44, 0);
	pci_write_config_byte(pdev, 0x04, 0x06);
	pci_write_config_byte(pdev, 0x04, 0x07);

	/* find adapter */
	if (!_rtl_pci_find_adapter(pdev, hw)) {
		err = -ENODEV;
		goto fail3;
	}

	/* Init IO handler */
	_rtl_pci_io_handler_init(&pdev->dev, hw);

	/*like read eeprom and so on */
	rtlpriv->cfg->ops->read_eeprom_info(hw);

	/*aspm */
	rtl_pci_init_aspm(hw);

	/* Init mac80211 sw */
	err = rtl_init_core(hw);
	if (err) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "Can't allocate sw for mac80211\n");
		goto fail3;
	}

	/* Init PCI sw */
	err = rtl_pci_init(hw, pdev);
	if (err) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Failed to init PCI\n");
		goto fail3;
	}

	if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
		err = -ENODEV;
		goto fail3;
	}

	rtlpriv->cfg->ops->init_sw_leds(hw);

	err = sysfs_create_group(&pdev->dev.kobj, &rtl_attribute_group);
	if (err) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "failed to create sysfs device attributes\n");
		goto fail3;
	}

	rtlpci = rtl_pcidev(pcipriv);
	err = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
			  IRQF_SHARED, KBUILD_MODNAME, hw);
	if (err) {
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 "%s: failed to register IRQ handler\n",
			 wiphy_name(hw->wiphy));
		goto fail3;
	}
	rtlpci->irq_alloc = 1;

	return 0;

fail3:
	rtl_deinit_core(hw);

	if (rtlpriv->io.pci_mem_start != 0)
		pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);

fail2:
	pci_release_regions(pdev);
	complete(&rtlpriv->firmware_loading_complete);

fail1:
	if (hw)
		ieee80211_free_hw(hw);
	pci_disable_device(pdev);

	return err;
}
EXPORT_SYMBOL(rtl_pci_probe);
void rtl_pci_disconnect(struct pci_dev *pdev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
	struct rtl_mac *rtlmac = rtl_mac(rtlpriv);

	/* just in case driver is removed before firmware callback */
	wait_for_completion(&rtlpriv->firmware_loading_complete);
	clear_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);

	sysfs_remove_group(&pdev->dev.kobj, &rtl_attribute_group);

	/*ieee80211_unregister_hw will call ops_stop */
	if (rtlmac->mac80211_registered == 1) {
		ieee80211_unregister_hw(hw);
		rtlmac->mac80211_registered = 0;
	} else {
		rtl_deinit_deferred_work(hw);
		rtlpriv->intf_ops->adapter_stop(hw);
	}
	rtlpriv->cfg->ops->disable_interrupt(hw);

	rtl_deinit_rfkill(hw);

	rtl_pci_deinit(hw);
	rtl_deinit_core(hw);
	rtlpriv->cfg->ops->deinit_sw_vars(hw);

	if (rtlpci->irq_alloc) {
		synchronize_irq(rtlpci->pdev->irq);
		free_irq(rtlpci->pdev->irq, hw);
		rtlpci->irq_alloc = 0;
	}

	list_del(&rtlpriv->list);
	if (rtlpriv->io.pci_mem_start != 0) {
		pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
		pci_release_regions(pdev);
	}

	pci_disable_device(pdev);

	rtl_pci_disable_aspm(hw);

	ieee80211_free_hw(hw);
}
EXPORT_SYMBOL(rtl_pci_disconnect);
#ifdef CONFIG_PM_SLEEP
/***************************************
 * kernel pci power state defines:
 * PCI_D0         ((pci_power_t __force) 0)
 * PCI_D1         ((pci_power_t __force) 1)
 * PCI_D2         ((pci_power_t __force) 2)
 * PCI_D3hot      ((pci_power_t __force) 3)
 * PCI_D3cold     ((pci_power_t __force) 4)
 * PCI_UNKNOWN    ((pci_power_t __force) 5)
 *
 * This function is called when the system goes into suspend state.
 * mac80211 calls rtl_mac_stop() from its suspend callback first,
 * so there is no need to call hw_disable here.
 ****************************************/
int rtl_pci_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtlpriv->cfg->ops->hw_suspend(hw);
	rtl_deinit_rfkill(hw);

	return 0;
}
EXPORT_SYMBOL(rtl_pci_suspend);

int rtl_pci_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtlpriv->cfg->ops->hw_resume(hw);
	rtl_init_rfkill(hw);

	return 0;
}
EXPORT_SYMBOL(rtl_pci_resume);
#endif /* CONFIG_PM_SLEEP */
struct rtl_intf_ops rtl_pci_ops = {
	.read_efuse_byte = read_efuse_byte,
	.adapter_start = rtl_pci_start,
	.adapter_stop = rtl_pci_stop,
	.check_buddy_priv = rtl_pci_check_buddy_priv,
	.adapter_tx = rtl_pci_tx,
	.flush = rtl_pci_flush,
	.reset_trx_ring = rtl_pci_reset_trx_ring,
	.waitq_insert = rtl_pci_tx_chk_waitq_insert,

	.disable_aspm = rtl_pci_disable_aspm,
	.enable_aspm = rtl_pci_enable_aspm,
};