/******************************************************************************
 *
 * Copyright(c) 2009-2010  Realtek Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * wlanfae <wlanfae@realtek.com>
 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
 * Hsinchu 300, Taiwan.
 *
 * Larry Finger <Larry.Finger@lwfinger.net>
 *
 *****************************************************************************/
static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = {
/* Update PCI dependent default settings */
static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
    struct rtl_priv *rtlpriv = rtl_priv(hw);
    struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
    struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
    struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
    u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;

    ppsc->reg_rfps_level = 0;
    ppsc->b_support_aspm = 0;

    /* Update PCI ASPM setting */
    ppsc->const_amdpci_aspm = rtlpci->const_amdpci_aspm;
    switch (rtlpci->const_pci_aspm) {
        /* ASPM dynamically enabled/disabled. */
        ppsc->reg_rfps_level |= RT_RF_LPS_LEVEL_ASPM;

        /* ASPM with Clock Req dynamically enabled/disabled. */
        ppsc->reg_rfps_level |= (RT_RF_LPS_LEVEL_ASPM |
                                 RT_RF_OFF_LEVL_CLK_REQ);

        /*
         * Always enable ASPM and Clock Req
         * from initialization to halt.
         */
        ppsc->reg_rfps_level &= ~(RT_RF_LPS_LEVEL_ASPM);
        ppsc->reg_rfps_level |= (RT_RF_PS_LEVEL_ALWAYS_ASPM |
                                 RT_RF_OFF_LEVL_CLK_REQ);

        /*
         * Always enable ASPM without Clock Req
         * from initialization to halt.
         */
        ppsc->reg_rfps_level &= ~(RT_RF_LPS_LEVEL_ASPM |
                                  RT_RF_OFF_LEVL_CLK_REQ);
        ppsc->reg_rfps_level |= RT_RF_PS_LEVEL_ALWAYS_ASPM;

    ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_HALT_NIC;
    /* Update Radio OFF setting */
    switch (rtlpci->const_hwsw_rfoff_d3) {
        if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM)
            ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_ASPM;

        if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM)
            ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_ASPM;
        ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_HALT_NIC;

        ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_PCI_D3;
    /* Set HW definition to determine if it supports ASPM. */
    switch (rtlpci->const_support_pciaspm) {
        /* ASPM is not supported. */
        bool b_support_aspm = false;
        ppsc->b_support_aspm = b_support_aspm;

        bool b_support_aspm = true;
        bool b_support_backdoor = true;
        ppsc->b_support_aspm = b_support_aspm;

        /*if (priv->oem_id == RT_CID_TOSHIBA &&
             !priv->ndis_adapter.amd_l1_patch)
             b_support_backdoor = false; */

        ppsc->b_support_backdoor = b_support_backdoor;

        /* ASPM value set by chipset. */
        if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL) {
            bool b_support_aspm = true;
            ppsc->b_support_aspm = b_support_aspm;

        RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
                 ("switch case not processed\n"));
static bool _rtl_pci_platform_switch_device_pci_aspm(
                struct ieee80211_hw *hw,
    struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
    bool bresult = false;

    pci_write_config_byte(rtlpci->pdev, 0x80, value);

/* Write 0x01 to enable the clk request, 0x0 to disable it. */
static bool _rtl_pci_switch_clk_req(struct ieee80211_hw *hw, u8 value)
    struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
    bool bresult = false;

    pci_write_config_byte(rtlpci->pdev, 0x81, value);
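/*
 * Note on the two helpers above: offsets 0x80 and 0x81 written with
 * pci_write_config_byte() are chip-specific configuration registers used
 * here to switch the device's ASPM state and its Clock Request bit (an
 * interpretation based on the surrounding comments and callers, not on
 * the generic PCI specification).
 */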
/* Disable RTL8192SE ASPM & disable PCI bridge ASPM */
static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
    struct rtl_priv *rtlpriv = rtl_priv(hw);
    struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
    struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
    struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
    u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
    u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
    u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
    /* Retrieve original configuration settings. */
    u8 linkctrl_reg = pcipriv->ndis_adapter.linkctrl_reg;
    u16 pcibridge_linkctrlreg = pcipriv->ndis_adapter.
                                pcibridge_linkctrlreg;

    if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
        RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
                 ("PCI(Bridge) UNKNOWN.\n"));

    if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) {
        RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ);
        _rtl_pci_switch_clk_req(hw, 0x0);

    /* an I/O access so the device will be in the L0 state afterwards. */
    pci_read_config_byte(rtlpci->pdev, 0x80, &tmp_u1b);

    /* Set corresponding value. */
    aspmlevel |= BIT(0) | BIT(1);
    linkctrl_reg &= ~aspmlevel;
    pcibridge_linkctrlreg &= ~(BIT(0) | BIT(1));

    _rtl_pci_platform_switch_device_pci_aspm(hw, linkctrl_reg);

    /* Disable PCI bridge ASPM */
    rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
                                 pcicfg_addrport + (num4bytes << 2));
    rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, pcibridge_linkctrlreg);
/*
 * Enable RTL8192SE ASPM & enable PCI bridge ASPM for power saving.
 * We should follow the sequence of enabling the RTL8192SE first and
 * then the PCI bridge ASPM, or the system will show a bluescreen.
 */
static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
    struct rtl_priv *rtlpriv = rtl_priv(hw);
    struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
    struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
    struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
    u8 pcibridge_busnum = pcipriv->ndis_adapter.pcibridge_busnum;
    u8 pcibridge_devnum = pcipriv->ndis_adapter.pcibridge_devnum;
    u8 pcibridge_funcnum = pcipriv->ndis_adapter.pcibridge_funcnum;
    u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
    u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
    u8 num4bytes = pcipriv->ndis_adapter.num4bytes;

    u8 u_pcibridge_aspmsetting;
    u8 u_device_aspmsetting;

    if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
        RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
                 ("PCI(Bridge) UNKNOWN.\n"));

    /* Enable PCI bridge ASPM */
    rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
                                 pcicfg_addrport + (num4bytes << 2));

    u_pcibridge_aspmsetting =
        pcipriv->ndis_adapter.pcibridge_linkctrlreg |
        rtlpci->const_hostpci_aspm_setting;

    if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL)
        u_pcibridge_aspmsetting &= ~BIT(0);

    rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, u_pcibridge_aspmsetting);

    RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
             ("PlatformEnableASPM(): PciBridge busnumber[%x], "
              "DevNumber[%x], funcnumber[%x], Write reg[%x] = %x\n",
              pcibridge_busnum, pcibridge_devnum, pcibridge_funcnum,
              (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10),
              u_pcibridge_aspmsetting));

    /* Get ASPM level (with/without Clock Req) */
    aspmlevel = rtlpci->const_devicepci_aspm_setting;
    u_device_aspmsetting = pcipriv->ndis_adapter.linkctrl_reg;

    /*_rtl_pci_platform_switch_device_pci_aspm(dev,*/
    /*(priv->ndis_adapter.linkctrl_reg | ASPMLevel)); */

    u_device_aspmsetting |= aspmlevel;

    _rtl_pci_platform_switch_device_pci_aspm(hw, u_device_aspmsetting);

    if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) {
        _rtl_pci_switch_clk_req(hw, (ppsc->reg_rfps_level &
                                     RT_RF_OFF_LEVL_CLK_REQ) ? 1 : 0);
        RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ);
static bool rtl_pci_get_amd_l1_patch(struct ieee80211_hw *hw)
    struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
    u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;

    rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
                                 pcicfg_addrport + 0xE0);
    rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, 0xA0);

    rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
                                 pcicfg_addrport + 0xE0);
    rtl_pci_raw_read_port_uchar(PCI_CONF_DATA, &offset_e0);

    if (offset_e0 == 0xA0) {
        rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
                                     pcicfg_addrport + 0xE4);
        rtl_pci_raw_read_port_ulong(PCI_CONF_DATA, &offset_e4);
        if (offset_e4 & BIT(23))
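/*
 * rtl_pci_get_amd_l1_patch() above probes the bridge's config space at
 * offsets 0xE0/0xE4 through the raw address/data ports and reports
 * whether BIT(23) of the 0xE4 dword is set; the result is stored in
 * ndis_adapter.amd_l1_patch and only consulted for AMD bridges.
 */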
static void rtl_pci_get_linkcontrol_field(struct ieee80211_hw *hw)
    struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
    u8 capabilityoffset = pcipriv->ndis_adapter.pcibridge_pciehdr_offset;
    u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;

    num4bBytes = (capabilityoffset + 0x10) / 4;

    /* Read the Link Control Register */
    rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
                                 pcicfg_addrport + (num4bBytes << 2));
    rtl_pci_raw_read_port_uchar(PCI_CONF_DATA, &linkctrl_reg);

    pcipriv->ndis_adapter.pcibridge_linkctrlreg = linkctrl_reg;
static void rtl_pci_parse_configuration(struct pci_dev *pdev,
                                        struct ieee80211_hw *hw)
    struct rtl_priv *rtlpriv = rtl_priv(hw);
    struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);

    /* Link Control Register */
    pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
    pci_read_config_byte(pdev, pos + PCI_EXP_LNKCTL, &linkctrl_reg);
    pcipriv->ndis_adapter.linkctrl_reg = linkctrl_reg;

    RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
             ("Link Control Register =%x\n",
              pcipriv->ndis_adapter.linkctrl_reg));

    pci_read_config_byte(pdev, 0x98, &tmp);
    pci_write_config_byte(pdev, 0x98, tmp);

    pci_write_config_byte(pdev, 0x70f, tmp);
static void _rtl_pci_initialize_adapter_common(struct ieee80211_hw *hw)
    struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));

    _rtl_pci_update_default_setting(hw);

    if (ppsc->reg_rfps_level & RT_RF_PS_LEVEL_ALWAYS_ASPM) {
        /* Always enable ASPM & Clock Req. */
        rtl_pci_enable_aspm(hw);
        RT_SET_PS_LEVEL(ppsc, RT_RF_PS_LEVEL_ALWAYS_ASPM);
static void rtl_pci_init_aspm(struct ieee80211_hw *hw)
    struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));

    /* close ASPM for AMD by default */
    rtlpci->const_amdpci_aspm = 0;

    /*
     * 1 - Enable ASPM without Clock Req,
     * 2 - Enable ASPM with Clock Req,
     * 3 - Always Enable ASPM with Clock Req,
     * 4 - Always Enable ASPM without Clock Req.
     * Set default to RTL8192CE:3 RTL8192E:2
     */
    rtlpci->const_pci_aspm = 3;

    /* Setting for PCI-E device */
    rtlpci->const_devicepci_aspm_setting = 0x03;

    /* Setting for PCI-E bridge */
    rtlpci->const_hostpci_aspm_setting = 0x02;

    /*
     * In Hw/Sw Radio Off situation:
     * 1 - From ASPM setting without low Mac Pwr,
     * 2 - From ASPM setting with low Mac Pwr,
     * Set default to RTL8192CE:0 RTL8192SE:2
     */
    rtlpci->const_hwsw_rfoff_d3 = 0;

    /*
     * This setting works for those devices with a
     * backdoor ASPM setting such as an EPHY setting.
     * 0 - Does not support ASPM,
     * 2 - According to chipset.
     */
    rtlpci->const_support_pciaspm = 1;

    _rtl_pci_initialize_adapter_common(hw);
static void _rtl_pci_io_handler_init(struct device *dev,
                                     struct ieee80211_hw *hw)
    struct rtl_priv *rtlpriv = rtl_priv(hw);

    rtlpriv->io.dev = dev;

    rtlpriv->io.write8_async = pci_write8_async;
    rtlpriv->io.write16_async = pci_write16_async;
    rtlpriv->io.write32_async = pci_write32_async;

    rtlpriv->io.read8_sync = pci_read8_sync;
    rtlpriv->io.read16_sync = pci_read16_sync;
    rtlpriv->io.read32_sync = pci_read32_sync;

static void _rtl_pci_io_handler_release(struct ieee80211_hw *hw)
static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
    struct rtl_priv *rtlpriv = rtl_priv(hw);
    struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));

    struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];

    while (skb_queue_len(&ring->queue)) {
        struct rtl_tx_desc *entry = &ring->desc[ring->idx];
        struct ieee80211_tx_info *info;

        u8 own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) entry, true,

        /*
         * A beacon packet will only use the first descriptor by
         * default, and the own bit may not be cleared by the hardware.
         */

        ring->idx = (ring->idx + 1) % ring->entries;

        skb = __skb_dequeue(&ring->queue);
        pci_unmap_single(rtlpci->pdev,
                         le32_to_cpu(rtlpriv->cfg->ops->
                                     get_desc((u8 *) entry, true,
                                              HW_DESC_TXBUFF_ADDR)),
                         skb->len, PCI_DMA_TODEVICE);

        RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_TRACE,
                 ("new ring->idx:%d, "
                  "free: skb_queue_len:%d, free: seq:%x\n",
                  skb_queue_len(&ring->queue),
                  *(u16 *) (skb->data + 22)));

        info = IEEE80211_SKB_CB(skb);
        ieee80211_tx_info_clear_status(info);

        info->flags |= IEEE80211_TX_STAT_ACK;
        /*info->status.rates[0].count = 1; */

        ieee80211_tx_status_irqsafe(hw, skb);

        if ((ring->entries - skb_queue_len(&ring->queue))
            RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
                     ("more desc left, wake "
                      "skb_queue@%d,ring->idx = %d,"
                      "skb_queue_len = 0x%d\n",
                      skb_queue_len(&ring->queue)));

            ieee80211_wake_queue(hw,
                                 skb_get_queue_mapping

    if (((rtlpriv->link_info.num_rx_inperiod +
          rtlpriv->link_info.num_tx_inperiod) > 8) ||
         (rtlpriv->link_info.num_rx_inperiod > 2)) {
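/*
 * Summary of the tx ISR above: for one hardware priority it walks the
 * descriptor ring, unmaps each completed skb, reports it to mac80211
 * with IEEE80211_TX_STAT_ACK via ieee80211_tx_status_irqsafe(), and
 * wakes the matching mac80211 queue once enough descriptors are free.
 */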
static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
    struct rtl_priv *rtlpriv = rtl_priv(hw);
    struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
    int rx_queue_idx = RTL_PCI_RX_MPDU_QUEUE;

    struct ieee80211_rx_status rx_status = { 0 };
    unsigned int count = rtlpci->rxringcount;

    bool unicast = false;

    struct rtl_stats stats = {

        struct rtl_rx_desc *pdesc = &rtlpci->rx_ring[rx_queue_idx].desc[
                                    rtlpci->rx_ring[rx_queue_idx].idx];

        struct sk_buff *skb = rtlpci->rx_ring[rx_queue_idx].rx_buf[
                              rtlpci->rx_ring[rx_queue_idx].idx];

        own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc,

            /* wait for data to be filled by hardware */

            struct ieee80211_hdr *hdr;

            struct sk_buff *new_skb = NULL;

            rtlpriv->cfg->ops->query_rx_desc(hw, &stats,

            pci_unmap_single(rtlpci->pdev,
                             *((dma_addr_t *) skb->cb),
                             rtlpci->rxbuffersize,

            skb_put(skb, rtlpriv->cfg->ops->get_desc((u8 *) pdesc,

                    stats.rx_drvinfo_size + stats.rx_bufshift);

            /*
             * NOTICE: this cannot be done here for mac80211;
             * it is done in the mac80211 code, and if you do it
             * here, secured DHCP will fail:
             * skb_trim(skb, skb->len - 4);
             */

            hdr = (struct ieee80211_hdr *)(skb->data);
            fc = le16_to_cpu(hdr->frame_control);

            memcpy(IEEE80211_SKB_RXCB(skb), &rx_status,

            if (is_broadcast_ether_addr(hdr->addr1))

            if (is_multicast_ether_addr(hdr->addr1))

                rtlpriv->stats.rxbytesunicast +=

            rtl_is_special_data(hw, skb, false);

            if (ieee80211_is_data(fc)) {
                rtlpriv->cfg->ops->led_control(hw,

            if (unlikely(!rtl_action_proc(hw, skb,
                dev_kfree_skb_any(skb);

                struct sk_buff *uskb = NULL;

                uskb = dev_alloc_skb(skb->len + 128);
                memcpy(IEEE80211_SKB_RXCB(uskb),

                pdata = (u8 *)skb_put(uskb, skb->len);
                memcpy(pdata, skb->data, skb->len);
                dev_kfree_skb_any(skb);

                ieee80211_rx_irqsafe(hw, uskb);

                dev_kfree_skb_any(skb);

            if (((rtlpriv->link_info.num_rx_inperiod +
                  rtlpriv->link_info.num_tx_inperiod) > 8) ||
                 (rtlpriv->link_info.num_rx_inperiod > 2)) {

        new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
        if (unlikely(!new_skb)) {
            RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV),
                     ("can't alloc skb for rx\n"));

        rtlpci->rx_ring[rx_queue_idx].rx_buf[rtlpci->

        *((dma_addr_t *) skb->cb) =
            pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
                           rtlpci->rxbuffersize,

        bufferaddress = cpu_to_le32(*((dma_addr_t *) skb->cb));

        rtlpriv->cfg->ops->set_desc((u8 *) pdesc, false,
                                    (u8 *)&bufferaddress);
        rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false, HW_DESC_RXOWN,
        rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false,
                                    (u8 *)&rtlpci->rxbuffersize);

        if (rtlpci->rx_ring[rx_queue_idx].idx ==
            rtlpci->rxringcount - 1)
            rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false,

        rtlpci->rx_ring[rx_queue_idx].idx =
            (rtlpci->rx_ring[rx_queue_idx].idx + 1) %
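/*
 * Summary of the rx path above: each received frame (after the driver's
 * own management-frame handling) is copied into a freshly allocated skb
 * and handed to mac80211 with ieee80211_rx_irqsafe(); the descriptor is
 * then refilled with a new DMA-mapped buffer and its ownership is
 * returned to the hardware.
 */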
void _rtl_pci_tx_interrupt(struct ieee80211_hw *hw)
    struct rtl_priv *rtlpriv = rtl_priv(hw);
    struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));

    for (prio = 0; prio < RTL_PCI_MAX_TX_QUEUE_COUNT; prio++) {
        struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];

        while (skb_queue_len(&ring->queue)) {
            struct rtl_tx_desc *entry = &ring->desc[ring->idx];
            struct ieee80211_tx_info *info;

            /*
             * A beacon packet will only use the first descriptor by
             * default, and the own bit may not be cleared by the
             * hardware; the beacon will be freed when preparing the
             * next beacon.
             */
            if (prio == BEACON_QUEUE || prio == TXCMD_QUEUE ||

            own = (u8)rtlpriv->cfg->ops->get_desc((u8 *)entry,

            skb = __skb_dequeue(&ring->queue);
            pci_unmap_single(rtlpci->pdev,
                             le32_to_cpu(rtlpriv->cfg->ops->
                                         get_desc((u8 *) entry,
                                                  HW_DESC_TXBUFF_ADDR)),
                             skb->len, PCI_DMA_TODEVICE);

            ring->idx = (ring->idx + 1) % ring->entries;

            info = IEEE80211_SKB_CB(skb);
            ieee80211_tx_info_clear_status(info);

            info->flags |= IEEE80211_TX_STAT_ACK;
            /*info->status.rates[0].count = 1; */

            ieee80211_tx_status_irqsafe(hw, skb);

            if ((ring->entries - skb_queue_len(&ring->queue))
                == 2 && prio != BEACON_QUEUE) {
                RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
                         ("more desc left, wake "
                          "skb_queue@%d,ring->idx = %d,"
                          "skb_queue_len = 0x%d\n",
                          skb_queue_len(&ring->queue)));

                ieee80211_wake_queue(hw,
                                     skb_get_queue_mapping
static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
    struct ieee80211_hw *hw = dev_id;
    struct rtl_priv *rtlpriv = rtl_priv(hw);
    struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));

    if (rtlpci->irq_enabled == 0)

    spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);

    /* read ISR: 4/8 bytes */
    rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb);

    /* Shared IRQ or HW disappeared */
    if (!inta || inta == 0xffff)

    /* <1> beacon related */
    if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK]) {
        RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
                 ("beacon ok interrupt!\n"));

    if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TBDER])) {
        RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
                 ("beacon err interrupt!\n"));

    if (inta & rtlpriv->cfg->maps[RTL_IMR_BDOK]) {
        RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
                 ("beacon interrupt!\n"));

    if (inta & rtlpriv->cfg->maps[RTL_IMR_BcnInt]) {
        RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
                 ("prepare beacon for interrupt!\n"));
        tasklet_schedule(&rtlpriv->works.irq_prepare_bcn_tasklet);

    if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TXFOVW]))
        RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, ("IMR_TXFOVW!\n"));

    if (inta & rtlpriv->cfg->maps[RTL_IMR_MGNTDOK]) {
        RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
                 ("Manage ok interrupt!\n"));
        _rtl_pci_tx_isr(hw, MGNT_QUEUE);

    if (inta & rtlpriv->cfg->maps[RTL_IMR_HIGHDOK]) {
        RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
                 ("HIGH_QUEUE ok interrupt!\n"));
        _rtl_pci_tx_isr(hw, HIGH_QUEUE);

    if (inta & rtlpriv->cfg->maps[RTL_IMR_BKDOK]) {
        rtlpriv->link_info.num_tx_inperiod++;

        RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
                 ("BK Tx OK interrupt!\n"));
        _rtl_pci_tx_isr(hw, BK_QUEUE);

    if (inta & rtlpriv->cfg->maps[RTL_IMR_BEDOK]) {
        rtlpriv->link_info.num_tx_inperiod++;

        RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
                 ("BE TX OK interrupt!\n"));
        _rtl_pci_tx_isr(hw, BE_QUEUE);

    if (inta & rtlpriv->cfg->maps[RTL_IMR_VIDOK]) {
        rtlpriv->link_info.num_tx_inperiod++;

        RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
                 ("VI TX OK interrupt!\n"));
        _rtl_pci_tx_isr(hw, VI_QUEUE);

    if (inta & rtlpriv->cfg->maps[RTL_IMR_VODOK]) {
        rtlpriv->link_info.num_tx_inperiod++;

        RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
                 ("VO TX OK interrupt!\n"));
        _rtl_pci_tx_isr(hw, VO_QUEUE);

    if (inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) {
        RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, ("Rx ok interrupt!\n"));
        tasklet_schedule(&rtlpriv->works.irq_tasklet);

    if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) {
        RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
                 ("rx descriptor unavailable!\n"));
        tasklet_schedule(&rtlpriv->works.irq_tasklet);

    if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) {
        RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, ("rx overflow!\n"));
        tasklet_schedule(&rtlpriv->works.irq_tasklet);

    spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);

    spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
    _rtl_pci_rx_interrupt(hw);
static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
    struct rtl_priv *rtlpriv = rtl_priv(hw);
    struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
    struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
    struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE];
    struct ieee80211_hdr *hdr = NULL;
    struct ieee80211_tx_info *info = NULL;
    struct sk_buff *pskb = NULL;
    struct rtl_tx_desc *pdesc = NULL;
    unsigned int queue_index;

    ring = &rtlpci->tx_ring[BEACON_QUEUE];
    pskb = __skb_dequeue(&ring->queue);

    /*NB: the beacon data buffer must be 32-bit aligned. */
    pskb = ieee80211_beacon_get(hw, mac->vif);

    hdr = (struct ieee80211_hdr *)(pskb->data);
    info = IEEE80211_SKB_CB(pskb);

    queue_index = BEACON_QUEUE;

    pdesc = &ring->desc[0];
    rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
                                    info, pskb, queue_index);

    __skb_queue_tail(&ring->queue, pskb);

    rtlpriv->cfg->ops->set_desc((u8 *) pdesc, true, HW_DESC_OWN,
static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw)
    struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));

    for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
        rtlpci->txringcount[i] = RT_TXDESC_NUM;

    /*
     * We just allocate 2 descriptors for the beacon queue,
     * because we only need the first descriptor for a hw beacon.
     */
    rtlpci->txringcount[BEACON_QUEUE] = 2;

    /*
     * The BE queue needs more descriptors for performance;
     * otherwise "no more tx desc" can occur and may cause a
     * mac80211 memory leak.
     */
    rtlpci->txringcount[BE_QUEUE] = RT_TXDESC_NUM_BE_QUEUE;

    rtlpci->rxbuffersize = 9100;	/*2048/1024; */
    rtlpci->rxringcount = RTL_PCI_MAX_RX_COUNT;	/*64; */
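/*
 * Defaults chosen above: every tx ring gets RT_TXDESC_NUM descriptors,
 * except the beacon ring (2, only the first is used) and the BE ring
 * (RT_TXDESC_NUM_BE_QUEUE, enlarged for throughput); rx buffers are
 * 9100 bytes and the rx ring holds RTL_PCI_MAX_RX_COUNT entries.
 */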
static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
                                 struct pci_dev *pdev)
    struct rtl_priv *rtlpriv = rtl_priv(hw);
    struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
    struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
    struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
    struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));

    rtlpci->up_first_time = true;
    rtlpci->being_init_adapter = false;

    ppsc->b_inactiveps = false;
    ppsc->b_leisure_ps = true;
    ppsc->b_fwctrl_lps = true;
    ppsc->b_reg_fwctrl_lps = 3;
    ppsc->reg_max_lps_awakeintvl = 5;

    if (ppsc->b_reg_fwctrl_lps == 1)
        ppsc->fwctrl_psmode = FW_PS_MIN_MODE;
    else if (ppsc->b_reg_fwctrl_lps == 2)
        ppsc->fwctrl_psmode = FW_PS_MAX_MODE;
    else if (ppsc->b_reg_fwctrl_lps == 3)
        ppsc->fwctrl_psmode = FW_PS_DTIM_MODE;

    /*Tx/Rx related var */
    _rtl_pci_init_trx_var(hw);

    /*IBSS*/ mac->beacon_interval = 100;

    /*AMPDU*/ mac->min_space_cfg = 0;
    mac->max_mss_density = 0;
    /*set sane AMPDU defaults */
    mac->current_ampdu_density = 7;
    mac->current_ampdu_factor = 3;

    /*QOS*/ rtlpci->acm_method = eAcmWay2_SW;

    tasklet_init(&rtlpriv->works.irq_tasklet,
                 (void (*)(unsigned long))_rtl_pci_irq_tasklet,
    tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet,
                 (void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet,
static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
                                 unsigned int prio, unsigned int entries)
    struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
    struct rtl_priv *rtlpriv = rtl_priv(hw);
    struct rtl_tx_desc *ring;

    ring = pci_alloc_consistent(rtlpci->pdev,
                                sizeof(*ring) * entries, &dma);

    if (!ring || (unsigned long)ring & 0xFF) {
        RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
                 ("Cannot allocate TX ring (prio = %d)\n", prio));

    memset(ring, 0, sizeof(*ring) * entries);
    rtlpci->tx_ring[prio].desc = ring;
    rtlpci->tx_ring[prio].dma = dma;
    rtlpci->tx_ring[prio].idx = 0;
    rtlpci->tx_ring[prio].entries = entries;
    skb_queue_head_init(&rtlpci->tx_ring[prio].queue);

    RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
             ("queue:%d, ring_addr:%p\n", prio, ring));

    for (i = 0; i < entries; i++) {
        nextdescaddress = cpu_to_le32((u32) dma +
                                      ((i + 1) % entries) *

        rtlpriv->cfg->ops->set_desc((u8 *)&(ring[i]),
                                    true, HW_DESC_TX_NEXTDESC_ADDR,
                                    (u8 *)&nextdescaddress);
static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
    struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
    struct rtl_priv *rtlpriv = rtl_priv(hw);
    struct rtl_rx_desc *entry = NULL;
    int i, rx_queue_idx;

    /*
     * rx_queue_idx 0: RX_MPDU_QUEUE
     * rx_queue_idx 1: RX_CMD_QUEUE
     */
    for (rx_queue_idx = 0; rx_queue_idx < RTL_PCI_MAX_RX_QUEUE;
        rtlpci->rx_ring[rx_queue_idx].desc =
            pci_alloc_consistent(rtlpci->pdev,
                                 sizeof(*rtlpci->rx_ring[rx_queue_idx].
                                        desc) * rtlpci->rxringcount,
                                 &rtlpci->rx_ring[rx_queue_idx].dma);

        if (!rtlpci->rx_ring[rx_queue_idx].desc ||
            (unsigned long)rtlpci->rx_ring[rx_queue_idx].desc & 0xFF) {
            RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
                     ("Cannot allocate RX ring\n"));

        memset(rtlpci->rx_ring[rx_queue_idx].desc, 0,
               sizeof(*rtlpci->rx_ring[rx_queue_idx].desc) *
               rtlpci->rxringcount);

        rtlpci->rx_ring[rx_queue_idx].idx = 0;

        for (i = 0; i < rtlpci->rxringcount; i++) {
            struct sk_buff *skb =
                dev_alloc_skb(rtlpci->rxbuffersize);

            entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];

            /*skb->dev = dev; */

            rtlpci->rx_ring[rx_queue_idx].rx_buf[i] = skb;

            /*
             * just set skb->cb to the mapping addr
             * for pci_unmap_single() use
             */
            *((dma_addr_t *) skb->cb) =
                pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
                               rtlpci->rxbuffersize,
                               PCI_DMA_FROMDEVICE);

            bufferaddress = cpu_to_le32(*((dma_addr_t *)skb->cb));
            rtlpriv->cfg->ops->set_desc((u8 *)entry, false,
                                        HW_DESC_RXBUFF_ADDR,
                                        (u8 *)&bufferaddress);
            rtlpriv->cfg->ops->set_desc((u8 *)entry, false,

            rtlpriv->cfg->ops->set_desc((u8 *) entry, false,

            rtlpriv->cfg->ops->set_desc((u8 *) entry, false,
                                        HW_DESC_RXERO, (u8 *)&tmp_one);
static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw,
    struct rtl_priv *rtlpriv = rtl_priv(hw);
    struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
    struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];

    while (skb_queue_len(&ring->queue)) {
        struct rtl_tx_desc *entry = &ring->desc[ring->idx];
        struct sk_buff *skb = __skb_dequeue(&ring->queue);

        pci_unmap_single(rtlpci->pdev,
                         le32_to_cpu(rtlpriv->cfg->
                                     ops->get_desc((u8 *) entry, true,
                                                   HW_DESC_TXBUFF_ADDR)),
                         skb->len, PCI_DMA_TODEVICE);

        ring->idx = (ring->idx + 1) % ring->entries;

    pci_free_consistent(rtlpci->pdev,
                        sizeof(*ring->desc) * ring->entries,
                        ring->desc, ring->dma);
static void _rtl_pci_free_rx_ring(struct rtl_pci *rtlpci)
    int i, rx_queue_idx;

    /*rx_queue_idx 0:RX_MPDU_QUEUE */
    /*rx_queue_idx 1:RX_CMD_QUEUE */
    for (rx_queue_idx = 0; rx_queue_idx < RTL_PCI_MAX_RX_QUEUE;
        for (i = 0; i < rtlpci->rxringcount; i++) {
            struct sk_buff *skb =
                rtlpci->rx_ring[rx_queue_idx].rx_buf[i];

            pci_unmap_single(rtlpci->pdev,
                             *((dma_addr_t *) skb->cb),
                             rtlpci->rxbuffersize,
                             PCI_DMA_FROMDEVICE);

        pci_free_consistent(rtlpci->pdev,
                            sizeof(*rtlpci->rx_ring[rx_queue_idx].
                                   desc) * rtlpci->rxringcount,
                            rtlpci->rx_ring[rx_queue_idx].desc,
                            rtlpci->rx_ring[rx_queue_idx].dma);
        rtlpci->rx_ring[rx_queue_idx].desc = NULL;
static int _rtl_pci_init_trx_ring(struct ieee80211_hw *hw)
    struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));

    ret = _rtl_pci_init_rx_ring(hw);

    for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
        ret = _rtl_pci_init_tx_ring(hw, i,
                                    rtlpci->txringcount[i]);
            goto err_free_rings;

    _rtl_pci_free_rx_ring(rtlpci);

    for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
        if (rtlpci->tx_ring[i].desc)
            _rtl_pci_free_tx_ring(hw, i);
static int _rtl_pci_deinit_trx_ring(struct ieee80211_hw *hw)
    struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));

    _rtl_pci_free_rx_ring(rtlpci);

    for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
        _rtl_pci_free_tx_ring(hw, i);
int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
    struct rtl_priv *rtlpriv = rtl_priv(hw);
    struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
    int i, rx_queue_idx;
    unsigned long flags;

    /*rx_queue_idx 0:RX_MPDU_QUEUE */
    /*rx_queue_idx 1:RX_CMD_QUEUE */
    for (rx_queue_idx = 0; rx_queue_idx < RTL_PCI_MAX_RX_QUEUE;
        /*
         * force the rx_ring[RX_MPDU_QUEUE/RX_CMD_QUEUE].idx
         * to the first one
         */
        if (rtlpci->rx_ring[rx_queue_idx].desc) {
            struct rtl_rx_desc *entry = NULL;

            for (i = 0; i < rtlpci->rxringcount; i++) {
                entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];
                rtlpriv->cfg->ops->set_desc((u8 *) entry,

            rtlpci->rx_ring[rx_queue_idx].idx = 0;

    /*
     * after reset, release previously pending packets
     * and force the tx idx to the first one
     */
    spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
    for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
        if (rtlpci->tx_ring[i].desc) {
            struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[i];

            while (skb_queue_len(&ring->queue)) {
                struct rtl_tx_desc *entry =
                    &ring->desc[ring->idx];
                struct sk_buff *skb =
                    __skb_dequeue(&ring->queue);

                pci_unmap_single(rtlpci->pdev,
                                 le32_to_cpu(rtlpriv->cfg->ops->
                                             HW_DESC_TXBUFF_ADDR)),
                                 skb->len, PCI_DMA_TODEVICE);

                ring->idx = (ring->idx + 1) % ring->entries;

    spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
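/*
 * rtl_pci_reset_trx_ring() above reinitializes every rx descriptor and
 * rewinds the rx index to 0, then, under the irq lock, dequeues and
 * unmaps any tx skbs still pending so every tx ring restarts from its
 * first descriptor.
 */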
unsigned int _rtl_mac_to_hwqueue(u16 fc,
                                 unsigned int mac80211_queue_index)
    unsigned int hw_queue_index;

    if (unlikely(ieee80211_is_beacon(fc))) {
        hw_queue_index = BEACON_QUEUE;

    if (ieee80211_is_mgmt(fc)) {
        hw_queue_index = MGNT_QUEUE;

    switch (mac80211_queue_index) {
        hw_queue_index = VO_QUEUE;
        hw_queue_index = VI_QUEUE;
        hw_queue_index = BE_QUEUE;
        hw_queue_index = BK_QUEUE;
        hw_queue_index = BE_QUEUE;
        RT_ASSERT(false, ("QSLT_BE queue, skb_queue:%d\n",
                          mac80211_queue_index));

    return hw_queue_index;
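/*
 * _rtl_mac_to_hwqueue() maps a frame onto a hardware ring: beacons and
 * management frames get BEACON_QUEUE/MGNT_QUEUE, and the mac80211 data
 * queues are translated (in the order shown) to VO/VI/BE/BK, with
 * BE_QUEUE as the asserted fallback for unexpected queue indices.
 */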
int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
    struct rtl_priv *rtlpriv = rtl_priv(hw);
    struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
    struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
    struct rtl8192_tx_ring *ring;
    struct rtl_tx_desc *pdesc;

    unsigned int queue_index, hw_queue;
    unsigned long flags;
    struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
    u16 fc = le16_to_cpu(hdr->frame_control);
    u8 *pda_addr = hdr->addr1;
    struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));

    if (ieee80211_is_mgmt(fc))
        rtl_tx_mgmt_proc(hw, skb);
    rtl_action_proc(hw, skb, true);

    queue_index = skb_get_queue_mapping(skb);
    hw_queue = _rtl_mac_to_hwqueue(fc, queue_index);

    if (is_multicast_ether_addr(pda_addr))
        rtlpriv->stats.txbytesmulticast += skb->len;
    else if (is_broadcast_ether_addr(pda_addr))
        rtlpriv->stats.txbytesbroadcast += skb->len;

        rtlpriv->stats.txbytesunicast += skb->len;

    spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);

    ring = &rtlpci->tx_ring[hw_queue];
    if (hw_queue != BEACON_QUEUE)
        idx = (ring->idx + skb_queue_len(&ring->queue)) %

    pdesc = &ring->desc[idx];
    own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc,

    if ((own == 1) && (hw_queue != BEACON_QUEUE)) {
        RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
                 ("No more TX desc@%d, ring->idx = %d,"
                  "idx = %d, skb_queue_len = 0x%d\n",
                  hw_queue, ring->idx, idx,
                  skb_queue_len(&ring->queue)));

        spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);

    /*
     *if (ieee80211_is_nullfunc(fc)) {
     *	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
     */

    if (ieee80211_is_data_qos(fc)) {
        qc = ieee80211_get_qos_ctl(hdr);
        tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;

        seq_number = mac->tids[tid].seq_number;
        seq_number &= IEEE80211_SCTL_SEQ;
        /*
         *hdr->seq_ctrl = hdr->seq_ctrl &
         *cpu_to_le16(IEEE80211_SCTL_FRAG);
         *hdr->seq_ctrl |= cpu_to_le16(seq_number);
         */

    if (ieee80211_is_data(fc))
        rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);

    rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
                                    info, skb, hw_queue);

    __skb_queue_tail(&ring->queue, skb);

    rtlpriv->cfg->ops->set_desc((u8 *) pdesc, true,
                                HW_DESC_OWN, (u8 *)&temp_one);

    if (!ieee80211_has_morefrags(hdr->frame_control)) {
        mac->tids[tid].seq_number = seq_number;

    if ((ring->entries - skb_queue_len(&ring->queue)) < 2 &&
        hw_queue != BEACON_QUEUE) {
        RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
                 ("less desc left, stop skb_queue@%d, "
                  "idx = %d, skb_queue_len = 0x%d\n",
                  hw_queue, ring->idx, idx,
                  skb_queue_len(&ring->queue)));

        ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));

    spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);

    rtlpriv->cfg->ops->tx_polling(hw, hw_queue);
void rtl_pci_deinit(struct ieee80211_hw *hw)
    struct rtl_priv *rtlpriv = rtl_priv(hw);
    struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));

    _rtl_pci_deinit_trx_ring(hw);

    synchronize_irq(rtlpci->pdev->irq);
    tasklet_kill(&rtlpriv->works.irq_tasklet);

    flush_workqueue(rtlpriv->works.rtl_wq);
    destroy_workqueue(rtlpriv->works.rtl_wq);
int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
    struct rtl_priv *rtlpriv = rtl_priv(hw);

    _rtl_pci_init_struct(hw, pdev);

    err = _rtl_pci_init_trx_ring(hw);
        RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
                 ("tx ring initialization failed"));
int rtl_pci_start(struct ieee80211_hw *hw)
    struct rtl_priv *rtlpriv = rtl_priv(hw);
    struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
    struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
    struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));

    rtl_pci_reset_trx_ring(hw);

    rtlpci->driver_is_goingto_unload = false;
    err = rtlpriv->cfg->ops->hw_init(hw);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
                 ("Failed to config hardware!\n"));

    rtlpriv->cfg->ops->enable_interrupt(hw);
    RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("enable_interrupt OK\n"));

    rtl_init_rx_config(hw);

    /* should be done after adapter start and interrupt enable. */
    set_hal_start(rtlhal);

    RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);

    rtlpci->up_first_time = false;

    RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("OK\n"));
void rtl_pci_stop(struct ieee80211_hw *hw)
    struct rtl_priv *rtlpriv = rtl_priv(hw);
    struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
    struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
    struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
    unsigned long flags;
    u8 RFInProgressTimeOut = 0;

    /*
     * should be done before disabling the interrupt & adapter,
     * and will be done immediately.
     */
    set_hal_stop(rtlhal);

    rtlpriv->cfg->ops->disable_interrupt(hw);

    spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
    while (ppsc->rfchange_inprogress) {
        spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
        if (RFInProgressTimeOut > 100) {
            spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);

        RFInProgressTimeOut++;
        spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);

    ppsc->rfchange_inprogress = true;
    spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);

    rtlpci->driver_is_goingto_unload = true;
    rtlpriv->cfg->ops->hw_disable(hw);
    rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);

    spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
    ppsc->rfchange_inprogress = false;
    spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);

    rtl_pci_enable_aspm(hw);
static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
                                  struct ieee80211_hw *hw)
    struct rtl_priv *rtlpriv = rtl_priv(hw);
    struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
    struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
    struct pci_dev *bridge_pdev = pdev->bus->self;

    venderid = pdev->vendor;
    deviceid = pdev->device;
    pci_read_config_byte(pdev, 0x8, &revisionid);
    pci_read_config_word(pdev, 0x3C, &irqline);

    if (deviceid == RTL_PCI_8192_DID ||
        deviceid == RTL_PCI_0044_DID ||
        deviceid == RTL_PCI_0047_DID ||
        deviceid == RTL_PCI_8192SE_DID ||
        deviceid == RTL_PCI_8174_DID ||
        deviceid == RTL_PCI_8173_DID ||
        deviceid == RTL_PCI_8172_DID ||
        deviceid == RTL_PCI_8171_DID) {
        switch (revisionid) {
        case RTL_PCI_REVISION_ID_8192PCIE:
            RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
                     ("8192 PCI-E is found - "
                      "vid/did=%x/%x\n", venderid, deviceid));
            rtlhal->hw_type = HARDWARE_TYPE_RTL8192E;

        case RTL_PCI_REVISION_ID_8192SE:
            RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
                     ("8192SE is found - "
                      "vid/did=%x/%x\n", venderid, deviceid));
            rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;

            RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
                     ("Err: Unknown device - "
                      "vid/did=%x/%x\n", venderid, deviceid));
            rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;

    } else if (deviceid == RTL_PCI_8192CET_DID ||
               deviceid == RTL_PCI_8192CE_DID ||
               deviceid == RTL_PCI_8191CE_DID ||
               deviceid == RTL_PCI_8188CE_DID) {
        rtlhal->hw_type = HARDWARE_TYPE_RTL8192CE;
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
                 ("8192C PCI-E is found - "
                  "vid/did=%x/%x\n", venderid, deviceid));

        RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
                 ("Err: Unknown device -"
                  " vid/did=%x/%x\n", venderid, deviceid));

        rtlhal->hw_type = RTL_DEFAULT_HARDWARE_TYPE;

    pcipriv->ndis_adapter.busnumber = pdev->bus->number;
    pcipriv->ndis_adapter.devnumber = PCI_SLOT(pdev->devfn);
    pcipriv->ndis_adapter.funcnumber = PCI_FUNC(pdev->devfn);

    /* find bridge info */
    pcipriv->ndis_adapter.pcibridge_vendorid = bridge_pdev->vendor;
    for (tmp = 0; tmp < PCI_BRIDGE_VENDOR_MAX; tmp++) {
        if (bridge_pdev->vendor == pcibridge_vendors[tmp]) {
            pcipriv->ndis_adapter.pcibridge_vendor = tmp;
            RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
                     ("Pci Bridge Vendor is found index: %d\n",

    if (pcipriv->ndis_adapter.pcibridge_vendor !=
        PCI_BRIDGE_VENDOR_UNKNOWN) {
        pcipriv->ndis_adapter.pcibridge_busnum =
            bridge_pdev->bus->number;
        pcipriv->ndis_adapter.pcibridge_devnum =
            PCI_SLOT(bridge_pdev->devfn);
        pcipriv->ndis_adapter.pcibridge_funcnum =
            PCI_FUNC(bridge_pdev->devfn);
        pcipriv->ndis_adapter.pcibridge_pciehdr_offset =
            pci_pcie_cap(bridge_pdev);
        pcipriv->ndis_adapter.pcicfg_addrport =
            (pcipriv->ndis_adapter.pcibridge_busnum << 16) |
            (pcipriv->ndis_adapter.pcibridge_devnum << 11) |
            (pcipriv->ndis_adapter.pcibridge_funcnum << 8) | (1 << 31);
        pcipriv->ndis_adapter.num4bytes =
            (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10) / 4;

        rtl_pci_get_linkcontrol_field(hw);

        if (pcipriv->ndis_adapter.pcibridge_vendor ==
            PCI_BRIDGE_VENDOR_AMD) {
            pcipriv->ndis_adapter.amd_l1_patch =
                rtl_pci_get_amd_l1_patch(hw);

    RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
             ("pcidev busnumber:devnumber:funcnumber:"
              "vendor:link_ctl %d:%d:%d:%x:%x\n",
              pcipriv->ndis_adapter.busnumber,
              pcipriv->ndis_adapter.devnumber,
              pcipriv->ndis_adapter.funcnumber,
              pdev->vendor, pcipriv->ndis_adapter.linkctrl_reg));

    RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
             ("pci_bridge busnumber:devnumber:funcnumber:vendor:"
              "pcie_cap:link_ctl_reg:amd %d:%d:%d:%x:%x:%x:%x\n",
              pcipriv->ndis_adapter.pcibridge_busnum,
              pcipriv->ndis_adapter.pcibridge_devnum,
              pcipriv->ndis_adapter.pcibridge_funcnum,
              pcibridge_vendors[pcipriv->ndis_adapter.pcibridge_vendor],
              pcipriv->ndis_adapter.pcibridge_pciehdr_offset,
              pcipriv->ndis_adapter.pcibridge_linkctrlreg,
              pcipriv->ndis_adapter.amd_l1_patch));

    rtl_pci_parse_configuration(pdev, hw);
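/*
 * _rtl_pci_find_adapter() above derives the hardware type from the PCI
 * device/revision IDs, records bus/device/function numbers for the NIC
 * and its upstream bridge, caches the bridge's PCIe capability offset
 * and Link Control register, and checks the AMD L1 patch so the ASPM
 * helpers earlier in this file have everything they need.
 */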
int __devinit rtl_pci_probe(struct pci_dev *pdev,
                            const struct pci_device_id *id)
    struct ieee80211_hw *hw = NULL;

    struct rtl_priv *rtlpriv = NULL;
    struct rtl_pci_priv *pcipriv = NULL;
    struct rtl_pci *rtlpci;
    unsigned long pmem_start, pmem_len, pmem_flags;

    err = pci_enable_device(pdev);
        ("%s : Cannot enable new PCI device\n",

    if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
        if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
            RT_ASSERT(false, ("Unable to obtain 32bit DMA "
                              "for consistent allocations\n"));
            pci_disable_device(pdev);

    pci_set_master(pdev);

    hw = ieee80211_alloc_hw(sizeof(struct rtl_pci_priv) +
                            sizeof(struct rtl_priv), &rtl_ops);
        ("%s : ieee80211 alloc failed\n", pci_name(pdev)));

    SET_IEEE80211_DEV(hw, &pdev->dev);
    pci_set_drvdata(pdev, hw);

    pcipriv = (void *)rtlpriv->priv;
    pcipriv->dev.pdev = pdev;

    /*
     * init dbgp flags before all other functions, because we
     * will use them in other functions like
     * RT_TRACE/RT_PRINT/RTL_PRINT_DATA;
     * you cannot use these macros
     */
    rtl_dbgp_flag_init(hw);

    err = pci_request_regions(pdev, KBUILD_MODNAME);
        RT_ASSERT(false, ("Can't obtain PCI resources\n"));

    pmem_start = pci_resource_start(pdev, 2);
    pmem_len = pci_resource_len(pdev, 2);
    pmem_flags = pci_resource_flags(pdev, 2);

    /* shared mem start */
    rtlpriv->io.pci_mem_start =
        (unsigned long)pci_iomap(pdev, 2, pmem_len);
    if (rtlpriv->io.pci_mem_start == 0) {
        RT_ASSERT(false, ("Can't map PCI mem\n"));

    RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
             ("mem mapped space: start: 0x%08lx len:%08lx "
              "flags:%08lx, after map:0x%08lx\n",
              pmem_start, pmem_len, pmem_flags,
              rtlpriv->io.pci_mem_start));

    /* Disable Clk Request */
    pci_write_config_byte(pdev, 0x81, 0);
    pci_write_config_byte(pdev, 0x44, 0);
    pci_write_config_byte(pdev, 0x04, 0x06);
    pci_write_config_byte(pdev, 0x04, 0x07);

    /* init cfg & intf_ops */
    rtlpriv->rtlhal.interface = INTF_PCI;
    rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data);
    rtlpriv->intf_ops = &rtl_pci_ops;

    _rtl_pci_find_adapter(pdev, hw);

    /* Init IO handler */
    _rtl_pci_io_handler_init(&pdev->dev, hw);

    /* like read eeprom and so on */
    rtlpriv->cfg->ops->read_eeprom_info(hw);

    if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
        RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
                 ("Can't init_sw_vars.\n"));

    rtlpriv->cfg->ops->init_sw_leds(hw);

    rtl_pci_init_aspm(hw);

    /* Init mac80211 sw */
    err = rtl_init_core(hw);
        RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
                 ("Can't allocate sw for mac80211.\n"));

    err = !rtl_pci_init(hw, pdev);
        RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
                 ("Failed to init PCI.\n"));

    err = ieee80211_register_hw(hw);
        RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
                 ("Can't register mac80211 hw.\n"));

    rtlpriv->mac80211.mac80211_registered = 1;

    err = sysfs_create_group(&pdev->dev.kobj, &rtl_attribute_group);
        RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
                 ("failed to create sysfs device attributes\n"));

    rtl_init_rfkill(hw);

    rtlpci = rtl_pcidev(pcipriv);
    err = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
                      IRQF_SHARED, KBUILD_MODNAME, hw);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
                 ("%s: failed to register IRQ handler\n",
                  wiphy_name(hw->wiphy)));

    rtlpci->irq_alloc = 1;

    set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);

    pci_set_drvdata(pdev, NULL);
    rtl_deinit_core(hw);
    _rtl_pci_io_handler_release(hw);
    ieee80211_free_hw(hw);

    if (rtlpriv->io.pci_mem_start != 0)
        pci_iounmap(pdev, (void *)rtlpriv->io.pci_mem_start);

    pci_release_regions(pdev);

    pci_disable_device(pdev);

EXPORT_SYMBOL(rtl_pci_probe);
void rtl_pci_disconnect(struct pci_dev *pdev)
    struct ieee80211_hw *hw = pci_get_drvdata(pdev);
    struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
    struct rtl_priv *rtlpriv = rtl_priv(hw);
    struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
    struct rtl_mac *rtlmac = rtl_mac(rtlpriv);

    clear_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);

    sysfs_remove_group(&pdev->dev.kobj, &rtl_attribute_group);

    /*ieee80211_unregister_hw will call ops_stop */
    if (rtlmac->mac80211_registered == 1) {
        ieee80211_unregister_hw(hw);
        rtlmac->mac80211_registered = 0;

        rtl_deinit_deferred_work(hw);
        rtlpriv->intf_ops->adapter_stop(hw);

    rtl_deinit_rfkill(hw);

    rtl_deinit_core(hw);
    rtlpriv->cfg->ops->deinit_sw_leds(hw);
    _rtl_pci_io_handler_release(hw);
    rtlpriv->cfg->ops->deinit_sw_vars(hw);

    if (rtlpci->irq_alloc) {
        free_irq(rtlpci->pdev->irq, hw);
        rtlpci->irq_alloc = 0;

    if (rtlpriv->io.pci_mem_start != 0) {
        pci_iounmap(pdev, (void *)rtlpriv->io.pci_mem_start);
        pci_release_regions(pdev);

    pci_disable_device(pdev);
    pci_set_drvdata(pdev, NULL);

    ieee80211_free_hw(hw);

EXPORT_SYMBOL(rtl_pci_disconnect);
/***************************************
 * kernel pci power state defines:
 * PCI_D0         ((pci_power_t __force) 0)
 * PCI_D1         ((pci_power_t __force) 1)
 * PCI_D2         ((pci_power_t __force) 2)
 * PCI_D3hot      ((pci_power_t __force) 3)
 * PCI_D3cold     ((pci_power_t __force) 4)
 * PCI_UNKNOWN    ((pci_power_t __force) 5)
 *
 * This function is called when the system goes into suspend state.
 * mac80211 will call rtl_mac_stop() from the mac80211 suspend
 * function first, so there is no need to call hw_disable here.
 ****************************************/
int rtl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
    pci_save_state(pdev);
    pci_disable_device(pdev);
    pci_set_power_state(pdev, PCI_D3hot);

EXPORT_SYMBOL(rtl_pci_suspend);

int rtl_pci_resume(struct pci_dev *pdev)
    pci_set_power_state(pdev, PCI_D0);
    ret = pci_enable_device(pdev);
        RT_ASSERT(false, ("ERR: <======\n"));

    pci_restore_state(pdev);

EXPORT_SYMBOL(rtl_pci_resume);
struct rtl_intf_ops rtl_pci_ops = {
    .adapter_start = rtl_pci_start,
    .adapter_stop = rtl_pci_stop,
    .adapter_tx = rtl_pci_tx,
    .reset_trx_ring = rtl_pci_reset_trx_ring,

    .disable_aspm = rtl_pci_disable_aspm,
    .enable_aspm = rtl_pci_enable_aspm,