// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019  Realtek Corporation
 */

#include "main.h"
#include "mac.h"
#include "reg.h"
#include "fw.h"
#include "debug.h"
void rtw_set_channel_mac(struct rtw_dev *rtwdev, u8 channel, u8 bw,
			 u8 primary_ch_idx)
{
	u8 txsc40 = 0, txsc20 = 0;
	u32 value32;
	u8 value8;

	txsc20 = primary_ch_idx;
	if (bw == RTW_CHANNEL_WIDTH_80) {
		if (txsc20 == RTW_SC_20_UPPER || txsc20 == RTW_SC_20_UPMOST)
			txsc40 = RTW_SC_40_UPPER;
		else
			txsc40 = RTW_SC_40_LOWER;
	}
	rtw_write8(rtwdev, REG_DATA_SC,
		   BIT_TXSC_20M(txsc20) | BIT_TXSC_40M(txsc40));

	value32 = rtw_read32(rtwdev, REG_WMAC_TRXPTCL_CTL);
	value32 &= ~BIT_RFMOD;
	switch (bw) {
	case RTW_CHANNEL_WIDTH_80:
		value32 |= BIT_RFMOD_80M;
		break;
	case RTW_CHANNEL_WIDTH_40:
		value32 |= BIT_RFMOD_40M;
		break;
	case RTW_CHANNEL_WIDTH_20:
	default:
		break;
	}
	rtw_write32(rtwdev, REG_WMAC_TRXPTCL_CTL, value32);

	value32 = rtw_read32(rtwdev, REG_AFE_CTRL1) & ~(BIT_MAC_CLK_SEL);
	value32 |= (MAC_CLK_HW_DEF_80M << BIT_SHIFT_MAC_CLK_SEL);
	rtw_write32(rtwdev, REG_AFE_CTRL1, value32);

	rtw_write8(rtwdev, REG_USTIME_TSF, MAC_CLK_SPEED);
	rtw_write8(rtwdev, REG_USTIME_EDCA, MAC_CLK_SPEED);

	value8 = rtw_read8(rtwdev, REG_CCK_CHECK);
	value8 = value8 & ~BIT_CHECK_CCK_EN;
	if (IS_CH_5G_BAND(channel))
		value8 |= BIT_CHECK_CCK_EN;
	rtw_write8(rtwdev, REG_CCK_CHECK, value8);
}
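/* Pre-power-on MAC configuration: clear REG_RSV_CTRL, apply HCI-specific
 * clock settings, route the PAPE/LNAON pads and RFE GPIOs to WLAN, and
 * keep the BB/RF blocks held in reset until the PHY is initialized later.
 */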
static int rtw_mac_pre_system_cfg(struct rtw_dev *rtwdev)
{
	u32 value32;
	u8 value8;

	rtw_write8(rtwdev, REG_RSV_CTRL, 0);

	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		rtw_write32_set(rtwdev, REG_HCI_OPT_CTRL, BIT_BT_DIG_CLK_EN);
		break;
	case RTW_HCI_TYPE_USB:
		break;
	default:
		return -EINVAL;
	}

	/* config PIN Mux */
	value32 = rtw_read32(rtwdev, REG_PAD_CTRL1);
	value32 |= BIT_PAPE_WLBT_SEL | BIT_LNAON_WLBT_SEL;
	rtw_write32(rtwdev, REG_PAD_CTRL1, value32);

	value32 = rtw_read32(rtwdev, REG_LED_CFG);
	value32 &= ~(BIT_PAPE_SEL_EN | BIT_LNAON_SEL_EN);
	rtw_write32(rtwdev, REG_LED_CFG, value32);

	value32 = rtw_read32(rtwdev, REG_GPIO_MUXCFG);
	value32 |= BIT_WLRFE_4_5_EN;
	rtw_write32(rtwdev, REG_GPIO_MUXCFG, value32);

	/* disable BB/RF */
	value8 = rtw_read8(rtwdev, REG_SYS_FUNC_EN);
	value8 &= ~(BIT_FEN_BB_RSTB | BIT_FEN_BB_GLB_RST);
	rtw_write8(rtwdev, REG_SYS_FUNC_EN, value8);

	value8 = rtw_read8(rtwdev, REG_RF_CTRL);
	value8 &= ~(BIT_RF_SDM_RSTB | BIT_RF_RSTB | BIT_RF_EN);
	rtw_write8(rtwdev, REG_RF_CTRL, value8);

	value32 = rtw_read32(rtwdev, REG_WLRF1);
	value32 &= ~BIT_WLRF1_BBRF_EN;
	rtw_write32(rtwdev, REG_WLRF1, value32);

	return 0;
}
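/* Poll a single RTW_PWR_CMD_POLLING entry: read the (possibly SDIO-local)
 * register until the masked value matches the expected one or the retry
 * budget runs out.  On PCIe one extra polling round is attempted after
 * toggling a bit in REG_SYS_PW_CTRL.
 */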
static int rtw_pwr_cmd_polling(struct rtw_dev *rtwdev,
			       const struct rtw_pwr_seq_cmd *cmd)
{
	u8 value;
	u8 flag = 0;
	u32 offset;
	u32 cnt = RTW_PWR_POLLING_CNT;

	if (cmd->base == RTW_PWR_ADDR_SDIO)
		offset = cmd->offset | SDIO_LOCAL_OFFSET;
	else
		offset = cmd->offset;

	do {
		cnt--;
		value = rtw_read8(rtwdev, offset);
		value &= cmd->mask;
		if (value == (cmd->value & cmd->mask))
			return 0;
		if (cnt == 0) {
			if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE &&
			    flag == 0) {
				/* toggle power-mode bit and poll once more */
				value = rtw_read8(rtwdev, REG_SYS_PW_CTRL);
				value |= BIT(3);
				rtw_write8(rtwdev, REG_SYS_PW_CTRL, value);
				value &= ~BIT(3);
				rtw_write8(rtwdev, REG_SYS_PW_CTRL, value);
				cnt = RTW_PWR_POLLING_CNT;
				flag = 1;
			} else {
				return -EBUSY;
			}
		} else {
			udelay(50);
		}
	} while (1);
}
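/* Walk one power sequence command table until RTW_PWR_CMD_END, skipping
 * entries whose interface or cut mask does not match this device, and
 * dispatch each remaining entry as a register write, a polling wait or a
 * delay.
 */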
static int rtw_sub_pwr_seq_parser(struct rtw_dev *rtwdev, u8 intf_mask,
				  u8 cut_mask,
				  const struct rtw_pwr_seq_cmd *cmd)
{
	const struct rtw_pwr_seq_cmd *cur_cmd;
	u32 offset;
	u8 value;

	for (cur_cmd = cmd; cur_cmd->cmd != RTW_PWR_CMD_END; cur_cmd++) {
		if (!(cur_cmd->intf_mask & intf_mask) ||
		    !(cur_cmd->cut_mask & cut_mask))
			continue;

		switch (cur_cmd->cmd) {
		case RTW_PWR_CMD_WRITE:
			offset = cur_cmd->offset;

			if (cur_cmd->base == RTW_PWR_ADDR_SDIO)
				offset |= SDIO_LOCAL_OFFSET;

			value = rtw_read8(rtwdev, offset);
			value &= ~cur_cmd->mask;
			value |= (cur_cmd->value & cur_cmd->mask);
			rtw_write8(rtwdev, offset, value);
			break;
		case RTW_PWR_CMD_POLLING:
			if (rtw_pwr_cmd_polling(rtwdev, cur_cmd))
				return -EBUSY;
			break;
		case RTW_PWR_CMD_DELAY:
			if (cur_cmd->value == RTW_PWR_DELAY_US)
				udelay(cur_cmd->offset);
			else
				mdelay(cur_cmd->offset);
			break;
		case RTW_PWR_CMD_READ:
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}
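/* Run the chip's array of power sequence command tables with the interface
 * and cut masks that apply to this device.
 */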
static int rtw_pwr_seq_parser(struct rtw_dev *rtwdev,
			      const struct rtw_pwr_seq_cmd **cmd_seq)
{
	u8 cut_mask;
	u8 intf_mask;
	u8 cut;
	u32 idx = 0;
	const struct rtw_pwr_seq_cmd *cmd;
	int ret;

	cut = rtwdev->hal.cut_version;
	cut_mask = cut_version_to_mask(cut);
	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		intf_mask = BIT(2);
		break;
	case RTW_HCI_TYPE_USB:
		intf_mask = BIT(1);
		break;
	default:
		return -EINVAL;
	}

	do {
		cmd = cmd_seq[idx];
		if (!cmd)
			break;

		ret = rtw_sub_pwr_seq_parser(rtwdev, intf_mask, cut_mask, cmd);
		if (ret)
			return ret;

		idx++;
	} while (1);

	return 0;
}
static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	const struct rtw_pwr_seq_cmd **pwr_seq;
	u8 rpwm;
	bool cur_pwr;

	rpwm = rtw_read8(rtwdev, rtwdev->hci.rpwm_addr);

	/* Check FW still exist or not */
	if (rtw_read16(rtwdev, REG_MCUFW_CTRL) == 0xC078) {
		/* Leave 32K */
		rpwm = (rpwm ^ BIT_RPWM_TOGGLE) & BIT_RPWM_TOGGLE;
		rtw_write8(rtwdev, rtwdev->hci.rpwm_addr, rpwm);
	}

	if (rtw_read8(rtwdev, REG_CR) == 0xea)
		cur_pwr = false;
	else if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB &&
		 (rtw_read8(rtwdev, REG_SYS_STATUS1 + 1) & BIT(0)))
		cur_pwr = false;
	else
		cur_pwr = true;

	if (pwr_on && cur_pwr)
		return -EALREADY;

	pwr_seq = pwr_on ? chip->pwr_on_seq : chip->pwr_off_seq;
	if (rtw_pwr_seq_parser(rtwdev, pwr_seq))
		return -EINVAL;

	return 0;
}
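/* Post-power-on system configuration: enable the WLAN platform and DDMA,
 * set the chip-specific system function enables, and disable
 * boot-from-flash so the driver can download its own firmware.
 */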
static int rtw_mac_init_system_cfg(struct rtw_dev *rtwdev)
{
	u8 sys_func_en = rtwdev->chip->sys_func_en;
	u8 value8;
	u32 value, tmp;

	value = rtw_read32(rtwdev, REG_CPU_DMEM_CON);
	value |= BIT_WL_PLATFORM_RST | BIT_DDMA_EN;
	rtw_write32(rtwdev, REG_CPU_DMEM_CON, value);

	rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, sys_func_en);
	value8 = (rtw_read8(rtwdev, REG_CR_EXT + 3) & 0xF0) | 0x0C;
	rtw_write8(rtwdev, REG_CR_EXT + 3, value8);

	/* disable boot-from-flash for driver's DL FW */
	tmp = rtw_read32(rtwdev, REG_MCUFW_CTRL);
	if (tmp & BIT_BOOT_FSPI_EN) {
		rtw_write32(rtwdev, REG_MCUFW_CTRL, tmp & (~BIT_BOOT_FSPI_EN));
		value = rtw_read32(rtwdev, REG_GPIO_MUXCFG) & (~BIT_FSPI_EN);
		rtw_write32(rtwdev, REG_GPIO_MUXCFG, value);
	}

	return 0;
}
int rtw_mac_power_on(struct rtw_dev *rtwdev)
{
	int ret = 0;

	ret = rtw_mac_pre_system_cfg(rtwdev);
	if (ret)
		goto err;

	ret = rtw_mac_power_switch(rtwdev, true);
	if (ret == -EALREADY) {
		rtw_mac_power_switch(rtwdev, false);
		ret = rtw_mac_power_switch(rtwdev, true);
		if (ret)
			goto err;
	} else if (ret) {
		goto err;
	}

	ret = rtw_mac_init_system_cfg(rtwdev);
	if (ret)
		goto err;

	return 0;

err:
	rtw_err(rtwdev, "mac power on failed");
	return ret;
}
void rtw_mac_power_off(struct rtw_dev *rtwdev)
{
	rtw_mac_power_switch(rtwdev, false);
}
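/* Sanity-check that the firmware file length matches the sizes advertised
 * in its header (DMEM + IMEM + optional EMEM, each followed by a checksum).
 */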
static bool check_firmware_size(const u8 *data, u32 size)
{
	const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)data;
	u32 dmem_size;
	u32 imem_size;
	u32 emem_size;
	u32 real_size;

	dmem_size = le32_to_cpu(fw_hdr->dmem_size);
	imem_size = le32_to_cpu(fw_hdr->imem_size);
	emem_size = (fw_hdr->mem_usage & BIT(4)) ?
		    le32_to_cpu(fw_hdr->emem_size) : 0;

	dmem_size += FW_HDR_CHKSUM_SIZE;
	imem_size += FW_HDR_CHKSUM_SIZE;
	emem_size += emem_size ? FW_HDR_CHKSUM_SIZE : 0;
	real_size = FW_HDR_SIZE + dmem_size + imem_size + emem_size;
	if (real_size != size)
		return false;

	return true;
}
static void wlan_cpu_enable(struct rtw_dev *rtwdev, bool enable)
{
	if (enable) {
		/* cpu io interface enable */
		rtw_write8_set(rtwdev, REG_RSV_CTRL + 1, BIT_WLMCU_IOIF);

		/* cpu enable */
		rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);
	} else {
		/* cpu disable */
		rtw_write8_clr(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);

		/* cpu io interface disable */
		rtw_write8_clr(rtwdev, REG_RSV_CTRL + 1, BIT_WLMCU_IOIF);
	}
}
#define DLFW_RESTORE_REG_NUM 6
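/* Back up the registers touched during firmware download (TXDMA queue
 * mapping, REG_CR, H2C queue CSR, FIFO page layout and beacon control) so
 * they can be restored afterwards, then reconfigure them so that only the
 * high-priority queue is used while the firmware is being downloaded.
 */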
static void download_firmware_reg_backup(struct rtw_dev *rtwdev,
					 struct rtw_backup_info *bckp)
{
	u8 tmp;
	u8 bckp_idx = 0;

	/* set HIQ to hi priority */
	bckp[bckp_idx].len = 1;
	bckp[bckp_idx].reg = REG_TXDMA_PQ_MAP + 1;
	bckp[bckp_idx].val = rtw_read8(rtwdev, REG_TXDMA_PQ_MAP + 1);
	bckp_idx++;
	tmp = RTW_DMA_MAPPING_HIGH << 6;
	rtw_write8(rtwdev, REG_TXDMA_PQ_MAP + 1, tmp);

	/* DLFW only use HIQ, map HIQ to hi priority */
	bckp[bckp_idx].len = 1;
	bckp[bckp_idx].reg = REG_CR;
	bckp[bckp_idx].val = rtw_read8(rtwdev, REG_CR);
	bckp_idx++;
	bckp[bckp_idx].len = 4;
	bckp[bckp_idx].reg = REG_H2CQ_CSR;
	bckp[bckp_idx].val = BIT_H2CQ_FULL;
	bckp_idx++;
	tmp = BIT_HCI_TXDMA_EN | BIT_TXDMA_EN;
	rtw_write8(rtwdev, REG_CR, tmp);
	rtw_write32(rtwdev, REG_H2CQ_CSR, BIT_H2CQ_FULL);

	/* Config hi priority queue and public priority queue page number */
	bckp[bckp_idx].len = 2;
	bckp[bckp_idx].reg = REG_FIFOPAGE_INFO_1;
	bckp[bckp_idx].val = rtw_read16(rtwdev, REG_FIFOPAGE_INFO_1);
	bckp_idx++;
	bckp[bckp_idx].len = 4;
	bckp[bckp_idx].reg = REG_RQPN_CTRL_2;
	bckp[bckp_idx].val = rtw_read32(rtwdev, REG_RQPN_CTRL_2) | BIT_LD_RQPN;
	bckp_idx++;
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, 0x200);
	rtw_write32(rtwdev, REG_RQPN_CTRL_2, bckp[bckp_idx - 1].val);

	/* Disable beacon related functions */
	tmp = rtw_read8(rtwdev, REG_BCN_CTRL);
	bckp[bckp_idx].len = 1;
	bckp[bckp_idx].reg = REG_BCN_CTRL;
	bckp[bckp_idx].val = tmp;
	bckp_idx++;
	tmp = (u8)((tmp & (~BIT_EN_BCN_FUNCTION)) | BIT_DIS_TSF_UDT);
	rtw_write8(rtwdev, REG_BCN_CTRL, tmp);

	WARN(bckp_idx != DLFW_RESTORE_REG_NUM, "wrong backup number\n");
}
static void download_firmware_reset_platform(struct rtw_dev *rtwdev)
{
	rtw_write8_clr(rtwdev, REG_CPU_DMEM_CON + 2, BIT_WL_PLATFORM_RST >> 16);
	rtw_write8_clr(rtwdev, REG_SYS_CLK_CTRL + 1, BIT_CPU_CLK_EN >> 8);
	rtw_write8_set(rtwdev, REG_CPU_DMEM_CON + 2, BIT_WL_PLATFORM_RST >> 16);
	rtw_write8_set(rtwdev, REG_SYS_CLK_CTRL + 1, BIT_CPU_CLK_EN >> 8);
}
static void download_firmware_reg_restore(struct rtw_dev *rtwdev,
					  struct rtw_backup_info *bckp,
					  u8 bckp_num)
{
	rtw_restore_reg(rtwdev, bckp, bckp_num);
}
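/* Firmware chunks are pushed into the reserved page area of the TX buffer.
 * The data is copied through a bounce buffer because
 * rtw_fw_write_data_rsvd_page() needs a writable buffer while the firmware
 * blob is const.  For USB, a payload whose total bulk length would be a
 * multiple of 512 bytes is padded by one byte to avoid a zero-length-packet
 * boundary condition.
 */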
#define TX_DESC_SIZE 48

static int send_firmware_pkt_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
					const u8 *data, u32 size)
{
	u8 *buf;
	int ret;

	buf = kmemdup(data, size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, size);
	kfree(buf);

	return ret;
}
static int
send_firmware_pkt(struct rtw_dev *rtwdev, u16 pg_addr, const u8 *data, u32 size)
{
	int ret;

	/* avoid a USB bulk transfer that is an exact multiple of 512 bytes */
	if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB &&
	    !((size + TX_DESC_SIZE) & (512 - 1)))
		size += 1;

	ret = send_firmware_pkt_rsvd_page(rtwdev, pg_addr, data, size);
	if (ret)
		rtw_err(rtwdev, "failed to download rsvd page\n");

	return ret;
}
static int
iddma_enable(struct rtw_dev *rtwdev, u32 src, u32 dst, u32 ctrl)
{
	rtw_write32(rtwdev, REG_DDMA_CH0SA, src);
	rtw_write32(rtwdev, REG_DDMA_CH0DA, dst);
	rtw_write32(rtwdev, REG_DDMA_CH0CTRL, ctrl);

	if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0))
		return -EBUSY;

	return 0;
}
static int iddma_download_firmware(struct rtw_dev *rtwdev, u32 src, u32 dst,
				   u32 len, u8 first)
{
	u32 ch0_ctrl = BIT_DDMACH0_CHKSUM_EN | BIT_DDMACH0_OWN;

	if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0))
		return -EBUSY;

	ch0_ctrl |= len & BIT_MASK_DDMACH0_DLEN;
	if (!first)
		ch0_ctrl |= BIT_DDMACH0_CHKSUM_CONT;

	if (iddma_enable(rtwdev, src, dst, ch0_ctrl))
		return -EBUSY;

	return 0;
}
static bool
check_fw_checksum(struct rtw_dev *rtwdev, u32 addr)
{
	u8 fw_ctrl;

	fw_ctrl = rtw_read8(rtwdev, REG_MCUFW_CTRL);

	if (rtw_read32(rtwdev, REG_DDMA_CH0CTRL) & BIT_DDMACH0_CHKSUM_STS) {
		if (addr < OCPBASE_DMEM_88XX) {
			fw_ctrl |= BIT_IMEM_DW_OK;
			fw_ctrl &= ~BIT_IMEM_CHKSUM_OK;
			rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
		} else {
			fw_ctrl |= BIT_DMEM_DW_OK;
			fw_ctrl &= ~BIT_DMEM_CHKSUM_OK;
			rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
		}

		rtw_err(rtwdev, "invalid fw checksum\n");

		return false;
	}

	if (addr < OCPBASE_DMEM_88XX) {
		fw_ctrl |= (BIT_IMEM_DW_OK | BIT_IMEM_CHKSUM_OK);
		rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
	} else {
		fw_ctrl |= (BIT_DMEM_DW_OK | BIT_DMEM_CHKSUM_OK);
		rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
	}

	return true;
}
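/* Copy one firmware region into chip memory: split it into chunks of at
 * most 4 KB, push each chunk into the reserved page area of the TX buffer,
 * and let the internal DDMA engine move it to its destination while
 * accumulating a checksum that is verified at the end.
 */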
static int
download_firmware_to_mem(struct rtw_dev *rtwdev, const u8 *data,
			 u32 src, u32 dst, u32 size)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	u32 desc_size = chip->tx_pkt_desc_sz;
	u8 first_part;
	u32 mem_offset;
	u32 residue_size;
	u32 pkt_size;
	u32 max_size = 0x1000;
	u32 val;
	int ret;

	mem_offset = 0;
	first_part = 1;
	residue_size = size;

	val = rtw_read32(rtwdev, REG_DDMA_CH0CTRL);
	val |= BIT_DDMACH0_RESET_CHKSUM_STS;
	rtw_write32(rtwdev, REG_DDMA_CH0CTRL, val);

	while (residue_size) {
		if (residue_size >= max_size)
			pkt_size = max_size;
		else
			pkt_size = residue_size;

		ret = send_firmware_pkt(rtwdev, (u16)(src >> 7),
					data + mem_offset, pkt_size);
		if (ret)
			return ret;

		ret = iddma_download_firmware(rtwdev, OCPBASE_TXBUF_88XX +
					      src + desc_size,
					      dst + mem_offset, pkt_size,
					      first_part);
		if (ret)
			return ret;

		first_part = 0;
		mem_offset += pkt_size;
		residue_size -= pkt_size;
	}

	if (!check_fw_checksum(rtwdev, dst))
		return -EINVAL;

	return 0;
}
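/* Parse the firmware header and download the DMEM, IMEM and optional EMEM
 * regions one after another, with MCU firmware download mode enabled.
 */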
static int
start_download_firmware(struct rtw_dev *rtwdev, const u8 *data, u32 size)
{
	const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)data;
	const u8 *cur_fw;
	u16 val;
	u32 imem_size;
	u32 dmem_size;
	u32 emem_size;
	u32 addr;
	int ret;

	dmem_size = le32_to_cpu(fw_hdr->dmem_size);
	imem_size = le32_to_cpu(fw_hdr->imem_size);
	emem_size = (fw_hdr->mem_usage & BIT(4)) ?
		    le32_to_cpu(fw_hdr->emem_size) : 0;
	dmem_size += FW_HDR_CHKSUM_SIZE;
	imem_size += FW_HDR_CHKSUM_SIZE;
	emem_size += emem_size ? FW_HDR_CHKSUM_SIZE : 0;

	val = (u16)(rtw_read16(rtwdev, REG_MCUFW_CTRL) & 0x3800);
	val |= BIT_MCUFWDL_EN;
	rtw_write16(rtwdev, REG_MCUFW_CTRL, val);

	cur_fw = data + FW_HDR_SIZE;
	addr = le32_to_cpu(fw_hdr->dmem_addr);
	addr &= ~BIT(31);
	ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, dmem_size);
	if (ret)
		return ret;

	cur_fw = data + FW_HDR_SIZE + dmem_size;
	addr = le32_to_cpu(fw_hdr->imem_addr);
	addr &= ~BIT(31);
	ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, imem_size);
	if (ret)
		return ret;

	if (emem_size) {
		cur_fw = data + FW_HDR_SIZE + dmem_size + imem_size;
		addr = le32_to_cpu(fw_hdr->emem_addr);
		addr &= ~BIT(31);
		ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr,
					       emem_size);
		if (ret)
			return ret;
	}

	return 0;
}
static int download_firmware_validate(struct rtw_dev *rtwdev)
{
	u32 fw_key;

	if (!check_hw_ready(rtwdev, REG_MCUFW_CTRL, FW_READY_MASK, FW_READY)) {
		fw_key = rtw_read32(rtwdev, REG_FW_DBG7) & FW_KEY_MASK;
		if (fw_key == ILLEGAL_KEY_GROUP)
			rtw_err(rtwdev, "invalid fw key\n");
		return -EINVAL;
	}

	return 0;
}
static void download_firmware_end_flow(struct rtw_dev *rtwdev)
{
	u16 fw_ctrl;

	rtw_write32(rtwdev, REG_TXDMA_STATUS, BTI_PAGE_OVF);

	/* Check IMEM & DMEM checksum is OK or not */
	fw_ctrl = rtw_read16(rtwdev, REG_MCUFW_CTRL);
	if ((fw_ctrl & BIT_CHECK_SUM_OK) != BIT_CHECK_SUM_OK)
		return;

	fw_ctrl = (fw_ctrl | BIT_FW_DW_RDY) & ~BIT_MCUFWDL_EN;
	rtw_write16(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
}
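/* Full firmware download flow: validate the image, save the LTE-coex
 * register, stop the WLAN CPU, back up and reconfigure the TXDMA path,
 * stream the image into chip memory, restore the saved state, restart the
 * CPU and finally wait for the firmware-ready indication.
 */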
int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw)
{
	struct rtw_backup_info bckp[DLFW_RESTORE_REG_NUM];
	const u8 *data = fw->firmware->data;
	u32 size = fw->firmware->size;
	u32 ltecoex_bckp;
	int ret;

	if (!check_firmware_size(data, size))
		return -EINVAL;

	if (!ltecoex_read_reg(rtwdev, 0x38, &ltecoex_bckp))
		return -EBUSY;

	wlan_cpu_enable(rtwdev, false);

	download_firmware_reg_backup(rtwdev, bckp);
	download_firmware_reset_platform(rtwdev);

	ret = start_download_firmware(rtwdev, data, size);
	if (ret)
		goto dlfw_fail;

	download_firmware_reg_restore(rtwdev, bckp, DLFW_RESTORE_REG_NUM);

	download_firmware_end_flow(rtwdev);

	wlan_cpu_enable(rtwdev, true);

	if (!ltecoex_reg_write(rtwdev, 0x38, ltecoex_bckp))
		return -EBUSY;

	ret = download_firmware_validate(rtwdev);
	if (ret)
		goto dlfw_fail;

	/* reset desc and index */
	rtw_hci_setup(rtwdev);

	rtwdev->h2c.last_box_num = 0;
	rtwdev->h2c.seq = 0;

	set_bit(RTW_FLAG_FW_RUNNING, rtwdev->flags);

	return 0;

dlfw_fail:
	/* Disable FWDL_EN */
	rtw_write8_clr(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
	rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);

	return ret;
}
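/* Translate mac80211 AC queue bits into the priority-queue bitmap used by
 * the DMA mapping of this chip.
 */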
static u32 get_priority_queues(struct rtw_dev *rtwdev, u32 queues)
{
	const struct rtw_rqpn *rqpn = rtwdev->fifo.rqpn;
	u32 prio_queues = 0;

	if (queues & BIT(IEEE80211_AC_VO))
		prio_queues |= BIT(rqpn->dma_map_vo);
	if (queues & BIT(IEEE80211_AC_VI))
		prio_queues |= BIT(rqpn->dma_map_vi);
	if (queues & BIT(IEEE80211_AC_BE))
		prio_queues |= BIT(rqpn->dma_map_be);
	if (queues & BIT(IEEE80211_AC_BK))
		prio_queues |= BIT(rqpn->dma_map_bk);

	return prio_queues;
}
static void __rtw_mac_flush_prio_queue(struct rtw_dev *rtwdev,
				       u32 prio_queue, bool drop)
{
	u32 addr;
	u16 avail_page, rsvd_page;
	int i;

	switch (prio_queue) {
	case RTW_DMA_MAPPING_EXTRA:
		addr = REG_FIFOPAGE_INFO_4;
		break;
	case RTW_DMA_MAPPING_LOW:
		addr = REG_FIFOPAGE_INFO_2;
		break;
	case RTW_DMA_MAPPING_NORMAL:
		addr = REG_FIFOPAGE_INFO_3;
		break;
	case RTW_DMA_MAPPING_HIGH:
		addr = REG_FIFOPAGE_INFO_1;
		break;
	default:
		return;
	}

	/* check if all of the reserved pages are available for 100 msecs */
	for (i = 0; i < 5; i++) {
		rsvd_page = rtw_read16(rtwdev, addr);
		avail_page = rtw_read16(rtwdev, addr + 2);
		if (rsvd_page == avail_page)
			return;

		msleep(20);
	}

	/* priority queue is still not empty, throw a warning,
	 *
	 * Note that if we want to flush the tx queue when having a lot of
	 * traffic (ex, 100Mbps up), some of the packets could be dropped.
	 * And it requires like ~2secs to flush the full priority queue.
	 */
	if (!drop)
		rtw_warn(rtwdev, "timed out to flush queue %d\n", prio_queue);
}
static void rtw_mac_flush_prio_queues(struct rtw_dev *rtwdev,
				      u32 prio_queues, bool drop)
{
	u32 q;

	for (q = 0; q < RTW_DMA_MAPPING_MAX; q++)
		if (prio_queues & BIT(q))
			__rtw_mac_flush_prio_queue(rtwdev, q, drop);
}
void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
{
	u32 prio_queues = 0;

	/* If all of the hardware queues are requested to flush,
	 * or the priority queues are not mapped yet,
	 * flush all of the priority queues
	 */
	if (queues == BIT(rtwdev->hw->queues) - 1 || !rtwdev->fifo.rqpn)
		prio_queues = BIT(RTW_DMA_MAPPING_MAX) - 1;
	else
		prio_queues = get_priority_queues(rtwdev, queues);

	rtw_mac_flush_prio_queues(rtwdev, prio_queues, drop);
}
static int txdma_queue_mapping(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	const struct rtw_rqpn *rqpn = NULL;
	u16 txdma_pq_map = 0;

	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		rqpn = &chip->rqpn_table[1];
		break;
	case RTW_HCI_TYPE_USB:
		if (rtwdev->hci.bulkout_num == 2)
			rqpn = &chip->rqpn_table[2];
		else if (rtwdev->hci.bulkout_num == 3)
			rqpn = &chip->rqpn_table[3];
		else if (rtwdev->hci.bulkout_num == 4)
			rqpn = &chip->rqpn_table[4];
		else
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	rtwdev->fifo.rqpn = rqpn;
	txdma_pq_map |= BIT_TXDMA_HIQ_MAP(rqpn->dma_map_hi);
	txdma_pq_map |= BIT_TXDMA_MGQ_MAP(rqpn->dma_map_mg);
	txdma_pq_map |= BIT_TXDMA_BKQ_MAP(rqpn->dma_map_bk);
	txdma_pq_map |= BIT_TXDMA_BEQ_MAP(rqpn->dma_map_be);
	txdma_pq_map |= BIT_TXDMA_VIQ_MAP(rqpn->dma_map_vi);
	txdma_pq_map |= BIT_TXDMA_VOQ_MAP(rqpn->dma_map_vo);
	rtw_write16(rtwdev, REG_TXDMA_PQ_MAP, txdma_pq_map);

	rtw_write8(rtwdev, REG_CR, 0);
	rtw_write8(rtwdev, REG_CR, MAC_TRX_ENABLE);
	rtw_write32(rtwdev, REG_H2CQ_CSR, BIT_H2CQ_FULL);

	return 0;
}
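/* Compute the TX FIFO page layout: a block of reserved pages (driver,
 * H2C info, H2C queue, CPU instruction, FW TX buffer and CSI buffer) is
 * carved out from the top of the TX FIFO, and the remainder is left for
 * the AC queues.
 */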
static int set_trx_fifo_info(struct rtw_dev *rtwdev)
{
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	struct rtw_chip_info *chip = rtwdev->chip;
	u16 cur_pg_addr;
	u8 csi_buf_pg_num = chip->csi_buf_pg_num;

	/* config rsvd page num */
	fifo->rsvd_drv_pg_num = 8;
	fifo->txff_pg_num = chip->txff_size >> 7;
	fifo->rsvd_pg_num = fifo->rsvd_drv_pg_num +
			   RSVD_PG_H2C_EXTRAINFO_NUM +
			   RSVD_PG_H2C_STATICINFO_NUM +
			   RSVD_PG_H2CQ_NUM +
			   RSVD_PG_CPU_INSTRUCTION_NUM +
			   RSVD_PG_FW_TXBUF_NUM +
			   csi_buf_pg_num;

	if (fifo->rsvd_pg_num > fifo->txff_pg_num)
		return -ENOMEM;

	fifo->acq_pg_num = fifo->txff_pg_num - fifo->rsvd_pg_num;
	fifo->rsvd_boundary = fifo->txff_pg_num - fifo->rsvd_pg_num;

	cur_pg_addr = fifo->txff_pg_num;
	cur_pg_addr -= csi_buf_pg_num;
	fifo->rsvd_csibuf_addr = cur_pg_addr;
	cur_pg_addr -= RSVD_PG_FW_TXBUF_NUM;
	fifo->rsvd_fw_txbuf_addr = cur_pg_addr;
	cur_pg_addr -= RSVD_PG_CPU_INSTRUCTION_NUM;
	fifo->rsvd_cpu_instr_addr = cur_pg_addr;
	cur_pg_addr -= RSVD_PG_H2CQ_NUM;
	fifo->rsvd_h2cq_addr = cur_pg_addr;
	cur_pg_addr -= RSVD_PG_H2C_STATICINFO_NUM;
	fifo->rsvd_h2c_sta_info_addr = cur_pg_addr;
	cur_pg_addr -= RSVD_PG_H2C_EXTRAINFO_NUM;
	fifo->rsvd_h2c_info_addr = cur_pg_addr;
	cur_pg_addr -= fifo->rsvd_drv_pg_num;
	fifo->rsvd_drv_addr = cur_pg_addr;

	if (fifo->rsvd_boundary != fifo->rsvd_drv_addr) {
		rtw_err(rtwdev, "wrong rsvd driver address\n");
		return -EINVAL;
	}

	return 0;
}
static int priority_queue_cfg(struct rtw_dev *rtwdev)
{
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	struct rtw_chip_info *chip = rtwdev->chip;
	const struct rtw_page_table *pg_tbl = NULL;
	u16 pubq_num;
	int ret;

	ret = set_trx_fifo_info(rtwdev);
	if (ret)
		return ret;

	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		pg_tbl = &chip->page_table[1];
		break;
	case RTW_HCI_TYPE_USB:
		if (rtwdev->hci.bulkout_num == 2)
			pg_tbl = &chip->page_table[2];
		else if (rtwdev->hci.bulkout_num == 3)
			pg_tbl = &chip->page_table[3];
		else if (rtwdev->hci.bulkout_num == 4)
			pg_tbl = &chip->page_table[4];
		else
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	pubq_num = fifo->acq_pg_num - pg_tbl->hq_num - pg_tbl->lq_num -
		   pg_tbl->nq_num - pg_tbl->exq_num - pg_tbl->gapq_num;
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, pg_tbl->hq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_2, pg_tbl->lq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_3, pg_tbl->nq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_4, pg_tbl->exq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_5, pubq_num);
	rtw_write32_set(rtwdev, REG_RQPN_CTRL_2, BIT_LD_RQPN);

	rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, fifo->rsvd_boundary);
	rtw_write8_set(rtwdev, REG_FWHW_TXQ_CTRL + 2, BIT_EN_WR_FREE_TAIL >> 16);

	rtw_write16(rtwdev, REG_BCNQ_BDNY_V1, fifo->rsvd_boundary);
	rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2 + 2, fifo->rsvd_boundary);
	rtw_write16(rtwdev, REG_BCNQ1_BDNY_V1, fifo->rsvd_boundary);
	rtw_write32(rtwdev, REG_RXFF_BNDY, chip->rxff_size - C2H_PKT_BUF - 1);
	rtw_write8_set(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1);

	if (!check_hw_ready(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1, 0))
		return -EBUSY;

	rtw_write8(rtwdev, REG_CR + 3, 0);

	return 0;
}
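/* Point the hardware H2C (host-to-chip) queue at its reserved pages and
 * verify that the queue starts out empty.
 */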
static int init_h2c(struct rtw_dev *rtwdev)
{
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	u8 value8;
	u32 value32;
	u32 h2cq_addr;
	u32 h2cq_size;
	u32 h2cq_free;
	u32 wp, rp;

	h2cq_addr = fifo->rsvd_h2cq_addr << TX_PAGE_SIZE_SHIFT;
	h2cq_size = RSVD_PG_H2CQ_NUM << TX_PAGE_SIZE_SHIFT;

	value32 = rtw_read32(rtwdev, REG_H2C_HEAD);
	value32 = (value32 & 0xFFFC0000) | h2cq_addr;
	rtw_write32(rtwdev, REG_H2C_HEAD, value32);

	value32 = rtw_read32(rtwdev, REG_H2C_READ_ADDR);
	value32 = (value32 & 0xFFFC0000) | h2cq_addr;
	rtw_write32(rtwdev, REG_H2C_READ_ADDR, value32);

	value32 = rtw_read32(rtwdev, REG_H2C_TAIL);
	value32 &= 0xFFFC0000;
	value32 |= (h2cq_addr + h2cq_size);
	rtw_write32(rtwdev, REG_H2C_TAIL, value32);

	value8 = rtw_read8(rtwdev, REG_H2C_INFO);
	value8 = (u8)((value8 & 0xFC) | 0x01);
	rtw_write8(rtwdev, REG_H2C_INFO, value8);

	value8 = rtw_read8(rtwdev, REG_H2C_INFO);
	value8 = (u8)((value8 & 0xFB) | 0x04);
	rtw_write8(rtwdev, REG_H2C_INFO, value8);

	value8 = rtw_read8(rtwdev, REG_TXDMA_OFFSET_CHK + 1);
	value8 = (u8)((value8 & 0x7f) | 0x80);
	rtw_write8(rtwdev, REG_TXDMA_OFFSET_CHK + 1, value8);

	wp = rtw_read32(rtwdev, REG_H2C_PKT_WRITEADDR) & 0x3FFFF;
	rp = rtw_read32(rtwdev, REG_H2C_PKT_READADDR) & 0x3FFFF;
	h2cq_free = wp >= rp ? h2cq_size - (wp - rp) : rp - wp;

	if (h2cq_size != h2cq_free) {
		rtw_err(rtwdev, "H2C queue mismatch\n");
		return -EINVAL;
	}

	return 0;
}
static int rtw_init_trx_cfg(struct rtw_dev *rtwdev)
{
	int ret;

	ret = txdma_queue_mapping(rtwdev);
	if (ret)
		return ret;

	ret = priority_queue_cfg(rtwdev);
	if (ret)
		return ret;

	ret = init_h2c(rtwdev);
	if (ret)
		return ret;

	return 0;
}
*rtwdev
)
1011 rtw_write8(rtwdev
, REG_RX_DRVINFO_SZ
, PHY_STATUS_SIZE
);
1012 value8
= rtw_read8(rtwdev
, REG_TRXFF_BNDY
+ 1);
1014 /* For rxdesc len = 0 issue */
1016 rtw_write8(rtwdev
, REG_TRXFF_BNDY
+ 1, value8
);
1017 rtw_write32_set(rtwdev
, REG_RCR
, BIT_APP_PHYSTS
);
1018 rtw_write32_clr(rtwdev
, REG_WMAC_OPTION_FUNCTION
+ 4, BIT(8) | BIT(9));
int rtw_mac_init(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	int ret;

	ret = rtw_init_trx_cfg(rtwdev);
	if (ret)
		return ret;

	ret = chip->ops->mac_init(rtwdev);
	if (ret)
		return ret;

	ret = rtw_drv_info_cfg(rtwdev);
	if (ret)
		return ret;

	rtw_hci_interface_cfg(rtwdev);

	return 0;
}