1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2018-2019 Realtek Corporation
11 void rtw_set_channel_mac(struct rtw_dev
*rtwdev
, u8 channel
, u8 bw
,
14 u8 txsc40
= 0, txsc20
= 0;
18 txsc20
= primary_ch_idx
;
19 if (bw
== RTW_CHANNEL_WIDTH_80
) {
20 if (txsc20
== 1 || txsc20
== 3)
25 rtw_write8(rtwdev
, REG_DATA_SC
,
26 BIT_TXSC_20M(txsc20
) | BIT_TXSC_40M(txsc40
));
28 value32
= rtw_read32(rtwdev
, REG_WMAC_TRXPTCL_CTL
);
29 value32
&= ~BIT_RFMOD
;
31 case RTW_CHANNEL_WIDTH_80
:
32 value32
|= BIT_RFMOD_80M
;
34 case RTW_CHANNEL_WIDTH_40
:
35 value32
|= BIT_RFMOD_40M
;
37 case RTW_CHANNEL_WIDTH_20
:
41 rtw_write32(rtwdev
, REG_WMAC_TRXPTCL_CTL
, value32
);
43 value32
= rtw_read32(rtwdev
, REG_AFE_CTRL1
) & ~(BIT_MAC_CLK_SEL
);
44 value32
|= (MAC_CLK_HW_DEF_80M
<< BIT_SHIFT_MAC_CLK_SEL
);
45 rtw_write32(rtwdev
, REG_AFE_CTRL1
, value32
);
47 rtw_write8(rtwdev
, REG_USTIME_TSF
, MAC_CLK_SPEED
);
48 rtw_write8(rtwdev
, REG_USTIME_EDCA
, MAC_CLK_SPEED
);
50 value8
= rtw_read8(rtwdev
, REG_CCK_CHECK
);
51 value8
= value8
& ~BIT_CHECK_CCK_EN
;
52 if (IS_CH_5G_BAND(channel
))
53 value8
|= BIT_CHECK_CCK_EN
;
54 rtw_write8(rtwdev
, REG_CCK_CHECK
, value8
);
57 static int rtw_mac_pre_system_cfg(struct rtw_dev
*rtwdev
)
62 rtw_write8(rtwdev
, REG_RSV_CTRL
, 0);
64 switch (rtw_hci_type(rtwdev
)) {
65 case RTW_HCI_TYPE_PCIE
:
66 rtw_write32_set(rtwdev
, REG_HCI_OPT_CTRL
, BIT_BT_DIG_CLK_EN
);
68 case RTW_HCI_TYPE_USB
:
75 value32
= rtw_read32(rtwdev
, REG_PAD_CTRL1
);
76 value32
|= BIT_PAPE_WLBT_SEL
| BIT_LNAON_WLBT_SEL
;
77 rtw_write32(rtwdev
, REG_PAD_CTRL1
, value32
);
79 value32
= rtw_read32(rtwdev
, REG_LED_CFG
);
80 value32
&= ~(BIT_PAPE_SEL_EN
| BIT_LNAON_SEL_EN
);
81 rtw_write32(rtwdev
, REG_LED_CFG
, value32
);
83 value32
= rtw_read32(rtwdev
, REG_GPIO_MUXCFG
);
84 value32
|= BIT_WLRFE_4_5_EN
;
85 rtw_write32(rtwdev
, REG_GPIO_MUXCFG
, value32
);
88 value8
= rtw_read8(rtwdev
, REG_SYS_FUNC_EN
);
89 value8
&= ~(BIT_FEN_BB_RSTB
| BIT_FEN_BB_GLB_RST
);
90 rtw_write8(rtwdev
, REG_SYS_FUNC_EN
, value8
);
92 value8
= rtw_read8(rtwdev
, REG_RF_CTRL
);
93 value8
&= ~(BIT_RF_SDM_RSTB
| BIT_RF_RSTB
| BIT_RF_EN
);
94 rtw_write8(rtwdev
, REG_RF_CTRL
, value8
);
96 value32
= rtw_read32(rtwdev
, REG_WLRF1
);
97 value32
&= ~BIT_WLRF1_BBRF_EN
;
98 rtw_write32(rtwdev
, REG_WLRF1
, value32
);
103 static int rtw_pwr_cmd_polling(struct rtw_dev
*rtwdev
,
104 struct rtw_pwr_seq_cmd
*cmd
)
109 u32 cnt
= RTW_PWR_POLLING_CNT
;
111 if (cmd
->base
== RTW_PWR_ADDR_SDIO
)
112 offset
= cmd
->offset
| SDIO_LOCAL_OFFSET
;
114 offset
= cmd
->offset
;
118 value
= rtw_read8(rtwdev
, offset
);
120 if (value
== (cmd
->value
& cmd
->mask
))
123 if (rtw_hci_type(rtwdev
) == RTW_HCI_TYPE_PCIE
&&
125 value
= rtw_read8(rtwdev
, REG_SYS_PW_CTRL
);
127 rtw_write8(rtwdev
, REG_SYS_PW_CTRL
, value
);
129 rtw_write8(rtwdev
, REG_SYS_PW_CTRL
, value
);
130 cnt
= RTW_PWR_POLLING_CNT
;
141 static int rtw_sub_pwr_seq_parser(struct rtw_dev
*rtwdev
, u8 intf_mask
,
142 u8 cut_mask
, struct rtw_pwr_seq_cmd
*cmd
)
144 struct rtw_pwr_seq_cmd
*cur_cmd
;
148 for (cur_cmd
= cmd
; cur_cmd
->cmd
!= RTW_PWR_CMD_END
; cur_cmd
++) {
149 if (!(cur_cmd
->intf_mask
& intf_mask
) ||
150 !(cur_cmd
->cut_mask
& cut_mask
))
153 switch (cur_cmd
->cmd
) {
154 case RTW_PWR_CMD_WRITE
:
155 offset
= cur_cmd
->offset
;
157 if (cur_cmd
->base
== RTW_PWR_ADDR_SDIO
)
158 offset
|= SDIO_LOCAL_OFFSET
;
160 value
= rtw_read8(rtwdev
, offset
);
161 value
&= ~cur_cmd
->mask
;
162 value
|= (cur_cmd
->value
& cur_cmd
->mask
);
163 rtw_write8(rtwdev
, offset
, value
);
165 case RTW_PWR_CMD_POLLING
:
166 if (rtw_pwr_cmd_polling(rtwdev
, cur_cmd
))
169 case RTW_PWR_CMD_DELAY
:
170 if (cur_cmd
->value
== RTW_PWR_DELAY_US
)
171 udelay(cur_cmd
->offset
);
173 mdelay(cur_cmd
->offset
);
175 case RTW_PWR_CMD_READ
:
185 static int rtw_pwr_seq_parser(struct rtw_dev
*rtwdev
,
186 struct rtw_pwr_seq_cmd
**cmd_seq
)
192 struct rtw_pwr_seq_cmd
*cmd
;
195 cut
= rtwdev
->hal
.cut_version
;
196 cut_mask
= cut_version_to_mask(cut
);
197 switch (rtw_hci_type(rtwdev
)) {
198 case RTW_HCI_TYPE_PCIE
:
201 case RTW_HCI_TYPE_USB
:
213 ret
= rtw_sub_pwr_seq_parser(rtwdev
, intf_mask
, cut_mask
, cmd
);
223 static int rtw_mac_power_switch(struct rtw_dev
*rtwdev
, bool pwr_on
)
225 struct rtw_chip_info
*chip
= rtwdev
->chip
;
226 struct rtw_pwr_seq_cmd
**pwr_seq
;
230 rpwm
= rtw_read8(rtwdev
, rtwdev
->hci
.rpwm_addr
);
232 /* Check FW still exist or not */
233 if (rtw_read16(rtwdev
, REG_MCUFW_CTRL
) == 0xC078) {
234 rpwm
= (rpwm
^ BIT_RPWM_TOGGLE
) & BIT_RPWM_TOGGLE
;
235 rtw_write8(rtwdev
, rtwdev
->hci
.rpwm_addr
, rpwm
);
238 if (rtw_read8(rtwdev
, REG_CR
) == 0xea)
240 else if (rtw_hci_type(rtwdev
) == RTW_HCI_TYPE_USB
&&
241 (rtw_read8(rtwdev
, REG_SYS_STATUS1
+ 1) & BIT(0)))
246 if (pwr_on
&& cur_pwr
)
249 pwr_seq
= pwr_on
? chip
->pwr_on_seq
: chip
->pwr_off_seq
;
250 if (rtw_pwr_seq_parser(rtwdev
, pwr_seq
))
256 static int rtw_mac_init_system_cfg(struct rtw_dev
*rtwdev
)
258 u8 sys_func_en
= rtwdev
->chip
->sys_func_en
;
262 value
= rtw_read32(rtwdev
, REG_CPU_DMEM_CON
);
263 value
|= BIT_WL_PLATFORM_RST
| BIT_DDMA_EN
;
264 rtw_write32(rtwdev
, REG_CPU_DMEM_CON
, value
);
266 rtw_write8_set(rtwdev
, REG_SYS_FUNC_EN
+ 1, sys_func_en
);
267 value8
= (rtw_read8(rtwdev
, REG_CR_EXT
+ 3) & 0xF0) | 0x0C;
268 rtw_write8(rtwdev
, REG_CR_EXT
+ 3, value8
);
270 /* disable boot-from-flash for driver's DL FW */
271 tmp
= rtw_read32(rtwdev
, REG_MCUFW_CTRL
);
272 if (tmp
& BIT_BOOT_FSPI_EN
) {
273 rtw_write32(rtwdev
, REG_MCUFW_CTRL
, tmp
& (~BIT_BOOT_FSPI_EN
));
274 value
= rtw_read32(rtwdev
, REG_GPIO_MUXCFG
) & (~BIT_FSPI_EN
);
275 rtw_write32(rtwdev
, REG_GPIO_MUXCFG
, value
);
281 int rtw_mac_power_on(struct rtw_dev
*rtwdev
)
285 ret
= rtw_mac_pre_system_cfg(rtwdev
);
289 ret
= rtw_mac_power_switch(rtwdev
, true);
290 if (ret
== -EALREADY
) {
291 rtw_mac_power_switch(rtwdev
, false);
292 ret
= rtw_mac_power_switch(rtwdev
, true);
299 ret
= rtw_mac_init_system_cfg(rtwdev
);
306 rtw_err(rtwdev
, "mac power on failed");
310 void rtw_mac_power_off(struct rtw_dev
*rtwdev
)
312 rtw_mac_power_switch(rtwdev
, false);
315 static bool check_firmware_size(const u8
*data
, u32 size
)
317 const struct rtw_fw_hdr
*fw_hdr
= (const struct rtw_fw_hdr
*)data
;
323 dmem_size
= le32_to_cpu(fw_hdr
->dmem_size
);
324 imem_size
= le32_to_cpu(fw_hdr
->imem_size
);
325 emem_size
= (fw_hdr
->mem_usage
& BIT(4)) ?
326 le32_to_cpu(fw_hdr
->emem_size
) : 0;
328 dmem_size
+= FW_HDR_CHKSUM_SIZE
;
329 imem_size
+= FW_HDR_CHKSUM_SIZE
;
330 emem_size
+= emem_size
? FW_HDR_CHKSUM_SIZE
: 0;
331 real_size
= FW_HDR_SIZE
+ dmem_size
+ imem_size
+ emem_size
;
332 if (real_size
!= size
)
338 static void wlan_cpu_enable(struct rtw_dev
*rtwdev
, bool enable
)
341 /* cpu io interface enable */
342 rtw_write8_set(rtwdev
, REG_RSV_CTRL
+ 1, BIT_WLMCU_IOIF
);
345 rtw_write8_set(rtwdev
, REG_SYS_FUNC_EN
+ 1, BIT_FEN_CPUEN
);
347 /* cpu io interface disable */
348 rtw_write8_clr(rtwdev
, REG_SYS_FUNC_EN
+ 1, BIT_FEN_CPUEN
);
351 rtw_write8_clr(rtwdev
, REG_RSV_CTRL
+ 1, BIT_WLMCU_IOIF
);
355 #define DLFW_RESTORE_REG_NUM 6
357 static void download_firmware_reg_backup(struct rtw_dev
*rtwdev
,
358 struct rtw_backup_info
*bckp
)
363 /* set HIQ to hi priority */
364 bckp
[bckp_idx
].len
= 1;
365 bckp
[bckp_idx
].reg
= REG_TXDMA_PQ_MAP
+ 1;
366 bckp
[bckp_idx
].val
= rtw_read8(rtwdev
, REG_TXDMA_PQ_MAP
+ 1);
368 tmp
= RTW_DMA_MAPPING_HIGH
<< 6;
369 rtw_write8(rtwdev
, REG_TXDMA_PQ_MAP
+ 1, tmp
);
371 /* DLFW only use HIQ, map HIQ to hi priority */
372 bckp
[bckp_idx
].len
= 1;
373 bckp
[bckp_idx
].reg
= REG_CR
;
374 bckp
[bckp_idx
].val
= rtw_read8(rtwdev
, REG_CR
);
376 bckp
[bckp_idx
].len
= 4;
377 bckp
[bckp_idx
].reg
= REG_H2CQ_CSR
;
378 bckp
[bckp_idx
].val
= BIT_H2CQ_FULL
;
380 tmp
= BIT_HCI_TXDMA_EN
| BIT_TXDMA_EN
;
381 rtw_write8(rtwdev
, REG_CR
, tmp
);
382 rtw_write32(rtwdev
, REG_H2CQ_CSR
, BIT_H2CQ_FULL
);
384 /* Config hi priority queue and public priority queue page number */
385 bckp
[bckp_idx
].len
= 2;
386 bckp
[bckp_idx
].reg
= REG_FIFOPAGE_INFO_1
;
387 bckp
[bckp_idx
].val
= rtw_read16(rtwdev
, REG_FIFOPAGE_INFO_1
);
389 bckp
[bckp_idx
].len
= 4;
390 bckp
[bckp_idx
].reg
= REG_RQPN_CTRL_2
;
391 bckp
[bckp_idx
].val
= rtw_read32(rtwdev
, REG_RQPN_CTRL_2
) | BIT_LD_RQPN
;
393 rtw_write16(rtwdev
, REG_FIFOPAGE_INFO_1
, 0x200);
394 rtw_write32(rtwdev
, REG_RQPN_CTRL_2
, bckp
[bckp_idx
- 1].val
);
396 /* Disable beacon related functions */
397 tmp
= rtw_read8(rtwdev
, REG_BCN_CTRL
);
398 bckp
[bckp_idx
].len
= 1;
399 bckp
[bckp_idx
].reg
= REG_BCN_CTRL
;
400 bckp
[bckp_idx
].val
= tmp
;
402 tmp
= (u8
)((tmp
& (~BIT_EN_BCN_FUNCTION
)) | BIT_DIS_TSF_UDT
);
403 rtw_write8(rtwdev
, REG_BCN_CTRL
, tmp
);
405 WARN(bckp_idx
!= DLFW_RESTORE_REG_NUM
, "wrong backup number\n");
408 static void download_firmware_reset_platform(struct rtw_dev
*rtwdev
)
410 rtw_write8_clr(rtwdev
, REG_CPU_DMEM_CON
+ 2, BIT_WL_PLATFORM_RST
>> 16);
411 rtw_write8_clr(rtwdev
, REG_SYS_CLK_CTRL
+ 1, BIT_CPU_CLK_EN
>> 8);
412 rtw_write8_set(rtwdev
, REG_CPU_DMEM_CON
+ 2, BIT_WL_PLATFORM_RST
>> 16);
413 rtw_write8_set(rtwdev
, REG_SYS_CLK_CTRL
+ 1, BIT_CPU_CLK_EN
>> 8);
416 static void download_firmware_reg_restore(struct rtw_dev
*rtwdev
,
417 struct rtw_backup_info
*bckp
,
420 rtw_restore_reg(rtwdev
, bckp
, bckp_num
);
423 #define TX_DESC_SIZE 48
425 static int send_firmware_pkt_rsvd_page(struct rtw_dev
*rtwdev
, u16 pg_addr
,
426 const u8
*data
, u32 size
)
431 buf
= kmemdup(data
, size
, GFP_KERNEL
);
435 ret
= rtw_fw_write_data_rsvd_page(rtwdev
, pg_addr
, buf
, size
);
441 send_firmware_pkt(struct rtw_dev
*rtwdev
, u16 pg_addr
, const u8
*data
, u32 size
)
445 if (rtw_hci_type(rtwdev
) == RTW_HCI_TYPE_USB
&&
446 !((size
+ TX_DESC_SIZE
) & (512 - 1)))
449 ret
= send_firmware_pkt_rsvd_page(rtwdev
, pg_addr
, data
, size
);
451 rtw_err(rtwdev
, "failed to download rsvd page\n");
457 iddma_enable(struct rtw_dev
*rtwdev
, u32 src
, u32 dst
, u32 ctrl
)
459 rtw_write32(rtwdev
, REG_DDMA_CH0SA
, src
);
460 rtw_write32(rtwdev
, REG_DDMA_CH0DA
, dst
);
461 rtw_write32(rtwdev
, REG_DDMA_CH0CTRL
, ctrl
);
463 if (!check_hw_ready(rtwdev
, REG_DDMA_CH0CTRL
, BIT_DDMACH0_OWN
, 0))
469 static int iddma_download_firmware(struct rtw_dev
*rtwdev
, u32 src
, u32 dst
,
472 u32 ch0_ctrl
= BIT_DDMACH0_CHKSUM_EN
| BIT_DDMACH0_OWN
;
474 if (!check_hw_ready(rtwdev
, REG_DDMA_CH0CTRL
, BIT_DDMACH0_OWN
, 0))
477 ch0_ctrl
|= len
& BIT_MASK_DDMACH0_DLEN
;
479 ch0_ctrl
|= BIT_DDMACH0_CHKSUM_CONT
;
481 if (iddma_enable(rtwdev
, src
, dst
, ch0_ctrl
))
488 check_fw_checksum(struct rtw_dev
*rtwdev
, u32 addr
)
492 fw_ctrl
= rtw_read8(rtwdev
, REG_MCUFW_CTRL
);
494 if (rtw_read32(rtwdev
, REG_DDMA_CH0CTRL
) & BIT_DDMACH0_CHKSUM_STS
) {
495 if (addr
< OCPBASE_DMEM_88XX
) {
496 fw_ctrl
|= BIT_IMEM_DW_OK
;
497 fw_ctrl
&= ~BIT_IMEM_CHKSUM_OK
;
498 rtw_write8(rtwdev
, REG_MCUFW_CTRL
, fw_ctrl
);
500 fw_ctrl
|= BIT_DMEM_DW_OK
;
501 fw_ctrl
&= ~BIT_DMEM_CHKSUM_OK
;
502 rtw_write8(rtwdev
, REG_MCUFW_CTRL
, fw_ctrl
);
505 rtw_err(rtwdev
, "invalid fw checksum\n");
510 if (addr
< OCPBASE_DMEM_88XX
) {
511 fw_ctrl
|= (BIT_IMEM_DW_OK
| BIT_IMEM_CHKSUM_OK
);
512 rtw_write8(rtwdev
, REG_MCUFW_CTRL
, fw_ctrl
);
514 fw_ctrl
|= (BIT_DMEM_DW_OK
| BIT_DMEM_CHKSUM_OK
);
515 rtw_write8(rtwdev
, REG_MCUFW_CTRL
, fw_ctrl
);
522 download_firmware_to_mem(struct rtw_dev
*rtwdev
, const u8
*data
,
523 u32 src
, u32 dst
, u32 size
)
525 struct rtw_chip_info
*chip
= rtwdev
->chip
;
526 u32 desc_size
= chip
->tx_pkt_desc_sz
;
531 u32 max_size
= 0x1000;
539 val
= rtw_read32(rtwdev
, REG_DDMA_CH0CTRL
);
540 val
|= BIT_DDMACH0_RESET_CHKSUM_STS
;
541 rtw_write32(rtwdev
, REG_DDMA_CH0CTRL
, val
);
543 while (residue_size
) {
544 if (residue_size
>= max_size
)
547 pkt_size
= residue_size
;
549 ret
= send_firmware_pkt(rtwdev
, (u16
)(src
>> 7),
550 data
+ mem_offset
, pkt_size
);
554 ret
= iddma_download_firmware(rtwdev
, OCPBASE_TXBUF_88XX
+
556 dst
+ mem_offset
, pkt_size
,
562 mem_offset
+= pkt_size
;
563 residue_size
-= pkt_size
;
566 if (!check_fw_checksum(rtwdev
, dst
))
573 start_download_firmware(struct rtw_dev
*rtwdev
, const u8
*data
, u32 size
)
575 const struct rtw_fw_hdr
*fw_hdr
= (const struct rtw_fw_hdr
*)data
;
584 dmem_size
= le32_to_cpu(fw_hdr
->dmem_size
);
585 imem_size
= le32_to_cpu(fw_hdr
->imem_size
);
586 emem_size
= (fw_hdr
->mem_usage
& BIT(4)) ?
587 le32_to_cpu(fw_hdr
->emem_size
) : 0;
588 dmem_size
+= FW_HDR_CHKSUM_SIZE
;
589 imem_size
+= FW_HDR_CHKSUM_SIZE
;
590 emem_size
+= emem_size
? FW_HDR_CHKSUM_SIZE
: 0;
592 val
= (u16
)(rtw_read16(rtwdev
, REG_MCUFW_CTRL
) & 0x3800);
593 val
|= BIT_MCUFWDL_EN
;
594 rtw_write16(rtwdev
, REG_MCUFW_CTRL
, val
);
596 cur_fw
= data
+ FW_HDR_SIZE
;
597 addr
= le32_to_cpu(fw_hdr
->dmem_addr
);
599 ret
= download_firmware_to_mem(rtwdev
, cur_fw
, 0, addr
, dmem_size
);
603 cur_fw
= data
+ FW_HDR_SIZE
+ dmem_size
;
604 addr
= le32_to_cpu(fw_hdr
->imem_addr
);
606 ret
= download_firmware_to_mem(rtwdev
, cur_fw
, 0, addr
, imem_size
);
611 cur_fw
= data
+ FW_HDR_SIZE
+ dmem_size
+ imem_size
;
612 addr
= le32_to_cpu(fw_hdr
->emem_addr
);
614 ret
= download_firmware_to_mem(rtwdev
, cur_fw
, 0, addr
,
623 static int download_firmware_validate(struct rtw_dev
*rtwdev
)
627 if (!check_hw_ready(rtwdev
, REG_MCUFW_CTRL
, FW_READY_MASK
, FW_READY
)) {
628 fw_key
= rtw_read32(rtwdev
, REG_FW_DBG7
) & FW_KEY_MASK
;
629 if (fw_key
== ILLEGAL_KEY_GROUP
)
630 rtw_err(rtwdev
, "invalid fw key\n");
637 static void download_firmware_end_flow(struct rtw_dev
*rtwdev
)
641 rtw_write32(rtwdev
, REG_TXDMA_STATUS
, BTI_PAGE_OVF
);
643 /* Check IMEM & DMEM checksum is OK or not */
644 fw_ctrl
= rtw_read16(rtwdev
, REG_MCUFW_CTRL
);
645 if ((fw_ctrl
& BIT_CHECK_SUM_OK
) != BIT_CHECK_SUM_OK
)
648 fw_ctrl
= (fw_ctrl
| BIT_FW_DW_RDY
) & ~BIT_MCUFWDL_EN
;
649 rtw_write16(rtwdev
, REG_MCUFW_CTRL
, fw_ctrl
);
652 int rtw_download_firmware(struct rtw_dev
*rtwdev
, struct rtw_fw_state
*fw
)
654 struct rtw_backup_info bckp
[DLFW_RESTORE_REG_NUM
];
655 const u8
*data
= fw
->firmware
->data
;
656 u32 size
= fw
->firmware
->size
;
660 if (!check_firmware_size(data
, size
))
663 if (!ltecoex_read_reg(rtwdev
, 0x38, <ecoex_bckp
))
666 wlan_cpu_enable(rtwdev
, false);
668 download_firmware_reg_backup(rtwdev
, bckp
);
669 download_firmware_reset_platform(rtwdev
);
671 ret
= start_download_firmware(rtwdev
, data
, size
);
675 download_firmware_reg_restore(rtwdev
, bckp
, DLFW_RESTORE_REG_NUM
);
677 download_firmware_end_flow(rtwdev
);
679 wlan_cpu_enable(rtwdev
, true);
681 if (!ltecoex_reg_write(rtwdev
, 0x38, ltecoex_bckp
))
684 ret
= download_firmware_validate(rtwdev
);
688 /* reset desc and index */
689 rtw_hci_setup(rtwdev
);
691 rtwdev
->h2c
.last_box_num
= 0;
694 set_bit(RTW_FLAG_FW_RUNNING
, rtwdev
->flags
);
699 /* Disable FWDL_EN */
700 rtw_write8_clr(rtwdev
, REG_MCUFW_CTRL
, BIT_MCUFWDL_EN
);
701 rtw_write8_set(rtwdev
, REG_SYS_FUNC_EN
+ 1, BIT_FEN_CPUEN
);
706 static u32
get_priority_queues(struct rtw_dev
*rtwdev
, u32 queues
)
708 struct rtw_rqpn
*rqpn
= rtwdev
->fifo
.rqpn
;
711 if (queues
& BIT(IEEE80211_AC_VO
))
712 prio_queues
|= BIT(rqpn
->dma_map_vo
);
713 if (queues
& BIT(IEEE80211_AC_VI
))
714 prio_queues
|= BIT(rqpn
->dma_map_vi
);
715 if (queues
& BIT(IEEE80211_AC_BE
))
716 prio_queues
|= BIT(rqpn
->dma_map_be
);
717 if (queues
& BIT(IEEE80211_AC_BK
))
718 prio_queues
|= BIT(rqpn
->dma_map_bk
);
723 static void __rtw_mac_flush_prio_queue(struct rtw_dev
*rtwdev
,
724 u32 prio_queue
, bool drop
)
727 u16 avail_page
, rsvd_page
;
730 switch (prio_queue
) {
731 case RTW_DMA_MAPPING_EXTRA
:
732 addr
= REG_FIFOPAGE_INFO_4
;
734 case RTW_DMA_MAPPING_LOW
:
735 addr
= REG_FIFOPAGE_INFO_2
;
737 case RTW_DMA_MAPPING_NORMAL
:
738 addr
= REG_FIFOPAGE_INFO_3
;
740 case RTW_DMA_MAPPING_HIGH
:
741 addr
= REG_FIFOPAGE_INFO_1
;
747 /* check if all of the reserved pages are available for 100 msecs */
748 for (i
= 0; i
< 5; i
++) {
749 rsvd_page
= rtw_read16(rtwdev
, addr
);
750 avail_page
= rtw_read16(rtwdev
, addr
+ 2);
751 if (rsvd_page
== avail_page
)
757 /* priority queue is still not empty, throw a warning,
759 * Note that if we want to flush the tx queue when having a lot of
760 * traffic (ex, 100Mbps up), some of the packets could be dropped.
761 * And it requires like ~2secs to flush the full priority queue.
764 rtw_warn(rtwdev
, "timed out to flush queue %d\n", prio_queue
);
767 static void rtw_mac_flush_prio_queues(struct rtw_dev
*rtwdev
,
768 u32 prio_queues
, bool drop
)
772 for (q
= 0; q
< RTW_DMA_MAPPING_MAX
; q
++)
773 if (prio_queues
& BIT(q
))
774 __rtw_mac_flush_prio_queue(rtwdev
, q
, drop
);
777 void rtw_mac_flush_queues(struct rtw_dev
*rtwdev
, u32 queues
, bool drop
)
781 /* If all of the hardware queues are requested to flush,
782 * or the priority queues are not mapped yet,
783 * flush all of the priority queues
785 if (queues
== BIT(rtwdev
->hw
->queues
) - 1 || !rtwdev
->fifo
.rqpn
)
786 prio_queues
= BIT(RTW_DMA_MAPPING_MAX
) - 1;
788 prio_queues
= get_priority_queues(rtwdev
, queues
);
790 rtw_mac_flush_prio_queues(rtwdev
, prio_queues
, drop
);
793 static int txdma_queue_mapping(struct rtw_dev
*rtwdev
)
795 struct rtw_chip_info
*chip
= rtwdev
->chip
;
796 struct rtw_rqpn
*rqpn
= NULL
;
797 u16 txdma_pq_map
= 0;
799 switch (rtw_hci_type(rtwdev
)) {
800 case RTW_HCI_TYPE_PCIE
:
801 rqpn
= &chip
->rqpn_table
[1];
803 case RTW_HCI_TYPE_USB
:
804 if (rtwdev
->hci
.bulkout_num
== 2)
805 rqpn
= &chip
->rqpn_table
[2];
806 else if (rtwdev
->hci
.bulkout_num
== 3)
807 rqpn
= &chip
->rqpn_table
[3];
808 else if (rtwdev
->hci
.bulkout_num
== 4)
809 rqpn
= &chip
->rqpn_table
[4];
817 rtwdev
->fifo
.rqpn
= rqpn
;
818 txdma_pq_map
|= BIT_TXDMA_HIQ_MAP(rqpn
->dma_map_hi
);
819 txdma_pq_map
|= BIT_TXDMA_MGQ_MAP(rqpn
->dma_map_mg
);
820 txdma_pq_map
|= BIT_TXDMA_BKQ_MAP(rqpn
->dma_map_bk
);
821 txdma_pq_map
|= BIT_TXDMA_BEQ_MAP(rqpn
->dma_map_be
);
822 txdma_pq_map
|= BIT_TXDMA_VIQ_MAP(rqpn
->dma_map_vi
);
823 txdma_pq_map
|= BIT_TXDMA_VOQ_MAP(rqpn
->dma_map_vo
);
824 rtw_write16(rtwdev
, REG_TXDMA_PQ_MAP
, txdma_pq_map
);
826 rtw_write8(rtwdev
, REG_CR
, 0);
827 rtw_write8(rtwdev
, REG_CR
, MAC_TRX_ENABLE
);
828 rtw_write32(rtwdev
, REG_H2CQ_CSR
, BIT_H2CQ_FULL
);
833 static int set_trx_fifo_info(struct rtw_dev
*rtwdev
)
835 struct rtw_fifo_conf
*fifo
= &rtwdev
->fifo
;
836 struct rtw_chip_info
*chip
= rtwdev
->chip
;
838 u8 csi_buf_pg_num
= chip
->csi_buf_pg_num
;
840 /* config rsvd page num */
841 fifo
->rsvd_drv_pg_num
= 8;
842 fifo
->txff_pg_num
= chip
->txff_size
>> 7;
843 fifo
->rsvd_pg_num
= fifo
->rsvd_drv_pg_num
+
844 RSVD_PG_H2C_EXTRAINFO_NUM
+
845 RSVD_PG_H2C_STATICINFO_NUM
+
847 RSVD_PG_CPU_INSTRUCTION_NUM
+
848 RSVD_PG_FW_TXBUF_NUM
+
851 if (fifo
->rsvd_pg_num
> fifo
->txff_pg_num
)
854 fifo
->acq_pg_num
= fifo
->txff_pg_num
- fifo
->rsvd_pg_num
;
855 fifo
->rsvd_boundary
= fifo
->txff_pg_num
- fifo
->rsvd_pg_num
;
857 cur_pg_addr
= fifo
->txff_pg_num
;
858 cur_pg_addr
-= csi_buf_pg_num
;
859 fifo
->rsvd_csibuf_addr
= cur_pg_addr
;
860 cur_pg_addr
-= RSVD_PG_FW_TXBUF_NUM
;
861 fifo
->rsvd_fw_txbuf_addr
= cur_pg_addr
;
862 cur_pg_addr
-= RSVD_PG_CPU_INSTRUCTION_NUM
;
863 fifo
->rsvd_cpu_instr_addr
= cur_pg_addr
;
864 cur_pg_addr
-= RSVD_PG_H2CQ_NUM
;
865 fifo
->rsvd_h2cq_addr
= cur_pg_addr
;
866 cur_pg_addr
-= RSVD_PG_H2C_STATICINFO_NUM
;
867 fifo
->rsvd_h2c_sta_info_addr
= cur_pg_addr
;
868 cur_pg_addr
-= RSVD_PG_H2C_EXTRAINFO_NUM
;
869 fifo
->rsvd_h2c_info_addr
= cur_pg_addr
;
870 cur_pg_addr
-= fifo
->rsvd_drv_pg_num
;
871 fifo
->rsvd_drv_addr
= cur_pg_addr
;
873 if (fifo
->rsvd_boundary
!= fifo
->rsvd_drv_addr
) {
874 rtw_err(rtwdev
, "wrong rsvd driver address\n");
881 static int priority_queue_cfg(struct rtw_dev
*rtwdev
)
883 struct rtw_fifo_conf
*fifo
= &rtwdev
->fifo
;
884 struct rtw_chip_info
*chip
= rtwdev
->chip
;
885 struct rtw_page_table
*pg_tbl
= NULL
;
889 ret
= set_trx_fifo_info(rtwdev
);
893 switch (rtw_hci_type(rtwdev
)) {
894 case RTW_HCI_TYPE_PCIE
:
895 pg_tbl
= &chip
->page_table
[1];
897 case RTW_HCI_TYPE_USB
:
898 if (rtwdev
->hci
.bulkout_num
== 2)
899 pg_tbl
= &chip
->page_table
[2];
900 else if (rtwdev
->hci
.bulkout_num
== 3)
901 pg_tbl
= &chip
->page_table
[3];
902 else if (rtwdev
->hci
.bulkout_num
== 4)
903 pg_tbl
= &chip
->page_table
[4];
911 pubq_num
= fifo
->acq_pg_num
- pg_tbl
->hq_num
- pg_tbl
->lq_num
-
912 pg_tbl
->nq_num
- pg_tbl
->exq_num
- pg_tbl
->gapq_num
;
913 rtw_write16(rtwdev
, REG_FIFOPAGE_INFO_1
, pg_tbl
->hq_num
);
914 rtw_write16(rtwdev
, REG_FIFOPAGE_INFO_2
, pg_tbl
->lq_num
);
915 rtw_write16(rtwdev
, REG_FIFOPAGE_INFO_3
, pg_tbl
->nq_num
);
916 rtw_write16(rtwdev
, REG_FIFOPAGE_INFO_4
, pg_tbl
->exq_num
);
917 rtw_write16(rtwdev
, REG_FIFOPAGE_INFO_5
, pubq_num
);
918 rtw_write32_set(rtwdev
, REG_RQPN_CTRL_2
, BIT_LD_RQPN
);
920 rtw_write16(rtwdev
, REG_FIFOPAGE_CTRL_2
, fifo
->rsvd_boundary
);
921 rtw_write8_set(rtwdev
, REG_FWHW_TXQ_CTRL
+ 2, BIT_EN_WR_FREE_TAIL
>> 16);
923 rtw_write16(rtwdev
, REG_BCNQ_BDNY_V1
, fifo
->rsvd_boundary
);
924 rtw_write16(rtwdev
, REG_FIFOPAGE_CTRL_2
+ 2, fifo
->rsvd_boundary
);
925 rtw_write16(rtwdev
, REG_BCNQ1_BDNY_V1
, fifo
->rsvd_boundary
);
926 rtw_write32(rtwdev
, REG_RXFF_BNDY
, chip
->rxff_size
- C2H_PKT_BUF
- 1);
927 rtw_write8_set(rtwdev
, REG_AUTO_LLT_V1
, BIT_AUTO_INIT_LLT_V1
);
929 if (!check_hw_ready(rtwdev
, REG_AUTO_LLT_V1
, BIT_AUTO_INIT_LLT_V1
, 0))
932 rtw_write8(rtwdev
, REG_CR
+ 3, 0);
937 static int init_h2c(struct rtw_dev
*rtwdev
)
939 struct rtw_fifo_conf
*fifo
= &rtwdev
->fifo
;
947 h2cq_addr
= fifo
->rsvd_h2cq_addr
<< TX_PAGE_SIZE_SHIFT
;
948 h2cq_size
= RSVD_PG_H2CQ_NUM
<< TX_PAGE_SIZE_SHIFT
;
950 value32
= rtw_read32(rtwdev
, REG_H2C_HEAD
);
951 value32
= (value32
& 0xFFFC0000) | h2cq_addr
;
952 rtw_write32(rtwdev
, REG_H2C_HEAD
, value32
);
954 value32
= rtw_read32(rtwdev
, REG_H2C_READ_ADDR
);
955 value32
= (value32
& 0xFFFC0000) | h2cq_addr
;
956 rtw_write32(rtwdev
, REG_H2C_READ_ADDR
, value32
);
958 value32
= rtw_read32(rtwdev
, REG_H2C_TAIL
);
959 value32
&= 0xFFFC0000;
960 value32
|= (h2cq_addr
+ h2cq_size
);
961 rtw_write32(rtwdev
, REG_H2C_TAIL
, value32
);
963 value8
= rtw_read8(rtwdev
, REG_H2C_INFO
);
964 value8
= (u8
)((value8
& 0xFC) | 0x01);
965 rtw_write8(rtwdev
, REG_H2C_INFO
, value8
);
967 value8
= rtw_read8(rtwdev
, REG_H2C_INFO
);
968 value8
= (u8
)((value8
& 0xFB) | 0x04);
969 rtw_write8(rtwdev
, REG_H2C_INFO
, value8
);
971 value8
= rtw_read8(rtwdev
, REG_TXDMA_OFFSET_CHK
+ 1);
972 value8
= (u8
)((value8
& 0x7f) | 0x80);
973 rtw_write8(rtwdev
, REG_TXDMA_OFFSET_CHK
+ 1, value8
);
975 wp
= rtw_read32(rtwdev
, REG_H2C_PKT_WRITEADDR
) & 0x3FFFF;
976 rp
= rtw_read32(rtwdev
, REG_H2C_PKT_READADDR
) & 0x3FFFF;
977 h2cq_free
= wp
>= rp
? h2cq_size
- (wp
- rp
) : rp
- wp
;
979 if (h2cq_size
!= h2cq_free
) {
980 rtw_err(rtwdev
, "H2C queue mismatch\n");
/* TX/RX path bring-up: DMA queue mapping, priority queue pages, then the
 * H2C queue. Returns the first failing step's errno, or 0.
 */
static int rtw_init_trx_cfg(struct rtw_dev *rtwdev)
{
	int ret;

	ret = txdma_queue_mapping(rtwdev);
	if (ret)
		return ret;

	ret = priority_queue_cfg(rtwdev);
	if (ret)
		return ret;

	ret = init_h2c(rtwdev);
	if (ret)
		return ret;

	return 0;
}
1006 static int rtw_drv_info_cfg(struct rtw_dev
*rtwdev
)
1010 rtw_write8(rtwdev
, REG_RX_DRVINFO_SZ
, PHY_STATUS_SIZE
);
1011 value8
= rtw_read8(rtwdev
, REG_TRXFF_BNDY
+ 1);
1013 /* For rxdesc len = 0 issue */
1015 rtw_write8(rtwdev
, REG_TRXFF_BNDY
+ 1, value8
);
1016 rtw_write32_set(rtwdev
, REG_RCR
, BIT_APP_PHYSTS
);
1017 rtw_write32_clr(rtwdev
, REG_WMAC_OPTION_FUNCTION
+ 4, BIT(8) | BIT(9));
1022 int rtw_mac_init(struct rtw_dev
*rtwdev
)
1024 struct rtw_chip_info
*chip
= rtwdev
->chip
;
1027 ret
= rtw_init_trx_cfg(rtwdev
);
1031 ret
= chip
->ops
->mac_init(rtwdev
);
1035 ret
= rtw_drv_info_cfg(rtwdev
);
1039 rtw_hci_interface_cfg(rtwdev
);