// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019  Realtek Corporation
 */

#include "main.h"
#include "mac.h"
#include "reg.h"
#include "fw.h"
#include "debug.h"

void rtw_set_channel_mac(struct rtw_dev *rtwdev, u8 channel, u8 bw,
			 u8 primary_ch_idx)
{
	u8 txsc40 = 0, txsc20 = 0;
	u32 value32;
	u8 value8;

	txsc20 = primary_ch_idx;
	if (bw == RTW_CHANNEL_WIDTH_80) {
		if (txsc20 == 1 || txsc20 == 3)
			txsc40 = 9;
		else
			txsc40 = 10;
	}
	rtw_write8(rtwdev, REG_DATA_SC,
		   BIT_TXSC_20M(txsc20) | BIT_TXSC_40M(txsc40));

	value32 = rtw_read32(rtwdev, REG_WMAC_TRXPTCL_CTL);
	value32 &= ~BIT_RFMOD;
	switch (bw) {
	case RTW_CHANNEL_WIDTH_80:
		value32 |= BIT_RFMOD_80M;
		break;
	case RTW_CHANNEL_WIDTH_40:
		value32 |= BIT_RFMOD_40M;
		break;
	case RTW_CHANNEL_WIDTH_20:
	default:
		break;
	}
	rtw_write32(rtwdev, REG_WMAC_TRXPTCL_CTL, value32);

	value32 = rtw_read32(rtwdev, REG_AFE_CTRL1) & ~(BIT_MAC_CLK_SEL);
	value32 |= (MAC_CLK_HW_DEF_80M << BIT_SHIFT_MAC_CLK_SEL);
	rtw_write32(rtwdev, REG_AFE_CTRL1, value32);

	rtw_write8(rtwdev, REG_USTIME_TSF, MAC_CLK_SPEED);
	rtw_write8(rtwdev, REG_USTIME_EDCA, MAC_CLK_SPEED);

	value8 = rtw_read8(rtwdev, REG_CCK_CHECK);
	value8 = value8 & ~BIT_CHECK_CCK_EN;
	if (IS_CH_5G_BAND(channel))
		value8 |= BIT_CHECK_CCK_EN;
	rtw_write8(rtwdev, REG_CCK_CHECK, value8);
}

static int rtw_mac_pre_system_cfg(struct rtw_dev *rtwdev)
{
	u32 value32;
	u8 value8;

	rtw_write8(rtwdev, REG_RSV_CTRL, 0);

	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		rtw_write32_set(rtwdev, REG_HCI_OPT_CTRL, BIT_BT_DIG_CLK_EN);
		break;
	case RTW_HCI_TYPE_USB:
		break;
	default:
		return -EINVAL;
	}

	/* config PIN Mux */
	value32 = rtw_read32(rtwdev, REG_PAD_CTRL1);
	value32 |= BIT_PAPE_WLBT_SEL | BIT_LNAON_WLBT_SEL;
	rtw_write32(rtwdev, REG_PAD_CTRL1, value32);

	value32 = rtw_read32(rtwdev, REG_LED_CFG);
	value32 &= ~(BIT_PAPE_SEL_EN | BIT_LNAON_SEL_EN);
	rtw_write32(rtwdev, REG_LED_CFG, value32);

	value32 = rtw_read32(rtwdev, REG_GPIO_MUXCFG);
	value32 |= BIT_WLRFE_4_5_EN;
	rtw_write32(rtwdev, REG_GPIO_MUXCFG, value32);

	/* disable BB/RF */
	value8 = rtw_read8(rtwdev, REG_SYS_FUNC_EN);
	value8 &= ~(BIT_FEN_BB_RSTB | BIT_FEN_BB_GLB_RST);
	rtw_write8(rtwdev, REG_SYS_FUNC_EN, value8);

	value8 = rtw_read8(rtwdev, REG_RF_CTRL);
	value8 &= ~(BIT_RF_SDM_RSTB | BIT_RF_RSTB | BIT_RF_EN);
	rtw_write8(rtwdev, REG_RF_CTRL, value8);

	value32 = rtw_read32(rtwdev, REG_WLRF1);
	value32 &= ~BIT_WLRF1_BBRF_EN;
	rtw_write32(rtwdev, REG_WLRF1, value32);

	return 0;
}

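/* Poll one power sequence command until the masked register value matches
 * the expected value. On PCIe, if polling times out once, toggle BIT(3) of
 * REG_SYS_PW_CTRL and retry the full polling count before giving up.
 */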
static int rtw_pwr_cmd_polling(struct rtw_dev *rtwdev,
			       struct rtw_pwr_seq_cmd *cmd)
{
	u8 value;
	u8 flag = 0;
	u32 offset;
	u32 cnt = RTW_PWR_POLLING_CNT;

	if (cmd->base == RTW_PWR_ADDR_SDIO)
		offset = cmd->offset | SDIO_LOCAL_OFFSET;
	else
		offset = cmd->offset;

	do {
		cnt--;
		value = rtw_read8(rtwdev, offset);
		value &= cmd->mask;
		if (value == (cmd->value & cmd->mask))
			return 0;
		if (cnt == 0) {
			if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE &&
			    flag == 0) {
				value = rtw_read8(rtwdev, REG_SYS_PW_CTRL);
				value |= BIT(3);
				rtw_write8(rtwdev, REG_SYS_PW_CTRL, value);
				value &= ~BIT(3);
				rtw_write8(rtwdev, REG_SYS_PW_CTRL, value);
				cnt = RTW_PWR_POLLING_CNT;
				flag = 1;
			} else {
				return -EBUSY;
			}
		} else {
			udelay(50);
		}
	} while (1);
}

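/* Walk a single power sequence until RTW_PWR_CMD_END, applying only the
 * commands whose interface and cut masks match the current device.
 */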
static int rtw_sub_pwr_seq_parser(struct rtw_dev *rtwdev, u8 intf_mask,
				  u8 cut_mask, struct rtw_pwr_seq_cmd *cmd)
{
	struct rtw_pwr_seq_cmd *cur_cmd;
	u32 offset;
	u8 value;

	for (cur_cmd = cmd; cur_cmd->cmd != RTW_PWR_CMD_END; cur_cmd++) {
		if (!(cur_cmd->intf_mask & intf_mask) ||
		    !(cur_cmd->cut_mask & cut_mask))
			continue;

		switch (cur_cmd->cmd) {
		case RTW_PWR_CMD_WRITE:
			offset = cur_cmd->offset;

			if (cur_cmd->base == RTW_PWR_ADDR_SDIO)
				offset |= SDIO_LOCAL_OFFSET;

			value = rtw_read8(rtwdev, offset);
			value &= ~cur_cmd->mask;
			value |= (cur_cmd->value & cur_cmd->mask);
			rtw_write8(rtwdev, offset, value);
			break;
		case RTW_PWR_CMD_POLLING:
			if (rtw_pwr_cmd_polling(rtwdev, cur_cmd))
				return -EBUSY;
			break;
		case RTW_PWR_CMD_DELAY:
			if (cur_cmd->value == RTW_PWR_DELAY_US)
				udelay(cur_cmd->offset);
			else
				mdelay(cur_cmd->offset);
			break;
		case RTW_PWR_CMD_READ:
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

static int rtw_pwr_seq_parser(struct rtw_dev *rtwdev,
			      struct rtw_pwr_seq_cmd **cmd_seq)
{
	u8 cut_mask;
	u8 intf_mask;
	u8 cut;
	u32 idx = 0;
	struct rtw_pwr_seq_cmd *cmd;
	int ret;

	cut = rtwdev->hal.cut_version;
	cut_mask = cut_version_to_mask(cut);
	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		intf_mask = BIT(2);
		break;
	case RTW_HCI_TYPE_USB:
		intf_mask = BIT(1);
		break;
	default:
		return -EINVAL;
	}

	do {
		cmd = cmd_seq[idx];
		if (!cmd)
			break;

		ret = rtw_sub_pwr_seq_parser(rtwdev, intf_mask, cut_mask, cmd);
		if (ret)
			return -EBUSY;

		idx++;
	} while (1);

	return 0;
}

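/* Run the chip's power-on or power-off sequence. Returns -EALREADY when
 * asked to power on while the MAC already appears to be powered.
 */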
static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pwr_seq_cmd **pwr_seq;
	u8 rpwm;
	bool cur_pwr;

	rpwm = rtw_read8(rtwdev, rtwdev->hci.rpwm_addr);

	/* Check FW still exist or not */
	if (rtw_read16(rtwdev, REG_MCUFW_CTRL) == 0xC078) {
		rpwm = (rpwm ^ BIT_RPWM_TOGGLE) & BIT_RPWM_TOGGLE;
		rtw_write8(rtwdev, rtwdev->hci.rpwm_addr, rpwm);
	}

	if (rtw_read8(rtwdev, REG_CR) == 0xea)
		cur_pwr = false;
	else if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB &&
		 (rtw_read8(rtwdev, REG_SYS_STATUS1 + 1) & BIT(0)))
		cur_pwr = false;
	else
		cur_pwr = true;

	if (pwr_on && cur_pwr)
		return -EALREADY;

	pwr_seq = pwr_on ? chip->pwr_on_seq : chip->pwr_off_seq;
	if (rtw_pwr_seq_parser(rtwdev, pwr_seq))
		return -EINVAL;

	return 0;
}

static int rtw_mac_init_system_cfg(struct rtw_dev *rtwdev)
{
	u8 sys_func_en = rtwdev->chip->sys_func_en;
	u8 value8;
	u32 value, tmp;

	value = rtw_read32(rtwdev, REG_CPU_DMEM_CON);
	value |= BIT_WL_PLATFORM_RST | BIT_DDMA_EN;
	rtw_write32(rtwdev, REG_CPU_DMEM_CON, value);

	rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, sys_func_en);
	value8 = (rtw_read8(rtwdev, REG_CR_EXT + 3) & 0xF0) | 0x0C;
	rtw_write8(rtwdev, REG_CR_EXT + 3, value8);

	/* disable boot-from-flash for driver's DL FW */
	tmp = rtw_read32(rtwdev, REG_MCUFW_CTRL);
	if (tmp & BIT_BOOT_FSPI_EN) {
		rtw_write32(rtwdev, REG_MCUFW_CTRL, tmp & (~BIT_BOOT_FSPI_EN));
		value = rtw_read32(rtwdev, REG_GPIO_MUXCFG) & (~BIT_FSPI_EN);
		rtw_write32(rtwdev, REG_GPIO_MUXCFG, value);
	}

	return 0;
}

int rtw_mac_power_on(struct rtw_dev *rtwdev)
{
	int ret = 0;

	ret = rtw_mac_pre_system_cfg(rtwdev);
	if (ret)
		goto err;

	ret = rtw_mac_power_switch(rtwdev, true);
	if (ret == -EALREADY) {
		rtw_mac_power_switch(rtwdev, false);
		ret = rtw_mac_power_switch(rtwdev, true);
		if (ret)
			goto err;
	} else if (ret) {
		goto err;
	}

	ret = rtw_mac_init_system_cfg(rtwdev);
	if (ret)
		goto err;

	return 0;

err:
	rtw_err(rtwdev, "mac power on failed");
	return ret;
}

void rtw_mac_power_off(struct rtw_dev *rtwdev)
{
	rtw_mac_power_switch(rtwdev, false);
}

static bool check_firmware_size(const u8 *data, u32 size)
{
	const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)data;
	u32 dmem_size;
	u32 imem_size;
	u32 emem_size;
	u32 real_size;

	dmem_size = le32_to_cpu(fw_hdr->dmem_size);
	imem_size = le32_to_cpu(fw_hdr->imem_size);
	emem_size = (fw_hdr->mem_usage & BIT(4)) ?
		    le32_to_cpu(fw_hdr->emem_size) : 0;

	dmem_size += FW_HDR_CHKSUM_SIZE;
	imem_size += FW_HDR_CHKSUM_SIZE;
	emem_size += emem_size ? FW_HDR_CHKSUM_SIZE : 0;
	real_size = FW_HDR_SIZE + dmem_size + imem_size + emem_size;
	if (real_size != size)
		return false;

	return true;
}

static void wlan_cpu_enable(struct rtw_dev *rtwdev, bool enable)
{
	if (enable) {
		/* cpu io interface enable */
		rtw_write8_set(rtwdev, REG_RSV_CTRL + 1, BIT_WLMCU_IOIF);

		/* cpu enable */
		rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);
	} else {
		/* cpu io interface disable */
		rtw_write8_clr(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);

		/* cpu disable */
		rtw_write8_clr(rtwdev, REG_RSV_CTRL + 1, BIT_WLMCU_IOIF);
	}
}

#define DLFW_RESTORE_REG_NUM 6

static void download_firmware_reg_backup(struct rtw_dev *rtwdev,
					 struct rtw_backup_info *bckp)
{
	u8 tmp;
	u8 bckp_idx = 0;

	/* set HIQ to hi priority */
	bckp[bckp_idx].len = 1;
	bckp[bckp_idx].reg = REG_TXDMA_PQ_MAP + 1;
	bckp[bckp_idx].val = rtw_read8(rtwdev, REG_TXDMA_PQ_MAP + 1);
	bckp_idx++;
	tmp = RTW_DMA_MAPPING_HIGH << 6;
	rtw_write8(rtwdev, REG_TXDMA_PQ_MAP + 1, tmp);

	/* DLFW only use HIQ, map HIQ to hi priority */
	bckp[bckp_idx].len = 1;
	bckp[bckp_idx].reg = REG_CR;
	bckp[bckp_idx].val = rtw_read8(rtwdev, REG_CR);
	bckp_idx++;
	bckp[bckp_idx].len = 4;
	bckp[bckp_idx].reg = REG_H2CQ_CSR;
	bckp[bckp_idx].val = BIT_H2CQ_FULL;
	bckp_idx++;
	tmp = BIT_HCI_TXDMA_EN | BIT_TXDMA_EN;
	rtw_write8(rtwdev, REG_CR, tmp);
	rtw_write32(rtwdev, REG_H2CQ_CSR, BIT_H2CQ_FULL);

	/* Config hi priority queue and public priority queue page number */
	bckp[bckp_idx].len = 2;
	bckp[bckp_idx].reg = REG_FIFOPAGE_INFO_1;
	bckp[bckp_idx].val = rtw_read16(rtwdev, REG_FIFOPAGE_INFO_1);
	bckp_idx++;
	bckp[bckp_idx].len = 4;
	bckp[bckp_idx].reg = REG_RQPN_CTRL_2;
	bckp[bckp_idx].val = rtw_read32(rtwdev, REG_RQPN_CTRL_2) | BIT_LD_RQPN;
	bckp_idx++;
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, 0x200);
	rtw_write32(rtwdev, REG_RQPN_CTRL_2, bckp[bckp_idx - 1].val);

	/* Disable beacon related functions */
	tmp = rtw_read8(rtwdev, REG_BCN_CTRL);
	bckp[bckp_idx].len = 1;
	bckp[bckp_idx].reg = REG_BCN_CTRL;
	bckp[bckp_idx].val = tmp;
	bckp_idx++;
	tmp = (u8)((tmp & (~BIT_EN_BCN_FUNCTION)) | BIT_DIS_TSF_UDT);
	rtw_write8(rtwdev, REG_BCN_CTRL, tmp);

	WARN(bckp_idx != DLFW_RESTORE_REG_NUM, "wrong backup number\n");
}

static void download_firmware_reset_platform(struct rtw_dev *rtwdev)
{
	rtw_write8_clr(rtwdev, REG_CPU_DMEM_CON + 2, BIT_WL_PLATFORM_RST >> 16);
	rtw_write8_clr(rtwdev, REG_SYS_CLK_CTRL + 1, BIT_CPU_CLK_EN >> 8);
	rtw_write8_set(rtwdev, REG_CPU_DMEM_CON + 2, BIT_WL_PLATFORM_RST >> 16);
	rtw_write8_set(rtwdev, REG_SYS_CLK_CTRL + 1, BIT_CPU_CLK_EN >> 8);
}

static void download_firmware_reg_restore(struct rtw_dev *rtwdev,
					  struct rtw_backup_info *bckp,
					  u8 bckp_num)
{
	rtw_restore_reg(rtwdev, bckp, bckp_num);
}

#define TX_DESC_SIZE 48

static int send_firmware_pkt_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
					const u8 *data, u32 size)
{
	u8 *buf;
	int ret;

	buf = kmemdup(data, size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, size);
	kfree(buf);
	return ret;
}

static int
send_firmware_pkt(struct rtw_dev *rtwdev, u16 pg_addr, const u8 *data, u32 size)
{
	int ret;

	if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB &&
	    !((size + TX_DESC_SIZE) & (512 - 1)))
		size += 1;

	ret = send_firmware_pkt_rsvd_page(rtwdev, pg_addr, data, size);
	if (ret)
		rtw_err(rtwdev, "failed to download rsvd page\n");

	return ret;
}

static int
iddma_enable(struct rtw_dev *rtwdev, u32 src, u32 dst, u32 ctrl)
{
	rtw_write32(rtwdev, REG_DDMA_CH0SA, src);
	rtw_write32(rtwdev, REG_DDMA_CH0DA, dst);
	rtw_write32(rtwdev, REG_DDMA_CH0CTRL, ctrl);

	if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0))
		return -EBUSY;

	return 0;
}

static int iddma_download_firmware(struct rtw_dev *rtwdev, u32 src, u32 dst,
				   u32 len, u8 first)
{
	u32 ch0_ctrl = BIT_DDMACH0_CHKSUM_EN | BIT_DDMACH0_OWN;

	if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0))
		return -EBUSY;

	ch0_ctrl |= len & BIT_MASK_DDMACH0_DLEN;
	if (!first)
		ch0_ctrl |= BIT_DDMACH0_CHKSUM_CONT;

	if (iddma_enable(rtwdev, src, dst, ch0_ctrl))
		return -EBUSY;

	return 0;
}

static bool
check_fw_checksum(struct rtw_dev *rtwdev, u32 addr)
{
	u8 fw_ctrl;

	fw_ctrl = rtw_read8(rtwdev, REG_MCUFW_CTRL);

	if (rtw_read32(rtwdev, REG_DDMA_CH0CTRL) & BIT_DDMACH0_CHKSUM_STS) {
		if (addr < OCPBASE_DMEM_88XX) {
			fw_ctrl |= BIT_IMEM_DW_OK;
			fw_ctrl &= ~BIT_IMEM_CHKSUM_OK;
			rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
		} else {
			fw_ctrl |= BIT_DMEM_DW_OK;
			fw_ctrl &= ~BIT_DMEM_CHKSUM_OK;
			rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
		}

		rtw_err(rtwdev, "invalid fw checksum\n");

		return false;
	}

	if (addr < OCPBASE_DMEM_88XX) {
		fw_ctrl |= (BIT_IMEM_DW_OK | BIT_IMEM_CHKSUM_OK);
		rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
	} else {
		fw_ctrl |= (BIT_DMEM_DW_OK | BIT_DMEM_CHKSUM_OK);
		rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
	}

	return true;
}

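/* Copy one firmware segment to the target memory region in chunks of up to
 * 4 KB: each chunk is written into the reserved page area and then moved by
 * the DDMA engine, with the checksum accumulated across chunks and verified
 * once the whole segment has been transferred.
 */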
static int
download_firmware_to_mem(struct rtw_dev *rtwdev, const u8 *data,
			 u32 src, u32 dst, u32 size)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	u32 desc_size = chip->tx_pkt_desc_sz;
	u8 first_part;
	u32 mem_offset;
	u32 residue_size;
	u32 pkt_size;
	u32 max_size = 0x1000;
	u32 val;
	int ret;

	mem_offset = 0;
	first_part = 1;
	residue_size = size;

	val = rtw_read32(rtwdev, REG_DDMA_CH0CTRL);
	val |= BIT_DDMACH0_RESET_CHKSUM_STS;
	rtw_write32(rtwdev, REG_DDMA_CH0CTRL, val);

	while (residue_size) {
		if (residue_size >= max_size)
			pkt_size = max_size;
		else
			pkt_size = residue_size;

		ret = send_firmware_pkt(rtwdev, (u16)(src >> 7),
					data + mem_offset, pkt_size);
		if (ret)
			return ret;

		ret = iddma_download_firmware(rtwdev, OCPBASE_TXBUF_88XX +
					      src + desc_size,
					      dst + mem_offset, pkt_size,
					      first_part);
		if (ret)
			return ret;

		first_part = 0;
		mem_offset += pkt_size;
		residue_size -= pkt_size;
	}

	if (!check_fw_checksum(rtwdev, dst))
		return -EINVAL;

	return 0;
}

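/* Download the DMEM, IMEM and (when present) EMEM sections described by the
 * firmware header, each section followed by its checksum bytes.
 */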
static int
start_download_firmware(struct rtw_dev *rtwdev, const u8 *data, u32 size)
{
	const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)data;
	const u8 *cur_fw;
	u16 val;
	u32 imem_size;
	u32 dmem_size;
	u32 emem_size;
	u32 addr;
	int ret;

	dmem_size = le32_to_cpu(fw_hdr->dmem_size);
	imem_size = le32_to_cpu(fw_hdr->imem_size);
	emem_size = (fw_hdr->mem_usage & BIT(4)) ?
		    le32_to_cpu(fw_hdr->emem_size) : 0;
	dmem_size += FW_HDR_CHKSUM_SIZE;
	imem_size += FW_HDR_CHKSUM_SIZE;
	emem_size += emem_size ? FW_HDR_CHKSUM_SIZE : 0;

	val = (u16)(rtw_read16(rtwdev, REG_MCUFW_CTRL) & 0x3800);
	val |= BIT_MCUFWDL_EN;
	rtw_write16(rtwdev, REG_MCUFW_CTRL, val);

	cur_fw = data + FW_HDR_SIZE;
	addr = le32_to_cpu(fw_hdr->dmem_addr);
	addr &= ~BIT(31);
	ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, dmem_size);
	if (ret)
		return ret;

	cur_fw = data + FW_HDR_SIZE + dmem_size;
	addr = le32_to_cpu(fw_hdr->imem_addr);
	addr &= ~BIT(31);
	ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, imem_size);
	if (ret)
		return ret;

	if (emem_size) {
		cur_fw = data + FW_HDR_SIZE + dmem_size + imem_size;
		addr = le32_to_cpu(fw_hdr->emem_addr);
		addr &= ~BIT(31);
		ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr,
					       emem_size);
		if (ret)
			return ret;
	}

	return 0;
}

static int download_firmware_validate(struct rtw_dev *rtwdev)
{
	u32 fw_key;

	if (!check_hw_ready(rtwdev, REG_MCUFW_CTRL, FW_READY_MASK, FW_READY)) {
		fw_key = rtw_read32(rtwdev, REG_FW_DBG7) & FW_KEY_MASK;
		if (fw_key == ILLEGAL_KEY_GROUP)
			rtw_err(rtwdev, "invalid fw key\n");
		return -EINVAL;
	}

	return 0;
}

static void download_firmware_end_flow(struct rtw_dev *rtwdev)
{
	u16 fw_ctrl;

	rtw_write32(rtwdev, REG_TXDMA_STATUS, BTI_PAGE_OVF);

	/* Check IMEM & DMEM checksum is OK or not */
	fw_ctrl = rtw_read16(rtwdev, REG_MCUFW_CTRL);
	if ((fw_ctrl & BIT_CHECK_SUM_OK) != BIT_CHECK_SUM_OK)
		return;

	fw_ctrl = (fw_ctrl | BIT_FW_DW_RDY) & ~BIT_MCUFWDL_EN;
	rtw_write16(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
}

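/* Full firmware download flow: halt the WLAN CPU, back up and reconfigure
 * the DMA/queue registers, DDMA the firmware image into place, restore the
 * registers, restart the CPU and wait for the firmware-ready indication.
 */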
int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw)
{
	struct rtw_backup_info bckp[DLFW_RESTORE_REG_NUM];
	const u8 *data = fw->firmware->data;
	u32 size = fw->firmware->size;
	u32 ltecoex_bckp;
	int ret;

	if (!check_firmware_size(data, size))
		return -EINVAL;

	if (!ltecoex_read_reg(rtwdev, 0x38, &ltecoex_bckp))
		return -EBUSY;

	wlan_cpu_enable(rtwdev, false);

	download_firmware_reg_backup(rtwdev, bckp);
	download_firmware_reset_platform(rtwdev);

	ret = start_download_firmware(rtwdev, data, size);
	if (ret)
		goto dlfw_fail;

	download_firmware_reg_restore(rtwdev, bckp, DLFW_RESTORE_REG_NUM);

	download_firmware_end_flow(rtwdev);

	wlan_cpu_enable(rtwdev, true);

	if (!ltecoex_reg_write(rtwdev, 0x38, ltecoex_bckp))
		return -EBUSY;

	ret = download_firmware_validate(rtwdev);
	if (ret)
		goto dlfw_fail;

	/* reset desc and index */
	rtw_hci_setup(rtwdev);

	rtwdev->h2c.last_box_num = 0;
	rtwdev->h2c.seq = 0;

	set_bit(RTW_FLAG_FW_RUNNING, rtwdev->flags);

	return 0;

dlfw_fail:
	/* Disable FWDL_EN */
	rtw_write8_clr(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
	rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);

	return ret;
}

static u32 get_priority_queues(struct rtw_dev *rtwdev, u32 queues)
{
	struct rtw_rqpn *rqpn = rtwdev->fifo.rqpn;
	u32 prio_queues = 0;

	if (queues & BIT(IEEE80211_AC_VO))
		prio_queues |= BIT(rqpn->dma_map_vo);
	if (queues & BIT(IEEE80211_AC_VI))
		prio_queues |= BIT(rqpn->dma_map_vi);
	if (queues & BIT(IEEE80211_AC_BE))
		prio_queues |= BIT(rqpn->dma_map_be);
	if (queues & BIT(IEEE80211_AC_BK))
		prio_queues |= BIT(rqpn->dma_map_bk);

	return prio_queues;
}

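/* Wait for a single priority queue to drain by comparing its reserved and
 * available page counts.
 */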
static void __rtw_mac_flush_prio_queue(struct rtw_dev *rtwdev,
				       u32 prio_queue, bool drop)
{
	u32 addr;
	u16 avail_page, rsvd_page;
	int i;

	switch (prio_queue) {
	case RTW_DMA_MAPPING_EXTRA:
		addr = REG_FIFOPAGE_INFO_4;
		break;
	case RTW_DMA_MAPPING_LOW:
		addr = REG_FIFOPAGE_INFO_2;
		break;
	case RTW_DMA_MAPPING_NORMAL:
		addr = REG_FIFOPAGE_INFO_3;
		break;
	case RTW_DMA_MAPPING_HIGH:
		addr = REG_FIFOPAGE_INFO_1;
		break;
	default:
		return;
	}

	/* check if all of the reserved pages are available for 100 msecs */
	for (i = 0; i < 5; i++) {
		rsvd_page = rtw_read16(rtwdev, addr);
		avail_page = rtw_read16(rtwdev, addr + 2);
		if (rsvd_page == avail_page)
			return;

		msleep(20);
	}

	/* priority queue is still not empty, throw a warning,
	 *
	 * Note that if we want to flush the tx queue when having a lot of
	 * traffic (ex, 100Mbps up), some of the packets could be dropped.
	 * And it requires like ~2secs to flush the full priority queue.
	 */
	if (!drop)
		rtw_warn(rtwdev, "timed out to flush queue %d\n", prio_queue);
}

static void rtw_mac_flush_prio_queues(struct rtw_dev *rtwdev,
				      u32 prio_queues, bool drop)
{
	u32 q;

	for (q = 0; q < RTW_DMA_MAPPING_MAX; q++)
		if (prio_queues & BIT(q))
			__rtw_mac_flush_prio_queue(rtwdev, q, drop);
}

void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
{
	u32 prio_queues = 0;

	/* If all of the hardware queues are requested to flush,
	 * or the priority queues are not mapped yet,
	 * flush all of the priority queues
	 */
	if (queues == BIT(rtwdev->hw->queues) - 1 || !rtwdev->fifo.rqpn)
		prio_queues = BIT(RTW_DMA_MAPPING_MAX) - 1;
	else
		prio_queues = get_priority_queues(rtwdev, queues);

	rtw_mac_flush_prio_queues(rtwdev, prio_queues, drop);
}

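/* Map each TX DMA queue to a priority queue according to the RQPN table
 * selected for the host interface (and, for USB, the bulk-out endpoint
 * count).
 */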
static int txdma_queue_mapping(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_rqpn *rqpn = NULL;
	u16 txdma_pq_map = 0;

	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		rqpn = &chip->rqpn_table[1];
		break;
	case RTW_HCI_TYPE_USB:
		if (rtwdev->hci.bulkout_num == 2)
			rqpn = &chip->rqpn_table[2];
		else if (rtwdev->hci.bulkout_num == 3)
			rqpn = &chip->rqpn_table[3];
		else if (rtwdev->hci.bulkout_num == 4)
			rqpn = &chip->rqpn_table[4];
		else
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	rtwdev->fifo.rqpn = rqpn;
	txdma_pq_map |= BIT_TXDMA_HIQ_MAP(rqpn->dma_map_hi);
	txdma_pq_map |= BIT_TXDMA_MGQ_MAP(rqpn->dma_map_mg);
	txdma_pq_map |= BIT_TXDMA_BKQ_MAP(rqpn->dma_map_bk);
	txdma_pq_map |= BIT_TXDMA_BEQ_MAP(rqpn->dma_map_be);
	txdma_pq_map |= BIT_TXDMA_VIQ_MAP(rqpn->dma_map_vi);
	txdma_pq_map |= BIT_TXDMA_VOQ_MAP(rqpn->dma_map_vo);
	rtw_write16(rtwdev, REG_TXDMA_PQ_MAP, txdma_pq_map);

	rtw_write8(rtwdev, REG_CR, 0);
	rtw_write8(rtwdev, REG_CR, MAC_TRX_ENABLE);
	rtw_write32(rtwdev, REG_H2CQ_CSR, BIT_H2CQ_FULL);

	return 0;
}

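/* Split the TX FIFO pages between the AC queues and the reserved pages
 * (H2C info, H2C queue, CPU instruction, FW TX buffer, CSI buffer and
 * driver pages), laying the reserved blocks out from the top of the FIFO
 * downwards.
 */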
static int set_trx_fifo_info(struct rtw_dev *rtwdev)
{
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	struct rtw_chip_info *chip = rtwdev->chip;
	u16 cur_pg_addr;
	u8 csi_buf_pg_num = chip->csi_buf_pg_num;

	/* config rsvd page num */
	fifo->rsvd_drv_pg_num = 8;
	fifo->txff_pg_num = chip->txff_size >> 7;
	fifo->rsvd_pg_num = fifo->rsvd_drv_pg_num +
			    RSVD_PG_H2C_EXTRAINFO_NUM +
			    RSVD_PG_H2C_STATICINFO_NUM +
			    RSVD_PG_H2CQ_NUM +
			    RSVD_PG_CPU_INSTRUCTION_NUM +
			    RSVD_PG_FW_TXBUF_NUM +
			    csi_buf_pg_num;

	if (fifo->rsvd_pg_num > fifo->txff_pg_num)
		return -ENOMEM;

	fifo->acq_pg_num = fifo->txff_pg_num - fifo->rsvd_pg_num;
	fifo->rsvd_boundary = fifo->txff_pg_num - fifo->rsvd_pg_num;

	cur_pg_addr = fifo->txff_pg_num;
	cur_pg_addr -= csi_buf_pg_num;
	fifo->rsvd_csibuf_addr = cur_pg_addr;
	cur_pg_addr -= RSVD_PG_FW_TXBUF_NUM;
	fifo->rsvd_fw_txbuf_addr = cur_pg_addr;
	cur_pg_addr -= RSVD_PG_CPU_INSTRUCTION_NUM;
	fifo->rsvd_cpu_instr_addr = cur_pg_addr;
	cur_pg_addr -= RSVD_PG_H2CQ_NUM;
	fifo->rsvd_h2cq_addr = cur_pg_addr;
	cur_pg_addr -= RSVD_PG_H2C_STATICINFO_NUM;
	fifo->rsvd_h2c_sta_info_addr = cur_pg_addr;
	cur_pg_addr -= RSVD_PG_H2C_EXTRAINFO_NUM;
	fifo->rsvd_h2c_info_addr = cur_pg_addr;
	cur_pg_addr -= fifo->rsvd_drv_pg_num;
	fifo->rsvd_drv_addr = cur_pg_addr;

	if (fifo->rsvd_boundary != fifo->rsvd_drv_addr) {
		rtw_err(rtwdev, "wrong rsvd driver address\n");
		return -EINVAL;
	}

	return 0;
}

static int priority_queue_cfg(struct rtw_dev *rtwdev)
{
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_page_table *pg_tbl = NULL;
	u16 pubq_num;
	int ret;

	ret = set_trx_fifo_info(rtwdev);
	if (ret)
		return ret;

	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		pg_tbl = &chip->page_table[1];
		break;
	case RTW_HCI_TYPE_USB:
		if (rtwdev->hci.bulkout_num == 2)
			pg_tbl = &chip->page_table[2];
		else if (rtwdev->hci.bulkout_num == 3)
			pg_tbl = &chip->page_table[3];
		else if (rtwdev->hci.bulkout_num == 4)
			pg_tbl = &chip->page_table[4];
		else
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	pubq_num = fifo->acq_pg_num - pg_tbl->hq_num - pg_tbl->lq_num -
		   pg_tbl->nq_num - pg_tbl->exq_num - pg_tbl->gapq_num;
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, pg_tbl->hq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_2, pg_tbl->lq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_3, pg_tbl->nq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_4, pg_tbl->exq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_5, pubq_num);
	rtw_write32_set(rtwdev, REG_RQPN_CTRL_2, BIT_LD_RQPN);

	rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, fifo->rsvd_boundary);
	rtw_write8_set(rtwdev, REG_FWHW_TXQ_CTRL + 2, BIT_EN_WR_FREE_TAIL >> 16);

	rtw_write16(rtwdev, REG_BCNQ_BDNY_V1, fifo->rsvd_boundary);
	rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2 + 2, fifo->rsvd_boundary);
	rtw_write16(rtwdev, REG_BCNQ1_BDNY_V1, fifo->rsvd_boundary);
	rtw_write32(rtwdev, REG_RXFF_BNDY, chip->rxff_size - C2H_PKT_BUF - 1);
	rtw_write8_set(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1);

	if (!check_hw_ready(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1, 0))
		return -EBUSY;

	rtw_write8(rtwdev, REG_CR + 3, 0);

	return 0;
}

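/* Point the hardware H2C queue head/read/tail pointers at the reserved H2C
 * pages and verify that the queue is reported empty.
 */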
static int init_h2c(struct rtw_dev *rtwdev)
{
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	u8 value8;
	u32 value32;
	u32 h2cq_addr;
	u32 h2cq_size;
	u32 h2cq_free;
	u32 wp, rp;

	h2cq_addr = fifo->rsvd_h2cq_addr << TX_PAGE_SIZE_SHIFT;
	h2cq_size = RSVD_PG_H2CQ_NUM << TX_PAGE_SIZE_SHIFT;

	value32 = rtw_read32(rtwdev, REG_H2C_HEAD);
	value32 = (value32 & 0xFFFC0000) | h2cq_addr;
	rtw_write32(rtwdev, REG_H2C_HEAD, value32);

	value32 = rtw_read32(rtwdev, REG_H2C_READ_ADDR);
	value32 = (value32 & 0xFFFC0000) | h2cq_addr;
	rtw_write32(rtwdev, REG_H2C_READ_ADDR, value32);

	value32 = rtw_read32(rtwdev, REG_H2C_TAIL);
	value32 &= 0xFFFC0000;
	value32 |= (h2cq_addr + h2cq_size);
	rtw_write32(rtwdev, REG_H2C_TAIL, value32);

	value8 = rtw_read8(rtwdev, REG_H2C_INFO);
	value8 = (u8)((value8 & 0xFC) | 0x01);
	rtw_write8(rtwdev, REG_H2C_INFO, value8);

	value8 = rtw_read8(rtwdev, REG_H2C_INFO);
	value8 = (u8)((value8 & 0xFB) | 0x04);
	rtw_write8(rtwdev, REG_H2C_INFO, value8);

	value8 = rtw_read8(rtwdev, REG_TXDMA_OFFSET_CHK + 1);
	value8 = (u8)((value8 & 0x7f) | 0x80);
	rtw_write8(rtwdev, REG_TXDMA_OFFSET_CHK + 1, value8);

	wp = rtw_read32(rtwdev, REG_H2C_PKT_WRITEADDR) & 0x3FFFF;
	rp = rtw_read32(rtwdev, REG_H2C_PKT_READADDR) & 0x3FFFF;
	h2cq_free = wp >= rp ? h2cq_size - (wp - rp) : rp - wp;

	if (h2cq_size != h2cq_free) {
		rtw_err(rtwdev, "H2C queue mismatch\n");
		return -EINVAL;
	}

	return 0;
}

static int rtw_init_trx_cfg(struct rtw_dev *rtwdev)
{
	int ret;

	ret = txdma_queue_mapping(rtwdev);
	if (ret)
		return ret;

	ret = priority_queue_cfg(rtwdev);
	if (ret)
		return ret;

	ret = init_h2c(rtwdev);
	if (ret)
		return ret;

	return 0;
}

static int rtw_drv_info_cfg(struct rtw_dev *rtwdev)
{
	u8 value8;

	rtw_write8(rtwdev, REG_RX_DRVINFO_SZ, PHY_STATUS_SIZE);
	value8 = rtw_read8(rtwdev, REG_TRXFF_BNDY + 1);
	value8 &= 0xF0;
	/* For rxdesc len = 0 issue */
	value8 |= 0xF;
	rtw_write8(rtwdev, REG_TRXFF_BNDY + 1, value8);
	rtw_write32_set(rtwdev, REG_RCR, BIT_APP_PHYSTS);
	rtw_write32_clr(rtwdev, REG_WMAC_OPTION_FUNCTION + 4, BIT(8) | BIT(9));

	return 0;
}

int rtw_mac_init(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	int ret;

	ret = rtw_init_trx_cfg(rtwdev);
	if (ret)
		return ret;

	ret = chip->ops->mac_init(rtwdev);
	if (ret)
		return ret;

	ret = rtw_drv_info_cfg(rtwdev);
	if (ret)
		return ret;

	rtw_hci_interface_cfg(rtwdev);

	return 0;
}