// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2018 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/platform_device.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/soc/qcom/smem.h>
#include <linux/string.h>
#include <net/sock.h>

#include "debug.h"
#include "snoc.h"

#define ATH10K_QMI_CLIENT_ID		0x4b4e454c
#define ATH10K_QMI_TIMEOUT		30
#define SMEM_IMAGE_VERSION_TABLE	469
#define SMEM_IMAGE_TABLE_CNSS_INDEX	13
#define SMEM_IMAGE_VERSION_ENTRY_SIZE	128
#define SMEM_IMAGE_VERSION_NAME_SIZE	75

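/* Assign an MSA memory region to the modem (MSS MSA) and WLAN VMs through a
 * secure monitor (SCM) call so the firmware is allowed to access it.
 * Non-secure regions are additionally shared with the WLAN CE VM.
 */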
static int ath10k_qmi_map_msa_permission(struct ath10k_qmi *qmi,
					 struct ath10k_msa_mem_info *mem_info)
{
	struct qcom_scm_vmperm dst_perms[3];
	struct ath10k *ar = qmi->ar;
	u64 src_perms;
	u32 perm_count;
	int ret;

	src_perms = BIT(QCOM_SCM_VMID_HLOS);

	dst_perms[0].vmid = QCOM_SCM_VMID_MSS_MSA;
	dst_perms[0].perm = QCOM_SCM_PERM_RW;
	dst_perms[1].vmid = QCOM_SCM_VMID_WLAN;
	dst_perms[1].perm = QCOM_SCM_PERM_RW;

	if (mem_info->secure) {
		perm_count = 2;
	} else {
		dst_perms[2].vmid = QCOM_SCM_VMID_WLAN_CE;
		dst_perms[2].perm = QCOM_SCM_PERM_RW;
		perm_count = 3;
	}

	ret = qcom_scm_assign_mem(mem_info->addr, mem_info->size,
				  &src_perms, dst_perms, perm_count);
	if (ret < 0)
		ath10k_err(ar, "failed to assign msa map permissions: %d\n", ret);

	return ret;
}

static int ath10k_qmi_unmap_msa_permission(struct ath10k_qmi *qmi,
					   struct ath10k_msa_mem_info *mem_info)
{
	struct qcom_scm_vmperm dst_perms;
	struct ath10k *ar = qmi->ar;
	u64 src_perms;
	int ret;

	src_perms = BIT(QCOM_SCM_VMID_MSS_MSA) | BIT(QCOM_SCM_VMID_WLAN);

	if (!mem_info->secure)
		src_perms |= BIT(QCOM_SCM_VMID_WLAN_CE);

	dst_perms.vmid = QCOM_SCM_VMID_HLOS;
	dst_perms.perm = QCOM_SCM_PERM_RW;

	ret = qcom_scm_assign_mem(mem_info->addr, mem_info->size,
				  &src_perms, &dst_perms, 1);
	if (ret < 0)
		ath10k_err(ar, "failed to unmap msa permissions: %d\n", ret);

	return ret;
}

static int ath10k_qmi_setup_msa_permissions(struct ath10k_qmi *qmi)
{
	int ret;
	int i;

	if (qmi->msa_fixed_perm)
		return 0;

	for (i = 0; i < qmi->nr_mem_region; i++) {
		ret = ath10k_qmi_map_msa_permission(qmi, &qmi->mem_region[i]);
		if (ret)
			goto err_unmap;
	}

	return 0;

err_unmap:
	for (i--; i >= 0; i--)
		ath10k_qmi_unmap_msa_permission(qmi, &qmi->mem_region[i]);
	return ret;
}

static void ath10k_qmi_remove_msa_permission(struct ath10k_qmi *qmi)
{
	int i;

	if (qmi->msa_fixed_perm)
		return;

	for (i = 0; i < qmi->nr_mem_region; i++)
		ath10k_qmi_unmap_msa_permission(qmi, &qmi->mem_region[i]);
}

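/* Query the firmware for its list of MSA memory regions and cache them in
 * qmi->mem_region[], validating that every region lies inside the MSA
 * carveout reserved by the host.
 */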
static int ath10k_qmi_msa_mem_info_send_sync_msg(struct ath10k_qmi *qmi)
{
	struct wlfw_msa_info_resp_msg_v01 resp = {};
	struct wlfw_msa_info_req_msg_v01 req = {};
	struct ath10k *ar = qmi->ar;
	phys_addr_t max_mapped_addr;
	struct qmi_txn txn;
	int ret;
	int i;

	req.msa_addr = ar->msa.paddr;
	req.size = ar->msa.mem_size;

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
			   wlfw_msa_info_resp_msg_v01_ei, &resp);
	if (ret < 0)
		goto out;

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_MSA_INFO_REQ_V01,
			       WLFW_MSA_INFO_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_msa_info_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send msa mem info req: %d\n", ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		ath10k_err(ar, "msa info req rejected: %d\n", resp.resp.error);
		ret = -EINVAL;
		goto out;
	}

	if (resp.mem_region_info_len > QMI_WLFW_MAX_MEM_REG_V01) {
		ath10k_err(ar, "invalid memory region length received: %d\n",
			   resp.mem_region_info_len);
		ret = -EINVAL;
		goto out;
	}

	max_mapped_addr = ar->msa.paddr + ar->msa.mem_size;
	qmi->nr_mem_region = resp.mem_region_info_len;
	for (i = 0; i < resp.mem_region_info_len; i++) {
		if (resp.mem_region_info[i].size > ar->msa.mem_size ||
		    resp.mem_region_info[i].region_addr > max_mapped_addr ||
		    resp.mem_region_info[i].region_addr < ar->msa.paddr ||
		    resp.mem_region_info[i].size +
		    resp.mem_region_info[i].region_addr > max_mapped_addr) {
			ath10k_err(ar, "received out of range memory region address 0x%llx with size 0x%x, aborting\n",
				   resp.mem_region_info[i].region_addr,
				   resp.mem_region_info[i].size);
			ret = -EINVAL;
			goto fail_unwind;
		}
		qmi->mem_region[i].addr = resp.mem_region_info[i].region_addr;
		qmi->mem_region[i].size = resp.mem_region_info[i].size;
		qmi->mem_region[i].secure = resp.mem_region_info[i].secure_flag;
		ath10k_dbg(ar, ATH10K_DBG_QMI,
			   "qmi msa mem region %d addr 0x%pa size 0x%x flag 0x%08x\n",
			   i, &qmi->mem_region[i].addr,
			   qmi->mem_region[i].size,
			   qmi->mem_region[i].secure);
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa mem info request completed\n");
	return 0;

fail_unwind:
	memset(&qmi->mem_region[0], 0, sizeof(qmi->mem_region[0]) * i);
out:
	return ret;
}

static int ath10k_qmi_msa_ready_send_sync_msg(struct ath10k_qmi *qmi)
{
	struct wlfw_msa_ready_resp_msg_v01 resp = {};
	struct wlfw_msa_ready_req_msg_v01 req = {};
	struct ath10k *ar = qmi->ar;
	struct qmi_txn txn;
	int ret;

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
			   wlfw_msa_ready_resp_msg_v01_ei, &resp);
	if (ret < 0)
		goto out;

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_MSA_READY_REQ_V01,
			       WLFW_MSA_READY_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_msa_ready_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send msa mem ready request: %d\n", ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		ath10k_err(ar, "msa ready request rejected: %d\n", resp.resp.error);
		ret = -EINVAL;
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa mem ready request completed\n");
	return 0;

out:
	return ret;
}

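/* Push the board data file (BDF) to the firmware in chunks of at most
 * QMI_WLFW_MAX_DATA_SIZE_V01 bytes, marking the last segment with end = 1
 * so the firmware can run its CRC check over the complete file.
 */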
static int ath10k_qmi_bdf_dnld_send_sync(struct ath10k_qmi *qmi)
{
	struct wlfw_bdf_download_resp_msg_v01 resp = {};
	struct wlfw_bdf_download_req_msg_v01 *req;
	struct ath10k *ar = qmi->ar;
	unsigned int remaining;
	struct qmi_txn txn;
	const u8 *temp;
	int ret;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	temp = ar->normal_mode_fw.board_data;
	remaining = ar->normal_mode_fw.board_len;

	while (remaining) {
		req->valid = 1;
		req->file_id_valid = 1;
		req->file_id = 0;
		req->total_size_valid = 1;
		req->total_size = ar->normal_mode_fw.board_len;
		req->seg_id_valid = 1;
		req->data_valid = 1;
		req->end_valid = 1;

		if (remaining > QMI_WLFW_MAX_DATA_SIZE_V01) {
			req->data_len = QMI_WLFW_MAX_DATA_SIZE_V01;
		} else {
			req->data_len = remaining;
			req->end = 1;
		}

		memcpy(req->data, temp, req->data_len);

		ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
				   wlfw_bdf_download_resp_msg_v01_ei,
				   &resp);
		if (ret < 0)
			goto out;

		ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
				       QMI_WLFW_BDF_DOWNLOAD_REQ_V01,
				       WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
				       wlfw_bdf_download_req_msg_v01_ei, req);
		if (ret < 0) {
			qmi_txn_cancel(&txn);
			goto out;
		}

		ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
		if (ret < 0)
			goto out;

		/* end = 1 triggers a CRC check on the BDF. If this fails, we
		 * get a QMI_ERR_MALFORMED_MSG_V01 error, but the FW is still
		 * willing to use the BDF. For some platforms, all the valid
		 * released BDFs fail this CRC check, so attempt to detect this
		 * scenario and treat it as non-fatal.
		 */
		if (resp.resp.result != QMI_RESULT_SUCCESS_V01 &&
		    !(req->end == 1 &&
		      resp.resp.result == QMI_ERR_MALFORMED_MSG_V01)) {
			ath10k_err(ar, "failed to download board data file: %d\n",
				   resp.resp.error);
			ret = -EINVAL;
			goto out;
		}

		remaining -= req->data_len;
		temp += req->data_len;
		req->seg_id++;
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi bdf download request completed\n");

	kfree(req);
	return 0;

out:
	kfree(req);
	return ret;
}

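/* Report which calibration data blocks (and, when supported, the XO
 * calibration value) the host holds, by sending their cal_ids to the
 * firmware.
 */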
static int ath10k_qmi_send_cal_report_req(struct ath10k_qmi *qmi)
{
	struct wlfw_cal_report_resp_msg_v01 resp = {};
	struct wlfw_cal_report_req_msg_v01 req = {};
	struct ath10k *ar = qmi->ar;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct qmi_txn txn;
	int i, j = 0;
	int ret;

	if (ar_snoc->xo_cal_supported) {
		req.xo_cal_data_valid = 1;
		req.xo_cal_data = ar_snoc->xo_cal_data;
	}

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_cal_report_resp_msg_v01_ei,
			   &resp);
	if (ret < 0)
		goto out;

	for (i = 0; i < QMI_WLFW_MAX_NUM_CAL_V01; i++) {
		if (qmi->cal_data[i].total_size &&
		    qmi->cal_data[i].data) {
			req.meta_data[j] = qmi->cal_data[i].cal_id;
			j++;
		}
	}
	req.meta_data_len = j;

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_CAL_REPORT_REQ_V01,
			       WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_cal_report_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send calibration request: %d\n", ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		ath10k_err(ar, "calibration request rejected: %d\n", resp.resp.error);
		ret = -EINVAL;
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi cal report request completed\n");
	return 0;

out:
	return ret;
}

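/* Request a firmware driver mode change (e.g. mission mode or off). */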
static int
ath10k_qmi_mode_send_sync_msg(struct ath10k *ar, enum wlfw_driver_mode_enum_v01 mode)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_qmi *qmi = ar_snoc->qmi;
	struct wlfw_wlan_mode_resp_msg_v01 resp = {};
	struct wlfw_wlan_mode_req_msg_v01 req = {};
	struct qmi_txn txn;
	int ret;

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
			   wlfw_wlan_mode_resp_msg_v01_ei,
			   &resp);
	if (ret < 0)
		goto out;

	req.mode = mode;
	req.hw_debug_valid = 1;
	req.hw_debug = 0;

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_WLAN_MODE_REQ_V01,
			       WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_wlan_mode_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send wlan mode %d request: %d\n", mode, ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		ath10k_err(ar, "mode request rejected: %d\n", resp.resp.error);
		ret = -EINVAL;
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi wlan mode req completed: %d\n", mode);
	return 0;

out:
	return ret;
}

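/* Send the WLAN enable configuration: copy engine target and service pipe
 * configuration plus the shadow register list, each clamped to the QMI
 * message limits.
 */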
static int
ath10k_qmi_cfg_send_sync_msg(struct ath10k *ar,
			     struct ath10k_qmi_wlan_enable_cfg *config,
			     const char *version)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_qmi *qmi = ar_snoc->qmi;
	struct wlfw_wlan_cfg_resp_msg_v01 resp = {};
	struct wlfw_wlan_cfg_req_msg_v01 *req;
	struct qmi_txn txn;
	int ret;
	u32 i;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
			   wlfw_wlan_cfg_resp_msg_v01_ei,
			   &resp);
	if (ret < 0)
		goto out;

	req->host_version_valid = 0;

	req->tgt_cfg_valid = 1;
	if (config->num_ce_tgt_cfg > QMI_WLFW_MAX_NUM_CE_V01)
		req->tgt_cfg_len = QMI_WLFW_MAX_NUM_CE_V01;
	else
		req->tgt_cfg_len = config->num_ce_tgt_cfg;
	for (i = 0; i < req->tgt_cfg_len; i++) {
		req->tgt_cfg[i].pipe_num = config->ce_tgt_cfg[i].pipe_num;
		req->tgt_cfg[i].pipe_dir = config->ce_tgt_cfg[i].pipe_dir;
		req->tgt_cfg[i].nentries = config->ce_tgt_cfg[i].nentries;
		req->tgt_cfg[i].nbytes_max = config->ce_tgt_cfg[i].nbytes_max;
		req->tgt_cfg[i].flags = config->ce_tgt_cfg[i].flags;
	}

	req->svc_cfg_valid = 1;
	if (config->num_ce_svc_pipe_cfg > QMI_WLFW_MAX_NUM_SVC_V01)
		req->svc_cfg_len = QMI_WLFW_MAX_NUM_SVC_V01;
	else
		req->svc_cfg_len = config->num_ce_svc_pipe_cfg;
	for (i = 0; i < req->svc_cfg_len; i++) {
		req->svc_cfg[i].service_id = config->ce_svc_cfg[i].service_id;
		req->svc_cfg[i].pipe_dir = config->ce_svc_cfg[i].pipe_dir;
		req->svc_cfg[i].pipe_num = config->ce_svc_cfg[i].pipe_num;
	}

	req->shadow_reg_valid = 1;
	if (config->num_shadow_reg_cfg >
	    QMI_WLFW_MAX_NUM_SHADOW_REG_V01)
		req->shadow_reg_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V01;
	else
		req->shadow_reg_len = config->num_shadow_reg_cfg;

	memcpy(req->shadow_reg, config->shadow_reg_cfg,
	       sizeof(struct wlfw_shadow_reg_cfg_s_v01) * req->shadow_reg_len);

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_WLAN_CFG_REQ_V01,
			       WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_wlan_cfg_req_msg_v01_ei, req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send config request: %d\n", ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		ath10k_err(ar, "config request rejected: %d\n", resp.resp.error);
		ret = -EINVAL;
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi config request completed\n");
	kfree(req);
	return 0;

out:
	kfree(req);
	return ret;
}

int ath10k_qmi_wlan_enable(struct ath10k *ar,
			   struct ath10k_qmi_wlan_enable_cfg *config,
			   enum wlfw_driver_mode_enum_v01 mode,
			   const char *version)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi mode %d config %p\n",
		   mode, config);

	ret = ath10k_qmi_cfg_send_sync_msg(ar, config, version);
	if (ret) {
		ath10k_err(ar, "failed to send qmi config: %d\n", ret);
		return ret;
	}

	ret = ath10k_qmi_mode_send_sync_msg(ar, mode);
	if (ret) {
		ath10k_err(ar, "failed to send qmi mode: %d\n", ret);
		return ret;
	}

	return 0;
}

int ath10k_qmi_wlan_disable(struct ath10k *ar)
{
	return ath10k_qmi_mode_send_sync_msg(ar, QMI_WLFW_OFF_V01);
}

static void ath10k_qmi_add_wlan_ver_smem(struct ath10k *ar, const char *fw_build_id)
{
	char *table_ptr;
	size_t smem_item_size;
	const u32 smem_img_idx_wlan = SMEM_IMAGE_TABLE_CNSS_INDEX *
				      SMEM_IMAGE_VERSION_ENTRY_SIZE;

	table_ptr = qcom_smem_get(QCOM_SMEM_HOST_ANY,
				  SMEM_IMAGE_VERSION_TABLE,
				  &smem_item_size);

	if (IS_ERR(table_ptr)) {
		ath10k_err(ar, "smem image version table not found\n");
		return;
	}

	if (smem_img_idx_wlan + SMEM_IMAGE_VERSION_ENTRY_SIZE >
	    smem_item_size) {
		ath10k_err(ar, "smem block size too small: %zu\n",
			   smem_item_size);
		return;
	}

	strscpy(table_ptr + smem_img_idx_wlan, fw_build_id,
		SMEM_IMAGE_VERSION_NAME_SIZE);
}

static int ath10k_qmi_cap_send_sync_msg(struct ath10k_qmi *qmi)
{
	struct wlfw_cap_resp_msg_v01 *resp;
	struct wlfw_cap_req_msg_v01 req = {};
	struct ath10k *ar = qmi->ar;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct qmi_txn txn;
	int ret;

	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_cap_resp_msg_v01_ei, resp);
	if (ret < 0)
		goto out;

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_CAP_REQ_V01,
			       WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_cap_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send capability request: %d\n", ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
		ath10k_err(ar, "capability req rejected: %d\n", resp->resp.error);
		ret = -EINVAL;
		goto out;
	}

	if (resp->chip_info_valid) {
		qmi->chip_info.chip_id = resp->chip_info.chip_id;
		qmi->chip_info.chip_family = resp->chip_info.chip_family;
	} else {
		qmi->chip_info.chip_id = 0xFF;
	}

	if (resp->board_info_valid)
		qmi->board_info.board_id = resp->board_info.board_id;
	else
		qmi->board_info.board_id = 0xFF;

	if (resp->soc_info_valid)
		qmi->soc_info.soc_id = resp->soc_info.soc_id;

	if (resp->fw_version_info_valid) {
		qmi->fw_version = resp->fw_version_info.fw_version;
		strscpy(qmi->fw_build_timestamp, resp->fw_version_info.fw_build_timestamp,
			sizeof(qmi->fw_build_timestamp));
	}

	if (resp->fw_build_id_valid)
		strscpy(qmi->fw_build_id, resp->fw_build_id,
			MAX_BUILD_ID_LEN + 1);

	if (!test_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags)) {
		ath10k_info(ar, "qmi chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x",
			    qmi->chip_info.chip_id, qmi->chip_info.chip_family,
			    qmi->board_info.board_id, qmi->soc_info.soc_id);
		ath10k_info(ar, "qmi fw_version 0x%x fw_build_timestamp %s fw_build_id %s",
			    qmi->fw_version, qmi->fw_build_timestamp, qmi->fw_build_id);
	}

	if (resp->fw_build_id_valid)
		ath10k_qmi_add_wlan_ver_smem(ar, qmi->fw_build_id);

	kfree(resp);
	return 0;

out:
	kfree(resp);
	return ret;
}

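/* Advertise host capabilities to the firmware; a "not supported" reply from
 * older firmware is tolerated.
 */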
static int ath10k_qmi_host_cap_send_sync(struct ath10k_qmi *qmi)
{
	struct wlfw_host_cap_resp_msg_v01 resp = {};
	struct wlfw_host_cap_req_msg_v01 req = {};
	const struct qmi_elem_info *req_ei;
	struct ath10k *ar = qmi->ar;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct qmi_txn txn;
	int ret;

	req.daemon_support_valid = 1;
	req.daemon_support = 0;

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_host_cap_resp_msg_v01_ei,
			   &resp);
	if (ret < 0)
		goto out;

	if (test_bit(ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK, &ar_snoc->flags))
		req_ei = wlfw_host_cap_8bit_req_msg_v01_ei;
	else
		req_ei = wlfw_host_cap_req_msg_v01_ei;

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_HOST_CAP_REQ_V01,
			       WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN,
			       req_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send host capability request: %d\n", ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	/* older FW didn't support this request, which is not fatal */
	if (resp.resp.result != QMI_RESULT_SUCCESS_V01 &&
	    resp.resp.error != QMI_ERR_NOT_SUPPORTED_V01) {
		ath10k_err(ar, "host capability request rejected: %d\n", resp.resp.error);
		ret = -EINVAL;
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi host capability request completed\n");
	return 0;

out:
	return ret;
}

int ath10k_qmi_set_fw_log_mode(struct ath10k *ar, u8 fw_log_mode)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct wlfw_ini_resp_msg_v01 resp = {};
	struct ath10k_qmi *qmi = ar_snoc->qmi;
	struct wlfw_ini_req_msg_v01 req = {};
	struct qmi_txn txn;
	int ret;

	req.enablefwlog_valid = 1;
	req.enablefwlog = fw_log_mode;

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_ini_resp_msg_v01_ei,
			   &resp);
	if (ret < 0)
		goto out;

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_INI_REQ_V01,
			       WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_ini_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send fw log request: %d\n", ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		ath10k_err(ar, "fw log request rejected: %d\n",
			   resp.resp.error);
		ret = -EINVAL;
		goto out;
	}
	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi fw log request completed, mode: %d\n",
		   fw_log_mode);
	return 0;

out:
	return ret;
}

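/* Register for the fw_ready and msa_ready indications (and xo_cal when
 * supported), and note whether the firmware reports itself as already ready.
 */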
static int
ath10k_qmi_ind_register_send_sync_msg(struct ath10k_qmi *qmi)
{
	struct wlfw_ind_register_resp_msg_v01 resp = {};
	struct wlfw_ind_register_req_msg_v01 req = {};
	struct ath10k *ar = qmi->ar;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct qmi_txn txn;
	int ret;

	req.client_id_valid = 1;
	req.client_id = ATH10K_QMI_CLIENT_ID;
	req.fw_ready_enable_valid = 1;
	req.fw_ready_enable = 1;
	req.msa_ready_enable_valid = 1;
	req.msa_ready_enable = 1;

	if (ar_snoc->xo_cal_supported) {
		req.xo_cal_enable_valid = 1;
		req.xo_cal_enable = 1;
	}

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
			   wlfw_ind_register_resp_msg_v01_ei, &resp);
	if (ret < 0)
		goto out;

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_IND_REGISTER_REQ_V01,
			       WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_ind_register_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send indication registered request: %d\n", ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		ath10k_err(ar, "indication request rejected: %d\n", resp.resp.error);
		ret = -EINVAL;
		goto out;
	}

	if (resp.fw_status_valid) {
		if (resp.fw_status & QMI_WLFW_FW_READY_V01)
			qmi->fw_ready = true;
	}
	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi indication register request completed\n");
	return 0;

out:
	return ret;
}

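/* Bring-up sequence run when the wlfw QMI service appears: register for
 * indications, exchange capabilities, map the MSA region and signal
 * msa_ready. If the firmware is already ready, just forward the event.
 */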
static void ath10k_qmi_event_server_arrive(struct ath10k_qmi *qmi)
{
	struct ath10k *ar = qmi->ar;
	int ret;

	ret = ath10k_qmi_ind_register_send_sync_msg(qmi);
	if (ret)
		return;

	if (qmi->fw_ready) {
		ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_READY_IND);
		return;
	}

	ret = ath10k_qmi_host_cap_send_sync(qmi);
	if (ret)
		return;

	ret = ath10k_qmi_msa_mem_info_send_sync_msg(qmi);
	if (ret)
		return;

	/*
	 * HACK: sleep for a while between receiving the msa info response
	 * and the XPU update to prevent SDM845 from crashing due to a security
	 * violation, when running MPSS.AT.4.0.c2-01184-SDM845_GEN_PACK-1.
	 */
	msleep(20);

	ret = ath10k_qmi_setup_msa_permissions(qmi);
	if (ret)
		return;

	ret = ath10k_qmi_msa_ready_send_sync_msg(qmi);
	if (ret)
		goto err_setup_msa;

	ret = ath10k_qmi_cap_send_sync_msg(qmi);
	if (ret)
		goto err_setup_msa;

	return;

err_setup_msa:
	ath10k_qmi_remove_msa_permission(qmi);
}

static int ath10k_qmi_fetch_board_file(struct ath10k_qmi *qmi)
{
	struct ath10k *ar = qmi->ar;
	int ret;

	ar->hif.bus = ATH10K_BUS_SNOC;
	ar->id.qmi_ids_valid = true;
	ar->id.qmi_board_id = qmi->board_info.board_id;
	ar->id.qmi_chip_id = qmi->chip_info.chip_id;
	ar->hw_params.fw.dir = WCN3990_HW_1_0_FW_DIR;

	ret = ath10k_core_check_dt(ar);
	if (ret)
		ath10k_dbg(ar, ATH10K_DBG_QMI, "DT bdf variant name not set.\n");

	return ath10k_core_fetch_board_file(qmi->ar, ATH10K_BD_IE_BOARD);
}

static int
ath10k_qmi_driver_event_post(struct ath10k_qmi *qmi,
			     enum ath10k_qmi_driver_event_type type,
			     void *data)
{
	struct ath10k_qmi_driver_event *event;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return -ENOMEM;

	event->type = type;
	event->data = data;

	spin_lock(&qmi->event_lock);
	list_add_tail(&event->list, &qmi->event_list);
	spin_unlock(&qmi->event_lock);

	queue_work(qmi->event_wq, &qmi->event_work);

	return 0;
}

static void ath10k_qmi_event_server_exit(struct ath10k_qmi *qmi)
{
	struct ath10k *ar = qmi->ar;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	ath10k_qmi_remove_msa_permission(qmi);
	ath10k_core_free_board_files(ar);
	if (!test_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags) &&
	    !test_bit(ATH10K_SNOC_FLAG_MODEM_STOPPED, &ar_snoc->flags))
		ath10k_snoc_fw_crashed_dump(ar);

	ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_DOWN_IND);
	ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw qmi service disconnected\n");
}

static void ath10k_qmi_event_msa_ready(struct ath10k_qmi *qmi)
{
	int ret;

	ret = ath10k_qmi_fetch_board_file(qmi);
	if (ret)
		goto out;

	ret = ath10k_qmi_bdf_dnld_send_sync(qmi);
	if (ret)
		goto out;

	ret = ath10k_qmi_send_cal_report_req(qmi);

out:
	return;
}

static int ath10k_qmi_event_fw_ready_ind(struct ath10k_qmi *qmi)
{
	struct ath10k *ar = qmi->ar;

	ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw ready event received\n");
	ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_READY_IND);

	return 0;
}

static void ath10k_qmi_fw_ready_ind(struct qmi_handle *qmi_hdl,
				    struct sockaddr_qrtr *sq,
				    struct qmi_txn *txn, const void *data)
{
	struct ath10k_qmi *qmi = container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);

	ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_FW_READY_IND, NULL);
}

static void ath10k_qmi_msa_ready_ind(struct qmi_handle *qmi_hdl,
				     struct sockaddr_qrtr *sq,
				     struct qmi_txn *txn, const void *data)
{
	struct ath10k_qmi *qmi = container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);

	ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_MSA_READY_IND, NULL);
}

static const struct qmi_msg_handler qmi_msg_handler[] = {
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_WLFW_FW_READY_IND_V01,
		.ei = wlfw_fw_ready_ind_msg_v01_ei,
		.decoded_size = sizeof(struct wlfw_fw_ready_ind_msg_v01),
		.fn = ath10k_qmi_fw_ready_ind,
	},
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_WLFW_MSA_READY_IND_V01,
		.ei = wlfw_msa_ready_ind_msg_v01_ei,
		.decoded_size = sizeof(struct wlfw_msa_ready_ind_msg_v01),
		.fn = ath10k_qmi_msa_ready_ind,
	},
	{}
};

static int ath10k_qmi_new_server(struct qmi_handle *qmi_hdl,
				 struct qmi_service *service)
{
	struct ath10k_qmi *qmi = container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);
	struct sockaddr_qrtr *sq = &qmi->sq;
	struct ath10k *ar = qmi->ar;
	int ret;

	sq->sq_family = AF_QIPCRTR;
	sq->sq_node = service->node;
	sq->sq_port = service->port;

	ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw qmi service found\n");

	ret = kernel_connect(qmi_hdl->sock, (struct sockaddr *)&qmi->sq,
			     sizeof(qmi->sq), 0);
	if (ret) {
		ath10k_err(ar, "failed to connect to a remote QMI service port\n");
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi wifi fw qmi service connected\n");
	ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_SERVER_ARRIVE, NULL);

	return ret;
}

static void ath10k_qmi_del_server(struct qmi_handle *qmi_hdl,
				  struct qmi_service *service)
{
	struct ath10k_qmi *qmi =
		container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);

	qmi->fw_ready = false;

	/*
	 * The del_server event is to be processed only if coming from
	 * the qmi server. The qmi infrastructure sends del_server, when
	 * any client releases the qmi handle. In this case do not process
	 * this del_server event.
	 */
	if (qmi->state == ATH10K_QMI_STATE_INIT_DONE)
		ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_SERVER_EXIT,
					     NULL);
}

static const struct qmi_ops ath10k_qmi_ops = {
	.new_server = ath10k_qmi_new_server,
	.del_server = ath10k_qmi_del_server,
};

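/* Ordered workqueue handler that drains qmi->event_list and dispatches each
 * queued QMI driver event outside of atomic context.
 */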
static void ath10k_qmi_driver_event_work(struct work_struct *work)
{
	struct ath10k_qmi *qmi = container_of(work, struct ath10k_qmi,
					      event_work);
	struct ath10k_qmi_driver_event *event;
	struct ath10k *ar = qmi->ar;

	spin_lock(&qmi->event_lock);
	while (!list_empty(&qmi->event_list)) {
		event = list_first_entry(&qmi->event_list,
					 struct ath10k_qmi_driver_event, list);
		list_del(&event->list);
		spin_unlock(&qmi->event_lock);

		switch (event->type) {
		case ATH10K_QMI_EVENT_SERVER_ARRIVE:
			ath10k_qmi_event_server_arrive(qmi);
			if (qmi->no_msa_ready_indicator) {
				ath10k_info(ar, "qmi not waiting for msa_ready indicator");
				ath10k_qmi_event_msa_ready(qmi);
			}
			break;
		case ATH10K_QMI_EVENT_SERVER_EXIT:
			ath10k_qmi_event_server_exit(qmi);
			break;
		case ATH10K_QMI_EVENT_FW_READY_IND:
			ath10k_qmi_event_fw_ready_ind(qmi);
			break;
		case ATH10K_QMI_EVENT_MSA_READY_IND:
			if (qmi->no_msa_ready_indicator) {
				ath10k_warn(ar, "qmi unexpected msa_ready indicator");
				break;
			}
			ath10k_qmi_event_msa_ready(qmi);
			break;
		default:
			ath10k_warn(ar, "invalid event type: %d", event->type);
			break;
		}
		kfree(event);
		spin_lock(&qmi->event_lock);
	}

	spin_unlock(&qmi->event_lock);
}

int ath10k_qmi_init(struct ath10k *ar, u32 msa_size)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct device *dev = ar->dev;
	struct ath10k_qmi *qmi;
	int ret;

	qmi = kzalloc(sizeof(*qmi), GFP_KERNEL);
	if (!qmi)
		return -ENOMEM;

	qmi->ar = ar;
	ar_snoc->qmi = qmi;

	if (of_property_read_bool(dev->of_node, "qcom,msa-fixed-perm"))
		qmi->msa_fixed_perm = true;

	if (of_property_read_bool(dev->of_node, "qcom,no-msa-ready-indicator"))
		qmi->no_msa_ready_indicator = true;

	ret = qmi_handle_init(&qmi->qmi_hdl,
			      WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
			      &ath10k_qmi_ops, qmi_msg_handler);
	if (ret)
		goto err;

	qmi->event_wq = alloc_ordered_workqueue("ath10k_qmi_driver_event", 0);
	if (!qmi->event_wq) {
		ath10k_err(ar, "failed to allocate workqueue\n");
		ret = -EFAULT;
		goto err_release_qmi_handle;
	}

	INIT_LIST_HEAD(&qmi->event_list);
	spin_lock_init(&qmi->event_lock);
	INIT_WORK(&qmi->event_work, ath10k_qmi_driver_event_work);

	ret = qmi_add_lookup(&qmi->qmi_hdl, WLFW_SERVICE_ID_V01,
			     WLFW_SERVICE_VERS_V01, 0);
	if (ret)
		goto err_qmi_lookup;

	qmi->state = ATH10K_QMI_STATE_INIT_DONE;
	return 0;

err_qmi_lookup:
	destroy_workqueue(qmi->event_wq);

err_release_qmi_handle:
	qmi_handle_release(&qmi->qmi_hdl);

err:
	kfree(qmi);
	return ret;
}

int ath10k_qmi_deinit(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_qmi *qmi = ar_snoc->qmi;

	qmi->state = ATH10K_QMI_STATE_DEINIT;
	qmi_handle_release(&qmi->qmi_hdl);
	cancel_work_sync(&qmi->event_work);
	destroy_workqueue(qmi->event_wq);
	kfree(qmi);
	ar_snoc->qmi = NULL;

	return 0;
}