// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2018 The Linux Foundation. All rights reserved.
 */
6 #include <linux/completion.h>
7 #include <linux/device.h>
8 #include <linux/debugfs.h>
10 #include <linux/kernel.h>
12 #include <linux/of_address.h>
13 #include <linux/module.h>
14 #include <linux/net.h>
15 #include <linux/platform_device.h>
16 #include <linux/qcom_scm.h>
17 #include <linux/string.h>
23 #define ATH10K_QMI_CLIENT_ID 0x4b4e454c
24 #define ATH10K_QMI_TIMEOUT 30
26 static int ath10k_qmi_map_msa_permission(struct ath10k_qmi
*qmi
,
27 struct ath10k_msa_mem_info
*mem_info
)
29 struct qcom_scm_vmperm dst_perms
[3];
30 struct ath10k
*ar
= qmi
->ar
;
31 unsigned int src_perms
;
35 src_perms
= BIT(QCOM_SCM_VMID_HLOS
);
37 dst_perms
[0].vmid
= QCOM_SCM_VMID_MSS_MSA
;
38 dst_perms
[0].perm
= QCOM_SCM_PERM_RW
;
39 dst_perms
[1].vmid
= QCOM_SCM_VMID_WLAN
;
40 dst_perms
[1].perm
= QCOM_SCM_PERM_RW
;
42 if (mem_info
->secure
) {
45 dst_perms
[2].vmid
= QCOM_SCM_VMID_WLAN_CE
;
46 dst_perms
[2].perm
= QCOM_SCM_PERM_RW
;
50 ret
= qcom_scm_assign_mem(mem_info
->addr
, mem_info
->size
,
51 &src_perms
, dst_perms
, perm_count
);
53 ath10k_err(ar
, "failed to assign msa map permissions: %d\n", ret
);
58 static int ath10k_qmi_unmap_msa_permission(struct ath10k_qmi
*qmi
,
59 struct ath10k_msa_mem_info
*mem_info
)
61 struct qcom_scm_vmperm dst_perms
;
62 struct ath10k
*ar
= qmi
->ar
;
63 unsigned int src_perms
;
66 src_perms
= BIT(QCOM_SCM_VMID_MSS_MSA
) | BIT(QCOM_SCM_VMID_WLAN
);
68 if (!mem_info
->secure
)
69 src_perms
|= BIT(QCOM_SCM_VMID_WLAN_CE
);
71 dst_perms
.vmid
= QCOM_SCM_VMID_HLOS
;
72 dst_perms
.perm
= QCOM_SCM_PERM_RW
;
74 ret
= qcom_scm_assign_mem(mem_info
->addr
, mem_info
->size
,
75 &src_perms
, &dst_perms
, 1);
77 ath10k_err(ar
, "failed to unmap msa permissions: %d\n", ret
);
82 static int ath10k_qmi_setup_msa_permissions(struct ath10k_qmi
*qmi
)
87 if (qmi
->msa_fixed_perm
)
90 for (i
= 0; i
< qmi
->nr_mem_region
; i
++) {
91 ret
= ath10k_qmi_map_msa_permission(qmi
, &qmi
->mem_region
[i
]);
99 for (i
--; i
>= 0; i
--)
100 ath10k_qmi_unmap_msa_permission(qmi
, &qmi
->mem_region
[i
]);
104 static void ath10k_qmi_remove_msa_permission(struct ath10k_qmi
*qmi
)
108 if (qmi
->msa_fixed_perm
)
111 for (i
= 0; i
< qmi
->nr_mem_region
; i
++)
112 ath10k_qmi_unmap_msa_permission(qmi
, &qmi
->mem_region
[i
]);
115 static int ath10k_qmi_msa_mem_info_send_sync_msg(struct ath10k_qmi
*qmi
)
117 struct wlfw_msa_info_resp_msg_v01 resp
= {};
118 struct wlfw_msa_info_req_msg_v01 req
= {};
119 struct ath10k
*ar
= qmi
->ar
;
120 phys_addr_t max_mapped_addr
;
125 req
.msa_addr
= ar
->msa
.paddr
;
126 req
.size
= ar
->msa
.mem_size
;
128 ret
= qmi_txn_init(&qmi
->qmi_hdl
, &txn
,
129 wlfw_msa_info_resp_msg_v01_ei
, &resp
);
133 ret
= qmi_send_request(&qmi
->qmi_hdl
, NULL
, &txn
,
134 QMI_WLFW_MSA_INFO_REQ_V01
,
135 WLFW_MSA_INFO_REQ_MSG_V01_MAX_MSG_LEN
,
136 wlfw_msa_info_req_msg_v01_ei
, &req
);
138 qmi_txn_cancel(&txn
);
139 ath10k_err(ar
, "failed to send msa mem info req: %d\n", ret
);
143 ret
= qmi_txn_wait(&txn
, ATH10K_QMI_TIMEOUT
* HZ
);
147 if (resp
.resp
.result
!= QMI_RESULT_SUCCESS_V01
) {
148 ath10k_err(ar
, "msa info req rejected: %d\n", resp
.resp
.error
);
153 if (resp
.mem_region_info_len
> QMI_WLFW_MAX_MEM_REG_V01
) {
154 ath10k_err(ar
, "invalid memory region length received: %d\n",
155 resp
.mem_region_info_len
);
160 max_mapped_addr
= ar
->msa
.paddr
+ ar
->msa
.mem_size
;
161 qmi
->nr_mem_region
= resp
.mem_region_info_len
;
162 for (i
= 0; i
< resp
.mem_region_info_len
; i
++) {
163 if (resp
.mem_region_info
[i
].size
> ar
->msa
.mem_size
||
164 resp
.mem_region_info
[i
].region_addr
> max_mapped_addr
||
165 resp
.mem_region_info
[i
].region_addr
< ar
->msa
.paddr
||
166 resp
.mem_region_info
[i
].size
+
167 resp
.mem_region_info
[i
].region_addr
> max_mapped_addr
) {
168 ath10k_err(ar
, "received out of range memory region address 0x%llx with size 0x%x, aborting\n",
169 resp
.mem_region_info
[i
].region_addr
,
170 resp
.mem_region_info
[i
].size
);
174 qmi
->mem_region
[i
].addr
= resp
.mem_region_info
[i
].region_addr
;
175 qmi
->mem_region
[i
].size
= resp
.mem_region_info
[i
].size
;
176 qmi
->mem_region
[i
].secure
= resp
.mem_region_info
[i
].secure_flag
;
177 ath10k_dbg(ar
, ATH10K_DBG_QMI
,
178 "qmi msa mem region %d addr 0x%pa size 0x%x flag 0x%08x\n",
179 i
, &qmi
->mem_region
[i
].addr
,
180 qmi
->mem_region
[i
].size
,
181 qmi
->mem_region
[i
].secure
);
184 ath10k_dbg(ar
, ATH10K_DBG_QMI
, "qmi msa mem info request completed\n");
188 memset(&qmi
->mem_region
[0], 0, sizeof(qmi
->mem_region
[0]) * i
);
193 static int ath10k_qmi_msa_ready_send_sync_msg(struct ath10k_qmi
*qmi
)
195 struct wlfw_msa_ready_resp_msg_v01 resp
= {};
196 struct wlfw_msa_ready_req_msg_v01 req
= {};
197 struct ath10k
*ar
= qmi
->ar
;
201 ret
= qmi_txn_init(&qmi
->qmi_hdl
, &txn
,
202 wlfw_msa_ready_resp_msg_v01_ei
, &resp
);
206 ret
= qmi_send_request(&qmi
->qmi_hdl
, NULL
, &txn
,
207 QMI_WLFW_MSA_READY_REQ_V01
,
208 WLFW_MSA_READY_REQ_MSG_V01_MAX_MSG_LEN
,
209 wlfw_msa_ready_req_msg_v01_ei
, &req
);
211 qmi_txn_cancel(&txn
);
212 ath10k_err(ar
, "failed to send msa mem ready request: %d\n", ret
);
216 ret
= qmi_txn_wait(&txn
, ATH10K_QMI_TIMEOUT
* HZ
);
220 if (resp
.resp
.result
!= QMI_RESULT_SUCCESS_V01
) {
221 ath10k_err(ar
, "msa ready request rejected: %d\n", resp
.resp
.error
);
225 ath10k_dbg(ar
, ATH10K_DBG_QMI
, "qmi msa mem ready request completed\n");
232 static int ath10k_qmi_bdf_dnld_send_sync(struct ath10k_qmi
*qmi
)
234 struct wlfw_bdf_download_resp_msg_v01 resp
= {};
235 struct wlfw_bdf_download_req_msg_v01
*req
;
236 struct ath10k
*ar
= qmi
->ar
;
237 unsigned int remaining
;
242 req
= kzalloc(sizeof(*req
), GFP_KERNEL
);
246 temp
= ar
->normal_mode_fw
.board_data
;
247 remaining
= ar
->normal_mode_fw
.board_len
;
251 req
->file_id_valid
= 1;
253 req
->total_size_valid
= 1;
254 req
->total_size
= ar
->normal_mode_fw
.board_len
;
255 req
->seg_id_valid
= 1;
259 if (remaining
> QMI_WLFW_MAX_DATA_SIZE_V01
) {
260 req
->data_len
= QMI_WLFW_MAX_DATA_SIZE_V01
;
262 req
->data_len
= remaining
;
266 memcpy(req
->data
, temp
, req
->data_len
);
268 ret
= qmi_txn_init(&qmi
->qmi_hdl
, &txn
,
269 wlfw_bdf_download_resp_msg_v01_ei
,
274 ret
= qmi_send_request(&qmi
->qmi_hdl
, NULL
, &txn
,
275 QMI_WLFW_BDF_DOWNLOAD_REQ_V01
,
276 WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN
,
277 wlfw_bdf_download_req_msg_v01_ei
, req
);
279 qmi_txn_cancel(&txn
);
283 ret
= qmi_txn_wait(&txn
, ATH10K_QMI_TIMEOUT
* HZ
);
288 /* end = 1 triggers a CRC check on the BDF. If this fails, we
289 * get a QMI_ERR_MALFORMED_MSG_V01 error, but the FW is still
290 * willing to use the BDF. For some platforms, all the valid
291 * released BDFs fail this CRC check, so attempt to detect this
292 * scenario and treat it as non-fatal.
294 if (resp
.resp
.result
!= QMI_RESULT_SUCCESS_V01
&&
296 resp
.resp
.result
== QMI_ERR_MALFORMED_MSG_V01
)) {
297 ath10k_err(ar
, "failed to download board data file: %d\n",
303 remaining
-= req
->data_len
;
304 temp
+= req
->data_len
;
308 ath10k_dbg(ar
, ATH10K_DBG_QMI
, "qmi bdf download request completed\n");
318 static int ath10k_qmi_send_cal_report_req(struct ath10k_qmi
*qmi
)
320 struct wlfw_cal_report_resp_msg_v01 resp
= {};
321 struct wlfw_cal_report_req_msg_v01 req
= {};
322 struct ath10k
*ar
= qmi
->ar
;
323 struct ath10k_snoc
*ar_snoc
= ath10k_snoc_priv(ar
);
328 if (ar_snoc
->xo_cal_supported
) {
329 req
.xo_cal_data_valid
= 1;
330 req
.xo_cal_data
= ar_snoc
->xo_cal_data
;
333 ret
= qmi_txn_init(&qmi
->qmi_hdl
, &txn
, wlfw_cal_report_resp_msg_v01_ei
,
338 for (i
= 0; i
< QMI_WLFW_MAX_NUM_CAL_V01
; i
++) {
339 if (qmi
->cal_data
[i
].total_size
&&
340 qmi
->cal_data
[i
].data
) {
341 req
.meta_data
[j
] = qmi
->cal_data
[i
].cal_id
;
345 req
.meta_data_len
= j
;
347 ret
= qmi_send_request(&qmi
->qmi_hdl
, NULL
, &txn
,
348 QMI_WLFW_CAL_REPORT_REQ_V01
,
349 WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN
,
350 wlfw_cal_report_req_msg_v01_ei
, &req
);
352 qmi_txn_cancel(&txn
);
353 ath10k_err(ar
, "failed to send calibration request: %d\n", ret
);
357 ret
= qmi_txn_wait(&txn
, ATH10K_QMI_TIMEOUT
* HZ
);
361 if (resp
.resp
.result
!= QMI_RESULT_SUCCESS_V01
) {
362 ath10k_err(ar
, "calibration request rejected: %d\n", resp
.resp
.error
);
367 ath10k_dbg(ar
, ATH10K_DBG_QMI
, "qmi cal report request completed\n");
375 ath10k_qmi_mode_send_sync_msg(struct ath10k
*ar
, enum wlfw_driver_mode_enum_v01 mode
)
377 struct ath10k_snoc
*ar_snoc
= ath10k_snoc_priv(ar
);
378 struct ath10k_qmi
*qmi
= ar_snoc
->qmi
;
379 struct wlfw_wlan_mode_resp_msg_v01 resp
= {};
380 struct wlfw_wlan_mode_req_msg_v01 req
= {};
384 ret
= qmi_txn_init(&qmi
->qmi_hdl
, &txn
,
385 wlfw_wlan_mode_resp_msg_v01_ei
,
391 req
.hw_debug_valid
= 1;
394 ret
= qmi_send_request(&qmi
->qmi_hdl
, NULL
, &txn
,
395 QMI_WLFW_WLAN_MODE_REQ_V01
,
396 WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN
,
397 wlfw_wlan_mode_req_msg_v01_ei
, &req
);
399 qmi_txn_cancel(&txn
);
400 ath10k_err(ar
, "failed to send wlan mode %d request: %d\n", mode
, ret
);
404 ret
= qmi_txn_wait(&txn
, ATH10K_QMI_TIMEOUT
* HZ
);
408 if (resp
.resp
.result
!= QMI_RESULT_SUCCESS_V01
) {
409 ath10k_err(ar
, "more request rejected: %d\n", resp
.resp
.error
);
414 ath10k_dbg(ar
, ATH10K_DBG_QMI
, "qmi wlan mode req completed: %d\n", mode
);
422 ath10k_qmi_cfg_send_sync_msg(struct ath10k
*ar
,
423 struct ath10k_qmi_wlan_enable_cfg
*config
,
426 struct ath10k_snoc
*ar_snoc
= ath10k_snoc_priv(ar
);
427 struct ath10k_qmi
*qmi
= ar_snoc
->qmi
;
428 struct wlfw_wlan_cfg_resp_msg_v01 resp
= {};
429 struct wlfw_wlan_cfg_req_msg_v01
*req
;
434 req
= kzalloc(sizeof(*req
), GFP_KERNEL
);
438 ret
= qmi_txn_init(&qmi
->qmi_hdl
, &txn
,
439 wlfw_wlan_cfg_resp_msg_v01_ei
,
444 req
->host_version_valid
= 0;
446 req
->tgt_cfg_valid
= 1;
447 if (config
->num_ce_tgt_cfg
> QMI_WLFW_MAX_NUM_CE_V01
)
448 req
->tgt_cfg_len
= QMI_WLFW_MAX_NUM_CE_V01
;
450 req
->tgt_cfg_len
= config
->num_ce_tgt_cfg
;
451 for (i
= 0; i
< req
->tgt_cfg_len
; i
++) {
452 req
->tgt_cfg
[i
].pipe_num
= config
->ce_tgt_cfg
[i
].pipe_num
;
453 req
->tgt_cfg
[i
].pipe_dir
= config
->ce_tgt_cfg
[i
].pipe_dir
;
454 req
->tgt_cfg
[i
].nentries
= config
->ce_tgt_cfg
[i
].nentries
;
455 req
->tgt_cfg
[i
].nbytes_max
= config
->ce_tgt_cfg
[i
].nbytes_max
;
456 req
->tgt_cfg
[i
].flags
= config
->ce_tgt_cfg
[i
].flags
;
459 req
->svc_cfg_valid
= 1;
460 if (config
->num_ce_svc_pipe_cfg
> QMI_WLFW_MAX_NUM_SVC_V01
)
461 req
->svc_cfg_len
= QMI_WLFW_MAX_NUM_SVC_V01
;
463 req
->svc_cfg_len
= config
->num_ce_svc_pipe_cfg
;
464 for (i
= 0; i
< req
->svc_cfg_len
; i
++) {
465 req
->svc_cfg
[i
].service_id
= config
->ce_svc_cfg
[i
].service_id
;
466 req
->svc_cfg
[i
].pipe_dir
= config
->ce_svc_cfg
[i
].pipe_dir
;
467 req
->svc_cfg
[i
].pipe_num
= config
->ce_svc_cfg
[i
].pipe_num
;
470 req
->shadow_reg_valid
= 1;
471 if (config
->num_shadow_reg_cfg
>
472 QMI_WLFW_MAX_NUM_SHADOW_REG_V01
)
473 req
->shadow_reg_len
= QMI_WLFW_MAX_NUM_SHADOW_REG_V01
;
475 req
->shadow_reg_len
= config
->num_shadow_reg_cfg
;
477 memcpy(req
->shadow_reg
, config
->shadow_reg_cfg
,
478 sizeof(struct wlfw_shadow_reg_cfg_s_v01
) * req
->shadow_reg_len
);
480 ret
= qmi_send_request(&qmi
->qmi_hdl
, NULL
, &txn
,
481 QMI_WLFW_WLAN_CFG_REQ_V01
,
482 WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN
,
483 wlfw_wlan_cfg_req_msg_v01_ei
, req
);
485 qmi_txn_cancel(&txn
);
486 ath10k_err(ar
, "failed to send config request: %d\n", ret
);
490 ret
= qmi_txn_wait(&txn
, ATH10K_QMI_TIMEOUT
* HZ
);
494 if (resp
.resp
.result
!= QMI_RESULT_SUCCESS_V01
) {
495 ath10k_err(ar
, "config request rejected: %d\n", resp
.resp
.error
);
500 ath10k_dbg(ar
, ATH10K_DBG_QMI
, "qmi config request completed\n");
509 int ath10k_qmi_wlan_enable(struct ath10k
*ar
,
510 struct ath10k_qmi_wlan_enable_cfg
*config
,
511 enum wlfw_driver_mode_enum_v01 mode
,
516 ath10k_dbg(ar
, ATH10K_DBG_QMI
, "qmi mode %d config %p\n",
519 ret
= ath10k_qmi_cfg_send_sync_msg(ar
, config
, version
);
521 ath10k_err(ar
, "failed to send qmi config: %d\n", ret
);
525 ret
= ath10k_qmi_mode_send_sync_msg(ar
, mode
);
527 ath10k_err(ar
, "failed to send qmi mode: %d\n", ret
);
534 int ath10k_qmi_wlan_disable(struct ath10k
*ar
)
536 return ath10k_qmi_mode_send_sync_msg(ar
, QMI_WLFW_OFF_V01
);
539 static int ath10k_qmi_cap_send_sync_msg(struct ath10k_qmi
*qmi
)
541 struct wlfw_cap_resp_msg_v01
*resp
;
542 struct wlfw_cap_req_msg_v01 req
= {};
543 struct ath10k
*ar
= qmi
->ar
;
544 struct ath10k_snoc
*ar_snoc
= ath10k_snoc_priv(ar
);
548 resp
= kzalloc(sizeof(*resp
), GFP_KERNEL
);
552 ret
= qmi_txn_init(&qmi
->qmi_hdl
, &txn
, wlfw_cap_resp_msg_v01_ei
, resp
);
556 ret
= qmi_send_request(&qmi
->qmi_hdl
, NULL
, &txn
,
557 QMI_WLFW_CAP_REQ_V01
,
558 WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN
,
559 wlfw_cap_req_msg_v01_ei
, &req
);
561 qmi_txn_cancel(&txn
);
562 ath10k_err(ar
, "failed to send capability request: %d\n", ret
);
566 ret
= qmi_txn_wait(&txn
, ATH10K_QMI_TIMEOUT
* HZ
);
570 if (resp
->resp
.result
!= QMI_RESULT_SUCCESS_V01
) {
571 ath10k_err(ar
, "capability req rejected: %d\n", resp
->resp
.error
);
576 if (resp
->chip_info_valid
) {
577 qmi
->chip_info
.chip_id
= resp
->chip_info
.chip_id
;
578 qmi
->chip_info
.chip_family
= resp
->chip_info
.chip_family
;
580 qmi
->chip_info
.chip_id
= 0xFF;
583 if (resp
->board_info_valid
)
584 qmi
->board_info
.board_id
= resp
->board_info
.board_id
;
586 qmi
->board_info
.board_id
= 0xFF;
588 if (resp
->soc_info_valid
)
589 qmi
->soc_info
.soc_id
= resp
->soc_info
.soc_id
;
591 if (resp
->fw_version_info_valid
) {
592 qmi
->fw_version
= resp
->fw_version_info
.fw_version
;
593 strlcpy(qmi
->fw_build_timestamp
, resp
->fw_version_info
.fw_build_timestamp
,
594 sizeof(qmi
->fw_build_timestamp
));
597 if (resp
->fw_build_id_valid
)
598 strlcpy(qmi
->fw_build_id
, resp
->fw_build_id
,
599 MAX_BUILD_ID_LEN
+ 1);
601 if (!test_bit(ATH10K_SNOC_FLAG_REGISTERED
, &ar_snoc
->flags
)) {
602 ath10k_info(ar
, "qmi chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x",
603 qmi
->chip_info
.chip_id
, qmi
->chip_info
.chip_family
,
604 qmi
->board_info
.board_id
, qmi
->soc_info
.soc_id
);
605 ath10k_info(ar
, "qmi fw_version 0x%x fw_build_timestamp %s fw_build_id %s",
606 qmi
->fw_version
, qmi
->fw_build_timestamp
, qmi
->fw_build_id
);
617 static int ath10k_qmi_host_cap_send_sync(struct ath10k_qmi
*qmi
)
619 struct wlfw_host_cap_resp_msg_v01 resp
= {};
620 struct wlfw_host_cap_req_msg_v01 req
= {};
621 struct qmi_elem_info
*req_ei
;
622 struct ath10k
*ar
= qmi
->ar
;
623 struct ath10k_snoc
*ar_snoc
= ath10k_snoc_priv(ar
);
627 req
.daemon_support_valid
= 1;
628 req
.daemon_support
= 0;
630 ret
= qmi_txn_init(&qmi
->qmi_hdl
, &txn
, wlfw_host_cap_resp_msg_v01_ei
,
635 if (test_bit(ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK
, &ar_snoc
->flags
))
636 req_ei
= wlfw_host_cap_8bit_req_msg_v01_ei
;
638 req_ei
= wlfw_host_cap_req_msg_v01_ei
;
640 ret
= qmi_send_request(&qmi
->qmi_hdl
, NULL
, &txn
,
641 QMI_WLFW_HOST_CAP_REQ_V01
,
642 WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN
,
645 qmi_txn_cancel(&txn
);
646 ath10k_err(ar
, "failed to send host capability request: %d\n", ret
);
650 ret
= qmi_txn_wait(&txn
, ATH10K_QMI_TIMEOUT
* HZ
);
654 /* older FW didn't support this request, which is not fatal */
655 if (resp
.resp
.result
!= QMI_RESULT_SUCCESS_V01
&&
656 resp
.resp
.error
!= QMI_ERR_NOT_SUPPORTED_V01
) {
657 ath10k_err(ar
, "host capability request rejected: %d\n", resp
.resp
.error
);
662 ath10k_dbg(ar
, ATH10K_DBG_QMI
, "qmi host capability request completed\n");
669 int ath10k_qmi_set_fw_log_mode(struct ath10k
*ar
, u8 fw_log_mode
)
671 struct ath10k_snoc
*ar_snoc
= ath10k_snoc_priv(ar
);
672 struct wlfw_ini_resp_msg_v01 resp
= {};
673 struct ath10k_qmi
*qmi
= ar_snoc
->qmi
;
674 struct wlfw_ini_req_msg_v01 req
= {};
678 req
.enablefwlog_valid
= 1;
679 req
.enablefwlog
= fw_log_mode
;
681 ret
= qmi_txn_init(&qmi
->qmi_hdl
, &txn
, wlfw_ini_resp_msg_v01_ei
,
686 ret
= qmi_send_request(&qmi
->qmi_hdl
, NULL
, &txn
,
687 QMI_WLFW_INI_REQ_V01
,
688 WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN
,
689 wlfw_ini_req_msg_v01_ei
, &req
);
691 qmi_txn_cancel(&txn
);
692 ath10k_err(ar
, "failed to send fw log request: %d\n", ret
);
696 ret
= qmi_txn_wait(&txn
, ATH10K_QMI_TIMEOUT
* HZ
);
700 if (resp
.resp
.result
!= QMI_RESULT_SUCCESS_V01
) {
701 ath10k_err(ar
, "fw log request rejected: %d\n",
706 ath10k_dbg(ar
, ATH10K_DBG_QMI
, "qmi fw log request completed, mode: %d\n",
715 ath10k_qmi_ind_register_send_sync_msg(struct ath10k_qmi
*qmi
)
717 struct wlfw_ind_register_resp_msg_v01 resp
= {};
718 struct wlfw_ind_register_req_msg_v01 req
= {};
719 struct ath10k
*ar
= qmi
->ar
;
720 struct ath10k_snoc
*ar_snoc
= ath10k_snoc_priv(ar
);
724 req
.client_id_valid
= 1;
725 req
.client_id
= ATH10K_QMI_CLIENT_ID
;
726 req
.fw_ready_enable_valid
= 1;
727 req
.fw_ready_enable
= 1;
728 req
.msa_ready_enable_valid
= 1;
729 req
.msa_ready_enable
= 1;
731 if (ar_snoc
->xo_cal_supported
) {
732 req
.xo_cal_enable_valid
= 1;
733 req
.xo_cal_enable
= 1;
736 ret
= qmi_txn_init(&qmi
->qmi_hdl
, &txn
,
737 wlfw_ind_register_resp_msg_v01_ei
, &resp
);
741 ret
= qmi_send_request(&qmi
->qmi_hdl
, NULL
, &txn
,
742 QMI_WLFW_IND_REGISTER_REQ_V01
,
743 WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN
,
744 wlfw_ind_register_req_msg_v01_ei
, &req
);
746 qmi_txn_cancel(&txn
);
747 ath10k_err(ar
, "failed to send indication registered request: %d\n", ret
);
751 ret
= qmi_txn_wait(&txn
, ATH10K_QMI_TIMEOUT
* HZ
);
755 if (resp
.resp
.result
!= QMI_RESULT_SUCCESS_V01
) {
756 ath10k_err(ar
, "indication request rejected: %d\n", resp
.resp
.error
);
761 if (resp
.fw_status_valid
) {
762 if (resp
.fw_status
& QMI_WLFW_FW_READY_V01
)
763 qmi
->fw_ready
= true;
765 ath10k_dbg(ar
, ATH10K_DBG_QMI
, "qmi indication register request completed\n");
772 static void ath10k_qmi_event_server_arrive(struct ath10k_qmi
*qmi
)
774 struct ath10k
*ar
= qmi
->ar
;
777 ret
= ath10k_qmi_ind_register_send_sync_msg(qmi
);
782 ath10k_snoc_fw_indication(ar
, ATH10K_QMI_EVENT_FW_READY_IND
);
786 ret
= ath10k_qmi_host_cap_send_sync(qmi
);
790 ret
= ath10k_qmi_msa_mem_info_send_sync_msg(qmi
);
795 * HACK: sleep for a while inbetween receiving the msa info response
796 * and the XPU update to prevent SDM845 from crashing due to a security
797 * violation, when running MPSS.AT.4.0.c2-01184-SDM845_GEN_PACK-1.
801 ret
= ath10k_qmi_setup_msa_permissions(qmi
);
805 ret
= ath10k_qmi_msa_ready_send_sync_msg(qmi
);
809 ret
= ath10k_qmi_cap_send_sync_msg(qmi
);
816 ath10k_qmi_remove_msa_permission(qmi
);
819 static int ath10k_qmi_fetch_board_file(struct ath10k_qmi
*qmi
)
821 struct ath10k
*ar
= qmi
->ar
;
824 ar
->hif
.bus
= ATH10K_BUS_SNOC
;
825 ar
->id
.qmi_ids_valid
= true;
826 ar
->id
.qmi_board_id
= qmi
->board_info
.board_id
;
827 ar
->id
.qmi_chip_id
= qmi
->chip_info
.chip_id
;
828 ar
->hw_params
.fw
.dir
= WCN3990_HW_1_0_FW_DIR
;
830 ret
= ath10k_core_check_dt(ar
);
832 ath10k_dbg(ar
, ATH10K_DBG_QMI
, "DT bdf variant name not set.\n");
834 return ath10k_core_fetch_board_file(qmi
->ar
, ATH10K_BD_IE_BOARD
);
838 ath10k_qmi_driver_event_post(struct ath10k_qmi
*qmi
,
839 enum ath10k_qmi_driver_event_type type
,
842 struct ath10k_qmi_driver_event
*event
;
844 event
= kzalloc(sizeof(*event
), GFP_ATOMIC
);
851 spin_lock(&qmi
->event_lock
);
852 list_add_tail(&event
->list
, &qmi
->event_list
);
853 spin_unlock(&qmi
->event_lock
);
855 queue_work(qmi
->event_wq
, &qmi
->event_work
);
860 static void ath10k_qmi_event_server_exit(struct ath10k_qmi
*qmi
)
862 struct ath10k
*ar
= qmi
->ar
;
863 struct ath10k_snoc
*ar_snoc
= ath10k_snoc_priv(ar
);
865 ath10k_qmi_remove_msa_permission(qmi
);
866 ath10k_core_free_board_files(ar
);
867 if (!test_bit(ATH10K_SNOC_FLAG_UNREGISTERING
, &ar_snoc
->flags
))
868 ath10k_snoc_fw_crashed_dump(ar
);
870 ath10k_snoc_fw_indication(ar
, ATH10K_QMI_EVENT_FW_DOWN_IND
);
871 ath10k_dbg(ar
, ATH10K_DBG_QMI
, "wifi fw qmi service disconnected\n");
/* Handle the MSA-ready indication: fetch the board file, download it,
 * then report available calibration data.  Errors stop the sequence.
 */
static void ath10k_qmi_event_msa_ready(struct ath10k_qmi *qmi)
{
	int ret;

	ret = ath10k_qmi_fetch_board_file(qmi);
	if (ret)
		goto out;

	ret = ath10k_qmi_bdf_dnld_send_sync(qmi);
	if (ret)
		goto out;

	ret = ath10k_qmi_send_cal_report_req(qmi);

out:
	return;
}
892 static int ath10k_qmi_event_fw_ready_ind(struct ath10k_qmi
*qmi
)
894 struct ath10k
*ar
= qmi
->ar
;
896 ath10k_dbg(ar
, ATH10K_DBG_QMI
, "wifi fw ready event received\n");
897 ath10k_snoc_fw_indication(ar
, ATH10K_QMI_EVENT_FW_READY_IND
);
902 static void ath10k_qmi_fw_ready_ind(struct qmi_handle
*qmi_hdl
,
903 struct sockaddr_qrtr
*sq
,
904 struct qmi_txn
*txn
, const void *data
)
906 struct ath10k_qmi
*qmi
= container_of(qmi_hdl
, struct ath10k_qmi
, qmi_hdl
);
908 ath10k_qmi_driver_event_post(qmi
, ATH10K_QMI_EVENT_FW_READY_IND
, NULL
);
911 static void ath10k_qmi_msa_ready_ind(struct qmi_handle
*qmi_hdl
,
912 struct sockaddr_qrtr
*sq
,
913 struct qmi_txn
*txn
, const void *data
)
915 struct ath10k_qmi
*qmi
= container_of(qmi_hdl
, struct ath10k_qmi
, qmi_hdl
);
917 ath10k_qmi_driver_event_post(qmi
, ATH10K_QMI_EVENT_MSA_READY_IND
, NULL
);
920 static const struct qmi_msg_handler qmi_msg_handler
[] = {
922 .type
= QMI_INDICATION
,
923 .msg_id
= QMI_WLFW_FW_READY_IND_V01
,
924 .ei
= wlfw_fw_ready_ind_msg_v01_ei
,
925 .decoded_size
= sizeof(struct wlfw_fw_ready_ind_msg_v01
),
926 .fn
= ath10k_qmi_fw_ready_ind
,
929 .type
= QMI_INDICATION
,
930 .msg_id
= QMI_WLFW_MSA_READY_IND_V01
,
931 .ei
= wlfw_msa_ready_ind_msg_v01_ei
,
932 .decoded_size
= sizeof(struct wlfw_msa_ready_ind_msg_v01
),
933 .fn
= ath10k_qmi_msa_ready_ind
,
938 static int ath10k_qmi_new_server(struct qmi_handle
*qmi_hdl
,
939 struct qmi_service
*service
)
941 struct ath10k_qmi
*qmi
= container_of(qmi_hdl
, struct ath10k_qmi
, qmi_hdl
);
942 struct sockaddr_qrtr
*sq
= &qmi
->sq
;
943 struct ath10k
*ar
= qmi
->ar
;
946 sq
->sq_family
= AF_QIPCRTR
;
947 sq
->sq_node
= service
->node
;
948 sq
->sq_port
= service
->port
;
950 ath10k_dbg(ar
, ATH10K_DBG_QMI
, "wifi fw qmi service found\n");
952 ret
= kernel_connect(qmi_hdl
->sock
, (struct sockaddr
*)&qmi
->sq
,
955 ath10k_err(ar
, "failed to connect to a remote QMI service port\n");
959 ath10k_dbg(ar
, ATH10K_DBG_QMI
, "qmi wifi fw qmi service connected\n");
960 ath10k_qmi_driver_event_post(qmi
, ATH10K_QMI_EVENT_SERVER_ARRIVE
, NULL
);
965 static void ath10k_qmi_del_server(struct qmi_handle
*qmi_hdl
,
966 struct qmi_service
*service
)
968 struct ath10k_qmi
*qmi
=
969 container_of(qmi_hdl
, struct ath10k_qmi
, qmi_hdl
);
971 qmi
->fw_ready
= false;
974 * The del_server event is to be processed only if coming from
975 * the qmi server. The qmi infrastructure sends del_server, when
976 * any client releases the qmi handle. In this case do not process
977 * this del_server event.
979 if (qmi
->state
== ATH10K_QMI_STATE_INIT_DONE
)
980 ath10k_qmi_driver_event_post(qmi
, ATH10K_QMI_EVENT_SERVER_EXIT
,
984 static const struct qmi_ops ath10k_qmi_ops
= {
985 .new_server
= ath10k_qmi_new_server
,
986 .del_server
= ath10k_qmi_del_server
,
989 static void ath10k_qmi_driver_event_work(struct work_struct
*work
)
991 struct ath10k_qmi
*qmi
= container_of(work
, struct ath10k_qmi
,
993 struct ath10k_qmi_driver_event
*event
;
994 struct ath10k
*ar
= qmi
->ar
;
996 spin_lock(&qmi
->event_lock
);
997 while (!list_empty(&qmi
->event_list
)) {
998 event
= list_first_entry(&qmi
->event_list
,
999 struct ath10k_qmi_driver_event
, list
);
1000 list_del(&event
->list
);
1001 spin_unlock(&qmi
->event_lock
);
1003 switch (event
->type
) {
1004 case ATH10K_QMI_EVENT_SERVER_ARRIVE
:
1005 ath10k_qmi_event_server_arrive(qmi
);
1007 case ATH10K_QMI_EVENT_SERVER_EXIT
:
1008 ath10k_qmi_event_server_exit(qmi
);
1010 case ATH10K_QMI_EVENT_FW_READY_IND
:
1011 ath10k_qmi_event_fw_ready_ind(qmi
);
1013 case ATH10K_QMI_EVENT_MSA_READY_IND
:
1014 ath10k_qmi_event_msa_ready(qmi
);
1017 ath10k_warn(ar
, "invalid event type: %d", event
->type
);
1021 spin_lock(&qmi
->event_lock
);
1023 spin_unlock(&qmi
->event_lock
);
1026 int ath10k_qmi_init(struct ath10k
*ar
, u32 msa_size
)
1028 struct ath10k_snoc
*ar_snoc
= ath10k_snoc_priv(ar
);
1029 struct device
*dev
= ar
->dev
;
1030 struct ath10k_qmi
*qmi
;
1033 qmi
= kzalloc(sizeof(*qmi
), GFP_KERNEL
);
1040 if (of_property_read_bool(dev
->of_node
, "qcom,msa-fixed-perm"))
1041 qmi
->msa_fixed_perm
= true;
1043 ret
= qmi_handle_init(&qmi
->qmi_hdl
,
1044 WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN
,
1045 &ath10k_qmi_ops
, qmi_msg_handler
);
1049 qmi
->event_wq
= alloc_workqueue("ath10k_qmi_driver_event",
1051 if (!qmi
->event_wq
) {
1052 ath10k_err(ar
, "failed to allocate workqueue\n");
1054 goto err_release_qmi_handle
;
1057 INIT_LIST_HEAD(&qmi
->event_list
);
1058 spin_lock_init(&qmi
->event_lock
);
1059 INIT_WORK(&qmi
->event_work
, ath10k_qmi_driver_event_work
);
1061 ret
= qmi_add_lookup(&qmi
->qmi_hdl
, WLFW_SERVICE_ID_V01
,
1062 WLFW_SERVICE_VERS_V01
, 0);
1064 goto err_qmi_lookup
;
1066 qmi
->state
= ATH10K_QMI_STATE_INIT_DONE
;
1070 destroy_workqueue(qmi
->event_wq
);
1072 err_release_qmi_handle
:
1073 qmi_handle_release(&qmi
->qmi_hdl
);
1080 int ath10k_qmi_deinit(struct ath10k
*ar
)
1082 struct ath10k_snoc
*ar_snoc
= ath10k_snoc_priv(ar
);
1083 struct ath10k_qmi
*qmi
= ar_snoc
->qmi
;
1085 qmi
->state
= ATH10K_QMI_STATE_DEINIT
;
1086 qmi_handle_release(&qmi
->qmi_hdl
);
1087 cancel_work_sync(&qmi
->event_work
);
1088 destroy_workqueue(qmi
->event_wq
);
1090 ar_snoc
->qmi
= NULL
;