1 // SPDX-License-Identifier: ISC
/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
6 #include <linux/completion.h>
7 #include <linux/device.h>
8 #include <linux/debugfs.h>
10 #include <linux/kernel.h>
12 #include <linux/of_address.h>
13 #include <linux/module.h>
14 #include <linux/net.h>
15 #include <linux/platform_device.h>
16 #include <linux/qcom_scm.h>
17 #include <linux/string.h>
23 #define ATH10K_QMI_CLIENT_ID 0x4b4e454c
24 #define ATH10K_QMI_TIMEOUT 30
26 static int ath10k_qmi_map_msa_permission(struct ath10k_qmi
*qmi
,
27 struct ath10k_msa_mem_info
*mem_info
)
29 struct qcom_scm_vmperm dst_perms
[3];
30 struct ath10k
*ar
= qmi
->ar
;
31 unsigned int src_perms
;
35 src_perms
= BIT(QCOM_SCM_VMID_HLOS
);
37 dst_perms
[0].vmid
= QCOM_SCM_VMID_MSS_MSA
;
38 dst_perms
[0].perm
= QCOM_SCM_PERM_RW
;
39 dst_perms
[1].vmid
= QCOM_SCM_VMID_WLAN
;
40 dst_perms
[1].perm
= QCOM_SCM_PERM_RW
;
42 if (mem_info
->secure
) {
45 dst_perms
[2].vmid
= QCOM_SCM_VMID_WLAN_CE
;
46 dst_perms
[2].perm
= QCOM_SCM_PERM_RW
;
50 ret
= qcom_scm_assign_mem(mem_info
->addr
, mem_info
->size
,
51 &src_perms
, dst_perms
, perm_count
);
53 ath10k_err(ar
, "failed to assign msa map permissions: %d\n", ret
);
58 static int ath10k_qmi_unmap_msa_permission(struct ath10k_qmi
*qmi
,
59 struct ath10k_msa_mem_info
*mem_info
)
61 struct qcom_scm_vmperm dst_perms
;
62 struct ath10k
*ar
= qmi
->ar
;
63 unsigned int src_perms
;
66 src_perms
= BIT(QCOM_SCM_VMID_MSS_MSA
) | BIT(QCOM_SCM_VMID_WLAN
);
68 if (!mem_info
->secure
)
69 src_perms
|= BIT(QCOM_SCM_VMID_WLAN_CE
);
71 dst_perms
.vmid
= QCOM_SCM_VMID_HLOS
;
72 dst_perms
.perm
= QCOM_SCM_PERM_RW
;
74 ret
= qcom_scm_assign_mem(mem_info
->addr
, mem_info
->size
,
75 &src_perms
, &dst_perms
, 1);
77 ath10k_err(ar
, "failed to unmap msa permissions: %d\n", ret
);
82 static int ath10k_qmi_setup_msa_permissions(struct ath10k_qmi
*qmi
)
87 if (qmi
->msa_fixed_perm
)
90 for (i
= 0; i
< qmi
->nr_mem_region
; i
++) {
91 ret
= ath10k_qmi_map_msa_permission(qmi
, &qmi
->mem_region
[i
]);
99 for (i
--; i
>= 0; i
--)
100 ath10k_qmi_unmap_msa_permission(qmi
, &qmi
->mem_region
[i
]);
104 static void ath10k_qmi_remove_msa_permission(struct ath10k_qmi
*qmi
)
108 if (qmi
->msa_fixed_perm
)
111 for (i
= 0; i
< qmi
->nr_mem_region
; i
++)
112 ath10k_qmi_unmap_msa_permission(qmi
, &qmi
->mem_region
[i
]);
115 static int ath10k_qmi_msa_mem_info_send_sync_msg(struct ath10k_qmi
*qmi
)
117 struct wlfw_msa_info_resp_msg_v01 resp
= {};
118 struct wlfw_msa_info_req_msg_v01 req
= {};
119 struct ath10k
*ar
= qmi
->ar
;
120 phys_addr_t max_mapped_addr
;
125 req
.msa_addr
= qmi
->msa_pa
;
126 req
.size
= qmi
->msa_mem_size
;
128 ret
= qmi_txn_init(&qmi
->qmi_hdl
, &txn
,
129 wlfw_msa_info_resp_msg_v01_ei
, &resp
);
133 ret
= qmi_send_request(&qmi
->qmi_hdl
, NULL
, &txn
,
134 QMI_WLFW_MSA_INFO_REQ_V01
,
135 WLFW_MSA_INFO_REQ_MSG_V01_MAX_MSG_LEN
,
136 wlfw_msa_info_req_msg_v01_ei
, &req
);
138 qmi_txn_cancel(&txn
);
139 ath10k_err(ar
, "failed to send msa mem info req: %d\n", ret
);
143 ret
= qmi_txn_wait(&txn
, ATH10K_QMI_TIMEOUT
* HZ
);
147 if (resp
.resp
.result
!= QMI_RESULT_SUCCESS_V01
) {
148 ath10k_err(ar
, "msa info req rejected: %d\n", resp
.resp
.error
);
153 if (resp
.mem_region_info_len
> QMI_WLFW_MAX_MEM_REG_V01
) {
154 ath10k_err(ar
, "invalid memory region length received: %d\n",
155 resp
.mem_region_info_len
);
160 max_mapped_addr
= qmi
->msa_pa
+ qmi
->msa_mem_size
;
161 qmi
->nr_mem_region
= resp
.mem_region_info_len
;
162 for (i
= 0; i
< resp
.mem_region_info_len
; i
++) {
163 if (resp
.mem_region_info
[i
].size
> qmi
->msa_mem_size
||
164 resp
.mem_region_info
[i
].region_addr
> max_mapped_addr
||
165 resp
.mem_region_info
[i
].region_addr
< qmi
->msa_pa
||
166 resp
.mem_region_info
[i
].size
+
167 resp
.mem_region_info
[i
].region_addr
> max_mapped_addr
) {
168 ath10k_err(ar
, "received out of range memory region address 0x%llx with size 0x%x, aborting\n",
169 resp
.mem_region_info
[i
].region_addr
,
170 resp
.mem_region_info
[i
].size
);
174 qmi
->mem_region
[i
].addr
= resp
.mem_region_info
[i
].region_addr
;
175 qmi
->mem_region
[i
].size
= resp
.mem_region_info
[i
].size
;
176 qmi
->mem_region
[i
].secure
= resp
.mem_region_info
[i
].secure_flag
;
177 ath10k_dbg(ar
, ATH10K_DBG_QMI
,
178 "qmi msa mem region %d addr 0x%pa size 0x%x flag 0x%08x\n",
179 i
, &qmi
->mem_region
[i
].addr
,
180 qmi
->mem_region
[i
].size
,
181 qmi
->mem_region
[i
].secure
);
184 ath10k_dbg(ar
, ATH10K_DBG_QMI
, "qmi msa mem info request completed\n");
188 memset(&qmi
->mem_region
[0], 0, sizeof(qmi
->mem_region
[0]) * i
);
193 static int ath10k_qmi_msa_ready_send_sync_msg(struct ath10k_qmi
*qmi
)
195 struct wlfw_msa_ready_resp_msg_v01 resp
= {};
196 struct wlfw_msa_ready_req_msg_v01 req
= {};
197 struct ath10k
*ar
= qmi
->ar
;
201 ret
= qmi_txn_init(&qmi
->qmi_hdl
, &txn
,
202 wlfw_msa_ready_resp_msg_v01_ei
, &resp
);
206 ret
= qmi_send_request(&qmi
->qmi_hdl
, NULL
, &txn
,
207 QMI_WLFW_MSA_READY_REQ_V01
,
208 WLFW_MSA_READY_REQ_MSG_V01_MAX_MSG_LEN
,
209 wlfw_msa_ready_req_msg_v01_ei
, &req
);
211 qmi_txn_cancel(&txn
);
212 ath10k_err(ar
, "failed to send msa mem ready request: %d\n", ret
);
216 ret
= qmi_txn_wait(&txn
, ATH10K_QMI_TIMEOUT
* HZ
);
220 if (resp
.resp
.result
!= QMI_RESULT_SUCCESS_V01
) {
221 ath10k_err(ar
, "msa ready request rejected: %d\n", resp
.resp
.error
);
225 ath10k_dbg(ar
, ATH10K_DBG_QMI
, "qmi msa mem ready request completed\n");
232 static int ath10k_qmi_bdf_dnld_send_sync(struct ath10k_qmi
*qmi
)
234 struct wlfw_bdf_download_resp_msg_v01 resp
= {};
235 struct wlfw_bdf_download_req_msg_v01
*req
;
236 struct ath10k
*ar
= qmi
->ar
;
237 unsigned int remaining
;
242 req
= kzalloc(sizeof(*req
), GFP_KERNEL
);
246 temp
= ar
->normal_mode_fw
.board_data
;
247 remaining
= ar
->normal_mode_fw
.board_len
;
251 req
->file_id_valid
= 1;
253 req
->total_size_valid
= 1;
254 req
->total_size
= ar
->normal_mode_fw
.board_len
;
255 req
->seg_id_valid
= 1;
259 if (remaining
> QMI_WLFW_MAX_DATA_SIZE_V01
) {
260 req
->data_len
= QMI_WLFW_MAX_DATA_SIZE_V01
;
262 req
->data_len
= remaining
;
266 memcpy(req
->data
, temp
, req
->data_len
);
268 ret
= qmi_txn_init(&qmi
->qmi_hdl
, &txn
,
269 wlfw_bdf_download_resp_msg_v01_ei
,
274 ret
= qmi_send_request(&qmi
->qmi_hdl
, NULL
, &txn
,
275 QMI_WLFW_BDF_DOWNLOAD_REQ_V01
,
276 WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN
,
277 wlfw_bdf_download_req_msg_v01_ei
, req
);
279 qmi_txn_cancel(&txn
);
283 ret
= qmi_txn_wait(&txn
, ATH10K_QMI_TIMEOUT
* HZ
);
288 /* end = 1 triggers a CRC check on the BDF. If this fails, we
289 * get a QMI_ERR_MALFORMED_MSG_V01 error, but the FW is still
290 * willing to use the BDF. For some platforms, all the valid
291 * released BDFs fail this CRC check, so attempt to detect this
292 * scenario and treat it as non-fatal.
294 if (resp
.resp
.result
!= QMI_RESULT_SUCCESS_V01
&&
296 resp
.resp
.result
== QMI_ERR_MALFORMED_MSG_V01
)) {
297 ath10k_err(ar
, "failed to download board data file: %d\n",
303 remaining
-= req
->data_len
;
304 temp
+= req
->data_len
;
308 ath10k_dbg(ar
, ATH10K_DBG_QMI
, "qmi bdf download request completed\n");
318 static int ath10k_qmi_send_cal_report_req(struct ath10k_qmi
*qmi
)
320 struct wlfw_cal_report_resp_msg_v01 resp
= {};
321 struct wlfw_cal_report_req_msg_v01 req
= {};
322 struct ath10k
*ar
= qmi
->ar
;
323 struct ath10k_snoc
*ar_snoc
= ath10k_snoc_priv(ar
);
328 if (ar_snoc
->xo_cal_supported
) {
329 req
.xo_cal_data_valid
= 1;
330 req
.xo_cal_data
= ar_snoc
->xo_cal_data
;
333 ret
= qmi_txn_init(&qmi
->qmi_hdl
, &txn
, wlfw_cal_report_resp_msg_v01_ei
,
338 for (i
= 0; i
< QMI_WLFW_MAX_NUM_CAL_V01
; i
++) {
339 if (qmi
->cal_data
[i
].total_size
&&
340 qmi
->cal_data
[i
].data
) {
341 req
.meta_data
[j
] = qmi
->cal_data
[i
].cal_id
;
345 req
.meta_data_len
= j
;
347 ret
= qmi_send_request(&qmi
->qmi_hdl
, NULL
, &txn
,
348 QMI_WLFW_CAL_REPORT_REQ_V01
,
349 WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN
,
350 wlfw_cal_report_req_msg_v01_ei
, &req
);
352 qmi_txn_cancel(&txn
);
353 ath10k_err(ar
, "failed to send calibration request: %d\n", ret
);
357 ret
= qmi_txn_wait(&txn
, ATH10K_QMI_TIMEOUT
* HZ
);
361 if (resp
.resp
.result
!= QMI_RESULT_SUCCESS_V01
) {
362 ath10k_err(ar
, "calibration request rejected: %d\n", resp
.resp
.error
);
367 ath10k_dbg(ar
, ATH10K_DBG_QMI
, "qmi cal report request completed\n");
375 ath10k_qmi_mode_send_sync_msg(struct ath10k
*ar
, enum wlfw_driver_mode_enum_v01 mode
)
377 struct ath10k_snoc
*ar_snoc
= ath10k_snoc_priv(ar
);
378 struct ath10k_qmi
*qmi
= ar_snoc
->qmi
;
379 struct wlfw_wlan_mode_resp_msg_v01 resp
= {};
380 struct wlfw_wlan_mode_req_msg_v01 req
= {};
384 ret
= qmi_txn_init(&qmi
->qmi_hdl
, &txn
,
385 wlfw_wlan_mode_resp_msg_v01_ei
,
391 req
.hw_debug_valid
= 1;
394 ret
= qmi_send_request(&qmi
->qmi_hdl
, NULL
, &txn
,
395 QMI_WLFW_WLAN_MODE_REQ_V01
,
396 WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN
,
397 wlfw_wlan_mode_req_msg_v01_ei
, &req
);
399 qmi_txn_cancel(&txn
);
400 ath10k_err(ar
, "failed to send wlan mode %d request: %d\n", mode
, ret
);
404 ret
= qmi_txn_wait(&txn
, ATH10K_QMI_TIMEOUT
* HZ
);
408 if (resp
.resp
.result
!= QMI_RESULT_SUCCESS_V01
) {
409 ath10k_err(ar
, "more request rejected: %d\n", resp
.resp
.error
);
414 ath10k_dbg(ar
, ATH10K_DBG_QMI
, "qmi wlan mode req completed: %d\n", mode
);
422 ath10k_qmi_cfg_send_sync_msg(struct ath10k
*ar
,
423 struct ath10k_qmi_wlan_enable_cfg
*config
,
426 struct ath10k_snoc
*ar_snoc
= ath10k_snoc_priv(ar
);
427 struct ath10k_qmi
*qmi
= ar_snoc
->qmi
;
428 struct wlfw_wlan_cfg_resp_msg_v01 resp
= {};
429 struct wlfw_wlan_cfg_req_msg_v01
*req
;
434 req
= kzalloc(sizeof(*req
), GFP_KERNEL
);
438 ret
= qmi_txn_init(&qmi
->qmi_hdl
, &txn
,
439 wlfw_wlan_cfg_resp_msg_v01_ei
,
444 req
->host_version_valid
= 0;
446 req
->tgt_cfg_valid
= 1;
447 if (config
->num_ce_tgt_cfg
> QMI_WLFW_MAX_NUM_CE_V01
)
448 req
->tgt_cfg_len
= QMI_WLFW_MAX_NUM_CE_V01
;
450 req
->tgt_cfg_len
= config
->num_ce_tgt_cfg
;
451 for (i
= 0; i
< req
->tgt_cfg_len
; i
++) {
452 req
->tgt_cfg
[i
].pipe_num
= config
->ce_tgt_cfg
[i
].pipe_num
;
453 req
->tgt_cfg
[i
].pipe_dir
= config
->ce_tgt_cfg
[i
].pipe_dir
;
454 req
->tgt_cfg
[i
].nentries
= config
->ce_tgt_cfg
[i
].nentries
;
455 req
->tgt_cfg
[i
].nbytes_max
= config
->ce_tgt_cfg
[i
].nbytes_max
;
456 req
->tgt_cfg
[i
].flags
= config
->ce_tgt_cfg
[i
].flags
;
459 req
->svc_cfg_valid
= 1;
460 if (config
->num_ce_svc_pipe_cfg
> QMI_WLFW_MAX_NUM_SVC_V01
)
461 req
->svc_cfg_len
= QMI_WLFW_MAX_NUM_SVC_V01
;
463 req
->svc_cfg_len
= config
->num_ce_svc_pipe_cfg
;
464 for (i
= 0; i
< req
->svc_cfg_len
; i
++) {
465 req
->svc_cfg
[i
].service_id
= config
->ce_svc_cfg
[i
].service_id
;
466 req
->svc_cfg
[i
].pipe_dir
= config
->ce_svc_cfg
[i
].pipe_dir
;
467 req
->svc_cfg
[i
].pipe_num
= config
->ce_svc_cfg
[i
].pipe_num
;
470 req
->shadow_reg_valid
= 1;
471 if (config
->num_shadow_reg_cfg
>
472 QMI_WLFW_MAX_NUM_SHADOW_REG_V01
)
473 req
->shadow_reg_len
= QMI_WLFW_MAX_NUM_SHADOW_REG_V01
;
475 req
->shadow_reg_len
= config
->num_shadow_reg_cfg
;
477 memcpy(req
->shadow_reg
, config
->shadow_reg_cfg
,
478 sizeof(struct wlfw_shadow_reg_cfg_s_v01
) * req
->shadow_reg_len
);
480 ret
= qmi_send_request(&qmi
->qmi_hdl
, NULL
, &txn
,
481 QMI_WLFW_WLAN_CFG_REQ_V01
,
482 WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN
,
483 wlfw_wlan_cfg_req_msg_v01_ei
, req
);
485 qmi_txn_cancel(&txn
);
486 ath10k_err(ar
, "failed to send config request: %d\n", ret
);
490 ret
= qmi_txn_wait(&txn
, ATH10K_QMI_TIMEOUT
* HZ
);
494 if (resp
.resp
.result
!= QMI_RESULT_SUCCESS_V01
) {
495 ath10k_err(ar
, "config request rejected: %d\n", resp
.resp
.error
);
500 ath10k_dbg(ar
, ATH10K_DBG_QMI
, "qmi config request completed\n");
509 int ath10k_qmi_wlan_enable(struct ath10k
*ar
,
510 struct ath10k_qmi_wlan_enable_cfg
*config
,
511 enum wlfw_driver_mode_enum_v01 mode
,
516 ath10k_dbg(ar
, ATH10K_DBG_QMI
, "qmi mode %d config %p\n",
519 ret
= ath10k_qmi_cfg_send_sync_msg(ar
, config
, version
);
521 ath10k_err(ar
, "failed to send qmi config: %d\n", ret
);
525 ret
= ath10k_qmi_mode_send_sync_msg(ar
, mode
);
527 ath10k_err(ar
, "failed to send qmi mode: %d\n", ret
);
534 int ath10k_qmi_wlan_disable(struct ath10k
*ar
)
536 return ath10k_qmi_mode_send_sync_msg(ar
, QMI_WLFW_OFF_V01
);
539 static int ath10k_qmi_cap_send_sync_msg(struct ath10k_qmi
*qmi
)
541 struct wlfw_cap_resp_msg_v01
*resp
;
542 struct wlfw_cap_req_msg_v01 req
= {};
543 struct ath10k
*ar
= qmi
->ar
;
544 struct ath10k_snoc
*ar_snoc
= ath10k_snoc_priv(ar
);
548 resp
= kzalloc(sizeof(*resp
), GFP_KERNEL
);
552 ret
= qmi_txn_init(&qmi
->qmi_hdl
, &txn
, wlfw_cap_resp_msg_v01_ei
, resp
);
556 ret
= qmi_send_request(&qmi
->qmi_hdl
, NULL
, &txn
,
557 QMI_WLFW_CAP_REQ_V01
,
558 WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN
,
559 wlfw_cap_req_msg_v01_ei
, &req
);
561 qmi_txn_cancel(&txn
);
562 ath10k_err(ar
, "failed to send capability request: %d\n", ret
);
566 ret
= qmi_txn_wait(&txn
, ATH10K_QMI_TIMEOUT
* HZ
);
570 if (resp
->resp
.result
!= QMI_RESULT_SUCCESS_V01
) {
571 ath10k_err(ar
, "capability req rejected: %d\n", resp
->resp
.error
);
576 if (resp
->chip_info_valid
) {
577 qmi
->chip_info
.chip_id
= resp
->chip_info
.chip_id
;
578 qmi
->chip_info
.chip_family
= resp
->chip_info
.chip_family
;
581 if (resp
->board_info_valid
)
582 qmi
->board_info
.board_id
= resp
->board_info
.board_id
;
584 qmi
->board_info
.board_id
= 0xFF;
586 if (resp
->soc_info_valid
)
587 qmi
->soc_info
.soc_id
= resp
->soc_info
.soc_id
;
589 if (resp
->fw_version_info_valid
) {
590 qmi
->fw_version
= resp
->fw_version_info
.fw_version
;
591 strlcpy(qmi
->fw_build_timestamp
, resp
->fw_version_info
.fw_build_timestamp
,
592 sizeof(qmi
->fw_build_timestamp
));
595 if (resp
->fw_build_id_valid
)
596 strlcpy(qmi
->fw_build_id
, resp
->fw_build_id
,
597 MAX_BUILD_ID_LEN
+ 1);
599 if (!test_bit(ATH10K_SNOC_FLAG_REGISTERED
, &ar_snoc
->flags
)) {
600 ath10k_info(ar
, "qmi chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x",
601 qmi
->chip_info
.chip_id
, qmi
->chip_info
.chip_family
,
602 qmi
->board_info
.board_id
, qmi
->soc_info
.soc_id
);
603 ath10k_info(ar
, "qmi fw_version 0x%x fw_build_timestamp %s fw_build_id %s",
604 qmi
->fw_version
, qmi
->fw_build_timestamp
, qmi
->fw_build_id
);
615 static int ath10k_qmi_host_cap_send_sync(struct ath10k_qmi
*qmi
)
617 struct wlfw_host_cap_resp_msg_v01 resp
= {};
618 struct wlfw_host_cap_req_msg_v01 req
= {};
619 struct qmi_elem_info
*req_ei
;
620 struct ath10k
*ar
= qmi
->ar
;
621 struct ath10k_snoc
*ar_snoc
= ath10k_snoc_priv(ar
);
625 req
.daemon_support_valid
= 1;
626 req
.daemon_support
= 0;
628 ret
= qmi_txn_init(&qmi
->qmi_hdl
, &txn
, wlfw_host_cap_resp_msg_v01_ei
,
633 if (test_bit(ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK
, &ar_snoc
->flags
))
634 req_ei
= wlfw_host_cap_8bit_req_msg_v01_ei
;
636 req_ei
= wlfw_host_cap_req_msg_v01_ei
;
638 ret
= qmi_send_request(&qmi
->qmi_hdl
, NULL
, &txn
,
639 QMI_WLFW_HOST_CAP_REQ_V01
,
640 WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN
,
643 qmi_txn_cancel(&txn
);
644 ath10k_err(ar
, "failed to send host capability request: %d\n", ret
);
648 ret
= qmi_txn_wait(&txn
, ATH10K_QMI_TIMEOUT
* HZ
);
652 /* older FW didn't support this request, which is not fatal */
653 if (resp
.resp
.result
!= QMI_RESULT_SUCCESS_V01
&&
654 resp
.resp
.error
!= QMI_ERR_NOT_SUPPORTED_V01
) {
655 ath10k_err(ar
, "host capability request rejected: %d\n", resp
.resp
.error
);
660 ath10k_dbg(ar
, ATH10K_DBG_QMI
, "qmi host capability request completed\n");
667 int ath10k_qmi_set_fw_log_mode(struct ath10k
*ar
, u8 fw_log_mode
)
669 struct ath10k_snoc
*ar_snoc
= ath10k_snoc_priv(ar
);
670 struct wlfw_ini_resp_msg_v01 resp
= {};
671 struct ath10k_qmi
*qmi
= ar_snoc
->qmi
;
672 struct wlfw_ini_req_msg_v01 req
= {};
676 req
.enablefwlog_valid
= 1;
677 req
.enablefwlog
= fw_log_mode
;
679 ret
= qmi_txn_init(&qmi
->qmi_hdl
, &txn
, wlfw_ini_resp_msg_v01_ei
,
684 ret
= qmi_send_request(&qmi
->qmi_hdl
, NULL
, &txn
,
685 QMI_WLFW_INI_REQ_V01
,
686 WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN
,
687 wlfw_ini_req_msg_v01_ei
, &req
);
689 qmi_txn_cancel(&txn
);
690 ath10k_err(ar
, "failed to send fw log request: %d\n", ret
);
694 ret
= qmi_txn_wait(&txn
, ATH10K_QMI_TIMEOUT
* HZ
);
698 if (resp
.resp
.result
!= QMI_RESULT_SUCCESS_V01
) {
699 ath10k_err(ar
, "fw log request rejected: %d\n",
704 ath10k_dbg(ar
, ATH10K_DBG_QMI
, "qmi fw log request completed, mode: %d\n",
713 ath10k_qmi_ind_register_send_sync_msg(struct ath10k_qmi
*qmi
)
715 struct wlfw_ind_register_resp_msg_v01 resp
= {};
716 struct wlfw_ind_register_req_msg_v01 req
= {};
717 struct ath10k
*ar
= qmi
->ar
;
718 struct ath10k_snoc
*ar_snoc
= ath10k_snoc_priv(ar
);
722 req
.client_id_valid
= 1;
723 req
.client_id
= ATH10K_QMI_CLIENT_ID
;
724 req
.fw_ready_enable_valid
= 1;
725 req
.fw_ready_enable
= 1;
726 req
.msa_ready_enable_valid
= 1;
727 req
.msa_ready_enable
= 1;
729 if (ar_snoc
->xo_cal_supported
) {
730 req
.xo_cal_enable_valid
= 1;
731 req
.xo_cal_enable
= 1;
734 ret
= qmi_txn_init(&qmi
->qmi_hdl
, &txn
,
735 wlfw_ind_register_resp_msg_v01_ei
, &resp
);
739 ret
= qmi_send_request(&qmi
->qmi_hdl
, NULL
, &txn
,
740 QMI_WLFW_IND_REGISTER_REQ_V01
,
741 WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN
,
742 wlfw_ind_register_req_msg_v01_ei
, &req
);
744 qmi_txn_cancel(&txn
);
745 ath10k_err(ar
, "failed to send indication registered request: %d\n", ret
);
749 ret
= qmi_txn_wait(&txn
, ATH10K_QMI_TIMEOUT
* HZ
);
753 if (resp
.resp
.result
!= QMI_RESULT_SUCCESS_V01
) {
754 ath10k_err(ar
, "indication request rejected: %d\n", resp
.resp
.error
);
759 if (resp
.fw_status_valid
) {
760 if (resp
.fw_status
& QMI_WLFW_FW_READY_V01
)
761 qmi
->fw_ready
= true;
763 ath10k_dbg(ar
, ATH10K_DBG_QMI
, "qmi indication register request completed\n");
770 static void ath10k_qmi_event_server_arrive(struct ath10k_qmi
*qmi
)
772 struct ath10k
*ar
= qmi
->ar
;
775 ret
= ath10k_qmi_ind_register_send_sync_msg(qmi
);
780 ath10k_snoc_fw_indication(ar
, ATH10K_QMI_EVENT_FW_READY_IND
);
784 ret
= ath10k_qmi_host_cap_send_sync(qmi
);
788 ret
= ath10k_qmi_msa_mem_info_send_sync_msg(qmi
);
793 * HACK: sleep for a while inbetween receiving the msa info response
794 * and the XPU update to prevent SDM845 from crashing due to a security
795 * violation, when running MPSS.AT.4.0.c2-01184-SDM845_GEN_PACK-1.
799 ret
= ath10k_qmi_setup_msa_permissions(qmi
);
803 ret
= ath10k_qmi_msa_ready_send_sync_msg(qmi
);
807 ret
= ath10k_qmi_cap_send_sync_msg(qmi
);
814 ath10k_qmi_remove_msa_permission(qmi
);
817 static int ath10k_qmi_fetch_board_file(struct ath10k_qmi
*qmi
)
819 struct ath10k
*ar
= qmi
->ar
;
821 ar
->hif
.bus
= ATH10K_BUS_SNOC
;
822 ar
->id
.qmi_ids_valid
= true;
823 ar
->id
.qmi_board_id
= qmi
->board_info
.board_id
;
824 ar
->hw_params
.fw
.dir
= WCN3990_HW_1_0_FW_DIR
;
826 return ath10k_core_fetch_board_file(qmi
->ar
, ATH10K_BD_IE_BOARD
);
830 ath10k_qmi_driver_event_post(struct ath10k_qmi
*qmi
,
831 enum ath10k_qmi_driver_event_type type
,
834 struct ath10k_qmi_driver_event
*event
;
836 event
= kzalloc(sizeof(*event
), GFP_ATOMIC
);
843 spin_lock(&qmi
->event_lock
);
844 list_add_tail(&event
->list
, &qmi
->event_list
);
845 spin_unlock(&qmi
->event_lock
);
847 queue_work(qmi
->event_wq
, &qmi
->event_work
);
852 static void ath10k_qmi_event_server_exit(struct ath10k_qmi
*qmi
)
854 struct ath10k
*ar
= qmi
->ar
;
855 struct ath10k_snoc
*ar_snoc
= ath10k_snoc_priv(ar
);
857 ath10k_qmi_remove_msa_permission(qmi
);
858 ath10k_core_free_board_files(ar
);
859 if (!test_bit(ATH10K_SNOC_FLAG_UNREGISTERING
, &ar_snoc
->flags
))
860 ath10k_snoc_fw_crashed_dump(ar
);
862 ath10k_snoc_fw_indication(ar
, ATH10K_QMI_EVENT_FW_DOWN_IND
);
863 ath10k_dbg(ar
, ATH10K_DBG_QMI
, "wifi fw qmi service disconnected\n");
/* MSA-ready indication handler: fetch the board file, download the BDF
 * and report available calibration data.  Errors abort the sequence.
 */
static void ath10k_qmi_event_msa_ready(struct ath10k_qmi *qmi)
{
	int ret;

	ret = ath10k_qmi_fetch_board_file(qmi);
	if (ret)
		return;

	ret = ath10k_qmi_bdf_dnld_send_sync(qmi);
	if (ret)
		return;

	ath10k_qmi_send_cal_report_req(qmi);
}
884 static int ath10k_qmi_event_fw_ready_ind(struct ath10k_qmi
*qmi
)
886 struct ath10k
*ar
= qmi
->ar
;
888 ath10k_dbg(ar
, ATH10K_DBG_QMI
, "wifi fw ready event received\n");
889 ath10k_snoc_fw_indication(ar
, ATH10K_QMI_EVENT_FW_READY_IND
);
894 static void ath10k_qmi_fw_ready_ind(struct qmi_handle
*qmi_hdl
,
895 struct sockaddr_qrtr
*sq
,
896 struct qmi_txn
*txn
, const void *data
)
898 struct ath10k_qmi
*qmi
= container_of(qmi_hdl
, struct ath10k_qmi
, qmi_hdl
);
900 ath10k_qmi_driver_event_post(qmi
, ATH10K_QMI_EVENT_FW_READY_IND
, NULL
);
903 static void ath10k_qmi_msa_ready_ind(struct qmi_handle
*qmi_hdl
,
904 struct sockaddr_qrtr
*sq
,
905 struct qmi_txn
*txn
, const void *data
)
907 struct ath10k_qmi
*qmi
= container_of(qmi_hdl
, struct ath10k_qmi
, qmi_hdl
);
909 ath10k_qmi_driver_event_post(qmi
, ATH10K_QMI_EVENT_MSA_READY_IND
, NULL
);
912 static struct qmi_msg_handler qmi_msg_handler
[] = {
914 .type
= QMI_INDICATION
,
915 .msg_id
= QMI_WLFW_FW_READY_IND_V01
,
916 .ei
= wlfw_fw_ready_ind_msg_v01_ei
,
917 .decoded_size
= sizeof(struct wlfw_fw_ready_ind_msg_v01
),
918 .fn
= ath10k_qmi_fw_ready_ind
,
921 .type
= QMI_INDICATION
,
922 .msg_id
= QMI_WLFW_MSA_READY_IND_V01
,
923 .ei
= wlfw_msa_ready_ind_msg_v01_ei
,
924 .decoded_size
= sizeof(struct wlfw_msa_ready_ind_msg_v01
),
925 .fn
= ath10k_qmi_msa_ready_ind
,
930 static int ath10k_qmi_new_server(struct qmi_handle
*qmi_hdl
,
931 struct qmi_service
*service
)
933 struct ath10k_qmi
*qmi
= container_of(qmi_hdl
, struct ath10k_qmi
, qmi_hdl
);
934 struct sockaddr_qrtr
*sq
= &qmi
->sq
;
935 struct ath10k
*ar
= qmi
->ar
;
938 sq
->sq_family
= AF_QIPCRTR
;
939 sq
->sq_node
= service
->node
;
940 sq
->sq_port
= service
->port
;
942 ath10k_dbg(ar
, ATH10K_DBG_QMI
, "wifi fw qmi service found\n");
944 ret
= kernel_connect(qmi_hdl
->sock
, (struct sockaddr
*)&qmi
->sq
,
947 ath10k_err(ar
, "failed to connect to a remote QMI service port\n");
951 ath10k_dbg(ar
, ATH10K_DBG_QMI
, "qmi wifi fw qmi service connected\n");
952 ath10k_qmi_driver_event_post(qmi
, ATH10K_QMI_EVENT_SERVER_ARRIVE
, NULL
);
957 static void ath10k_qmi_del_server(struct qmi_handle
*qmi_hdl
,
958 struct qmi_service
*service
)
960 struct ath10k_qmi
*qmi
=
961 container_of(qmi_hdl
, struct ath10k_qmi
, qmi_hdl
);
963 qmi
->fw_ready
= false;
964 ath10k_qmi_driver_event_post(qmi
, ATH10K_QMI_EVENT_SERVER_EXIT
, NULL
);
967 static struct qmi_ops ath10k_qmi_ops
= {
968 .new_server
= ath10k_qmi_new_server
,
969 .del_server
= ath10k_qmi_del_server
,
972 static void ath10k_qmi_driver_event_work(struct work_struct
*work
)
974 struct ath10k_qmi
*qmi
= container_of(work
, struct ath10k_qmi
,
976 struct ath10k_qmi_driver_event
*event
;
977 struct ath10k
*ar
= qmi
->ar
;
979 spin_lock(&qmi
->event_lock
);
980 while (!list_empty(&qmi
->event_list
)) {
981 event
= list_first_entry(&qmi
->event_list
,
982 struct ath10k_qmi_driver_event
, list
);
983 list_del(&event
->list
);
984 spin_unlock(&qmi
->event_lock
);
986 switch (event
->type
) {
987 case ATH10K_QMI_EVENT_SERVER_ARRIVE
:
988 ath10k_qmi_event_server_arrive(qmi
);
990 case ATH10K_QMI_EVENT_SERVER_EXIT
:
991 ath10k_qmi_event_server_exit(qmi
);
993 case ATH10K_QMI_EVENT_FW_READY_IND
:
994 ath10k_qmi_event_fw_ready_ind(qmi
);
996 case ATH10K_QMI_EVENT_MSA_READY_IND
:
997 ath10k_qmi_event_msa_ready(qmi
);
1000 ath10k_warn(ar
, "invalid event type: %d", event
->type
);
1004 spin_lock(&qmi
->event_lock
);
1006 spin_unlock(&qmi
->event_lock
);
1009 static int ath10k_qmi_setup_msa_resources(struct ath10k_qmi
*qmi
, u32 msa_size
)
1011 struct ath10k
*ar
= qmi
->ar
;
1012 struct device
*dev
= ar
->dev
;
1013 struct device_node
*node
;
1017 node
= of_parse_phandle(dev
->of_node
, "memory-region", 0);
1019 ret
= of_address_to_resource(node
, 0, &r
);
1021 dev_err(dev
, "failed to resolve msa fixed region\n");
1026 qmi
->msa_pa
= r
.start
;
1027 qmi
->msa_mem_size
= resource_size(&r
);
1028 qmi
->msa_va
= devm_memremap(dev
, qmi
->msa_pa
, qmi
->msa_mem_size
,
1030 if (IS_ERR(qmi
->msa_va
)) {
1031 dev_err(dev
, "failed to map memory region: %pa\n", &r
.start
);
1032 return PTR_ERR(qmi
->msa_va
);
1035 qmi
->msa_va
= dmam_alloc_coherent(dev
, msa_size
,
1036 &qmi
->msa_pa
, GFP_KERNEL
);
1038 ath10k_err(ar
, "failed to allocate dma memory for msa region\n");
1041 qmi
->msa_mem_size
= msa_size
;
1044 if (of_property_read_bool(dev
->of_node
, "qcom,msa-fixed-perm"))
1045 qmi
->msa_fixed_perm
= true;
1047 ath10k_dbg(ar
, ATH10K_DBG_QMI
, "msa pa: %pad , msa va: 0x%p\n",
1054 int ath10k_qmi_init(struct ath10k
*ar
, u32 msa_size
)
1056 struct ath10k_snoc
*ar_snoc
= ath10k_snoc_priv(ar
);
1057 struct ath10k_qmi
*qmi
;
1060 qmi
= kzalloc(sizeof(*qmi
), GFP_KERNEL
);
1067 ret
= ath10k_qmi_setup_msa_resources(qmi
, msa_size
);
1071 ret
= qmi_handle_init(&qmi
->qmi_hdl
,
1072 WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN
,
1073 &ath10k_qmi_ops
, qmi_msg_handler
);
1077 qmi
->event_wq
= alloc_workqueue("ath10k_qmi_driver_event",
1079 if (!qmi
->event_wq
) {
1080 ath10k_err(ar
, "failed to allocate workqueue\n");
1082 goto err_release_qmi_handle
;
1085 INIT_LIST_HEAD(&qmi
->event_list
);
1086 spin_lock_init(&qmi
->event_lock
);
1087 INIT_WORK(&qmi
->event_work
, ath10k_qmi_driver_event_work
);
1089 ret
= qmi_add_lookup(&qmi
->qmi_hdl
, WLFW_SERVICE_ID_V01
,
1090 WLFW_SERVICE_VERS_V01
, 0);
1092 goto err_qmi_lookup
;
1097 destroy_workqueue(qmi
->event_wq
);
1099 err_release_qmi_handle
:
1100 qmi_handle_release(&qmi
->qmi_hdl
);
1107 int ath10k_qmi_deinit(struct ath10k
*ar
)
1109 struct ath10k_snoc
*ar_snoc
= ath10k_snoc_priv(ar
);
1110 struct ath10k_qmi
*qmi
= ar_snoc
->qmi
;
1112 qmi_handle_release(&qmi
->qmi_hdl
);
1113 cancel_work_sync(&qmi
->event_work
);
1114 destroy_workqueue(qmi
->event_wq
);
1116 ar_snoc
->qmi
= NULL
;