gpio: rcar: Fix runtime PM imbalance on error
[linux/fpc-iii.git] / drivers / net / wireless / ath / ath10k / qmi.c
blob85dce43c5439b6516ba59a7d64d46769522f2f65
1 // SPDX-License-Identifier: ISC
2 /*
3 * Copyright (c) 2018 The Linux Foundation. All rights reserved.
4 */
6 #include <linux/completion.h>
7 #include <linux/device.h>
8 #include <linux/debugfs.h>
9 #include <linux/idr.h>
10 #include <linux/kernel.h>
11 #include <linux/of.h>
12 #include <linux/of_address.h>
13 #include <linux/module.h>
14 #include <linux/net.h>
15 #include <linux/platform_device.h>
16 #include <linux/qcom_scm.h>
17 #include <linux/string.h>
18 #include <net/sock.h>
20 #include "debug.h"
21 #include "snoc.h"
/* QMI client id presented to the WLAN firmware ("LENK" in little-endian ASCII) */
#define ATH10K_QMI_CLIENT_ID		0x4b4e454c
/* Timeout, in seconds, for every synchronous QMI transaction in this file */
#define ATH10K_QMI_TIMEOUT		30
26 static int ath10k_qmi_map_msa_permission(struct ath10k_qmi *qmi,
27 struct ath10k_msa_mem_info *mem_info)
29 struct qcom_scm_vmperm dst_perms[3];
30 struct ath10k *ar = qmi->ar;
31 unsigned int src_perms;
32 u32 perm_count;
33 int ret;
35 src_perms = BIT(QCOM_SCM_VMID_HLOS);
37 dst_perms[0].vmid = QCOM_SCM_VMID_MSS_MSA;
38 dst_perms[0].perm = QCOM_SCM_PERM_RW;
39 dst_perms[1].vmid = QCOM_SCM_VMID_WLAN;
40 dst_perms[1].perm = QCOM_SCM_PERM_RW;
42 if (mem_info->secure) {
43 perm_count = 2;
44 } else {
45 dst_perms[2].vmid = QCOM_SCM_VMID_WLAN_CE;
46 dst_perms[2].perm = QCOM_SCM_PERM_RW;
47 perm_count = 3;
50 ret = qcom_scm_assign_mem(mem_info->addr, mem_info->size,
51 &src_perms, dst_perms, perm_count);
52 if (ret < 0)
53 ath10k_err(ar, "failed to assign msa map permissions: %d\n", ret);
55 return ret;
58 static int ath10k_qmi_unmap_msa_permission(struct ath10k_qmi *qmi,
59 struct ath10k_msa_mem_info *mem_info)
61 struct qcom_scm_vmperm dst_perms;
62 struct ath10k *ar = qmi->ar;
63 unsigned int src_perms;
64 int ret;
66 src_perms = BIT(QCOM_SCM_VMID_MSS_MSA) | BIT(QCOM_SCM_VMID_WLAN);
68 if (!mem_info->secure)
69 src_perms |= BIT(QCOM_SCM_VMID_WLAN_CE);
71 dst_perms.vmid = QCOM_SCM_VMID_HLOS;
72 dst_perms.perm = QCOM_SCM_PERM_RW;
74 ret = qcom_scm_assign_mem(mem_info->addr, mem_info->size,
75 &src_perms, &dst_perms, 1);
76 if (ret < 0)
77 ath10k_err(ar, "failed to unmap msa permissions: %d\n", ret);
79 return ret;
82 static int ath10k_qmi_setup_msa_permissions(struct ath10k_qmi *qmi)
84 int ret;
85 int i;
87 if (qmi->msa_fixed_perm)
88 return 0;
90 for (i = 0; i < qmi->nr_mem_region; i++) {
91 ret = ath10k_qmi_map_msa_permission(qmi, &qmi->mem_region[i]);
92 if (ret)
93 goto err_unmap;
96 return 0;
98 err_unmap:
99 for (i--; i >= 0; i--)
100 ath10k_qmi_unmap_msa_permission(qmi, &qmi->mem_region[i]);
101 return ret;
104 static void ath10k_qmi_remove_msa_permission(struct ath10k_qmi *qmi)
106 int i;
108 if (qmi->msa_fixed_perm)
109 return;
111 for (i = 0; i < qmi->nr_mem_region; i++)
112 ath10k_qmi_unmap_msa_permission(qmi, &qmi->mem_region[i]);
115 static int ath10k_qmi_msa_mem_info_send_sync_msg(struct ath10k_qmi *qmi)
117 struct wlfw_msa_info_resp_msg_v01 resp = {};
118 struct wlfw_msa_info_req_msg_v01 req = {};
119 struct ath10k *ar = qmi->ar;
120 phys_addr_t max_mapped_addr;
121 struct qmi_txn txn;
122 int ret;
123 int i;
125 req.msa_addr = qmi->msa_pa;
126 req.size = qmi->msa_mem_size;
128 ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
129 wlfw_msa_info_resp_msg_v01_ei, &resp);
130 if (ret < 0)
131 goto out;
133 ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
134 QMI_WLFW_MSA_INFO_REQ_V01,
135 WLFW_MSA_INFO_REQ_MSG_V01_MAX_MSG_LEN,
136 wlfw_msa_info_req_msg_v01_ei, &req);
137 if (ret < 0) {
138 qmi_txn_cancel(&txn);
139 ath10k_err(ar, "failed to send msa mem info req: %d\n", ret);
140 goto out;
143 ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
144 if (ret < 0)
145 goto out;
147 if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
148 ath10k_err(ar, "msa info req rejected: %d\n", resp.resp.error);
149 ret = -EINVAL;
150 goto out;
153 if (resp.mem_region_info_len > QMI_WLFW_MAX_MEM_REG_V01) {
154 ath10k_err(ar, "invalid memory region length received: %d\n",
155 resp.mem_region_info_len);
156 ret = -EINVAL;
157 goto out;
160 max_mapped_addr = qmi->msa_pa + qmi->msa_mem_size;
161 qmi->nr_mem_region = resp.mem_region_info_len;
162 for (i = 0; i < resp.mem_region_info_len; i++) {
163 if (resp.mem_region_info[i].size > qmi->msa_mem_size ||
164 resp.mem_region_info[i].region_addr > max_mapped_addr ||
165 resp.mem_region_info[i].region_addr < qmi->msa_pa ||
166 resp.mem_region_info[i].size +
167 resp.mem_region_info[i].region_addr > max_mapped_addr) {
168 ath10k_err(ar, "received out of range memory region address 0x%llx with size 0x%x, aborting\n",
169 resp.mem_region_info[i].region_addr,
170 resp.mem_region_info[i].size);
171 ret = -EINVAL;
172 goto fail_unwind;
174 qmi->mem_region[i].addr = resp.mem_region_info[i].region_addr;
175 qmi->mem_region[i].size = resp.mem_region_info[i].size;
176 qmi->mem_region[i].secure = resp.mem_region_info[i].secure_flag;
177 ath10k_dbg(ar, ATH10K_DBG_QMI,
178 "qmi msa mem region %d addr 0x%pa size 0x%x flag 0x%08x\n",
179 i, &qmi->mem_region[i].addr,
180 qmi->mem_region[i].size,
181 qmi->mem_region[i].secure);
184 ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa mem info request completed\n");
185 return 0;
187 fail_unwind:
188 memset(&qmi->mem_region[0], 0, sizeof(qmi->mem_region[0]) * i);
189 out:
190 return ret;
193 static int ath10k_qmi_msa_ready_send_sync_msg(struct ath10k_qmi *qmi)
195 struct wlfw_msa_ready_resp_msg_v01 resp = {};
196 struct wlfw_msa_ready_req_msg_v01 req = {};
197 struct ath10k *ar = qmi->ar;
198 struct qmi_txn txn;
199 int ret;
201 ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
202 wlfw_msa_ready_resp_msg_v01_ei, &resp);
203 if (ret < 0)
204 goto out;
206 ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
207 QMI_WLFW_MSA_READY_REQ_V01,
208 WLFW_MSA_READY_REQ_MSG_V01_MAX_MSG_LEN,
209 wlfw_msa_ready_req_msg_v01_ei, &req);
210 if (ret < 0) {
211 qmi_txn_cancel(&txn);
212 ath10k_err(ar, "failed to send msa mem ready request: %d\n", ret);
213 goto out;
216 ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
217 if (ret < 0)
218 goto out;
220 if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
221 ath10k_err(ar, "msa ready request rejected: %d\n", resp.resp.error);
222 ret = -EINVAL;
225 ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa mem ready request completed\n");
226 return 0;
228 out:
229 return ret;
232 static int ath10k_qmi_bdf_dnld_send_sync(struct ath10k_qmi *qmi)
234 struct wlfw_bdf_download_resp_msg_v01 resp = {};
235 struct wlfw_bdf_download_req_msg_v01 *req;
236 struct ath10k *ar = qmi->ar;
237 unsigned int remaining;
238 struct qmi_txn txn;
239 const u8 *temp;
240 int ret;
242 req = kzalloc(sizeof(*req), GFP_KERNEL);
243 if (!req)
244 return -ENOMEM;
246 temp = ar->normal_mode_fw.board_data;
247 remaining = ar->normal_mode_fw.board_len;
249 while (remaining) {
250 req->valid = 1;
251 req->file_id_valid = 1;
252 req->file_id = 0;
253 req->total_size_valid = 1;
254 req->total_size = ar->normal_mode_fw.board_len;
255 req->seg_id_valid = 1;
256 req->data_valid = 1;
257 req->end_valid = 1;
259 if (remaining > QMI_WLFW_MAX_DATA_SIZE_V01) {
260 req->data_len = QMI_WLFW_MAX_DATA_SIZE_V01;
261 } else {
262 req->data_len = remaining;
263 req->end = 1;
266 memcpy(req->data, temp, req->data_len);
268 ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
269 wlfw_bdf_download_resp_msg_v01_ei,
270 &resp);
271 if (ret < 0)
272 goto out;
274 ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
275 QMI_WLFW_BDF_DOWNLOAD_REQ_V01,
276 WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
277 wlfw_bdf_download_req_msg_v01_ei, req);
278 if (ret < 0) {
279 qmi_txn_cancel(&txn);
280 goto out;
283 ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
285 if (ret < 0)
286 goto out;
288 /* end = 1 triggers a CRC check on the BDF. If this fails, we
289 * get a QMI_ERR_MALFORMED_MSG_V01 error, but the FW is still
290 * willing to use the BDF. For some platforms, all the valid
291 * released BDFs fail this CRC check, so attempt to detect this
292 * scenario and treat it as non-fatal.
294 if (resp.resp.result != QMI_RESULT_SUCCESS_V01 &&
295 !(req->end == 1 &&
296 resp.resp.result == QMI_ERR_MALFORMED_MSG_V01)) {
297 ath10k_err(ar, "failed to download board data file: %d\n",
298 resp.resp.error);
299 ret = -EINVAL;
300 goto out;
303 remaining -= req->data_len;
304 temp += req->data_len;
305 req->seg_id++;
308 ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi bdf download request completed\n");
310 kfree(req);
311 return 0;
313 out:
314 kfree(req);
315 return ret;
318 static int ath10k_qmi_send_cal_report_req(struct ath10k_qmi *qmi)
320 struct wlfw_cal_report_resp_msg_v01 resp = {};
321 struct wlfw_cal_report_req_msg_v01 req = {};
322 struct ath10k *ar = qmi->ar;
323 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
324 struct qmi_txn txn;
325 int i, j = 0;
326 int ret;
328 if (ar_snoc->xo_cal_supported) {
329 req.xo_cal_data_valid = 1;
330 req.xo_cal_data = ar_snoc->xo_cal_data;
333 ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_cal_report_resp_msg_v01_ei,
334 &resp);
335 if (ret < 0)
336 goto out;
338 for (i = 0; i < QMI_WLFW_MAX_NUM_CAL_V01; i++) {
339 if (qmi->cal_data[i].total_size &&
340 qmi->cal_data[i].data) {
341 req.meta_data[j] = qmi->cal_data[i].cal_id;
342 j++;
345 req.meta_data_len = j;
347 ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
348 QMI_WLFW_CAL_REPORT_REQ_V01,
349 WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN,
350 wlfw_cal_report_req_msg_v01_ei, &req);
351 if (ret < 0) {
352 qmi_txn_cancel(&txn);
353 ath10k_err(ar, "failed to send calibration request: %d\n", ret);
354 goto out;
357 ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
358 if (ret < 0)
359 goto out;
361 if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
362 ath10k_err(ar, "calibration request rejected: %d\n", resp.resp.error);
363 ret = -EINVAL;
364 goto out;
367 ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi cal report request completed\n");
368 return 0;
370 out:
371 return ret;
374 static int
375 ath10k_qmi_mode_send_sync_msg(struct ath10k *ar, enum wlfw_driver_mode_enum_v01 mode)
377 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
378 struct ath10k_qmi *qmi = ar_snoc->qmi;
379 struct wlfw_wlan_mode_resp_msg_v01 resp = {};
380 struct wlfw_wlan_mode_req_msg_v01 req = {};
381 struct qmi_txn txn;
382 int ret;
384 ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
385 wlfw_wlan_mode_resp_msg_v01_ei,
386 &resp);
387 if (ret < 0)
388 goto out;
390 req.mode = mode;
391 req.hw_debug_valid = 1;
392 req.hw_debug = 0;
394 ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
395 QMI_WLFW_WLAN_MODE_REQ_V01,
396 WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN,
397 wlfw_wlan_mode_req_msg_v01_ei, &req);
398 if (ret < 0) {
399 qmi_txn_cancel(&txn);
400 ath10k_err(ar, "failed to send wlan mode %d request: %d\n", mode, ret);
401 goto out;
404 ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
405 if (ret < 0)
406 goto out;
408 if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
409 ath10k_err(ar, "more request rejected: %d\n", resp.resp.error);
410 ret = -EINVAL;
411 goto out;
414 ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi wlan mode req completed: %d\n", mode);
415 return 0;
417 out:
418 return ret;
421 static int
422 ath10k_qmi_cfg_send_sync_msg(struct ath10k *ar,
423 struct ath10k_qmi_wlan_enable_cfg *config,
424 const char *version)
426 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
427 struct ath10k_qmi *qmi = ar_snoc->qmi;
428 struct wlfw_wlan_cfg_resp_msg_v01 resp = {};
429 struct wlfw_wlan_cfg_req_msg_v01 *req;
430 struct qmi_txn txn;
431 int ret;
432 u32 i;
434 req = kzalloc(sizeof(*req), GFP_KERNEL);
435 if (!req)
436 return -ENOMEM;
438 ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
439 wlfw_wlan_cfg_resp_msg_v01_ei,
440 &resp);
441 if (ret < 0)
442 goto out;
444 req->host_version_valid = 0;
446 req->tgt_cfg_valid = 1;
447 if (config->num_ce_tgt_cfg > QMI_WLFW_MAX_NUM_CE_V01)
448 req->tgt_cfg_len = QMI_WLFW_MAX_NUM_CE_V01;
449 else
450 req->tgt_cfg_len = config->num_ce_tgt_cfg;
451 for (i = 0; i < req->tgt_cfg_len; i++) {
452 req->tgt_cfg[i].pipe_num = config->ce_tgt_cfg[i].pipe_num;
453 req->tgt_cfg[i].pipe_dir = config->ce_tgt_cfg[i].pipe_dir;
454 req->tgt_cfg[i].nentries = config->ce_tgt_cfg[i].nentries;
455 req->tgt_cfg[i].nbytes_max = config->ce_tgt_cfg[i].nbytes_max;
456 req->tgt_cfg[i].flags = config->ce_tgt_cfg[i].flags;
459 req->svc_cfg_valid = 1;
460 if (config->num_ce_svc_pipe_cfg > QMI_WLFW_MAX_NUM_SVC_V01)
461 req->svc_cfg_len = QMI_WLFW_MAX_NUM_SVC_V01;
462 else
463 req->svc_cfg_len = config->num_ce_svc_pipe_cfg;
464 for (i = 0; i < req->svc_cfg_len; i++) {
465 req->svc_cfg[i].service_id = config->ce_svc_cfg[i].service_id;
466 req->svc_cfg[i].pipe_dir = config->ce_svc_cfg[i].pipe_dir;
467 req->svc_cfg[i].pipe_num = config->ce_svc_cfg[i].pipe_num;
470 req->shadow_reg_valid = 1;
471 if (config->num_shadow_reg_cfg >
472 QMI_WLFW_MAX_NUM_SHADOW_REG_V01)
473 req->shadow_reg_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V01;
474 else
475 req->shadow_reg_len = config->num_shadow_reg_cfg;
477 memcpy(req->shadow_reg, config->shadow_reg_cfg,
478 sizeof(struct wlfw_shadow_reg_cfg_s_v01) * req->shadow_reg_len);
480 ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
481 QMI_WLFW_WLAN_CFG_REQ_V01,
482 WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN,
483 wlfw_wlan_cfg_req_msg_v01_ei, req);
484 if (ret < 0) {
485 qmi_txn_cancel(&txn);
486 ath10k_err(ar, "failed to send config request: %d\n", ret);
487 goto out;
490 ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
491 if (ret < 0)
492 goto out;
494 if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
495 ath10k_err(ar, "config request rejected: %d\n", resp.resp.error);
496 ret = -EINVAL;
497 goto out;
500 ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi config request completed\n");
501 kfree(req);
502 return 0;
504 out:
505 kfree(req);
506 return ret;
509 int ath10k_qmi_wlan_enable(struct ath10k *ar,
510 struct ath10k_qmi_wlan_enable_cfg *config,
511 enum wlfw_driver_mode_enum_v01 mode,
512 const char *version)
514 int ret;
516 ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi mode %d config %p\n",
517 mode, config);
519 ret = ath10k_qmi_cfg_send_sync_msg(ar, config, version);
520 if (ret) {
521 ath10k_err(ar, "failed to send qmi config: %d\n", ret);
522 return ret;
525 ret = ath10k_qmi_mode_send_sync_msg(ar, mode);
526 if (ret) {
527 ath10k_err(ar, "failed to send qmi mode: %d\n", ret);
528 return ret;
531 return 0;
534 int ath10k_qmi_wlan_disable(struct ath10k *ar)
536 return ath10k_qmi_mode_send_sync_msg(ar, QMI_WLFW_OFF_V01);
539 static int ath10k_qmi_cap_send_sync_msg(struct ath10k_qmi *qmi)
541 struct wlfw_cap_resp_msg_v01 *resp;
542 struct wlfw_cap_req_msg_v01 req = {};
543 struct ath10k *ar = qmi->ar;
544 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
545 struct qmi_txn txn;
546 int ret;
548 resp = kzalloc(sizeof(*resp), GFP_KERNEL);
549 if (!resp)
550 return -ENOMEM;
552 ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_cap_resp_msg_v01_ei, resp);
553 if (ret < 0)
554 goto out;
556 ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
557 QMI_WLFW_CAP_REQ_V01,
558 WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN,
559 wlfw_cap_req_msg_v01_ei, &req);
560 if (ret < 0) {
561 qmi_txn_cancel(&txn);
562 ath10k_err(ar, "failed to send capability request: %d\n", ret);
563 goto out;
566 ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
567 if (ret < 0)
568 goto out;
570 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
571 ath10k_err(ar, "capability req rejected: %d\n", resp->resp.error);
572 ret = -EINVAL;
573 goto out;
576 if (resp->chip_info_valid) {
577 qmi->chip_info.chip_id = resp->chip_info.chip_id;
578 qmi->chip_info.chip_family = resp->chip_info.chip_family;
581 if (resp->board_info_valid)
582 qmi->board_info.board_id = resp->board_info.board_id;
583 else
584 qmi->board_info.board_id = 0xFF;
586 if (resp->soc_info_valid)
587 qmi->soc_info.soc_id = resp->soc_info.soc_id;
589 if (resp->fw_version_info_valid) {
590 qmi->fw_version = resp->fw_version_info.fw_version;
591 strlcpy(qmi->fw_build_timestamp, resp->fw_version_info.fw_build_timestamp,
592 sizeof(qmi->fw_build_timestamp));
595 if (resp->fw_build_id_valid)
596 strlcpy(qmi->fw_build_id, resp->fw_build_id,
597 MAX_BUILD_ID_LEN + 1);
599 if (!test_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags)) {
600 ath10k_info(ar, "qmi chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x",
601 qmi->chip_info.chip_id, qmi->chip_info.chip_family,
602 qmi->board_info.board_id, qmi->soc_info.soc_id);
603 ath10k_info(ar, "qmi fw_version 0x%x fw_build_timestamp %s fw_build_id %s",
604 qmi->fw_version, qmi->fw_build_timestamp, qmi->fw_build_id);
607 kfree(resp);
608 return 0;
610 out:
611 kfree(resp);
612 return ret;
615 static int ath10k_qmi_host_cap_send_sync(struct ath10k_qmi *qmi)
617 struct wlfw_host_cap_resp_msg_v01 resp = {};
618 struct wlfw_host_cap_req_msg_v01 req = {};
619 struct qmi_elem_info *req_ei;
620 struct ath10k *ar = qmi->ar;
621 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
622 struct qmi_txn txn;
623 int ret;
625 req.daemon_support_valid = 1;
626 req.daemon_support = 0;
628 ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_host_cap_resp_msg_v01_ei,
629 &resp);
630 if (ret < 0)
631 goto out;
633 if (test_bit(ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK, &ar_snoc->flags))
634 req_ei = wlfw_host_cap_8bit_req_msg_v01_ei;
635 else
636 req_ei = wlfw_host_cap_req_msg_v01_ei;
638 ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
639 QMI_WLFW_HOST_CAP_REQ_V01,
640 WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN,
641 req_ei, &req);
642 if (ret < 0) {
643 qmi_txn_cancel(&txn);
644 ath10k_err(ar, "failed to send host capability request: %d\n", ret);
645 goto out;
648 ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
649 if (ret < 0)
650 goto out;
652 /* older FW didn't support this request, which is not fatal */
653 if (resp.resp.result != QMI_RESULT_SUCCESS_V01 &&
654 resp.resp.error != QMI_ERR_NOT_SUPPORTED_V01) {
655 ath10k_err(ar, "host capability request rejected: %d\n", resp.resp.error);
656 ret = -EINVAL;
657 goto out;
660 ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi host capability request completed\n");
661 return 0;
663 out:
664 return ret;
667 int ath10k_qmi_set_fw_log_mode(struct ath10k *ar, u8 fw_log_mode)
669 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
670 struct wlfw_ini_resp_msg_v01 resp = {};
671 struct ath10k_qmi *qmi = ar_snoc->qmi;
672 struct wlfw_ini_req_msg_v01 req = {};
673 struct qmi_txn txn;
674 int ret;
676 req.enablefwlog_valid = 1;
677 req.enablefwlog = fw_log_mode;
679 ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_ini_resp_msg_v01_ei,
680 &resp);
681 if (ret < 0)
682 goto out;
684 ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
685 QMI_WLFW_INI_REQ_V01,
686 WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN,
687 wlfw_ini_req_msg_v01_ei, &req);
688 if (ret < 0) {
689 qmi_txn_cancel(&txn);
690 ath10k_err(ar, "failed to send fw log request: %d\n", ret);
691 goto out;
694 ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
695 if (ret < 0)
696 goto out;
698 if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
699 ath10k_err(ar, "fw log request rejected: %d\n",
700 resp.resp.error);
701 ret = -EINVAL;
702 goto out;
704 ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi fw log request completed, mode: %d\n",
705 fw_log_mode);
706 return 0;
708 out:
709 return ret;
712 static int
713 ath10k_qmi_ind_register_send_sync_msg(struct ath10k_qmi *qmi)
715 struct wlfw_ind_register_resp_msg_v01 resp = {};
716 struct wlfw_ind_register_req_msg_v01 req = {};
717 struct ath10k *ar = qmi->ar;
718 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
719 struct qmi_txn txn;
720 int ret;
722 req.client_id_valid = 1;
723 req.client_id = ATH10K_QMI_CLIENT_ID;
724 req.fw_ready_enable_valid = 1;
725 req.fw_ready_enable = 1;
726 req.msa_ready_enable_valid = 1;
727 req.msa_ready_enable = 1;
729 if (ar_snoc->xo_cal_supported) {
730 req.xo_cal_enable_valid = 1;
731 req.xo_cal_enable = 1;
734 ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
735 wlfw_ind_register_resp_msg_v01_ei, &resp);
736 if (ret < 0)
737 goto out;
739 ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
740 QMI_WLFW_IND_REGISTER_REQ_V01,
741 WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN,
742 wlfw_ind_register_req_msg_v01_ei, &req);
743 if (ret < 0) {
744 qmi_txn_cancel(&txn);
745 ath10k_err(ar, "failed to send indication registered request: %d\n", ret);
746 goto out;
749 ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
750 if (ret < 0)
751 goto out;
753 if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
754 ath10k_err(ar, "indication request rejected: %d\n", resp.resp.error);
755 ret = -EINVAL;
756 goto out;
759 if (resp.fw_status_valid) {
760 if (resp.fw_status & QMI_WLFW_FW_READY_V01)
761 qmi->fw_ready = true;
763 ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi indication register request completed\n");
764 return 0;
766 out:
767 return ret;
770 static void ath10k_qmi_event_server_arrive(struct ath10k_qmi *qmi)
772 struct ath10k *ar = qmi->ar;
773 int ret;
775 ret = ath10k_qmi_ind_register_send_sync_msg(qmi);
776 if (ret)
777 return;
779 if (qmi->fw_ready) {
780 ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_READY_IND);
781 return;
784 ret = ath10k_qmi_host_cap_send_sync(qmi);
785 if (ret)
786 return;
788 ret = ath10k_qmi_msa_mem_info_send_sync_msg(qmi);
789 if (ret)
790 return;
793 * HACK: sleep for a while inbetween receiving the msa info response
794 * and the XPU update to prevent SDM845 from crashing due to a security
795 * violation, when running MPSS.AT.4.0.c2-01184-SDM845_GEN_PACK-1.
797 msleep(20);
799 ret = ath10k_qmi_setup_msa_permissions(qmi);
800 if (ret)
801 return;
803 ret = ath10k_qmi_msa_ready_send_sync_msg(qmi);
804 if (ret)
805 goto err_setup_msa;
807 ret = ath10k_qmi_cap_send_sync_msg(qmi);
808 if (ret)
809 goto err_setup_msa;
811 return;
813 err_setup_msa:
814 ath10k_qmi_remove_msa_permission(qmi);
817 static int ath10k_qmi_fetch_board_file(struct ath10k_qmi *qmi)
819 struct ath10k *ar = qmi->ar;
821 ar->hif.bus = ATH10K_BUS_SNOC;
822 ar->id.qmi_ids_valid = true;
823 ar->id.qmi_board_id = qmi->board_info.board_id;
824 ar->hw_params.fw.dir = WCN3990_HW_1_0_FW_DIR;
826 return ath10k_core_fetch_board_file(qmi->ar, ATH10K_BD_IE_BOARD);
829 static int
830 ath10k_qmi_driver_event_post(struct ath10k_qmi *qmi,
831 enum ath10k_qmi_driver_event_type type,
832 void *data)
834 struct ath10k_qmi_driver_event *event;
836 event = kzalloc(sizeof(*event), GFP_ATOMIC);
837 if (!event)
838 return -ENOMEM;
840 event->type = type;
841 event->data = data;
843 spin_lock(&qmi->event_lock);
844 list_add_tail(&event->list, &qmi->event_list);
845 spin_unlock(&qmi->event_lock);
847 queue_work(qmi->event_wq, &qmi->event_work);
849 return 0;
852 static void ath10k_qmi_event_server_exit(struct ath10k_qmi *qmi)
854 struct ath10k *ar = qmi->ar;
855 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
857 ath10k_qmi_remove_msa_permission(qmi);
858 ath10k_core_free_board_files(ar);
859 if (!test_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags))
860 ath10k_snoc_fw_crashed_dump(ar);
862 ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_DOWN_IND);
863 ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw qmi service disconnected\n");
/* Handle the MSA-ready indication: fetch and download the board file, then
 * report calibration data.  The cal-report result is intentionally not
 * propagated (best effort); earlier failures abort the sequence.
 */
static void ath10k_qmi_event_msa_ready(struct ath10k_qmi *qmi)
{
	int ret;

	ret = ath10k_qmi_fetch_board_file(qmi);
	if (ret)
		goto out;

	ret = ath10k_qmi_bdf_dnld_send_sync(qmi);
	if (ret)
		goto out;

	ret = ath10k_qmi_send_cal_report_req(qmi);

out:
	return;
}
884 static int ath10k_qmi_event_fw_ready_ind(struct ath10k_qmi *qmi)
886 struct ath10k *ar = qmi->ar;
888 ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw ready event received\n");
889 ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_READY_IND);
891 return 0;
894 static void ath10k_qmi_fw_ready_ind(struct qmi_handle *qmi_hdl,
895 struct sockaddr_qrtr *sq,
896 struct qmi_txn *txn, const void *data)
898 struct ath10k_qmi *qmi = container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);
900 ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_FW_READY_IND, NULL);
903 static void ath10k_qmi_msa_ready_ind(struct qmi_handle *qmi_hdl,
904 struct sockaddr_qrtr *sq,
905 struct qmi_txn *txn, const void *data)
907 struct ath10k_qmi *qmi = container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);
909 ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_MSA_READY_IND, NULL);
912 static struct qmi_msg_handler qmi_msg_handler[] = {
914 .type = QMI_INDICATION,
915 .msg_id = QMI_WLFW_FW_READY_IND_V01,
916 .ei = wlfw_fw_ready_ind_msg_v01_ei,
917 .decoded_size = sizeof(struct wlfw_fw_ready_ind_msg_v01),
918 .fn = ath10k_qmi_fw_ready_ind,
921 .type = QMI_INDICATION,
922 .msg_id = QMI_WLFW_MSA_READY_IND_V01,
923 .ei = wlfw_msa_ready_ind_msg_v01_ei,
924 .decoded_size = sizeof(struct wlfw_msa_ready_ind_msg_v01),
925 .fn = ath10k_qmi_msa_ready_ind,
930 static int ath10k_qmi_new_server(struct qmi_handle *qmi_hdl,
931 struct qmi_service *service)
933 struct ath10k_qmi *qmi = container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);
934 struct sockaddr_qrtr *sq = &qmi->sq;
935 struct ath10k *ar = qmi->ar;
936 int ret;
938 sq->sq_family = AF_QIPCRTR;
939 sq->sq_node = service->node;
940 sq->sq_port = service->port;
942 ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw qmi service found\n");
944 ret = kernel_connect(qmi_hdl->sock, (struct sockaddr *)&qmi->sq,
945 sizeof(qmi->sq), 0);
946 if (ret) {
947 ath10k_err(ar, "failed to connect to a remote QMI service port\n");
948 return ret;
951 ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi wifi fw qmi service connected\n");
952 ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_SERVER_ARRIVE, NULL);
954 return ret;
957 static void ath10k_qmi_del_server(struct qmi_handle *qmi_hdl,
958 struct qmi_service *service)
960 struct ath10k_qmi *qmi =
961 container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);
963 qmi->fw_ready = false;
964 ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_SERVER_EXIT, NULL);
967 static struct qmi_ops ath10k_qmi_ops = {
968 .new_server = ath10k_qmi_new_server,
969 .del_server = ath10k_qmi_del_server,
972 static void ath10k_qmi_driver_event_work(struct work_struct *work)
974 struct ath10k_qmi *qmi = container_of(work, struct ath10k_qmi,
975 event_work);
976 struct ath10k_qmi_driver_event *event;
977 struct ath10k *ar = qmi->ar;
979 spin_lock(&qmi->event_lock);
980 while (!list_empty(&qmi->event_list)) {
981 event = list_first_entry(&qmi->event_list,
982 struct ath10k_qmi_driver_event, list);
983 list_del(&event->list);
984 spin_unlock(&qmi->event_lock);
986 switch (event->type) {
987 case ATH10K_QMI_EVENT_SERVER_ARRIVE:
988 ath10k_qmi_event_server_arrive(qmi);
989 break;
990 case ATH10K_QMI_EVENT_SERVER_EXIT:
991 ath10k_qmi_event_server_exit(qmi);
992 break;
993 case ATH10K_QMI_EVENT_FW_READY_IND:
994 ath10k_qmi_event_fw_ready_ind(qmi);
995 break;
996 case ATH10K_QMI_EVENT_MSA_READY_IND:
997 ath10k_qmi_event_msa_ready(qmi);
998 break;
999 default:
1000 ath10k_warn(ar, "invalid event type: %d", event->type);
1001 break;
1003 kfree(event);
1004 spin_lock(&qmi->event_lock);
1006 spin_unlock(&qmi->event_lock);
1009 static int ath10k_qmi_setup_msa_resources(struct ath10k_qmi *qmi, u32 msa_size)
1011 struct ath10k *ar = qmi->ar;
1012 struct device *dev = ar->dev;
1013 struct device_node *node;
1014 struct resource r;
1015 int ret;
1017 node = of_parse_phandle(dev->of_node, "memory-region", 0);
1018 if (node) {
1019 ret = of_address_to_resource(node, 0, &r);
1020 if (ret) {
1021 dev_err(dev, "failed to resolve msa fixed region\n");
1022 return ret;
1024 of_node_put(node);
1026 qmi->msa_pa = r.start;
1027 qmi->msa_mem_size = resource_size(&r);
1028 qmi->msa_va = devm_memremap(dev, qmi->msa_pa, qmi->msa_mem_size,
1029 MEMREMAP_WT);
1030 if (IS_ERR(qmi->msa_va)) {
1031 dev_err(dev, "failed to map memory region: %pa\n", &r.start);
1032 return PTR_ERR(qmi->msa_va);
1034 } else {
1035 qmi->msa_va = dmam_alloc_coherent(dev, msa_size,
1036 &qmi->msa_pa, GFP_KERNEL);
1037 if (!qmi->msa_va) {
1038 ath10k_err(ar, "failed to allocate dma memory for msa region\n");
1039 return -ENOMEM;
1041 qmi->msa_mem_size = msa_size;
1044 if (of_property_read_bool(dev->of_node, "qcom,msa-fixed-perm"))
1045 qmi->msa_fixed_perm = true;
1047 ath10k_dbg(ar, ATH10K_DBG_QMI, "msa pa: %pad , msa va: 0x%p\n",
1048 &qmi->msa_pa,
1049 qmi->msa_va);
1051 return 0;
1054 int ath10k_qmi_init(struct ath10k *ar, u32 msa_size)
1056 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1057 struct ath10k_qmi *qmi;
1058 int ret;
1060 qmi = kzalloc(sizeof(*qmi), GFP_KERNEL);
1061 if (!qmi)
1062 return -ENOMEM;
1064 qmi->ar = ar;
1065 ar_snoc->qmi = qmi;
1067 ret = ath10k_qmi_setup_msa_resources(qmi, msa_size);
1068 if (ret)
1069 goto err;
1071 ret = qmi_handle_init(&qmi->qmi_hdl,
1072 WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
1073 &ath10k_qmi_ops, qmi_msg_handler);
1074 if (ret)
1075 goto err;
1077 qmi->event_wq = alloc_workqueue("ath10k_qmi_driver_event",
1078 WQ_UNBOUND, 1);
1079 if (!qmi->event_wq) {
1080 ath10k_err(ar, "failed to allocate workqueue\n");
1081 ret = -EFAULT;
1082 goto err_release_qmi_handle;
1085 INIT_LIST_HEAD(&qmi->event_list);
1086 spin_lock_init(&qmi->event_lock);
1087 INIT_WORK(&qmi->event_work, ath10k_qmi_driver_event_work);
1089 ret = qmi_add_lookup(&qmi->qmi_hdl, WLFW_SERVICE_ID_V01,
1090 WLFW_SERVICE_VERS_V01, 0);
1091 if (ret)
1092 goto err_qmi_lookup;
1094 return 0;
1096 err_qmi_lookup:
1097 destroy_workqueue(qmi->event_wq);
1099 err_release_qmi_handle:
1100 qmi_handle_release(&qmi->qmi_hdl);
1102 err:
1103 kfree(qmi);
1104 return ret;
1107 int ath10k_qmi_deinit(struct ath10k *ar)
1109 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1110 struct ath10k_qmi *qmi = ar_snoc->qmi;
1112 qmi_handle_release(&qmi->qmi_hdl);
1113 cancel_work_sync(&qmi->event_work);
1114 destroy_workqueue(qmi->event_wq);
1115 kfree(qmi);
1116 ar_snoc->qmi = NULL;
1118 return 0;