// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
 */

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/bitfield.h>
#include "core.h"
#include "hw.h"
#include "hif.h"
#include "wmi-ops.h"
#include "bmi.h"
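
/* Per-chip register offsets and hardware constants. The table matching the
 * detected chip is referenced through ar->regs (see
 * ath10k_hw_qca6174_enable_pll_clock() below for an example).
 */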

const struct ath10k_hw_regs qca988x_regs = {
	.rtc_soc_base_address = 0x00004000,
	.rtc_wmac_base_address = 0x00005000,
	.soc_core_base_address = 0x00009000,
	.wlan_mac_base_address = 0x00020000,
	.ce_wrapper_base_address = 0x00057000,
	.ce0_base_address = 0x00057400,
	.ce1_base_address = 0x00057800,
	.ce2_base_address = 0x00057c00,
	.ce3_base_address = 0x00058000,
	.ce4_base_address = 0x00058400,
	.ce5_base_address = 0x00058800,
	.ce6_base_address = 0x00058c00,
	.ce7_base_address = 0x00059000,
	.soc_reset_control_si0_rst_mask = 0x00000001,
	.soc_reset_control_ce_rst_mask = 0x00040000,
	.soc_chip_id_address = 0x000000ec,
	.scratch_3_address = 0x00000030,
	.fw_indicator_address = 0x00009030,
	.pcie_local_base_address = 0x00080000,
	.ce_wrap_intr_sum_host_msi_lsb = 0x00000008,
	.ce_wrap_intr_sum_host_msi_mask = 0x0000ff00,
	.pcie_intr_fw_mask = 0x00000400,
	.pcie_intr_ce_mask_all = 0x0007f800,
	.pcie_intr_clr_address = 0x00000014,
};

const struct ath10k_hw_regs qca6174_regs = {
	.rtc_soc_base_address = 0x00000800,
	.rtc_wmac_base_address = 0x00001000,
	.soc_core_base_address = 0x0003a000,
	.wlan_mac_base_address = 0x00010000,
	.ce_wrapper_base_address = 0x00034000,
	.ce0_base_address = 0x00034400,
	.ce1_base_address = 0x00034800,
	.ce2_base_address = 0x00034c00,
	.ce3_base_address = 0x00035000,
	.ce4_base_address = 0x00035400,
	.ce5_base_address = 0x00035800,
	.ce6_base_address = 0x00035c00,
	.ce7_base_address = 0x00036000,
	.soc_reset_control_si0_rst_mask = 0x00000000,
	.soc_reset_control_ce_rst_mask = 0x00000001,
	.soc_chip_id_address = 0x000000f0,
	.scratch_3_address = 0x00000028,
	.fw_indicator_address = 0x0003a028,
	.pcie_local_base_address = 0x00080000,
	.ce_wrap_intr_sum_host_msi_lsb = 0x00000008,
	.ce_wrap_intr_sum_host_msi_mask = 0x0000ff00,
	.pcie_intr_fw_mask = 0x00000400,
	.pcie_intr_ce_mask_all = 0x0007f800,
	.pcie_intr_clr_address = 0x00000014,
	.cpu_pll_init_address = 0x00404020,
	.cpu_speed_address = 0x00404024,
	.core_clk_div_address = 0x00404028,
};

const struct ath10k_hw_regs qca99x0_regs = {
	.rtc_soc_base_address = 0x00080000,
	.rtc_wmac_base_address = 0x00000000,
	.soc_core_base_address = 0x00082000,
	.wlan_mac_base_address = 0x00030000,
	.ce_wrapper_base_address = 0x0004d000,
	.ce0_base_address = 0x0004a000,
	.ce1_base_address = 0x0004a400,
	.ce2_base_address = 0x0004a800,
	.ce3_base_address = 0x0004ac00,
	.ce4_base_address = 0x0004b000,
	.ce5_base_address = 0x0004b400,
	.ce6_base_address = 0x0004b800,
	.ce7_base_address = 0x0004bc00,
	/* Note: qca99x0 supports up to 12 copy engines. Other than the
	 * addresses of CE0 and CE1, no other copy engine is referred to
	 * directly in the code, so there is no need to add entries for the
	 * newly supported CEs in this address table.
	 * Copy Engine    Address
	 * CE8            0x0004c000
	 * CE9            0x0004c400
	 * CE10           0x0004c800
	 * CE11           0x0004cc00
	 */
	.soc_reset_control_si0_rst_mask = 0x00000001,
	.soc_reset_control_ce_rst_mask = 0x00000100,
	.soc_chip_id_address = 0x000000ec,
	.scratch_3_address = 0x00040050,
	.fw_indicator_address = 0x00040050,
	.pcie_local_base_address = 0x00000000,
	.ce_wrap_intr_sum_host_msi_lsb = 0x0000000c,
	.ce_wrap_intr_sum_host_msi_mask = 0x00fff000,
	.pcie_intr_fw_mask = 0x00100000,
	.pcie_intr_ce_mask_all = 0x000fff00,
	.pcie_intr_clr_address = 0x00000010,
};

const struct ath10k_hw_regs qca4019_regs = {
	.rtc_soc_base_address = 0x00080000,
	.soc_core_base_address = 0x00082000,
	.wlan_mac_base_address = 0x00030000,
	.ce_wrapper_base_address = 0x0004d000,
	.ce0_base_address = 0x0004a000,
	.ce1_base_address = 0x0004a400,
	.ce2_base_address = 0x0004a800,
	.ce3_base_address = 0x0004ac00,
	.ce4_base_address = 0x0004b000,
	.ce5_base_address = 0x0004b400,
	.ce6_base_address = 0x0004b800,
	.ce7_base_address = 0x0004bc00,
	/* qca4019 supports up to 12 copy engines. Since the base addresses
	 * of CE8 to CE11 are not referred to directly in the code, there is
	 * no need to have separate members for them in this table.
	 * Copy Engine    Address
	 * CE8            0x0004c000
	 * CE9            0x0004c400
	 * CE10           0x0004c800
	 * CE11           0x0004cc00
	 */
	.soc_reset_control_si0_rst_mask = 0x00000001,
	.soc_reset_control_ce_rst_mask = 0x00000100,
	.soc_chip_id_address = 0x000000ec,
	.fw_indicator_address = 0x0004f00c,
	.ce_wrap_intr_sum_host_msi_lsb = 0x0000000c,
	.ce_wrap_intr_sum_host_msi_mask = 0x00fff000,
	.pcie_intr_fw_mask = 0x00100000,
	.pcie_intr_ce_mask_all = 0x000fff00,
	.pcie_intr_clr_address = 0x00000010,
};

const struct ath10k_hw_values qca988x_values = {
	.rtc_state_val_on = 3,
	.ce_count = 8,
	.msi_assign_ce_max = 7,
	.num_target_ce_config_wlan = 7,
	.ce_desc_meta_data_mask = 0xFFFC,
	.ce_desc_meta_data_lsb = 2,
};

const struct ath10k_hw_values qca6174_values = {
	.rtc_state_val_on = 3,
	.ce_count = 8,
	.msi_assign_ce_max = 7,
	.num_target_ce_config_wlan = 7,
	.ce_desc_meta_data_mask = 0xFFFC,
	.ce_desc_meta_data_lsb = 2,
	.rfkill_pin = 16,
	.rfkill_cfg = 0,
	.rfkill_on_level = 1,
};

const struct ath10k_hw_values qca99x0_values = {
	.rtc_state_val_on = 7,
	.ce_count = 12,
	.msi_assign_ce_max = 12,
	.num_target_ce_config_wlan = 10,
	.ce_desc_meta_data_mask = 0xFFF0,
	.ce_desc_meta_data_lsb = 4,
};

const struct ath10k_hw_values qca9888_values = {
	.rtc_state_val_on = 3,
	.ce_count = 12,
	.msi_assign_ce_max = 12,
	.num_target_ce_config_wlan = 10,
	.ce_desc_meta_data_mask = 0xFFF0,
	.ce_desc_meta_data_lsb = 4,
};

const struct ath10k_hw_values qca4019_values = {
	.ce_count = 12,
	.num_target_ce_config_wlan = 10,
	.ce_desc_meta_data_mask = 0xFFF0,
	.ce_desc_meta_data_lsb = 4,
};

const struct ath10k_hw_regs wcn3990_regs = {
	.rtc_soc_base_address = 0x00000000,
	.rtc_wmac_base_address = 0x00000000,
	.soc_core_base_address = 0x00000000,
	.ce_wrapper_base_address = 0x0024C000,
	.ce0_base_address = 0x00240000,
	.ce1_base_address = 0x00241000,
	.ce2_base_address = 0x00242000,
	.ce3_base_address = 0x00243000,
	.ce4_base_address = 0x00244000,
	.ce5_base_address = 0x00245000,
	.ce6_base_address = 0x00246000,
	.ce7_base_address = 0x00247000,
	.ce8_base_address = 0x00248000,
	.ce9_base_address = 0x00249000,
	.ce10_base_address = 0x0024A000,
	.ce11_base_address = 0x0024B000,
	.soc_chip_id_address = 0x000000f0,
	.soc_reset_control_si0_rst_mask = 0x00000001,
	.soc_reset_control_ce_rst_mask = 0x00000100,
	.ce_wrap_intr_sum_host_msi_lsb = 0x0000000c,
	.ce_wrap_intr_sum_host_msi_mask = 0x00fff000,
	.pcie_intr_fw_mask = 0x00100000,
};

static struct ath10k_hw_ce_regs_addr_map wcn3990_src_ring = {
	.msb = 0x00000010,
	.lsb = 0x00000010,
	.mask = GENMASK(17, 17),
};

static struct ath10k_hw_ce_regs_addr_map wcn3990_dst_ring = {
	.msb = 0x00000012,
	.lsb = 0x00000012,
	.mask = GENMASK(18, 18),
};

static struct ath10k_hw_ce_regs_addr_map wcn3990_dmax = {
	.msb = 0x00000000,
	.lsb = 0x00000000,
	.mask = GENMASK(15, 0),
};

static struct ath10k_hw_ce_ctrl1 wcn3990_ctrl1 = {
	.addr = 0x00000018,
	.src_ring = &wcn3990_src_ring,
	.dst_ring = &wcn3990_dst_ring,
	.dmax = &wcn3990_dmax,
};

static struct ath10k_hw_ce_regs_addr_map wcn3990_host_ie_cc = {
	.mask = GENMASK(0, 0),
};

static struct ath10k_hw_ce_host_ie wcn3990_host_ie = {
	.copy_complete = &wcn3990_host_ie_cc,
};

static struct ath10k_hw_ce_host_wm_regs wcn3990_wm_reg = {
	.dstr_lmask = 0x00000010,
	.dstr_hmask = 0x00000008,
	.srcr_lmask = 0x00000004,
	.srcr_hmask = 0x00000002,
	.cc_mask = 0x00000001,
	.wm_mask = 0x0000001E,
	.addr = 0x00000030,
};

static struct ath10k_hw_ce_misc_regs wcn3990_misc_reg = {
	.axi_err = 0x00000100,
	.dstr_add_err = 0x00000200,
	.srcr_len_err = 0x00000100,
	.dstr_mlen_vio = 0x00000080,
	.dstr_overflow = 0x00000040,
	.srcr_overflow = 0x00000020,
	.err_mask = 0x000003E0,
	.addr = 0x00000038,
};

static struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_low = {
	.msb = 0x00000000,
	.lsb = 0x00000010,
	.mask = GENMASK(31, 16),
};

static struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_high = {
	.msb = 0x0000000f,
	.lsb = 0x00000000,
	.mask = GENMASK(15, 0),
};

static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_src_ring = {
	.addr = 0x0000004c,
	.low_rst = 0x00000000,
	.high_rst = 0x00000000,
	.wm_low = &wcn3990_src_wm_low,
	.wm_high = &wcn3990_src_wm_high,
};

static struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_low = {
	.lsb = 0x00000010,
	.mask = GENMASK(31, 16),
};

static struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_high = {
	.msb = 0x0000000f,
	.lsb = 0x00000000,
	.mask = GENMASK(15, 0),
};

static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_dst_ring = {
	.addr = 0x00000050,
	.low_rst = 0x00000000,
	.high_rst = 0x00000000,
	.wm_low = &wcn3990_dst_wm_low,
	.wm_high = &wcn3990_dst_wm_high,
};

static struct ath10k_hw_ce_ctrl1_upd wcn3990_ctrl1_upd = {
	.shift = 19,
	.mask = 0x00080000,
	.enable = 0x00000000,
};

const struct ath10k_hw_ce_regs wcn3990_ce_regs = {
	.sr_base_addr_lo = 0x00000000,
	.sr_base_addr_hi = 0x00000004,
	.sr_size_addr = 0x00000008,
	.dr_base_addr_lo = 0x0000000c,
	.dr_base_addr_hi = 0x00000010,
	.dr_size_addr = 0x00000014,
	.misc_ie_addr = 0x00000034,
	.sr_wr_index_addr = 0x0000003c,
	.dst_wr_index_addr = 0x00000040,
	.current_srri_addr = 0x00000044,
	.current_drri_addr = 0x00000048,
	.ce_rri_low = 0x0024C004,
	.ce_rri_high = 0x0024C008,
	.host_ie_addr = 0x0000002c,
	.ctrl1_regs = &wcn3990_ctrl1,
	.host_ie = &wcn3990_host_ie,
	.wm_regs = &wcn3990_wm_reg,
	.misc_regs = &wcn3990_misc_reg,
	.wm_srcr = &wcn3990_wm_src_ring,
	.wm_dstr = &wcn3990_wm_dst_ring,
	.upd = &wcn3990_ctrl1_upd,
};

const struct ath10k_hw_values wcn3990_values = {
	.rtc_state_val_on = 5,
	.ce_count = 12,
	.msi_assign_ce_max = 12,
	.num_target_ce_config_wlan = 12,
	.ce_desc_meta_data_mask = 0xFFF0,
	.ce_desc_meta_data_lsb = 4,
};

static struct ath10k_hw_ce_regs_addr_map qcax_src_ring = {
	.msb = 0x00000010,
	.lsb = 0x00000010,
	.mask = GENMASK(16, 16),
};

static struct ath10k_hw_ce_regs_addr_map qcax_dst_ring = {
	.msb = 0x00000011,
	.lsb = 0x00000011,
	.mask = GENMASK(17, 17),
};

static struct ath10k_hw_ce_regs_addr_map qcax_dmax = {
	.msb = 0x0000000f,
	.lsb = 0x00000000,
	.mask = GENMASK(15, 0),
};

static struct ath10k_hw_ce_ctrl1 qcax_ctrl1 = {
	.addr = 0x00000010,
	.hw_mask = 0x0007ffff,
	.sw_mask = 0x0007ffff,
	.hw_wr_mask = 0x00000000,
	.sw_wr_mask = 0x0007ffff,
	.reset_mask = 0xffffffff,
	.reset = 0x00000080,
	.src_ring = &qcax_src_ring,
	.dst_ring = &qcax_dst_ring,
	.dmax = &qcax_dmax,
};

static struct ath10k_hw_ce_regs_addr_map qcax_cmd_halt_status = {
	.msb = 0x00000003,
	.lsb = 0x00000003,
	.mask = GENMASK(3, 3),
};

static struct ath10k_hw_ce_cmd_halt qcax_cmd_halt = {
	.msb = 0x00000000,
	.mask = GENMASK(0, 0),
	.status_reset = 0x00000000,
	.status = &qcax_cmd_halt_status,
};

static struct ath10k_hw_ce_regs_addr_map qcax_host_ie_cc = {
	.msb = 0x00000000,
	.lsb = 0x00000000,
	.mask = GENMASK(0, 0),
};

static struct ath10k_hw_ce_host_ie qcax_host_ie = {
	.copy_complete_reset = 0x00000000,
	.copy_complete = &qcax_host_ie_cc,
};

static struct ath10k_hw_ce_host_wm_regs qcax_wm_reg = {
	.dstr_lmask = 0x00000010,
	.dstr_hmask = 0x00000008,
	.srcr_lmask = 0x00000004,
	.srcr_hmask = 0x00000002,
	.cc_mask = 0x00000001,
	.wm_mask = 0x0000001E,
	.addr = 0x00000030,
};

static struct ath10k_hw_ce_misc_regs qcax_misc_reg = {
	.axi_err = 0x00000400,
	.dstr_add_err = 0x00000200,
	.srcr_len_err = 0x00000100,
	.dstr_mlen_vio = 0x00000080,
	.dstr_overflow = 0x00000040,
	.srcr_overflow = 0x00000020,
	.err_mask = 0x000007E0,
	.addr = 0x00000038,
};

static struct ath10k_hw_ce_regs_addr_map qcax_src_wm_low = {
	.msb = 0x0000001f,
	.lsb = 0x00000010,
	.mask = GENMASK(31, 16),
};

static struct ath10k_hw_ce_regs_addr_map qcax_src_wm_high = {
	.msb = 0x0000000f,
	.lsb = 0x00000000,
	.mask = GENMASK(15, 0),
};

static struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_src_ring = {
	.addr = 0x0000004c,
	.low_rst = 0x00000000,
	.high_rst = 0x00000000,
	.wm_low = &qcax_src_wm_low,
	.wm_high = &qcax_src_wm_high,
};

static struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_low = {
	.lsb = 0x00000010,
	.mask = GENMASK(31, 16),
};

static struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_high = {
	.msb = 0x0000000f,
	.lsb = 0x00000000,
	.mask = GENMASK(15, 0),
};

static struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_dst_ring = {
	.addr = 0x00000050,
	.low_rst = 0x00000000,
	.high_rst = 0x00000000,
	.wm_low = &qcax_dst_wm_low,
	.wm_high = &qcax_dst_wm_high,
};

const struct ath10k_hw_ce_regs qcax_ce_regs = {
	.sr_base_addr_lo = 0x00000000,
	.sr_size_addr = 0x00000004,
	.dr_base_addr_lo = 0x00000008,
	.dr_size_addr = 0x0000000c,
	.ce_cmd_addr = 0x00000018,
	.misc_ie_addr = 0x00000034,
	.sr_wr_index_addr = 0x0000003c,
	.dst_wr_index_addr = 0x00000040,
	.current_srri_addr = 0x00000044,
	.current_drri_addr = 0x00000048,
	.host_ie_addr = 0x0000002c,
	.ctrl1_regs = &qcax_ctrl1,
	.cmd_halt = &qcax_cmd_halt,
	.host_ie = &qcax_host_ie,
	.wm_regs = &qcax_wm_reg,
	.misc_regs = &qcax_misc_reg,
	.wm_srcr = &qcax_wm_src_ring,
	.wm_dstr = &qcax_wm_dst_ring,
};

const struct ath10k_hw_clk_params qca6174_clk[ATH10K_HW_REFCLK_COUNT] = {
	{
		.refclk = 48000000,
		.div = 0xe,
		.rnfrac = 0x2aaa8,
		.settle_time = 2400,
		.refdiv = 0,
		.outdiv = 1,
	},
	{
		.refclk = 19200000,
		.div = 0x24,
		.rnfrac = 0x2aaa8,
		.settle_time = 960,
		.refdiv = 0,
		.outdiv = 1,
	},
	{
		.refclk = 24000000,
		.div = 0x1d,
		.rnfrac = 0x15551,
		.settle_time = 1200,
		.refdiv = 0,
		.outdiv = 1,
	},
	{
		.refclk = 26000000,
		.div = 0x1b,
		.rnfrac = 0x4ec4,
		.settle_time = 1300,
		.refdiv = 0,
		.outdiv = 1,
	},
	{
		.refclk = 37400000,
		.div = 0x12,
		.rnfrac = 0x34b49,
		.settle_time = 1870,
		.refdiv = 0,
		.outdiv = 1,
	},
	{
		.refclk = 38400000,
		.div = 0x12,
		.rnfrac = 0x15551,
		.settle_time = 1920,
		.refdiv = 0,
		.outdiv = 1,
	},
	{
		.refclk = 40000000,
		.div = 0x12,
		.rnfrac = 0x26665,
		.settle_time = 2000,
		.refdiv = 0,
		.outdiv = 1,
	},
	{
		.refclk = 52000000,
		.div = 0x1b,
		.rnfrac = 0x4ec4,
		.settle_time = 2600,
		.refdiv = 0,
		.outdiv = 1,
	},
};
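
/* The qca6174_clk[] entries above are indexed by the EFUSE_XTAL_SEL field of
 * the efuse register, i.e. by the reference clock the board uses; see the
 * lookup in ath10k_hw_qca6174_enable_pll_clock() below.
 */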

void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
				u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev)
{
	u32 cc_fix = 0;
	u32 rcc_fix = 0;
	enum ath10k_hw_cc_wraparound_type wraparound_type;

	survey->filled |= SURVEY_INFO_TIME |
			  SURVEY_INFO_TIME_BUSY;

	wraparound_type = ar->hw_params.cc_wraparound_type;

	if (cc < cc_prev || rcc < rcc_prev) {
		switch (wraparound_type) {
		case ATH10K_HW_CC_WRAP_SHIFTED_ALL:
			if (cc < cc_prev) {
				cc_fix = 0x7fffffff;
				survey->filled &= ~SURVEY_INFO_TIME_BUSY;
			}
			break;
		case ATH10K_HW_CC_WRAP_SHIFTED_EACH:
			if (cc < cc_prev)
				cc_fix = 0x7fffffff;

			if (rcc < rcc_prev)
				rcc_fix = 0x7fffffff;
			break;
		case ATH10K_HW_CC_WRAP_DISABLED:
			break;
		}
	}

	cc -= cc_prev - cc_fix;
	rcc -= rcc_prev - rcc_fix;

	survey->time = CCNT_TO_MSEC(ar, cc);
	survey->time_busy = CCNT_TO_MSEC(ar, rcc);
}
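
/* Illustrative example (values made up): with ATH10K_HW_CC_WRAP_SHIFTED_ALL
 * and cc_prev = 0x7ffffff5, cc = 0x0000000a, the cycle counter has wrapped at
 * 31 bits, so cc_fix = 0x7fffffff and the corrected delta becomes
 * cc - (cc_prev - cc_fix) = 0x14, i.e. 20 cycles. In that mode the busy
 * counter cannot be corrected the same way, so SURVEY_INFO_TIME_BUSY is
 * dropped for the wrapped sample.
 */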

/* The firmware does not support setting the coverage class. Instead this
 * function monitors and modifies the corresponding MAC registers.
 */
static void ath10k_hw_qca988x_set_coverage_class(struct ath10k *ar,
						 s16 value)
{
	u32 slottime_reg;
	u32 slottime;
	u32 timeout_reg;
	u32 ack_timeout;
	u32 cts_timeout;
	u32 phyclk_reg;
	u32 phyclk;
	u64 fw_dbglog_mask;
	u32 fw_dbglog_level;

	mutex_lock(&ar->conf_mutex);

	/* Only modify registers if the core is started. */
	if ((ar->state != ATH10K_STATE_ON) &&
	    (ar->state != ATH10K_STATE_RESTARTED)) {
		spin_lock_bh(&ar->data_lock);
		/* Store config value for when radio boots up */
		ar->fw_coverage.coverage_class = value;
		spin_unlock_bh(&ar->data_lock);
		goto unlock;
	}

	/* Retrieve the current values of the two registers that need to be
	 * adjusted.
	 */
	slottime_reg = ath10k_hif_read32(ar, WLAN_MAC_BASE_ADDRESS +
					     WAVE1_PCU_GBL_IFS_SLOT);
	timeout_reg = ath10k_hif_read32(ar, WLAN_MAC_BASE_ADDRESS +
					    WAVE1_PCU_ACK_CTS_TIMEOUT);
	phyclk_reg = ath10k_hif_read32(ar, WLAN_MAC_BASE_ADDRESS +
				       WAVE1_PHYCLK);
	phyclk = MS(phyclk_reg, WAVE1_PHYCLK_USEC) + 1;

	if (value < 0)
		value = ar->fw_coverage.coverage_class;

	/* Break out if the coverage class and registers have the expected
	 * value.
	 */
	if (value == ar->fw_coverage.coverage_class &&
	    slottime_reg == ar->fw_coverage.reg_slottime_conf &&
	    timeout_reg == ar->fw_coverage.reg_ack_cts_timeout_conf &&
	    phyclk_reg == ar->fw_coverage.reg_phyclk)
		goto unlock;

	/* Store new initial register values from the firmware. */
	if (slottime_reg != ar->fw_coverage.reg_slottime_conf)
		ar->fw_coverage.reg_slottime_orig = slottime_reg;
	if (timeout_reg != ar->fw_coverage.reg_ack_cts_timeout_conf)
		ar->fw_coverage.reg_ack_cts_timeout_orig = timeout_reg;
	ar->fw_coverage.reg_phyclk = phyclk_reg;

	/* Calculate new value based on the (original) firmware calculation. */
	slottime_reg = ar->fw_coverage.reg_slottime_orig;
	timeout_reg = ar->fw_coverage.reg_ack_cts_timeout_orig;

	/* Do some sanity checks on the slottime register. */
	if (slottime_reg % phyclk) {
		ath10k_warn(ar,
			    "failed to set coverage class: expected integer microsecond value in register\n");

		goto store_regs;
	}

	slottime = MS(slottime_reg, WAVE1_PCU_GBL_IFS_SLOT);
	slottime = slottime / phyclk;
	if (slottime != 9 && slottime != 20) {
		ath10k_warn(ar,
			    "failed to set coverage class: expected slot time of 9 or 20us in HW register. It is %uus.\n",
			    slottime);

		goto store_regs;
	}

	/* Recalculate the register values by adding the additional propagation
	 * delay (3us per coverage class).
	 */
	slottime = MS(slottime_reg, WAVE1_PCU_GBL_IFS_SLOT);
	slottime += value * 3 * phyclk;
	slottime = min_t(u32, slottime, WAVE1_PCU_GBL_IFS_SLOT_MAX);
	slottime = SM(slottime, WAVE1_PCU_GBL_IFS_SLOT);
	slottime_reg = (slottime_reg & ~WAVE1_PCU_GBL_IFS_SLOT_MASK) | slottime;

	/* Update ack timeout (lower halfword). */
	ack_timeout = MS(timeout_reg, WAVE1_PCU_ACK_CTS_TIMEOUT_ACK);
	ack_timeout += 3 * value * phyclk;
	ack_timeout = min_t(u32, ack_timeout, WAVE1_PCU_ACK_CTS_TIMEOUT_MAX);
	ack_timeout = SM(ack_timeout, WAVE1_PCU_ACK_CTS_TIMEOUT_ACK);

	/* Update cts timeout (upper halfword). */
	cts_timeout = MS(timeout_reg, WAVE1_PCU_ACK_CTS_TIMEOUT_CTS);
	cts_timeout += 3 * value * phyclk;
	cts_timeout = min_t(u32, cts_timeout, WAVE1_PCU_ACK_CTS_TIMEOUT_MAX);
	cts_timeout = SM(cts_timeout, WAVE1_PCU_ACK_CTS_TIMEOUT_CTS);

	timeout_reg = ack_timeout | cts_timeout;

	ath10k_hif_write32(ar,
			   WLAN_MAC_BASE_ADDRESS + WAVE1_PCU_GBL_IFS_SLOT,
			   slottime_reg);
	ath10k_hif_write32(ar,
			   WLAN_MAC_BASE_ADDRESS + WAVE1_PCU_ACK_CTS_TIMEOUT,
			   timeout_reg);

	/* Ensure we have a debug level of WARN set for the case that the
	 * coverage class is larger than 0. This is important as we need to
	 * set the registers again if the firmware does an internal reset and
	 * this way we will be notified of the event.
	 */
	fw_dbglog_mask = ath10k_debug_get_fw_dbglog_mask(ar);
	fw_dbglog_level = ath10k_debug_get_fw_dbglog_level(ar);

	if (value > 0) {
		if (fw_dbglog_level > ATH10K_DBGLOG_LEVEL_WARN)
			fw_dbglog_level = ATH10K_DBGLOG_LEVEL_WARN;
		fw_dbglog_mask = ~0;
	}

	ath10k_wmi_dbglog_cfg(ar, fw_dbglog_mask, fw_dbglog_level);

store_regs:
	/* After an error we will not retry setting the coverage class. */
	spin_lock_bh(&ar->data_lock);
	ar->fw_coverage.coverage_class = value;
	spin_unlock_bh(&ar->data_lock);

	ar->fw_coverage.reg_slottime_conf = slottime_reg;
	ar->fw_coverage.reg_ack_cts_timeout_conf = timeout_reg;

unlock:
	mutex_unlock(&ar->conf_mutex);
}
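
/* Worked example (illustrative): for a coverage class of 2 the code above
 * adds 2 * 3us of extra propagation delay, i.e. 6 * phyclk clock ticks, to
 * the slot time and to both the ACK and CTS timeout fields, each capped at
 * its *_MAX value.
 */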

/**
 * ath10k_hw_qca6174_enable_pll_clock() - enable the qca6174 hw pll clock
 * @ar: the ath10k blob
 *
 * This function is very hardware specific: the clock initialization steps
 * are sensitive and getting them wrong can lead to an unpredictable crash,
 * so they must be done in sequence.
 *
 * *** Be careful if you plan to refactor them. ***
 *
 * Return: 0 if the PLL was enabled successfully, otherwise -EINVAL
 */
static int ath10k_hw_qca6174_enable_pll_clock(struct ath10k *ar)
{
	int ret, wait_limit;
	u32 clk_div_addr, pll_init_addr, speed_addr;
	u32 addr, reg_val, mem_val;
	struct ath10k_hw_params *hw;
	const struct ath10k_hw_clk_params *hw_clk;

	hw = &ar->hw_params;

	if (ar->regs->core_clk_div_address == 0 ||
	    ar->regs->cpu_pll_init_address == 0 ||
	    ar->regs->cpu_speed_address == 0)
		return -EINVAL;

	clk_div_addr = ar->regs->core_clk_div_address;
	pll_init_addr = ar->regs->cpu_pll_init_address;
	speed_addr = ar->regs->cpu_speed_address;

	/* Read efuse register to find out the right hw clock configuration */
	addr = (RTC_SOC_BASE_ADDRESS | EFUSE_OFFSET);
	ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
	if (ret)
		return -EINVAL;

	/* Bail out if the hw refclk index is out of bounds */
	if (MS(reg_val, EFUSE_XTAL_SEL) >= ATH10K_HW_REFCLK_COUNT)
		return -EINVAL;

	hw_clk = &hw->hw_clk[MS(reg_val, EFUSE_XTAL_SEL)];

	/* Set the rnfrac and outdiv params to bb_pll register */
	addr = (RTC_SOC_BASE_ADDRESS | BB_PLL_CONFIG_OFFSET);
	ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
	if (ret)
		return -EINVAL;

	reg_val &= ~(BB_PLL_CONFIG_FRAC_MASK | BB_PLL_CONFIG_OUTDIV_MASK);
	reg_val |= (SM(hw_clk->rnfrac, BB_PLL_CONFIG_FRAC) |
		    SM(hw_clk->outdiv, BB_PLL_CONFIG_OUTDIV));
	ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
	if (ret)
		return -EINVAL;

	/* Set the correct settle time value to pll_settle register */
	addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_SETTLE_OFFSET);
	ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
	if (ret)
		return -EINVAL;

	reg_val &= ~WLAN_PLL_SETTLE_TIME_MASK;
	reg_val |= SM(hw_clk->settle_time, WLAN_PLL_SETTLE_TIME);
	ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
	if (ret)
		return -EINVAL;

	/* Set the clock_ctrl div to core_clk_ctrl register */
	addr = (RTC_SOC_BASE_ADDRESS | SOC_CORE_CLK_CTRL_OFFSET);
	ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
	if (ret)
		return -EINVAL;

	reg_val &= ~SOC_CORE_CLK_CTRL_DIV_MASK;
	reg_val |= SM(1, SOC_CORE_CLK_CTRL_DIV);
	ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
	if (ret)
		return -EINVAL;

	/* Set the clock_div register */
	mem_val = 1;
	ret = ath10k_bmi_write_memory(ar, clk_div_addr, &mem_val,
				      sizeof(mem_val));
	if (ret)
		return -EINVAL;

	/* Configure the pll_control register */
	addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_CONTROL_OFFSET);
	ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
	if (ret)
		return -EINVAL;

	reg_val |= (SM(hw_clk->refdiv, WLAN_PLL_CONTROL_REFDIV) |
		    SM(hw_clk->div, WLAN_PLL_CONTROL_DIV) |
		    SM(1, WLAN_PLL_CONTROL_NOPWD));
	ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
	if (ret)
		return -EINVAL;

	/* Busy wait (max 1s) until the rtc_sync status register indicates
	 * ready.
	 */
	wait_limit = 100000;
	addr = (RTC_WMAC_BASE_ADDRESS | RTC_SYNC_STATUS_OFFSET);
	do {
		ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
		if (ret)
			return -EINVAL;

		if (!MS(reg_val, RTC_SYNC_STATUS_PLL_CHANGING))
			break;

		wait_limit--;
		udelay(10);

	} while (wait_limit > 0);

	if (MS(reg_val, RTC_SYNC_STATUS_PLL_CHANGING))
		return -EINVAL;

	/* Unset the pll_bypass in pll_control register */
	addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_CONTROL_OFFSET);
	ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
	if (ret)
		return -EINVAL;

	reg_val &= ~WLAN_PLL_CONTROL_BYPASS_MASK;
	reg_val |= SM(0, WLAN_PLL_CONTROL_BYPASS);
	ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
	if (ret)
		return -EINVAL;

	/* Busy wait (max 1s) until the rtc_sync status register indicates
	 * ready.
	 */
	wait_limit = 100000;
	addr = (RTC_WMAC_BASE_ADDRESS | RTC_SYNC_STATUS_OFFSET);
	do {
		ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
		if (ret)
			return -EINVAL;

		if (!MS(reg_val, RTC_SYNC_STATUS_PLL_CHANGING))
			break;

		wait_limit--;
		udelay(10);

	} while (wait_limit > 0);

	if (MS(reg_val, RTC_SYNC_STATUS_PLL_CHANGING))
		return -EINVAL;

	/* Enable the hardware cpu clock register */
	addr = (RTC_SOC_BASE_ADDRESS | SOC_CPU_CLOCK_OFFSET);
	ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
	if (ret)
		return -EINVAL;

	reg_val &= ~SOC_CPU_CLOCK_STANDARD_MASK;
	reg_val |= SM(1, SOC_CPU_CLOCK_STANDARD);
	ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
	if (ret)
		return -EINVAL;

	/* Unset the nopwd bit in the pll_control register */
	addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_CONTROL_OFFSET);
	ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
	if (ret)
		return -EINVAL;

	reg_val &= ~WLAN_PLL_CONTROL_NOPWD_MASK;
	ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
	if (ret)
		return -EINVAL;

	/* Enable the pll via the pll_init register */
	mem_val = 1;
	ret = ath10k_bmi_write_memory(ar, pll_init_addr, &mem_val,
				      sizeof(mem_val));
	if (ret)
		return -EINVAL;

	/* Set the target clock frequency in the speed register */
	ret = ath10k_bmi_write_memory(ar, speed_addr, &hw->target_cpu_freq,
				      sizeof(hw->target_cpu_freq));
	if (ret)
		return -EINVAL;

	return 0;
}
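
/* Each of the two rtc_sync polling loops above waits at most
 * wait_limit * 10us = 100000 * 10us = 1s for RTC_SYNC_STATUS_PLL_CHANGING to
 * clear before giving up with -EINVAL.
 */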

/* Program CPU_ADDR_MSB to allow different memory
 * region access.
 */
static void ath10k_hw_map_target_mem(struct ath10k *ar, u32 msb)
{
	u32 address = SOC_CORE_BASE_ADDRESS + FW_RAM_CONFIG_ADDRESS;

	ath10k_hif_write32(ar, address, msb);
}

/* 1. Write to memory regions of the target, such as IRAM and DRAM.
 * 2. Target addresses (0x0 ~ 0x00100000 and 0x00400000 ~ 0x00500000)
 *    can be written directly. See ath10k_pci_targ_cpu_to_ce_addr() too.
 * 3. In order to access regions other than the above,
 *    we need to set the value of the CPU_ADDR_MSB register.
 * 4. Target memory access space is limited to 1M size. If the size is larger
 *    than 1M, it has to be split and CPU_ADDR_MSB programmed accordingly.
 */
static int ath10k_hw_diag_segment_msb_download(struct ath10k *ar,
					       const void *buffer,
					       u32 address,
					       u32 length)
{
	u32 addr = address & REGION_ACCESS_SIZE_MASK;
	int ret, remain_size, size;
	const u8 *buf;

	ath10k_hw_map_target_mem(ar, CPU_ADDR_MSB_REGION_VAL(address));

	if (addr + length > REGION_ACCESS_SIZE_LIMIT) {
		size = REGION_ACCESS_SIZE_LIMIT - addr;
		remain_size = length - size;

		ret = ath10k_hif_diag_write(ar, address, buffer, size);
		if (ret) {
			ath10k_warn(ar,
				    "failed to download the first %d bytes segment to address:0x%x: %d\n",
				    size, address, ret);
			goto done;
		}

		/* Change msb to the next memory region */
		ath10k_hw_map_target_mem(ar,
					 CPU_ADDR_MSB_REGION_VAL(address) + 1);
		buf = buffer + size;
		ret = ath10k_hif_diag_write(ar,
					    address & ~REGION_ACCESS_SIZE_MASK,
					    buf, remain_size);
		if (ret) {
			ath10k_warn(ar,
				    "failed to download the second %d bytes segment to address:0x%x: %d\n",
				    remain_size,
				    address & ~REGION_ACCESS_SIZE_MASK,
				    ret);
			goto done;
		}
	} else {
		ret = ath10k_hif_diag_write(ar, address, buffer, length);
		if (ret) {
			ath10k_warn(ar,
				    "failed to download the only %d bytes segment to address:0x%x: %d\n",
				    length, address, ret);
			goto done;
		}
	}

done:
	/* Change msb back to DRAM */
	ath10k_hw_map_target_mem(ar,
				 CPU_ADDR_MSB_REGION_VAL(DRAM_BASE_ADDRESS));
	return ret;
}
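
/* Illustrative example (addresses made up, assuming the 1M region size noted
 * above): a 0x30000-byte write to target address 0x005f0000 starts 0xf0000
 * bytes into its region, so the first 0x10000 bytes are written directly,
 * CPU_ADDR_MSB is bumped to the next region, and the remaining 0x20000 bytes
 * are written at the start of that region.
 */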

static int ath10k_hw_diag_segment_download(struct ath10k *ar,
					   const void *buffer,
					   u32 address,
					   u32 length)
{
	if (address >= DRAM_BASE_ADDRESS + REGION_ACCESS_SIZE_LIMIT)
		/* Needs to change MSB for memory write */
		return ath10k_hw_diag_segment_msb_download(ar, buffer,
							   address, length);
	else
		return ath10k_hif_diag_write(ar, address, buffer, length);
}

int ath10k_hw_diag_fast_download(struct ath10k *ar,
				 u32 address,
				 const void *buffer,
				 u32 length)
{
	const u8 *buf = buffer;
	bool sgmt_end = false;
	u32 base_addr = 0;
	u32 base_len = 0;
	u32 left = 0;
	struct bmi_segmented_file_header *hdr;
	struct bmi_segmented_metadata *metadata;
	int ret = 0;

	if (length < sizeof(*hdr))
		return -EINVAL;

	/* Check the firmware header. If the magic number is wrong or the
	 * image is compressed, return an error.
	 */
	hdr = (struct bmi_segmented_file_header *)buf;
	if (__le32_to_cpu(hdr->magic_num) != BMI_SGMTFILE_MAGIC_NUM) {
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "Not a supported firmware, magic_num:0x%x\n",
			   hdr->magic_num);
		return -EINVAL;
	}

	if (hdr->file_flags != 0) {
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "Not a supported firmware, file_flags:0x%x\n",
			   hdr->file_flags);
		return -EINVAL;
	}

	metadata = (struct bmi_segmented_metadata *)hdr->data;
	left = length - sizeof(*hdr);

	while (left > 0) {
		if (left < sizeof(*metadata)) {
			ath10k_warn(ar, "firmware segment is truncated: %d\n",
				    left);
			ret = -EINVAL;
			break;
		}
		base_addr = __le32_to_cpu(metadata->addr);
		base_len = __le32_to_cpu(metadata->length);
		buf = metadata->data;
		left -= sizeof(*metadata);

		switch (base_len) {
		case BMI_SGMTFILE_BEGINADDR:
			/* base_addr is the start address to run */
			ret = ath10k_bmi_set_start(ar, base_addr);
			base_len = 0;
			break;
		case BMI_SGMTFILE_DONE:
			/* no more segment */
			base_len = 0;
			sgmt_end = true;
			ret = 0;
			break;
		case BMI_SGMTFILE_BDDATA:
		case BMI_SGMTFILE_EXEC:
			ath10k_warn(ar,
				    "firmware has unsupported segment:%d\n",
				    base_len);
			ret = -EINVAL;
			break;
		default:
			if (base_len > left) {
				/* sanity check */
				ath10k_warn(ar,
					    "firmware has invalid segment length, %d > %d\n",
					    base_len, left);
				ret = -EINVAL;
				break;
			}

			ret = ath10k_hw_diag_segment_download(ar,
							      buf,
							      base_addr,
							      base_len);

			if (ret)
				ath10k_warn(ar,
					    "failed to download firmware via diag interface:%d\n",
					    ret);
			break;
		}

		if (ret || sgmt_end)
			break;

		metadata = (struct bmi_segmented_metadata *)(buf + base_len);
		left -= base_len;
	}

	if (ret == 0)
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "boot firmware fast diag download successfully.\n");
	return ret;
}
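
/* The segmented image parsed above is laid out as a bmi_segmented_file_header
 * (magic number, file_flags, ...) followed by a sequence of
 * bmi_segmented_metadata records, each carrying a target address, a length
 * (or one of the special BMI_SGMTFILE_* markers) and the segment payload.
 */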

static int ath10k_htt_tx_rssi_enable(struct htt_resp *resp)
{
	return (resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_DATA_RSSI);
}

static int ath10k_htt_tx_rssi_enable_wcn3990(struct htt_resp *resp)
{
	return (resp->data_tx_completion.flags2 &
		HTT_TX_DATA_RSSI_ENABLE_WCN3990);
}

static int ath10k_get_htt_tx_data_rssi_pad(struct htt_resp *resp)
{
	struct htt_data_tx_completion_ext extd;
	int pad_bytes = 0;

	if (resp->data_tx_completion.flags2 & HTT_TX_DATA_APPEND_RETRIES)
		pad_bytes += sizeof(extd.a_retries) /
			     sizeof(extd.msdus_rssi[0]);

	if (resp->data_tx_completion.flags2 & HTT_TX_DATA_APPEND_TIMESTAMP)
		pad_bytes += sizeof(extd.t_stamp) / sizeof(extd.msdus_rssi[0]);

	return pad_bytes;
}

const struct ath10k_hw_ops qca988x_ops = {
	.set_coverage_class = ath10k_hw_qca988x_set_coverage_class,
	.is_rssi_enable = ath10k_htt_tx_rssi_enable,
};

static int ath10k_qca99x0_rx_desc_get_l3_pad_bytes(struct htt_rx_desc *rxd)
{
	return MS(__le32_to_cpu(rxd->msdu_end.qca99x0.info1),
		  RX_MSDU_END_INFO1_L3_HDR_PAD);
}

static bool ath10k_qca99x0_rx_desc_msdu_limit_error(struct htt_rx_desc *rxd)
{
	return !!(rxd->msdu_end.common.info0 &
		  __cpu_to_le32(RX_MSDU_END_INFO0_MSDU_LIMIT_ERR));
}

const struct ath10k_hw_ops qca99x0_ops = {
	.rx_desc_get_l3_pad_bytes = ath10k_qca99x0_rx_desc_get_l3_pad_bytes,
	.rx_desc_get_msdu_limit_error = ath10k_qca99x0_rx_desc_msdu_limit_error,
	.is_rssi_enable = ath10k_htt_tx_rssi_enable,
};

const struct ath10k_hw_ops qca6174_ops = {
	.set_coverage_class = ath10k_hw_qca988x_set_coverage_class,
	.enable_pll_clk = ath10k_hw_qca6174_enable_pll_clock,
	.is_rssi_enable = ath10k_htt_tx_rssi_enable,
};

const struct ath10k_hw_ops qca6174_sdio_ops = {
	.enable_pll_clk = ath10k_hw_qca6174_enable_pll_clock,
};

const struct ath10k_hw_ops wcn3990_ops = {
	.tx_data_rssi_pad_bytes = ath10k_get_htt_tx_data_rssi_pad,
	.is_rssi_enable = ath10k_htt_tx_rssi_enable_wcn3990,
};