1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2018-2019 Realtek Corporation
18 union phy_table_tile
{
19 struct rtw_phy_cond cond
;
20 struct phy_cfg_pair cfg
;
23 static const u32 db_invert_table
[12][8] = {
29 1007, 1268, 1596, 2010},
31 794, 1000, 1259, 1585},
32 {1995, 2512, 3162, 3981,
33 5012, 6310, 7943, 10000},
34 {12589, 15849, 19953, 25119,
35 31623, 39811, 50119, 63098},
36 {79433, 100000, 125893, 158489,
37 199526, 251189, 316228, 398107},
38 {501187, 630957, 794328, 1000000,
39 1258925, 1584893, 1995262, 2511886},
40 {3162278, 3981072, 5011872, 6309573,
41 7943282, 1000000, 12589254, 15848932},
42 {19952623, 25118864, 31622777, 39810717,
43 50118723, 63095734, 79432823, 100000000},
44 {125892541, 158489319, 199526232, 251188643,
45 316227766, 398107171, 501187234, 630957345},
46 {794328235, 1000000000, 1258925412, 1584893192,
47 1995262315, 2511886432U, 3162277660U, 3981071706U}
50 u8 rtw_cck_rates
[] = { DESC_RATE1M
, DESC_RATE2M
, DESC_RATE5_5M
, DESC_RATE11M
};
51 u8 rtw_ofdm_rates
[] = {
52 DESC_RATE6M
, DESC_RATE9M
, DESC_RATE12M
,
53 DESC_RATE18M
, DESC_RATE24M
, DESC_RATE36M
,
54 DESC_RATE48M
, DESC_RATE54M
56 u8 rtw_ht_1s_rates
[] = {
57 DESC_RATEMCS0
, DESC_RATEMCS1
, DESC_RATEMCS2
,
58 DESC_RATEMCS3
, DESC_RATEMCS4
, DESC_RATEMCS5
,
59 DESC_RATEMCS6
, DESC_RATEMCS7
61 u8 rtw_ht_2s_rates
[] = {
62 DESC_RATEMCS8
, DESC_RATEMCS9
, DESC_RATEMCS10
,
63 DESC_RATEMCS11
, DESC_RATEMCS12
, DESC_RATEMCS13
,
64 DESC_RATEMCS14
, DESC_RATEMCS15
66 u8 rtw_vht_1s_rates
[] = {
67 DESC_RATEVHT1SS_MCS0
, DESC_RATEVHT1SS_MCS1
,
68 DESC_RATEVHT1SS_MCS2
, DESC_RATEVHT1SS_MCS3
,
69 DESC_RATEVHT1SS_MCS4
, DESC_RATEVHT1SS_MCS5
,
70 DESC_RATEVHT1SS_MCS6
, DESC_RATEVHT1SS_MCS7
,
71 DESC_RATEVHT1SS_MCS8
, DESC_RATEVHT1SS_MCS9
73 u8 rtw_vht_2s_rates
[] = {
74 DESC_RATEVHT2SS_MCS0
, DESC_RATEVHT2SS_MCS1
,
75 DESC_RATEVHT2SS_MCS2
, DESC_RATEVHT2SS_MCS3
,
76 DESC_RATEVHT2SS_MCS4
, DESC_RATEVHT2SS_MCS5
,
77 DESC_RATEVHT2SS_MCS6
, DESC_RATEVHT2SS_MCS7
,
78 DESC_RATEVHT2SS_MCS8
, DESC_RATEVHT2SS_MCS9
80 u8
*rtw_rate_section
[RTW_RATE_SECTION_MAX
] = {
81 rtw_cck_rates
, rtw_ofdm_rates
,
82 rtw_ht_1s_rates
, rtw_ht_2s_rates
,
83 rtw_vht_1s_rates
, rtw_vht_2s_rates
85 EXPORT_SYMBOL(rtw_rate_section
);
87 u8 rtw_rate_size
[RTW_RATE_SECTION_MAX
] = {
88 ARRAY_SIZE(rtw_cck_rates
),
89 ARRAY_SIZE(rtw_ofdm_rates
),
90 ARRAY_SIZE(rtw_ht_1s_rates
),
91 ARRAY_SIZE(rtw_ht_2s_rates
),
92 ARRAY_SIZE(rtw_vht_1s_rates
),
93 ARRAY_SIZE(rtw_vht_2s_rates
)
95 EXPORT_SYMBOL(rtw_rate_size
);
97 static const u8 rtw_cck_size
= ARRAY_SIZE(rtw_cck_rates
);
98 static const u8 rtw_ofdm_size
= ARRAY_SIZE(rtw_ofdm_rates
);
99 static const u8 rtw_ht_1s_size
= ARRAY_SIZE(rtw_ht_1s_rates
);
100 static const u8 rtw_ht_2s_size
= ARRAY_SIZE(rtw_ht_2s_rates
);
101 static const u8 rtw_vht_1s_size
= ARRAY_SIZE(rtw_vht_1s_rates
);
102 static const u8 rtw_vht_2s_size
= ARRAY_SIZE(rtw_vht_2s_rates
);
104 enum rtw_phy_band_type
{
109 static void rtw_phy_cck_pd_init(struct rtw_dev
*rtwdev
)
111 struct rtw_dm_info
*dm_info
= &rtwdev
->dm_info
;
114 for (i
= 0; i
<= RTW_CHANNEL_WIDTH_40
; i
++) {
115 for (j
= 0; j
< RTW_RF_PATH_MAX
; j
++)
116 dm_info
->cck_pd_lv
[i
][j
] = CCK_PD_LV0
;
119 dm_info
->cck_fa_avg
= CCK_FA_AVG_RESET
;
122 void rtw_phy_init(struct rtw_dev
*rtwdev
)
124 struct rtw_chip_info
*chip
= rtwdev
->chip
;
125 struct rtw_dm_info
*dm_info
= &rtwdev
->dm_info
;
128 dm_info
->fa_history
[3] = 0;
129 dm_info
->fa_history
[2] = 0;
130 dm_info
->fa_history
[1] = 0;
131 dm_info
->fa_history
[0] = 0;
132 dm_info
->igi_bitmap
= 0;
133 dm_info
->igi_history
[3] = 0;
134 dm_info
->igi_history
[2] = 0;
135 dm_info
->igi_history
[1] = 0;
137 addr
= chip
->dig
[0].addr
;
138 mask
= chip
->dig
[0].mask
;
139 dm_info
->igi_history
[0] = rtw_read32_mask(rtwdev
, addr
, mask
);
140 rtw_phy_cck_pd_init(rtwdev
);
142 dm_info
->iqk
.done
= false;
144 EXPORT_SYMBOL(rtw_phy_init
);
146 void rtw_phy_dig_write(struct rtw_dev
*rtwdev
, u8 igi
)
148 struct rtw_chip_info
*chip
= rtwdev
->chip
;
149 struct rtw_hal
*hal
= &rtwdev
->hal
;
154 const struct rtw_hw_reg
*dig_cck
= &chip
->dig_cck
[0];
155 rtw_write32_mask(rtwdev
, dig_cck
->addr
, dig_cck
->mask
, igi
>> 1);
158 for (path
= 0; path
< hal
->rf_path_num
; path
++) {
159 addr
= chip
->dig
[path
].addr
;
160 mask
= chip
->dig
[path
].mask
;
161 rtw_write32_mask(rtwdev
, addr
, mask
, igi
);
165 static void rtw_phy_stat_false_alarm(struct rtw_dev
*rtwdev
)
167 struct rtw_chip_info
*chip
= rtwdev
->chip
;
169 chip
->ops
->false_alarm_statistics(rtwdev
);
172 #define RA_FLOOR_TABLE_SIZE 7
173 #define RA_FLOOR_UP_GAP 3
175 static u8
rtw_phy_get_rssi_level(u8 old_level
, u8 rssi
)
177 u8 table
[RA_FLOOR_TABLE_SIZE
] = {20, 34, 38, 42, 46, 50, 100};
181 for (i
= 0; i
< RA_FLOOR_TABLE_SIZE
; i
++)
183 table
[i
] += RA_FLOOR_UP_GAP
;
185 for (i
= 0; i
< RA_FLOOR_TABLE_SIZE
; i
++) {
186 if (rssi
< table
[i
]) {
195 struct rtw_phy_stat_iter_data
{
196 struct rtw_dev
*rtwdev
;
200 static void rtw_phy_stat_rssi_iter(void *data
, struct ieee80211_sta
*sta
)
202 struct rtw_phy_stat_iter_data
*iter_data
= data
;
203 struct rtw_dev
*rtwdev
= iter_data
->rtwdev
;
204 struct rtw_sta_info
*si
= (struct rtw_sta_info
*)sta
->drv_priv
;
207 rssi
= ewma_rssi_read(&si
->avg_rssi
);
208 si
->rssi_level
= rtw_phy_get_rssi_level(si
->rssi_level
, rssi
);
210 rtw_fw_send_rssi_info(rtwdev
, si
);
212 iter_data
->min_rssi
= min_t(u8
, rssi
, iter_data
->min_rssi
);
215 static void rtw_phy_stat_rssi(struct rtw_dev
*rtwdev
)
217 struct rtw_dm_info
*dm_info
= &rtwdev
->dm_info
;
218 struct rtw_phy_stat_iter_data data
= {};
220 data
.rtwdev
= rtwdev
;
221 data
.min_rssi
= U8_MAX
;
222 rtw_iterate_stas_atomic(rtwdev
, rtw_phy_stat_rssi_iter
, &data
);
224 dm_info
->pre_min_rssi
= dm_info
->min_rssi
;
225 dm_info
->min_rssi
= data
.min_rssi
;
228 static void rtw_phy_stat_rate_cnt(struct rtw_dev
*rtwdev
)
230 struct rtw_dm_info
*dm_info
= &rtwdev
->dm_info
;
232 dm_info
->last_pkt_count
= dm_info
->cur_pkt_count
;
233 memset(&dm_info
->cur_pkt_count
, 0, sizeof(dm_info
->cur_pkt_count
));
/* Collect all periodic PHY statistics in one place. */
static void rtw_phy_statistics(struct rtw_dev *rtwdev)
{
	rtw_phy_stat_rssi(rtwdev);
	rtw_phy_stat_false_alarm(rtwdev);
	rtw_phy_stat_rate_cnt(rtwdev);
}
243 #define DIG_PERF_FA_TH_LOW 250
244 #define DIG_PERF_FA_TH_HIGH 500
245 #define DIG_PERF_FA_TH_EXTRA_HIGH 750
246 #define DIG_PERF_MAX 0x5a
247 #define DIG_PERF_MID 0x40
248 #define DIG_CVRG_FA_TH_LOW 2000
249 #define DIG_CVRG_FA_TH_HIGH 4000
250 #define DIG_CVRG_FA_TH_EXTRA_HIGH 5000
251 #define DIG_CVRG_MAX 0x2a
252 #define DIG_CVRG_MID 0x26
253 #define DIG_CVRG_MIN 0x1c
254 #define DIG_RSSI_GAIN_OFFSET 15
257 rtw_phy_dig_check_damping(struct rtw_dm_info
*dm_info
)
259 u16 fa_lo
= DIG_PERF_FA_TH_LOW
;
260 u16 fa_hi
= DIG_PERF_FA_TH_HIGH
;
267 bool damping
= false;
269 min_rssi
= dm_info
->min_rssi
;
270 if (dm_info
->damping
) {
271 damping_rssi
= dm_info
->damping_rssi
;
272 diff
= min_rssi
> damping_rssi
? min_rssi
- damping_rssi
:
273 damping_rssi
- min_rssi
;
274 if (diff
> 3 || dm_info
->damping_cnt
++ > 20) {
275 dm_info
->damping
= false;
282 igi_history
= dm_info
->igi_history
;
283 fa_history
= dm_info
->fa_history
;
284 igi_bitmap
= dm_info
->igi_bitmap
& 0xf;
285 switch (igi_bitmap
) {
287 /* down -> up -> down -> up */
288 if (igi_history
[0] > igi_history
[1] &&
289 igi_history
[2] > igi_history
[3] &&
290 igi_history
[0] - igi_history
[1] >= 2 &&
291 igi_history
[2] - igi_history
[3] >= 2 &&
292 fa_history
[0] > fa_hi
&& fa_history
[1] < fa_lo
&&
293 fa_history
[2] > fa_hi
&& fa_history
[3] < fa_lo
)
297 /* up -> down -> down -> up */
298 if (igi_history
[0] > igi_history
[1] &&
299 igi_history
[3] > igi_history
[2] &&
300 igi_history
[0] - igi_history
[1] >= 4 &&
301 igi_history
[3] - igi_history
[2] >= 2 &&
302 fa_history
[0] > fa_hi
&& fa_history
[1] < fa_lo
&&
303 fa_history
[2] < fa_lo
&& fa_history
[3] > fa_hi
)
311 dm_info
->damping
= true;
312 dm_info
->damping_cnt
= 0;
313 dm_info
->damping_rssi
= min_rssi
;
319 static void rtw_phy_dig_get_boundary(struct rtw_dm_info
*dm_info
,
320 u8
*upper
, u8
*lower
, bool linked
)
322 u8 dig_max
, dig_min
, dig_mid
;
326 dig_max
= DIG_PERF_MAX
;
327 dig_mid
= DIG_PERF_MID
;
328 /* 22B=0x1c, 22C=0x20 */
330 min_rssi
= max_t(u8
, dm_info
->min_rssi
, dig_min
);
332 dig_max
= DIG_CVRG_MAX
;
333 dig_mid
= DIG_CVRG_MID
;
334 dig_min
= DIG_CVRG_MIN
;
338 /* DIG MAX should be bounded by minimum RSSI with offset +15 */
339 dig_max
= min_t(u8
, dig_max
, min_rssi
+ DIG_RSSI_GAIN_OFFSET
);
341 *lower
= clamp_t(u8
, min_rssi
, dig_min
, dig_mid
);
342 *upper
= clamp_t(u8
, *lower
+ DIG_RSSI_GAIN_OFFSET
, dig_min
, dig_max
);
345 static void rtw_phy_dig_get_threshold(struct rtw_dm_info
*dm_info
,
346 u16
*fa_th
, u8
*step
, bool linked
)
348 u8 min_rssi
, pre_min_rssi
;
350 min_rssi
= dm_info
->min_rssi
;
351 pre_min_rssi
= dm_info
->pre_min_rssi
;
357 fa_th
[0] = DIG_PERF_FA_TH_EXTRA_HIGH
;
358 fa_th
[1] = DIG_PERF_FA_TH_HIGH
;
359 fa_th
[2] = DIG_PERF_FA_TH_LOW
;
360 if (pre_min_rssi
> min_rssi
) {
366 fa_th
[0] = DIG_CVRG_FA_TH_EXTRA_HIGH
;
367 fa_th
[1] = DIG_CVRG_FA_TH_HIGH
;
368 fa_th
[2] = DIG_CVRG_FA_TH_LOW
;
372 static void rtw_phy_dig_recorder(struct rtw_dm_info
*dm_info
, u8 igi
, u16 fa
)
379 igi_bitmap
= dm_info
->igi_bitmap
<< 1 & 0xfe;
380 igi_history
= dm_info
->igi_history
;
381 fa_history
= dm_info
->fa_history
;
383 up
= igi
> igi_history
[0];
386 igi_history
[3] = igi_history
[2];
387 igi_history
[2] = igi_history
[1];
388 igi_history
[1] = igi_history
[0];
389 igi_history
[0] = igi
;
391 fa_history
[3] = fa_history
[2];
392 fa_history
[2] = fa_history
[1];
393 fa_history
[1] = fa_history
[0];
396 dm_info
->igi_bitmap
= igi_bitmap
;
399 static void rtw_phy_dig(struct rtw_dev
*rtwdev
)
401 struct rtw_dm_info
*dm_info
= &rtwdev
->dm_info
;
402 u8 upper_bound
, lower_bound
;
404 u16 fa_th
[3], fa_cnt
;
409 if (test_bit(RTW_FLAG_DIG_DISABLE
, rtwdev
->flags
))
412 if (rtw_phy_dig_check_damping(dm_info
))
415 linked
= !!rtwdev
->sta_cnt
;
417 fa_cnt
= dm_info
->total_fa_cnt
;
418 pre_igi
= dm_info
->igi_history
[0];
420 rtw_phy_dig_get_threshold(dm_info
, fa_th
, step
, linked
);
422 /* test the false alarm count from the highest threshold level first,
423 * and increase it by corresponding step size
425 * note that the step size is offset by -2, compensate it afterall
428 for (level
= 0; level
< 3; level
++) {
429 if (fa_cnt
> fa_th
[level
]) {
430 cur_igi
+= step
[level
];
436 /* calculate the upper/lower bound by the minimum rssi we have among
437 * the peers connected with us, meanwhile make sure the igi value does
438 * not beyond the hardware limitation
440 rtw_phy_dig_get_boundary(dm_info
, &upper_bound
, &lower_bound
, linked
);
441 cur_igi
= clamp_t(u8
, cur_igi
, lower_bound
, upper_bound
);
443 /* record current igi value and false alarm statistics for further
444 * damping checks, and record the trend of igi values
446 rtw_phy_dig_recorder(dm_info
, cur_igi
, fa_cnt
);
448 if (cur_igi
!= pre_igi
)
449 rtw_phy_dig_write(rtwdev
, cur_igi
);
452 static void rtw_phy_ra_info_update_iter(void *data
, struct ieee80211_sta
*sta
)
454 struct rtw_dev
*rtwdev
= data
;
455 struct rtw_sta_info
*si
= (struct rtw_sta_info
*)sta
->drv_priv
;
457 rtw_update_sta_info(rtwdev
, si
);
460 static void rtw_phy_ra_info_update(struct rtw_dev
*rtwdev
)
462 if (rtwdev
->watch_dog_cnt
& 0x3)
465 rtw_iterate_stas_atomic(rtwdev
, rtw_phy_ra_info_update_iter
, rtwdev
);
468 static void rtw_phy_dpk_track(struct rtw_dev
*rtwdev
)
470 struct rtw_chip_info
*chip
= rtwdev
->chip
;
472 if (chip
->ops
->dpk_track
)
473 chip
->ops
->dpk_track(rtwdev
);
476 #define CCK_PD_FA_LV1_MIN 1000
477 #define CCK_PD_FA_LV0_MAX 500
479 static u8
rtw_phy_cck_pd_lv_unlink(struct rtw_dev
*rtwdev
)
481 struct rtw_dm_info
*dm_info
= &rtwdev
->dm_info
;
482 u32 cck_fa_avg
= dm_info
->cck_fa_avg
;
484 if (cck_fa_avg
> CCK_PD_FA_LV1_MIN
)
487 if (cck_fa_avg
< CCK_PD_FA_LV0_MAX
)
490 return CCK_PD_LV_MAX
;
493 #define CCK_PD_IGI_LV4_VAL 0x38
494 #define CCK_PD_IGI_LV3_VAL 0x2a
495 #define CCK_PD_IGI_LV2_VAL 0x24
496 #define CCK_PD_RSSI_LV4_VAL 32
497 #define CCK_PD_RSSI_LV3_VAL 32
498 #define CCK_PD_RSSI_LV2_VAL 24
500 static u8
rtw_phy_cck_pd_lv_link(struct rtw_dev
*rtwdev
)
502 struct rtw_dm_info
*dm_info
= &rtwdev
->dm_info
;
503 u8 igi
= dm_info
->igi_history
[0];
504 u8 rssi
= dm_info
->min_rssi
;
505 u32 cck_fa_avg
= dm_info
->cck_fa_avg
;
507 if (igi
> CCK_PD_IGI_LV4_VAL
&& rssi
> CCK_PD_RSSI_LV4_VAL
)
509 if (igi
> CCK_PD_IGI_LV3_VAL
&& rssi
> CCK_PD_RSSI_LV3_VAL
)
511 if (igi
> CCK_PD_IGI_LV2_VAL
|| rssi
> CCK_PD_RSSI_LV2_VAL
)
513 if (cck_fa_avg
> CCK_PD_FA_LV1_MIN
)
515 if (cck_fa_avg
< CCK_PD_FA_LV0_MAX
)
518 return CCK_PD_LV_MAX
;
521 static u8
rtw_phy_cck_pd_lv(struct rtw_dev
*rtwdev
)
523 if (!rtw_is_assoc(rtwdev
))
524 return rtw_phy_cck_pd_lv_unlink(rtwdev
);
526 return rtw_phy_cck_pd_lv_link(rtwdev
);
529 static void rtw_phy_cck_pd(struct rtw_dev
*rtwdev
)
531 struct rtw_dm_info
*dm_info
= &rtwdev
->dm_info
;
532 struct rtw_chip_info
*chip
= rtwdev
->chip
;
533 u32 cck_fa
= dm_info
->cck_fa_cnt
;
536 if (rtwdev
->hal
.current_band_type
!= RTW_BAND_2G
)
539 if (dm_info
->cck_fa_avg
== CCK_FA_AVG_RESET
)
540 dm_info
->cck_fa_avg
= cck_fa
;
542 dm_info
->cck_fa_avg
= (dm_info
->cck_fa_avg
* 3 + cck_fa
) >> 2;
544 rtw_dbg(rtwdev
, RTW_DBG_PHY
, "IGI=0x%x, rssi_min=%d, cck_fa=%d\n",
545 dm_info
->igi_history
[0], dm_info
->min_rssi
,
546 dm_info
->fa_history
[0]);
547 rtw_dbg(rtwdev
, RTW_DBG_PHY
, "cck_fa_avg=%d, cck_pd_default=%d\n",
548 dm_info
->cck_fa_avg
, dm_info
->cck_pd_default
);
550 level
= rtw_phy_cck_pd_lv(rtwdev
);
552 if (level
>= CCK_PD_LV_MAX
)
555 if (chip
->ops
->cck_pd_set
)
556 chip
->ops
->cck_pd_set(rtwdev
, level
);
559 static void rtw_phy_pwr_track(struct rtw_dev
*rtwdev
)
561 rtwdev
->chip
->ops
->pwr_track(rtwdev
);
/* Watchdog entry point: gather statistics first, then run each dynamic
 * mechanism that consumes them.
 * NOTE(review): the rtw_phy_dig() call is reconstructed from the DIG
 * machinery defined above — confirm it belongs in this sequence.
 */
void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev)
{
	/* for further calculation */
	rtw_phy_statistics(rtwdev);
	rtw_phy_dig(rtwdev);
	rtw_phy_cck_pd(rtwdev);
	rtw_phy_ra_info_update(rtwdev);
	rtw_phy_dpk_track(rtwdev);
	rtw_phy_pwr_track(rtwdev);
}
577 static u8
rtw_phy_power_2_db(s8 power
)
579 if (power
<= -100 || power
>= 20)
587 static u64
rtw_phy_db_2_linear(u8 power_db
)
594 else if (power_db
< 1)
598 i
= (power_db
- 1) >> 3;
599 j
= (power_db
- 1) - (i
<< 3);
601 linear
= db_invert_table
[i
][j
];
602 linear
= i
> 2 ? linear
<< FRAC_BITS
: linear
;
607 static u8
rtw_phy_linear_2_db(u64 linear
)
613 if (linear
>= db_invert_table
[11][7])
614 return 96; /* maximum 96 dB */
616 for (i
= 0; i
< 12; i
++) {
617 if (i
<= 2 && (linear
<< FRAC_BITS
) <= db_invert_table
[i
][7])
619 else if (i
> 2 && linear
<= db_invert_table
[i
][7])
623 for (j
= 0; j
< 8; j
++) {
624 if (i
<= 2 && (linear
<< FRAC_BITS
) <= db_invert_table
[i
][j
])
626 else if (i
> 2 && linear
<= db_invert_table
[i
][j
])
630 if (j
== 0 && i
== 0)
635 if (db_invert_table
[i
][0] - linear
>
636 linear
- db_invert_table
[i
- 1][7]) {
641 if (db_invert_table
[3][0] - linear
>
642 linear
- db_invert_table
[2][7]) {
648 if (db_invert_table
[i
][j
] - linear
>
649 linear
- db_invert_table
[i
][j
- 1]) {
654 dB
= (i
<< 3) + j
+ 1;
659 u8
rtw_phy_rf_power_2_rssi(s8
*rf_power
, u8 path_num
)
667 for (path
= 0; path
< path_num
; path
++) {
668 power
= rf_power
[path
];
669 power_db
= rtw_phy_power_2_db(power
);
670 linear
= rtw_phy_db_2_linear(power_db
);
674 sum
= (sum
+ (1 << (FRAC_BITS
- 1))) >> FRAC_BITS
;
680 sum
= ((sum
) + ((sum
) << 1) + ((sum
) << 3)) >> 5;
689 return rtw_phy_linear_2_db(sum
);
691 EXPORT_SYMBOL(rtw_phy_rf_power_2_rssi
);
693 u32
rtw_phy_read_rf(struct rtw_dev
*rtwdev
, enum rtw_rf_path rf_path
,
696 struct rtw_hal
*hal
= &rtwdev
->hal
;
697 struct rtw_chip_info
*chip
= rtwdev
->chip
;
698 const u32
*base_addr
= chip
->rf_base_addr
;
699 u32 val
, direct_addr
;
701 if (rf_path
>= hal
->rf_phy_num
) {
702 rtw_err(rtwdev
, "unsupported rf path (%d)\n", rf_path
);
707 direct_addr
= base_addr
[rf_path
] + (addr
<< 2);
710 val
= rtw_read32_mask(rtwdev
, direct_addr
, mask
);
714 EXPORT_SYMBOL(rtw_phy_read_rf
);
716 u32
rtw_phy_read_rf_sipi(struct rtw_dev
*rtwdev
, enum rtw_rf_path rf_path
,
719 struct rtw_hal
*hal
= &rtwdev
->hal
;
720 struct rtw_chip_info
*chip
= rtwdev
->chip
;
721 const struct rtw_rf_sipi_addr
*rf_sipi_addr
;
722 const struct rtw_rf_sipi_addr
*rf_sipi_addr_a
;
728 if (rf_path
>= hal
->rf_phy_num
) {
729 rtw_err(rtwdev
, "unsupported rf path (%d)\n", rf_path
);
733 if (!chip
->rf_sipi_read_addr
) {
734 rtw_err(rtwdev
, "rf_sipi_read_addr isn't defined\n");
738 rf_sipi_addr
= &chip
->rf_sipi_read_addr
[rf_path
];
739 rf_sipi_addr_a
= &chip
->rf_sipi_read_addr
[RF_PATH_A
];
743 val32
= rtw_read32(rtwdev
, rf_sipi_addr
->hssi_2
);
744 val32
= (val32
& ~LSSI_READ_ADDR_MASK
) | (addr
<< 23);
745 rtw_write32(rtwdev
, rf_sipi_addr
->hssi_2
, val32
);
747 /* toggle read edge of path A */
748 val32
= rtw_read32(rtwdev
, rf_sipi_addr_a
->hssi_2
);
749 rtw_write32(rtwdev
, rf_sipi_addr_a
->hssi_2
, val32
& ~LSSI_READ_EDGE_MASK
);
750 rtw_write32(rtwdev
, rf_sipi_addr_a
->hssi_2
, val32
| LSSI_READ_EDGE_MASK
);
754 en_pi
= rtw_read32_mask(rtwdev
, rf_sipi_addr
->hssi_1
, BIT(8));
755 r_addr
= en_pi
? rf_sipi_addr
->lssi_read_pi
: rf_sipi_addr
->lssi_read
;
757 val32
= rtw_read32_mask(rtwdev
, r_addr
, LSSI_READ_DATA_MASK
);
761 return (val32
& mask
) >> shift
;
763 EXPORT_SYMBOL(rtw_phy_read_rf_sipi
);
765 bool rtw_phy_write_rf_reg_sipi(struct rtw_dev
*rtwdev
, enum rtw_rf_path rf_path
,
766 u32 addr
, u32 mask
, u32 data
)
768 struct rtw_hal
*hal
= &rtwdev
->hal
;
769 struct rtw_chip_info
*chip
= rtwdev
->chip
;
770 u32
*sipi_addr
= chip
->rf_sipi_addr
;
775 if (rf_path
>= hal
->rf_phy_num
) {
776 rtw_err(rtwdev
, "unsupported rf path (%d)\n", rf_path
);
783 if (mask
!= RFREG_MASK
) {
784 old_data
= chip
->ops
->read_rf(rtwdev
, rf_path
, addr
, RFREG_MASK
);
786 if (old_data
== INV_RF_DATA
) {
787 rtw_err(rtwdev
, "Write fail, rf is disabled\n");
792 data
= ((old_data
) & (~mask
)) | (data
<< shift
);
795 data_and_addr
= ((addr
<< 20) | (data
& 0x000fffff)) & 0x0fffffff;
797 rtw_write32(rtwdev
, sipi_addr
[rf_path
], data_and_addr
);
803 EXPORT_SYMBOL(rtw_phy_write_rf_reg_sipi
);
805 bool rtw_phy_write_rf_reg(struct rtw_dev
*rtwdev
, enum rtw_rf_path rf_path
,
806 u32 addr
, u32 mask
, u32 data
)
808 struct rtw_hal
*hal
= &rtwdev
->hal
;
809 struct rtw_chip_info
*chip
= rtwdev
->chip
;
810 const u32
*base_addr
= chip
->rf_base_addr
;
813 if (rf_path
>= hal
->rf_phy_num
) {
814 rtw_err(rtwdev
, "unsupported rf path (%d)\n", rf_path
);
819 direct_addr
= base_addr
[rf_path
] + (addr
<< 2);
822 rtw_write32_mask(rtwdev
, direct_addr
, mask
, data
);
829 bool rtw_phy_write_rf_reg_mix(struct rtw_dev
*rtwdev
, enum rtw_rf_path rf_path
,
830 u32 addr
, u32 mask
, u32 data
)
833 return rtw_phy_write_rf_reg(rtwdev
, rf_path
, addr
, mask
, data
);
835 return rtw_phy_write_rf_reg_sipi(rtwdev
, rf_path
, addr
, mask
, data
);
837 EXPORT_SYMBOL(rtw_phy_write_rf_reg_mix
);
839 void rtw_phy_setup_phy_cond(struct rtw_dev
*rtwdev
, u32 pkg
)
841 struct rtw_hal
*hal
= &rtwdev
->hal
;
842 struct rtw_efuse
*efuse
= &rtwdev
->efuse
;
843 struct rtw_phy_cond cond
= {0};
845 cond
.cut
= hal
->cut_version
? hal
->cut_version
: 15;
846 cond
.pkg
= pkg
? pkg
: 15;
848 cond
.rfe
= efuse
->rfe_option
;
850 switch (rtw_hci_type(rtwdev
)) {
851 case RTW_HCI_TYPE_USB
:
852 cond
.intf
= INTF_USB
;
854 case RTW_HCI_TYPE_SDIO
:
855 cond
.intf
= INTF_SDIO
;
857 case RTW_HCI_TYPE_PCIE
:
859 cond
.intf
= INTF_PCIE
;
863 hal
->phy_cond
= cond
;
865 rtw_dbg(rtwdev
, RTW_DBG_PHY
, "phy cond=0x%08x\n", *((u32
*)&hal
->phy_cond
));
868 static bool check_positive(struct rtw_dev
*rtwdev
, struct rtw_phy_cond cond
)
870 struct rtw_hal
*hal
= &rtwdev
->hal
;
871 struct rtw_phy_cond drv_cond
= hal
->phy_cond
;
873 if (cond
.cut
&& cond
.cut
!= drv_cond
.cut
)
876 if (cond
.pkg
&& cond
.pkg
!= drv_cond
.pkg
)
879 if (cond
.intf
&& cond
.intf
!= drv_cond
.intf
)
882 if (cond
.rfe
!= drv_cond
.rfe
)
888 void rtw_parse_tbl_phy_cond(struct rtw_dev
*rtwdev
, const struct rtw_table
*tbl
)
890 const union phy_table_tile
*p
= tbl
->data
;
891 const union phy_table_tile
*end
= p
+ tbl
->size
/ 2;
892 struct rtw_phy_cond pos_cond
= {0};
893 bool is_matched
= true, is_skipped
= false;
895 BUILD_BUG_ON(sizeof(union phy_table_tile
) != sizeof(struct phy_cfg_pair
));
897 for (; p
< end
; p
++) {
899 switch (p
->cond
.branch
) {
905 is_matched
= is_skipped
? false : true;
913 } else if (p
->cond
.neg
) {
915 if (check_positive(rtwdev
, pos_cond
)) {
925 } else if (is_matched
) {
926 (*tbl
->do_cfg
)(rtwdev
, tbl
, p
->cfg
.addr
, p
->cfg
.data
);
930 EXPORT_SYMBOL(rtw_parse_tbl_phy_cond
);
932 #define bcd_to_dec_pwr_by_rate(val, i) bcd2bin(val >> (i * 8))
934 static u8
tbl_to_dec_pwr_by_rate(struct rtw_dev
*rtwdev
, u32 hex
, u8 i
)
936 if (rtwdev
->chip
->is_pwr_by_rate_dec
)
937 return bcd_to_dec_pwr_by_rate(hex
, i
);
939 return (hex
>> (i
* 8)) & 0xFF;
943 rtw_phy_get_rate_values_of_txpwr_by_rate(struct rtw_dev
*rtwdev
,
944 u32 addr
, u32 mask
, u32 val
, u8
*rate
,
945 u8
*pwr_by_rate
, u8
*rate_num
)
952 rate
[0] = DESC_RATE6M
;
953 rate
[1] = DESC_RATE9M
;
954 rate
[2] = DESC_RATE12M
;
955 rate
[3] = DESC_RATE18M
;
956 for (i
= 0; i
< 4; ++i
)
957 pwr_by_rate
[i
] = tbl_to_dec_pwr_by_rate(rtwdev
, val
, i
);
962 rate
[0] = DESC_RATE24M
;
963 rate
[1] = DESC_RATE36M
;
964 rate
[2] = DESC_RATE48M
;
965 rate
[3] = DESC_RATE54M
;
966 for (i
= 0; i
< 4; ++i
)
967 pwr_by_rate
[i
] = tbl_to_dec_pwr_by_rate(rtwdev
, val
, i
);
971 rate
[0] = DESC_RATE1M
;
972 pwr_by_rate
[0] = bcd_to_dec_pwr_by_rate(val
, 1);
976 if (mask
== 0xffffff00) {
977 rate
[0] = DESC_RATE2M
;
978 rate
[1] = DESC_RATE5_5M
;
979 rate
[2] = DESC_RATE11M
;
980 for (i
= 1; i
< 4; ++i
)
982 tbl_to_dec_pwr_by_rate(rtwdev
, val
, i
);
984 } else if (mask
== 0x000000ff) {
985 rate
[0] = DESC_RATE11M
;
986 pwr_by_rate
[0] = bcd_to_dec_pwr_by_rate(val
, 0);
992 rate
[0] = DESC_RATEMCS0
;
993 rate
[1] = DESC_RATEMCS1
;
994 rate
[2] = DESC_RATEMCS2
;
995 rate
[3] = DESC_RATEMCS3
;
996 for (i
= 0; i
< 4; ++i
)
997 pwr_by_rate
[i
] = tbl_to_dec_pwr_by_rate(rtwdev
, val
, i
);
1002 rate
[0] = DESC_RATEMCS4
;
1003 rate
[1] = DESC_RATEMCS5
;
1004 rate
[2] = DESC_RATEMCS6
;
1005 rate
[3] = DESC_RATEMCS7
;
1006 for (i
= 0; i
< 4; ++i
)
1007 pwr_by_rate
[i
] = tbl_to_dec_pwr_by_rate(rtwdev
, val
, i
);
1012 rate
[0] = DESC_RATEMCS8
;
1013 rate
[1] = DESC_RATEMCS9
;
1014 rate
[2] = DESC_RATEMCS10
;
1015 rate
[3] = DESC_RATEMCS11
;
1016 for (i
= 0; i
< 4; ++i
)
1017 pwr_by_rate
[i
] = tbl_to_dec_pwr_by_rate(rtwdev
, val
, i
);
1022 rate
[0] = DESC_RATEMCS12
;
1023 rate
[1] = DESC_RATEMCS13
;
1024 rate
[2] = DESC_RATEMCS14
;
1025 rate
[3] = DESC_RATEMCS15
;
1026 for (i
= 0; i
< 4; ++i
)
1027 pwr_by_rate
[i
] = tbl_to_dec_pwr_by_rate(rtwdev
, val
, i
);
1031 rate
[0] = DESC_RATE1M
;
1032 rate
[1] = DESC_RATE2M
;
1033 rate
[2] = DESC_RATE5_5M
;
1034 for (i
= 1; i
< 4; ++i
)
1035 pwr_by_rate
[i
- 1] = tbl_to_dec_pwr_by_rate(rtwdev
,
1043 rate
[0] = DESC_RATE1M
;
1044 rate
[1] = DESC_RATE2M
;
1045 rate
[2] = DESC_RATE5_5M
;
1046 rate
[3] = DESC_RATE11M
;
1047 for (i
= 0; i
< 4; ++i
)
1048 pwr_by_rate
[i
] = tbl_to_dec_pwr_by_rate(rtwdev
, val
, i
);
1055 rate
[0] = DESC_RATE6M
;
1056 rate
[1] = DESC_RATE9M
;
1057 rate
[2] = DESC_RATE12M
;
1058 rate
[3] = DESC_RATE18M
;
1059 for (i
= 0; i
< 4; ++i
)
1060 pwr_by_rate
[i
] = tbl_to_dec_pwr_by_rate(rtwdev
, val
, i
);
1067 rate
[0] = DESC_RATE24M
;
1068 rate
[1] = DESC_RATE36M
;
1069 rate
[2] = DESC_RATE48M
;
1070 rate
[3] = DESC_RATE54M
;
1071 for (i
= 0; i
< 4; ++i
)
1072 pwr_by_rate
[i
] = tbl_to_dec_pwr_by_rate(rtwdev
, val
, i
);
1079 rate
[0] = DESC_RATEMCS0
;
1080 rate
[1] = DESC_RATEMCS1
;
1081 rate
[2] = DESC_RATEMCS2
;
1082 rate
[3] = DESC_RATEMCS3
;
1083 for (i
= 0; i
< 4; ++i
)
1084 pwr_by_rate
[i
] = tbl_to_dec_pwr_by_rate(rtwdev
, val
, i
);
1091 rate
[0] = DESC_RATEMCS4
;
1092 rate
[1] = DESC_RATEMCS5
;
1093 rate
[2] = DESC_RATEMCS6
;
1094 rate
[3] = DESC_RATEMCS7
;
1095 for (i
= 0; i
< 4; ++i
)
1096 pwr_by_rate
[i
] = tbl_to_dec_pwr_by_rate(rtwdev
, val
, i
);
1103 rate
[0] = DESC_RATEMCS8
;
1104 rate
[1] = DESC_RATEMCS9
;
1105 rate
[2] = DESC_RATEMCS10
;
1106 rate
[3] = DESC_RATEMCS11
;
1107 for (i
= 0; i
< 4; ++i
)
1108 pwr_by_rate
[i
] = tbl_to_dec_pwr_by_rate(rtwdev
, val
, i
);
1115 rate
[0] = DESC_RATEMCS12
;
1116 rate
[1] = DESC_RATEMCS13
;
1117 rate
[2] = DESC_RATEMCS14
;
1118 rate
[3] = DESC_RATEMCS15
;
1119 for (i
= 0; i
< 4; ++i
)
1120 pwr_by_rate
[i
] = tbl_to_dec_pwr_by_rate(rtwdev
, val
, i
);
1127 rate
[0] = DESC_RATEVHT1SS_MCS0
;
1128 rate
[1] = DESC_RATEVHT1SS_MCS1
;
1129 rate
[2] = DESC_RATEVHT1SS_MCS2
;
1130 rate
[3] = DESC_RATEVHT1SS_MCS3
;
1131 for (i
= 0; i
< 4; ++i
)
1132 pwr_by_rate
[i
] = tbl_to_dec_pwr_by_rate(rtwdev
, val
, i
);
1139 rate
[0] = DESC_RATEVHT1SS_MCS4
;
1140 rate
[1] = DESC_RATEVHT1SS_MCS5
;
1141 rate
[2] = DESC_RATEVHT1SS_MCS6
;
1142 rate
[3] = DESC_RATEVHT1SS_MCS7
;
1143 for (i
= 0; i
< 4; ++i
)
1144 pwr_by_rate
[i
] = tbl_to_dec_pwr_by_rate(rtwdev
, val
, i
);
1151 rate
[0] = DESC_RATEVHT1SS_MCS8
;
1152 rate
[1] = DESC_RATEVHT1SS_MCS9
;
1153 rate
[2] = DESC_RATEVHT2SS_MCS0
;
1154 rate
[3] = DESC_RATEVHT2SS_MCS1
;
1155 for (i
= 0; i
< 4; ++i
)
1156 pwr_by_rate
[i
] = tbl_to_dec_pwr_by_rate(rtwdev
, val
, i
);
1163 rate
[0] = DESC_RATEVHT2SS_MCS2
;
1164 rate
[1] = DESC_RATEVHT2SS_MCS3
;
1165 rate
[2] = DESC_RATEVHT2SS_MCS4
;
1166 rate
[3] = DESC_RATEVHT2SS_MCS5
;
1167 for (i
= 0; i
< 4; ++i
)
1168 pwr_by_rate
[i
] = tbl_to_dec_pwr_by_rate(rtwdev
, val
, i
);
1175 rate
[0] = DESC_RATEVHT2SS_MCS6
;
1176 rate
[1] = DESC_RATEVHT2SS_MCS7
;
1177 rate
[2] = DESC_RATEVHT2SS_MCS8
;
1178 rate
[3] = DESC_RATEVHT2SS_MCS9
;
1179 for (i
= 0; i
< 4; ++i
)
1180 pwr_by_rate
[i
] = tbl_to_dec_pwr_by_rate(rtwdev
, val
, i
);
1187 rate
[0] = DESC_RATEMCS16
;
1188 rate
[1] = DESC_RATEMCS17
;
1189 rate
[2] = DESC_RATEMCS18
;
1190 rate
[3] = DESC_RATEMCS19
;
1191 for (i
= 0; i
< 4; ++i
)
1192 pwr_by_rate
[i
] = tbl_to_dec_pwr_by_rate(rtwdev
, val
, i
);
1199 rate
[0] = DESC_RATEMCS20
;
1200 rate
[1] = DESC_RATEMCS21
;
1201 rate
[2] = DESC_RATEMCS22
;
1202 rate
[3] = DESC_RATEMCS23
;
1203 for (i
= 0; i
< 4; ++i
)
1204 pwr_by_rate
[i
] = tbl_to_dec_pwr_by_rate(rtwdev
, val
, i
);
1211 rate
[0] = DESC_RATEVHT3SS_MCS0
;
1212 rate
[1] = DESC_RATEVHT3SS_MCS1
;
1213 rate
[2] = DESC_RATEVHT3SS_MCS2
;
1214 rate
[3] = DESC_RATEVHT3SS_MCS3
;
1215 for (i
= 0; i
< 4; ++i
)
1216 pwr_by_rate
[i
] = tbl_to_dec_pwr_by_rate(rtwdev
, val
, i
);
1223 rate
[0] = DESC_RATEVHT3SS_MCS4
;
1224 rate
[1] = DESC_RATEVHT3SS_MCS5
;
1225 rate
[2] = DESC_RATEVHT3SS_MCS6
;
1226 rate
[3] = DESC_RATEVHT3SS_MCS7
;
1227 for (i
= 0; i
< 4; ++i
)
1228 pwr_by_rate
[i
] = tbl_to_dec_pwr_by_rate(rtwdev
, val
, i
);
1235 rate
[0] = DESC_RATEVHT3SS_MCS8
;
1236 rate
[1] = DESC_RATEVHT3SS_MCS9
;
1237 for (i
= 0; i
< 2; ++i
)
1238 pwr_by_rate
[i
] = tbl_to_dec_pwr_by_rate(rtwdev
, val
, i
);
1242 rtw_warn(rtwdev
, "invalid tx power index addr 0x%08x\n", addr
);
1247 static void rtw_phy_store_tx_power_by_rate(struct rtw_dev
*rtwdev
,
1248 u32 band
, u32 rfpath
, u32 txnum
,
1249 u32 regaddr
, u32 bitmask
, u32 data
)
1251 struct rtw_hal
*hal
= &rtwdev
->hal
;
1254 u8 rates
[RTW_RF_PATH_MAX
] = {0};
1256 s8 pwr_by_rate
[RTW_RF_PATH_MAX
] = {0};
1259 rtw_phy_get_rate_values_of_txpwr_by_rate(rtwdev
, regaddr
, bitmask
, data
,
1260 rates
, pwr_by_rate
, &rate_num
);
1262 if (WARN_ON(rfpath
>= RTW_RF_PATH_MAX
||
1263 (band
!= PHY_BAND_2G
&& band
!= PHY_BAND_5G
) ||
1264 rate_num
> RTW_RF_PATH_MAX
))
1267 for (i
= 0; i
< rate_num
; i
++) {
1268 offset
= pwr_by_rate
[i
];
1270 if (band
== PHY_BAND_2G
)
1271 hal
->tx_pwr_by_rate_offset_2g
[rfpath
][rate
] = offset
;
1272 else if (band
== PHY_BAND_5G
)
1273 hal
->tx_pwr_by_rate_offset_5g
[rfpath
][rate
] = offset
;
1279 void rtw_parse_tbl_bb_pg(struct rtw_dev
*rtwdev
, const struct rtw_table
*tbl
)
1281 const struct rtw_phy_pg_cfg_pair
*p
= tbl
->data
;
1282 const struct rtw_phy_pg_cfg_pair
*end
= p
+ tbl
->size
;
1284 for (; p
< end
; p
++) {
1285 if (p
->addr
== 0xfe || p
->addr
== 0xffe) {
1289 rtw_phy_store_tx_power_by_rate(rtwdev
, p
->band
, p
->rf_path
,
1290 p
->tx_num
, p
->addr
, p
->bitmask
,
1294 EXPORT_SYMBOL(rtw_parse_tbl_bb_pg
);
1296 static const u8 rtw_channel_idx_5g
[RTW_MAX_CHANNEL_NUM_5G
] = {
1297 36, 38, 40, 42, 44, 46, 48, /* Band 1 */
1298 52, 54, 56, 58, 60, 62, 64, /* Band 2 */
1299 100, 102, 104, 106, 108, 110, 112, /* Band 3 */
1300 116, 118, 120, 122, 124, 126, 128, /* Band 3 */
1301 132, 134, 136, 138, 140, 142, 144, /* Band 3 */
1302 149, 151, 153, 155, 157, 159, 161, /* Band 4 */
1303 165, 167, 169, 171, 173, 175, 177}; /* Band 4 */
1305 static int rtw_channel_to_idx(u8 band
, u8 channel
)
1310 if (band
== PHY_BAND_2G
) {
1311 ch_idx
= channel
- 1;
1312 n_channel
= RTW_MAX_CHANNEL_NUM_2G
;
1313 } else if (band
== PHY_BAND_5G
) {
1314 n_channel
= RTW_MAX_CHANNEL_NUM_5G
;
1315 for (ch_idx
= 0; ch_idx
< n_channel
; ch_idx
++)
1316 if (rtw_channel_idx_5g
[ch_idx
] == channel
)
1322 if (ch_idx
>= n_channel
)
1328 static void rtw_phy_set_tx_power_limit(struct rtw_dev
*rtwdev
, u8 regd
, u8 band
,
1329 u8 bw
, u8 rs
, u8 ch
, s8 pwr_limit
)
1331 struct rtw_hal
*hal
= &rtwdev
->hal
;
1332 u8 max_power_index
= rtwdev
->chip
->max_power_index
;
1336 pwr_limit
= clamp_t(s8
, pwr_limit
,
1337 -max_power_index
, max_power_index
);
1338 ch_idx
= rtw_channel_to_idx(band
, ch
);
1340 if (regd
>= RTW_REGD_MAX
|| bw
>= RTW_CHANNEL_WIDTH_MAX
||
1341 rs
>= RTW_RATE_SECTION_MAX
|| ch_idx
< 0) {
1343 "wrong txpwr_lmt regd=%u, band=%u bw=%u, rs=%u, ch_idx=%u, pwr_limit=%d\n",
1344 regd
, band
, bw
, rs
, ch_idx
, pwr_limit
);
1348 if (band
== PHY_BAND_2G
) {
1349 hal
->tx_pwr_limit_2g
[regd
][bw
][rs
][ch_idx
] = pwr_limit
;
1350 ww
= hal
->tx_pwr_limit_2g
[RTW_REGD_WW
][bw
][rs
][ch_idx
];
1351 ww
= min_t(s8
, ww
, pwr_limit
);
1352 hal
->tx_pwr_limit_2g
[RTW_REGD_WW
][bw
][rs
][ch_idx
] = ww
;
1353 } else if (band
== PHY_BAND_5G
) {
1354 hal
->tx_pwr_limit_5g
[regd
][bw
][rs
][ch_idx
] = pwr_limit
;
1355 ww
= hal
->tx_pwr_limit_5g
[RTW_REGD_WW
][bw
][rs
][ch_idx
];
1356 ww
= min_t(s8
, ww
, pwr_limit
);
1357 hal
->tx_pwr_limit_5g
[RTW_REGD_WW
][bw
][rs
][ch_idx
] = ww
;
1361 /* cross-reference 5G power limits if values are not assigned */
1363 rtw_xref_5g_txpwr_lmt(struct rtw_dev
*rtwdev
, u8 regd
,
1364 u8 bw
, u8 ch_idx
, u8 rs_ht
, u8 rs_vht
)
1366 struct rtw_hal
*hal
= &rtwdev
->hal
;
1367 u8 max_power_index
= rtwdev
->chip
->max_power_index
;
1368 s8 lmt_ht
= hal
->tx_pwr_limit_5g
[regd
][bw
][rs_ht
][ch_idx
];
1369 s8 lmt_vht
= hal
->tx_pwr_limit_5g
[regd
][bw
][rs_vht
][ch_idx
];
1371 if (lmt_ht
== lmt_vht
)
1374 if (lmt_ht
== max_power_index
)
1375 hal
->tx_pwr_limit_5g
[regd
][bw
][rs_ht
][ch_idx
] = lmt_vht
;
1377 else if (lmt_vht
== max_power_index
)
1378 hal
->tx_pwr_limit_5g
[regd
][bw
][rs_vht
][ch_idx
] = lmt_ht
;
1381 /* cross-reference power limits for ht and vht */
1383 rtw_xref_txpwr_lmt_by_rs(struct rtw_dev
*rtwdev
, u8 regd
, u8 bw
, u8 ch_idx
)
1385 u8 rs_idx
, rs_ht
, rs_vht
;
1386 u8 rs_cmp
[2][2] = {{RTW_RATE_SECTION_HT_1S
, RTW_RATE_SECTION_VHT_1S
},
1387 {RTW_RATE_SECTION_HT_2S
, RTW_RATE_SECTION_VHT_2S
} };
1389 for (rs_idx
= 0; rs_idx
< 2; rs_idx
++) {
1390 rs_ht
= rs_cmp
[rs_idx
][0];
1391 rs_vht
= rs_cmp
[rs_idx
][1];
1393 rtw_xref_5g_txpwr_lmt(rtwdev
, regd
, bw
, ch_idx
, rs_ht
, rs_vht
);
1397 /* cross-reference power limits for 5G channels */
1399 rtw_xref_5g_txpwr_lmt_by_ch(struct rtw_dev
*rtwdev
, u8 regd
, u8 bw
)
1403 for (ch_idx
= 0; ch_idx
< RTW_MAX_CHANNEL_NUM_5G
; ch_idx
++)
1404 rtw_xref_txpwr_lmt_by_rs(rtwdev
, regd
, bw
, ch_idx
);
1407 /* cross-reference power limits for 20/40M bandwidth */
1409 rtw_xref_txpwr_lmt_by_bw(struct rtw_dev
*rtwdev
, u8 regd
)
1413 for (bw
= RTW_CHANNEL_WIDTH_20
; bw
<= RTW_CHANNEL_WIDTH_40
; bw
++)
1414 rtw_xref_5g_txpwr_lmt_by_ch(rtwdev
, regd
, bw
);
1417 /* cross-reference power limits */
1418 static void rtw_xref_txpwr_lmt(struct rtw_dev
*rtwdev
)
1422 for (regd
= 0; regd
< RTW_REGD_MAX
; regd
++)
1423 rtw_xref_txpwr_lmt_by_bw(rtwdev
, regd
);
1426 void rtw_parse_tbl_txpwr_lmt(struct rtw_dev
*rtwdev
,
1427 const struct rtw_table
*tbl
)
1429 const struct rtw_txpwr_lmt_cfg_pair
*p
= tbl
->data
;
1430 const struct rtw_txpwr_lmt_cfg_pair
*end
= p
+ tbl
->size
;
1432 for (; p
< end
; p
++) {
1433 rtw_phy_set_tx_power_limit(rtwdev
, p
->regd
, p
->band
,
1434 p
->bw
, p
->rs
, p
->ch
, p
->txpwr_lmt
);
1437 rtw_xref_txpwr_lmt(rtwdev
);
1439 EXPORT_SYMBOL(rtw_parse_tbl_txpwr_lmt
);
1441 void rtw_phy_cfg_mac(struct rtw_dev
*rtwdev
, const struct rtw_table
*tbl
,
1444 rtw_write8(rtwdev
, addr
, data
);
1446 EXPORT_SYMBOL(rtw_phy_cfg_mac
);
1448 void rtw_phy_cfg_agc(struct rtw_dev
*rtwdev
, const struct rtw_table
*tbl
,
1451 rtw_write32(rtwdev
, addr
, data
);
1453 EXPORT_SYMBOL(rtw_phy_cfg_agc
);
1455 void rtw_phy_cfg_bb(struct rtw_dev
*rtwdev
, const struct rtw_table
*tbl
,
1460 else if (addr
== 0xfd)
1462 else if (addr
== 0xfc)
1464 else if (addr
== 0xfb)
1465 usleep_range(50, 60);
1466 else if (addr
== 0xfa)
1468 else if (addr
== 0xf9)
1471 rtw_write32(rtwdev
, addr
, data
);
1473 EXPORT_SYMBOL(rtw_phy_cfg_bb
);
1475 void rtw_phy_cfg_rf(struct rtw_dev
*rtwdev
, const struct rtw_table
*tbl
,
1478 if (addr
== 0xffe) {
1480 } else if (addr
== 0xfe) {
1481 usleep_range(100, 110);
1483 rtw_write_rf(rtwdev
, tbl
->rf_path
, addr
, RFREG_MASK
, data
);
1487 EXPORT_SYMBOL(rtw_phy_cfg_rf
);
1489 static void rtw_load_rfk_table(struct rtw_dev
*rtwdev
)
1491 struct rtw_chip_info
*chip
= rtwdev
->chip
;
1492 struct rtw_dpk_info
*dpk_info
= &rtwdev
->dm_info
.dpk_info
;
1494 if (!chip
->rfk_init_tbl
)
1497 rtw_write32_mask(rtwdev
, 0x1e24, BIT(17), 0x1);
1498 rtw_write32_mask(rtwdev
, 0x1cd0, BIT(28), 0x1);
1499 rtw_write32_mask(rtwdev
, 0x1cd0, BIT(29), 0x1);
1500 rtw_write32_mask(rtwdev
, 0x1cd0, BIT(30), 0x1);
1501 rtw_write32_mask(rtwdev
, 0x1cd0, BIT(31), 0x0);
1503 rtw_load_table(rtwdev
, chip
->rfk_init_tbl
);
1505 dpk_info
->is_dpk_pwr_on
= true;
1508 void rtw_phy_load_tables(struct rtw_dev
*rtwdev
)
1510 struct rtw_chip_info
*chip
= rtwdev
->chip
;
1513 rtw_load_table(rtwdev
, chip
->mac_tbl
);
1514 rtw_load_table(rtwdev
, chip
->bb_tbl
);
1515 rtw_load_table(rtwdev
, chip
->agc_tbl
);
1516 rtw_load_rfk_table(rtwdev
);
1518 for (rf_path
= 0; rf_path
< rtwdev
->hal
.rf_path_num
; rf_path
++) {
1519 const struct rtw_table
*tbl
;
1521 tbl
= chip
->rf_tbl
[rf_path
];
1522 rtw_load_table(rtwdev
, tbl
);
1525 EXPORT_SYMBOL(rtw_phy_load_tables
);
1527 static u8
rtw_get_channel_group(u8 channel
)
1616 static s8
rtw_phy_get_dis_dpd_by_rate_diff(struct rtw_dev
*rtwdev
, u16 rate
)
1618 struct rtw_chip_info
*chip
= rtwdev
->chip
;
1621 if (!chip
->en_dis_dpd
)
1624 #define RTW_DPD_RATE_CHECK(_rate) \
1625 case DESC_RATE ## _rate: \
1626 if (DIS_DPD_RATE ## _rate & chip->dpd_ratemask) \
1627 dpd_diff = -6 * chip->txgi_factor; \
1631 RTW_DPD_RATE_CHECK(6M
);
1632 RTW_DPD_RATE_CHECK(9M
);
1633 RTW_DPD_RATE_CHECK(MCS0
);
1634 RTW_DPD_RATE_CHECK(MCS1
);
1635 RTW_DPD_RATE_CHECK(MCS8
);
1636 RTW_DPD_RATE_CHECK(MCS9
);
1637 RTW_DPD_RATE_CHECK(VHT1SS_MCS0
);
1638 RTW_DPD_RATE_CHECK(VHT1SS_MCS1
);
1639 RTW_DPD_RATE_CHECK(VHT2SS_MCS0
);
1640 RTW_DPD_RATE_CHECK(VHT2SS_MCS1
);
1642 #undef RTW_DPD_RATE_CHECK
1647 static u8
rtw_phy_get_2g_tx_power_index(struct rtw_dev
*rtwdev
,
1648 struct rtw_2g_txpwr_idx
*pwr_idx_2g
,
1649 enum rtw_bandwidth bandwidth
,
1652 struct rtw_chip_info
*chip
= rtwdev
->chip
;
1656 u8 factor
= chip
->txgi_factor
;
1658 if (rate
<= DESC_RATE11M
)
1659 tx_power
= pwr_idx_2g
->cck_base
[group
];
1661 tx_power
= pwr_idx_2g
->bw40_base
[group
];
1663 if (rate
>= DESC_RATE6M
&& rate
<= DESC_RATE54M
)
1664 tx_power
+= pwr_idx_2g
->ht_1s_diff
.ofdm
* factor
;
1666 mcs_rate
= (rate
>= DESC_RATEMCS0
&& rate
<= DESC_RATEMCS15
) ||
1667 (rate
>= DESC_RATEVHT1SS_MCS0
&&
1668 rate
<= DESC_RATEVHT2SS_MCS9
);
1669 above_2ss
= (rate
>= DESC_RATEMCS8
&& rate
<= DESC_RATEMCS15
) ||
1670 (rate
>= DESC_RATEVHT2SS_MCS0
);
1675 switch (bandwidth
) {
1679 case RTW_CHANNEL_WIDTH_20
:
1680 tx_power
+= pwr_idx_2g
->ht_1s_diff
.bw20
* factor
;
1682 tx_power
+= pwr_idx_2g
->ht_2s_diff
.bw20
* factor
;
1684 case RTW_CHANNEL_WIDTH_40
:
1685 /* bw40 is the base power */
1687 tx_power
+= pwr_idx_2g
->ht_2s_diff
.bw40
* factor
;
1694 static u8
rtw_phy_get_5g_tx_power_index(struct rtw_dev
*rtwdev
,
1695 struct rtw_5g_txpwr_idx
*pwr_idx_5g
,
1696 enum rtw_bandwidth bandwidth
,
1699 struct rtw_chip_info
*chip
= rtwdev
->chip
;
1704 u8 factor
= chip
->txgi_factor
;
1706 tx_power
= pwr_idx_5g
->bw40_base
[group
];
1708 mcs_rate
= (rate
>= DESC_RATEMCS0
&& rate
<= DESC_RATEMCS15
) ||
1709 (rate
>= DESC_RATEVHT1SS_MCS0
&&
1710 rate
<= DESC_RATEVHT2SS_MCS9
);
1711 above_2ss
= (rate
>= DESC_RATEMCS8
&& rate
<= DESC_RATEMCS15
) ||
1712 (rate
>= DESC_RATEVHT2SS_MCS0
);
1715 tx_power
+= pwr_idx_5g
->ht_1s_diff
.ofdm
* factor
;
1719 switch (bandwidth
) {
1723 case RTW_CHANNEL_WIDTH_20
:
1724 tx_power
+= pwr_idx_5g
->ht_1s_diff
.bw20
* factor
;
1726 tx_power
+= pwr_idx_5g
->ht_2s_diff
.bw20
* factor
;
1728 case RTW_CHANNEL_WIDTH_40
:
1729 /* bw40 is the base power */
1731 tx_power
+= pwr_idx_5g
->ht_2s_diff
.bw40
* factor
;
1733 case RTW_CHANNEL_WIDTH_80
:
1734 /* the base idx of bw80 is the average of bw40+/bw40- */
1735 lower
= pwr_idx_5g
->bw40_base
[group
];
1736 upper
= pwr_idx_5g
->bw40_base
[group
+ 1];
1738 tx_power
= (lower
+ upper
) / 2;
1739 tx_power
+= pwr_idx_5g
->vht_1s_diff
.bw80
* factor
;
1741 tx_power
+= pwr_idx_5g
->vht_2s_diff
.bw80
* factor
;
1748 static s8
rtw_phy_get_tx_power_limit(struct rtw_dev
*rtwdev
, u8 band
,
1749 enum rtw_bandwidth bw
, u8 rf_path
,
1750 u8 rate
, u8 channel
, u8 regd
)
1752 struct rtw_hal
*hal
= &rtwdev
->hal
;
1753 u8
*cch_by_bw
= hal
->cch_by_bw
;
1754 s8 power_limit
= (s8
)rtwdev
->chip
->max_power_index
;
1760 if (regd
> RTW_REGD_WW
)
1763 if (rate
>= DESC_RATE1M
&& rate
<= DESC_RATE11M
)
1764 rs
= RTW_RATE_SECTION_CCK
;
1765 else if (rate
>= DESC_RATE6M
&& rate
<= DESC_RATE54M
)
1766 rs
= RTW_RATE_SECTION_OFDM
;
1767 else if (rate
>= DESC_RATEMCS0
&& rate
<= DESC_RATEMCS7
)
1768 rs
= RTW_RATE_SECTION_HT_1S
;
1769 else if (rate
>= DESC_RATEMCS8
&& rate
<= DESC_RATEMCS15
)
1770 rs
= RTW_RATE_SECTION_HT_2S
;
1771 else if (rate
>= DESC_RATEVHT1SS_MCS0
&& rate
<= DESC_RATEVHT1SS_MCS9
)
1772 rs
= RTW_RATE_SECTION_VHT_1S
;
1773 else if (rate
>= DESC_RATEVHT2SS_MCS0
&& rate
<= DESC_RATEVHT2SS_MCS9
)
1774 rs
= RTW_RATE_SECTION_VHT_2S
;
1778 /* only 20M BW with cck and ofdm */
1779 if (rs
== RTW_RATE_SECTION_CCK
|| rs
== RTW_RATE_SECTION_OFDM
)
1780 bw
= RTW_CHANNEL_WIDTH_20
;
1782 /* only 20/40M BW with ht */
1783 if (rs
== RTW_RATE_SECTION_HT_1S
|| rs
== RTW_RATE_SECTION_HT_2S
)
1784 bw
= min_t(u8
, bw
, RTW_CHANNEL_WIDTH_40
);
1786 /* select min power limit among [20M BW ~ current BW] */
1787 for (cur_bw
= RTW_CHANNEL_WIDTH_20
; cur_bw
<= bw
; cur_bw
++) {
1788 cur_ch
= cch_by_bw
[cur_bw
];
1790 ch_idx
= rtw_channel_to_idx(band
, cur_ch
);
1794 cur_lmt
= cur_ch
<= RTW_MAX_CHANNEL_NUM_2G
?
1795 hal
->tx_pwr_limit_2g
[regd
][cur_bw
][rs
][ch_idx
] :
1796 hal
->tx_pwr_limit_5g
[regd
][cur_bw
][rs
][ch_idx
];
1798 power_limit
= min_t(s8
, cur_lmt
, power_limit
);
1804 WARN(1, "invalid arguments, band=%d, bw=%d, path=%d, rate=%d, ch=%d\n",
1805 band
, bw
, rf_path
, rate
, channel
);
1806 return (s8
)rtwdev
->chip
->max_power_index
;
1809 void rtw_get_tx_power_params(struct rtw_dev
*rtwdev
, u8 path
, u8 rate
, u8 bw
,
1810 u8 ch
, u8 regd
, struct rtw_power_params
*pwr_param
)
1812 struct rtw_hal
*hal
= &rtwdev
->hal
;
1813 struct rtw_dm_info
*dm_info
= &rtwdev
->dm_info
;
1814 struct rtw_txpwr_idx
*pwr_idx
;
1816 u8
*base
= &pwr_param
->pwr_base
;
1817 s8
*offset
= &pwr_param
->pwr_offset
;
1818 s8
*limit
= &pwr_param
->pwr_limit
;
1819 s8
*remnant
= &pwr_param
->pwr_remnant
;
1821 pwr_idx
= &rtwdev
->efuse
.txpwr_idx_table
[path
];
1822 group
= rtw_get_channel_group(ch
);
1824 /* base power index for 2.4G/5G */
1825 if (IS_CH_2G_BAND(ch
)) {
1827 *base
= rtw_phy_get_2g_tx_power_index(rtwdev
,
1828 &pwr_idx
->pwr_idx_2g
,
1830 *offset
= hal
->tx_pwr_by_rate_offset_2g
[path
][rate
];
1833 *base
= rtw_phy_get_5g_tx_power_index(rtwdev
,
1834 &pwr_idx
->pwr_idx_5g
,
1836 *offset
= hal
->tx_pwr_by_rate_offset_5g
[path
][rate
];
1839 *limit
= rtw_phy_get_tx_power_limit(rtwdev
, band
, bw
, path
,
1841 *remnant
= (rate
<= DESC_RATE11M
? dm_info
->txagc_remnant_cck
:
1842 dm_info
->txagc_remnant_ofdm
);
1846 rtw_phy_get_tx_power_index(struct rtw_dev
*rtwdev
, u8 rf_path
, u8 rate
,
1847 enum rtw_bandwidth bandwidth
, u8 channel
, u8 regd
)
1849 struct rtw_power_params pwr_param
= {0};
1853 rtw_get_tx_power_params(rtwdev
, rf_path
, rate
, bandwidth
,
1854 channel
, regd
, &pwr_param
);
1856 tx_power
= pwr_param
.pwr_base
;
1857 offset
= min_t(s8
, pwr_param
.pwr_offset
, pwr_param
.pwr_limit
);
1859 if (rtwdev
->chip
->en_dis_dpd
)
1860 offset
+= rtw_phy_get_dis_dpd_by_rate_diff(rtwdev
, rate
);
1862 tx_power
+= offset
+ pwr_param
.pwr_remnant
;
1864 if (tx_power
> rtwdev
->chip
->max_power_index
)
1865 tx_power
= rtwdev
->chip
->max_power_index
;
1869 EXPORT_SYMBOL(rtw_phy_get_tx_power_index
);
1871 static void rtw_phy_set_tx_power_index_by_rs(struct rtw_dev
*rtwdev
,
1872 u8 ch
, u8 path
, u8 rs
)
1874 struct rtw_hal
*hal
= &rtwdev
->hal
;
1875 u8 regd
= rtwdev
->regd
.txpwr_regd
;
1883 if (rs
>= RTW_RATE_SECTION_MAX
)
1886 rates
= rtw_rate_section
[rs
];
1887 size
= rtw_rate_size
[rs
];
1888 bw
= hal
->current_band_width
;
1889 for (i
= 0; i
< size
; i
++) {
1891 pwr_idx
= rtw_phy_get_tx_power_index(rtwdev
, path
, rate
,
1893 hal
->tx_pwr_tbl
[path
][rate
] = pwr_idx
;
1897 /* set tx power level by path for each rates, note that the order of the rates
1898 * are *very* important, bacause 8822B/8821C combines every four bytes of tx
1899 * power index into a four-byte power index register, and calls set_tx_agc to
1900 * write these values into hardware
1902 static void rtw_phy_set_tx_power_level_by_path(struct rtw_dev
*rtwdev
,
1905 struct rtw_hal
*hal
= &rtwdev
->hal
;
1908 /* do not need cck rates if we are not in 2.4G */
1909 if (hal
->current_band_type
== RTW_BAND_2G
)
1910 rs
= RTW_RATE_SECTION_CCK
;
1912 rs
= RTW_RATE_SECTION_OFDM
;
1914 for (; rs
< RTW_RATE_SECTION_MAX
; rs
++)
1915 rtw_phy_set_tx_power_index_by_rs(rtwdev
, ch
, path
, rs
);
1918 void rtw_phy_set_tx_power_level(struct rtw_dev
*rtwdev
, u8 channel
)
1920 struct rtw_chip_info
*chip
= rtwdev
->chip
;
1921 struct rtw_hal
*hal
= &rtwdev
->hal
;
1924 mutex_lock(&hal
->tx_power_mutex
);
1926 for (path
= 0; path
< hal
->rf_path_num
; path
++)
1927 rtw_phy_set_tx_power_level_by_path(rtwdev
, channel
, path
);
1929 chip
->ops
->set_tx_power_index(rtwdev
);
1930 mutex_unlock(&hal
->tx_power_mutex
);
1932 EXPORT_SYMBOL(rtw_phy_set_tx_power_level
);
1935 rtw_phy_tx_power_by_rate_config_by_path(struct rtw_hal
*hal
, u8 path
,
1936 u8 rs
, u8 size
, u8
*rates
)
1939 u8 base_idx
, rate_idx
;
1940 s8 base_2g
, base_5g
;
1942 if (rs
>= RTW_RATE_SECTION_VHT_1S
)
1943 base_idx
= rates
[size
- 3];
1945 base_idx
= rates
[size
- 1];
1946 base_2g
= hal
->tx_pwr_by_rate_offset_2g
[path
][base_idx
];
1947 base_5g
= hal
->tx_pwr_by_rate_offset_5g
[path
][base_idx
];
1948 hal
->tx_pwr_by_rate_base_2g
[path
][rs
] = base_2g
;
1949 hal
->tx_pwr_by_rate_base_5g
[path
][rs
] = base_5g
;
1950 for (rate
= 0; rate
< size
; rate
++) {
1951 rate_idx
= rates
[rate
];
1952 hal
->tx_pwr_by_rate_offset_2g
[path
][rate_idx
] -= base_2g
;
1953 hal
->tx_pwr_by_rate_offset_5g
[path
][rate_idx
] -= base_5g
;
1957 void rtw_phy_tx_power_by_rate_config(struct rtw_hal
*hal
)
1961 for (path
= 0; path
< RTW_RF_PATH_MAX
; path
++) {
1962 rtw_phy_tx_power_by_rate_config_by_path(hal
, path
,
1963 RTW_RATE_SECTION_CCK
,
1964 rtw_cck_size
, rtw_cck_rates
);
1965 rtw_phy_tx_power_by_rate_config_by_path(hal
, path
,
1966 RTW_RATE_SECTION_OFDM
,
1967 rtw_ofdm_size
, rtw_ofdm_rates
);
1968 rtw_phy_tx_power_by_rate_config_by_path(hal
, path
,
1969 RTW_RATE_SECTION_HT_1S
,
1970 rtw_ht_1s_size
, rtw_ht_1s_rates
);
1971 rtw_phy_tx_power_by_rate_config_by_path(hal
, path
,
1972 RTW_RATE_SECTION_HT_2S
,
1973 rtw_ht_2s_size
, rtw_ht_2s_rates
);
1974 rtw_phy_tx_power_by_rate_config_by_path(hal
, path
,
1975 RTW_RATE_SECTION_VHT_1S
,
1976 rtw_vht_1s_size
, rtw_vht_1s_rates
);
1977 rtw_phy_tx_power_by_rate_config_by_path(hal
, path
,
1978 RTW_RATE_SECTION_VHT_2S
,
1979 rtw_vht_2s_size
, rtw_vht_2s_rates
);
1984 __rtw_phy_tx_power_limit_config(struct rtw_hal
*hal
, u8 regd
, u8 bw
, u8 rs
)
1989 for (ch
= 0; ch
< RTW_MAX_CHANNEL_NUM_2G
; ch
++) {
1990 base
= hal
->tx_pwr_by_rate_base_2g
[0][rs
];
1991 hal
->tx_pwr_limit_2g
[regd
][bw
][rs
][ch
] -= base
;
1994 for (ch
= 0; ch
< RTW_MAX_CHANNEL_NUM_5G
; ch
++) {
1995 base
= hal
->tx_pwr_by_rate_base_5g
[0][rs
];
1996 hal
->tx_pwr_limit_5g
[regd
][bw
][rs
][ch
] -= base
;
2000 void rtw_phy_tx_power_limit_config(struct rtw_hal
*hal
)
2004 /* default at channel 1 */
2005 hal
->cch_by_bw
[RTW_CHANNEL_WIDTH_20
] = 1;
2007 for (regd
= 0; regd
< RTW_REGD_MAX
; regd
++)
2008 for (bw
= 0; bw
< RTW_CHANNEL_WIDTH_MAX
; bw
++)
2009 for (rs
= 0; rs
< RTW_RATE_SECTION_MAX
; rs
++)
2010 __rtw_phy_tx_power_limit_config(hal
, regd
, bw
, rs
);
2013 static void rtw_phy_init_tx_power_limit(struct rtw_dev
*rtwdev
,
2014 u8 regd
, u8 bw
, u8 rs
)
2016 struct rtw_hal
*hal
= &rtwdev
->hal
;
2017 s8 max_power_index
= (s8
)rtwdev
->chip
->max_power_index
;
2021 for (ch
= 0; ch
< RTW_MAX_CHANNEL_NUM_2G
; ch
++)
2022 hal
->tx_pwr_limit_2g
[regd
][bw
][rs
][ch
] = max_power_index
;
2025 for (ch
= 0; ch
< RTW_MAX_CHANNEL_NUM_5G
; ch
++)
2026 hal
->tx_pwr_limit_5g
[regd
][bw
][rs
][ch
] = max_power_index
;
2029 void rtw_phy_init_tx_power(struct rtw_dev
*rtwdev
)
2031 struct rtw_hal
*hal
= &rtwdev
->hal
;
2032 u8 regd
, path
, rate
, rs
, bw
;
2034 /* init tx power by rate offset */
2035 for (path
= 0; path
< RTW_RF_PATH_MAX
; path
++) {
2036 for (rate
= 0; rate
< DESC_RATE_MAX
; rate
++) {
2037 hal
->tx_pwr_by_rate_offset_2g
[path
][rate
] = 0;
2038 hal
->tx_pwr_by_rate_offset_5g
[path
][rate
] = 0;
2042 /* init tx power limit */
2043 for (regd
= 0; regd
< RTW_REGD_MAX
; regd
++)
2044 for (bw
= 0; bw
< RTW_CHANNEL_WIDTH_MAX
; bw
++)
2045 for (rs
= 0; rs
< RTW_RATE_SECTION_MAX
; rs
++)
2046 rtw_phy_init_tx_power_limit(rtwdev
, regd
, bw
,
2050 void rtw_phy_config_swing_table(struct rtw_dev
*rtwdev
,
2051 struct rtw_swing_table
*swing_table
)
2053 const struct rtw_pwr_track_tbl
*tbl
= rtwdev
->chip
->pwr_track_tbl
;
2054 u8 channel
= rtwdev
->hal
.current_channel
;
2056 if (IS_CH_2G_BAND(channel
)) {
2057 if (rtwdev
->dm_info
.tx_rate
<= DESC_RATE11M
) {
2058 swing_table
->p
[RF_PATH_A
] = tbl
->pwrtrk_2g_ccka_p
;
2059 swing_table
->n
[RF_PATH_A
] = tbl
->pwrtrk_2g_ccka_n
;
2060 swing_table
->p
[RF_PATH_B
] = tbl
->pwrtrk_2g_cckb_p
;
2061 swing_table
->n
[RF_PATH_B
] = tbl
->pwrtrk_2g_cckb_n
;
2063 swing_table
->p
[RF_PATH_A
] = tbl
->pwrtrk_2ga_p
;
2064 swing_table
->n
[RF_PATH_A
] = tbl
->pwrtrk_2ga_n
;
2065 swing_table
->p
[RF_PATH_B
] = tbl
->pwrtrk_2gb_p
;
2066 swing_table
->n
[RF_PATH_B
] = tbl
->pwrtrk_2gb_n
;
2068 } else if (IS_CH_5G_BAND_1(channel
) || IS_CH_5G_BAND_2(channel
)) {
2069 swing_table
->p
[RF_PATH_A
] = tbl
->pwrtrk_5ga_p
[RTW_PWR_TRK_5G_1
];
2070 swing_table
->n
[RF_PATH_A
] = tbl
->pwrtrk_5ga_n
[RTW_PWR_TRK_5G_1
];
2071 swing_table
->p
[RF_PATH_B
] = tbl
->pwrtrk_5gb_p
[RTW_PWR_TRK_5G_1
];
2072 swing_table
->n
[RF_PATH_B
] = tbl
->pwrtrk_5gb_n
[RTW_PWR_TRK_5G_1
];
2073 } else if (IS_CH_5G_BAND_3(channel
)) {
2074 swing_table
->p
[RF_PATH_A
] = tbl
->pwrtrk_5ga_p
[RTW_PWR_TRK_5G_2
];
2075 swing_table
->n
[RF_PATH_A
] = tbl
->pwrtrk_5ga_n
[RTW_PWR_TRK_5G_2
];
2076 swing_table
->p
[RF_PATH_B
] = tbl
->pwrtrk_5gb_p
[RTW_PWR_TRK_5G_2
];
2077 swing_table
->n
[RF_PATH_B
] = tbl
->pwrtrk_5gb_n
[RTW_PWR_TRK_5G_2
];
2078 } else if (IS_CH_5G_BAND_4(channel
)) {
2079 swing_table
->p
[RF_PATH_A
] = tbl
->pwrtrk_5ga_p
[RTW_PWR_TRK_5G_3
];
2080 swing_table
->n
[RF_PATH_A
] = tbl
->pwrtrk_5ga_n
[RTW_PWR_TRK_5G_3
];
2081 swing_table
->p
[RF_PATH_B
] = tbl
->pwrtrk_5gb_p
[RTW_PWR_TRK_5G_3
];
2082 swing_table
->n
[RF_PATH_B
] = tbl
->pwrtrk_5gb_n
[RTW_PWR_TRK_5G_3
];
2084 swing_table
->p
[RF_PATH_A
] = tbl
->pwrtrk_2ga_p
;
2085 swing_table
->n
[RF_PATH_A
] = tbl
->pwrtrk_2ga_n
;
2086 swing_table
->p
[RF_PATH_B
] = tbl
->pwrtrk_2gb_p
;
2087 swing_table
->n
[RF_PATH_B
] = tbl
->pwrtrk_2gb_n
;
2090 EXPORT_SYMBOL(rtw_phy_config_swing_table
);
2092 void rtw_phy_pwrtrack_avg(struct rtw_dev
*rtwdev
, u8 thermal
, u8 path
)
2094 struct rtw_dm_info
*dm_info
= &rtwdev
->dm_info
;
2096 ewma_thermal_add(&dm_info
->avg_thermal
[path
], thermal
);
2097 dm_info
->thermal_avg
[path
] =
2098 ewma_thermal_read(&dm_info
->avg_thermal
[path
]);
2100 EXPORT_SYMBOL(rtw_phy_pwrtrack_avg
);
2102 bool rtw_phy_pwrtrack_thermal_changed(struct rtw_dev
*rtwdev
, u8 thermal
,
2105 struct rtw_dm_info
*dm_info
= &rtwdev
->dm_info
;
2106 u8 avg
= ewma_thermal_read(&dm_info
->avg_thermal
[path
]);
2113 EXPORT_SYMBOL(rtw_phy_pwrtrack_thermal_changed
);
2115 u8
rtw_phy_pwrtrack_get_delta(struct rtw_dev
*rtwdev
, u8 path
)
2117 struct rtw_dm_info
*dm_info
= &rtwdev
->dm_info
;
2118 u8 therm_avg
, therm_efuse
, therm_delta
;
2120 therm_avg
= dm_info
->thermal_avg
[path
];
2121 therm_efuse
= rtwdev
->efuse
.thermal_meter
[path
];
2122 therm_delta
= abs(therm_avg
- therm_efuse
);
2124 return min_t(u8
, therm_delta
, RTW_PWR_TRK_TBL_SZ
- 1);
2126 EXPORT_SYMBOL(rtw_phy_pwrtrack_get_delta
);
2128 s8
rtw_phy_pwrtrack_get_pwridx(struct rtw_dev
*rtwdev
,
2129 struct rtw_swing_table
*swing_table
,
2130 u8 tbl_path
, u8 therm_path
, u8 delta
)
2132 struct rtw_dm_info
*dm_info
= &rtwdev
->dm_info
;
2133 const u8
*delta_swing_table_idx_pos
;
2134 const u8
*delta_swing_table_idx_neg
;
2136 if (delta
>= RTW_PWR_TRK_TBL_SZ
) {
2137 rtw_warn(rtwdev
, "power track table overflow\n");
2142 rtw_warn(rtwdev
, "swing table not configured\n");
2146 delta_swing_table_idx_pos
= swing_table
->p
[tbl_path
];
2147 delta_swing_table_idx_neg
= swing_table
->n
[tbl_path
];
2149 if (!delta_swing_table_idx_pos
|| !delta_swing_table_idx_neg
) {
2150 rtw_warn(rtwdev
, "invalid swing table index\n");
2154 if (dm_info
->thermal_avg
[therm_path
] >
2155 rtwdev
->efuse
.thermal_meter
[therm_path
])
2156 return delta_swing_table_idx_pos
[delta
];
2158 return -delta_swing_table_idx_neg
[delta
];
2160 EXPORT_SYMBOL(rtw_phy_pwrtrack_get_pwridx
);
2162 bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev
*rtwdev
)
2164 struct rtw_dm_info
*dm_info
= &rtwdev
->dm_info
;
2167 delta_iqk
= abs(dm_info
->thermal_avg
[0] - dm_info
->thermal_meter_k
);
2168 if (delta_iqk
>= rtwdev
->chip
->iqk_threshold
) {
2169 dm_info
->thermal_meter_k
= dm_info
->thermal_avg
[0];
2174 EXPORT_SYMBOL(rtw_phy_pwrtrack_need_iqk
);