// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_hw_btrs.h"
#include "ivpu_hw_btrs_lnl_reg.h"
#include "ivpu_hw_btrs_mtl_reg.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_pm.h"

#define BTRS_MTL_IRQ_MASK ((REG_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, ATS_ERR)) | \
                           (REG_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, UFI_ERR)))

#define BTRS_LNL_IRQ_MASK ((REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, ATS_ERR)) | \
                           (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI0_ERR)) | \
                           (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI1_ERR)) | \
                           (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR0_ERR)) | \
                           (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR1_ERR)) | \
                           (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR)))

#define BTRS_MTL_ALL_IRQ_MASK (BTRS_MTL_IRQ_MASK | (REG_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, \
                                                            FREQ_CHANGE)))

#define BTRS_IRQ_DISABLE_MASK ((u32)-1)

#define BTRS_LNL_ALL_IRQ_MASK ((u32)-1)

#define BTRS_MTL_WP_CONFIG_1_TILE_5_3_RATIO WP_CONFIG(MTL_CONFIG_1_TILE, MTL_PLL_RATIO_5_3)
#define BTRS_MTL_WP_CONFIG_1_TILE_4_3_RATIO WP_CONFIG(MTL_CONFIG_1_TILE, MTL_PLL_RATIO_4_3)
#define BTRS_MTL_WP_CONFIG_2_TILE_5_3_RATIO WP_CONFIG(MTL_CONFIG_2_TILE, MTL_PLL_RATIO_5_3)
#define BTRS_MTL_WP_CONFIG_2_TILE_4_3_RATIO WP_CONFIG(MTL_CONFIG_2_TILE, MTL_PLL_RATIO_4_3)
#define BTRS_MTL_WP_CONFIG_0_TILE_PLL_OFF   WP_CONFIG(0, 0)

#define PLL_CDYN_DEFAULT 0x80
#define PLL_EPP_DEFAULT 0x80
#define PLL_CONFIG_DEFAULT 0x0
#define PLL_SIMULATION_FREQ 10000000
#define PLL_REF_CLK_FREQ 50000000
#define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC)
#define IDLE_TIMEOUT_US (5 * USEC_PER_MSEC)
#define TIMEOUT_US (150 * USEC_PER_MSEC)

/* Work point configuration values */
#define WP_CONFIG(tile, ratio) (((tile) << 8) | (ratio))
#define MTL_CONFIG_1_TILE 0x01
#define MTL_CONFIG_2_TILE 0x02
#define MTL_PLL_RATIO_5_3 0x01
#define MTL_PLL_RATIO_4_3 0x02
#define BTRS_MTL_TILE_FUSE_ENABLE_BOTH 0x0
#define BTRS_MTL_TILE_SKU_BOTH 0x3630

#define BTRS_LNL_TILE_MAX_NUM 6
#define BTRS_LNL_TILE_MAX_MASK 0x3f

#define WEIGHTS_DEFAULT 0xf711f711u
#define WEIGHTS_ATS_DEFAULT 0x0000f711u
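
/*
 * Duty cycle throttling (DCT) mailbox values: DCT_REQ is the PCODE command
 * decoded in ivpu_hw_btrs_dct_get_request(), DCT_ENABLE/DCT_DISABLE are its
 * PARAM1 payloads.
 */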
#define DCT_REQ 0x2
#define DCT_ENABLE 0x1
#define DCT_DISABLE 0x0
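
/*
 * Probe how the MTL interrupt status register clears: if writing all 1s
 * leaves the register set, it is not write-1-to-clear and the
 * interrupt_clear_with_0 workaround is needed (returns true).
 */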
int ivpu_hw_btrs_irqs_clear_with_0_mtl(struct ivpu_device *vdev)
{
        REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, BTRS_MTL_ALL_IRQ_MASK);
        if (REGB_RD32(VPU_HW_BTRS_MTL_INTERRUPT_STAT) == BTRS_MTL_ALL_IRQ_MASK) {
                /* Writing 1s does not clear the interrupt status register */
                REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, 0x0);
                return true;
        }

        return false;
}
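
/* Read the fused PLL ratio limits (min/max) and the default PN ratio */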
static void freq_ratios_init_mtl(struct ivpu_device *vdev)
{
        struct ivpu_hw_info *hw = vdev->hw;
        u32 fmin_fuse, fmax_fuse;

        fmin_fuse = REGB_RD32(VPU_HW_BTRS_MTL_FMIN_FUSE);
        hw->pll.min_ratio = REG_GET_FLD(VPU_HW_BTRS_MTL_FMIN_FUSE, MIN_RATIO, fmin_fuse);
        hw->pll.pn_ratio = REG_GET_FLD(VPU_HW_BTRS_MTL_FMIN_FUSE, PN_RATIO, fmin_fuse);

        fmax_fuse = REGB_RD32(VPU_HW_BTRS_MTL_FMAX_FUSE);
        hw->pll.max_ratio = REG_GET_FLD(VPU_HW_BTRS_MTL_FMAX_FUSE, MAX_RATIO, fmax_fuse);
}

static void freq_ratios_init_lnl(struct ivpu_device *vdev)
{
        struct ivpu_hw_info *hw = vdev->hw;
        u32 fmin_fuse, fmax_fuse;

        fmin_fuse = REGB_RD32(VPU_HW_BTRS_LNL_FMIN_FUSE);
        hw->pll.min_ratio = REG_GET_FLD(VPU_HW_BTRS_LNL_FMIN_FUSE, MIN_RATIO, fmin_fuse);
        hw->pll.pn_ratio = REG_GET_FLD(VPU_HW_BTRS_LNL_FMIN_FUSE, PN_RATIO, fmin_fuse);

        fmax_fuse = REGB_RD32(VPU_HW_BTRS_LNL_FMAX_FUSE);
        hw->pll.max_ratio = REG_GET_FLD(VPU_HW_BTRS_LNL_FMAX_FUSE, MAX_RATIO, fmax_fuse);
}
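
/*
 * Pick the per-generation fuse reader, then clamp the ivpu_pll_min_ratio and
 * ivpu_pll_max_ratio overrides (and the PN ratio) to the fused range.
 */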
void ivpu_hw_btrs_freq_ratios_init(struct ivpu_device *vdev)
{
        struct ivpu_hw_info *hw = vdev->hw;

        if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
                freq_ratios_init_mtl(vdev);
        else
                freq_ratios_init_lnl(vdev);

        hw->pll.min_ratio = clamp_t(u8, ivpu_pll_min_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
        hw->pll.max_ratio = clamp_t(u8, ivpu_pll_max_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
        hw->pll.pn_ratio = clamp_t(u8, hw->pll.pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
}

static bool tile_disable_check(u32 config)
{
        /* Allowed values: 0 or one bit from range 0-5 (6 tiles) */
        if (config == 0)
                return true;

        if (config > BIT(BTRS_LNL_TILE_MAX_NUM - 1))
                return false;

        if ((config & (config - 1)) == 0)
                return true;

        return false;
}

static int read_tile_config_fuse(struct ivpu_device *vdev, u32 *tile_fuse_config)
{
        u32 fuse;
        u32 config;

        fuse = REGB_RD32(VPU_HW_BTRS_LNL_TILE_FUSE);
        if (!REG_TEST_FLD(VPU_HW_BTRS_LNL_TILE_FUSE, VALID, fuse)) {
                ivpu_err(vdev, "Fuse: invalid (0x%x)\n", fuse);
                return -EIO;
        }

        config = REG_GET_FLD(VPU_HW_BTRS_LNL_TILE_FUSE, CONFIG, fuse);
        if (!tile_disable_check(config))
                ivpu_warn(vdev, "More than 1 tile disabled, tile fuse config mask: 0x%x\n", config);

        ivpu_dbg(vdev, MISC, "Tile disable config mask: 0x%x\n", config);

        *tile_fuse_config = config;
        return 0;
}

static int info_init_mtl(struct ivpu_device *vdev)
{
        struct ivpu_hw_info *hw = vdev->hw;

        hw->tile_fuse = BTRS_MTL_TILE_FUSE_ENABLE_BOTH;
        hw->sku = BTRS_MTL_TILE_SKU_BOTH;
        hw->config = BTRS_MTL_WP_CONFIG_2_TILE_4_3_RATIO;

        return 0;
}

static int info_init_lnl(struct ivpu_device *vdev)
{
        struct ivpu_hw_info *hw = vdev->hw;
        u32 tile_fuse_config;
        int ret;

        ret = read_tile_config_fuse(vdev, &tile_fuse_config);
        if (ret)
                return ret;

        hw->tile_fuse = tile_fuse_config;
        hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;

        return 0;
}

int ivpu_hw_btrs_info_init(struct ivpu_device *vdev)
{
        if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
                return info_init_mtl(vdev);
        else
                return info_init_lnl(vdev);
}

static int wp_request_sync(struct ivpu_device *vdev)
{
        if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
                return REGB_POLL_FLD(VPU_HW_BTRS_MTL_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
        else
                return REGB_POLL_FLD(VPU_HW_BTRS_LNL_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
}

static int wait_for_status_ready(struct ivpu_device *vdev, bool enable)
{
        u32 exp_val = enable ? 0x1 : 0x0;

        if (IVPU_WA(punit_disabled))
                return 0;

        if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
                return REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, READY, exp_val, PLL_TIMEOUT_US);
        else
                return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, READY, exp_val, PLL_TIMEOUT_US);
}

struct wp_request {
        u16 min;
        u16 max;
        u16 target;
        u16 cfg;
        u16 epp;
        u16 cdyn;
};
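
/*
 * Submit a workpoint request: the min/max/target ratios and config are
 * written to the WP_REQ payload registers, then setting SEND in WP_REQ_CMD
 * hands the request to the punit.
 */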
static void wp_request_mtl(struct ivpu_device *vdev, struct wp_request *wp)
{
        u32 val;

        val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0);
        val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0, MIN_RATIO, wp->min, val);
        val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0, MAX_RATIO, wp->max, val);
        REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0, val);

        val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1);
        val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1, TARGET_RATIO, wp->target, val);
        val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1, EPP, PLL_EPP_DEFAULT, val);
        REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1, val);

        val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD2);
        val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD2, CONFIG, wp->cfg, val);
        REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD2, val);

        val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_CMD);
        val = REG_SET_FLD(VPU_HW_BTRS_MTL_WP_REQ_CMD, SEND, val);
        REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_CMD, val);
}

static void wp_request_lnl(struct ivpu_device *vdev, struct wp_request *wp)
{
        u32 val;

        val = REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0);
        val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0, MIN_RATIO, wp->min, val);
        val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0, MAX_RATIO, wp->max, val);
        REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0, val);

        val = REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1);
        val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1, TARGET_RATIO, wp->target, val);
        val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1, EPP, wp->epp, val);
        REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1, val);

        val = REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2);
        val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2, CONFIG, wp->cfg, val);
        val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2, CDYN, wp->cdyn, val);
        REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2, val);

        val = REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_CMD);
        val = REG_SET_FLD(VPU_HW_BTRS_LNL_WP_REQ_CMD, SEND, val);
        REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_CMD, val);
}

static void wp_request(struct ivpu_device *vdev, struct wp_request *wp)
{
        if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
                wp_request_mtl(vdev, wp);
        else
                wp_request_lnl(vdev, wp);
}

static int wp_request_send(struct ivpu_device *vdev, struct wp_request *wp)
{
        int ret;

        ret = wp_request_sync(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to sync before workpoint request: %d\n", ret);
                return ret;
        }

        wp_request(vdev, wp);

        ret = wp_request_sync(vdev);
        if (ret)
                ivpu_err(vdev, "Failed to sync after workpoint request: %d\n", ret);

        return ret;
}
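
/*
 * On MTL a disable request zeroes the target ratio and config; on LNL the
 * target ratio is kept at the PN ratio and config/cdyn/epp are zeroed
 * instead.
 */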
static void prepare_wp_request(struct ivpu_device *vdev, struct wp_request *wp, bool enable)
{
        struct ivpu_hw_info *hw = vdev->hw;

        wp->min = hw->pll.min_ratio;
        wp->max = hw->pll.max_ratio;

        if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) {
                wp->target = enable ? hw->pll.pn_ratio : 0;
                wp->cfg = enable ? hw->config : 0;
                wp->cdyn = 0;
                wp->epp = 0;
        } else {
                wp->target = hw->pll.pn_ratio;
                wp->cfg = enable ? PLL_CONFIG_DEFAULT : 0;
                wp->cdyn = enable ? PLL_CDYN_DEFAULT : 0;
                wp->epp = enable ? PLL_EPP_DEFAULT : 0;
        }
}

static int wait_for_pll_lock(struct ivpu_device *vdev, bool enable)
{
        u32 exp_val = enable ? 0x1 : 0x0;

        if (ivpu_hw_btrs_gen(vdev) != IVPU_HW_BTRS_MTL)
                return 0;

        if (IVPU_WA(punit_disabled))
                return 0;

        return REGB_POLL_FLD(VPU_HW_BTRS_MTL_PLL_STATUS, LOCK, exp_val, PLL_TIMEOUT_US);
}

int ivpu_hw_btrs_wp_drive(struct ivpu_device *vdev, bool enable)
{
        struct wp_request wp;
        int ret;

        if (IVPU_WA(punit_disabled)) {
                ivpu_dbg(vdev, PM, "Skipping workpoint request\n");
                return 0;
        }

        prepare_wp_request(vdev, &wp, enable);

        ivpu_dbg(vdev, PM, "PLL workpoint request: %u Hz, config: 0x%x, epp: 0x%x, cdyn: 0x%x\n",
                 PLL_RATIO_TO_FREQ(wp.target), wp.cfg, wp.epp, wp.cdyn);

        ret = wp_request_send(vdev, &wp);
        if (ret) {
                ivpu_err(vdev, "Failed to send workpoint request: %d\n", ret);
                return ret;
        }

        ret = wait_for_pll_lock(vdev, enable);
        if (ret) {
                ivpu_err(vdev, "Timed out waiting for PLL lock\n");
                return ret;
        }

        ret = wait_for_status_ready(vdev, enable);
        if (ret) {
                ivpu_err(vdev, "Timed out waiting for NPU ready status\n");
                return ret;
        }

        return 0;
}
static int d0i3_drive_mtl(struct ivpu_device *vdev, bool enable)
{
        int ret;
        u32 val;

        ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
        if (ret) {
                ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret);
                return ret;
        }

        val = REGB_RD32(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL);
        if (enable)
                val = REG_SET_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, I3, val);
        else
                val = REG_CLR_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, I3, val);
        REGB_WR32(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, val);

        ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
        if (ret)
                ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret);

        return ret;
}

static int d0i3_drive_lnl(struct ivpu_device *vdev, bool enable)
{
        int ret;
        u32 val;

        ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
        if (ret) {
                ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret);
                return ret;
        }

        val = REGB_RD32(VPU_HW_BTRS_LNL_D0I3_CONTROL);
        if (enable)
                val = REG_SET_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, I3, val);
        else
                val = REG_CLR_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, I3, val);
        REGB_WR32(VPU_HW_BTRS_LNL_D0I3_CONTROL, val);

        ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
        if (ret) {
                ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret);
                return ret;
        }

        return 0;
}

static int d0i3_drive(struct ivpu_device *vdev, bool enable)
{
        if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
                return d0i3_drive_mtl(vdev, enable);
        else
                return d0i3_drive_lnl(vdev, enable);
}

int ivpu_hw_btrs_d0i3_enable(struct ivpu_device *vdev)
{
        int ret;

        if (IVPU_WA(punit_disabled))
                return 0;

        ret = d0i3_drive(vdev, true);
        if (ret)
                ivpu_err(vdev, "Failed to enable D0i3: %d\n", ret);

        udelay(5); /* VPU requires 5 us to complete the transition */

        return ret;
}

int ivpu_hw_btrs_d0i3_disable(struct ivpu_device *vdev)
{
        int ret;

        if (IVPU_WA(punit_disabled))
                return 0;

        ret = d0i3_drive(vdev, false);
        if (ret)
                ivpu_err(vdev, "Failed to disable D0i3: %d\n", ret);

        return ret;
}

int ivpu_hw_btrs_wait_for_clock_res_own_ack(struct ivpu_device *vdev)
{
        if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
                return 0;

        return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, CLOCK_RESOURCE_OWN_ACK, 1, TIMEOUT_US);
}

void ivpu_hw_btrs_set_port_arbitration_weights_lnl(struct ivpu_device *vdev)
{
        REGB_WR32(VPU_HW_BTRS_LNL_PORT_ARBITRATION_WEIGHTS, WEIGHTS_DEFAULT);
        REGB_WR32(VPU_HW_BTRS_LNL_PORT_ARBITRATION_WEIGHTS_ATS, WEIGHTS_ATS_DEFAULT);
}
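
/*
 * IP reset handshake: wait until no reset is pending, pulse the TRIGGER bit
 * and wait for the hardware to clear it, which signals reset completion.
 */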
static int ip_reset_mtl(struct ivpu_device *vdev)
{
        int ret;
        u32 val;

        ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
        if (ret) {
                ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n");
                return ret;
        }

        val = REGB_RD32(VPU_HW_BTRS_MTL_VPU_IP_RESET);
        val = REG_SET_FLD(VPU_HW_BTRS_MTL_VPU_IP_RESET, TRIGGER, val);
        REGB_WR32(VPU_HW_BTRS_MTL_VPU_IP_RESET, val);

        ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
        if (ret)
                ivpu_err(vdev, "Timed out waiting for RESET completion\n");

        return ret;
}

static int ip_reset_lnl(struct ivpu_device *vdev)
{
        int ret;
        u32 val;

        ivpu_hw_btrs_clock_relinquish_disable_lnl(vdev);

        ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_IP_RESET, TRIGGER, 0, TIMEOUT_US);
        if (ret) {
                ivpu_err(vdev, "Wait for *_TRIGGER timed out\n");
                return ret;
        }

        val = REGB_RD32(VPU_HW_BTRS_LNL_IP_RESET);
        val = REG_SET_FLD(VPU_HW_BTRS_LNL_IP_RESET, TRIGGER, val);
        REGB_WR32(VPU_HW_BTRS_LNL_IP_RESET, val);

        ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_IP_RESET, TRIGGER, 0, TIMEOUT_US);
        if (ret)
                ivpu_err(vdev, "Timed out waiting for RESET completion\n");

        return ret;
}

int ivpu_hw_btrs_ip_reset(struct ivpu_device *vdev)
{
        if (IVPU_WA(punit_disabled))
                return 0;

        if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
                return ip_reset_mtl(vdev);
        else
                return ip_reset_lnl(vdev);
}

void ivpu_hw_btrs_profiling_freq_reg_set_lnl(struct ivpu_device *vdev)
{
        u32 val = REGB_RD32(VPU_HW_BTRS_LNL_VPU_STATUS);

        if (vdev->hw->pll.profiling_freq == PLL_PROFILING_FREQ_DEFAULT)
                val = REG_CLR_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, PERF_CLK, val);
        else
                val = REG_SET_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, PERF_CLK, val);

        REGB_WR32(VPU_HW_BTRS_LNL_VPU_STATUS, val);
}

void ivpu_hw_btrs_ats_print_lnl(struct ivpu_device *vdev)
{
        ivpu_dbg(vdev, MISC, "Buttress ATS: %s\n",
                 REGB_RD32(VPU_HW_BTRS_LNL_HM_ATS) ? "Enable" : "Disable");
}

void ivpu_hw_btrs_clock_relinquish_disable_lnl(struct ivpu_device *vdev)
{
        u32 val = REGB_RD32(VPU_HW_BTRS_LNL_VPU_STATUS);

        val = REG_SET_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, DISABLE_CLK_RELINQUISH, val);
        REGB_WR32(VPU_HW_BTRS_LNL_VPU_STATUS, val);
}

bool ivpu_hw_btrs_is_idle(struct ivpu_device *vdev)
{
        u32 val;

        if (IVPU_WA(punit_disabled))
                return true;

        if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) {
                val = REGB_RD32(VPU_HW_BTRS_MTL_VPU_STATUS);

                return REG_TEST_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, READY, val) &&
                       REG_TEST_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, IDLE, val);
        } else {
                val = REGB_RD32(VPU_HW_BTRS_LNL_VPU_STATUS);

                return REG_TEST_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, READY, val) &&
                       REG_TEST_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, IDLE, val);
        }
}

int ivpu_hw_btrs_wait_for_idle(struct ivpu_device *vdev)
{
        if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
                return REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, IDLE, 0x1, IDLE_TIMEOUT_US);
        else
                return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, IDLE, 0x1, IDLE_TIMEOUT_US);
}

/* Handler for IRQs from Buttress core (irqB) */
bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq)
{
        u32 status = REGB_RD32(VPU_HW_BTRS_MTL_INTERRUPT_STAT) & BTRS_MTL_IRQ_MASK;
        bool schedule_recovery = false;

        if (!status)
                return false;

        if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, FREQ_CHANGE, status))
                ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x",
                         REGB_RD32(VPU_HW_BTRS_MTL_CURRENT_PLL));

        if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, ATS_ERR, status)) {
                ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_HW_BTRS_MTL_ATS_ERR_LOG_0));
                REGB_WR32(VPU_HW_BTRS_MTL_ATS_ERR_CLEAR, 0x1);
                schedule_recovery = true;
        }

        if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, UFI_ERR, status)) {
                u32 ufi_log = REGB_RD32(VPU_HW_BTRS_MTL_UFI_ERR_LOG);

                ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
                         ufi_log, REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, OPCODE, ufi_log),
                         REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, AXI_ID, ufi_log),
                         REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, CQ_ID, ufi_log));
                REGB_WR32(VPU_HW_BTRS_MTL_UFI_ERR_CLEAR, 0x1);
                schedule_recovery = true;
        }

        /* This must be done after interrupts are cleared at the source. */
        if (IVPU_WA(interrupt_clear_with_0))
                /*
                 * Writing 1 triggers an interrupt, so we can't perform read update write.
                 * Clear local interrupt status by writing 0 to all bits.
                 */
                REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, 0x0);
        else
                REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, status);

        if (schedule_recovery)
                ivpu_pm_trigger_recovery(vdev, "Buttress IRQ");

        return true;
}

/* Handler for IRQs from Buttress core (irqB) */
bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq)
{
        u32 status = REGB_RD32(VPU_HW_BTRS_LNL_INTERRUPT_STAT) & BTRS_LNL_IRQ_MASK;
        bool schedule_recovery = false;

        if (!status)
                return false;

        if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR, status)) {
                ivpu_dbg(vdev, IRQ, "Survivability IRQ\n");
                if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_DCT))
                        ivpu_err_ratelimited(vdev, "IRQ FIFO full\n");
        }

        if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, FREQ_CHANGE, status))
                ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x", REGB_RD32(VPU_HW_BTRS_LNL_PLL_FREQ));

        if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, ATS_ERR, status)) {
                ivpu_err(vdev, "ATS_ERR_LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n",
                         REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG1),
                         REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG2));
                REGB_WR32(VPU_HW_BTRS_LNL_ATS_ERR_CLEAR, 0x1);
                schedule_recovery = true;
        }

        if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI0_ERR, status)) {
                ivpu_err(vdev, "CFI0_ERR 0x%08x", REGB_RD32(VPU_HW_BTRS_LNL_CFI0_ERR_LOG));
                REGB_WR32(VPU_HW_BTRS_LNL_CFI0_ERR_CLEAR, 0x1);
                schedule_recovery = true;
        }

        if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI1_ERR, status)) {
                ivpu_err(vdev, "CFI1_ERR 0x%08x", REGB_RD32(VPU_HW_BTRS_LNL_CFI1_ERR_LOG));
                REGB_WR32(VPU_HW_BTRS_LNL_CFI1_ERR_CLEAR, 0x1);
                schedule_recovery = true;
        }

        if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR0_ERR, status)) {
                ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x",
                         REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_LOW),
                         REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_HIGH));
                REGB_WR32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_CLEAR, 0x1);
                schedule_recovery = true;
        }

        if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR1_ERR, status)) {
                ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x",
                         REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_LOW),
                         REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_HIGH));
                REGB_WR32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_CLEAR, 0x1);
                schedule_recovery = true;
        }

        /* This must be done after interrupts are cleared at the source. */
        REGB_WR32(VPU_HW_BTRS_LNL_INTERRUPT_STAT, status);

        if (schedule_recovery)
                ivpu_pm_trigger_recovery(vdev, "Buttress IRQ");

        return true;
}
int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable)
{
        u32 val = REGB_RD32(VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW);
        u32 cmd = REG_GET_FLD(VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW, CMD, val);
        u32 param1 = REG_GET_FLD(VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW, PARAM1, val);

        if (cmd != DCT_REQ) {
                ivpu_err_ratelimited(vdev, "Unsupported PCODE command: 0x%x\n", cmd);
                return -EBADR;
        }

        switch (param1) {
        case DCT_ENABLE:
                *enable = true;
                return 0;
        case DCT_DISABLE:
                *enable = false;
                return 0;
        default:
                ivpu_err_ratelimited(vdev, "Invalid PARAM1 value: %u\n", param1);
                return -EBADR;
        }
}

void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 active_percent)
{
        u32 val = 0;
        u32 cmd = enable ? DCT_ENABLE : DCT_DISABLE;

        val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, CMD, DCT_REQ, val);
        val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, PARAM1, cmd, val);
        val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, PARAM2, active_percent, val);

        REGB_WR32(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, val);
}
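
/*
 * MTL frequency math: pll_clock = 50 MHz * ratio; the CPU clock is 2/4 of
 * that for the 4/3 workpoint config and 2/5 otherwise. E.g. ratio 40 gives
 * a 2 GHz PLL clock, hence a 1 GHz or 800 MHz CPU clock.
 */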
static u32 pll_ratio_to_freq_mtl(u32 ratio, u32 config)
{
        u32 pll_clock = PLL_REF_CLK_FREQ * ratio;
        u32 cpu_clock;

        if ((config & 0xff) == MTL_PLL_RATIO_4_3)
                cpu_clock = pll_clock * 2 / 4;
        else
                cpu_clock = pll_clock * 2 / 5;

        return cpu_clock;
}

u32 ivpu_hw_btrs_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
{
        struct ivpu_hw_info *hw = vdev->hw;

        if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
                return pll_ratio_to_freq_mtl(ratio, hw->config);
        else
                return PLL_RATIO_TO_FREQ(ratio);
}

static u32 pll_freq_get_mtl(struct ivpu_device *vdev)
{
        u32 pll_curr_ratio;

        pll_curr_ratio = REGB_RD32(VPU_HW_BTRS_MTL_CURRENT_PLL);
        pll_curr_ratio &= VPU_HW_BTRS_MTL_CURRENT_PLL_RATIO_MASK;

        if (!ivpu_is_silicon(vdev))
                return PLL_SIMULATION_FREQ;

        return pll_ratio_to_freq_mtl(pll_curr_ratio, vdev->hw->config);
}

static u32 pll_freq_get_lnl(struct ivpu_device *vdev)
{
        u32 pll_curr_ratio;

        pll_curr_ratio = REGB_RD32(VPU_HW_BTRS_LNL_PLL_FREQ);
        pll_curr_ratio &= VPU_HW_BTRS_LNL_PLL_FREQ_RATIO_MASK;

        return PLL_RATIO_TO_FREQ(pll_curr_ratio);
}

u32 ivpu_hw_btrs_pll_freq_get(struct ivpu_device *vdev)
{
        if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
                return pll_freq_get_mtl(vdev);
        else
                return pll_freq_get_lnl(vdev);
}
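
/* Telemetry window (offset, size, enable) as exposed by the buttress registers */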
u32 ivpu_hw_btrs_telemetry_offset_get(struct ivpu_device *vdev)
{
        if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
                return REGB_RD32(VPU_HW_BTRS_MTL_VPU_TELEMETRY_OFFSET);
        else
                return REGB_RD32(VPU_HW_BTRS_LNL_VPU_TELEMETRY_OFFSET);
}

u32 ivpu_hw_btrs_telemetry_size_get(struct ivpu_device *vdev)
{
        if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
                return REGB_RD32(VPU_HW_BTRS_MTL_VPU_TELEMETRY_SIZE);
        else
                return REGB_RD32(VPU_HW_BTRS_LNL_VPU_TELEMETRY_SIZE);
}

u32 ivpu_hw_btrs_telemetry_enable_get(struct ivpu_device *vdev)
{
        if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
                return REGB_RD32(VPU_HW_BTRS_MTL_VPU_TELEMETRY_ENABLE);
        else
                return REGB_RD32(VPU_HW_BTRS_LNL_VPU_TELEMETRY_ENABLE);
}

void ivpu_hw_btrs_global_int_disable(struct ivpu_device *vdev)
{
        if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
                REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x1);
        else
                REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x1);
}

void ivpu_hw_btrs_global_int_enable(struct ivpu_device *vdev)
{
        if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
                REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x0);
        else
                REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x0);
}
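
/*
 * LOCAL_INT_MASK bits are active-high masks: writing the complement of the
 * IRQ mask unmasks only the sources this driver handles, while writing all
 * 1s (BTRS_IRQ_DISABLE_MASK) masks everything.
 */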
void ivpu_hw_btrs_irq_enable(struct ivpu_device *vdev)
{
        if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) {
                REGB_WR32(VPU_HW_BTRS_MTL_LOCAL_INT_MASK, (u32)(~BTRS_MTL_IRQ_MASK));
                REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x0);
        } else {
                REGB_WR32(VPU_HW_BTRS_LNL_LOCAL_INT_MASK, (u32)(~BTRS_LNL_IRQ_MASK));
                REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x0);
        }
}

void ivpu_hw_btrs_irq_disable(struct ivpu_device *vdev)
{
        if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) {
                REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x1);
                REGB_WR32(VPU_HW_BTRS_MTL_LOCAL_INT_MASK, BTRS_IRQ_DISABLE_MASK);
        } else {
                REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x1);
                REGB_WR32(VPU_HW_BTRS_LNL_LOCAL_INT_MASK, BTRS_IRQ_DISABLE_MASK);
        }
}
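
/* Dump, without clearing, any pending buttress error logs */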
static void diagnose_failure_mtl(struct ivpu_device *vdev)
{
        u32 reg = REGB_RD32(VPU_HW_BTRS_MTL_INTERRUPT_STAT) & BTRS_MTL_IRQ_MASK;

        if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, ATS_ERR, reg))
                ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_HW_BTRS_MTL_ATS_ERR_LOG_0));

        if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, UFI_ERR, reg)) {
                u32 log = REGB_RD32(VPU_HW_BTRS_MTL_UFI_ERR_LOG);

                ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
                         log, REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, OPCODE, log),
                         REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, AXI_ID, log),
                         REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, CQ_ID, log));
        }
}

static void diagnose_failure_lnl(struct ivpu_device *vdev)
{
        u32 reg = REGB_RD32(VPU_HW_BTRS_LNL_INTERRUPT_STAT) & BTRS_LNL_IRQ_MASK;

        if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, ATS_ERR, reg)) {
                ivpu_err(vdev, "ATS_ERR_LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n",
                         REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG1),
                         REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG2));
        }

        if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI0_ERR, reg))
                ivpu_err(vdev, "CFI0_ERR_LOG 0x%08x\n", REGB_RD32(VPU_HW_BTRS_LNL_CFI0_ERR_LOG));

        if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI1_ERR, reg))
                ivpu_err(vdev, "CFI1_ERR_LOG 0x%08x\n", REGB_RD32(VPU_HW_BTRS_LNL_CFI1_ERR_LOG));

        if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR0_ERR, reg))
                ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x\n",
                         REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_LOW),
                         REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_HIGH));

        if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR1_ERR, reg))
                ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x\n",
                         REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_LOW),
                         REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_HIGH));

        if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR, reg))
                ivpu_err(vdev, "Survivability IRQ\n");
}

void ivpu_hw_btrs_diagnose_failure(struct ivpu_device *vdev)
{
        if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
                return diagnose_failure_mtl(vdev);
        else
                return diagnose_failure_lnl(vdev);
}