// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 - 2024 Intel Corporation
 */
8 #include "ivpu_hw_btrs.h"
9 #include "ivpu_hw_ip.h"
11 #include <linux/dmi.h>
13 static char *platform_to_str(u32 platform
)
16 case IVPU_PLATFORM_SILICON
:
18 case IVPU_PLATFORM_SIMICS
:
20 case IVPU_PLATFORM_FPGA
:
23 return "Invalid platform";
27 static const struct dmi_system_id dmi_platform_simulation
[] = {
29 .ident
= "Intel Simics",
31 DMI_MATCH(DMI_BOARD_NAME
, "lnlrvp"),
32 DMI_MATCH(DMI_BOARD_VERSION
, "1.0"),
33 DMI_MATCH(DMI_BOARD_SERIAL
, "123456789"),
37 .ident
= "Intel Simics",
39 DMI_MATCH(DMI_BOARD_NAME
, "Simics"),
45 static void platform_init(struct ivpu_device
*vdev
)
47 if (dmi_check_system(dmi_platform_simulation
))
48 vdev
->platform
= IVPU_PLATFORM_SIMICS
;
50 vdev
->platform
= IVPU_PLATFORM_SILICON
;
52 ivpu_dbg(vdev
, MISC
, "Platform type: %s (%d)\n",
53 platform_to_str(vdev
->platform
), vdev
->platform
);
56 static void wa_init(struct ivpu_device
*vdev
)
58 vdev
->wa
.punit_disabled
= ivpu_is_fpga(vdev
);
59 vdev
->wa
.clear_runtime_mem
= false;
61 if (ivpu_hw_btrs_gen(vdev
) == IVPU_HW_BTRS_MTL
)
62 vdev
->wa
.interrupt_clear_with_0
= ivpu_hw_btrs_irqs_clear_with_0_mtl(vdev
);
64 if (ivpu_device_id(vdev
) == PCI_DEVICE_ID_LNL
&&
65 ivpu_revision(vdev
) < IVPU_HW_IP_REV_LNL_B0
)
66 vdev
->wa
.disable_clock_relinquish
= true;
68 if (ivpu_hw_ip_gen(vdev
) == IVPU_HW_IP_37XX
)
69 vdev
->wa
.wp0_during_power_up
= true;
71 IVPU_PRINT_WA(punit_disabled
);
72 IVPU_PRINT_WA(clear_runtime_mem
);
73 IVPU_PRINT_WA(interrupt_clear_with_0
);
74 IVPU_PRINT_WA(disable_clock_relinquish
);
75 IVPU_PRINT_WA(wp0_during_power_up
);
78 static void timeouts_init(struct ivpu_device
*vdev
)
80 if (ivpu_test_mode
& IVPU_TEST_MODE_DISABLE_TIMEOUTS
) {
81 vdev
->timeout
.boot
= -1;
82 vdev
->timeout
.jsm
= -1;
83 vdev
->timeout
.tdr
= -1;
84 vdev
->timeout
.autosuspend
= -1;
85 vdev
->timeout
.d0i3_entry_msg
= -1;
86 } else if (ivpu_is_fpga(vdev
)) {
87 vdev
->timeout
.boot
= 100000;
88 vdev
->timeout
.jsm
= 50000;
89 vdev
->timeout
.tdr
= 2000000;
90 vdev
->timeout
.autosuspend
= -1;
91 vdev
->timeout
.d0i3_entry_msg
= 500;
92 vdev
->timeout
.state_dump_msg
= 10;
93 } else if (ivpu_is_simics(vdev
)) {
94 vdev
->timeout
.boot
= 50;
95 vdev
->timeout
.jsm
= 500;
96 vdev
->timeout
.tdr
= 10000;
97 vdev
->timeout
.autosuspend
= 100;
98 vdev
->timeout
.d0i3_entry_msg
= 100;
99 vdev
->timeout
.state_dump_msg
= 10;
101 vdev
->timeout
.boot
= 1000;
102 vdev
->timeout
.jsm
= 500;
103 vdev
->timeout
.tdr
= 2000;
104 if (ivpu_hw_ip_gen(vdev
) == IVPU_HW_IP_37XX
)
105 vdev
->timeout
.autosuspend
= 10;
107 vdev
->timeout
.autosuspend
= 100;
108 vdev
->timeout
.d0i3_entry_msg
= 5;
109 vdev
->timeout
.state_dump_msg
= 10;
113 static void memory_ranges_init(struct ivpu_device
*vdev
)
115 if (ivpu_hw_ip_gen(vdev
) == IVPU_HW_IP_37XX
) {
116 ivpu_hw_range_init(&vdev
->hw
->ranges
.global
, 0x80000000, SZ_512M
);
117 ivpu_hw_range_init(&vdev
->hw
->ranges
.user
, 0x88000000, 511 * SZ_1M
);
118 ivpu_hw_range_init(&vdev
->hw
->ranges
.shave
, 0x180000000, SZ_2G
);
119 ivpu_hw_range_init(&vdev
->hw
->ranges
.dma
, 0x200000000, SZ_128G
);
121 ivpu_hw_range_init(&vdev
->hw
->ranges
.global
, 0x80000000, SZ_512M
);
122 ivpu_hw_range_init(&vdev
->hw
->ranges
.shave
, 0x80000000, SZ_2G
);
123 ivpu_hw_range_init(&vdev
->hw
->ranges
.user
, 0x100000000, SZ_256G
);
124 vdev
->hw
->ranges
.dma
= vdev
->hw
->ranges
.user
;
128 static int wp_enable(struct ivpu_device
*vdev
)
130 return ivpu_hw_btrs_wp_drive(vdev
, true);
133 static int wp_disable(struct ivpu_device
*vdev
)
135 return ivpu_hw_btrs_wp_drive(vdev
, false);
138 int ivpu_hw_power_up(struct ivpu_device
*vdev
)
142 if (IVPU_WA(wp0_during_power_up
)) {
143 /* WP requests may fail when powering down, so issue WP 0 here */
144 ret
= wp_disable(vdev
);
146 ivpu_warn(vdev
, "Failed to disable workpoint: %d\n", ret
);
149 ret
= ivpu_hw_btrs_d0i3_disable(vdev
);
151 ivpu_warn(vdev
, "Failed to disable D0I3: %d\n", ret
);
153 ret
= wp_enable(vdev
);
155 ivpu_err(vdev
, "Failed to enable workpoint: %d\n", ret
);
159 if (ivpu_hw_btrs_gen(vdev
) >= IVPU_HW_BTRS_LNL
) {
160 if (IVPU_WA(disable_clock_relinquish
))
161 ivpu_hw_btrs_clock_relinquish_disable_lnl(vdev
);
162 ivpu_hw_btrs_profiling_freq_reg_set_lnl(vdev
);
163 ivpu_hw_btrs_ats_print_lnl(vdev
);
166 ret
= ivpu_hw_ip_host_ss_configure(vdev
);
168 ivpu_err(vdev
, "Failed to configure host SS: %d\n", ret
);
172 ivpu_hw_ip_idle_gen_disable(vdev
);
174 ret
= ivpu_hw_btrs_wait_for_clock_res_own_ack(vdev
);
176 ivpu_err(vdev
, "Timed out waiting for clock resource own ACK\n");
180 ret
= ivpu_hw_ip_pwr_domain_enable(vdev
);
182 ivpu_err(vdev
, "Failed to enable power domain: %d\n", ret
);
186 ret
= ivpu_hw_ip_host_ss_axi_enable(vdev
);
188 ivpu_err(vdev
, "Failed to enable AXI: %d\n", ret
);
192 if (ivpu_hw_btrs_gen(vdev
) == IVPU_HW_BTRS_LNL
)
193 ivpu_hw_btrs_set_port_arbitration_weights_lnl(vdev
);
195 ret
= ivpu_hw_ip_top_noc_enable(vdev
);
197 ivpu_err(vdev
, "Failed to enable TOP NOC: %d\n", ret
);
202 static void save_d0i3_entry_timestamp(struct ivpu_device
*vdev
)
204 vdev
->hw
->d0i3_entry_host_ts
= ktime_get_boottime();
205 vdev
->hw
->d0i3_entry_vpu_ts
= ivpu_hw_ip_read_perf_timer_counter(vdev
);
208 int ivpu_hw_reset(struct ivpu_device
*vdev
)
212 if (ivpu_hw_btrs_ip_reset(vdev
)) {
213 ivpu_err(vdev
, "Failed to reset NPU IP\n");
217 if (wp_disable(vdev
)) {
218 ivpu_err(vdev
, "Failed to disable workpoint\n");
225 int ivpu_hw_power_down(struct ivpu_device
*vdev
)
229 save_d0i3_entry_timestamp(vdev
);
231 if (!ivpu_hw_is_idle(vdev
))
232 ivpu_warn(vdev
, "NPU not idle during power down\n");
234 if (ivpu_hw_reset(vdev
)) {
235 ivpu_err(vdev
, "Failed to reset NPU\n");
239 if (ivpu_hw_btrs_d0i3_enable(vdev
)) {
240 ivpu_err(vdev
, "Failed to enter D0I3\n");
247 int ivpu_hw_init(struct ivpu_device
*vdev
)
249 ivpu_hw_btrs_info_init(vdev
);
250 ivpu_hw_btrs_freq_ratios_init(vdev
);
251 memory_ranges_init(vdev
);
255 atomic_set(&vdev
->hw
->firewall_irq_counter
, 0);
/*
 * ivpu_hw_boot_fw() - start firmware execution on the NPU.
 *
 * Disables snooping, enables the TBU MMU, then boots the SoC CPU.
 * Fix: restored the `ret` declaration, the error check, and the return —
 * the error string was present but unreachable as previously written.
 *
 * Return: 0 on success, negative errno from the SoC CPU boot otherwise.
 */
int ivpu_hw_boot_fw(struct ivpu_device *vdev)
{
	int ret;

	ivpu_hw_ip_snoop_disable(vdev);
	ivpu_hw_ip_tbu_mmu_enable(vdev);
	ret = ivpu_hw_ip_soc_cpu_boot(vdev);
	if (ret)
		ivpu_err(vdev, "Failed to boot SOC CPU: %d\n", ret);

	return ret;
}
273 void ivpu_hw_profiling_freq_drive(struct ivpu_device
*vdev
, bool enable
)
275 if (ivpu_hw_ip_gen(vdev
) == IVPU_HW_IP_37XX
) {
276 vdev
->hw
->pll
.profiling_freq
= PLL_PROFILING_FREQ_DEFAULT
;
281 vdev
->hw
->pll
.profiling_freq
= PLL_PROFILING_FREQ_HIGH
;
283 vdev
->hw
->pll
.profiling_freq
= PLL_PROFILING_FREQ_DEFAULT
;
286 void ivpu_irq_handlers_init(struct ivpu_device
*vdev
)
288 INIT_KFIFO(vdev
->hw
->irq
.fifo
);
290 if (ivpu_hw_ip_gen(vdev
) == IVPU_HW_IP_37XX
)
291 vdev
->hw
->irq
.ip_irq_handler
= ivpu_hw_ip_irq_handler_37xx
;
293 vdev
->hw
->irq
.ip_irq_handler
= ivpu_hw_ip_irq_handler_40xx
;
295 if (ivpu_hw_btrs_gen(vdev
) == IVPU_HW_BTRS_MTL
)
296 vdev
->hw
->irq
.btrs_irq_handler
= ivpu_hw_btrs_irq_handler_mtl
;
298 vdev
->hw
->irq
.btrs_irq_handler
= ivpu_hw_btrs_irq_handler_lnl
;
301 void ivpu_hw_irq_enable(struct ivpu_device
*vdev
)
303 kfifo_reset(&vdev
->hw
->irq
.fifo
);
304 ivpu_hw_ip_irq_enable(vdev
);
305 ivpu_hw_btrs_irq_enable(vdev
);
308 void ivpu_hw_irq_disable(struct ivpu_device
*vdev
)
310 ivpu_hw_btrs_irq_disable(vdev
);
311 ivpu_hw_ip_irq_disable(vdev
);
314 irqreturn_t
ivpu_hw_irq_handler(int irq
, void *ptr
)
316 struct ivpu_device
*vdev
= ptr
;
317 bool ip_handled
, btrs_handled
;
319 ivpu_hw_btrs_global_int_disable(vdev
);
321 btrs_handled
= ivpu_hw_btrs_irq_handler(vdev
, irq
);
322 if (!ivpu_hw_is_idle((vdev
)) || !btrs_handled
)
323 ip_handled
= ivpu_hw_ip_irq_handler(vdev
, irq
);
327 /* Re-enable global interrupts to re-trigger MSI for pending interrupts */
328 ivpu_hw_btrs_global_int_enable(vdev
);
330 if (!kfifo_is_empty(&vdev
->hw
->irq
.fifo
))
331 return IRQ_WAKE_THREAD
;
332 if (ip_handled
|| btrs_handled
)