// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/workqueue.h>
#include <generated/utsrelease.h>

#include <drm/drm_accel.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>

#include "ivpu_coredump.h"
#include "ivpu_debugfs.h"
#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_fw_log.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_job.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
#include "ivpu_ms.h"
#include "ivpu_pm.h"
#include "ivpu_sysfs.h"
#include "vpu_boot_api.h"

#ifndef DRIVER_VERSION_STR
#define DRIVER_VERSION_STR "1.0.0 " UTS_RELEASE
#endif

int ivpu_dbg_mask;
module_param_named(dbg_mask, ivpu_dbg_mask, int, 0644);
MODULE_PARM_DESC(dbg_mask, "Driver debug mask. See IVPU_DBG_* macros.");

int ivpu_test_mode;
#if IS_ENABLED(CONFIG_DRM_ACCEL_IVPU_DEBUG)
module_param_named_unsafe(test_mode, ivpu_test_mode, int, 0644);
MODULE_PARM_DESC(test_mode, "Test mode mask. See IVPU_TEST_MODE_* macros.");
#endif

u8 ivpu_pll_min_ratio;
module_param_named(pll_min_ratio, ivpu_pll_min_ratio, byte, 0644);
MODULE_PARM_DESC(pll_min_ratio, "Minimum PLL ratio used to set NPU frequency");

u8 ivpu_pll_max_ratio = U8_MAX;
module_param_named(pll_max_ratio, ivpu_pll_max_ratio, byte, 0644);
MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set NPU frequency");

int ivpu_sched_mode = IVPU_SCHED_MODE_AUTO;
module_param_named(sched_mode, ivpu_sched_mode, int, 0444);
MODULE_PARM_DESC(sched_mode, "Scheduler mode: -1 - Use default scheduler, 0 - Use OS scheduler, 1 - Use HW scheduler");

bool ivpu_disable_mmu_cont_pages;
module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0444);
MODULE_PARM_DESC(disable_mmu_cont_pages, "Disable MMU contiguous pages optimization");

bool ivpu_force_snoop;
module_param_named(force_snoop, ivpu_force_snoop, bool, 0444);
MODULE_PARM_DESC(force_snoop, "Force snooping for NPU host memory access");
struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
{
	struct ivpu_device *vdev = file_priv->vdev;

	kref_get(&file_priv->ref);

	ivpu_dbg(vdev, KREF, "file_priv get: ctx %u refcount %u\n",
		 file_priv->ctx.id, kref_read(&file_priv->ref));

	return file_priv;
}
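
/*
 * Detach a context from the device: release its command queues, unbind its
 * buffer objects and tear down its MMU context, all under file_priv->lock.
 */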
static void file_priv_unbind(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv)
{
	mutex_lock(&file_priv->lock);
	if (file_priv->bound) {
		ivpu_dbg(vdev, FILE, "file_priv unbind: ctx %u\n", file_priv->ctx.id);

		ivpu_cmdq_release_all_locked(file_priv);
		ivpu_bo_unbind_all_bos_from_context(vdev, &file_priv->ctx);
		ivpu_mmu_context_fini(vdev, &file_priv->ctx);
		file_priv->bound = false;
		drm_WARN_ON(&vdev->drm, !xa_erase_irq(&vdev->context_xa, file_priv->ctx.id));
	}
	mutex_unlock(&file_priv->lock);
}
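
/*
 * kref release callback: unbind the context and free the file_priv once the
 * last reference is dropped by ivpu_file_priv_put().
 */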
static void file_priv_release(struct kref *ref)
{
	struct ivpu_file_priv *file_priv = container_of(ref, struct ivpu_file_priv, ref);
	struct ivpu_device *vdev = file_priv->vdev;

	ivpu_dbg(vdev, FILE, "file_priv release: ctx %u bound %d\n",
		 file_priv->ctx.id, (bool)file_priv->bound);

	pm_runtime_get_sync(vdev->drm.dev);
	mutex_lock(&vdev->context_list_lock);
	file_priv_unbind(vdev, file_priv);
	drm_WARN_ON(&vdev->drm, !xa_empty(&file_priv->cmdq_xa));
	xa_destroy(&file_priv->cmdq_xa);
	mutex_unlock(&vdev->context_list_lock);
	pm_runtime_put_autosuspend(vdev->drm.dev);

	mutex_destroy(&file_priv->ms_lock);
	mutex_destroy(&file_priv->lock);
	kfree(file_priv);
}

void ivpu_file_priv_put(struct ivpu_file_priv **link)
{
	struct ivpu_file_priv *file_priv = *link;
	struct ivpu_device *vdev = file_priv->vdev;

	ivpu_dbg(vdev, KREF, "file_priv put: ctx %u refcount %u\n",
		 file_priv->ctx.id, kref_read(&file_priv->ref));

	*link = NULL;
	kref_put(&file_priv->ref, file_priv_release);
}
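
/*
 * Report whether an optional UAPI capability is supported; command queue
 * management is only available when the firmware runs the HW scheduler.
 */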
bool ivpu_is_capable(struct ivpu_device *vdev, u32 capability)
{
	switch (capability) {
	case DRM_IVPU_CAP_METRIC_STREAMER:
		return true;
	case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
		return true;
	case DRM_IVPU_CAP_MANAGE_CMDQ:
		return vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW;
	default:
		return false;
	}
}

static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	struct drm_ivpu_param *args = data;
	int ret = 0;
	int idx;

	if (!drm_dev_enter(dev, &idx))
		return -ENODEV;

	switch (args->param) {
	case DRM_IVPU_PARAM_DEVICE_ID:
		args->value = pdev->device;
		break;
	case DRM_IVPU_PARAM_DEVICE_REVISION:
		args->value = pdev->revision;
		break;
	case DRM_IVPU_PARAM_PLATFORM_TYPE:
		args->value = vdev->platform;
		break;
	case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
		args->value = ivpu_hw_ratio_to_freq(vdev, vdev->hw->pll.max_ratio);
		break;
	case DRM_IVPU_PARAM_NUM_CONTEXTS:
		args->value = ivpu_get_context_count(vdev);
		break;
	case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
		args->value = vdev->hw->ranges.user.start;
		break;
	case DRM_IVPU_PARAM_CONTEXT_ID:
		args->value = file_priv->ctx.id;
		break;
	case DRM_IVPU_PARAM_FW_API_VERSION:
		if (args->index < VPU_FW_API_VER_NUM) {
			struct vpu_firmware_header *fw_hdr;

			fw_hdr = (struct vpu_firmware_header *)vdev->fw->file->data;
			args->value = fw_hdr->api_version[args->index];
		} else {
			ret = -EINVAL;
		}
		break;
	case DRM_IVPU_PARAM_ENGINE_HEARTBEAT:
		ret = ivpu_jsm_get_heartbeat(vdev, args->index, &args->value);
		break;
	case DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID:
		args->value = (u64)atomic64_inc_return(&vdev->unique_id_counter);
		break;
	case DRM_IVPU_PARAM_TILE_CONFIG:
		args->value = vdev->hw->tile_fuse;
		break;
	case DRM_IVPU_PARAM_SKU:
		args->value = vdev->hw->sku;
		break;
	case DRM_IVPU_PARAM_CAPABILITIES:
		args->value = ivpu_is_capable(vdev, args->index);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	drm_dev_exit(idx);
	return ret;
}

static int ivpu_set_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_ivpu_param *args = data;
	int ret = 0;

	switch (args->param) {
	default:
		ret = -EINVAL;
	}

	return ret;
}
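
/*
 * ivpu_open() allocates the per-client state: a context ID from context_xa,
 * an MMU context, and the job/cmdq ID ranges derived from the context ID.
 * ivpu_postclose() drops the reference taken here.
 */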
static int ivpu_open(struct drm_device *dev, struct drm_file *file)
{
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct ivpu_file_priv *file_priv;
	u32 ctx_id;
	int idx, ret;

	if (!drm_dev_enter(dev, &idx))
		return -ENODEV;

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv) {
		ret = -ENOMEM;
		goto err_dev_exit;
	}

	INIT_LIST_HEAD(&file_priv->ms_instance_list);

	file_priv->vdev = vdev;
	file_priv->bound = true;
	kref_init(&file_priv->ref);
	mutex_init(&file_priv->lock);
	mutex_init(&file_priv->ms_lock);

	mutex_lock(&vdev->context_list_lock);

	ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, file_priv,
			   vdev->context_xa_limit, GFP_KERNEL);
	if (ret) {
		ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
		goto err_unlock;
	}

	ivpu_mmu_context_init(vdev, &file_priv->ctx, ctx_id);

	file_priv->job_limit.min = FIELD_PREP(IVPU_JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
	file_priv->job_limit.max = file_priv->job_limit.min | IVPU_JOB_ID_JOB_MASK;

	xa_init_flags(&file_priv->cmdq_xa, XA_FLAGS_ALLOC1);
	file_priv->cmdq_limit.min = IVPU_CMDQ_MIN_ID;
	file_priv->cmdq_limit.max = IVPU_CMDQ_MAX_ID;

	mutex_unlock(&vdev->context_list_lock);
	drm_dev_exit(idx);

	file->driver_priv = file_priv;

	ivpu_dbg(vdev, FILE, "file_priv create: ctx %u process %s pid %d\n",
		 ctx_id, current->comm, task_pid_nr(current));

	return 0;

err_unlock:
	mutex_unlock(&vdev->context_list_lock);
	mutex_destroy(&file_priv->ms_lock);
	mutex_destroy(&file_priv->lock);
	kfree(file_priv);
err_dev_exit:
	drm_dev_exit(idx);
	return ret;
}

static void ivpu_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = to_ivpu_device(dev);

	ivpu_dbg(vdev, FILE, "file_priv close: ctx %u process %s pid %d\n",
		 file_priv->ctx.id, current->comm, task_pid_nr(current));

	ivpu_ms_cleanup(file_priv);
	ivpu_file_priv_put(&file_priv);
}
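
/*
 * Ioctl table exposed through the accel node; payload structures are defined
 * in include/uapi/drm/ivpu_accel.h.
 */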
static const struct drm_ioctl_desc ivpu_drm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(IVPU_GET_PARAM, ivpu_get_param_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_SET_PARAM, ivpu_set_param_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_CREATE, ivpu_bo_create_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_INFO, ivpu_bo_info_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_SUBMIT, ivpu_submit_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_WAIT, ivpu_bo_wait_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_START, ivpu_ms_start_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_GET_DATA, ivpu_ms_get_data_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_STOP, ivpu_ms_stop_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_GET_INFO, ivpu_ms_get_info_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_CMDQ_CREATE, ivpu_cmdq_create_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_CMDQ_DESTROY, ivpu_cmdq_destroy_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_CMDQ_SUBMIT, ivpu_cmdq_submit_ioctl, 0),
};
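
/*
 * Poll the boot-message IPC channel until the firmware reports ready or the
 * boot timeout expires.
 */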
static int ivpu_wait_for_ready(struct ivpu_device *vdev)
{
	struct ivpu_ipc_consumer cons;
	struct ivpu_ipc_hdr ipc_hdr;
	unsigned long timeout;
	int ret;

	if (ivpu_test_mode & IVPU_TEST_MODE_FW_TEST)
		return 0;

	ivpu_ipc_consumer_add(vdev, &cons, IVPU_IPC_CHAN_BOOT_MSG, NULL);

	timeout = jiffies + msecs_to_jiffies(vdev->timeout.boot);
	while (1) {
		ivpu_ipc_irq_handler(vdev);
		ret = ivpu_ipc_receive(vdev, &cons, &ipc_hdr, NULL, 0);
		if (ret != -ETIMEDOUT || time_after_eq(jiffies, timeout))
			break;

		cond_resched();
	}

	ivpu_ipc_consumer_del(vdev, &cons);

	if (!ret && ipc_hdr.data_addr != IVPU_IPC_BOOT_MSG_DATA_ADDR) {
		ivpu_err(vdev, "Invalid NPU ready message: 0x%x\n",
			 ipc_hdr.data_addr);
		return -EIO;
	}

	if (!ret)
		ivpu_dbg(vdev, PM, "NPU ready message received successfully\n");

	return ret;
}
static int ivpu_hw_sched_init(struct ivpu_device *vdev)
{
	int ret = 0;

	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
		ret = ivpu_jsm_hws_setup_priority_bands(vdev);
		if (ret) {
			ivpu_err(vdev, "Failed to enable hw scheduler: %d", ret);
			return ret;
		}
	}

	return ret;
}

/**
 * ivpu_boot() - Start VPU firmware
 * @vdev: VPU device
 *
 * This function is paired with ivpu_shutdown() but it doesn't power up the
 * VPU because power up has to be called very early in ivpu_probe().
 */
int ivpu_boot(struct ivpu_device *vdev)
{
	int ret;

	/* Update boot params located at first 4KB of FW memory */
	ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem));

	ret = ivpu_hw_boot_fw(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to start the firmware: %d\n", ret);
		return ret;
	}

	ret = ivpu_wait_for_ready(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to boot the firmware: %d\n", ret);
		goto err_diagnose_failure;
	}

	ivpu_hw_irq_clear(vdev);
	enable_irq(vdev->irq);
	ivpu_hw_irq_enable(vdev);
	ivpu_ipc_enable(vdev);

	if (ivpu_fw_is_cold_boot(vdev)) {
		ret = ivpu_pm_dct_init(vdev);
		if (ret)
			goto err_diagnose_failure;

		ret = ivpu_hw_sched_init(vdev);
		if (ret)
			goto err_diagnose_failure;
	}

	return 0;

err_diagnose_failure:
	ivpu_hw_diagnose_failure(vdev);
	ivpu_mmu_evtq_dump(vdev);
	ivpu_dev_coredump(vdev);
	return ret;
}

void ivpu_prepare_for_reset(struct ivpu_device *vdev)
{
	ivpu_hw_irq_disable(vdev);
	disable_irq(vdev->irq);
	cancel_work_sync(&vdev->irq_ipc_work);
	cancel_work_sync(&vdev->irq_dct_work);
	cancel_work_sync(&vdev->context_abort_work);
	ivpu_ipc_disable(vdev);
	ivpu_mmu_disable(vdev);
}
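
/*
 * Power down the NPU; the counterpart of ivpu_boot(). Returns the power-down
 * status so callers can report a failed shutdown.
 */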
int ivpu_shutdown(struct ivpu_device *vdev)
{
	int ret;

	/* Save PCI state before powering down as it sometimes gets corrupted if NPU hangs */
	pci_save_state(to_pci_dev(vdev->drm.dev));

	ret = ivpu_hw_power_down(vdev);
	if (ret)
		ivpu_warn(vdev, "Failed to power down HW: %d\n", ret);

	pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);

	return ret;
}

static const struct file_operations ivpu_fops = {
	.owner = THIS_MODULE,
	.mmap = drm_gem_mmap,
	DRM_ACCEL_FOPS,
};

static const struct drm_driver driver = {
	.driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL,

	.open = ivpu_open,
	.postclose = ivpu_postclose,

	.gem_create_object = ivpu_gem_create_object,
	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,

	.ioctls = ivpu_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls),
	.fops = &ivpu_fops,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,

	.major = 1,
};

static int ivpu_irq_init(struct ivpu_device *vdev)
{
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	int ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (ret < 0) {
		ivpu_err(vdev, "Failed to allocate a MSI IRQ: %d\n", ret);
		return ret;
	}

	INIT_WORK(&vdev->irq_ipc_work, ivpu_ipc_irq_work_fn);
	INIT_WORK(&vdev->irq_dct_work, ivpu_pm_irq_dct_work_fn);
	INIT_WORK(&vdev->context_abort_work, ivpu_context_abort_work_fn);

	ivpu_irq_handlers_init(vdev);

	vdev->irq = pci_irq_vector(pdev, 0);

	ret = devm_request_irq(vdev->drm.dev, vdev->irq, ivpu_hw_irq_handler,
			       IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
	if (ret)
		ivpu_err(vdev, "Failed to request an IRQ %d\n", ret);

	return ret;
}
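
/*
 * Map the NPU register BARs, set up DMA masks and enable bus mastering.
 * BAR0 (RegV) holds the VPU IP registers, BAR4 (RegB) the buttress registers.
 */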
static int ivpu_pci_init(struct ivpu_device *vdev)
{
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	struct resource *bar0 = &pdev->resource[0];
	struct resource *bar4 = &pdev->resource[4];
	int ret;

	ivpu_dbg(vdev, MISC, "Mapping BAR0 (RegV) %pR\n", bar0);
	vdev->regv = devm_ioremap_resource(vdev->drm.dev, bar0);
	if (IS_ERR(vdev->regv)) {
		ivpu_err(vdev, "Failed to map bar 0: %pe\n", vdev->regv);
		return PTR_ERR(vdev->regv);
	}

	ivpu_dbg(vdev, MISC, "Mapping BAR4 (RegB) %pR\n", bar4);
	vdev->regb = devm_ioremap_resource(vdev->drm.dev, bar4);
	if (IS_ERR(vdev->regb)) {
		ivpu_err(vdev, "Failed to map bar 4: %pe\n", vdev->regb);
		return PTR_ERR(vdev->regb);
	}

	ret = dma_set_mask_and_coherent(vdev->drm.dev, DMA_BIT_MASK(vdev->hw->dma_bits));
	if (ret) {
		ivpu_err(vdev, "Failed to set DMA mask: %d\n", ret);
		return ret;
	}
	dma_set_max_seg_size(vdev->drm.dev, UINT_MAX);

	/* Clear any pending errors */
	pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f);

	/* NPU does not require the default 10 ms D3hot delay */
	pdev->d3hot_delay = 0;

	ret = pcim_enable_device(pdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable PCI device: %d\n", ret);
		return ret;
	}

	pci_set_master(pdev);

	return 0;
}
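
/*
 * One-time device initialization: allocate sub-structures, set up PCI, IRQs
 * and the MMU, load and boot the firmware, then enable power management.
 */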
static int ivpu_dev_init(struct ivpu_device *vdev)
{
	int ret;

	vdev->hw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->hw), GFP_KERNEL);
	if (!vdev->hw)
		return -ENOMEM;

	vdev->mmu = drmm_kzalloc(&vdev->drm, sizeof(*vdev->mmu), GFP_KERNEL);
	if (!vdev->mmu)
		return -ENOMEM;

	vdev->fw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->fw), GFP_KERNEL);
	if (!vdev->fw)
		return -ENOMEM;

	vdev->ipc = drmm_kzalloc(&vdev->drm, sizeof(*vdev->ipc), GFP_KERNEL);
	if (!vdev->ipc)
		return -ENOMEM;

	vdev->pm = drmm_kzalloc(&vdev->drm, sizeof(*vdev->pm), GFP_KERNEL);
	if (!vdev->pm)
		return -ENOMEM;

	if (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_40XX)
		vdev->hw->dma_bits = 48;
	else
		vdev->hw->dma_bits = 38;

	vdev->platform = IVPU_PLATFORM_INVALID;
	vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
	vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
	atomic64_set(&vdev->unique_id_counter, 0);
	xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
	xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
	INIT_LIST_HEAD(&vdev->bo_list);

	vdev->db_limit.min = IVPU_MIN_DB;
	vdev->db_limit.max = IVPU_MAX_DB;

	ret = drmm_mutex_init(&vdev->drm, &vdev->context_list_lock);
	if (ret)
		goto err_xa_destroy;

	ret = drmm_mutex_init(&vdev->drm, &vdev->submitted_jobs_lock);
	if (ret)
		goto err_xa_destroy;

	ret = drmm_mutex_init(&vdev->drm, &vdev->bo_list_lock);
	if (ret)
		goto err_xa_destroy;

	ret = ivpu_pci_init(vdev);
	if (ret)
		goto err_xa_destroy;

	ret = ivpu_irq_init(vdev);
	if (ret)
		goto err_xa_destroy;

	/* Init basic HW info based on buttress registers which are accessible before power up */
	ret = ivpu_hw_init(vdev);
	if (ret)
		goto err_xa_destroy;

	/* Power up early so the rest of init code can access VPU registers */
	ret = ivpu_hw_power_up(vdev);
	if (ret)
		goto err_shutdown;

	ivpu_mmu_global_context_init(vdev);

	ret = ivpu_mmu_init(vdev);
	if (ret)
		goto err_mmu_gctx_fini;

	ret = ivpu_mmu_reserved_context_init(vdev);
	if (ret)
		goto err_mmu_gctx_fini;

	ret = ivpu_fw_init(vdev);
	if (ret)
		goto err_mmu_rctx_fini;

	ret = ivpu_ipc_init(vdev);
	if (ret)
		goto err_fw_fini;

	ivpu_pm_init(vdev);

	ret = ivpu_boot(vdev);
	if (ret)
		goto err_ipc_fini;

	ivpu_job_done_consumer_init(vdev);
	ivpu_pm_enable(vdev);

	return 0;

err_ipc_fini:
	ivpu_ipc_fini(vdev);
err_fw_fini:
	ivpu_fw_fini(vdev);
err_mmu_rctx_fini:
	ivpu_mmu_reserved_context_fini(vdev);
err_mmu_gctx_fini:
	ivpu_mmu_global_context_fini(vdev);
err_shutdown:
	ivpu_shutdown(vdev);
err_xa_destroy:
	xa_destroy(&vdev->db_xa);
	xa_destroy(&vdev->submitted_jobs_xa);
	xa_destroy(&vdev->context_xa);
	return ret;
}

static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev)
{
	struct ivpu_file_priv *file_priv;
	unsigned long ctx_id;

	mutex_lock(&vdev->context_list_lock);

	xa_for_each(&vdev->context_xa, ctx_id, file_priv)
		file_priv_unbind(vdev, file_priv);

	mutex_unlock(&vdev->context_list_lock);
}

static void ivpu_dev_fini(struct ivpu_device *vdev)
{
	ivpu_jobs_abort_all(vdev);
	ivpu_pm_cancel_recovery(vdev);
	ivpu_pm_disable(vdev);
	ivpu_prepare_for_reset(vdev);
	ivpu_shutdown(vdev);

	ivpu_ms_cleanup_all(vdev);
	ivpu_job_done_consumer_fini(vdev);
	ivpu_bo_unbind_all_user_contexts(vdev);

	ivpu_ipc_fini(vdev);
	ivpu_fw_fini(vdev);
	ivpu_mmu_reserved_context_fini(vdev);
	ivpu_mmu_global_context_fini(vdev);

	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->db_xa));
	xa_destroy(&vdev->db_xa);
	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
	xa_destroy(&vdev->submitted_jobs_xa);
	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->context_xa));
	xa_destroy(&vdev->context_xa);
}

static struct pci_device_id ivpu_pci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_ARL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PTL_P) },
	{ }
};
MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);

static int ivpu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ivpu_device *vdev;
	int ret;

	vdev = devm_drm_dev_alloc(&pdev->dev, &driver, struct ivpu_device, drm);
	if (IS_ERR(vdev))
		return PTR_ERR(vdev);

	pci_set_drvdata(pdev, vdev);

	ret = ivpu_dev_init(vdev);
	if (ret)
		return ret;

	ivpu_debugfs_init(vdev);
	ivpu_sysfs_init(vdev);

	ret = drm_dev_register(&vdev->drm, 0);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register DRM device: %d\n", ret);
		ivpu_dev_fini(vdev);
	}

	return ret;
}

static void ivpu_remove(struct pci_dev *pdev)
{
	struct ivpu_device *vdev = pci_get_drvdata(pdev);

	drm_dev_unplug(&vdev->drm);
	ivpu_dev_fini(vdev);
}
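
/*
 * System sleep and runtime PM callbacks as well as the PCI reset handlers
 * are implemented in ivpu_pm.c.
 */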
static const struct dev_pm_ops ivpu_drv_pci_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(ivpu_pm_suspend_cb, ivpu_pm_resume_cb)
	SET_RUNTIME_PM_OPS(ivpu_pm_runtime_suspend_cb, ivpu_pm_runtime_resume_cb, NULL)
};

static const struct pci_error_handlers ivpu_drv_pci_err = {
	.reset_prepare = ivpu_pm_reset_prepare_cb,
	.reset_done = ivpu_pm_reset_done_cb,
};

static struct pci_driver ivpu_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = ivpu_pci_ids,
	.probe = ivpu_probe,
	.remove = ivpu_remove,
	.driver = {
		.pm = &ivpu_drv_pci_pm,
	},
	.err_handler = &ivpu_drv_pci_err,
};

module_pci_driver(ivpu_pci_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(DRIVER_VERSION_STR);