// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <generated/utsrelease.h>

#include <drm/drm_accel.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>

#include "ivpu_coredump.h"
#include "ivpu_debugfs.h"
#include "ivpu_fw_log.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_mmu_context.h"
#include "ivpu_sysfs.h"
#include "vpu_boot_api.h"

#ifndef DRIVER_VERSION_STR
#define DRIVER_VERSION_STR "1.0.0 " UTS_RELEASE
#endif
static struct lock_class_key submitted_jobs_xa_lock_class_key;

int ivpu_dbg_mask;
module_param_named(dbg_mask, ivpu_dbg_mask, int, 0644);
MODULE_PARM_DESC(dbg_mask, "Driver debug mask. See IVPU_DBG_* macros.");

int ivpu_test_mode;
#if IS_ENABLED(CONFIG_DRM_ACCEL_IVPU_DEBUG)
module_param_named_unsafe(test_mode, ivpu_test_mode, int, 0644);
MODULE_PARM_DESC(test_mode, "Test mode mask. See IVPU_TEST_MODE_* macros.");
#endif
u8 ivpu_pll_min_ratio;
module_param_named(pll_min_ratio, ivpu_pll_min_ratio, byte, 0644);
MODULE_PARM_DESC(pll_min_ratio, "Minimum PLL ratio used to set NPU frequency");

u8 ivpu_pll_max_ratio = U8_MAX;
module_param_named(pll_max_ratio, ivpu_pll_max_ratio, byte, 0644);
MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set NPU frequency");

int ivpu_sched_mode = IVPU_SCHED_MODE_AUTO;
module_param_named(sched_mode, ivpu_sched_mode, int, 0444);
MODULE_PARM_DESC(sched_mode, "Scheduler mode: -1 - Use default scheduler, 0 - Use OS scheduler, 1 - Use HW scheduler");

bool ivpu_disable_mmu_cont_pages;
module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0444);
MODULE_PARM_DESC(disable_mmu_cont_pages, "Disable MMU contiguous pages optimization");

bool ivpu_force_snoop;
module_param_named(force_snoop, ivpu_force_snoop, bool, 0444);
MODULE_PARM_DESC(force_snoop, "Force snooping for NPU host memory access");
struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
{
	struct ivpu_device *vdev = file_priv->vdev;

	kref_get(&file_priv->ref);

	ivpu_dbg(vdev, KREF, "file_priv get: ctx %u refcount %u\n",
		 file_priv->ctx.id, kref_read(&file_priv->ref));

	return file_priv;
}
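
/*
 * Detach a file context from the device: release its command queues, unbind
 * its buffer objects, tear down its MMU context and drop it from context_xa.
 * Called with vdev->context_list_lock held, from file_priv_release() and
 * from ivpu_bo_unbind_all_user_contexts().
 */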
static void file_priv_unbind(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv)
{
	mutex_lock(&file_priv->lock);
	if (file_priv->bound) {
		ivpu_dbg(vdev, FILE, "file_priv unbind: ctx %u\n", file_priv->ctx.id);

		ivpu_cmdq_release_all_locked(file_priv);
		ivpu_bo_unbind_all_bos_from_context(vdev, &file_priv->ctx);
		ivpu_mmu_context_fini(vdev, &file_priv->ctx);
		file_priv->bound = false;
		drm_WARN_ON(&vdev->drm, !xa_erase_irq(&vdev->context_xa, file_priv->ctx.id));
	}
	mutex_unlock(&file_priv->lock);
}
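
/*
 * kref release callback for a file context. The device is resumed so the
 * unbind can talk to the HW, the context is unbound under
 * vdev->context_list_lock, and the remaining per-file resources are freed.
 */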
static void file_priv_release(struct kref *ref)
{
	struct ivpu_file_priv *file_priv = container_of(ref, struct ivpu_file_priv, ref);
	struct ivpu_device *vdev = file_priv->vdev;

	ivpu_dbg(vdev, FILE, "file_priv release: ctx %u bound %d\n",
		 file_priv->ctx.id, (bool)file_priv->bound);

	pm_runtime_get_sync(vdev->drm.dev);
	mutex_lock(&vdev->context_list_lock);
	file_priv_unbind(vdev, file_priv);
	drm_WARN_ON(&vdev->drm, !xa_empty(&file_priv->cmdq_xa));
	xa_destroy(&file_priv->cmdq_xa);
	mutex_unlock(&vdev->context_list_lock);
	pm_runtime_put_autosuspend(vdev->drm.dev);

	mutex_destroy(&file_priv->ms_lock);
	mutex_destroy(&file_priv->lock);
	kfree(file_priv);
}
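
/*
 * Drop a reference taken by ivpu_open() or ivpu_file_priv_get(). The last
 * put triggers file_priv_release().
 */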
void ivpu_file_priv_put(struct ivpu_file_priv **link)
{
	struct ivpu_file_priv *file_priv = *link;
	struct ivpu_device *vdev = file_priv->vdev;

	ivpu_dbg(vdev, KREF, "file_priv put: ctx %u refcount %u\n",
		 file_priv->ctx.id, kref_read(&file_priv->ref));

	*link = NULL;
	kref_put(&file_priv->ref, file_priv_release);
}
static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param *args)
{
	switch (args->index) {
	case DRM_IVPU_CAP_METRIC_STREAMER:
		args->value = 1;
		break;
	case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
		args->value = 1;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
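
/*
 * DRM_IOCTL_IVPU_GET_PARAM: report read-only device properties (PCI IDs,
 * clock rate, context limits, firmware API version, SKU, capabilities) to
 * user space.
 */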
static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	struct drm_ivpu_param *args = data;
	int ret = 0;
	int idx;

	if (!drm_dev_enter(dev, &idx))
		return -ENODEV;

	switch (args->param) {
	case DRM_IVPU_PARAM_DEVICE_ID:
		args->value = pdev->device;
		break;
	case DRM_IVPU_PARAM_DEVICE_REVISION:
		args->value = pdev->revision;
		break;
	case DRM_IVPU_PARAM_PLATFORM_TYPE:
		args->value = vdev->platform;
		break;
	case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
		args->value = ivpu_hw_ratio_to_freq(vdev, vdev->hw->pll.max_ratio);
		break;
	case DRM_IVPU_PARAM_NUM_CONTEXTS:
		args->value = ivpu_get_context_count(vdev);
		break;
	case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
		args->value = vdev->hw->ranges.user.start;
		break;
	case DRM_IVPU_PARAM_CONTEXT_ID:
		args->value = file_priv->ctx.id;
		break;
	case DRM_IVPU_PARAM_FW_API_VERSION:
		if (args->index < VPU_FW_API_VER_NUM) {
			struct vpu_firmware_header *fw_hdr;

			fw_hdr = (struct vpu_firmware_header *)vdev->fw->file->data;
			args->value = fw_hdr->api_version[args->index];
		} else {
			ret = -EINVAL;
		}
		break;
	case DRM_IVPU_PARAM_ENGINE_HEARTBEAT:
		ret = ivpu_jsm_get_heartbeat(vdev, args->index, &args->value);
		break;
	case DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID:
		args->value = (u64)atomic64_inc_return(&vdev->unique_id_counter);
		break;
	case DRM_IVPU_PARAM_TILE_CONFIG:
		args->value = vdev->hw->tile_fuse;
		break;
	case DRM_IVPU_PARAM_SKU:
		args->value = vdev->hw->sku;
		break;
	case DRM_IVPU_PARAM_CAPABILITIES:
		ret = ivpu_get_capabilities(vdev, args);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	drm_dev_exit(idx);
	return ret;
}
static int ivpu_set_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_ivpu_param *args = data;
	int ret = 0;

	switch (args->param) {
	default:
		ret = -EINVAL;
	}

	return ret;
}
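
/*
 * Per-file open: allocate a file_priv, reserve a context ID from context_xa,
 * set up a per-context MMU context and derive the job/command queue ID
 * ranges used by the submission path.
 */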
static int ivpu_open(struct drm_device *dev, struct drm_file *file)
{
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct ivpu_file_priv *file_priv;
	u32 ctx_id;
	int idx, ret;

	if (!drm_dev_enter(dev, &idx))
		return -ENODEV;

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv) {
		ret = -ENOMEM;
		goto err_dev_exit;
	}

	INIT_LIST_HEAD(&file_priv->ms_instance_list);

	file_priv->vdev = vdev;
	file_priv->bound = true;
	kref_init(&file_priv->ref);
	mutex_init(&file_priv->lock);
	mutex_init(&file_priv->ms_lock);

	mutex_lock(&vdev->context_list_lock);

	ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, file_priv,
			   vdev->context_xa_limit, GFP_KERNEL);
	if (ret) {
		ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
		goto err_unlock;
	}

	ivpu_mmu_context_init(vdev, &file_priv->ctx, ctx_id);

	file_priv->job_limit.min = FIELD_PREP(IVPU_JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
	file_priv->job_limit.max = file_priv->job_limit.min | IVPU_JOB_ID_JOB_MASK;

	xa_init_flags(&file_priv->cmdq_xa, XA_FLAGS_ALLOC1);
	file_priv->cmdq_limit.min = IVPU_CMDQ_MIN_ID;
	file_priv->cmdq_limit.max = IVPU_CMDQ_MAX_ID;

	mutex_unlock(&vdev->context_list_lock);
	drm_dev_exit(idx);

	file->driver_priv = file_priv;

	ivpu_dbg(vdev, FILE, "file_priv create: ctx %u process %s pid %d\n",
		 ctx_id, current->comm, task_pid_nr(current));

	return 0;

err_unlock:
	mutex_unlock(&vdev->context_list_lock);
	mutex_destroy(&file_priv->ms_lock);
	mutex_destroy(&file_priv->lock);
	kfree(file_priv);
err_dev_exit:
	drm_dev_exit(idx);
	return ret;
}
static void ivpu_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = to_ivpu_device(dev);

	ivpu_dbg(vdev, FILE, "file_priv close: ctx %u process %s pid %d\n",
		 file_priv->ctx.id, current->comm, task_pid_nr(current));

	ivpu_ms_cleanup(file_priv);
	ivpu_file_priv_put(&file_priv);
}
static const struct drm_ioctl_desc ivpu_drm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(IVPU_GET_PARAM, ivpu_get_param_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_SET_PARAM, ivpu_set_param_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_CREATE, ivpu_bo_create_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_INFO, ivpu_bo_info_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_SUBMIT, ivpu_submit_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_WAIT, ivpu_bo_wait_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_START, ivpu_ms_start_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_GET_DATA, ivpu_ms_get_data_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_STOP, ivpu_ms_stop_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_GET_INFO, ivpu_ms_get_info_ioctl, 0),
};
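
/*
 * Poll the boot IPC channel until the firmware posts its ready message or
 * the boot timeout expires. IRQs are still disabled at this point, so the
 * IPC IRQ handler is invoked manually in the polling loop.
 */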
static int ivpu_wait_for_ready(struct ivpu_device *vdev)
{
	struct ivpu_ipc_consumer cons;
	struct ivpu_ipc_hdr ipc_hdr;
	unsigned long timeout;
	int ret;

	if (ivpu_test_mode & IVPU_TEST_MODE_FW_TEST)
		return 0;

	ivpu_ipc_consumer_add(vdev, &cons, IVPU_IPC_CHAN_BOOT_MSG, NULL);

	timeout = jiffies + msecs_to_jiffies(vdev->timeout.boot);
	while (1) {
		ivpu_ipc_irq_handler(vdev);
		ret = ivpu_ipc_receive(vdev, &cons, &ipc_hdr, NULL, 0);
		if (ret != -ETIMEDOUT || time_after_eq(jiffies, timeout))
			break;

		cond_resched();
	}

	ivpu_ipc_consumer_del(vdev, &cons);

	if (!ret && ipc_hdr.data_addr != IVPU_IPC_BOOT_MSG_DATA_ADDR) {
		ivpu_err(vdev, "Invalid NPU ready message: 0x%x\n",
			 ipc_hdr.data_addr);
		return -EIO;
	}

	if (!ret)
		ivpu_dbg(vdev, PM, "NPU ready message received successfully\n");

	return ret;
}
static int ivpu_hw_sched_init(struct ivpu_device *vdev)
{
	int ret = 0;

	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
		ret = ivpu_jsm_hws_setup_priority_bands(vdev);
		if (ret)
			ivpu_err(vdev, "Failed to enable hw scheduler: %d", ret);
	}

	return ret;
}
/**
 * ivpu_boot() - Start VPU firmware
 * @vdev: VPU device
 *
 * This function is paired with ivpu_shutdown() but it doesn't power up the
 * VPU because power up has to be called very early in ivpu_probe().
 */
int ivpu_boot(struct ivpu_device *vdev)
{
	int ret;

	/* Update boot params located at first 4KB of FW memory */
	ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem));

	ret = ivpu_hw_boot_fw(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to start the firmware: %d\n", ret);
		return ret;
	}

	ret = ivpu_wait_for_ready(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to boot the firmware: %d\n", ret);
		goto err_diagnose_failure;
	}

	ivpu_hw_irq_clear(vdev);
	enable_irq(vdev->irq);
	ivpu_hw_irq_enable(vdev);
	ivpu_ipc_enable(vdev);

	if (ivpu_fw_is_cold_boot(vdev)) {
		ret = ivpu_pm_dct_init(vdev);
		if (ret)
			goto err_diagnose_failure;

		ret = ivpu_hw_sched_init(vdev);
		if (ret)
			goto err_diagnose_failure;
	}

	return 0;

err_diagnose_failure:
	ivpu_hw_diagnose_failure(vdev);
	ivpu_mmu_evtq_dump(vdev);
	ivpu_dev_coredump(vdev);
	return ret;
}
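
/*
 * ivpu_prepare_for_reset() quiesces IRQ, IPC and MMU activity so that
 * ivpu_shutdown() can save the PCI state and power the NPU down safely.
 */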
void ivpu_prepare_for_reset(struct ivpu_device *vdev)
{
	ivpu_hw_irq_disable(vdev);
	disable_irq(vdev->irq);
	ivpu_ipc_disable(vdev);
	ivpu_mmu_disable(vdev);
}
int ivpu_shutdown(struct ivpu_device *vdev)
{
	int ret;

	/* Save PCI state before powering down as it sometimes gets corrupted if NPU hangs */
	pci_save_state(to_pci_dev(vdev->drm.dev));

	ret = ivpu_hw_power_down(vdev);
	if (ret)
		ivpu_warn(vdev, "Failed to power down HW: %d\n", ret);

	pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);

	return ret;
}
static const struct file_operations ivpu_fops = {
	.owner = THIS_MODULE,
	DRM_ACCEL_FOPS,
};

static const struct drm_driver driver = {
	.driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL,

	.open = ivpu_open,
	.postclose = ivpu_postclose,

	.gem_create_object = ivpu_gem_create_object,
	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,

	.ioctls = ivpu_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls),
	.fops = &ivpu_fops,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,

	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
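
/*
 * Abort every context that has pending MMU faults and has not been aborted
 * yet. Invoked from the IRQ thread when an MMU event queue interrupt is
 * reported.
 */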
static void ivpu_context_abort_invalid(struct ivpu_device *vdev)
{
	struct ivpu_file_priv *file_priv;
	unsigned long ctx_id;

	mutex_lock(&vdev->context_list_lock);

	xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
		if (!file_priv->has_mmu_faults || file_priv->aborted)
			continue;

		mutex_lock(&file_priv->lock);
		ivpu_context_abort_locked(file_priv);
		file_priv->aborted = true;
		mutex_unlock(&file_priv->lock);
	}

	mutex_unlock(&vdev->context_list_lock);
}
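
/*
 * Threaded IRQ handler: the hard IRQ handler (ivpu_hw_irq_handler) pushes
 * interrupt source IDs into hw->irq.fifo; this thread drains the FIFO and
 * dispatches to the IPC, MMU event queue and DCT handlers.
 */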
static irqreturn_t ivpu_irq_thread_handler(int irq, void *arg)
{
	struct ivpu_device *vdev = arg;
	u8 irq_src;

	if (kfifo_is_empty(&vdev->hw->irq.fifo))
		return IRQ_NONE;

	while (kfifo_get(&vdev->hw->irq.fifo, &irq_src)) {
		switch (irq_src) {
		case IVPU_HW_IRQ_SRC_IPC:
			ivpu_ipc_irq_thread_handler(vdev);
			break;
		case IVPU_HW_IRQ_SRC_MMU_EVTQ:
			ivpu_context_abort_invalid(vdev);
			break;
		case IVPU_HW_IRQ_SRC_DCT:
			ivpu_pm_dct_irq_thread_handler(vdev);
			break;
		default:
			ivpu_err_ratelimited(vdev, "Unknown IRQ source: %u\n", irq_src);
			break;
		}
	}

	return IRQ_HANDLED;
}
static int ivpu_irq_init(struct ivpu_device *vdev)
{
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	int ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (ret < 0) {
		ivpu_err(vdev, "Failed to allocate a MSI IRQ: %d\n", ret);
		return ret;
	}

	ivpu_irq_handlers_init(vdev);

	vdev->irq = pci_irq_vector(pdev, 0);

	ret = devm_request_threaded_irq(vdev->drm.dev, vdev->irq, ivpu_hw_irq_handler,
					ivpu_irq_thread_handler, IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
	if (ret)
		ivpu_err(vdev, "Failed to request an IRQ %d\n", ret);

	return ret;
}
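
/*
 * Map BAR0 (NPU registers) and BAR4 (buttress registers), set the DMA mask
 * according to the HW generation, then enable the PCI device and bus
 * mastering.
 */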
static int ivpu_pci_init(struct ivpu_device *vdev)
{
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	struct resource *bar0 = &pdev->resource[0];
	struct resource *bar4 = &pdev->resource[4];
	int ret;

	ivpu_dbg(vdev, MISC, "Mapping BAR0 (RegV) %pR\n", bar0);
	vdev->regv = devm_ioremap_resource(vdev->drm.dev, bar0);
	if (IS_ERR(vdev->regv)) {
		ivpu_err(vdev, "Failed to map bar 0: %pe\n", vdev->regv);
		return PTR_ERR(vdev->regv);
	}

	ivpu_dbg(vdev, MISC, "Mapping BAR4 (RegB) %pR\n", bar4);
	vdev->regb = devm_ioremap_resource(vdev->drm.dev, bar4);
	if (IS_ERR(vdev->regb)) {
		ivpu_err(vdev, "Failed to map bar 4: %pe\n", vdev->regb);
		return PTR_ERR(vdev->regb);
	}

	ret = dma_set_mask_and_coherent(vdev->drm.dev, DMA_BIT_MASK(vdev->hw->dma_bits));
	if (ret) {
		ivpu_err(vdev, "Failed to set DMA mask: %d\n", ret);
		return ret;
	}
	dma_set_max_seg_size(vdev->drm.dev, UINT_MAX);

	/* Clear any pending errors */
	pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f);

	/* NPU does not require 10m D3hot delay */
	pdev->d3hot_delay = 0;

	ret = pcim_enable_device(pdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable PCI device: %d\n", ret);
		return ret;
	}

	pci_set_master(pdev);

	return 0;
}
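
/*
 * Device init order: drmm-managed allocations and XArrays first, then PCI
 * and IRQ setup, basic HW init from buttress registers, early power up,
 * MMU global/reserved contexts, firmware and IPC, and finally the first
 * firmware boot.
 */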
static int ivpu_dev_init(struct ivpu_device *vdev)
{
	int ret;

	vdev->hw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->hw), GFP_KERNEL);
	if (!vdev->hw)
		return -ENOMEM;

	vdev->mmu = drmm_kzalloc(&vdev->drm, sizeof(*vdev->mmu), GFP_KERNEL);
	if (!vdev->mmu)
		return -ENOMEM;

	vdev->fw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->fw), GFP_KERNEL);
	if (!vdev->fw)
		return -ENOMEM;

	vdev->ipc = drmm_kzalloc(&vdev->drm, sizeof(*vdev->ipc), GFP_KERNEL);
	if (!vdev->ipc)
		return -ENOMEM;

	vdev->pm = drmm_kzalloc(&vdev->drm, sizeof(*vdev->pm), GFP_KERNEL);
	if (!vdev->pm)
		return -ENOMEM;

	if (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_40XX)
		vdev->hw->dma_bits = 48;
	else
		vdev->hw->dma_bits = 38;

	vdev->platform = IVPU_PLATFORM_INVALID;
	vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
	vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
	atomic64_set(&vdev->unique_id_counter, 0);
	xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
	xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
	lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
	INIT_LIST_HEAD(&vdev->bo_list);

	vdev->db_limit.min = IVPU_MIN_DB;
	vdev->db_limit.max = IVPU_MAX_DB;

	ret = drmm_mutex_init(&vdev->drm, &vdev->context_list_lock);
	if (ret)
		goto err_xa_destroy;

	ret = drmm_mutex_init(&vdev->drm, &vdev->bo_list_lock);
	if (ret)
		goto err_xa_destroy;

	ret = ivpu_pci_init(vdev);
	if (ret)
		goto err_xa_destroy;

	ret = ivpu_irq_init(vdev);
	if (ret)
		goto err_xa_destroy;

	/* Init basic HW info based on buttress registers which are accessible before power up */
	ret = ivpu_hw_init(vdev);
	if (ret)
		goto err_xa_destroy;

	/* Power up early so the rest of init code can access VPU registers */
	ret = ivpu_hw_power_up(vdev);
	if (ret)
		goto err_shutdown;

	ivpu_mmu_global_context_init(vdev);

	ret = ivpu_mmu_init(vdev);
	if (ret)
		goto err_mmu_gctx_fini;

	ret = ivpu_mmu_reserved_context_init(vdev);
	if (ret)
		goto err_mmu_gctx_fini;

	ret = ivpu_fw_init(vdev);
	if (ret)
		goto err_mmu_rctx_fini;

	ret = ivpu_ipc_init(vdev);
	if (ret)
		goto err_fw_fini;

	ret = ivpu_boot(vdev);
	if (ret)
		goto err_ipc_fini;

	ivpu_job_done_consumer_init(vdev);
	ivpu_pm_enable(vdev);

	return 0;

err_ipc_fini:
	ivpu_ipc_fini(vdev);
err_fw_fini:
	ivpu_fw_fini(vdev);
err_mmu_rctx_fini:
	ivpu_mmu_reserved_context_fini(vdev);
err_mmu_gctx_fini:
	ivpu_mmu_global_context_fini(vdev);
err_shutdown:
	ivpu_shutdown(vdev);
err_xa_destroy:
	xa_destroy(&vdev->db_xa);
	xa_destroy(&vdev->submitted_jobs_xa);
	xa_destroy(&vdev->context_xa);
	return ret;
}
static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev)
{
	struct ivpu_file_priv *file_priv;
	unsigned long ctx_id;

	mutex_lock(&vdev->context_list_lock);

	xa_for_each(&vdev->context_xa, ctx_id, file_priv)
		file_priv_unbind(vdev, file_priv);

	mutex_unlock(&vdev->context_list_lock);
}
static void ivpu_dev_fini(struct ivpu_device *vdev)
{
	ivpu_jobs_abort_all(vdev);
	ivpu_pm_cancel_recovery(vdev);
	ivpu_pm_disable(vdev);
	ivpu_prepare_for_reset(vdev);
	ivpu_shutdown(vdev);

	ivpu_ms_cleanup_all(vdev);
	ivpu_job_done_consumer_fini(vdev);
	ivpu_bo_unbind_all_user_contexts(vdev);

	ivpu_ipc_fini(vdev);
	ivpu_fw_fini(vdev);
	ivpu_mmu_reserved_context_fini(vdev);
	ivpu_mmu_global_context_fini(vdev);

	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->db_xa));
	xa_destroy(&vdev->db_xa);
	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
	xa_destroy(&vdev->submitted_jobs_xa);
	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->context_xa));
	xa_destroy(&vdev->context_xa);
}
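
/* Supported platforms: Meteor Lake, Arrow Lake, Lunar Lake and Panther Lake. */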
static struct pci_device_id ivpu_pci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_ARL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PTL_P) },
	{ }
};
MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);
static int ivpu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ivpu_device *vdev;
	int ret;

	vdev = devm_drm_dev_alloc(&pdev->dev, &driver, struct ivpu_device, drm);
	if (IS_ERR(vdev))
		return PTR_ERR(vdev);

	pci_set_drvdata(pdev, vdev);

	ret = ivpu_dev_init(vdev);
	if (ret)
		return ret;

	ivpu_debugfs_init(vdev);
	ivpu_sysfs_init(vdev);

	ret = drm_dev_register(&vdev->drm, 0);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register DRM device: %d\n", ret);
		ivpu_dev_fini(vdev);
	}

	return ret;
}
static void ivpu_remove(struct pci_dev *pdev)
{
	struct ivpu_device *vdev = pci_get_drvdata(pdev);

	drm_dev_unplug(&vdev->drm);
	ivpu_dev_fini(vdev);
}
static const struct dev_pm_ops ivpu_drv_pci_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(ivpu_pm_suspend_cb, ivpu_pm_resume_cb)
	SET_RUNTIME_PM_OPS(ivpu_pm_runtime_suspend_cb, ivpu_pm_runtime_resume_cb, NULL)
};

static const struct pci_error_handlers ivpu_drv_pci_err = {
	.reset_prepare = ivpu_pm_reset_prepare_cb,
	.reset_done = ivpu_pm_reset_done_cb,
};

static struct pci_driver ivpu_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = ivpu_pci_ids,
	.probe = ivpu_probe,
	.remove = ivpu_remove,
	.driver = {
		.pm = &ivpu_drv_pci_pm,
	},
	.err_handler = &ivpu_drv_pci_err,
};
module_pci_driver(ivpu_pci_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(DRIVER_VERSION_STR);