// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#include "pvr_context.h"
#include "pvr_debugfs.h"
#include "pvr_device.h"
#include "pvr_drv.h"
#include "pvr_free_list.h"
#include "pvr_gem.h"
#include "pvr_hwrt.h"
#include "pvr_job.h"
#include "pvr_mmu.h"
#include "pvr_power.h"
#include "pvr_rogue_defs.h"
#include "pvr_rogue_fwif_client.h"
#include "pvr_rogue_fwif_shared.h"
#include "pvr_vm.h"

#include <uapi/drm/pvr_drm.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>

#include <linux/err.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/xarray.h>
/**
 * DOC: PowerVR (Series 6 and later) and IMG Graphics Driver
 *
 * This driver supports the following PowerVR/IMG graphics cores from Imagination Technologies:
 *
 * * AXE-1-16M (found in Texas Instruments AM62)
 */
/**
 * pvr_ioctl_create_bo() - IOCTL to create a GEM buffer object.
 * @drm_dev: [IN] Target DRM device.
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 *            &struct drm_pvr_ioctl_create_bo_args.
 * @file: [IN] DRM file-private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_CREATE_BO.
 *
 * Return:
 *  * 0 on success,
 *  * -%EINVAL if the value of &drm_pvr_ioctl_create_bo_args.size is zero
 *    or wider than &typedef size_t,
 *  * -%EINVAL if any bits in &drm_pvr_ioctl_create_bo_args.flags that are
 *    reserved or undefined are set,
 *  * -%EINVAL if any padding fields in &drm_pvr_ioctl_create_bo_args are not
 *    zero,
 *  * Any error encountered while creating the object (see
 *    pvr_gem_object_create()), or
 *  * Any error encountered while transferring ownership of the object into a
 *    userspace-accessible handle (see pvr_gem_object_into_handle()).
 */
static int
pvr_ioctl_create_bo(struct drm_device *drm_dev, void *raw_args,
		    struct drm_file *file)
{
	struct drm_pvr_ioctl_create_bo_args *args = raw_args;
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
	struct pvr_file *pvr_file = to_pvr_file(file);

	struct pvr_gem_object *pvr_obj;
	size_t sanitized_size;

	int idx;
	int err;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	/* All padding fields must be zeroed. */
	if (args->_padding_c != 0) {
		err = -EINVAL;
		goto err_drm_dev_exit;
	}

	/*
	 * On 64-bit platforms (our primary target), size_t is a u64. However,
	 * on other architectures we have to check for overflow when casting
	 * down to size_t from u64.
	 *
	 * We also disallow zero-sized allocations, and reserved (kernel-only)
	 * flags.
	 */
	if (args->size > SIZE_MAX || args->size == 0 || args->flags &
	    ~DRM_PVR_BO_FLAGS_MASK || args->size & (PVR_DEVICE_PAGE_SIZE - 1)) {
		err = -EINVAL;
		goto err_drm_dev_exit;
	}

	sanitized_size = (size_t)args->size;

	/*
	 * Create a buffer object and transfer ownership to a userspace-
	 * accessible handle.
	 */
	pvr_obj = pvr_gem_object_create(pvr_dev, sanitized_size, args->flags);
	if (IS_ERR(pvr_obj)) {
		err = PTR_ERR(pvr_obj);
		goto err_drm_dev_exit;
	}

	/* This function will not modify &args->handle unless it succeeds. */
	err = pvr_gem_object_into_handle(pvr_obj, pvr_file, &args->handle);
	if (err)
		goto err_destroy_obj;

	drm_dev_exit(idx);

	return 0;

err_destroy_obj:
	/*
	 * GEM objects are refcounted, so there is no explicit destructor
	 * function. Instead, we release the singular reference we currently
	 * hold on the object and let GEM take care of the rest.
	 */
	pvr_gem_object_put(pvr_obj);

err_drm_dev_exit:
	drm_dev_exit(idx);

	return err;
}
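
/*
 * Example userspace usage (editor's sketch, not part of the driver; assumes
 * an open render-node fd and the UAPI header <drm/pvr_drm.h>; the buffer
 * size must be a multiple of the device page size):
 *
 *	struct drm_pvr_ioctl_create_bo_args bo_args = {
 *		.size = 4096,
 *		.flags = 0,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_PVR_CREATE_BO, &bo_args) == 0)
 *		use_handle(bo_args.handle);	// use_handle() is hypothetical
 */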
/**
 * pvr_ioctl_get_bo_mmap_offset() - IOCTL to generate a "fake" offset to be
 * used when calling mmap() from userspace to map the given GEM buffer object
 * @drm_dev: [IN] DRM device (unused).
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 *            &struct drm_pvr_ioctl_get_bo_mmap_offset_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_GET_BO_MMAP_OFFSET.
 *
 * This IOCTL does *not* perform an mmap. See the docs on
 * &struct drm_pvr_ioctl_get_bo_mmap_offset_args for details.
 *
 * Return:
 *  * 0 on success,
 *  * -%ENOENT if the handle does not reference a valid GEM buffer object,
 *  * -%EINVAL if any padding fields in &struct
 *    drm_pvr_ioctl_get_bo_mmap_offset_args are not zero, or
 *  * Any error returned by drm_gem_create_mmap_offset().
 */
static int
pvr_ioctl_get_bo_mmap_offset(struct drm_device *drm_dev, void *raw_args,
			     struct drm_file *file)
{
	struct drm_pvr_ioctl_get_bo_mmap_offset_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_gem_object *pvr_obj;
	struct drm_gem_object *gem_obj;
	int idx;
	int ret;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	/* All padding fields must be zeroed. */
	if (args->_padding_4 != 0) {
		ret = -EINVAL;
		goto err_drm_dev_exit;
	}

	/*
	 * Obtain a kernel reference to the buffer object. This reference is
	 * counted and must be manually dropped before returning. If a buffer
	 * object cannot be found for the specified handle, return -%ENOENT (No
	 * such file or directory).
	 */
	pvr_obj = pvr_gem_object_from_handle(pvr_file, args->handle);
	if (!pvr_obj) {
		ret = -ENOENT;
		goto err_drm_dev_exit;
	}

	gem_obj = gem_from_pvr_gem(pvr_obj);

	/*
	 * Allocate a fake offset which can be used in userspace calls to mmap
	 * on the DRM device file. If this fails, return the error code. This
	 * operation is idempotent.
	 */
	ret = drm_gem_create_mmap_offset(gem_obj);
	if (ret != 0) {
		/* Drop our reference to the buffer object. */
		drm_gem_object_put(gem_obj);
		goto err_drm_dev_exit;
	}

	/*
	 * Read out the fake offset allocated by the earlier call to
	 * drm_gem_create_mmap_offset.
	 */
	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

	/* Drop our reference to the buffer object. */
	pvr_gem_object_put(pvr_obj);

err_drm_dev_exit:
	drm_dev_exit(idx);

	return ret;
}
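
/*
 * Example userspace usage of the fake offset (editor's sketch; fd, handle and
 * size are assumed to come from an earlier DRM_IOCTL_PVR_CREATE_BO call):
 *
 *	struct drm_pvr_ioctl_get_bo_mmap_offset_args off_args = {
 *		.handle = handle,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_PVR_GET_BO_MMAP_OFFSET, &off_args);
 *	void *map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 fd, off_args.offset);
 *
 * The returned offset is only meaningful to mmap() on this DRM device file;
 * it is not a GPU or CPU physical address.
 */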
static __always_inline __maybe_unused u64
pvr_fw_version_packed(u32 major, u32 minor)
{
	return ((u64)major << 32) | minor;
}
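
/*
 * For illustration (editor's note): pvr_fw_version_packed(1, 17) evaluates to
 * 0x0000000100000011. The major version occupies the upper 32 bits and the
 * minor version the lower 32 bits, so packed values compare in version order
 * with plain integer comparisons.
 */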
static u32
rogue_get_common_store_partition_space_size(struct pvr_device *pvr_dev)
{
	u32 max_partitions = 0;
	u32 tile_size_x = 0;
	u32 tile_size_y = 0;

	PVR_FEATURE_VALUE(pvr_dev, tile_size_x, &tile_size_x);
	PVR_FEATURE_VALUE(pvr_dev, tile_size_y, &tile_size_y);
	PVR_FEATURE_VALUE(pvr_dev, max_partitions, &max_partitions);

	if (tile_size_x == 16 && tile_size_y == 16) {
		u32 usc_min_output_registers_per_pix = 0;

		PVR_FEATURE_VALUE(pvr_dev, usc_min_output_registers_per_pix,
				  &usc_min_output_registers_per_pix);

		return tile_size_x * tile_size_y * max_partitions *
		       usc_min_output_registers_per_pix;
	}

	return max_partitions * 1024;
}
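
/*
 * Worked example (editor's illustration; the feature values are made up, not
 * taken from a real core): with tile_size_x = tile_size_y = 16,
 * max_partitions = 4 and usc_min_output_registers_per_pix = 2, the first
 * branch returns 16 * 16 * 4 * 2 = 2048 dwords. For any other tile size the
 * function falls back to max_partitions * 1024 = 4096.
 */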
static u32
rogue_get_common_store_alloc_region_size(struct pvr_device *pvr_dev)
{
	u32 common_store_size_in_dwords = 512 * 4 * 4;
	u32 alloc_region_size;

	PVR_FEATURE_VALUE(pvr_dev, common_store_size_in_dwords, &common_store_size_in_dwords);

	alloc_region_size = common_store_size_in_dwords - (256U * 4U) -
			    rogue_get_common_store_partition_space_size(pvr_dev);

	if (PVR_HAS_QUIRK(pvr_dev, 44079)) {
		u32 common_store_split_point = (768U * 4U * 4U);

		return min(common_store_split_point - (256U * 4U), alloc_region_size);
	}

	return alloc_region_size;
}
static u32
rogue_get_num_phantoms(struct pvr_device *pvr_dev)
{
	u32 num_clusters = 1;

	PVR_FEATURE_VALUE(pvr_dev, num_clusters, &num_clusters);

	return ROGUE_REQ_NUM_PHANTOMS(num_clusters);
}
static u32
rogue_get_max_coeffs(struct pvr_device *pvr_dev)
{
	u32 max_coeff_additional_portion = ROGUE_MAX_VERTEX_SHARED_REGISTERS;
	u32 pending_allocation_shared_regs = 2U * 1024U;
	u32 pending_allocation_coeff_regs = 0U;
	u32 num_phantoms = rogue_get_num_phantoms(pvr_dev);
	u32 tiles_in_flight = 0;
	u32 max_coeff_pixel_portion;

	PVR_FEATURE_VALUE(pvr_dev, isp_max_tiles_in_flight, &tiles_in_flight);
	max_coeff_pixel_portion = DIV_ROUND_UP(tiles_in_flight, num_phantoms);
	max_coeff_pixel_portion *= ROGUE_MAX_PIXEL_SHARED_REGISTERS;

	/*
	 * Compute tasks on cores with BRN48492 and without compute overlap may lock
	 * up without two additional lines of coeffs.
	 */
	if (PVR_HAS_QUIRK(pvr_dev, 48492) && !PVR_HAS_FEATURE(pvr_dev, compute_overlap))
		pending_allocation_coeff_regs = 2U * 1024U;

	if (PVR_HAS_ENHANCEMENT(pvr_dev, 38748))
		pending_allocation_shared_regs = 0;

	if (PVR_HAS_ENHANCEMENT(pvr_dev, 38020))
		max_coeff_additional_portion += ROGUE_MAX_COMPUTE_SHARED_REGISTERS;

	return rogue_get_common_store_alloc_region_size(pvr_dev) + pending_allocation_coeff_regs -
	       (max_coeff_pixel_portion + max_coeff_additional_portion +
		pending_allocation_shared_regs);
}
static u32
rogue_get_cdm_max_local_mem_size_regs(struct pvr_device *pvr_dev)
{
	u32 available_coeffs_in_dwords = rogue_get_max_coeffs(pvr_dev);

	if (PVR_HAS_QUIRK(pvr_dev, 48492) && PVR_HAS_FEATURE(pvr_dev, roguexe) &&
	    !PVR_HAS_FEATURE(pvr_dev, compute_overlap)) {
		/* Driver must not use the 2 reserved lines. */
		available_coeffs_in_dwords -= ROGUE_CSRM_LINE_SIZE_IN_DWORDS * 2;
	}

	/*
	 * The maximum amount of local memory available to a kernel is the minimum
	 * of the total number of coefficient registers available and the max common
	 * store allocation size which can be made by the CDM.
	 *
	 * If any coeff lines are reserved for tessellation or pixel then we need to
	 * subtract those too.
	 */
	return min(available_coeffs_in_dwords, (u32)ROGUE_MAX_PER_KERNEL_LOCAL_MEM_SIZE_REGS);
}
/**
 * pvr_dev_query_gpu_info_get() - Copy GPU information to userspace
 * @pvr_dev: Device pointer.
 * @args: [IN] Device query arguments containing a pointer to a userspace
 *        struct drm_pvr_dev_query_gpu_info.
 *
 * If the query object pointer is NULL, the size field is updated with the
 * expected size of the query object.
 *
 * Return:
 *  * 0 on success, or if size is requested using a NULL pointer, or
 *  * -%E2BIG if the indicated length of the allocation is less than is
 *    required to contain the copied data, or
 *  * -%EFAULT if local memory could not be copied to userspace.
 */
static int
pvr_dev_query_gpu_info_get(struct pvr_device *pvr_dev,
			   struct drm_pvr_ioctl_dev_query_args *args)
{
	struct drm_pvr_dev_query_gpu_info gpu_info = {0};
	int err;

	if (!args->pointer) {
		args->size = sizeof(struct drm_pvr_dev_query_gpu_info);
		return 0;
	}

	gpu_info.gpu_id =
		pvr_gpu_id_to_packed_bvnc(&pvr_dev->gpu_id);
	gpu_info.num_phantoms = rogue_get_num_phantoms(pvr_dev);

	err = PVR_UOBJ_SET(args->pointer, args->size, gpu_info);
	if (err < 0)
		return err;

	if (args->size > sizeof(gpu_info))
		args->size = sizeof(gpu_info);

	return 0;
}
/**
 * pvr_dev_query_runtime_info_get() - Copy runtime information to userspace
 * @pvr_dev: Device pointer.
 * @args: [IN] Device query arguments containing a pointer to a userspace
 *        struct drm_pvr_dev_query_runtime_info.
 *
 * If the query object pointer is NULL, the size field is updated with the
 * expected size of the query object.
 *
 * Return:
 *  * 0 on success, or if size is requested using a NULL pointer, or
 *  * -%E2BIG if the indicated length of the allocation is less than is
 *    required to contain the copied data, or
 *  * -%EFAULT if local memory could not be copied to userspace.
 */
static int
pvr_dev_query_runtime_info_get(struct pvr_device *pvr_dev,
			       struct drm_pvr_ioctl_dev_query_args *args)
{
	struct drm_pvr_dev_query_runtime_info runtime_info = {0};
	int err;

	if (!args->pointer) {
		args->size = sizeof(struct drm_pvr_dev_query_runtime_info);
		return 0;
	}

	runtime_info.free_list_min_pages =
		pvr_get_free_list_min_pages(pvr_dev);
	runtime_info.free_list_max_pages =
		ROGUE_PM_MAX_FREELIST_SIZE / ROGUE_PM_PAGE_SIZE;
	runtime_info.common_store_alloc_region_size =
		rogue_get_common_store_alloc_region_size(pvr_dev);
	runtime_info.common_store_partition_space_size =
		rogue_get_common_store_partition_space_size(pvr_dev);
	runtime_info.max_coeffs = rogue_get_max_coeffs(pvr_dev);
	runtime_info.cdm_max_local_mem_size_regs =
		rogue_get_cdm_max_local_mem_size_regs(pvr_dev);

	err = PVR_UOBJ_SET(args->pointer, args->size, runtime_info);
	if (err < 0)
		return err;

	if (args->size > sizeof(runtime_info))
		args->size = sizeof(runtime_info);

	return 0;
}
/**
 * pvr_dev_query_quirks_get() - Unpack array of quirks at the address given
 * in a struct drm_pvr_dev_query_quirks, or get the amount of space required
 * for it.
 * @pvr_dev: Device pointer.
 * @args: [IN] Device query arguments containing a pointer to a userspace
 *        struct drm_pvr_dev_query_quirks.
 *
 * If the query object pointer is NULL, the size field is updated with the
 * expected size of the query object.
 * If the userspace pointer in the query object is NULL, or the count is
 * short, no data is copied.
 * The count field will be updated to that copied, or if either pointer is
 * NULL, that which would have been copied.
 * The size field in the query object will be updated to the size copied.
 *
 * Return:
 *  * 0 on success, or if size/count is requested using a NULL pointer, or
 *  * -%EINVAL if args contained non-zero reserved fields, or
 *  * -%E2BIG if the indicated length of the allocation is less than is
 *    required to contain the copied data, or
 *  * -%EFAULT if local memory could not be copied to userspace.
 */
static int
pvr_dev_query_quirks_get(struct pvr_device *pvr_dev,
			 struct drm_pvr_ioctl_dev_query_args *args)
{
	/*
	 * @FIXME - hardcoding of numbers here is intended as an
	 * intermediate step so the UAPI can be fixed, but requires a
	 * refactor in the future to store them in a more appropriate
	 * location.
	 */
	static const u32 umd_quirks_musthave[] = {
		47217,
		49927,
		62269,
	};
	static const u32 umd_quirks[] = {
		48545,
		51764,
	};
	struct drm_pvr_dev_query_quirks query;
	u32 out[ARRAY_SIZE(umd_quirks_musthave) + ARRAY_SIZE(umd_quirks)];
	size_t out_musthave_count = 0;
	size_t out_count = 0;
	int err;

	if (!args->pointer) {
		args->size = sizeof(struct drm_pvr_dev_query_quirks);
		return 0;
	}

	err = PVR_UOBJ_GET(query, args->size, args->pointer);

	if (err < 0)
		return err;
	if (query._padding_c)
		return -EINVAL;

	for (int i = 0; i < ARRAY_SIZE(umd_quirks_musthave); i++) {
		if (pvr_device_has_uapi_quirk(pvr_dev, umd_quirks_musthave[i])) {
			out[out_count++] = umd_quirks_musthave[i];
			out_musthave_count++;
		}
	}

	for (int i = 0; i < ARRAY_SIZE(umd_quirks); i++) {
		if (pvr_device_has_uapi_quirk(pvr_dev, umd_quirks[i]))
			out[out_count++] = umd_quirks[i];
	}

	if (!query.quirks)
		goto copy_out;

	if (query.count < out_count)
		return -E2BIG;

	if (copy_to_user(u64_to_user_ptr(query.quirks), out,
			 out_count * sizeof(u32))) {
		return -EFAULT;
	}

	query.musthave_count = out_musthave_count;

copy_out:
	query.count = out_count;
	err = PVR_UOBJ_SET(args->pointer, args->size, query);
	if (err < 0)
		return err;

	args->size = sizeof(query);

	return 0;
}
/**
 * pvr_dev_query_enhancements_get() - Unpack array of enhancements at the
 * address given in a struct drm_pvr_dev_query_enhancements, or get the amount
 * of space required for it.
 * @pvr_dev: Device pointer.
 * @args: [IN] Device query arguments containing a pointer to a userspace
 *        struct drm_pvr_dev_query_enhancements.
 *
 * If the query object pointer is NULL, the size field is updated with the
 * expected size of the query object.
 * If the userspace pointer in the query object is NULL, or the count is
 * short, no data is copied.
 * The count field will be updated to that copied, or if either pointer is
 * NULL, that which would have been copied.
 * The size field in the query object will be updated to the size copied.
 *
 * Return:
 *  * 0 on success, or if size/count is requested using a NULL pointer, or
 *  * -%EINVAL if args contained non-zero reserved fields, or
 *  * -%E2BIG if the indicated length of the allocation is less than is
 *    required to contain the copied data, or
 *  * -%EFAULT if local memory could not be copied to userspace.
 */
static int
pvr_dev_query_enhancements_get(struct pvr_device *pvr_dev,
			       struct drm_pvr_ioctl_dev_query_args *args)
{
	/*
	 * @FIXME - hardcoding of numbers here is intended as an
	 * intermediate step so the UAPI can be fixed, but requires a
	 * refactor in the future to store them in a more appropriate
	 * location.
	 */
	const u32 umd_enhancements[] = {
		35421,
		42064,
	};
	struct drm_pvr_dev_query_enhancements query;
	u32 out[ARRAY_SIZE(umd_enhancements)];
	size_t out_idx = 0;
	int err;

	if (!args->pointer) {
		args->size = sizeof(struct drm_pvr_dev_query_enhancements);
		return 0;
	}

	err = PVR_UOBJ_GET(query, args->size, args->pointer);

	if (err < 0)
		return err;
	if (query._padding_a)
		return -EINVAL;
	if (query._padding_c)
		return -EINVAL;

	for (int i = 0; i < ARRAY_SIZE(umd_enhancements); i++) {
		if (pvr_device_has_uapi_enhancement(pvr_dev, umd_enhancements[i]))
			out[out_idx++] = umd_enhancements[i];
	}

	if (!query.enhancements)
		goto copy_out;

	if (query.count < out_idx)
		return -E2BIG;

	if (copy_to_user(u64_to_user_ptr(query.enhancements), out,
			 out_idx * sizeof(u32))) {
		return -EFAULT;
	}

copy_out:
	query.count = out_idx;
	err = PVR_UOBJ_SET(args->pointer, args->size, query);
	if (err < 0)
		return err;

	args->size = sizeof(query);

	return 0;
}
/**
 * pvr_ioctl_dev_query() - IOCTL to copy information about a device
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 *            &struct drm_pvr_ioctl_dev_query_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_DEV_QUERY.
 * If the given receiving struct pointer is NULL, or the indicated size is too
 * small, the expected size of the struct type will be returned in the size
 * argument field.
 *
 * Return:
 *  * 0 on success or when fetching the size with args->pointer == NULL, or
 *  * -%E2BIG if the indicated size of the receiving struct is less than is
 *    required to contain the copied data, or
 *  * -%EINVAL if the indicated struct type is unknown, or
 *  * -%ENOMEM if local memory could not be allocated, or
 *  * -%EFAULT if local memory could not be copied to userspace.
 */
static int
pvr_ioctl_dev_query(struct drm_device *drm_dev, void *raw_args,
		    struct drm_file *file)
{
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
	struct drm_pvr_ioctl_dev_query_args *args = raw_args;
	int ret = -EINVAL;
	int idx;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	switch ((enum drm_pvr_dev_query)args->type) {
	case DRM_PVR_DEV_QUERY_GPU_INFO_GET:
		ret = pvr_dev_query_gpu_info_get(pvr_dev, args);
		break;

	case DRM_PVR_DEV_QUERY_RUNTIME_INFO_GET:
		ret = pvr_dev_query_runtime_info_get(pvr_dev, args);
		break;

	case DRM_PVR_DEV_QUERY_QUIRKS_GET:
		ret = pvr_dev_query_quirks_get(pvr_dev, args);
		break;

	case DRM_PVR_DEV_QUERY_ENHANCEMENTS_GET:
		ret = pvr_dev_query_enhancements_get(pvr_dev, args);
		break;

	case DRM_PVR_DEV_QUERY_HEAP_INFO_GET:
		ret = pvr_heap_info_get(pvr_dev, args);
		break;

	case DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET:
		ret = pvr_static_data_areas_get(pvr_dev, args);
		break;
	}

	drm_dev_exit(idx);

	return ret;
}
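
/*
 * Example of the two-call query pattern (editor's sketch, not part of the
 * driver): userspace may first pass a NULL pointer to discover the expected
 * object size, then repeat the call with a real buffer.
 *
 *	struct drm_pvr_dev_query_gpu_info gpu_info;
 *	struct drm_pvr_ioctl_dev_query_args query = {
 *		.type = DRM_PVR_DEV_QUERY_GPU_INFO_GET,
 *		.pointer = 0,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_PVR_DEV_QUERY, &query);	// sets query.size
 *	query.pointer = (__u64)(uintptr_t)&gpu_info;
 *	ioctl(fd, DRM_IOCTL_PVR_DEV_QUERY, &query);	// fills gpu_info
 */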
/**
 * pvr_ioctl_create_context() - IOCTL to create a context
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 *            &struct drm_pvr_ioctl_create_context_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_CREATE_CONTEXT.
 *
 * Return:
 *  * 0 on success, or
 *  * -%EINVAL if provided arguments are invalid, or
 *  * -%EFAULT if arguments can't be copied from userspace, or
 *  * Any error returned by pvr_context_create().
 */
static int
pvr_ioctl_create_context(struct drm_device *drm_dev, void *raw_args,
			 struct drm_file *file)
{
	struct drm_pvr_ioctl_create_context_args *args = raw_args;
	struct pvr_file *pvr_file = file->driver_priv;
	int idx;
	int ret;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	ret = pvr_context_create(pvr_file, args);

	drm_dev_exit(idx);

	return ret;
}
/**
 * pvr_ioctl_destroy_context() - IOCTL to destroy a context
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 *            &struct drm_pvr_ioctl_destroy_context_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_DESTROY_CONTEXT.
 *
 * Return:
 *  * 0 on success, or
 *  * -%EINVAL if context not in context list.
 */
static int
pvr_ioctl_destroy_context(struct drm_device *drm_dev, void *raw_args,
			  struct drm_file *file)
{
	struct drm_pvr_ioctl_destroy_context_args *args = raw_args;
	struct pvr_file *pvr_file = file->driver_priv;

	if (args->_padding_4)
		return -EINVAL;

	return pvr_context_destroy(pvr_file, args->handle);
}
/**
 * pvr_ioctl_create_free_list() - IOCTL to create a free list
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 *            &struct drm_pvr_ioctl_create_free_list_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_CREATE_FREE_LIST.
 *
 * Return:
 *  * 0 on success, or
 *  * Any error returned by pvr_free_list_create().
 */
static int
pvr_ioctl_create_free_list(struct drm_device *drm_dev, void *raw_args,
			   struct drm_file *file)
{
	struct drm_pvr_ioctl_create_free_list_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_free_list *free_list;
	int idx;
	int err;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	free_list = pvr_free_list_create(pvr_file, args);
	if (IS_ERR(free_list)) {
		err = PTR_ERR(free_list);
		goto err_drm_dev_exit;
	}

	/* Allocate object handle for userspace. */
	err = xa_alloc(&pvr_file->free_list_handles,
		       &args->handle,
		       free_list,
		       xa_limit_32b,
		       GFP_KERNEL);
	if (err < 0)
		goto err_cleanup;

	drm_dev_exit(idx);

	return 0;

err_cleanup:
	pvr_free_list_put(free_list);

err_drm_dev_exit:
	drm_dev_exit(idx);

	return err;
}
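
/*
 * Editor's note on the handle scheme used above: every "create" ioctl in this
 * file stores the new object in a per-file xarray initialised with
 * XA_FLAGS_ALLOC1, so xa_alloc() hands out handles starting at 1 and a handle
 * of 0 can never name a valid object. The matching "destroy" ioctl is then a
 * single xa_erase() on the same handle followed by a reference drop, as in
 * pvr_ioctl_destroy_free_list() below.
 */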
/**
 * pvr_ioctl_destroy_free_list() - IOCTL to destroy a free list
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
 *            &struct drm_pvr_ioctl_destroy_free_list_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_DESTROY_FREE_LIST.
 *
 * Return:
 *  * 0 on success, or
 *  * -%EINVAL if free list not in object list.
 */
static int
pvr_ioctl_destroy_free_list(struct drm_device *drm_dev, void *raw_args,
			    struct drm_file *file)
{
	struct drm_pvr_ioctl_destroy_free_list_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_free_list *free_list;

	if (args->_padding_4)
		return -EINVAL;

	free_list = xa_erase(&pvr_file->free_list_handles, args->handle);
	if (!free_list)
		return -EINVAL;

	pvr_free_list_put(free_list);

	return 0;
}
/**
 * pvr_ioctl_create_hwrt_dataset() - IOCTL to create a HWRT dataset
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 *            &struct drm_pvr_ioctl_create_hwrt_dataset_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_CREATE_HWRT_DATASET.
 *
 * Return:
 *  * 0 on success, or
 *  * Any error returned by pvr_hwrt_dataset_create().
 */
static int
pvr_ioctl_create_hwrt_dataset(struct drm_device *drm_dev, void *raw_args,
			      struct drm_file *file)
{
	struct drm_pvr_ioctl_create_hwrt_dataset_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_hwrt_dataset *hwrt;
	int idx;
	int err;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	hwrt = pvr_hwrt_dataset_create(pvr_file, args);
	if (IS_ERR(hwrt)) {
		err = PTR_ERR(hwrt);
		goto err_drm_dev_exit;
	}

	/* Allocate object handle for userspace. */
	err = xa_alloc(&pvr_file->hwrt_handles,
		       &args->handle,
		       hwrt,
		       xa_limit_32b,
		       GFP_KERNEL);
	if (err < 0)
		goto err_cleanup;

	drm_dev_exit(idx);

	return 0;

err_cleanup:
	pvr_hwrt_dataset_put(hwrt);

err_drm_dev_exit:
	drm_dev_exit(idx);

	return err;
}
/**
 * pvr_ioctl_destroy_hwrt_dataset() - IOCTL to destroy a HWRT dataset
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
 *            &struct drm_pvr_ioctl_destroy_hwrt_dataset_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_DESTROY_HWRT_DATASET.
 *
 * Return:
 *  * 0 on success, or
 *  * -%EINVAL if HWRT dataset not in object list.
 */
static int
pvr_ioctl_destroy_hwrt_dataset(struct drm_device *drm_dev, void *raw_args,
			       struct drm_file *file)
{
	struct drm_pvr_ioctl_destroy_hwrt_dataset_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_hwrt_dataset *hwrt;

	if (args->_padding_4)
		return -EINVAL;

	hwrt = xa_erase(&pvr_file->hwrt_handles, args->handle);
	if (!hwrt)
		return -EINVAL;

	pvr_hwrt_dataset_put(hwrt);

	return 0;
}
/**
 * pvr_ioctl_create_vm_context() - IOCTL to create a VM context
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 *            &struct drm_pvr_ioctl_create_vm_context_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_CREATE_VM_CONTEXT.
 *
 * Return:
 *  * 0 on success, or
 *  * Any error returned by pvr_vm_create_context().
 */
static int
pvr_ioctl_create_vm_context(struct drm_device *drm_dev, void *raw_args,
			    struct drm_file *file)
{
	struct drm_pvr_ioctl_create_vm_context_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_vm_context *vm_ctx;
	int idx;
	int err;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	if (args->_padding_4) {
		err = -EINVAL;
		goto err_drm_dev_exit;
	}

	vm_ctx = pvr_vm_create_context(pvr_file->pvr_dev, true);
	if (IS_ERR(vm_ctx)) {
		err = PTR_ERR(vm_ctx);
		goto err_drm_dev_exit;
	}

	/* Allocate object handle for userspace. */
	err = xa_alloc(&pvr_file->vm_ctx_handles,
		       &args->handle,
		       vm_ctx,
		       xa_limit_32b,
		       GFP_KERNEL);
	if (err < 0)
		goto err_cleanup;

	drm_dev_exit(idx);

	return 0;

err_cleanup:
	pvr_vm_context_put(vm_ctx);

err_drm_dev_exit:
	drm_dev_exit(idx);

	return err;
}
/**
 * pvr_ioctl_destroy_vm_context() - IOCTL to destroy a VM context
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
 *            &struct drm_pvr_ioctl_destroy_vm_context_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_DESTROY_VM_CONTEXT.
 *
 * Return:
 *  * 0 on success, or
 *  * -%EINVAL if object not in object list.
 */
static int
pvr_ioctl_destroy_vm_context(struct drm_device *drm_dev, void *raw_args,
			     struct drm_file *file)
{
	struct drm_pvr_ioctl_destroy_vm_context_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_vm_context *vm_ctx;

	if (args->_padding_4)
		return -EINVAL;

	vm_ctx = xa_erase(&pvr_file->vm_ctx_handles, args->handle);
	if (!vm_ctx)
		return -EINVAL;

	pvr_vm_context_put(vm_ctx);

	return 0;
}
/**
 * pvr_ioctl_vm_map() - IOCTL to map buffer to GPU address space.
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
 *            &struct drm_pvr_ioctl_vm_map_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_VM_MAP.
 *
 * Return:
 *  * 0 on success,
 *  * -%EINVAL if &drm_pvr_ioctl_vm_map_args.flags is not zero,
 *  * -%EINVAL if the bounds specified by &drm_pvr_ioctl_vm_map_args.offset
 *    and &drm_pvr_ioctl_vm_map_args.size are not valid or do not fall
 *    within the buffer object specified by
 *    &drm_pvr_ioctl_vm_map_args.handle,
 *  * -%EINVAL if the bounds specified by
 *    &drm_pvr_ioctl_vm_map_args.device_addr and
 *    &drm_pvr_ioctl_vm_map_args.size do not form a valid device-virtual
 *    address range which falls entirely within a single heap, or
 *  * -%ENOENT if &drm_pvr_ioctl_vm_map_args.handle does not refer to a
 *    valid PowerVR buffer object.
 */
static int
pvr_ioctl_vm_map(struct drm_device *drm_dev, void *raw_args,
		 struct drm_file *file)
{
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
	struct drm_pvr_ioctl_vm_map_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_vm_context *vm_ctx;

	struct pvr_gem_object *pvr_obj;
	size_t pvr_obj_size;

	u64 offset_plus_size;
	int idx;
	int err;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	/* Initial validation of args. */
	if (args->_padding_14) {
		err = -EINVAL;
		goto err_drm_dev_exit;
	}

	if (args->flags != 0 ||
	    check_add_overflow(args->offset, args->size, &offset_plus_size) ||
	    !pvr_find_heap_containing(pvr_dev, args->device_addr, args->size)) {
		err = -EINVAL;
		goto err_drm_dev_exit;
	}

	vm_ctx = pvr_vm_context_lookup(pvr_file, args->vm_context_handle);
	if (!vm_ctx) {
		err = -EINVAL;
		goto err_drm_dev_exit;
	}

	pvr_obj = pvr_gem_object_from_handle(pvr_file, args->handle);
	if (!pvr_obj) {
		err = -ENOENT;
		goto err_put_vm_context;
	}

	pvr_obj_size = pvr_gem_object_size(pvr_obj);

	/*
	 * Validate offset and size args. The alignment of these will be
	 * checked when mapping; for now just check that they're within valid
	 * bounds.
	 */
	if (args->offset >= pvr_obj_size || offset_plus_size > pvr_obj_size) {
		err = -EINVAL;
		goto err_put_pvr_object;
	}

	err = pvr_vm_map(vm_ctx, pvr_obj, args->offset,
			 args->device_addr, args->size);
	if (err)
		goto err_put_pvr_object;

	/*
	 * In order to set up the mapping, we needed a reference to &pvr_obj.
	 * However, pvr_vm_map() obtains and stores its own reference, so we
	 * must release ours before returning.
	 */

err_put_pvr_object:
	pvr_gem_object_put(pvr_obj);

err_put_vm_context:
	pvr_vm_context_put(vm_ctx);

err_drm_dev_exit:
	drm_dev_exit(idx);

	return err;
}
/**
 * pvr_ioctl_vm_unmap() - IOCTL to unmap buffer from GPU address space.
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
 *            &struct drm_pvr_ioctl_vm_unmap_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_VM_UNMAP.
 *
 * Return:
 *  * 0 on success,
 *  * -%EINVAL if &drm_pvr_ioctl_vm_unmap_args.device_addr is not a valid
 *    device page-aligned device-virtual address, or
 *  * -%ENOENT if there is currently no PowerVR buffer object mapped at
 *    &drm_pvr_ioctl_vm_unmap_args.device_addr.
 */
static int
pvr_ioctl_vm_unmap(struct drm_device *drm_dev, void *raw_args,
		   struct drm_file *file)
{
	struct drm_pvr_ioctl_vm_unmap_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_vm_context *vm_ctx;
	int err;

	/* Initial validation of args. */
	if (args->_padding_4)
		return -EINVAL;

	vm_ctx = pvr_vm_context_lookup(pvr_file, args->vm_context_handle);
	if (!vm_ctx)
		return -EINVAL;

	err = pvr_vm_unmap(vm_ctx, args->device_addr, args->size);

	pvr_vm_context_put(vm_ctx);

	return err;
}
/**
 * pvr_ioctl_submit_jobs() - IOCTL to submit jobs to the GPU
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
 *            &struct drm_pvr_ioctl_submit_jobs_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_SUBMIT_JOBS.
 *
 * Return:
 *  * 0 on success, or
 *  * -%EINVAL if arguments are invalid.
 */
static int
pvr_ioctl_submit_jobs(struct drm_device *drm_dev, void *raw_args,
		      struct drm_file *file)
{
	struct drm_pvr_ioctl_submit_jobs_args *args = raw_args;
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
	struct pvr_file *pvr_file = to_pvr_file(file);
	int idx;
	int err;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	err = pvr_submit_jobs(pvr_dev, pvr_file, args);

	drm_dev_exit(idx);

	return err;
}
int
pvr_get_uobj(u64 usr_ptr, u32 usr_stride, u32 min_stride, u32 obj_size, void *out)
{
	if (usr_stride < min_stride)
		return -EINVAL;

	return copy_struct_from_user(out, obj_size, u64_to_user_ptr(usr_ptr), usr_stride);
}
int
pvr_set_uobj(u64 usr_ptr, u32 usr_stride, u32 min_stride, u32 obj_size, const void *in)
{
	if (usr_stride < min_stride)
		return -EINVAL;

	if (copy_to_user(u64_to_user_ptr(usr_ptr), in, min_t(u32, usr_stride, obj_size)))
		return -EFAULT;

	if (usr_stride > obj_size &&
	    clear_user(u64_to_user_ptr(usr_ptr + obj_size), usr_stride - obj_size)) {
		return -EFAULT;
	}

	return 0;
}
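
/*
 * Example of the stride convention (editor's illustration): if an older
 * userspace passes a 16-byte struct while the kernel object is 24 bytes,
 * pvr_get_uobj() copies 16 bytes and zero-fills the remaining 8 via
 * copy_struct_from_user(), while pvr_set_uobj() writes back only the first
 * 16 bytes. A newer, larger userspace struct is accepted on the read side
 * only if its extra bytes are zero (copy_struct_from_user() returns -E2BIG
 * otherwise), and on the write side has its tail cleared with clear_user().
 * This keeps the UAPI extensible without explicit version checks.
 */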
int
pvr_get_uobj_array(const struct drm_pvr_obj_array *in, u32 min_stride, u32 obj_size,
		   void **out)
{
	int ret = 0;
	void *out_alloc;

	if (in->stride < min_stride)
		return -EINVAL;

	if (!in->count)
		return 0;

	out_alloc = kvmalloc_array(in->count, obj_size, GFP_KERNEL);
	if (!out_alloc)
		return -ENOMEM;

	if (obj_size == in->stride) {
		if (copy_from_user(out_alloc, u64_to_user_ptr(in->array),
				   (unsigned long)obj_size * in->count))
			ret = -EFAULT;
	} else {
		void __user *in_ptr = u64_to_user_ptr(in->array);
		void *out_ptr = out_alloc;

		for (u32 i = 0; i < in->count; i++) {
			ret = copy_struct_from_user(out_ptr, obj_size, in_ptr, in->stride);
			if (ret)
				break;

			out_ptr += obj_size;
			in_ptr += in->stride;
		}
	}

	if (ret) {
		kvfree(out_alloc);
		return ret;
	}

	*out = out_alloc;
	return 0;
}
int
pvr_set_uobj_array(const struct drm_pvr_obj_array *out, u32 min_stride, u32 obj_size,
		   const void *in)
{
	if (out->stride < min_stride)
		return -EINVAL;

	if (!out->count)
		return 0;

	if (obj_size == out->stride) {
		if (copy_to_user(u64_to_user_ptr(out->array), in,
				 (unsigned long)obj_size * out->count))
			return -EFAULT;
	} else {
		u32 cpy_elem_size = min_t(u32, out->stride, obj_size);
		void __user *out_ptr = u64_to_user_ptr(out->array);
		const void *in_ptr = in;

		for (u32 i = 0; i < out->count; i++) {
			if (copy_to_user(out_ptr, in_ptr, cpy_elem_size))
				return -EFAULT;

			/*
			 * The user pointer advances by its stride; the kernel
			 * pointer advances by the packed object size.
			 */
			out_ptr += out->stride;
			in_ptr += obj_size;
		}

		if (out->stride > obj_size &&
		    clear_user(u64_to_user_ptr(out->array + obj_size),
			       out->stride - obj_size)) {
			return -EFAULT;
		}
	}

	return 0;
}
#define DRM_PVR_IOCTL(_name, _func, _flags) \
	DRM_IOCTL_DEF_DRV(PVR_##_name, pvr_ioctl_##_func, _flags)

/* clang-format off */

static const struct drm_ioctl_desc pvr_drm_driver_ioctls[] = {
	DRM_PVR_IOCTL(DEV_QUERY, dev_query, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(CREATE_BO, create_bo, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(GET_BO_MMAP_OFFSET, get_bo_mmap_offset, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(CREATE_VM_CONTEXT, create_vm_context, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(DESTROY_VM_CONTEXT, destroy_vm_context, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(VM_MAP, vm_map, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(VM_UNMAP, vm_unmap, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(CREATE_CONTEXT, create_context, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(DESTROY_CONTEXT, destroy_context, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(CREATE_FREE_LIST, create_free_list, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(DESTROY_FREE_LIST, destroy_free_list, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(CREATE_HWRT_DATASET, create_hwrt_dataset, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(DESTROY_HWRT_DATASET, destroy_hwrt_dataset, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(SUBMIT_JOBS, submit_jobs, DRM_RENDER_ALLOW),
};

/* clang-format on */

#undef DRM_PVR_IOCTL
/**
 * pvr_drm_driver_open() - Driver callback when a new &struct drm_file is opened
 * @drm_dev: [IN] DRM device.
 * @file: [IN] DRM file private data.
 *
 * Allocates powervr-specific file private data (&struct pvr_file).
 *
 * Registered in &pvr_drm_driver.
 *
 * Return:
 *  * 0 on success,
 *  * -%ENOMEM if the allocation of a &struct pvr_file fails, or
 *  * Any error returned by pvr_memory_context_init().
 */
static int
pvr_drm_driver_open(struct drm_device *drm_dev, struct drm_file *file)
{
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
	struct pvr_file *pvr_file;

	pvr_file = kzalloc(sizeof(*pvr_file), GFP_KERNEL);
	if (!pvr_file)
		return -ENOMEM;

	/*
	 * Store reference to base DRM file private data for use by
	 * from_pvr_file.
	 */
	pvr_file->file = file;

	/*
	 * Store reference to powervr-specific outer device struct in file
	 * private data for convenient access.
	 */
	pvr_file->pvr_dev = pvr_dev;

	INIT_LIST_HEAD(&pvr_file->contexts);

	xa_init_flags(&pvr_file->ctx_handles, XA_FLAGS_ALLOC1);
	xa_init_flags(&pvr_file->free_list_handles, XA_FLAGS_ALLOC1);
	xa_init_flags(&pvr_file->hwrt_handles, XA_FLAGS_ALLOC1);
	xa_init_flags(&pvr_file->vm_ctx_handles, XA_FLAGS_ALLOC1);

	/*
	 * Store reference to powervr-specific file private data in DRM file
	 * private data.
	 */
	file->driver_priv = pvr_file;

	return 0;
}
/**
 * pvr_drm_driver_postclose() - One of the driver callbacks when a &struct
 * drm_file is closed.
 * @drm_dev: [IN] DRM device (unused).
 * @file: [IN] DRM file private data.
 *
 * Frees powervr-specific file private data (&struct pvr_file).
 *
 * Registered in &pvr_drm_driver.
 */
static void
pvr_drm_driver_postclose(__always_unused struct drm_device *drm_dev,
			 struct drm_file *file)
{
	struct pvr_file *pvr_file = to_pvr_file(file);

	/* Kill remaining contexts. */
	pvr_destroy_contexts_for_file(pvr_file);

	/* Drop references on any remaining objects. */
	pvr_destroy_free_lists_for_file(pvr_file);
	pvr_destroy_hwrt_datasets_for_file(pvr_file);
	pvr_destroy_vm_contexts_for_file(pvr_file);

	kfree(pvr_file);
	file->driver_priv = NULL;
}
DEFINE_DRM_GEM_FOPS(pvr_drm_driver_fops);

static struct drm_driver pvr_drm_driver = {
	.driver_features = DRIVER_GEM | DRIVER_GEM_GPUVA | DRIVER_RENDER |
			   DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE,
	.open = pvr_drm_driver_open,
	.postclose = pvr_drm_driver_postclose,
	.ioctls = pvr_drm_driver_ioctls,
	.num_ioctls = ARRAY_SIZE(pvr_drm_driver_ioctls),
	.fops = &pvr_drm_driver_fops,
#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = pvr_debugfs_init,
#endif

	.name = PVR_DRIVER_NAME,
	.desc = PVR_DRIVER_DESC,
	.date = PVR_DRIVER_DATE,
	.major = PVR_DRIVER_MAJOR,
	.minor = PVR_DRIVER_MINOR,
	.patchlevel = PVR_DRIVER_PATCHLEVEL,

	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
	.gem_create_object = pvr_gem_create_object,
};
static int
pvr_probe(struct platform_device *plat_dev)
{
	struct pvr_device *pvr_dev;
	struct drm_device *drm_dev;
	int err;

	pvr_dev = devm_drm_dev_alloc(&plat_dev->dev, &pvr_drm_driver,
				     struct pvr_device, base);
	if (IS_ERR(pvr_dev))
		return PTR_ERR(pvr_dev);

	drm_dev = &pvr_dev->base;

	platform_set_drvdata(plat_dev, drm_dev);

	init_rwsem(&pvr_dev->reset_sem);

	pvr_context_device_init(pvr_dev);

	err = pvr_queue_device_init(pvr_dev);
	if (err)
		goto err_context_fini;

	devm_pm_runtime_enable(&plat_dev->dev);
	pm_runtime_mark_last_busy(&plat_dev->dev);

	pm_runtime_set_autosuspend_delay(&plat_dev->dev, 50);
	pm_runtime_use_autosuspend(&plat_dev->dev);
	pvr_watchdog_init(pvr_dev);

	err = pvr_device_init(pvr_dev);
	if (err)
		goto err_watchdog_fini;

	err = drm_dev_register(drm_dev, 0);
	if (err)
		goto err_device_fini;

	xa_init_flags(&pvr_dev->free_list_ids, XA_FLAGS_ALLOC1);
	xa_init_flags(&pvr_dev->job_ids, XA_FLAGS_ALLOC1);

	return 0;

err_device_fini:
	pvr_device_fini(pvr_dev);

err_watchdog_fini:
	pvr_watchdog_fini(pvr_dev);

	pvr_queue_device_fini(pvr_dev);

err_context_fini:
	pvr_context_device_fini(pvr_dev);

	return err;
}
static void pvr_remove(struct platform_device *plat_dev)
{
	struct drm_device *drm_dev = platform_get_drvdata(plat_dev);
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);

	WARN_ON(!xa_empty(&pvr_dev->job_ids));
	WARN_ON(!xa_empty(&pvr_dev->free_list_ids));

	xa_destroy(&pvr_dev->job_ids);
	xa_destroy(&pvr_dev->free_list_ids);

	pm_runtime_suspend(drm_dev->dev);
	pvr_device_fini(pvr_dev);
	drm_dev_unplug(drm_dev);
	pvr_watchdog_fini(pvr_dev);
	pvr_queue_device_fini(pvr_dev);
	pvr_context_device_fini(pvr_dev);
}
static const struct of_device_id dt_match[] = {
	{ .compatible = "img,img-axe", .data = NULL },
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);

static const struct dev_pm_ops pvr_pm_ops = {
	RUNTIME_PM_OPS(pvr_power_device_suspend, pvr_power_device_resume, pvr_power_device_idle)
};

static struct platform_driver pvr_driver = {
	.probe = pvr_probe,
	.remove = pvr_remove,
	.driver = {
		.name = PVR_DRIVER_NAME,
		.pm = &pvr_pm_ops,
		.of_match_table = dt_match,
	},
};
module_platform_driver(pvr_driver);

MODULE_AUTHOR("Imagination Technologies Ltd.");
MODULE_DESCRIPTION(PVR_DRIVER_DESC);
MODULE_LICENSE("Dual MIT/GPL");
MODULE_IMPORT_NS("DMA_BUF");
MODULE_FIRMWARE("powervr/rogue_33.15.11.3_v1.fw");