// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
 */

#include <linux/kref.h>
#include <linux/uaccess.h>

#include "msm_gpu.h"

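/*
 * Release callback for the submitqueue kref: drop the reference the queue
 * holds on its owning file private, then free the queue itself.
 */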
void msm_submitqueue_destroy(struct kref *kref)
{
	struct msm_gpu_submitqueue *queue = container_of(kref,
		struct msm_gpu_submitqueue, ref);

	msm_file_private_put(queue->ctx);

	kfree(queue);
}

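/*
 * Look up a queue by id under the read lock; on a match, take an extra
 * reference before returning it. Returns NULL if no queue has that id.
 */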
struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
		u32 id)
{
	struct msm_gpu_submitqueue *entry;

	if (!ctx)
		return NULL;

	read_lock(&ctx->queuelock);

	list_for_each_entry(entry, &ctx->submitqueues, node) {
		if (entry->id == id) {
			kref_get(&entry->ref);
			read_unlock(&ctx->queuelock);

			return entry;
		}
	}

	read_unlock(&ctx->queuelock);
	return NULL;
}

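/*
 * Called on file close: unlink every remaining queue and drop the list's
 * reference on each.
 */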
void msm_submitqueue_close(struct msm_file_private *ctx)
{
	struct msm_gpu_submitqueue *entry, *tmp;

	if (!ctx)
		return;

	/*
	 * No lock needed in close and there won't
	 * be any more user ioctls coming our way
	 */
	list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node) {
		list_del(&entry->node);
		msm_submitqueue_put(entry);
	}
}

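/*
 * Allocate and register a new submitqueue for this file. The requested
 * priority is validated against the GPU's ring count, and the new queue's
 * id is reported back through *id when the caller asks for it.
 */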
int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
		u32 prio, u32 flags, u32 *id)
{
	struct msm_drm_private *priv = drm->dev_private;
	struct msm_gpu_submitqueue *queue;

	if (!ctx)
		return -ENODEV;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);

	if (!queue)
		return -ENOMEM;

	kref_init(&queue->ref);
	queue->flags = flags;

	if (priv->gpu) {
		if (prio >= priv->gpu->nr_rings) {
			kfree(queue);
			return -EINVAL;
		}

		queue->prio = prio;
	}

	write_lock(&ctx->queuelock);

	queue->ctx = msm_file_private_get(ctx);
	queue->id = ctx->queueid++;

	if (id)
		*id = queue->id;

	list_add_tail(&queue->node, &ctx->submitqueues);

	write_unlock(&ctx->queuelock);

	return 0;
}

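/*
 * Set up the per-file submitqueue list and lock, then create the default
 * queue (id 0), presumably so userspace that never creates a queue of its
 * own still has one to submit against.
 */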
int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx)
{
	struct msm_drm_private *priv = drm->dev_private;
	int default_prio;

	if (!ctx)
		return 0;

	/*
	 * Select priority 2 as the "default priority" unless nr_rings is less
	 * than 2 and then pick the lowest priority
	 */
	default_prio = priv->gpu ?
		clamp_t(uint32_t, 2, 0, priv->gpu->nr_rings - 1) : 0;

	INIT_LIST_HEAD(&ctx->submitqueues);

	rwlock_init(&ctx->queuelock);

	return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL);
}

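/*
 * Copy the queue's fault count out to userspace. By convention a
 * zero-length request just reports the size of the data instead.
 */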
static int msm_submitqueue_query_faults(struct msm_gpu_submitqueue *queue,
		struct drm_msm_submitqueue_query *args)
{
	size_t size = min_t(size_t, args->len, sizeof(queue->faults));
	int ret;

	/* If a zero length was passed in, return the data size we expect */
	if (!args->len) {
		args->len = sizeof(queue->faults);
		return 0;
	}

	/* Set the length to the actual size of the data */
	args->len = size;

	ret = copy_to_user(u64_to_user_ptr(args->data), &queue->faults, size);

	return ret ? -EFAULT : 0;
}

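/*
 * Backend for the submitqueue query ioctl: look up the queue, dispatch on
 * the requested param (currently only MSM_SUBMITQUEUE_PARAM_FAULTS), and
 * drop the lookup reference before returning.
 */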
int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
		struct drm_msm_submitqueue_query *args)
{
	struct msm_gpu_submitqueue *queue;
	int ret = -EINVAL;

	if (args->pad)
		return -EINVAL;

	queue = msm_submitqueue_get(ctx, args->id);
	if (!queue)
		return -ENOENT;

	if (args->param == MSM_SUBMITQUEUE_PARAM_FAULTS)
		ret = msm_submitqueue_query_faults(queue, args);

	msm_submitqueue_put(queue);

	return ret;
}

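/*
 * Backend for the submitqueue close ioctl: unlink the queue with the given
 * id and drop the list's reference to it; the queue is freed once the last
 * reference goes away.
 */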
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id)
{
	struct msm_gpu_submitqueue *entry;

	if (!ctx)
		return 0;

	/*
	 * id 0 is the "default" queue and can't be destroyed
	 * by the user
	 */
	if (!id)
		return -ENOENT;

	write_lock(&ctx->queuelock);

	list_for_each_entry(entry, &ctx->submitqueues, node) {
		if (entry->id == id) {
			list_del(&entry->node);
			write_unlock(&ctx->queuelock);

			msm_submitqueue_put(entry);
			return 0;
		}
	}

	write_unlock(&ctx->queuelock);
	return -ENOENT;
}