// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
 */

#include <linux/kref.h>
#include <linux/uaccess.h>

#include "msm_gpu.h"

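/*
 * kref release callback: called once the last reference to a submitqueue
 * is dropped, at which point it is safe to free the structure.
 */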
void msm_submitqueue_destroy(struct kref *kref)
{
	struct msm_gpu_submitqueue *queue = container_of(kref,
		struct msm_gpu_submitqueue, ref);

	kfree(queue);
}

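/*
 * Look up a submitqueue by id under the read lock and return it with an
 * extra reference held; callers drop that reference with
 * msm_submitqueue_put().
 */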
struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
		u32 id)
{
	struct msm_gpu_submitqueue *entry;

	if (!ctx)
		return NULL;

	read_lock(&ctx->queuelock);

	list_for_each_entry(entry, &ctx->submitqueues, node) {
		if (entry->id == id) {
			kref_get(&entry->ref);
			read_unlock(&ctx->queuelock);

			return entry;
		}
	}

	read_unlock(&ctx->queuelock);
	return NULL;
}

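/*
 * Called when the DRM file is closed: drop the list's reference on every
 * remaining queue so they can be freed.
 */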
void msm_submitqueue_close(struct msm_file_private *ctx)
{
	struct msm_gpu_submitqueue *entry, *tmp;

	if (!ctx)
		return;

	/*
	 * No lock needed in close and there won't
	 * be any more user ioctls coming our way
	 */
	list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node)
		msm_submitqueue_put(entry);
}

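/*
 * Allocate a new submitqueue for this file, validate the requested ring
 * priority against the GPU, and add the queue to the per-file list under
 * the write lock. The new queue id is returned through *id when non-NULL.
 */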
int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
		u32 prio, u32 flags, u32 *id)
{
	struct msm_drm_private *priv = drm->dev_private;
	struct msm_gpu_submitqueue *queue;

	if (!ctx)
		return -ENODEV;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);

	if (!queue)
		return -ENOMEM;

	kref_init(&queue->ref);
	queue->flags = flags;

	if (priv->gpu) {
		if (prio >= priv->gpu->nr_rings) {
			/* Don't leak the queue allocated above */
			kfree(queue);
			return -EINVAL;
		}

		queue->prio = prio;
	}

	write_lock(&ctx->queuelock);

	queue->id = ctx->queueid++;

	if (id)
		*id = queue->id;

	list_add_tail(&queue->node, &ctx->submitqueues);

	write_unlock(&ctx->queuelock);

	return 0;
}

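/*
 * Per-file init: set up the submitqueue list and its lock, then create
 * the default submitqueue for this file.
 */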
int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx)
{
	struct msm_drm_private *priv = drm->dev_private;
	int default_prio;

	if (!ctx)
		return 0;

	/*
	 * Select priority 2 as the "default priority" unless nr_rings is less
	 * than 2, in which case pick the lowest priority
	 */
	default_prio = priv->gpu ?
		clamp_t(uint32_t, 2, 0, priv->gpu->nr_rings - 1) : 0;

	INIT_LIST_HEAD(&ctx->submitqueues);

	rwlock_init(&ctx->queuelock);

	return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL);
}

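/*
 * Copy the queue's fault counter out to the userspace buffer described by
 * args. A zero-length request only reports the size of the data.
 */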
static int msm_submitqueue_query_faults(struct msm_gpu_submitqueue *queue,
		struct drm_msm_submitqueue_query *args)
{
	size_t size = min_t(size_t, args->len, sizeof(queue->faults));
	int ret;

	/* If a zero length was passed in, return the data size we expect */
	if (!args->len) {
		args->len = sizeof(queue->faults);
		return 0;
	}

	/* Set the length to the actual size of the data */
	args->len = size;

	ret = copy_to_user(u64_to_user_ptr(args->data), &queue->faults, size);

	return ret ? -EFAULT : 0;
}

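/*
 * Query entry point: look up the queue named by args->id and dispatch on
 * args->param; only MSM_SUBMITQUEUE_PARAM_FAULTS is handled here.
 */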
int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
		struct drm_msm_submitqueue_query *args)
{
	struct msm_gpu_submitqueue *queue;
	int ret = -EINVAL;

	if (args->pad)
		return -EINVAL;

	queue = msm_submitqueue_get(ctx, args->id);
	if (!queue)
		return -ENOENT;

	if (args->param == MSM_SUBMITQUEUE_PARAM_FAULTS)
		ret = msm_submitqueue_query_faults(queue, args);

	msm_submitqueue_put(queue);

	return ret;
}

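/*
 * Remove the queue named by id from the per-file list and drop the list's
 * reference to it. The default queue (id 0) cannot be removed.
 */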
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id)
{
	struct msm_gpu_submitqueue *entry;

	if (!ctx)
		return 0;

	/*
	 * id 0 is the "default" queue and can't be destroyed
	 * by the user
	 */
	if (!id)
		return -ENOENT;

	write_lock(&ctx->queuelock);

	list_for_each_entry(entry, &ctx->submitqueues, node) {
		if (entry->id == id) {
			list_del(&entry->node);
			write_unlock(&ctx->queuelock);

			msm_submitqueue_put(entry);
			return 0;
		}
	}

	write_unlock(&ctx->queuelock);
	return -ENOENT;
}