// SPDX-License-Identifier: GPL-2.0
/*
 * Media device request objects
 *
 * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 * Copyright (C) 2018 Intel Corporation
 * Copyright (C) 2018 Google, Inc.
 *
 * Author: Hans Verkuil <hans.verkuil@cisco.com>
 * Author: Sakari Ailus <sakari.ailus@linux.intel.com>
 */

#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/refcount.h>

#include <media/media-device.h>
#include <media/media-request.h>

static const char * const request_state[] = {
        [MEDIA_REQUEST_STATE_IDLE]       = "idle",
        [MEDIA_REQUEST_STATE_VALIDATING] = "validating",
        [MEDIA_REQUEST_STATE_QUEUED]     = "queued",
        [MEDIA_REQUEST_STATE_COMPLETE]   = "complete",
        [MEDIA_REQUEST_STATE_CLEANING]   = "cleaning",
        [MEDIA_REQUEST_STATE_UPDATING]   = "updating",
};

static const char *
media_request_state_str(enum media_request_state state)
{
        BUILD_BUG_ON(ARRAY_SIZE(request_state) != NR_OF_MEDIA_REQUEST_STATE);

        if (WARN_ON(state >= ARRAY_SIZE(request_state)))
                return "invalid";
        return request_state[state];
}
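
/*
 * Release all objects bound to the request. The caller must have set
 * the request state to CLEANING before calling this.
 */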
static void media_request_clean(struct media_request *req)
{
        struct media_request_object *obj, *obj_safe;

        /* Just a sanity check. No other code path is allowed to change this. */
        WARN_ON(req->state != MEDIA_REQUEST_STATE_CLEANING);
        WARN_ON(req->updating_count);
        WARN_ON(req->access_count);

        list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
                media_request_object_unbind(obj);
                media_request_object_put(obj);
        }

        req->updating_count = 0;
        req->access_count = 0;
        WARN_ON(req->num_incomplete_objects);
        req->num_incomplete_objects = 0;
        wake_up_interruptible_all(&req->poll_wait);
}
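
/*
 * The last reference to the request is gone: clean it up and free it,
 * either through the driver's req_free op or with kfree().
 */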
static void media_request_release(struct kref *kref)
{
        struct media_request *req =
                container_of(kref, struct media_request, kref);
        struct media_device *mdev = req->mdev;

        dev_dbg(mdev->dev, "request: release %s\n", req->debug_str);

        /* No other users, no need for a spinlock */
        req->state = MEDIA_REQUEST_STATE_CLEANING;

        media_request_clean(req);

        if (mdev->ops->req_free)
                mdev->ops->req_free(req);
        else
                kfree(req);
}

void media_request_put(struct media_request *req)
{
        kref_put(&req->kref, media_request_release);
}
EXPORT_SYMBOL_GPL(media_request_put);

static int media_request_close(struct inode *inode, struct file *filp)
{
        struct media_request *req = filp->private_data;

        media_request_put(req);
        return 0;
}
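
/*
 * Poll support: EPOLLPRI is reported once the request completes.
 * Polling a request that is neither queued nor complete reports EPOLLERR.
 */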
static __poll_t media_request_poll(struct file *filp,
                                   struct poll_table_struct *wait)
{
        struct media_request *req = filp->private_data;
        unsigned long flags;
        __poll_t ret = 0;

        if (!(poll_requested_events(wait) & EPOLLPRI))
                return 0;

        poll_wait(filp, &req->poll_wait, wait);
        spin_lock_irqsave(&req->lock, flags);
        if (req->state == MEDIA_REQUEST_STATE_COMPLETE) {
                ret = EPOLLPRI;
                goto unlock;
        }
        if (req->state != MEDIA_REQUEST_STATE_QUEUED) {
                ret = EPOLLERR;
                goto unlock;
        }

unlock:
        spin_unlock_irqrestore(&req->lock, flags);
        return ret;
}
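
/*
 * MEDIA_REQUEST_IOC_QUEUE: validate the request and, if validation
 * succeeds, hand it to the driver's req_queue op.
 */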
static long media_request_ioctl_queue(struct media_request *req)
{
        struct media_device *mdev = req->mdev;
        enum media_request_state state;
        unsigned long flags;
        int ret;

        dev_dbg(mdev->dev, "request: queue %s\n", req->debug_str);

        /*
         * Ensure the request that is validated will be the one that gets queued
         * next by serialising the queueing process. This mutex is also used
         * to serialize with canceling a vb2 queue and with setting values such
         * as controls in a request.
         */
        mutex_lock(&mdev->req_queue_mutex);

        media_request_get(req);

        spin_lock_irqsave(&req->lock, flags);
        if (req->state == MEDIA_REQUEST_STATE_IDLE)
                req->state = MEDIA_REQUEST_STATE_VALIDATING;
        state = req->state;
        spin_unlock_irqrestore(&req->lock, flags);
        if (state != MEDIA_REQUEST_STATE_VALIDATING) {
                dev_dbg(mdev->dev,
                        "request: unable to queue %s, request in state %s\n",
                        req->debug_str, media_request_state_str(state));
                media_request_put(req);
                mutex_unlock(&mdev->req_queue_mutex);
                return -EBUSY;
        }

        ret = mdev->ops->req_validate(req);

        /*
         * If the req_validate was successful, then we mark the state as QUEUED
         * and call req_queue. The reason we set the state first is that this
         * allows req_queue to unbind or complete the queued objects in case
         * they are immediately 'consumed'. State changes from QUEUED to another
         * state can only happen if either the driver changes the state or if
         * the user cancels the vb2 queue. The driver can only change the state
         * after each object is queued through the req_queue op (and note that
         * that op cannot fail), so setting the state to QUEUED up front is
         * safe.
         *
         * The other reason for changing the state is if the vb2 queue is
         * canceled, and that uses the req_queue_mutex which is still locked
         * while req_queue is called, so that's safe as well.
         */
        spin_lock_irqsave(&req->lock, flags);
        req->state = ret ? MEDIA_REQUEST_STATE_IDLE
                         : MEDIA_REQUEST_STATE_QUEUED;
        spin_unlock_irqrestore(&req->lock, flags);

        if (!ret)
                mdev->ops->req_queue(req);

        mutex_unlock(&mdev->req_queue_mutex);

        if (ret) {
                dev_dbg(mdev->dev, "request: can't queue %s (%d)\n",
                        req->debug_str, ret);
                media_request_put(req);
        }

        return ret;
}
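
/*
 * MEDIA_REQUEST_IOC_REINIT: drop all bound objects and return the
 * request to the IDLE state so it can be reused.
 */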
static long media_request_ioctl_reinit(struct media_request *req)
{
        struct media_device *mdev = req->mdev;
        unsigned long flags;

        spin_lock_irqsave(&req->lock, flags);
        if (req->state != MEDIA_REQUEST_STATE_IDLE &&
            req->state != MEDIA_REQUEST_STATE_COMPLETE) {
                dev_dbg(mdev->dev,
                        "request: %s not in idle or complete state, cannot reinit\n",
                        req->debug_str);
                spin_unlock_irqrestore(&req->lock, flags);
                return -EBUSY;
        }
        if (req->access_count) {
                dev_dbg(mdev->dev,
                        "request: %s is being accessed, cannot reinit\n",
                        req->debug_str);
                spin_unlock_irqrestore(&req->lock, flags);
                return -EBUSY;
        }
        req->state = MEDIA_REQUEST_STATE_CLEANING;
        spin_unlock_irqrestore(&req->lock, flags);

        media_request_clean(req);

        spin_lock_irqsave(&req->lock, flags);
        req->state = MEDIA_REQUEST_STATE_IDLE;
        spin_unlock_irqrestore(&req->lock, flags);

        return 0;
}

static long media_request_ioctl(struct file *filp, unsigned int cmd,
                                unsigned long arg)
{
        struct media_request *req = filp->private_data;

        switch (cmd) {
        case MEDIA_REQUEST_IOC_QUEUE:
                return media_request_ioctl_queue(req);
        case MEDIA_REQUEST_IOC_REINIT:
                return media_request_ioctl_reinit(req);
        default:
                return -ENOIOCTLCMD;
        }
}

static const struct file_operations request_fops = {
        .owner = THIS_MODULE,
        .poll = media_request_poll,
        .unlocked_ioctl = media_request_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl = media_request_ioctl,
#endif /* CONFIG_COMPAT */
        .release = media_request_close,
};
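
/*
 * Look up a request by its file descriptor and take a reference to it.
 * Returns an ERR_PTR on failure.
 */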
struct media_request *
media_request_get_by_fd(struct media_device *mdev, int request_fd)
{
        struct fd f;
        struct media_request *req;

        if (!mdev || !mdev->ops ||
            !mdev->ops->req_validate || !mdev->ops->req_queue)
                return ERR_PTR(-EBADR);

        f = fdget(request_fd);
        if (!f.file)
                goto err_no_req_fd;

        if (f.file->f_op != &request_fops)
                goto err_fput;
        req = f.file->private_data;
        if (req->mdev != mdev)
                goto err_fput;

        /*
         * Note: as long as someone has an open filehandle of the request,
         * the request can never be released. The fdget() above ensures that
         * even if userspace closes the request filehandle, the release()
         * fop won't be called, so the media_request_get() always succeeds
         * and there is no race condition where the request was released
         * before media_request_get() is called.
         */
        media_request_get(req);
        fdput(f);

        return req;

err_fput:
        fdput(f);

err_no_req_fd:
        dev_dbg(mdev->dev, "cannot find request_fd %d\n", request_fd);
        return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(media_request_get_by_fd);
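
/*
 * Allocate a new request and an anonymous file descriptor for it.
 * On success the fd is returned through *alloc_fd.
 */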
int media_request_alloc(struct media_device *mdev, int *alloc_fd)
{
        struct media_request *req;
        struct file *filp;
        int fd;
        int ret;

        /* Either both are NULL or both are non-NULL */
        if (WARN_ON(!mdev->ops->req_alloc ^ !mdev->ops->req_free))
                return -ENOMEM;

        if (mdev->ops->req_alloc)
                req = mdev->ops->req_alloc(mdev);
        else
                req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        fd = get_unused_fd_flags(O_CLOEXEC);
        if (fd < 0) {
                ret = fd;
                goto err_free_req;
        }

        filp = anon_inode_getfile("request", &request_fops, NULL, O_CLOEXEC);
        if (IS_ERR(filp)) {
                ret = PTR_ERR(filp);
                goto err_put_fd;
        }

        filp->private_data = req;
        req->mdev = mdev;
        req->state = MEDIA_REQUEST_STATE_IDLE;
        req->num_incomplete_objects = 0;
        kref_init(&req->kref);
        INIT_LIST_HEAD(&req->objects);
        spin_lock_init(&req->lock);
        init_waitqueue_head(&req->poll_wait);
        req->updating_count = 0;
        req->access_count = 0;

        *alloc_fd = fd;

        snprintf(req->debug_str, sizeof(req->debug_str), "%u:%d",
                 atomic_inc_return(&mdev->request_id), fd);
        dev_dbg(mdev->dev, "request: allocated %s\n", req->debug_str);

        fd_install(fd, filp);

        return 0;

err_put_fd:
        put_unused_fd(fd);

err_free_req:
        if (mdev->ops->req_free)
                mdev->ops->req_free(req);
        else
                kfree(req);

        return ret;
}
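
/*
 * The last reference to a request object is gone. The object is
 * expected to be unbound by now; if it is not, warn and unbind it
 * before calling its release op.
 */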
static void media_request_object_release(struct kref *kref)
{
        struct media_request_object *obj =
                container_of(kref, struct media_request_object, kref);
        struct media_request *req = obj->req;

        if (WARN_ON(req))
                media_request_object_unbind(obj);
        obj->ops->release(obj);
}

struct media_request_object *
media_request_object_find(struct media_request *req,
                          const struct media_request_object_ops *ops,
                          void *priv)
{
        struct media_request_object *obj;
        struct media_request_object *found = NULL;
        unsigned long flags;

        if (WARN_ON(!ops || !priv))
                return NULL;

        spin_lock_irqsave(&req->lock, flags);
        list_for_each_entry(obj, &req->objects, list) {
                if (obj->ops == ops && obj->priv == priv) {
                        media_request_object_get(obj);
                        found = obj;
                        break;
                }
        }
        spin_unlock_irqrestore(&req->lock, flags);
        return found;
}
EXPORT_SYMBOL_GPL(media_request_object_find);

void media_request_object_put(struct media_request_object *obj)
{
        kref_put(&obj->kref, media_request_object_release);
}
EXPORT_SYMBOL_GPL(media_request_object_put);

void media_request_object_init(struct media_request_object *obj)
{
        obj->ops = NULL;
        obj->req = NULL;
        obj->priv = NULL;
        obj->completed = false;
        INIT_LIST_HEAD(&obj->list);
        kref_init(&obj->kref);
}
EXPORT_SYMBOL_GPL(media_request_object_init);
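
/*
 * Bind an object to a request. The request must be in the UPDATING
 * state; buffer objects are added at the tail of the object list,
 * other objects at the head.
 */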
int media_request_object_bind(struct media_request *req,
                              const struct media_request_object_ops *ops,
                              void *priv, bool is_buffer,
                              struct media_request_object *obj)
{
        unsigned long flags;
        int ret = -EBUSY;

        if (WARN_ON(!ops->release))
                return -EBADR;

        spin_lock_irqsave(&req->lock, flags);

        if (WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING))
                goto unlock;

        obj->req = req;
        obj->ops = ops;
        obj->priv = priv;

        if (is_buffer)
                list_add_tail(&obj->list, &req->objects);
        else
                list_add(&obj->list, &req->objects);
        req->num_incomplete_objects++;
        ret = 0;

unlock:
        spin_unlock_irqrestore(&req->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(media_request_object_bind);
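
/*
 * Unbind an object from its request. If this was the last incomplete
 * object of a queued request, the request transitions to COMPLETE and
 * any pollers are woken up.
 */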
void media_request_object_unbind(struct media_request_object *obj)
{
        struct media_request *req = obj->req;
        unsigned long flags;
        bool completed = false;

        if (WARN_ON(!req))
                return;

        spin_lock_irqsave(&req->lock, flags);
        list_del(&obj->list);
        obj->req = NULL;

        if (req->state == MEDIA_REQUEST_STATE_COMPLETE)
                goto unlock;

        if (WARN_ON(req->state == MEDIA_REQUEST_STATE_VALIDATING))
                goto unlock;

        if (req->state == MEDIA_REQUEST_STATE_CLEANING) {
                if (!obj->completed)
                        req->num_incomplete_objects--;
                goto unlock;
        }

        if (WARN_ON(!req->num_incomplete_objects))
                goto unlock;

        req->num_incomplete_objects--;
        if (req->state == MEDIA_REQUEST_STATE_QUEUED &&
            !req->num_incomplete_objects) {
                req->state = MEDIA_REQUEST_STATE_COMPLETE;
                completed = true;
                wake_up_interruptible_all(&req->poll_wait);
        }

unlock:
        spin_unlock_irqrestore(&req->lock, flags);
        if (obj->ops->unbind)
                obj->ops->unbind(obj);
        if (completed)
                media_request_put(req);
}
EXPORT_SYMBOL_GPL(media_request_object_unbind);
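
/*
 * Mark an object as completed. When the last incomplete object of a
 * queued request completes, the request becomes COMPLETE and pollers
 * are woken up.
 */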
void media_request_object_complete(struct media_request_object *obj)
{
        struct media_request *req = obj->req;
        unsigned long flags;
        bool completed = false;

        spin_lock_irqsave(&req->lock, flags);
        obj->completed = true;
        if (WARN_ON(!req->num_incomplete_objects) ||
            WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED))
                goto unlock;

        if (!--req->num_incomplete_objects) {
                req->state = MEDIA_REQUEST_STATE_COMPLETE;
                wake_up_interruptible_all(&req->poll_wait);
                completed = true;
        }
unlock:
        spin_unlock_irqrestore(&req->lock, flags);
        if (completed)
                media_request_put(req);
}
EXPORT_SYMBOL_GPL(media_request_object_complete);