// SPDX-License-Identifier: GPL-2.0
/*
 * Media device request objects
 *
 * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 * Copyright (C) 2018 Intel Corporation
 * Copyright (C) 2018 Google, Inc.
 *
 * Author: Hans Verkuil <hans.verkuil@cisco.com>
 * Author: Sakari Ailus <sakari.ailus@linux.intel.com>
 */

#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/refcount.h>

#include <media/media-device.h>
#include <media/media-request.h>

static const char * const request_state[] = {
	[MEDIA_REQUEST_STATE_IDLE]	 = "idle",
	[MEDIA_REQUEST_STATE_VALIDATING] = "validating",
	[MEDIA_REQUEST_STATE_QUEUED]	 = "queued",
	[MEDIA_REQUEST_STATE_COMPLETE]	 = "complete",
	[MEDIA_REQUEST_STATE_CLEANING]	 = "cleaning",
	[MEDIA_REQUEST_STATE_UPDATING]	 = "updating",
};

static const char *
media_request_state_str(enum media_request_state state)
{
	BUILD_BUG_ON(ARRAY_SIZE(request_state) != NR_OF_MEDIA_REQUEST_STATE);

	if (WARN_ON(state >= ARRAY_SIZE(request_state)))
		return "invalid";
	return request_state[state];
}

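/*
 * Release all objects bound to the request and reset its bookkeeping.
 * Only called while the request is in the CLEANING state, either from
 * the final kref release or from MEDIA_REQUEST_IOC_REINIT.
 */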
static void media_request_clean(struct media_request *req)
{
	struct media_request_object *obj, *obj_safe;

	/* Just a sanity check. No other code path is allowed to change this. */
	WARN_ON(req->state != MEDIA_REQUEST_STATE_CLEANING);
	WARN_ON(req->updating_count);
	WARN_ON(req->access_count);

	list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
		media_request_object_unbind(obj);
		media_request_object_put(obj);
	}

	req->updating_count = 0;
	req->access_count = 0;
	WARN_ON(req->num_incomplete_objects);
	req->num_incomplete_objects = 0;
	wake_up_interruptible_all(&req->poll_wait);
}

static void media_request_release(struct kref *kref)
{
	struct media_request *req =
		container_of(kref, struct media_request, kref);
	struct media_device *mdev = req->mdev;

	dev_dbg(mdev->dev, "request: release %s\n", req->debug_str);

	/* No other users, no need for a spinlock */
	req->state = MEDIA_REQUEST_STATE_CLEANING;

	media_request_clean(req);

	if (mdev->ops->req_free)
		mdev->ops->req_free(req);
	else
		kfree(req);
}

void media_request_put(struct media_request *req)
{
	kref_put(&req->kref, media_request_release);
}
EXPORT_SYMBOL_GPL(media_request_put);

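/*
 * Called when the last filehandle of the request fd is closed: drop the
 * file's reference to the request. The request itself is only freed once
 * all other references (e.g. a queued but not yet completed request) are
 * gone as well.
 */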
static int media_request_close(struct inode *inode, struct file *filp)
{
	struct media_request *req = filp->private_data;

	media_request_put(req);
	return 0;
}

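/*
 * Polling a request fd only supports EPOLLPRI: it is signalled once the
 * request completes. Polling a request that is not queued (and not yet
 * complete) returns EPOLLERR.
 */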
static __poll_t media_request_poll(struct file *filp,
				   struct poll_table_struct *wait)
{
	struct media_request *req = filp->private_data;
	unsigned long flags;
	__poll_t ret = 0;

	if (!(poll_requested_events(wait) & EPOLLPRI))
		return 0;

	poll_wait(filp, &req->poll_wait, wait);
	spin_lock_irqsave(&req->lock, flags);
	if (req->state == MEDIA_REQUEST_STATE_COMPLETE) {
		ret = EPOLLPRI;
		goto unlock;
	}
	if (req->state != MEDIA_REQUEST_STATE_QUEUED) {
		ret = EPOLLERR;
		goto unlock;
	}

unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	return ret;
}

static long media_request_ioctl_queue(struct media_request *req)
{
	struct media_device *mdev = req->mdev;
	enum media_request_state state;
	unsigned long flags;
	int ret;

	dev_dbg(mdev->dev, "request: queue %s\n", req->debug_str);

	/*
	 * Ensure the request that is validated will be the one that gets queued
	 * next by serialising the queueing process. This mutex is also used
	 * to serialize with canceling a vb2 queue and with setting values such
	 * as controls in a request.
	 */
	mutex_lock(&mdev->req_queue_mutex);

	media_request_get(req);

	spin_lock_irqsave(&req->lock, flags);
	if (req->state == MEDIA_REQUEST_STATE_IDLE)
		req->state = MEDIA_REQUEST_STATE_VALIDATING;
	state = req->state;
	spin_unlock_irqrestore(&req->lock, flags);
	if (state != MEDIA_REQUEST_STATE_VALIDATING) {
		dev_dbg(mdev->dev,
			"request: unable to queue %s, request in state %s\n",
			req->debug_str, media_request_state_str(state));
		media_request_put(req);
		mutex_unlock(&mdev->req_queue_mutex);
		return -EBUSY;
	}

	ret = mdev->ops->req_validate(req);

	/*
	 * If the req_validate was successful, then we mark the state as QUEUED
	 * and call req_queue. The reason we set the state first is that this
	 * allows req_queue to unbind or complete the queued objects in case
	 * they are immediately 'consumed'. State changes from QUEUED to another
	 * state can only happen if either the driver changes the state or if
	 * the user cancels the vb2 queue. The driver can only change the state
	 * after each object is queued through the req_queue op (and note that
	 * that op cannot fail), so setting the state to QUEUED up front is
	 * safe.
	 *
	 * The other reason for changing the state is if the vb2 queue is
	 * canceled, and that uses the req_queue_mutex which is still locked
	 * while req_queue is called, so that's safe as well.
	 */
	spin_lock_irqsave(&req->lock, flags);
	req->state = ret ? MEDIA_REQUEST_STATE_IDLE
			 : MEDIA_REQUEST_STATE_QUEUED;
	spin_unlock_irqrestore(&req->lock, flags);

	if (!ret)
		mdev->ops->req_queue(req);

	mutex_unlock(&mdev->req_queue_mutex);

	if (ret) {
		dev_dbg(mdev->dev, "request: can't queue %s (%d)\n",
			req->debug_str, ret);
		media_request_put(req);
	}

	return ret;
}

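/*
 * MEDIA_REQUEST_IOC_REINIT: return the request to the IDLE state so it can
 * be reused. This is only allowed if the request is idle or complete and
 * nobody is currently accessing its objects.
 */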
static long media_request_ioctl_reinit(struct media_request *req)
{
	struct media_device *mdev = req->mdev;
	unsigned long flags;

	spin_lock_irqsave(&req->lock, flags);
	if (req->state != MEDIA_REQUEST_STATE_IDLE &&
	    req->state != MEDIA_REQUEST_STATE_COMPLETE) {
		dev_dbg(mdev->dev,
			"request: %s not in idle or complete state, cannot reinit\n",
			req->debug_str);
		spin_unlock_irqrestore(&req->lock, flags);
		return -EBUSY;
	}
	if (req->access_count) {
		dev_dbg(mdev->dev,
			"request: %s is being accessed, cannot reinit\n",
			req->debug_str);
		spin_unlock_irqrestore(&req->lock, flags);
		return -EBUSY;
	}
	req->state = MEDIA_REQUEST_STATE_CLEANING;
	spin_unlock_irqrestore(&req->lock, flags);

	media_request_clean(req);

	spin_lock_irqsave(&req->lock, flags);
	req->state = MEDIA_REQUEST_STATE_IDLE;
	spin_unlock_irqrestore(&req->lock, flags);

	return 0;
}

static long media_request_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	struct media_request *req = filp->private_data;

	switch (cmd) {
	case MEDIA_REQUEST_IOC_QUEUE:
		return media_request_ioctl_queue(req);
	case MEDIA_REQUEST_IOC_REINIT:
		return media_request_ioctl_reinit(req);
	default:
		return -ENOIOCTLCMD;
	}
}

static const struct file_operations request_fops = {
	.owner = THIS_MODULE,
	.poll = media_request_poll,
	.unlocked_ioctl = media_request_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = media_request_ioctl,
#endif /* CONFIG_COMPAT */
	.release = media_request_close,
};

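/*
 * Rough illustration of how userspace drives this API (sketch only; see the
 * uAPI documentation for the authoritative sequence):
 *
 *	int req_fd;
 *
 *	// Allocate a request on the media device node.
 *	ioctl(media_fd, MEDIA_IOC_REQUEST_ALLOC, &req_fd);
 *	// Associate controls/buffers with req_fd via the V4L2 ioctls,
 *	// then queue the request and wait for completion.
 *	ioctl(req_fd, MEDIA_REQUEST_IOC_QUEUE);
 *	poll(&(struct pollfd){ .fd = req_fd, .events = POLLPRI }, 1, -1);
 *	// Reuse the request or close(req_fd) when done.
 *	ioctl(req_fd, MEDIA_REQUEST_IOC_REINIT);
 */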
struct media_request *
media_request_get_by_fd(struct media_device *mdev, int request_fd)
{
	struct file *filp;
	struct media_request *req;

	if (!mdev || !mdev->ops ||
	    !mdev->ops->req_validate || !mdev->ops->req_queue)
		return ERR_PTR(-EACCES);

	filp = fget(request_fd);
	if (!filp)
		goto err_no_req_fd;

	if (filp->f_op != &request_fops)
		goto err_fput;
	req = filp->private_data;
	if (req->mdev != mdev)
		goto err_fput;

	/*
	 * Note: as long as someone has an open filehandle of the request,
	 * the request can never be released. The fget() above ensures that
	 * even if userspace closes the request filehandle, the release()
	 * fop won't be called, so the media_request_get() always succeeds
	 * and there is no race condition where the request was released
	 * before media_request_get() is called.
	 */
	media_request_get(req);
	fput(filp);

	return req;

err_fput:
	fput(filp);

err_no_req_fd:
	dev_dbg(mdev->dev, "cannot find request_fd %d\n", request_fd);
	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(media_request_get_by_fd);

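/*
 * Allocate a new request plus the anonymous inode file and fd that
 * userspace will use to refer to it. The fd is only installed once the
 * request has been fully initialized, so a partially set up request is
 * never visible to userspace.
 */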
int media_request_alloc(struct media_device *mdev, int *alloc_fd)
{
	struct media_request *req;
	struct file *filp;
	int fd;
	int ret;

	/* Either both are NULL or both are non-NULL */
	if (WARN_ON(!mdev->ops->req_alloc ^ !mdev->ops->req_free))
		return -ENOMEM;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	filp = anon_inode_getfile("request", &request_fops, NULL, O_CLOEXEC);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_put_fd;
	}

	if (mdev->ops->req_alloc)
		req = mdev->ops->req_alloc(mdev);
	else
		req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto err_fput;
	}

	filp->private_data = req;
	req->mdev = mdev;
	req->state = MEDIA_REQUEST_STATE_IDLE;
	req->num_incomplete_objects = 0;
	kref_init(&req->kref);
	INIT_LIST_HEAD(&req->objects);
	spin_lock_init(&req->lock);
	init_waitqueue_head(&req->poll_wait);
	req->updating_count = 0;
	req->access_count = 0;

	*alloc_fd = fd;

	snprintf(req->debug_str, sizeof(req->debug_str), "%u:%d",
		 atomic_inc_return(&mdev->request_id), fd);
	dev_dbg(mdev->dev, "request: allocated %s\n", req->debug_str);

	fd_install(fd, filp);

	return 0;

err_fput:
	fput(filp);

err_put_fd:
	put_unused_fd(fd);

	return ret;
}

static void media_request_object_release(struct kref *kref)
{
	struct media_request_object *obj =
		container_of(kref, struct media_request_object, kref);
	struct media_request *req = obj->req;

	if (WARN_ON(req))
		media_request_object_unbind(obj);
	obj->ops->release(obj);
}

struct media_request_object *
media_request_object_find(struct media_request *req,
			  const struct media_request_object_ops *ops,
			  void *priv)
{
	struct media_request_object *obj;
	struct media_request_object *found = NULL;
	unsigned long flags;

	if (WARN_ON(!ops || !priv))
		return NULL;

	spin_lock_irqsave(&req->lock, flags);
	list_for_each_entry(obj, &req->objects, list) {
		if (obj->ops == ops && obj->priv == priv) {
			media_request_object_get(obj);
			found = obj;
			break;
		}
	}
	spin_unlock_irqrestore(&req->lock, flags);
	return found;
}
EXPORT_SYMBOL_GPL(media_request_object_find);

void media_request_object_put(struct media_request_object *obj)
{
	kref_put(&obj->kref, media_request_object_release);
}
EXPORT_SYMBOL_GPL(media_request_object_put);

void media_request_object_init(struct media_request_object *obj)
{
	obj->ops = NULL;
	obj->req = NULL;
	obj->priv = NULL;
	obj->completed = false;
	INIT_LIST_HEAD(&obj->list);
	kref_init(&obj->kref);
}
EXPORT_SYMBOL_GPL(media_request_object_init);

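/*
 * Bind an object to a request. This is only allowed while the request is
 * being updated (UPDATING state). Buffer objects are added to the tail of
 * the object list and all other objects (such as controls) to the head,
 * so non-buffer objects come first when the request is queued.
 */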
int media_request_object_bind(struct media_request *req,
			      const struct media_request_object_ops *ops,
			      void *priv, bool is_buffer,
			      struct media_request_object *obj)
{
	unsigned long flags;
	int ret = -EBUSY;

	if (WARN_ON(!ops->release))
		return -EBADR;

	spin_lock_irqsave(&req->lock, flags);

	if (WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING))
		goto unlock;

	obj->req = req;
	obj->ops = ops;
	obj->priv = priv;

	if (is_buffer)
		list_add_tail(&obj->list, &req->objects);
	else
		list_add(&obj->list, &req->objects);
	req->num_incomplete_objects++;
	ret = 0;

unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(media_request_object_bind);

void media_request_object_unbind(struct media_request_object *obj)
{
	struct media_request *req = obj->req;
	unsigned long flags;
	bool completed = false;

	if (WARN_ON(!req))
		return;

	spin_lock_irqsave(&req->lock, flags);
	list_del(&obj->list);
	obj->req = NULL;

	if (req->state == MEDIA_REQUEST_STATE_COMPLETE)
		goto unlock;

	if (WARN_ON(req->state == MEDIA_REQUEST_STATE_VALIDATING))
		goto unlock;

	if (req->state == MEDIA_REQUEST_STATE_CLEANING) {
		if (!obj->completed)
			req->num_incomplete_objects--;
		goto unlock;
	}

	if (WARN_ON(!req->num_incomplete_objects))
		goto unlock;

	req->num_incomplete_objects--;
	if (req->state == MEDIA_REQUEST_STATE_QUEUED &&
	    !req->num_incomplete_objects) {
		req->state = MEDIA_REQUEST_STATE_COMPLETE;
		completed = true;
		wake_up_interruptible_all(&req->poll_wait);
	}

unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	if (obj->ops->unbind)
		obj->ops->unbind(obj);
	if (completed)
		media_request_put(req);
}
EXPORT_SYMBOL_GPL(media_request_object_unbind);

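/*
 * Mark an object as completed. When the last incomplete object of a queued
 * request completes, the request transitions to the COMPLETE state, pollers
 * are woken up and the reference taken when the request was queued is
 * dropped.
 */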
void media_request_object_complete(struct media_request_object *obj)
{
	struct media_request *req = obj->req;
	unsigned long flags;
	bool completed = false;

	spin_lock_irqsave(&req->lock, flags);
	if (obj->completed)
		goto unlock;
	obj->completed = true;
	if (WARN_ON(!req->num_incomplete_objects) ||
	    WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED))
		goto unlock;

	if (!--req->num_incomplete_objects) {
		req->state = MEDIA_REQUEST_STATE_COMPLETE;
		wake_up_interruptible_all(&req->poll_wait);
		completed = true;
	}
unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	if (completed)
		media_request_put(req);
}
EXPORT_SYMBOL_GPL(media_request_object_complete);