// SPDX-License-Identifier: GPL-2.0
/*
 * Media device request objects
 *
 * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 * Copyright (C) 2018 Intel Corporation
 * Copyright (C) 2018 Google, Inc.
 *
 * Author: Hans Verkuil <hansverk@cisco.com>
 * Author: Sakari Ailus <sakari.ailus@linux.intel.com>
 */

#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/refcount.h>

#include <media/media-device.h>
#include <media/media-request.h>
static const char * const request_state[] = {
	[MEDIA_REQUEST_STATE_IDLE]	 = "idle",
	[MEDIA_REQUEST_STATE_VALIDATING] = "validating",
	[MEDIA_REQUEST_STATE_QUEUED]	 = "queued",
	[MEDIA_REQUEST_STATE_COMPLETE]	 = "complete",
	[MEDIA_REQUEST_STATE_CLEANING]	 = "cleaning",
	[MEDIA_REQUEST_STATE_UPDATING]	 = "updating",
};
static const char *
media_request_state_str(enum media_request_state state)
{
	BUILD_BUG_ON(ARRAY_SIZE(request_state) != NR_OF_MEDIA_REQUEST_STATE);

	if (WARN_ON(state >= ARRAY_SIZE(request_state)))
		return "invalid";
	return request_state[state];
}
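
/*
 * Request lifecycle overview (descriptive comment added for clarity, derived
 * from the state checks throughout this file): a request starts out IDLE,
 * moves to VALIDATING and then QUEUED when MEDIA_REQUEST_IOC_QUEUE is issued,
 * and becomes COMPLETE once its last incomplete object completes. UPDATING is
 * used while objects are bound or values are set in the request, and CLEANING
 * while the request is being reinitialized or released.
 */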
static void media_request_clean(struct media_request *req)
{
	struct media_request_object *obj, *obj_safe;

	/* Just a sanity check. No other code path is allowed to change this. */
	WARN_ON(req->state != MEDIA_REQUEST_STATE_CLEANING);
	WARN_ON(req->updating_count);
	WARN_ON(req->access_count);

	list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
		media_request_object_unbind(obj);
		media_request_object_put(obj);
	}

	req->updating_count = 0;
	req->access_count = 0;
	WARN_ON(req->num_incomplete_objects);
	req->num_incomplete_objects = 0;
	wake_up_interruptible_all(&req->poll_wait);
}
static void media_request_release(struct kref *kref)
{
	struct media_request *req =
		container_of(kref, struct media_request, kref);
	struct media_device *mdev = req->mdev;

	dev_dbg(mdev->dev, "request: release %s\n", req->debug_str);

	/* No other users, no need for a spinlock */
	req->state = MEDIA_REQUEST_STATE_CLEANING;

	media_request_clean(req);

	if (mdev->ops->req_free)
		mdev->ops->req_free(req);
	else
		kfree(req);
}
void media_request_put(struct media_request *req)
{
	kref_put(&req->kref, media_request_release);
}
EXPORT_SYMBOL_GPL(media_request_put);
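
/*
 * Note (added comment): each reference obtained with media_request_get() or
 * media_request_get_by_fd() must be balanced by a media_request_put(); the
 * final put ends up in media_request_release(), which cleans the request and
 * frees it, either through the optional req_free op or with kfree().
 */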
static int media_request_close(struct inode *inode, struct file *filp)
{
	struct media_request *req = filp->private_data;

	media_request_put(req);
	return 0;
}
static __poll_t media_request_poll(struct file *filp,
				   struct poll_table_struct *wait)
{
	struct media_request *req = filp->private_data;
	unsigned long flags;
	__poll_t ret = 0;

	if (!(poll_requested_events(wait) & EPOLLPRI))
		return 0;

	poll_wait(filp, &req->poll_wait, wait);
	spin_lock_irqsave(&req->lock, flags);
	if (req->state == MEDIA_REQUEST_STATE_COMPLETE) {
		ret = EPOLLPRI;
		goto unlock;
	}
	if (req->state != MEDIA_REQUEST_STATE_QUEUED) {
		ret = EPOLLERR;
		goto unlock;
	}

unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	return ret;
}
static long media_request_ioctl_queue(struct media_request *req)
{
	struct media_device *mdev = req->mdev;
	enum media_request_state state;
	unsigned long flags;
	int ret;

	dev_dbg(mdev->dev, "request: queue %s\n", req->debug_str);

	/*
	 * Ensure the request that is validated will be the one that gets queued
	 * next by serialising the queueing process. This mutex is also used
	 * to serialize with canceling a vb2 queue and with setting values such
	 * as controls in a request.
	 */
	mutex_lock(&mdev->req_queue_mutex);

	media_request_get(req);

	spin_lock_irqsave(&req->lock, flags);
	if (req->state == MEDIA_REQUEST_STATE_IDLE)
		req->state = MEDIA_REQUEST_STATE_VALIDATING;
	state = req->state;
	spin_unlock_irqrestore(&req->lock, flags);
	if (state != MEDIA_REQUEST_STATE_VALIDATING) {
		dev_dbg(mdev->dev,
			"request: unable to queue %s, request in state %s\n",
			req->debug_str, media_request_state_str(state));
		media_request_put(req);
		mutex_unlock(&mdev->req_queue_mutex);
		return -EBUSY;
	}

	ret = mdev->ops->req_validate(req);

	/*
	 * If the req_validate was successful, then we mark the state as QUEUED
	 * and call req_queue. The reason we set the state first is that this
	 * allows req_queue to unbind or complete the queued objects in case
	 * they are immediately 'consumed'. State changes from QUEUED to another
	 * state can only happen if either the driver changes the state or if
	 * the user cancels the vb2 queue. The driver can only change the state
	 * after each object is queued through the req_queue op (and note that
	 * that op cannot fail), so setting the state to QUEUED up front is
	 * safe.
	 *
	 * The other reason for changing the state is if the vb2 queue is
	 * canceled, and that uses the req_queue_mutex which is still locked
	 * while req_queue is called, so that's safe as well.
	 */
	spin_lock_irqsave(&req->lock, flags);
	req->state = ret ? MEDIA_REQUEST_STATE_IDLE
			 : MEDIA_REQUEST_STATE_QUEUED;
	spin_unlock_irqrestore(&req->lock, flags);

	if (!ret)
		mdev->ops->req_queue(req);

	mutex_unlock(&mdev->req_queue_mutex);

	if (ret) {
		dev_dbg(mdev->dev, "request: can't queue %s (%d)\n",
			req->debug_str, ret);
		media_request_put(req);
	}

	return ret;
}
static long media_request_ioctl_reinit(struct media_request *req)
{
	struct media_device *mdev = req->mdev;
	unsigned long flags;

	spin_lock_irqsave(&req->lock, flags);
	if (req->state != MEDIA_REQUEST_STATE_IDLE &&
	    req->state != MEDIA_REQUEST_STATE_COMPLETE) {
		dev_dbg(mdev->dev,
			"request: %s not in idle or complete state, cannot reinit\n",
			req->debug_str);
		spin_unlock_irqrestore(&req->lock, flags);
		return -EBUSY;
	}
	if (req->access_count) {
		dev_dbg(mdev->dev,
			"request: %s is being accessed, cannot reinit\n",
			req->debug_str);
		spin_unlock_irqrestore(&req->lock, flags);
		return -EBUSY;
	}
	req->state = MEDIA_REQUEST_STATE_CLEANING;
	spin_unlock_irqrestore(&req->lock, flags);

	media_request_clean(req);

	spin_lock_irqsave(&req->lock, flags);
	req->state = MEDIA_REQUEST_STATE_IDLE;
	spin_unlock_irqrestore(&req->lock, flags);

	return 0;
}
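
/*
 * Note (added comment): MEDIA_REQUEST_IOC_REINIT lets userspace recycle a
 * request that is idle or has completed instead of closing the request fd
 * and allocating a new one: the request is cleaned and returned to the IDLE
 * state so new objects can be bound to it.
 */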
static long media_request_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	struct media_request *req = filp->private_data;

	switch (cmd) {
	case MEDIA_REQUEST_IOC_QUEUE:
		return media_request_ioctl_queue(req);
	case MEDIA_REQUEST_IOC_REINIT:
		return media_request_ioctl_reinit(req);
	default:
		return -ENOIOCTLCMD;
	}
}
static const struct file_operations request_fops = {
	.owner = THIS_MODULE,
	.poll = media_request_poll,
	.unlocked_ioctl = media_request_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = media_request_ioctl,
#endif /* CONFIG_COMPAT */
	.release = media_request_close,
};
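
/*
 * Userspace usage sketch (illustrative comment only; error handling omitted
 * and the surrounding variables are hypothetical): a request fd obtained via
 * MEDIA_IOC_REQUEST_ALLOC on the media device node is driven through the
 * fops above roughly like this:
 *
 *	int request_fd;
 *
 *	ioctl(media_fd, MEDIA_IOC_REQUEST_ALLOC, &request_fd);
 *	// ...associate controls and buffers with request_fd...
 *	ioctl(request_fd, MEDIA_REQUEST_IOC_QUEUE);
 *	poll(&(struct pollfd){ .fd = request_fd, .events = POLLPRI }, 1, -1);
 *	ioctl(request_fd, MEDIA_REQUEST_IOC_REINIT);	// or just close() it
 *	close(request_fd);
 */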
struct media_request *
media_request_get_by_fd(struct media_device *mdev, int request_fd)
{
	struct media_request *req;

	if (!mdev || !mdev->ops ||
	    !mdev->ops->req_validate || !mdev->ops->req_queue)
		return ERR_PTR(-EBADR);

	CLASS(fd, f)(request_fd);
	if (fd_empty(f))
		goto err;

	if (fd_file(f)->f_op != &request_fops)
		goto err;

	req = fd_file(f)->private_data;
	if (req->mdev != mdev)
		goto err;

	/*
	 * Note: as long as someone has an open filehandle of the request,
	 * the request can never be released. The file reference obtained
	 * above ensures that even if userspace closes the request filehandle,
	 * the release() fop won't be called, so the media_request_get()
	 * always succeeds and there is no race condition where the request
	 * was released before media_request_get() is called.
	 */
	media_request_get(req);
	return req;

err:
	dev_dbg(mdev->dev, "cannot find request_fd %d\n", request_fd);
	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(media_request_get_by_fd);
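
/*
 * Driver-side usage sketch (illustrative comment only; 'mdev' and
 * 'request_fd' stand for whatever the driver has at hand): look the request
 * up, use it, and drop the reference when done.
 *
 *	struct media_request *req;
 *
 *	req = media_request_get_by_fd(mdev, request_fd);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	// ...access objects in the request...
 *	media_request_put(req);
 */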
int media_request_alloc(struct media_device *mdev, int *alloc_fd)
{
	struct media_request *req;
	struct file *filp;
	int fd;
	int ret;

	/* Either both are NULL or both are non-NULL */
	if (WARN_ON(!mdev->ops->req_alloc ^ !mdev->ops->req_free))
		return -ENOMEM;

	if (mdev->ops->req_alloc)
		req = mdev->ops->req_alloc(mdev);
	else
		req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0) {
		ret = fd;
		goto err_free_req;
	}

	filp = anon_inode_getfile("request", &request_fops, NULL, O_CLOEXEC);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_put_fd;
	}

	filp->private_data = req;
	req->mdev = mdev;
	req->state = MEDIA_REQUEST_STATE_IDLE;
	req->num_incomplete_objects = 0;
	kref_init(&req->kref);
	INIT_LIST_HEAD(&req->objects);
	spin_lock_init(&req->lock);
	init_waitqueue_head(&req->poll_wait);
	req->updating_count = 0;
	req->access_count = 0;

	*alloc_fd = fd;

	snprintf(req->debug_str, sizeof(req->debug_str), "%u:%d",
		 atomic_inc_return(&mdev->request_id), fd);
	dev_dbg(mdev->dev, "request: allocated %s\n", req->debug_str);

	fd_install(fd, filp);

	return 0;

err_put_fd:
	put_unused_fd(fd);

err_free_req:
	if (mdev->ops->req_free)
		mdev->ops->req_free(req);
	else
		kfree(req);

	return ret;
}
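
/*
 * Note (added comment): media_request_alloc() is typically reached from the
 * MEDIA_IOC_REQUEST_ALLOC ioctl handler on the media device node, which only
 * exposes it when the driver implements the req_validate and req_queue ops.
 */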
static void media_request_object_release(struct kref *kref)
{
	struct media_request_object *obj =
		container_of(kref, struct media_request_object, kref);
	struct media_request *req = obj->req;

	if (WARN_ON(req))
		media_request_object_unbind(obj);
	obj->ops->release(obj);
}
struct media_request_object *
media_request_object_find(struct media_request *req,
			  const struct media_request_object_ops *ops,
			  void *priv)
{
	struct media_request_object *obj;
	struct media_request_object *found = NULL;
	unsigned long flags;

	if (WARN_ON(!ops || !priv))
		return NULL;

	spin_lock_irqsave(&req->lock, flags);
	list_for_each_entry(obj, &req->objects, list) {
		if (obj->ops == ops && obj->priv == priv) {
			media_request_object_get(obj);
			found = obj;
			break;
		}
	}
	spin_unlock_irqrestore(&req->lock, flags);
	return found;
}
EXPORT_SYMBOL_GPL(media_request_object_find);
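
/*
 * Note (added comment): media_request_object_find() returns the object with
 * an extra reference taken via media_request_object_get(), so a successful
 * lookup must be balanced by media_request_object_put() once the caller is
 * done with the object.
 */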
void media_request_object_put(struct media_request_object *obj)
{
	kref_put(&obj->kref, media_request_object_release);
}
EXPORT_SYMBOL_GPL(media_request_object_put);
void media_request_object_init(struct media_request_object *obj)
{
	obj->ops = NULL;
	obj->req = NULL;
	obj->priv = NULL;
	obj->completed = false;
	INIT_LIST_HEAD(&obj->list);
	kref_init(&obj->kref);
}
EXPORT_SYMBOL_GPL(media_request_object_init);
int media_request_object_bind(struct media_request *req,
			      const struct media_request_object_ops *ops,
			      void *priv, bool is_buffer,
			      struct media_request_object *obj)
{
	unsigned long flags;
	int ret = -EBUSY;

	if (WARN_ON(!ops->release))
		return -EBADR;

	spin_lock_irqsave(&req->lock, flags);

	if (WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING &&
		    req->state != MEDIA_REQUEST_STATE_QUEUED))
		goto unlock;

	obj->req = req;
	obj->ops = ops;
	obj->priv = priv;

	if (is_buffer)
		list_add_tail(&obj->list, &req->objects);
	else
		list_add(&obj->list, &req->objects);
	req->num_incomplete_objects++;
	ret = 0;

unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(media_request_object_bind);
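
/*
 * Typical object setup sketch (illustrative comment only; 'my_obj' and
 * 'my_obj_ops' are hypothetical driver code): initialize the object, then
 * bind it while the request is being updated.
 *
 *	media_request_object_init(&my_obj->obj);
 *	ret = media_request_object_bind(req, &my_obj_ops, my_obj, false,
 *					&my_obj->obj);
 *	if (ret)
 *		return ret;	// the object was not added to the request
 */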
void media_request_object_unbind(struct media_request_object *obj)
{
	struct media_request *req = obj->req;
	unsigned long flags;
	bool completed = false;

	if (WARN_ON(!req))
		return;

	spin_lock_irqsave(&req->lock, flags);
	list_del(&obj->list);
	obj->req = NULL;

	if (req->state == MEDIA_REQUEST_STATE_COMPLETE)
		goto unlock;

	if (WARN_ON(req->state == MEDIA_REQUEST_STATE_VALIDATING))
		goto unlock;

	if (req->state == MEDIA_REQUEST_STATE_CLEANING) {
		if (!obj->completed)
			req->num_incomplete_objects--;
		goto unlock;
	}

	if (WARN_ON(!req->num_incomplete_objects))
		goto unlock;

	req->num_incomplete_objects--;
	if (req->state == MEDIA_REQUEST_STATE_QUEUED &&
	    !req->num_incomplete_objects) {
		req->state = MEDIA_REQUEST_STATE_COMPLETE;
		completed = true;
		wake_up_interruptible_all(&req->poll_wait);
	}

unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	if (obj->ops->unbind)
		obj->ops->unbind(obj);
	if (completed)
		media_request_put(req);
}
EXPORT_SYMBOL_GPL(media_request_object_unbind);
void media_request_object_complete(struct media_request_object *obj)
{
	struct media_request *req = obj->req;
	unsigned long flags;
	bool completed = false;

	spin_lock_irqsave(&req->lock, flags);
	if (WARN_ON(obj->completed))
		goto unlock;
	obj->completed = true;
	if (WARN_ON(!req->num_incomplete_objects) ||
	    WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED))
		goto unlock;

	if (!--req->num_incomplete_objects) {
		req->state = MEDIA_REQUEST_STATE_COMPLETE;
		wake_up_interruptible_all(&req->poll_wait);
		completed = true;
	}
unlock:
	spin_unlock_irqrestore(&req->lock, flags);
	if (completed)
		media_request_put(req);
}
EXPORT_SYMBOL_GPL(media_request_object_complete);
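
/*
 * Object lifecycle summary (descriptive comment added for clarity): objects
 * go through media_request_object_init(), are bound to a request with
 * media_request_object_bind(), and are either completed by the driver via
 * media_request_object_complete() or unbound again; the last reference is
 * dropped with media_request_object_put(). When the last incomplete object
 * of a queued request completes, the request itself becomes COMPLETE and any
 * poll() waiters are woken up.
 */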