// SPDX-License-Identifier: GPL-2.0
/*
 * Media device request objects
 *
 * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 * Copyright (C) 2018 Intel Corporation
 * Copyright (C) 2018 Google, Inc.
 *
 * Author: Hans Verkuil <hans.verkuil@cisco.com>
 * Author: Sakari Ailus <sakari.ailus@linux.intel.com>
 */

#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/refcount.h>

#include <media/media-device.h>
#include <media/media-request.h>

static const char * const request_state[] = {
        [MEDIA_REQUEST_STATE_IDLE]       = "idle",
        [MEDIA_REQUEST_STATE_VALIDATING] = "validating",
        [MEDIA_REQUEST_STATE_QUEUED]     = "queued",
        [MEDIA_REQUEST_STATE_COMPLETE]   = "complete",
        [MEDIA_REQUEST_STATE_CLEANING]   = "cleaning",
        [MEDIA_REQUEST_STATE_UPDATING]   = "updating",
};

static const char *
media_request_state_str(enum media_request_state state)
{
        BUILD_BUG_ON(ARRAY_SIZE(request_state) != NR_OF_MEDIA_REQUEST_STATE);

        if (WARN_ON(state >= ARRAY_SIZE(request_state)))
                return "invalid";
        return request_state[state];
}

static void media_request_clean(struct media_request *req)
{
        struct media_request_object *obj, *obj_safe;

        /* Just a sanity check. No other code path is allowed to change this. */
        WARN_ON(req->state != MEDIA_REQUEST_STATE_CLEANING);
        WARN_ON(req->updating_count);
        WARN_ON(req->access_count);

        list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
                media_request_object_unbind(obj);
                media_request_object_put(obj);
        }

        req->updating_count = 0;
        req->access_count = 0;
        WARN_ON(req->num_incomplete_objects);
        req->num_incomplete_objects = 0;
        wake_up_interruptible_all(&req->poll_wait);
}

static void media_request_release(struct kref *kref)
{
        struct media_request *req =
                container_of(kref, struct media_request, kref);
        struct media_device *mdev = req->mdev;

        dev_dbg(mdev->dev, "request: release %s\n", req->debug_str);

        /* No other users, no need for a spinlock */
        req->state = MEDIA_REQUEST_STATE_CLEANING;

        media_request_clean(req);

        if (mdev->ops->req_free)
                mdev->ops->req_free(req);
        else
                kfree(req);
}

void media_request_put(struct media_request *req)
{
        kref_put(&req->kref, media_request_release);
}
EXPORT_SYMBOL_GPL(media_request_put);

static int media_request_close(struct inode *inode, struct file *filp)
{
        struct media_request *req = filp->private_data;

        media_request_put(req);
        return 0;
}

static __poll_t media_request_poll(struct file *filp,
                                   struct poll_table_struct *wait)
{
        struct media_request *req = filp->private_data;
        unsigned long flags;
        __poll_t ret = 0;

        if (!(poll_requested_events(wait) & EPOLLPRI))
                return 0;

        spin_lock_irqsave(&req->lock, flags);
        if (req->state == MEDIA_REQUEST_STATE_COMPLETE) {
                ret = EPOLLPRI;
                goto unlock;
        }
        if (req->state != MEDIA_REQUEST_STATE_QUEUED) {
                ret = EPOLLERR;
                goto unlock;
        }

        poll_wait(filp, &req->poll_wait, wait);

unlock:
        spin_unlock_irqrestore(&req->lock, flags);
        return ret;
}

static long media_request_ioctl_queue(struct media_request *req)
{
        struct media_device *mdev = req->mdev;
        enum media_request_state state;
        unsigned long flags;
        int ret;

        dev_dbg(mdev->dev, "request: queue %s\n", req->debug_str);

        /*
         * Ensure the request that is validated will be the one that gets queued
         * next by serialising the queueing process. This mutex is also used
         * to serialize with canceling a vb2 queue and with setting values such
         * as controls in a request.
         */
        mutex_lock(&mdev->req_queue_mutex);

        media_request_get(req);

        spin_lock_irqsave(&req->lock, flags);
        if (req->state == MEDIA_REQUEST_STATE_IDLE)
                req->state = MEDIA_REQUEST_STATE_VALIDATING;
        state = req->state;
        spin_unlock_irqrestore(&req->lock, flags);
        if (state != MEDIA_REQUEST_STATE_VALIDATING) {
                dev_dbg(mdev->dev,
                        "request: unable to queue %s, request in state %s\n",
                        req->debug_str, media_request_state_str(state));
                media_request_put(req);
                mutex_unlock(&mdev->req_queue_mutex);
                return -EBUSY;
        }

        ret = mdev->ops->req_validate(req);

        /*
         * If the req_validate was successful, then we mark the state as QUEUED
         * and call req_queue. The reason we set the state first is that this
         * allows req_queue to unbind or complete the queued objects in case
         * they are immediately 'consumed'. State changes from QUEUED to another
         * state can only happen if either the driver changes the state or if
         * the user cancels the vb2 queue. The driver can only change the state
         * after each object is queued through the req_queue op (and note that
         * that op cannot fail), so setting the state to QUEUED up front is
         * safe.
         *
         * The other reason for changing the state is if the vb2 queue is
         * canceled, and that uses the req_queue_mutex which is still locked
         * while req_queue is called, so that's safe as well.
         */
        spin_lock_irqsave(&req->lock, flags);
        req->state = ret ? MEDIA_REQUEST_STATE_IDLE
                         : MEDIA_REQUEST_STATE_QUEUED;
        spin_unlock_irqrestore(&req->lock, flags);

        if (!ret)
                mdev->ops->req_queue(req);

        mutex_unlock(&mdev->req_queue_mutex);

        if (ret) {
                dev_dbg(mdev->dev, "request: can't queue %s (%d)\n",
                        req->debug_str, ret);
                media_request_put(req);
        }

        return ret;
}

static long media_request_ioctl_reinit(struct media_request *req)
{
        struct media_device *mdev = req->mdev;
        unsigned long flags;

        spin_lock_irqsave(&req->lock, flags);
        if (req->state != MEDIA_REQUEST_STATE_IDLE &&
            req->state != MEDIA_REQUEST_STATE_COMPLETE) {
                dev_dbg(mdev->dev,
                        "request: %s not in idle or complete state, cannot reinit\n",
                        req->debug_str);
                spin_unlock_irqrestore(&req->lock, flags);
                return -EBUSY;
        }
        if (req->access_count) {
                dev_dbg(mdev->dev,
                        "request: %s is being accessed, cannot reinit\n",
                        req->debug_str);
                spin_unlock_irqrestore(&req->lock, flags);
                return -EBUSY;
        }
        req->state = MEDIA_REQUEST_STATE_CLEANING;
        spin_unlock_irqrestore(&req->lock, flags);

        media_request_clean(req);

        spin_lock_irqsave(&req->lock, flags);
        req->state = MEDIA_REQUEST_STATE_IDLE;
        spin_unlock_irqrestore(&req->lock, flags);

        return 0;
}

static long media_request_ioctl(struct file *filp, unsigned int cmd,
                                unsigned long arg)
{
        struct media_request *req = filp->private_data;

        switch (cmd) {
        case MEDIA_REQUEST_IOC_QUEUE:
                return media_request_ioctl_queue(req);
        case MEDIA_REQUEST_IOC_REINIT:
                return media_request_ioctl_reinit(req);
        default:
                return -ENOIOCTLCMD;
        }
}

static const struct file_operations request_fops = {
        .owner = THIS_MODULE,
        .poll = media_request_poll,
        .unlocked_ioctl = media_request_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl = media_request_ioctl,
#endif /* CONFIG_COMPAT */
        .release = media_request_close,
};

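/*
 * Sketch of how userspace drives these fops (error handling omitted; the
 * request fd itself is obtained with MEDIA_IOC_REQUEST_ALLOC on the media
 * device fd, which ends up in media_request_alloc() below):
 *
 *      int req_fd;
 *
 *      ioctl(media_fd, MEDIA_IOC_REQUEST_ALLOC, &req_fd);
 *      ... associate buffers and controls with req_fd ...
 *      ioctl(req_fd, MEDIA_REQUEST_IOC_QUEUE);
 *
 *      struct pollfd pfd = { .fd = req_fd, .events = POLLPRI };
 *
 *      poll(&pfd, 1, -1);                        - wakes once the request completes
 *      ioctl(req_fd, MEDIA_REQUEST_IOC_REINIT);  - recycle the request
 *      close(req_fd);                            - drops the file's reference
 */
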
struct media_request *
media_request_get_by_fd(struct media_device *mdev, int request_fd)
{
        struct file *filp;
        struct media_request *req;

        if (!mdev || !mdev->ops ||
            !mdev->ops->req_validate || !mdev->ops->req_queue)
                return ERR_PTR(-EACCES);

        filp = fget(request_fd);
        if (!filp)
                goto err_no_req_fd;

        if (filp->f_op != &request_fops)
                goto err_fput;
        req = filp->private_data;
        if (req->mdev != mdev)
                goto err_fput;

        /*
         * Note: as long as someone has an open filehandle of the request,
         * the request can never be released. The fget() above ensures that
         * even if userspace closes the request filehandle, the release()
         * fop won't be called, so the media_request_get() always succeeds
         * and there is no race condition where the request was released
         * before media_request_get() is called.
         */
        media_request_get(req);
        fput(filp);

        return req;

err_fput:
        fput(filp);

err_no_req_fd:
        dev_dbg(mdev->dev, "cannot find request_fd %d\n", request_fd);
        return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(media_request_get_by_fd);

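/*
 * Typical driver-side use, e.g. from an ioctl handler that received a
 * request_fd from userspace (sketch; error handling abbreviated):
 *
 *      struct media_request *req;
 *
 *      req = media_request_get_by_fd(mdev, request_fd);
 *      if (IS_ERR(req))
 *              return PTR_ERR(req);
 *      ... look up or bind objects, validate state ...
 *      media_request_put(req);         - drop the reference taken above
 */
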
int media_request_alloc(struct media_device *mdev, int *alloc_fd)
{
        struct media_request *req;
        struct file *filp;
        int fd;
        int ret;

        /* Either both are NULL or both are non-NULL */
        if (WARN_ON(!mdev->ops->req_alloc ^ !mdev->ops->req_free))
                return -ENOMEM;

        fd = get_unused_fd_flags(O_CLOEXEC);
        if (fd < 0)
                return fd;

        filp = anon_inode_getfile("request", &request_fops, NULL, O_CLOEXEC);
        if (IS_ERR(filp)) {
                ret = PTR_ERR(filp);
                goto err_put_fd;
        }

        if (mdev->ops->req_alloc)
                req = mdev->ops->req_alloc(mdev);
        else
                req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto err_fput;
        }

        filp->private_data = req;
        req->mdev = mdev;
        req->state = MEDIA_REQUEST_STATE_IDLE;
        req->num_incomplete_objects = 0;
        kref_init(&req->kref);
        INIT_LIST_HEAD(&req->objects);
        spin_lock_init(&req->lock);
        init_waitqueue_head(&req->poll_wait);
        req->updating_count = 0;
        req->access_count = 0;

        *alloc_fd = fd;

        snprintf(req->debug_str, sizeof(req->debug_str), "%u:%d",
                 atomic_inc_return(&mdev->request_id), fd);
        dev_dbg(mdev->dev, "request: allocated %s\n", req->debug_str);

        fd_install(fd, filp);

        return 0;

err_fput:
        fput(filp);

err_put_fd:
        put_unused_fd(fd);

        return ret;
}

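/*
 * Drivers that need per-request state implement the req_alloc/req_free
 * pair checked above. A minimal sketch (struct my_request and the my_*
 * names are hypothetical):
 *
 *      struct my_request {
 *              struct media_request base;
 *              ... driver-private state ...
 *      };
 *
 *      static struct media_request *my_req_alloc(struct media_device *mdev)
 *      {
 *              struct my_request *mreq = kzalloc(sizeof(*mreq), GFP_KERNEL);
 *
 *              return mreq ? &mreq->base : NULL;
 *      }
 *
 *      static void my_req_free(struct media_request *req)
 *      {
 *              kfree(container_of(req, struct my_request, base));
 *      }
 */
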
static void media_request_object_release(struct kref *kref)
{
        struct media_request_object *obj =
                container_of(kref, struct media_request_object, kref);
        struct media_request *req = obj->req;

        if (WARN_ON(req))
                media_request_object_unbind(obj);
        obj->ops->release(obj);
}

struct media_request_object *
media_request_object_find(struct media_request *req,
                          const struct media_request_object_ops *ops,
                          void *priv)
{
        struct media_request_object *obj;
        struct media_request_object *found = NULL;
        unsigned long flags;

        if (WARN_ON(!ops || !priv))
                return NULL;

        spin_lock_irqsave(&req->lock, flags);
        list_for_each_entry(obj, &req->objects, list) {
                if (obj->ops == ops && obj->priv == priv) {
                        media_request_object_get(obj);
                        found = obj;
                        break;
                }
        }
        spin_unlock_irqrestore(&req->lock, flags);
        return found;
}
EXPORT_SYMBOL_GPL(media_request_object_find);

void media_request_object_put(struct media_request_object *obj)
{
        kref_put(&obj->kref, media_request_object_release);
}
EXPORT_SYMBOL_GPL(media_request_object_put);

void media_request_object_init(struct media_request_object *obj)
{
        obj->ops = NULL;
        obj->req = NULL;
        obj->priv = NULL;
        obj->completed = false;
        INIT_LIST_HEAD(&obj->list);
        kref_init(&obj->kref);
}
EXPORT_SYMBOL_GPL(media_request_object_init);

int media_request_object_bind(struct media_request *req,
                              const struct media_request_object_ops *ops,
                              void *priv, bool is_buffer,
                              struct media_request_object *obj)
{
        unsigned long flags;
        int ret = -EBUSY;

        if (WARN_ON(!ops->release))
                return -EBADR;

        spin_lock_irqsave(&req->lock, flags);

        if (WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING))
                goto unlock;

        obj->req = req;
        obj->ops = ops;
        obj->priv = priv;

        if (is_buffer)
                list_add_tail(&obj->list, &req->objects);
        else
                list_add(&obj->list, &req->objects);
        req->num_incomplete_objects++;
        ret = 0;

unlock:
        spin_unlock_irqrestore(&req->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(media_request_object_bind);

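/*
 * Binding is only legal while the request is in the UPDATING state, which
 * callers enter via media_request_lock_for_update() from
 * include/media/media-request.h. Hypothetical caller sketch:
 *
 *      ret = media_request_lock_for_update(req);
 *      if (ret)
 *              return ret;
 *      media_request_object_init(&my_obj->obj);
 *      ret = media_request_object_bind(req, &my_obj_ops, my_priv,
 *                                      false, &my_obj->obj);
 *      media_request_unlock_for_update(req);
 */
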
void media_request_object_unbind(struct media_request_object *obj)
{
        struct media_request *req = obj->req;
        unsigned long flags;
        bool completed = false;

        if (WARN_ON(!req))
                return;

        spin_lock_irqsave(&req->lock, flags);
        list_del(&obj->list);
        obj->req = NULL;

        if (req->state == MEDIA_REQUEST_STATE_COMPLETE)
                goto unlock;

        if (WARN_ON(req->state == MEDIA_REQUEST_STATE_VALIDATING))
                goto unlock;

        if (req->state == MEDIA_REQUEST_STATE_CLEANING) {
                if (!obj->completed)
                        req->num_incomplete_objects--;
                goto unlock;
        }

        if (WARN_ON(!req->num_incomplete_objects))
                goto unlock;

        req->num_incomplete_objects--;
        if (req->state == MEDIA_REQUEST_STATE_QUEUED &&
            !req->num_incomplete_objects) {
                req->state = MEDIA_REQUEST_STATE_COMPLETE;
                completed = true;
                wake_up_interruptible_all(&req->poll_wait);
        }

unlock:
        spin_unlock_irqrestore(&req->lock, flags);
        if (obj->ops->unbind)
                obj->ops->unbind(obj);
        if (completed)
                media_request_put(req);
}
EXPORT_SYMBOL_GPL(media_request_object_unbind);

void media_request_object_complete(struct media_request_object *obj)
{
        struct media_request *req = obj->req;
        unsigned long flags;
        bool completed = false;

        spin_lock_irqsave(&req->lock, flags);
        if (WARN_ON(obj->completed))
                goto unlock;
        obj->completed = true;
        if (WARN_ON(!req->num_incomplete_objects) ||
            WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED))
                goto unlock;

        if (!--req->num_incomplete_objects) {
                req->state = MEDIA_REQUEST_STATE_COMPLETE;
                wake_up_interruptible_all(&req->poll_wait);
                completed = true;
        }
unlock:
        spin_unlock_irqrestore(&req->lock, flags);
        if (completed)
                media_request_put(req);
}
EXPORT_SYMBOL_GPL(media_request_object_complete);
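
/*
 * Informal object lifecycle as implemented by the helpers above: an object
 * is init'ed, bound to a request (which makes it count towards
 * num_incomplete_objects), completed by the driver once the hardware is
 * done with it (the last completion moves the request to COMPLETE and
 * wakes pollers), and finally unbound and put, either explicitly or by
 * media_request_clean() when the request is reinited or released.
 */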