/*
 * f_fs.c -- user mode file system API for USB composite function controllers
 *
 * Copyright (C) 2010 Samsung Electronics
 * Author: Michal Nazarewicz <mina86@mina86.com>
 *
 * Based on inode.c (GadgetFS) which was:
 * Copyright (C) 2003-2004 David Brownell
 * Copyright (C) 2003 Agilent Technologies
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
/* #define VERBOSE_DEBUG */

#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/export.h>
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/uio.h>
#include <asm/unaligned.h>

#include <linux/usb/composite.h>
#include <linux/usb/functionfs.h>

#include <linux/aio.h>
#include <linux/mmu_context.h>
#include <linux/poll.h>
#include <linux/eventfd.h>

#include "u_os_desc.h"
#define FUNCTIONFS_MAGIC	0xa647361 /* Chosen by an honest dice roll ;) */
/* Reference counter handling */
static void ffs_data_get(struct ffs_data *ffs);
static void ffs_data_put(struct ffs_data *ffs);
/* Creates new ffs_data object. */
static struct ffs_data *__must_check ffs_data_new(void) __attribute__((malloc));

/* Opened counter handling. */
static void ffs_data_opened(struct ffs_data *ffs);
static void ffs_data_closed(struct ffs_data *ffs);

/* Called with ffs->mutex held; take over ownership of data. */
static int __must_check
__ffs_data_got_descs(struct ffs_data *ffs, char *data, size_t len);
static int __must_check
__ffs_data_got_strings(struct ffs_data *ffs, char *data, size_t len);
/* The function structure ***************************************************/

	struct usb_configuration	*conf;
	struct usb_gadget		*gadget;

	short				*interfaces_nums;

	struct usb_function		function;

static struct ffs_function *ffs_func_from_usb(struct usb_function *f)
	return container_of(f, struct ffs_function, function);

static inline enum ffs_setup_state
ffs_setup_state_clear_cancelled(struct ffs_data *ffs)
	return (enum ffs_setup_state)
		cmpxchg(&ffs->setup_state, FFS_SETUP_CANCELLED, FFS_NO_SETUP);
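/*
 * Note: the cmpxchg() above returns the setup state that was observed; as a
 * side effect, a FFS_SETUP_CANCELLED state is atomically reset to
 * FFS_NO_SETUP, so the caller both reads and (conditionally) clears the
 * cancelled state in one step.
 */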
static void ffs_func_eps_disable(struct ffs_function *func);
static int __must_check ffs_func_eps_enable(struct ffs_function *func);

static int ffs_func_bind(struct usb_configuration *,
			 struct usb_function *);
static int ffs_func_set_alt(struct usb_function *, unsigned, unsigned);
static void ffs_func_disable(struct usb_function *);
static int ffs_func_setup(struct usb_function *,
			  const struct usb_ctrlrequest *);
static void ffs_func_suspend(struct usb_function *);
static void ffs_func_resume(struct usb_function *);

static int ffs_func_revmap_ep(struct ffs_function *func, u8 num);
static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf);
/* The endpoints structures *************************************************/

	struct usb_ep			*ep;	/* P: ffs->eps_lock */
	struct usb_request		*req;	/* P: epfile->mutex */

	/* [0]: full speed, [1]: high speed, [2]: super speed */
	struct usb_endpoint_descriptor	*descs[3];

	int				status;	/* P: epfile->mutex */

	/* Protects ep->ep and ep->req. */

	wait_queue_head_t		wait;

	struct ffs_data			*ffs;
	struct ffs_ep			*ep;	/* P: ffs->eps_lock */

	struct dentry			*dentry;

	unsigned char			in;	/* P: ffs->eps_lock */
	unsigned char			isoc;	/* P: ffs->eps_lock */
/* ffs_io_data structure ***************************************************/

	struct iov_iter data;

	struct mm_struct *mm;
	struct work_struct work;

	struct usb_request *req;

	struct ffs_data *ffs;

struct ffs_desc_helper {
	struct ffs_data *ffs;
	unsigned interfaces_count;
static int __must_check ffs_epfiles_create(struct ffs_data *ffs);
static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count);

static struct dentry *
ffs_sb_create_file(struct super_block *sb, const char *name, void *data,
		   const struct file_operations *fops);

/* Devices management *******************************************************/

DEFINE_MUTEX(ffs_lock);
EXPORT_SYMBOL_GPL(ffs_lock);

static struct ffs_dev *_ffs_find_dev(const char *name);
static struct ffs_dev *_ffs_alloc_dev(void);
static int _ffs_name_dev(struct ffs_dev *dev, const char *name);
static void _ffs_free_dev(struct ffs_dev *dev);
static void *ffs_acquire_dev(const char *dev_name);
static void ffs_release_dev(struct ffs_data *ffs_data);
static int ffs_ready(struct ffs_data *ffs);
static void ffs_closed(struct ffs_data *ffs);

/* Misc helper functions ****************************************************/

static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
	__attribute__((warn_unused_result, nonnull));
static char *ffs_prepare_buffer(const char __user *buf, size_t len)
	__attribute__((warn_unused_result, nonnull));
/* Control file aka ep0 *****************************************************/

static void ffs_ep0_complete(struct usb_ep *ep, struct usb_request *req)
	struct ffs_data *ffs = req->context;

	complete_all(&ffs->ep0req_completion);

static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
	struct usb_request *req = ffs->ep0req;

	req->zero     = len < le16_to_cpu(ffs->ev.setup.wLength);

	spin_unlock_irq(&ffs->ev.waitq.lock);

	/*
	 * The UDC layer requires us to provide a buffer even for ZLP, but
	 * should not use it at all.  Let's provide a poisoned pointer to
	 * catch a possible bug in the driver.
	 */
	if (req->buf == NULL)
		req->buf = (void *)0xDEADBABE;

	reinit_completion(&ffs->ep0req_completion);

	ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC);
	if (unlikely(ret < 0))

	ret = wait_for_completion_interruptible(&ffs->ep0req_completion);
		usb_ep_dequeue(ffs->gadget->ep0, req);

	ffs->setup_state = FFS_NO_SETUP;
	return req->status ? req->status : req->actual;
static int __ffs_ep0_stall(struct ffs_data *ffs)
	if (ffs->ev.can_stall) {
		pr_vdebug("ep0 stall\n");
		usb_ep_set_halt(ffs->gadget->ep0);
		ffs->setup_state = FFS_NO_SETUP;

		pr_debug("bogus ep0 stall!\n");
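/*
 * For illustration: writes to ep0 drive a small state machine.  A userspace
 * daemon typically performs roughly the following sequence (buffer names are
 * placeholders, not part of this driver):
 *
 *	write(ep0, descriptors_blob, descriptors_len);	// FFS_READ_DESCRIPTORS
 *	write(ep0, strings_blob, strings_len);		// FFS_READ_STRINGS
 *	// state becomes FFS_ACTIVE, the epN files appear, ffs_ready() runs;
 *	// later writes/reads on ep0 answer the data/status stage of setups
 */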
static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
			     size_t len, loff_t *ptr)
	struct ffs_data *ffs = file->private_data;

	/* Fast check if setup was canceled */
	if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)

	ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
	if (unlikely(ret < 0))

	switch (ffs->state) {
	case FFS_READ_DESCRIPTORS:
	case FFS_READ_STRINGS:
		if (unlikely(len < 16)) {

		data = ffs_prepare_buffer(buf, len);

		if (ffs->state == FFS_READ_DESCRIPTORS) {
			pr_info("read descriptors\n");
			ret = __ffs_data_got_descs(ffs, data, len);
			if (unlikely(ret < 0))

			ffs->state = FFS_READ_STRINGS;
		} else {
			pr_info("read strings\n");
			ret = __ffs_data_got_strings(ffs, data, len);
			if (unlikely(ret < 0))

			ret = ffs_epfiles_create(ffs);
				ffs->state = FFS_CLOSING;

			ffs->state = FFS_ACTIVE;
			mutex_unlock(&ffs->mutex);

			ret = ffs_ready(ffs);
			if (unlikely(ret < 0)) {
				ffs->state = FFS_CLOSING;

		/*
		 * We're called from user space, we can use _irq
		 * rather than _irqsave.
		 */
		spin_lock_irq(&ffs->ev.waitq.lock);
		switch (ffs_setup_state_clear_cancelled(ffs)) {
		case FFS_SETUP_CANCELLED:

		case FFS_SETUP_PENDING:

		/* FFS_SETUP_PENDING */
		if (!(ffs->ev.setup.bRequestType & USB_DIR_IN)) {
			spin_unlock_irq(&ffs->ev.waitq.lock);
			ret = __ffs_ep0_stall(ffs);

		/* FFS_SETUP_PENDING and not stall */
		len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));

		spin_unlock_irq(&ffs->ev.waitq.lock);

		data = ffs_prepare_buffer(buf, len);

		spin_lock_irq(&ffs->ev.waitq.lock);

		/*
		 * We are guaranteed to be still in FFS_ACTIVE state
		 * but the state of setup could have changed from
		 * FFS_SETUP_PENDING to FFS_SETUP_CANCELLED so we need
		 * to check for that.  If that happened we copied data
		 * from user space in vain but it's unlikely.
		 *
		 * For sure we are not in FFS_NO_SETUP since this is
		 * the only place the FFS_SETUP_PENDING -> FFS_NO_SETUP
		 * transition can be performed and it's protected by
		 * the mutex.
		 */
		if (ffs_setup_state_clear_cancelled(ffs) ==
		    FFS_SETUP_CANCELLED) {

			spin_unlock_irq(&ffs->ev.waitq.lock);

		/* unlocks spinlock */
		ret = __ffs_ep0_queue_wait(ffs, data, len);

	mutex_unlock(&ffs->mutex);
/* Called with ffs->ev.waitq.lock and ffs->mutex held, both released on exit. */
static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf,
				     size_t n)
	/*
	 * n cannot be bigger than ffs->ev.count, which cannot be bigger than
	 * size of ffs->ev.types array (which is four) so that's how much space
	 * we reserve.
	 */
	struct usb_functionfs_event events[ARRAY_SIZE(ffs->ev.types)];
	const size_t size = n * sizeof *events;

	memset(events, 0, size);

		events[i].type = ffs->ev.types[i];
		if (events[i].type == FUNCTIONFS_SETUP) {
			events[i].u.setup = ffs->ev.setup;
			ffs->setup_state = FFS_SETUP_PENDING;

		memmove(ffs->ev.types, ffs->ev.types + n,
			ffs->ev.count * sizeof *ffs->ev.types);

	spin_unlock_irq(&ffs->ev.waitq.lock);
	mutex_unlock(&ffs->mutex);

	return unlikely(copy_to_user(buf, events, size)) ? -EFAULT : size;
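/*
 * Illustrative usage (ep0_fd is a placeholder name): userspace learns about
 * bind/enable/disable/setup/suspend/resume by reading ep0; each read returns
 * one or more struct usb_functionfs_event records, e.g.:
 *
 *	struct usb_functionfs_event event;
 *	read(ep0_fd, &event, sizeof(event));
 *	if (event.type == FUNCTIONFS_SETUP)
 *		;	// answer with a read/write on ep0, per bRequestType
 */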
static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
			    size_t len, loff_t *ptr)
	struct ffs_data *ffs = file->private_data;

	/* Fast check if setup was canceled */
	if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)

	ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
	if (unlikely(ret < 0))

	if (ffs->state != FFS_ACTIVE) {

	/*
	 * We're called from user space, we can use _irq rather than
	 * _irqsave.
	 */
	spin_lock_irq(&ffs->ev.waitq.lock);

	switch (ffs_setup_state_clear_cancelled(ffs)) {
	case FFS_SETUP_CANCELLED:

		n = len / sizeof(struct usb_functionfs_event);

		if ((file->f_flags & O_NONBLOCK) && !ffs->ev.count) {

		if (wait_event_interruptible_exclusive_locked_irq(ffs->ev.waitq,

		return __ffs_ep0_read_events(ffs, buf,
					     min(n, (size_t)ffs->ev.count));

	case FFS_SETUP_PENDING:
		if (ffs->ev.setup.bRequestType & USB_DIR_IN) {
			spin_unlock_irq(&ffs->ev.waitq.lock);
			ret = __ffs_ep0_stall(ffs);

		len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));

		spin_unlock_irq(&ffs->ev.waitq.lock);

		data = kmalloc(len, GFP_KERNEL);
		if (unlikely(!data)) {

		spin_lock_irq(&ffs->ev.waitq.lock);

		/* See ffs_ep0_write() */
		if (ffs_setup_state_clear_cancelled(ffs) ==
		    FFS_SETUP_CANCELLED) {

		/* unlocks spinlock */
		ret = __ffs_ep0_queue_wait(ffs, data, len);
		if (likely(ret > 0) && unlikely(copy_to_user(buf, data, len)))

	spin_unlock_irq(&ffs->ev.waitq.lock);

	mutex_unlock(&ffs->mutex);
static int ffs_ep0_open(struct inode *inode, struct file *file)
	struct ffs_data *ffs = inode->i_private;

	if (unlikely(ffs->state == FFS_CLOSING))

	file->private_data = ffs;
	ffs_data_opened(ffs);

static int ffs_ep0_release(struct inode *inode, struct file *file)
	struct ffs_data *ffs = file->private_data;

	ffs_data_closed(ffs);
static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
	struct ffs_data *ffs = file->private_data;
	struct usb_gadget *gadget = ffs->gadget;

	if (code == FUNCTIONFS_INTERFACE_REVMAP) {
		struct ffs_function *func = ffs->func;
		ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV;
	} else if (gadget && gadget->ops->ioctl) {
		ret = gadget->ops->ioctl(gadget, code, value);
static unsigned int ffs_ep0_poll(struct file *file, poll_table *wait)
	struct ffs_data *ffs = file->private_data;
	unsigned int mask = POLLWRNORM;

	poll_wait(file, &ffs->ev.waitq, wait);

	ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
	if (unlikely(ret < 0))

	switch (ffs->state) {
	case FFS_READ_DESCRIPTORS:
	case FFS_READ_STRINGS:

		switch (ffs->setup_state) {

		case FFS_SETUP_PENDING:
		case FFS_SETUP_CANCELLED:
			mask |= (POLLIN | POLLOUT);

	case FFS_DEACTIVATED:

	mutex_unlock(&ffs->mutex);
static const struct file_operations ffs_ep0_operations = {

	.open =		ffs_ep0_open,
	.write =	ffs_ep0_write,
	.read =		ffs_ep0_read,
	.release =	ffs_ep0_release,
	.unlocked_ioctl =	ffs_ep0_ioctl,
	.poll =		ffs_ep0_poll,
/* "Normal" endpoints operations ********************************************/

static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
	if (likely(req->context)) {
		struct ffs_ep *ep = _ep->driver_data;
		ep->status = req->status ? req->status : req->actual;
		complete(req->context);

static void ffs_user_copy_worker(struct work_struct *work)
	struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
						   work);
	int ret = io_data->req->status ? io_data->req->status :
					 io_data->req->actual;
	bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;

	if (io_data->read && ret > 0) {
		mm_segment_t oldfs = get_fs();

		ret = copy_to_iter(io_data->buf, ret, &io_data->data);
		if (ret != io_data->req->actual && iov_iter_count(&io_data->data))

		unuse_mm(io_data->mm);

	io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);

	if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
		eventfd_signal(io_data->ffs->ffs_eventfd, 1);

	usb_ep_free_request(io_data->ep, io_data->req);

		kfree(io_data->to_free);

static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
					 struct usb_request *req)
	struct ffs_io_data *io_data = req->context;

	INIT_WORK(&io_data->work, ffs_user_copy_worker);
	schedule_work(&io_data->work);
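/*
 * The async completion runs in interrupt context, so copying read data back
 * into the submitter's iovec is deferred to ffs_user_copy_worker() above,
 * which can sleep and (via the saved io_data->mm) temporarily borrow the
 * submitting process's address space before calling ki_complete().
 */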
static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
	struct ffs_epfile *epfile = file->private_data;
	ssize_t ret, data_len = -EINVAL;

	/* Are we still active? */
	if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) {

	/* Wait for endpoint to be enabled */
		if (file->f_flags & O_NONBLOCK) {

		ret = wait_event_interruptible(epfile->wait, (ep = epfile->ep));

	halt = (!io_data->read == !epfile->in);
	if (halt && epfile->isoc) {

	/* Allocate & copy */
		/*
		 * If we _do_ wait above, the epfile->ffs->gadget might be NULL
		 * before the waiting completes, so do not assign to 'gadget'
		 * earlier.
		 */
		struct usb_gadget *gadget = epfile->ffs->gadget;

		spin_lock_irq(&epfile->ffs->eps_lock);
		/* In the meantime, endpoint got disabled or changed. */
		if (epfile->ep != ep) {
			spin_unlock_irq(&epfile->ffs->eps_lock);

		data_len = iov_iter_count(&io_data->data);
		/*
		 * Controller may require buffer size to be aligned to
		 * maxpacketsize of an out endpoint.
		 */
			data_len = usb_ep_align_maybe(gadget, ep->ep, data_len);
		spin_unlock_irq(&epfile->ffs->eps_lock);

		data = kmalloc(data_len, GFP_KERNEL);

		if (!io_data->read) {
			copied = copy_from_iter(data, data_len, &io_data->data);
			if (copied != data_len) {

	/* We will be using request */
	ret = ffs_mutex_lock(&epfile->mutex, file->f_flags & O_NONBLOCK);

	spin_lock_irq(&epfile->ffs->eps_lock);

	if (epfile->ep != ep) {
		/* In the meantime, endpoint got disabled or changed. */

		spin_unlock_irq(&epfile->ffs->eps_lock);

		if (likely(epfile->ep == ep) && !WARN_ON(!ep->ep))
			usb_ep_set_halt(ep->ep);
		spin_unlock_irq(&epfile->ffs->eps_lock);

		/* Fire the request */
		struct usb_request *req;

		/*
		 * Sanity check: even though data_len can't be used
		 * uninitialized at the time I write this comment, some
		 * compilers complain about this situation.
		 * In order to keep the code free of warnings, data_len is
		 * being initialized to -EINVAL during its declaration, which
		 * means we can no longer rely on the compiler to warn if a
		 * future change lets data_len be used uninitialized.
		 * For that reason, we're adding this redundant sanity check.
		 */
		if (unlikely(data_len == -EINVAL)) {
			WARN(1, "%s: data_len == -EINVAL\n", __func__);

			req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC);

			req->length = data_len;

			io_data->ep = ep->ep;
			io_data->ffs = epfile->ffs;

			req->context  = io_data;
			req->complete = ffs_epfile_async_io_complete;

			ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
				usb_ep_free_request(ep->ep, req);

			spin_unlock_irq(&epfile->ffs->eps_lock);

			DECLARE_COMPLETION_ONSTACK(done);

			req->length = data_len;

			req->context  = &done;
			req->complete = ffs_epfile_io_complete;

			ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);

			spin_unlock_irq(&epfile->ffs->eps_lock);

			if (unlikely(ret < 0)) {
			    wait_for_completion_interruptible(&done))) {
				usb_ep_dequeue(ep->ep, req);

				/*
				 * XXX We may end up silently dropping data
				 * here.  Since data_len (i.e. req->length) may
				 * be bigger than len (after being rounded up
				 * to maxpacketsize), we may end up with more
				 * data than user space has space for.
				 */
			if (io_data->read && ret > 0) {
				ret = copy_to_iter(data, ret, &io_data->data);

	mutex_unlock(&epfile->mutex);

	spin_unlock_irq(&epfile->ffs->eps_lock);
	mutex_unlock(&epfile->mutex);
ffs_epfile_open(struct inode *inode, struct file *file)
	struct ffs_epfile *epfile = inode->i_private;

	if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))

	file->private_data = epfile;
	ffs_data_opened(epfile->ffs);
static int ffs_aio_cancel(struct kiocb *kiocb)
	struct ffs_io_data *io_data = kiocb->private;
	struct ffs_epfile *epfile = kiocb->ki_filp->private_data;

	spin_lock_irq(&epfile->ffs->eps_lock);

	if (likely(io_data && io_data->ep && io_data->req))
		value = usb_ep_dequeue(io_data->ep, io_data->req);

	spin_unlock_irq(&epfile->ffs->eps_lock);
static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
	struct ffs_io_data io_data, *p = &io_data;

	if (!is_sync_kiocb(kiocb)) {
		p = kmalloc(sizeof(io_data), GFP_KERNEL);

	kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);

	res = ffs_epfile_io(kiocb->ki_filp, p);
	if (res == -EIOCBQUEUED)
ffs_epfile_read_iter(struct kiocb
*kiocb
, struct iov_iter
*to
)
945 struct ffs_io_data io_data
, *p
= &io_data
;
950 if (!is_sync_kiocb(kiocb
)) {
951 p
= kmalloc(sizeof(io_data
), GFP_KERNEL
);
962 p
->to_free
= dup_iter(&p
->data
, to
, GFP_KERNEL
);
976 kiocb_set_cancel_fn(kiocb
, ffs_aio_cancel
);
978 res
= ffs_epfile_io(kiocb
->ki_filp
, p
);
979 if (res
== -EIOCBQUEUED
)
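/*
 * For asynchronous reads the iovec is duplicated (dup_iter() above) because
 * the original iov_iter lives on the submitter's stack and is gone by the
 * time ffs_user_copy_worker() copies data back; p->to_free keeps that copy
 * around so the worker can kfree() it once the completion has run.
 */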
ffs_epfile_release(struct inode *inode, struct file *file)
	struct ffs_epfile *epfile = inode->i_private;

	ffs_data_closed(epfile->ffs);
static long ffs_epfile_ioctl(struct file *file, unsigned code,
			     unsigned long value)
	struct ffs_epfile *epfile = file->private_data;

	if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))

	spin_lock_irq(&epfile->ffs->eps_lock);
	if (likely(epfile->ep)) {
		case FUNCTIONFS_FIFO_STATUS:
			ret = usb_ep_fifo_status(epfile->ep->ep);

		case FUNCTIONFS_FIFO_FLUSH:
			usb_ep_fifo_flush(epfile->ep->ep);

		case FUNCTIONFS_CLEAR_HALT:
			ret = usb_ep_clear_halt(epfile->ep->ep);

		case FUNCTIONFS_ENDPOINT_REVMAP:
			ret = epfile->ep->num;

		case FUNCTIONFS_ENDPOINT_DESC:
			struct usb_endpoint_descriptor *desc;

			switch (epfile->ffs->gadget->speed) {
			case USB_SPEED_SUPER:

			case USB_SPEED_HIGH:

			desc = epfile->ep->descs[desc_idx];

			spin_unlock_irq(&epfile->ffs->eps_lock);
			ret = copy_to_user((void *)value, desc, sizeof(*desc));

	spin_unlock_irq(&epfile->ffs->eps_lock);
static const struct file_operations ffs_epfile_operations = {
	.llseek =	no_llseek,

	.open =		ffs_epfile_open,
	.write_iter =	ffs_epfile_write_iter,
	.read_iter =	ffs_epfile_read_iter,
	.release =	ffs_epfile_release,
	.unlocked_ioctl =	ffs_epfile_ioctl,
/* File system and super block operations ***********************************/

/*
 * Mounting the file system creates a controller file, used first for
 * function configuration and later for event monitoring.
 */
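/*
 * A typical lifecycle, as seen from userspace (sketch; "adb" and the mount
 * point are placeholder names):
 *
 *	mkdir -p /dev/usb-ffs/adb
 *	mount -t functionfs adb /dev/usb-ffs/adb
 *	# open ep0, write the descriptors blob, then the strings blob;
 *	# ep1, ep2, ... then appear next to ep0 and the gadget can be bound
 */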
static struct inode *__must_check
ffs_sb_make_inode(struct super_block *sb, void *data,
		  const struct file_operations *fops,
		  const struct inode_operations *iops,
		  struct ffs_file_perms *perms)
	struct inode *inode;

	inode = new_inode(sb);

	if (likely(inode)) {
		struct timespec current_time = CURRENT_TIME;

		inode->i_ino	 = get_next_ino();
		inode->i_mode    = perms->mode;
		inode->i_uid     = perms->uid;
		inode->i_gid     = perms->gid;
		inode->i_atime   = current_time;
		inode->i_mtime   = current_time;
		inode->i_ctime   = current_time;
		inode->i_private = data;

			inode->i_fop = fops;
/* Create "regular" file */
static struct dentry *ffs_sb_create_file(struct super_block *sb,
					 const char *name, void *data,
					 const struct file_operations *fops)
	struct ffs_data	*ffs = sb->s_fs_info;
	struct dentry	*dentry;
	struct inode	*inode;

	dentry = d_alloc_name(sb->s_root, name);
	if (unlikely(!dentry))

	inode = ffs_sb_make_inode(sb, data, fops, NULL, &ffs->file_perms);
	if (unlikely(!inode)) {

	d_add(dentry, inode);
static const struct super_operations ffs_sb_operations = {
	.statfs =	simple_statfs,
	.drop_inode =	generic_delete_inode,

struct ffs_sb_fill_data {
	struct ffs_file_perms perms;

	const char *dev_name;

	struct ffs_data *ffs_data;
static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
	struct ffs_sb_fill_data *data = _data;
	struct inode	*inode;
	struct ffs_data	*ffs = data->ffs_data;

	data->ffs_data       = NULL;
	sb->s_fs_info        = ffs;
	sb->s_blocksize      = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic          = FUNCTIONFS_MAGIC;
	sb->s_op             = &ffs_sb_operations;
	sb->s_time_gran      = 1;

	data->perms.mode = data->root_mode;
	inode = ffs_sb_make_inode(sb, NULL,
				  &simple_dir_operations,
				  &simple_dir_inode_operations,
	sb->s_root = d_make_root(inode);
	if (unlikely(!sb->s_root))

	if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs,
					 &ffs_ep0_operations)))
static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
	if (!opts || !*opts)

		unsigned long value;

		comma = strchr(opts, ',');

		eq = strchr(opts, '=');
		if (unlikely(!eq)) {
			pr_err("'=' missing in %s\n", opts);

		if (kstrtoul(eq + 1, 0, &value)) {
			pr_err("%s: invalid value: %s\n", opts, eq + 1);

		/* Interpret option */
		switch (eq - opts) {
			if (!memcmp(opts, "no_disconnect", 13))
				data->no_disconnect = !!value;

			if (!memcmp(opts, "rmode", 5))
				data->root_mode  = (value & 0555) | S_IFDIR;
			else if (!memcmp(opts, "fmode", 5))
				data->perms.mode = (value & 0666) | S_IFREG;

			if (!memcmp(opts, "mode", 4)) {
				data->root_mode  = (value & 0555) | S_IFDIR;
				data->perms.mode = (value & 0666) | S_IFREG;

			if (!memcmp(opts, "uid", 3)) {
				data->perms.uid = make_kuid(current_user_ns(), value);
				if (!uid_valid(data->perms.uid)) {
					pr_err("%s: unmapped value: %lu\n", opts, value);
			} else if (!memcmp(opts, "gid", 3)) {
				data->perms.gid = make_kgid(current_user_ns(), value);
				if (!gid_valid(data->perms.gid)) {
					pr_err("%s: unmapped value: %lu\n", opts, value);

			pr_err("%s: invalid option\n", opts);

		/* Next iteration */
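/*
 * Recognized mount options (values parsed by ffs_fs_parse_opts() above):
 * no_disconnect, rmode, fmode, mode, uid and gid.  For example, a plausible
 * invocation could be:
 *
 *	mount -t functionfs -o uid=1000,gid=1000,mode=0660,no_disconnect=1 \
 *		adb /dev/usb-ffs/adb
 *
 * ("adb" and the mount point above are placeholder names.)
 */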
/* "mount -t functionfs dev_name /dev/function" ends up here */

static struct dentry *
ffs_fs_mount(struct file_system_type *t, int flags,
	     const char *dev_name, void *opts)
	struct ffs_sb_fill_data data = {
			.mode = S_IFREG | 0600,
			.uid = GLOBAL_ROOT_UID,
			.gid = GLOBAL_ROOT_GID,
		.root_mode = S_IFDIR | 0500,
		.no_disconnect = false,
	struct ffs_data	*ffs;

	ret = ffs_fs_parse_opts(&data, opts);
	if (unlikely(ret < 0))
		return ERR_PTR(ret);

	ffs = ffs_data_new();
		return ERR_PTR(-ENOMEM);
	ffs->file_perms = data.perms;
	ffs->no_disconnect = data.no_disconnect;

	ffs->dev_name = kstrdup(dev_name, GFP_KERNEL);
	if (unlikely(!ffs->dev_name)) {
		return ERR_PTR(-ENOMEM);

	ffs_dev = ffs_acquire_dev(dev_name);
	if (IS_ERR(ffs_dev)) {
		return ERR_CAST(ffs_dev);
	ffs->private_data = ffs_dev;
	data.ffs_data = ffs;

	rv = mount_nodev(t, flags, &data, ffs_sb_fill);
	if (IS_ERR(rv) && data.ffs_data) {
		ffs_release_dev(data.ffs_data);
		ffs_data_put(data.ffs_data);

ffs_fs_kill_sb(struct super_block *sb)
	kill_litter_super(sb);
	if (sb->s_fs_info) {
		ffs_release_dev(sb->s_fs_info);
		ffs_data_closed(sb->s_fs_info);

static struct file_system_type ffs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "functionfs",
	.mount		= ffs_fs_mount,
	.kill_sb	= ffs_fs_kill_sb,
MODULE_ALIAS_FS("functionfs");
/* Driver's main init/cleanup functions *************************************/

static int functionfs_init(void)
	ret = register_filesystem(&ffs_fs_type);
		pr_info("file system registered\n");
		pr_err("failed registering file system (%d)\n", ret);

static void functionfs_cleanup(void)
	pr_info("unloading\n");
	unregister_filesystem(&ffs_fs_type);
/* ffs_data and ffs_function construction and destruction code **************/

static void ffs_data_clear(struct ffs_data *ffs);
static void ffs_data_reset(struct ffs_data *ffs);

static void ffs_data_get(struct ffs_data *ffs)
	atomic_inc(&ffs->ref);

static void ffs_data_opened(struct ffs_data *ffs)
	atomic_inc(&ffs->ref);
	if (atomic_add_return(1, &ffs->opened) == 1 &&
	    ffs->state == FFS_DEACTIVATED) {
		ffs->state = FFS_CLOSING;
		ffs_data_reset(ffs);

static void ffs_data_put(struct ffs_data *ffs)
	if (unlikely(atomic_dec_and_test(&ffs->ref))) {
		pr_info("%s(): freeing\n", __func__);
		ffs_data_clear(ffs);
		BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
		       waitqueue_active(&ffs->ep0req_completion.wait));
		kfree(ffs->dev_name);

static void ffs_data_closed(struct ffs_data *ffs)
	if (atomic_dec_and_test(&ffs->opened)) {
		if (ffs->no_disconnect) {
			ffs->state = FFS_DEACTIVATED;
				ffs_epfiles_destroy(ffs->epfiles,
				ffs->epfiles = NULL;
			if (ffs->setup_state == FFS_SETUP_PENDING)
				__ffs_ep0_stall(ffs);
			ffs->state = FFS_CLOSING;
			ffs_data_reset(ffs);

	if (atomic_read(&ffs->opened) < 0) {
		ffs->state = FFS_CLOSING;
		ffs_data_reset(ffs);
static struct ffs_data *ffs_data_new(void)
	struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL);

	atomic_set(&ffs->ref, 1);
	atomic_set(&ffs->opened, 0);
	ffs->state = FFS_READ_DESCRIPTORS;
	mutex_init(&ffs->mutex);
	spin_lock_init(&ffs->eps_lock);
	init_waitqueue_head(&ffs->ev.waitq);
	init_completion(&ffs->ep0req_completion);

	/* XXX REVISIT need to update it in some places, or do we? */
	ffs->ev.can_stall = 1;

static void ffs_data_clear(struct ffs_data *ffs)
	BUG_ON(ffs->gadget);

		ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count);

	if (ffs->ffs_eventfd)
		eventfd_ctx_put(ffs->ffs_eventfd);

	kfree(ffs->raw_descs_data);
	kfree(ffs->raw_strings);
	kfree(ffs->stringtabs);

static void ffs_data_reset(struct ffs_data *ffs)
	ffs_data_clear(ffs);

	ffs->epfiles = NULL;
	ffs->raw_descs_data = NULL;
	ffs->raw_descs = NULL;
	ffs->raw_strings = NULL;
	ffs->stringtabs = NULL;

	ffs->raw_descs_length = 0;
	ffs->fs_descs_count = 0;
	ffs->hs_descs_count = 0;
	ffs->ss_descs_count = 0;

	ffs->strings_count = 0;
	ffs->interfaces_count = 0;

	ffs->state = FFS_READ_DESCRIPTORS;
	ffs->setup_state = FFS_NO_SETUP;
static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
	struct usb_gadget_strings **lang;

	if (WARN_ON(ffs->state != FFS_ACTIVE
		 || test_and_set_bit(FFS_FL_BOUND, &ffs->flags)))

	first_id = usb_string_ids_n(cdev, ffs->strings_count);
	if (unlikely(first_id < 0))

	ffs->ep0req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL);
	if (unlikely(!ffs->ep0req))
	ffs->ep0req->complete = ffs_ep0_complete;
	ffs->ep0req->context = ffs;

	lang = ffs->stringtabs;
		for (; *lang; ++lang) {
			struct usb_string *str = (*lang)->strings;
			for (; str->s; ++id, ++str)

	ffs->gadget = cdev->gadget;

static void functionfs_unbind(struct ffs_data *ffs)
	if (!WARN_ON(!ffs->gadget)) {
		usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req);

		clear_bit(FFS_FL_BOUND, &ffs->flags);
static int ffs_epfiles_create(struct ffs_data *ffs)
	struct ffs_epfile *epfile, *epfiles;

	count = ffs->eps_count;
	epfiles = kcalloc(count, sizeof(*epfiles), GFP_KERNEL);

	for (i = 1; i <= count; ++i, ++epfile) {
		mutex_init(&epfile->mutex);
		init_waitqueue_head(&epfile->wait);
		if (ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
			sprintf(epfile->name, "ep%02x", ffs->eps_addrmap[i]);
		else
			sprintf(epfile->name, "ep%u", i);
		epfile->dentry = ffs_sb_create_file(ffs->sb, epfile->name,
						    &ffs_epfile_operations);
		if (unlikely(!epfile->dentry)) {
			ffs_epfiles_destroy(epfiles, i - 1);

	ffs->epfiles = epfiles;
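/*
 * The files created above are named "ep1" .. "epN" by default; with
 * FUNCTIONFS_VIRTUAL_ADDR the name encodes the endpoint address from
 * ffs->eps_addrmap[] instead (e.g. "ep81" for IN endpoint 1).
 */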
static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
	struct ffs_epfile *epfile = epfiles;

	for (; count; --count, ++epfile) {
		BUG_ON(mutex_is_locked(&epfile->mutex) ||
		       waitqueue_active(&epfile->wait));
		if (epfile->dentry) {
			d_delete(epfile->dentry);
			dput(epfile->dentry);
			epfile->dentry = NULL;
static void ffs_func_eps_disable(struct ffs_function *func)
	struct ffs_ep *ep         = func->eps;
	struct ffs_epfile *epfile = func->ffs->epfiles;
	unsigned count            = func->ffs->eps_count;
	unsigned long flags;

	spin_lock_irqsave(&func->ffs->eps_lock, flags);
		/* pending requests get nuked */
			usb_ep_disable(ep->ep);

	spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
static int ffs_func_eps_enable(struct ffs_function *func)
	struct ffs_data *ffs      = func->ffs;
	struct ffs_ep *ep         = func->eps;
	struct ffs_epfile *epfile = ffs->epfiles;
	unsigned count            = ffs->eps_count;
	unsigned long flags;

	spin_lock_irqsave(&func->ffs->eps_lock, flags);
		struct usb_endpoint_descriptor *ds;
		struct usb_ss_ep_comp_descriptor *comp_desc = NULL;
		int needs_comp_desc = false;

		if (ffs->gadget->speed == USB_SPEED_SUPER) {
			needs_comp_desc = true;
		} else if (ffs->gadget->speed == USB_SPEED_HIGH)

		/* fall-back to lower speed if desc missing for current speed */
			ds = ep->descs[desc_idx];
		} while (!ds && --desc_idx >= 0);

		ep->ep->driver_data = ep;

		if (needs_comp_desc) {
			comp_desc = (struct usb_ss_ep_comp_descriptor *)(ds +
					USB_DT_ENDPOINT_SIZE);
			ep->ep->maxburst = comp_desc->bMaxBurst + 1;
			ep->ep->comp_desc = comp_desc;

		ret = usb_ep_enable(ep->ep);
			epfile->in = usb_endpoint_dir_in(ds);
			epfile->isoc = usb_endpoint_xfer_isoc(ds);

		wake_up(&epfile->wait);

	spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
/* Parsing and building descriptors and strings *****************************/

/*
 * This validates whether the memory pointed to by data is a valid USB
 * descriptor as well as recording how many interfaces, endpoints and
 * strings are required by the given configuration.  Returns the address
 * after the descriptor or NULL if data is invalid.
 */

enum ffs_entity_type {
	FFS_DESCRIPTOR, FFS_INTERFACE, FFS_STRING, FFS_ENDPOINT

enum ffs_os_desc_type {
	FFS_OS_DESC, FFS_OS_DESC_EXT_COMPAT, FFS_OS_DESC_EXT_PROP

typedef int (*ffs_entity_callback)(enum ffs_entity_type entity,
				   struct usb_descriptor_header *desc,

typedef int (*ffs_os_desc_callback)(enum ffs_os_desc_type entity,
				    struct usb_os_desc_header *h, void *data,
				    unsigned len, void *priv);
static int __must_check ffs_do_single_desc(char *data, unsigned len,
					   ffs_entity_callback entity,
	struct usb_descriptor_header *_ds = (void *)data;

	/* At least two bytes are required: length and type */
		pr_vdebug("descriptor too short\n");

	/* Do we have at least as many bytes as the descriptor takes? */
	length = _ds->bLength;
		pr_vdebug("descriptor longer than available data\n");

#define __entity_check_INTERFACE(val)  1
#define __entity_check_STRING(val)     (val)
#define __entity_check_ENDPOINT(val)   ((val) & USB_ENDPOINT_NUMBER_MASK)
#define __entity(type, val) do {					\
		pr_vdebug("entity " #type "(%02x)\n", (val));		\
		if (unlikely(!__entity_check_ ##type(val))) {		\
			pr_vdebug("invalid entity's value\n");		\
		ret = entity(FFS_ ##type, &val, _ds, priv);		\
		if (unlikely(ret < 0)) {				\
			pr_debug("entity " #type "(%02x); ret = %d\n",	\

	/* Parse descriptor depending on type. */
	switch (_ds->bDescriptorType) {
	case USB_DT_DEVICE_QUALIFIER:
		/* function can't have any of those */
		pr_vdebug("descriptor reserved for gadget: %d\n",
			  _ds->bDescriptorType);

	case USB_DT_INTERFACE: {
		struct usb_interface_descriptor *ds = (void *)_ds;
		pr_vdebug("interface descriptor\n");
		if (length != sizeof *ds)

		__entity(INTERFACE, ds->bInterfaceNumber);
			__entity(STRING, ds->iInterface);

	case USB_DT_ENDPOINT: {
		struct usb_endpoint_descriptor *ds = (void *)_ds;
		pr_vdebug("endpoint descriptor\n");
		if (length != USB_DT_ENDPOINT_SIZE &&
		    length != USB_DT_ENDPOINT_AUDIO_SIZE)
		__entity(ENDPOINT, ds->bEndpointAddress);

		pr_vdebug("hid descriptor\n");
		if (length != sizeof(struct hid_descriptor))

		if (length != sizeof(struct usb_otg_descriptor))

	case USB_DT_INTERFACE_ASSOCIATION: {
		struct usb_interface_assoc_descriptor *ds = (void *)_ds;
		pr_vdebug("interface association descriptor\n");
		if (length != sizeof *ds)
			__entity(STRING, ds->iFunction);

	case USB_DT_SS_ENDPOINT_COMP:
		pr_vdebug("EP SS companion descriptor\n");
		if (length != sizeof(struct usb_ss_ep_comp_descriptor))

	case USB_DT_OTHER_SPEED_CONFIG:
	case USB_DT_INTERFACE_POWER:
	case USB_DT_SECURITY:
	case USB_DT_CS_RADIO_CONTROL:
		pr_vdebug("unimplemented descriptor: %d\n", _ds->bDescriptorType);

		/* We should never be here */
		pr_vdebug("unknown descriptor: %d\n", _ds->bDescriptorType);

		pr_vdebug("invalid length: %d (descriptor %d)\n",
			  _ds->bLength, _ds->bDescriptorType);

#undef __entity_check_DESCRIPTOR
#undef __entity_check_INTERFACE
#undef __entity_check_STRING
#undef __entity_check_ENDPOINT
static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
				     ffs_entity_callback entity, void *priv)
	const unsigned _len = len;
	unsigned long num = 0;

		/* Record "descriptor" entity */
		ret = entity(FFS_DESCRIPTOR, (u8 *)num, (void *)data, priv);
		if (unlikely(ret < 0)) {
			pr_debug("entity DESCRIPTOR(%02lx); ret = %d\n",

		ret = ffs_do_single_desc(data, len, entity, priv);
		if (unlikely(ret < 0)) {
			pr_debug("%s returns %d\n", __func__, ret);
static int __ffs_data_do_entity(enum ffs_entity_type type,
				u8 *valuep, struct usb_descriptor_header *desc,
	struct ffs_desc_helper *helper = priv;
	struct usb_endpoint_descriptor *d;

	case FFS_DESCRIPTOR:

		/*
		 * Interfaces are indexed from zero so if we
		 * encountered interface "n" then there are at least
		 */
		if (*valuep >= helper->interfaces_count)
			helper->interfaces_count = *valuep + 1;

		/*
		 * Strings are indexed from 1 (0 is magic ;) reserved
		 * for languages list or some such)
		 */
		if (*valuep > helper->ffs->strings_count)
			helper->ffs->strings_count = *valuep;

		helper->eps_count++;
		if (helper->eps_count >= 15)
		/* Check if descriptors for any speed were already parsed */
		if (!helper->ffs->eps_count && !helper->ffs->interfaces_count)
			helper->ffs->eps_addrmap[helper->eps_count] =
				d->bEndpointAddress;
		else if (helper->ffs->eps_addrmap[helper->eps_count] !=
			 d->bEndpointAddress)
static int __ffs_do_os_desc_header(enum ffs_os_desc_type *next_type,
				   struct usb_os_desc_header *desc)
	u16 bcd_version = le16_to_cpu(desc->bcdVersion);
	u16 w_index = le16_to_cpu(desc->wIndex);

	if (bcd_version != 1) {
		pr_vdebug("unsupported os descriptors version: %d",

		*next_type = FFS_OS_DESC_EXT_COMPAT;

		*next_type = FFS_OS_DESC_EXT_PROP;

		pr_vdebug("unsupported os descriptor type: %d", w_index);

	return sizeof(*desc);
/*
 * Process all extended compatibility/extended property descriptors
 * of a feature descriptor
 */
static int __must_check ffs_do_single_os_desc(char *data, unsigned len,
					      enum ffs_os_desc_type type,
					      ffs_os_desc_callback entity,
					      struct usb_os_desc_header *h)
	const unsigned _len = len;

	/* loop over all ext compat/ext prop descriptors */
	while (feature_count--) {
		ret = entity(type, h, data, len, priv);
		if (unlikely(ret < 0)) {
			pr_debug("bad OS descriptor, type: %d\n", type);
/* Process a number of complete Feature Descriptors (Ext Compat or Ext Prop) */
static int __must_check ffs_do_os_descs(unsigned count,
					char *data, unsigned len,
					ffs_os_desc_callback entity, void *priv)
	const unsigned _len = len;
	unsigned long num = 0;

	for (num = 0; num < count; ++num) {
		enum ffs_os_desc_type type;
		struct usb_os_desc_header *desc = (void *)data;

		if (len < sizeof(*desc))

		/*
		 * Record "descriptor" entity.
		 * Process dwLength, bcdVersion, wIndex, get b/wCount.
		 * Move the data pointer to the beginning of extended
		 * compatibilities proper or extended properties proper
		 * portions of the data.
		 */
		if (le32_to_cpu(desc->dwLength) > len)

		ret = __ffs_do_os_desc_header(&type, desc);
		if (unlikely(ret < 0)) {
			pr_debug("entity OS_DESCRIPTOR(%02lx); ret = %d\n",

		/*
		 * 16-bit hex "?? 00" Little Endian looks like 8-bit hex "??"
		 */
		feature_count = le16_to_cpu(desc->wCount);
		if (type == FFS_OS_DESC_EXT_COMPAT &&
		    (feature_count > 255 || desc->Reserved))

		/*
		 * Process all function/property descriptors
		 * of this Feature Descriptor.
		 */
		ret = ffs_do_single_os_desc(data, len, type,
					    feature_count, entity, priv, desc);
		if (unlikely(ret < 0)) {
			pr_debug("%s returns %d\n", __func__, ret);
/*
 * Validate contents of the buffer from userspace related to OS descriptors.
 */
static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
				 struct usb_os_desc_header *h, void *data,
				 unsigned len, void *priv)
	struct ffs_data *ffs = priv;

	case FFS_OS_DESC_EXT_COMPAT: {
		struct usb_ext_compat_desc *d = data;

		if (len < sizeof(*d) ||
		    d->bFirstInterfaceNumber >= ffs->interfaces_count ||

		for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
			if (d->Reserved2[i])

		length = sizeof(struct usb_ext_compat_desc);

	case FFS_OS_DESC_EXT_PROP: {
		struct usb_ext_prop_desc *d = data;

		if (len < sizeof(*d) || h->interface >= ffs->interfaces_count)

		length = le32_to_cpu(d->dwSize);

		type = le32_to_cpu(d->dwPropertyDataType);
		if (type < USB_EXT_PROP_UNICODE ||
		    type > USB_EXT_PROP_UNICODE_MULTI) {
			pr_vdebug("unsupported os descriptor property type: %d",

		pnl = le16_to_cpu(d->wPropertyNameLength);
		if (length < 14 + pnl) {
			pr_vdebug("invalid os descriptor length: %d pnl:%d (descriptor %d)\n",

		pdl = le32_to_cpu(*(u32 *)((u8 *)data + 10 + pnl));
		if (length != 14 + pnl + pdl) {
			pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n",
				  length, pnl, pdl, type);

		++ffs->ms_os_descs_ext_prop_count;
		/* property name reported to the host as "WCHAR"s */
		ffs->ms_os_descs_ext_prop_name_len += pnl * 2;
		ffs->ms_os_descs_ext_prop_data_len += pdl;

		pr_vdebug("unknown descriptor: %d\n", type);
static int __ffs_data_got_descs(struct ffs_data *ffs,
				char *const _data, size_t len)
	char *data = _data, *raw_descs;
	unsigned os_descs_count = 0, counts[3], flags;
	int ret = -EINVAL, i;
	struct ffs_desc_helper helper;

	if (get_unaligned_le32(data + 4) != len)

	switch (get_unaligned_le32(data)) {
	case FUNCTIONFS_DESCRIPTORS_MAGIC:
		flags = FUNCTIONFS_HAS_FS_DESC | FUNCTIONFS_HAS_HS_DESC;

	case FUNCTIONFS_DESCRIPTORS_MAGIC_V2:
		flags = get_unaligned_le32(data + 8);
		ffs->user_flags = flags;
		if (flags & ~(FUNCTIONFS_HAS_FS_DESC |
			      FUNCTIONFS_HAS_HS_DESC |
			      FUNCTIONFS_HAS_SS_DESC |
			      FUNCTIONFS_HAS_MS_OS_DESC |
			      FUNCTIONFS_VIRTUAL_ADDR |
			      FUNCTIONFS_EVENTFD)) {

	if (flags & FUNCTIONFS_EVENTFD) {
			eventfd_ctx_fdget((int)get_unaligned_le32(data));
		if (IS_ERR(ffs->ffs_eventfd)) {
			ret = PTR_ERR(ffs->ffs_eventfd);
			ffs->ffs_eventfd = NULL;

	/* Read fs_count, hs_count and ss_count (if present) */
	for (i = 0; i < 3; ++i) {
		if (!(flags & (1 << i))) {
		} else if (len < 4) {
		counts[i] = get_unaligned_le32(data);

	if (flags & (1 << i)) {
		os_descs_count = get_unaligned_le32(data);

	/* Read descriptors */
	for (i = 0; i < 3; ++i) {
		helper.interfaces_count = 0;
		helper.eps_count = 0;
		ret = ffs_do_descs(counts[i], data, len,
				   __ffs_data_do_entity, &helper);
		if (!ffs->eps_count && !ffs->interfaces_count) {
			ffs->eps_count = helper.eps_count;
			ffs->interfaces_count = helper.interfaces_count;
		if (ffs->eps_count != helper.eps_count) {
		if (ffs->interfaces_count != helper.interfaces_count) {

	if (os_descs_count) {
		ret = ffs_do_os_descs(os_descs_count, data, len,
				      __ffs_data_do_os_desc, ffs);

	if (raw_descs == data || len) {

	ffs->raw_descs_data	= _data;
	ffs->raw_descs		= raw_descs;
	ffs->raw_descs_length	= data - raw_descs;
	ffs->fs_descs_count	= counts[0];
	ffs->hs_descs_count	= counts[1];
	ffs->ss_descs_count	= counts[2];
	ffs->ms_os_descs_count	= os_descs_count;
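/*
 * Rough layout of the blob parsed above (per the FunctionFS uapi header;
 * fields are little-endian 32-bit unless stated otherwise):
 *
 *	magic     = FUNCTIONFS_DESCRIPTORS_MAGIC_V2
 *	length    = length of the whole blob
 *	flags     = FUNCTIONFS_HAS_{FS,HS,SS}_DESC | HAS_MS_OS_DESC |
 *		    VIRTUAL_ADDR | EVENTFD
 *	[eventfd] = present only if FUNCTIONFS_EVENTFD is set
 *	fs_count, hs_count, ss_count, os_count (only for the flags that are set)
 *	followed by the raw descriptors for each speed and the MS OS descriptors
 */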
static int __ffs_data_got_strings(struct ffs_data *ffs,
				  char *const _data, size_t len)
	u32 str_count, needed_count, lang_count;
	struct usb_gadget_strings **stringtabs, *t;
	struct usb_string *strings, *s;
	const char *data = _data;

	if (unlikely(len < 16 ||
		     get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
		     get_unaligned_le32(data + 4) != len))
	str_count  = get_unaligned_le32(data + 8);
	lang_count = get_unaligned_le32(data + 12);

	/* if one is zero the other must be zero */
	if (unlikely(!str_count != !lang_count))

	/* Do we have at least as many strings as descriptors need? */
	needed_count = ffs->strings_count;
	if (unlikely(str_count < needed_count))

	/*
	 * If we don't need any strings just return and free all
	 */
	if (!needed_count) {

	/* Allocate everything in one chunk so there's less maintenance. */
		vla_item(d, struct usb_gadget_strings *, stringtabs,
		vla_item(d, struct usb_gadget_strings, stringtab, lang_count);
		vla_item(d, struct usb_string, strings,
			 lang_count*(needed_count+1));

		char *vlabuf = kmalloc(vla_group_size(d), GFP_KERNEL);

		if (unlikely(!vlabuf)) {

		/* Initialize the VLA pointers */
		stringtabs = vla_ptr(vlabuf, d, stringtabs);
		t = vla_ptr(vlabuf, d, stringtab);
			*stringtabs++ = t++;

		/* stringtabs = vlabuf = d_stringtabs for later kfree */
		stringtabs = vla_ptr(vlabuf, d, stringtabs);
		t = vla_ptr(vlabuf, d, stringtab);
		s = vla_ptr(vlabuf, d, strings);

	/* For each language */
	do { /* lang_count > 0 so we can use do-while */
		unsigned needed = needed_count;

		if (unlikely(len < 3))
		t->language = get_unaligned_le16(data);

		/* For each string */
		do { /* str_count > 0 so we can use do-while */
			size_t length = strnlen(data, len);

			if (unlikely(length == len))

			/*
			 * User may provide more strings than we need,
			 * if that's the case we simply ignore the
			 */
			if (likely(needed)) {
				/*
				 * s->id will be set while adding
				 * function to configuration so for
				 * now just leave garbage here.
				 */

		} while (--str_count);

		s->id = 0;   /* terminator */

	} while (--lang_count);

	/* Some garbage left? */

	ffs->stringtabs = stringtabs;
	ffs->raw_strings = _data;
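/*
 * Rough layout of the strings blob parsed above (per the FunctionFS uapi
 * header): magic = FUNCTIONFS_STRINGS_MAGIC, length, str_count, lang_count,
 * followed by lang_count string tables, each a 16-bit language code plus
 * str_count NUL-terminated UTF-8 strings.
 */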
/* Events handling and management *******************************************/

static void __ffs_event_add(struct ffs_data *ffs,
			    enum usb_functionfs_event_type type)
	enum usb_functionfs_event_type rem_type1, rem_type2 = type;

	/*
	 * Abort any unhandled setup
	 *
	 * We do not need to worry about some cmpxchg() changing value
	 * of ffs->setup_state without holding the lock because when
	 * state is FFS_SETUP_PENDING cmpxchg() in several places in
	 * the source does nothing.
	 */
	if (ffs->setup_state == FFS_SETUP_PENDING)
		ffs->setup_state = FFS_SETUP_CANCELLED;

	/*
	 * Logic of this function guarantees that there are at most four pending
	 * events on the ffs->ev.types queue.  This is important because the queue
	 * has space for four elements only and __ffs_ep0_read_events function
	 * depends on that limit as well.  If more event types are added, those
	 * limits have to be revisited or guaranteed to still hold.
	 */
	case FUNCTIONFS_RESUME:
		rem_type2 = FUNCTIONFS_SUSPEND;
	case FUNCTIONFS_SUSPEND:
	case FUNCTIONFS_SETUP:
		/* Discard all similar events */

	case FUNCTIONFS_BIND:
	case FUNCTIONFS_UNBIND:
	case FUNCTIONFS_DISABLE:
	case FUNCTIONFS_ENABLE:
		/* Discard everything other than power management. */
		rem_type1 = FUNCTIONFS_SUSPEND;
		rem_type2 = FUNCTIONFS_RESUME;

		WARN(1, "%d: unknown event, this should not happen\n", type);

		u8 *ev = ffs->ev.types, *out = ev;
		unsigned n = ffs->ev.count;
		for (; n; --n, ++ev)
			if ((*ev == rem_type1 || *ev == rem_type2) == neg)

				pr_vdebug("purging event %d\n", *ev);
		ffs->ev.count = out - ffs->ev.types;

	pr_vdebug("adding event %d\n", type);
	ffs->ev.types[ffs->ev.count++] = type;
	wake_up_locked(&ffs->ev.waitq);
	if (ffs->ffs_eventfd)
		eventfd_signal(ffs->ffs_eventfd, 1);

static void ffs_event_add(struct ffs_data *ffs,
			  enum usb_functionfs_event_type type)
	unsigned long flags;
	spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
	__ffs_event_add(ffs, type);
	spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
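/*
 * Events queued here are the usb_functionfs_event types read back through
 * ep0: BIND/UNBIND, ENABLE/DISABLE, SETUP and SUSPEND/RESUME; the coalescing
 * rules above keep at most four of them pending at any time.
 */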
/* Bind/unbind USB function hooks *******************************************/

static int ffs_ep_addr2idx(struct ffs_data *ffs, u8 endpoint_address)
	for (i = 1; i < ARRAY_SIZE(ffs->eps_addrmap); ++i)
		if (ffs->eps_addrmap[i] == endpoint_address)

static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
				    struct usb_descriptor_header *desc,
	struct usb_endpoint_descriptor *ds = (void *)desc;
	struct ffs_function *func = priv;
	struct ffs_ep *ffs_ep;
	unsigned ep_desc_id;

	static const char *speed_names[] = { "full", "high", "super" };

	if (type != FFS_DESCRIPTOR)

	/*
	 * If ss_descriptors is not NULL, we are reading super speed
	 * descriptors; if hs_descriptors is not NULL, we are reading high
	 * speed descriptors; otherwise, we are reading full speed
	 * descriptors.
	 */
	if (func->function.ss_descriptors) {
		func->function.ss_descriptors[(long)valuep] = desc;
	} else if (func->function.hs_descriptors) {
		func->function.hs_descriptors[(long)valuep] = desc;
		func->function.fs_descriptors[(long)valuep] = desc;

	if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT)

	idx = ffs_ep_addr2idx(func->ffs, ds->bEndpointAddress) - 1;

	ffs_ep = func->eps + idx;

	if (unlikely(ffs_ep->descs[ep_desc_id])) {
		pr_err("two %sspeed descriptors for EP %d\n",
		       speed_names[ep_desc_id],
		       ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
	ffs_ep->descs[ep_desc_id] = ds;

	ffs_dump_mem(": Original ep desc", ds, ds->bLength);
		ds->bEndpointAddress = ffs_ep->descs[0]->bEndpointAddress;
		if (!ds->wMaxPacketSize)
			ds->wMaxPacketSize = ffs_ep->descs[0]->wMaxPacketSize;
		struct usb_request *req;

		u8 bEndpointAddress;

		/*
		 * We back up bEndpointAddress because autoconfig overwrites
		 * it with physical endpoint address.
		 */
		bEndpointAddress = ds->bEndpointAddress;
		pr_vdebug("autoconfig\n");
		ep = usb_ep_autoconfig(func->gadget, ds);
		ep->driver_data = func->eps + idx;

		req = usb_ep_alloc_request(ep, GFP_KERNEL);

		func->eps_revmap[ds->bEndpointAddress &
				 USB_ENDPOINT_NUMBER_MASK] = idx + 1;
		/*
		 * If we use virtual address mapping, we restore
		 * original bEndpointAddress value.
		 */
		if (func->ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
			ds->bEndpointAddress = bEndpointAddress;

	ffs_dump_mem(": Rewritten ep desc", ds, ds->bLength);
static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep,
				   struct usb_descriptor_header *desc,
	struct ffs_function *func = priv;

	case FFS_DESCRIPTOR:
		/* Handled in previous pass by __ffs_func_bind_do_descs() */

		if (func->interfaces_nums[idx] < 0) {
			int id = usb_interface_id(func->conf, &func->function);
			if (unlikely(id < 0))
			func->interfaces_nums[idx] = id;
		newValue = func->interfaces_nums[idx];

		/* String IDs are allocated when ffs_data is bound to cdev */
		newValue = func->ffs->stringtabs[0]->strings[*valuep - 1].id;

		/*
		 * USB_DT_ENDPOINT are handled in
		 * __ffs_func_bind_do_descs().
		 */
		if (desc->bDescriptorType == USB_DT_ENDPOINT)

		idx = (*valuep & USB_ENDPOINT_NUMBER_MASK) - 1;
		if (unlikely(!func->eps[idx].ep))

		struct usb_endpoint_descriptor **descs;
		descs = func->eps[idx].descs;
		newValue = descs[descs[0] ? 0 : 1]->bEndpointAddress;

	pr_vdebug("%02x -> %02x\n", *valuep, newValue);
static int __ffs_func_bind_do_os_desc(enum ffs_os_desc_type type,
				      struct usb_os_desc_header *h, void *data,
				      unsigned len, void *priv)
	struct ffs_function *func = priv;

	case FFS_OS_DESC_EXT_COMPAT: {
		struct usb_ext_compat_desc *desc = data;
		struct usb_os_desc_table *t;

		t = &func->function.os_desc_table[desc->bFirstInterfaceNumber];
		t->if_id = func->interfaces_nums[desc->bFirstInterfaceNumber];
		memcpy(t->os_desc->ext_compat_id, &desc->CompatibleID,
		       ARRAY_SIZE(desc->CompatibleID) +
		       ARRAY_SIZE(desc->SubCompatibleID));
		length = sizeof(*desc);

	case FFS_OS_DESC_EXT_PROP: {
		struct usb_ext_prop_desc *desc = data;
		struct usb_os_desc_table *t;
		struct usb_os_desc_ext_prop *ext_prop;
		char *ext_prop_name;
		char *ext_prop_data;

		t = &func->function.os_desc_table[h->interface];
		t->if_id = func->interfaces_nums[h->interface];

		ext_prop = func->ffs->ms_os_descs_ext_prop_avail;
		func->ffs->ms_os_descs_ext_prop_avail += sizeof(*ext_prop);

		ext_prop->type = le32_to_cpu(desc->dwPropertyDataType);
		ext_prop->name_len = le16_to_cpu(desc->wPropertyNameLength);
		ext_prop->data_len = le32_to_cpu(*(u32 *)
			usb_ext_prop_data_len_ptr(data, ext_prop->name_len));
		length = ext_prop->name_len + ext_prop->data_len + 14;

		ext_prop_name = func->ffs->ms_os_descs_ext_prop_name_avail;
		func->ffs->ms_os_descs_ext_prop_name_avail +=

		ext_prop_data = func->ffs->ms_os_descs_ext_prop_data_avail;
		func->ffs->ms_os_descs_ext_prop_data_avail +=
		memcpy(ext_prop_data,
		       usb_ext_prop_data_ptr(data, ext_prop->name_len),
		       ext_prop->data_len);
		/* unicode data reported to the host as "WCHAR"s */
		switch (ext_prop->type) {
		case USB_EXT_PROP_UNICODE:
		case USB_EXT_PROP_UNICODE_ENV:
		case USB_EXT_PROP_UNICODE_LINK:
		case USB_EXT_PROP_UNICODE_MULTI:
			ext_prop->data_len *= 2;
		ext_prop->data = ext_prop_data;

		memcpy(ext_prop_name, usb_ext_prop_name_ptr(data),
		       ext_prop->name_len);
		/* property name reported to the host as "WCHAR"s */
		ext_prop->name_len *= 2;
		ext_prop->name = ext_prop_name;

		t->os_desc->ext_prop_len +=
			ext_prop->name_len + ext_prop->data_len + 14;
		++t->os_desc->ext_prop_count;
		list_add_tail(&ext_prop->entry, &t->os_desc->ext_prop);

		pr_vdebug("unknown descriptor: %d\n", type);
*ffs_do_functionfs_bind(struct usb_function
*f
,
2709 struct usb_configuration
*c
)
2711 struct ffs_function
*func
= ffs_func_from_usb(f
);
2712 struct f_fs_opts
*ffs_opts
=
2713 container_of(f
->fi
, struct f_fs_opts
, func_inst
);
2719 * Legacy gadget triggers binding in functionfs_ready_callback,
2720 * which already uses locking; taking the same lock here would
2723 * Configfs-enabled gadgets however do need ffs_dev_lock.
2725 if (!ffs_opts
->no_configfs
)
2727 ret
= ffs_opts
->dev
->desc_ready
? 0 : -ENODEV
;
2728 func
->ffs
= ffs_opts
->dev
->ffs_data
;
2729 if (!ffs_opts
->no_configfs
)
2732 return ERR_PTR(ret
);
2735 func
->gadget
= c
->cdev
->gadget
;
2738 * in drivers/usb/gadget/configfs.c:configfs_composite_bind()
2739 * configurations are bound in sequence with list_for_each_entry,
2740 * in each configuration its functions are bound in sequence
2741 * with list_for_each_entry, so we assume no race condition
2742 * with regard to ffs_opts->bound access
2744 if (!ffs_opts
->refcnt
) {
2745 ret
= functionfs_bind(func
->ffs
, c
->cdev
);
2747 return ERR_PTR(ret
);
2750 func
->function
.strings
= func
->ffs
->stringtabs
;
2755 static int _ffs_func_bind(struct usb_configuration
*c
,
2756 struct usb_function
*f
)
2758 struct ffs_function
*func
= ffs_func_from_usb(f
);
2759 struct ffs_data
*ffs
= func
->ffs
;
2761 const int full
= !!func
->ffs
->fs_descs_count
;
2762 const int high
= !!func
->ffs
->hs_descs_count
;
2763 const int super
= !!func
->ffs
->ss_descs_count
;
2765 int fs_len
, hs_len
, ss_len
, ret
, i
;
2766 struct ffs_ep
*eps_ptr
;
2768 /* Make it a single chunk, less management later on */
2770 vla_item_with_sz(d
, struct ffs_ep
, eps
, ffs
->eps_count
);
2771 vla_item_with_sz(d
, struct usb_descriptor_header
*, fs_descs
,
2772 full
? ffs
->fs_descs_count
+ 1 : 0);
2773 vla_item_with_sz(d
, struct usb_descriptor_header
*, hs_descs
,
2774 high
? ffs
->hs_descs_count
+ 1 : 0);
2775 vla_item_with_sz(d
, struct usb_descriptor_header
*, ss_descs
,
2776 super
? ffs
->ss_descs_count
+ 1 : 0);
2777 vla_item_with_sz(d
, short, inums
, ffs
->interfaces_count
);
2778 vla_item_with_sz(d
, struct usb_os_desc_table
, os_desc_table
,
2779 c
->cdev
->use_os_string
? ffs
->interfaces_count
: 0);
2780 vla_item_with_sz(d
, char[16], ext_compat
,
2781 c
->cdev
->use_os_string
? ffs
->interfaces_count
: 0);
2782 vla_item_with_sz(d
, struct usb_os_desc
, os_desc
,
2783 c
->cdev
->use_os_string
? ffs
->interfaces_count
: 0);
2784 vla_item_with_sz(d
, struct usb_os_desc_ext_prop
, ext_prop
,
2785 ffs
->ms_os_descs_ext_prop_count
);
2786 vla_item_with_sz(d
, char, ext_prop_name
,
2787 ffs
->ms_os_descs_ext_prop_name_len
);
2788 vla_item_with_sz(d
, char, ext_prop_data
,
2789 ffs
->ms_os_descs_ext_prop_data_len
);
2790 vla_item_with_sz(d
, char, raw_descs
, ffs
->raw_descs_length
);
2795 /* Has descriptors only for speeds gadget does not support */
2796 if (unlikely(!(full
| high
| super
)))
2799 /* Allocate a single chunk, less management later on */
2800 vlabuf
= kzalloc(vla_group_size(d
), GFP_KERNEL
);
2801 if (unlikely(!vlabuf
))
2804 ffs
->ms_os_descs_ext_prop_avail
= vla_ptr(vlabuf
, d
, ext_prop
);
2805 ffs
->ms_os_descs_ext_prop_name_avail
=
2806 vla_ptr(vlabuf
, d
, ext_prop_name
);
2807 ffs
->ms_os_descs_ext_prop_data_avail
=
2808 vla_ptr(vlabuf
, d
, ext_prop_data
);
2810 /* Copy descriptors */
2811 memcpy(vla_ptr(vlabuf
, d
, raw_descs
), ffs
->raw_descs
,
2812 ffs
->raw_descs_length
);
2814 memset(vla_ptr(vlabuf
, d
, inums
), 0xff, d_inums__sz
);
2815 eps_ptr
= vla_ptr(vlabuf
, d
, eps
);
2816 for (i
= 0; i
< ffs
->eps_count
; i
++)
2817 eps_ptr
[i
].num
= -1;
2820 * d_eps == vlabuf, func->eps used to kfree vlabuf later
2822 func
->eps
= vla_ptr(vlabuf
, d
, eps
);
2823 func
->interfaces_nums
= vla_ptr(vlabuf
, d
, inums
);
2826 * Go through all the endpoint descriptors and allocate
2827 * endpoints first, so that later we can rewrite the endpoint
2828 * numbers without worrying that it may be described later on.
2831 func
->function
.fs_descriptors
= vla_ptr(vlabuf
, d
, fs_descs
);
2832 fs_len
= ffs_do_descs(ffs
->fs_descs_count
,
2833 vla_ptr(vlabuf
, d
, raw_descs
),
2835 __ffs_func_bind_do_descs
, func
);
2836 if (unlikely(fs_len
< 0)) {
2845 func
->function
.hs_descriptors
= vla_ptr(vlabuf
, d
, hs_descs
);
2846 hs_len
= ffs_do_descs(ffs
->hs_descs_count
,
2847 vla_ptr(vlabuf
, d
, raw_descs
) + fs_len
,
2848 d_raw_descs__sz
- fs_len
,
2849 __ffs_func_bind_do_descs
, func
);
2850 if (unlikely(hs_len
< 0)) {
2858 if (likely(super
)) {
2859 func
->function
.ss_descriptors
= vla_ptr(vlabuf
, d
, ss_descs
);
2860 ss_len
= ffs_do_descs(ffs
->ss_descs_count
,
2861 vla_ptr(vlabuf
, d
, raw_descs
) + fs_len
+ hs_len
,
2862 d_raw_descs__sz
- fs_len
- hs_len
,
2863 __ffs_func_bind_do_descs
, func
);
2864 if (unlikely(ss_len
< 0)) {
2873 * Now handle interface numbers allocation and interface and
2874 * endpoint numbers rewriting. We can do that in one go
2877 ret
= ffs_do_descs(ffs
->fs_descs_count
+
2878 (high
? ffs
->hs_descs_count
: 0) +
2879 (super
? ffs
->ss_descs_count
: 0),
2880 vla_ptr(vlabuf
, d
, raw_descs
), d_raw_descs__sz
,
2881 __ffs_func_bind_do_nums
, func
);
2882 if (unlikely(ret
< 0))
2885 func
->function
.os_desc_table
= vla_ptr(vlabuf
, d
, os_desc_table
);
2886 if (c
->cdev
->use_os_string
)
2887 for (i
= 0; i
< ffs
->interfaces_count
; ++i
) {
2888 struct usb_os_desc
*desc
;
2890 desc
= func
->function
.os_desc_table
[i
].os_desc
=
2891 vla_ptr(vlabuf
, d
, os_desc
) +
2892 i
* sizeof(struct usb_os_desc
);
2893 desc
->ext_compat_id
=
2894 vla_ptr(vlabuf
, d
, ext_compat
) + i
* 16;
2895 INIT_LIST_HEAD(&desc
->ext_prop
);
2897 ret
= ffs_do_os_descs(ffs
->ms_os_descs_count
,
2898 vla_ptr(vlabuf
, d
, raw_descs
) +
2899 fs_len
+ hs_len
+ ss_len
,
2900 d_raw_descs__sz
- fs_len
- hs_len
- ss_len
,
2901 __ffs_func_bind_do_os_desc
, func
);
2902 if (unlikely(ret
< 0))
2904 func
->function
.os_desc_n
=
2905 c
->cdev
->use_os_string
? ffs
->interfaces_count
: 0;
2907 /* And we're done */
2908 ffs_event_add(ffs
, FUNCTIONFS_BIND
);
2912 /* XXX Do we need to release all claimed endpoints here? */
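/*
 * usb_function ->bind callback: resolve the backing ffs instance first,
 * then run _ffs_func_bind(); on failure drop the reference taken in
 * ffs_do_functionfs_bind() and unbind the function filesystem again.
 */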
static int ffs_func_bind(struct usb_configuration *c,
			 struct usb_function *f)
{
	struct f_fs_opts *ffs_opts = ffs_do_functionfs_bind(f, c);
	struct ffs_function *func = ffs_func_from_usb(f);
	int ret;

	if (IS_ERR(ffs_opts))
		return PTR_ERR(ffs_opts);

	ret = _ffs_func_bind(c, f);
	if (ret && !--ffs_opts->refcnt)
		functionfs_unbind(func->ffs);

	return ret;
}
/* Other USB function hooks *************************************************/
static void ffs_reset_work(struct work_struct *work)
{
	struct ffs_data *ffs = container_of(work,
		struct ffs_data, reset_work);
	ffs_data_reset(ffs);
}
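/*
 * An alternate setting of (unsigned)-1 is used internally to mean "disable
 * the function"; see ffs_func_disable() below.
 */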
static int ffs_func_set_alt(struct usb_function *f,
			    unsigned interface, unsigned alt)
{
	struct ffs_function *func = ffs_func_from_usb(f);
	struct ffs_data *ffs = func->ffs;
	int ret = 0, intf;

	if (alt != (unsigned)-1) {
		intf = ffs_func_revmap_intf(func, interface);
		if (unlikely(intf < 0))
			return intf;
	}

	if (ffs->func)
		ffs_func_eps_disable(ffs->func);

	if (ffs->state == FFS_DEACTIVATED) {
		ffs->state = FFS_CLOSING;
		INIT_WORK(&ffs->reset_work, ffs_reset_work);
		schedule_work(&ffs->reset_work);
		return -ENODEV;
	}

	if (ffs->state != FFS_ACTIVE)
		return -ENODEV;

	if (alt == (unsigned)-1) {
		ffs->func = NULL;
		ffs_event_add(ffs, FUNCTIONFS_DISABLE);
		return 0;
	}

	ffs->func = func;
	ret = ffs_func_eps_enable(func);
	if (likely(ret >= 0))
		ffs_event_add(ffs, FUNCTIONFS_ENABLE);
	return ret;
}
static void ffs_func_disable(struct usb_function *f)
{
	ffs_func_set_alt(f, 0, (unsigned)-1);
}
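/*
 * Control requests addressed to an interface or endpoint owned by this
 * function are forwarded to user space as FUNCTIONFS_SETUP events, with
 * wIndex rewritten to the number user space knows the interface or
 * endpoint by.
 */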
static int ffs_func_setup(struct usb_function *f,
			  const struct usb_ctrlrequest *creq)
{
	struct ffs_function *func = ffs_func_from_usb(f);
	struct ffs_data *ffs = func->ffs;
	unsigned long flags;
	int ret;

	pr_vdebug("creq->bRequestType = %02x\n", creq->bRequestType);
	pr_vdebug("creq->bRequest     = %02x\n", creq->bRequest);
	pr_vdebug("creq->wValue       = %04x\n", le16_to_cpu(creq->wValue));
	pr_vdebug("creq->wIndex       = %04x\n", le16_to_cpu(creq->wIndex));
	pr_vdebug("creq->wLength      = %04x\n", le16_to_cpu(creq->wLength));

	/*
	 * Most requests directed to an interface go through here (notable
	 * exceptions are set/get interface), so we need to handle them.
	 * All others are either handled by composite or passed to
	 * usb_configuration->setup() (if one is set).  We also handle
	 * requests directed to an endpoint here, as that is
	 * straightforward, but what to do with any other request?
	 */
	if (ffs->state != FFS_ACTIVE)
		return -ENODEV;

	switch (creq->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_INTERFACE:
		ret = ffs_func_revmap_intf(func, le16_to_cpu(creq->wIndex));
		if (unlikely(ret < 0))
			return ret;
		break;

	case USB_RECIP_ENDPOINT:
		ret = ffs_func_revmap_ep(func, le16_to_cpu(creq->wIndex));
		if (unlikely(ret < 0))
			return ret;
		if (func->ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
			ret = func->ffs->eps_addrmap[ret];
		break;

	default:
		return -EOPNOTSUPP;
	}

	spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
	ffs->ev.setup = *creq;
	ffs->ev.setup.wIndex = cpu_to_le16(ret);
	__ffs_event_add(ffs, FUNCTIONFS_SETUP);
	spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);

	return creq->wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0;
}
static void ffs_func_suspend(struct usb_function *f)
{
	ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_SUSPEND);
}
static void ffs_func_resume(struct usb_function *f)
{
	ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_RESUME);
}
/* Endpoint and interface numbers reverse mapping ***************************/
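/*
 * At bind time interface and endpoint numbers may be rewritten; these
 * helpers map the numbers seen in control requests back to the numbers
 * user space used in its descriptors.
 */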
static int ffs_func_revmap_ep(struct ffs_function *func, u8 num)
{
	num = func->eps_revmap[num & USB_ENDPOINT_NUMBER_MASK];
	return num ? num : -EDOM;
}
static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf)
{
	short *nums = func->interfaces_nums;
	unsigned count = func->ffs->interfaces_count;

	for (; count; --count, ++nums) {
		if (*nums >= 0 && *nums == intf)
			return nums - func->interfaces_nums;
	}

	return -EDOM;
}
/* Devices management *******************************************************/

static LIST_HEAD(ffs_devices);
static struct ffs_dev *_ffs_do_find_dev(const char *name)
{
	struct ffs_dev *dev;

	list_for_each_entry(dev, &ffs_devices, entry) {
		if (!dev->name || !name)
			continue;
		if (strcmp(dev->name, name) == 0)
			return dev;
	}

	return NULL;
}
/*
 * ffs_lock must be taken by the caller of this function
 */
static struct ffs_dev *_ffs_get_single_dev(void)
{
	struct ffs_dev *dev;

	if (list_is_singular(&ffs_devices)) {
		dev = list_first_entry(&ffs_devices, struct ffs_dev, entry);
		return dev;
	}

	return NULL;
}
/*
 * ffs_lock must be taken by the caller of this function
 */
static struct ffs_dev *_ffs_find_dev(const char *name)
{
	struct ffs_dev *dev;

	dev = _ffs_get_single_dev();
	if (dev)
		return dev;

	return _ffs_do_find_dev(name);
}
/* Configfs support *********************************************************/

static inline struct f_fs_opts *to_ffs_opts(struct config_item *item)
{
	return container_of(to_config_group(item), struct f_fs_opts,
			    func_inst.group);
}
static void ffs_attr_release(struct config_item *item)
{
	struct f_fs_opts *opts = to_ffs_opts(item);

	usb_put_function_instance(&opts->func_inst);
}
static struct configfs_item_operations ffs_item_ops = {
	.release	= ffs_attr_release,
};

static struct config_item_type ffs_func_type = {
	.ct_item_ops	= &ffs_item_ops,
	.ct_owner	= THIS_MODULE,
};
/* Function registration interface ******************************************/

static void ffs_free_inst(struct usb_function_instance *f)
{
	struct f_fs_opts *opts;

	opts = to_f_fs_opts(f);
	ffs_dev_lock();
	_ffs_free_dev(opts->dev);
	ffs_dev_unlock();
	kfree(opts);
}
#define MAX_INST_NAME_LEN	40

static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name)
{
	struct f_fs_opts *opts;
	char *ptr;
	const char *tmp;
	int name_len, ret;

	name_len = strlen(name) + 1;
	if (name_len > MAX_INST_NAME_LEN)
		return -ENAMETOOLONG;

	ptr = kstrndup(name, name_len, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	opts = to_f_fs_opts(fi);
	ffs_dev_lock();

	tmp = opts->dev->name_allocated ? opts->dev->name : NULL;
	ret = _ffs_name_dev(opts->dev, ptr);
	if (ret) {
		kfree(ptr);
		ffs_dev_unlock();
		return ret;
	}
	opts->dev->name_allocated = true;

	ffs_dev_unlock();

	kfree(tmp);

	return 0;
}
static struct usb_function_instance *ffs_alloc_inst(void)
{
	struct f_fs_opts *opts;
	struct ffs_dev *dev;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return ERR_PTR(-ENOMEM);

	opts->func_inst.set_inst_name = ffs_set_inst_name;
	opts->func_inst.free_func_inst = ffs_free_inst;

	ffs_dev_lock();
	dev = _ffs_alloc_dev();
	ffs_dev_unlock();
	if (IS_ERR(dev)) {
		kfree(opts);
		return ERR_CAST(dev);
	}
	opts->dev = dev;
	dev->opts = opts;

	config_group_init_type_name(&opts->func_inst.group, "",
				    &ffs_func_type);
	return &opts->func_inst;
}
static void ffs_free(struct usb_function *f)
{
	kfree(ffs_func_from_usb(f));
}
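/*
 * Undo _ffs_func_bind(): free the USB requests allocated for each endpoint
 * and release the single chunk holding eps, descriptors and interface
 * numbers.
 */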
static void ffs_func_unbind(struct usb_configuration *c,
			    struct usb_function *f)
{
	struct ffs_function *func = ffs_func_from_usb(f);
	struct ffs_data *ffs = func->ffs;
	struct f_fs_opts *opts =
		container_of(f->fi, struct f_fs_opts, func_inst);
	struct ffs_ep *ep = func->eps;
	unsigned count = ffs->eps_count;
	unsigned long flags;

	if (ffs->func == func) {
		ffs_func_eps_disable(func);
		ffs->func = NULL;
	}

	if (!--opts->refcnt)
		functionfs_unbind(ffs);

	/* cleanup after autoconfig */
	spin_lock_irqsave(&func->ffs->eps_lock, flags);
	while (count--) {
		if (ep->ep && ep->req)
			usb_ep_free_request(ep->ep, ep->req);
		++ep;
	}
	spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
	kfree(func->eps);
	func->eps = NULL;
	/*
	 * eps, descriptors and interfaces_nums are allocated in the
	 * same chunk so only one free is required.
	 */
	func->function.fs_descriptors = NULL;
	func->function.hs_descriptors = NULL;
	func->function.ss_descriptors = NULL;
	func->interfaces_nums = NULL;

	ffs_event_add(ffs, FUNCTIONFS_UNBIND);
}
static struct usb_function *ffs_alloc(struct usb_function_instance *fi)
{
	struct ffs_function *func;

	func = kzalloc(sizeof(*func), GFP_KERNEL);
	if (unlikely(!func))
		return ERR_PTR(-ENOMEM);

	func->function.name      = "Function FS Gadget";

	func->function.bind      = ffs_func_bind;
	func->function.unbind    = ffs_func_unbind;
	func->function.set_alt   = ffs_func_set_alt;
	func->function.disable   = ffs_func_disable;
	func->function.setup     = ffs_func_setup;
	func->function.suspend   = ffs_func_suspend;
	func->function.resume    = ffs_func_resume;
	func->function.free_func = ffs_free;

	return &func->function;
}
/*
 * ffs_lock must be taken by the caller of this function
 */
static struct ffs_dev *_ffs_alloc_dev(void)
{
	struct ffs_dev *dev;
	int ret;

	if (_ffs_get_single_dev())
		return ERR_PTR(-EBUSY);

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	if (list_empty(&ffs_devices)) {
		ret = functionfs_init();
		if (ret) {
			kfree(dev);
			return ERR_PTR(ret);
		}
	}

	list_add(&dev->entry, &ffs_devices);

	return dev;
}
/*
 * ffs_lock must be taken by the caller of this function
 * The caller is responsible for "name" being available whenever f_fs needs it
 */
static int _ffs_name_dev(struct ffs_dev *dev, const char *name)
{
	struct ffs_dev *existing;

	existing = _ffs_do_find_dev(name);
	if (existing)
		return -EBUSY;

	dev->name = name;

	return 0;
}

/*
 * The caller is responsible for "name" being available whenever f_fs needs it
 */
int ffs_name_dev(struct ffs_dev *dev, const char *name)
{
	int ret;

	ffs_dev_lock();
	ret = _ffs_name_dev(dev, name);
	ffs_dev_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(ffs_name_dev);
int ffs_single_dev(struct ffs_dev *dev)
{
	int ret = 0;

	ffs_dev_lock();
	if (!list_is_singular(&ffs_devices))
		ret = -EBUSY;
	else
		dev->single = true;
	ffs_dev_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(ffs_single_dev);
/*
 * ffs_lock must be taken by the caller of this function
 */
static void _ffs_free_dev(struct ffs_dev *dev)
{
	list_del(&dev->entry);
	if (dev->name_allocated)
		kfree(dev->name);
	kfree(dev);

	if (list_empty(&ffs_devices))
		functionfs_cleanup();
}
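/*
 * Mark the named ffs_dev as mounted, giving its owner a chance to refuse
 * via the acquire callback; ffs_release_dev() below undoes this.
 */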
static void *ffs_acquire_dev(const char *dev_name)
{
	struct ffs_dev *ffs_dev;

	ffs_dev_lock();

	ffs_dev = _ffs_find_dev(dev_name);
	if (!ffs_dev)
		ffs_dev = ERR_PTR(-ENOENT);
	else if (ffs_dev->mounted)
		ffs_dev = ERR_PTR(-EBUSY);
	else if (ffs_dev->ffs_acquire_dev_callback &&
		 ffs_dev->ffs_acquire_dev_callback(ffs_dev))
		ffs_dev = ERR_PTR(-ENOENT);
	else
		ffs_dev->mounted = true;

	ffs_dev_unlock();
	return ffs_dev;
}
static void ffs_release_dev(struct ffs_data *ffs_data)
{
	struct ffs_dev *ffs_dev;

	ffs_dev_lock();

	ffs_dev = ffs_data->private_data;
	if (ffs_dev) {
		ffs_dev->mounted = false;

		if (ffs_dev->ffs_release_dev_callback)
			ffs_dev->ffs_release_dev_callback(ffs_dev);
	}

	ffs_dev_unlock();
}
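/*
 * ffs_ready() records that the instance's descriptors are available
 * (desc_ready) and invokes the registered ready callback; ffs_closed()
 * reverses this and, for a bound configfs gadget, unregisters the gadget
 * item the function belonged to.
 */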
static int ffs_ready(struct ffs_data *ffs)
{
	struct ffs_dev *ffs_obj;
	int ret = 0;

	ffs_dev_lock();

	ffs_obj = ffs->private_data;
	if (!ffs_obj) {
		ret = -EINVAL;
		goto done;
	}
	if (WARN_ON(ffs_obj->desc_ready)) {
		ret = -EBUSY;
		goto done;
	}

	ffs_obj->desc_ready = true;
	ffs_obj->ffs_data = ffs;

	if (ffs_obj->ffs_ready_callback) {
		ret = ffs_obj->ffs_ready_callback(ffs);
		if (ret)
			goto done;
	}

	set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
done:
	ffs_dev_unlock();
	return ret;
}
static void ffs_closed(struct ffs_data *ffs)
{
	struct ffs_dev *ffs_obj;
	struct f_fs_opts *opts;
	struct config_item *ci;

	ffs_dev_lock();

	ffs_obj = ffs->private_data;
	if (!ffs_obj)
		goto done;

	ffs_obj->desc_ready = false;

	if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags) &&
	    ffs_obj->ffs_closed_callback)
		ffs_obj->ffs_closed_callback(ffs);

	if (ffs_obj->opts)
		opts = ffs_obj->opts;
	else
		goto done;

	if (opts->no_configfs || !opts->func_inst.group.cg_item.ci_parent
	    || !atomic_read(&opts->func_inst.group.cg_item.ci_kref.refcount))
		goto done;

	ci = opts->func_inst.group.cg_item.ci_parent->ci_parent;
	ffs_dev_unlock();

	if (test_bit(FFS_FL_BOUND, &ffs->flags))
		unregister_gadget_item(ci);
	return;
done:
	ffs_dev_unlock();
}
/* Misc helper functions ****************************************************/

static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
{
	return nonblock
		? likely(mutex_trylock(mutex)) ? 0 : -EAGAIN
		: mutex_lock_interruptible(mutex);
}
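/*
 * Copy a user-space buffer into a freshly allocated kernel buffer; returns
 * NULL for a zero-length buffer and an ERR_PTR() on allocation or copy
 * failure.
 */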
static char *ffs_prepare_buffer(const char __user *buf, size_t len)
{
	char *data;

	if (unlikely(!len))
		return NULL;

	data = kmalloc(len, GFP_KERNEL);
	if (unlikely(!data))
		return ERR_PTR(-ENOMEM);

	if (unlikely(copy_from_user(data, buf, len))) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}

	pr_vdebug("Buffer from user space:\n");
	ffs_dump_mem("", data, len);

	return data;
}
DECLARE_USB_FUNCTION_INIT(ffs, ffs_alloc_inst, ffs_alloc);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Nazarewicz");