// SPDX-License-Identifier: GPL-2.0+
/*
 * f_fs.c -- user mode file system API for USB composite function controllers
 *
 * Copyright (C) 2010 Samsung Electronics
 * Author: Michal Nazarewicz <mina86@mina86.com>
 *
 * Based on inode.c (GadgetFS) which was:
 * Copyright (C) 2003-2004 David Brownell
 * Copyright (C) 2003 Agilent Technologies
 */

/* #define VERBOSE_DEBUG */

#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/export.h>
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <asm/unaligned.h>

#include <linux/usb/composite.h>
#include <linux/usb/functionfs.h>

#include <linux/aio.h>
#include <linux/mmu_context.h>
#include <linux/poll.h>
#include <linux/eventfd.h>

#include "u_os_desc.h"
#define FUNCTIONFS_MAGIC	0xa647361 /* Chosen by an honest dice roll ;) */
/* Reference counter handling */
static void ffs_data_get(struct ffs_data *ffs);
static void ffs_data_put(struct ffs_data *ffs);
/* Creates new ffs_data object. */
static struct ffs_data *__must_check ffs_data_new(const char *dev_name)
	__attribute__((malloc));

/* Opened counter handling. */
static void ffs_data_opened(struct ffs_data *ffs);
static void ffs_data_closed(struct ffs_data *ffs);

/* Called with ffs->mutex held; take over ownership of data. */
static int __must_check
__ffs_data_got_descs(struct ffs_data *ffs, char *data, size_t len);
static int __must_check
__ffs_data_got_strings(struct ffs_data *ffs, char *data, size_t len);
/* The function structure ***************************************************/

struct ffs_function {
	struct usb_configuration	*conf;
	struct usb_gadget		*gadget;
	struct ffs_data			*ffs;

	struct ffs_ep			*eps;
	u8				eps_revmap[16];
	short				*interfaces_nums;

	struct usb_function		function;
};

static struct ffs_function *ffs_func_from_usb(struct usb_function *f)
{
	return container_of(f, struct ffs_function, function);
}
static inline enum ffs_setup_state
ffs_setup_state_clear_cancelled(struct ffs_data *ffs)
{
	return (enum ffs_setup_state)
		cmpxchg(&ffs->setup_state, FFS_SETUP_CANCELLED, FFS_NO_SETUP);
}
static void ffs_func_eps_disable(struct ffs_function *func);
static int __must_check ffs_func_eps_enable(struct ffs_function *func);

static int ffs_func_bind(struct usb_configuration *,
			 struct usb_function *);
static int ffs_func_set_alt(struct usb_function *, unsigned, unsigned);
static void ffs_func_disable(struct usb_function *);
static int ffs_func_setup(struct usb_function *,
			  const struct usb_ctrlrequest *);
static bool ffs_func_req_match(struct usb_function *,
			       const struct usb_ctrlrequest *,
			       bool config0);
static void ffs_func_suspend(struct usb_function *);
static void ffs_func_resume(struct usb_function *);

static int ffs_func_revmap_ep(struct ffs_function *func, u8 num);
static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf);
/* The endpoints structures *************************************************/

struct ffs_ep {
	struct usb_ep			*ep;	/* P: ffs->eps_lock */
	struct usb_request		*req;	/* P: epfile->mutex */

	/* [0]: full speed, [1]: high speed, [2]: super speed */
	struct usb_endpoint_descriptor	*descs[3];

	u8				num;

	int				status;	/* P: epfile->mutex */
};

struct ffs_epfile {
	/* Protects ep->ep and ep->req. */
	struct mutex			mutex;

	struct ffs_data			*ffs;
	struct ffs_ep			*ep;	/* P: ffs->eps_lock */

	struct dentry			*dentry;

	/*
	 * Buffer for holding data from partial reads which may happen since
	 * we're rounding user read requests to a multiple of a max packet
	 * size.
	 *
	 * The pointer is initialised with NULL value and may be set by
	 * __ffs_epfile_read_data function to point to a temporary buffer.
	 *
	 * In normal operation, calls to __ffs_epfile_read_buffered will consume
	 * data from said buffer and eventually free it.  Importantly, while the
	 * function is using the buffer, it sets the pointer to NULL.  This is
	 * all right since __ffs_epfile_read_data and __ffs_epfile_read_buffered
	 * can never run concurrently (they are synchronised by epfile->mutex)
	 * so the latter will not assign a new value to the pointer.
	 *
	 * Meanwhile ffs_func_eps_disable frees the buffer (if the pointer is
	 * valid) and sets the pointer to READ_BUFFER_DROP value.  This special
	 * value is the crux of the synchronisation between ffs_func_eps_disable
	 * and __ffs_epfile_read_data.
	 *
	 * Once __ffs_epfile_read_data is about to finish, it will try to set
	 * the pointer back to its old value (as described above), but seeing as
	 * the pointer is not NULL (namely READ_BUFFER_DROP) it will instead
	 * free the buffer.
	 *
	 * == State transitions ==
	 *
	 * • ptr == NULL:  (initial state)
	 *   ◦ __ffs_epfile_read_buffer_free: go to ptr == DROP
	 *   ◦ __ffs_epfile_read_buffered:    nop
	 *   ◦ __ffs_epfile_read_data allocates temp buffer: go to ptr == buf
	 *   ◦ reading finishes:              n/a, not in ‘and reading’ state
	 * • ptr == DROP:
	 *   ◦ __ffs_epfile_read_buffer_free: nop
	 *   ◦ __ffs_epfile_read_buffered:    go to ptr == NULL
	 *   ◦ __ffs_epfile_read_data allocates temp buffer: free buf, nop
	 *   ◦ reading finishes:              n/a, not in ‘and reading’ state
	 * • ptr == buf:
	 *   ◦ __ffs_epfile_read_buffer_free: free buf, go to ptr == DROP
	 *   ◦ __ffs_epfile_read_buffered:    go to ptr == NULL and reading
	 *   ◦ __ffs_epfile_read_data:        n/a, __ffs_epfile_read_buffered
	 *                                    is always called first
	 *   ◦ reading finishes:              n/a, not in ‘and reading’ state
	 * • ptr == NULL and reading:
	 *   ◦ __ffs_epfile_read_buffer_free: go to ptr == DROP and reading
	 *   ◦ __ffs_epfile_read_buffered:    n/a, mutex is held
	 *   ◦ __ffs_epfile_read_data:        n/a, mutex is held
	 *   ◦ reading finishes and …
	 *     … all data read:               free buf, go to ptr == NULL
	 *     … otherwise:                   go to ptr == buf and reading
	 * • ptr == DROP and reading:
	 *   ◦ __ffs_epfile_read_buffer_free: nop
	 *   ◦ __ffs_epfile_read_buffered:    n/a, mutex is held
	 *   ◦ __ffs_epfile_read_data:        n/a, mutex is held
	 *   ◦ reading finishes:              free buf, go to ptr == DROP
	 */
	struct ffs_buffer		*read_buffer;
#define READ_BUFFER_DROP ((struct ffs_buffer *)ERR_PTR(-ESHUTDOWN))

	char				name[5];

	unsigned char			in;	/* P: ffs->eps_lock */
	unsigned char			isoc;	/* P: ffs->eps_lock */
};
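/*
 * A minimal sketch (not driver code) of the lock-free hand-off the comment
 * above describes: the reader takes ownership of the buffer with xchg(), works
 * on it while the published pointer is NULL, and re-publishes it with
 * cmpxchg() only if ffs_func_eps_disable did not request a drop in the
 * meantime.  The consume() helper below is hypothetical.
 *
 *	struct ffs_buffer *buf = xchg(&epfile->read_buffer, NULL);
 *
 *	if (buf && buf != READ_BUFFER_DROP) {
 *		consume(buf);				// epfile->mutex held
 *		if (cmpxchg(&epfile->read_buffer, NULL, buf))
 *			kfree(buf);			// DROP won the race
 *	}
 */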
/* ffs_io_data structure ***************************************************/

struct ffs_io_data {
	bool aio;
	bool read;

	struct kiocb *kiocb;
	struct iov_iter data;
	const void *to_free;
	char *buf;

	struct mm_struct *mm;
	struct work_struct work;

	struct usb_ep *ep;
	struct usb_request *req;

	struct ffs_data *ffs;
};

struct ffs_desc_helper {
	struct ffs_data *ffs;
	unsigned interfaces_count;
	unsigned eps_count;
};

static int  __must_check ffs_epfiles_create(struct ffs_data *ffs);
static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count);

static struct dentry *
ffs_sb_create_file(struct super_block *sb, const char *name, void *data,
		   const struct file_operations *fops);
/* Devices management *******************************************************/

DEFINE_MUTEX(ffs_lock);
EXPORT_SYMBOL_GPL(ffs_lock);

static struct ffs_dev *_ffs_find_dev(const char *name);
static struct ffs_dev *_ffs_alloc_dev(void);
static void _ffs_free_dev(struct ffs_dev *dev);
static void *ffs_acquire_dev(const char *dev_name);
static void ffs_release_dev(struct ffs_data *ffs_data);
static int ffs_ready(struct ffs_data *ffs);
static void ffs_closed(struct ffs_data *ffs);

/* Misc helper functions ****************************************************/

static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
	__attribute__((warn_unused_result, nonnull));
static char *ffs_prepare_buffer(const char __user *buf, size_t len)
	__attribute__((warn_unused_result, nonnull));
259 /* Control file aka ep0 *****************************************************/
261 static void ffs_ep0_complete(struct usb_ep
*ep
, struct usb_request
*req
)
263 struct ffs_data
*ffs
= req
->context
;
265 complete(&ffs
->ep0req_completion
);
268 static int __ffs_ep0_queue_wait(struct ffs_data
*ffs
, char *data
, size_t len
)
269 __releases(&ffs
->ev
.waitq
.lock
)
271 struct usb_request
*req
= ffs
->ep0req
;
274 req
->zero
= len
< le16_to_cpu(ffs
->ev
.setup
.wLength
);
276 spin_unlock_irq(&ffs
->ev
.waitq
.lock
);
	/*
	 * The UDC layer requires a buffer to be provided even for a ZLP, but
	 * should not use it at all.  Let's provide a poisoned pointer to catch
	 * possible bugs in the driver.
	 */
	if (req->buf == NULL)
		req->buf = (void *)0xDEADBABE;
289 reinit_completion(&ffs
->ep0req_completion
);
291 ret
= usb_ep_queue(ffs
->gadget
->ep0
, req
, GFP_ATOMIC
);
292 if (unlikely(ret
< 0))
295 ret
= wait_for_completion_interruptible(&ffs
->ep0req_completion
);
297 usb_ep_dequeue(ffs
->gadget
->ep0
, req
);
301 ffs
->setup_state
= FFS_NO_SETUP
;
302 return req
->status
? req
->status
: req
->actual
;
305 static int __ffs_ep0_stall(struct ffs_data
*ffs
)
307 if (ffs
->ev
.can_stall
) {
308 pr_vdebug("ep0 stall\n");
309 usb_ep_set_halt(ffs
->gadget
->ep0
);
310 ffs
->setup_state
= FFS_NO_SETUP
;
313 pr_debug("bogus ep0 stall!\n");
318 static ssize_t
ffs_ep0_write(struct file
*file
, const char __user
*buf
,
319 size_t len
, loff_t
*ptr
)
321 struct ffs_data
*ffs
= file
->private_data
;
327 /* Fast check if setup was canceled */
328 if (ffs_setup_state_clear_cancelled(ffs
) == FFS_SETUP_CANCELLED
)
332 ret
= ffs_mutex_lock(&ffs
->mutex
, file
->f_flags
& O_NONBLOCK
);
333 if (unlikely(ret
< 0))
337 switch (ffs
->state
) {
338 case FFS_READ_DESCRIPTORS
:
339 case FFS_READ_STRINGS
:
341 if (unlikely(len
< 16)) {
346 data
= ffs_prepare_buffer(buf
, len
);
353 if (ffs
->state
== FFS_READ_DESCRIPTORS
) {
354 pr_info("read descriptors\n");
355 ret
= __ffs_data_got_descs(ffs
, data
, len
);
356 if (unlikely(ret
< 0))
359 ffs
->state
= FFS_READ_STRINGS
;
362 pr_info("read strings\n");
363 ret
= __ffs_data_got_strings(ffs
, data
, len
);
364 if (unlikely(ret
< 0))
367 ret
= ffs_epfiles_create(ffs
);
369 ffs
->state
= FFS_CLOSING
;
373 ffs
->state
= FFS_ACTIVE
;
374 mutex_unlock(&ffs
->mutex
);
376 ret
= ffs_ready(ffs
);
377 if (unlikely(ret
< 0)) {
378 ffs
->state
= FFS_CLOSING
;
	/*
	 * We're called from user space, so we can use _irq rather than
	 * _irqsave.
	 */
392 spin_lock_irq(&ffs
->ev
.waitq
.lock
);
393 switch (ffs_setup_state_clear_cancelled(ffs
)) {
394 case FFS_SETUP_CANCELLED
:
402 case FFS_SETUP_PENDING
:
406 /* FFS_SETUP_PENDING */
407 if (!(ffs
->ev
.setup
.bRequestType
& USB_DIR_IN
)) {
408 spin_unlock_irq(&ffs
->ev
.waitq
.lock
);
409 ret
= __ffs_ep0_stall(ffs
);
413 /* FFS_SETUP_PENDING and not stall */
414 len
= min(len
, (size_t)le16_to_cpu(ffs
->ev
.setup
.wLength
));
416 spin_unlock_irq(&ffs
->ev
.waitq
.lock
);
418 data
= ffs_prepare_buffer(buf
, len
);
424 spin_lock_irq(&ffs
->ev
.waitq
.lock
);
		/*
		 * We are guaranteed to be still in FFS_ACTIVE state but the
		 * state of setup could have changed from FFS_SETUP_PENDING to
		 * FFS_SETUP_CANCELLED so we need to check for that.  If that
		 * happened, we copied data from user space in vain, but that
		 * is unlikely.
		 *
		 * For sure we are not in FFS_NO_SETUP since this is the only
		 * place the FFS_SETUP_PENDING -> FFS_NO_SETUP transition can
		 * be performed and it is protected by the mutex.
		 */
		if (ffs_setup_state_clear_cancelled(ffs) ==
		    FFS_SETUP_CANCELLED) {
442 spin_unlock_irq(&ffs
->ev
.waitq
.lock
);
444 /* unlocks spinlock */
445 ret
= __ffs_ep0_queue_wait(ffs
, data
, len
);
455 mutex_unlock(&ffs
->mutex
);
459 /* Called with ffs->ev.waitq.lock and ffs->mutex held, both released on exit. */
460 static ssize_t
__ffs_ep0_read_events(struct ffs_data
*ffs
, char __user
*buf
,
462 __releases(&ffs
->ev
.waitq
.lock
)
	 * n cannot be bigger than ffs->ev.count, which cannot be bigger than
	 * the size of the ffs->ev.types array (which is four), so that is how
	 * much space we reserve.
469 struct usb_functionfs_event events
[ARRAY_SIZE(ffs
->ev
.types
)];
470 const size_t size
= n
* sizeof *events
;
473 memset(events
, 0, size
);
476 events
[i
].type
= ffs
->ev
.types
[i
];
477 if (events
[i
].type
== FUNCTIONFS_SETUP
) {
478 events
[i
].u
.setup
= ffs
->ev
.setup
;
479 ffs
->setup_state
= FFS_SETUP_PENDING
;
485 memmove(ffs
->ev
.types
, ffs
->ev
.types
+ n
,
486 ffs
->ev
.count
* sizeof *ffs
->ev
.types
);
488 spin_unlock_irq(&ffs
->ev
.waitq
.lock
);
489 mutex_unlock(&ffs
->mutex
);
491 return unlikely(copy_to_user(buf
, events
, size
)) ? -EFAULT
: size
;
494 static ssize_t
ffs_ep0_read(struct file
*file
, char __user
*buf
,
495 size_t len
, loff_t
*ptr
)
497 struct ffs_data
*ffs
= file
->private_data
;
504 /* Fast check if setup was canceled */
505 if (ffs_setup_state_clear_cancelled(ffs
) == FFS_SETUP_CANCELLED
)
509 ret
= ffs_mutex_lock(&ffs
->mutex
, file
->f_flags
& O_NONBLOCK
);
510 if (unlikely(ret
< 0))
514 if (ffs
->state
!= FFS_ACTIVE
) {
	/*
	 * We're called from user space, so we can use _irq rather than
	 * _irqsave.
	 */
523 spin_lock_irq(&ffs
->ev
.waitq
.lock
);
525 switch (ffs_setup_state_clear_cancelled(ffs
)) {
526 case FFS_SETUP_CANCELLED
:
531 n
= len
/ sizeof(struct usb_functionfs_event
);
537 if ((file
->f_flags
& O_NONBLOCK
) && !ffs
->ev
.count
) {
542 if (wait_event_interruptible_exclusive_locked_irq(ffs
->ev
.waitq
,
548 /* unlocks spinlock */
549 return __ffs_ep0_read_events(ffs
, buf
,
550 min(n
, (size_t)ffs
->ev
.count
));
552 case FFS_SETUP_PENDING
:
553 if (ffs
->ev
.setup
.bRequestType
& USB_DIR_IN
) {
554 spin_unlock_irq(&ffs
->ev
.waitq
.lock
);
555 ret
= __ffs_ep0_stall(ffs
);
559 len
= min(len
, (size_t)le16_to_cpu(ffs
->ev
.setup
.wLength
));
561 spin_unlock_irq(&ffs
->ev
.waitq
.lock
);
564 data
= kmalloc(len
, GFP_KERNEL
);
565 if (unlikely(!data
)) {
571 spin_lock_irq(&ffs
->ev
.waitq
.lock
);
573 /* See ffs_ep0_write() */
574 if (ffs_setup_state_clear_cancelled(ffs
) ==
575 FFS_SETUP_CANCELLED
) {
580 /* unlocks spinlock */
581 ret
= __ffs_ep0_queue_wait(ffs
, data
, len
);
582 if (likely(ret
> 0) && unlikely(copy_to_user(buf
, data
, len
)))
591 spin_unlock_irq(&ffs
->ev
.waitq
.lock
);
593 mutex_unlock(&ffs
->mutex
);
598 static int ffs_ep0_open(struct inode
*inode
, struct file
*file
)
600 struct ffs_data
*ffs
= inode
->i_private
;
604 if (unlikely(ffs
->state
== FFS_CLOSING
))
607 file
->private_data
= ffs
;
608 ffs_data_opened(ffs
);
613 static int ffs_ep0_release(struct inode
*inode
, struct file
*file
)
615 struct ffs_data
*ffs
= file
->private_data
;
619 ffs_data_closed(ffs
);
624 static long ffs_ep0_ioctl(struct file
*file
, unsigned code
, unsigned long value
)
626 struct ffs_data
*ffs
= file
->private_data
;
627 struct usb_gadget
*gadget
= ffs
->gadget
;
632 if (code
== FUNCTIONFS_INTERFACE_REVMAP
) {
633 struct ffs_function
*func
= ffs
->func
;
634 ret
= func
? ffs_func_revmap_intf(func
, value
) : -ENODEV
;
635 } else if (gadget
&& gadget
->ops
->ioctl
) {
636 ret
= gadget
->ops
->ioctl(gadget
, code
, value
);
644 static __poll_t
ffs_ep0_poll(struct file
*file
, poll_table
*wait
)
646 struct ffs_data
*ffs
= file
->private_data
;
647 __poll_t mask
= EPOLLWRNORM
;
650 poll_wait(file
, &ffs
->ev
.waitq
, wait
);
652 ret
= ffs_mutex_lock(&ffs
->mutex
, file
->f_flags
& O_NONBLOCK
);
653 if (unlikely(ret
< 0))
656 switch (ffs
->state
) {
657 case FFS_READ_DESCRIPTORS
:
658 case FFS_READ_STRINGS
:
663 switch (ffs
->setup_state
) {
669 case FFS_SETUP_PENDING
:
670 case FFS_SETUP_CANCELLED
:
671 mask
|= (EPOLLIN
| EPOLLOUT
);
676 case FFS_DEACTIVATED
:
680 mutex_unlock(&ffs
->mutex
);
685 static const struct file_operations ffs_ep0_operations
= {
688 .open
= ffs_ep0_open
,
689 .write
= ffs_ep0_write
,
690 .read
= ffs_ep0_read
,
691 .release
= ffs_ep0_release
,
692 .unlocked_ioctl
= ffs_ep0_ioctl
,
693 .poll
= ffs_ep0_poll
,
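/*
 * Illustrative user-space sketch (not part of the driver): once the
 * descriptor and string blobs have been written to ep0, a daemon typically
 * blocks in read(2) on the very same file and dispatches the events defined
 * in <linux/usb/functionfs.h>.  The helper functions called below are
 * hypothetical.
 *
 *	struct usb_functionfs_event event;
 *
 *	while (read(ep0_fd, &event, sizeof(event)) == sizeof(event)) {
 *		switch (event.type) {
 *		case FUNCTIONFS_ENABLE:
 *			start_endpoint_io();		// hypothetical
 *			break;
 *		case FUNCTIONFS_DISABLE:
 *			stop_endpoint_io();		// hypothetical
 *			break;
 *		case FUNCTIONFS_SETUP:
 *			handle_setup(&event.u.setup);	// hypothetical
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 */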
697 /* "Normal" endpoints operations ********************************************/
699 static void ffs_epfile_io_complete(struct usb_ep
*_ep
, struct usb_request
*req
)
702 if (likely(req
->context
)) {
703 struct ffs_ep
*ep
= _ep
->driver_data
;
704 ep
->status
= req
->status
? req
->status
: req
->actual
;
705 complete(req
->context
);
709 static ssize_t
ffs_copy_to_iter(void *data
, int data_len
, struct iov_iter
*iter
)
711 ssize_t ret
= copy_to_iter(data
, data_len
, iter
);
712 if (likely(ret
== data_len
))
715 if (unlikely(iov_iter_count(iter
)))
	/*
	 * Dear user space developer!
	 *
	 * TL;DR: To stop getting the error message below in your kernel log,
	 * change the user space code using functionfs to align read buffers
	 * to a max packet size.
	 *
	 * Some UDCs (e.g. dwc3) require request sizes to be a multiple of a max
	 * packet size.  When an unaligned buffer is passed to functionfs, it
	 * internally uses a larger, aligned buffer so that such UDCs are happy.
	 *
	 * Unfortunately, this means that the host may send more data than was
	 * requested in the read(2) system call.  f_fs doesn't know what to do
	 * with that excess data, so it simply drops it.
	 *
	 * Had the buffer been aligned in the first place, no such problem
	 * would have occurred.
	 *
	 * Data may be dropped only in AIO reads.  Synchronous reads are handled
	 * by splitting a request into multiple parts.  This splitting may still
	 * be a problem though, so it's likely best to align the buffer
	 * regardless of it being AIO or not.
	 *
	 * This only affects OUT endpoints, i.e. reading data with read(2),
	 * aio_read(2) etc. system calls.  Writing data to an IN endpoint is
	 * not affected.
	 */
	pr_err("functionfs read size %d > requested size %zd, dropping excess data. "
	       "Align read buffer size to max packet size to avoid the problem.\n",
	       data_len, ret);
752 static void ffs_user_copy_worker(struct work_struct
*work
)
754 struct ffs_io_data
*io_data
= container_of(work
, struct ffs_io_data
,
756 int ret
= io_data
->req
->status
? io_data
->req
->status
:
757 io_data
->req
->actual
;
758 bool kiocb_has_eventfd
= io_data
->kiocb
->ki_flags
& IOCB_EVENTFD
;
760 if (io_data
->read
&& ret
> 0) {
762 ret
= ffs_copy_to_iter(io_data
->buf
, ret
, &io_data
->data
);
763 unuse_mm(io_data
->mm
);
766 io_data
->kiocb
->ki_complete(io_data
->kiocb
, ret
, ret
);
768 if (io_data
->ffs
->ffs_eventfd
&& !kiocb_has_eventfd
)
769 eventfd_signal(io_data
->ffs
->ffs_eventfd
, 1);
771 usb_ep_free_request(io_data
->ep
, io_data
->req
);
774 kfree(io_data
->to_free
);
779 static void ffs_epfile_async_io_complete(struct usb_ep
*_ep
,
780 struct usb_request
*req
)
782 struct ffs_io_data
*io_data
= req
->context
;
783 struct ffs_data
*ffs
= io_data
->ffs
;
787 INIT_WORK(&io_data
->work
, ffs_user_copy_worker
);
788 queue_work(ffs
->io_completion_wq
, &io_data
->work
);
791 static void __ffs_epfile_read_buffer_free(struct ffs_epfile
*epfile
)
794 * See comment in struct ffs_epfile for full read_buffer pointer
795 * synchronisation story.
797 struct ffs_buffer
*buf
= xchg(&epfile
->read_buffer
, READ_BUFFER_DROP
);
798 if (buf
&& buf
!= READ_BUFFER_DROP
)
802 /* Assumes epfile->mutex is held. */
803 static ssize_t
__ffs_epfile_read_buffered(struct ffs_epfile
*epfile
,
804 struct iov_iter
*iter
)
807 * Null out epfile->read_buffer so ffs_func_eps_disable does not free
808 * the buffer while we are using it. See comment in struct ffs_epfile
809 * for full read_buffer pointer synchronisation story.
811 struct ffs_buffer
*buf
= xchg(&epfile
->read_buffer
, NULL
);
813 if (!buf
|| buf
== READ_BUFFER_DROP
)
816 ret
= copy_to_iter(buf
->data
, buf
->length
, iter
);
817 if (buf
->length
== ret
) {
822 if (unlikely(iov_iter_count(iter
))) {
829 if (cmpxchg(&epfile
->read_buffer
, NULL
, buf
))
835 /* Assumes epfile->mutex is held. */
836 static ssize_t
__ffs_epfile_read_data(struct ffs_epfile
*epfile
,
837 void *data
, int data_len
,
838 struct iov_iter
*iter
)
840 struct ffs_buffer
*buf
;
842 ssize_t ret
= copy_to_iter(data
, data_len
, iter
);
843 if (likely(data_len
== ret
))
846 if (unlikely(iov_iter_count(iter
)))
849 /* See ffs_copy_to_iter for more context. */
850 pr_warn("functionfs read size %d > requested size %zd, splitting request into multiple reads.",
854 buf
= kmalloc(sizeof(*buf
) + data_len
, GFP_KERNEL
);
857 buf
->length
= data_len
;
858 buf
->data
= buf
->storage
;
859 memcpy(buf
->storage
, data
+ ret
, data_len
);
862 * At this point read_buffer is NULL or READ_BUFFER_DROP (if
863 * ffs_func_eps_disable has been called in the meanwhile). See comment
864 * in struct ffs_epfile for full read_buffer pointer synchronisation
867 if (unlikely(cmpxchg(&epfile
->read_buffer
, NULL
, buf
)))
873 static ssize_t
ffs_epfile_io(struct file
*file
, struct ffs_io_data
*io_data
)
875 struct ffs_epfile
*epfile
= file
->private_data
;
876 struct usb_request
*req
;
879 ssize_t ret
, data_len
= -EINVAL
;
882 /* Are we still active? */
883 if (WARN_ON(epfile
->ffs
->state
!= FFS_ACTIVE
))
886 /* Wait for endpoint to be enabled */
889 if (file
->f_flags
& O_NONBLOCK
)
892 ret
= wait_event_interruptible(
893 epfile
->ffs
->wait
, (ep
= epfile
->ep
));
899 halt
= (!io_data
->read
== !epfile
->in
);
900 if (halt
&& epfile
->isoc
)
903 /* We will be using request and read_buffer */
904 ret
= ffs_mutex_lock(&epfile
->mutex
, file
->f_flags
& O_NONBLOCK
);
908 /* Allocate & copy */
910 struct usb_gadget
*gadget
;
		/*
		 * Do we have buffered data from a previous partial read?  Check
		 * that for the synchronous case only, because we do not have a
		 * facility to ‘wake up’ a pending asynchronous read and push
		 * buffered data to it, which we would need to make things
		 * behave consistently.
		 */
919 if (!io_data
->aio
&& io_data
->read
) {
920 ret
= __ffs_epfile_read_buffered(epfile
, &io_data
->data
);
926 * if we _do_ wait above, the epfile->ffs->gadget might be NULL
927 * before the waiting completes, so do not assign to 'gadget'
930 gadget
= epfile
->ffs
->gadget
;
932 spin_lock_irq(&epfile
->ffs
->eps_lock
);
933 /* In the meantime, endpoint got disabled or changed. */
934 if (epfile
->ep
!= ep
) {
938 data_len
= iov_iter_count(&io_data
->data
);
940 * Controller may require buffer size to be aligned to
941 * maxpacketsize of an out endpoint.
944 data_len
= usb_ep_align_maybe(gadget
, ep
->ep
, data_len
);
945 spin_unlock_irq(&epfile
->ffs
->eps_lock
);
947 data
= kmalloc(data_len
, GFP_KERNEL
);
948 if (unlikely(!data
)) {
952 if (!io_data
->read
&&
953 !copy_from_iter_full(data
, data_len
, &io_data
->data
)) {
959 spin_lock_irq(&epfile
->ffs
->eps_lock
);
961 if (epfile
->ep
!= ep
) {
962 /* In the meantime, endpoint got disabled or changed. */
965 ret
= usb_ep_set_halt(ep
->ep
);
968 } else if (unlikely(data_len
== -EINVAL
)) {
		/*
		 * Sanity check: even though data_len cannot be used
		 * uninitialized at the time of writing this comment, some
		 * compilers complain about this situation.
		 * To keep the code free of warnings, data_len is initialized
		 * to -EINVAL at its declaration, which means we can no longer
		 * rely on the compiler to warn us if a future change results
		 * in data_len being used uninitialized.  For that reason we
		 * add this redundant sanity check here.
		 */
		WARN(1, "%s: data_len == -EINVAL\n", __func__);
982 } else if (!io_data
->aio
) {
983 DECLARE_COMPLETION_ONSTACK(done
);
984 bool interrupted
= false;
988 req
->length
= data_len
;
990 req
->context
= &done
;
991 req
->complete
= ffs_epfile_io_complete
;
993 ret
= usb_ep_queue(ep
->ep
, req
, GFP_ATOMIC
);
994 if (unlikely(ret
< 0))
997 spin_unlock_irq(&epfile
->ffs
->eps_lock
);
999 if (unlikely(wait_for_completion_interruptible(&done
))) {
1001 * To avoid race condition with ffs_epfile_io_complete,
1002 * dequeue the request first then check
1003 * status. usb_ep_dequeue API should guarantee no race
1004 * condition with req->complete callback.
1006 usb_ep_dequeue(ep
->ep
, req
);
1007 interrupted
= ep
->status
< 0;
1012 else if (io_data
->read
&& ep
->status
> 0)
1013 ret
= __ffs_epfile_read_data(epfile
, data
, ep
->status
,
1018 } else if (!(req
= usb_ep_alloc_request(ep
->ep
, GFP_ATOMIC
))) {
1022 req
->length
= data_len
;
1024 io_data
->buf
= data
;
1025 io_data
->ep
= ep
->ep
;
1027 io_data
->ffs
= epfile
->ffs
;
1029 req
->context
= io_data
;
1030 req
->complete
= ffs_epfile_async_io_complete
;
1032 ret
= usb_ep_queue(ep
->ep
, req
, GFP_ATOMIC
);
1033 if (unlikely(ret
)) {
1034 usb_ep_free_request(ep
->ep
, req
);
1040 * Do not kfree the buffer in this function. It will be freed
1041 * by ffs_user_copy_worker.
1047 spin_unlock_irq(&epfile
->ffs
->eps_lock
);
1049 mutex_unlock(&epfile
->mutex
);
1056 ffs_epfile_open(struct inode
*inode
, struct file
*file
)
1058 struct ffs_epfile
*epfile
= inode
->i_private
;
1062 if (WARN_ON(epfile
->ffs
->state
!= FFS_ACTIVE
))
1065 file
->private_data
= epfile
;
1066 ffs_data_opened(epfile
->ffs
);
1071 static int ffs_aio_cancel(struct kiocb
*kiocb
)
1073 struct ffs_io_data
*io_data
= kiocb
->private;
1074 struct ffs_epfile
*epfile
= kiocb
->ki_filp
->private_data
;
1079 spin_lock_irq(&epfile
->ffs
->eps_lock
);
1081 if (likely(io_data
&& io_data
->ep
&& io_data
->req
))
1082 value
= usb_ep_dequeue(io_data
->ep
, io_data
->req
);
1086 spin_unlock_irq(&epfile
->ffs
->eps_lock
);
1091 static ssize_t
ffs_epfile_write_iter(struct kiocb
*kiocb
, struct iov_iter
*from
)
1093 struct ffs_io_data io_data
, *p
= &io_data
;
1098 if (!is_sync_kiocb(kiocb
)) {
1099 p
= kmalloc(sizeof(io_data
), GFP_KERNEL
);
1110 p
->mm
= current
->mm
;
1115 kiocb_set_cancel_fn(kiocb
, ffs_aio_cancel
);
1117 res
= ffs_epfile_io(kiocb
->ki_filp
, p
);
1118 if (res
== -EIOCBQUEUED
)
1127 static ssize_t
ffs_epfile_read_iter(struct kiocb
*kiocb
, struct iov_iter
*to
)
1129 struct ffs_io_data io_data
, *p
= &io_data
;
1134 if (!is_sync_kiocb(kiocb
)) {
1135 p
= kmalloc(sizeof(io_data
), GFP_KERNEL
);
1146 p
->to_free
= dup_iter(&p
->data
, to
, GFP_KERNEL
);
1155 p
->mm
= current
->mm
;
1160 kiocb_set_cancel_fn(kiocb
, ffs_aio_cancel
);
1162 res
= ffs_epfile_io(kiocb
->ki_filp
, p
);
1163 if (res
== -EIOCBQUEUED
)
1176 ffs_epfile_release(struct inode
*inode
, struct file
*file
)
1178 struct ffs_epfile
*epfile
= inode
->i_private
;
1182 __ffs_epfile_read_buffer_free(epfile
);
1183 ffs_data_closed(epfile
->ffs
);
1188 static long ffs_epfile_ioctl(struct file
*file
, unsigned code
,
1189 unsigned long value
)
1191 struct ffs_epfile
*epfile
= file
->private_data
;
1197 if (WARN_ON(epfile
->ffs
->state
!= FFS_ACTIVE
))
1200 /* Wait for endpoint to be enabled */
1203 if (file
->f_flags
& O_NONBLOCK
)
1206 ret
= wait_event_interruptible(
1207 epfile
->ffs
->wait
, (ep
= epfile
->ep
));
1212 spin_lock_irq(&epfile
->ffs
->eps_lock
);
1214 /* In the meantime, endpoint got disabled or changed. */
1215 if (epfile
->ep
!= ep
) {
1216 spin_unlock_irq(&epfile
->ffs
->eps_lock
);
1221 case FUNCTIONFS_FIFO_STATUS
:
1222 ret
= usb_ep_fifo_status(epfile
->ep
->ep
);
1224 case FUNCTIONFS_FIFO_FLUSH
:
1225 usb_ep_fifo_flush(epfile
->ep
->ep
);
1228 case FUNCTIONFS_CLEAR_HALT
:
1229 ret
= usb_ep_clear_halt(epfile
->ep
->ep
);
1231 case FUNCTIONFS_ENDPOINT_REVMAP
:
1232 ret
= epfile
->ep
->num
;
1234 case FUNCTIONFS_ENDPOINT_DESC
:
1237 struct usb_endpoint_descriptor
*desc
;
1239 switch (epfile
->ffs
->gadget
->speed
) {
1240 case USB_SPEED_SUPER
:
1243 case USB_SPEED_HIGH
:
1249 desc
= epfile
->ep
->descs
[desc_idx
];
1251 spin_unlock_irq(&epfile
->ffs
->eps_lock
);
1252 ret
= copy_to_user((void __user
*)value
, desc
, desc
->bLength
);
1260 spin_unlock_irq(&epfile
->ffs
->eps_lock
);
1265 static const struct file_operations ffs_epfile_operations
= {
1266 .llseek
= no_llseek
,
1268 .open
= ffs_epfile_open
,
1269 .write_iter
= ffs_epfile_write_iter
,
1270 .read_iter
= ffs_epfile_read_iter
,
1271 .release
= ffs_epfile_release
,
1272 .unlocked_ioctl
= ffs_epfile_ioctl
,
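/*
 * Illustrative user-space sketch (not part of the driver): after the
 * descriptors have been accepted, each endpoint appears as a file in the
 * functionfs mount and plain read(2)/write(2) (or io_submit(2) for AIO) on it
 * translates into USB transfers.  The mount point and endpoint numbers below
 * are placeholders.
 *
 *	int in_fd  = open("/dev/ffs/ep1", O_RDWR);	// IN endpoint
 *	int out_fd = open("/dev/ffs/ep2", O_RDWR);	// OUT endpoint
 *
 *	write(in_fd, reply, reply_len);		// device -> host
 *	read(out_fd, buf, sizeof(buf));		// host -> device
 */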
1276 /* File system and super block operations ***********************************/
1279 * Mounting the file system creates a controller file, used first for
1280 * function configuration then later for event monitoring.
1283 static struct inode
*__must_check
1284 ffs_sb_make_inode(struct super_block
*sb
, void *data
,
1285 const struct file_operations
*fops
,
1286 const struct inode_operations
*iops
,
1287 struct ffs_file_perms
*perms
)
1289 struct inode
*inode
;
1293 inode
= new_inode(sb
);
1295 if (likely(inode
)) {
1296 struct timespec ts
= current_time(inode
);
1298 inode
->i_ino
= get_next_ino();
1299 inode
->i_mode
= perms
->mode
;
1300 inode
->i_uid
= perms
->uid
;
1301 inode
->i_gid
= perms
->gid
;
1302 inode
->i_atime
= ts
;
1303 inode
->i_mtime
= ts
;
1304 inode
->i_ctime
= ts
;
1305 inode
->i_private
= data
;
1307 inode
->i_fop
= fops
;
1315 /* Create "regular" file */
1316 static struct dentry
*ffs_sb_create_file(struct super_block
*sb
,
1317 const char *name
, void *data
,
1318 const struct file_operations
*fops
)
1320 struct ffs_data
*ffs
= sb
->s_fs_info
;
1321 struct dentry
*dentry
;
1322 struct inode
*inode
;
1326 dentry
= d_alloc_name(sb
->s_root
, name
);
1327 if (unlikely(!dentry
))
1330 inode
= ffs_sb_make_inode(sb
, data
, fops
, NULL
, &ffs
->file_perms
);
1331 if (unlikely(!inode
)) {
1336 d_add(dentry
, inode
);
1341 static const struct super_operations ffs_sb_operations
= {
1342 .statfs
= simple_statfs
,
1343 .drop_inode
= generic_delete_inode
,
1346 struct ffs_sb_fill_data
{
1347 struct ffs_file_perms perms
;
1349 const char *dev_name
;
1351 struct ffs_data
*ffs_data
;
1354 static int ffs_sb_fill(struct super_block
*sb
, void *_data
, int silent
)
1356 struct ffs_sb_fill_data
*data
= _data
;
1357 struct inode
*inode
;
1358 struct ffs_data
*ffs
= data
->ffs_data
;
1363 data
->ffs_data
= NULL
;
1364 sb
->s_fs_info
= ffs
;
1365 sb
->s_blocksize
= PAGE_SIZE
;
1366 sb
->s_blocksize_bits
= PAGE_SHIFT
;
1367 sb
->s_magic
= FUNCTIONFS_MAGIC
;
1368 sb
->s_op
= &ffs_sb_operations
;
1369 sb
->s_time_gran
= 1;
1372 data
->perms
.mode
= data
->root_mode
;
1373 inode
= ffs_sb_make_inode(sb
, NULL
,
1374 &simple_dir_operations
,
1375 &simple_dir_inode_operations
,
1377 sb
->s_root
= d_make_root(inode
);
1378 if (unlikely(!sb
->s_root
))
1382 if (unlikely(!ffs_sb_create_file(sb
, "ep0", ffs
,
1383 &ffs_ep0_operations
)))
1389 static int ffs_fs_parse_opts(struct ffs_sb_fill_data
*data
, char *opts
)
1393 if (!opts
|| !*opts
)
1397 unsigned long value
;
1401 comma
= strchr(opts
, ',');
1406 eq
= strchr(opts
, '=');
1407 if (unlikely(!eq
)) {
1408 pr_err("'=' missing in %s\n", opts
);
1414 if (kstrtoul(eq
+ 1, 0, &value
)) {
1415 pr_err("%s: invalid value: %s\n", opts
, eq
+ 1);
1419 /* Interpret option */
1420 switch (eq
- opts
) {
1422 if (!memcmp(opts
, "no_disconnect", 13))
1423 data
->no_disconnect
= !!value
;
1428 if (!memcmp(opts
, "rmode", 5))
1429 data
->root_mode
= (value
& 0555) | S_IFDIR
;
1430 else if (!memcmp(opts
, "fmode", 5))
1431 data
->perms
.mode
= (value
& 0666) | S_IFREG
;
1437 if (!memcmp(opts
, "mode", 4)) {
1438 data
->root_mode
= (value
& 0555) | S_IFDIR
;
1439 data
->perms
.mode
= (value
& 0666) | S_IFREG
;
1446 if (!memcmp(opts
, "uid", 3)) {
1447 data
->perms
.uid
= make_kuid(current_user_ns(), value
);
1448 if (!uid_valid(data
->perms
.uid
)) {
1449 pr_err("%s: unmapped value: %lu\n", opts
, value
);
1452 } else if (!memcmp(opts
, "gid", 3)) {
1453 data
->perms
.gid
= make_kgid(current_user_ns(), value
);
1454 if (!gid_valid(data
->perms
.gid
)) {
1455 pr_err("%s: unmapped value: %lu\n", opts
, value
);
1465 pr_err("%s: invalid option\n", opts
);
1469 /* Next iteration */
1478 /* "mount -t functionfs dev_name /dev/function" ends up here */
1480 static struct dentry
*
1481 ffs_fs_mount(struct file_system_type
*t
, int flags
,
1482 const char *dev_name
, void *opts
)
1484 struct ffs_sb_fill_data data
= {
1486 .mode
= S_IFREG
| 0600,
1487 .uid
= GLOBAL_ROOT_UID
,
1488 .gid
= GLOBAL_ROOT_GID
,
1490 .root_mode
= S_IFDIR
| 0500,
1491 .no_disconnect
= false,
1496 struct ffs_data
*ffs
;
1500 ret
= ffs_fs_parse_opts(&data
, opts
);
1501 if (unlikely(ret
< 0))
1502 return ERR_PTR(ret
);
1504 ffs
= ffs_data_new(dev_name
);
1506 return ERR_PTR(-ENOMEM
);
1507 ffs
->file_perms
= data
.perms
;
1508 ffs
->no_disconnect
= data
.no_disconnect
;
1510 ffs
->dev_name
= kstrdup(dev_name
, GFP_KERNEL
);
1511 if (unlikely(!ffs
->dev_name
)) {
1513 return ERR_PTR(-ENOMEM
);
1516 ffs_dev
= ffs_acquire_dev(dev_name
);
1517 if (IS_ERR(ffs_dev
)) {
1519 return ERR_CAST(ffs_dev
);
1521 ffs
->private_data
= ffs_dev
;
1522 data
.ffs_data
= ffs
;
1524 rv
= mount_nodev(t
, flags
, &data
, ffs_sb_fill
);
1525 if (IS_ERR(rv
) && data
.ffs_data
) {
1526 ffs_release_dev(data
.ffs_data
);
1527 ffs_data_put(data
.ffs_data
);
1533 ffs_fs_kill_sb(struct super_block
*sb
)
1537 kill_litter_super(sb
);
1538 if (sb
->s_fs_info
) {
1539 ffs_release_dev(sb
->s_fs_info
);
1540 ffs_data_closed(sb
->s_fs_info
);
1541 ffs_data_put(sb
->s_fs_info
);
1545 static struct file_system_type ffs_fs_type
= {
1546 .owner
= THIS_MODULE
,
1547 .name
= "functionfs",
1548 .mount
= ffs_fs_mount
,
1549 .kill_sb
= ffs_fs_kill_sb
,
1551 MODULE_ALIAS_FS("functionfs");
1554 /* Driver's main init/cleanup functions *************************************/
1556 static int functionfs_init(void)
1562 ret
= register_filesystem(&ffs_fs_type
);
1564 pr_info("file system registered\n");
1566 pr_err("failed registering file system (%d)\n", ret
);
1571 static void functionfs_cleanup(void)
1575 pr_info("unloading\n");
1576 unregister_filesystem(&ffs_fs_type
);
1580 /* ffs_data and ffs_function construction and destruction code **************/
1582 static void ffs_data_clear(struct ffs_data
*ffs
);
1583 static void ffs_data_reset(struct ffs_data
*ffs
);
1585 static void ffs_data_get(struct ffs_data
*ffs
)
1589 refcount_inc(&ffs
->ref
);
1592 static void ffs_data_opened(struct ffs_data
*ffs
)
1596 refcount_inc(&ffs
->ref
);
1597 if (atomic_add_return(1, &ffs
->opened
) == 1 &&
1598 ffs
->state
== FFS_DEACTIVATED
) {
1599 ffs
->state
= FFS_CLOSING
;
1600 ffs_data_reset(ffs
);
1604 static void ffs_data_put(struct ffs_data
*ffs
)
1608 if (unlikely(refcount_dec_and_test(&ffs
->ref
))) {
1609 pr_info("%s(): freeing\n", __func__
);
1610 ffs_data_clear(ffs
);
1611 BUG_ON(waitqueue_active(&ffs
->ev
.waitq
) ||
1612 waitqueue_active(&ffs
->ep0req_completion
.wait
) ||
1613 waitqueue_active(&ffs
->wait
));
1614 destroy_workqueue(ffs
->io_completion_wq
);
1615 kfree(ffs
->dev_name
);
1620 static void ffs_data_closed(struct ffs_data
*ffs
)
1624 if (atomic_dec_and_test(&ffs
->opened
)) {
1625 if (ffs
->no_disconnect
) {
1626 ffs
->state
= FFS_DEACTIVATED
;
1628 ffs_epfiles_destroy(ffs
->epfiles
,
1630 ffs
->epfiles
= NULL
;
1632 if (ffs
->setup_state
== FFS_SETUP_PENDING
)
1633 __ffs_ep0_stall(ffs
);
1635 ffs
->state
= FFS_CLOSING
;
1636 ffs_data_reset(ffs
);
1639 if (atomic_read(&ffs
->opened
) < 0) {
1640 ffs
->state
= FFS_CLOSING
;
1641 ffs_data_reset(ffs
);
1647 static struct ffs_data
*ffs_data_new(const char *dev_name
)
1649 struct ffs_data
*ffs
= kzalloc(sizeof *ffs
, GFP_KERNEL
);
1655 ffs
->io_completion_wq
= alloc_ordered_workqueue("%s", 0, dev_name
);
1656 if (!ffs
->io_completion_wq
) {
1661 refcount_set(&ffs
->ref
, 1);
1662 atomic_set(&ffs
->opened
, 0);
1663 ffs
->state
= FFS_READ_DESCRIPTORS
;
1664 mutex_init(&ffs
->mutex
);
1665 spin_lock_init(&ffs
->eps_lock
);
1666 init_waitqueue_head(&ffs
->ev
.waitq
);
1667 init_waitqueue_head(&ffs
->wait
);
1668 init_completion(&ffs
->ep0req_completion
);
1670 /* XXX REVISIT need to update it in some places, or do we? */
1671 ffs
->ev
.can_stall
= 1;
1676 static void ffs_data_clear(struct ffs_data
*ffs
)
1682 BUG_ON(ffs
->gadget
);
1685 ffs_epfiles_destroy(ffs
->epfiles
, ffs
->eps_count
);
1687 if (ffs
->ffs_eventfd
)
1688 eventfd_ctx_put(ffs
->ffs_eventfd
);
1690 kfree(ffs
->raw_descs_data
);
1691 kfree(ffs
->raw_strings
);
1692 kfree(ffs
->stringtabs
);
1695 static void ffs_data_reset(struct ffs_data
*ffs
)
1699 ffs_data_clear(ffs
);
1701 ffs
->epfiles
= NULL
;
1702 ffs
->raw_descs_data
= NULL
;
1703 ffs
->raw_descs
= NULL
;
1704 ffs
->raw_strings
= NULL
;
1705 ffs
->stringtabs
= NULL
;
1707 ffs
->raw_descs_length
= 0;
1708 ffs
->fs_descs_count
= 0;
1709 ffs
->hs_descs_count
= 0;
1710 ffs
->ss_descs_count
= 0;
1712 ffs
->strings_count
= 0;
1713 ffs
->interfaces_count
= 0;
1718 ffs
->state
= FFS_READ_DESCRIPTORS
;
1719 ffs
->setup_state
= FFS_NO_SETUP
;
1724 static int functionfs_bind(struct ffs_data
*ffs
, struct usb_composite_dev
*cdev
)
1726 struct usb_gadget_strings
**lang
;
1731 if (WARN_ON(ffs
->state
!= FFS_ACTIVE
1732 || test_and_set_bit(FFS_FL_BOUND
, &ffs
->flags
)))
1735 first_id
= usb_string_ids_n(cdev
, ffs
->strings_count
);
1736 if (unlikely(first_id
< 0))
1739 ffs
->ep0req
= usb_ep_alloc_request(cdev
->gadget
->ep0
, GFP_KERNEL
);
1740 if (unlikely(!ffs
->ep0req
))
1742 ffs
->ep0req
->complete
= ffs_ep0_complete
;
1743 ffs
->ep0req
->context
= ffs
;
1745 lang
= ffs
->stringtabs
;
1747 for (; *lang
; ++lang
) {
1748 struct usb_string
*str
= (*lang
)->strings
;
1750 for (; str
->s
; ++id
, ++str
)
1755 ffs
->gadget
= cdev
->gadget
;
1760 static void functionfs_unbind(struct ffs_data
*ffs
)
1764 if (!WARN_ON(!ffs
->gadget
)) {
1765 usb_ep_free_request(ffs
->gadget
->ep0
, ffs
->ep0req
);
1768 clear_bit(FFS_FL_BOUND
, &ffs
->flags
);
1773 static int ffs_epfiles_create(struct ffs_data
*ffs
)
1775 struct ffs_epfile
*epfile
, *epfiles
;
1780 count
= ffs
->eps_count
;
1781 epfiles
= kcalloc(count
, sizeof(*epfiles
), GFP_KERNEL
);
1786 for (i
= 1; i
<= count
; ++i
, ++epfile
) {
1788 mutex_init(&epfile
->mutex
);
1789 if (ffs
->user_flags
& FUNCTIONFS_VIRTUAL_ADDR
)
1790 sprintf(epfile
->name
, "ep%02x", ffs
->eps_addrmap
[i
]);
1792 sprintf(epfile
->name
, "ep%u", i
);
1793 epfile
->dentry
= ffs_sb_create_file(ffs
->sb
, epfile
->name
,
1795 &ffs_epfile_operations
);
1796 if (unlikely(!epfile
->dentry
)) {
1797 ffs_epfiles_destroy(epfiles
, i
- 1);
1802 ffs
->epfiles
= epfiles
;
1806 static void ffs_epfiles_destroy(struct ffs_epfile
*epfiles
, unsigned count
)
1808 struct ffs_epfile
*epfile
= epfiles
;
1812 for (; count
; --count
, ++epfile
) {
1813 BUG_ON(mutex_is_locked(&epfile
->mutex
));
1814 if (epfile
->dentry
) {
1815 d_delete(epfile
->dentry
);
1816 dput(epfile
->dentry
);
1817 epfile
->dentry
= NULL
;
1824 static void ffs_func_eps_disable(struct ffs_function
*func
)
1826 struct ffs_ep
*ep
= func
->eps
;
1827 struct ffs_epfile
*epfile
= func
->ffs
->epfiles
;
1828 unsigned count
= func
->ffs
->eps_count
;
1829 unsigned long flags
;
1831 spin_lock_irqsave(&func
->ffs
->eps_lock
, flags
);
1833 /* pending requests get nuked */
1835 usb_ep_disable(ep
->ep
);
1840 __ffs_epfile_read_buffer_free(epfile
);
1844 spin_unlock_irqrestore(&func
->ffs
->eps_lock
, flags
);
1847 static int ffs_func_eps_enable(struct ffs_function
*func
)
1849 struct ffs_data
*ffs
= func
->ffs
;
1850 struct ffs_ep
*ep
= func
->eps
;
1851 struct ffs_epfile
*epfile
= ffs
->epfiles
;
1852 unsigned count
= ffs
->eps_count
;
1853 unsigned long flags
;
1856 spin_lock_irqsave(&func
->ffs
->eps_lock
, flags
);
1858 ep
->ep
->driver_data
= ep
;
1860 ret
= config_ep_by_speed(func
->gadget
, &func
->function
, ep
->ep
);
1862 pr_err("%s: config_ep_by_speed(%s) returned %d\n",
1863 __func__
, ep
->ep
->name
, ret
);
1867 ret
= usb_ep_enable(ep
->ep
);
1870 epfile
->in
= usb_endpoint_dir_in(ep
->ep
->desc
);
1871 epfile
->isoc
= usb_endpoint_xfer_isoc(ep
->ep
->desc
);
1880 wake_up_interruptible(&ffs
->wait
);
1881 spin_unlock_irqrestore(&func
->ffs
->eps_lock
, flags
);
1887 /* Parsing and building descriptors and strings *****************************/
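/*
 * A hedged illustration (not part of the driver) of the blob this section
 * parses: user space typically builds a structure like the one below (types
 * from include/uapi/linux/usb/functionfs.h) and writes it to ep0 in a single
 * write(2).  The concrete descriptor set is function specific and only
 * sketched here.
 *
 *	static const struct {
 *		struct usb_functionfs_descs_head_v2 header;
 *		__le32 fs_count;
 *		struct {
 *			struct usb_interface_descriptor intf;
 *			struct usb_endpoint_descriptor_no_audio sink;
 *			struct usb_endpoint_descriptor_no_audio source;
 *		} __attribute__((packed)) fs_descs;
 *	} __attribute__((packed)) descriptors = {
 *		.header = {
 *			.magic  = htole32(FUNCTIONFS_DESCRIPTORS_MAGIC_V2),
 *			.flags  = htole32(FUNCTIONFS_HAS_FS_DESC),
 *			.length = htole32(sizeof(descriptors)),
 *		},
 *		.fs_count = htole32(3),
 *		// .fs_descs holds the actual interface + endpoint descriptors
 *	};
 *
 *	write(ep0_fd, &descriptors, sizeof(descriptors));
 */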
/*
 * This validates whether the data pointed to by data is a valid USB descriptor
 * as well as records how many interfaces, endpoints and strings are required
 * by the given configuration.  Returns the length of the descriptor on success
 * or a negative error code if the data is invalid.
 */
1896 enum ffs_entity_type
{
1897 FFS_DESCRIPTOR
, FFS_INTERFACE
, FFS_STRING
, FFS_ENDPOINT
1900 enum ffs_os_desc_type
{
1901 FFS_OS_DESC
, FFS_OS_DESC_EXT_COMPAT
, FFS_OS_DESC_EXT_PROP
1904 typedef int (*ffs_entity_callback
)(enum ffs_entity_type entity
,
1906 struct usb_descriptor_header
*desc
,
1909 typedef int (*ffs_os_desc_callback
)(enum ffs_os_desc_type entity
,
1910 struct usb_os_desc_header
*h
, void *data
,
1911 unsigned len
, void *priv
);
1913 static int __must_check
ffs_do_single_desc(char *data
, unsigned len
,
1914 ffs_entity_callback entity
,
1917 struct usb_descriptor_header
*_ds
= (void *)data
;
1923 /* At least two bytes are required: length and type */
1925 pr_vdebug("descriptor too short\n");
	/* Do we have at least as many bytes as the descriptor takes? */
1930 length
= _ds
->bLength
;
1932 pr_vdebug("descriptor longer then available data\n");
1936 #define __entity_check_INTERFACE(val) 1
1937 #define __entity_check_STRING(val) (val)
1938 #define __entity_check_ENDPOINT(val) ((val) & USB_ENDPOINT_NUMBER_MASK)
1939 #define __entity(type, val) do { \
1940 pr_vdebug("entity " #type "(%02x)\n", (val)); \
1941 if (unlikely(!__entity_check_ ##type(val))) { \
1942 pr_vdebug("invalid entity's value\n"); \
1945 ret = entity(FFS_ ##type, &val, _ds, priv); \
1946 if (unlikely(ret < 0)) { \
1947 pr_debug("entity " #type "(%02x); ret = %d\n", \
1953 /* Parse descriptor depending on type. */
1954 switch (_ds
->bDescriptorType
) {
1958 case USB_DT_DEVICE_QUALIFIER
:
1959 /* function can't have any of those */
1960 pr_vdebug("descriptor reserved for gadget: %d\n",
1961 _ds
->bDescriptorType
);
1964 case USB_DT_INTERFACE
: {
1965 struct usb_interface_descriptor
*ds
= (void *)_ds
;
1966 pr_vdebug("interface descriptor\n");
1967 if (length
!= sizeof *ds
)
1970 __entity(INTERFACE
, ds
->bInterfaceNumber
);
1972 __entity(STRING
, ds
->iInterface
);
1976 case USB_DT_ENDPOINT
: {
1977 struct usb_endpoint_descriptor
*ds
= (void *)_ds
;
1978 pr_vdebug("endpoint descriptor\n");
1979 if (length
!= USB_DT_ENDPOINT_SIZE
&&
1980 length
!= USB_DT_ENDPOINT_AUDIO_SIZE
)
1982 __entity(ENDPOINT
, ds
->bEndpointAddress
);
1987 pr_vdebug("hid descriptor\n");
1988 if (length
!= sizeof(struct hid_descriptor
))
1993 if (length
!= sizeof(struct usb_otg_descriptor
))
1997 case USB_DT_INTERFACE_ASSOCIATION
: {
1998 struct usb_interface_assoc_descriptor
*ds
= (void *)_ds
;
1999 pr_vdebug("interface association descriptor\n");
2000 if (length
!= sizeof *ds
)
2003 __entity(STRING
, ds
->iFunction
);
2007 case USB_DT_SS_ENDPOINT_COMP
:
2008 pr_vdebug("EP SS companion descriptor\n");
2009 if (length
!= sizeof(struct usb_ss_ep_comp_descriptor
))
2013 case USB_DT_OTHER_SPEED_CONFIG
:
2014 case USB_DT_INTERFACE_POWER
:
2016 case USB_DT_SECURITY
:
2017 case USB_DT_CS_RADIO_CONTROL
:
2019 pr_vdebug("unimplemented descriptor: %d\n", _ds
->bDescriptorType
);
2023 /* We should never be here */
2024 pr_vdebug("unknown descriptor: %d\n", _ds
->bDescriptorType
);
2028 pr_vdebug("invalid length: %d (descriptor %d)\n",
2029 _ds
->bLength
, _ds
->bDescriptorType
);
2034 #undef __entity_check_DESCRIPTOR
2035 #undef __entity_check_INTERFACE
2036 #undef __entity_check_STRING
2037 #undef __entity_check_ENDPOINT
2042 static int __must_check
ffs_do_descs(unsigned count
, char *data
, unsigned len
,
2043 ffs_entity_callback entity
, void *priv
)
2045 const unsigned _len
= len
;
2046 unsigned long num
= 0;
2056 /* Record "descriptor" entity */
2057 ret
= entity(FFS_DESCRIPTOR
, (u8
*)num
, (void *)data
, priv
);
2058 if (unlikely(ret
< 0)) {
2059 pr_debug("entity DESCRIPTOR(%02lx); ret = %d\n",
2067 ret
= ffs_do_single_desc(data
, len
, entity
, priv
);
2068 if (unlikely(ret
< 0)) {
2069 pr_debug("%s returns %d\n", __func__
, ret
);
2079 static int __ffs_data_do_entity(enum ffs_entity_type type
,
2080 u8
*valuep
, struct usb_descriptor_header
*desc
,
2083 struct ffs_desc_helper
*helper
= priv
;
2084 struct usb_endpoint_descriptor
*d
;
2089 case FFS_DESCRIPTOR
:
		/*
		 * Interfaces are indexed from zero, so if we encountered
		 * interface "n" then there are at least "n+1" interfaces.
		 */
2098 if (*valuep
>= helper
->interfaces_count
)
2099 helper
->interfaces_count
= *valuep
+ 1;
2104 * Strings are indexed from 1 (0 is reserved
2105 * for languages list)
2107 if (*valuep
> helper
->ffs
->strings_count
)
2108 helper
->ffs
->strings_count
= *valuep
;
2113 helper
->eps_count
++;
2114 if (helper
->eps_count
>= FFS_MAX_EPS_COUNT
)
2116 /* Check if descriptors for any speed were already parsed */
2117 if (!helper
->ffs
->eps_count
&& !helper
->ffs
->interfaces_count
)
2118 helper
->ffs
->eps_addrmap
[helper
->eps_count
] =
2119 d
->bEndpointAddress
;
2120 else if (helper
->ffs
->eps_addrmap
[helper
->eps_count
] !=
2121 d
->bEndpointAddress
)
2129 static int __ffs_do_os_desc_header(enum ffs_os_desc_type
*next_type
,
2130 struct usb_os_desc_header
*desc
)
2132 u16 bcd_version
= le16_to_cpu(desc
->bcdVersion
);
2133 u16 w_index
= le16_to_cpu(desc
->wIndex
);
2135 if (bcd_version
!= 1) {
2136 pr_vdebug("unsupported os descriptors version: %d",
2142 *next_type
= FFS_OS_DESC_EXT_COMPAT
;
2145 *next_type
= FFS_OS_DESC_EXT_PROP
;
2148 pr_vdebug("unsupported os descriptor type: %d", w_index
);
2152 return sizeof(*desc
);
2156 * Process all extended compatibility/extended property descriptors
2157 * of a feature descriptor
2159 static int __must_check
ffs_do_single_os_desc(char *data
, unsigned len
,
2160 enum ffs_os_desc_type type
,
2162 ffs_os_desc_callback entity
,
2164 struct usb_os_desc_header
*h
)
2167 const unsigned _len
= len
;
2171 /* loop over all ext compat/ext prop descriptors */
2172 while (feature_count
--) {
2173 ret
= entity(type
, h
, data
, len
, priv
);
2174 if (unlikely(ret
< 0)) {
2175 pr_debug("bad OS descriptor, type: %d\n", type
);
2184 /* Process a number of complete Feature Descriptors (Ext Compat or Ext Prop) */
2185 static int __must_check
ffs_do_os_descs(unsigned count
,
2186 char *data
, unsigned len
,
2187 ffs_os_desc_callback entity
, void *priv
)
2189 const unsigned _len
= len
;
2190 unsigned long num
= 0;
2194 for (num
= 0; num
< count
; ++num
) {
2196 enum ffs_os_desc_type type
;
2198 struct usb_os_desc_header
*desc
= (void *)data
;
2200 if (len
< sizeof(*desc
))
2204 * Record "descriptor" entity.
2205 * Process dwLength, bcdVersion, wIndex, get b/wCount.
2206 * Move the data pointer to the beginning of extended
2207 * compatibilities proper or extended properties proper
2208 * portions of the data
2210 if (le32_to_cpu(desc
->dwLength
) > len
)
2213 ret
= __ffs_do_os_desc_header(&type
, desc
);
2214 if (unlikely(ret
< 0)) {
2215 pr_debug("entity OS_DESCRIPTOR(%02lx); ret = %d\n",
2220 * 16-bit hex "?? 00" Little Endian looks like 8-bit hex "??"
2222 feature_count
= le16_to_cpu(desc
->wCount
);
2223 if (type
== FFS_OS_DESC_EXT_COMPAT
&&
2224 (feature_count
> 255 || desc
->Reserved
))
2230 * Process all function/property descriptors
2231 * of this Feature Descriptor
2233 ret
= ffs_do_single_os_desc(data
, len
, type
,
2234 feature_count
, entity
, priv
, desc
);
2235 if (unlikely(ret
< 0)) {
2236 pr_debug("%s returns %d\n", __func__
, ret
);
2247 * Validate contents of the buffer from userspace related to OS descriptors.
2249 static int __ffs_data_do_os_desc(enum ffs_os_desc_type type
,
2250 struct usb_os_desc_header
*h
, void *data
,
2251 unsigned len
, void *priv
)
2253 struct ffs_data
*ffs
= priv
;
2259 case FFS_OS_DESC_EXT_COMPAT
: {
2260 struct usb_ext_compat_desc
*d
= data
;
2263 if (len
< sizeof(*d
) ||
2264 d
->bFirstInterfaceNumber
>= ffs
->interfaces_count
)
2266 if (d
->Reserved1
!= 1) {
2268 * According to the spec, Reserved1 must be set to 1
2269 * but older kernels incorrectly rejected non-zero
2270 * values. We fix it here to avoid returning EINVAL
2271 * in response to values we used to accept.
2273 pr_debug("usb_ext_compat_desc::Reserved1 forced to 1\n");
2276 for (i
= 0; i
< ARRAY_SIZE(d
->Reserved2
); ++i
)
2277 if (d
->Reserved2
[i
])
2280 length
= sizeof(struct usb_ext_compat_desc
);
2283 case FFS_OS_DESC_EXT_PROP
: {
2284 struct usb_ext_prop_desc
*d
= data
;
2288 if (len
< sizeof(*d
) || h
->interface
>= ffs
->interfaces_count
)
2290 length
= le32_to_cpu(d
->dwSize
);
2293 type
= le32_to_cpu(d
->dwPropertyDataType
);
2294 if (type
< USB_EXT_PROP_UNICODE
||
2295 type
> USB_EXT_PROP_UNICODE_MULTI
) {
2296 pr_vdebug("unsupported os descriptor property type: %d",
2300 pnl
= le16_to_cpu(d
->wPropertyNameLength
);
2301 if (length
< 14 + pnl
) {
2302 pr_vdebug("invalid os descriptor length: %d pnl:%d (descriptor %d)\n",
2306 pdl
= le32_to_cpu(*(__le32
*)((u8
*)data
+ 10 + pnl
));
2307 if (length
!= 14 + pnl
+ pdl
) {
2308 pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n",
2309 length
, pnl
, pdl
, type
);
2312 ++ffs
->ms_os_descs_ext_prop_count
;
2313 /* property name reported to the host as "WCHAR"s */
2314 ffs
->ms_os_descs_ext_prop_name_len
+= pnl
* 2;
2315 ffs
->ms_os_descs_ext_prop_data_len
+= pdl
;
2319 pr_vdebug("unknown descriptor: %d\n", type
);
2325 static int __ffs_data_got_descs(struct ffs_data
*ffs
,
2326 char *const _data
, size_t len
)
2328 char *data
= _data
, *raw_descs
;
2329 unsigned os_descs_count
= 0, counts
[3], flags
;
2330 int ret
= -EINVAL
, i
;
2331 struct ffs_desc_helper helper
;
2335 if (get_unaligned_le32(data
+ 4) != len
)
2338 switch (get_unaligned_le32(data
)) {
2339 case FUNCTIONFS_DESCRIPTORS_MAGIC
:
2340 flags
= FUNCTIONFS_HAS_FS_DESC
| FUNCTIONFS_HAS_HS_DESC
;
2344 case FUNCTIONFS_DESCRIPTORS_MAGIC_V2
:
2345 flags
= get_unaligned_le32(data
+ 8);
2346 ffs
->user_flags
= flags
;
2347 if (flags
& ~(FUNCTIONFS_HAS_FS_DESC
|
2348 FUNCTIONFS_HAS_HS_DESC
|
2349 FUNCTIONFS_HAS_SS_DESC
|
2350 FUNCTIONFS_HAS_MS_OS_DESC
|
2351 FUNCTIONFS_VIRTUAL_ADDR
|
2352 FUNCTIONFS_EVENTFD
|
2353 FUNCTIONFS_ALL_CTRL_RECIP
|
2354 FUNCTIONFS_CONFIG0_SETUP
)) {
2365 if (flags
& FUNCTIONFS_EVENTFD
) {
2369 eventfd_ctx_fdget((int)get_unaligned_le32(data
));
2370 if (IS_ERR(ffs
->ffs_eventfd
)) {
2371 ret
= PTR_ERR(ffs
->ffs_eventfd
);
2372 ffs
->ffs_eventfd
= NULL
;
2379 /* Read fs_count, hs_count and ss_count (if present) */
2380 for (i
= 0; i
< 3; ++i
) {
2381 if (!(flags
& (1 << i
))) {
2383 } else if (len
< 4) {
2386 counts
[i
] = get_unaligned_le32(data
);
2391 if (flags
& (1 << i
)) {
2395 os_descs_count
= get_unaligned_le32(data
);
2400 /* Read descriptors */
2403 for (i
= 0; i
< 3; ++i
) {
2406 helper
.interfaces_count
= 0;
2407 helper
.eps_count
= 0;
2408 ret
= ffs_do_descs(counts
[i
], data
, len
,
2409 __ffs_data_do_entity
, &helper
);
2412 if (!ffs
->eps_count
&& !ffs
->interfaces_count
) {
2413 ffs
->eps_count
= helper
.eps_count
;
2414 ffs
->interfaces_count
= helper
.interfaces_count
;
2416 if (ffs
->eps_count
!= helper
.eps_count
) {
2420 if (ffs
->interfaces_count
!= helper
.interfaces_count
) {
2428 if (os_descs_count
) {
2429 ret
= ffs_do_os_descs(os_descs_count
, data
, len
,
2430 __ffs_data_do_os_desc
, ffs
);
2437 if (raw_descs
== data
|| len
) {
2442 ffs
->raw_descs_data
= _data
;
2443 ffs
->raw_descs
= raw_descs
;
2444 ffs
->raw_descs_length
= data
- raw_descs
;
2445 ffs
->fs_descs_count
= counts
[0];
2446 ffs
->hs_descs_count
= counts
[1];
2447 ffs
->ss_descs_count
= counts
[2];
2448 ffs
->ms_os_descs_count
= os_descs_count
;
2457 static int __ffs_data_got_strings(struct ffs_data
*ffs
,
2458 char *const _data
, size_t len
)
2460 u32 str_count
, needed_count
, lang_count
;
2461 struct usb_gadget_strings
**stringtabs
, *t
;
2462 const char *data
= _data
;
2463 struct usb_string
*s
;
2467 if (unlikely(len
< 16 ||
2468 get_unaligned_le32(data
) != FUNCTIONFS_STRINGS_MAGIC
||
2469 get_unaligned_le32(data
+ 4) != len
))
2471 str_count
= get_unaligned_le32(data
+ 8);
2472 lang_count
= get_unaligned_le32(data
+ 12);
2474 /* if one is zero the other must be zero */
2475 if (unlikely(!str_count
!= !lang_count
))
2478 /* Do we have at least as many strings as descriptors need? */
2479 needed_count
= ffs
->strings_count
;
2480 if (unlikely(str_count
< needed_count
))
	 * If we don't need any strings just return and free all memory.
2487 if (!needed_count
) {
2492 /* Allocate everything in one chunk so there's less maintenance. */
2496 vla_item(d
, struct usb_gadget_strings
*, stringtabs
,
2498 vla_item(d
, struct usb_gadget_strings
, stringtab
, lang_count
);
2499 vla_item(d
, struct usb_string
, strings
,
2500 lang_count
*(needed_count
+1));
2502 char *vlabuf
= kmalloc(vla_group_size(d
), GFP_KERNEL
);
2504 if (unlikely(!vlabuf
)) {
2509 /* Initialize the VLA pointers */
2510 stringtabs
= vla_ptr(vlabuf
, d
, stringtabs
);
2511 t
= vla_ptr(vlabuf
, d
, stringtab
);
2514 *stringtabs
++ = t
++;
2518 /* stringtabs = vlabuf = d_stringtabs for later kfree */
2519 stringtabs
= vla_ptr(vlabuf
, d
, stringtabs
);
2520 t
= vla_ptr(vlabuf
, d
, stringtab
);
2521 s
= vla_ptr(vlabuf
, d
, strings
);
2524 /* For each language */
2528 do { /* lang_count > 0 so we can use do-while */
2529 unsigned needed
= needed_count
;
2531 if (unlikely(len
< 3))
2533 t
->language
= get_unaligned_le16(data
);
2540 /* For each string */
2541 do { /* str_count > 0 so we can use do-while */
2542 size_t length
= strnlen(data
, len
);
2544 if (unlikely(length
== len
))
			 * User may provide more strings than we need,
2549 * if that's the case we simply ignore the
2552 if (likely(needed
)) {
2554 * s->id will be set while adding
2555 * function to configuration so for
2556 * now just leave garbage here.
2565 } while (--str_count
);
2567 s
->id
= 0; /* terminator */
2571 } while (--lang_count
);
2573 /* Some garbage left? */
2578 ffs
->stringtabs
= stringtabs
;
2579 ffs
->raw_strings
= _data
;
2591 /* Events handling and management *******************************************/
2593 static void __ffs_event_add(struct ffs_data
*ffs
,
2594 enum usb_functionfs_event_type type
)
2596 enum usb_functionfs_event_type rem_type1
, rem_type2
= type
;
2600 * Abort any unhandled setup
2602 * We do not need to worry about some cmpxchg() changing value
2603 * of ffs->setup_state without holding the lock because when
2604 * state is FFS_SETUP_PENDING cmpxchg() in several places in
2605 * the source does nothing.
2607 if (ffs
->setup_state
== FFS_SETUP_PENDING
)
2608 ffs
->setup_state
= FFS_SETUP_CANCELLED
;
	/*
	 * Logic of this function guarantees that there are at most four pending
	 * events on the ffs->ev.types queue.  This is important because the
	 * queue has space for four elements only and __ffs_ep0_read_events
	 * depends on that limit as well.  If more event types are added, those
	 * limits have to be revisited or guaranteed to still hold.
	 */
2618 case FUNCTIONFS_RESUME
:
2619 rem_type2
= FUNCTIONFS_SUSPEND
;
2621 case FUNCTIONFS_SUSPEND
:
2622 case FUNCTIONFS_SETUP
:
2624 /* Discard all similar events */
2627 case FUNCTIONFS_BIND
:
2628 case FUNCTIONFS_UNBIND
:
2629 case FUNCTIONFS_DISABLE
:
2630 case FUNCTIONFS_ENABLE
:
		/* Discard everything other than power management. */
2632 rem_type1
= FUNCTIONFS_SUSPEND
;
2633 rem_type2
= FUNCTIONFS_RESUME
;
2638 WARN(1, "%d: unknown event, this should not happen\n", type
);
2643 u8
*ev
= ffs
->ev
.types
, *out
= ev
;
2644 unsigned n
= ffs
->ev
.count
;
2645 for (; n
; --n
, ++ev
)
2646 if ((*ev
== rem_type1
|| *ev
== rem_type2
) == neg
)
2649 pr_vdebug("purging event %d\n", *ev
);
2650 ffs
->ev
.count
= out
- ffs
->ev
.types
;
2653 pr_vdebug("adding event %d\n", type
);
2654 ffs
->ev
.types
[ffs
->ev
.count
++] = type
;
2655 wake_up_locked(&ffs
->ev
.waitq
);
2656 if (ffs
->ffs_eventfd
)
2657 eventfd_signal(ffs
->ffs_eventfd
, 1);
2660 static void ffs_event_add(struct ffs_data
*ffs
,
2661 enum usb_functionfs_event_type type
)
2663 unsigned long flags
;
2664 spin_lock_irqsave(&ffs
->ev
.waitq
.lock
, flags
);
2665 __ffs_event_add(ffs
, type
);
2666 spin_unlock_irqrestore(&ffs
->ev
.waitq
.lock
, flags
);
2669 /* Bind/unbind USB function hooks *******************************************/
2671 static int ffs_ep_addr2idx(struct ffs_data
*ffs
, u8 endpoint_address
)
2675 for (i
= 1; i
< ARRAY_SIZE(ffs
->eps_addrmap
); ++i
)
2676 if (ffs
->eps_addrmap
[i
] == endpoint_address
)

static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
				    struct usb_descriptor_header *desc,
				    void *priv)
{
	struct usb_endpoint_descriptor *ds = (void *)desc;
	struct ffs_function *func = priv;
	struct ffs_ep *ffs_ep;
	unsigned ep_desc_id;
	int idx;
	static const char *speed_names[] = { "full", "high", "super" };

	if (type != FFS_DESCRIPTOR)
		return 0;

	/*
	 * If ss_descriptors is not NULL, we are reading super speed
	 * descriptors; if hs_descriptors is not NULL, we are reading high
	 * speed descriptors; otherwise, we are reading full speed
	 * descriptors.
	 */
	if (func->function.ss_descriptors) {
		ep_desc_id = 2;
		func->function.ss_descriptors[(long)valuep] = desc;
	} else if (func->function.hs_descriptors) {
		ep_desc_id = 1;
		func->function.hs_descriptors[(long)valuep] = desc;
	} else {
		ep_desc_id = 0;
		func->function.fs_descriptors[(long)valuep] = desc;
	}

	if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT)
		return 0;

	idx = ffs_ep_addr2idx(func->ffs, ds->bEndpointAddress) - 1;
	if (idx < 0)
		return idx;

	ffs_ep = func->eps + idx;

	if (unlikely(ffs_ep->descs[ep_desc_id])) {
		pr_err("two %sspeed descriptors for EP %d\n",
		       speed_names[ep_desc_id],
		       ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
		return -EINVAL;
	}
	ffs_ep->descs[ep_desc_id] = ds;

	ffs_dump_mem(": Original  ep desc", ds, ds->bLength);
	if (ffs_ep->ep) {
		ds->bEndpointAddress = ffs_ep->descs[0]->bEndpointAddress;
		if (!ds->wMaxPacketSize)
			ds->wMaxPacketSize = ffs_ep->descs[0]->wMaxPacketSize;
	} else {
		struct usb_request *req;
		struct usb_ep *ep;
		u8 bEndpointAddress;

		/*
		 * We back up bEndpointAddress because autoconfig overwrites
		 * it with physical endpoint address.
		 */
		bEndpointAddress = ds->bEndpointAddress;
		pr_vdebug("autoconfig\n");
		ep = usb_ep_autoconfig(func->gadget, ds);
		if (unlikely(!ep))
			return -ENOTSUPP;
		ep->driver_data = func->eps + idx;

		req = usb_ep_alloc_request(ep, GFP_KERNEL);
		if (unlikely(!req))
			return -ENOMEM;

		ffs_ep->ep  = ep;
		ffs_ep->req = req;
		func->eps_revmap[ds->bEndpointAddress &
				 USB_ENDPOINT_NUMBER_MASK] = idx + 1;
		/*
		 * If we use virtual address mapping, we restore
		 * original bEndpointAddress value.
		 */
		if (func->ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
			ds->bEndpointAddress = bEndpointAddress;
	}
	ffs_dump_mem(": Rewritten ep desc", ds, ds->bLength);

	return 0;
}
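
/*
 * Illustrative note (not part of this driver): the FUNCTIONFS_VIRTUAL_ADDR
 * handling above lets a user-space descriptor blob keep its own endpoint
 * numbering even when autoconfig picks different physical endpoints.  A
 * minimal, assumed sketch of opting in via the descriptors header written
 * to ep0 (only the fields shown here are filled in; .length and the
 * per-speed counts would follow in a real blob):
 *
 *	#include <endian.h>
 *	#include <linux/usb/functionfs.h>
 *
 *	struct usb_functionfs_descs_head_v2 header = {
 *		.magic = htole32(FUNCTIONFS_DESCRIPTORS_MAGIC_V2),
 *		.flags = htole32(FUNCTIONFS_HAS_FS_DESC |
 *				 FUNCTIONFS_VIRTUAL_ADDR),
 *	};
 */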

static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep,
				   struct usb_descriptor_header *desc,
				   void *priv)
{
	struct ffs_function *func = priv;
	unsigned idx;
	u8 newValue;

	switch (type) {
	default:
	case FFS_DESCRIPTOR:
		/* Handled in previous pass by __ffs_func_bind_do_descs() */
		return 0;

	case FFS_INTERFACE:
		idx = *valuep;
		if (func->interfaces_nums[idx] < 0) {
			int id = usb_interface_id(func->conf, &func->function);
			if (unlikely(id < 0))
				return id;
			func->interfaces_nums[idx] = id;
		}
		newValue = func->interfaces_nums[idx];
		break;

	case FFS_STRING:
		/* String IDs are allocated when ffs_data is bound to cdev */
		newValue = func->ffs->stringtabs[0]->strings[*valuep - 1].id;
		break;

	case FFS_ENDPOINT:
		/*
		 * USB_DT_ENDPOINT descriptors are handled in
		 * __ffs_func_bind_do_descs().
		 */
		if (desc->bDescriptorType == USB_DT_ENDPOINT)
			return 0;

		idx = (*valuep & USB_ENDPOINT_NUMBER_MASK) - 1;
		if (unlikely(!func->eps[idx].ep))
			return -EINVAL;

		{
			struct usb_endpoint_descriptor **descs;
			descs = func->eps[idx].descs;
			newValue = descs[descs[0] ? 0 : 1]->bEndpointAddress;
		}
		break;
	}

	pr_vdebug("%02x -> %02x\n", *valuep, newValue);
	*valuep = newValue;
	return 0;
}

static int __ffs_func_bind_do_os_desc(enum ffs_os_desc_type type,
				      struct usb_os_desc_header *h, void *data,
				      unsigned len, void *priv)
{
	struct ffs_function *func = priv;
	u8 length = 0;

	switch (type) {
	case FFS_OS_DESC_EXT_COMPAT: {
		struct usb_ext_compat_desc *desc = data;
		struct usb_os_desc_table *t;

		t = &func->function.os_desc_table[desc->bFirstInterfaceNumber];
		t->if_id = func->interfaces_nums[desc->bFirstInterfaceNumber];
		memcpy(t->os_desc->ext_compat_id, &desc->CompatibleID,
		       ARRAY_SIZE(desc->CompatibleID) +
		       ARRAY_SIZE(desc->SubCompatibleID));
		length = sizeof(*desc);
	}
		break;
	case FFS_OS_DESC_EXT_PROP: {
		struct usb_ext_prop_desc *desc = data;
		struct usb_os_desc_table *t;
		struct usb_os_desc_ext_prop *ext_prop;
		char *ext_prop_name;
		char *ext_prop_data;

		t = &func->function.os_desc_table[h->interface];
		t->if_id = func->interfaces_nums[h->interface];

		ext_prop = func->ffs->ms_os_descs_ext_prop_avail;
		func->ffs->ms_os_descs_ext_prop_avail += sizeof(*ext_prop);

		ext_prop->type = le32_to_cpu(desc->dwPropertyDataType);
		ext_prop->name_len = le16_to_cpu(desc->wPropertyNameLength);
		ext_prop->data_len = le32_to_cpu(*(__le32 *)
			usb_ext_prop_data_len_ptr(data, ext_prop->name_len));
		length = ext_prop->name_len + ext_prop->data_len + 14;

		ext_prop_name = func->ffs->ms_os_descs_ext_prop_name_avail;
		func->ffs->ms_os_descs_ext_prop_name_avail +=
			ext_prop->name_len;

		ext_prop_data = func->ffs->ms_os_descs_ext_prop_data_avail;
		func->ffs->ms_os_descs_ext_prop_data_avail +=
			ext_prop->data_len;

		memcpy(ext_prop_data,
		       usb_ext_prop_data_ptr(data, ext_prop->name_len),
		       ext_prop->data_len);
		/* unicode data reported to the host as "WCHAR"s */
		switch (ext_prop->type) {
		case USB_EXT_PROP_UNICODE:
		case USB_EXT_PROP_UNICODE_ENV:
		case USB_EXT_PROP_UNICODE_LINK:
		case USB_EXT_PROP_UNICODE_MULTI:
			ext_prop->data_len *= 2;
			break;
		}
		ext_prop->data = ext_prop_data;

		memcpy(ext_prop_name, usb_ext_prop_name_ptr(data),
		       ext_prop->name_len);
		/* property name reported to the host as "WCHAR"s */
		ext_prop->name_len *= 2;
		ext_prop->name = ext_prop_name;

		t->os_desc->ext_prop_len +=
			ext_prop->name_len + ext_prop->data_len + 14;
		++t->os_desc->ext_prop_count;
		list_add_tail(&ext_prop->entry, &t->os_desc->ext_prop);
	}
		break;
	default:
		pr_vdebug("unknown descriptor: %d\n", type);
	}

	return length;
}

static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f,
						struct usb_configuration *c)
{
	struct ffs_function *func = ffs_func_from_usb(f);
	struct f_fs_opts *ffs_opts =
		container_of(f->fi, struct f_fs_opts, func_inst);
	int ret;

	/*
	 * Legacy gadget triggers binding in functionfs_ready_callback,
	 * which already uses locking; taking the same lock here would
	 * cause a deadlock.
	 *
	 * Configfs-enabled gadgets however do need ffs_dev_lock.
	 */
	if (!ffs_opts->no_configfs)
		ffs_dev_lock();
	ret = ffs_opts->dev->desc_ready ? 0 : -ENODEV;
	func->ffs = ffs_opts->dev->ffs_data;
	if (!ffs_opts->no_configfs)
		ffs_dev_unlock();
	if (ret)
		return ERR_PTR(ret);

	func->conf = c;
	func->gadget = c->cdev->gadget;

	/*
	 * in drivers/usb/gadget/configfs.c:configfs_composite_bind()
	 * configurations are bound in sequence with list_for_each_entry,
	 * in each configuration its functions are bound in sequence
	 * with list_for_each_entry, so we assume no race condition
	 * with regard to ffs_opts->bound access
	 */
	if (!ffs_opts->refcnt) {
		ret = functionfs_bind(func->ffs, c->cdev);
		if (ret)
			return ERR_PTR(ret);
	}
	ffs_opts->refcnt++;
	func->function.strings = func->ffs->stringtabs;

	return ffs_opts;
}

static int _ffs_func_bind(struct usb_configuration *c,
			  struct usb_function *f)
{
	struct ffs_function *func = ffs_func_from_usb(f);
	struct ffs_data *ffs = func->ffs;

	const int full  = !!func->ffs->fs_descs_count;
	const int high  = !!func->ffs->hs_descs_count;
	const int super = !!func->ffs->ss_descs_count;

	int fs_len, hs_len, ss_len, ret, i;
	struct ffs_ep *eps_ptr;

	/* Make it a single chunk, less management later on */
	vla_group(d);
	vla_item_with_sz(d, struct ffs_ep, eps, ffs->eps_count);
	vla_item_with_sz(d, struct usb_descriptor_header *, fs_descs,
		full ? ffs->fs_descs_count + 1 : 0);
	vla_item_with_sz(d, struct usb_descriptor_header *, hs_descs,
		high ? ffs->hs_descs_count + 1 : 0);
	vla_item_with_sz(d, struct usb_descriptor_header *, ss_descs,
		super ? ffs->ss_descs_count + 1 : 0);
	vla_item_with_sz(d, short, inums, ffs->interfaces_count);
	vla_item_with_sz(d, struct usb_os_desc_table, os_desc_table,
			 c->cdev->use_os_string ? ffs->interfaces_count : 0);
	vla_item_with_sz(d, char[16], ext_compat,
			 c->cdev->use_os_string ? ffs->interfaces_count : 0);
	vla_item_with_sz(d, struct usb_os_desc, os_desc,
			 c->cdev->use_os_string ? ffs->interfaces_count : 0);
	vla_item_with_sz(d, struct usb_os_desc_ext_prop, ext_prop,
			 ffs->ms_os_descs_ext_prop_count);
	vla_item_with_sz(d, char, ext_prop_name,
			 ffs->ms_os_descs_ext_prop_name_len);
	vla_item_with_sz(d, char, ext_prop_data,
			 ffs->ms_os_descs_ext_prop_data_len);
	vla_item_with_sz(d, char, raw_descs, ffs->raw_descs_length);
	char *vlabuf;

	/* Has descriptors only for speeds the gadget does not support */
	if (unlikely(!(full | high | super)))
		return -ENOTSUPP;

	/* Allocate a single chunk, less management later on */
	vlabuf = kzalloc(vla_group_size(d), GFP_KERNEL);
	if (unlikely(!vlabuf))
		return -ENOMEM;

	ffs->ms_os_descs_ext_prop_avail = vla_ptr(vlabuf, d, ext_prop);
	ffs->ms_os_descs_ext_prop_name_avail =
		vla_ptr(vlabuf, d, ext_prop_name);
	ffs->ms_os_descs_ext_prop_data_avail =
		vla_ptr(vlabuf, d, ext_prop_data);

	/* Copy descriptors */
	memcpy(vla_ptr(vlabuf, d, raw_descs), ffs->raw_descs,
	       ffs->raw_descs_length);

	memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz);
	eps_ptr = vla_ptr(vlabuf, d, eps);
	for (i = 0; i < ffs->eps_count; i++)
		eps_ptr[i].num = -1;

	/*
	 * Save pointers:
	 * d_eps == vlabuf, func->eps used to kfree vlabuf later
	 */
	func->eps             = vla_ptr(vlabuf, d, eps);
	func->interfaces_nums = vla_ptr(vlabuf, d, inums);

	/*
	 * Go through all the endpoint descriptors and allocate
	 * endpoints first, so that later we can rewrite the endpoint
	 * numbers without worrying that it may be described later on.
	 */
	if (likely(full)) {
		func->function.fs_descriptors = vla_ptr(vlabuf, d, fs_descs);
		fs_len = ffs_do_descs(ffs->fs_descs_count,
				      vla_ptr(vlabuf, d, raw_descs),
				      d_raw_descs__sz,
				      __ffs_func_bind_do_descs, func);
		if (unlikely(fs_len < 0)) {
			ret = fs_len;
			goto error;
		}
	} else {
		fs_len = 0;
	}

	if (likely(high)) {
		func->function.hs_descriptors = vla_ptr(vlabuf, d, hs_descs);
		hs_len = ffs_do_descs(ffs->hs_descs_count,
				      vla_ptr(vlabuf, d, raw_descs) + fs_len,
				      d_raw_descs__sz - fs_len,
				      __ffs_func_bind_do_descs, func);
		if (unlikely(hs_len < 0)) {
			ret = hs_len;
			goto error;
		}
	} else {
		hs_len = 0;
	}

	if (likely(super)) {
		func->function.ss_descriptors = vla_ptr(vlabuf, d, ss_descs);
		ss_len = ffs_do_descs(ffs->ss_descs_count,
				vla_ptr(vlabuf, d, raw_descs) + fs_len + hs_len,
				d_raw_descs__sz - fs_len - hs_len,
				__ffs_func_bind_do_descs, func);
		if (unlikely(ss_len < 0)) {
			ret = ss_len;
			goto error;
		}
	} else {
		ss_len = 0;
	}

	/*
	 * Now handle interface numbers allocation and interface and
	 * endpoint numbers rewriting.  We can do that in one go
	 * now.
	 */
	ret = ffs_do_descs(ffs->fs_descs_count +
			   (high ? ffs->hs_descs_count : 0) +
			   (super ? ffs->ss_descs_count : 0),
			   vla_ptr(vlabuf, d, raw_descs), d_raw_descs__sz,
			   __ffs_func_bind_do_nums, func);
	if (unlikely(ret < 0))
		goto error;

	func->function.os_desc_table = vla_ptr(vlabuf, d, os_desc_table);
	if (c->cdev->use_os_string) {
		for (i = 0; i < ffs->interfaces_count; ++i) {
			struct usb_os_desc *desc;

			desc = func->function.os_desc_table[i].os_desc =
				vla_ptr(vlabuf, d, os_desc) +
				i * sizeof(struct usb_os_desc);
			desc->ext_compat_id =
				vla_ptr(vlabuf, d, ext_compat) + i * 16;
			INIT_LIST_HEAD(&desc->ext_prop);
		}
		ret = ffs_do_os_descs(ffs->ms_os_descs_count,
				      vla_ptr(vlabuf, d, raw_descs) +
				      fs_len + hs_len + ss_len,
				      d_raw_descs__sz - fs_len - hs_len -
				      ss_len,
				      __ffs_func_bind_do_os_desc, func);
		if (unlikely(ret < 0))
			goto error;
	}
	func->function.os_desc_n =
		c->cdev->use_os_string ? ffs->interfaces_count : 0;

	/* And we're done */
	ffs_event_add(ffs, FUNCTIONFS_BIND);
	return 0;

error:
	/* XXX Do we need to release all claimed endpoints here? */
	return ret;
}
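
/*
 * Illustrative sketch (an assumption-labelled simplification, not the actual
 * vla_* macros from u_f.h): the single kzalloc() above packs several
 * variable-length arrays into one chunk, conceptually equivalent to laying
 * regions out back to back and handing out pointers into the buffer:
 *
 *	size_t eps_off   = 0;
 *	size_t inums_off = eps_off + eps_count * sizeof(struct ffs_ep);
 *	size_t total     = inums_off + interfaces_count * sizeof(short);
 *	char *buf        = kzalloc(total, GFP_KERNEL);
 *	struct ffs_ep *eps = (struct ffs_ep *)(buf + eps_off);
 *	short *inums       = (short *)(buf + inums_off);
 *
 * Alignment is ignored here for brevity.  Because func->eps points at the
 * start of the chunk, freeing it later releases everything at once.
 */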

static int ffs_func_bind(struct usb_configuration *c,
			 struct usb_function *f)
{
	struct f_fs_opts *ffs_opts = ffs_do_functionfs_bind(f, c);
	struct ffs_function *func = ffs_func_from_usb(f);
	int ret;

	if (IS_ERR(ffs_opts))
		return PTR_ERR(ffs_opts);

	ret = _ffs_func_bind(c, f);
	if (ret && !--ffs_opts->refcnt)
		functionfs_unbind(func->ffs);

	return ret;
}

/* Other USB function hooks *************************************************/

static void ffs_reset_work(struct work_struct *work)
{
	struct ffs_data *ffs = container_of(work,
					    struct ffs_data, reset_work);
	ffs_data_reset(ffs);
}

static int ffs_func_set_alt(struct usb_function *f,
			    unsigned interface, unsigned alt)
{
	struct ffs_function *func = ffs_func_from_usb(f);
	struct ffs_data *ffs = func->ffs;
	int ret = 0, intf;

	if (alt != (unsigned)-1) {
		intf = ffs_func_revmap_intf(func, interface);
		if (unlikely(intf < 0))
			return intf;
	}

	if (ffs->func)
		ffs_func_eps_disable(ffs->func);

	if (ffs->state == FFS_DEACTIVATED) {
		ffs->state = FFS_CLOSING;
		INIT_WORK(&ffs->reset_work, ffs_reset_work);
		schedule_work(&ffs->reset_work);
		return -ENODEV;
	}

	if (ffs->state != FFS_ACTIVE)
		return -ENODEV;

	if (alt == (unsigned)-1) {
		ffs->func = NULL;
		ffs_event_add(ffs, FUNCTIONFS_DISABLE);
		return 0;
	}

	ffs->func = func;
	ret = ffs_func_eps_enable(func);
	if (likely(ret >= 0))
		ffs_event_add(ffs, FUNCTIONFS_ENABLE);
	return ret;
}

static void ffs_func_disable(struct usb_function *f)
{
	ffs_func_set_alt(f, 0, (unsigned)-1);
}

static int ffs_func_setup(struct usb_function *f,
			  const struct usb_ctrlrequest *creq)
{
	struct ffs_function *func = ffs_func_from_usb(f);
	struct ffs_data *ffs = func->ffs;
	unsigned long flags;
	int ret;

	pr_vdebug("creq->bRequestType = %02x\n", creq->bRequestType);
	pr_vdebug("creq->bRequest     = %02x\n", creq->bRequest);
	pr_vdebug("creq->wValue       = %04x\n", le16_to_cpu(creq->wValue));
	pr_vdebug("creq->wIndex       = %04x\n", le16_to_cpu(creq->wIndex));
	pr_vdebug("creq->wLength      = %04x\n", le16_to_cpu(creq->wLength));

	/*
	 * Most requests directed to an interface go through here
	 * (notable exceptions are set/get interface) so we need to
	 * handle them.  All others are either handled by composite or
	 * passed to usb_configuration->setup() (if one is set).  No
	 * matter, we will handle requests directed to an endpoint here
	 * as well (as it's straightforward).  Other request recipient
	 * types are only handled when the user flag
	 * FUNCTIONFS_ALL_CTRL_RECIP is set.
	 */
	if (ffs->state != FFS_ACTIVE)
		return -ENODEV;

	switch (creq->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_INTERFACE:
		ret = ffs_func_revmap_intf(func, le16_to_cpu(creq->wIndex));
		if (unlikely(ret < 0))
			return ret;
		break;

	case USB_RECIP_ENDPOINT:
		ret = ffs_func_revmap_ep(func, le16_to_cpu(creq->wIndex));
		if (unlikely(ret < 0))
			return ret;
		if (func->ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
			ret = func->ffs->eps_addrmap[ret];
		break;

	default:
		if (func->ffs->user_flags & FUNCTIONFS_ALL_CTRL_RECIP)
			ret = le16_to_cpu(creq->wIndex);
		else
			return -EOPNOTSUPP;
	}

	spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
	ffs->ev.setup = *creq;
	ffs->ev.setup.wIndex = cpu_to_le16(ret);
	__ffs_event_add(ffs, FUNCTIONFS_SETUP);
	spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);

	return USB_GADGET_DELAYED_STATUS;
}
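
/*
 * Illustrative sketch (not part of this driver): once the FUNCTIONFS_SETUP
 * event queued above reaches user space, the daemon services the control
 * transfer on ep0 itself: write() for an IN request, read() for an OUT
 * request (or to acknowledge a zero-length one); issuing the opposite
 * direction call stalls the request.  handle_setup() and the fixed-size
 * buffer below are assumed, minimal examples.
 *
 *	#include <endian.h>
 *	#include <unistd.h>
 *	#include <linux/usb/ch9.h>
 *
 *	static void handle_setup(int ep0_fd,
 *				 const struct usb_ctrlrequest *setup)
 *	{
 *		char buf[256];
 *		unsigned len = le16toh(setup->wLength);
 *
 *		if (len > sizeof(buf))
 *			len = sizeof(buf);
 *
 *		if (setup->bRequestType & USB_DIR_IN)
 *			write(ep0_fd, buf, len);   // data/status stage to host
 *		else
 *			read(ep0_fd, buf, len);    // data/status stage from host
 *	}
 */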

static bool ffs_func_req_match(struct usb_function *f,
			       const struct usb_ctrlrequest *creq,
			       bool config0)
{
	struct ffs_function *func = ffs_func_from_usb(f);

	if (config0 && !(func->ffs->user_flags & FUNCTIONFS_CONFIG0_SETUP))
		return false;

	switch (creq->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_INTERFACE:
		return (ffs_func_revmap_intf(func,
					     le16_to_cpu(creq->wIndex)) >= 0);
	case USB_RECIP_ENDPOINT:
		return (ffs_func_revmap_ep(func,
					   le16_to_cpu(creq->wIndex)) >= 0);
	default:
		return (bool) (func->ffs->user_flags &
			       FUNCTIONFS_ALL_CTRL_RECIP);
	}
}

static void ffs_func_suspend(struct usb_function *f)
{
	ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_SUSPEND);
}

static void ffs_func_resume(struct usb_function *f)
{
	ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_RESUME);
}

/* Endpoint and interface numbers reverse mapping ***************************/

static int ffs_func_revmap_ep(struct ffs_function *func, u8 num)
{
	num = func->eps_revmap[num & USB_ENDPOINT_NUMBER_MASK];
	return num ? num : -EDOM;
}

static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf)
{
	short *nums = func->interfaces_nums;
	unsigned count = func->ffs->interfaces_count;

	for (; count; --count, ++nums) {
		if (*nums >= 0 && *nums == intf)
			return nums - func->interfaces_nums;
	}

	return -EDOM;
}

/* Devices management *******************************************************/

static LIST_HEAD(ffs_devices);

static struct ffs_dev *_ffs_do_find_dev(const char *name)
{
	struct ffs_dev *dev;

	if (!name)
		return NULL;

	list_for_each_entry(dev, &ffs_devices, entry) {
		if (strcmp(dev->name, name) == 0)
			return dev;
	}

	return NULL;
}

/*
 * ffs_lock must be taken by the caller of this function
 */
static struct ffs_dev *_ffs_get_single_dev(void)
{
	struct ffs_dev *dev;

	if (list_is_singular(&ffs_devices)) {
		dev = list_first_entry(&ffs_devices, struct ffs_dev, entry);
		if (dev->single)
			return dev;
	}

	return NULL;
}

/*
 * ffs_lock must be taken by the caller of this function
 */
static struct ffs_dev *_ffs_find_dev(const char *name)
{
	struct ffs_dev *dev;

	dev = _ffs_get_single_dev();
	if (dev)
		return dev;

	return _ffs_do_find_dev(name);
}

/* Configfs support *********************************************************/

static inline struct f_fs_opts *to_ffs_opts(struct config_item *item)
{
	return container_of(to_config_group(item), struct f_fs_opts,
			    func_inst.group);
}

static void ffs_attr_release(struct config_item *item)
{
	struct f_fs_opts *opts = to_ffs_opts(item);

	usb_put_function_instance(&opts->func_inst);
}

static struct configfs_item_operations ffs_item_ops = {
	.release	= ffs_attr_release,
};

static const struct config_item_type ffs_func_type = {
	.ct_item_ops	= &ffs_item_ops,
	.ct_owner	= THIS_MODULE,
};
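
/*
 * Illustrative sketch (not part of this driver): with configfs, an "ffs"
 * function instance is created and then backed by a functionfs mount whose
 * device name matches the instance name.  The gadget name "g1", instance
 * name "my_func", mount point and daemon name below are assumed examples.
 *
 *	# cd /sys/kernel/config/usb_gadget/g1
 *	# mkdir functions/ffs.my_func
 *	# ln -s functions/ffs.my_func configs/c.1/
 *	# mkdir -p /dev/usb-ffs/my_func
 *	# mount -t functionfs my_func /dev/usb-ffs/my_func
 *	# my_ffs_daemon /dev/usb-ffs/my_func &   # writes descriptors to ep0
 *	# echo "$UDC" > UDC                      # bind only after ffs_ready()
 */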

/* Function registration interface ******************************************/

static void ffs_free_inst(struct usb_function_instance *f)
{
	struct f_fs_opts *opts;

	opts = to_f_fs_opts(f);
	ffs_dev_lock();
	_ffs_free_dev(opts->dev);
	ffs_dev_unlock();
	kfree(opts);
}

static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name)
{
	if (strlen(name) >= FIELD_SIZEOF(struct ffs_dev, name))
		return -ENAMETOOLONG;
	return ffs_name_dev(to_f_fs_opts(fi)->dev, name);
}

static struct usb_function_instance *ffs_alloc_inst(void)
{
	struct f_fs_opts *opts;
	struct ffs_dev *dev;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return ERR_PTR(-ENOMEM);

	opts->func_inst.set_inst_name = ffs_set_inst_name;
	opts->func_inst.free_func_inst = ffs_free_inst;

	ffs_dev_lock();
	dev = _ffs_alloc_dev();
	ffs_dev_unlock();
	if (IS_ERR(dev)) {
		kfree(opts);
		return ERR_CAST(dev);
	}
	opts->dev = dev;
	dev->opts = opts;

	config_group_init_type_name(&opts->func_inst.group, "",
				    &ffs_func_type);
	return &opts->func_inst;
}

static void ffs_free(struct usb_function *f)
{
	kfree(ffs_func_from_usb(f));
}

static void ffs_func_unbind(struct usb_configuration *c,
			    struct usb_function *f)
{
	struct ffs_function *func = ffs_func_from_usb(f);
	struct ffs_data *ffs = func->ffs;
	struct f_fs_opts *opts =
		container_of(f->fi, struct f_fs_opts, func_inst);
	struct ffs_ep *ep = func->eps;
	unsigned count = ffs->eps_count;
	unsigned long flags;

	if (ffs->func == func) {
		ffs_func_eps_disable(func);
		ffs->func = NULL;
	}

	if (!--opts->refcnt)
		functionfs_unbind(ffs);

	/* cleanup after autoconfig */
	spin_lock_irqsave(&func->ffs->eps_lock, flags);
	while (count--) {
		if (ep->ep && ep->req)
			usb_ep_free_request(ep->ep, ep->req);
		ep->req = NULL;
		++ep;
	}
	spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
	kfree(func->eps);
	func->eps = NULL;

	/*
	 * eps, descriptors and interfaces_nums are allocated in the
	 * same chunk so only one free is required.
	 */
	func->function.fs_descriptors = NULL;
	func->function.hs_descriptors = NULL;
	func->function.ss_descriptors = NULL;
	func->interfaces_nums = NULL;

	ffs_event_add(ffs, FUNCTIONFS_UNBIND);
}

static struct usb_function *ffs_alloc(struct usb_function_instance *fi)
{
	struct ffs_function *func;

	func = kzalloc(sizeof(*func), GFP_KERNEL);
	if (unlikely(!func))
		return ERR_PTR(-ENOMEM);

	func->function.name    = "Function FS Gadget";

	func->function.bind    = ffs_func_bind;
	func->function.unbind  = ffs_func_unbind;
	func->function.set_alt = ffs_func_set_alt;
	func->function.disable = ffs_func_disable;
	func->function.setup   = ffs_func_setup;
	func->function.req_match = ffs_func_req_match;
	func->function.suspend = ffs_func_suspend;
	func->function.resume  = ffs_func_resume;
	func->function.free_func = ffs_free;

	return &func->function;
}

/*
 * ffs_lock must be taken by the caller of this function
 */
static struct ffs_dev *_ffs_alloc_dev(void)
{
	struct ffs_dev *dev;
	int ret;

	if (_ffs_get_single_dev())
		return ERR_PTR(-EBUSY);

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	if (list_empty(&ffs_devices)) {
		ret = functionfs_init();
		if (ret) {
			kfree(dev);
			return ERR_PTR(ret);
		}
	}

	list_add(&dev->entry, &ffs_devices);

	return dev;
}

int ffs_name_dev(struct ffs_dev *dev, const char *name)
{
	struct ffs_dev *existing;
	int ret = 0;

	ffs_dev_lock();

	existing = _ffs_do_find_dev(name);
	if (!existing)
		strlcpy(dev->name, name, ARRAY_SIZE(dev->name));
	else if (existing != dev)
		ret = -EBUSY;

	ffs_dev_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(ffs_name_dev);

int ffs_single_dev(struct ffs_dev *dev)
{
	int ret;

	ret = 0;
	ffs_dev_lock();

	if (!list_is_singular(&ffs_devices))
		ret = -EBUSY;
	else
		dev->single = true;

	ffs_dev_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(ffs_single_dev);

/*
 * ffs_lock must be taken by the caller of this function
 */
static void _ffs_free_dev(struct ffs_dev *dev)
{
	list_del(&dev->entry);

	/* Clear the private_data pointer to stop incorrect dev access */
	if (dev->ffs_data)
		dev->ffs_data->private_data = NULL;

	kfree(dev);
	if (list_empty(&ffs_devices))
		functionfs_cleanup();
}

static void *ffs_acquire_dev(const char *dev_name)
{
	struct ffs_dev *ffs_dev;

	ffs_dev_lock();

	ffs_dev = _ffs_find_dev(dev_name);
	if (!ffs_dev)
		ffs_dev = ERR_PTR(-ENOENT);
	else if (ffs_dev->mounted)
		ffs_dev = ERR_PTR(-EBUSY);
	else if (ffs_dev->ffs_acquire_dev_callback &&
		 ffs_dev->ffs_acquire_dev_callback(ffs_dev))
		ffs_dev = ERR_PTR(-ENOENT);
	else
		ffs_dev->mounted = true;

	ffs_dev_unlock();
	return ffs_dev;
}

static void ffs_release_dev(struct ffs_data *ffs_data)
{
	struct ffs_dev *ffs_dev;

	ffs_dev_lock();

	ffs_dev = ffs_data->private_data;
	if (ffs_dev) {
		ffs_dev->mounted = false;

		if (ffs_dev->ffs_release_dev_callback)
			ffs_dev->ffs_release_dev_callback(ffs_dev);
	}

	ffs_dev_unlock();
}

static int ffs_ready(struct ffs_data *ffs)
{
	struct ffs_dev *ffs_obj;
	int ret = 0;

	ffs_dev_lock();

	ffs_obj = ffs->private_data;
	if (!ffs_obj) {
		ret = -EINVAL;
		goto done;
	}
	if (WARN_ON(ffs_obj->desc_ready)) {
		ret = -EBUSY;
		goto done;
	}

	ffs_obj->desc_ready = true;
	ffs_obj->ffs_data = ffs;

	if (ffs_obj->ffs_ready_callback) {
		ret = ffs_obj->ffs_ready_callback(ffs);
		if (ret)
			goto done;
	}

	set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
done:
	ffs_dev_unlock();
	return ret;
}

static void ffs_closed(struct ffs_data *ffs)
{
	struct ffs_dev *ffs_obj;
	struct f_fs_opts *opts;
	struct config_item *ci;

	ffs_dev_lock();

	ffs_obj = ffs->private_data;
	if (!ffs_obj)
		goto done;

	ffs_obj->desc_ready = false;
	ffs_obj->ffs_data = NULL;

	if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags) &&
	    ffs_obj->ffs_closed_callback)
		ffs_obj->ffs_closed_callback(ffs);

	if (ffs_obj->opts)
		opts = ffs_obj->opts;
	else
		goto done;

	if (opts->no_configfs || !opts->func_inst.group.cg_item.ci_parent
	    || !kref_read(&opts->func_inst.group.cg_item.ci_kref))
		goto done;

	ci = opts->func_inst.group.cg_item.ci_parent->ci_parent;
	ffs_dev_unlock();

	if (test_bit(FFS_FL_BOUND, &ffs->flags))
		unregister_gadget_item(ci);
	return;
done:
	ffs_dev_unlock();
}

/* Misc helper functions ****************************************************/

static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
{
	return nonblock
		? likely(mutex_trylock(mutex)) ? 0 : -EAGAIN
		: mutex_lock_interruptible(mutex);
}

static char *ffs_prepare_buffer(const char __user *buf, size_t len)
{
	char *data;

	if (unlikely(!len))
		return NULL;

	data = kmalloc(len, GFP_KERNEL);
	if (unlikely(!data))
		return ERR_PTR(-ENOMEM);

	if (unlikely(copy_from_user(data, buf, len))) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}

	pr_vdebug("Buffer from user space:\n");
	ffs_dump_mem("", data, len);

	return data;
}

DECLARE_USB_FUNCTION_INIT(ffs, ffs_alloc_inst, ffs_alloc);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Nazarewicz");