/*
 * f_mass_storage.c -- Mass Storage USB Composite Function
 *
 * Copyright (C) 2003-2008 Alan Stern
 * Copyright (C) 2009 Samsung Electronics
 *                    Author: Michal Nazarewicz <mina86@mina86.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * The Mass Storage Function acts as a USB Mass Storage device,
 * appearing to the host as a disk drive or as a CD-ROM drive.  In
 * addition to providing an example of a genuinely useful composite
 * function for a USB device, it also illustrates a technique of
 * double-buffering for increased throughput.
 *
 * For more information about MSF and in particular its module
 * parameters and sysfs interface read the
 * <Documentation/usb/mass-storage.txt> file.
 *
 * MSF is configured by specifying a fsg_config structure.  It has the
 * following fields:
 *
 *	nluns		Number of LUNs the function has (anywhere from 1
 *				to FSG_MAX_LUNS).
 *	luns		An array of LUN configuration values.  This
 *				should be filled for each LUN that the
 *				function will include (i.e. for "nluns"
 *				LUNs).  Each element of the array has
 *				the following fields:
 *	->filename	The path to the backing file for the LUN.
 *				Required if the LUN is not marked as
 *				removable.
 *	->ro		Flag specifying that access to the LUN shall be
 *				read-only.  This is implied if CD-ROM
 *				emulation is enabled as well as when
 *				it was impossible to open "filename"
 *				in R/W mode.
 *	->removable	Flag specifying that the LUN shall be indicated as
 *				being removable.
 *	->cdrom		Flag specifying that the LUN shall be reported as
 *				being a CD-ROM.
 *	->nofua		Flag specifying that the FUA flag in SCSI WRITE(10,12)
 *				commands for this LUN shall be ignored.
 *
 *	vendor_name
 *	product_name
 *	release		Information used as a reply to the INQUIRY
 *				request.  To use the defaults set these to
 *				NULL, NULL and 0xffff respectively.  The first
 *				field should be 8 and the second 16
 *				characters or less.
 *
 *	can_stall	Set to permit the function to halt bulk endpoints.
 *				Disabled on some USB devices known not
 *				to work correctly.  You should set it
 *				to true.
 *
 * If "removable" is not set for a LUN then a backing file must be
 * specified.  If it is set, then a NULL filename means the LUN's medium
 * is not loaded (an empty string as "filename" in the fsg_config
 * structure causes an error).  The CD-ROM emulation includes a single
 * data track and no audio tracks; hence there need be only one
 * backing file per LUN.
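 *
 * As a rough illustration only (the field names are those described
 * above; the values and the surrounding code are hypothetical and not
 * part of this driver), a two-LUN setup might be filled in like so:
 *
 *	struct fsg_config cfg = { .nluns = 2 };
 *	cfg.luns[0].filename  = "/path/to/disk.img";
 *	cfg.luns[0].removable = 1;
 *	cfg.luns[1].filename  = "/path/to/image.iso";
 *	cfg.luns[1].cdrom     = 1;	(implies read-only)
 *	cfg.can_stall = 1;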
 *
 * This function is heavily based on "File-backed Storage Gadget" by
 * Alan Stern which in turn is heavily based on "Gadget Zero" by David
 * Brownell.  The driver's SCSI command interface was based on the
 * "Information technology - Small Computer System Interface - 2"
 * document from X3T9.2 Project 375D, Revision 10L, 7-SEP-93,
 * available at <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>.
 * The single exception is opcode 0x23 (READ FORMAT CAPACITIES), which
 * was based on the "Universal Serial Bus Mass Storage Class UFI
 * Command Specification" document, Revision 1.0, December 14, 1998,
 * available at
 * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>.
 *
 * The MSF is fairly straightforward.  There is a main kernel
 * thread that handles most of the work.  Interrupt routines field
 * callbacks from the controller driver: bulk- and interrupt-request
 * completion notifications, endpoint-0 events, and disconnect events.
 * Completion events are passed to the main thread by wakeup calls.  Many
 * ep0 requests are handled at interrupt time, but SetInterface,
 * SetConfiguration, and device reset requests are forwarded to the
 * thread in the form of "exceptions" using SIGUSR1 signals (since they
 * should interrupt any ongoing file I/O operations).
 *
 * The thread's main routine implements the standard command/data/status
 * parts of a SCSI interaction.  It and its subroutines are full of tests
 * for pending signals/exceptions -- all this polling is necessary since
 * the kernel has no setjmp/longjmp equivalents.  (Maybe this is an
 * indication that the driver really wants to be running in userspace.)
 * An important point is that so long as the thread is alive it keeps an
 * open reference to the backing file.  This will prevent unmounting
 * the backing file's underlying filesystem and could cause problems
 * during system shutdown, for example.  To prevent such problems, the
 * thread catches INT, TERM, and KILL signals and converts them into
 * an EXIT exception.
 *
 * In normal operation the main thread is started during the gadget's
 * fsg_bind() callback and stopped during fsg_unbind().  But it can
 * also exit when it receives a signal, and there's no point leaving
 * the gadget running when the thread is dead.  As of this moment, MSF
 * provides no way to deregister the gadget when the thread dies -- maybe
 * a callback function is needed.
 *
 * To provide maximum throughput, the driver uses a circular pipeline of
 * buffer heads (struct fsg_buffhd).  In principle the pipeline can be
 * arbitrarily long; in practice the benefits don't justify having more
 * than 2 stages (i.e., double buffering).  But it helps to think of the
 * pipeline as being a long one.  Each buffer head contains a bulk-in and
 * a bulk-out request pointer (since the buffer can be used for both
 * output and input -- directions always are given from the host's
 * point of view) as well as a pointer to the buffer and various state
 * variables.
 *
 * Use of the pipeline follows a simple protocol.  There is a variable
 * (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
 * At any time that buffer head may still be in use from an earlier
 * request, so each buffer head has a state variable indicating whether
 * it is EMPTY, FULL, or BUSY.  Typical use involves waiting for the
 * buffer head to be EMPTY, filling the buffer either by file I/O or by
 * USB I/O (during which the buffer head is BUSY), and marking the buffer
 * head FULL when the I/O is complete.  Then the buffer will be emptied
 * (again possibly by USB I/O, during which it is marked BUSY) and
 * finally marked EMPTY again (possibly by a completion routine).
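 *
 * Schematically each buffer head cycles through the states described
 * above (this is just a restatement of the protocol, not extra states):
 *
 *	EMPTY -> BUSY (file or USB I/O in flight) -> FULL
 *	      -> BUSY (being drained by the other side) -> EMPTY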
 *
 * A module parameter tells the driver to avoid stalling the bulk
 * endpoints wherever the transport specification allows.  This is
 * necessary for some UDCs like the SuperH, which cannot reliably clear a
 * halt on a bulk endpoint.  However, under certain circumstances the
 * Bulk-only specification requires a stall.  In such cases the driver
 * will halt the endpoint and set a flag indicating that it should clear
 * the halt in software during the next device reset.  Hopefully this
 * will permit everything to work correctly.  Furthermore, although the
 * specification allows the bulk-out endpoint to halt when the host sends
 * too much data, implementing this would cause an unavoidable race.
 * The driver will always use the "no-stall" approach for OUT transfers.
 *
 * One subtle point concerns sending status-stage responses for ep0
 * requests.  Some of these requests, such as device reset, can involve
 * interrupting an ongoing file I/O operation, which might take an
 * arbitrarily long time.  During that delay the host might give up on
 * the original ep0 request and issue a new one.  When that happens the
 * driver should not notify the host about completion of the original
 * request, as the host will no longer be waiting for it.  So the driver
 * assigns to each ep0 request a unique tag, and it keeps track of the
 * tag value of the request associated with a long-running exception
 * (device-reset, interface-change, or configuration-change).  When the
 * exception handler is finished, the status-stage response is submitted
 * only if the current ep0 request tag is equal to the exception request
 * tag.  Thus only the most recently received ep0 request will get a
 * status-stage response.
 *
 * Warning: This driver source file is too long.  It ought to be split up
 * into a header file plus about 3 separate .c files, to handle the details
 * of the Gadget, USB Mass Storage, and SCSI protocols.
 */
/* #define VERBOSE_DEBUG */
/* #define DUMP_MSGS */

#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/dcache.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kref.h>
#include <linux/kthread.h>
#include <linux/limits.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/freezer.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/composite.h>

#include "configfs.h"


/*------------------------------------------------------------------------*/

#define FSG_DRIVER_DESC		"Mass Storage Function"
#define FSG_DRIVER_VERSION	"2009/09/11"
static const char fsg_string_interface[] = "Mass Storage";

#include "storage_common.h"
#include "f_mass_storage.h"

/* Static strings, in UTF-8 (for simplicity we use only ASCII characters) */
static struct usb_string		fsg_strings[] = {
	{FSG_STRING_INTERFACE,		fsg_string_interface},
	{}
};

static struct usb_gadget_strings	fsg_stringtab = {
	.language	= 0x0409,		/* en-us */
	.strings	= fsg_strings,
};

static struct usb_gadget_strings *fsg_strings_array[] = {
	&fsg_stringtab,
	NULL,
};

/*-------------------------------------------------------------------------*/
/* Data shared by all the FSG instances. */
struct fsg_common {
	struct usb_gadget	*gadget;
	struct usb_composite_dev *cdev;
	struct fsg_dev		*fsg, *new_fsg;
	wait_queue_head_t	fsg_wait;

	/* filesem protects: backing files in use */
	struct rw_semaphore	filesem;

	/* lock protects: state, all the req_busy's */
	spinlock_t		lock;

	struct usb_ep		*ep0;		/* Copy of gadget->ep0 */
	struct usb_request	*ep0req;	/* Copy of cdev->req */
	unsigned int		ep0_req_tag;

	struct fsg_buffhd	*next_buffhd_to_fill;
	struct fsg_buffhd	*next_buffhd_to_drain;
	struct fsg_buffhd	*buffhds;
	unsigned int		fsg_num_buffers;

	int			cmnd_size;
	u8			cmnd[MAX_COMMAND_SIZE];

	unsigned int		lun;
	struct fsg_lun		*luns[FSG_MAX_LUNS];
	struct fsg_lun		*curlun;

	unsigned int		bulk_out_maxpacket;
	enum fsg_state		state;		/* For exception handling */
	unsigned int		exception_req_tag;

	enum data_direction	data_dir;
	u32			data_size;
	u32			data_size_from_cmnd;
	u32			tag;
	u32			residue;
	u32			usb_amount_left;

	unsigned int		can_stall:1;
	unsigned int		free_storage_on_release:1;
	unsigned int		phase_error:1;
	unsigned int		short_packet_received:1;
	unsigned int		bad_lun_okay:1;
	unsigned int		running:1;
	unsigned int		sysfs:1;

	int			thread_wakeup_needed;
	struct completion	thread_notifier;
	struct task_struct	*thread_task;

	/* Gadget's private data. */
	void			*private_data;

	char inquiry_string[INQUIRY_STRING_LEN];

	struct kref		ref;
};
struct fsg_dev {
	struct usb_function	function;
	struct usb_gadget	*gadget;	/* Copy of cdev->gadget */
	struct fsg_common	*common;

	u16			interface_number;

	unsigned int		bulk_in_enabled:1;
	unsigned int		bulk_out_enabled:1;

	unsigned long		atomic_bitflags;
#define IGNORE_BULK_OUT		0

	struct usb_ep		*bulk_in;
	struct usb_ep		*bulk_out;
};
static inline int __fsg_is_set(struct fsg_common *common,
			       const char *func, unsigned line)
{
	if (common->fsg)
		return 1;
	ERROR(common, "common->fsg is NULL in %s at %u\n", func, line);
	WARN_ON(1);
	return 0;
}

#define fsg_is_set(common) likely(__fsg_is_set(common, __func__, __LINE__))
static inline struct fsg_dev *fsg_from_func(struct usb_function *f)
{
	return container_of(f, struct fsg_dev, function);
}
typedef void (*fsg_routine_t)(struct fsg_dev *);
static int exception_in_progress(struct fsg_common *common)
{
	return common->state > FSG_STATE_IDLE;
}
/* Make bulk-out requests be divisible by the maxpacket size */
static void set_bulk_out_req_length(struct fsg_common *common,
				    struct fsg_buffhd *bh, unsigned int length)
{
	unsigned int	rem;

	bh->bulk_out_intended_length = length;
	rem = length % common->bulk_out_maxpacket;
	if (rem > 0)
		length += common->bulk_out_maxpacket - rem;
	bh->outreq->length = length;
}
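/*
 * Worked example (illustrative numbers only): with a 512-byte bulk-out
 * maxpacket, a request for 513 bytes of data is padded to 1024 bytes
 * before being queued; the original 513 is kept in
 * bulk_out_intended_length so the completion handler can still tell
 * whether the host sent a short packet.
 */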
/*-------------------------------------------------------------------------*/

static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
{
	const char	*name;

	if (ep == fsg->bulk_in)
		name = "bulk-in";
	else if (ep == fsg->bulk_out)
		name = "bulk-out";
	else
		name = ep->name;
	DBG(fsg, "%s set halt\n", name);
	return usb_ep_set_halt(ep);
}
/*-------------------------------------------------------------------------*/

/* These routines may be called in process context or in_irq */

/* Caller must hold fsg->lock */
static void wakeup_thread(struct fsg_common *common)
{
	/*
	 * Ensure the reading of thread_wakeup_needed
	 * and the writing of bh->state are completed
	 */
	smp_mb();

	/* Tell the main thread that something has happened */
	common->thread_wakeup_needed = 1;
	if (common->thread_task)
		wake_up_process(common->thread_task);
}
static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
{
	unsigned long	flags;

	/*
	 * Do nothing if a higher-priority exception is already in progress.
	 * If a lower-or-equal priority exception is in progress, preempt it
	 * and notify the main thread by sending it a signal.
	 */
	spin_lock_irqsave(&common->lock, flags);
	if (common->state <= new_state) {
		common->exception_req_tag = common->ep0_req_tag;
		common->state = new_state;
		if (common->thread_task)
			send_sig_info(SIGUSR1, SEND_SIG_FORCED,
				      common->thread_task);
	}
	spin_unlock_irqrestore(&common->lock, flags);
}
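/*
 * Note: exception "priority" here is simply the numeric ordering of
 * enum fsg_state (see storage_common.h); the "<=" test above lets an
 * equal- or higher-valued state preempt the one currently pending.
 */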
/*-------------------------------------------------------------------------*/

static int ep0_queue(struct fsg_common *common)
{
	int	rc;

	rc = usb_ep_queue(common->ep0, common->ep0req, GFP_ATOMIC);
	common->ep0->driver_data = common;
	if (rc != 0 && rc != -ESHUTDOWN) {
		/* We can't do much more than wait for a reset */
		WARNING(common, "error in submission: %s --> %d\n",
			common->ep0->name, rc);
	}
	return rc;
}
/*-------------------------------------------------------------------------*/

/* Completion handlers. These always run in_irq. */

static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct fsg_common	*common = ep->driver_data;
	struct fsg_buffhd	*bh = req->context;

	if (req->status || req->actual != req->length)
		DBG(common, "%s --> %d, %u/%u\n", __func__,
		    req->status, req->actual, req->length);
	if (req->status == -ECONNRESET)		/* Request was cancelled */
		usb_ep_fifo_flush(ep);

	/* Hold the lock while we update the request and buffer states */
	smp_wmb();
	spin_lock(&common->lock);
	bh->inreq_busy = 0;
	bh->state = BUF_STATE_EMPTY;
	wakeup_thread(common);
	spin_unlock(&common->lock);
}
static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct fsg_common	*common = ep->driver_data;
	struct fsg_buffhd	*bh = req->context;

	dump_msg(common, "bulk-out", req->buf, req->actual);
	if (req->status || req->actual != bh->bulk_out_intended_length)
		DBG(common, "%s --> %d, %u/%u\n", __func__,
		    req->status, req->actual, bh->bulk_out_intended_length);
	if (req->status == -ECONNRESET)		/* Request was cancelled */
		usb_ep_fifo_flush(ep);

	/* Hold the lock while we update the request and buffer states */
	smp_wmb();
	spin_lock(&common->lock);
	bh->outreq_busy = 0;
	bh->state = BUF_STATE_FULL;
	wakeup_thread(common);
	spin_unlock(&common->lock);
}
static int _fsg_common_get_max_lun(struct fsg_common *common)
{
	int i = ARRAY_SIZE(common->luns) - 1;

	while (i >= 0 && !common->luns[i])
		--i;

	return i;
}
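/*
 * E.g. with luns[0] and luns[1] populated this returns 1: the Bulk-only
 * Get Max LUN request reports the highest LUN index, not the LUN count.
 */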
static int fsg_setup(struct usb_function *f,
		     const struct usb_ctrlrequest *ctrl)
{
	struct fsg_dev		*fsg = fsg_from_func(f);
	struct usb_request	*req = fsg->common->ep0req;
	u16			w_index = le16_to_cpu(ctrl->wIndex);
	u16			w_value = le16_to_cpu(ctrl->wValue);
	u16			w_length = le16_to_cpu(ctrl->wLength);

	if (!fsg_is_set(fsg->common))
		return -EOPNOTSUPP;

	++fsg->common->ep0_req_tag;	/* Record arrival of a new request */
	req->context = NULL;
	req->length = 0;
	dump_msg(fsg, "ep0-setup", (u8 *) ctrl, sizeof(*ctrl));

	switch (ctrl->bRequest) {

	case US_BULK_RESET_REQUEST:
		if (ctrl->bRequestType !=
		    (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
			break;
		if (w_index != fsg->interface_number || w_value != 0 ||
				w_length != 0)
			return -EDOM;

		/*
		 * Raise an exception to stop the current operation
		 * and reinitialize our state.
		 */
		DBG(fsg, "bulk reset request\n");
		raise_exception(fsg->common, FSG_STATE_RESET);
		return USB_GADGET_DELAYED_STATUS;

	case US_BULK_GET_MAX_LUN:
		if (ctrl->bRequestType !=
		    (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
			break;
		if (w_index != fsg->interface_number || w_value != 0 ||
				w_length != 1)
			return -EDOM;
		VDBG(fsg, "get max LUN\n");
		*(u8 *)req->buf = _fsg_common_get_max_lun(fsg->common);

		/* Respond with data/status */
		req->length = min((u16)1, w_length);
		return ep0_queue(fsg->common);
	}

	VDBG(fsg,
	     "unknown class-specific control req %02x.%02x v%04x i%04x l%u\n",
	     ctrl->bRequestType, ctrl->bRequest,
	     le16_to_cpu(ctrl->wValue), w_index, w_length);
	return -EOPNOTSUPP;
}
/*-------------------------------------------------------------------------*/

/* All the following routines run in process context */

/* Use this for bulk or interrupt transfers, not ep0 */
static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
			   struct usb_request *req, int *pbusy,
			   enum fsg_buffer_state *state)
{
	int	rc;

	if (ep == fsg->bulk_in)
		dump_msg(fsg, "bulk-in", req->buf, req->length);

	spin_lock_irq(&fsg->common->lock);
	*pbusy = 1;
	*state = BUF_STATE_BUSY;
	spin_unlock_irq(&fsg->common->lock);

	rc = usb_ep_queue(ep, req, GFP_KERNEL);
	if (rc == 0)
		return;  /* All good, we're done */

	*pbusy = 0;
	*state = BUF_STATE_EMPTY;

	/* We can't do much more than wait for a reset */

	/*
	 * Note: currently the net2280 driver fails zero-length
	 * submissions if DMA is enabled.
	 */
	if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP && req->length == 0))
		WARNING(fsg, "error in submission: %s --> %d\n", ep->name, rc);
}
static bool start_in_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
{
	if (!fsg_is_set(common))
		return false;
	start_transfer(common->fsg, common->fsg->bulk_in,
		       bh->inreq, &bh->inreq_busy, &bh->state);
	return true;
}

static bool start_out_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
{
	if (!fsg_is_set(common))
		return false;
	start_transfer(common->fsg, common->fsg->bulk_out,
		       bh->outreq, &bh->outreq_busy, &bh->state);
	return true;
}
static int sleep_thread(struct fsg_common *common, bool can_freeze)
{
	int	rc = 0;

	/* Wait until a signal arrives or we are woken up */
	for (;;) {
		if (can_freeze)
			try_to_freeze();
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current)) {
			rc = -EINTR;
			break;
		}
		if (common->thread_wakeup_needed)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	common->thread_wakeup_needed = 0;

	/*
	 * Ensure the writing of thread_wakeup_needed
	 * and the reading of bh->state are completed
	 */
	smp_rmb();
	return rc;
}
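/*
 * How the sleep/wakeup handshake avoids lost wakeups (a summary of the
 * code above, not additional locking): completion handlers update
 * bh->state and then call wakeup_thread(), which sets
 * thread_wakeup_needed and wakes the task; sleep_thread() marks itself
 * TASK_INTERRUPTIBLE *before* testing thread_wakeup_needed, so a wakeup
 * racing with the test just makes the following schedule() return
 * immediately instead of sleeping.
 */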
/*-------------------------------------------------------------------------*/

static int do_read(struct fsg_common *common)
{
	struct fsg_lun		*curlun = common->curlun;
	u32			lba;
	struct fsg_buffhd	*bh;
	int			rc;
	u32			amount_left;
	loff_t			file_offset, file_offset_tmp;
	unsigned int		amount;
	ssize_t			nread;

	/*
	 * Get the starting Logical Block Address and check that it's
	 * not too big.
	 */
	if (common->cmnd[0] == READ_6)
		lba = get_unaligned_be24(&common->cmnd[1]);
	else {
		lba = get_unaligned_be32(&common->cmnd[2]);

		/*
		 * We allow DPO (Disable Page Out = don't save data in the
		 * cache) and FUA (Force Unit Access = don't read from the
		 * cache), but we don't implement them.
		 */
		if ((common->cmnd[1] & ~0x18) != 0) {
			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
			return -EINVAL;
		}
	}
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}
	file_offset = ((loff_t) lba) << curlun->blkbits;

	/* Carry out the file reads */
	amount_left = common->data_size_from_cmnd;
	if (unlikely(amount_left == 0))
		return -EIO;		/* No default reply */

	for (;;) {
		/*
		 * Figure out how much we need to read:
		 * Try to read the remaining amount.
		 * But don't read more than the buffer size.
		 * And don't try to read past the end of the file.
		 */
		amount = min(amount_left, FSG_BUFLEN);
		amount = min((loff_t)amount,
			     curlun->file_length - file_offset);

		/* Wait for the next buffer to become available */
		bh = common->next_buffhd_to_fill;
		while (bh->state != BUF_STATE_EMPTY) {
			rc = sleep_thread(common, false);
			if (rc)
				return rc;
		}

		/*
		 * If we were asked to read past the end of file,
		 * end with an empty buffer.
		 */
		if (amount == 0) {
			curlun->sense_data =
					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
			curlun->sense_data_info =
					file_offset >> curlun->blkbits;
			curlun->info_valid = 1;
			bh->inreq->length = 0;
			bh->state = BUF_STATE_FULL;
			break;
		}

		/* Perform the read */
		file_offset_tmp = file_offset;
		nread = vfs_read(curlun->filp,
				 (char __user *)bh->buf,
				 amount, &file_offset_tmp);
		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
		      (unsigned long long)file_offset, (int)nread);
		if (signal_pending(current))
			return -EINTR;

		if (nread < 0) {
			LDBG(curlun, "error in file read: %d\n", (int)nread);
			nread = 0;
		} else if (nread < amount) {
			LDBG(curlun, "partial file read: %d/%u\n",
			     (int)nread, amount);
			nread = round_down(nread, curlun->blksize);
		}
		file_offset  += nread;
		amount_left  -= nread;
		common->residue -= nread;

		/*
		 * Except at the end of the transfer, nread will be
		 * equal to the buffer size, which is divisible by the
		 * bulk-in maxpacket size.
		 */
		bh->inreq->length = nread;
		bh->state = BUF_STATE_FULL;

		/* If an error occurred, report it and its position */
		if (nread < amount) {
			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
			curlun->sense_data_info =
					file_offset >> curlun->blkbits;
			curlun->info_valid = 1;
			break;
		}

		if (amount_left == 0)
			break;		/* No more left to read */

		/* Send this buffer and go read some more */
		bh->inreq->zero = 0;
		if (!start_in_transfer(common, bh))
			/* Don't know what to do if common->fsg is NULL */
			return -EIO;
		common->next_buffhd_to_fill = bh->next;
	}

	return -EIO;		/* No default reply */
}
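/*
 * This loop is where the double-buffering described at the top of the
 * file pays off: while the host drains one buffer head over the bulk-in
 * endpoint, the thread is already reading the next chunk of the backing
 * file into another buffer head.
 */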
/*-------------------------------------------------------------------------*/

static int do_write(struct fsg_common *common)
{
	struct fsg_lun		*curlun = common->curlun;
	u32			lba;
	struct fsg_buffhd	*bh;
	int			get_some_more;
	u32			amount_left_to_req, amount_left_to_write;
	loff_t			usb_offset, file_offset, file_offset_tmp;
	unsigned int		amount;
	ssize_t			nwritten;
	int			rc;

	if (curlun->ro) {
		curlun->sense_data = SS_WRITE_PROTECTED;
		return -EINVAL;
	}
	spin_lock(&curlun->filp->f_lock);
	curlun->filp->f_flags &= ~O_SYNC;	/* Default is not to wait */
	spin_unlock(&curlun->filp->f_lock);

	/*
	 * Get the starting Logical Block Address and check that it's
	 * not too big.
	 */
	if (common->cmnd[0] == WRITE_6)
		lba = get_unaligned_be24(&common->cmnd[1]);
	else {
		lba = get_unaligned_be32(&common->cmnd[2]);

		/*
		 * We allow DPO (Disable Page Out = don't save data in the
		 * cache) and FUA (Force Unit Access = write directly to the
		 * medium).  We don't implement DPO; we implement FUA by
		 * performing synchronous output.
		 */
		if (common->cmnd[1] & ~0x18) {
			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
			return -EINVAL;
		}
		if (!curlun->nofua && (common->cmnd[1] & 0x08)) { /* FUA */
			spin_lock(&curlun->filp->f_lock);
			curlun->filp->f_flags |= O_SYNC;
			spin_unlock(&curlun->filp->f_lock);
		}
	}
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}

	/* Carry out the file writes */
	get_some_more = 1;
	file_offset = usb_offset = ((loff_t) lba) << curlun->blkbits;
	amount_left_to_req = common->data_size_from_cmnd;
	amount_left_to_write = common->data_size_from_cmnd;

	while (amount_left_to_write > 0) {

		/* Queue a request for more data from the host */
		bh = common->next_buffhd_to_fill;
		if (bh->state == BUF_STATE_EMPTY && get_some_more) {

			/*
			 * Figure out how much we want to get:
			 * Try to get the remaining amount,
			 * but not more than the buffer size.
			 */
			amount = min(amount_left_to_req, FSG_BUFLEN);

			/* Beyond the end of the backing file? */
			if (usb_offset >= curlun->file_length) {
				get_some_more = 0;
				curlun->sense_data =
					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
				curlun->sense_data_info =
					usb_offset >> curlun->blkbits;
				curlun->info_valid = 1;
				continue;
			}

			/* Get the next buffer */
			usb_offset += amount;
			common->usb_amount_left -= amount;
			amount_left_to_req -= amount;
			if (amount_left_to_req == 0)
				get_some_more = 0;

			/*
			 * Except at the end of the transfer, amount will be
			 * equal to the buffer size, which is divisible by
			 * the bulk-out maxpacket size.
			 */
			set_bulk_out_req_length(common, bh, amount);
			if (!start_out_transfer(common, bh))
				/* Dunno what to do if common->fsg is NULL */
				return -EIO;
			common->next_buffhd_to_fill = bh->next;
			continue;
		}

		/* Write the received data to the backing file */
		bh = common->next_buffhd_to_drain;
		if (bh->state == BUF_STATE_EMPTY && !get_some_more)
			break;			/* We stopped early */
		if (bh->state == BUF_STATE_FULL) {
			smp_rmb();
			common->next_buffhd_to_drain = bh->next;
			bh->state = BUF_STATE_EMPTY;

			/* Did something go wrong with the transfer? */
			if (bh->outreq->status != 0) {
				curlun->sense_data = SS_COMMUNICATION_FAILURE;
				curlun->sense_data_info =
					file_offset >> curlun->blkbits;
				curlun->info_valid = 1;
				break;
			}

			amount = bh->outreq->actual;
			if (curlun->file_length - file_offset < amount) {
				LDBG(curlun,
				     "write %u @ %llu beyond end %llu\n",
				     amount, (unsigned long long)file_offset,
				     (unsigned long long)curlun->file_length);
				amount = curlun->file_length - file_offset;
			}

			/* Don't accept excess data.  The spec doesn't say
			 * what to do in this case.  We'll ignore the error.
			 */
			amount = min(amount, bh->bulk_out_intended_length);

			/* Don't write a partial block */
			amount = round_down(amount, curlun->blksize);
			if (amount == 0)
				goto empty_write;

			/* Perform the write */
			file_offset_tmp = file_offset;
			nwritten = vfs_write(curlun->filp,
					     (char __user *)bh->buf,
					     amount, &file_offset_tmp);
			VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
			      (unsigned long long)file_offset, (int)nwritten);
			if (signal_pending(current))
				return -EINTR;		/* Interrupted! */

			if (nwritten < 0) {
				LDBG(curlun, "error in file write: %d\n",
				     (int)nwritten);
				nwritten = 0;
			} else if (nwritten < amount) {
				LDBG(curlun, "partial file write: %d/%u\n",
				     (int)nwritten, amount);
				nwritten = round_down(nwritten, curlun->blksize);
			}
			file_offset += nwritten;
			amount_left_to_write -= nwritten;
			common->residue -= nwritten;

			/* If an error occurred, report it and its position */
			if (nwritten < amount) {
				curlun->sense_data = SS_WRITE_ERROR;
				curlun->sense_data_info =
					file_offset >> curlun->blkbits;
				curlun->info_valid = 1;
				break;
			}

 empty_write:
			/* Did the host decide to stop early? */
			if (bh->outreq->actual < bh->bulk_out_intended_length) {
				common->short_packet_received = 1;
				break;
			}
			continue;
		}

		/* Wait for something to happen */
		rc = sleep_thread(common, false);
		if (rc)
			return rc;
	}

	return -EIO;		/* No default reply */
}


/*-------------------------------------------------------------------------*/
static int do_synchronize_cache(struct fsg_common *common)
{
	struct fsg_lun	*curlun = common->curlun;
	int		rc;

	/* We ignore the requested LBA and write out all file's
	 * dirty data buffers. */
	rc = fsg_lun_fsync_sub(curlun);
	if (rc)
		curlun->sense_data = SS_WRITE_ERROR;
	return 0;
}


/*-------------------------------------------------------------------------*/

static void invalidate_sub(struct fsg_lun *curlun)
{
	struct file	*filp = curlun->filp;
	struct inode	*inode = file_inode(filp);
	unsigned long	rc;

	rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
	VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc);
}
990 static int do_verify(struct fsg_common
*common
)
992 struct fsg_lun
*curlun
= common
->curlun
;
994 u32 verification_length
;
995 struct fsg_buffhd
*bh
= common
->next_buffhd_to_fill
;
996 loff_t file_offset
, file_offset_tmp
;
1002 * Get the starting Logical Block Address and check that it's
1005 lba
= get_unaligned_be32(&common
->cmnd
[2]);
1006 if (lba
>= curlun
->num_sectors
) {
1007 curlun
->sense_data
= SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE
;
1012 * We allow DPO (Disable Page Out = don't save data in the
1013 * cache) but we don't implement it.
1015 if (common
->cmnd
[1] & ~0x10) {
1016 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1020 verification_length
= get_unaligned_be16(&common
->cmnd
[7]);
1021 if (unlikely(verification_length
== 0))
1022 return -EIO
; /* No default reply */
1024 /* Prepare to carry out the file verify */
1025 amount_left
= verification_length
<< curlun
->blkbits
;
1026 file_offset
= ((loff_t
) lba
) << curlun
->blkbits
;
1028 /* Write out all the dirty buffers before invalidating them */
1029 fsg_lun_fsync_sub(curlun
);
1030 if (signal_pending(current
))
1033 invalidate_sub(curlun
);
1034 if (signal_pending(current
))
1037 /* Just try to read the requested blocks */
1038 while (amount_left
> 0) {
1040 * Figure out how much we need to read:
1041 * Try to read the remaining amount, but not more than
1043 * And don't try to read past the end of the file.
1045 amount
= min(amount_left
, FSG_BUFLEN
);
1046 amount
= min((loff_t
)amount
,
1047 curlun
->file_length
- file_offset
);
1049 curlun
->sense_data
=
1050 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE
;
1051 curlun
->sense_data_info
=
1052 file_offset
>> curlun
->blkbits
;
1053 curlun
->info_valid
= 1;
1057 /* Perform the read */
1058 file_offset_tmp
= file_offset
;
1059 nread
= vfs_read(curlun
->filp
,
1060 (char __user
*) bh
->buf
,
1061 amount
, &file_offset_tmp
);
1062 VLDBG(curlun
, "file read %u @ %llu -> %d\n", amount
,
1063 (unsigned long long) file_offset
,
1065 if (signal_pending(current
))
1069 LDBG(curlun
, "error in file verify: %d\n", (int)nread
);
1071 } else if (nread
< amount
) {
1072 LDBG(curlun
, "partial file verify: %d/%u\n",
1073 (int)nread
, amount
);
1074 nread
= round_down(nread
, curlun
->blksize
);
1077 curlun
->sense_data
= SS_UNRECOVERED_READ_ERROR
;
1078 curlun
->sense_data_info
=
1079 file_offset
>> curlun
->blkbits
;
1080 curlun
->info_valid
= 1;
1083 file_offset
+= nread
;
1084 amount_left
-= nread
;
1090 /*-------------------------------------------------------------------------*/
1092 static int do_inquiry(struct fsg_common
*common
, struct fsg_buffhd
*bh
)
1094 struct fsg_lun
*curlun
= common
->curlun
;
1095 u8
*buf
= (u8
*) bh
->buf
;
1097 if (!curlun
) { /* Unsupported LUNs are okay */
1098 common
->bad_lun_okay
= 1;
1100 buf
[0] = TYPE_NO_LUN
; /* Unsupported, no device-type */
1101 buf
[4] = 31; /* Additional length */
1105 buf
[0] = curlun
->cdrom
? TYPE_ROM
: TYPE_DISK
;
1106 buf
[1] = curlun
->removable
? 0x80 : 0;
1107 buf
[2] = 2; /* ANSI SCSI level 2 */
1108 buf
[3] = 2; /* SCSI-2 INQUIRY data format */
1109 buf
[4] = 31; /* Additional length */
1110 buf
[5] = 0; /* No special options */
1113 if (curlun
->inquiry_string
[0])
1114 memcpy(buf
+ 8, curlun
->inquiry_string
,
1115 sizeof(curlun
->inquiry_string
));
1117 memcpy(buf
+ 8, common
->inquiry_string
,
1118 sizeof(common
->inquiry_string
));
1122 static int do_request_sense(struct fsg_common
*common
, struct fsg_buffhd
*bh
)
1124 struct fsg_lun
*curlun
= common
->curlun
;
1125 u8
*buf
= (u8
*) bh
->buf
;
1130 * From the SCSI-2 spec., section 7.9 (Unit attention condition):
1132 * If a REQUEST SENSE command is received from an initiator
1133 * with a pending unit attention condition (before the target
1134 * generates the contingent allegiance condition), then the
1135 * target shall either:
1136 * a) report any pending sense data and preserve the unit
1137 * attention condition on the logical unit, or,
1138 * b) report the unit attention condition, may discard any
1139 * pending sense data, and clear the unit attention
1140 * condition on the logical unit for that initiator.
1142 * FSG normally uses option a); enable this code to use option b).
1145 if (curlun
&& curlun
->unit_attention_data
!= SS_NO_SENSE
) {
1146 curlun
->sense_data
= curlun
->unit_attention_data
;
1147 curlun
->unit_attention_data
= SS_NO_SENSE
;
1151 if (!curlun
) { /* Unsupported LUNs are okay */
1152 common
->bad_lun_okay
= 1;
1153 sd
= SS_LOGICAL_UNIT_NOT_SUPPORTED
;
1157 sd
= curlun
->sense_data
;
1158 sdinfo
= curlun
->sense_data_info
;
1159 valid
= curlun
->info_valid
<< 7;
1160 curlun
->sense_data
= SS_NO_SENSE
;
1161 curlun
->sense_data_info
= 0;
1162 curlun
->info_valid
= 0;
1166 buf
[0] = valid
| 0x70; /* Valid, current error */
1168 put_unaligned_be32(sdinfo
, &buf
[3]); /* Sense information */
1169 buf
[7] = 18 - 8; /* Additional sense length */
1175 static int do_read_capacity(struct fsg_common
*common
, struct fsg_buffhd
*bh
)
1177 struct fsg_lun
*curlun
= common
->curlun
;
1178 u32 lba
= get_unaligned_be32(&common
->cmnd
[2]);
1179 int pmi
= common
->cmnd
[8];
1180 u8
*buf
= (u8
*)bh
->buf
;
1182 /* Check the PMI and LBA fields */
1183 if (pmi
> 1 || (pmi
== 0 && lba
!= 0)) {
1184 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1188 put_unaligned_be32(curlun
->num_sectors
- 1, &buf
[0]);
1189 /* Max logical block */
1190 put_unaligned_be32(curlun
->blksize
, &buf
[4]);/* Block length */
1194 static int do_read_header(struct fsg_common
*common
, struct fsg_buffhd
*bh
)
1196 struct fsg_lun
*curlun
= common
->curlun
;
1197 int msf
= common
->cmnd
[1] & 0x02;
1198 u32 lba
= get_unaligned_be32(&common
->cmnd
[2]);
1199 u8
*buf
= (u8
*)bh
->buf
;
1201 if (common
->cmnd
[1] & ~0x02) { /* Mask away MSF */
1202 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1205 if (lba
>= curlun
->num_sectors
) {
1206 curlun
->sense_data
= SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE
;
1211 buf
[0] = 0x01; /* 2048 bytes of user data, rest is EC */
1212 store_cdrom_address(&buf
[4], msf
, lba
);
1216 static int do_read_toc(struct fsg_common
*common
, struct fsg_buffhd
*bh
)
1218 struct fsg_lun
*curlun
= common
->curlun
;
1219 int msf
= common
->cmnd
[1] & 0x02;
1220 int start_track
= common
->cmnd
[6];
1221 u8
*buf
= (u8
*)bh
->buf
;
1223 if ((common
->cmnd
[1] & ~0x02) != 0 || /* Mask away MSF */
1225 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1230 buf
[1] = (20-2); /* TOC data length */
1231 buf
[2] = 1; /* First track number */
1232 buf
[3] = 1; /* Last track number */
1233 buf
[5] = 0x16; /* Data track, copying allowed */
1234 buf
[6] = 0x01; /* Only track is number 1 */
1235 store_cdrom_address(&buf
[8], msf
, 0);
1237 buf
[13] = 0x16; /* Lead-out track is data */
1238 buf
[14] = 0xAA; /* Lead-out track number */
1239 store_cdrom_address(&buf
[16], msf
, curlun
->num_sectors
);
1243 static int do_mode_sense(struct fsg_common
*common
, struct fsg_buffhd
*bh
)
1245 struct fsg_lun
*curlun
= common
->curlun
;
1246 int mscmnd
= common
->cmnd
[0];
1247 u8
*buf
= (u8
*) bh
->buf
;
1250 int changeable_values
, all_pages
;
1254 if ((common
->cmnd
[1] & ~0x08) != 0) { /* Mask away DBD */
1255 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1258 pc
= common
->cmnd
[2] >> 6;
1259 page_code
= common
->cmnd
[2] & 0x3f;
1261 curlun
->sense_data
= SS_SAVING_PARAMETERS_NOT_SUPPORTED
;
1264 changeable_values
= (pc
== 1);
1265 all_pages
= (page_code
== 0x3f);
1268 * Write the mode parameter header. Fixed values are: default
1269 * medium type, no cache control (DPOFUA), and no block descriptors.
1270 * The only variable value is the WriteProtect bit. We will fill in
1271 * the mode data length later.
1274 if (mscmnd
== MODE_SENSE
) {
1275 buf
[2] = (curlun
->ro
? 0x80 : 0x00); /* WP, DPOFUA */
1278 } else { /* MODE_SENSE_10 */
1279 buf
[3] = (curlun
->ro
? 0x80 : 0x00); /* WP, DPOFUA */
1281 limit
= 65535; /* Should really be FSG_BUFLEN */
1284 /* No block descriptors */
1287 * The mode pages, in numerical order. The only page we support
1288 * is the Caching page.
1290 if (page_code
== 0x08 || all_pages
) {
1292 buf
[0] = 0x08; /* Page code */
1293 buf
[1] = 10; /* Page length */
1294 memset(buf
+2, 0, 10); /* None of the fields are changeable */
1296 if (!changeable_values
) {
1297 buf
[2] = 0x04; /* Write cache enable, */
1298 /* Read cache not disabled */
1299 /* No cache retention priorities */
1300 put_unaligned_be16(0xffff, &buf
[4]);
1301 /* Don't disable prefetch */
1302 /* Minimum prefetch = 0 */
1303 put_unaligned_be16(0xffff, &buf
[8]);
1304 /* Maximum prefetch */
1305 put_unaligned_be16(0xffff, &buf
[10]);
1306 /* Maximum prefetch ceiling */
1312 * Check that a valid page was requested and the mode data length
1316 if (!valid_page
|| len
> limit
) {
1317 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1321 /* Store the mode data length */
1322 if (mscmnd
== MODE_SENSE
)
1325 put_unaligned_be16(len
- 2, buf0
);
1329 static int do_start_stop(struct fsg_common
*common
)
1331 struct fsg_lun
*curlun
= common
->curlun
;
1336 } else if (!curlun
->removable
) {
1337 curlun
->sense_data
= SS_INVALID_COMMAND
;
1339 } else if ((common
->cmnd
[1] & ~0x01) != 0 || /* Mask away Immed */
1340 (common
->cmnd
[4] & ~0x03) != 0) { /* Mask LoEj, Start */
1341 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1345 loej
= common
->cmnd
[4] & 0x02;
1346 start
= common
->cmnd
[4] & 0x01;
1349 * Our emulation doesn't support mounting; the medium is
1350 * available for use as soon as it is loaded.
1353 if (!fsg_lun_is_open(curlun
)) {
1354 curlun
->sense_data
= SS_MEDIUM_NOT_PRESENT
;
1360 /* Are we allowed to unload the media? */
1361 if (curlun
->prevent_medium_removal
) {
1362 LDBG(curlun
, "unload attempt prevented\n");
1363 curlun
->sense_data
= SS_MEDIUM_REMOVAL_PREVENTED
;
1370 up_read(&common
->filesem
);
1371 down_write(&common
->filesem
);
1372 fsg_lun_close(curlun
);
1373 up_write(&common
->filesem
);
1374 down_read(&common
->filesem
);
1379 static int do_prevent_allow(struct fsg_common
*common
)
1381 struct fsg_lun
*curlun
= common
->curlun
;
1384 if (!common
->curlun
) {
1386 } else if (!common
->curlun
->removable
) {
1387 common
->curlun
->sense_data
= SS_INVALID_COMMAND
;
1391 prevent
= common
->cmnd
[4] & 0x01;
1392 if ((common
->cmnd
[4] & ~0x01) != 0) { /* Mask away Prevent */
1393 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1397 if (curlun
->prevent_medium_removal
&& !prevent
)
1398 fsg_lun_fsync_sub(curlun
);
1399 curlun
->prevent_medium_removal
= prevent
;
1403 static int do_read_format_capacities(struct fsg_common
*common
,
1404 struct fsg_buffhd
*bh
)
1406 struct fsg_lun
*curlun
= common
->curlun
;
1407 u8
*buf
= (u8
*) bh
->buf
;
1409 buf
[0] = buf
[1] = buf
[2] = 0;
1410 buf
[3] = 8; /* Only the Current/Maximum Capacity Descriptor */
1413 put_unaligned_be32(curlun
->num_sectors
, &buf
[0]);
1414 /* Number of blocks */
1415 put_unaligned_be32(curlun
->blksize
, &buf
[4]);/* Block length */
1416 buf
[4] = 0x02; /* Current capacity */
1420 static int do_mode_select(struct fsg_common
*common
, struct fsg_buffhd
*bh
)
1422 struct fsg_lun
*curlun
= common
->curlun
;
1424 /* We don't support MODE SELECT */
1426 curlun
->sense_data
= SS_INVALID_COMMAND
;
1431 /*-------------------------------------------------------------------------*/
1433 static int halt_bulk_in_endpoint(struct fsg_dev
*fsg
)
1437 rc
= fsg_set_halt(fsg
, fsg
->bulk_in
);
1439 VDBG(fsg
, "delayed bulk-in endpoint halt\n");
1441 if (rc
!= -EAGAIN
) {
1442 WARNING(fsg
, "usb_ep_set_halt -> %d\n", rc
);
1447 /* Wait for a short time and then try again */
1448 if (msleep_interruptible(100) != 0)
1450 rc
= usb_ep_set_halt(fsg
->bulk_in
);
1455 static int wedge_bulk_in_endpoint(struct fsg_dev
*fsg
)
1459 DBG(fsg
, "bulk-in set wedge\n");
1460 rc
= usb_ep_set_wedge(fsg
->bulk_in
);
1462 VDBG(fsg
, "delayed bulk-in endpoint wedge\n");
1464 if (rc
!= -EAGAIN
) {
1465 WARNING(fsg
, "usb_ep_set_wedge -> %d\n", rc
);
1470 /* Wait for a short time and then try again */
1471 if (msleep_interruptible(100) != 0)
1473 rc
= usb_ep_set_wedge(fsg
->bulk_in
);
1478 static int throw_away_data(struct fsg_common
*common
)
1480 struct fsg_buffhd
*bh
;
1484 for (bh
= common
->next_buffhd_to_drain
;
1485 bh
->state
!= BUF_STATE_EMPTY
|| common
->usb_amount_left
> 0;
1486 bh
= common
->next_buffhd_to_drain
) {
1488 /* Throw away the data in a filled buffer */
1489 if (bh
->state
== BUF_STATE_FULL
) {
1491 bh
->state
= BUF_STATE_EMPTY
;
1492 common
->next_buffhd_to_drain
= bh
->next
;
1494 /* A short packet or an error ends everything */
1495 if (bh
->outreq
->actual
< bh
->bulk_out_intended_length
||
1496 bh
->outreq
->status
!= 0) {
1497 raise_exception(common
,
1498 FSG_STATE_ABORT_BULK_OUT
);
1504 /* Try to submit another request if we need one */
1505 bh
= common
->next_buffhd_to_fill
;
1506 if (bh
->state
== BUF_STATE_EMPTY
1507 && common
->usb_amount_left
> 0) {
1508 amount
= min(common
->usb_amount_left
, FSG_BUFLEN
);
1511 * Except at the end of the transfer, amount will be
1512 * equal to the buffer size, which is divisible by
1513 * the bulk-out maxpacket size.
1515 set_bulk_out_req_length(common
, bh
, amount
);
1516 if (!start_out_transfer(common
, bh
))
1517 /* Dunno what to do if common->fsg is NULL */
1519 common
->next_buffhd_to_fill
= bh
->next
;
1520 common
->usb_amount_left
-= amount
;
1524 /* Otherwise wait for something to happen */
1525 rc
= sleep_thread(common
, true);
1532 static int finish_reply(struct fsg_common
*common
)
1534 struct fsg_buffhd
*bh
= common
->next_buffhd_to_fill
;
1537 switch (common
->data_dir
) {
1539 break; /* Nothing to send */
1542 * If we don't know whether the host wants to read or write,
1543 * this must be CB or CBI with an unknown command. We mustn't
1544 * try to send or receive any data. So stall both bulk pipes
1545 * if we can and wait for a reset.
1547 case DATA_DIR_UNKNOWN
:
1548 if (!common
->can_stall
) {
1550 } else if (fsg_is_set(common
)) {
1551 fsg_set_halt(common
->fsg
, common
->fsg
->bulk_out
);
1552 rc
= halt_bulk_in_endpoint(common
->fsg
);
1554 /* Don't know what to do if common->fsg is NULL */
1559 /* All but the last buffer of data must have already been sent */
1560 case DATA_DIR_TO_HOST
:
1561 if (common
->data_size
== 0) {
1562 /* Nothing to send */
1564 /* Don't know what to do if common->fsg is NULL */
1565 } else if (!fsg_is_set(common
)) {
1568 /* If there's no residue, simply send the last buffer */
1569 } else if (common
->residue
== 0) {
1570 bh
->inreq
->zero
= 0;
1571 if (!start_in_transfer(common
, bh
))
1573 common
->next_buffhd_to_fill
= bh
->next
;
1576 * For Bulk-only, mark the end of the data with a short
1577 * packet. If we are allowed to stall, halt the bulk-in
1578 * endpoint. (Note: This violates the Bulk-Only Transport
1579 * specification, which requires us to pad the data if we
1580 * don't halt the endpoint. Presumably nobody will mind.)
1583 bh
->inreq
->zero
= 1;
1584 if (!start_in_transfer(common
, bh
))
1586 common
->next_buffhd_to_fill
= bh
->next
;
1587 if (common
->can_stall
)
1588 rc
= halt_bulk_in_endpoint(common
->fsg
);
1593 * We have processed all we want from the data the host has sent.
1594 * There may still be outstanding bulk-out requests.
1596 case DATA_DIR_FROM_HOST
:
1597 if (common
->residue
== 0) {
1598 /* Nothing to receive */
1600 /* Did the host stop sending unexpectedly early? */
1601 } else if (common
->short_packet_received
) {
1602 raise_exception(common
, FSG_STATE_ABORT_BULK_OUT
);
1606 * We haven't processed all the incoming data. Even though
1607 * we may be allowed to stall, doing so would cause a race.
1608 * The controller may already have ACK'ed all the remaining
1609 * bulk-out packets, in which case the host wouldn't see a
1610 * STALL. Not realizing the endpoint was halted, it wouldn't
1611 * clear the halt -- leading to problems later on.
1614 } else if (common
->can_stall
) {
1615 if (fsg_is_set(common
))
1616 fsg_set_halt(common
->fsg
,
1617 common
->fsg
->bulk_out
);
1618 raise_exception(common
, FSG_STATE_ABORT_BULK_OUT
);
1623 * We can't stall. Read in the excess data and throw it
1627 rc
= throw_away_data(common
);
1634 static int send_status(struct fsg_common
*common
)
1636 struct fsg_lun
*curlun
= common
->curlun
;
1637 struct fsg_buffhd
*bh
;
1638 struct bulk_cs_wrap
*csw
;
1640 u8 status
= US_BULK_STAT_OK
;
1643 /* Wait for the next buffer to become available */
1644 bh
= common
->next_buffhd_to_fill
;
1645 while (bh
->state
!= BUF_STATE_EMPTY
) {
1646 rc
= sleep_thread(common
, true);
1652 sd
= curlun
->sense_data
;
1653 sdinfo
= curlun
->sense_data_info
;
1654 } else if (common
->bad_lun_okay
)
1657 sd
= SS_LOGICAL_UNIT_NOT_SUPPORTED
;
1659 if (common
->phase_error
) {
1660 DBG(common
, "sending phase-error status\n");
1661 status
= US_BULK_STAT_PHASE
;
1662 sd
= SS_INVALID_COMMAND
;
1663 } else if (sd
!= SS_NO_SENSE
) {
1664 DBG(common
, "sending command-failure status\n");
1665 status
= US_BULK_STAT_FAIL
;
1666 VDBG(common
, " sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
1668 SK(sd
), ASC(sd
), ASCQ(sd
), sdinfo
);
1671 /* Store and send the Bulk-only CSW */
1672 csw
= (void *)bh
->buf
;
1674 csw
->Signature
= cpu_to_le32(US_BULK_CS_SIGN
);
1675 csw
->Tag
= common
->tag
;
1676 csw
->Residue
= cpu_to_le32(common
->residue
);
1677 csw
->Status
= status
;
1679 bh
->inreq
->length
= US_BULK_CS_WRAP_LEN
;
1680 bh
->inreq
->zero
= 0;
1681 if (!start_in_transfer(common
, bh
))
1682 /* Don't know what to do if common->fsg is NULL */
1685 common
->next_buffhd_to_fill
= bh
->next
;
1690 /*-------------------------------------------------------------------------*/
1693 * Check whether the command is properly formed and whether its data size
1694 * and direction agree with the values we already have.
1696 static int check_command(struct fsg_common
*common
, int cmnd_size
,
1697 enum data_direction data_dir
, unsigned int mask
,
1698 int needs_medium
, const char *name
)
1701 unsigned int lun
= common
->cmnd
[1] >> 5;
1702 static const char dirletter
[4] = {'u', 'o', 'i', 'n'};
1704 struct fsg_lun
*curlun
;
1707 if (common
->data_dir
!= DATA_DIR_UNKNOWN
)
1708 sprintf(hdlen
, ", H%c=%u", dirletter
[(int) common
->data_dir
],
1710 VDBG(common
, "SCSI command: %s; Dc=%d, D%c=%u; Hc=%d%s\n",
1711 name
, cmnd_size
, dirletter
[(int) data_dir
],
1712 common
->data_size_from_cmnd
, common
->cmnd_size
, hdlen
);
1715 * We can't reply at all until we know the correct data direction
1718 if (common
->data_size_from_cmnd
== 0)
1719 data_dir
= DATA_DIR_NONE
;
1720 if (common
->data_size
< common
->data_size_from_cmnd
) {
1722 * Host data size < Device data size is a phase error.
1723 * Carry out the command, but only transfer as much as
1726 common
->data_size_from_cmnd
= common
->data_size
;
1727 common
->phase_error
= 1;
1729 common
->residue
= common
->data_size
;
1730 common
->usb_amount_left
= common
->data_size
;
1732 /* Conflicting data directions is a phase error */
1733 if (common
->data_dir
!= data_dir
&& common
->data_size_from_cmnd
> 0) {
1734 common
->phase_error
= 1;
1738 /* Verify the length of the command itself */
1739 if (cmnd_size
!= common
->cmnd_size
) {
1742 * Special case workaround: There are plenty of buggy SCSI
1743 * implementations. Many have issues with cbw->Length
1744 * field passing a wrong command size. For those cases we
1745 * always try to work around the problem by using the length
1746 * sent by the host side provided it is at least as large
1747 * as the correct command length.
1748 * Examples of such cases would be MS-Windows, which issues
1749 * REQUEST SENSE with cbw->Length == 12 where it should
1750 * be 6, and xbox360 issuing INQUIRY, TEST UNIT READY and
1751 * REQUEST SENSE with cbw->Length == 10 where it should
1754 if (cmnd_size
<= common
->cmnd_size
) {
1755 DBG(common
, "%s is buggy! Expected length %d "
1756 "but we got %d\n", name
,
1757 cmnd_size
, common
->cmnd_size
);
1758 cmnd_size
= common
->cmnd_size
;
1760 common
->phase_error
= 1;
1765 /* Check that the LUN values are consistent */
1766 if (common
->lun
!= lun
)
1767 DBG(common
, "using LUN %u from CBW, not LUN %u from CDB\n",
1771 curlun
= common
->curlun
;
1773 if (common
->cmnd
[0] != REQUEST_SENSE
) {
1774 curlun
->sense_data
= SS_NO_SENSE
;
1775 curlun
->sense_data_info
= 0;
1776 curlun
->info_valid
= 0;
1779 common
->bad_lun_okay
= 0;
1782 * INQUIRY and REQUEST SENSE commands are explicitly allowed
1783 * to use unsupported LUNs; all others may not.
1785 if (common
->cmnd
[0] != INQUIRY
&&
1786 common
->cmnd
[0] != REQUEST_SENSE
) {
1787 DBG(common
, "unsupported LUN %u\n", common
->lun
);
1793 * If a unit attention condition exists, only INQUIRY and
1794 * REQUEST SENSE commands are allowed; anything else must fail.
1796 if (curlun
&& curlun
->unit_attention_data
!= SS_NO_SENSE
&&
1797 common
->cmnd
[0] != INQUIRY
&&
1798 common
->cmnd
[0] != REQUEST_SENSE
) {
1799 curlun
->sense_data
= curlun
->unit_attention_data
;
1800 curlun
->unit_attention_data
= SS_NO_SENSE
;
1804 /* Check that only command bytes listed in the mask are non-zero */
1805 common
->cmnd
[1] &= 0x1f; /* Mask away the LUN */
1806 for (i
= 1; i
< cmnd_size
; ++i
) {
1807 if (common
->cmnd
[i
] && !(mask
& (1 << i
))) {
1809 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1814 /* If the medium isn't mounted and the command needs to access
1815 * it, return an error. */
1816 if (curlun
&& !fsg_lun_is_open(curlun
) && needs_medium
) {
1817 curlun
->sense_data
= SS_MEDIUM_NOT_PRESENT
;
1824 /* wrapper of check_command for data size in blocks handling */
1825 static int check_command_size_in_blocks(struct fsg_common
*common
,
1826 int cmnd_size
, enum data_direction data_dir
,
1827 unsigned int mask
, int needs_medium
, const char *name
)
1830 common
->data_size_from_cmnd
<<= common
->curlun
->blkbits
;
1831 return check_command(common
, cmnd_size
, data_dir
,
1832 mask
, needs_medium
, name
);
1835 static int do_scsi_command(struct fsg_common
*common
)
1837 struct fsg_buffhd
*bh
;
1839 int reply
= -EINVAL
;
1841 static char unknown
[16];
1845 /* Wait for the next buffer to become available for data or status */
1846 bh
= common
->next_buffhd_to_fill
;
1847 common
->next_buffhd_to_drain
= bh
;
1848 while (bh
->state
!= BUF_STATE_EMPTY
) {
1849 rc
= sleep_thread(common
, true);
1853 common
->phase_error
= 0;
1854 common
->short_packet_received
= 0;
1856 down_read(&common
->filesem
); /* We're using the backing file */
1857 switch (common
->cmnd
[0]) {
1860 common
->data_size_from_cmnd
= common
->cmnd
[4];
1861 reply
= check_command(common
, 6, DATA_DIR_TO_HOST
,
1865 reply
= do_inquiry(common
, bh
);
1869 common
->data_size_from_cmnd
= common
->cmnd
[4];
1870 reply
= check_command(common
, 6, DATA_DIR_FROM_HOST
,
1874 reply
= do_mode_select(common
, bh
);
1877 case MODE_SELECT_10
:
1878 common
->data_size_from_cmnd
=
1879 get_unaligned_be16(&common
->cmnd
[7]);
1880 reply
= check_command(common
, 10, DATA_DIR_FROM_HOST
,
1884 reply
= do_mode_select(common
, bh
);
1888 common
->data_size_from_cmnd
= common
->cmnd
[4];
1889 reply
= check_command(common
, 6, DATA_DIR_TO_HOST
,
1890 (1<<1) | (1<<2) | (1<<4), 0,
1893 reply
= do_mode_sense(common
, bh
);
1897 common
->data_size_from_cmnd
=
1898 get_unaligned_be16(&common
->cmnd
[7]);
1899 reply
= check_command(common
, 10, DATA_DIR_TO_HOST
,
1900 (1<<1) | (1<<2) | (3<<7), 0,
1903 reply
= do_mode_sense(common
, bh
);
1906 case ALLOW_MEDIUM_REMOVAL
:
1907 common
->data_size_from_cmnd
= 0;
1908 reply
= check_command(common
, 6, DATA_DIR_NONE
,
1910 "PREVENT-ALLOW MEDIUM REMOVAL");
1912 reply
= do_prevent_allow(common
);
1916 i
= common
->cmnd
[4];
1917 common
->data_size_from_cmnd
= (i
== 0) ? 256 : i
;
1918 reply
= check_command_size_in_blocks(common
, 6,
1923 reply
= do_read(common
);
1927 common
->data_size_from_cmnd
=
1928 get_unaligned_be16(&common
->cmnd
[7]);
1929 reply
		= check_command_size_in_blocks(common, 10, DATA_DIR_TO_HOST,
				      (1<<1) | (0xf<<2) | (3<<7), 1,
				      "READ(10)");
		if (reply == 0)
			reply = do_read(common);
		break;

	case READ_12:
		common->data_size_from_cmnd =
			get_unaligned_be32(&common->cmnd[6]);
		reply = check_command_size_in_blocks(common, 12,
				      DATA_DIR_TO_HOST,
				      (1<<1) | (0xf<<2) | (0xf<<6), 1,
				      "READ(12)");
		if (reply == 0)
			reply = do_read(common);
		break;

	case READ_CAPACITY:
		common->data_size_from_cmnd = 8;
		reply = check_command(common, 10, DATA_DIR_TO_HOST,
				      (0xf<<2) | (1<<8), 1,
				      "READ CAPACITY");
		if (reply == 0)
			reply = do_read_capacity(common, bh);
		break;

	case READ_HEADER:
		if (!common->curlun || !common->curlun->cdrom)
			goto unknown_cmnd;
		common->data_size_from_cmnd =
			get_unaligned_be16(&common->cmnd[7]);
		reply = check_command(common, 10, DATA_DIR_TO_HOST,
				      (3<<7) | (0x1f<<1), 1,
				      "READ HEADER");
		if (reply == 0)
			reply = do_read_header(common, bh);
		break;

	case READ_TOC:
		if (!common->curlun || !common->curlun->cdrom)
			goto unknown_cmnd;
		common->data_size_from_cmnd =
			get_unaligned_be16(&common->cmnd[7]);
		reply = check_command(common, 10, DATA_DIR_TO_HOST,
				      (7<<6) | (1<<1), 1,
				      "READ TOC");
		if (reply == 0)
			reply = do_read_toc(common, bh);
		break;

	case READ_FORMAT_CAPACITIES:
		common->data_size_from_cmnd =
			get_unaligned_be16(&common->cmnd[7]);
		reply = check_command(common, 10, DATA_DIR_TO_HOST,
				      (3<<7), 1,
				      "READ FORMAT CAPACITIES");
		if (reply == 0)
			reply = do_read_format_capacities(common, bh);
		break;

	case REQUEST_SENSE:
		common->data_size_from_cmnd = common->cmnd[4];
		reply = check_command(common, 6, DATA_DIR_TO_HOST,
				      (1<<4), 0,
				      "REQUEST SENSE");
		if (reply == 0)
			reply = do_request_sense(common, bh);
		break;

	case START_STOP:
		common->data_size_from_cmnd = 0;
		reply = check_command(common, 6, DATA_DIR_NONE,
				      (1<<1) | (1<<4), 0,
				      "START-STOP UNIT");
		if (reply == 0)
			reply = do_start_stop(common);
		break;

	case SYNCHRONIZE_CACHE:
		common->data_size_from_cmnd = 0;
		reply = check_command(common, 10, DATA_DIR_NONE,
				      (0xf<<2) | (3<<7), 1,
				      "SYNCHRONIZE CACHE");
		if (reply == 0)
			reply = do_synchronize_cache(common);
		break;

	case TEST_UNIT_READY:
		common->data_size_from_cmnd = 0;
		reply = check_command(common, 6, DATA_DIR_NONE,
				      0, 1,
				      "TEST UNIT READY");
		break;

	/*
	 * Although optional, this command is used by MS-Windows.  We
	 * support a minimal version: BytChk must be 0.
	 */
	case VERIFY:
		common->data_size_from_cmnd = 0;
		reply = check_command(common, 10, DATA_DIR_NONE,
				      (1<<1) | (0xf<<2) | (3<<7), 1,
				      "VERIFY");
		if (reply == 0)
			reply = do_verify(common);
		break;

	case WRITE_6:
		i = common->cmnd[4];
		common->data_size_from_cmnd = (i == 0) ? 256 : i;
		reply = check_command_size_in_blocks(common, 6,
				      DATA_DIR_FROM_HOST,
				      (7<<1) | (1<<4), 1,
				      "WRITE(6)");
		if (reply == 0)
			reply = do_write(common);
		break;

	case WRITE_10:
		common->data_size_from_cmnd =
			get_unaligned_be16(&common->cmnd[7]);
		reply = check_command_size_in_blocks(common, 10,
				      DATA_DIR_FROM_HOST,
				      (1<<1) | (0xf<<2) | (3<<7), 1,
				      "WRITE(10)");
		if (reply == 0)
			reply = do_write(common);
		break;

	case WRITE_12:
		common->data_size_from_cmnd =
			get_unaligned_be32(&common->cmnd[6]);
		reply = check_command_size_in_blocks(common, 12,
				      DATA_DIR_FROM_HOST,
				      (1<<1) | (0xf<<2) | (0xf<<6), 1,
				      "WRITE(12)");
		if (reply == 0)
			reply = do_write(common);
		break;

	/*
	 * Some mandatory commands that we recognize but don't implement.
	 * They don't mean much in this setting.  It's left as an exercise
	 * for anyone interested to implement RESERVE and RELEASE in terms
	 * of Posix locks.
	 */
	case FORMAT_UNIT:
	case RELEASE:
	case RESERVE:
	case SEND_DIAGNOSTIC:

	default:
unknown_cmnd:
		common->data_size_from_cmnd = 0;
		sprintf(unknown, "Unknown x%02x", common->cmnd[0]);
		reply = check_command(common, common->cmnd_size,
				      DATA_DIR_UNKNOWN, ~0, 0, unknown);
		if (reply == 0) {
			common->curlun->sense_data = SS_INVALID_COMMAND;
			reply = -EINVAL;
		}
		break;
	}
	up_read(&common->filesem);

	if (reply == -EINTR || signal_pending(current))
		return -EINTR;

	/* Set up the single reply buffer for finish_reply() */
	if (reply == -EINVAL)
		reply = 0;		/* Error reply length */
	if (reply >= 0 && common->data_dir == DATA_DIR_TO_HOST) {
		reply = min((u32)reply, common->data_size_from_cmnd);
		bh->inreq->length = reply;
		bh->state = BUF_STATE_FULL;
		common->residue -= reply;
	}				/* Otherwise it's already set */

	return 0;
}
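
/*
 * Note on the check_command()/check_command_size_in_blocks() calls above:
 * the bitmask argument marks which CDB bytes are allowed to be non-zero
 * (bit n covers common->cmnd[n]).  For example (1<<1) | (0xf<<2) | (3<<7)
 * accepts a non-zero byte 1, bytes 2-5 (the LBA) and bytes 7-8 (the
 * transfer length) of a ten-byte READ/WRITE CDB.  The 0/1 argument that
 * follows says whether the command needs a medium to be present, and the
 * final string is used only for diagnostics.
 */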
/*-------------------------------------------------------------------------*/

static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
	struct usb_request	*req = bh->outreq;
	struct bulk_cb_wrap	*cbw = req->buf;
	struct fsg_common	*common = fsg->common;

	/* Was this a real packet?  Should it be ignored? */
	if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
		return -EINVAL;

	/* Is the CBW valid? */
	if (req->actual != US_BULK_CB_WRAP_LEN ||
			cbw->Signature != cpu_to_le32(
				US_BULK_CB_SIGN)) {
		DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
				req->actual,
				le32_to_cpu(cbw->Signature));

		/*
		 * The Bulk-only spec says we MUST stall the IN endpoint
		 * (6.6.1), so it's unavoidable.  It also says we must
		 * retain this state until the next reset, but there's
		 * no way to tell the controller driver it should ignore
		 * Clear-Feature(HALT) requests.
		 *
		 * We aren't required to halt the OUT endpoint; instead
		 * we can simply accept and discard any data received
		 * until the next reset.
		 */
		wedge_bulk_in_endpoint(fsg);
		set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
		return -EINVAL;
	}

	/* Is the CBW meaningful? */
	if (cbw->Lun >= ARRAY_SIZE(common->luns) ||
			cbw->Flags & ~US_BULK_FLAG_IN || cbw->Length <= 0 ||
			cbw->Length > MAX_COMMAND_SIZE) {
		DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
				"cmdlen %u\n",
				cbw->Lun, cbw->Flags, cbw->Length);

		/*
		 * We can do anything we want here, so let's stall the
		 * bulk pipes if we are allowed to.
		 */
		if (common->can_stall) {
			fsg_set_halt(fsg, fsg->bulk_out);
			halt_bulk_in_endpoint(fsg);
		}
		return -EINVAL;
	}

	/* Save the command for later */
	common->cmnd_size = cbw->Length;
	memcpy(common->cmnd, cbw->CDB, common->cmnd_size);
	if (cbw->Flags & US_BULK_FLAG_IN)
		common->data_dir = DATA_DIR_TO_HOST;
	else
		common->data_dir = DATA_DIR_FROM_HOST;
	common->data_size = le32_to_cpu(cbw->DataTransferLength);
	if (common->data_size == 0)
		common->data_dir = DATA_DIR_NONE;
	common->lun = cbw->Lun;
	if (common->lun < ARRAY_SIZE(common->luns))
		common->curlun = common->luns[common->lun];
	else
		common->curlun = NULL;
	common->tag = cbw->Tag;
	return 0;
}
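
/*
 * For reference, the CBW parsed above is the 31-byte little-endian
 * Command Block Wrapper of the USB Mass Storage Bulk-Only Transport
 * specification (US_BULK_CB_WRAP_LEN).  Roughly, in terms of the
 * struct bulk_cb_wrap fields used here:
 *
 *	offset  0, 4 bytes:  Signature ("USBC")
 *	offset  4, 4 bytes:  Tag (echoed back in the CSW)
 *	offset  8, 4 bytes:  DataTransferLength
 *	offset 12, 1 byte :  Flags (bit 7 set means IN, device-to-host data)
 *	offset 13, 1 byte :  Lun
 *	offset 14, 1 byte :  Length of the CDB (1..16)
 *	offset 15, 16 bytes: CDB, copied into common->cmnd above
 */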
static int get_next_command(struct fsg_common *common)
{
	struct fsg_buffhd	*bh;
	int			rc = 0;

	/* Wait for the next buffer to become available */
	bh = common->next_buffhd_to_fill;
	while (bh->state != BUF_STATE_EMPTY) {
		rc = sleep_thread(common, true);
		if (rc)
			return rc;
	}

	/* Queue a request to read a Bulk-only CBW */
	set_bulk_out_req_length(common, bh, US_BULK_CB_WRAP_LEN);
	if (!start_out_transfer(common, bh))
		/* Don't know what to do if common->fsg is NULL */
		return -EIO;

	/*
	 * We will drain the buffer in software, which means we
	 * can reuse it for the next filling.  No need to advance
	 * next_buffhd_to_fill.
	 */

	/* Wait for the CBW to arrive */
	while (bh->state != BUF_STATE_FULL) {
		rc = sleep_thread(common, true);
		if (rc)
			return rc;
	}

	rc = fsg_is_set(common) ? received_cbw(common->fsg, bh) : -EIO;
	bh->state = BUF_STATE_EMPTY;

	return rc;
}
/*-------------------------------------------------------------------------*/

static int alloc_request(struct fsg_common *common, struct usb_ep *ep,
			 struct usb_request **preq)
{
	*preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
	if (*preq)
		return 0;
	ERROR(common, "can't allocate request for %s\n", ep->name);
	return -ENOMEM;
}
/* Reset interface setting and re-init endpoint state (toggle etc). */
static int do_set_interface(struct fsg_common *common, struct fsg_dev *new_fsg)
{
	struct fsg_dev *fsg;
	int i, rc = 0;

	if (common->running)
		DBG(common, "reset interface\n");

reset:
	/* Deallocate the requests */
	if (common->fsg) {
		fsg = common->fsg;

		for (i = 0; i < common->fsg_num_buffers; ++i) {
			struct fsg_buffhd *bh = &common->buffhds[i];

			if (bh->inreq) {
				usb_ep_free_request(fsg->bulk_in, bh->inreq);
				bh->inreq = NULL;
			}
			if (bh->outreq) {
				usb_ep_free_request(fsg->bulk_out, bh->outreq);
				bh->outreq = NULL;
			}
		}

		/* Disable the endpoints */
		if (fsg->bulk_in_enabled) {
			usb_ep_disable(fsg->bulk_in);
			fsg->bulk_in_enabled = 0;
		}
		if (fsg->bulk_out_enabled) {
			usb_ep_disable(fsg->bulk_out);
			fsg->bulk_out_enabled = 0;
		}

		common->fsg = NULL;
		wake_up(&common->fsg_wait);
	}

	common->running = 0;
	if (!new_fsg || rc)
		return rc;

	common->fsg = new_fsg;
	fsg = common->fsg;

	/* Enable the endpoints */
	rc = config_ep_by_speed(common->gadget, &(fsg->function), fsg->bulk_in);
	if (rc)
		goto reset;
	rc = usb_ep_enable(fsg->bulk_in);
	if (rc)
		goto reset;
	fsg->bulk_in->driver_data = common;
	fsg->bulk_in_enabled = 1;

	rc = config_ep_by_speed(common->gadget, &(fsg->function),
				fsg->bulk_out);
	if (rc)
		goto reset;
	rc = usb_ep_enable(fsg->bulk_out);
	if (rc)
		goto reset;
	fsg->bulk_out->driver_data = common;
	fsg->bulk_out_enabled = 1;
	common->bulk_out_maxpacket = usb_endpoint_maxp(fsg->bulk_out->desc);
	clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);

	/* Allocate the requests */
	for (i = 0; i < common->fsg_num_buffers; ++i) {
		struct fsg_buffhd	*bh = &common->buffhds[i];

		rc = alloc_request(common, fsg->bulk_in, &bh->inreq);
		if (rc)
			goto reset;
		rc = alloc_request(common, fsg->bulk_out, &bh->outreq);
		if (rc)
			goto reset;
		bh->inreq->buf = bh->outreq->buf = bh->buf;
		bh->inreq->context = bh->outreq->context = bh;
		bh->inreq->complete = bulk_in_complete;
		bh->outreq->complete = bulk_out_complete;
	}

	common->running = 1;
	for (i = 0; i < ARRAY_SIZE(common->luns); ++i)
		if (common->luns[i])
			common->luns[i]->unit_attention_data =
				SS_RESET_OCCURRED;
	return rc;
}
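
/*
 * After a successful do_set_interface() each of the fsg_num_buffers
 * buffer heads owns one bulk-IN and one bulk-OUT usb_request, and both
 * requests point at the same data buffer (bh->buf).  The main thread can
 * therefore keep one request queued on the wire while it is still
 * filling or draining another buffer head.
 */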
/****************************** ALT CONFIGS ******************************/

static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct fsg_dev *fsg = fsg_from_func(f);
	fsg->common->new_fsg = fsg;
	raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
	return USB_GADGET_DELAYED_STATUS;
}

static void fsg_disable(struct usb_function *f)
{
	struct fsg_dev *fsg = fsg_from_func(f);
	fsg->common->new_fsg = NULL;
	raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
}
/*-------------------------------------------------------------------------*/

static void handle_exception(struct fsg_common *common)
{
	int			i;
	struct fsg_buffhd	*bh;
	enum fsg_state		old_state;
	struct fsg_lun		*curlun;
	unsigned int		exception_req_tag;

	/*
	 * Clear the existing signals.  Anything but SIGUSR1 is converted
	 * into a high-priority EXIT exception.
	 */
	for (;;) {
		int sig = kernel_dequeue_signal(NULL);
		if (!sig)
			break;
		if (sig != SIGUSR1) {
			if (common->state < FSG_STATE_EXIT)
				DBG(common, "Main thread exiting on signal\n");
			raise_exception(common, FSG_STATE_EXIT);
		}
	}

	/* Cancel all the pending transfers */
	if (likely(common->fsg)) {
		for (i = 0; i < common->fsg_num_buffers; ++i) {
			bh = &common->buffhds[i];
			if (bh->inreq_busy)
				usb_ep_dequeue(common->fsg->bulk_in, bh->inreq);
			if (bh->outreq_busy)
				usb_ep_dequeue(common->fsg->bulk_out,
					       bh->outreq);
		}

		/* Wait until everything is idle */
		for (;;) {
			int num_active = 0;

			for (i = 0; i < common->fsg_num_buffers; ++i) {
				bh = &common->buffhds[i];
				num_active += bh->inreq_busy + bh->outreq_busy;
			}
			if (num_active == 0)
				break;
			if (sleep_thread(common, true))
				return;
		}

		/* Clear out the controller's fifos */
		if (common->fsg->bulk_in_enabled)
			usb_ep_fifo_flush(common->fsg->bulk_in);
		if (common->fsg->bulk_out_enabled)
			usb_ep_fifo_flush(common->fsg->bulk_out);
	}

	/*
	 * Reset the I/O buffer states and pointers, the SCSI
	 * state, and the exception.  Then invoke the handler.
	 */
	spin_lock_irq(&common->lock);

	for (i = 0; i < common->fsg_num_buffers; ++i) {
		bh = &common->buffhds[i];
		bh->state = BUF_STATE_EMPTY;
	}
	common->next_buffhd_to_fill = &common->buffhds[0];
	common->next_buffhd_to_drain = &common->buffhds[0];
	exception_req_tag = common->exception_req_tag;
	old_state = common->state;

	if (old_state == FSG_STATE_ABORT_BULK_OUT)
		common->state = FSG_STATE_STATUS_PHASE;
	else {
		for (i = 0; i < ARRAY_SIZE(common->luns); ++i) {
			curlun = common->luns[i];
			if (!curlun)
				continue;
			curlun->prevent_medium_removal = 0;
			curlun->sense_data = SS_NO_SENSE;
			curlun->unit_attention_data = SS_NO_SENSE;
			curlun->sense_data_info = 0;
			curlun->info_valid = 0;
		}
		common->state = FSG_STATE_IDLE;
	}
	spin_unlock_irq(&common->lock);

	/* Carry out any extra actions required for the exception */
	switch (old_state) {
	case FSG_STATE_ABORT_BULK_OUT:
		send_status(common);
		spin_lock_irq(&common->lock);
		if (common->state == FSG_STATE_STATUS_PHASE)
			common->state = FSG_STATE_IDLE;
		spin_unlock_irq(&common->lock);
		break;

	case FSG_STATE_RESET:
		/*
		 * In case we were forced against our will to halt a
		 * bulk endpoint, clear the halt now.  (The SuperH UDC
		 * requires this.)
		 */
		if (!fsg_is_set(common))
			break;
		if (test_and_clear_bit(IGNORE_BULK_OUT,
				       &common->fsg->atomic_bitflags))
			usb_ep_clear_halt(common->fsg->bulk_in);

		if (common->ep0_req_tag == exception_req_tag)
			ep0_queue(common);	/* Complete the status stage */

		/*
		 * Technically this should go here, but it would only be
		 * a waste of time.  Ditto for the INTERFACE_CHANGE and
		 * CONFIG_CHANGE cases.
		 */
		/* for (i = 0; i < ARRAY_SIZE(common->luns); ++i) */
		/*	if (common->luns[i]) */
		/*		common->luns[i]->unit_attention_data = */
		/*			SS_RESET_OCCURRED; */
		break;

	case FSG_STATE_CONFIG_CHANGE:
		do_set_interface(common, common->new_fsg);
		if (common->new_fsg)
			usb_composite_setup_continue(common->cdev);
		break;

	case FSG_STATE_EXIT:
	case FSG_STATE_TERMINATED:
		do_set_interface(common, NULL);		/* Free resources */
		spin_lock_irq(&common->lock);
		common->state = FSG_STATE_TERMINATED;	/* Stop the thread */
		spin_unlock_irq(&common->lock);
		break;

	case FSG_STATE_INTERFACE_CHANGE:
	case FSG_STATE_DISCONNECT:
	case FSG_STATE_COMMAND_PHASE:
	case FSG_STATE_DATA_PHASE:
	case FSG_STATE_STATUS_PHASE:
	case FSG_STATE_IDLE:
		break;
	}
}
/*-------------------------------------------------------------------------*/

static int fsg_main_thread(void *common_)
{
	struct fsg_common	*common = common_;
	int			i;

	/*
	 * Allow the thread to be killed by a signal, but set the signal mask
	 * to block everything but INT, TERM, KILL, and USR1.
	 */
	allow_signal(SIGINT);
	allow_signal(SIGTERM);
	allow_signal(SIGKILL);
	allow_signal(SIGUSR1);

	/* Allow the thread to be frozen */
	set_freezable();

	/*
	 * Arrange for userspace references to be interpreted as kernel
	 * pointers.  That way we can pass a kernel pointer to a routine
	 * that expects a __user pointer and it will work okay.
	 */
	set_fs(get_ds());

	/* The main loop */
	while (common->state != FSG_STATE_TERMINATED) {
		if (exception_in_progress(common) || signal_pending(current)) {
			handle_exception(common);
			continue;
		}

		if (!common->running) {
			sleep_thread(common, true);
			continue;
		}

		if (get_next_command(common))
			continue;

		spin_lock_irq(&common->lock);
		if (!exception_in_progress(common))
			common->state = FSG_STATE_DATA_PHASE;
		spin_unlock_irq(&common->lock);

		if (do_scsi_command(common) || finish_reply(common))
			continue;

		spin_lock_irq(&common->lock);
		if (!exception_in_progress(common))
			common->state = FSG_STATE_STATUS_PHASE;
		spin_unlock_irq(&common->lock);

		if (send_status(common))
			continue;

		spin_lock_irq(&common->lock);
		if (!exception_in_progress(common))
			common->state = FSG_STATE_IDLE;
		spin_unlock_irq(&common->lock);
	}

	spin_lock_irq(&common->lock);
	common->thread_task = NULL;
	spin_unlock_irq(&common->lock);

	/* Eject media from all LUNs */

	down_write(&common->filesem);
	for (i = 0; i < ARRAY_SIZE(common->luns); i++) {
		struct fsg_lun *curlun = common->luns[i];

		if (curlun && fsg_lun_is_open(curlun))
			fsg_lun_close(curlun);
	}
	up_write(&common->filesem);

	/* Let fsg_unbind() know the thread has exited */
	complete_and_exit(&common->thread_notifier, 0);
}
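
/*
 * The loop above is the whole Bulk-Only state machine as seen from the
 * device side: get_next_command() runs the command phase (reads one CBW),
 * do_scsi_command()/finish_reply() run the data phase, and send_status()
 * closes the cycle with a CSW.  Every phase transition is made under
 * common->lock so a pending exception can redirect the thread into
 * handle_exception() instead of the next phase.
 */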
/*************************** DEVICE ATTRIBUTES ***************************/

static ssize_t ro_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	struct fsg_lun		*curlun = fsg_lun_from_dev(dev);

	return fsg_show_ro(curlun, buf);
}

static ssize_t nofua_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct fsg_lun		*curlun = fsg_lun_from_dev(dev);

	return fsg_show_nofua(curlun, buf);
}

static ssize_t file_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct fsg_lun		*curlun = fsg_lun_from_dev(dev);
	struct rw_semaphore	*filesem = dev_get_drvdata(dev);

	return fsg_show_file(curlun, filesem, buf);
}

static ssize_t ro_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct fsg_lun		*curlun = fsg_lun_from_dev(dev);
	struct rw_semaphore	*filesem = dev_get_drvdata(dev);

	return fsg_store_ro(curlun, filesem, buf, count);
}

static ssize_t nofua_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct fsg_lun		*curlun = fsg_lun_from_dev(dev);

	return fsg_store_nofua(curlun, buf, count);
}

static ssize_t file_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct fsg_lun		*curlun = fsg_lun_from_dev(dev);
	struct rw_semaphore	*filesem = dev_get_drvdata(dev);

	return fsg_store_file(curlun, filesem, buf, count);
}

static DEVICE_ATTR_RW(nofua);
/* mode will be set in fsg_lun_dev_is_visible() */
static DEVICE_ATTR(ro, 0, ro_show, ro_store);
static DEVICE_ATTR(file, 0, file_show, file_store);
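
/*
 * With sysfs enabled these attributes appear under each LUN's device
 * directory, letting userspace swap or reconfigure backing storage at
 * run time.  A hypothetical example (the exact sysfs path depends on the
 * gadget and UDC in use; the file name is illustrative only):
 *
 *	echo /root/backing.img > .../lun0/file	(insert a medium)
 *	echo "" > .../lun0/file			(eject a removable medium)
 *	echo 1 > .../lun0/ro			(make the LUN read-only)
 */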
/****************************** FSG COMMON ******************************/

static void fsg_common_release(struct kref *ref);

static void fsg_lun_release(struct device *dev)
{
	/* Nothing needs to be done */
}

void fsg_common_get(struct fsg_common *common)
{
	kref_get(&common->ref);
}
EXPORT_SYMBOL_GPL(fsg_common_get);

void fsg_common_put(struct fsg_common *common)
{
	kref_put(&common->ref, fsg_common_release);
}
EXPORT_SYMBOL_GPL(fsg_common_put);

static struct fsg_common *fsg_common_setup(struct fsg_common *common)
{
	if (!common) {
		common = kzalloc(sizeof(*common), GFP_KERNEL);
		if (!common)
			return ERR_PTR(-ENOMEM);
		common->free_storage_on_release = 1;
	} else {
		common->free_storage_on_release = 0;
	}
	init_rwsem(&common->filesem);
	spin_lock_init(&common->lock);
	kref_init(&common->ref);
	init_completion(&common->thread_notifier);
	init_waitqueue_head(&common->fsg_wait);
	common->state = FSG_STATE_TERMINATED;
	memset(common->luns, 0, sizeof(common->luns));

	return common;
}
void fsg_common_set_sysfs(struct fsg_common *common, bool sysfs)
{
	common->sysfs = sysfs;
}
EXPORT_SYMBOL_GPL(fsg_common_set_sysfs);

static void _fsg_common_free_buffers(struct fsg_buffhd *buffhds, unsigned n)
{
	if (buffhds) {
		struct fsg_buffhd *bh = buffhds;
		while (n--) {
			kfree(bh->buf);
			bh->buf = NULL;
			++bh;
		}
		kfree(buffhds);
	}
}

int fsg_common_set_num_buffers(struct fsg_common *common, unsigned int n)
{
	struct fsg_buffhd *bh, *buffhds;
	int i;

	buffhds = kcalloc(n, sizeof(*buffhds), GFP_KERNEL);
	if (!buffhds)
		return -ENOMEM;

	/* Data buffers cyclic list */
	bh = buffhds;
	i = n;
	goto buffhds_first_it;
	do {
		bh->next = bh + 1;
		++bh;
buffhds_first_it:
		bh->buf = kmalloc(FSG_BUFLEN, GFP_KERNEL);
		if (unlikely(!bh->buf))
			goto error_release;
	} while (--i);
	bh->next = buffhds;

	_fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers);
	common->fsg_num_buffers = n;
	common->buffhds = buffhds;

	return 0;

error_release:
	/*
	 * "buf"s pointed to by heads after n - i are NULL
	 * so releasing them won't hurt
	 */
	_fsg_common_free_buffers(buffhds, n);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(fsg_common_set_num_buffers);
void fsg_common_remove_lun(struct fsg_lun *lun)
{
	if (device_is_registered(&lun->dev))
		device_unregister(&lun->dev);
	fsg_lun_close(lun);
	kfree(lun);
}
EXPORT_SYMBOL_GPL(fsg_common_remove_lun);

static void _fsg_common_remove_luns(struct fsg_common *common, int n)
{
	int i;

	for (i = 0; i < n; ++i)
		if (common->luns[i]) {
			fsg_common_remove_lun(common->luns[i]);
			common->luns[i] = NULL;
		}
}

void fsg_common_remove_luns(struct fsg_common *common)
{
	_fsg_common_remove_luns(common, ARRAY_SIZE(common->luns));
}
EXPORT_SYMBOL_GPL(fsg_common_remove_luns);

void fsg_common_free_buffers(struct fsg_common *common)
{
	_fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers);
	common->buffhds = NULL;
}
EXPORT_SYMBOL_GPL(fsg_common_free_buffers);
int fsg_common_set_cdev(struct fsg_common *common,
			struct usb_composite_dev *cdev, bool can_stall)
{
	struct usb_string *us;

	common->gadget = cdev->gadget;
	common->ep0 = cdev->gadget->ep0;
	common->ep0req = cdev->req;
	common->cdev = cdev;

	us = usb_gstrings_attach(cdev, fsg_strings_array,
				 ARRAY_SIZE(fsg_strings));
	if (IS_ERR(us))
		return PTR_ERR(us);

	fsg_intf_desc.iInterface = us[FSG_STRING_INTERFACE].id;

	/*
	 * Some peripheral controllers are known not to be able to
	 * halt bulk endpoints correctly.  If one of them is present,
	 * disable stalls.
	 */
	common->can_stall = can_stall &&
			gadget_is_stall_supported(common->gadget);

	return 0;
}
EXPORT_SYMBOL_GPL(fsg_common_set_cdev);
static struct attribute *fsg_lun_dev_attrs[] = {
	&dev_attr_ro.attr,
	&dev_attr_file.attr,
	&dev_attr_nofua.attr,
	NULL
};

static umode_t fsg_lun_dev_is_visible(struct kobject *kobj,
				      struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct fsg_lun *lun = fsg_lun_from_dev(dev);

	if (attr == &dev_attr_ro.attr)
		return lun->cdrom ? S_IRUGO : (S_IWUSR | S_IRUGO);
	if (attr == &dev_attr_file.attr)
		return lun->removable ? (S_IWUSR | S_IRUGO) : S_IRUGO;
	return attr->mode;
}

static const struct attribute_group fsg_lun_dev_group = {
	.attrs = fsg_lun_dev_attrs,
	.is_visible = fsg_lun_dev_is_visible,
};

static const struct attribute_group *fsg_lun_dev_groups[] = {
	&fsg_lun_dev_group,
	NULL
};
int fsg_common_create_lun(struct fsg_common *common, struct fsg_lun_config *cfg,
			  unsigned int id, const char *name,
			  const char **name_pfx)
{
	struct fsg_lun *lun;
	char *pathbuf, *p;
	int rc = -ENOMEM;

	if (id >= ARRAY_SIZE(common->luns))
		return -ENODEV;

	if (common->luns[id])
		return -EBUSY;

	if (!cfg->filename && !cfg->removable) {
		pr_err("no file given for LUN%d\n", id);
		return -EINVAL;
	}

	lun = kzalloc(sizeof(*lun), GFP_KERNEL);
	if (!lun)
		return -ENOMEM;

	lun->name_pfx = name_pfx;

	lun->cdrom = !!cfg->cdrom;
	lun->ro = cfg->cdrom || cfg->ro;
	lun->initially_ro = lun->ro;
	lun->removable = !!cfg->removable;

	if (!common->sysfs) {
		/* we DON'T own the name!*/
		lun->name = name;
	} else {
		lun->dev.release = fsg_lun_release;
		lun->dev.parent = &common->gadget->dev;
		lun->dev.groups = fsg_lun_dev_groups;
		dev_set_drvdata(&lun->dev, &common->filesem);
		dev_set_name(&lun->dev, "%s", name);
		lun->name = dev_name(&lun->dev);

		rc = device_register(&lun->dev);
		if (rc) {
			pr_info("failed to register LUN%d: %d\n", id, rc);
			put_device(&lun->dev);
			goto error_sysfs;
		}
	}

	common->luns[id] = lun;

	if (cfg->filename) {
		rc = fsg_lun_open(lun, cfg->filename);
		if (rc)
			goto error_lun;
	}

	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	p = "(no medium)";
	if (fsg_lun_is_open(lun)) {
		p = "(error)";
		if (pathbuf) {
			p = file_path(lun->filp, pathbuf, PATH_MAX);
			if (IS_ERR(p))
				p = "(error)";
		}
	}
	pr_info("LUN: %s%s%sfile: %s\n",
	      lun->removable ? "removable " : "",
	      lun->ro ? "read only " : "",
	      lun->cdrom ? "CD-ROM " : "",
	      p);
	kfree(pathbuf);

	return 0;

error_lun:
	if (device_is_registered(&lun->dev))
		device_unregister(&lun->dev);
	fsg_lun_close(lun);
	common->luns[id] = NULL;
error_sysfs:
	kfree(lun);
	return rc;
}
EXPORT_SYMBOL_GPL(fsg_common_create_lun);
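
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * the caller fills a struct fsg_lun_config and asks for a slot, e.g.
 *
 *	struct fsg_lun_config cfg = {
 *		.filename  = "/root/backing.img",	(illustrative path)
 *		.removable = true,
 *	};
 *	err = fsg_common_create_lun(common, &cfg, 0, "lun0", NULL);
 *
 * fsg_common_create_luns() below simply repeats this for cfg->nluns
 * entries, naming the LUNs "lun0", "lun1", and so on.
 */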
int fsg_common_create_luns(struct fsg_common *common, struct fsg_config *cfg)
{
	char buf[8]; /* enough for 100000000 different numbers, decimal */
	int i, rc;

	fsg_common_remove_luns(common);

	for (i = 0; i < cfg->nluns; ++i) {
		snprintf(buf, sizeof(buf), "lun%d", i);
		rc = fsg_common_create_lun(common, &cfg->luns[i], i, buf, NULL);
		if (rc)
			goto fail;
	}

	pr_info("Number of LUNs=%d\n", cfg->nluns);

	return 0;

fail:
	_fsg_common_remove_luns(common, i);
	return rc;
}
EXPORT_SYMBOL_GPL(fsg_common_create_luns);
void fsg_common_set_inquiry_string(struct fsg_common *common, const char *vn,
				   const char *pn)
{
	int i;

	/* Prepare inquiryString */
	i = get_default_bcdDevice();
	snprintf(common->inquiry_string, sizeof(common->inquiry_string),
		 "%-8s%-16s%04x", vn ?: "Linux",
		 /* Assume product name dependent on the first LUN */
		 pn ?: ((*common->luns)->cdrom
		     ? "File-CD Gadget"
		     : "File-Stor Gadget"),
		 i);
}
EXPORT_SYMBOL_GPL(fsg_common_set_inquiry_string);
static void fsg_common_release(struct kref *ref)
{
	struct fsg_common *common = container_of(ref, struct fsg_common, ref);
	int i;

	/* If the thread isn't already dead, tell it to exit now */
	if (common->state != FSG_STATE_TERMINATED) {
		raise_exception(common, FSG_STATE_EXIT);
		wait_for_completion(&common->thread_notifier);
		common->thread_task = NULL;
	}

	for (i = 0; i < ARRAY_SIZE(common->luns); ++i) {
		struct fsg_lun *lun = common->luns[i];

		if (!lun)
			continue;
		fsg_lun_close(lun);
		if (device_is_registered(&lun->dev))
			device_unregister(&lun->dev);
		kfree(lun);
	}

	_fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers);
	if (common->free_storage_on_release)
		kfree(common);
}
/*-------------------------------------------------------------------------*/

static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct fsg_dev		*fsg = fsg_from_func(f);
	struct fsg_common	*common = fsg->common;
	struct usb_gadget	*gadget = c->cdev->gadget;
	int			i;
	struct usb_ep		*ep;
	unsigned		max_burst;
	int			ret;
	struct fsg_opts		*opts;

	/* Don't allow to bind if we don't have at least one LUN */
	ret = _fsg_common_get_max_lun(common);
	if (ret < 0) {
		pr_err("There should be at least one LUN.\n");
		return -EINVAL;
	}

	opts = fsg_opts_from_func_inst(f->fi);
	if (!opts->no_configfs) {
		ret = fsg_common_set_cdev(fsg->common, c->cdev,
					  fsg->common->can_stall);
		if (ret)
			return ret;
		fsg_common_set_inquiry_string(fsg->common, NULL, NULL);
	}

	if (!common->thread_task) {
		common->state = FSG_STATE_IDLE;
		common->thread_task =
			kthread_create(fsg_main_thread, common, "file-storage");
		if (IS_ERR(common->thread_task)) {
			int ret = PTR_ERR(common->thread_task);

			common->thread_task = NULL;
			common->state = FSG_STATE_TERMINATED;
			return ret;
		}
		DBG(common, "I/O thread pid: %d\n",
		    task_pid_nr(common->thread_task));
		wake_up_process(common->thread_task);
	}

	fsg->gadget = gadget;

	/* New interface */
	i = usb_interface_id(c, f);
	if (i < 0)
		goto fail;
	fsg_intf_desc.bInterfaceNumber = i;
	fsg->interface_number = i;

	/* Find all the endpoints we will use */
	ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc);
	if (!ep)
		goto autoconf_fail;
	fsg->bulk_in = ep;

	ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc);
	if (!ep)
		goto autoconf_fail;
	fsg->bulk_out = ep;

	/* Assume endpoint addresses are the same for both speeds */
	fsg_hs_bulk_in_desc.bEndpointAddress =
		fsg_fs_bulk_in_desc.bEndpointAddress;
	fsg_hs_bulk_out_desc.bEndpointAddress =
		fsg_fs_bulk_out_desc.bEndpointAddress;

	/* Calculate bMaxBurst, we know packet size is 1024 */
	max_burst = min_t(unsigned, FSG_BUFLEN / 1024, 15);

	fsg_ss_bulk_in_desc.bEndpointAddress =
		fsg_fs_bulk_in_desc.bEndpointAddress;
	fsg_ss_bulk_in_comp_desc.bMaxBurst = max_burst;

	fsg_ss_bulk_out_desc.bEndpointAddress =
		fsg_fs_bulk_out_desc.bEndpointAddress;
	fsg_ss_bulk_out_comp_desc.bMaxBurst = max_burst;

	ret = usb_assign_descriptors(f, fsg_fs_function, fsg_hs_function,
			fsg_ss_function, fsg_ss_function);
	if (ret)
		goto autoconf_fail;

	return 0;

autoconf_fail:
	ERROR(fsg, "unable to autoconfigure all endpoints\n");
	i = -ENOTSUPP;
fail:
	/* terminate the thread */
	if (fsg->common->state != FSG_STATE_TERMINATED) {
		raise_exception(fsg->common, FSG_STATE_EXIT);
		wait_for_completion(&fsg->common->thread_notifier);
	}
	return i;
}
/****************************** ALLOCATE FUNCTION *************************/

static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct fsg_dev		*fsg = fsg_from_func(f);
	struct fsg_common	*common = fsg->common;

	DBG(fsg, "unbind\n");
	if (fsg->common->fsg == fsg) {
		fsg->common->new_fsg = NULL;
		raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
		/* FIXME: make interruptible or killable somehow? */
		wait_event(common->fsg_wait, common->fsg != fsg);
	}

	usb_free_all_descriptors(&fsg->function);
}
static inline struct fsg_lun_opts *to_fsg_lun_opts(struct config_item *item)
{
	return container_of(to_config_group(item), struct fsg_lun_opts, group);
}

static inline struct fsg_opts *to_fsg_opts(struct config_item *item)
{
	return container_of(to_config_group(item), struct fsg_opts,
			    func_inst.group);
}

static void fsg_lun_attr_release(struct config_item *item)
{
	struct fsg_lun_opts *lun_opts;

	lun_opts = to_fsg_lun_opts(item);
	kfree(lun_opts);
}

static struct configfs_item_operations fsg_lun_item_ops = {
	.release		= fsg_lun_attr_release,
};
static ssize_t fsg_lun_opts_file_show(struct config_item *item, char *page)
{
	struct fsg_lun_opts *opts = to_fsg_lun_opts(item);
	struct fsg_opts *fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent);

	return fsg_show_file(opts->lun, &fsg_opts->common->filesem, page);
}

static ssize_t fsg_lun_opts_file_store(struct config_item *item,
				       const char *page, size_t len)
{
	struct fsg_lun_opts *opts = to_fsg_lun_opts(item);
	struct fsg_opts *fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent);

	return fsg_store_file(opts->lun, &fsg_opts->common->filesem, page, len);
}

CONFIGFS_ATTR(fsg_lun_opts_, file);

static ssize_t fsg_lun_opts_ro_show(struct config_item *item, char *page)
{
	return fsg_show_ro(to_fsg_lun_opts(item)->lun, page);
}

static ssize_t fsg_lun_opts_ro_store(struct config_item *item,
				     const char *page, size_t len)
{
	struct fsg_lun_opts *opts = to_fsg_lun_opts(item);
	struct fsg_opts *fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent);

	return fsg_store_ro(opts->lun, &fsg_opts->common->filesem, page, len);
}

CONFIGFS_ATTR(fsg_lun_opts_, ro);

static ssize_t fsg_lun_opts_removable_show(struct config_item *item,
					   char *page)
{
	return fsg_show_removable(to_fsg_lun_opts(item)->lun, page);
}

static ssize_t fsg_lun_opts_removable_store(struct config_item *item,
					    const char *page, size_t len)
{
	return fsg_store_removable(to_fsg_lun_opts(item)->lun, page, len);
}

CONFIGFS_ATTR(fsg_lun_opts_, removable);

static ssize_t fsg_lun_opts_cdrom_show(struct config_item *item, char *page)
{
	return fsg_show_cdrom(to_fsg_lun_opts(item)->lun, page);
}

static ssize_t fsg_lun_opts_cdrom_store(struct config_item *item,
					const char *page, size_t len)
{
	struct fsg_lun_opts *opts = to_fsg_lun_opts(item);
	struct fsg_opts *fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent);

	return fsg_store_cdrom(opts->lun, &fsg_opts->common->filesem, page,
			       len);
}

CONFIGFS_ATTR(fsg_lun_opts_, cdrom);

static ssize_t fsg_lun_opts_nofua_show(struct config_item *item, char *page)
{
	return fsg_show_nofua(to_fsg_lun_opts(item)->lun, page);
}

static ssize_t fsg_lun_opts_nofua_store(struct config_item *item,
					const char *page, size_t len)
{
	return fsg_store_nofua(to_fsg_lun_opts(item)->lun, page, len);
}

CONFIGFS_ATTR(fsg_lun_opts_, nofua);

static ssize_t fsg_lun_opts_inquiry_string_show(struct config_item *item,
						char *page)
{
	return fsg_show_inquiry_string(to_fsg_lun_opts(item)->lun, page);
}

static ssize_t fsg_lun_opts_inquiry_string_store(struct config_item *item,
						 const char *page, size_t len)
{
	return fsg_store_inquiry_string(to_fsg_lun_opts(item)->lun, page, len);
}

CONFIGFS_ATTR(fsg_lun_opts_, inquiry_string);

static struct configfs_attribute *fsg_lun_attrs[] = {
	&fsg_lun_opts_attr_file,
	&fsg_lun_opts_attr_ro,
	&fsg_lun_opts_attr_removable,
	&fsg_lun_opts_attr_cdrom,
	&fsg_lun_opts_attr_nofua,
	&fsg_lun_opts_attr_inquiry_string,
	NULL,
};

static struct config_item_type fsg_lun_type = {
	.ct_item_ops	= &fsg_lun_item_ops,
	.ct_attrs	= fsg_lun_attrs,
	.ct_owner	= THIS_MODULE,
};
static struct config_group *fsg_lun_make(struct config_group *group,
					 const char *name)
{
	struct fsg_lun_opts *opts;
	struct fsg_opts *fsg_opts;
	struct fsg_lun_config config;
	char *num_str;
	u8 num;
	int ret;

	num_str = strchr(name, '.');
	if (!num_str) {
		pr_err("Unable to locate . in LUN.NUMBER\n");
		return ERR_PTR(-EINVAL);
	}
	num_str++;

	ret = kstrtou8(num_str, 0, &num);
	if (ret)
		return ERR_PTR(ret);

	fsg_opts = to_fsg_opts(&group->cg_item);
	if (num >= FSG_MAX_LUNS)
		return ERR_PTR(-ERANGE);

	mutex_lock(&fsg_opts->lock);
	if (fsg_opts->refcnt || fsg_opts->common->luns[num]) {
		ret = -EBUSY;
		goto out;
	}

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts) {
		ret = -ENOMEM;
		goto out;
	}

	memset(&config, 0, sizeof(config));
	config.removable = true;

	ret = fsg_common_create_lun(fsg_opts->common, &config, num, name,
				    (const char **)&group->cg_item.ci_name);
	if (ret) {
		kfree(opts);
		goto out;
	}

	opts->lun = fsg_opts->common->luns[num];
	opts->lun_id = num;
	mutex_unlock(&fsg_opts->lock);

	config_group_init_type_name(&opts->group, name, &fsg_lun_type);

	return &opts->group;
out:
	mutex_unlock(&fsg_opts->lock);
	return ERR_PTR(ret);
}

static void fsg_lun_drop(struct config_group *group, struct config_item *item)
{
	struct fsg_lun_opts *lun_opts;
	struct fsg_opts *fsg_opts;

	lun_opts = to_fsg_lun_opts(item);
	fsg_opts = to_fsg_opts(&group->cg_item);

	mutex_lock(&fsg_opts->lock);
	if (fsg_opts->refcnt) {
		struct config_item *gadget;

		gadget = group->cg_item.ci_parent->ci_parent;
		unregister_gadget_item(gadget);
	}

	fsg_common_remove_lun(lun_opts->lun);
	fsg_opts->common->luns[lun_opts->lun_id] = NULL;
	lun_opts->lun_id = 0;
	mutex_unlock(&fsg_opts->lock);

	config_item_put(item);
}
static void fsg_attr_release(struct config_item *item)
{
	struct fsg_opts *opts = to_fsg_opts(item);

	usb_put_function_instance(&opts->func_inst);
}

static struct configfs_item_operations fsg_item_ops = {
	.release		= fsg_attr_release,
};

static ssize_t fsg_opts_stall_show(struct config_item *item, char *page)
{
	struct fsg_opts *opts = to_fsg_opts(item);
	int result;

	mutex_lock(&opts->lock);
	result = sprintf(page, "%d", opts->common->can_stall);
	mutex_unlock(&opts->lock);

	return result;
}

static ssize_t fsg_opts_stall_store(struct config_item *item, const char *page,
				    size_t len)
{
	struct fsg_opts *opts = to_fsg_opts(item);
	int ret;
	bool stall;

	mutex_lock(&opts->lock);

	if (opts->refcnt) {
		mutex_unlock(&opts->lock);
		return -EBUSY;
	}

	ret = strtobool(page, &stall);
	if (!ret) {
		opts->common->can_stall = stall;
		ret = len;
	}

	mutex_unlock(&opts->lock);

	return ret;
}

CONFIGFS_ATTR(fsg_opts_, stall);
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
static ssize_t fsg_opts_num_buffers_show(struct config_item *item, char *page)
{
	struct fsg_opts *opts = to_fsg_opts(item);
	int result;

	mutex_lock(&opts->lock);
	result = sprintf(page, "%d", opts->common->fsg_num_buffers);
	mutex_unlock(&opts->lock);

	return result;
}

static ssize_t fsg_opts_num_buffers_store(struct config_item *item,
					  const char *page, size_t len)
{
	struct fsg_opts *opts = to_fsg_opts(item);
	int ret;
	u8 num;

	mutex_lock(&opts->lock);
	if (opts->refcnt) {
		ret = -EBUSY;
		goto end;
	}
	ret = kstrtou8(page, 0, &num);
	if (ret)
		goto end;

	fsg_common_set_num_buffers(opts->common, num);
	ret = len;

end:
	mutex_unlock(&opts->lock);
	return ret;
}

CONFIGFS_ATTR(fsg_opts_, num_buffers);
#endif

static struct configfs_attribute *fsg_attrs[] = {
	&fsg_opts_attr_stall,
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
	&fsg_opts_attr_num_buffers,
#endif
	NULL,
};

static struct configfs_group_operations fsg_group_ops = {
	.make_group	= fsg_lun_make,
	.drop_item	= fsg_lun_drop,
};

static struct config_item_type fsg_func_type = {
	.ct_item_ops	= &fsg_item_ops,
	.ct_group_ops	= &fsg_group_ops,
	.ct_attrs	= fsg_attrs,
	.ct_owner	= THIS_MODULE,
};
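
/*
 * The configfs groups above are what user-space gadget configuration ends
 * up driving.  A hypothetical session (paths depend on where configfs is
 * mounted and on the gadget's name; the image file name is illustrative):
 *
 *	cd /sys/kernel/config/usb_gadget/g1/functions/mass_storage.0
 *	echo 0 > stall			# optional, see fsg_opts_stall_store()
 *	mkdir lun.1			# fsg_lun_make() creates a second LUN
 *	echo /root/backing.img > lun.1/file
 *	echo 1 > lun.1/removable
 *	rmdir lun.1			# fsg_lun_drop() tears it down again
 *
 * lun.0 is created implicitly by fsg_alloc_inst() below.
 */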
static void fsg_free_inst(struct usb_function_instance *fi)
{
	struct fsg_opts *opts;

	opts = fsg_opts_from_func_inst(fi);
	fsg_common_put(opts->common);
	kfree(opts);
}

static struct usb_function_instance *fsg_alloc_inst(void)
{
	struct fsg_opts *opts;
	struct fsg_lun_config config;
	int rc;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return ERR_PTR(-ENOMEM);
	mutex_init(&opts->lock);
	opts->func_inst.free_func_inst = fsg_free_inst;
	opts->common = fsg_common_setup(opts->common);
	if (IS_ERR(opts->common)) {
		rc = PTR_ERR(opts->common);
		goto release_opts;
	}

	rc = fsg_common_set_num_buffers(opts->common,
					CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS);
	if (rc)
		goto release_opts;

	pr_info(FSG_DRIVER_DESC ", version: " FSG_DRIVER_VERSION "\n");

	memset(&config, 0, sizeof(config));
	config.removable = true;
	rc = fsg_common_create_lun(opts->common, &config, 0, "lun.0",
			(const char **)&opts->func_inst.group.cg_item.ci_name);
	if (rc)
		goto release_buffers;

	opts->lun0.lun = opts->common->luns[0];
	opts->lun0.lun_id = 0;

	config_group_init_type_name(&opts->func_inst.group, "", &fsg_func_type);

	config_group_init_type_name(&opts->lun0.group, "lun.0", &fsg_lun_type);
	configfs_add_default_group(&opts->lun0.group, &opts->func_inst.group);

	return &opts->func_inst;

release_buffers:
	fsg_common_free_buffers(opts->common);
release_opts:
	kfree(opts);
	return ERR_PTR(rc);
}
static void fsg_free(struct usb_function *f)
{
	struct fsg_dev *fsg;
	struct fsg_opts *opts;

	fsg = container_of(f, struct fsg_dev, function);
	opts = container_of(f->fi, struct fsg_opts, func_inst);

	mutex_lock(&opts->lock);
	opts->refcnt--;
	mutex_unlock(&opts->lock);

	kfree(fsg);
}

static struct usb_function *fsg_alloc(struct usb_function_instance *fi)
{
	struct fsg_opts *opts = fsg_opts_from_func_inst(fi);
	struct fsg_common *common = opts->common;
	struct fsg_dev *fsg;

	fsg = kzalloc(sizeof(*fsg), GFP_KERNEL);
	if (unlikely(!fsg))
		return ERR_PTR(-ENOMEM);

	mutex_lock(&opts->lock);
	opts->refcnt++;
	mutex_unlock(&opts->lock);

	fsg->function.name	= FSG_DRIVER_DESC;
	fsg->function.bind	= fsg_bind;
	fsg->function.unbind	= fsg_unbind;
	fsg->function.setup	= fsg_setup;
	fsg->function.set_alt	= fsg_set_alt;
	fsg->function.disable	= fsg_disable;
	fsg->function.free_func	= fsg_free;

	fsg->common		= common;

	return &fsg->function;
}

DECLARE_USB_FUNCTION_INIT(mass_storage, fsg_alloc_inst, fsg_alloc);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Nazarewicz");

/************************* Module parameters *************************/

void fsg_config_from_params(struct fsg_config *cfg,
			    const struct fsg_module_parameters *params,
			    unsigned int fsg_num_buffers)
{
	struct fsg_lun_config *lun;
	unsigned i;

	/* Configure LUNs */
	cfg->nluns =
		min(params->luns ?: (params->file_count ?: 1u),
		    (unsigned)FSG_MAX_LUNS);
	for (i = 0, lun = cfg->luns; i < cfg->nluns; ++i, ++lun) {
		lun->ro = !!params->ro[i];
		lun->cdrom = !!params->cdrom[i];
		lun->removable = !!params->removable[i];
		lun->filename =
			params->file_count > i && params->file[i][0]
			? params->file[i]
			: NULL;
	}

	/* Let MSF use defaults */
	cfg->vendor_name = NULL;
	cfg->product_name = NULL;

	cfg->ops = NULL;
	cfg->private_data = NULL;

	/* Finalise */
	cfg->can_stall = params->stall;
	cfg->fsg_num_buffers = fsg_num_buffers;
}
EXPORT_SYMBOL_GPL(fsg_config_from_params);
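
/*
 * A minimal sketch of how a module-parameter driven caller is expected to
 * use fsg_config_from_params().  The names below are illustrative only;
 * the fsg_module_parameters structure is normally populated by the
 * FSG_MODULE_PARAMETERS() helper from f_mass_storage.h, and the buffer
 * count is typically CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS:
 *
 *	static struct fsg_module_parameters mod_data = { .stall = 1 };
 *
 *	struct fsg_config config;
 *	fsg_config_from_params(&config, &mod_data, fsg_num_buffers);
 *
 * after which config can be handed to fsg_common_create_luns() and the
 * other fsg_common_*() setup helpers above.
 */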