/*
 * f_mass_storage.c -- Mass Storage USB Composite Function
 *
 * Copyright (C) 2003-2008 Alan Stern
 * Copyright (C) 2009 Samsung Electronics
 * Author: Michal Nazarewicz <m.nazarewicz@samsung.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+	BSD-3-Clause
 */

/*
 * The Mass Storage Function acts as a USB Mass Storage device,
 * appearing to the host as a disk drive or as a CD-ROM drive. In
 * addition to providing an example of a genuinely useful composite
 * function for a USB device, it also illustrates a technique of
 * double-buffering for increased throughput.
 *
 * The function supports multiple logical units (LUNs). Backing storage
 * for each LUN is provided by a regular file or a block device.
 * Access to each LUN can be limited to read-only. Moreover, the
 * function can indicate that a LUN is removable and/or a CD-ROM. (The
 * latter implies read-only access.)
 *
 * MSF is configured by specifying a fsg_config structure. It has the
 * following fields:
 *
 *	nluns		Number of LUNs the function has (anywhere from 1
 *				to FSG_MAX_LUNS, which is 8).
 *	luns		An array of LUN configuration values. This
 *				should be filled for each LUN that the
 *				function will include (ie. for "nluns"
 *				LUNs). Each element of the array has
 *				the following fields:
 *	->filename	The path to the backing file for the LUN.
 *				Required if the LUN is not marked as
 *				removable.
 *	->ro		Flag specifying that access to the LUN shall be
 *				read-only. This is implied if CD-ROM
 *				emulation is enabled, as well as when
 *				it was impossible to open "filename"
 *				in R/W mode.
 *	->removable	Flag specifying that the LUN shall be indicated as
 *				being removable.
 *	->cdrom		Flag specifying that the LUN shall be reported as
 *				being a CD-ROM.
 *
 *	lun_name_format	A printf-like format for names of the LUN
 *				devices. This determines how the
 *				directory in sysfs will be named.
 *				Unless you are using several MSFs in
 *				a single gadget (as opposed to a single
 *				MSF in many configurations) you may
 *				leave it as NULL (in which case
 *				"lun%d" will be used). In the format
 *				you can use "%d" to index LUNs for
 *				MSFs with more than one LUN. (Beware
 *				that there is only one integer given
 *				as an argument for the format and
 *				specifying an invalid format may cause
 *				unspecified behaviour.)
 *	thread_name	Name of the kernel thread process used by the
 *				MSF. You can safely set it to NULL
 *				(in which case the default
 *				"file-storage" will be used).
 *
 *	vendor_name
 *	product_name
 *	release		Information used as a reply to the INQUIRY
 *				request. To use the defaults set them to
 *				NULL, NULL, 0xffff respectively. The first
 *				field should be 8 and the second 16
 *				characters or less.
 *
 *	can_stall	Set to permit the function to halt bulk endpoints.
 *				Disabled on some USB devices known not
 *				to work correctly. You should set it
 *				to true.
 *
 * If "removable" is not set for a LUN then a backing file must be
 * specified. If it is set, then a NULL filename means the LUN's medium
 * is not loaded (an empty string as "filename" in the fsg_config
 * structure causes an error). The CD-ROM emulation includes a single
 * data track and no audio tracks; hence there need be only one
 * backing file per LUN. Note also that the CD-ROM block length is
 * set to 512 rather than the more common value 2048.
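 *
 * As a purely illustrative sketch (not taken from any particular board
 * or gadget; the backing device path is hypothetical), a minimal
 * single-LUN configuration might fill the structure roughly like this,
 * using only the fields documented above:
 *
 *	static struct fsg_config cfg = {
 *		.nluns = 1,
 *		.luns[0] = {
 *			.filename  = "/dev/mmcblk0p1",	/* hypothetical backing device */
 *			.ro        = 0,
 *			.removable = 1,
 *			.cdrom     = 0,
 *		},
 *		.vendor_name  = NULL,	/* use the built-in default */
 *		.product_name = NULL,
 *		.can_stall    = 1,
 *	};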
 *
 * MSF includes support for module parameters. If the gadget using it
 * decides to use them, the following module parameters will be
 * available:
 *
 *	file=filename[,filename...]
 *			Names of the files or block devices used for
 *				backing storage.
 *	ro=b[,b...]	Default false, boolean for read-only access.
 *	removable=b[,b...]
 *			Default true, boolean for removable media.
 *	cdrom=b[,b...]	Default false, boolean for whether to emulate
 *				a CD-ROM drive.
 *	luns=N		Default N = number of filenames, number of
 *				LUNs to simulate.
 *	stall		Default determined according to the type of
 *				USB device controller (usually true),
 *				boolean to permit the driver to halt
 *				bulk endpoints.
 *
 * The module parameters may be prefixed with some string. You need
 * to consult the gadget's documentation or source to verify whether it
 * is using those module parameters and, if it does, what the prefixes
 * are (look for FSG_MODULE_PARAMETERS() macro usage; what's inside it
 * is the prefix).
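 *
 * For example, with the Linux g_mass_storage gadget (which, as far as
 * this comment is concerned, uses an empty prefix -- verify against
 * your gadget's source before relying on it), a two-LUN setup could be
 * loaded as:
 *
 *	modprobe g_mass_storage file=/root/disk.img,/dev/sr0 \
 *		ro=0,1 removable=1,1 cdrom=0,1 stall=0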
 *
 * Requirements are modest; only a bulk-in and a bulk-out endpoint are
 * needed. The memory requirement amounts to two 16K buffers, size
 * configurable by a parameter. Support is included for both
 * full-speed and high-speed operation.
 *
 * Note that the driver is slightly non-portable in that it assumes a
 * single memory/DMA buffer will be usable for bulk-in, bulk-out, and
 * interrupt-in endpoints. With most device controllers this isn't an
 * issue, but there may be some with hardware restrictions that prevent
 * a buffer from being used by more than one endpoint.
 *
 * The pathnames of the backing files and the ro settings are
 * available in the attribute files "file" and "ro" in the lun<n> (or,
 * to be more precise, in a directory whose name comes from the
 * "lun_name_format" option!) subdirectory of the gadget's sysfs
 * directory. If the "removable" option is set, writing to these
 * files will simulate ejecting/loading the medium (writing an empty
 * line means eject) and adjusting a write-enable tab. Changes to the
 * ro setting are not allowed when the medium is loaded or if CD-ROM
 * emulation is being used.
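 *
 * For instance, assuming the default "lun%d" naming (the gadget path
 * below is illustrative only), a shell on the device side could do:
 *
 *	echo /tmp/new-image.img > .../gadget/lun0/file	# load a new medium
 *	echo "" > .../gadget/lun0/file			# eject the medium
 *	cat .../gadget/lun0/ro				# query write protection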
 *
 * When a LUN receives an "eject" SCSI request (Start/Stop Unit),
 * if the LUN is removable, the backing file is released to simulate
 * ejection.
 *
 * This function is heavily based on "File-backed Storage Gadget" by
 * Alan Stern which in turn is heavily based on "Gadget Zero" by David
 * Brownell. The driver's SCSI command interface was based on the
 * "Information technology - Small Computer System Interface - 2"
 * document from X3T9.2 Project 375D, Revision 10L, 7-SEP-93,
 * available at <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>.
 * The single exception is opcode 0x23 (READ FORMAT CAPACITIES), which
 * was based on the "Universal Serial Bus Mass Storage Class UFI
 * Command Specification" document, Revision 1.0, December 14, 1998,
 * available at
 * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>.
 *
 * The MSF is fairly straightforward. There is a main kernel
 * thread that handles most of the work. Interrupt routines field
 * callbacks from the controller driver: bulk- and interrupt-request
 * completion notifications, endpoint-0 events, and disconnect events.
 * Completion events are passed to the main thread by wakeup calls. Many
 * ep0 requests are handled at interrupt time, but SetInterface,
 * SetConfiguration, and device reset requests are forwarded to the
 * thread in the form of "exceptions" using SIGUSR1 signals (since they
 * should interrupt any ongoing file I/O operations).
 *
 * The thread's main routine implements the standard command/data/status
 * parts of a SCSI interaction. It and its subroutines are full of tests
 * for pending signals/exceptions -- all this polling is necessary since
 * the kernel has no setjmp/longjmp equivalents. (Maybe this is an
 * indication that the driver really wants to be running in userspace.)
 * An important point is that so long as the thread is alive it keeps an
 * open reference to the backing file. This will prevent unmounting
 * the backing file's underlying filesystem and could cause problems
 * during system shutdown, for example. To prevent such problems, the
 * thread catches INT, TERM, and KILL signals and converts them into
 * an EXIT exception.
 *
 * In normal operation the main thread is started during the gadget's
 * fsg_bind() callback and stopped during fsg_unbind(). But it can
 * also exit when it receives a signal, and there's no point leaving
 * the gadget running when the thread is dead. As of this moment, MSF
 * provides no way to deregister the gadget when the thread dies -- maybe
 * a callback function is needed.
 *
 * To provide maximum throughput, the driver uses a circular pipeline of
 * buffer heads (struct fsg_buffhd). In principle the pipeline can be
 * arbitrarily long; in practice the benefits don't justify having more
 * than 2 stages (i.e., double buffering). But it helps to think of the
 * pipeline as being a long one. Each buffer head contains a bulk-in and
 * a bulk-out request pointer (since the buffer can be used for both
 * output and input -- directions always are given from the host's
 * point of view) as well as a pointer to the buffer and various state
 * variables.
 *
 * Use of the pipeline follows a simple protocol. There is a variable
 * (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
 * At any time that buffer head may still be in use from an earlier
 * request, so each buffer head has a state variable indicating whether
 * it is EMPTY, FULL, or BUSY. Typical use involves waiting for the
 * buffer head to be EMPTY, filling the buffer either by file I/O or by
 * USB I/O (during which the buffer head is BUSY), and marking the buffer
 * head FULL when the I/O is complete. Then the buffer will be emptied
 * (again possibly by USB I/O, during which it is marked BUSY) and
 * finally marked EMPTY again (possibly by a completion routine).
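 *
 * Sketched as pseudo-code, one pass of the producer side looks roughly
 * like the loops in do_read()/do_write() below; nothing here is new
 * API, it merely restates the protocol in a compact form:
 *
 *	bh = common->next_buffhd_to_fill;
 *	while (bh->state != BUF_STATE_EMPTY)	// wait for the head to free up
 *		sleep_thread(common);
 *	// fill bh->buf by file or USB I/O; the head is BUSY while queued
 *	bh->state = BUF_STATE_FULL;		// ready for the consumer
 *	common->next_buffhd_to_fill = bh->next;	// advance around the ring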
 *
 * A module parameter tells the driver to avoid stalling the bulk
 * endpoints wherever the transport specification allows. This is
 * necessary for some UDCs like the SuperH, which cannot reliably clear a
 * halt on a bulk endpoint. However, under certain circumstances the
 * Bulk-only specification requires a stall. In such cases the driver
 * will halt the endpoint and set a flag indicating that it should clear
 * the halt in software during the next device reset. Hopefully this
 * will permit everything to work correctly. Furthermore, although the
 * specification allows the bulk-out endpoint to halt when the host sends
 * too much data, implementing this would cause an unavoidable race.
 * The driver will always use the "no-stall" approach for OUT transfers.
 *
 * One subtle point concerns sending status-stage responses for ep0
 * requests. Some of these requests, such as device reset, can involve
 * interrupting an ongoing file I/O operation, which might take an
 * arbitrarily long time. During that delay the host might give up on
 * the original ep0 request and issue a new one. When that happens the
 * driver should not notify the host about completion of the original
 * request, as the host will no longer be waiting for it. So the driver
 * assigns to each ep0 request a unique tag, and it keeps track of the
 * tag value of the request associated with a long-running exception
 * (device-reset, interface-change, or configuration-change). When the
 * exception handler is finished, the status-stage response is submitted
 * only if the current ep0 request tag is equal to the exception request
 * tag. Thus only the most recently received ep0 request will get a
 * status-stage response.
 *
 * Warning: This driver source file is too long. It ought to be split up
 * into a header file plus about 3 separate .c files, to handle the details
 * of the Gadget, USB Mass Storage, and SCSI protocols.
 */
/* #define VERBOSE_DEBUG */
/* #define DUMP_MSGS */

#include <linux/err.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <usb_mass_storage.h>

#include <asm/unaligned.h>
#include <linux/usb/composite.h>
#include <usb/lin_gadget_compat.h>

/*------------------------------------------------------------------------*/

#define FSG_DRIVER_DESC		"Mass Storage Function"
#define FSG_DRIVER_VERSION	"2012/06/5"

static const char fsg_string_interface[] = "Mass Storage";

#define FSG_NO_INTR_EP		1
#define FSG_NO_DEVICE_STRINGS	1

#include "storage_common.c"
/*-------------------------------------------------------------------------*/

#define GFP_ATOMIC ((gfp_t) 0)
#define PAGE_CACHE_SHIFT	12
#define PAGE_CACHE_SIZE		(1 << PAGE_CACHE_SHIFT)
#define kthread_create(...)	__builtin_return_address(0)
#define wait_for_completion(...) do {} while (0)

struct kref {int x; };
struct completion {int x; };

inline void set_bit(int nr, volatile void *addr)
{
	int	mask;
	unsigned int *a = (unsigned int *) addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a |= mask;
}

inline void clear_bit(int nr, volatile void *addr)
{
	int	mask;
	unsigned int *a = (unsigned int *) addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a &= ~mask;
}
/* Data shared by all the FSG instances. */
struct fsg_common {
	struct usb_gadget	*gadget;
	struct fsg_dev		*fsg, *new_fsg;

	struct usb_ep		*ep0;		/* Copy of gadget->ep0 */
	struct usb_request	*ep0req;	/* Copy of cdev->req */
	unsigned int		ep0_req_tag;

	struct fsg_buffhd	*next_buffhd_to_fill;
	struct fsg_buffhd	*next_buffhd_to_drain;
	struct fsg_buffhd	buffhds[FSG_NUM_BUFFERS];

	int			cmnd_size;
	u8			cmnd[MAX_COMMAND_SIZE];

	unsigned int		nluns;
	unsigned int		lun;
	struct fsg_lun		luns[FSG_MAX_LUNS];

	unsigned int		bulk_out_maxpacket;
	enum fsg_state		state;		/* For exception handling */
	unsigned int		exception_req_tag;

	enum data_direction	data_dir;
	u32			data_size;
	u32			data_size_from_cmnd;
	u32			tag;
	u32			residue;
	u32			usb_amount_left;

	unsigned int		can_stall:1;
	unsigned int		free_storage_on_release:1;
	unsigned int		phase_error:1;
	unsigned int		short_packet_received:1;
	unsigned int		bad_lun_okay:1;
	unsigned int		running:1;

	int			thread_wakeup_needed;
	struct completion	thread_notifier;
	struct task_struct	*thread_task;

	/* Callback functions. */
	const struct fsg_operations	*ops;
	/* Gadget's private data. */
	void			*private_data;

	const char *vendor_name;		/*  8 characters or less */
	const char *product_name;		/* 16 characters or less */

	/* Vendor (8 chars), product (16 chars), release (4
	 * hexadecimal digits) and NUL byte */
	char inquiry_string[8 + 16 + 4 + 1];

	struct kref		ref;
};

struct fsg_config {
	unsigned nluns;
	struct fsg_lun_config {
		const char *filename;
		char ro;
		char removable;
		char cdrom;
	} luns[FSG_MAX_LUNS];

	/* Callback functions. */
	const struct fsg_operations	*ops;
	/* Gadget's private data. */
	void			*private_data;

	const char *vendor_name;		/*  8 characters or less */
	const char *product_name;		/* 16 characters or less */

	char			can_stall;
};

struct fsg_dev {
	struct usb_function	function;
	struct usb_gadget	*gadget;	/* Copy of cdev->gadget */
	struct fsg_common	*common;

	u16			interface_number;

	unsigned int		bulk_in_enabled:1;
	unsigned int		bulk_out_enabled:1;

	unsigned long		atomic_bitflags;
#define IGNORE_BULK_OUT 0

	struct usb_ep		*bulk_in;
	struct usb_ep		*bulk_out;
};
static inline int __fsg_is_set(struct fsg_common *common,
			       const char *func, unsigned line)
{
	if (common->fsg)
		return 1;
	ERROR(common, "common->fsg is NULL in %s at %u\n", func, line);
	return 0;
}

#define fsg_is_set(common) likely(__fsg_is_set(common, __func__, __LINE__))

static inline struct fsg_dev *fsg_from_func(struct usb_function *f)
{
	return container_of(f, struct fsg_dev, function);
}

typedef void (*fsg_routine_t)(struct fsg_dev *);

static int exception_in_progress(struct fsg_common *common)
{
	return common->state > FSG_STATE_IDLE;
}
/* Make bulk-out requests be divisible by the maxpacket size */
static void set_bulk_out_req_length(struct fsg_common *common,
				    struct fsg_buffhd *bh, unsigned int length)
{
	unsigned int	rem;

	bh->bulk_out_intended_length = length;
	rem = length % common->bulk_out_maxpacket;
	if (rem > 0)
		length += common->bulk_out_maxpacket - rem;
	bh->outreq->length = length;
}
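
/*
 * Worked example (illustrative numbers): with bulk_out_maxpacket == 512,
 * a request for the 31-byte CBW keeps bulk_out_intended_length == 31 but
 * queues outreq->length == 512, so the UDC never sees an OUT request
 * that is not a multiple of the endpoint's maxpacket size.
 */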
/*-------------------------------------------------------------------------*/

struct fsg_common *the_fsg_common;

static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
{
	const char	*name;

	if (ep == fsg->bulk_in)
		name = "bulk-in";
	else if (ep == fsg->bulk_out)
		name = "bulk-out";
	else
		name = ep->name;
	DBG(fsg, "%s set halt\n", name);
	return usb_ep_set_halt(ep);
}
/*-------------------------------------------------------------------------*/

/* These routines may be called in process context or in_irq */

/* Caller must hold fsg->lock */
static void wakeup_thread(struct fsg_common *common)
{
	common->thread_wakeup_needed = 1;
}

static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
{
	/* Do nothing if a higher-priority exception is already in progress.
	 * If a lower-or-equal priority exception is in progress, preempt it
	 * and notify the main thread by sending it a signal. */
	if (common->state <= new_state) {
		common->exception_req_tag = common->ep0_req_tag;
		common->state = new_state;
		common->thread_wakeup_needed = 1;
	}
}
/*-------------------------------------------------------------------------*/

static int ep0_queue(struct fsg_common *common)
{
	int	rc;

	rc = usb_ep_queue(common->ep0, common->ep0req, GFP_ATOMIC);
	common->ep0->driver_data = common;
	if (rc != 0 && rc != -ESHUTDOWN) {
		/* We can't do much more than wait for a reset */
		WARNING(common, "error in submission: %s --> %d\n",
			common->ep0->name, rc);
	}
	return rc;
}
/*-------------------------------------------------------------------------*/

/* Bulk and interrupt endpoint completion handlers.
 * These always run in_irq. */

static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct fsg_common	*common = ep->driver_data;
	struct fsg_buffhd	*bh = req->context;

	if (req->status || req->actual != req->length)
		DBG(common, "%s --> %d, %u/%u\n", __func__,
		    req->status, req->actual, req->length);
	if (req->status == -ECONNRESET)		/* Request was cancelled */
		usb_ep_fifo_flush(ep);

	/* Hold the lock while we update the request and buffer states */
	bh->inreq_busy = 0;
	bh->state = BUF_STATE_EMPTY;
	wakeup_thread(common);
}

static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct fsg_common	*common = ep->driver_data;
	struct fsg_buffhd	*bh = req->context;

	dump_msg(common, "bulk-out", req->buf, req->actual);
	if (req->status || req->actual != bh->bulk_out_intended_length)
		DBG(common, "%s --> %d, %u/%u\n", __func__,
		    req->status, req->actual, bh->bulk_out_intended_length);
	if (req->status == -ECONNRESET)		/* Request was cancelled */
		usb_ep_fifo_flush(ep);

	/* Hold the lock while we update the request and buffer states */
	bh->outreq_busy = 0;
	bh->state = BUF_STATE_FULL;
	wakeup_thread(common);
}
/*-------------------------------------------------------------------------*/

/* Ep0 class-specific handlers. These always run in_irq. */

static int fsg_setup(struct usb_function *f,
		     const struct usb_ctrlrequest *ctrl)
{
	struct fsg_dev		*fsg = fsg_from_func(f);
	struct usb_request	*req = fsg->common->ep0req;
	u16			w_index = get_unaligned_le16(&ctrl->wIndex);
	u16			w_value = get_unaligned_le16(&ctrl->wValue);
	u16			w_length = get_unaligned_le16(&ctrl->wLength);

	if (!fsg_is_set(fsg->common))
		return -EOPNOTSUPP;

	switch (ctrl->bRequest) {

	case USB_BULK_RESET_REQUEST:
		if (ctrl->bRequestType !=
		    (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
			break;
		if (w_index != fsg->interface_number || w_value != 0)
			return -EDOM;

		/* Raise an exception to stop the current operation
		 * and reinitialize our state. */
		DBG(fsg, "bulk reset request\n");
		raise_exception(fsg->common, FSG_STATE_RESET);
		return DELAYED_STATUS;

	case USB_BULK_GET_MAX_LUN_REQUEST:
		if (ctrl->bRequestType !=
		    (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
			break;
		if (w_index != fsg->interface_number || w_value != 0)
			return -EDOM;
		VDBG(fsg, "get max LUN\n");
		*(u8 *)req->buf = fsg->common->nluns - 1;

		/* Respond with data/status */
		req->length = min((u16)1, w_length);
		return ep0_queue(fsg->common);
	}

	VDBG(fsg,
	     "unknown class-specific control req "
	     "%02x.%02x v%04x i%04x l%u\n",
	     ctrl->bRequestType, ctrl->bRequest,
	     get_unaligned_le16(&ctrl->wValue), w_index, w_length);
	return -EOPNOTSUPP;
}
/*-------------------------------------------------------------------------*/

/* All the following routines run in process context */

/* Use this for bulk or interrupt transfers, not ep0 */
static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
			   struct usb_request *req, int *pbusy,
			   enum fsg_buffer_state *state)
{
	int	rc;

	if (ep == fsg->bulk_in)
		dump_msg(fsg, "bulk-in", req->buf, req->length);

	*pbusy = 1;
	*state = BUF_STATE_BUSY;
	rc = usb_ep_queue(ep, req, GFP_KERNEL);
	if (rc != 0) {
		*pbusy = 0;
		*state = BUF_STATE_EMPTY;

		/* We can't do much more than wait for a reset */

		/* Note: currently the net2280 driver fails zero-length
		 * submissions if DMA is enabled. */
		if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP &&
					  req->length == 0))
			WARNING(fsg, "error in submission: %s --> %d\n",
				ep->name, rc);
	}
}

#define START_TRANSFER_OR(common, ep_name, req, pbusy, state)		\
	if (fsg_is_set(common))						\
		start_transfer((common)->fsg, (common)->fsg->ep_name,	\
			       req, pbusy, state);			\
	else

#define START_TRANSFER(common, ep_name, req, pbusy, state)		\
	START_TRANSFER_OR(common, ep_name, req, pbusy, state) (void)0
static void busy_indicator(void)

static int sleep_thread(struct fsg_common *common)
{
	int	rc = 0;

	/* Wait until a signal arrives or we are woken up */
	for (;;) {
		if (common->thread_wakeup_needed)
			break;

#ifdef CONFIG_USB_CABLE_CHECK
		/* Check cable connection */
		if (!usb_cable_connected())
			return -EIO;
#endif
		usb_gadget_handle_interrupts();
	}
	common->thread_wakeup_needed = 0;
	return rc;
}
/*-------------------------------------------------------------------------*/

static int do_read(struct fsg_common *common)
{
	struct fsg_lun		*curlun = &common->luns[common->lun];
	u32			lba;
	struct fsg_buffhd	*bh;
	int			rc;
	u32			amount_left;
	loff_t			file_offset;
	unsigned int		amount;
	unsigned int		partial_page;
	ssize_t			nread;

	/* Get the starting Logical Block Address and check that it's
	 * not too big */
	if (common->cmnd[0] == SC_READ_6)
		lba = get_unaligned_be24(&common->cmnd[1]);
	else {
		lba = get_unaligned_be32(&common->cmnd[2]);

		/* We allow DPO (Disable Page Out = don't save data in the
		 * cache) and FUA (Force Unit Access = don't read from the
		 * cache), but we don't implement them. */
		if ((common->cmnd[1] & ~0x18) != 0) {
			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
			return -EINVAL;
		}
	}
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}
	file_offset = ((loff_t) lba) << 9;

	/* Carry out the file reads */
	amount_left = common->data_size_from_cmnd;
	if (unlikely(amount_left == 0))
		return -EIO;		/* No default reply */

	for (;;) {

		/* Figure out how much we need to read:
		 * Try to read the remaining amount.
		 * But don't read more than the buffer size.
		 * And don't try to read past the end of the file.
		 * Finally, if we're not at a page boundary, don't read past
		 *	the next page.
		 * If this means reading 0 then we were asked to read past
		 *	the end of file. */
		amount = min(amount_left, FSG_BUFLEN);
		partial_page = file_offset & (PAGE_CACHE_SIZE - 1);
		if (partial_page > 0)
			amount = min(amount, (unsigned int) PAGE_CACHE_SIZE -
				     partial_page);

		/* Wait for the next buffer to become available */
		bh = common->next_buffhd_to_fill;
		while (bh->state != BUF_STATE_EMPTY) {
			rc = sleep_thread(common);
			if (rc)
				return rc;
		}

		/* If we were asked to read past the end of file,
		 * end with an empty buffer. */
		if (amount == 0) {
			curlun->sense_data =
					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
			curlun->info_valid = 1;
			bh->inreq->length = 0;
			bh->state = BUF_STATE_FULL;
			break;
		}

		/* Perform the read */
		rc = ums->read_sector(ums,
				      file_offset / SECTOR_SIZE,
				      amount / SECTOR_SIZE,
				      (char __user *)bh->buf);
		if (!rc)
			return -EIO;
		nread = rc * SECTOR_SIZE;

		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
		      (unsigned long long) file_offset,
		      (int) nread);

		if (nread < 0) {
			LDBG(curlun, "error in file read: %d\n",
			     (int) nread);
			nread = 0;
		} else if (nread < amount) {
			LDBG(curlun, "partial file read: %d/%u\n",
			     (int) nread, amount);
			nread -= (nread & 511);	/* Round down to a block */
		}
		file_offset  += nread;
		amount_left  -= nread;
		common->residue -= nread;
		bh->inreq->length = nread;
		bh->state = BUF_STATE_FULL;

		/* If an error occurred, report it and its position */
		if (nread < amount) {
			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
			curlun->info_valid = 1;
			break;
		}

		if (amount_left == 0)
			break;		/* No more left to read */

		/* Send this buffer and go read some more */
		bh->inreq->zero = 0;
		START_TRANSFER_OR(common, bulk_in, bh->inreq,
				  &bh->inreq_busy, &bh->state)
			/* Don't know what to do if
			 * common->fsg is NULL */
			return -EIO;
		common->next_buffhd_to_fill = bh->next;
	}

	return -EIO;		/* No default reply */
}
/*-------------------------------------------------------------------------*/

static int do_write(struct fsg_common *common)
{
	struct fsg_lun		*curlun = &common->luns[common->lun];
	u32			lba;
	struct fsg_buffhd	*bh;
	int			get_some_more;
	u32			amount_left_to_req, amount_left_to_write;
	loff_t			usb_offset, file_offset;
	unsigned int		amount;
	unsigned int		partial_page;
	ssize_t			nwritten;
	int			rc;

	if (curlun->ro) {
		curlun->sense_data = SS_WRITE_PROTECTED;
		return -EINVAL;
	}

	/* Get the starting Logical Block Address and check that it's
	 * not too big */
	if (common->cmnd[0] == SC_WRITE_6)
		lba = get_unaligned_be24(&common->cmnd[1]);
	else {
		lba = get_unaligned_be32(&common->cmnd[2]);

		/* We allow DPO (Disable Page Out = don't save data in the
		 * cache) and FUA (Force Unit Access = write directly to the
		 * medium). We don't implement DPO; we implement FUA by
		 * performing synchronous output. */
		if (common->cmnd[1] & ~0x18) {
			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
			return -EINVAL;
		}
	}
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}

	/* Carry out the file writes */
	get_some_more = 1;
	file_offset = usb_offset = ((loff_t) lba) << 9;
	amount_left_to_req = common->data_size_from_cmnd;
	amount_left_to_write = common->data_size_from_cmnd;

	while (amount_left_to_write > 0) {

		/* Queue a request for more data from the host */
		bh = common->next_buffhd_to_fill;
		if (bh->state == BUF_STATE_EMPTY && get_some_more) {

			/* Figure out how much we want to get:
			 * Try to get the remaining amount.
			 * But don't get more than the buffer size.
			 * And don't try to go past the end of the file.
			 * If we're not at a page boundary,
			 *	don't go past the next page.
			 * If this means getting 0, then we were asked
			 *	to write past the end of file.
			 * Finally, round down to a block boundary. */
			amount = min(amount_left_to_req, FSG_BUFLEN);
			partial_page = usb_offset & (PAGE_CACHE_SIZE - 1);
			if (partial_page > 0)
				amount = min(amount,
					(unsigned int) PAGE_CACHE_SIZE - partial_page);

			if (amount == 0) {
				get_some_more = 0;
				curlun->sense_data =
					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
				curlun->info_valid = 1;
				continue;
			}
			amount -= (amount & 511);
			if (amount == 0) {

				/* Why were we asked to transfer a
				 * partial block? */
				get_some_more = 0;
				continue;
			}

			/* Get the next buffer */
			usb_offset += amount;
			common->usb_amount_left -= amount;
			amount_left_to_req -= amount;
			if (amount_left_to_req == 0)
				get_some_more = 0;

			/* amount is always divisible by 512, hence by
			 * the bulk-out maxpacket size */
			bh->outreq->length = amount;
			bh->bulk_out_intended_length = amount;
			bh->outreq->short_not_ok = 1;
			START_TRANSFER_OR(common, bulk_out, bh->outreq,
					  &bh->outreq_busy, &bh->state)
				/* Don't know what to do if
				 * common->fsg is NULL */
				return -EIO;
			common->next_buffhd_to_fill = bh->next;
			continue;
		}

		/* Write the received data to the backing file */
		bh = common->next_buffhd_to_drain;
		if (bh->state == BUF_STATE_EMPTY && !get_some_more)
			break;			/* We stopped early */
		if (bh->state == BUF_STATE_FULL) {
			common->next_buffhd_to_drain = bh->next;
			bh->state = BUF_STATE_EMPTY;

			/* Did something go wrong with the transfer? */
			if (bh->outreq->status != 0) {
				curlun->sense_data = SS_COMMUNICATION_FAILURE;
				curlun->info_valid = 1;
				break;
			}

			amount = bh->outreq->actual;

			/* Perform the write */
			rc = ums->write_sector(ums,
					       file_offset / SECTOR_SIZE,
					       amount / SECTOR_SIZE,
					       (char __user *)bh->buf);
			if (!rc)
				return -EIO;
			nwritten = rc * SECTOR_SIZE;

			VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
			      (unsigned long long) file_offset,
			      (int) nwritten);

			if (nwritten < 0) {
				LDBG(curlun, "error in file write: %d\n",
				     (int) nwritten);
				nwritten = 0;
			} else if (nwritten < amount) {
				LDBG(curlun, "partial file write: %d/%u\n",
				     (int) nwritten, amount);
				nwritten -= (nwritten & 511);
				/* Round down to a block */
			}
			file_offset += nwritten;
			amount_left_to_write -= nwritten;
			common->residue -= nwritten;

			/* If an error occurred, report it and its position */
			if (nwritten < amount) {
				printf("nwritten:%d amount:%d\n", nwritten,
				       amount);
				curlun->sense_data = SS_WRITE_ERROR;
				curlun->info_valid = 1;
				break;
			}

			/* Did the host decide to stop early? */
			if (bh->outreq->actual != bh->outreq->length) {
				common->short_packet_received = 1;
				break;
			}
			continue;
		}

		/* Wait for something to happen */
		rc = sleep_thread(common);
		if (rc)
			return rc;
	}

	return -EIO;		/* No default reply */
}
1001 static int do_synchronize_cache(struct fsg_common
*common
)
1006 /*-------------------------------------------------------------------------*/
1008 static int do_verify(struct fsg_common
*common
)
1010 struct fsg_lun
*curlun
= &common
->luns
[common
->lun
];
1012 u32 verification_length
;
1013 struct fsg_buffhd
*bh
= common
->next_buffhd_to_fill
;
1016 unsigned int amount
;
1020 /* Get the starting Logical Block Address and check that it's
1022 lba
= get_unaligned_be32(&common
->cmnd
[2]);
1023 if (lba
>= curlun
->num_sectors
) {
1024 curlun
->sense_data
= SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE
;
1028 /* We allow DPO (Disable Page Out = don't save data in the
1029 * cache) but we don't implement it. */
1030 if (common
->cmnd
[1] & ~0x10) {
1031 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1035 verification_length
= get_unaligned_be16(&common
->cmnd
[7]);
1036 if (unlikely(verification_length
== 0))
1037 return -EIO
; /* No default reply */
1039 /* Prepare to carry out the file verify */
1040 amount_left
= verification_length
<< 9;
1041 file_offset
= ((loff_t
) lba
) << 9;
1043 /* Write out all the dirty buffers before invalidating them */
1045 /* Just try to read the requested blocks */
1046 while (amount_left
> 0) {
1048 /* Figure out how much we need to read:
1049 * Try to read the remaining amount, but not more than
1051 * And don't try to read past the end of the file.
1052 * If this means reading 0 then we were asked to read
1053 * past the end of file. */
1054 amount
= min(amount_left
, FSG_BUFLEN
);
1056 curlun
->sense_data
=
1057 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE
;
1058 curlun
->info_valid
= 1;
1062 /* Perform the read */
1063 rc
= ums
->read_sector(ums
,
1064 file_offset
/ SECTOR_SIZE
,
1065 amount
/ SECTOR_SIZE
,
1066 (char __user
*)bh
->buf
);
1069 nread
= rc
* SECTOR_SIZE
;
1071 VLDBG(curlun
, "file read %u @ %llu -> %d\n", amount
,
1072 (unsigned long long) file_offset
,
1075 LDBG(curlun
, "error in file verify: %d\n",
1078 } else if (nread
< amount
) {
1079 LDBG(curlun
, "partial file verify: %d/%u\n",
1080 (int) nread
, amount
);
1081 nread
-= (nread
& 511); /* Round down to a sector */
1084 curlun
->sense_data
= SS_UNRECOVERED_READ_ERROR
;
1085 curlun
->info_valid
= 1;
1088 file_offset
+= nread
;
1089 amount_left
-= nread
;
1094 /*-------------------------------------------------------------------------*/
static int do_inquiry(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = &common->luns[common->lun];
	static const char vendor_id[] = "Linux ";
	u8		*buf = (u8 *) bh->buf;

	if (!curlun) {		/* Unsupported LUNs are okay */
		common->bad_lun_okay = 1;
		memset(buf, 0, 36);
		buf[0] = 0x7f;		/* Unsupported, no device-type */
		buf[4] = 31;		/* Additional length */
		return 36;
	}

	memset(buf, 0, 8);
	buf[0] = TYPE_DISK;
	buf[2] = 2;		/* ANSI SCSI level 2 */
	buf[3] = 2;		/* SCSI-2 INQUIRY data format */
	buf[4] = 31;		/* Additional length */
	/* No special options */
	sprintf((char *) (buf + 8), "%-8s%-16s%04x", (char *) vendor_id,
		ums->name, (u16) 0xffff);

	return 36;
}
static int do_request_sense(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = &common->luns[common->lun];
	u8		*buf = (u8 *) bh->buf;
	u32		sd, sdinfo = 0;
	int		valid;

	/*
	 * From the SCSI-2 spec., section 7.9 (Unit attention condition):
	 *
	 * If a REQUEST SENSE command is received from an initiator
	 * with a pending unit attention condition (before the target
	 * generates the contingent allegiance condition), then the
	 * target shall either:
	 *   a) report any pending sense data and preserve the unit
	 *	attention condition on the logical unit, or,
	 *   b) report the unit attention condition, may discard any
	 *	pending sense data, and clear the unit attention
	 *	condition on the logical unit for that initiator.
	 *
	 * FSG normally uses option a); enable this code to use option b).
	 */
#if 0
	if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
		curlun->sense_data = curlun->unit_attention_data;
		curlun->unit_attention_data = SS_NO_SENSE;
	}
#endif

	if (!curlun) {		/* Unsupported LUNs are okay */
		common->bad_lun_okay = 1;
		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
		valid = 0;
	} else {
		sd = curlun->sense_data;
		valid = curlun->info_valid << 7;
		curlun->sense_data = SS_NO_SENSE;
		curlun->info_valid = 0;
	}

	memset(buf, 0, 18);
	buf[0] = valid | 0x70;			/* Valid, current error */
	buf[2] = SK(sd);
	put_unaligned_be32(sdinfo, &buf[3]);	/* Sense information */
	buf[7] = 18 - 8;			/* Additional sense length */
	buf[12] = ASC(sd);
	buf[13] = ASCQ(sd);
	return 18;
}
static int do_read_capacity(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = &common->luns[common->lun];
	u32		lba = get_unaligned_be32(&common->cmnd[2]);
	int		pmi = common->cmnd[8];
	u8		*buf = (u8 *) bh->buf;

	/* Check the PMI and LBA fields */
	if (pmi > 1 || (pmi == 0 && lba != 0)) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	put_unaligned_be32(curlun->num_sectors - 1, &buf[0]);
						/* Max logical block */
	put_unaligned_be32(512, &buf[4]);	/* Block length */
	return 8;
}
static int do_read_header(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = &common->luns[common->lun];
	int		msf = common->cmnd[1] & 0x02;
	u32		lba = get_unaligned_be32(&common->cmnd[2]);
	u8		*buf = (u8 *) bh->buf;

	if (common->cmnd[1] & ~0x02) {		/* Mask away MSF */
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}

	memset(buf, 0, 8);
	buf[0] = 0x01;		/* 2048 bytes of user data, rest is EC */
	store_cdrom_address(&buf[4], msf, lba);
	return 8;
}
static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = &common->luns[common->lun];
	int		msf = common->cmnd[1] & 0x02;
	int		start_track = common->cmnd[6];
	u8		*buf = (u8 *) bh->buf;

	if ((common->cmnd[1] & ~0x02) != 0 ||	/* Mask away MSF */
	    start_track > 1) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	memset(buf, 0, 20);
	buf[1] = (20-2);		/* TOC data length */
	buf[2] = 1;			/* First track number */
	buf[3] = 1;			/* Last track number */
	buf[5] = 0x16;			/* Data track, copying allowed */
	buf[6] = 0x01;			/* Only track is number 1 */
	store_cdrom_address(&buf[8], msf, 0);

	buf[13] = 0x16;			/* Lead-out track is data */
	buf[14] = 0xAA;			/* Lead-out track number */
	store_cdrom_address(&buf[16], msf, curlun->num_sectors);
	return 20;
}
static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = &common->luns[common->lun];
	int		mscmnd = common->cmnd[0];
	u8		*buf = (u8 *) bh->buf;
	u8		*buf0 = buf;
	int		pc, page_code;
	int		changeable_values, all_pages;
	int		valid_page = 0;
	int		len, limit;

	if ((common->cmnd[1] & ~0x08) != 0) {	/* Mask away DBD */
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}
	pc = common->cmnd[2] >> 6;
	page_code = common->cmnd[2] & 0x3f;
	if (pc == 3) {
		curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
		return -EINVAL;
	}
	changeable_values = (pc == 1);
	all_pages = (page_code == 0x3f);

	/* Write the mode parameter header. Fixed values are: default
	 * medium type, no cache control (DPOFUA), and no block descriptors.
	 * The only variable value is the WriteProtect bit. We will fill in
	 * the mode data length later. */
	memset(buf, 0, 8);
	if (mscmnd == SC_MODE_SENSE_6) {
		buf[2] = (curlun->ro ? 0x80 : 0x00);	/* WP, DPOFUA */
		buf += 4;
		limit = 255;
	} else {			/* SC_MODE_SENSE_10 */
		buf[3] = (curlun->ro ? 0x80 : 0x00);	/* WP, DPOFUA */
		buf += 8;
		limit = 65535;		/* Should really be FSG_BUFLEN */
	}

	/* No block descriptors */

	/* The mode pages, in numerical order. The only page we support
	 * is the Caching page. */
	if (page_code == 0x08 || all_pages) {
		valid_page = 1;
		buf[0] = 0x08;		/* Page code */
		buf[1] = 10;		/* Page length */
		memset(buf+2, 0, 10);	/* None of the fields are changeable */

		if (!changeable_values) {
			buf[2] = 0x04;	/* Write cache enable, */
					/* Read cache not disabled */
					/* No cache retention priorities */
			put_unaligned_be16(0xffff, &buf[4]);
					/* Don't disable prefetch */
					/* Minimum prefetch = 0 */
			put_unaligned_be16(0xffff, &buf[8]);
					/* Maximum prefetch */
			put_unaligned_be16(0xffff, &buf[10]);
					/* Maximum prefetch ceiling */
		}
		buf += 12;
	}

	/* Check that a valid page was requested and the mode data length
	 * isn't too long. */
	len = buf - buf0;
	if (!valid_page || len > limit) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	/* Store the mode data length */
	if (mscmnd == SC_MODE_SENSE_6)
		buf0[0] = len - 1;
	else
		put_unaligned_be16(len - 2, buf0);
	return len;
}
static int do_start_stop(struct fsg_common *common)
{
	struct fsg_lun	*curlun = &common->luns[common->lun];

	if (!curlun) {
		return -EINVAL;
	} else if (!curlun->removable) {
		curlun->sense_data = SS_INVALID_COMMAND;
		return -EINVAL;
	}

	return 0;
}
static int do_prevent_allow(struct fsg_common *common)
{
	struct fsg_lun	*curlun = &common->luns[common->lun];
	int		prevent;

	if (!curlun->removable) {
		curlun->sense_data = SS_INVALID_COMMAND;
		return -EINVAL;
	}

	prevent = common->cmnd[4] & 0x01;
	if ((common->cmnd[4] & ~0x01) != 0) {	/* Mask away Prevent */
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	if (curlun->prevent_medium_removal && !prevent)
		fsg_lun_fsync_sub(curlun);
	curlun->prevent_medium_removal = prevent;
	return 0;
}
static int do_read_format_capacities(struct fsg_common *common,
				     struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = &common->luns[common->lun];
	u8		*buf = (u8 *) bh->buf;

	buf[0] = buf[1] = buf[2] = 0;
	buf[3] = 8;	/* Only the Current/Maximum Capacity Descriptor */
	buf += 4;

	put_unaligned_be32(curlun->num_sectors, &buf[0]);
						/* Number of blocks */
	put_unaligned_be32(512, &buf[4]);	/* Block length */
	buf[4] = 0x02;				/* Current capacity */
	return 12;
}
static int do_mode_select(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = &common->luns[common->lun];

	/* We don't support MODE SELECT */
	if (curlun)
		curlun->sense_data = SS_INVALID_COMMAND;
	return -EINVAL;
}
/*-------------------------------------------------------------------------*/

static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
{
	int	rc;

	rc = fsg_set_halt(fsg, fsg->bulk_in);
	if (rc == -EAGAIN)
		VDBG(fsg, "delayed bulk-in endpoint halt\n");
	while (rc != 0) {
		if (rc != -EAGAIN) {
			WARNING(fsg, "usb_ep_set_halt -> %d\n", rc);
			rc = 0;
			break;
		}

		rc = usb_ep_set_halt(fsg->bulk_in);
	}
	return rc;
}

static int wedge_bulk_in_endpoint(struct fsg_dev *fsg)
{
	int	rc;

	DBG(fsg, "bulk-in set wedge\n");
	rc = 0; /* usb_ep_set_wedge(fsg->bulk_in); */
	if (rc == -EAGAIN)
		VDBG(fsg, "delayed bulk-in endpoint wedge\n");
	while (rc != 0) {
		if (rc != -EAGAIN) {
			WARNING(fsg, "usb_ep_set_wedge -> %d\n", rc);
			rc = 0;
			break;
		}
	}
	return rc;
}
static int pad_with_zeros(struct fsg_dev *fsg)
{
	struct fsg_buffhd	*bh = fsg->common->next_buffhd_to_fill;
	u32			nkeep = bh->inreq->length;
	u32			nsend;
	int			rc;

	bh->state = BUF_STATE_EMPTY;		/* For the first iteration */
	fsg->common->usb_amount_left = nkeep + fsg->common->residue;
	while (fsg->common->usb_amount_left > 0) {

		/* Wait for the next buffer to be free */
		while (bh->state != BUF_STATE_EMPTY) {
			rc = sleep_thread(fsg->common);
			if (rc)
				return rc;
		}

		nsend = min(fsg->common->usb_amount_left, FSG_BUFLEN);
		memset(bh->buf + nkeep, 0, nsend - nkeep);
		bh->inreq->length = nsend;
		bh->inreq->zero = 0;
		start_transfer(fsg, fsg->bulk_in, bh->inreq,
			       &bh->inreq_busy, &bh->state);
		bh = fsg->common->next_buffhd_to_fill = bh->next;
		fsg->common->usb_amount_left -= nsend;
		nkeep = 0;
	}
	return 0;
}
static int throw_away_data(struct fsg_common *common)
{
	struct fsg_buffhd	*bh;
	u32			amount;
	int			rc;

	for (bh = common->next_buffhd_to_drain;
	     bh->state != BUF_STATE_EMPTY || common->usb_amount_left > 0;
	     bh = common->next_buffhd_to_drain) {

		/* Throw away the data in a filled buffer */
		if (bh->state == BUF_STATE_FULL) {
			bh->state = BUF_STATE_EMPTY;
			common->next_buffhd_to_drain = bh->next;

			/* A short packet or an error ends everything */
			if (bh->outreq->actual != bh->outreq->length ||
			    bh->outreq->status != 0) {
				raise_exception(common,
						FSG_STATE_ABORT_BULK_OUT);
				return -EINTR;
			}
			continue;
		}

		/* Try to submit another request if we need one */
		bh = common->next_buffhd_to_fill;
		if (bh->state == BUF_STATE_EMPTY
		    && common->usb_amount_left > 0) {
			amount = min(common->usb_amount_left, FSG_BUFLEN);

			/* amount is always divisible by 512, hence by
			 * the bulk-out maxpacket size */
			bh->outreq->length = amount;
			bh->bulk_out_intended_length = amount;
			bh->outreq->short_not_ok = 1;
			START_TRANSFER_OR(common, bulk_out, bh->outreq,
					  &bh->outreq_busy, &bh->state)
				/* Don't know what to do if
				 * common->fsg is NULL */
				return -EIO;
			common->next_buffhd_to_fill = bh->next;
			common->usb_amount_left -= amount;
			continue;
		}

		/* Otherwise wait for something to happen */
		rc = sleep_thread(common);
		if (rc)
			return rc;
	}
	return 0;
}
static int finish_reply(struct fsg_common *common)
{
	struct fsg_buffhd	*bh = common->next_buffhd_to_fill;
	int			rc = 0;

	switch (common->data_dir) {
	case DATA_DIR_NONE:
		break;			/* Nothing to send */

	/* If we don't know whether the host wants to read or write,
	 * this must be CB or CBI with an unknown command. We mustn't
	 * try to send or receive any data. So stall both bulk pipes
	 * if we can and wait for a reset. */
	case DATA_DIR_UNKNOWN:
		if (!common->can_stall) {
			/* Nothing */
		} else if (fsg_is_set(common)) {
			fsg_set_halt(common->fsg, common->fsg->bulk_out);
			rc = halt_bulk_in_endpoint(common->fsg);
		} else {
			/* Don't know what to do if common->fsg is NULL */
			rc = -EIO;
		}
		break;

	/* All but the last buffer of data must have already been sent */
	case DATA_DIR_TO_HOST:
		if (common->data_size == 0) {
			/* Nothing to send */

		/* If there's no residue, simply send the last buffer */
		} else if (common->residue == 0) {
			bh->inreq->zero = 0;
			START_TRANSFER_OR(common, bulk_in, bh->inreq,
					  &bh->inreq_busy, &bh->state)
				return -EIO;
			common->next_buffhd_to_fill = bh->next;

		/* For Bulk-only, if we're allowed to stall then send the
		 * short packet and halt the bulk-in endpoint. If we can't
		 * stall, pad out the remaining data with 0's. */
		} else if (common->can_stall) {
			bh->inreq->zero = 1;
			START_TRANSFER_OR(common, bulk_in, bh->inreq,
					  &bh->inreq_busy, &bh->state)
				/* Don't know what to do if
				 * common->fsg is NULL */
				rc = -EIO;
			common->next_buffhd_to_fill = bh->next;
			if (common->fsg)
				rc = halt_bulk_in_endpoint(common->fsg);
		} else if (fsg_is_set(common)) {
			rc = pad_with_zeros(common->fsg);
		} else {
			/* Don't know what to do if common->fsg is NULL */
			rc = -EIO;
		}
		break;

	/* We have processed all we want from the data the host has sent.
	 * There may still be outstanding bulk-out requests. */
	case DATA_DIR_FROM_HOST:
		if (common->residue == 0) {
			/* Nothing to receive */

		/* Did the host stop sending unexpectedly early? */
		} else if (common->short_packet_received) {
			raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
			rc = -EINTR;

		/* We haven't processed all the incoming data. Even though
		 * we may be allowed to stall, doing so would cause a race.
		 * The controller may already have ACK'ed all the remaining
		 * bulk-out packets, in which case the host wouldn't see a
		 * STALL. Not realizing the endpoint was halted, it wouldn't
		 * clear the halt -- leading to problems later on. */
#if 0
		} else if (common->can_stall) {
			if (fsg_is_set(common))
				fsg_set_halt(common->fsg,
					     common->fsg->bulk_out);
			raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
			rc = -EINTR;
#endif

		/* We can't stall. Read in the excess data and throw it
		 * all away. */
		} else {
			rc = throw_away_data(common);
		}
		break;
	}
	return rc;
}
static int send_status(struct fsg_common *common)
{
	struct fsg_lun		*curlun = &common->luns[common->lun];
	struct fsg_buffhd	*bh;
	struct bulk_cs_wrap	*csw;
	int			rc;
	u8			status = USB_STATUS_PASS;
	u32			sd, sdinfo = 0;

	/* Wait for the next buffer to become available */
	bh = common->next_buffhd_to_fill;
	while (bh->state != BUF_STATE_EMPTY) {
		rc = sleep_thread(common);
		if (rc)
			return rc;
	}

	if (curlun)
		sd = curlun->sense_data;
	else if (common->bad_lun_okay)
		sd = SS_NO_SENSE;
	else
		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;

	if (common->phase_error) {
		DBG(common, "sending phase-error status\n");
		status = USB_STATUS_PHASE_ERROR;
		sd = SS_INVALID_COMMAND;
	} else if (sd != SS_NO_SENSE) {
		DBG(common, "sending command-failure status\n");
		status = USB_STATUS_FAIL;
		VDBG(common, "  sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
		     "  info x%x\n",
		     SK(sd), ASC(sd), ASCQ(sd), sdinfo);
	}

	/* Store and send the Bulk-only CSW */
	csw = (void *)bh->buf;

	csw->Signature = cpu_to_le32(USB_BULK_CS_SIG);
	csw->Tag = common->tag;
	csw->Residue = cpu_to_le32(common->residue);
	csw->Status = status;

	bh->inreq->length = USB_BULK_CS_WRAP_LEN;
	bh->inreq->zero = 0;
	START_TRANSFER_OR(common, bulk_in, bh->inreq,
			  &bh->inreq_busy, &bh->state)
		/* Don't know what to do if common->fsg is NULL */
		return -EIO;

	common->next_buffhd_to_fill = bh->next;
	return 0;
}
/*-------------------------------------------------------------------------*/

/* Check whether the command is properly formed and whether its data size
 * and direction agree with the values we already have. */
static int check_command(struct fsg_common *common, int cmnd_size,
			 enum data_direction data_dir, unsigned int mask,
			 int needs_medium, const char *name)
{
	int			i;
	int			lun = common->cmnd[1] >> 5;
	static const char	dirletter[4] = {'u', 'o', 'i', 'n'};
	char			hdlen[20];
	struct fsg_lun		*curlun;

	hdlen[0] = 0;
	if (common->data_dir != DATA_DIR_UNKNOWN)
		sprintf(hdlen, ", H%c=%u", dirletter[(int) common->data_dir],
			common->data_size);
	VDBG(common, "SCSI command: %s;  Dc=%d, D%c=%u;  Hc=%d%s\n",
	     name, cmnd_size, dirletter[(int) data_dir],
	     common->data_size_from_cmnd, common->cmnd_size, hdlen);

	/* We can't reply at all until we know the correct data direction
	 * and size. */
	if (common->data_size_from_cmnd == 0)
		data_dir = DATA_DIR_NONE;
	if (common->data_size < common->data_size_from_cmnd) {
		/* Host data size < Device data size is a phase error.
		 * Carry out the command, but only transfer as much as
		 * we are allowed. */
		common->data_size_from_cmnd = common->data_size;
		common->phase_error = 1;
	}
	common->residue = common->data_size;
	common->usb_amount_left = common->data_size;

	/* Conflicting data directions is a phase error */
	if (common->data_dir != data_dir
	    && common->data_size_from_cmnd > 0) {
		common->phase_error = 1;
		return -EINVAL;
	}

	/* Verify the length of the command itself */
	if (cmnd_size != common->cmnd_size) {

		/* Special case workaround: There are plenty of buggy SCSI
		 * implementations. Many have issues with cbw->Length
		 * field passing a wrong command size. For those cases we
		 * always try to work around the problem by using the length
		 * sent by the host side provided it is at least as large
		 * as the correct command length.
		 * Examples of such cases would be MS-Windows, which issues
		 * REQUEST SENSE with cbw->Length == 12 where it should
		 * be 6, and xbox360 issuing INQUIRY, TEST UNIT READY and
		 * REQUEST SENSE with cbw->Length == 10 where it should
		 * be 6 as well. */
		if (cmnd_size <= common->cmnd_size) {
			DBG(common, "%s is buggy! Expected length %d "
			    "but we got %d\n", name,
			    cmnd_size, common->cmnd_size);
			cmnd_size = common->cmnd_size;
		} else {
			common->phase_error = 1;
			return -EINVAL;
		}
	}

	/* Check that the LUN values are consistent */
	if (common->lun != lun)
		DBG(common, "using LUN %d from CBW, not LUN %d from CDB\n",
		    common->lun, lun);

	/* Check the LUN */
	if (common->lun >= 0 && common->lun < common->nluns) {
		curlun = &common->luns[common->lun];
		if (common->cmnd[0] != SC_REQUEST_SENSE) {
			curlun->sense_data = SS_NO_SENSE;
			curlun->info_valid = 0;
		}
	} else {
		curlun = NULL;
		common->bad_lun_okay = 0;

		/* INQUIRY and REQUEST SENSE commands are explicitly allowed
		 * to use unsupported LUNs; all others may not. */
		if (common->cmnd[0] != SC_INQUIRY &&
		    common->cmnd[0] != SC_REQUEST_SENSE) {
			DBG(common, "unsupported LUN %d\n", common->lun);
			return -EINVAL;
		}
	}

	/* If a unit attention condition exists, only INQUIRY and
	 * REQUEST SENSE commands are allowed; anything else must fail. */
	if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
	    common->cmnd[0] != SC_INQUIRY &&
	    common->cmnd[0] != SC_REQUEST_SENSE) {
		curlun->sense_data = curlun->unit_attention_data;
		curlun->unit_attention_data = SS_NO_SENSE;
		return -EINVAL;
	}

	/* Check that only command bytes listed in the mask are non-zero */
	common->cmnd[1] &= 0x1f;			/* Mask away the LUN */
	for (i = 1; i < cmnd_size; ++i) {
		if (common->cmnd[i] && !(mask & (1 << i))) {
			if (curlun)
				curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
			return -EINVAL;
		}
	}

	return 0;
}
static int do_scsi_command(struct fsg_common *common)
{
	struct fsg_buffhd	*bh;
	int			rc;
	int			reply = -EINVAL;
	int			i;
	static char		unknown[16];
	struct fsg_lun		*curlun = &common->luns[common->lun];

	/* Wait for the next buffer to become available for data or status */
	bh = common->next_buffhd_to_fill;
	common->next_buffhd_to_drain = bh;
	while (bh->state != BUF_STATE_EMPTY) {
		rc = sleep_thread(common);
		if (rc)
			return rc;
	}

	common->phase_error = 0;
	common->short_packet_received = 0;

	down_read(&common->filesem);	/* We're using the backing file */
	switch (common->cmnd[0]) {

	case SC_INQUIRY:
		common->data_size_from_cmnd = common->cmnd[4];
		reply = check_command(common, 6, DATA_DIR_TO_HOST,
				      (1<<4), 0, "INQUIRY");
		if (reply == 0)
			reply = do_inquiry(common, bh);
		break;

	case SC_MODE_SELECT_6:
		common->data_size_from_cmnd = common->cmnd[4];
		reply = check_command(common, 6, DATA_DIR_FROM_HOST,
				      (1<<1) | (1<<4), 0, "MODE SELECT(6)");
		if (reply == 0)
			reply = do_mode_select(common, bh);
		break;

	case SC_MODE_SELECT_10:
		common->data_size_from_cmnd =
			get_unaligned_be16(&common->cmnd[7]);
		reply = check_command(common, 10, DATA_DIR_FROM_HOST,
				      (1<<1) | (3<<7), 0, "MODE SELECT(10)");
		if (reply == 0)
			reply = do_mode_select(common, bh);
		break;

	case SC_MODE_SENSE_6:
		common->data_size_from_cmnd = common->cmnd[4];
		reply = check_command(common, 6, DATA_DIR_TO_HOST,
				      (1<<1) | (1<<2) | (1<<4), 0,
				      "MODE SENSE(6)");
		if (reply == 0)
			reply = do_mode_sense(common, bh);
		break;

	case SC_MODE_SENSE_10:
		common->data_size_from_cmnd =
			get_unaligned_be16(&common->cmnd[7]);
		reply = check_command(common, 10, DATA_DIR_TO_HOST,
				      (1<<1) | (1<<2) | (3<<7), 0,
				      "MODE SENSE(10)");
		if (reply == 0)
			reply = do_mode_sense(common, bh);
		break;

	case SC_PREVENT_ALLOW_MEDIUM_REMOVAL:
		common->data_size_from_cmnd = 0;
		reply = check_command(common, 6, DATA_DIR_NONE,
				      (1<<4), 0,
				      "PREVENT-ALLOW MEDIUM REMOVAL");
		if (reply == 0)
			reply = do_prevent_allow(common);
		break;

	case SC_READ_6:
		i = common->cmnd[4];
		common->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
		reply = check_command(common, 6, DATA_DIR_TO_HOST,
				      (7<<1) | (1<<4), 1, "READ(6)");
		if (reply == 0)
			reply = do_read(common);
		break;

	case SC_READ_10:
		common->data_size_from_cmnd =
				get_unaligned_be16(&common->cmnd[7]) << 9;
		reply = check_command(common, 10, DATA_DIR_TO_HOST,
				      (1<<1) | (0xf<<2) | (3<<7), 1,
				      "READ(10)");
		if (reply == 0)
			reply = do_read(common);
		break;

	case SC_READ_12:
		common->data_size_from_cmnd =
				get_unaligned_be32(&common->cmnd[6]) << 9;
		reply = check_command(common, 12, DATA_DIR_TO_HOST,
				      (1<<1) | (0xf<<2) | (0xf<<6), 1,
				      "READ(12)");
		if (reply == 0)
			reply = do_read(common);
		break;

	case SC_READ_CAPACITY:
		common->data_size_from_cmnd = 8;
		reply = check_command(common, 10, DATA_DIR_TO_HOST,
				      (0xf<<2) | (1<<8), 1, "READ CAPACITY");
		if (reply == 0)
			reply = do_read_capacity(common, bh);
		break;

	case SC_READ_HEADER:
		if (!common->luns[common->lun].cdrom)
			goto unknown_cmnd;
		common->data_size_from_cmnd =
			get_unaligned_be16(&common->cmnd[7]);
		reply = check_command(common, 10, DATA_DIR_TO_HOST,
				      (3<<7) | (0x1f<<1), 1, "READ HEADER");
		if (reply == 0)
			reply = do_read_header(common, bh);
		break;

	case SC_READ_TOC:
		if (!common->luns[common->lun].cdrom)
			goto unknown_cmnd;
		common->data_size_from_cmnd =
			get_unaligned_be16(&common->cmnd[7]);
		reply = check_command(common, 10, DATA_DIR_TO_HOST,
				      (7<<6) | (1<<1), 1, "READ TOC");
		if (reply == 0)
			reply = do_read_toc(common, bh);
		break;

	case SC_READ_FORMAT_CAPACITIES:
		common->data_size_from_cmnd =
			get_unaligned_be16(&common->cmnd[7]);
		reply = check_command(common, 10, DATA_DIR_TO_HOST,
				      (3<<7), 1,
				      "READ FORMAT CAPACITIES");
		if (reply == 0)
			reply = do_read_format_capacities(common, bh);
		break;

	case SC_REQUEST_SENSE:
		common->data_size_from_cmnd = common->cmnd[4];
		reply = check_command(common, 6, DATA_DIR_TO_HOST,
				      (1<<4), 0, "REQUEST SENSE");
		if (reply == 0)
			reply = do_request_sense(common, bh);
		break;

	case SC_START_STOP_UNIT:
		common->data_size_from_cmnd = 0;
		reply = check_command(common, 6, DATA_DIR_NONE,
				      (1<<1) | (1<<4), 0, "START-STOP UNIT");
		if (reply == 0)
			reply = do_start_stop(common);
		break;

	case SC_SYNCHRONIZE_CACHE:
		common->data_size_from_cmnd = 0;
		reply = check_command(common, 10, DATA_DIR_NONE,
				      (0xf<<2) | (3<<7), 1,
				      "SYNCHRONIZE CACHE");
		if (reply == 0)
			reply = do_synchronize_cache(common);
		break;

	case SC_TEST_UNIT_READY:
		common->data_size_from_cmnd = 0;
		reply = check_command(common, 6, DATA_DIR_NONE,
				      0, 1, "TEST UNIT READY");
		break;

	/* Although optional, this command is used by MS-Windows. We
	 * support a minimal version: BytChk must be 0. */
	case SC_VERIFY:
		common->data_size_from_cmnd = 0;
		reply = check_command(common, 10, DATA_DIR_NONE,
				      (1<<1) | (0xf<<2) | (3<<7), 1,
				      "VERIFY");
		if (reply == 0)
			reply = do_verify(common);
		break;

	case SC_WRITE_6:
		i = common->cmnd[4];
		common->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
		reply = check_command(common, 6, DATA_DIR_FROM_HOST,
				      (7<<1) | (1<<4), 1, "WRITE(6)");
		if (reply == 0)
			reply = do_write(common);
		break;

	case SC_WRITE_10:
		common->data_size_from_cmnd =
				get_unaligned_be16(&common->cmnd[7]) << 9;
		reply = check_command(common, 10, DATA_DIR_FROM_HOST,
				      (1<<1) | (0xf<<2) | (3<<7), 1,
				      "WRITE(10)");
		if (reply == 0)
			reply = do_write(common);
		break;

	case SC_WRITE_12:
		common->data_size_from_cmnd =
				get_unaligned_be32(&common->cmnd[6]) << 9;
		reply = check_command(common, 12, DATA_DIR_FROM_HOST,
				      (1<<1) | (0xf<<2) | (0xf<<6), 1,
				      "WRITE(12)");
		if (reply == 0)
			reply = do_write(common);
		break;

	/* Some mandatory commands that we recognize but don't implement.
	 * They don't mean much in this setting. It's left as an exercise
	 * for anyone interested to implement RESERVE and RELEASE in terms
	 * of Posix locks. */
	case SC_FORMAT_UNIT:
	case SC_RELEASE:
	case SC_RESERVE:
	case SC_SEND_DIAGNOSTIC:
		/* Fall through */

	default:
unknown_cmnd:
		common->data_size_from_cmnd = 0;
		sprintf(unknown, "Unknown x%02x", common->cmnd[0]);
		reply = check_command(common, common->cmnd_size,
				      DATA_DIR_UNKNOWN, 0xff, 0, unknown);
		if (reply == 0) {
			curlun->sense_data = SS_INVALID_COMMAND;
			reply = -EINVAL;
		}
		break;
	}
	up_read(&common->filesem);

	if (reply == -EINTR)
		return -EINTR;

	/* Set up the single reply buffer for finish_reply() */
	if (reply == -EINVAL)
		reply = 0;		/* Error reply length */
	if (reply >= 0 && common->data_dir == DATA_DIR_TO_HOST) {
		reply = min((u32) reply, common->data_size_from_cmnd);
		bh->inreq->length = reply;
		bh->state = BUF_STATE_FULL;
		common->residue -= reply;
	}				/* Otherwise it's already set */

	return 0;
}
/*-------------------------------------------------------------------------*/

static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
	struct usb_request	*req = bh->outreq;
	struct fsg_bulk_cb_wrap	*cbw = req->buf;
	struct fsg_common	*common = fsg->common;

	/* Was this a real packet?  Should it be ignored? */
	if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
		return -EINVAL;

	/* Is the CBW valid? */
	if (req->actual != USB_BULK_CB_WRAP_LEN ||
			cbw->Signature != cpu_to_le32(
				USB_BULK_CB_SIG)) {
		DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
				req->actual,
				le32_to_cpu(cbw->Signature));

		/* The Bulk-only spec says we MUST stall the IN endpoint
		 * (6.6.1), so it's unavoidable.  It also says we must
		 * retain this state until the next reset, but there's
		 * no way to tell the controller driver it should ignore
		 * Clear-Feature(HALT) requests.
		 *
		 * We aren't required to halt the OUT endpoint; instead
		 * we can simply accept and discard any data received
		 * until the next reset. */
		wedge_bulk_in_endpoint(fsg);
		set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
		return -EINVAL;
	}

	/* Is the CBW meaningful? */
	if (cbw->Lun >= FSG_MAX_LUNS || cbw->Flags & ~USB_BULK_IN_FLAG ||
			cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
		DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
				"cmdlen %u\n",
				cbw->Lun, cbw->Flags, cbw->Length);

		/* We can do anything we want here, so let's stall the
		 * bulk pipes if we are allowed to. */
		if (common->can_stall) {
			fsg_set_halt(fsg, fsg->bulk_out);
			halt_bulk_in_endpoint(fsg);
		}
		return -EINVAL;
	}

	/* Save the command for later */
	common->cmnd_size = cbw->Length;
	memcpy(common->cmnd, cbw->CDB, common->cmnd_size);
	if (cbw->Flags & USB_BULK_IN_FLAG)
		common->data_dir = DATA_DIR_TO_HOST;
	else
		common->data_dir = DATA_DIR_FROM_HOST;
	common->data_size = le32_to_cpu(cbw->DataTransferLength);
	if (common->data_size == 0)
		common->data_dir = DATA_DIR_NONE;
	common->lun = cbw->Lun;
	common->tag = cbw->Tag;

	return 0;
}
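
/* For reference, the wrapper validated above is the standard 31-byte
 * Bulk-Only Transport CBW: Signature 0x43425355 ("USBC", little-endian),
 * a Tag echoed back in the CSW, DataTransferLength, Flags (bit 7 set,
 * i.e. USB_BULK_IN_FLAG, means device-to-host), Lun, Length and up to 16
 * CDB bytes.  As a rough example, a READ(10) of one 512-byte block at
 * LBA 0 arrives as
 *
 *   55 53 42 43 <4-byte tag> 00 02 00 00 80 00 0a 28 00 00 00 00 00 00 00 01 00 ...
 *
 * which this function turns into cmnd_size = 10, data_dir =
 * DATA_DIR_TO_HOST and data_size = 512. */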
static int get_next_command(struct fsg_common *common)
{
	struct fsg_buffhd	*bh;
	int			rc = 0;

	/* Wait for the next buffer to become available */
	bh = common->next_buffhd_to_fill;
	while (bh->state != BUF_STATE_EMPTY) {
		rc = sleep_thread(common);
		if (rc)
			return rc;
	}

	/* Queue a request to read a Bulk-only CBW */
	set_bulk_out_req_length(common, bh, USB_BULK_CB_WRAP_LEN);
	bh->outreq->short_not_ok = 1;
	START_TRANSFER_OR(common, bulk_out, bh->outreq,
			  &bh->outreq_busy, &bh->state)
		/* Don't know what to do if common->fsg is NULL */
		return -EIO;

	/* We will drain the buffer in software, which means we
	 * can reuse it for the next filling.  No need to advance
	 * next_buffhd_to_fill. */

	/* Wait for the CBW to arrive */
	while (bh->state != BUF_STATE_FULL) {
		rc = sleep_thread(common);
		if (rc)
			return rc;
	}

	rc = fsg_is_set(common) ? received_cbw(common->fsg, bh) : -EIO;
	bh->state = BUF_STATE_EMPTY;

	return rc;
}
/*-------------------------------------------------------------------------*/

static int enable_endpoint(struct fsg_common *common, struct usb_ep *ep,
			   const struct usb_endpoint_descriptor *d)
{
	int	rc;

	ep->driver_data = common;
	rc = usb_ep_enable(ep, d);
	if (rc)
		ERROR(common, "can't enable %s, result %d\n", ep->name, rc);
	return rc;
}

static int alloc_request(struct fsg_common *common, struct usb_ep *ep,
			 struct usb_request **preq)
{
	*preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
	if (*preq)
		return 0;
	ERROR(common, "can't allocate request for %s\n", ep->name);
	return -ENOMEM;
}
/* Reset interface setting and re-init endpoint state (toggle etc). */
static int do_set_interface(struct fsg_common *common, struct fsg_dev *new_fsg)
{
	const struct usb_endpoint_descriptor *d;
	struct fsg_dev *fsg;
	int i, rc = 0;

	if (common->running)
		DBG(common, "reset interface\n");

reset:
	/* Deallocate the requests */
	if (common->fsg) {
		fsg = common->fsg;

		for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
			struct fsg_buffhd *bh = &common->buffhds[i];

			if (bh->inreq) {
				usb_ep_free_request(fsg->bulk_in, bh->inreq);
				bh->inreq = NULL;
			}
			if (bh->outreq) {
				usb_ep_free_request(fsg->bulk_out, bh->outreq);
				bh->outreq = NULL;
			}
		}

		/* Disable the endpoints */
		if (fsg->bulk_in_enabled) {
			usb_ep_disable(fsg->bulk_in);
			fsg->bulk_in_enabled = 0;
		}
		if (fsg->bulk_out_enabled) {
			usb_ep_disable(fsg->bulk_out);
			fsg->bulk_out_enabled = 0;
		}

		common->fsg = NULL;
		/* wake_up(&common->fsg_wait); */
	}

	common->running = 0;
	if (!new_fsg || rc)
		return rc;

	common->fsg = new_fsg;
	fsg = common->fsg;

	/* Enable the endpoints */
	d = fsg_ep_desc(common->gadget,
			&fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc);
	rc = enable_endpoint(common, fsg->bulk_in, d);
	if (rc)
		goto reset;
	fsg->bulk_in_enabled = 1;

	d = fsg_ep_desc(common->gadget,
			&fsg_fs_bulk_out_desc, &fsg_hs_bulk_out_desc);
	rc = enable_endpoint(common, fsg->bulk_out, d);
	if (rc)
		goto reset;
	fsg->bulk_out_enabled = 1;
	common->bulk_out_maxpacket =
		le16_to_cpu(get_unaligned(&d->wMaxPacketSize));
	clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);

	/* Allocate the requests */
	for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
		struct fsg_buffhd	*bh = &common->buffhds[i];

		rc = alloc_request(common, fsg->bulk_in, &bh->inreq);
		if (rc)
			goto reset;
		rc = alloc_request(common, fsg->bulk_out, &bh->outreq);
		if (rc)
			goto reset;
		bh->inreq->buf = bh->outreq->buf = bh->buf;
		bh->inreq->context = bh->outreq->context = bh;
		bh->inreq->complete = bulk_in_complete;
		bh->outreq->complete = bulk_out_complete;
	}

	common->running = 1;

	return rc;
}
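
/* fsg_ep_desc() above simply picks the full-speed or the high-speed
 * variant of each endpoint descriptor depending on the speed the gadget
 * enumerated at, so bulk_out_maxpacket ends up as 64 bytes on a
 * full-speed link and 512 bytes on a high-speed one. */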
/****************************** ALT CONFIGS ******************************/

static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct fsg_dev *fsg = fsg_from_func(f);

	fsg->common->new_fsg = fsg;
	raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
	return 0;
}

static void fsg_disable(struct usb_function *f)
{
	struct fsg_dev *fsg = fsg_from_func(f);

	fsg->common->new_fsg = NULL;
	raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
}
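
/* Note that both callbacks above only record the requested state in
 * common->new_fsg and raise FSG_STATE_CONFIG_CHANGE; the actual endpoint
 * teardown and (re)configuration happens later, when handle_exception()
 * runs do_set_interface() from the worker loop. */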
/*-------------------------------------------------------------------------*/

static void handle_exception(struct fsg_common *common)
{
	int			i;
	struct fsg_buffhd	*bh;
	enum fsg_state		old_state;
	struct fsg_lun		*curlun;
	unsigned int		exception_req_tag;

	/* Cancel all the pending transfers */
	if (common->fsg) {
		for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
			bh = &common->buffhds[i];
			if (bh->inreq_busy)
				usb_ep_dequeue(common->fsg->bulk_in, bh->inreq);
			if (bh->outreq_busy)
				usb_ep_dequeue(common->fsg->bulk_out,
					       bh->outreq);
		}

		/* Wait until everything is idle */
		for (;;) {
			int num_active = 0;
			for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
				bh = &common->buffhds[i];
				num_active += bh->inreq_busy + bh->outreq_busy;
			}
			if (num_active == 0)
				break;
			if (sleep_thread(common))
				return;
		}

		/* Clear out the controller's fifos */
		if (common->fsg->bulk_in_enabled)
			usb_ep_fifo_flush(common->fsg->bulk_in);
		if (common->fsg->bulk_out_enabled)
			usb_ep_fifo_flush(common->fsg->bulk_out);
	}

	/* Reset the I/O buffer states and pointers, the SCSI
	 * state, and the exception.  Then invoke the handler. */

	for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
		bh = &common->buffhds[i];
		bh->state = BUF_STATE_EMPTY;
	}
	common->next_buffhd_to_fill = &common->buffhds[0];
	common->next_buffhd_to_drain = &common->buffhds[0];
	exception_req_tag = common->exception_req_tag;
	old_state = common->state;

	if (old_state == FSG_STATE_ABORT_BULK_OUT)
		common->state = FSG_STATE_STATUS_PHASE;
	else {
		for (i = 0; i < common->nluns; ++i) {
			curlun = &common->luns[i];
			curlun->sense_data = SS_NO_SENSE;
			curlun->info_valid = 0;
		}
		common->state = FSG_STATE_IDLE;
	}

	/* Carry out any extra actions required for the exception */
	switch (old_state) {
	case FSG_STATE_ABORT_BULK_OUT:
		send_status(common);

		if (common->state == FSG_STATE_STATUS_PHASE)
			common->state = FSG_STATE_IDLE;
		break;

	case FSG_STATE_RESET:
		/* In case we were forced against our will to halt a
		 * bulk endpoint, clear the halt now.  (The SuperH UDC
		 * requires this.) */
		if (!fsg_is_set(common))
			break;
		if (test_and_clear_bit(IGNORE_BULK_OUT,
				       &common->fsg->atomic_bitflags))
			usb_ep_clear_halt(common->fsg->bulk_in);

		if (common->ep0_req_tag == exception_req_tag)
			ep0_queue(common);	/* Complete the status stage */
		break;

	case FSG_STATE_CONFIG_CHANGE:
		do_set_interface(common, common->new_fsg);
		break;

	case FSG_STATE_EXIT:
	case FSG_STATE_TERMINATED:
		do_set_interface(common, NULL);		/* Free resources */
		common->state = FSG_STATE_TERMINATED;	/* Stop the thread */
		break;

	case FSG_STATE_INTERFACE_CHANGE:
	case FSG_STATE_DISCONNECT:
	case FSG_STATE_COMMAND_PHASE:
	case FSG_STATE_DATA_PHASE:
	case FSG_STATE_STATUS_PHASE:
	case FSG_STATE_IDLE:
		break;
	}
}
/*-------------------------------------------------------------------------*/

int fsg_main_thread(void *common_)
{
	int ret;
	struct fsg_common *common = the_fsg_common;

	/* The main loop */
	do {
		if (exception_in_progress(common)) {
			handle_exception(common);
			continue;
		}

		if (!common->running) {
			ret = sleep_thread(common);
			if (ret)
				return ret;
			continue;
		}

		ret = get_next_command(common);
		if (ret)
			return ret;

		if (!exception_in_progress(common))
			common->state = FSG_STATE_DATA_PHASE;

		if (do_scsi_command(common) || finish_reply(common))
			continue;

		if (!exception_in_progress(common))
			common->state = FSG_STATE_STATUS_PHASE;

		if (send_status(common))
			continue;

		if (!exception_in_progress(common))
			common->state = FSG_STATE_IDLE;
	} while (0);

	common->thread_task = NULL;

	return 0;
}
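
/* Unlike the original Linux driver, this "thread" body performs a single
 * pass per invocation (note the do { } while (0)); the caller is expected
 * to invoke fsg_main_thread() repeatedly, typically alongside
 * usb_gadget_handle_interrupts(), until the mass-storage session ends. */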
static void fsg_common_release(struct kref *ref);

static struct fsg_common *fsg_common_init(struct fsg_common *common,
					  struct usb_composite_dev *cdev)
{
	struct usb_gadget *gadget = cdev->gadget;
	struct fsg_buffhd *bh;
	struct fsg_lun *curlun;
	int nluns, i, rc;

	/* Find out how many LUNs there should be */
	nluns = 1;
	if (nluns < 1 || nluns > FSG_MAX_LUNS) {
		printf("invalid number of LUNs: %u\n", nluns);
		return ERR_PTR(-EINVAL);
	}

	/* Allocate? */
	if (!common) {
		common = calloc(sizeof *common, 1);
		if (!common)
			return ERR_PTR(-ENOMEM);
		common->free_storage_on_release = 1;
	} else {
		memset(common, 0, sizeof *common);
		common->free_storage_on_release = 0;
	}

	common->private_data = NULL;

	common->gadget = gadget;
	common->ep0 = gadget->ep0;
	common->ep0req = cdev->req;

	/* Maybe allocate device-global string IDs, and patch descriptors */
	if (fsg_strings[FSG_STRING_INTERFACE].id == 0) {
		rc = usb_string_id(cdev);
		if (unlikely(rc < 0))
			goto error_release;
		fsg_strings[FSG_STRING_INTERFACE].id = rc;
		fsg_intf_desc.iInterface = rc;
	}

	/* Create the LUNs, open their backing files, and register the
	 * LUN devices in sysfs. */
	curlun = calloc(nluns, sizeof *curlun);
	if (!curlun) {
		rc = -ENOMEM;
		goto error_release;
	}
	common->nluns = nluns;

	for (i = 0; i < nluns; i++) {
		common->luns[i].removable = 1;

		rc = fsg_lun_open(&common->luns[i], "");
		if (rc)
			goto error_luns;
	}

	/* Data buffers cyclic list */
	bh = common->buffhds;

	i = FSG_NUM_BUFFERS;
	goto buffhds_first_it;
	do {
		bh->next = bh + 1;
		++bh;
buffhds_first_it:
		bh->inreq_busy = 0;
		bh->outreq_busy = 0;
		bh->buf = memalign(CONFIG_SYS_CACHELINE_SIZE, FSG_BUFLEN);
		if (unlikely(!bh->buf)) {
			rc = -ENOMEM;
			goto error_release;
		}
	} while (--i);
	bh->next = common->buffhds;

	snprintf(common->inquiry_string, sizeof common->inquiry_string,
		 "%-8s%-16s%04x",
		 "Linux   ",
		 "File-Store Gadget",
		 0xffff);

	/* Some peripheral controllers are known not to be able to
	 * halt bulk endpoints correctly.  If one of them is present,
	 * disable stalls. */

	/* Tell the thread to start working */
	common->thread_task =
		kthread_create(fsg_main_thread, common,
			       OR(cfg->thread_name, "file-storage"));
	if (IS_ERR(common->thread_task)) {
		rc = PTR_ERR(common->thread_task);
		goto error_release;
	}

	/* Information */
	INFO(common, FSG_DRIVER_DESC ", version: " FSG_DRIVER_VERSION "\n");
	INFO(common, "Number of LUNs=%d\n", common->nluns);

	return common;

error_luns:
	common->nluns = i + 1;
error_release:
	common->state = FSG_STATE_TERMINATED;	/* The thread is dead */
	/* Call fsg_common_release() directly, ref might be not
	 * initialised */
	fsg_common_release(&common->ref);
	return ERR_PTR(rc);
}
static void fsg_common_release(struct kref *ref)
{
	struct fsg_common *common = container_of(ref, struct fsg_common, ref);

	/* If the thread isn't already dead, tell it to exit now */
	if (common->state != FSG_STATE_TERMINATED) {
		raise_exception(common, FSG_STATE_EXIT);
		wait_for_completion(&common->thread_notifier);
	}

	if (likely(common->luns)) {
		struct fsg_lun *lun = common->luns;
		unsigned i = common->nluns;

		/* In error recovery common->nluns may be zero. */
		for (; i; --i, ++lun)
			fsg_lun_close(lun);

		kfree(common->luns);
	}

	{
		struct fsg_buffhd *bh = common->buffhds;
		unsigned i = FSG_NUM_BUFFERS;
		do {
			kfree(bh->buf);
		} while (++bh, --i);
	}

	if (common->free_storage_on_release)
		kfree(common);
}
/*-------------------------------------------------------------------------*/

/**
 * usb_copy_descriptors - copy a vector of USB descriptors
 * @src: null-terminated vector to copy
 * Context: initialization code, which may sleep
 *
 * This makes a copy of a vector of USB descriptors.  Its primary use
 * is to support usb_function objects which can have multiple copies,
 * each needing different descriptors.  Functions may have static
 * tables of descriptors, which are used as templates and customized
 * with identifiers (for interfaces, strings, endpoints, and more)
 * as needed by a given function instance.
 */
struct usb_descriptor_header **
usb_copy_descriptors(struct usb_descriptor_header **src)
{
	struct usb_descriptor_header **tmp;
	unsigned bytes;
	unsigned n_desc;
	void *mem;
	struct usb_descriptor_header **ret;

	/* count descriptors and their sizes; then add vector size */
	for (bytes = 0, n_desc = 0, tmp = src; *tmp; tmp++, n_desc++)
		bytes += (*tmp)->bLength;
	bytes += (n_desc + 1) * sizeof(*tmp);

	mem = memalign(CONFIG_SYS_CACHELINE_SIZE, bytes);
	if (!mem)
		return NULL;

	/* fill in pointers starting at "tmp",
	 * to descriptors copied starting at "mem";
	 * and return "ret"
	 */
	tmp = mem;
	ret = mem;
	mem += (n_desc + 1) * sizeof(*tmp);
	while (*src) {
		memcpy(mem, *src, (*src)->bLength);
		*tmp = mem;
		tmp++;
		mem += (*src)->bLength;
		src++;
	}
	*tmp = NULL;

	return ret;
}
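
/* Typical use, as in fsg_bind() below: duplicate a static template such
 * as fsg_fs_function so that each function instance owns (and can later
 * free) its descriptor vector:
 *
 *	f->descriptors = usb_copy_descriptors(fsg_fs_function);
 *	if (!f->descriptors)
 *		return -ENOMEM;
 */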
static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct fsg_dev *fsg = fsg_from_func(f);

	DBG(fsg, "unbind\n");
	if (fsg->common->fsg == fsg) {
		fsg->common->new_fsg = NULL;
		raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
	}

	free(fsg->function.descriptors);
	free(fsg->function.hs_descriptors);
}
static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct fsg_dev *fsg = fsg_from_func(f);
	struct usb_gadget *gadget = c->cdev->gadget;
	int			i;
	struct usb_ep		*ep;

	fsg->gadget = gadget;

	/* New interface */
	i = usb_interface_id(c, f);
	if (i < 0)
		return i;
	fsg_intf_desc.bInterfaceNumber = i;
	fsg->interface_number = i;

	/* Find all the endpoints we will use */
	ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc);
	if (!ep)
		goto autoconf_fail;
	ep->driver_data = fsg->common;	/* claim the endpoint */
	fsg->bulk_in = ep;

	ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc);
	if (!ep)
		goto autoconf_fail;
	ep->driver_data = fsg->common;	/* claim the endpoint */
	fsg->bulk_out = ep;

	/* Copy descriptors */
	f->descriptors = usb_copy_descriptors(fsg_fs_function);
	if (unlikely(!f->descriptors))
		return -ENOMEM;

	if (gadget_is_dualspeed(gadget)) {
		/* Assume endpoint addresses are the same for both speeds */
		fsg_hs_bulk_in_desc.bEndpointAddress =
			fsg_fs_bulk_in_desc.bEndpointAddress;
		fsg_hs_bulk_out_desc.bEndpointAddress =
			fsg_fs_bulk_out_desc.bEndpointAddress;
		f->hs_descriptors = usb_copy_descriptors(fsg_hs_function);
		if (unlikely(!f->hs_descriptors)) {
			free(f->descriptors);
			return -ENOMEM;
		}
	}

	return 0;

autoconf_fail:
	ERROR(fsg, "unable to autoconfigure all endpoints\n");
	return -ENOTSUPP;
}
/****************************** ADD FUNCTION ******************************/

static struct usb_gadget_strings *fsg_strings_array[] = {
	&fsg_stringtab,
	NULL,
};

static int fsg_bind_config(struct usb_composite_dev *cdev,
			   struct usb_configuration *c,
			   struct fsg_common *common)
{
	struct fsg_dev *fsg;
	int rc;

	fsg = calloc(1, sizeof *fsg);
	if (!fsg)
		return -ENOMEM;

	fsg->function.name        = FSG_DRIVER_DESC;
	fsg->function.strings     = fsg_strings_array;
	fsg->function.bind        = fsg_bind;
	fsg->function.unbind      = fsg_unbind;
	fsg->function.setup       = fsg_setup;
	fsg->function.set_alt     = fsg_set_alt;
	fsg->function.disable     = fsg_disable;

	fsg->common               = common;

	/* Our caller holds a reference to the common structure so we
	 * don't have to worry about it being freed until we return
	 * from this function.  So instead of incrementing the counter
	 * now and decrementing it in error recovery, we increment it
	 * only when the call to usb_add_function() was successful. */

	rc = usb_add_function(c, &fsg->function);

	if (rc)
		free(fsg);

	return rc;
}
int fsg_add(struct usb_configuration *c)
{
	struct fsg_common *fsg_common;

	fsg_common = fsg_common_init(NULL, c->cdev);

	fsg_common->vendor_name = 0;
	fsg_common->product_name = 0;
	fsg_common->release = 0xffff;

	fsg_common->ops = NULL;
	fsg_common->private_data = NULL;

	the_fsg_common = fsg_common;

	return fsg_bind_config(c->cdev, c, fsg_common);
}
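
/* fsg_add() is meant to be called from a configuration's bind path once
 * the composite device exists.  A minimal, purely illustrative sketch
 * (the callback name below is hypothetical, not taken from this file):
 *
 *	static int ums_do_config(struct usb_configuration *c)
 *	{
 *		return fsg_add(c);
 *	}
 *
 * fsg_add() then creates the shared fsg_common state, points
 * the_fsg_common at it and binds one Mass Storage function into the
 * configuration via fsg_bind_config(). */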
int fsg_init(struct ums *ums_dev)