// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus "AP" USB driver for "ES2" controller chips
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 */
#include <linux/kthread.h>
#include <linux/sizes.h>
#include <linux/usb.h>
#include <linux/kfifo.h>
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/greybus.h>
#include <asm/unaligned.h>

#include "arpc.h"
#include "greybus_trace.h"
/* Default timeout for USB vendor requests. */
#define ES2_USB_CTRL_TIMEOUT	500

/* Default timeout for ARPC CPort requests. */
#define ES2_ARPC_CPORT_TIMEOUT	500

/* Fixed CPort numbers */
#define ES2_CPORT_CDSI0		16
#define ES2_CPORT_CDSI1		17

/* Memory sizes for the buffers sent to/from the ES2 controller */
#define ES2_GBUF_MSG_SIZE_MAX	2048

/* Memory sizes for the ARPC buffers */
#define ARPC_OUT_SIZE_MAX	U16_MAX
#define ARPC_IN_SIZE_MAX	128
static const struct usb_device_id id_table[] = {
	{ USB_DEVICE(0x18d1, 0x1eaf) },
	{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
#define APB1_LOG_SIZE	SZ_16K
/*
 * Number of CPort IN urbs in flight at any point in time.
 * Adjust if we are having stalls in the USB buffer due to not enough urbs in
 * flight.
 */
#define NUM_CPORT_IN_URB	4

/*
 * Number of CPort OUT urbs in flight at any point in time.
 * Adjust if we get messages saying we are out of urbs in the system log.
 */
#define NUM_CPORT_OUT_URB	8

/*
 * Number of ARPC in urbs in flight at any point in time.
 */
#define NUM_ARPC_IN_URB	2
/*
 * @endpoint: bulk in endpoint for CPort data
 * @urb: array of urbs for the CPort in messages
 * @buffer: array of buffers for the @cport_in_urb urbs
 */
struct es2_cport_in {
	__u8 endpoint;
	struct urb *urb[NUM_CPORT_IN_URB];
	u8 *buffer[NUM_CPORT_IN_URB];
};
/*
 * es2_ap_dev - ES2 USB Bridge to AP structure
 * @usb_dev: pointer to the USB device we are driving
 * @usb_intf: pointer to the USB interface we are bound to
 * @hd: pointer to our gb_host_device structure
 *
 * @cport_in: endpoint, urbs and buffer for cport in messages
 * @cport_out_endpoint: endpoint for cport out messages
 * @cport_out_urb: array of urbs for the CPort out messages
 * @cport_out_urb_busy: array of flags to see if the @cport_out_urb is busy or
 *			not
 * @cport_out_urb_cancelled: array of flags indicating whether the
 *			corresponding @cport_out_urb is being cancelled
 * @cport_out_urb_lock: locks the @cport_out_urb_busy "list"
 *
 * @apb_log_task: task pointer for logging thread
 * @apb_log_dentry: file system entry for the log file interface
 * @apb_log_enable_dentry: file system entry for enabling logging
 * @apb_log_fifo: kernel FIFO to carry logged data
 * @arpc_urb: array of urbs for the ARPC in messages
 * @arpc_buffer: array of buffers for the @arpc_urb urbs
 * @arpc_endpoint_in: bulk in endpoint for APBridgeA RPC
 * @arpc_id_cycle: gives a unique id to ARPC
 * @arpc_lock: locks ARPC list
 * @arpcs: list of in progress ARPCs
 */
struct es2_ap_dev {
	struct usb_device *usb_dev;
	struct usb_interface *usb_intf;
	struct gb_host_device *hd;

	struct es2_cport_in cport_in;
	__u8 cport_out_endpoint;
	struct urb *cport_out_urb[NUM_CPORT_OUT_URB];
	bool cport_out_urb_busy[NUM_CPORT_OUT_URB];
	bool cport_out_urb_cancelled[NUM_CPORT_OUT_URB];
	spinlock_t cport_out_urb_lock;

	bool cdsi1_in_use;

	struct task_struct *apb_log_task;
	struct dentry *apb_log_dentry;
	struct dentry *apb_log_enable_dentry;
	DECLARE_KFIFO(apb_log_fifo, char, APB1_LOG_SIZE);

	__u8 arpc_endpoint_in;
	struct urb *arpc_urb[NUM_ARPC_IN_URB];
	u8 *arpc_buffer[NUM_ARPC_IN_URB];

	u16 arpc_id_cycle;

	spinlock_t arpc_lock;
	struct list_head arpcs;
};
struct arpc {
	struct list_head list;
	struct arpc_request_message *req;
	struct arpc_response_message *resp;
	struct completion response_received;
};
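
/*
 * ARPC request lifecycle, as wrapped by arpc_sync() further below:
 *
 *	rpc = arpc_alloc(payload, size, type);	   build request/response buffers
 *	arpc_add(es2, rpc);			   assign an id, queue on es2->arpcs
 *	arpc_send(es2, rpc, timeout);		   vendor control-out to the bridge
 *	wait_for_completion_interruptible_timeout(&rpc->response_received, ...);
 *	arpc_del(es2, rpc);			   arpc_in_callback() completes us
 *	arpc_free(rpc);
 */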
static inline struct es2_ap_dev *hd_to_es2(struct gb_host_device *hd)
{
	return (struct es2_ap_dev *)&hd->hd_priv;
}
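
/*
 * The es2_ap_dev lives in the hd_priv area that greybus core allocates for
 * us (see .hd_priv_size in es2_driver below), so the host device handed to
 * any callback can be converted back with a simple cast, e.g.:
 *
 *	struct es2_ap_dev *es2 = hd_to_es2(hd);
 */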
static void cport_out_callback(struct urb *urb);
static void usb_log_enable(struct es2_ap_dev *es2);
static void usb_log_disable(struct es2_ap_dev *es2);
static int arpc_sync(struct es2_ap_dev *es2, u8 type, void *payload,
		     size_t size, int *result, unsigned int timeout);
static int output_sync(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd)
{
	struct usb_device *udev = es2->usb_dev;
	u8 *data;
	int retval;

	data = kmemdup(req, size, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 cmd,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE,
				 0, 0, data, size, ES2_USB_CTRL_TIMEOUT);
	if (retval < 0)
		dev_err(&udev->dev, "%s: return error %d\n", __func__, retval);
	else
		retval = 0;

	kfree(data);
	return retval;
}
static void ap_urb_complete(struct urb *urb)
{
	struct usb_ctrlrequest *dr = urb->context;

	kfree(dr);
	usb_free_urb(urb);
}
static int output_async(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd)
{
	struct usb_device *udev = es2->usb_dev;
	struct usb_ctrlrequest *dr;
	struct urb *urb;
	u8 *buf;
	int retval;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		return -ENOMEM;

	dr = kmalloc(sizeof(*dr) + size, GFP_ATOMIC);
	if (!dr) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	buf = (u8 *)dr + sizeof(*dr);
	memcpy(buf, req, size);

	dr->bRequest = cmd;
	dr->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE;
	dr->wValue = 0;
	dr->wIndex = 0;
	dr->wLength = cpu_to_le16(size);

	usb_fill_control_urb(urb, udev, usb_sndctrlpipe(udev, 0),
			     (unsigned char *)dr, buf, size,
			     ap_urb_complete, dr);
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval) {
		usb_free_urb(urb);
		kfree(dr);
	}

	return retval;
}
static int output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
		  bool async)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);

	if (async)
		return output_async(es2, req, size, cmd);

	return output_sync(es2, req, size, cmd);
}
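
/*
 * Callers of output() choose between the two paths above: the asynchronous
 * variant queues a control urb and returns immediately (completion and
 * cleanup happen in ap_urb_complete()), while the synchronous variant blocks
 * in usb_control_msg() for up to ES2_USB_CTRL_TIMEOUT milliseconds.
 */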
static int es2_cport_in_enable(struct es2_ap_dev *es2,
			       struct es2_cport_in *cport_in)
{
	struct urb *urb;
	int ret = 0;
	int i;

	for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
		urb = cport_in->urb[i];

		ret = usb_submit_urb(urb, GFP_KERNEL);
		if (ret) {
			dev_err(&es2->usb_dev->dev,
				"failed to submit in-urb: %d\n", ret);
			goto err_kill_urbs;
		}
	}

	return 0;

err_kill_urbs:
	for (--i; i >= 0; --i) {
		urb = cport_in->urb[i];
		usb_kill_urb(urb);
	}

	return ret;
}
static void es2_cport_in_disable(struct es2_ap_dev *es2,
				 struct es2_cport_in *cport_in)
{
	struct urb *urb;
	int i;

	for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
		urb = cport_in->urb[i];
		usb_kill_urb(urb);
	}
}
static int es2_arpc_in_enable(struct es2_ap_dev *es2)
{
	struct urb *urb;
	int ret = 0;
	int i;

	for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
		urb = es2->arpc_urb[i];

		ret = usb_submit_urb(urb, GFP_KERNEL);
		if (ret) {
			dev_err(&es2->usb_dev->dev,
				"failed to submit arpc in-urb: %d\n", ret);
			goto err_kill_urbs;
		}
	}

	return 0;

err_kill_urbs:
	for (--i; i >= 0; --i) {
		urb = es2->arpc_urb[i];
		usb_kill_urb(urb);
	}

	return ret;
}
static void es2_arpc_in_disable(struct es2_ap_dev *es2)
{
	struct urb *urb;
	int i;

	for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
		urb = es2->arpc_urb[i];
		usb_kill_urb(urb);
	}
}
static struct urb *next_free_urb(struct es2_ap_dev *es2, gfp_t gfp_mask)
{
	struct urb *urb = NULL;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);

	/* Look in our pool of allocated urbs first, as that's the "fastest" */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		if (!es2->cport_out_urb_busy[i] &&
		    !es2->cport_out_urb_cancelled[i]) {
			es2->cport_out_urb_busy[i] = true;
			urb = es2->cport_out_urb[i];
			break;
		}
	}
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
	if (urb)
		return urb;

	/*
	 * Crap, pool is empty, complain to the syslog and go allocate one
	 * dynamically as we have to succeed.
	 */
	dev_dbg(&es2->usb_dev->dev,
		"No free CPort OUT urbs, having to dynamically allocate one!\n");
	return usb_alloc_urb(0, gfp_mask);
}
static void free_urb(struct es2_ap_dev *es2, struct urb *urb)
{
	unsigned long flags;
	int i;

	/*
	 * See if this was an urb in our pool, if so mark it "free", otherwise
	 * we need to free it ourselves.
	 */
	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		if (urb == es2->cport_out_urb[i]) {
			es2->cport_out_urb_busy[i] = false;
			urb = NULL;
			break;
		}
	}
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

	/* If urb is not NULL, then we need to free this urb */
	usb_free_urb(urb);
}
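
/*
 * next_free_urb() and free_urb() form a simple pool: the urbs from
 * cport_out_urb[] are only flagged busy/free, while the dynamically
 * allocated fallback urbs (the ones not found in the pool above) really
 * are released with usb_free_urb().
 */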
/*
 * We (ab)use the operation-message header pad bytes to transfer the
 * cport id in order to minimise overhead.
 */
static void
gb_message_cport_pack(struct gb_operation_msg_hdr *header, u16 cport_id)
{
	header->pad[0] = cport_id;
}
/* Clear the pad bytes used for the CPort id */
static void gb_message_cport_clear(struct gb_operation_msg_hdr *header)
{
	header->pad[0] = 0;
}
/* Extract the CPort id packed into the header, and clear it */
static u16 gb_message_cport_unpack(struct gb_operation_msg_hdr *header)
{
	u16 cport_id = header->pad[0];

	gb_message_cport_clear(header);

	return cport_id;
}
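
/*
 * Example of the pad-byte packing used above: sending on CPort 5 sets
 * header->pad[0] = 5 just before the bulk-out transfer; on the receive side
 * gb_message_cport_unpack() reads the 5 back and zeroes the pad byte so the
 * header looks untouched to the rest of the stack.  This is also why
 * apb_get_cport_count() below caps the CPort count at U8_MAX.
 */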
/*
 * Returns zero if the message was successfully queued, or a negative errno
 * otherwise.
 */
static int message_send(struct gb_host_device *hd, u16 cport_id,
			struct gb_message *message, gfp_t gfp_mask)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;
	size_t buffer_size;
	int retval;
	struct urb *urb;
	unsigned long flags;

	/*
	 * The data actually transferred will include an indication
	 * of where the data should be sent.  Do one last check of
	 * the target CPort id before filling it in.
	 */
	if (!cport_id_valid(hd, cport_id)) {
		dev_err(&udev->dev, "invalid cport %u\n", cport_id);
		return -EINVAL;
	}

	/* Find a free urb */
	urb = next_free_urb(es2, gfp_mask);
	if (!urb)
		return -ENOMEM;

	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
	message->hcpriv = urb;
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

	/* Pack the cport id into the message header */
	gb_message_cport_pack(message->header, cport_id);

	buffer_size = sizeof(*message->header) + message->payload_size;

	usb_fill_bulk_urb(urb, udev,
			  usb_sndbulkpipe(udev,
					  es2->cport_out_endpoint),
			  message->buffer, buffer_size,
			  cport_out_callback, message);
	urb->transfer_flags |= URB_ZERO_PACKET;

	trace_gb_message_submit(message);

	retval = usb_submit_urb(urb, gfp_mask);
	if (retval) {
		dev_err(&udev->dev, "failed to submit out-urb: %d\n", retval);

		spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
		message->hcpriv = NULL;
		spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

		gb_message_cport_clear(message->header);

		free_urb(es2, urb);

		return retval;
	}

	return 0;
}
/*
 * Can not be called in atomic context.
 */
static void message_cancel(struct gb_message *message)
{
	struct gb_host_device *hd = message->operation->connection->hd;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct urb *urb;
	int i;

	spin_lock_irq(&es2->cport_out_urb_lock);
	urb = message->hcpriv;

	/* Prevent dynamically allocated urb from being deallocated. */
	usb_get_urb(urb);

	/* Prevent pre-allocated urb from being reused. */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		if (urb == es2->cport_out_urb[i]) {
			es2->cport_out_urb_cancelled[i] = true;
			break;
		}
	}
	spin_unlock_irq(&es2->cport_out_urb_lock);

	usb_kill_urb(urb);

	if (i < NUM_CPORT_OUT_URB) {
		spin_lock_irq(&es2->cport_out_urb_lock);
		es2->cport_out_urb_cancelled[i] = false;
		spin_unlock_irq(&es2->cport_out_urb_lock);
	}

	usb_free_urb(urb);
}
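
/*
 * Note the cancellation protocol above: the urb is pinned with an extra
 * reference and its pool slot marked cancelled so that next_free_urb() will
 * not hand it out again while the urb is being killed outside the lock; only
 * afterwards is the cancelled flag cleared and the slot made reusable.
 */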
static int es2_cport_allocate(struct gb_host_device *hd, int cport_id,
			      unsigned long flags)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct ida *id_map = &hd->cport_id_map;
	int ida_start, ida_end;

	switch (cport_id) {
	case ES2_CPORT_CDSI0:
	case ES2_CPORT_CDSI1:
		dev_err(&hd->dev, "cport %d not available\n", cport_id);
		return -EBUSY;
	}

	if (flags & GB_CONNECTION_FLAG_OFFLOADED &&
	    flags & GB_CONNECTION_FLAG_CDSI1) {
		if (es2->cdsi1_in_use) {
			dev_err(&hd->dev, "CDSI1 already in use\n");
			return -EBUSY;
		}

		es2->cdsi1_in_use = true;

		return ES2_CPORT_CDSI1;
	}

	if (cport_id < 0) {
		ida_start = 0;
		ida_end = hd->num_cports;
	} else if (cport_id < hd->num_cports) {
		ida_start = cport_id;
		ida_end = cport_id + 1;
	} else {
		dev_err(&hd->dev, "cport %d not available\n", cport_id);
		return -EINVAL;
	}

	return ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
}
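
/*
 * Allocation ranges used above, as a concrete example: a wildcard request
 * (cport_id < 0) asks the ida for any id in [0, hd->num_cports), while a
 * request for, say, cport 7 uses the one-slot range [7, 8) and therefore
 * either gets exactly 7 or fails.  CDSI0/CDSI1 (16/17) never reach the ida;
 * they are reserved at probe time via gb_hd_cport_reserve().
 */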
static void es2_cport_release(struct gb_host_device *hd, u16 cport_id)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);

	switch (cport_id) {
	case ES2_CPORT_CDSI1:
		es2->cdsi1_in_use = false;
		return;
	}

	ida_simple_remove(&hd->cport_id_map, cport_id);
}
static int cport_enable(struct gb_host_device *hd, u16 cport_id,
			unsigned long flags)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;
	struct gb_apb_request_cport_flags *req;
	u32 connection_flags;
	int ret;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	connection_flags = 0;
	if (flags & GB_CONNECTION_FLAG_CONTROL)
		connection_flags |= GB_APB_CPORT_FLAG_CONTROL;
	if (flags & GB_CONNECTION_FLAG_HIGH_PRIO)
		connection_flags |= GB_APB_CPORT_FLAG_HIGH_PRIO;

	req->flags = cpu_to_le32(connection_flags);

	dev_dbg(&hd->dev, "%s - cport = %u, flags = %02x\n", __func__,
		cport_id, connection_flags);

	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			      GB_APB_REQUEST_CPORT_FLAGS,
			      USB_DIR_OUT | USB_TYPE_VENDOR |
			      USB_RECIP_INTERFACE, cport_id, 0,
			      req, sizeof(*req), ES2_USB_CTRL_TIMEOUT);
	if (ret != sizeof(*req)) {
		dev_err(&udev->dev, "failed to set cport flags for port %d\n",
			cport_id);
		if (ret >= 0)
			ret = -EIO;

		goto out;
	}

	ret = 0;
out:
	kfree(req);

	return ret;
}
static int es2_cport_connected(struct gb_host_device *hd, u16 cport_id)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct device *dev = &es2->usb_dev->dev;
	struct arpc_cport_connected_req req;
	int ret;

	req.cport_id = cpu_to_le16(cport_id);
	ret = arpc_sync(es2, ARPC_TYPE_CPORT_CONNECTED, &req, sizeof(req),
			NULL, ES2_ARPC_CPORT_TIMEOUT);
	if (ret) {
		dev_err(dev, "failed to set connected state for cport %u: %d\n",
			cport_id, ret);
		return ret;
	}

	return 0;
}
static int es2_cport_flush(struct gb_host_device *hd, u16 cport_id)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct device *dev = &es2->usb_dev->dev;
	struct arpc_cport_flush_req req;
	int ret;

	req.cport_id = cpu_to_le16(cport_id);
	ret = arpc_sync(es2, ARPC_TYPE_CPORT_FLUSH, &req, sizeof(req),
			NULL, ES2_ARPC_CPORT_TIMEOUT);
	if (ret) {
		dev_err(dev, "failed to flush cport %u: %d\n", cport_id, ret);
		return ret;
	}

	return 0;
}
static int es2_cport_shutdown(struct gb_host_device *hd, u16 cport_id,
			      u8 phase, unsigned int timeout)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct device *dev = &es2->usb_dev->dev;
	struct arpc_cport_shutdown_req req;
	int result;
	int ret;

	if (timeout > U16_MAX)
		return -EINVAL;

	req.cport_id = cpu_to_le16(cport_id);
	req.timeout = cpu_to_le16(timeout);
	req.phase = phase;
	ret = arpc_sync(es2, ARPC_TYPE_CPORT_SHUTDOWN, &req, sizeof(req),
			&result, ES2_ARPC_CPORT_TIMEOUT + timeout);
	if (ret) {
		dev_err(dev, "failed to send shutdown over cport %u: %d (%d)\n",
			cport_id, ret, result);
		return ret;
	}

	return 0;
}
static int es2_cport_quiesce(struct gb_host_device *hd, u16 cport_id,
			     size_t peer_space, unsigned int timeout)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct device *dev = &es2->usb_dev->dev;
	struct arpc_cport_quiesce_req req;
	int result;
	int ret;

	if (peer_space > U16_MAX)
		return -EINVAL;

	if (timeout > U16_MAX)
		return -EINVAL;

	req.cport_id = cpu_to_le16(cport_id);
	req.peer_space = cpu_to_le16(peer_space);
	req.timeout = cpu_to_le16(timeout);
	ret = arpc_sync(es2, ARPC_TYPE_CPORT_QUIESCE, &req, sizeof(req),
			&result, ES2_ARPC_CPORT_TIMEOUT + timeout);
	if (ret) {
		dev_err(dev, "failed to quiesce cport %u: %d (%d)\n",
			cport_id, ret, result);
		return ret;
	}

	return 0;
}
static int es2_cport_clear(struct gb_host_device *hd, u16 cport_id)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct device *dev = &es2->usb_dev->dev;
	struct arpc_cport_clear_req req;
	int ret;

	req.cport_id = cpu_to_le16(cport_id);
	ret = arpc_sync(es2, ARPC_TYPE_CPORT_CLEAR, &req, sizeof(req),
			NULL, ES2_ARPC_CPORT_TIMEOUT);
	if (ret) {
		dev_err(dev, "failed to clear cport %u: %d\n", cport_id, ret);
		return ret;
	}

	return 0;
}
static int latency_tag_enable(struct gb_host_device *hd, u16 cport_id)
{
	int retval;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 GB_APB_REQUEST_LATENCY_TAG_EN,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, cport_id, 0, NULL,
				 0, ES2_USB_CTRL_TIMEOUT);
	if (retval < 0)
		dev_err(&udev->dev, "Cannot enable latency tag for cport %d\n",
			cport_id);
	return retval;
}
static int latency_tag_disable(struct gb_host_device *hd, u16 cport_id)
{
	int retval;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 GB_APB_REQUEST_LATENCY_TAG_DIS,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, cport_id, 0, NULL,
				 0, ES2_USB_CTRL_TIMEOUT);
	if (retval < 0)
		dev_err(&udev->dev, "Cannot disable latency tag for cport %d\n",
			cport_id);
	return retval;
}
static struct gb_hd_driver es2_driver = {
	.hd_priv_size		= sizeof(struct es2_ap_dev),
	.message_send		= message_send,
	.message_cancel		= message_cancel,
	.cport_allocate		= es2_cport_allocate,
	.cport_release		= es2_cport_release,
	.cport_enable		= cport_enable,
	.cport_connected	= es2_cport_connected,
	.cport_flush		= es2_cport_flush,
	.cport_shutdown		= es2_cport_shutdown,
	.cport_quiesce		= es2_cport_quiesce,
	.cport_clear		= es2_cport_clear,
	.latency_tag_enable	= latency_tag_enable,
	.latency_tag_disable	= latency_tag_disable,
	.output			= output,
};
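
/*
 * This gb_hd_driver is what ties the callbacks above into greybus core:
 * ap_probe() passes it to gb_hd_create(), and from then on the core calls
 * back through these ops for every message and CPort operation.
 */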
/* Common function to report consistent warnings based on URB status */
static int check_urb_status(struct urb *urb)
{
	struct device *dev = &urb->dev->dev;
	int status = urb->status;

	switch (status) {
	case 0:
		return 0;

	case -EOVERFLOW:
		dev_err(dev, "%s: overflow actual length is %d\n",
			__func__, urb->actual_length);
		fallthrough;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
	case -EILSEQ:
	case -EPROTO:
		/* device is gone, stop sending */
		return status;
	}
	dev_err(dev, "%s: unknown status %d\n", __func__, status);

	return -EAGAIN;
}
static void es2_destroy(struct es2_ap_dev *es2)
{
	struct usb_device *udev;
	struct urb *urb;
	int i;

	debugfs_remove(es2->apb_log_enable_dentry);
	usb_log_disable(es2);

	/* Tear down everything! */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		urb = es2->cport_out_urb[i];
		usb_kill_urb(urb);
		usb_free_urb(urb);
		es2->cport_out_urb[i] = NULL;
		es2->cport_out_urb_busy[i] = false;	/* just to be anal */
	}

	for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
		usb_free_urb(es2->arpc_urb[i]);
		kfree(es2->arpc_buffer[i]);
		es2->arpc_buffer[i] = NULL;
	}

	for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
		usb_free_urb(es2->cport_in.urb[i]);
		kfree(es2->cport_in.buffer[i]);
		es2->cport_in.buffer[i] = NULL;
	}

	/* release reserved CDSI0 and CDSI1 cports */
	gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI1);
	gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI0);

	udev = es2->usb_dev;
	gb_hd_put(es2->hd);

	usb_put_dev(udev);
}
static void cport_in_callback(struct urb *urb)
{
	struct gb_host_device *hd = urb->context;
	struct device *dev = &urb->dev->dev;
	struct gb_operation_msg_hdr *header;
	int status = check_urb_status(urb);
	int retval;
	u16 cport_id;

	if (status) {
		if ((status == -EAGAIN) || (status == -EPROTO))
			goto exit;

		/* The urb is being unlinked */
		if (status == -ENOENT || status == -ESHUTDOWN)
			return;

		dev_err(dev, "urb cport in error %d (dropped)\n", status);
		return;
	}

	if (urb->actual_length < sizeof(*header)) {
		dev_err(dev, "short message received\n");
		goto exit;
	}

	/* Extract the CPort id, which is packed in the message header */
	header = urb->transfer_buffer;
	cport_id = gb_message_cport_unpack(header);

	if (cport_id_valid(hd, cport_id)) {
		greybus_data_rcvd(hd, cport_id, urb->transfer_buffer,
				  urb->actual_length);
	} else {
		dev_err(dev, "invalid cport id %u received\n", cport_id);
	}
exit:
	/* put our urb back in the request pool */
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		dev_err(dev, "failed to resubmit in-urb: %d\n", retval);
}
static void cport_out_callback(struct urb *urb)
{
	struct gb_message *message = urb->context;
	struct gb_host_device *hd = message->operation->connection->hd;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	int status = check_urb_status(urb);
	unsigned long flags;

	gb_message_cport_clear(message->header);

	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
	message->hcpriv = NULL;
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

	/*
	 * Tell the submitter that the message send (attempt) is
	 * complete, and report the status.
	 */
	greybus_message_sent(hd, message, status);

	free_urb(es2, urb);
}
static struct arpc *arpc_alloc(void *payload, u16 size, u8 type)
{
	struct arpc *rpc;

	if (size + sizeof(*rpc->req) > ARPC_OUT_SIZE_MAX)
		return NULL;

	rpc = kzalloc(sizeof(*rpc), GFP_KERNEL);
	if (!rpc)
		return NULL;

	INIT_LIST_HEAD(&rpc->list);
	rpc->req = kzalloc(sizeof(*rpc->req) + size, GFP_KERNEL);
	if (!rpc->req)
		goto err_free_rpc;

	rpc->resp = kzalloc(sizeof(*rpc->resp), GFP_KERNEL);
	if (!rpc->resp)
		goto err_free_req;

	rpc->req->type = type;
	rpc->req->size = cpu_to_le16(sizeof(*rpc->req) + size);
	memcpy(rpc->req->data, payload, size);

	init_completion(&rpc->response_received);

	return rpc;

err_free_req:
	kfree(rpc->req);
err_free_rpc:
	kfree(rpc);

	return NULL;
}
static void arpc_free(struct arpc *rpc)
{
	kfree(rpc->req);
	kfree(rpc->resp);
	kfree(rpc);
}
static struct arpc *arpc_find(struct es2_ap_dev *es2, __le16 id)
{
	struct arpc *rpc;

	list_for_each_entry(rpc, &es2->arpcs, list) {
		if (rpc->req->id == id)
			return rpc;
	}

	return NULL;
}
static void arpc_add(struct es2_ap_dev *es2, struct arpc *rpc)
{
	rpc->req->id = cpu_to_le16(es2->arpc_id_cycle++);
	list_add_tail(&rpc->list, &es2->arpcs);
}
static void arpc_del(struct es2_ap_dev *es2, struct arpc *rpc)
{
	list_del(&rpc->list);
}
static int arpc_send(struct es2_ap_dev *es2, struct arpc *rpc, int timeout)
{
	struct usb_device *udev = es2->usb_dev;
	int retval;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 GB_APB_REQUEST_ARPC_RUN,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE,
				 0, 0,
				 rpc->req, le16_to_cpu(rpc->req->size),
				 ES2_USB_CTRL_TIMEOUT);
	if (retval != le16_to_cpu(rpc->req->size)) {
		dev_err(&udev->dev,
			"failed to send ARPC request %d: %d\n",
			rpc->req->type, retval);
		if (retval > 0)
			retval = -EIO;
		return retval;
	}

	return 0;
}
static int arpc_sync(struct es2_ap_dev *es2, u8 type, void *payload,
		     size_t size, int *result, unsigned int timeout)
{
	struct arpc *rpc;
	unsigned long flags;
	int retval;

	if (result)
		*result = 0;

	rpc = arpc_alloc(payload, size, type);
	if (!rpc)
		return -ENOMEM;

	spin_lock_irqsave(&es2->arpc_lock, flags);
	arpc_add(es2, rpc);
	spin_unlock_irqrestore(&es2->arpc_lock, flags);

	retval = arpc_send(es2, rpc, timeout);
	if (retval)
		goto out_arpc_del;

	retval = wait_for_completion_interruptible_timeout(
						&rpc->response_received,
						msecs_to_jiffies(timeout));
	if (retval <= 0) {
		if (!retval)
			retval = -ETIMEDOUT;
		goto out_arpc_del;
	}

	if (rpc->resp->result) {
		retval = -EREMOTEIO;
		if (result)
			*result = rpc->resp->result;
	} else {
		retval = 0;
	}

out_arpc_del:
	spin_lock_irqsave(&es2->arpc_lock, flags);
	arpc_del(es2, rpc);
	spin_unlock_irqrestore(&es2->arpc_lock, flags);
	arpc_free(rpc);

	if (retval < 0 && retval != -EREMOTEIO) {
		dev_err(&es2->usb_dev->dev,
			"failed to execute ARPC: %d\n", retval);
	}

	return retval;
}
static void arpc_in_callback(struct urb *urb)
{
	struct es2_ap_dev *es2 = urb->context;
	struct device *dev = &urb->dev->dev;
	int status = check_urb_status(urb);
	struct arpc *rpc;
	struct arpc_response_message *resp;
	unsigned long flags;
	int retval;

	if (status) {
		if ((status == -EAGAIN) || (status == -EPROTO))
			goto exit;

		/* The urb is being unlinked */
		if (status == -ENOENT || status == -ESHUTDOWN)
			return;

		dev_err(dev, "arpc in-urb error %d (dropped)\n", status);
		return;
	}

	if (urb->actual_length < sizeof(*resp)) {
		dev_err(dev, "short arpc response received\n");
		goto exit;
	}

	resp = urb->transfer_buffer;
	spin_lock_irqsave(&es2->arpc_lock, flags);
	rpc = arpc_find(es2, resp->id);
	if (!rpc) {
		dev_err(dev, "invalid arpc response id received: %u\n",
			le16_to_cpu(resp->id));
		spin_unlock_irqrestore(&es2->arpc_lock, flags);
		goto exit;
	}

	memcpy(rpc->resp, resp, sizeof(*resp));
	complete(&rpc->response_received);
	spin_unlock_irqrestore(&es2->arpc_lock, flags);

exit:
	/* put our urb back in the request pool */
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		dev_err(dev, "failed to resubmit arpc in-urb: %d\n", retval);
}
#define APB1_LOG_MSG_SIZE	64
static void apb_log_get(struct es2_ap_dev *es2, char *buf)
{
	int retval;

	do {
		retval = usb_control_msg(es2->usb_dev,
					 usb_rcvctrlpipe(es2->usb_dev, 0),
					 GB_APB_REQUEST_LOG,
					 USB_DIR_IN | USB_TYPE_VENDOR |
					 USB_RECIP_INTERFACE,
					 0x00, 0x00,
					 buf,
					 APB1_LOG_MSG_SIZE,
					 ES2_USB_CTRL_TIMEOUT);
		if (retval > 0)
			kfifo_in(&es2->apb_log_fifo, buf, retval);
	} while (retval > 0);
}
static int apb_log_poll(void *data)
{
	struct es2_ap_dev *es2 = data;
	char *buf;

	buf = kmalloc(APB1_LOG_MSG_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	while (!kthread_should_stop()) {
		msleep(1000);
		apb_log_get(es2, buf);
	}

	kfree(buf);

	return 0;
}
static ssize_t apb_log_read(struct file *f, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct es2_ap_dev *es2 = file_inode(f)->i_private;
	ssize_t ret;
	size_t copied;
	char *tmp_buf;

	if (count > APB1_LOG_SIZE)
		count = APB1_LOG_SIZE;

	tmp_buf = kmalloc(count, GFP_KERNEL);
	if (!tmp_buf)
		return -ENOMEM;

	copied = kfifo_out(&es2->apb_log_fifo, tmp_buf, count);
	ret = simple_read_from_buffer(buf, count, ppos, tmp_buf, copied);

	kfree(tmp_buf);

	return ret;
}
static const struct file_operations apb_log_fops = {
	.read	= apb_log_read,
};
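
/*
 * The resulting debugfs file is created by usb_log_enable() below as
 * "apb_log" under the greybus debugfs root returned by gb_debugfs_get().
 * With debugfs mounted in the usual place that would typically be read as,
 * for example:
 *
 *	cat /sys/kernel/debug/greybus/apb_log
 *
 * (the exact path depends on where debugfs is mounted).
 */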
static void usb_log_enable(struct es2_ap_dev *es2)
{
	if (!IS_ERR_OR_NULL(es2->apb_log_task))
		return;

	/* get log from APB1 */
	es2->apb_log_task = kthread_run(apb_log_poll, es2, "apb_log");
	if (IS_ERR(es2->apb_log_task))
		return;
	/* XXX We will need to rename this per APB */
	es2->apb_log_dentry = debugfs_create_file("apb_log", 0444,
						  gb_debugfs_get(), es2,
						  &apb_log_fops);
}
static void usb_log_disable(struct es2_ap_dev *es2)
{
	if (IS_ERR_OR_NULL(es2->apb_log_task))
		return;

	debugfs_remove(es2->apb_log_dentry);
	es2->apb_log_dentry = NULL;

	kthread_stop(es2->apb_log_task);
	es2->apb_log_task = NULL;
}
static ssize_t apb_log_enable_read(struct file *f, char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct es2_ap_dev *es2 = file_inode(f)->i_private;
	int enable = !IS_ERR_OR_NULL(es2->apb_log_task);
	char tmp_buf[3];

	sprintf(tmp_buf, "%d\n", enable);
	return simple_read_from_buffer(buf, count, ppos, tmp_buf, 3);
}
static ssize_t apb_log_enable_write(struct file *f, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	int enable;
	ssize_t retval;
	struct es2_ap_dev *es2 = file_inode(f)->i_private;

	retval = kstrtoint_from_user(buf, count, 10, &enable);
	if (retval)
		return retval;

	if (enable)
		usb_log_enable(es2);
	else
		usb_log_disable(es2);

	return count;
}
static const struct file_operations apb_log_enable_fops = {
	.read	= apb_log_enable_read,
	.write	= apb_log_enable_write,
};
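
/*
 * "apb_log_enable" (created in ap_probe() below) reads back 0 or 1 and
 * accepts the same values to stop or start the apb_log_poll() thread, e.g.:
 *
 *	echo 1 > /sys/kernel/debug/greybus/apb_log_enable
 *
 * again assuming the usual debugfs mount point.
 */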
static int apb_get_cport_count(struct usb_device *udev)
{
	int retval;
	__le16 *cport_count;

	cport_count = kzalloc(sizeof(*cport_count), GFP_KERNEL);
	if (!cport_count)
		return -ENOMEM;

	retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				 GB_APB_REQUEST_CPORT_COUNT,
				 USB_DIR_IN | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, 0, 0, cport_count,
				 sizeof(*cport_count), ES2_USB_CTRL_TIMEOUT);
	if (retval != sizeof(*cport_count)) {
		dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n",
			retval);

		if (retval >= 0)
			retval = -EIO;

		goto out;
	}

	retval = le16_to_cpu(*cport_count);

	/* We need to fit a CPort ID in one byte of a message header */
	if (retval > U8_MAX) {
		retval = U8_MAX;
		dev_warn(&udev->dev, "Limiting number of CPorts to U8_MAX\n");
	}

out:
	kfree(cport_count);
	return retval;
}
/*
 * The ES2 USB Bridge device has 15 endpoints:
 * 1 Control - usual USB stuff + AP -> APBridgeA messages
 * 7 Bulk IN - CPort data in
 * 7 Bulk OUT - CPort data out
 */
static int ap_probe(struct usb_interface *interface,
		    const struct usb_device_id *id)
{
	struct es2_ap_dev *es2;
	struct gb_host_device *hd;
	struct usb_device *udev;
	struct usb_host_interface *iface_desc;
	struct usb_endpoint_descriptor *endpoint;
	__u8 ep_addr;
	int retval;
	int i;
	int num_cports;
	bool bulk_out_found = false;
	bool bulk_in_found = false;
	bool arpc_in_found = false;

	udev = usb_get_dev(interface_to_usbdev(interface));

	num_cports = apb_get_cport_count(udev);
	if (num_cports < 0) {
		usb_put_dev(udev);
		dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n",
			num_cports);
		return num_cports;
	}

	hd = gb_hd_create(&es2_driver, &udev->dev, ES2_GBUF_MSG_SIZE_MAX,
			  num_cports);
	if (IS_ERR(hd)) {
		usb_put_dev(udev);
		return PTR_ERR(hd);
	}

	es2 = hd_to_es2(hd);
	es2->hd = hd;
	es2->usb_intf = interface;
	es2->usb_dev = udev;
	spin_lock_init(&es2->cport_out_urb_lock);
	INIT_KFIFO(es2->apb_log_fifo);
	usb_set_intfdata(interface, es2);

	/*
	 * Reserve the CDSI0 and CDSI1 CPorts so they won't be allocated
	 * dynamically.
	 */
	retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI0);
	if (retval)
		goto error;
	retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI1);
	if (retval)
		goto error;

	/* find all bulk endpoints */
	iface_desc = interface->cur_altsetting;
	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
		endpoint = &iface_desc->endpoint[i].desc;
		ep_addr = endpoint->bEndpointAddress;

		if (usb_endpoint_is_bulk_in(endpoint)) {
			if (!bulk_in_found) {
				es2->cport_in.endpoint = ep_addr;
				bulk_in_found = true;
			} else if (!arpc_in_found) {
				es2->arpc_endpoint_in = ep_addr;
				arpc_in_found = true;
			} else {
				dev_warn(&udev->dev,
					 "Unused bulk IN endpoint found: 0x%02x\n",
					 ep_addr);
			}
			continue;
		}
		if (usb_endpoint_is_bulk_out(endpoint)) {
			if (!bulk_out_found) {
				es2->cport_out_endpoint = ep_addr;
				bulk_out_found = true;
			} else {
				dev_warn(&udev->dev,
					 "Unused bulk OUT endpoint found: 0x%02x\n",
					 ep_addr);
			}
			continue;
		}
		dev_warn(&udev->dev,
			 "Unknown endpoint type found, address 0x%02x\n",
			 ep_addr);
	}
	if (!bulk_in_found || !arpc_in_found || !bulk_out_found) {
		dev_err(&udev->dev, "Not enough endpoints found in device, aborting!\n");
		retval = -ENODEV;
		goto error;
	}

	/* Allocate buffers for our cport in messages */
	for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
		struct urb *urb;
		u8 *buffer;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			retval = -ENOMEM;
			goto error;
		}
		es2->cport_in.urb[i] = urb;

		buffer = kmalloc(ES2_GBUF_MSG_SIZE_MAX, GFP_KERNEL);
		if (!buffer) {
			retval = -ENOMEM;
			goto error;
		}

		usb_fill_bulk_urb(urb, udev,
				  usb_rcvbulkpipe(udev, es2->cport_in.endpoint),
				  buffer, ES2_GBUF_MSG_SIZE_MAX,
				  cport_in_callback, hd);

		es2->cport_in.buffer[i] = buffer;
	}

	/* Allocate buffers for ARPC in messages */
	for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
		struct urb *urb;
		u8 *buffer;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			retval = -ENOMEM;
			goto error;
		}
		es2->arpc_urb[i] = urb;

		buffer = kmalloc(ARPC_IN_SIZE_MAX, GFP_KERNEL);
		if (!buffer) {
			retval = -ENOMEM;
			goto error;
		}

		usb_fill_bulk_urb(urb, udev,
				  usb_rcvbulkpipe(udev,
						  es2->arpc_endpoint_in),
				  buffer, ARPC_IN_SIZE_MAX,
				  arpc_in_callback, es2);

		es2->arpc_buffer[i] = buffer;
	}

	/* Allocate urbs for our CPort OUT messages */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		struct urb *urb;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			retval = -ENOMEM;
			goto error;
		}

		es2->cport_out_urb[i] = urb;
		es2->cport_out_urb_busy[i] = false;	/* just to be anal */
	}

	/* XXX We will need to rename this per APB */
	es2->apb_log_enable_dentry = debugfs_create_file("apb_log_enable",
							 0644,
							 gb_debugfs_get(), es2,
							 &apb_log_enable_fops);

	INIT_LIST_HEAD(&es2->arpcs);
	spin_lock_init(&es2->arpc_lock);

	retval = es2_arpc_in_enable(es2);
	if (retval)
		goto error;

	retval = gb_hd_add(hd);
	if (retval)
		goto err_disable_arpc_in;

	retval = es2_cport_in_enable(es2, &es2->cport_in);
	if (retval)
		goto err_hd_del;

	return 0;

err_hd_del:
	gb_hd_del(hd);
err_disable_arpc_in:
	es2_arpc_in_disable(es2);
error:
	es2_destroy(es2);

	return retval;
}
static void ap_disconnect(struct usb_interface *interface)
{
	struct es2_ap_dev *es2 = usb_get_intfdata(interface);

	gb_hd_del(es2->hd);

	es2_cport_in_disable(es2, &es2->cport_in);
	es2_arpc_in_disable(es2);

	es2_destroy(es2);
}
static struct usb_driver es2_ap_driver = {
	.name		= "es2_ap_driver",
	.probe		= ap_probe,
	.disconnect	= ap_disconnect,
	.id_table	= id_table,
};

module_usb_driver(es2_ap_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@linuxfoundation.org>");