// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus "AP" USB driver for "ES2" controller chips
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 */
#include <linux/kthread.h>
#include <linux/sizes.h>
#include <linux/usb.h>
#include <linux/kfifo.h>
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/greybus.h>
#include <linux/unaligned.h>

#include "greybus_trace.h"

/* Default timeout for USB vendor requests. */
#define ES2_USB_CTRL_TIMEOUT	500

/* Default timeout for ARPC CPort requests */
#define ES2_ARPC_CPORT_TIMEOUT	500

/* Fixed CPort numbers */
#define ES2_CPORT_CDSI0		16
#define ES2_CPORT_CDSI1		17

/* Memory sizes for the buffers sent to/from the ES2 controller */
#define ES2_GBUF_MSG_SIZE_MAX	2048

/* Memory sizes for the ARPC buffers */
#define ARPC_OUT_SIZE_MAX	U16_MAX
#define ARPC_IN_SIZE_MAX	128

static const struct usb_device_id id_table[] = {
	{ USB_DEVICE(0x18d1, 0x1eaf) },
	{ },
};
MODULE_DEVICE_TABLE(usb, id_table);

#define APB1_LOG_SIZE		SZ_16K

/*
 * Number of CPort IN urbs in flight at any point in time.
 * Adjust if we are having stalls in the USB buffer due to not enough urbs in
 * the system.
 */
#define NUM_CPORT_IN_URB	4

/* Number of CPort OUT urbs in flight at any point in time.
 * Adjust if we get messages saying we are out of urbs in the system log.
 */
#define NUM_CPORT_OUT_URB	8

/*
 * Number of ARPC in urbs in flight at any point in time.
 */
#define NUM_ARPC_IN_URB		2

/*
 * @endpoint: bulk in endpoint for CPort data
 * @urb: array of urbs for the CPort in messages
 * @buffer: array of buffers for the @cport_in_urb urbs
 */
struct es2_cport_in {
	__u8 endpoint;
	struct urb *urb[NUM_CPORT_IN_URB];
	u8 *buffer[NUM_CPORT_IN_URB];
};

/**
 * struct es2_ap_dev - ES2 USB Bridge to AP structure
 * @usb_dev: pointer to the USB device we are.
 * @usb_intf: pointer to the USB interface we are bound to.
 * @hd: pointer to our gb_host_device structure
 *
 * @cport_in: endpoint, urbs and buffer for cport in messages
 * @cport_out_endpoint: endpoint for cport out messages
 * @cport_out_urb: array of urbs for the CPort out messages
 * @cport_out_urb_busy: array of flags to see if the @cport_out_urb is busy or
 *			not
 * @cport_out_urb_cancelled: array of flags indicating whether the
 *			corresponding @cport_out_urb is being cancelled
 * @cport_out_urb_lock: locks the @cport_out_urb_busy "list"
 * @cdsi1_in_use: true if cport CDSI1 is in use
 * @apb_log_task: task pointer for logging thread
 * @apb_log_dentry: file system entry for the log file interface
 * @apb_log_enable_dentry: file system entry for enabling logging
 * @apb_log_fifo: kernel FIFO to carry logged data
 * @arpc_urb: array of urbs for the ARPC in messages
 * @arpc_buffer: array of buffers for the @arpc_urb urbs
 * @arpc_endpoint_in: bulk in endpoint for APBridgeA RPC
 * @arpc_id_cycle: gives a unique id to ARPC
 * @arpc_lock: locks ARPC list
 * @arpcs: list of in progress ARPCs
 */
struct es2_ap_dev {
	struct usb_device *usb_dev;
	struct usb_interface *usb_intf;
	struct gb_host_device *hd;

	struct es2_cport_in cport_in;
	__u8 cport_out_endpoint;
	struct urb *cport_out_urb[NUM_CPORT_OUT_URB];
	bool cport_out_urb_busy[NUM_CPORT_OUT_URB];
	bool cport_out_urb_cancelled[NUM_CPORT_OUT_URB];
	spinlock_t cport_out_urb_lock;

	bool cdsi1_in_use;

	struct task_struct *apb_log_task;
	struct dentry *apb_log_dentry;
	struct dentry *apb_log_enable_dentry;
	DECLARE_KFIFO(apb_log_fifo, char, APB1_LOG_SIZE);

	__u8 arpc_endpoint_in;
	struct urb *arpc_urb[NUM_ARPC_IN_URB];
	u8 *arpc_buffer[NUM_ARPC_IN_URB];

	int arpc_id_cycle;
	spinlock_t arpc_lock;
	struct list_head arpcs;
};

struct arpc {
	struct list_head list;
	struct arpc_request_message *req;
	struct arpc_response_message *resp;
	struct completion response_received;
	bool active;
};

static inline struct es2_ap_dev *hd_to_es2(struct gb_host_device *hd)
{
	return (struct es2_ap_dev *)&hd->hd_priv;
}

static void cport_out_callback(struct urb *urb);
static void usb_log_enable(struct es2_ap_dev *es2);
static void usb_log_disable(struct es2_ap_dev *es2);
static int arpc_sync(struct es2_ap_dev *es2, u8 type, void *payload,
		     size_t size, int *result, unsigned int timeout);

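/*
 * Helpers for sending vendor requests on the control endpoint. The
 * synchronous variant below copies the caller's payload with kmemdup()
 * because usb_control_msg() needs a kmalloc'ed (DMA-able) buffer rather
 * than, say, a stack buffer.
 */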
static int output_sync(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd)
{
	struct usb_device *udev = es2->usb_dev;
	u8 *data;
	int retval;

	data = kmemdup(req, size, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 cmd,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE,
				 0, 0, data, size, ES2_USB_CTRL_TIMEOUT);
	if (retval < 0)
		dev_err(&udev->dev, "%s: return error %d\n", __func__, retval);
	else
		retval = 0;

	kfree(data);
	return retval;
}

static void ap_urb_complete(struct urb *urb)
{
	struct usb_ctrlrequest *dr = urb->context;

	kfree(dr);
	usb_free_urb(urb);
}

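/*
 * The asynchronous variant allocates the control request and the payload
 * in a single buffer and submits the urb without waiting; both the request
 * and the urb are released in ap_urb_complete() above.
 */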
static int output_async(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd)
{
	struct usb_device *udev = es2->usb_dev;
	struct urb *urb;
	struct usb_ctrlrequest *dr;
	u8 *buf;
	int retval;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		return -ENOMEM;

	dr = kmalloc(sizeof(*dr) + size, GFP_ATOMIC);
	if (!dr) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	buf = (u8 *)dr + sizeof(*dr);
	memcpy(buf, req, size);

	dr->bRequest = cmd;
	dr->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE;
	dr->wValue = 0;
	dr->wIndex = 0;
	dr->wLength = cpu_to_le16(size);

	usb_fill_control_urb(urb, udev, usb_sndctrlpipe(udev, 0),
			     (unsigned char *)dr, buf, size,
			     ap_urb_complete, dr);
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval) {
		usb_free_urb(urb);
		kfree(dr);
	}
	return retval;
}

static int output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
		  bool async)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);

	if (async)
		return output_async(es2, req, size, cmd);

	return output_sync(es2, req, size, cmd);
}

static int es2_cport_in_enable(struct es2_ap_dev *es2,
			       struct es2_cport_in *cport_in)
{
	struct urb *urb;
	int ret;
	int i;

	for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
		urb = cport_in->urb[i];

		ret = usb_submit_urb(urb, GFP_KERNEL);
		if (ret) {
			dev_err(&es2->usb_dev->dev,
				"failed to submit in-urb: %d\n", ret);
			goto err_kill_urbs;
		}
	}

	return 0;

err_kill_urbs:
	for (--i; i >= 0; --i) {
		urb = cport_in->urb[i];
		usb_kill_urb(urb);
	}

	return ret;
}

static void es2_cport_in_disable(struct es2_ap_dev *es2,
				 struct es2_cport_in *cport_in)
{
	struct urb *urb;
	int i;

	for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
		urb = cport_in->urb[i];
		usb_kill_urb(urb);
	}
}

static int es2_arpc_in_enable(struct es2_ap_dev *es2)
{
	struct urb *urb;
	int ret;
	int i;

	for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
		urb = es2->arpc_urb[i];

		ret = usb_submit_urb(urb, GFP_KERNEL);
		if (ret) {
			dev_err(&es2->usb_dev->dev,
				"failed to submit arpc in-urb: %d\n", ret);
			goto err_kill_urbs;
		}
	}

	return 0;

err_kill_urbs:
	for (--i; i >= 0; --i) {
		urb = es2->arpc_urb[i];
		usb_kill_urb(urb);
	}

	return ret;
}

static void es2_arpc_in_disable(struct es2_ap_dev *es2)
{
	struct urb *urb;
	int i;

	for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
		urb = es2->arpc_urb[i];
		usb_kill_urb(urb);
	}
}

static struct urb *next_free_urb(struct es2_ap_dev *es2, gfp_t gfp_mask)
{
	struct urb *urb = NULL;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);

	/* Look in our pool of allocated urbs first, as that's the "fastest" */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		if (!es2->cport_out_urb_busy[i] &&
		    !es2->cport_out_urb_cancelled[i]) {
			es2->cport_out_urb_busy[i] = true;
			urb = es2->cport_out_urb[i];
			break;
		}
	}
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
	if (urb)
		return urb;

	/*
	 * The pool is empty, so note it in the log and allocate one
	 * dynamically, as we have to succeed.
	 */
	dev_dbg(&es2->usb_dev->dev,
		"No free CPort OUT urbs, having to dynamically allocate one!\n");
	return usb_alloc_urb(0, gfp_mask);
}

static void free_urb(struct es2_ap_dev *es2, struct urb *urb)
{
	unsigned long flags;
	int i;
	/*
	 * See if this was an urb in our pool; if so mark it "free", otherwise
	 * we need to free it ourselves.
	 */
	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		if (urb == es2->cport_out_urb[i]) {
			es2->cport_out_urb_busy[i] = false;
			urb = NULL;
			break;
		}
	}
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

	/* If urb is not NULL, then we need to free this urb */
	usb_free_urb(urb);
}

/*
 * We (ab)use the operation-message header pad bytes to transfer the
 * cport id in order to minimise overhead.
 */
static void
gb_message_cport_pack(struct gb_operation_msg_hdr *header, u16 cport_id)
{
	header->pad[0] = cport_id;
}

/* Clear the pad bytes used for the CPort id */
static void gb_message_cport_clear(struct gb_operation_msg_hdr *header)
{
	header->pad[0] = 0;
}

/* Extract the CPort id packed into the header, and clear it */
static u16 gb_message_cport_unpack(struct gb_operation_msg_hdr *header)
{
	u16 cport_id = header->pad[0];

	gb_message_cport_clear(header);

	return cport_id;
}

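/*
 * Note that only pad[0] carries the id, so a packed CPort id must fit in a
 * single byte; apb_get_cport_count() below caps the advertised CPort count
 * at U8_MAX for the same reason.
 */
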
/*
 * Returns zero if the message was successfully queued, or a negative errno
 * otherwise.
 */
static int message_send(struct gb_host_device *hd, u16 cport_id,
			struct gb_message *message, gfp_t gfp_mask)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;
	size_t buffer_size;
	int retval;
	struct urb *urb;
	unsigned long flags;

	/*
	 * The data actually transferred will include an indication
	 * of where the data should be sent. Do one last check of
	 * the target CPort id before filling it in.
	 */
	if (!cport_id_valid(hd, cport_id)) {
		dev_err(&udev->dev, "invalid cport %u\n", cport_id);
		return -EINVAL;
	}

	/* Find a free urb */
	urb = next_free_urb(es2, gfp_mask);
	if (!urb)
		return -ENOMEM;

	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
	message->hcpriv = urb;
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

	/* Pack the cport id into the message header */
	gb_message_cport_pack(message->header, cport_id);

	buffer_size = sizeof(*message->header) + message->payload_size;

	usb_fill_bulk_urb(urb, udev,
			  usb_sndbulkpipe(udev,
					  es2->cport_out_endpoint),
			  message->buffer, buffer_size,
			  cport_out_callback, message);
	urb->transfer_flags |= URB_ZERO_PACKET;

	trace_gb_message_submit(message);

	retval = usb_submit_urb(urb, gfp_mask);
	if (retval) {
		dev_err(&udev->dev, "failed to submit out-urb: %d\n", retval);

		spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
		message->hcpriv = NULL;
		spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

		free_urb(es2, urb);
		gb_message_cport_clear(message->header);

		return retval;
	}

	return 0;
}

/*
 * Can not be called in atomic context.
 */
static void message_cancel(struct gb_message *message)
{
	struct gb_host_device *hd = message->operation->connection->hd;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct urb *urb;
	int i;

	might_sleep();

	spin_lock_irq(&es2->cport_out_urb_lock);
	urb = message->hcpriv;

	/* Prevent dynamically allocated urb from being deallocated. */
	usb_get_urb(urb);

	/* Prevent pre-allocated urb from being reused. */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		if (urb == es2->cport_out_urb[i]) {
			es2->cport_out_urb_cancelled[i] = true;
			break;
		}
	}
	spin_unlock_irq(&es2->cport_out_urb_lock);

	usb_kill_urb(urb);

	if (i < NUM_CPORT_OUT_URB) {
		spin_lock_irq(&es2->cport_out_urb_lock);
		es2->cport_out_urb_cancelled[i] = false;
		spin_unlock_irq(&es2->cport_out_urb_lock);
	}

	usb_free_urb(urb);
}

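/*
 * CPort allocation. CDSI0 and CDSI1 are fixed CPort numbers that are
 * reserved at probe time and never handed out by the id allocator; an
 * offloaded connection may instead claim CDSI1 explicitly via the
 * GB_CONNECTION_FLAG_CDSI1 flag.
 */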
static int es2_cport_allocate(struct gb_host_device *hd, int cport_id,
			      unsigned long flags)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct ida *id_map = &hd->cport_id_map;
	int ida_start, ida_end;

	switch (cport_id) {
	case ES2_CPORT_CDSI0:
	case ES2_CPORT_CDSI1:
		dev_err(&hd->dev, "cport %d not available\n", cport_id);
		return -EBUSY;
	}

	if (flags & GB_CONNECTION_FLAG_OFFLOADED &&
	    flags & GB_CONNECTION_FLAG_CDSI1) {
		if (es2->cdsi1_in_use) {
			dev_err(&hd->dev, "CDSI1 already in use\n");
			return -EBUSY;
		}

		es2->cdsi1_in_use = true;

		return ES2_CPORT_CDSI1;
	}

	if (cport_id < 0) {
		ida_start = 0;
		ida_end = hd->num_cports - 1;
	} else if (cport_id < hd->num_cports) {
		ida_start = cport_id;
		ida_end = cport_id;
	} else {
		dev_err(&hd->dev, "cport %d not available\n", cport_id);
		return -EINVAL;
	}

	return ida_alloc_range(id_map, ida_start, ida_end, GFP_KERNEL);
}

static void es2_cport_release(struct gb_host_device *hd, u16 cport_id)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);

	switch (cport_id) {
	case ES2_CPORT_CDSI1:
		es2->cdsi1_in_use = false;
		return;
	}

	ida_free(&hd->cport_id_map, cport_id);
}

static int cport_enable(struct gb_host_device *hd, u16 cport_id,
			unsigned long flags)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;
	struct gb_apb_request_cport_flags *req;
	u32 connection_flags;
	int ret;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	connection_flags = 0;
	if (flags & GB_CONNECTION_FLAG_CONTROL)
		connection_flags |= GB_APB_CPORT_FLAG_CONTROL;
	if (flags & GB_CONNECTION_FLAG_HIGH_PRIO)
		connection_flags |= GB_APB_CPORT_FLAG_HIGH_PRIO;

	req->flags = cpu_to_le32(connection_flags);

	dev_dbg(&hd->dev, "%s - cport = %u, flags = %02x\n", __func__,
		cport_id, connection_flags);

	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			      GB_APB_REQUEST_CPORT_FLAGS,
			      USB_DIR_OUT | USB_TYPE_VENDOR |
			      USB_RECIP_INTERFACE, cport_id, 0,
			      req, sizeof(*req), ES2_USB_CTRL_TIMEOUT);
	if (ret < 0) {
		dev_err(&udev->dev, "failed to set cport flags for port %d\n",
			cport_id);
		goto out;
	}

	ret = 0;
out:
	kfree(req);

	return ret;
}

static int es2_cport_connected(struct gb_host_device *hd, u16 cport_id)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct device *dev = &es2->usb_dev->dev;
	struct arpc_cport_connected_req req;
	int ret;

	req.cport_id = cpu_to_le16(cport_id);
	ret = arpc_sync(es2, ARPC_TYPE_CPORT_CONNECTED, &req, sizeof(req),
			NULL, ES2_ARPC_CPORT_TIMEOUT);
	if (ret) {
		dev_err(dev, "failed to set connected state for cport %u: %d\n",
			cport_id, ret);
		return ret;
	}

	return 0;
}

static int es2_cport_flush(struct gb_host_device *hd, u16 cport_id)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct device *dev = &es2->usb_dev->dev;
	struct arpc_cport_flush_req req;
	int ret;

	req.cport_id = cpu_to_le16(cport_id);
	ret = arpc_sync(es2, ARPC_TYPE_CPORT_FLUSH, &req, sizeof(req),
			NULL, ES2_ARPC_CPORT_TIMEOUT);
	if (ret) {
		dev_err(dev, "failed to flush cport %u: %d\n", cport_id, ret);
		return ret;
	}

	return 0;
}

static int es2_cport_shutdown(struct gb_host_device *hd, u16 cport_id,
			      u8 phase, unsigned int timeout)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct device *dev = &es2->usb_dev->dev;
	struct arpc_cport_shutdown_req req;
	int result;
	int ret;

	if (timeout > U16_MAX)
		return -EINVAL;

	req.cport_id = cpu_to_le16(cport_id);
	req.timeout = cpu_to_le16(timeout);
	req.phase = phase;
	ret = arpc_sync(es2, ARPC_TYPE_CPORT_SHUTDOWN, &req, sizeof(req),
			&result, ES2_ARPC_CPORT_TIMEOUT + timeout);
	if (ret) {
		dev_err(dev, "failed to send shutdown over cport %u: %d (%d)\n",
			cport_id, ret, result);
		return ret;
	}

	return 0;
}

static int es2_cport_quiesce(struct gb_host_device *hd, u16 cport_id,
			     size_t peer_space, unsigned int timeout)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct device *dev = &es2->usb_dev->dev;
	struct arpc_cport_quiesce_req req;
	int result;
	int ret;

	if (peer_space > U16_MAX)
		return -EINVAL;

	if (timeout > U16_MAX)
		return -EINVAL;

	req.cport_id = cpu_to_le16(cport_id);
	req.peer_space = cpu_to_le16(peer_space);
	req.timeout = cpu_to_le16(timeout);
	ret = arpc_sync(es2, ARPC_TYPE_CPORT_QUIESCE, &req, sizeof(req),
			&result, ES2_ARPC_CPORT_TIMEOUT + timeout);
	if (ret) {
		dev_err(dev, "failed to quiesce cport %u: %d (%d)\n",
			cport_id, ret, result);
		return ret;
	}

	return 0;
}

static int es2_cport_clear(struct gb_host_device *hd, u16 cport_id)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct device *dev = &es2->usb_dev->dev;
	struct arpc_cport_clear_req req;
	int ret;

	req.cport_id = cpu_to_le16(cport_id);
	ret = arpc_sync(es2, ARPC_TYPE_CPORT_CLEAR, &req, sizeof(req),
			NULL, ES2_ARPC_CPORT_TIMEOUT);
	if (ret) {
		dev_err(dev, "failed to clear cport %u: %d\n", cport_id, ret);
		return ret;
	}

	return 0;
}

static int latency_tag_enable(struct gb_host_device *hd, u16 cport_id)
{
	int retval;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 GB_APB_REQUEST_LATENCY_TAG_EN,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, cport_id, 0, NULL,
				 0, ES2_USB_CTRL_TIMEOUT);

	if (retval < 0)
		dev_err(&udev->dev, "Cannot enable latency tag for cport %d\n",
			cport_id);
	return retval;
}

static int latency_tag_disable(struct gb_host_device *hd, u16 cport_id)
{
	int retval;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 GB_APB_REQUEST_LATENCY_TAG_DIS,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, cport_id, 0, NULL,
				 0, ES2_USB_CTRL_TIMEOUT);

	if (retval < 0)
		dev_err(&udev->dev, "Cannot disable latency tag for cport %d\n",
			cport_id);
	return retval;
}

static struct gb_hd_driver es2_driver = {
	.hd_priv_size		= sizeof(struct es2_ap_dev),
	.message_send		= message_send,
	.message_cancel		= message_cancel,
	.cport_allocate		= es2_cport_allocate,
	.cport_release		= es2_cport_release,
	.cport_enable		= cport_enable,
	.cport_connected	= es2_cport_connected,
	.cport_flush		= es2_cport_flush,
	.cport_shutdown		= es2_cport_shutdown,
	.cport_quiesce		= es2_cport_quiesce,
	.cport_clear		= es2_cport_clear,
	.latency_tag_enable	= latency_tag_enable,
	.latency_tag_disable	= latency_tag_disable,
	.output			= output,
};

/* Common function to report consistent warnings based on URB status */
static int check_urb_status(struct urb *urb)
{
	struct device *dev = &urb->dev->dev;
	int status = urb->status;

	switch (status) {
	case 0:
		return 0;

	case -EOVERFLOW:
		dev_err(dev, "%s: overflow actual length is %d\n",
			__func__, urb->actual_length);
		fallthrough;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
	case -EILSEQ:
	case -EPROTO:
		/* device is gone, stop sending */
		return status;
	}
	dev_err(dev, "%s: unknown status %d\n", __func__, status);

	return -EAGAIN;
}

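/*
 * Tear down everything allocated in ap_probe(); called from ap_disconnect()
 * and from the probe error path.
 */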
static void es2_destroy(struct es2_ap_dev *es2)
{
	struct usb_device *udev;
	struct urb *urb;
	int i;

	debugfs_remove(es2->apb_log_enable_dentry);
	usb_log_disable(es2);

	/* Tear down everything! */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		urb = es2->cport_out_urb[i];
		usb_kill_urb(urb);
		usb_free_urb(urb);
		es2->cport_out_urb[i] = NULL;
		es2->cport_out_urb_busy[i] = false;	/* just to be safe */
	}

	for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
		usb_free_urb(es2->arpc_urb[i]);
		kfree(es2->arpc_buffer[i]);
		es2->arpc_buffer[i] = NULL;
	}

	for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
		usb_free_urb(es2->cport_in.urb[i]);
		kfree(es2->cport_in.buffer[i]);
		es2->cport_in.buffer[i] = NULL;
	}

	/* release reserved CDSI0 and CDSI1 cports */
	gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI1);
	gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI0);

	usb_set_intfdata(es2->usb_intf, NULL);
	udev = es2->usb_dev;
	gb_hd_put(es2->hd);

	usb_put_dev(udev);
}

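/*
 * Receive path: each CPort IN urb completion validates the message, hands
 * the buffer to greybus_data_rcvd() and then resubmits the urb so that
 * NUM_CPORT_IN_URB receives stay queued, unless the urb is being unlinked.
 */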
static void cport_in_callback(struct urb *urb)
{
	struct gb_host_device *hd = urb->context;
	struct device *dev = &urb->dev->dev;
	struct gb_operation_msg_hdr *header;
	int status = check_urb_status(urb);
	int retval;
	u16 cport_id;

	if (status) {
		if ((status == -EAGAIN) || (status == -EPROTO))
			goto exit;

		/* The urb is being unlinked */
		if (status == -ENOENT || status == -ESHUTDOWN)
			return;

		dev_err(dev, "urb cport in error %d (dropped)\n", status);
		return;
	}

	if (urb->actual_length < sizeof(*header)) {
		dev_err(dev, "short message received\n");
		goto exit;
	}

	/* Extract the CPort id, which is packed in the message header */
	header = urb->transfer_buffer;
	cport_id = gb_message_cport_unpack(header);

	if (cport_id_valid(hd, cport_id)) {
		greybus_data_rcvd(hd, cport_id, urb->transfer_buffer,
				  urb->actual_length);
	} else {
		dev_err(dev, "invalid cport id %u received\n", cport_id);
	}
exit:
	/* put our urb back in the request pool */
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		dev_err(dev, "failed to resubmit in-urb: %d\n", retval);
}

static void cport_out_callback(struct urb *urb)
{
	struct gb_message *message = urb->context;
	struct gb_host_device *hd = message->operation->connection->hd;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	int status = check_urb_status(urb);
	unsigned long flags;

	gb_message_cport_clear(message->header);

	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
	message->hcpriv = NULL;
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

	/*
	 * Tell the submitter that the message send (attempt) is
	 * complete, and report the status.
	 */
	greybus_message_sent(hd, message, status);

	free_urb(es2, urb);
}

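/*
 * APBridgeA RPC (ARPC) support. A request is sent to the bridge as a
 * GB_APB_REQUEST_ARPC_RUN vendor control transfer; the response comes back
 * on the dedicated arpc_endpoint_in bulk endpoint and is matched to the
 * outstanding request by its id under arpc_lock, which then completes
 * response_received for the waiting arpc_sync() caller.
 */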
static struct arpc *arpc_alloc(void *payload, u16 size, u8 type)
{
	struct arpc *rpc;

	if (size + sizeof(*rpc->req) > ARPC_OUT_SIZE_MAX)
		return NULL;

	rpc = kzalloc(sizeof(*rpc), GFP_KERNEL);
	if (!rpc)
		return NULL;

	INIT_LIST_HEAD(&rpc->list);
	rpc->req = kzalloc(sizeof(*rpc->req) + size, GFP_KERNEL);
	if (!rpc->req)
		goto err_free_rpc;

	rpc->resp = kzalloc(sizeof(*rpc->resp), GFP_KERNEL);
	if (!rpc->resp)
		goto err_free_req;

	rpc->req->type = type;
	rpc->req->size = cpu_to_le16(sizeof(*rpc->req) + size);
	memcpy(rpc->req->data, payload, size);

	init_completion(&rpc->response_received);

	return rpc;

err_free_req:
	kfree(rpc->req);
err_free_rpc:
	kfree(rpc);

	return NULL;
}

static void arpc_free(struct arpc *rpc)
{
	kfree(rpc->req);
	kfree(rpc->resp);
	kfree(rpc);
}

static struct arpc *arpc_find(struct es2_ap_dev *es2, __le16 id)
{
	struct arpc *rpc;

	list_for_each_entry(rpc, &es2->arpcs, list) {
		if (rpc->req->id == id)
			return rpc;
	}

	return NULL;
}

static void arpc_add(struct es2_ap_dev *es2, struct arpc *rpc)
{
	rpc->active = true;
	rpc->req->id = cpu_to_le16(es2->arpc_id_cycle++);
	list_add_tail(&rpc->list, &es2->arpcs);
}

static void arpc_del(struct es2_ap_dev *es2, struct arpc *rpc)
{
	if (rpc->active) {
		rpc->active = false;
		list_del(&rpc->list);
	}
}

static int arpc_send(struct es2_ap_dev *es2, struct arpc *rpc, int timeout)
{
	struct usb_device *udev = es2->usb_dev;
	int retval;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 GB_APB_REQUEST_ARPC_RUN,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE,
				 0, 0,
				 rpc->req, le16_to_cpu(rpc->req->size),
				 ES2_USB_CTRL_TIMEOUT);
	if (retval < 0) {
		dev_err(&udev->dev,
			"failed to send ARPC request %d: %d\n",
			rpc->req->type, retval);
		return retval;
	}

	return 0;
}

static int arpc_sync(struct es2_ap_dev *es2, u8 type, void *payload,
		     size_t size, int *result, unsigned int timeout)
{
	struct arpc *rpc;
	unsigned long flags;
	int retval;

	if (result)
		*result = 0;

	rpc = arpc_alloc(payload, size, type);
	if (!rpc)
		return -ENOMEM;

	spin_lock_irqsave(&es2->arpc_lock, flags);
	arpc_add(es2, rpc);
	spin_unlock_irqrestore(&es2->arpc_lock, flags);

	retval = arpc_send(es2, rpc, timeout);
	if (retval)
		goto out_arpc_del;

	retval = wait_for_completion_interruptible_timeout(
						&rpc->response_received,
						msecs_to_jiffies(timeout));
	if (retval <= 0) {
		if (!retval)
			retval = -ETIMEDOUT;
		goto out_arpc_del;
	}

	if (rpc->resp->result) {
		retval = -EREMOTEIO;
		if (result)
			*result = rpc->resp->result;
	} else {
		retval = 0;
	}

out_arpc_del:
	spin_lock_irqsave(&es2->arpc_lock, flags);
	arpc_del(es2, rpc);
	spin_unlock_irqrestore(&es2->arpc_lock, flags);

	if (retval < 0 && retval != -EREMOTEIO) {
		dev_err(&es2->usb_dev->dev,
			"failed to execute ARPC: %d\n", retval);
	}

	arpc_free(rpc);

	return retval;
}

static void arpc_in_callback(struct urb *urb)
{
	struct es2_ap_dev *es2 = urb->context;
	struct device *dev = &urb->dev->dev;
	int status = check_urb_status(urb);
	struct arpc *rpc;
	struct arpc_response_message *resp;
	unsigned long flags;
	int retval;

	if (status) {
		if ((status == -EAGAIN) || (status == -EPROTO))
			goto exit;

		/* The urb is being unlinked */
		if (status == -ENOENT || status == -ESHUTDOWN)
			return;

		dev_err(dev, "arpc in-urb error %d (dropped)\n", status);
		return;
	}

	if (urb->actual_length < sizeof(*resp)) {
		dev_err(dev, "short arpc response received\n");
		goto exit;
	}

	resp = urb->transfer_buffer;
	spin_lock_irqsave(&es2->arpc_lock, flags);
	rpc = arpc_find(es2, resp->id);
	if (!rpc) {
		dev_err(dev, "invalid arpc response id received: %u\n",
			le16_to_cpu(resp->id));
		spin_unlock_irqrestore(&es2->arpc_lock, flags);
		goto exit;
	}

	arpc_del(es2, rpc);
	memcpy(rpc->resp, resp, sizeof(*resp));
	complete(&rpc->response_received);
	spin_unlock_irqrestore(&es2->arpc_lock, flags);

exit:
	/* put our urb back in the request pool */
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		dev_err(dev, "failed to resubmit arpc in-urb: %d\n", retval);
}

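/*
 * APB log support: a kthread polls the bridge for log data via a vendor
 * control request and pushes it into apb_log_fifo, which is exposed through
 * the debugfs "apb_log" file; "apb_log_enable" starts and stops the thread.
 */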
#define APB1_LOG_MSG_SIZE	64
static void apb_log_get(struct es2_ap_dev *es2, char *buf)
{
	int retval;

	do {
		retval = usb_control_msg(es2->usb_dev,
					 usb_rcvctrlpipe(es2->usb_dev, 0),
					 GB_APB_REQUEST_LOG,
					 USB_DIR_IN | USB_TYPE_VENDOR |
					 USB_RECIP_INTERFACE,
					 0x00, 0x00,
					 buf,
					 APB1_LOG_MSG_SIZE,
					 ES2_USB_CTRL_TIMEOUT);
		if (retval > 0)
			kfifo_in(&es2->apb_log_fifo, buf, retval);
	} while (retval > 0);
}

static int apb_log_poll(void *data)
{
	struct es2_ap_dev *es2 = data;
	char *buf;

	buf = kmalloc(APB1_LOG_MSG_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	while (!kthread_should_stop()) {
		msleep(1000);
		apb_log_get(es2, buf);
	}

	kfree(buf);

	return 0;
}

static ssize_t apb_log_read(struct file *f, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct es2_ap_dev *es2 = file_inode(f)->i_private;
	ssize_t ret;
	size_t copied;
	char *tmp_buf;

	if (count > APB1_LOG_SIZE)
		count = APB1_LOG_SIZE;

	tmp_buf = kmalloc(count, GFP_KERNEL);
	if (!tmp_buf)
		return -ENOMEM;

	copied = kfifo_out(&es2->apb_log_fifo, tmp_buf, count);
	ret = simple_read_from_buffer(buf, count, ppos, tmp_buf, copied);

	kfree(tmp_buf);

	return ret;
}

static const struct file_operations apb_log_fops = {
	.read	= apb_log_read,
};

static void usb_log_enable(struct es2_ap_dev *es2)
{
	if (!IS_ERR_OR_NULL(es2->apb_log_task))
		return;

	/* get log from APB1 */
	es2->apb_log_task = kthread_run(apb_log_poll, es2, "apb_log");
	if (IS_ERR(es2->apb_log_task))
		return;
	/* XXX We will need to rename this per APB */
	es2->apb_log_dentry = debugfs_create_file("apb_log", 0444,
						  gb_debugfs_get(), es2,
						  &apb_log_fops);
}

static void usb_log_disable(struct es2_ap_dev *es2)
{
	if (IS_ERR_OR_NULL(es2->apb_log_task))
		return;

	debugfs_remove(es2->apb_log_dentry);
	es2->apb_log_dentry = NULL;

	kthread_stop(es2->apb_log_task);
	es2->apb_log_task = NULL;
}

static ssize_t apb_log_enable_read(struct file *f, char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct es2_ap_dev *es2 = file_inode(f)->i_private;
	int enable = !IS_ERR_OR_NULL(es2->apb_log_task);
	char tmp_buf[3];

	sprintf(tmp_buf, "%d\n", enable);
	return simple_read_from_buffer(buf, count, ppos, tmp_buf, 2);
}

static ssize_t apb_log_enable_write(struct file *f, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	int enable;
	ssize_t retval;
	struct es2_ap_dev *es2 = file_inode(f)->i_private;

	retval = kstrtoint_from_user(buf, count, 10, &enable);
	if (retval)
		return retval;

	if (enable)
		usb_log_enable(es2);
	else
		usb_log_disable(es2);

	return count;
}

static const struct file_operations apb_log_enable_fops = {
	.read	= apb_log_enable_read,
	.write	= apb_log_enable_write,
};

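/*
 * The number of CPorts the bridge supports is queried once at probe time and
 * passed to gb_hd_create(); es2_cport_allocate() above uses hd->num_cports to
 * bound CPort id allocation.
 */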
static int apb_get_cport_count(struct usb_device *udev)
{
	int retval;
	__le16 *cport_count;

	cport_count = kzalloc(sizeof(*cport_count), GFP_KERNEL);
	if (!cport_count)
		return -ENOMEM;

	retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				 GB_APB_REQUEST_CPORT_COUNT,
				 USB_DIR_IN | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, 0, 0, cport_count,
				 sizeof(*cport_count), ES2_USB_CTRL_TIMEOUT);
	if (retval != sizeof(*cport_count)) {
		dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n",
			retval);

		if (retval >= 0)
			retval = -EIO;

		goto out;
	}

	retval = le16_to_cpu(*cport_count);

	/* We need to fit a CPort ID in one byte of a message header */
	if (retval > U8_MAX) {
		retval = U8_MAX;
		dev_warn(&udev->dev, "Limiting number of CPorts to U8_MAX\n");
	}

out:
	kfree(cport_count);
	return retval;
}

/*
 * The ES2 USB Bridge device has 15 endpoints
 * 1 Control - usual USB stuff + AP -> APBridgeA messages
 * 7 Bulk IN - CPort data in
 * 7 Bulk OUT - CPort data out
 */
static int ap_probe(struct usb_interface *interface,
		    const struct usb_device_id *id)
{
	struct es2_ap_dev *es2;
	struct gb_host_device *hd;
	struct usb_device *udev;
	struct usb_host_interface *iface_desc;
	struct usb_endpoint_descriptor *endpoint;
	__u8 ep_addr;
	int retval;
	int i;
	int num_cports;
	bool bulk_out_found = false;
	bool bulk_in_found = false;
	bool arpc_in_found = false;

	udev = usb_get_dev(interface_to_usbdev(interface));

	num_cports = apb_get_cport_count(udev);
	if (num_cports < 0) {
		usb_put_dev(udev);
		dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n",
			num_cports);
		return num_cports;
	}

	hd = gb_hd_create(&es2_driver, &udev->dev, ES2_GBUF_MSG_SIZE_MAX,
			  num_cports);
	if (IS_ERR(hd)) {
		usb_put_dev(udev);
		return PTR_ERR(hd);
	}

	es2 = hd_to_es2(hd);
	es2->hd = hd;
	es2->usb_intf = interface;
	es2->usb_dev = udev;
	spin_lock_init(&es2->cport_out_urb_lock);
	INIT_KFIFO(es2->apb_log_fifo);
	usb_set_intfdata(interface, es2);

	/*
	 * Reserve the CDSI0 and CDSI1 CPorts so they won't be allocated
	 * dynamically.
	 */
	retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI0);
	if (retval)
		goto error;
	retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI1);
	if (retval)
		goto error;

	/* find all bulk endpoints */
	iface_desc = interface->cur_altsetting;
	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
		endpoint = &iface_desc->endpoint[i].desc;
		ep_addr = endpoint->bEndpointAddress;

		if (usb_endpoint_is_bulk_in(endpoint)) {
			if (!bulk_in_found) {
				es2->cport_in.endpoint = ep_addr;
				bulk_in_found = true;
			} else if (!arpc_in_found) {
				es2->arpc_endpoint_in = ep_addr;
				arpc_in_found = true;
			} else {
				dev_warn(&udev->dev,
					 "Unused bulk IN endpoint found: 0x%02x\n",
					 ep_addr);
			}
			continue;
		}
		if (usb_endpoint_is_bulk_out(endpoint)) {
			if (!bulk_out_found) {
				es2->cport_out_endpoint = ep_addr;
				bulk_out_found = true;
			} else {
				dev_warn(&udev->dev,
					 "Unused bulk OUT endpoint found: 0x%02x\n",
					 ep_addr);
			}
			continue;
		}
		dev_warn(&udev->dev,
			 "Unknown endpoint type found, address 0x%02x\n",
			 ep_addr);
	}
	if (!bulk_in_found || !arpc_in_found || !bulk_out_found) {
		dev_err(&udev->dev, "Not enough endpoints found in device, aborting!\n");
		retval = -ENODEV;
		goto error;
	}

	/* Allocate buffers for our cport in messages */
	for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
		struct urb *urb;
		u8 *buffer;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			retval = -ENOMEM;
			goto error;
		}
		es2->cport_in.urb[i] = urb;

		buffer = kmalloc(ES2_GBUF_MSG_SIZE_MAX, GFP_KERNEL);
		if (!buffer) {
			retval = -ENOMEM;
			goto error;
		}

		usb_fill_bulk_urb(urb, udev,
				  usb_rcvbulkpipe(udev, es2->cport_in.endpoint),
				  buffer, ES2_GBUF_MSG_SIZE_MAX,
				  cport_in_callback, hd);

		es2->cport_in.buffer[i] = buffer;
	}

	/* Allocate buffers for ARPC in messages */
	for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
		struct urb *urb;
		u8 *buffer;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			retval = -ENOMEM;
			goto error;
		}
		es2->arpc_urb[i] = urb;

		buffer = kmalloc(ARPC_IN_SIZE_MAX, GFP_KERNEL);
		if (!buffer) {
			retval = -ENOMEM;
			goto error;
		}

		usb_fill_bulk_urb(urb, udev,
				  usb_rcvbulkpipe(udev,
						  es2->arpc_endpoint_in),
				  buffer, ARPC_IN_SIZE_MAX,
				  arpc_in_callback, es2);

		es2->arpc_buffer[i] = buffer;
	}

	/* Allocate urbs for our CPort OUT messages */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		struct urb *urb;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			retval = -ENOMEM;
			goto error;
		}

		es2->cport_out_urb[i] = urb;
		es2->cport_out_urb_busy[i] = false;	/* just to be safe */
	}

	/* XXX We will need to rename this per APB */
	es2->apb_log_enable_dentry = debugfs_create_file("apb_log_enable",
							 0644,
							 gb_debugfs_get(), es2,
							 &apb_log_enable_fops);

	INIT_LIST_HEAD(&es2->arpcs);
	spin_lock_init(&es2->arpc_lock);

	retval = es2_arpc_in_enable(es2);
	if (retval)
		goto error;

	retval = gb_hd_add(hd);
	if (retval)
		goto err_disable_arpc_in;

	retval = es2_cport_in_enable(es2, &es2->cport_in);
	if (retval)
		goto err_hd_del;

	return 0;

err_hd_del:
	gb_hd_del(hd);
err_disable_arpc_in:
	es2_arpc_in_disable(es2);
error:
	es2_destroy(es2);

	return retval;
}

static void ap_disconnect(struct usb_interface *interface)
{
	struct es2_ap_dev *es2 = usb_get_intfdata(interface);

	gb_hd_del(es2->hd);

	es2_cport_in_disable(es2, &es2->cport_in);
	es2_arpc_in_disable(es2);
	es2_destroy(es2);
}

static struct usb_driver es2_ap_driver = {
	.name =		"es2_ap_driver",
	.probe =	ap_probe,
	.disconnect =	ap_disconnect,
	.id_table =	id_table,
	.soft_unbind =	1,
};

module_usb_driver(es2_ap_driver);

MODULE_DESCRIPTION("Greybus AP USB driver for ES2 controller chips");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@linuxfoundation.org>");