// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus "AP" USB driver for "ES2" controller chips
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 */
#include <linux/kthread.h>
#include <linux/sizes.h>
#include <linux/usb.h>
#include <linux/kfifo.h>
#include <linux/debugfs.h>
#include <linux/list.h>
#include <asm/unaligned.h>

#include "arpc.h"
#include "greybus.h"
#include "greybus_trace.h"
#include "connection.h"

/* Default timeout for USB vendor requests. */
#define ES2_USB_CTRL_TIMEOUT	500

/* Default timeout for ARPC CPort requests */
#define ES2_ARPC_CPORT_TIMEOUT	500

/* Fixed CPort numbers */
#define ES2_CPORT_CDSI0		16
#define ES2_CPORT_CDSI1		17

/* Memory sizes for the buffers sent to/from the ES2 controller */
#define ES2_GBUF_MSG_SIZE_MAX	2048

/* Memory sizes for the ARPC buffers */
#define ARPC_OUT_SIZE_MAX	U16_MAX
#define ARPC_IN_SIZE_MAX	128

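/* ES2 bridges enumerate with Google's USB vendor id (0x18d1). */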
static const struct usb_device_id id_table[] = {
	{ USB_DEVICE(0x18d1, 0x1eaf) },
	{ },
};
MODULE_DEVICE_TABLE(usb, id_table);

#define APB1_LOG_SIZE		SZ_16K

/*
 * Number of CPort IN urbs in flight at any point in time.
 * Adjust if we are having stalls in the USB buffer due to not enough urbs in
 * flight.
 */
#define NUM_CPORT_IN_URB	4

/*
 * Number of CPort OUT urbs in flight at any point in time.
 * Adjust if we get messages saying we are out of urbs in the system log.
 */
#define NUM_CPORT_OUT_URB	8

/*
 * Number of ARPC in urbs in flight at any point in time.
 */
#define NUM_ARPC_IN_URB		2

/*
 * @endpoint: bulk in endpoint for CPort data
 * @urb: array of urbs for the CPort in messages
 * @buffer: array of buffers for the @cport_in_urb urbs
 */
struct es2_cport_in {
	__u8 endpoint;
	struct urb *urb[NUM_CPORT_IN_URB];
	u8 *buffer[NUM_CPORT_IN_URB];
};

/**
 * es2_ap_dev - ES2 USB Bridge to AP structure
 * @usb_dev: pointer to the USB device we are.
 * @usb_intf: pointer to the USB interface we are bound to.
 * @hd: pointer to our gb_host_device structure
 *
 * @cport_in: endpoint, urbs and buffer for cport in messages
 * @cport_out_endpoint: endpoint for cport out messages
 * @cport_out_urb: array of urbs for the CPort out messages
 * @cport_out_urb_busy: array of flags to see if the @cport_out_urb is busy or
 *			not.
 * @cport_out_urb_cancelled: array of flags indicating whether the
 *			corresponding @cport_out_urb is being cancelled
 * @cport_out_urb_lock: locks the @cport_out_urb_busy "list"
 * @cdsi1_in_use: true if CPort CDSI1 is in use
 * @apb_log_task: task pointer for logging thread
 * @apb_log_dentry: file system entry for the log file interface
 * @apb_log_enable_dentry: file system entry for enabling logging
 * @apb_log_fifo: kernel FIFO to carry logged data
 * @arpc_urb: array of urbs for the ARPC in messages
 * @arpc_buffer: array of buffers for the @arpc_urb urbs
 * @arpc_endpoint_in: bulk in endpoint for APBridgeA RPC
 * @arpc_id_cycle: gives a unique id to each ARPC
 * @arpc_lock: locks ARPC list
 * @arpcs: list of in progress ARPCs
 */
struct es2_ap_dev {
	struct usb_device *usb_dev;
	struct usb_interface *usb_intf;
	struct gb_host_device *hd;

	struct es2_cport_in cport_in;
	__u8 cport_out_endpoint;
	struct urb *cport_out_urb[NUM_CPORT_OUT_URB];
	bool cport_out_urb_busy[NUM_CPORT_OUT_URB];
	bool cport_out_urb_cancelled[NUM_CPORT_OUT_URB];
	spinlock_t cport_out_urb_lock;

	bool cdsi1_in_use;

	struct task_struct *apb_log_task;
	struct dentry *apb_log_dentry;
	struct dentry *apb_log_enable_dentry;
	DECLARE_KFIFO(apb_log_fifo, char, APB1_LOG_SIZE);

	__u8 arpc_endpoint_in;
	struct urb *arpc_urb[NUM_ARPC_IN_URB];
	u8 *arpc_buffer[NUM_ARPC_IN_URB];

	int arpc_id_cycle;
	spinlock_t arpc_lock;
	struct list_head arpcs;
};

struct arpc {
	struct list_head list;
	struct arpc_request_message *req;
	struct arpc_response_message *resp;
	struct completion response_received;
};

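/*
 * The per-bridge state lives in the gb_host_device's hd_priv area (see
 * .hd_priv_size in es2_driver below), so hd_to_es2() is just a cast and no
 * separate allocation or teardown of struct es2_ap_dev is needed.
 */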
static inline struct es2_ap_dev *hd_to_es2(struct gb_host_device *hd)
{
	return (struct es2_ap_dev *)&hd->hd_priv;
}

static void cport_out_callback(struct urb *urb);
static void usb_log_enable(struct es2_ap_dev *es2);
static void usb_log_disable(struct es2_ap_dev *es2);
static int arpc_sync(struct es2_ap_dev *es2, u8 type, void *payload,
		     size_t size, int *result, unsigned int timeout);

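/*
 * Vendor requests to the bridge can be issued either blocking (output_sync(),
 * which may sleep in usb_control_msg()) or fire-and-forget (output_async(),
 * which builds a control urb with GFP_ATOMIC and frees the setup packet in
 * its completion handler). output() below picks between the two.
 */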
static int output_sync(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd)
{
	struct usb_device *udev = es2->usb_dev;
	u8 *data;
	int retval;

	data = kmemdup(req, size, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 cmd,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE,
				 0, 0, data, size, ES2_USB_CTRL_TIMEOUT);
	if (retval < 0)
		dev_err(&udev->dev, "%s: return error %d\n", __func__, retval);
	else
		retval = 0;

	kfree(data);
	return retval;
}

static void ap_urb_complete(struct urb *urb)
{
	struct usb_ctrlrequest *dr = urb->context;

	kfree(dr);
	usb_free_urb(urb);
}

static int output_async(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd)
{
	struct usb_device *udev = es2->usb_dev;
	struct urb *urb;
	struct usb_ctrlrequest *dr;
	u8 *buf;
	int retval;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		return -ENOMEM;

	dr = kmalloc(sizeof(*dr) + size, GFP_ATOMIC);
	if (!dr) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	buf = (u8 *)dr + sizeof(*dr);
	memcpy(buf, req, size);

	dr->bRequest = cmd;
	dr->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE;
	dr->wValue = 0;
	dr->wIndex = 0;
	dr->wLength = cpu_to_le16(size);

	usb_fill_control_urb(urb, udev, usb_sndctrlpipe(udev, 0),
			     (unsigned char *)dr, buf, size,
			     ap_urb_complete, dr);
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval) {
		usb_free_urb(urb);
		kfree(dr);
	}
	return retval;
}

static int output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
		  bool async)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);

	if (async)
		return output_async(es2, req, size, cmd);

	return output_sync(es2, req, size, cmd);
}

static int es2_cport_in_enable(struct es2_ap_dev *es2,
			       struct es2_cport_in *cport_in)
{
	struct urb *urb;
	int ret = 0;
	int i;

	for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
		urb = cport_in->urb[i];

		ret = usb_submit_urb(urb, GFP_KERNEL);
		if (ret) {
			dev_err(&es2->usb_dev->dev,
				"failed to submit in-urb: %d\n", ret);
			goto err_kill_urbs;
		}
	}

	return 0;

err_kill_urbs:
	for (--i; i >= 0; --i) {
		urb = cport_in->urb[i];
		usb_kill_urb(urb);
	}

	return ret;
}

static void es2_cport_in_disable(struct es2_ap_dev *es2,
				 struct es2_cport_in *cport_in)
{
	struct urb *urb;
	int i;

	for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
		urb = cport_in->urb[i];
		usb_kill_urb(urb);
	}
}

static int es2_arpc_in_enable(struct es2_ap_dev *es2)
{
	struct urb *urb;
	int ret = 0;
	int i;

	for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
		urb = es2->arpc_urb[i];

		ret = usb_submit_urb(urb, GFP_KERNEL);
		if (ret) {
			dev_err(&es2->usb_dev->dev,
				"failed to submit arpc in-urb: %d\n", ret);
			goto err_kill_urbs;
		}
	}

	return 0;

err_kill_urbs:
	for (--i; i >= 0; --i) {
		urb = es2->arpc_urb[i];
		usb_kill_urb(urb);
	}

	return ret;
}

static void es2_arpc_in_disable(struct es2_ap_dev *es2)
{
	struct urb *urb;
	int i;

	for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
		urb = es2->arpc_urb[i];
		usb_kill_urb(urb);
	}
}

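/*
 * CPort OUT urbs come from a small pre-allocated pool guarded by
 * cport_out_urb_lock. If the pool is exhausted, next_free_urb() falls back to
 * allocating one dynamically; free_urb() below copes with either kind.
 */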
static struct urb *next_free_urb(struct es2_ap_dev *es2, gfp_t gfp_mask)
{
	struct urb *urb = NULL;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);

	/* Look in our pool of allocated urbs first, as that's the "fastest" */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		if (es2->cport_out_urb_busy[i] == false &&
		    es2->cport_out_urb_cancelled[i] == false) {
			es2->cport_out_urb_busy[i] = true;
			urb = es2->cport_out_urb[i];
			break;
		}
	}
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
	if (urb)
		return urb;

	/*
	 * Crap, pool is empty, complain to the syslog and go allocate one
	 * dynamically as we have to succeed.
	 */
	dev_dbg(&es2->usb_dev->dev,
		"No free CPort OUT urbs, having to dynamically allocate one!\n");
	return usb_alloc_urb(0, gfp_mask);
}

static void free_urb(struct es2_ap_dev *es2, struct urb *urb)
{
	unsigned long flags;
	int i;

	/*
	 * See if this was an urb in our pool, if so mark it "free", otherwise
	 * we need to free it ourselves.
	 */
	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		if (urb == es2->cport_out_urb[i]) {
			es2->cport_out_urb_busy[i] = false;
			urb = NULL;
			break;
		}
	}
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

	/* If urb is not NULL, then we need to free this urb */
	usb_free_urb(urb);
}

/*
 * We (ab)use the operation-message header pad bytes to transfer the
 * cport id in order to minimise overhead.
 */
static void
gb_message_cport_pack(struct gb_operation_msg_hdr *header, u16 cport_id)
{
	header->pad[0] = cport_id;
}

/* Clear the pad bytes used for the CPort id */
static void gb_message_cport_clear(struct gb_operation_msg_hdr *header)
{
	header->pad[0] = 0;
}

/* Extract the CPort id packed into the header, and clear it */
static u16
gb_message_cport_unpack(struct gb_operation_msg_hdr *header)
{
	u16 cport_id = header->pad[0];

	gb_message_cport_clear(header);

	return cport_id;
}

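/*
 * Note that packing the CPort id into a single pad byte limits it to eight
 * bits on the wire, which is why apb_get_cport_count() clamps the advertised
 * CPort count to U8_MAX.
 */
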
/*
 * Returns zero if the message was successfully queued, or a negative errno
 * otherwise.
 */
static int message_send(struct gb_host_device *hd, u16 cport_id,
			struct gb_message *message, gfp_t gfp_mask)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;
	size_t buffer_size;
	int retval;
	struct urb *urb;
	unsigned long flags;

	/*
	 * The data actually transferred will include an indication
	 * of where the data should be sent.  Do one last check of
	 * the target CPort id before filling it in.
	 */
	if (!cport_id_valid(hd, cport_id)) {
		dev_err(&udev->dev, "invalid cport %u\n", cport_id);
		return -EINVAL;
	}

	/* Find a free urb */
	urb = next_free_urb(es2, gfp_mask);
	if (!urb)
		return -ENOMEM;

	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
	message->hcpriv = urb;
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

	/* Pack the cport id into the message header */
	gb_message_cport_pack(message->header, cport_id);

	buffer_size = sizeof(*message->header) + message->payload_size;

	usb_fill_bulk_urb(urb, udev,
			  usb_sndbulkpipe(udev,
					  es2->cport_out_endpoint),
			  message->buffer, buffer_size,
			  cport_out_callback, message);
	urb->transfer_flags |= URB_ZERO_PACKET;

	trace_gb_message_submit(message);

	retval = usb_submit_urb(urb, gfp_mask);
	if (retval) {
		dev_err(&udev->dev, "failed to submit out-urb: %d\n", retval);

		spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
		message->hcpriv = NULL;
		spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

		free_urb(es2, urb);
		gb_message_cport_clear(message->header);

		return retval;
	}

	return 0;
}

/*
 * Cannot be called in atomic context.
 */
static void message_cancel(struct gb_message *message)
{
	struct gb_host_device *hd = message->operation->connection->hd;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct urb *urb;
	int i;

	might_sleep();

	spin_lock_irq(&es2->cport_out_urb_lock);
	urb = message->hcpriv;

	/* Prevent dynamically allocated urb from being deallocated. */
	usb_get_urb(urb);

	/* Prevent pre-allocated urb from being reused. */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		if (urb == es2->cport_out_urb[i]) {
			es2->cport_out_urb_cancelled[i] = true;
			break;
		}
	}
	spin_unlock_irq(&es2->cport_out_urb_lock);

	usb_kill_urb(urb);

	if (i < NUM_CPORT_OUT_URB) {
		spin_lock_irq(&es2->cport_out_urb_lock);
		es2->cport_out_urb_cancelled[i] = false;
		spin_unlock_irq(&es2->cport_out_urb_lock);
	}

	usb_free_urb(urb);
}

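/*
 * CPort id allocation: CDSI0 and CDSI1 are reserved at probe time, and CDSI1
 * is only handed out explicitly for offloaded CDSI1 connections; every other
 * id comes from the host device's cport_id_map ida.
 */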
static int es2_cport_allocate(struct gb_host_device *hd, int cport_id,
			      unsigned long flags)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct ida *id_map = &hd->cport_id_map;
	int ida_start, ida_end;

	switch (cport_id) {
	case ES2_CPORT_CDSI0:
	case ES2_CPORT_CDSI1:
		dev_err(&hd->dev, "cport %d not available\n", cport_id);
		return -EBUSY;
	}

	if (flags & GB_CONNECTION_FLAG_OFFLOADED &&
	    flags & GB_CONNECTION_FLAG_CDSI1) {
		if (es2->cdsi1_in_use) {
			dev_err(&hd->dev, "CDSI1 already in use\n");
			return -EBUSY;
		}

		es2->cdsi1_in_use = true;

		return ES2_CPORT_CDSI1;
	}

	if (cport_id < 0) {
		ida_start = 0;
		ida_end = hd->num_cports;
	} else if (cport_id < hd->num_cports) {
		ida_start = cport_id;
		ida_end = cport_id + 1;
	} else {
		dev_err(&hd->dev, "cport %d not available\n", cport_id);
		return -EINVAL;
	}

	return ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
}

static void es2_cport_release(struct gb_host_device *hd, u16 cport_id)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);

	switch (cport_id) {
	case ES2_CPORT_CDSI1:
		es2->cdsi1_in_use = false;
		return;
	}

	ida_simple_remove(&hd->cport_id_map, cport_id);
}

static int cport_enable(struct gb_host_device *hd, u16 cport_id,
			unsigned long flags)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;
	struct gb_apb_request_cport_flags *req;
	u32 connection_flags;
	int ret;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	connection_flags = 0;
	if (flags & GB_CONNECTION_FLAG_CONTROL)
		connection_flags |= GB_APB_CPORT_FLAG_CONTROL;
	if (flags & GB_CONNECTION_FLAG_HIGH_PRIO)
		connection_flags |= GB_APB_CPORT_FLAG_HIGH_PRIO;

	req->flags = cpu_to_le32(connection_flags);

	dev_dbg(&hd->dev, "%s - cport = %u, flags = %02x\n", __func__,
		cport_id, connection_flags);

	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			      GB_APB_REQUEST_CPORT_FLAGS,
			      USB_DIR_OUT | USB_TYPE_VENDOR |
			      USB_RECIP_INTERFACE, cport_id, 0,
			      req, sizeof(*req), ES2_USB_CTRL_TIMEOUT);
	if (ret != sizeof(*req)) {
		dev_err(&udev->dev, "failed to set cport flags for port %d\n",
			cport_id);
		if (ret >= 0)
			ret = -EIO;

		goto out;
	}

	ret = 0;
out:
	kfree(req);

	return ret;
}

static int es2_cport_connected(struct gb_host_device *hd, u16 cport_id)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct device *dev = &es2->usb_dev->dev;
	struct arpc_cport_connected_req req;
	int ret;

	req.cport_id = cpu_to_le16(cport_id);
	ret = arpc_sync(es2, ARPC_TYPE_CPORT_CONNECTED, &req, sizeof(req),
			NULL, ES2_ARPC_CPORT_TIMEOUT);
	if (ret) {
		dev_err(dev, "failed to set connected state for cport %u: %d\n",
			cport_id, ret);
		return ret;
	}

	return 0;
}

static int es2_cport_flush(struct gb_host_device *hd, u16 cport_id)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct device *dev = &es2->usb_dev->dev;
	struct arpc_cport_flush_req req;
	int ret;

	req.cport_id = cpu_to_le16(cport_id);
	ret = arpc_sync(es2, ARPC_TYPE_CPORT_FLUSH, &req, sizeof(req),
			NULL, ES2_ARPC_CPORT_TIMEOUT);
	if (ret) {
		dev_err(dev, "failed to flush cport %u: %d\n", cport_id, ret);
		return ret;
	}

	return 0;
}

static int es2_cport_shutdown(struct gb_host_device *hd, u16 cport_id,
			      u8 phase, unsigned int timeout)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct device *dev = &es2->usb_dev->dev;
	struct arpc_cport_shutdown_req req;
	int result;
	int ret;

	if (timeout > U16_MAX)
		return -EINVAL;

	req.cport_id = cpu_to_le16(cport_id);
	req.timeout = cpu_to_le16(timeout);
	req.phase = phase;
	ret = arpc_sync(es2, ARPC_TYPE_CPORT_SHUTDOWN, &req, sizeof(req),
			&result, ES2_ARPC_CPORT_TIMEOUT + timeout);
	if (ret) {
		dev_err(dev, "failed to send shutdown over cport %u: %d (%d)\n",
			cport_id, ret, result);
		return ret;
	}

	return 0;
}

static int es2_cport_quiesce(struct gb_host_device *hd, u16 cport_id,
			     size_t peer_space, unsigned int timeout)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct device *dev = &es2->usb_dev->dev;
	struct arpc_cport_quiesce_req req;
	int result;
	int ret;

	if (peer_space > U16_MAX)
		return -EINVAL;

	if (timeout > U16_MAX)
		return -EINVAL;

	req.cport_id = cpu_to_le16(cport_id);
	req.peer_space = cpu_to_le16(peer_space);
	req.timeout = cpu_to_le16(timeout);
	ret = arpc_sync(es2, ARPC_TYPE_CPORT_QUIESCE, &req, sizeof(req),
			&result, ES2_ARPC_CPORT_TIMEOUT + timeout);
	if (ret) {
		dev_err(dev, "failed to quiesce cport %u: %d (%d)\n",
			cport_id, ret, result);
		return ret;
	}

	return 0;
}

static int es2_cport_clear(struct gb_host_device *hd, u16 cport_id)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct device *dev = &es2->usb_dev->dev;
	struct arpc_cport_clear_req req;
	int ret;

	req.cport_id = cpu_to_le16(cport_id);
	ret = arpc_sync(es2, ARPC_TYPE_CPORT_CLEAR, &req, sizeof(req),
			NULL, ES2_ARPC_CPORT_TIMEOUT);
	if (ret) {
		dev_err(dev, "failed to clear cport %u: %d\n", cport_id, ret);
		return ret;
	}

	return 0;
}

static int latency_tag_enable(struct gb_host_device *hd, u16 cport_id)
{
	int retval;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 GB_APB_REQUEST_LATENCY_TAG_EN,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, cport_id, 0, NULL,
				 0, ES2_USB_CTRL_TIMEOUT);
	if (retval < 0)
		dev_err(&udev->dev, "Cannot enable latency tag for cport %d\n",
			cport_id);
	return retval;
}

static int latency_tag_disable(struct gb_host_device *hd, u16 cport_id)
{
	int retval;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 GB_APB_REQUEST_LATENCY_TAG_DIS,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, cport_id, 0, NULL,
				 0, ES2_USB_CTRL_TIMEOUT);
	if (retval < 0)
		dev_err(&udev->dev, "Cannot disable latency tag for cport %d\n",
			cport_id);
	return retval;
}

static struct gb_hd_driver es2_driver = {
	.hd_priv_size		= sizeof(struct es2_ap_dev),
	.message_send		= message_send,
	.message_cancel		= message_cancel,
	.cport_allocate		= es2_cport_allocate,
	.cport_release		= es2_cport_release,
	.cport_enable		= cport_enable,
	.cport_connected	= es2_cport_connected,
	.cport_flush		= es2_cport_flush,
	.cport_shutdown		= es2_cport_shutdown,
	.cport_quiesce		= es2_cport_quiesce,
	.cport_clear		= es2_cport_clear,
	.latency_tag_enable	= latency_tag_enable,
	.latency_tag_disable	= latency_tag_disable,
	.output			= output,
};

/* Common function to report consistent warnings based on URB status */
static int check_urb_status(struct urb *urb)
{
	struct device *dev = &urb->dev->dev;
	int status = urb->status;

	switch (status) {
	case 0:
		return 0;

	case -EOVERFLOW:
		dev_err(dev, "%s: overflow actual length is %d\n",
			__func__, urb->actual_length);
		/* fall through */
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
	case -EILSEQ:
	case -EPROTO:
		/* device is gone, stop sending */
		return status;
	}
	dev_err(dev, "%s: unknown status %d\n", __func__, status);

	return -EAGAIN;
}

static void es2_destroy(struct es2_ap_dev *es2)
{
	struct usb_device *udev;
	struct urb *urb;
	int i;

	debugfs_remove(es2->apb_log_enable_dentry);
	usb_log_disable(es2);

	/* Tear down everything! */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		urb = es2->cport_out_urb[i];
		usb_kill_urb(urb);
		usb_free_urb(urb);
		es2->cport_out_urb[i] = NULL;
		es2->cport_out_urb_busy[i] = false;	/* just to be anal */
	}

	for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
		usb_free_urb(es2->arpc_urb[i]);
		kfree(es2->arpc_buffer[i]);
		es2->arpc_buffer[i] = NULL;
	}

	for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
		usb_free_urb(es2->cport_in.urb[i]);
		kfree(es2->cport_in.buffer[i]);
		es2->cport_in.buffer[i] = NULL;
	}

	/* release reserved CDSI0 and CDSI1 cports */
	gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI1);
	gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI0);

	udev = es2->usb_dev;
	gb_hd_put(es2->hd);

	usb_put_dev(udev);
}

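/*
 * Completion handler for CPort data arriving on the bulk-in endpoint. The
 * CPort id is recovered from the message header pad byte and the urb is
 * resubmitted so the endpoint always has receive buffers queued.
 */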
static void cport_in_callback(struct urb *urb)
{
	struct gb_host_device *hd = urb->context;
	struct device *dev = &urb->dev->dev;
	struct gb_operation_msg_hdr *header;
	int status = check_urb_status(urb);
	int retval;
	u16 cport_id;

	if (status) {
		if ((status == -EAGAIN) || (status == -EPROTO))
			goto exit;

		/* The urb is being unlinked */
		if (status == -ENOENT || status == -ESHUTDOWN)
			return;

		dev_err(dev, "urb cport in error %d (dropped)\n", status);
		return;
	}

	if (urb->actual_length < sizeof(*header)) {
		dev_err(dev, "short message received\n");
		goto exit;
	}

	/* Extract the CPort id, which is packed in the message header */
	header = urb->transfer_buffer;
	cport_id = gb_message_cport_unpack(header);

	if (cport_id_valid(hd, cport_id)) {
		greybus_data_rcvd(hd, cport_id, urb->transfer_buffer,
				  urb->actual_length);
	} else {
		dev_err(dev, "invalid cport id %u received\n", cport_id);
	}
exit:
	/* put our urb back in the request pool */
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		dev_err(dev, "failed to resubmit in-urb: %d\n", retval);
}

static void cport_out_callback(struct urb *urb)
{
	struct gb_message *message = urb->context;
	struct gb_host_device *hd = message->operation->connection->hd;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	int status = check_urb_status(urb);
	unsigned long flags;

	gb_message_cport_clear(message->header);

	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
	message->hcpriv = NULL;
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

	/*
	 * Tell the submitter that the message send (attempt) is
	 * complete, and report the status.
	 */
	greybus_message_sent(hd, message, status);

	free_urb(es2, urb);
}

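/*
 * APBridgeA RPC (ARPC) support. Requests are sent synchronously to the bridge
 * over the control endpoint (arpc_send()), while responses arrive on the
 * dedicated ARPC bulk-in endpoint (arpc_in_callback()) and are matched to the
 * waiting request by id. A typical caller looks like es2_cport_flush():
 *
 *	struct arpc_cport_flush_req req;
 *
 *	req.cport_id = cpu_to_le16(cport_id);
 *	ret = arpc_sync(es2, ARPC_TYPE_CPORT_FLUSH, &req, sizeof(req),
 *			NULL, ES2_ARPC_CPORT_TIMEOUT);
 */
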
static struct arpc *arpc_alloc(void *payload, u16 size, u8 type)
{
	struct arpc *rpc;

	if (size + sizeof(*rpc->req) > ARPC_OUT_SIZE_MAX)
		return NULL;

	rpc = kzalloc(sizeof(*rpc), GFP_KERNEL);
	if (!rpc)
		return NULL;

	INIT_LIST_HEAD(&rpc->list);
	rpc->req = kzalloc(sizeof(*rpc->req) + size, GFP_KERNEL);
	if (!rpc->req)
		goto err_free_rpc;

	rpc->resp = kzalloc(sizeof(*rpc->resp), GFP_KERNEL);
	if (!rpc->resp)
		goto err_free_req;

	rpc->req->type = type;
	rpc->req->size = cpu_to_le16(sizeof(*rpc->req) + size);
	memcpy(rpc->req->data, payload, size);

	init_completion(&rpc->response_received);

	return rpc;

err_free_req:
	kfree(rpc->req);
err_free_rpc:
	kfree(rpc);

	return NULL;
}

static void arpc_free(struct arpc *rpc)
{
	kfree(rpc->req);
	kfree(rpc->resp);
	kfree(rpc);
}

static struct arpc *arpc_find(struct es2_ap_dev *es2, __le16 id)
{
	struct arpc *rpc;

	list_for_each_entry(rpc, &es2->arpcs, list) {
		if (rpc->req->id == id)
			return rpc;
	}

	return NULL;
}

static void arpc_add(struct es2_ap_dev *es2, struct arpc *rpc)
{
	rpc->req->id = cpu_to_le16(es2->arpc_id_cycle++);
	list_add_tail(&rpc->list, &es2->arpcs);
}

static void arpc_del(struct es2_ap_dev *es2, struct arpc *rpc)
{
	list_del(&rpc->list);
}

static int arpc_send(struct es2_ap_dev *es2, struct arpc *rpc, int timeout)
{
	struct usb_device *udev = es2->usb_dev;
	int retval;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 GB_APB_REQUEST_ARPC_RUN,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE,
				 0, 0,
				 rpc->req, le16_to_cpu(rpc->req->size),
				 ES2_USB_CTRL_TIMEOUT);
	if (retval != le16_to_cpu(rpc->req->size)) {
		dev_err(&udev->dev,
			"failed to send ARPC request %d: %d\n",
			rpc->req->type, retval);
		if (retval > 0)
			retval = -EIO;
		return retval;
	}

	return 0;
}

static int arpc_sync(struct es2_ap_dev *es2, u8 type, void *payload,
		     size_t size, int *result, unsigned int timeout)
{
	struct arpc *rpc;
	unsigned long flags;
	int retval;

	if (result)
		*result = 0;

	rpc = arpc_alloc(payload, size, type);
	if (!rpc)
		return -ENOMEM;

	spin_lock_irqsave(&es2->arpc_lock, flags);
	arpc_add(es2, rpc);
	spin_unlock_irqrestore(&es2->arpc_lock, flags);

	retval = arpc_send(es2, rpc, timeout);
	if (retval)
		goto out_arpc_del;

	retval = wait_for_completion_interruptible_timeout(
						&rpc->response_received,
						msecs_to_jiffies(timeout));
	if (retval <= 0) {
		if (!retval)
			retval = -ETIMEDOUT;
		goto out_arpc_del;
	}

	if (rpc->resp->result) {
		retval = -EREMOTEIO;
		if (result)
			*result = rpc->resp->result;
	} else {
		retval = 0;
	}

out_arpc_del:
	spin_lock_irqsave(&es2->arpc_lock, flags);
	arpc_del(es2, rpc);
	spin_unlock_irqrestore(&es2->arpc_lock, flags);
	arpc_free(rpc);

	if (retval < 0 && retval != -EREMOTEIO) {
		dev_err(&es2->usb_dev->dev,
			"failed to execute ARPC: %d\n", retval);
	}

	return retval;
}

static void arpc_in_callback(struct urb *urb)
{
	struct es2_ap_dev *es2 = urb->context;
	struct device *dev = &urb->dev->dev;
	int status = check_urb_status(urb);
	int retval;
	struct arpc *rpc;
	struct arpc_response_message *resp;
	unsigned long flags;

	if (status) {
		if ((status == -EAGAIN) || (status == -EPROTO))
			goto exit;

		/* The urb is being unlinked */
		if (status == -ENOENT || status == -ESHUTDOWN)
			return;

		dev_err(dev, "arpc in-urb error %d (dropped)\n", status);
		return;
	}

	if (urb->actual_length < sizeof(*resp)) {
		dev_err(dev, "short arpc response received\n");
		goto exit;
	}

	resp = urb->transfer_buffer;
	spin_lock_irqsave(&es2->arpc_lock, flags);
	rpc = arpc_find(es2, resp->id);
	if (!rpc) {
		dev_err(dev, "invalid arpc response id received: %u\n",
			le16_to_cpu(resp->id));
		spin_unlock_irqrestore(&es2->arpc_lock, flags);
		goto exit;
	}

	memcpy(rpc->resp, resp, sizeof(*resp));
	complete(&rpc->response_received);
	spin_unlock_irqrestore(&es2->arpc_lock, flags);

exit:
	/* put our urb back in the request pool */
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		dev_err(dev, "failed to resubmit arpc in-urb: %d\n", retval);
}

#define APB1_LOG_MSG_SIZE	64
static void apb_log_get(struct es2_ap_dev *es2, char *buf)
{
	int retval;

	do {
		retval = usb_control_msg(es2->usb_dev,
					 usb_rcvctrlpipe(es2->usb_dev, 0),
					 GB_APB_REQUEST_LOG,
					 USB_DIR_IN | USB_TYPE_VENDOR |
					 USB_RECIP_INTERFACE,
					 0x00, 0x00,
					 buf,
					 APB1_LOG_MSG_SIZE,
					 ES2_USB_CTRL_TIMEOUT);
		if (retval > 0)
			kfifo_in(&es2->apb_log_fifo, buf, retval);
	} while (retval > 0);
}

static int apb_log_poll(void *data)
{
	struct es2_ap_dev *es2 = data;
	char *buf;

	buf = kmalloc(APB1_LOG_MSG_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	while (!kthread_should_stop()) {
		msleep(1000);
		apb_log_get(es2, buf);
	}

	kfree(buf);

	return 0;
}

static ssize_t apb_log_read(struct file *f, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct es2_ap_dev *es2 = file_inode(f)->i_private;
	ssize_t ret;
	size_t copied;
	char *tmp_buf;

	if (count > APB1_LOG_SIZE)
		count = APB1_LOG_SIZE;

	tmp_buf = kmalloc(count, GFP_KERNEL);
	if (!tmp_buf)
		return -ENOMEM;

	copied = kfifo_out(&es2->apb_log_fifo, tmp_buf, count);
	ret = simple_read_from_buffer(buf, count, ppos, tmp_buf, copied);

	kfree(tmp_buf);

	return ret;
}

static const struct file_operations apb_log_fops = {
	.read	= apb_log_read,
};

static void usb_log_enable(struct es2_ap_dev *es2)
{
	if (!IS_ERR_OR_NULL(es2->apb_log_task))
		return;

	/* get log from APB1 */
	es2->apb_log_task = kthread_run(apb_log_poll, es2, "apb_log");
	if (IS_ERR(es2->apb_log_task))
		return;
	/* XXX We will need to rename this per APB */
	es2->apb_log_dentry = debugfs_create_file("apb_log", 0444,
						  gb_debugfs_get(), es2,
						  &apb_log_fops);
}

static void usb_log_disable(struct es2_ap_dev *es2)
{
	if (IS_ERR_OR_NULL(es2->apb_log_task))
		return;

	debugfs_remove(es2->apb_log_dentry);
	es2->apb_log_dentry = NULL;

	kthread_stop(es2->apb_log_task);
	es2->apb_log_task = NULL;
}

static ssize_t apb_log_enable_read(struct file *f, char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct es2_ap_dev *es2 = file_inode(f)->i_private;
	int enable = !IS_ERR_OR_NULL(es2->apb_log_task);
	char tmp_buf[3];

	sprintf(tmp_buf, "%d\n", enable);
	return simple_read_from_buffer(buf, count, ppos, tmp_buf, 3);
}

static ssize_t apb_log_enable_write(struct file *f, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	int enable;
	ssize_t retval;
	struct es2_ap_dev *es2 = file_inode(f)->i_private;

	retval = kstrtoint_from_user(buf, count, 10, &enable);
	if (retval)
		return retval;

	if (enable)
		usb_log_enable(es2);
	else
		usb_log_disable(es2);

	return count;
}

static const struct file_operations apb_log_enable_fops = {
	.read	= apb_log_enable_read,
	.write	= apb_log_enable_write,
};

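/*
 * Ask the bridge how many CPorts it supports; the result bounds the host
 * device's cport_id_map and, because of the one-byte header packing above, is
 * clamped to U8_MAX.
 */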
static int apb_get_cport_count(struct usb_device *udev)
{
	int retval;
	__le16 *cport_count;

	cport_count = kzalloc(sizeof(*cport_count), GFP_KERNEL);
	if (!cport_count)
		return -ENOMEM;

	retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				 GB_APB_REQUEST_CPORT_COUNT,
				 USB_DIR_IN | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, 0, 0, cport_count,
				 sizeof(*cport_count), ES2_USB_CTRL_TIMEOUT);
	if (retval != sizeof(*cport_count)) {
		dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n",
			retval);

		if (retval >= 0)
			retval = -EIO;

		goto out;
	}

	retval = le16_to_cpu(*cport_count);

	/* We need to fit a CPort ID in one byte of a message header */
	if (retval > U8_MAX) {
		retval = U8_MAX;
		dev_warn(&udev->dev, "Limiting number of CPorts to U8_MAX\n");
	}

out:
	kfree(cport_count);
	return retval;
}

/*
 * The ES2 USB Bridge device has 15 endpoints
 * 1 Control - usual USB stuff + AP -> APBridgeA messages
 * 7 Bulk IN - CPort data in
 * 7 Bulk OUT - CPort data out
 */
static int ap_probe(struct usb_interface *interface,
		    const struct usb_device_id *id)
{
	struct es2_ap_dev *es2;
	struct gb_host_device *hd;
	struct usb_device *udev;
	struct usb_host_interface *iface_desc;
	struct usb_endpoint_descriptor *endpoint;
	__u8 ep_addr;
	int retval;
	int i;
	int num_cports;
	bool bulk_out_found = false;
	bool bulk_in_found = false;
	bool arpc_in_found = false;

	udev = usb_get_dev(interface_to_usbdev(interface));

	num_cports = apb_get_cport_count(udev);
	if (num_cports < 0) {
		usb_put_dev(udev);
		dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n",
			num_cports);
		return num_cports;
	}

	hd = gb_hd_create(&es2_driver, &udev->dev, ES2_GBUF_MSG_SIZE_MAX,
			  num_cports);
	if (IS_ERR(hd)) {
		usb_put_dev(udev);
		return PTR_ERR(hd);
	}

	es2 = hd_to_es2(hd);
	es2->hd = hd;
	es2->usb_intf = interface;
	es2->usb_dev = udev;
	spin_lock_init(&es2->cport_out_urb_lock);
	INIT_KFIFO(es2->apb_log_fifo);
	usb_set_intfdata(interface, es2);

	/*
	 * Reserve the CDSI0 and CDSI1 CPorts so they won't be allocated
	 * dynamically.
	 */
	retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI0);
	if (retval)
		goto error;
	retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI1);
	if (retval)
		goto error;

	/* find all bulk endpoints */
	iface_desc = interface->cur_altsetting;
	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
		endpoint = &iface_desc->endpoint[i].desc;
		ep_addr = endpoint->bEndpointAddress;

		if (usb_endpoint_is_bulk_in(endpoint)) {
			if (!bulk_in_found) {
				es2->cport_in.endpoint = ep_addr;
				bulk_in_found = true;
			} else if (!arpc_in_found) {
				es2->arpc_endpoint_in = ep_addr;
				arpc_in_found = true;
			} else {
				dev_warn(&udev->dev,
					 "Unused bulk IN endpoint found: 0x%02x\n",
					 ep_addr);
			}
			continue;
		}
		if (usb_endpoint_is_bulk_out(endpoint)) {
			if (!bulk_out_found) {
				es2->cport_out_endpoint = ep_addr;
				bulk_out_found = true;
			} else {
				dev_warn(&udev->dev,
					 "Unused bulk OUT endpoint found: 0x%02x\n",
					 ep_addr);
			}
			continue;
		}
		dev_warn(&udev->dev,
			 "Unknown endpoint type found, address 0x%02x\n",
			 ep_addr);
	}
	if (!bulk_in_found || !arpc_in_found || !bulk_out_found) {
		dev_err(&udev->dev, "Not enough endpoints found in device, aborting!\n");
		retval = -ENODEV;
		goto error;
	}

	/* Allocate buffers for our cport in messages */
	for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
		struct urb *urb;
		u8 *buffer;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			retval = -ENOMEM;
			goto error;
		}
		es2->cport_in.urb[i] = urb;

		buffer = kmalloc(ES2_GBUF_MSG_SIZE_MAX, GFP_KERNEL);
		if (!buffer) {
			retval = -ENOMEM;
			goto error;
		}

		usb_fill_bulk_urb(urb, udev,
				  usb_rcvbulkpipe(udev, es2->cport_in.endpoint),
				  buffer, ES2_GBUF_MSG_SIZE_MAX,
				  cport_in_callback, hd);

		es2->cport_in.buffer[i] = buffer;
	}

	/* Allocate buffers for ARPC in messages */
	for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
		struct urb *urb;
		u8 *buffer;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			retval = -ENOMEM;
			goto error;
		}
		es2->arpc_urb[i] = urb;

		buffer = kmalloc(ARPC_IN_SIZE_MAX, GFP_KERNEL);
		if (!buffer) {
			retval = -ENOMEM;
			goto error;
		}

		usb_fill_bulk_urb(urb, udev,
				  usb_rcvbulkpipe(udev,
						  es2->arpc_endpoint_in),
				  buffer, ARPC_IN_SIZE_MAX,
				  arpc_in_callback, es2);

		es2->arpc_buffer[i] = buffer;
	}

	/* Allocate urbs for our CPort OUT messages */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		struct urb *urb;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			retval = -ENOMEM;
			goto error;
		}

		es2->cport_out_urb[i] = urb;
		es2->cport_out_urb_busy[i] = false;	/* just to be anal */
	}

	/* XXX We will need to rename this per APB */
	es2->apb_log_enable_dentry = debugfs_create_file("apb_log_enable",
							 0644,
							 gb_debugfs_get(), es2,
							 &apb_log_enable_fops);

	INIT_LIST_HEAD(&es2->arpcs);
	spin_lock_init(&es2->arpc_lock);

	retval = es2_arpc_in_enable(es2);
	if (retval)
		goto error;

	retval = gb_hd_add(hd);
	if (retval)
		goto err_disable_arpc_in;

	retval = es2_cport_in_enable(es2, &es2->cport_in);
	if (retval)
		goto err_hd_del;

	return 0;

err_hd_del:
	gb_hd_del(hd);
err_disable_arpc_in:
	es2_arpc_in_disable(es2);
error:
	es2_destroy(es2);

	return retval;
}

static void ap_disconnect(struct usb_interface *interface)
{
	struct es2_ap_dev *es2 = usb_get_intfdata(interface);

	gb_hd_del(es2->hd);

	es2_cport_in_disable(es2, &es2->cport_in);
	es2_arpc_in_disable(es2);

	es2_destroy(es2);
}

static struct usb_driver es2_ap_driver = {
	.name =		"es2_ap_driver",
	.probe =	ap_probe,
	.disconnect =	ap_disconnect,
	.id_table =	id_table,
	.soft_unbind =	1,
};

module_usb_driver(es2_ap_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@linuxfoundation.org>");