/*
 * USB HOST XHCI Controller stack
 *
 * Based on xHCI host controller driver in linux-kernel
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Copyright (C) 2013 Samsung Electronics Co.Ltd
 * Authors: Vivek Gautam <gautam.vivek@samsung.com>
 *	    Vikas Sajjan <vikas.sajjan@samsung.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <asm/byteorder.h>
#include <usb.h>
#include <malloc.h>
#include <asm/cache.h>
#include <asm-generic/errno.h>

#include "xhci.h"

#define CACHELINE_SIZE		CONFIG_SYS_CACHELINE_SIZE
/**
 * flushes the address passed till the length
 *
 * @param addr	pointer to memory region to be flushed
 * @param len	the length of the cache line to be flushed
 * @return none
 */
void xhci_flush_cache(uint32_t addr, u32 len)
{
	BUG_ON((void *)addr == NULL || len == 0);

	flush_dcache_range(addr & ~(CACHELINE_SIZE - 1),
				ALIGN(addr + len, CACHELINE_SIZE));
}
/**
 * invalidates the address passed till the length
 *
 * @param addr	pointer to memory region to be invalidated
 * @param len	the length of the cache line to be invalidated
 * @return none
 */
void xhci_inval_cache(uint32_t addr, u32 len)
{
	BUG_ON((void *)addr == NULL || len == 0);

	invalidate_dcache_range(addr & ~(CACHELINE_SIZE - 1),
				ALIGN(addr + len, CACHELINE_SIZE));
}
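/*
 * Usage sketch (illustrative, not a call made at this spot): buffers
 * shared with the controller are flushed after the CPU writes them and
 * invalidated before the CPU reads what the controller wrote, e.g.:
 *
 *	xhci_flush_cache((uint32_t)ring->enqueue, sizeof(union xhci_trb));
 *	xhci_inval_cache((uint32_t)ring->dequeue, sizeof(union xhci_trb));
 */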
/**
 * frees the "segment" pointer passed
 *
 * @param ptr	pointer to "segment" to be freed
 * @return none
 */
static void xhci_segment_free(struct xhci_segment *seg)
{
	free(seg->trbs);
	seg->trbs = NULL;

	free(seg);
}
/**
 * frees the "ring" pointer passed
 *
 * @param ptr	pointer to "ring" to be freed
 * @return none
 */
static void xhci_ring_free(struct xhci_ring *ring)
{
	struct xhci_segment *seg;
	struct xhci_segment *first_seg;

	BUG_ON(!ring);

	first_seg = ring->first_seg;
	seg = first_seg->next;
	while (seg != first_seg) {
		struct xhci_segment *next = seg->next;
		xhci_segment_free(seg);
		seg = next;
	}
	xhci_segment_free(first_seg);

	free(ring);
}
/**
 * frees the "xhci_container_ctx" pointer passed
 *
 * @param ptr	pointer to "xhci_container_ctx" to be freed
 * @return none
 */
static void xhci_free_container_ctx(struct xhci_container_ctx *ctx)
{
	free(ctx->bytes);
	free(ctx);
}
/**
 * frees the virtual devices for "xhci_ctrl" pointer passed
 *
 * @param ptr	pointer to "xhci_ctrl" whose virtual devices are to be freed
 * @return none
 */
static void xhci_free_virt_devices(struct xhci_ctrl *ctrl)
{
	int i;
	int slot_id;
	struct xhci_virt_device *virt_dev;

	/*
	 * refactored here to loop through all virt_dev
	 * Slot ID 0 is reserved
	 */
	for (slot_id = 0; slot_id < MAX_HC_SLOTS; slot_id++) {
		virt_dev = ctrl->devs[slot_id];
		if (!virt_dev)
			continue;

		ctrl->dcbaa->dev_context_ptrs[slot_id] = 0;

		for (i = 0; i < 31; ++i)
			if (virt_dev->eps[i].ring)
				xhci_ring_free(virt_dev->eps[i].ring);

		if (virt_dev->in_ctx)
			xhci_free_container_ctx(virt_dev->in_ctx);
		if (virt_dev->out_ctx)
			xhci_free_container_ctx(virt_dev->out_ctx);

		free(virt_dev);
		/* make sure we are pointing to NULL */
		ctrl->devs[slot_id] = NULL;
	}
}
/**
 * frees all the memory allocated
 *
 * @param ptr	pointer to "xhci_ctrl" to be cleaned up
 * @return none
 */
void xhci_cleanup(struct xhci_ctrl *ctrl)
{
	xhci_ring_free(ctrl->event_ring);
	xhci_ring_free(ctrl->cmd_ring);
	xhci_free_virt_devices(ctrl);
	free(ctrl->erst.entries);
	free(ctrl->dcbaa);
	memset(ctrl, '\0', sizeof(struct xhci_ctrl));
}
/**
 * Malloc the aligned memory
 *
 * @param size	size of memory to be allocated
 * @return pointer to the allocated, zeroed and cache-aligned memory
 */
static void *xhci_malloc(unsigned int size)
{
	void *ptr;
	size_t cacheline_size = max(XHCI_ALIGNMENT, CACHELINE_SIZE);

	ptr = memalign(cacheline_size, ALIGN(size, cacheline_size));
	BUG_ON(!ptr);
	memset(ptr, '\0', size);

	xhci_flush_cache((uint32_t)ptr, size);

	return ptr;
}
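/*
 * Sizing note (illustrative): with a 64-byte cache line, a request for
 * 100 bytes is rounded up to 128 bytes, so no unrelated allocation can
 * share the buffer's cache lines; otherwise a flush or invalidate on
 * this buffer could corrupt a neighbouring allocation.
 */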
/**
 * Make the prev segment point to the next segment.
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * address of the next segment. The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 *
 * @param prev	pointer to the previous segment
 * @param next	pointer to the next segment
 * @param link_trbs	flag to indicate whether to link the trbs or NOT
 * @return none
 */
static void xhci_link_segments(struct xhci_segment *prev,
				struct xhci_segment *next, bool link_trbs)
{
	u32 val;
	u64 val_64 = 0;

	if (!prev || !next)
		return;
	prev->next = next;
	if (link_trbs) {
		val_64 = (uintptr_t)next->trbs;
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = val_64;

		/*
		 * Set the last TRB in the segment to
		 * have a TRB type ID of Link TRB
		 */
		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
		val &= ~TRB_TYPE_BITMASK;
		val |= (TRB_LINK << TRB_TYPE_SHIFT);

		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
	}
}
/**
 * Initialises the Ring's enqueue, dequeue and enq_seg pointers
 *
 * @param ring	pointer to the RING to be initialised
 * @return none
 */
static void xhci_initialize_ring_info(struct xhci_ring *ring)
{
	/*
	 * The ring is empty, so the enqueue pointer == dequeue pointer
	 */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;

	/*
	 * The ring is initialized to 0. The producer must write 1 to the
	 * cycle bit to handover ownership of the TRB, so PCS = 1.
	 * The consumer must compare CCS to the cycle bit to
	 * check ownership, so CCS = 1.
	 */
	ring->cycle_state = 1;
}
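/*
 * Illustrative note on the cycle-bit handshake: the producer writes a
 * TRB and then sets its cycle bit to the producer cycle state (PCS) to
 * hand it over; the consumer only accepts a TRB whose cycle bit equals
 * its consumer cycle state (CCS). Each side flips its local state when
 * it wraps past a Link TRB that has LINK_TOGGLE set.
 */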
/**
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 *
 * @return pointer to the newly allocated SEGMENT
 */
static struct xhci_segment *xhci_segment_alloc(void)
{
	struct xhci_segment *seg;

	seg = (struct xhci_segment *)malloc(sizeof(struct xhci_segment));
	BUG_ON(!seg);

	seg->trbs = (union xhci_trb *)xhci_malloc(SEGMENT_SIZE);

	seg->next = NULL;

	return seg;
}
/**
 * Create a new ring with zero or more segments.
 * TODO: current code only uses one-time-allocated single-segment rings
 * of 1KB anyway, so we might as well get rid of all the segment and
 * linking code (and maybe increase the size a bit, e.g. 4KB).
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.2 and figures 15 and 16 of XHCI spec rev1.0.
 *
 * @param num_segs	number of segments in the ring
 * @param link_trbs	flag to indicate whether to link the trbs or NOT
 * @return pointer to the newly created RING
 */
struct xhci_ring *xhci_ring_alloc(unsigned int num_segs, bool link_trbs)
{
	struct xhci_ring *ring;
	struct xhci_segment *prev;

	ring = (struct xhci_ring *)malloc(sizeof(struct xhci_ring));
	BUG_ON(!ring);

	if (num_segs == 0)
		return ring;

	ring->first_seg = xhci_segment_alloc();
	BUG_ON(!ring->first_seg);

	num_segs--;

	prev = ring->first_seg;
	while (num_segs > 0) {
		struct xhci_segment *next;

		next = xhci_segment_alloc();
		BUG_ON(!next);

		xhci_link_segments(prev, next, link_trbs);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(prev, ring->first_seg, link_trbs);
	if (link_trbs) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		prev->trbs[TRBS_PER_SEGMENT-1].link.control |=
					cpu_to_le32(LINK_TOGGLE);
	}
	xhci_initialize_ring_info(ring);

	return ring;
}
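/*
 * Example (as used later in this file): a single-segment command or
 * transfer ring is allocated with linked TRBs,
 *
 *	ring = xhci_ring_alloc(1, true);
 *
 * whereas the event ring is allocated with link_trbs = false, since the
 * controller walks event ring segments via the ERST rather than via
 * Link TRBs.
 */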
/**
 * Allocates the Container context
 *
 * @param ctrl	Host controller data structure
 * @param type	type of XHCI Container Context
 * @return NULL if failed else pointer to the context on success
 */
static struct xhci_container_ctx
		*xhci_alloc_container_ctx(struct xhci_ctrl *ctrl, int type)
{
	struct xhci_container_ctx *ctx;

	ctx = (struct xhci_container_ctx *)
		malloc(sizeof(struct xhci_container_ctx));
	BUG_ON(!ctx);

	BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));

	ctx->type = type;
	ctx->size = (MAX_EP_CTX_NUM + 1) *
			CTX_SIZE(readl(&ctrl->hccr->cr_hccparams));
	if (type == XHCI_CTX_TYPE_INPUT)
		ctx->size += CTX_SIZE(readl(&ctrl->hccr->cr_hccparams));

	ctx->bytes = (u8 *)xhci_malloc(ctx->size);

	return ctx;
}
/**
 * Allocating virtual device
 *
 * @param udev	pointer to USB device structure
 * @return 0 on success else negative error code on failure
 */
int xhci_alloc_virt_device(struct usb_device *udev)
{
	u64 byte_64 = 0;
	unsigned int slot_id = udev->slot_id;
	struct xhci_virt_device *virt_dev;
	struct xhci_ctrl *ctrl = udev->controller;

	/* Slot ID 0 is reserved */
	if (ctrl->devs[slot_id]) {
		printf("Virt dev for slot[%d] already allocated\n", slot_id);
		return -EEXIST;
	}

	ctrl->devs[slot_id] = (struct xhci_virt_device *)
					malloc(sizeof(struct xhci_virt_device));
	if (!ctrl->devs[slot_id]) {
		puts("Failed to allocate virtual device\n");
		return -ENOMEM;
	}

	memset(ctrl->devs[slot_id], 0, sizeof(struct xhci_virt_device));
	virt_dev = ctrl->devs[slot_id];

	/* Allocate the (output) device context that will be used in the HC. */
	virt_dev->out_ctx = xhci_alloc_container_ctx(ctrl,
					XHCI_CTX_TYPE_DEVICE);
	if (!virt_dev->out_ctx) {
		puts("Failed to allocate out context for virt dev\n");
		return -ENOMEM;
	}

	/* Allocate the (input) device context for address device command */
	virt_dev->in_ctx = xhci_alloc_container_ctx(ctrl,
					XHCI_CTX_TYPE_INPUT);
	if (!virt_dev->in_ctx) {
		puts("Failed to allocate in context for virt dev\n");
		return -ENOMEM;
	}

	/* Allocate endpoint 0 ring */
	virt_dev->eps[0].ring = xhci_ring_alloc(1, true);

	byte_64 = (uintptr_t)(virt_dev->out_ctx->bytes);

	/* Point to output device context in dcbaa. */
	ctrl->dcbaa->dev_context_ptrs[slot_id] = byte_64;

	xhci_flush_cache((uint32_t)&ctrl->dcbaa->dev_context_ptrs[slot_id],
							sizeof(__le64));
	return 0;
}
/**
 * Allocates the necessary data structures
 * for XHCI host controller
 *
 * @param ctrl	Host controller data structure
 * @param hccr	pointer to HOST Controller Control Registers
 * @param hcor	pointer to HOST Controller Operational Registers
 * @return 0 if successful else negative error code on failure
 */
int xhci_mem_init(struct xhci_ctrl *ctrl, struct xhci_hccr *hccr,
					struct xhci_hcor *hcor)
{
	uint64_t val_64;
	uint64_t trb_64;
	uint32_t val;
	unsigned long deq;
	int i;
	struct xhci_segment *seg;

	/* DCBAA initialization */
	ctrl->dcbaa = (struct xhci_device_context_array *)
			xhci_malloc(sizeof(struct xhci_device_context_array));
	if (ctrl->dcbaa == NULL) {
		puts("unable to allocate DCBA\n");
		return -ENOMEM;
	}

	val_64 = (uintptr_t)ctrl->dcbaa;
	/* Set the pointer in DCBAA register */
	xhci_writeq(&hcor->or_dcbaap, val_64);

	/* Command ring control pointer register initialization */
	ctrl->cmd_ring = xhci_ring_alloc(1, true);

	/* Set the address in the Command Ring Control register */
	trb_64 = (uintptr_t)ctrl->cmd_ring->first_seg->trbs;
	val_64 = xhci_readq(&hcor->or_crcr);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(trb_64 & (u64) ~CMD_RING_RSVD_BITS) |
		ctrl->cmd_ring->cycle_state;
	xhci_writeq(&hcor->or_crcr, val_64);

	/* read the offset of the doorbell array register */
	val = xhci_readl(&hccr->cr_dboff);
	val &= DBOFF_MASK;
	ctrl->dba = (struct xhci_doorbell_array *)((char *)hccr + val);

	/* read the offset of the runtime registers */
	val = xhci_readl(&hccr->cr_rtsoff);
	val &= RTSOFF_MASK;
	ctrl->run_regs = (struct xhci_run_regs *)((char *)hccr + val);

	/* writing the address of ir_set structure */
	ctrl->ir_set = &ctrl->run_regs->ir_set[0];

	/* Event ring does not maintain link TRB */
	ctrl->event_ring = xhci_ring_alloc(ERST_NUM_SEGS, false);
	ctrl->erst.entries = (struct xhci_erst_entry *)
		xhci_malloc(sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS);

	ctrl->erst.num_entries = ERST_NUM_SEGS;

	for (val = 0, seg = ctrl->event_ring->first_seg;
			val < ERST_NUM_SEGS;
			val++) {
		struct xhci_erst_entry *entry = &ctrl->erst.entries[val];

		trb_64 = (uintptr_t)seg->trbs;
		xhci_writeq(&entry->seg_addr, trb_64);
		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
		entry->rsvd = 0;
		seg = seg->next;
	}
	xhci_flush_cache((uint32_t)ctrl->erst.entries,
			ERST_NUM_SEGS * sizeof(struct xhci_erst_entry));

	deq = (unsigned long)ctrl->event_ring->dequeue;

	/* Update HC event ring dequeue pointer */
	xhci_writeq(&ctrl->ir_set->erst_dequeue,
				(u64)deq & (u64)~ERST_PTR_MASK);

	/* set ERST count with the number of entries in the segment table */
	val = xhci_readl(&ctrl->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_writel(&ctrl->ir_set->erst_size, val);

	/* this is the event ring segment table pointer */
	val_64 = xhci_readq(&ctrl->ir_set->erst_base);
	val_64 &= ERST_PTR_MASK;
	val_64 |= ((u32)(ctrl->erst.entries) & ~ERST_PTR_MASK);

	xhci_writeq(&ctrl->ir_set->erst_base, val_64);

	/* initializing the virtual devices to NULL */
	for (i = 0; i < MAX_HC_SLOTS; ++i)
		ctrl->devs[i] = NULL;

	/*
	 * Just Zero'ing this register completely,
	 * or some spurious Device Notification Events
	 * might screw things here.
	 */
	xhci_writel(&hcor->or_dnctrl, 0x0);

	return 0;
}
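/*
 * Usage sketch (illustrative): a platform glue driver would map hccr
 * and hcor from the controller's register space and call
 *
 *	if (xhci_mem_init(ctrl, hccr, hcor) < 0)
 *		return -ENOMEM;
 *
 * before bringing the host controller out of halt.
 */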
/**
 * Give the input control context for the passed container context
 *
 * @param ctx	pointer to the context
 * @return pointer to the Input control context data
 */
struct xhci_input_control_ctx
		*xhci_get_input_control_ctx(struct xhci_container_ctx *ctx)
{
	BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
	return (struct xhci_input_control_ctx *)ctx->bytes;
}
/**
 * Give the slot context for the passed container context
 *
 * @param ctrl	Host controller data structure
 * @param ctx	pointer to the context
 * @return pointer to the slot control context data
 */
struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_ctrl *ctrl,
				struct xhci_container_ctx *ctx)
{
	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
		return (struct xhci_slot_ctx *)ctx->bytes;

	return (struct xhci_slot_ctx *)
		(ctx->bytes + CTX_SIZE(readl(&ctrl->hccr->cr_hccparams)));
}
/**
 * Gets the EP context based on the ep_index
 *
 * @param ctrl	Host controller data structure
 * @param ctx	context container
 * @param ep_index	index of the endpoint
 * @return pointer to the End point context
 */
struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_ctrl *ctrl,
				    struct xhci_container_ctx *ctx,
				    unsigned int ep_index)
{
	/* increment ep index by offset of start of ep ctx array */
	ep_index++;
	if (ctx->type == XHCI_CTX_TYPE_INPUT)
		ep_index++;

	return (struct xhci_ep_ctx *)
		(ctx->bytes +
		(ep_index * CTX_SIZE(readl(&ctrl->hccr->cr_hccparams))));
}
/**
 * Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
 * Useful when you want to change one particular aspect of the endpoint
 * and then issue a configure endpoint command.
 *
 * @param ctrl	Host controller data structure
 * @param in_ctx contains the input context
 * @param out_ctx contains the output context
 * @param ep_index index of the end point
 * @return none
 */
void xhci_endpoint_copy(struct xhci_ctrl *ctrl,
			struct xhci_container_ctx *in_ctx,
			struct xhci_container_ctx *out_ctx,
			unsigned int ep_index)
{
	struct xhci_ep_ctx *out_ep_ctx;
	struct xhci_ep_ctx *in_ep_ctx;

	out_ep_ctx = xhci_get_ep_ctx(ctrl, out_ctx, ep_index);
	in_ep_ctx = xhci_get_ep_ctx(ctrl, in_ctx, ep_index);

	in_ep_ctx->ep_info = out_ep_ctx->ep_info;
	in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
	in_ep_ctx->deq = out_ep_ctx->deq;
	in_ep_ctx->tx_info = out_ep_ctx->tx_info;
}
/**
 * Copy output xhci_slot_ctx to the input xhci_slot_ctx.
 * Useful when you want to change one particular aspect of the slot context
 * and then issue a configure endpoint command.
 * Only the context entries field matters, but
 * we'll copy the whole thing anyway.
 *
 * @param ctrl	Host controller data structure
 * @param in_ctx contains the input context
 * @param out_ctx contains the output context
 * @return none
 */
void xhci_slot_copy(struct xhci_ctrl *ctrl, struct xhci_container_ctx *in_ctx,
					struct xhci_container_ctx *out_ctx)
{
	struct xhci_slot_ctx *in_slot_ctx;
	struct xhci_slot_ctx *out_slot_ctx;

	in_slot_ctx = xhci_get_slot_ctx(ctrl, in_ctx);
	out_slot_ctx = xhci_get_slot_ctx(ctrl, out_ctx);

	in_slot_ctx->dev_info = out_slot_ctx->dev_info;
	in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
	in_slot_ctx->tt_info = out_slot_ctx->tt_info;
	in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}
/**
 * Setup an xHCI virtual device for a Set Address command
 *
 * @param udev	pointer to the Device Data Structure
 * @return none
 */
void xhci_setup_addressable_virt_dev(struct usb_device *udev)
{
	struct usb_device *hop = udev;
	struct xhci_virt_device *virt_dev;
	struct xhci_ep_ctx *ep0_ctx;
	struct xhci_slot_ctx *slot_ctx;
	u32 port_num = 0;
	u64 trb_64 = 0;
	struct xhci_ctrl *ctrl = udev->controller;

	virt_dev = ctrl->devs[udev->slot_id];

	BUG_ON(!virt_dev);

	/* Extract the EP0 and Slot Ctrl */
	ep0_ctx = xhci_get_ep_ctx(ctrl, virt_dev->in_ctx, 0);
	slot_ctx = xhci_get_slot_ctx(ctrl, virt_dev->in_ctx);

	/* Only the control endpoint is valid - one endpoint context */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | 0);

	switch (udev->speed) {
	case USB_SPEED_SUPER:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
		break;
	case USB_SPEED_HIGH:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
		break;
	case USB_SPEED_FULL:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
		break;
	case USB_SPEED_LOW:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
		break;
	default:
		/* Speed was set earlier, this shouldn't happen. */
		BUG();
	}

	/* Extract the root hub port number */
	if (hop->parent)
		while (hop->parent->parent)
			hop = hop->parent;
	port_num = hop->portnr;
	debug("port_num = %d\n", port_num);

	slot_ctx->dev_info2 |=
			cpu_to_le32(((port_num & ROOT_HUB_PORT_MASK) <<
				ROOT_HUB_PORT_SHIFT));

	/* Step 4 - ring already allocated */

	ep0_ctx->ep_info2 = cpu_to_le32(CTRL_EP << EP_TYPE_SHIFT);
	debug("SPEED = %d\n", udev->speed);

	switch (udev->speed) {
	case USB_SPEED_SUPER:
		ep0_ctx->ep_info2 |= cpu_to_le32(((512 & MAX_PACKET_MASK) <<
					MAX_PACKET_SHIFT));
		debug("Setting Packet size = 512bytes\n");
		break;
	case USB_SPEED_HIGH:
	/* USB core guesses at a 64-byte max packet first for FS devices */
	case USB_SPEED_FULL:
		ep0_ctx->ep_info2 |= cpu_to_le32(((64 & MAX_PACKET_MASK) <<
					MAX_PACKET_SHIFT));
		debug("Setting Packet size = 64bytes\n");
		break;
	case USB_SPEED_LOW:
		ep0_ctx->ep_info2 |= cpu_to_le32(((8 & MAX_PACKET_MASK) <<
					MAX_PACKET_SHIFT));
		debug("Setting Packet size = 8bytes\n");
		break;
	default:
		/* New speed? */
		BUG();
	}

	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
	ep0_ctx->ep_info2 |=
			cpu_to_le32(((0 & MAX_BURST_MASK) << MAX_BURST_SHIFT) |
			((3 & ERROR_COUNT_MASK) << ERROR_COUNT_SHIFT));

	trb_64 = (uintptr_t)virt_dev->eps[0].ring->first_seg->trbs;
	ep0_ctx->deq = cpu_to_le64(trb_64 | virt_dev->eps[0].ring->cycle_state);

	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */

	xhci_flush_cache((uint32_t)ep0_ctx, sizeof(struct xhci_ep_ctx));
	xhci_flush_cache((uint32_t)slot_ctx, sizeof(struct xhci_slot_ctx));
}
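/*
 * Call-order note (illustrative): the enumeration path allocates the
 * contexts and the EP0 ring with xhci_alloc_virt_device(udev) first,
 * then calls xhci_setup_addressable_virt_dev(udev) to fill the input
 * slot and EP0 contexts before the Address Device command is issued.
 */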