/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/usb.h>
#include <linux/pci.h>
#include <linux/dmapool.h>

#include "xhci.h"
/*
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 *
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 */
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
	struct xhci_segment *seg;
	dma_addr_t	dma;

	seg = kzalloc(sizeof *seg, flags);
	if (!seg)
		return NULL;
	xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg);

	seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
	if (!seg->trbs) {
		kfree(seg);
		return NULL;
	}
	xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n",
			seg->trbs, (unsigned long long)dma);

	memset(seg->trbs, 0, SEGMENT_SIZE);
	seg->dma = dma;
	seg->next = NULL;

	return seg;
}
static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
	if (!seg)
		return;
	if (seg->trbs) {
		xhci_dbg(xhci, "Freeing DMA segment at %p (virtual) 0x%llx (DMA)\n",
				seg->trbs, (unsigned long long)seg->dma);
		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
		seg->trbs = NULL;
	}
	xhci_dbg(xhci, "Freeing priv segment structure at %p\n", seg);
	kfree(seg);
}
/*
 * Make the prev segment point to the next segment.
 *
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * DMA address of the next segment.  The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 */
static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
		struct xhci_segment *next, bool link_trbs)
{
	u32 val;

	if (!prev || !next)
		return;
	prev->next = next;
	if (link_trbs) {
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = next->dma;

		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
		val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
		val &= ~TRB_TYPE_BITMASK;
		val |= TRB_TYPE(TRB_LINK);
		/* Always set the chain bit with 0.95 hardware */
		if (xhci_link_trb_quirk(xhci))
			val |= TRB_CHAIN;
		prev->trbs[TRBS_PER_SEGMENT-1].link.control = val;
	}
	xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n",
			(unsigned long long)prev->dma,
			(unsigned long long)next->dma);
}
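
/*
 * Worked example (illustrative): after xhci_link_segments() with link_trbs
 * set, the last TRB of 'prev' has its type field rewritten to
 * TRB_TYPE(TRB_LINK) and its segment_ptr aimed at next->dma.  On 0.95-era
 * hardware the quirk additionally leaves TRB_CHAIN set, so the control word
 * ends up as TRB_TYPE(TRB_LINK) | TRB_CHAIN, plus whatever End/Toggle Cycle
 * bits the caller sets afterwards.
 */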
/* XXX: Do we need the hcd structure in all these functions? */
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	struct xhci_segment *seg;
	struct xhci_segment *first_seg;

	if (!ring || !ring->first_seg)
		return;
	first_seg = ring->first_seg;
	seg = first_seg->next;
	xhci_dbg(xhci, "Freeing ring at %p\n", ring);
	while (seg != first_seg) {
		struct xhci_segment *next = seg->next;
		xhci_segment_free(xhci, seg);
		seg = next;
	}
	xhci_segment_free(xhci, first_seg);
	ring->first_seg = NULL;
	kfree(ring);
}
/**
 * Create a new ring with zero or more segments.
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.1 and figures 15 and 16.
 */
static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
		unsigned int num_segs, bool link_trbs, gfp_t flags)
{
	struct xhci_ring	*ring;
	struct xhci_segment	*prev;

	ring = kzalloc(sizeof *(ring), flags);
	xhci_dbg(xhci, "Allocating ring at %p\n", ring);
	if (!ring)
		return NULL;

	INIT_LIST_HEAD(&ring->td_list);
	if (num_segs == 0)
		return ring;

	ring->first_seg = xhci_segment_alloc(xhci, flags);
	if (!ring->first_seg)
		goto fail;
	num_segs--;

	prev = ring->first_seg;
	while (num_segs > 0) {
		struct xhci_segment	*next;

		next = xhci_segment_alloc(xhci, flags);
		if (!next)
			goto fail;
		xhci_link_segments(xhci, prev, next, link_trbs);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(xhci, prev, ring->first_seg, link_trbs);

	if (link_trbs) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE);
		xhci_dbg(xhci, "Wrote link toggle flag to"
				" segment %p (virtual), 0x%llx (DMA)\n",
				prev, (unsigned long long)prev->dma);
	}
	/* The ring is empty, so the enqueue pointer == dequeue pointer */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;
	/* The ring is initialized to 0. The producer must write 1 to the cycle
	 * bit to handover ownership of the TRB, so PCS = 1.  The consumer must
	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
	 */
	ring->cycle_state = 1;

	return ring;

fail:
	xhci_ring_free(xhci, ring);
	return NULL;
}
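
/*
 * Worked example (illustrative): a freshly allocated ring is all zeroes, so
 * every TRB's cycle bit reads 0 while cycle_state (PCS/CCS) starts at 1.
 * The producer marks a TRB valid by writing it with cycle = 1; the consumer
 * stops dequeuing as soon as a TRB's cycle bit no longer matches its CCS.
 * Each time either side follows the Toggle Cycle link TRB back to the first
 * segment, its cycle state flips (1 -> 0 -> 1 ...), so stale TRBs from the
 * previous lap are never mistaken for new work.
 */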
#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
		int type, gfp_t flags)
{
	struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags);
	if (!ctx)
		return NULL;

	BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
	ctx->type = type;
	ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
	if (type == XHCI_CTX_TYPE_INPUT)
		ctx->size += CTX_SIZE(xhci->hcc_params);

	ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
	memset(ctx->bytes, 0, ctx->size);
	return ctx;
}
void xhci_free_container_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx)
{
	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
	kfree(ctx);
}
struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx)
{
	BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
	return (struct xhci_input_control_ctx *)ctx->bytes;
}
struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx)
{
	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
		return (struct xhci_slot_ctx *)ctx->bytes;

	return (struct xhci_slot_ctx *)
		(ctx->bytes + CTX_SIZE(xhci->hcc_params));
}
struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx,
		unsigned int ep_index)
{
	/* increment ep index by offset of start of ep ctx array */
	ep_index++;
	if (ctx->type == XHCI_CTX_TYPE_INPUT)
		ep_index++;

	return (struct xhci_ep_ctx *)
		(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
}
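
/*
 * Layout example (illustrative): a device context is the slot context
 * followed by 31 endpoint contexts; an input context prepends the input
 * control context.  With 32-byte contexts (HCC_64BYTE_CONTEXT clear),
 * ep_index 0 of a device context sits at offset 32 (one slot context),
 * while ep_index 0 of an input context sits at offset 64 (input control
 * context + slot context) - hence the extra ep_index++ above for inputs.
 */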
/* All the xhci_tds in the ring's TD list should be freed at this point */
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *dev;
	int i;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || !xhci->devs[slot_id])
		return;

	dev = xhci->devs[slot_id];
	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
	if (!dev)
		return;

	for (i = 0; i < 31; ++i)
		if (dev->eps[i].ring)
			xhci_ring_free(xhci, dev->eps[i].ring);

	if (dev->in_ctx)
		xhci_free_container_ctx(xhci, dev->in_ctx);
	if (dev->out_ctx)
		xhci_free_container_ctx(xhci, dev->out_ctx);

	kfree(xhci->devs[slot_id]);
	xhci->devs[slot_id] = NULL;
}
int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
		struct usb_device *udev, gfp_t flags)
{
	struct xhci_virt_device *dev;
	int i;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || xhci->devs[slot_id]) {
		xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
		return 0;
	}

	xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
	if (!xhci->devs[slot_id])
		return 0;
	dev = xhci->devs[slot_id];

	/* Allocate the (output) device context that will be used in the HC. */
	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
	if (!dev->out_ctx)
		goto fail;
	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->out_ctx->dma);

	/* Allocate the (input) device context for address device command */
	dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
	if (!dev->in_ctx)
		goto fail;
	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->in_ctx->dma);

	/* Initialize the cancellation list for each endpoint */
	for (i = 0; i < 31; i++)
		INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);

	/* Allocate endpoint 0 ring */
	dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, flags);
	if (!dev->eps[0].ring)
		goto fail;

	init_completion(&dev->cmd_completion);
	INIT_LIST_HEAD(&dev->cmd_list);

	/* Point to output device context in dcbaa. */
	xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma;
	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
			slot_id,
			&xhci->dcbaa->dev_context_ptrs[slot_id],
			(unsigned long long) xhci->dcbaa->dev_context_ptrs[slot_id]);

	return 1;
fail:
	xhci_free_virt_device(xhci, slot_id);
	return 0;
}
/* Setup an xHCI virtual device for a Set Address command */
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
{
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx	*ep0_ctx;
	struct usb_device	*top_dev;
	struct xhci_slot_ctx	*slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;

	dev = xhci->devs[udev->slot_id];
	/* Slot ID 0 is reserved */
	if (udev->slot_id == 0 || !dev) {
		xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
				udev->slot_id);
		return -EINVAL;
	}
	ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
	ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);

	/* 2) New slot context and endpoint 0 context are valid */
	ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG;

	/* 3) Only the control endpoint is valid - one endpoint context */
	slot_ctx->dev_info |= LAST_CTX(1);

	slot_ctx->dev_info |= (u32) udev->route;
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		slot_ctx->dev_info |= (u32) SLOT_SPEED_SS;
		break;
	case USB_SPEED_HIGH:
		slot_ctx->dev_info |= (u32) SLOT_SPEED_HS;
		break;
	case USB_SPEED_FULL:
		slot_ctx->dev_info |= (u32) SLOT_SPEED_FS;
		break;
	case USB_SPEED_LOW:
		slot_ctx->dev_info |= (u32) SLOT_SPEED_LS;
		break;
	case USB_SPEED_VARIABLE:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
	default:
		/* Speed was set earlier, this shouldn't happen. */
		BUG();
	}
	/* Find the root hub port this device is under */
	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;
	slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
	xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum);

	/* Is this a LS/FS device under a HS hub? */
	if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) &&
			udev->tt) {
		slot_ctx->tt_info = udev->tt->hub->slot_id;
		slot_ctx->tt_info |= udev->ttport << 8;
		if (udev->tt->multi)
			slot_ctx->dev_info |= DEV_MTT;
	}
	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);

	/* Step 4 - ring already allocated */
	/* Step 5 */
	ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP);
	/*
	 * XXX: Not sure about wireless USB devices.
	 */
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		ep0_ctx->ep_info2 |= MAX_PACKET(512);
		break;
	case USB_SPEED_HIGH:
	/* USB core guesses at a 64-byte max packet first for FS devices */
	case USB_SPEED_FULL:
		ep0_ctx->ep_info2 |= MAX_PACKET(64);
		break;
	case USB_SPEED_LOW:
		ep0_ctx->ep_info2 |= MAX_PACKET(8);
		break;
	case USB_SPEED_VARIABLE:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
	default:
		/* New speed? */
		BUG();
	}
	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
	ep0_ctx->ep_info2 |= MAX_BURST(0);
	ep0_ctx->ep_info2 |= ERROR_COUNT(3);

	ep0_ctx->deq = dev->eps[0].ring->first_seg->dma;
	ep0_ctx->deq |= dev->eps[0].ring->cycle_state;

	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */

	return 0;
}
/* Return the polling or NAK interval.
 *
 * The polling interval is expressed in "microframes".  If xHCI's Interval field
 * is set to N, it will service the endpoint every 2^(Interval)*125us.
 *
 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
 * is set to 0.
 */
static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval = 0;

	switch (udev->speed) {
	case USB_SPEED_HIGH:
		/* Max NAK rate */
		if (usb_endpoint_xfer_control(&ep->desc) ||
				usb_endpoint_xfer_bulk(&ep->desc))
			interval = ep->desc.bInterval;
		/* Fall through - SS and HS isoc/int have same decoding */
	case USB_SPEED_SUPER:
		if (usb_endpoint_xfer_int(&ep->desc) ||
				usb_endpoint_xfer_isoc(&ep->desc)) {
			if (ep->desc.bInterval == 0)
				interval = 0;
			else
				interval = ep->desc.bInterval - 1;
			if (interval > 15)
				interval = 15;
			if (interval != ep->desc.bInterval + 1)
				dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
						ep->desc.bEndpointAddress, 1 << interval);
		}
		break;
	/* Convert bInterval (in 1-255 frames) to microframes and round down to
	 * nearest power of 2.
	 */
	case USB_SPEED_FULL:
	case USB_SPEED_LOW:
		if (usb_endpoint_xfer_int(&ep->desc) ||
				usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = fls(8*ep->desc.bInterval) - 1;
			if (interval > 10)
				interval = 10;
			if (interval < 3)
				interval = 3;
			if ((1 << interval) != 8*ep->desc.bInterval)
				dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
						ep->desc.bEndpointAddress, 1 << interval);
		}
		break;
	default:
		BUG();
	}
	return EP_INTERVAL(interval);
}
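
/*
 * Worked example (illustrative): a full-speed interrupt endpoint with
 * bInterval = 4 frames is 8*4 = 32 microframes; fls(32) - 1 = 5, and
 * 2^5 * 125us = 4ms, which matches the requested 4 frame (4ms) period.
 * A value like bInterval = 3 (24 microframes) is not a power of two, so it
 * rounds down to 2^4 = 16 microframes and triggers the dev_warn() above.
 */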
static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int in;
	u32 type;

	in = usb_endpoint_dir_in(&ep->desc);
	if (usb_endpoint_xfer_control(&ep->desc)) {
		type = EP_TYPE(CTRL_EP);
	} else if (usb_endpoint_xfer_bulk(&ep->desc)) {
		if (in)
			type = EP_TYPE(BULK_IN_EP);
		else
			type = EP_TYPE(BULK_OUT_EP);
	} else if (usb_endpoint_xfer_isoc(&ep->desc)) {
		if (in)
			type = EP_TYPE(ISOC_IN_EP);
		else
			type = EP_TYPE(ISOC_OUT_EP);
	} else if (usb_endpoint_xfer_int(&ep->desc)) {
		if (in)
			type = EP_TYPE(INT_IN_EP);
		else
			type = EP_TYPE(INT_OUT_EP);
	} else {
		BUG();
	}
	return type;
}
int xhci_endpoint_init(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *udev,
		struct usb_host_endpoint *ep,
		gfp_t mem_flags)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_ring *ep_ring;
	unsigned int max_packet;
	unsigned int max_burst;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	/* Set up the endpoint ring */
	virt_dev->eps[ep_index].new_ring =
		xhci_ring_alloc(xhci, 1, true, mem_flags);
	if (!virt_dev->eps[ep_index].new_ring)
		return -ENOMEM;
	ep_ring = virt_dev->eps[ep_index].new_ring;
	ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;

	ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);

	/* FIXME dig Mult and streams info out of ep companion desc */

	/* Allow 3 retries for everything but isoc;
	 * error count = 0 means infinite retries.
	 */
	if (!usb_endpoint_xfer_isoc(&ep->desc))
		ep_ctx->ep_info2 = ERROR_COUNT(3);
	else
		ep_ctx->ep_info2 = ERROR_COUNT(1);

	ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep);

	/* Set the max packet size and max burst */
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		max_packet = ep->desc.wMaxPacketSize;
		ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
		/* dig out max burst from ep companion desc */
		if (!ep->ss_ep_comp) {
			xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n");
			max_packet = 0;
		} else {
			max_packet = ep->ss_ep_comp->desc.bMaxBurst;
		}
		ep_ctx->ep_info2 |= MAX_BURST(max_packet);
		break;
	case USB_SPEED_HIGH:
		/* bits 11:12 specify the number of additional transaction
		 * opportunities per microframe (USB 2.0, section 9.6.6)
		 */
		if (usb_endpoint_xfer_isoc(&ep->desc) ||
				usb_endpoint_xfer_int(&ep->desc)) {
			max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
			ep_ctx->ep_info2 |= MAX_BURST(max_burst);
		}
		/* Fall through */
	case USB_SPEED_FULL:
	case USB_SPEED_LOW:
		max_packet = ep->desc.wMaxPacketSize & 0x3ff;
		ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
		break;
	default:
		BUG();
	}
	/* FIXME Debug endpoint context */
	return 0;
}
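
/*
 * Worked example (illustrative): a high-speed isoc endpoint reporting
 * wMaxPacketSize = 0x0b40 decodes as max_burst = (0x0b40 & 0x1800) >> 11 = 1
 * (i.e. two transactions per microframe) and max_packet = 0x0b40 & 0x3ff =
 * 832 bytes, so the endpoint context gets MAX_BURST(1) | MAX_PACKET(832).
 */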
void xhci_endpoint_zero(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_host_endpoint *ep)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = 0;
	ep_ctx->deq = 0;
	ep_ctx->tx_info = 0;
	/* Don't free the endpoint ring until the set interface or configuration
	 * request succeeds.
	 */
}
/* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
 * Useful when you want to change one particular aspect of the endpoint and then
 * issue a configure endpoint command.
 */
void xhci_endpoint_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx,
		unsigned int ep_index)
{
	struct xhci_ep_ctx *out_ep_ctx;
	struct xhci_ep_ctx *in_ep_ctx;

	out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);

	in_ep_ctx->ep_info = out_ep_ctx->ep_info;
	in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
	in_ep_ctx->deq = out_ep_ctx->deq;
	in_ep_ctx->tx_info = out_ep_ctx->tx_info;
}
/* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
 * Useful when you want to change one particular aspect of the endpoint and then
 * issue a configure endpoint command.  Only the context entries field matters,
 * but we'll copy the whole thing anyway.
 */
void xhci_slot_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx)
{
	struct xhci_slot_ctx *in_slot_ctx;
	struct xhci_slot_ctx *out_slot_ctx;

	in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);

	in_slot_ctx->dev_info = out_slot_ctx->dev_info;
	in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
	in_slot_ctx->tt_info = out_slot_ctx->tt_info;
	in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}
/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp);

	if (!num_sp)
		return 0;

	xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
	if (!xhci->scratchpad)
		goto fail_sp;

	xhci->scratchpad->sp_array =
		pci_alloc_consistent(to_pci_dev(dev),
				num_sp * sizeof(u64),
				&xhci->scratchpad->sp_dma);
	if (!xhci->scratchpad->sp_array)
		goto fail_sp2;

	xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
	if (!xhci->scratchpad->sp_buffers)
		goto fail_sp3;

	xhci->scratchpad->sp_dma_buffers =
		kzalloc(sizeof(dma_addr_t) * num_sp, flags);
	if (!xhci->scratchpad->sp_dma_buffers)
		goto fail_sp4;

	xhci->dcbaa->dev_context_ptrs[0] = xhci->scratchpad->sp_dma;
	for (i = 0; i < num_sp; i++) {
		dma_addr_t dma;
		void *buf = pci_alloc_consistent(to_pci_dev(dev),
				xhci->page_size, &dma);
		if (!buf)
			goto fail_sp5;

		xhci->scratchpad->sp_array[i] = dma;
		xhci->scratchpad->sp_buffers[i] = buf;
		xhci->scratchpad->sp_dma_buffers[i] = dma;
	}

	return 0;

 fail_sp5:
	for (i = i - 1; i >= 0; i--) {
		pci_free_consistent(to_pci_dev(dev), xhci->page_size,
				xhci->scratchpad->sp_buffers[i],
				xhci->scratchpad->sp_dma_buffers[i]);
	}
	kfree(xhci->scratchpad->sp_dma_buffers);

 fail_sp4:
	kfree(xhci->scratchpad->sp_buffers);

 fail_sp3:
	pci_free_consistent(to_pci_dev(dev), num_sp * sizeof(u64),
			xhci->scratchpad->sp_array,
			xhci->scratchpad->sp_dma);

 fail_sp2:
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;

 fail_sp:
	return -ENOMEM;
}
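
/*
 * Layout note (illustrative): DCBAA entry 0, which would otherwise be
 * reserved, holds the DMA address of the scratchpad array; each of the
 * num_sp array slots in turn holds the DMA address of one page-sized
 * buffer that the controller may use as private scratch memory.
 */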
static void scratchpad_free(struct xhci_hcd *xhci)
{
	int num_sp;
	int i;
	struct pci_dev	*pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	if (!xhci->scratchpad)
		return;

	num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	for (i = 0; i < num_sp; i++) {
		pci_free_consistent(pdev, xhci->page_size,
				xhci->scratchpad->sp_buffers[i],
				xhci->scratchpad->sp_dma_buffers[i]);
	}
	kfree(xhci->scratchpad->sp_dma_buffers);
	kfree(xhci->scratchpad->sp_buffers);
	pci_free_consistent(pdev, num_sp * sizeof(u64),
			xhci->scratchpad->sp_array,
			xhci->scratchpad->sp_dma);
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;
}
struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
		bool allocate_completion, gfp_t mem_flags)
{
	struct xhci_command *command;

	command = kzalloc(sizeof(*command), mem_flags);
	if (!command)
		return NULL;

	command->in_ctx =
		xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, mem_flags);
	if (!command->in_ctx) {
		kfree(command);
		return NULL;
	}

	if (allocate_completion) {
		command->completion =
			kzalloc(sizeof(struct completion), mem_flags);
		if (!command->completion) {
			xhci_free_container_ctx(xhci, command->in_ctx);
			kfree(command);
			return NULL;
		}
		init_completion(command->completion);
	}

	INIT_LIST_HEAD(&command->cmd_list);
	return command;
}
void xhci_free_command(struct xhci_hcd *xhci,
		struct xhci_command *command)
{
	xhci_free_container_ctx(xhci,
			command->in_ctx);
	kfree(command->completion);
	kfree(command);
}
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
	struct pci_dev	*pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int size;
	int i;

	/* Free the Event Ring Segment Table and the actual Event Ring */
	xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
	xhci_write_64(xhci, 0, &xhci->ir_set->erst_base);
	xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue);
	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
	if (xhci->erst.entries)
		pci_free_consistent(pdev, size,
				xhci->erst.entries, xhci->erst.erst_dma_addr);
	xhci->erst.entries = NULL;
	xhci_dbg(xhci, "Freed ERST\n");
	if (xhci->event_ring)
		xhci_ring_free(xhci, xhci->event_ring);
	xhci->event_ring = NULL;
	xhci_dbg(xhci, "Freed event ring\n");

	xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
	if (xhci->cmd_ring)
		xhci_ring_free(xhci, xhci->cmd_ring);
	xhci->cmd_ring = NULL;
	xhci_dbg(xhci, "Freed command ring\n");

	for (i = 1; i < MAX_HC_SLOTS; ++i)
		xhci_free_virt_device(xhci, i);

	if (xhci->segment_pool)
		dma_pool_destroy(xhci->segment_pool);
	xhci->segment_pool = NULL;
	xhci_dbg(xhci, "Freed segment pool\n");

	if (xhci->device_pool)
		dma_pool_destroy(xhci->device_pool);
	xhci->device_pool = NULL;
	xhci_dbg(xhci, "Freed device context pool\n");

	xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
	if (xhci->dcbaa)
		pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
				xhci->dcbaa, xhci->dcbaa->dma);
	xhci->dcbaa = NULL;

	xhci->page_size = 0;
	xhci->page_shift = 0;
	scratchpad_free(xhci);
}
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
	dma_addr_t	dma;
	struct device	*dev = xhci_to_hcd(xhci)->self.controller;
	unsigned int	val, val2;
	u64		val_64;
	struct xhci_segment	*seg;
	u32 page_size;
	int i;

	page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
	xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
	for (i = 0; i < 16; i++) {
		if ((0x1 & page_size) != 0)
			break;
		page_size = page_size >> 1;
	}
	if (i < 16)
		xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024);
	else
		xhci_warn(xhci, "WARN: no supported page size\n");
	/* Use 4K pages, since that's common and the minimum the HC supports */
	xhci->page_shift = 12;
	xhci->page_size = 1 << xhci->page_shift;
	xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024);
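
	/*
	 * Worked example (illustrative): the PAGESIZE register reports
	 * supported sizes one bit per power of two starting at 4K, so a
	 * value of 0x1 means bit 0 is set and the loop above exits with
	 * i = 0, i.e. a 1 << (0+12) = 4096 byte (4K) page; a value of 0x3
	 * would additionally advertise 8K pages.
	 */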
	/*
	 * Program the Number of Device Slots Enabled field in the CONFIG
	 * register with the max value of slots the HC can handle.
	 */
	val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
	xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n",
			(unsigned int) val);
	val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
	val |= (val2 & ~HCS_SLOTS_MASK);
	xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n",
			(unsigned int) val);
	xhci_writel(xhci, val, &xhci->op_regs->config_reg);
	/*
	 * Section 5.4.8 - doorbell array must be
	 * "physically contiguous and 64-byte (cache line) aligned".
	 */
	xhci->dcbaa = pci_alloc_consistent(to_pci_dev(dev),
			sizeof(*xhci->dcbaa), &dma);
	if (!xhci->dcbaa)
		goto fail;
	memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
	xhci->dcbaa->dma = dma;
	xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
	/*
	 * Initialize the ring segment pool.  The ring must be a contiguous
	 * structure comprised of TRBs.  The TRBs must be 16 byte aligned,
	 * however, the command ring segment needs 64-byte aligned segments,
	 * so we pick the greater alignment need.
	 */
	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
			SEGMENT_SIZE, 64, xhci->page_size);

	/* See Table 46 and Note on Figure 55 */
	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
			2112, 64, xhci->page_size);
	if (!xhci->segment_pool || !xhci->device_pool)
		goto fail;
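
	/*
	 * Sizing note (illustrative): 2112 bytes covers the worst-case
	 * container context - a 64-byte-context HC needs 32 contexts *
	 * 64 bytes = 2048 bytes for a device context, plus one more 64-byte
	 * input control context for an input context, i.e. 2112 bytes
	 * (see CTX_SIZE() above).
	 */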
	/* Set up the command ring to have one segment for now. */
	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
	if (!xhci->cmd_ring)
		goto fail;
	xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
	xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
			(unsigned long long)xhci->cmd_ring->first_seg->dma);

	/* Set the address in the Command Ring Control register */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
			(unsigned long long)val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);
	val = xhci_readl(xhci, &xhci->cap_regs->db_off);
	val &= DBOFF_MASK;
	xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
			" from cap regs base addr\n", val);
	xhci->dba = (void *) xhci->cap_regs + val;
	xhci_dbg_regs(xhci);
	xhci_print_run_regs(xhci);
	/* Set ir_set to interrupt register set 0 */
	xhci->ir_set = (void *) xhci->run_regs->ir_set;
	/*
	 * Event ring setup: Allocate a normal ring, but also setup
	 * the event ring segment table (ERST).  Section 4.9.3.
	 */
	xhci_dbg(xhci, "// Allocating event ring\n");
	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
	if (!xhci->event_ring)
		goto fail;

	xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev),
			sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
	if (!xhci->erst.entries)
		goto fail;
	xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
			(unsigned long long)dma);

	memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
	xhci->erst.num_entries = ERST_NUM_SEGS;
	xhci->erst.erst_dma_addr = dma;
	xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
			xhci->erst.num_entries,
			xhci->erst.entries,
			(unsigned long long)xhci->erst.erst_dma_addr);

	/* set ring base address and size for each segment table entry */
	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
		entry->seg_addr = seg->dma;
		entry->seg_size = TRBS_PER_SEGMENT;
		entry->rsvd = 0;
		seg = seg->next;
	}
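
	/*
	 * Illustrative: with ERST_NUM_SEGS event ring segments, each ERST
	 * entry simply records one segment's base DMA address and its length
	 * in TRBs (TRBS_PER_SEGMENT), so the controller can walk the event
	 * ring without link TRBs - which is why the event ring is allocated
	 * with link_trbs set to false above.
	 */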
	/* set ERST count with the number of entries in the segment table */
	val = xhci_readl(xhci, &xhci->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
			val);
	xhci_writel(xhci, val, &xhci->ir_set->erst_size);

	xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
	/* set the segment table base address */
	xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
			(unsigned long long)xhci->erst.erst_dma_addr);
	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	val_64 &= ERST_PTR_MASK;
	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);

	/* Set the event ring dequeue address */
	xhci_set_hc_event_deq(xhci);
	xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
	xhci_print_ir_set(xhci, xhci->ir_set, 0);

	/*
	 * XXX: Might need to set the Interrupter Moderation Register to
	 * something other than the default (~1ms minimum between interrupts).
	 * See section 5.5.1.2.
	 */
	init_completion(&xhci->addr_dev);
	for (i = 0; i < MAX_HC_SLOTS; ++i)
		xhci->devs[i] = NULL;

	if (scratchpad_alloc(xhci, flags))
		goto fail;

	return 0;

fail:
	xhci_warn(xhci, "Couldn't initialize memory\n");
	xhci_mem_cleanup(xhci);
	return -ENOMEM;
}