/*
 * Thunderbolt Cactus Ridge driver - control channel and configuration commands
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 */

#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/workqueue.h>
#include <linux/kfifo.h>

#include "ctl.h"
struct ctl_pkg {
	struct tb_ctl *ctl;
	void *buffer;
	struct ring_frame frame;
};

#define TB_CTL_RX_PKG_COUNT 10
/**
 * struct tb_ctl - thunderbolt control channel
 */
struct tb_ctl {
	struct tb_nhi *nhi;
	struct tb_ring *tx;
	struct tb_ring *rx;

	struct dma_pool *frame_pool;
	struct ctl_pkg *rx_packets[TB_CTL_RX_PKG_COUNT];
	DECLARE_KFIFO(response_fifo, struct ctl_pkg *, 16);
	struct completion response_ready;

	hotplug_cb callback;
	void *callback_data;
};
#define tb_ctl_WARN(ctl, format, arg...) \
	dev_WARN(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_err(ctl, format, arg...) \
	dev_err(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_warn(ctl, format, arg...) \
	dev_warn(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_info(ctl, format, arg...) \
	dev_info(&(ctl)->nhi->pdev->dev, format, ## arg)
/* configuration packets definitions */

enum tb_cfg_pkg_type {
	TB_CFG_PKG_READ = 1,
	TB_CFG_PKG_WRITE = 2,
	TB_CFG_PKG_ERROR = 3,
	TB_CFG_PKG_NOTIFY_ACK = 4,
	TB_CFG_PKG_EVENT = 5,
	TB_CFG_PKG_XDOMAIN_REQ = 6,
	TB_CFG_PKG_XDOMAIN_RESP = 7,
	TB_CFG_PKG_OVERRIDE = 8,
	TB_CFG_PKG_RESET = 9,
	TB_CFG_PKG_PREPARE_TO_SLEEP = 0xd,
};
/* common header */
struct tb_cfg_header {
	u32 route_hi:22;
	u32 unknown:10; /* highest order bit is set on replies */
	u32 route_lo;
} __packed;
/* additional header for read/write packets */
struct tb_cfg_address {
	u32 offset:13; /* in dwords */
	u32 length:6; /* in dwords */
	u32 port:6;
	enum tb_cfg_space space:2;
	u32 seq:2; /* sequence number */
	u32 zero:3;
} __packed;
/* TB_CFG_PKG_READ, response for TB_CFG_PKG_WRITE */
struct cfg_read_pkg {
	struct tb_cfg_header header;
	struct tb_cfg_address addr;
} __packed;
/* TB_CFG_PKG_WRITE, response for TB_CFG_PKG_READ */
struct cfg_write_pkg {
	struct tb_cfg_header header;
	struct tb_cfg_address addr;
	u32 data[64]; /* maximum size, tb_cfg_address.length has 6 bits */
} __packed;
/* TB_CFG_PKG_ERROR */
struct cfg_error_pkg {
	struct tb_cfg_header header;
	enum tb_cfg_error error:4;
	u32 zero1:4;
	u32 port:6;
	u32 zero2:2; /* Both should be zero, still they are different fields. */
	u32 zero3:16;
} __packed;
/* TB_CFG_PKG_EVENT */
struct cfg_event_pkg {
	struct tb_cfg_header header;
	u32 port:6;
	u32 zero:25;
	bool unplug:1;
} __packed;
/* TB_CFG_PKG_RESET */
struct cfg_reset_pkg {
	struct tb_cfg_header header;
} __packed;
/* TB_CFG_PKG_PREPARE_TO_SLEEP */
struct cfg_pts_pkg {
	struct tb_cfg_header header;
	u32 data;
} __packed;
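
/*
 * Informational summary (derived from tb_ctl_tx() and tb_cfg_read_raw()
 * below, added for illustration): each of these packets is transmitted as
 * big-endian dwords followed by a 4 byte crc32c checksum. A read request is
 * therefore 12 bytes of header + address on the wire, and a read reply
 * carrying `length` dwords of data has a 12 + 4 * length byte body.
 */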
/* utility functions */

static u64 get_route(struct tb_cfg_header header)
{
	return (u64) header.route_hi << 32 | header.route_lo;
}
static struct tb_cfg_header make_header(u64 route)
{
	struct tb_cfg_header header = {
		.route_hi = route >> 32,
		.route_lo = route,
	};
	/* check for overflow, route_hi is not 32 bits! */
	WARN_ON(get_route(header) != route);
	return header;
}
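
/*
 * Example (illustrative only): for route 0x0000000200000001 make_header()
 * sets .route_hi = 0x2 and .route_lo = 0x1, and get_route() recombines them
 * into the original 64 bit value. Routes whose upper half does not fit into
 * route_hi trigger the WARN_ON above.
 */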
static int check_header(struct ctl_pkg *pkg, u32 len, enum tb_cfg_pkg_type type,
			u64 route)
{
	struct tb_cfg_header *header = pkg->buffer;

	/* check frame, TODO: frame flags */
	if (WARN(len != pkg->frame.size,
			"wrong framesize (expected %#x, got %#x)\n",
			len, pkg->frame.size))
		return -EIO;
	if (WARN(type != pkg->frame.eof, "wrong eof (expected %#x, got %#x)\n",
			type, pkg->frame.eof))
		return -EIO;
	if (WARN(pkg->frame.sof, "wrong sof (expected 0x0, got %#x)\n",
			pkg->frame.sof))
		return -EIO;

	/* check header */
	if (WARN(header->unknown != 1 << 9,
			"header->unknown is %#x\n", header->unknown))
		return -EIO;
	if (WARN(route != get_route(*header),
			"wrong route (expected %llx, got %llx)",
			route, get_route(*header)))
		return -EIO;
	return 0;
}
static int check_config_address(struct tb_cfg_address addr,
				enum tb_cfg_space space, u32 offset,
				u32 length)
{
	if (WARN(addr.zero, "addr.zero is %#x\n", addr.zero))
		return -EIO;
	if (WARN(space != addr.space, "wrong space (expected %x, got %x)\n",
			space, addr.space))
		return -EIO;
	if (WARN(offset != addr.offset, "wrong offset (expected %x, got %x)\n",
			offset, addr.offset))
		return -EIO;
	if (WARN(length != addr.length, "wrong length (expected %x, got %x)\n",
			length, addr.length))
		return -EIO;
	if (WARN(addr.seq, "addr.seq is %#x\n", addr.seq))
		return -EIO;
	/*
	 * We cannot check addr->port as it is set to the upstream port of the
	 * sender.
	 */
	return 0;
}
static struct tb_cfg_result decode_error(struct ctl_pkg *response)
{
	struct cfg_error_pkg *pkg = response->buffer;
	struct tb_cfg_result res = { 0 };
	res.response_route = get_route(pkg->header);
	res.response_port = 0;
	res.err = check_header(response, sizeof(*pkg), TB_CFG_PKG_ERROR,
			       get_route(pkg->header));
	if (res.err)
		return res;

	WARN(pkg->zero1, "pkg->zero1 is %#x\n", pkg->zero1);
	WARN(pkg->zero2, "pkg->zero2 is %#x\n", pkg->zero2);
	WARN(pkg->zero3, "pkg->zero3 is %#x\n", pkg->zero3);
	res.err = 1;
	res.tb_error = pkg->error;
	res.response_port = pkg->port;
	return res;
}
static struct tb_cfg_result parse_header(struct ctl_pkg *pkg, u32 len,
					 enum tb_cfg_pkg_type type, u64 route)
{
	struct tb_cfg_header *header = pkg->buffer;
	struct tb_cfg_result res = { 0 };

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return decode_error(pkg);

	res.response_port = 0; /* will be updated later for cfg_read/write */
	res.response_route = get_route(*header);
	res.err = check_header(pkg, len, type, route);
	return res;
}
static void tb_cfg_print_error(struct tb_ctl *ctl,
			       const struct tb_cfg_result *res)
{
	WARN_ON(res->err != 1);
	switch (res->tb_error) {
	case TB_CFG_ERROR_PORT_NOT_CONNECTED:
		/* Port is not connected. This can happen during surprise
		 * removal. Do not warn. */
		return;
	case TB_CFG_ERROR_INVALID_CONFIG_SPACE:
		/*
		 * Invalid cfg_space/offset/length combination in
		 * cfg_read/cfg_write.
		 */
		tb_ctl_WARN(ctl,
			"CFG_ERROR(%llx:%x): Invalid config space or offset\n",
			res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_NO_SUCH_PORT:
		/*
		 * - The route contains a non-existent port.
		 * - The route contains a non-PHY port (e.g. PCIe).
		 * - The port in cfg_read/cfg_write does not exist.
		 */
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Invalid port\n",
			res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_LOOP:
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n",
			res->response_route, res->response_port);
		return;
	default:
		/* 5,6,7,9 and 11 are also valid error codes */
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n",
			res->response_route, res->response_port);
		return;
	}
}
static void cpu_to_be32_array(__be32 *dst, u32 *src, size_t len)
{
	int i;
	for (i = 0; i < len; i++)
		dst[i] = cpu_to_be32(src[i]);
}

static void be32_to_cpu_array(u32 *dst, __be32 *src, size_t len)
{
	int i;
	for (i = 0; i < len; i++)
		dst[i] = be32_to_cpu(src[i]);
}
static __be32 tb_crc(void *data, size_t len)
{
	return cpu_to_be32(~__crc32c_le(~0, data, len));
}
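
/*
 * Note: tb_crc() produces the checksum that tb_ctl_tx() appends after the
 * (already big-endian) payload and that tb_ctl_rx_callback() verifies before
 * a received frame is accepted.
 */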
static void tb_ctl_pkg_free(struct ctl_pkg *pkg)
{
	if (pkg) {
		dma_pool_free(pkg->ctl->frame_pool,
			      pkg->buffer, pkg->frame.buffer_phy);
		kfree(pkg);
	}
}
static struct ctl_pkg *tb_ctl_pkg_alloc(struct tb_ctl *ctl)
{
	struct ctl_pkg *pkg = kzalloc(sizeof(*pkg), GFP_KERNEL);
	if (!pkg)
		return NULL;
	pkg->ctl = ctl;
	pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL,
				     &pkg->frame.buffer_phy);
	if (!pkg->buffer) {
		kfree(pkg);
		return NULL;
	}
	return pkg;
}
static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
			       bool canceled)
{
	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
	tb_ctl_pkg_free(pkg);
}
/**
 * tb_ctl_tx() - transmit a packet on the control channel
 *
 * len must be a multiple of four.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_ctl_tx(struct tb_ctl *ctl, void *data, size_t len,
		     enum tb_cfg_pkg_type type)
{
	int res;
	struct ctl_pkg *pkg;
	if (len % 4 != 0) { /* required for le->be conversion */
		tb_ctl_WARN(ctl, "TX: invalid size: %zu\n", len);
		return -EINVAL;
	}
	if (len > TB_FRAME_SIZE - 4) { /* checksum is 4 bytes */
		tb_ctl_WARN(ctl, "TX: packet too large: %zu/%d\n",
			    len, TB_FRAME_SIZE - 4);
		return -EINVAL;
	}
	pkg = tb_ctl_pkg_alloc(ctl);
	if (!pkg)
		return -ENOMEM;
	pkg->frame.callback = tb_ctl_tx_callback;
	pkg->frame.size = len + 4;
	pkg->frame.sof = type;
	pkg->frame.eof = type;
	cpu_to_be32_array(pkg->buffer, data, len / 4);
	*(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len);

	res = ring_tx(ctl->tx, &pkg->frame);
	if (res) /* ring is stopped */
		tb_ctl_pkg_free(pkg);
	return res;
}
/**
 * tb_ctl_handle_plug_event() - acknowledge a plug event, invoke ctl->callback
 */
static void tb_ctl_handle_plug_event(struct tb_ctl *ctl,
				     struct ctl_pkg *response)
{
	struct cfg_event_pkg *pkg = response->buffer;
	u64 route = get_route(pkg->header);

	if (check_header(response, sizeof(*pkg), TB_CFG_PKG_EVENT, route)) {
		tb_ctl_warn(ctl, "malformed TB_CFG_PKG_EVENT\n");
		return;
	}

	if (tb_cfg_error(ctl, route, pkg->port, TB_CFG_ERROR_ACK_PLUG_EVENT))
		tb_ctl_warn(ctl, "could not ack plug event on %llx:%x\n",
			    route, pkg->port);

	WARN(pkg->zero, "pkg->zero is %#x\n", pkg->zero);
	ctl->callback(ctl->callback_data, route, pkg->port, pkg->unplug);
}
static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
{
	ring_rx(pkg->ctl->rx, &pkg->frame); /*
					     * We ignore failures during stop.
					     * All rx packets are referenced
					     * from ctl->rx_packets, so we do
					     * not lose them.
					     */
}
static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
			       bool canceled)
{
	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);

	if (canceled)
		return; /*
			 * ring is stopped, packet is referenced from
			 * ctl->rx_packets.
			 */

	if (frame->size < 4 || frame->size % 4 != 0) {
		tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet\n",
			   frame->size);
		goto rx;
	}

	frame->size -= 4; /* remove checksum */
	if (*(__be32 *) (pkg->buffer + frame->size)
			!= tb_crc(pkg->buffer, frame->size)) {
		tb_ctl_err(pkg->ctl,
			   "RX: checksum mismatch, dropping packet\n");
		goto rx;
	}
	be32_to_cpu_array(pkg->buffer, pkg->buffer, frame->size / 4);

	if (frame->eof == TB_CFG_PKG_EVENT) {
		tb_ctl_handle_plug_event(pkg->ctl, pkg);
		goto rx;
	}
	if (!kfifo_put(&pkg->ctl->response_fifo, pkg)) {
		tb_ctl_err(pkg->ctl, "RX: fifo is full\n");
		goto rx;
	}
	complete(&pkg->ctl->response_ready);
	return;
rx:
	tb_ctl_rx_submit(pkg);
}
/**
 * tb_ctl_rx() - receive a packet from the control channel
 */
static struct tb_cfg_result tb_ctl_rx(struct tb_ctl *ctl, void *buffer,
				      size_t length, int timeout_msec,
				      u64 route, enum tb_cfg_pkg_type type)
{
	struct tb_cfg_result res;
	struct ctl_pkg *pkg;

	if (!wait_for_completion_timeout(&ctl->response_ready,
					 msecs_to_jiffies(timeout_msec))) {
		tb_ctl_WARN(ctl, "RX: timeout\n");
		return (struct tb_cfg_result) { .err = -ETIMEDOUT };
	}
	if (!kfifo_get(&ctl->response_fifo, &pkg)) {
		tb_ctl_WARN(ctl, "empty kfifo\n");
		return (struct tb_cfg_result) { .err = -EIO };
	}

	res = parse_header(pkg, length, type, route);
	if (!res.err)
		memcpy(buffer, pkg->buffer, length);
	tb_ctl_rx_submit(pkg);
	return res;
}
/* public interface, alloc/start/stop/free */

/**
 * tb_ctl_alloc() - allocate a control channel
 *
 * cb will be invoked once for every hot plug event.
 *
 * Return: Returns a pointer on success or NULL on failure.
 */
struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, hotplug_cb cb, void *cb_data)
{
	int i;
	struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
	if (!ctl)
		return NULL;
	ctl->nhi = nhi;
	ctl->callback = cb;
	ctl->callback_data = cb_data;

	init_completion(&ctl->response_ready);
	INIT_KFIFO(ctl->response_fifo);
	ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev,
					  TB_FRAME_SIZE, 4, 0);
	if (!ctl->frame_pool)
		goto err;

	ctl->tx = ring_alloc_tx(nhi, 0, 10);
	if (!ctl->tx)
		goto err;

	ctl->rx = ring_alloc_rx(nhi, 0, 10);
	if (!ctl->rx)
		goto err;

	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) {
		ctl->rx_packets[i] = tb_ctl_pkg_alloc(ctl);
		if (!ctl->rx_packets[i])
			goto err;
		ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback;
	}

	tb_ctl_info(ctl, "control channel created\n");
	return ctl;
err:
	tb_ctl_free(ctl);
	return NULL;
}
/**
 * tb_ctl_free() - free a control channel
 *
 * Must be called after tb_ctl_stop.
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_free(struct tb_ctl *ctl)
{
	int i;
	if (ctl->rx)
		ring_free(ctl->rx);
	if (ctl->tx)
		ring_free(ctl->tx);

	/* free RX packets */
	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
		tb_ctl_pkg_free(ctl->rx_packets[i]);

	if (ctl->frame_pool)
		dma_pool_destroy(ctl->frame_pool);
	kfree(ctl);
}
/**
 * tb_ctl_start() - start/resume the control channel
 */
void tb_ctl_start(struct tb_ctl *ctl)
{
	int i;
	tb_ctl_info(ctl, "control channel starting...\n");
	ring_start(ctl->tx); /* is used to ack hotplug packets, start first */
	ring_start(ctl->rx);
	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
		tb_ctl_rx_submit(ctl->rx_packets[i]);
}
/**
 * tb_ctl_stop() - pause the control channel
 *
 * All invocations of ctl->callback will have finished after this method
 * returns.
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_stop(struct tb_ctl *ctl)
{
	ring_stop(ctl->rx);
	ring_stop(ctl->tx);

	if (!kfifo_is_empty(&ctl->response_fifo))
		tb_ctl_WARN(ctl, "dangling response in response_fifo\n");
	kfifo_reset(&ctl->response_fifo);
	tb_ctl_info(ctl, "control channel stopped\n");
}
/* public interface, commands */

/**
 * tb_cfg_error() - send error packet
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port,
		 enum tb_cfg_error error)
{
	struct cfg_error_pkg pkg = {
		.header = make_header(route),
		.port = port,
		.error = error,
	};
	tb_ctl_info(ctl, "resetting error on %llx:%x.\n", route, port);
	return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
}
/**
 * tb_cfg_reset() - send a reset packet and wait for a response
 *
 * If the switch at route is incorrectly configured then we will not receive a
 * reply (even though the switch will reset). The caller should check for
 * -ETIMEDOUT and attempt to reconfigure the switch.
 */
struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
				  int timeout_msec)
{
	int err;
	struct cfg_reset_pkg request = { .header = make_header(route) };
	struct tb_cfg_header reply;

	err = tb_ctl_tx(ctl, &request, sizeof(request), TB_CFG_PKG_RESET);
	if (err)
		return (struct tb_cfg_result) { .err = err };

	return tb_ctl_rx(ctl, &reply, sizeof(reply), timeout_msec, route,
			 TB_CFG_PKG_RESET);
}
/**
 * tb_cfg_read_raw() - read from config space into buffer
 *
 * Offset and length are in dwords.
 */
struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
		u64 route, u32 port, enum tb_cfg_space space,
		u32 offset, u32 length, int timeout_msec)
{
	struct tb_cfg_result res = { 0 };
	struct cfg_read_pkg request = {
		.header = make_header(route),
		.addr = {
			.port = port,
			.space = space,
			.offset = offset,
			.length = length,
		},
	};
	struct cfg_write_pkg reply;

	res.err = tb_ctl_tx(ctl, &request, sizeof(request), TB_CFG_PKG_READ);
	if (res.err)
		return res;

	res = tb_ctl_rx(ctl, &reply, 12 + 4 * length, timeout_msec, route,
			TB_CFG_PKG_READ);
	if (res.err)
		return res;

	res.response_port = reply.addr.port;
	res.err = check_config_address(reply.addr, space, offset, length);
	if (!res.err)
		memcpy(buffer, &reply.data, 4 * length);
	return res;
}
/**
 * tb_cfg_write_raw() - write from buffer into config space
 *
 * Offset and length are in dwords.
 */
struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, void *buffer,
		u64 route, u32 port, enum tb_cfg_space space,
		u32 offset, u32 length, int timeout_msec)
{
	struct tb_cfg_result res = { 0 };
	struct cfg_write_pkg request = {
		.header = make_header(route),
		.addr = {
			.port = port,
			.space = space,
			.offset = offset,
			.length = length,
		},
	};
	struct cfg_read_pkg reply;

	memcpy(&request.data, buffer, length * 4);

	res.err = tb_ctl_tx(ctl, &request, 12 + 4 * length, TB_CFG_PKG_WRITE);
	if (res.err)
		return res;

	res = tb_ctl_rx(ctl, &reply, sizeof(reply), timeout_msec, route,
			TB_CFG_PKG_WRITE);
	if (res.err)
		return res;

	res.response_port = reply.addr.port;
	res.err = check_config_address(reply.addr, space, offset, length);
	return res;
}
int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
		enum tb_cfg_space space, u32 offset, u32 length)
{
	struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port,
			space, offset, length, TB_CFG_DEFAULT_TIMEOUT);
	if (res.err == 1) {
		tb_cfg_print_error(ctl, &res);
		return -EIO;
	}
	WARN(res.err, "tb_cfg_read: %d\n", res.err);
	return res.err;
}
int tb_cfg_write(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
		 enum tb_cfg_space space, u32 offset, u32 length)
{
	struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port,
			space, offset, length, TB_CFG_DEFAULT_TIMEOUT);
	if (res.err == 1) {
		tb_cfg_print_error(ctl, &res);
		return -EIO;
	}
	WARN(res.err, "tb_cfg_write: %d\n", res.err);
	return res.err;
}
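
/*
 * Usage sketch (illustrative only): reading a single dword from a switch
 * config space with the wrapper above. The route value, port 0 and offset 0
 * are placeholders chosen for the example; offset and length are in dwords.
 */
#if 0
static int example_read_switch_dword(struct tb_ctl *ctl, u64 route, u32 *value)
{
	/* port 0 with TB_CFG_SWITCH mirrors tb_cfg_get_upstream_port() below */
	return tb_cfg_read(ctl, value, route, 0, TB_CFG_SWITCH, 0, 1);
}
#endif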
/**
 * tb_cfg_get_upstream_port() - get upstream port number of switch at route
 *
 * Reads the first dword from the switch's TB_CFG_SWITCH config area and
 * returns the port number from which the reply originated.
 *
 * Return: Returns the upstream port number on success or an error code on
 * failure.
 */
int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route)
{
	u32 dummy;
	struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0,
						   TB_CFG_SWITCH, 0, 1,
						   TB_CFG_DEFAULT_TIMEOUT);
	if (res.err == 1)
		return -EIO;
	if (res.err)
		return res.err;
	return res.response_port;
}