/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Thunderbolt service API
 *
 * Copyright (C) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#ifndef THUNDERBOLT_H_
#define THUNDERBOLT_H_

#include <linux/device.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/mod_devicetable.h>
#include <linux/pci.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>
enum tb_cfg_pkg_type {
	TB_CFG_PKG_NOTIFY_ACK = 4,
	TB_CFG_PKG_XDOMAIN_REQ = 6,
	TB_CFG_PKG_XDOMAIN_RESP = 7,
	TB_CFG_PKG_OVERRIDE = 8,
	TB_CFG_PKG_ICM_EVENT = 10,
	TB_CFG_PKG_ICM_CMD = 11,
	TB_CFG_PKG_ICM_RESP = 12,
	TB_CFG_PKG_PREPARE_TO_SLEEP = 13,
};
/**
 * enum tb_security_level - Thunderbolt security level
 * @TB_SECURITY_NONE: No security, legacy mode
 * @TB_SECURITY_USER: User approval required at minimum
 * @TB_SECURITY_SECURE: One time saved key required at minimum
 * @TB_SECURITY_DPONLY: Only tunnel Display Port (and USB)
 * @TB_SECURITY_USBONLY: Only tunnel USB controller of the connected
 *			 Thunderbolt dock (and Display Port). All PCIe
 *			 links downstream of the dock are removed.
 */
enum tb_security_level {
	TB_SECURITY_NONE,
	TB_SECURITY_USER,
	TB_SECURITY_SECURE,
	TB_SECURITY_DPONLY,
	TB_SECURITY_USBONLY,
};
/**
 * struct tb - main thunderbolt bus structure
 * @lock: Big lock. Must be held when accessing any struct
 *	  tb_switch / struct tb_port.
 * @nhi: Pointer to the NHI structure
 * @ctl: Control channel for this domain
 * @wq: Ordered workqueue for all domain specific work
 * @root_switch: Root switch of this domain
 * @cm_ops: Connection manager specific operations vector
 * @index: Linux assigned domain number
 * @security_level: Current security level
 * @nboot_acl: Number of boot ACLs the domain supports
 * @privdata: Private connection manager specific data
 */
struct tb {
	struct workqueue_struct *wq;
	struct tb_switch *root_switch;
	const struct tb_cm_ops *cm_ops;
	enum tb_security_level security_level;
	unsigned long privdata[0];
};
extern struct bus_type tb_bus_type;
extern struct device_type tb_service_type;
extern struct device_type tb_xdomain_type;
#define TB_LINKS_PER_PHY_PORT	2

static inline unsigned int tb_phy_port_from_link(unsigned int link)
{
	return (link - 1) / TB_LINKS_PER_PHY_PORT;
}
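/*
 * Illustrative note (not part of the original header): links are
 * numbered starting from 1 and each physical port carries
 * TB_LINKS_PER_PHY_PORT links, so for example:
 *
 *	tb_phy_port_from_link(1);	yields 0
 *	tb_phy_port_from_link(2);	yields 0
 *	tb_phy_port_from_link(3);	yields 1
 */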
/**
 * struct tb_property_dir - XDomain property directory
 * @uuid: Directory UUID or %NULL if root directory
 * @properties: List of properties in this directory
 *
 * Callers must provide their own serialization if needed.
 */
struct tb_property_dir {
	const uuid_t *uuid;
	struct list_head properties;
};
enum tb_property_type {
	TB_PROPERTY_TYPE_UNKNOWN = 0x00,
	TB_PROPERTY_TYPE_DIRECTORY = 0x44,
	TB_PROPERTY_TYPE_DATA = 0x64,
	TB_PROPERTY_TYPE_TEXT = 0x74,
	TB_PROPERTY_TYPE_VALUE = 0x76,
};

#define TB_PROPERTY_KEY_SIZE	8
/**
 * struct tb_property - XDomain property
 * @list: Used to link properties together in a directory
 * @key: Key for the property (always terminated).
 * @type: Type of the property
 * @length: Length of the property data in dwords
 * @value: Property value
 *
 * Users use @type to determine which field in @value is filled.
 */
struct tb_property {
	struct list_head list;
	char key[TB_PROPERTY_KEY_SIZE + 1];
	enum tb_property_type type;
	struct tb_property_dir *dir;
};
struct tb_property_dir *tb_property_parse_dir(const u32 *block,
					      size_t block_len);
ssize_t tb_property_format_dir(const struct tb_property_dir *dir, u32 *block,
			       size_t block_len);
struct tb_property_dir *tb_property_create_dir(const uuid_t *uuid);
void tb_property_free_dir(struct tb_property_dir *dir);
int tb_property_add_immediate(struct tb_property_dir *parent, const char *key,
			      u32 value);
int tb_property_add_data(struct tb_property_dir *parent, const char *key,
			 const void *buf, size_t buflen);
int tb_property_add_text(struct tb_property_dir *parent, const char *key,
			 const char *text);
int tb_property_add_dir(struct tb_property_dir *parent, const char *key,
			struct tb_property_dir *dir);
void tb_property_remove(struct tb_property *tb_property);
struct tb_property *tb_property_find(struct tb_property_dir *dir,
				     const char *key,
				     enum tb_property_type type);
struct tb_property *tb_property_get_next(struct tb_property_dir *dir,
					 struct tb_property *prev);
#define tb_property_for_each(dir, property)			\
	for (property = tb_property_get_next(dir, NULL);	\
	     property;						\
	     property = tb_property_get_next(dir, property))
int tb_register_property_dir(const char *key, struct tb_property_dir *dir);
void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
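/*
 * Example (illustrative sketch, not part of the original header): a
 * driver can advertise its protocol to remote domains by building a
 * property directory and registering it under a key. The "example"
 * key, the directory UUID example_dir_uuid and the property values
 * below are hypothetical; error handling is abbreviated.
 *
 *	struct tb_property_dir *dir;
 *	int ret;
 *
 *	dir = tb_property_create_dir(&example_dir_uuid);
 *	if (!dir)
 *		return -ENOMEM;
 *
 *	tb_property_add_immediate(dir, "prtcid", 1);
 *	tb_property_add_immediate(dir, "prtcvers", 1);
 *	tb_property_add_text(dir, "device", "Example device");
 *
 *	ret = tb_register_property_dir("example", dir);
 *	if (ret) {
 *		tb_property_free_dir(dir);
 *		return ret;
 *	}
 */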
/**
 * struct tb_xdomain - Cross-domain (XDomain) connection
 * @dev: XDomain device
 * @tb: Pointer to the domain
 * @remote_uuid: UUID of the remote domain (host)
 * @local_uuid: Cached local UUID
 * @route: Route string through which the other domain can be reached
 * @vendor: Vendor ID of the remote domain
 * @device: Device ID of the remote domain
 * @lock: Lock to serialize access to the following fields of this structure
 * @vendor_name: Name of the vendor (or %NULL if not known)
 * @device_name: Name of the device (or %NULL if not known)
 * @is_unplugged: The XDomain is unplugged
 * @resume: The XDomain is being resumed
 * @transmit_path: HopID which the remote end expects us to transmit
 * @transmit_ring: Local ring (hop) where outgoing packets are pushed
 * @receive_path: HopID which we expect the remote end to transmit
 * @receive_ring: Local ring (hop) where incoming packets arrive
 * @service_ids: Used to generate IDs for the services
 * @properties: Properties exported by the remote domain
 * @property_block_gen: Generation of @properties
 * @properties_lock: Lock protecting @properties.
 * @get_properties_work: Work used to get remote domain properties
 * @properties_retries: Number of times left to read properties
 * @properties_changed_work: Work used to notify the remote domain that
 *			     our properties have changed
 * @properties_changed_retries: Number of times left to send properties
 *				changed notification
 * @link: Root switch link the remote domain is connected to (ICM only)
 * @depth: Depth in the chain the remote domain is connected at (ICM only)
 *
 * This structure represents a connection across two domains (hosts).
 * Each XDomain contains zero or more services which are exposed as
 * &struct tb_service objects.
 *
 * Service drivers may access this structure if they need to enumerate
 * non-standard properties but they need to hold @lock when doing so
 * because properties can be changed asynchronously in response to
 * changes in the remote domain.
 */
struct tb_xdomain {
	const uuid_t *local_uuid;
	const char *vendor_name;
	const char *device_name;
	struct ida service_ids;
	struct tb_property_dir *properties;
	u32 property_block_gen;
	struct delayed_work get_properties_work;
	int properties_retries;
	struct delayed_work properties_changed_work;
	int properties_changed_retries;
};
int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path,
			    u16 transmit_ring, u16 receive_path,
			    u16 receive_ring);
int tb_xdomain_disable_paths(struct tb_xdomain *xd);
struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid);
struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route);
static inline struct tb_xdomain *
tb_xdomain_find_by_uuid_locked(struct tb *tb, const uuid_t *uuid)
{
	struct tb_xdomain *xd;

	mutex_lock(&tb->lock);
	xd = tb_xdomain_find_by_uuid(tb, uuid);
	mutex_unlock(&tb->lock);

	return xd;
}
static inline struct tb_xdomain *
tb_xdomain_find_by_route_locked(struct tb *tb, u64 route)
{
	struct tb_xdomain *xd;

	mutex_lock(&tb->lock);
	xd = tb_xdomain_find_by_route(tb, route);
	mutex_unlock(&tb->lock);

	return xd;
}
static inline struct tb_xdomain *tb_xdomain_get(struct tb_xdomain *xd)
{
	if (xd)
		get_device(&xd->dev);
	return xd;
}
static inline void tb_xdomain_put(struct tb_xdomain *xd)
{
	if (xd)
		put_device(&xd->dev);
}
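/*
 * Example (sketch): looking up a remote domain while holding the domain
 * lock. This assumes the find helpers return a referenced XDomain that
 * must be balanced with tb_xdomain_put(); tb, remote_uuid and
 * example_use() are hypothetical caller-owned objects.
 *
 *	struct tb_xdomain *xd;
 *
 *	xd = tb_xdomain_find_by_uuid_locked(tb, &remote_uuid);
 *	if (xd) {
 *		example_use(xd);
 *		tb_xdomain_put(xd);
 *	}
 */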
static inline bool tb_is_xdomain(const struct device *dev)
{
	return dev->type == &tb_xdomain_type;
}
static inline struct tb_xdomain *tb_to_xdomain(struct device *dev)
{
	if (tb_is_xdomain(dev))
		return container_of(dev, struct tb_xdomain, dev);
	return NULL;
}
int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
			size_t size, enum tb_cfg_pkg_type type);
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
		       size_t request_size, enum tb_cfg_pkg_type request_type,
		       void *response, size_t response_size,
		       enum tb_cfg_pkg_type response_type,
		       unsigned int timeout_msec);
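/*
 * Example (sketch): a protocol driver sending a request to the remote
 * domain and waiting up to a timeout for the matching reply. The
 * example_req/example_resp structures and the 1000 ms timeout are
 * hypothetical, and the XDomain request/response packet types shown
 * are an assumption; the types to pass depend on the protocol in use.
 *
 *	struct example_req req = { 0 };
 *	struct example_resp resp = { 0 };
 *	int ret;
 *
 *	ret = tb_xdomain_request(xd, &req, sizeof(req),
 *				 TB_CFG_PKG_XDOMAIN_REQ,
 *				 &resp, sizeof(resp),
 *				 TB_CFG_PKG_XDOMAIN_RESP, 1000);
 *	if (ret)
 *		return ret;
 */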
/**
 * struct tb_protocol_handler - Protocol specific handler
 * @uuid: XDomain messages with this UUID are dispatched to this handler
 * @callback: Callback called with the XDomain message. Returning %1
 *	      here tells the XDomain core that the message was handled
 *	      by this handler and should not be forwarded to other
 *	      handlers.
 * @data: Data passed with the callback
 * @list: Handlers are linked using this
 *
 * Thunderbolt services can hook into incoming XDomain requests by
 * registering a protocol handler. The only limitation is that the
 * XDomain discovery protocol UUID cannot be registered since it is
 * handled by the core XDomain code.
 *
 * The @callback must check that the message is really directed to the
 * service the driver implements.
 */
struct tb_protocol_handler {
	const uuid_t *uuid;
	int (*callback)(const void *buf, size_t size, void *data);
	void *data;
	struct list_head list;
};
int tb_register_protocol_handler(struct tb_protocol_handler *handler);
void tb_unregister_protocol_handler(struct tb_protocol_handler *handler);
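/*
 * Example (sketch): hooking into incoming XDomain messages for a
 * driver-specific protocol. example_proto_uuid, example_msg_for_us()
 * and example_handle() are hypothetical; returning 1 tells the core
 * that the message was consumed.
 *
 *	static int example_cb(const void *buf, size_t size, void *data)
 *	{
 *		if (!example_msg_for_us(buf, size))
 *			return 0;
 *		example_handle(buf, size, data);
 *		return 1;
 *	}
 *
 *	static struct tb_protocol_handler example_handler = {
 *		.uuid = &example_proto_uuid,
 *		.callback = example_cb,
 *	};
 *
 *	tb_register_protocol_handler(&example_handler);
 */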
/**
 * struct tb_service - Thunderbolt service
 * @dev: XDomain device
 * @id: ID of the service (shown in sysfs)
 * @key: Protocol key from the properties directory
 * @prtcid: Protocol ID from the properties directory
 * @prtcvers: Protocol version from the properties directory
 * @prtcrevs: Protocol software revision from the properties directory
 * @prtcstns: Protocol settings mask from the properties directory
 *
 * Each domain exposes a set of services it supports as a collection of
 * properties. For each service there will be one corresponding
 * &struct tb_service. Service drivers are bound to these.
 */
static inline struct tb_service *tb_service_get(struct tb_service *svc)
{
	if (svc)
		get_device(&svc->dev);
	return svc;
}
static inline void tb_service_put(struct tb_service *svc)
{
	if (svc)
		put_device(&svc->dev);
}
static inline bool tb_is_service(const struct device *dev)
{
	return dev->type == &tb_service_type;
}
static inline struct tb_service *tb_to_service(struct device *dev)
{
	if (tb_is_service(dev))
		return container_of(dev, struct tb_service, dev);
	return NULL;
}
/**
 * struct tb_service_driver - Thunderbolt service driver
 * @driver: Driver structure
 * @probe: Called when the driver is probed
 * @remove: Called when the driver is removed (optional)
 * @shutdown: Called at shutdown time to stop the service (optional)
 * @id_table: Table of service identifiers the driver supports
 */
struct tb_service_driver {
	struct device_driver driver;
	int (*probe)(struct tb_service *svc, const struct tb_service_id *id);
	void (*remove)(struct tb_service *svc);
	void (*shutdown)(struct tb_service *svc);
	const struct tb_service_id *id_table;
};
#define TB_SERVICE(key, id)				\
	.match_flags = TBSVC_MATCH_PROTOCOL_KEY |	\
		       TBSVC_MATCH_PROTOCOL_ID,		\
	.protocol_key = (key),				\
	.protocol_id = (id)

int tb_register_service_driver(struct tb_service_driver *drv);
void tb_unregister_service_driver(struct tb_service_driver *drv);
static inline void *tb_service_get_drvdata(const struct tb_service *svc)
{
	return dev_get_drvdata(&svc->dev);
}
static inline void tb_service_set_drvdata(struct tb_service *svc, void *data)
{
	dev_set_drvdata(&svc->dev, data);
}
static inline struct tb_xdomain *tb_service_parent(struct tb_service *svc)
{
	return tb_to_xdomain(svc->dev.parent);
}
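/*
 * Example (sketch): a minimal service driver matched by protocol key
 * and ID. The "example" key, protocol ID 1 and the example_* symbols
 * are hypothetical.
 *
 *	static const struct tb_service_id example_ids[] = {
 *		{ TB_SERVICE("example", 1) },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(tbsvc, example_ids);
 *
 *	static int example_probe(struct tb_service *svc,
 *				 const struct tb_service_id *id)
 *	{
 *		struct example_priv *priv;
 *
 *		priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 *		if (!priv)
 *			return -ENOMEM;
 *
 *		priv->xd = tb_service_parent(svc);
 *		tb_service_set_drvdata(svc, priv);
 *		return 0;
 *	}
 *
 *	static void example_remove(struct tb_service *svc)
 *	{
 *		kfree(tb_service_get_drvdata(svc));
 *	}
 *
 *	static struct tb_service_driver example_driver = {
 *		.driver = {
 *			.name = "example",
 *		},
 *		.probe = example_probe,
 *		.remove = example_remove,
 *		.id_table = example_ids,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return tb_register_service_driver(&example_driver);
 *	}
 *	module_init(example_init);
 */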
/**
 * struct tb_nhi - thunderbolt native host interface
 * @lock: Must be held during ring creation/destruction. Is acquired by
 *	  interrupt_work when dispatching interrupts to individual rings.
 * @pdev: Pointer to the PCI device
 * @iobase: MMIO space of the NHI
 * @tx_rings: All Tx rings available on this host controller
 * @rx_rings: All Rx rings available on this host controller
 * @msix_ida: Used to allocate MSI-X vectors for rings
 * @going_away: The host controller device is about to disappear so when
 *		this flag is set, avoid touching the hardware anymore.
 * @interrupt_work: Work scheduled to handle ring interrupt when no
 *		    MSI-X is used.
 * @hop_count: Number of rings (end point hops) supported by NHI.
 */
struct tb_nhi {
	struct pci_dev *pdev;
	void __iomem *iobase;
	struct tb_ring **tx_rings;
	struct tb_ring **rx_rings;
	struct work_struct interrupt_work;
};
/**
 * struct tb_ring - thunderbolt TX or RX ring associated with a NHI
 * @lock: Lock serializing actions to this ring. Must be acquired after
 *	  the @nhi->lock.
 * @nhi: Pointer to the native host controller interface
 * @size: Size of the ring
 * @hop: Hop (DMA channel) associated with this ring
 * @head: Head of the ring (write next descriptor here)
 * @tail: Tail of the ring (complete next descriptor here)
 * @descriptors: Allocated descriptors for this ring
 * @queue: Queue holding frames to be transferred over this ring
 * @in_flight: Queue holding frames that are currently in flight
 * @work: Interrupt work structure
 * @is_tx: Is the ring Tx or Rx
 * @running: Is the ring running
 * @irq: MSI-X irq number if the ring uses MSI-X. %0 otherwise.
 * @vector: MSI-X vector number the ring uses (only set if @irq is > 0)
 * @flags: Ring specific flags
 * @sof_mask: Bit mask used to detect start of frame PDF
 * @eof_mask: Bit mask used to detect end of frame PDF
 * @start_poll: Called when ring interrupt is triggered to start
 *		polling. Passing %NULL keeps the ring in interrupt mode.
 * @poll_data: Data passed to @start_poll
 */
struct tb_ring {
	struct ring_desc *descriptors;
	dma_addr_t descriptors_dma;
	struct list_head queue;
	struct list_head in_flight;
	struct work_struct work;
	void (*start_poll)(void *data);
};
/* Leave ring interrupt enabled on suspend */
#define RING_FLAG_NO_SUSPEND	BIT(0)
/* Configure the ring to be in frame mode */
#define RING_FLAG_FRAME		BIT(1)
/* Enable end-to-end flow control */
#define RING_FLAG_E2E		BIT(2)
typedef void (*ring_cb)(struct tb_ring *, struct ring_frame *, bool canceled);
/**
 * enum ring_desc_flags - Flags for DMA ring descriptor
 * %RING_DESC_ISOCH: Enable isochronous DMA (Tx only)
 * %RING_DESC_CRC_ERROR: In frame mode CRC check failed for the frame (Rx only)
 * %RING_DESC_COMPLETED: Descriptor completed (set by NHI)
 * %RING_DESC_POSTED: Always set this
 * %RING_DESC_BUFFER_OVERRUN: RX buffer overrun
 * %RING_DESC_INTERRUPT: Request an interrupt on completion
 */
enum ring_desc_flags {
	RING_DESC_ISOCH = 0x1,
	RING_DESC_CRC_ERROR = 0x1,
	RING_DESC_COMPLETED = 0x2,
	RING_DESC_POSTED = 0x4,
	RING_DESC_BUFFER_OVERRUN = 0x04,
	RING_DESC_INTERRUPT = 0x8,
};
/**
 * struct ring_frame - For use with tb_ring_rx()/tb_ring_tx()
 * @buffer_phy: DMA mapped address of the frame
 * @callback: Callback called when the frame is finished (optional)
 * @list: Frame is linked to a queue using this
 * @size: Size of the frame in bytes (%0 means %4096)
 * @flags: Flags for the frame (see &enum ring_desc_flags)
 * @eof: End of frame protocol defined field
 * @sof: Start of frame protocol defined field
 */
struct ring_frame {
	dma_addr_t buffer_phy;
	struct list_head list;
};
/* Minimum size for tb_ring_rx() */
#define TB_FRAME_SIZE		0x100

struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags);
struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags, u16 sof_mask,
				 u16 eof_mask, void (*start_poll)(void *),
				 void *poll_data);
void tb_ring_start(struct tb_ring *ring);
void tb_ring_stop(struct tb_ring *ring);
void tb_ring_free(struct tb_ring *ring);

int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame);
/**
 * tb_ring_rx() - enqueue a frame on an RX ring
 * @ring: Ring to enqueue the frame
 * @frame: Frame to enqueue
 *
 * @frame->buffer, @frame->buffer_phy have to be set. The buffer must
 * contain at least %TB_FRAME_SIZE bytes.
 *
 * @frame->callback will be invoked with @frame->size, @frame->flags,
 * @frame->eof, @frame->sof set once the frame has been received.
 *
 * If tb_ring_stop() is called after the packet has been enqueued
 * @frame->callback will be called with canceled set to true.
 *
 * Return: Returns %-ESHUTDOWN if tb_ring_stop() has been called. Zero otherwise.
 */
static inline int tb_ring_rx(struct tb_ring *ring, struct ring_frame *frame)
{
	WARN_ON(ring->is_tx);
	return __tb_ring_enqueue(ring, frame);
}
/**
 * tb_ring_tx() - enqueue a frame on a TX ring
 * @ring: Ring to enqueue the frame
 * @frame: Frame to enqueue
 *
 * @frame->buffer, @frame->buffer_phy, @frame->size, @frame->eof and
 * @frame->sof have to be set.
 *
 * @frame->callback will be invoked once the frame has been transmitted.
 *
 * If tb_ring_stop() is called after the packet has been enqueued
 * @frame->callback will be called with canceled set to true.
 *
 * Return: Returns %-ESHUTDOWN if tb_ring_stop() has been called. Zero otherwise.
 */
static inline int tb_ring_tx(struct tb_ring *ring, struct ring_frame *frame)
{
	WARN_ON(!ring->is_tx);
	return __tb_ring_enqueue(ring, frame);
}
/* Used only when the ring is in polling mode */
struct ring_frame *tb_ring_poll(struct tb_ring *ring);
void tb_ring_poll_complete(struct tb_ring *ring);
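/*
 * Example (sketch): draining a ring that was allocated in polling mode
 * (a non-NULL @start_poll was passed to tb_ring_alloc_rx()). When the
 * ring interrupt fires, @start_poll is invoked; the driver then pulls
 * completed frames with tb_ring_poll() and finally calls
 * tb_ring_poll_complete() to signal that polling is done. example_priv,
 * example_handle_frame() and the scheduling of example_poll() from
 * @start_poll are hypothetical.
 *
 *	static void example_poll(struct example_priv *priv)
 *	{
 *		struct ring_frame *frame;
 *
 *		while ((frame = tb_ring_poll(priv->rx_ring)) != NULL)
 *			example_handle_frame(priv, frame);
 *
 *		tb_ring_poll_complete(priv->rx_ring);
 *	}
 */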
/**
 * tb_ring_dma_device() - Return device used for DMA mapping
 * @ring: Ring whose DMA device is retrieved
 *
 * Use this function when you are mapping DMA for buffers that are
 * passed to the ring for sending/receiving.
 */
static inline struct device *tb_ring_dma_device(struct tb_ring *ring)
{
	return &ring->nhi->pdev->dev;
}
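/*
 * Example (sketch): preparing a receive buffer and queuing it on an Rx
 * ring. The buffer is mapped against the DMA device returned by
 * tb_ring_dma_device(). buf, priv and example_rx_callback() are
 * hypothetical; @frame->size, @frame->flags, @frame->eof and
 * @frame->sof are filled in once the frame completes (see tb_ring_rx()
 * above).
 *
 *	struct device *dma_dev = tb_ring_dma_device(ring);
 *	struct ring_frame *frame = &priv->frame;
 *
 *	frame->buffer = buf;
 *	frame->buffer_phy = dma_map_single(dma_dev, buf, TB_FRAME_SIZE,
 *					   DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dma_dev, frame->buffer_phy))
 *		return -ENOMEM;
 *
 *	frame->callback = example_rx_callback;
 *	return tb_ring_rx(ring, frame);
 */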
#endif /* THUNDERBOLT_H_ */