/*
 * Copyright © 2014 Red Hat.
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission. The copyright holders make no representations
 * about the suitability of this software for any purpose. It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */
#ifndef _DRM_DP_MST_HELPER_H_
#define _DRM_DP_MST_HELPER_H_

#include <linux/types.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_atomic.h>
struct drm_dp_mst_branch;
/**
 * struct drm_dp_vcpi - Virtual Channel Payload Identifier
 * @vcpi: Virtual channel ID.
 * @pbn: Payload Bandwidth Number for this channel
 * @aligned_pbn: PBN aligned with slot size
 * @num_slots: number of slots for this PBN
 */
struct drm_dp_vcpi {
	int vcpi;
	int pbn;
	int aligned_pbn;
	int num_slots;
};
/**
 * struct drm_dp_mst_port - MST port
 * @port_num: port number
 * @input: if this port is an input port.
 * @mcs: message capability status - DP 1.2 spec.
 * @ddps: DisplayPort Device Plug Status - DP 1.2
 * @pdt: Peer Device Type
 * @ldps: Legacy Device Plug Status
 * @dpcd_rev: DPCD revision of device on this port
 * @num_sdp_streams: Number of simultaneous streams
 * @num_sdp_stream_sinks: Number of stream sinks
 * @available_pbn: Available bandwidth for this port.
 * @next: link to next port on this branch device
 * @mstb: branch device attached below this port
 * @aux: i2c aux transport to talk to device connected to this port.
 * @parent: branch device parent of this port
 * @vcpi: Virtual Channel Payload info for this port.
 * @connector: DRM connector this port is connected to.
 * @mgr: topology manager this port lives under.
 *
 * This structure represents an MST port endpoint on a device somewhere
 * in the MST topology.
 */
struct drm_dp_mst_port {
	/**
	 * @topology_kref: refcount for this port's lifetime in the topology,
	 * only the DP MST helpers should need to touch this
	 */
	struct kref topology_kref;

	/**
	 * @malloc_kref: refcount for the memory allocation containing this
	 * structure. See drm_dp_mst_get_port_malloc() and
	 * drm_dp_mst_put_port_malloc().
	 */
	struct kref malloc_kref;

	u8 port_num;
	bool input;
	bool mcs;
	bool ddps;
	u8 pdt;
	bool ldps;
	u8 dpcd_rev;
	u8 num_sdp_streams;
	u8 num_sdp_stream_sinks;
	uint16_t available_pbn;
	struct list_head next;
	struct drm_dp_mst_branch *mstb; /* pointer to an mstb if this port has one */
	struct drm_dp_aux aux; /* i2c bus for this port? */
	struct drm_dp_mst_branch *parent;

	struct drm_dp_vcpi vcpi;
	struct drm_connector *connector;
	struct drm_dp_mst_topology_mgr *mgr;

	/**
	 * @cached_edid: for DP logical ports - make tiling work by ensuring
	 * that the EDID for all connectors is read immediately.
	 */
	struct edid *cached_edid;
	/**
	 * @has_audio: Tracks whether the sink connected to this port is
	 * audio-capable.
	 */
	bool has_audio;
};
/**
 * struct drm_dp_mst_branch - MST branch device.
 * @rad: Relative Address to talk to this branch device.
 * @lct: Link count total to talk to this branch device.
 * @num_ports: number of ports on the branch.
 * @msg_slots: one bit per transmitted msg slot.
 * @ports: linked list of ports on this branch.
 * @port_parent: pointer to the port parent, NULL if toplevel.
 * @mgr: topology manager for this branch device.
 * @tx_slots: transmission slots for this device.
 * @last_seqno: last sequence number used to talk to this.
 * @link_address_sent: if a link address message has been sent to this device yet.
 * @guid: guid for DP 1.2 branch device. port under this branch can be
 * identified by port #.
 *
 * This structure represents an MST branch device; there is one
 * primary branch device at the root, along with any other branches connected
 * to downstream ports of parent branches.
 */
struct drm_dp_mst_branch {
	/**
	 * @topology_kref: refcount for this branch device's lifetime in the
	 * topology, only the DP MST helpers should need to touch this
	 */
	struct kref topology_kref;

	/**
	 * @malloc_kref: refcount for the memory allocation containing this
	 * structure. See drm_dp_mst_get_mstb_malloc() and
	 * drm_dp_mst_put_mstb_malloc().
	 */
	struct kref malloc_kref;

	u8 rad[8];
	u8 lct;
	int num_ports;

	int msg_slots;
	struct list_head ports;

	/* list of tx ops queue for this port */
	struct drm_dp_mst_port *port_parent;
	struct drm_dp_mst_topology_mgr *mgr;

	/* slots are protected by mstb->mgr->qlock */
	struct drm_dp_sideband_msg_tx *tx_slots[2];
	int last_seqno;
	bool link_address_sent;

	/* global unique identifier to identify branch devices */
	u8 guid[16];
};
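
/*
 * Example (illustrative sketch, not part of the original header): how the
 * branch/port lists link together. &drm_dp_mst_branch.ports chains the
 * &drm_dp_mst_port.next entries of every port below a branch device. A real
 * caller must hold the proper topology references/locks, which are omitted
 * here.
 */
static inline int example_count_branch_ports(struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_port *port;
	int count = 0;

	list_for_each_entry(port, &mstb->ports, next)
		count++;

	return count;
}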
/* sideband msg header - not bit struct */
struct drm_dp_sideband_msg_hdr {
	u8 lct;
	u8 lcr;
	u8 rad[8];
	bool broadcast;
	bool path_msg;
	u8 msg_len;
	bool somt;
	bool eomt;
	bool seqno;
};

struct drm_dp_nak_reply {
	u8 guid[16];
	u8 reason;
	u8 nak_data;
};

struct drm_dp_link_address_ack_reply {
	u8 guid[16];
	u8 nports;
	struct drm_dp_link_addr_reply_port {
		bool input_port;
		u8 peer_device_type;
		u8 port_number;
		bool mcs;
		bool ddps;
		bool legacy_device_plug_status;
		u8 dpcd_revision;
		u8 peer_guid[16];
		u8 num_sdp_streams;
		u8 num_sdp_stream_sinks;
	} ports[16];
};

struct drm_dp_remote_dpcd_read_ack_reply {
	u8 port_number;
	u8 num_bytes;
	u8 bytes[255];
};

struct drm_dp_remote_dpcd_write_ack_reply {
	u8 port_number;
};

struct drm_dp_remote_dpcd_write_nak_reply {
	u8 port_number;
	u8 reason;
	u8 bytes_written_before_failure;
};

struct drm_dp_remote_i2c_read_ack_reply {
	u8 port_number;
	u8 num_bytes;
	u8 bytes[255];
};

struct drm_dp_remote_i2c_read_nak_reply {
	u8 port_number;
	u8 nak_reason;
	u8 i2c_nak_transaction;
};

struct drm_dp_remote_i2c_write_ack_reply {
	u8 port_number;
};

struct drm_dp_sideband_msg_rx {
	u8 chunk[48];
	u8 msg[256];
	u8 curchunk_len;
	u8 curchunk_idx; /* chunk we are parsing now */
	u8 curchunk_hdrlen;
	u8 curlen; /* total length of the msg */
	bool have_somt;
	bool have_eomt;
	struct drm_dp_sideband_msg_hdr initial_hdr;
};
#define DRM_DP_MAX_SDP_STREAMS 16
struct drm_dp_allocate_payload {
	u8 port_number;
	u8 number_sdp_streams;
	u8 vcpi;
	u16 pbn;
	u8 sdp_stream_sink[DRM_DP_MAX_SDP_STREAMS];
};

struct drm_dp_allocate_payload_ack_reply {
	u8 port_number;
	u8 vcpi;
	u16 allocated_pbn;
};

struct drm_dp_connection_status_notify {
	u8 guid[16];
	u8 port_number;
	bool legacy_device_plug_status;
	bool displayport_device_plug_status;
	bool message_capability_status;
	bool input_port;
	u8 peer_device_type;
};

struct drm_dp_remote_dpcd_read {
	u8 port_number;
	u32 dpcd_address;
	u8 num_bytes;
};

struct drm_dp_remote_dpcd_write {
	u8 port_number;
	u32 dpcd_address;
	u8 num_bytes;
	u8 *bytes;
};

#define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4
struct drm_dp_remote_i2c_read {
	u8 num_transactions;
	u8 port_number;
	struct {
		u8 i2c_dev_id;
		u8 num_bytes;
		u8 *bytes;
		u8 no_stop_bit;
		u8 i2c_transaction_delay;
	} transactions[DP_REMOTE_I2C_READ_MAX_TRANSACTIONS];
	u8 read_i2c_device_id;
	u8 num_bytes_read;
};
struct drm_dp_remote_i2c_write {
	u8 port_number;
	u8 write_i2c_device_id;
	u8 num_bytes;
	u8 *bytes;
};

/* this covers ENUM_RESOURCES, POWER_DOWN_PHY, POWER_UP_PHY */
struct drm_dp_port_number_req {
	u8 port_number;
};

struct drm_dp_enum_path_resources_ack_reply {
	u8 port_number;
	u16 full_payload_bw_number;
	u16 avail_payload_bw_number;
};

/* covers POWER_DOWN_PHY, POWER_UP_PHY */
struct drm_dp_port_number_rep {
	u8 port_number;
};

struct drm_dp_query_payload {
	u8 port_number;
	u8 vcpi;
};

struct drm_dp_resource_status_notify {
	u8 port_number;
	u8 guid[16];
	u16 available_pbn;
};

struct drm_dp_query_payload_ack_reply {
	u8 port_number;
	u16 allocated_pbn;
};
struct drm_dp_sideband_msg_req_body {
	u8 req_type;
	union ack_req {
		struct drm_dp_connection_status_notify conn_stat;
		struct drm_dp_port_number_req port_num;
		struct drm_dp_resource_status_notify resource_stat;

		struct drm_dp_query_payload query_payload;
		struct drm_dp_allocate_payload allocate_payload;

		struct drm_dp_remote_dpcd_read dpcd_read;
		struct drm_dp_remote_dpcd_write dpcd_write;

		struct drm_dp_remote_i2c_read i2c_read;
		struct drm_dp_remote_i2c_write i2c_write;
	} u;
};
struct drm_dp_sideband_msg_reply_body {
	u8 reply_type;
	u8 req_type;
	union ack_replies {
		struct drm_dp_nak_reply nak;
		struct drm_dp_link_address_ack_reply link_addr;
		struct drm_dp_port_number_rep port_number;

		struct drm_dp_enum_path_resources_ack_reply path_resources;
		struct drm_dp_allocate_payload_ack_reply allocate_payload;
		struct drm_dp_query_payload_ack_reply query_payload;

		struct drm_dp_remote_dpcd_read_ack_reply remote_dpcd_read_ack;
		struct drm_dp_remote_dpcd_write_ack_reply remote_dpcd_write_ack;
		struct drm_dp_remote_dpcd_write_nak_reply remote_dpcd_write_nack;

		struct drm_dp_remote_i2c_read_ack_reply remote_i2c_read_ack;
		struct drm_dp_remote_i2c_read_nak_reply remote_i2c_read_nack;
		struct drm_dp_remote_i2c_write_ack_reply remote_i2c_write_ack;
	} u;
};
/* msg is queued to be put into a slot */
#define DRM_DP_SIDEBAND_TX_QUEUED 0
/* msg has started transmitting on a slot - still on msgq */
#define DRM_DP_SIDEBAND_TX_START_SEND 1
/* msg has finished transmitting on a slot - removed from msgq only in slot */
#define DRM_DP_SIDEBAND_TX_SENT 2
/* msg has received a response - removed from slot */
#define DRM_DP_SIDEBAND_TX_RX 3
#define DRM_DP_SIDEBAND_TX_TIMEOUT 4
struct drm_dp_sideband_msg_tx {
	u8 msg[256];
	u8 chunk[48];
	u8 cur_offset;
	u8 cur_len;
	struct drm_dp_mst_branch *dst;
	struct list_head next;
	int seqno;
	int state;
	bool path_msg;
	struct drm_dp_sideband_msg_reply_body reply;
};
/* sideband msg handler */
struct drm_dp_mst_topology_mgr;
struct drm_dp_mst_topology_cbs {
	/* create a connector for a port */
	struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);
	void (*register_connector)(struct drm_connector *connector);
	void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_connector *connector);
};
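
/*
 * Example (sketch): a driver usually provides these callbacks in a static
 * table and points &drm_dp_mst_topology_mgr.cbs at it before initializing the
 * manager, roughly like:
 *
 *	static const struct drm_dp_mst_topology_cbs example_mst_cbs = {
 *		.add_connector = example_add_mst_connector,
 *		.register_connector = example_register_mst_connector,
 *		.destroy_connector = example_destroy_mst_connector,
 *	};
 *
 * The example_* hooks are hypothetical driver functions, not part of this API.
 */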
#define DP_MAX_PAYLOAD (sizeof(unsigned long) * 8)

#define DP_PAYLOAD_LOCAL 1
#define DP_PAYLOAD_REMOTE 2
#define DP_PAYLOAD_DELETE_LOCAL 3

struct drm_dp_payload {
	int payload_state;
	int start_slot;
	int num_slots;
	int vcpi;
};
#define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base)

struct drm_dp_vcpi_allocation {
	struct drm_dp_mst_port *port;
	int vcpi;
	struct list_head next;
};

struct drm_dp_mst_topology_state {
	struct drm_private_state base;
	struct list_head vcpis;
	struct drm_dp_mst_topology_mgr *mgr;
};
#define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base)

/**
 * struct drm_dp_mst_topology_mgr - DisplayPort MST manager
 *
 * This struct represents the top-level DisplayPort MST topology manager.
 * There should be one instance of this for every MST capable DP connector
 * on your driver.
 */
struct drm_dp_mst_topology_mgr {
	/**
	 * @base: Base private object for atomic
	 */
	struct drm_private_obj base;

	/**
	 * @dev: device pointer for adding i2c devices etc.
	 */
	struct drm_device *dev;
	/**
	 * @cbs: callbacks for connector addition and destruction.
	 */
	const struct drm_dp_mst_topology_cbs *cbs;
	/**
	 * @max_dpcd_transaction_bytes: maximum number of bytes to read/write
	 * in one go.
	 */
	int max_dpcd_transaction_bytes;
	/**
	 * @aux: AUX channel for the DP MST connector this topology mgr is
	 * controlling.
	 */
	struct drm_dp_aux *aux;
	/**
	 * @max_payloads: maximum number of payloads the GPU can generate.
	 */
	int max_payloads;
	/**
	 * @conn_base_id: DRM connector ID this mgr is connected to. Only used
	 * to build the MST connector path value.
	 */
	int conn_base_id;

	/**
	 * @down_rep_recv: Message receiver state for down replies. This and
	 * @up_req_recv are only ever accessed from the work item, which is
	 * serialised.
	 */
	struct drm_dp_sideband_msg_rx down_rep_recv;
	/**
	 * @up_req_recv: Message receiver state for up requests. This and
	 * @down_rep_recv are only ever accessed from the work item, which is
	 * serialised.
	 */
	struct drm_dp_sideband_msg_rx up_req_recv;

	/**
	 * @lock: protects mst state, primary, dpcd.
	 */
	struct mutex lock;

	/**
	 * @mst_state: If this manager is enabled for an MST capable port. False
	 * if no MST sink/branch device is connected.
	 */
	bool mst_state;
	/**
	 * @mst_primary: Pointer to the primary/first branch device.
	 */
	struct drm_dp_mst_branch *mst_primary;

	/**
	 * @dpcd: Cache of DPCD for primary port.
	 */
	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	/**
	 * @sink_count: Sink count from DEVICE_SERVICE_IRQ_VECTOR_ESI0.
	 */
	u8 sink_count;
	/**
	 * @pbn_div: PBN to slots divisor.
	 */
	int pbn_div;

	/**
	 * @funcs: Atomic helper callbacks
	 */
	const struct drm_private_state_funcs *funcs;

	/**
	 * @qlock: protects @tx_msg_downq, the &drm_dp_mst_branch.tx_slots and
	 * &drm_dp_sideband_msg_tx.state once they are queued
	 */
	struct mutex qlock;

	/**
	 * @tx_msg_downq: List of pending down requests.
	 */
	struct list_head tx_msg_downq;

	/**
	 * @payload_lock: Protect payload information.
	 */
	struct mutex payload_lock;
	/**
	 * @proposed_vcpis: Array of pointers for the new VCPI allocation. The
	 * VCPI structure itself is &drm_dp_mst_port.vcpi.
	 */
	struct drm_dp_vcpi **proposed_vcpis;
	/**
	 * @payloads: Array of payloads.
	 */
	struct drm_dp_payload *payloads;
	/**
	 * @payload_mask: Elements of @payloads actually in use. Since
	 * reallocation of active outputs isn't possible gaps can be created by
	 * disabling outputs out of order compared to how they've been enabled.
	 */
	unsigned long payload_mask;
	/**
	 * @vcpi_mask: Similar to @payload_mask, but for @proposed_vcpis.
	 */
	unsigned long vcpi_mask;

	/**
	 * @tx_waitq: Wait to queue stall for the tx worker.
	 */
	wait_queue_head_t tx_waitq;
	/**
	 * @work: Probe work.
	 */
	struct work_struct work;
	/**
	 * @tx_work: Sideband transmit worker. This can nest within the main
	 * @work worker for each transaction @work launches.
	 */
	struct work_struct tx_work;

	/**
	 * @destroy_connector_list: List of connectors to be destroyed.
	 */
	struct list_head destroy_connector_list;
	/**
	 * @destroy_connector_lock: Protects @destroy_connector_list.
	 */
	struct mutex destroy_connector_lock;
	/**
	 * @destroy_connector_work: Work item to destroy connectors. Needed to
	 * avoid locking inversion.
	 */
	struct work_struct destroy_connector_work;
};
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_device *dev, struct drm_dp_aux *aux,
				 int max_dpcd_transaction_bytes,
				 int max_payloads, int conn_base_id);

void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);
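
/*
 * Example (sketch, hypothetical driver code): setting up one manager per
 * MST-capable connector. The 16-byte DPCD transaction size and 4 payloads are
 * illustrative values, not requirements of this API; @mgr->cbs is assumed to
 * have been assigned already, and the manager is torn down again with
 * drm_dp_mst_topology_mgr_destroy().
 */
static inline int example_mst_mgr_setup(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_device *dev,
					struct drm_dp_aux *aux,
					int conn_base_id)
{
	return drm_dp_mst_topology_mgr_init(mgr, dev, aux, 16, 4, conn_base_id);
}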
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state);

int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled);
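
/*
 * Example (sketch): feeding an ESI interrupt into the MST state machine from a
 * short-pulse/HPD handler. This assumes the DP_SINK_COUNT_ESI and
 * DP_DPRX_ESI_LEN definitions from drm_dp_helper.h; error handling and acking
 * the ESI bits back to the sink are omitted.
 */
static inline void example_mst_handle_esi(struct drm_dp_mst_topology_mgr *mgr,
					  struct drm_dp_aux *aux)
{
	u8 esi[DP_DPRX_ESI_LEN];
	bool handled = false;

	if (drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, DP_DPRX_ESI_LEN) > 0)
		drm_dp_mst_hpd_irq(mgr, esi, &handled);
}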
enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
						 struct drm_dp_mst_topology_mgr *mgr,
						 struct drm_dp_mst_port *port);

bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
			       struct drm_dp_mst_port *port);
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector,
				 struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port);

int drm_dp_calc_pbn_mode(int clock, int bpp);
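
/*
 * Example (sketch): converting a mode into a Payload Bandwidth Number. The
 * 3 * 8 assumes uncompressed 24bpp RGB output; a driver passes whatever bpp
 * its pipe is actually configured for.
 */
static inline int example_mode_to_pbn(const struct drm_display_mode *mode)
{
	return drm_dp_calc_pbn_mode(mode->clock, 3 * 8);
}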
bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_mst_port *port, int pbn, int slots);

int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_mst_port *port);

void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port);

void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
				struct drm_dp_mst_port *port);
int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
			   int pbn);
int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr);

int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr);

int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);
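
/*
 * Example (sketch): the usual ordering of the legacy (non-atomic) payload
 * helpers around enabling a stream. Error handling and the actual
 * transcoder/stream enable are driver-specific and omitted here.
 */
static inline int example_mst_enable_stream(struct drm_dp_mst_topology_mgr *mgr,
					    struct drm_dp_mst_port *port,
					    int pbn, int slots)
{
	/* 1. Claim a VCPI for this port. */
	if (!drm_dp_mst_allocate_vcpi(mgr, port, pbn, slots))
		return -EINVAL;

	/* 2. Write the new payload table to the branch devices. */
	drm_dp_update_payload_part1(mgr);

	/* ... driver enables its transcoder/stream here ... */

	/* 3. Wait for the allocation change trigger to complete. */
	drm_dp_check_act_status(mgr);

	/* 4. Send the payload allocation downstream. */
	return drm_dp_update_payload_part2(mgr);
}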
void drm_dp_mst_dump_topology(struct seq_file *m,
			      struct drm_dp_mst_topology_mgr *mgr);

void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr);
struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
								    struct drm_dp_mst_topology_mgr *mgr);
int __must_check
drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
			      struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_mst_port *port, int pbn);
int __must_check
drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
				 struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port);
int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port, bool power_up);
int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state);
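
/*
 * Example (sketch): the atomic VCPI helpers are intended to be called from a
 * driver's encoder/connector atomic_check, with drm_dp_mst_atomic_check()
 * validating the resulting topology state at the end of the global check.
 */
static inline int example_mst_atomic_check_slots(struct drm_atomic_state *state,
						 struct drm_dp_mst_topology_mgr *mgr,
						 struct drm_dp_mst_port *port,
						 int pbn)
{
	int slots;

	/* Reserve time slots for this port in the MST topology state... */
	slots = drm_dp_atomic_find_vcpi_slots(state, mgr, port, pbn);
	if (slots < 0)
		return slots;

	/* ...and let the helper verify the whole topology still fits. */
	return drm_dp_mst_atomic_check(state);
}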
void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port);
void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port);

extern const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs;
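
/*
 * Example (sketch): driver code that caches a port pointer (e.g. in a
 * driver-private connector) must hold a malloc reference so the allocation
 * outlives the port's presence in the topology.
 */
static inline void example_connector_bind_port(struct drm_dp_mst_port **cached,
					       struct drm_dp_mst_port *port)
{
	drm_dp_mst_get_port_malloc(port);
	*cached = port;
	/* Balanced later with drm_dp_mst_put_port_malloc(*cached). */
}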
/**
 * __drm_dp_mst_state_iter_get - private atomic state iterator function for
 * macro-internal use
 * @state: &struct drm_atomic_state pointer
 * @mgr: pointer to the &struct drm_dp_mst_topology_mgr iteration cursor
 * @old_state: optional pointer to the old &struct drm_dp_mst_topology_state
 * iteration cursor
 * @new_state: optional pointer to the new &struct drm_dp_mst_topology_state
 * iteration cursor
 * @i: int iteration cursor, for macro-internal use
 *
 * Used by for_each_oldnew_mst_mgr_in_state(),
 * for_each_old_mst_mgr_in_state(), and for_each_new_mst_mgr_in_state(). Don't
 * call this directly.
 *
 * Returns:
 * True if the current &struct drm_private_obj is a &struct
 * drm_dp_mst_topology_mgr, false otherwise.
 */
static inline bool
__drm_dp_mst_state_iter_get(struct drm_atomic_state *state,
			    struct drm_dp_mst_topology_mgr **mgr,
			    struct drm_dp_mst_topology_state **old_state,
			    struct drm_dp_mst_topology_state **new_state,
			    int i)
{
	struct __drm_private_objs_state *objs_state = &state->private_objs[i];

	if (objs_state->ptr->funcs != &drm_dp_mst_topology_state_funcs)
		return false;

	*mgr = to_dp_mst_topology_mgr(objs_state->ptr);
	if (old_state)
		*old_state = to_dp_mst_topology_state(objs_state->old_state);
	if (new_state)
		*new_state = to_dp_mst_topology_state(objs_state->new_state);

	return true;
}
/**
 * for_each_oldnew_mst_mgr_in_state - iterate over all DP MST topology
 * managers in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
 * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old
 * state
 * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new
 * state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all DRM DP MST topology managers in an atomic update,
 * tracking both old and new state. This is useful in places where the state
 * delta needs to be considered, for example in atomic check functions.
 */
#define for_each_oldnew_mst_mgr_in_state(__state, mgr, old_state, new_state, __i) \
	for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
		for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), &(new_state), (__i)))
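
/*
 * Example (sketch): typical use of the iterator from a driver's atomic check,
 * visiting every MST manager touched by the commit. The loop body is a
 * placeholder for driver-specific bookkeeping.
 */
static inline void example_walk_mst_state(struct drm_atomic_state *state)
{
	struct drm_dp_mst_topology_mgr *mgr;
	struct drm_dp_mst_topology_state *old_mst_state, *new_mst_state;
	int i;

	for_each_oldnew_mst_mgr_in_state(state, mgr, old_mst_state, new_mst_state, i) {
		/* Compare old_mst_state->vcpis against new_mst_state->vcpis. */
	}
}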
/**
 * for_each_old_mst_mgr_in_state - iterate over all DP MST topology managers
 * in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
 * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old
 * state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all DRM DP MST topology managers in an atomic update,
 * tracking only the old state. This is useful in disable functions, where we
 * need the old state the hardware is still in.
 */
#define for_each_old_mst_mgr_in_state(__state, mgr, old_state, __i) \
	for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
		for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), NULL, (__i)))
/**
 * for_each_new_mst_mgr_in_state - iterate over all DP MST topology managers
 * in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
 * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new
 * state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all DRM DP MST topology managers in an atomic update,
 * tracking only the new state. This is useful in enable functions, where we
 * need the new state the hardware should be in when the atomic commit
 * operation has completed.
 */
#define for_each_new_mst_mgr_in_state(__state, mgr, new_state, __i) \
	for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
		for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), NULL, &(new_state), (__i)))