/*
 * Copyright © 2014 Red Hat.
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission.  The copyright holders make no representations
 * about the suitability of this software for any purpose.  It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */
#ifndef _DRM_DP_MST_HELPER_H_
#define _DRM_DP_MST_HELPER_H_

#include <linux/types.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_atomic.h>

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stackdepot.h>
#include <linux/timekeeping.h>
enum drm_dp_mst_topology_ref_type {
	DRM_DP_MST_TOPOLOGY_REF_GET,
	DRM_DP_MST_TOPOLOGY_REF_PUT,
};

struct drm_dp_mst_topology_ref_history {
	struct drm_dp_mst_topology_ref_entry {
		enum drm_dp_mst_topology_ref_type type;
		depot_stack_handle_t backtrace;
	} *entries;
	int len;
};
#endif /* IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) */
struct drm_dp_mst_branch;
/**
 * struct drm_dp_vcpi - Virtual Channel Payload Identifier
 * @vcpi: Virtual channel ID.
 * @pbn: Payload Bandwidth Number for this channel
 * @aligned_pbn: PBN aligned with slot size
 * @num_slots: number of slots for this PBN
 */
struct drm_dp_vcpi {
	int vcpi;
	int pbn;
	int aligned_pbn;
	int num_slots;
};
/**
 * struct drm_dp_mst_port - MST port
 * @port_num: port number
 * @input: if this port is an input port. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @mcs: message capability status - DP 1.2 spec. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @ddps: DisplayPort Device Plug Status - DP 1.2. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @pdt: Peer Device Type. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @ldps: Legacy Device Plug Status. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @dpcd_rev: DPCD revision of device on this port. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @num_sdp_streams: Number of simultaneous streams. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @num_sdp_stream_sinks: Number of stream sinks. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @full_pbn: Max possible bandwidth for this port. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @next: link to next port on this branch device
 * @aux: i2c aux transport to talk to device connected to this port, protected
 * by &drm_dp_mst_topology_mgr.base.lock.
 * @parent: branch device parent of this port
 * @vcpi: Virtual Channel Payload info for this port.
 * @connector: DRM connector this port is connected to. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @mgr: topology manager this port lives under.
 *
 * This structure represents an MST port endpoint on a device somewhere
 * in the MST topology.
 */
struct drm_dp_mst_port {
	/**
	 * @topology_kref: refcount for this port's lifetime in the topology,
	 * only the DP MST helpers should need to touch this
	 */
	struct kref topology_kref;

	/**
	 * @malloc_kref: refcount for the memory allocation containing this
	 * structure. See drm_dp_mst_get_port_malloc() and
	 * drm_dp_mst_put_port_malloc().
	 */
	struct kref malloc_kref;

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
	/**
	 * @topology_ref_history: A history of each topology
	 * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS.
	 */
	struct drm_dp_mst_topology_ref_history topology_ref_history;
#endif

	u8 num_sdp_stream_sinks;

	struct list_head next;
	/**
	 * @mstb: the branch device connected to this port, if there is one.
	 * This should be considered protected for reading by
	 * &drm_dp_mst_topology_mgr.lock. There are two exceptions to this:
	 * &drm_dp_mst_topology_mgr.up_req_work and
	 * &drm_dp_mst_topology_mgr.work, which do not grab
	 * &drm_dp_mst_topology_mgr.lock during reads but are the only
	 * updaters of this list and are protected from writing concurrently
	 * by &drm_dp_mst_topology_mgr.probe_lock.
	 */
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_aux aux; /* i2c bus for this port? */
	struct drm_dp_mst_branch *parent;

	struct drm_dp_vcpi vcpi;
	struct drm_connector *connector;
	struct drm_dp_mst_topology_mgr *mgr;

	/**
	 * @cached_edid: for DP logical ports - make tiling work by ensuring
	 * that the EDID for all connectors is read immediately.
	 */
	struct edid *cached_edid;
	/**
	 * @has_audio: Tracks whether the sink connector to this port is
	 * audio-capable.
	 */
	bool has_audio;
	/**
	 * @fec_capable: bool indicating if FEC can be supported up to that
	 * point in the MST topology.
	 */
	bool fec_capable;
};
/* sideband msg header - not bit struct */
struct drm_dp_sideband_msg_hdr {
	/* ... */
};

struct drm_dp_sideband_msg_rx {
	/* ... */
	u8 curchunk_idx; /* chunk we are parsing now */
	u8 curlen; /* total length of the msg */
	/* ... */
	struct drm_dp_sideband_msg_hdr initial_hdr;
};
/**
 * struct drm_dp_mst_branch - MST branch device.
 * @rad: Relative Address to talk to this branch device.
 * @lct: Link count total to talk to this branch device.
 * @num_ports: number of ports on the branch.
 * @port_parent: pointer to the port parent, NULL if toplevel.
 * @mgr: topology manager for this branch device.
 * @link_address_sent: whether a link address message has been sent to this device yet.
 * @guid: GUID for DP 1.2 branch device. Ports under this branch can be
 * identified by port number.
 *
 * This structure represents an MST branch device; there is one primary
 * branch device at the root, along with any other branches connected
 * to a downstream port of a parent branch.
 */
struct drm_dp_mst_branch {
	/**
	 * @topology_kref: refcount for this branch device's lifetime in the
	 * topology, only the DP MST helpers should need to touch this
	 */
	struct kref topology_kref;

	/**
	 * @malloc_kref: refcount for the memory allocation containing this
	 * structure. See drm_dp_mst_get_mstb_malloc() and
	 * drm_dp_mst_put_mstb_malloc().
	 */
	struct kref malloc_kref;

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
	/**
	 * @topology_ref_history: A history of each topology
	 * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS.
	 */
	struct drm_dp_mst_topology_ref_history topology_ref_history;
#endif

	/**
	 * @destroy_next: linked-list entry used by
	 * drm_dp_delayed_destroy_work()
	 */
	struct list_head destroy_next;
	/**
	 * @ports: the list of ports on this branch device. This should be
	 * considered protected for reading by &drm_dp_mst_topology_mgr.lock.
	 * There are two exceptions to this:
	 * &drm_dp_mst_topology_mgr.up_req_work and
	 * &drm_dp_mst_topology_mgr.work, which do not grab
	 * &drm_dp_mst_topology_mgr.lock during reads but are the only
	 * updaters of this list and are protected from updating the list
	 * concurrently by &drm_dp_mst_topology_mgr.probe_lock.
	 */
	struct list_head ports;

	struct drm_dp_mst_port *port_parent;
	struct drm_dp_mst_topology_mgr *mgr;

	bool link_address_sent;

	/* global unique identifier to identify branch devices */
	u8 guid[16];
};
struct drm_dp_nak_reply {
	/* ... */
};

struct drm_dp_link_address_ack_reply {
	/* ... */
};

struct drm_dp_link_addr_reply_port {
	/* ... */
	bool legacy_device_plug_status;
	/* ... */
	u8 num_sdp_stream_sinks;
};

struct drm_dp_remote_dpcd_read_ack_reply {
	/* ... */
};

struct drm_dp_remote_dpcd_write_ack_reply {
	/* ... */
};

struct drm_dp_remote_dpcd_write_nak_reply {
	/* ... */
	u8 bytes_written_before_failure;
};

struct drm_dp_remote_i2c_read_ack_reply {
	/* ... */
};

struct drm_dp_remote_i2c_read_nak_reply {
	/* ... */
	u8 i2c_nak_transaction;
};

struct drm_dp_remote_i2c_write_ack_reply {
	/* ... */
};
#define DRM_DP_MAX_SDP_STREAMS 16
struct drm_dp_allocate_payload {
	/* ... */
	u8 number_sdp_streams;
	/* ... */
	u8 sdp_stream_sink[DRM_DP_MAX_SDP_STREAMS];
};

struct drm_dp_allocate_payload_ack_reply {
	/* ... */
};

struct drm_dp_connection_status_notify {
	/* ... */
	bool legacy_device_plug_status;
	bool displayport_device_plug_status;
	bool message_capability_status;
	/* ... */
};
struct drm_dp_remote_dpcd_read {
	/* ... */
};

struct drm_dp_remote_dpcd_write {
	/* ... */
};

#define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4
struct drm_dp_remote_i2c_read {
	/* ... */
	struct drm_dp_remote_i2c_read_tx {
		/* ... */
		u8 i2c_transaction_delay;
	} transactions[DP_REMOTE_I2C_READ_MAX_TRANSACTIONS];
	u8 read_i2c_device_id;
	/* ... */
};

struct drm_dp_remote_i2c_write {
	/* ... */
	u8 write_i2c_device_id;
	/* ... */
};
/* this covers ENUM_RESOURCES, POWER_DOWN_PHY, POWER_UP_PHY */
struct drm_dp_port_number_req {
	/* ... */
};

struct drm_dp_enum_path_resources_ack_reply {
	/* ... */
	u16 full_payload_bw_number;
	u16 avail_payload_bw_number;
};

/* covers POWER_DOWN_PHY, POWER_UP_PHY */
struct drm_dp_port_number_rep {
	/* ... */
};

struct drm_dp_query_payload {
	/* ... */
};

struct drm_dp_resource_status_notify {
	/* ... */
};

struct drm_dp_query_payload_ack_reply {
	/* ... */
};
struct drm_dp_sideband_msg_req_body {
	/* ... */
	struct drm_dp_connection_status_notify conn_stat;
	struct drm_dp_port_number_req port_num;
	struct drm_dp_resource_status_notify resource_stat;

	struct drm_dp_query_payload query_payload;
	struct drm_dp_allocate_payload allocate_payload;

	struct drm_dp_remote_dpcd_read dpcd_read;
	struct drm_dp_remote_dpcd_write dpcd_write;

	struct drm_dp_remote_i2c_read i2c_read;
	struct drm_dp_remote_i2c_write i2c_write;
	/* ... */
};
struct drm_dp_sideband_msg_reply_body {
	/* ... */
	struct drm_dp_nak_reply nak;
	struct drm_dp_link_address_ack_reply link_addr;
	struct drm_dp_port_number_rep port_number;

	struct drm_dp_enum_path_resources_ack_reply path_resources;
	struct drm_dp_allocate_payload_ack_reply allocate_payload;
	struct drm_dp_query_payload_ack_reply query_payload;

	struct drm_dp_remote_dpcd_read_ack_reply remote_dpcd_read_ack;
	struct drm_dp_remote_dpcd_write_ack_reply remote_dpcd_write_ack;
	struct drm_dp_remote_dpcd_write_nak_reply remote_dpcd_write_nack;

	struct drm_dp_remote_i2c_read_ack_reply remote_i2c_read_ack;
	struct drm_dp_remote_i2c_read_nak_reply remote_i2c_read_nack;
	struct drm_dp_remote_i2c_write_ack_reply remote_i2c_write_ack;
	/* ... */
};
/* msg is queued to be put into a slot */
#define DRM_DP_SIDEBAND_TX_QUEUED 0
/* msg has started transmitting on a slot - still on msgq */
#define DRM_DP_SIDEBAND_TX_START_SEND 1
/* msg has finished transmitting on a slot - removed from msgq only in slot */
#define DRM_DP_SIDEBAND_TX_SENT 2
/* msg has received a response - removed from slot */
#define DRM_DP_SIDEBAND_TX_RX 3
#define DRM_DP_SIDEBAND_TX_TIMEOUT 4
struct drm_dp_sideband_msg_tx {
	/* ... */
	struct drm_dp_mst_branch *dst;
	struct list_head next;
	/* ... */
	struct drm_dp_sideband_msg_reply_body reply;
};
/* sideband msg handler */
struct drm_dp_mst_topology_mgr;
struct drm_dp_mst_topology_cbs {
	/* create a connector for a port */
	struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr,
						struct drm_dp_mst_port *port,
						const char *path);
	/*
	 * Checks for any pending MST interrupts, passing them to MST core for
	 * processing, the same way an HPD IRQ pulse handler would do this.
	 * If provided, MST core calls this callback from a poll-waiting loop
	 * when waiting for MST down message replies. The driver is expected
	 * to guard against a race between this callback and the driver's HPD
	 * IRQ pulse handler.
	 */
	void (*poll_hpd_irq)(struct drm_dp_mst_topology_mgr *mgr);
};
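/*
 * Illustrative sketch (not part of the original header): a driver would
 * typically provide these callbacks as a static const table and point
 * &drm_dp_mst_topology_mgr.cbs at it before enabling MST. The example_*
 * function names below are hypothetical driver implementations.
 *
 *	struct drm_connector *
 *	example_add_connector(struct drm_dp_mst_topology_mgr *mgr,
 *			      struct drm_dp_mst_port *port, const char *path);
 *	void example_poll_hpd_irq(struct drm_dp_mst_topology_mgr *mgr);
 *
 *	static const struct drm_dp_mst_topology_cbs example_mst_cbs = {
 *		.add_connector = example_add_connector,
 *		.poll_hpd_irq = example_poll_hpd_irq,
 *	};
 */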
#define DP_MAX_PAYLOAD (sizeof(unsigned long) * 8)

#define DP_PAYLOAD_LOCAL 1
#define DP_PAYLOAD_REMOTE 2
#define DP_PAYLOAD_DELETE_LOCAL 3

struct drm_dp_payload {
	/* ... */
};

#define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base)
struct drm_dp_vcpi_allocation {
	struct drm_dp_mst_port *port;
	/* ... */
	struct list_head next;
};

struct drm_dp_mst_topology_state {
	struct drm_private_state base;
	struct list_head vcpis;
	struct drm_dp_mst_topology_mgr *mgr;
};

#define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base)
/**
 * struct drm_dp_mst_topology_mgr - DisplayPort MST manager
 *
 * This struct represents the top-level DisplayPort MST topology manager.
 * There should be one instance of this for every MST capable DP connector
 * on the GPU.
 */
struct drm_dp_mst_topology_mgr {
	/**
	 * @base: Base private object for atomic
	 */
	struct drm_private_obj base;

	/**
	 * @dev: device pointer for adding i2c devices etc.
	 */
	struct drm_device *dev;
	/**
	 * @cbs: callbacks for connector addition and destruction.
	 */
	const struct drm_dp_mst_topology_cbs *cbs;
	/**
	 * @max_dpcd_transaction_bytes: maximum number of bytes to read/write
	 * in one go.
	 */
	int max_dpcd_transaction_bytes;
	/**
	 * @aux: AUX channel for the DP MST connector this topology mgr is
	 * controlling.
	 */
	struct drm_dp_aux *aux;
	/**
	 * @max_payloads: maximum number of payloads the GPU can generate.
	 */
	int max_payloads;
	/**
	 * @conn_base_id: DRM connector ID this mgr is connected to. Only used
	 * to build the MST connector path value.
	 */
	int conn_base_id;

	/**
	 * @up_req_recv: Message receiver state for up requests.
	 */
	struct drm_dp_sideband_msg_rx up_req_recv;
	/**
	 * @down_rep_recv: Message receiver state for replies to down
	 * requests.
	 */
	struct drm_dp_sideband_msg_rx down_rep_recv;

	/**
	 * @lock: protects @mst_state, @mst_primary, @dpcd, and
	 * @payload_id_table_cleared.
	 */
	struct mutex lock;

	/**
	 * @probe_lock: Prevents @work and @up_req_work, the only writers of
	 * &drm_dp_mst_port.mstb and &drm_dp_mst_branch.ports, from racing
	 * while they update the topology.
	 */
	struct mutex probe_lock;

	/**
	 * @mst_state: If this manager is enabled for an MST capable port. False
	 * if no MST sink/branch device is connected.
	 */
	bool mst_state : 1;

	/**
	 * @payload_id_table_cleared: Whether or not we've cleared the payload
	 * ID table for @mst_primary. Protected by @lock.
	 */
	bool payload_id_table_cleared : 1;
	/**
	 * @mst_primary: Pointer to the primary/first branch device.
	 */
	struct drm_dp_mst_branch *mst_primary;

	/**
	 * @dpcd: Cache of DPCD for primary port.
	 */
	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	/**
	 * @sink_count: Sink count from DEVICE_SERVICE_IRQ_VECTOR_ESI0.
	 */
	u8 sink_count;
	/**
	 * @pbn_div: PBN to slots divisor.
	 */
	int pbn_div;

	/**
	 * @funcs: Atomic helper callbacks
	 */
	const struct drm_private_state_funcs *funcs;
	/**
	 * @qlock: protects @tx_msg_downq and &drm_dp_sideband_msg_tx.state
	 */
	struct mutex qlock;

	/**
	 * @tx_msg_downq: List of pending down requests
	 */
	struct list_head tx_msg_downq;

	/**
	 * @payload_lock: Protect payload information.
	 */
	struct mutex payload_lock;
	/**
	 * @proposed_vcpis: Array of pointers for the new VCPI allocation. The
	 * VCPI structure itself is &drm_dp_mst_port.vcpi, and the size of
	 * this array is determined by @max_payloads.
	 */
	struct drm_dp_vcpi **proposed_vcpis;
	/**
	 * @payloads: Array of payloads. The size of this array is determined
	 * by @max_payloads.
	 */
	struct drm_dp_payload *payloads;
	/**
	 * @payload_mask: Elements of @payloads actually in use. Since
	 * reallocation of active outputs isn't possible, gaps can be created
	 * by disabling outputs out of order compared to how they've been
	 * enabled.
	 */
	unsigned long payload_mask;
	/**
	 * @vcpi_mask: Similar to @payload_mask, but for @proposed_vcpis.
	 */
	unsigned long vcpi_mask;

	/**
	 * @tx_waitq: Wait to queue stall for the tx worker.
	 */
	wait_queue_head_t tx_waitq;
	/**
	 * @work: Probe work.
	 */
	struct work_struct work;
	/**
	 * @tx_work: Sideband transmit worker. This can nest within the main
	 * @work worker for each transaction @work launches.
	 */
	struct work_struct tx_work;

	/**
	 * @destroy_port_list: List of to be destroyed connectors.
	 */
	struct list_head destroy_port_list;
	/**
	 * @destroy_branch_device_list: List of to be destroyed branch
	 * devices.
	 */
	struct list_head destroy_branch_device_list;
	/**
	 * @delayed_destroy_lock: Protects @destroy_port_list and
	 * @destroy_branch_device_list.
	 */
	struct mutex delayed_destroy_lock;

	/**
	 * @delayed_destroy_wq: Workqueue used for delayed_destroy_work items.
	 * A dedicated WQ makes it possible to drain any requeued work items
	 * on it.
	 */
	struct workqueue_struct *delayed_destroy_wq;

	/**
	 * @delayed_destroy_work: Work item to destroy MST port and branch
	 * devices, needed to avoid locking inversion.
	 */
	struct work_struct delayed_destroy_work;

	/**
	 * @up_req_list: List of pending up requests from the topology that
	 * need to be processed, in chronological order.
	 */
	struct list_head up_req_list;
	/**
	 * @up_req_lock: Protects @up_req_list
	 */
	struct mutex up_req_lock;
	/**
	 * @up_req_work: Work item to process up requests received from the
	 * topology. Needed to avoid blocking hotplug handling and sideband
	 * transmissions.
	 */
	struct work_struct up_req_work;

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
	/**
	 * @topology_ref_history_lock: protects
	 * &drm_dp_mst_port.topology_ref_history and
	 * &drm_dp_mst_branch.topology_ref_history.
	 */
	struct mutex topology_ref_history_lock;
#endif
};
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_device *dev, struct drm_dp_aux *aux,
				 int max_dpcd_transaction_bytes,
				 int max_payloads, int conn_base_id);
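/*
 * Illustrative sketch (not from the original header): a driver would normally
 * initialize the manager once at connector creation time and only enable MST
 * mode later, when an MST branch device is actually detected, via
 * drm_dp_mst_topology_mgr_set_mst(). The "dp" structure, the callback table
 * and the numeric limits below are hypothetical placeholders.
 *
 *	dp->mst_mgr.cbs = &example_mst_cbs;
 *	ret = drm_dp_mst_topology_mgr_init(&dp->mst_mgr, drm_dev, &dp->aux,
 *					   16,  // max_dpcd_transaction_bytes
 *					   4,   // max_payloads
 *					   connector->base.id);
 *	if (ret)
 *		return ret;
 *
 *	// later, on hotplug, once the sink reports MST capability:
 *	ret = drm_dp_mst_topology_mgr_set_mst(&dp->mst_mgr, true);
 */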
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);

int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state);

int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled);
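/*
 * Illustrative sketch (not from the original header) of how an HPD IRQ pulse
 * handler typically feeds ESI data to the MST core; the retry loop real
 * drivers use and the error handling are omitted:
 *
 *	u8 esi[DP_DPRX_ESI_LEN] = {};
 *	bool handled = false;
 *
 *	drm_dp_dpcd_read(mgr->aux, DP_SINK_COUNT_ESI, esi, DP_DPRX_ESI_LEN);
 *	drm_dp_mst_hpd_irq(mgr, esi, &handled);
 *	if (handled)
 *		// ack the serviced events back to the sink
 *		drm_dp_dpcd_write(mgr->aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
 */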
int
drm_dp_mst_detect_port(struct drm_connector *connector,
		       struct drm_modeset_acquire_ctx *ctx,
		       struct drm_dp_mst_topology_mgr *mgr,
		       struct drm_dp_mst_port *port);

struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
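/*
 * Illustrative sketch (not from the original header): the two helpers above
 * are usually called from an MST connector's &drm_connector_helper_funcs.
 * The connector-to-port lookup (mst_connector_to_port()) is a hypothetical
 * driver helper:
 *
 *	static int example_mst_detect_ctx(struct drm_connector *connector,
 *					  struct drm_modeset_acquire_ctx *ctx,
 *					  bool force)
 *	{
 *		struct drm_dp_mst_port *port = mst_connector_to_port(connector);
 *
 *		return drm_dp_mst_detect_port(connector, ctx, port->mgr, port);
 *	}
 *
 *	static int example_mst_get_modes(struct drm_connector *connector)
 *	{
 *		struct drm_dp_mst_port *port = mst_connector_to_port(connector);
 *		struct edid *edid;
 *		int count;
 *
 *		edid = drm_dp_mst_get_edid(connector, port->mgr, port);
 *		drm_connector_update_edid_property(connector, edid);
 *		count = drm_add_edid_modes(connector, edid);
 *		kfree(edid);
 *		return count;
 *	}
 */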
int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc);
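/*
 * Illustrative sketch (not from the original header): the PBN value for a
 * stream is derived from the mode's pixel clock (in kHz) and the bpp value;
 * with @dsc set, bpp is expected in 1/16th-of-a-bit units. The mode and bpp
 * below are hypothetical:
 *
 *	// 1920x1080@60: 148500 kHz pixel clock, 24 bpp, no DSC
 *	int pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, 24, false);
 */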
bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_mst_port *port, int pbn, int slots);

int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);

void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);

void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
				struct drm_dp_mst_port *port);

int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, int pbn);

int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr);

int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr);

int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);
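/*
 * Illustrative sketch (not from the original header) of the usual payload
 * programming sequence in a driver's modeset path; locking, error handling
 * and the hardware-specific steps are omitted or hypothetical:
 *
 *	// before enabling the stream
 *	drm_dp_mst_allocate_vcpi(mgr, port, pbn, slots);
 *	drm_dp_update_payload_part1(mgr);
 *
 *	// ... program the hardware and start transmitting the stream ...
 *
 *	// after the allocation change trigger (ACT) has been sent
 *	drm_dp_check_act_status(mgr);
 *	drm_dp_update_payload_part2(mgr);
 *
 *	// on disable, release the allocation again
 *	drm_dp_mst_reset_vcpi_slots(mgr, port);
 *	drm_dp_update_payload_part1(mgr);
 *	drm_dp_mst_deallocate_vcpi(mgr, port);
 */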
void drm_dp_mst_dump_topology(struct seq_file *m,
			      struct drm_dp_mst_topology_mgr *mgr);

void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
int __must_check
drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
			       bool sync);
ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
			     unsigned int offset, void *buffer, size_t size);
ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
			      unsigned int offset, void *buffer, size_t size);
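/*
 * Illustrative sketch (not from the original header): drivers rarely call
 * these two functions directly; they sit behind &drm_dp_mst_port.aux, so
 * remote DPCD access looks like any other AUX transaction:
 *
 *	u8 dpcd_rev;
 *
 *	drm_dp_dpcd_readb(&port->aux, DP_DPCD_REV, &dpcd_rev);
 */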
int drm_dp_mst_connector_late_register(struct drm_connector *connector,
				       struct drm_dp_mst_port *port);
void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
					   struct drm_dp_mst_port *port);
struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
								    struct drm_dp_mst_topology_mgr *mgr);
int __must_check
drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
			      struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_mst_port *port, int pbn,
			      int pbn_div);
int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
				 struct drm_dp_mst_port *port,
				 int pbn, int pbn_div,
				 bool enable);
int __must_check
drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state,
				  struct drm_dp_mst_topology_mgr *mgr);
int __must_check
drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
				 struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port);
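/*
 * Illustrative sketch (not from the original header) of how the atomic VCPI
 * helpers are typically used from a connector's ->atomic_check(): slots are
 * reserved in the MST topology state when a stream is (re)enabled and released
 * when it is disabled, with drm_dp_mst_atomic_check() later validating the
 * totals. The bpp value, state variables and connector-to-port lookup are
 * hypothetical:
 *
 *	if (new_conn_state->crtc && crtc_state->mode_changed) {
 *		int pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock,
 *					       24, false);
 *		int slots = drm_dp_atomic_find_vcpi_slots(state, port->mgr,
 *							  port, pbn, 0);
 *		if (slots < 0)
 *			return slots;
 *	} else if (!new_conn_state->crtc) {
 *		int ret = drm_dp_atomic_release_vcpi_slots(state, port->mgr,
 *							   port);
 *		if (ret)
 *			return ret;
 *	}
 */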
int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port, bool power_up);
int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state);

void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port);
void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port);
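/*
 * Illustrative sketch (not from the original header): a driver that stashes a
 * &struct drm_dp_mst_port pointer in a longer-lived object (e.g. its connector
 * wrapper) keeps the memory allocation alive with a malloc reference and drops
 * it again in the destroy path. The example_connector structure is
 * hypothetical:
 *
 *	// in .add_connector() / connector init
 *	example_connector->port = port;
 *	drm_dp_mst_get_port_malloc(port);
 *
 *	// in .destroy() / connector teardown
 *	drm_dp_mst_put_port_malloc(example_connector->port);
 */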
struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port);

extern const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs;
/**
 * __drm_dp_mst_state_iter_get - private atomic state iterator function for
 * macro-internal use
 * @state: &struct drm_atomic_state pointer
 * @mgr: pointer to the &struct drm_dp_mst_topology_mgr iteration cursor
 * @old_state: optional pointer to the old &struct drm_dp_mst_topology_state
 * iteration cursor
 * @new_state: optional pointer to the new &struct drm_dp_mst_topology_state
 * iteration cursor
 * @i: int iteration cursor, for macro-internal use
 *
 * Used by for_each_oldnew_mst_mgr_in_state(),
 * for_each_old_mst_mgr_in_state(), and for_each_new_mst_mgr_in_state(). Don't
 * call this directly.
 *
 * Returns:
 * True if the current &struct drm_private_obj is a &struct
 * drm_dp_mst_topology_mgr, false otherwise.
 */
static inline bool
__drm_dp_mst_state_iter_get(struct drm_atomic_state *state,
			    struct drm_dp_mst_topology_mgr **mgr,
			    struct drm_dp_mst_topology_state **old_state,
			    struct drm_dp_mst_topology_state **new_state,
			    int i)
{
	struct __drm_private_objs_state *objs_state = &state->private_objs[i];

	if (objs_state->ptr->funcs != &drm_dp_mst_topology_state_funcs)
		return false;

	*mgr = to_dp_mst_topology_mgr(objs_state->ptr);
	if (old_state)
		*old_state = to_dp_mst_topology_state(objs_state->old_state);
	if (new_state)
		*new_state = to_dp_mst_topology_state(objs_state->new_state);

	return true;
}
/**
 * for_each_oldnew_mst_mgr_in_state - iterate over all DP MST topology
 * managers in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
 * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old
 * state
 * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new
 * state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all DRM DP MST topology managers in an atomic update,
 * tracking both old and new state. This is useful in places where the state
 * delta needs to be considered, for example in atomic check functions.
 */
#define for_each_oldnew_mst_mgr_in_state(__state, mgr, old_state, new_state, __i) \
	for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
		for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), &(new_state), (__i)))
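/*
 * Illustrative sketch (not from the original header): global atomic check code
 * can walk every MST manager touched by a commit like this; the bandwidth
 * comparison in the body is a hypothetical placeholder:
 *
 *	struct drm_dp_mst_topology_mgr *mgr;
 *	struct drm_dp_mst_topology_state *old_mst_state, *new_mst_state;
 *	int i;
 *
 *	for_each_oldnew_mst_mgr_in_state(state, mgr, old_mst_state,
 *					 new_mst_state, i) {
 *		// compare old_mst_state->vcpis with new_mst_state->vcpis,
 *		// reject the commit if the new allocation cannot fit, etc.
 *	}
 */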
/**
 * for_each_old_mst_mgr_in_state - iterate over all DP MST topology managers
 * in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
 * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old
 * state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all DRM DP MST topology managers in an atomic update,
 * tracking only the old state. This is useful in disable functions, where we
 * need the old state the hardware is still in.
 */
#define for_each_old_mst_mgr_in_state(__state, mgr, old_state, __i) \
	for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
		for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), NULL, (__i)))
/**
 * for_each_new_mst_mgr_in_state - iterate over all DP MST topology managers
 * in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
 * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new
 * state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all DRM DP MST topology managers in an atomic update,
 * tracking only the new state. This is useful in enable functions, where we
 * need the new state the hardware should be in when the atomic commit
 * operation has completed.
 */
#define for_each_new_mst_mgr_in_state(__state, mgr, new_state, __i) \
	for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
		for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), NULL, &(new_state), (__i)))

#endif /* _DRM_DP_MST_HELPER_H_ */