/*
 * Copyright © 2014 Red Hat.
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission. The copyright holders make no representations
 * about the suitability of this software for any purpose. It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */
#ifndef _DRM_DP_MST_HELPER_H_
#define _DRM_DP_MST_HELPER_H_

#include <linux/types.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_atomic.h>

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stackdepot.h>
#include <linux/timekeeping.h>

enum drm_dp_mst_topology_ref_type {
	DRM_DP_MST_TOPOLOGY_REF_GET,
	DRM_DP_MST_TOPOLOGY_REF_PUT,
};

struct drm_dp_mst_topology_ref_history {
	struct drm_dp_mst_topology_ref_entry {
		enum drm_dp_mst_topology_ref_type type;
		int count;
		ktime_t ts_nsec;
		depot_stack_handle_t backtrace;
	} *entries;
	int len;
};
#endif /* IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) */

struct drm_dp_mst_branch;

/**
 * struct drm_dp_vcpi - Virtual Channel Payload Identifier
 * @vcpi: Virtual channel ID.
 * @pbn: Payload Bandwidth Number for this channel
 * @aligned_pbn: PBN aligned with slot size
 * @num_slots: number of slots for this PBN
 */
struct drm_dp_vcpi {
	int vcpi;
	int pbn;
	int aligned_pbn;
	int num_slots;
};

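/*
 * Example (illustrative sketch, not part of this header): the number of
 * time slots a stream needs follows from its PBN value and the manager's
 * PBN-per-slot divisor, &drm_dp_mst_topology_mgr.pbn_div:
 *
 *	int slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
 */
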
/**
 * struct drm_dp_mst_port - MST port
 * @port_num: port number
 * @input: if this port is an input port. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @mcs: message capability status - DP 1.2 spec. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @ddps: DisplayPort Device Plug Status - DP 1.2. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @pdt: Peer Device Type. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @ldps: Legacy Device Plug Status. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @dpcd_rev: DPCD revision of device on this port. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @num_sdp_streams: Number of simultaneous streams. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @num_sdp_stream_sinks: Number of stream sinks. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @full_pbn: Max possible bandwidth for this port. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @next: link to next port on this branch device
 * @aux: i2c aux transport to talk to device connected to this port, protected
 * by &drm_dp_mst_topology_mgr.base.lock.
 * @parent: branch device parent of this port
 * @vcpi: Virtual Channel Payload info for this port.
 * @connector: DRM connector this port is connected to. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @mgr: topology manager this port lives under.
 *
 * This structure represents an MST port endpoint on a device somewhere
 * in the MST topology.
 */
struct drm_dp_mst_port {
	/**
	 * @topology_kref: refcount for this port's lifetime in the topology,
	 * only the DP MST helpers should need to touch this
	 */
	struct kref topology_kref;

	/**
	 * @malloc_kref: refcount for the memory allocation containing this
	 * structure. See drm_dp_mst_get_port_malloc() and
	 * drm_dp_mst_put_port_malloc().
	 */
	struct kref malloc_kref;

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
	/**
	 * @topology_ref_history: A history of each topology
	 * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS.
	 */
	struct drm_dp_mst_topology_ref_history topology_ref_history;
#endif

	u8 port_num;
	bool input;
	bool mcs;
	bool ddps;
	u8 pdt;
	bool ldps;
	u8 dpcd_rev;
	u8 num_sdp_streams;
	u8 num_sdp_stream_sinks;
	uint16_t full_pbn;
	struct list_head next;
	/**
	 * @mstb: the branch device connected to this port, if there is one.
	 * This should be considered protected for reading by
	 * &drm_dp_mst_topology_mgr.lock. There are two exceptions to this:
	 * &drm_dp_mst_topology_mgr.up_req_work and
	 * &drm_dp_mst_topology_mgr.work, which do not grab
	 * &drm_dp_mst_topology_mgr.lock during reads but are the only
	 * updaters of this list and are protected from writing concurrently
	 * by &drm_dp_mst_topology_mgr.probe_lock.
	 */
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_aux aux; /* i2c bus for this port? */
	struct drm_dp_mst_branch *parent;

	struct drm_dp_vcpi vcpi;
	struct drm_connector *connector;
	struct drm_dp_mst_topology_mgr *mgr;

	/**
	 * @cached_edid: for DP logical ports - make tiling work by ensuring
	 * that the EDID for all connectors is read immediately.
	 */
	struct edid *cached_edid;
	/**
	 * @has_audio: Tracks whether the sink connector to this port is
	 * audio-capable.
	 */
	bool has_audio;

	/**
	 * @fec_capable: bool indicating if FEC can be supported up to that
	 * point in the MST topology.
	 */
	bool fec_capable;
};

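/*
 * Example (illustrative sketch): driver-private code that stashes a port
 * pointer should pin the allocation with a malloc reference for as long
 * as the pointer is kept around ("my_priv" is a hypothetical driver
 * struct, not part of this header):
 *
 *	drm_dp_mst_get_port_malloc(port);
 *	my_priv->port = port;
 *	...
 *	drm_dp_mst_put_port_malloc(my_priv->port);
 *	my_priv->port = NULL;
 */
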
/* sideband msg header - not bit struct */
struct drm_dp_sideband_msg_hdr {
	u8 lct;
	u8 lcr;
	u8 rad[8];
	bool broadcast;
	bool path_msg;
	u8 msg_len;
	bool somt;
	bool eomt;
	bool seqno;
};

struct drm_dp_sideband_msg_rx {
	u8 chunk[48];
	u8 msg[256];
	u8 curchunk_len;
	u8 curchunk_idx; /* chunk we are parsing now */
	u8 curchunk_hdrlen;
	u8 curlen; /* total length of the msg */
	bool have_somt;
	bool have_eomt;
	struct drm_dp_sideband_msg_hdr initial_hdr;
};

/**
 * struct drm_dp_mst_branch - MST branch device.
 * @rad: Relative Address to talk to this branch device.
 * @lct: Link count total to talk to this branch device.
 * @num_ports: number of ports on the branch.
 * @port_parent: pointer to the port parent, NULL if toplevel.
 * @mgr: topology manager for this branch device.
 * @link_address_sent: if a link address message has been sent to this device yet.
 * @guid: guid for DP 1.2 branch device. port under this branch can be
 * identified by port #.
 *
 * This structure represents an MST branch device, there is one
 * primary branch device at the root, along with any other branches connected
 * to downstream ports of parent branches.
 */
struct drm_dp_mst_branch {
	/**
	 * @topology_kref: refcount for this branch device's lifetime in the
	 * topology, only the DP MST helpers should need to touch this
	 */
	struct kref topology_kref;

	/**
	 * @malloc_kref: refcount for the memory allocation containing this
	 * structure. See drm_dp_mst_get_mstb_malloc() and
	 * drm_dp_mst_put_mstb_malloc().
	 */
	struct kref malloc_kref;

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
	/**
	 * @topology_ref_history: A history of each topology
	 * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS.
	 */
	struct drm_dp_mst_topology_ref_history topology_ref_history;
#endif

	/**
	 * @destroy_next: linked-list entry used by
	 * drm_dp_delayed_destroy_work()
	 */
	struct list_head destroy_next;

	u8 rad[8];
	u8 lct;
	int num_ports;

	/**
	 * @ports: the list of ports on this branch device. This should be
	 * considered protected for reading by &drm_dp_mst_topology_mgr.lock.
	 * There are two exceptions to this:
	 * &drm_dp_mst_topology_mgr.up_req_work and
	 * &drm_dp_mst_topology_mgr.work, which do not grab
	 * &drm_dp_mst_topology_mgr.lock during reads but are the only
	 * updaters of this list and are protected from updating the list
	 * concurrently by &drm_dp_mst_topology_mgr.probe_lock.
	 */
	struct list_head ports;

	struct drm_dp_mst_port *port_parent;
	struct drm_dp_mst_topology_mgr *mgr;

	bool link_address_sent;

	/* global unique identifier to identify branch devices */
	u8 guid[16];
};

struct drm_dp_nak_reply {
	u8 guid[16];
	u8 reason;
	u8 nak_data;
};

struct drm_dp_link_address_ack_reply {
	u8 guid[16];
	u8 nports;
	struct drm_dp_link_addr_reply_port {
		bool input_port;
		u8 peer_device_type;
		u8 port_number;
		bool mcs;
		bool ddps;
		bool legacy_device_plug_status;
		u8 dpcd_revision;
		u8 peer_guid[16];
		u8 num_sdp_streams;
		u8 num_sdp_stream_sinks;
	} ports[16];
};

struct drm_dp_remote_dpcd_read_ack_reply {
	u8 port_number;
	u8 num_bytes;
	u8 bytes[255];
};

struct drm_dp_remote_dpcd_write_ack_reply {
	u8 port_number;
};

struct drm_dp_remote_dpcd_write_nak_reply {
	u8 port_number;
	u8 reason;
	u8 bytes_written_before_failure;
};

struct drm_dp_remote_i2c_read_ack_reply {
	u8 port_number;
	u8 num_bytes;
	u8 bytes[255];
};

struct drm_dp_remote_i2c_read_nak_reply {
	u8 port_number;
	u8 nak_reason;
	u8 i2c_nak_transaction;
};

struct drm_dp_remote_i2c_write_ack_reply {
	u8 port_number;
};

struct drm_dp_query_stream_enc_status_ack_reply {
	/* Bit[23:16]- Stream Id */
	u8 stream_id;

	/* Bit[15]- Signed */
	bool reply_signed;

	/* Bit[10:8]- Stream Output Sink Type */
	bool unauthorizable_device_present;
	bool legacy_device_present;
	bool query_capable_device_present;

	/* Bit[12:11]- Stream Output CP Type */
	bool hdcp_1x_device_present;
	bool hdcp_2x_device_present;

	/* Bit[4]- Stream Authentication */
	bool auth_completed;

	/* Bit[3]- Stream Encryption */
	bool encryption_enabled;

	/* Bit[2]- Stream Repeater Function Present */
	bool repeater_present;

	/* Bit[1:0]- Stream State */
	u8 state;
};

#define DRM_DP_MAX_SDP_STREAMS 16
struct drm_dp_allocate_payload {
	u8 port_number;
	u8 number_sdp_streams;
	u8 vcpi;
	u16 pbn;
	u8 sdp_stream_sink[DRM_DP_MAX_SDP_STREAMS];
};

struct drm_dp_allocate_payload_ack_reply {
	u8 port_number;
	u8 vcpi;
	u16 allocated_pbn;
};

struct drm_dp_connection_status_notify {
	u8 guid[16];
	u8 port_number;
	bool legacy_device_plug_status;
	bool displayport_device_plug_status;
	bool message_capability_status;
	bool input_port;
	u8 peer_device_type;
};

struct drm_dp_remote_dpcd_read {
	u8 port_number;
	u32 dpcd_address;
	u8 num_bytes;
};

struct drm_dp_remote_dpcd_write {
	u8 port_number;
	u32 dpcd_address;
	u8 num_bytes;
	u8 *bytes;
};

#define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4
struct drm_dp_remote_i2c_read {
	u8 num_transactions;
	u8 port_number;
	struct drm_dp_remote_i2c_read_tx {
		u8 i2c_dev_id;
		u8 num_bytes;
		u8 *bytes;
		u8 no_stop_bit;
		u8 i2c_transaction_delay;
	} transactions[DP_REMOTE_I2C_READ_MAX_TRANSACTIONS];
	u8 read_i2c_device_id;
	u8 num_bytes_read;
};

struct drm_dp_remote_i2c_write {
	u8 port_number;
	u8 write_i2c_device_id;
	u8 num_bytes;
	u8 *bytes;
};

struct drm_dp_query_stream_enc_status {
	u8 stream_id;
	u8 client_id[7]; /* 56-bit nonce */
	u8 stream_event;
	bool valid_stream_event;
	u8 stream_behavior;
	u8 valid_stream_behavior;
};

/* this covers ENUM_RESOURCES, POWER_DOWN_PHY, POWER_UP_PHY */
struct drm_dp_port_number_req {
	u8 port_number;
};

struct drm_dp_enum_path_resources_ack_reply {
	u8 port_number;
	bool fec_capable;
	u16 full_payload_bw_number;
	u16 avail_payload_bw_number;
};

/* covers POWER_DOWN_PHY, POWER_UP_PHY */
struct drm_dp_port_number_rep {
	u8 port_number;
};

struct drm_dp_query_payload {
	u8 port_number;
	u8 vcpi;
};

struct drm_dp_resource_status_notify {
	u8 port_number;
	u8 guid[16];
	u16 available_pbn;
};

struct drm_dp_query_payload_ack_reply {
	u8 port_number;
	u16 allocated_pbn;
};

struct drm_dp_sideband_msg_req_body {
	u8 req_type;
	union ack_req {
		struct drm_dp_connection_status_notify conn_stat;
		struct drm_dp_port_number_req port_num;
		struct drm_dp_resource_status_notify resource_stat;

		struct drm_dp_query_payload query_payload;
		struct drm_dp_allocate_payload allocate_payload;

		struct drm_dp_remote_dpcd_read dpcd_read;
		struct drm_dp_remote_dpcd_write dpcd_write;

		struct drm_dp_remote_i2c_read i2c_read;
		struct drm_dp_remote_i2c_write i2c_write;

		struct drm_dp_query_stream_enc_status enc_status;
	} u;
};

struct drm_dp_sideband_msg_reply_body {
	u8 reply_type;
	u8 req_type;
	union ack_replies {
		struct drm_dp_nak_reply nak;
		struct drm_dp_link_address_ack_reply link_addr;
		struct drm_dp_port_number_rep port_number;

		struct drm_dp_enum_path_resources_ack_reply path_resources;
		struct drm_dp_allocate_payload_ack_reply allocate_payload;
		struct drm_dp_query_payload_ack_reply query_payload;

		struct drm_dp_remote_dpcd_read_ack_reply remote_dpcd_read_ack;
		struct drm_dp_remote_dpcd_write_ack_reply remote_dpcd_write_ack;
		struct drm_dp_remote_dpcd_write_nak_reply remote_dpcd_write_nack;

		struct drm_dp_remote_i2c_read_ack_reply remote_i2c_read_ack;
		struct drm_dp_remote_i2c_read_nak_reply remote_i2c_read_nack;
		struct drm_dp_remote_i2c_write_ack_reply remote_i2c_write_ack;

		struct drm_dp_query_stream_enc_status_ack_reply enc_status;
	} u;
};

/* msg is queued to be put into a slot */
#define DRM_DP_SIDEBAND_TX_QUEUED 0
/* msg has started transmitting on a slot - still on msgq */
#define DRM_DP_SIDEBAND_TX_START_SEND 1
/* msg has finished transmitting on a slot - removed from msgq only in slot */
#define DRM_DP_SIDEBAND_TX_SENT 2
/* msg has received a response - removed from slot */
#define DRM_DP_SIDEBAND_TX_RX 3
#define DRM_DP_SIDEBAND_TX_TIMEOUT 4

struct drm_dp_sideband_msg_tx {
	u8 msg[256];
	u8 chunk[48];
	u8 cur_offset;
	u8 cur_len;
	struct drm_dp_mst_branch *dst;
	struct list_head next;
	int seqno;
	int state;
	bool path_msg;
	struct drm_dp_sideband_msg_reply_body reply;
};

/* sideband msg handler */
struct drm_dp_mst_topology_mgr;
struct drm_dp_mst_topology_cbs {
	/* create a connector for a port */
	struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);
	/*
	 * Checks for any pending MST interrupts, passing them to MST core for
	 * processing, the same way an HPD IRQ pulse handler would do this.
	 * If provided, MST core calls this callback from a poll-waiting loop
	 * when waiting for MST down message replies. The driver is expected
	 * to guard against a race between this callback and the driver's HPD
	 * IRQ pulse handler.
	 */
	void (*poll_hpd_irq)(struct drm_dp_mst_topology_mgr *mgr);
};

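/*
 * Example (hedged sketch): drivers usually provide the callbacks as a
 * static table and point &drm_dp_mst_topology_mgr.cbs at it before
 * initializing the manager ("my_mst_add_connector" is a hypothetical
 * driver function, not part of this header):
 *
 *	static const struct drm_dp_mst_topology_cbs my_mst_cbs = {
 *		.add_connector = my_mst_add_connector,
 *	};
 */
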
#define DP_MAX_PAYLOAD (sizeof(unsigned long) * 8)

#define DP_PAYLOAD_LOCAL 1
#define DP_PAYLOAD_REMOTE 2
#define DP_PAYLOAD_DELETE_LOCAL 3

struct drm_dp_payload {
	int payload_state;
	int start_slot;
	int num_slots;
	int vcpi;
};

#define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base)

struct drm_dp_vcpi_allocation {
	struct drm_dp_mst_port *port;
	int vcpi;
	int pbn;
	bool dsc_enabled;
	struct list_head next;
};

struct drm_dp_mst_topology_state {
	struct drm_private_state base;
	struct list_head vcpis;
	struct drm_dp_mst_topology_mgr *mgr;
};

#define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base)

/**
 * struct drm_dp_mst_topology_mgr - DisplayPort MST manager
 *
 * This struct represents the toplevel displayport MST topology manager.
 * There should be one instance of this for every MST capable DP connector
 * on the GPU.
 */
struct drm_dp_mst_topology_mgr {
	/**
	 * @base: Base private object for atomic
	 */
	struct drm_private_obj base;

	/**
	 * @dev: device pointer for adding i2c devices etc.
	 */
	struct drm_device *dev;
	/**
	 * @cbs: callbacks for connector addition and destruction.
	 */
	const struct drm_dp_mst_topology_cbs *cbs;
	/**
	 * @max_dpcd_transaction_bytes: maximum number of bytes to read/write
	 * in one go.
	 */
	int max_dpcd_transaction_bytes;
	/**
	 * @aux: AUX channel for the DP MST connector this topology mgr is
	 * controlling.
	 */
	struct drm_dp_aux *aux;
	/**
	 * @max_payloads: maximum number of payloads the GPU can generate.
	 */
	int max_payloads;
	/**
	 * @conn_base_id: DRM connector ID this mgr is connected to. Only used
	 * to build the MST connector path value.
	 */
	int conn_base_id;

	/**
	 * @up_req_recv: Message receiver state for up requests.
	 */
	struct drm_dp_sideband_msg_rx up_req_recv;

	/**
	 * @down_rep_recv: Message receiver state for replies to down
	 * requests.
	 */
	struct drm_dp_sideband_msg_rx down_rep_recv;

	/**
	 * @lock: protects @mst_state, @mst_primary, @dpcd, and
	 * @payload_id_table_cleared.
	 */
	struct mutex lock;

	/**
	 * @probe_lock: Prevents @work and @up_req_work, the only writers of
	 * &drm_dp_mst_port.mstb and &drm_dp_mst_branch.ports, from racing
	 * while they update the topology.
	 */
	struct mutex probe_lock;

	/**
	 * @mst_state: If this manager is enabled for an MST capable port.
	 * False if no MST sinks/branch devices are connected.
	 */
	bool mst_state : 1;

	/**
	 * @payload_id_table_cleared: Whether or not we've cleared the payload
	 * ID table for @mst_primary. Protected by @lock.
	 */
	bool payload_id_table_cleared : 1;

	/**
	 * @mst_primary: Pointer to the primary/first branch device.
	 */
	struct drm_dp_mst_branch *mst_primary;

	/**
	 * @dpcd: Cache of DPCD for primary port.
	 */
	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	/**
	 * @sink_count: Sink count from DEVICE_SERVICE_IRQ_VECTOR_ESI0.
	 */
	u8 sink_count;
	/**
	 * @pbn_div: PBN to slots divisor.
	 */
	int pbn_div;

	/**
	 * @funcs: Atomic helper callbacks
	 */
	const struct drm_private_state_funcs *funcs;

	/**
	 * @qlock: protects @tx_msg_downq and &drm_dp_sideband_msg_tx.state
	 */
	struct mutex qlock;

	/**
	 * @tx_msg_downq: List of pending down requests
	 */
	struct list_head tx_msg_downq;

	/**
	 * @payload_lock: Protect payload information.
	 */
	struct mutex payload_lock;
	/**
	 * @proposed_vcpis: Array of pointers for the new VCPI allocation. The
	 * VCPI structure itself is &drm_dp_mst_port.vcpi, and the size of
	 * this array is determined by @max_payloads.
	 */
	struct drm_dp_vcpi **proposed_vcpis;
	/**
	 * @payloads: Array of payloads. The size of this array is determined
	 * by @max_payloads.
	 */
	struct drm_dp_payload *payloads;
	/**
	 * @payload_mask: Elements of @payloads actually in use. Since
	 * reallocation of active outputs isn't possible gaps can be created by
	 * disabling outputs out of order compared to how they've been enabled.
	 */
	unsigned long payload_mask;
	/**
	 * @vcpi_mask: Similar to @payload_mask, but for @proposed_vcpis.
	 */
	unsigned long vcpi_mask;

	/**
	 * @tx_waitq: Wait to queue stall for the tx worker.
	 */
	wait_queue_head_t tx_waitq;
	/**
	 * @work: Probe work.
	 */
	struct work_struct work;
	/**
	 * @tx_work: Sideband transmit worker. This can nest within the main
	 * @work worker for each transaction @work launches.
	 */
	struct work_struct tx_work;

	/**
	 * @destroy_port_list: List of to be destroyed connectors.
	 */
	struct list_head destroy_port_list;
	/**
	 * @destroy_branch_device_list: List of to be destroyed branch
	 * devices.
	 */
	struct list_head destroy_branch_device_list;
	/**
	 * @delayed_destroy_lock: Protects @destroy_port_list and
	 * @destroy_branch_device_list.
	 */
	struct mutex delayed_destroy_lock;

	/**
	 * @delayed_destroy_wq: Workqueue used for delayed_destroy_work items.
	 * A dedicated WQ makes it possible to drain any requeued work items
	 * on it.
	 */
	struct workqueue_struct *delayed_destroy_wq;

	/**
	 * @delayed_destroy_work: Work item to destroy MST port and branch
	 * devices, needed to avoid locking inversion.
	 */
	struct work_struct delayed_destroy_work;

	/**
	 * @up_req_list: List of pending up requests from the topology that
	 * need to be processed, in chronological order.
	 */
	struct list_head up_req_list;
	/**
	 * @up_req_lock: Protects @up_req_list
	 */
	struct mutex up_req_lock;
	/**
	 * @up_req_work: Work item to process up requests received from the
	 * topology. Needed to avoid blocking hotplug handling and sideband
	 * transmissions.
	 */
	struct work_struct up_req_work;

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
	/**
	 * @topology_ref_history_lock: protects
	 * &drm_dp_mst_port.topology_ref_history and
	 * &drm_dp_mst_branch.topology_ref_history.
	 */
	struct mutex topology_ref_history_lock;
#endif
};

int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_device *dev, struct drm_dp_aux *aux,
				 int max_dpcd_transaction_bytes,
				 int max_payloads, int conn_base_id);

void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);

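/*
 * Example (illustrative, values are arbitrary): one-time manager setup
 * from a driver's connector init path; "my_dp" is a hypothetical driver
 * struct owning the AUX channel:
 *
 *	my_dp->mst_mgr.cbs = &my_mst_cbs;
 *	ret = drm_dp_mst_topology_mgr_init(&my_dp->mst_mgr, dev, &my_dp->aux,
 *					   16, 12, connector->base.id);
 *	if (ret)
 *		return ret;
 */
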
bool drm_dp_read_mst_cap(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state);

int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled);

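/*
 * Example (hedged sketch of the ESI loop drivers commonly use for MST
 * short HPD pulses; the ack write-back is driver-specific):
 *
 *	u8 esi[DP_DPRX_ESI_LEN];
 *	bool handled = false;
 *
 *	drm_dp_dpcd_read(mgr->aux, DP_SINK_COUNT_ESI, esi, DP_DPRX_ESI_LEN);
 *	drm_dp_mst_hpd_irq(mgr, esi, &handled);
 *	if (handled)
 *		my_ack_esi(mgr->aux, esi);	// hypothetical DPCD write-back
 */
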
int
drm_dp_mst_detect_port(struct drm_connector *connector,
		       struct drm_modeset_acquire_ctx *ctx,
		       struct drm_dp_mst_topology_mgr *mgr,
		       struct drm_dp_mst_port *port);

struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);

int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc);

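/*
 * Example (numbers for illustration): a 1920x1080@60 stream has a pixel
 * clock of about 148500 kHz; at 24bpp without DSC, its bandwidth
 * requirement in PBN would be computed as:
 *
 *	int pbn = drm_dp_calc_pbn_mode(148500, 24, false);
 */
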
bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_mst_port *port, int pbn, int slots);

int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);

void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);

void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
				struct drm_dp_mst_port *port);

int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
			   int pbn);

int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr);

int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr);

int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);

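/*
 * Example (hedged sketch of the two-step payload programming sequence;
 * the exact interleaving with enabling the stream in hardware is
 * driver-specific):
 *
 *	drm_dp_mst_allocate_vcpi(mgr, port, pbn, slots);
 *	drm_dp_update_payload_part1(mgr);
 *	// ... start transmitting the stream ...
 *	drm_dp_check_act_status(mgr);
 *	drm_dp_update_payload_part2(mgr);
 */
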
void drm_dp_mst_dump_topology(struct seq_file *m,
			      struct drm_dp_mst_topology_mgr *mgr);

void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
int __must_check
drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
			       bool sync);

ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
			     unsigned int offset, void *buffer, size_t size);
ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
			      unsigned int offset, void *buffer, size_t size);

int drm_dp_mst_connector_late_register(struct drm_connector *connector,
				       struct drm_dp_mst_port *port);
void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
					   struct drm_dp_mst_port *port);

struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
								    struct drm_dp_mst_topology_mgr *mgr);
int __must_check
drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
			      struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_mst_port *port, int pbn,
			      int pbn_div);
int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
				 struct drm_dp_mst_port *port,
				 int pbn, int pbn_div,
				 bool enable);
int __must_check
drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state,
				  struct drm_dp_mst_topology_mgr *mgr);
int __must_check
drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
				 struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port);
int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port, bool power_up);
int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
		struct drm_dp_mst_port *port,
		struct drm_dp_query_stream_enc_status_ack_reply *status);
int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state);

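/*
 * Example (illustrative sketch of atomic usage, assuming passing 0 as the
 * pbn_div argument selects the manager's current divisor): an encoder's
 * atomic_check reserves slots against the new state, the disable path
 * releases them with drm_dp_atomic_release_vcpi_slots(), and
 * drm_dp_mst_atomic_check() validates the combined result once from the
 * driver's &drm_mode_config_funcs.atomic_check hook:
 *
 *	slots = drm_dp_atomic_find_vcpi_slots(state, mgr, port, pbn, 0);
 *	if (slots < 0)
 *		return slots;
 */
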
void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port);
void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port);

struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port);

extern const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs;

/**
 * __drm_dp_mst_state_iter_get - private atomic state iterator function for
 * macro-internal use
 * @state: &struct drm_atomic_state pointer
 * @mgr: pointer to the &struct drm_dp_mst_topology_mgr iteration cursor
 * @old_state: optional pointer to the old &struct drm_dp_mst_topology_state
 * iteration cursor
 * @new_state: optional pointer to the new &struct drm_dp_mst_topology_state
 * iteration cursor
 * @i: int iteration cursor, for macro-internal use
 *
 * Used by for_each_oldnew_mst_mgr_in_state(),
 * for_each_old_mst_mgr_in_state(), and for_each_new_mst_mgr_in_state(). Don't
 * call this directly.
 *
 * Returns:
 * True if the current &struct drm_private_obj is a &struct
 * drm_dp_mst_topology_mgr, false otherwise.
 */
static inline bool
__drm_dp_mst_state_iter_get(struct drm_atomic_state *state,
			    struct drm_dp_mst_topology_mgr **mgr,
			    struct drm_dp_mst_topology_state **old_state,
			    struct drm_dp_mst_topology_state **new_state,
			    int i)
{
	struct __drm_private_objs_state *objs_state = &state->private_objs[i];

	if (objs_state->ptr->funcs != &drm_dp_mst_topology_state_funcs)
		return false;

	*mgr = to_dp_mst_topology_mgr(objs_state->ptr);
	if (old_state)
		*old_state = to_dp_mst_topology_state(objs_state->old_state);
	if (new_state)
		*new_state = to_dp_mst_topology_state(objs_state->new_state);

	return true;
}

/**
 * for_each_oldnew_mst_mgr_in_state - iterate over all DP MST topology
 * managers in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
 * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old
 * state
 * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new
 * state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all DRM DP MST topology managers in an atomic update,
 * tracking both old and new state. This is useful in places where the state
 * delta needs to be considered, for example in atomic check functions.
 */
#define for_each_oldnew_mst_mgr_in_state(__state, mgr, old_state, new_state, __i) \
	for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
		for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), &(new_state), (__i)))

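/*
 * Example (sketch): walking every MST manager touched by an atomic state,
 * e.g. from a driver-wide atomic check:
 *
 *	struct drm_dp_mst_topology_mgr *mgr;
 *	struct drm_dp_mst_topology_state *old_mst_state, *new_mst_state;
 *	int i;
 *
 *	for_each_oldnew_mst_mgr_in_state(state, mgr, old_mst_state, new_mst_state, i) {
 *		// compare old_mst_state->vcpis against new_mst_state->vcpis
 *	}
 */
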
/**
 * for_each_old_mst_mgr_in_state - iterate over all DP MST topology managers
 * in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
 * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old
 * state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all DRM DP MST topology managers in an atomic update,
 * tracking only the old state. This is useful in disable functions, where we
 * need the old state the hardware is still in.
 */
#define for_each_old_mst_mgr_in_state(__state, mgr, old_state, __i) \
	for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
		for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), NULL, (__i)))

/**
 * for_each_new_mst_mgr_in_state - iterate over all DP MST topology managers
 * in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
 * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new
 * state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all DRM DP MST topology managers in an atomic update,
 * tracking only the new state. This is useful in enable functions, where we
 * need the new state the hardware should be in when the atomic commit
 * operation has completed.
 */
#define for_each_new_mst_mgr_in_state(__state, mgr, new_state, __i) \
	for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
		for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), NULL, &(new_state), (__i)))

#endif