#ifndef _DRM_DP_MST_HELPER_H_
#define _DRM_DP_MST_HELPER_H_

#include <linux/types.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_atomic.h>

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stackdepot.h>
#include <linux/timekeeping.h>

enum drm_dp_mst_topology_ref_type {
	DRM_DP_MST_TOPOLOGY_REF_GET,
	DRM_DP_MST_TOPOLOGY_REF_PUT,
};

struct drm_dp_mst_topology_ref_history {
	struct drm_dp_mst_topology_ref_entry {
		enum drm_dp_mst_topology_ref_type type;
		int count;
		ktime_t ts_nsec;
		depot_stack_handle_t backtrace;
	} *entries;
	int len;
};
#endif /* IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) */

struct drm_dp_mst_branch;

/**
 * struct drm_dp_vcpi - Virtual Channel Payload Identifier
 * @vcpi: Virtual channel ID.
 * @pbn: Payload Bandwidth Number for this channel
 * @aligned_pbn: PBN aligned with slot size
 * @num_slots: number of slots for this PBN
 */
struct drm_dp_vcpi {
	int vcpi;
	int pbn;
	int aligned_pbn;
	int num_slots;
};

/**
 * struct drm_dp_mst_port - MST port
 * @port_num: port number
 * @input: if this port is an input port
 * @mcs: message capability status - DP 1.2 spec
 * @ddps: DisplayPort Device Plug Status - DP 1.2
 * @pdt: Peer Device Type
 * @ldps: Legacy Device Plug Status
 * @dpcd_rev: DPCD revision of the device on this port
 * @num_sdp_streams: Number of simultaneous streams
 * @num_sdp_stream_sinks: Number of stream sinks
 * @full_pbn: Max possible bandwidth for this port
 * @next: link to next port on this branch device
 * @aux: i2c aux transport to talk to the device connected to this port
 * @parent: branch device parent of this port
 * @vcpi: Virtual Channel Payload info for this port
 * @connector: DRM connector this port is connected to
 * @mgr: topology manager this port lives under
 *
 * This structure represents an MST port endpoint on a device somewhere
 * in the MST topology.
 */
struct drm_dp_mst_port {
	/**
	 * @topology_kref: refcount for this port's lifetime in the topology,
	 * only the DP MST helpers should need to touch this
	 */
	struct kref topology_kref;

	/**
	 * @malloc_kref: refcount for the memory allocation containing this
	 * structure. See drm_dp_mst_get_port_malloc() and
	 * drm_dp_mst_put_port_malloc().
	 */
	struct kref malloc_kref;

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
	/**
	 * @topology_ref_history: A history of each topology
	 * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS.
	 */
	struct drm_dp_mst_topology_ref_history topology_ref_history;
#endif

	u8 port_num;
	bool input;
	bool mcs;
	bool ddps;
	u8 pdt;
	bool ldps;
	u8 dpcd_rev;
	u8 num_sdp_streams;
	u8 num_sdp_stream_sinks;
	uint16_t full_pbn;
	struct list_head next;
	/**
	 * @mstb: the branch device connected to this port, if there is one.
	 * This should be considered protected for reading by
	 * &drm_dp_mst_topology_mgr.lock.
	 */
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_aux aux; /* i2c bus for this port */
	struct drm_dp_mst_branch *parent;

	struct drm_dp_vcpi vcpi;
	struct drm_connector *connector;
	struct drm_dp_mst_topology_mgr *mgr;

	/**
	 * @cached_edid: for DP logical ports - make tiling work by ensuring
	 * that the EDID for all connectors is read immediately.
	 */
	struct edid *cached_edid;

	/**
	 * @has_audio: Tracks whether the sink connected to this port is
	 * audio-capable.
	 */
	bool has_audio;

	/**
	 * @fec_capable: bool indicating if FEC can be supported up to that
	 * point in the MST topology.
	 */
	bool fec_capable;
};

struct drm_dp_sideband_msg_hdr {
	u8 lct;		/* link count total */
	u8 lcr;		/* link count remaining */
	u8 rad[8];	/* relative address of the target branch device */
	bool broadcast;
	bool path_msg;
	u8 msg_len;
	bool somt;	/* start of message transaction */
	bool eomt;	/* end of message transaction */
	bool seqno;
};

struct drm_dp_sideband_msg_rx {
	u8 chunk[48];
	u8 msg[256];
	u8 curchunk_len;
	u8 curchunk_idx; /* chunk we are parsing now */
	u8 curchunk_hdrlen;
	u8 curlen; /* total length of the msg */
	bool have_somt;
	bool have_eomt;
	struct drm_dp_sideband_msg_hdr initial_hdr;
};

/**
 * struct drm_dp_mst_branch - MST branch device.
 * @rad: Relative Address to talk to this branch device.
 * @lct: Link count total to talk to this branch device.
 * @num_ports: number of ports on the branch.
 * @port_parent: pointer to the port parent, NULL if toplevel.
 * @mgr: topology manager for this branch device.
 * @link_address_sent: if a link address message has been sent to this device yet.
 * @guid: guid for DP 1.2 branch device. Ports under this branch can be
 * identified by port number.
 *
 * This structure represents an MST branch device, there is one
 * primary branch device at the root, along with any other branches connected
 * to downstream ports of parent branches.
 */
struct drm_dp_mst_branch {
	/**
	 * @topology_kref: refcount for this branch device's lifetime in the
	 * topology, only the DP MST helpers should need to touch this
	 */
	struct kref topology_kref;

	/**
	 * @malloc_kref: refcount for the memory allocation containing this
	 * structure.
	 */
	struct kref malloc_kref;

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
	/**
	 * @topology_ref_history: A history of each topology
	 * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS.
	 */
	struct drm_dp_mst_topology_ref_history topology_ref_history;
#endif

	/**
	 * @destroy_next: linked-list entry used by the delayed destroy work.
	 */
	struct list_head destroy_next;

	u8 rad[8];
	u8 lct;
	int num_ports;

	/**
	 * @ports: the list of ports on this branch device. This should be
	 * considered protected for reading by &drm_dp_mst_topology_mgr.lock.
	 */
	struct list_head ports;

	struct drm_dp_mst_port *port_parent;
	struct drm_dp_mst_topology_mgr *mgr;

	bool link_address_sent;

	/* global unique identifier to identify branch devices */
	u8 guid[16];
};

struct drm_dp_nak_reply {
	u8 guid[16];
	u8 reason;
	u8 nak_data;
};

struct drm_dp_link_address_ack_reply {
	u8 guid[16];
	u8 nports;
	struct drm_dp_link_addr_reply_port {
		bool input_port;
		u8 peer_device_type;
		u8 port_number;
		bool mcs;
		bool ddps;
		bool legacy_device_plug_status;
		u8 dpcd_revision;
		u8 peer_guid[16];
		u8 num_sdp_streams;
		u8 num_sdp_stream_sinks;
	} ports[16];
};

struct drm_dp_remote_dpcd_read_ack_reply {
	u8 port_number;
	u8 num_bytes;
	u8 bytes[255];
};

struct drm_dp_remote_dpcd_write_ack_reply {
	u8 port_number;
};

struct drm_dp_remote_dpcd_write_nak_reply {
	u8 port_number;
	u8 reason;
	u8 bytes_written_before_failure;
};

struct drm_dp_remote_i2c_read_ack_reply {
	u8 port_number;
	u8 num_bytes;
	u8 bytes[255];
};

struct drm_dp_remote_i2c_read_nak_reply {
	u8 port_number;
	u8 nak_reason;
	u8 i2c_nak_transaction;
};

struct drm_dp_remote_i2c_write_ack_reply {
	u8 port_number;
};

struct drm_dp_query_stream_enc_status_ack_reply {
	/* Bit[23:16]- Stream Id */
	u8 stream_id;

	/* Bit[15]- Signed */
	bool reply_signed;

	/* Bit[10:8]- Stream Output Sink Type */
	bool unauthorizable_device_present;
	bool legacy_device_present;
	bool query_capable_device_present;

	/* Bit[12:11]- Stream Output CP Type */
	bool hdcp_1x_device_present;
	bool hdcp_2x_device_present;

	/* Bit[4]- Stream Authentication */
	bool auth_completed;

	/* Bit[3]- Stream Encryption */
	bool encryption_enabled;

	/* Bit[2]- Stream Repeater Function Present */
	bool repeater_present;

	/* Bit[1:0]- Stream State */
	u8 state;
};

#define DRM_DP_MAX_SDP_STREAMS 16
struct drm_dp_allocate_payload {
	u8 port_number;
	u8 number_sdp_streams;
	u8 vcpi;
	u16 pbn;
	u8 sdp_stream_sink[DRM_DP_MAX_SDP_STREAMS];
};

struct drm_dp_allocate_payload_ack_reply {
	u8 port_number;
	u8 vcpi;
	u16 allocated_pbn;
};

struct drm_dp_connection_status_notify {
	u8 guid[16];
	u8 port_number;
	bool legacy_device_plug_status;
	bool displayport_device_plug_status;
	bool message_capability_status;
	bool input_port;
	u8 peer_device_type;
};

struct drm_dp_remote_dpcd_read {
	u8 port_number;
	u32 dpcd_address;
	u8 num_bytes;
};

struct drm_dp_remote_dpcd_write {
	u8 port_number;
	u32 dpcd_address;
	u8 num_bytes;
	u8 *bytes;
};

#define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4
struct drm_dp_remote_i2c_read {
	u8 num_transactions;
	u8 port_number;
	struct drm_dp_remote_i2c_read_tx {
		u8 i2c_dev_id;
		u8 num_bytes;
		u8 *bytes;
		u8 no_stop_bit;
		u8 i2c_transaction_delay;
	} transactions[DP_REMOTE_I2C_READ_MAX_TRANSACTIONS];
	u8 read_i2c_device_id;
	u8 num_bytes_read;
};

struct drm_dp_remote_i2c_write {
	u8 port_number;
	u8 write_i2c_device_id;
	u8 num_bytes;
	u8 *bytes;
};

struct drm_dp_query_stream_enc_status {
	u8 stream_id;
	u8 client_id[7];	/* 56-bit nonce */
	u8 stream_event;
	bool valid_stream_event;
	u8 stream_behavior;
	u8 valid_stream_behavior;
};

/* this covers ENUM_RESOURCES, POWER_DOWN_PHY, POWER_UP_PHY */
struct drm_dp_port_number_req {
	u8 port_number;
};

struct drm_dp_enum_path_resources_ack_reply {
	u8 port_number;
	bool fec_capable;
	u16 full_payload_bw_number;
	u16 avail_payload_bw_number;
};

/* covers POWER_DOWN_PHY, POWER_UP_PHY */
struct drm_dp_port_number_rep {
	u8 port_number;
};

struct drm_dp_query_payload {
	u8 port_number;
	u8 vcpi;
};

struct drm_dp_resource_status_notify {
	u8 port_number;
	u8 guid[16];
	u16 available_pbn;
};

struct drm_dp_query_payload_ack_reply {
	u8 port_number;
	u16 allocated_pbn;
};

struct drm_dp_sideband_msg_req_body {
	u8 req_type;
	union ack_req {
		struct drm_dp_connection_status_notify conn_stat;
		struct drm_dp_port_number_req port_num;
		struct drm_dp_resource_status_notify resource_stat;

		struct drm_dp_query_payload query_payload;
		struct drm_dp_allocate_payload allocate_payload;

		struct drm_dp_remote_dpcd_read dpcd_read;
		struct drm_dp_remote_dpcd_write dpcd_write;

		struct drm_dp_remote_i2c_read i2c_read;
		struct drm_dp_remote_i2c_write i2c_write;

		struct drm_dp_query_stream_enc_status enc_status;
	} u;
};

struct drm_dp_sideband_msg_reply_body {
	u8 reply_type;
	u8 req_type;
	union ack_replies {
		struct drm_dp_nak_reply nak;
		struct drm_dp_link_address_ack_reply link_addr;
		struct drm_dp_port_number_rep port_number;

		struct drm_dp_enum_path_resources_ack_reply path_resources;
		struct drm_dp_allocate_payload_ack_reply allocate_payload;
		struct drm_dp_query_payload_ack_reply query_payload;

		struct drm_dp_remote_dpcd_read_ack_reply remote_dpcd_read_ack;
		struct drm_dp_remote_dpcd_write_ack_reply remote_dpcd_write_ack;
		struct drm_dp_remote_dpcd_write_nak_reply remote_dpcd_write_nack;

		struct drm_dp_remote_i2c_read_ack_reply remote_i2c_read_ack;
		struct drm_dp_remote_i2c_read_nak_reply remote_i2c_read_nack;
		struct drm_dp_remote_i2c_write_ack_reply remote_i2c_write_ack;

		struct drm_dp_query_stream_enc_status_ack_reply enc_status;
	} u;
};

/* sideband message transmission states */
#define DRM_DP_SIDEBAND_TX_QUEUED 0	/* queued, not yet sent */
#define DRM_DP_SIDEBAND_TX_START_SEND 1	/* transmission in progress */
#define DRM_DP_SIDEBAND_TX_SENT 2	/* fully sent, awaiting reply */
#define DRM_DP_SIDEBAND_TX_RX 3		/* reply received */
#define DRM_DP_SIDEBAND_TX_TIMEOUT 4	/* timed out waiting for a reply */

struct drm_dp_sideband_msg_tx {
	u8 msg[256];
	u8 chunk[48];
	u8 cur_offset;
	u8 cur_len;
	struct drm_dp_mst_branch *dst;
	struct list_head next;
	int seqno;
	int state;
	bool path_msg;
	struct drm_dp_sideband_msg_reply_body reply;
};

/* sideband msg handler */
struct drm_dp_mst_topology_mgr;
struct drm_dp_mst_topology_cbs {
	/* create a connector for a port */
	struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);
	/*
	 * Checks for any pending MST interrupts, passing them to MST core for
	 * processing, the same way an HPD IRQ pulse handler would do this.
	 * If provided, MST core calls this callback from a poll-waiting loop
	 * when waiting for MST down message replies. The driver is expected
	 * to guard against a race between this callback and the driver's HPD
	 * IRQ pulse handler.
	 */
	void (*poll_hpd_irq)(struct drm_dp_mst_topology_mgr *mgr);
};

#define DP_MAX_PAYLOAD (sizeof(unsigned long) * 8)

#define DP_PAYLOAD_LOCAL 1
#define DP_PAYLOAD_REMOTE 2
#define DP_PAYLOAD_DELETE_LOCAL 3

struct drm_dp_payload {
	int payload_state;
	int start_slot;
	int num_slots;
	int vcpi;
};

#define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base)

struct drm_dp_vcpi_allocation {
	struct drm_dp_mst_port *port;
	int vcpi;
	int pbn;
	bool dsc_enabled;
	struct list_head next;
};

struct drm_dp_mst_topology_state {
	struct drm_private_state base;
	struct list_head vcpis;
	struct drm_dp_mst_topology_mgr *mgr;
	u8 total_avail_slots;
	u8 start_slot;
};

#define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base)

/**
 * struct drm_dp_mst_topology_mgr - DisplayPort MST manager
 *
 * This struct represents the toplevel displayport MST topology manager.
 * There should be one instance of this for every MST capable DP connector
 * on the GPU.
 */
struct drm_dp_mst_topology_mgr {
	/**
	 * @base: Base private object for atomic.
	 */
	struct drm_private_obj base;

	/**
	 * @dev: device pointer for adding i2c devices etc.
	 */
	struct drm_device *dev;
	/**
	 * @cbs: callbacks for connector addition and destruction.
	 */
	const struct drm_dp_mst_topology_cbs *cbs;
	/**
	 * @max_dpcd_transaction_bytes: maximum number of bytes to read/write
	 * in one go.
	 */
	int max_dpcd_transaction_bytes;
	/**
	 * @aux: AUX channel for the DP MST connector this topology manager
	 * is controlling.
	 */
	struct drm_dp_aux *aux;
	/**
	 * @max_payloads: maximum number of payloads the GPU can generate.
	 */
	int max_payloads;
	/**
	 * @max_lane_count: maximum number of lanes the GPU can drive.
	 */
	int max_lane_count;
	/**
	 * @max_link_rate: maximum link rate per lane the GPU can output.
	 */
	int max_link_rate;
	/**
	 * @conn_base_id: DRM connector ID this manager is connected to, only
	 * used to build the MST connector path value.
	 */
	int conn_base_id;

	/**
	 * @up_req_recv: Message receiver state for up requests.
	 */
	struct drm_dp_sideband_msg_rx up_req_recv;

	/**
	 * @down_rep_recv: Message receiver state for replies to down
	 * requests.
	 */
	struct drm_dp_sideband_msg_rx down_rep_recv;

	/**
	 * @lock: protects @mst_state, @mst_primary, @dpcd, and
	 * @payload_id_table_cleared.
	 */
	struct mutex lock;

	/**
	 * @probe_lock: Prevents @work and @up_req_work, the only writers of
	 * &drm_dp_mst_port.mstb and &drm_dp_mst_branch.ports, from racing
	 * while they update the topology.
	 */
	struct mutex probe_lock;

	/**
	 * @mst_state: If this manager is enabled for an MST capable port.
	 * False if no MST sink/branch devices is connected.
	 */
	bool mst_state : 1;

	/**
	 * @payload_id_table_cleared: Whether the payload ID table has been
	 * cleared.
	 */
	bool payload_id_table_cleared : 1;

	/**
	 * @mst_primary: Pointer to the primary/first branch device.
	 */
	struct drm_dp_mst_branch *mst_primary;

	/**
	 * @dpcd: Cache of DPCD for the primary port.
	 */
	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	/**
	 * @sink_count: Sink count from DEVICE_SERVICE_IRQ_VECTOR_ESI0.
	 */
	u8 sink_count;
	/**
	 * @pbn_div: PBN to slots divisor.
	 */
	int pbn_div;

	/**
	 * @funcs: Atomic helper callbacks.
	 */
	const struct drm_private_state_funcs *funcs;

	/**
	 * @qlock: protects @tx_msg_downq and &drm_dp_sideband_msg_tx.state.
	 */
	struct mutex qlock;

	/**
	 * @tx_msg_downq: List of pending down requests.
	 */
	struct list_head tx_msg_downq;

	/**
	 * @payload_lock: Protect payload information.
	 */
	struct mutex payload_lock;

	/**
	 * @proposed_vcpis: Array of pointers for the new VCPI allocation. The
	 * VCPI structure itself is &drm_dp_mst_port.vcpi, and the size of
	 * this array is determined by @max_payloads.
	 */
	struct drm_dp_vcpi **proposed_vcpis;
	/**
	 * @payloads: Array of payloads. The size of this array is determined
	 * by @max_payloads.
	 */
	struct drm_dp_payload *payloads;
	/**
	 * @payload_mask: Elements of @payloads actually in use. Since
	 * reallocation of active outputs isn't possible, gaps can be created
	 * by disabling outputs out of order compared to how they've been
	 * enabled.
	 */
	unsigned long payload_mask;
	/**
	 * @vcpi_mask: Similar to @payload_mask, but for @proposed_vcpis.
	 */
	unsigned long vcpi_mask;

	/**
	 * @tx_waitq: Wait queue for the tx worker.
	 */
	wait_queue_head_t tx_waitq;
	/**
	 * @work: Probe work.
	 */
	struct work_struct work;
	/**
	 * @tx_work: Sideband transmit worker. Can be used to send messages to
	 * branch devices without having to reuse the driver's HPD handling
	 * code.
	 */
	struct work_struct tx_work;

	/**
	 * @destroy_port_list: List of to be destroyed connectors.
	 */
	struct list_head destroy_port_list;
	/**
	 * @destroy_branch_device_list: List of to be destroyed branch
	 * devices.
	 */
	struct list_head destroy_branch_device_list;
	/**
	 * @delayed_destroy_lock: Protects @destroy_port_list and
	 * @destroy_branch_device_list.
	 */
	struct mutex delayed_destroy_lock;

	/**
	 * @delayed_destroy_wq: Workqueue used for @delayed_destroy_work. A
	 * dedicated WQ makes it possible to drain any requeued work items on
	 * it.
	 */
	struct workqueue_struct *delayed_destroy_wq;

	/**
	 * @delayed_destroy_work: Work item to destroy MST port and branch
	 * devices, needed to avoid locking inversion.
	 */
	struct work_struct delayed_destroy_work;

	/**
	 * @up_req_list: List of pending up requests from the topology that
	 * need to be processed, in chronological order.
	 */
	struct list_head up_req_list;
	/**
	 * @up_req_lock: Protects @up_req_list.
	 */
	struct mutex up_req_lock;
	/**
	 * @up_req_work: Work item to process up requests received from the
	 * topology. Needed to avoid blocking hotplug handling and sideband
	 * transmissions.
	 */
	struct work_struct up_req_work;

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
	/**
	 * @topology_ref_history_lock: protects
	 * &drm_dp_mst_port.topology_ref_history and
	 * &drm_dp_mst_branch.topology_ref_history.
	 */
	struct mutex topology_ref_history_lock;
#endif
};

int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_device *dev, struct drm_dp_aux *aux,
				 int max_dpcd_transaction_bytes,
				 int max_payloads,
				 int max_lane_count, int max_link_rate,
				 int conn_base_id);
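
/*
 * A minimal usage sketch, not taken from any in-tree driver: a driver would
 * typically call drm_dp_mst_topology_mgr_init() once per MST-capable
 * connector at init time. The "my_connector" names and the numeric limits
 * below are illustrative assumptions only.
 *
 *	ret = drm_dp_mst_topology_mgr_init(&my_connector->mst_mgr, dev,
 *					   &my_connector->dp_aux,
 *					   16,	// max DPCD transaction bytes
 *					   8,	// max payloads
 *					   4,	// max lane count
 *					   540000, // max link rate (illustrative)
 *					   my_connector->base.base.id);
 *	if (ret)
 *		goto err;
 */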

void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);

bool drm_dp_read_mst_cap(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state);

int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled);
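
/*
 * Hedged sketch of how a driver's HPD short-pulse handler might feed ESI data
 * into the MST helpers; the DPCD read of DP_SINK_COUNT_ESI and the ack write
 * are assumptions modelled on common driver patterns, not a prescribed
 * sequence.
 *
 *	u8 esi[DP_DPRX_ESI_LEN] = {};
 *	bool handled = false;
 *
 *	if (drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, DP_DPRX_ESI_LEN) > 0) {
 *		drm_dp_mst_hpd_irq(&mgr, esi, &handled);
 *		if (handled) {
 *			// ack the serviced event bits back to the sink
 *			drm_dp_dpcd_write(aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
 *		}
 *	}
 */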

int
drm_dp_mst_detect_port(struct drm_connector *connector,
		       struct drm_modeset_acquire_ctx *ctx,
		       struct drm_dp_mst_topology_mgr *mgr,
		       struct drm_dp_mst_port *port);

struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);

int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
			     int link_rate, int link_lane_count);

int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc);

bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_mst_port *port, int pbn, int slots);

int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);

void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);

void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap);

void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
				struct drm_dp_mst_port *port);

int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
			   int pbn);

int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr, int start_slot);

int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr);

int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);
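
/*
 * Rough ordering sketch of the legacy (non-atomic) payload programming flow,
 * assembled from the helper names above rather than from any single driver;
 * treat it as an assumption about typical usage, not a mandated sequence:
 *
 *	// enable path, during the modeset:
 *	drm_dp_mst_allocate_vcpi(mgr, port, pbn, slots);
 *	drm_dp_update_payload_part1(mgr, 1);	// write the VC payload table
 *	// ... start transmitting the stream, then:
 *	drm_dp_check_act_status(mgr);		// wait for ACT handled
 *	drm_dp_update_payload_part2(mgr);	// send ALLOCATE_PAYLOAD
 *
 *	// disable path:
 *	drm_dp_mst_reset_vcpi_slots(mgr, port);
 *	drm_dp_update_payload_part1(mgr, 1);
 *	drm_dp_check_act_status(mgr);
 *	drm_dp_update_payload_part2(mgr);
 *	drm_dp_mst_deallocate_vcpi(mgr, port);
 */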

void drm_dp_mst_dump_topology(struct seq_file *m,
			      struct drm_dp_mst_topology_mgr *mgr);

void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
int __must_check
drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
			       bool sync);

ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
			     unsigned int offset, void *buffer, size_t size);
ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
			      unsigned int offset, void *buffer, size_t size);

int drm_dp_mst_connector_late_register(struct drm_connector *connector,
				       struct drm_dp_mst_port *port);
void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
					   struct drm_dp_mst_port *port);

struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
								    struct drm_dp_mst_topology_mgr *mgr);
int __must_check
drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
			      struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_mst_port *port, int pbn,
			      int pbn_div);
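
/*
 * Hedged sketch of the atomic VCPI bookkeeping in an encoder's atomic_check
 * hook; the connector/crtc state plumbing and mode fields are illustrative
 * assumptions. drm_dp_calc_pbn_mode() converts the pixel clock (kHz) and bpp
 * into a PBN value, which is then reserved against the topology state:
 *
 *	int pbn, slots;
 *
 *	pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp, false);
 *	slots = drm_dp_atomic_find_vcpi_slots(state, &mgr, port, pbn, 0);
 *	if (slots < 0)
 *		return slots;
 *	// on the disable path, the reservation is dropped again with
 *	// drm_dp_atomic_release_vcpi_slots(state, &mgr, port);
 */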
int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
				 struct drm_dp_mst_port *port,
				 int pbn, int pbn_div,
				 bool enable);
int __must_check
drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state,
				  struct drm_dp_mst_topology_mgr *mgr);
int __must_check
drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
				 struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port);
int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port, bool power_up);
int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_mst_port *port,
					struct drm_dp_query_stream_enc_status_ack_reply *status);
int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state);

void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port);
void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port);

struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port);

extern const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs;

/**
 * __drm_dp_mst_state_iter_get - private atomic state iterator function for
 * macro-internal use
 * @state: &struct drm_atomic_state pointer
 * @mgr: pointer to the &struct drm_dp_mst_topology_mgr iteration cursor
 * @old_state: optional pointer to the old &struct drm_dp_mst_topology_state
 * iteration cursor
 * @new_state: optional pointer to the new &struct drm_dp_mst_topology_state
 * iteration cursor
 * @i: int iteration cursor, for macro-internal use
 *
 * Used by for_each_oldnew_mst_mgr_in_state(),
 * for_each_old_mst_mgr_in_state(), and for_each_new_mst_mgr_in_state(). Don't
 * call this directly.
 *
 * Returns:
 * True if the current &struct drm_private_obj is a &struct
 * drm_dp_mst_topology_mgr, false otherwise.
 */
static inline bool
__drm_dp_mst_state_iter_get(struct drm_atomic_state *state,
			    struct drm_dp_mst_topology_mgr **mgr,
			    struct drm_dp_mst_topology_state **old_state,
			    struct drm_dp_mst_topology_state **new_state,
			    int i)
{
	struct __drm_private_objs_state *objs_state = &state->private_objs[i];

	if (objs_state->ptr->funcs != &drm_dp_mst_topology_state_funcs)
		return false;

	*mgr = to_dp_mst_topology_mgr(objs_state->ptr);
	if (old_state)
		*old_state = to_dp_mst_topology_state(objs_state->old_state);
	if (new_state)
		*new_state = to_dp_mst_topology_state(objs_state->new_state);

	return true;
}

/**
 * for_each_oldnew_mst_mgr_in_state - iterate over all DP MST topology
 * managers in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
 * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old
 * state
 * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new
 * state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all DRM DP MST topology managers in an atomic update,
 * tracking both old and new state. This is useful in places where the state
 * delta needs to be considered, for example in atomic check functions.
 */
#define for_each_oldnew_mst_mgr_in_state(__state, mgr, old_state, new_state, __i) \
	for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
		for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), &(new_state), (__i)))
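
/*
 * Hedged usage sketch, with "check_mgr_bw" standing in for whatever
 * per-manager validation a driver performs; the iterator simply walks every
 * MST manager whose private state is part of this atomic commit:
 *
 *	struct drm_dp_mst_topology_mgr *mgr;
 *	struct drm_dp_mst_topology_state *old_mst_state, *new_mst_state;
 *	int i, ret;
 *
 *	for_each_oldnew_mst_mgr_in_state(state, mgr, old_mst_state,
 *					 new_mst_state, i) {
 *		ret = check_mgr_bw(mgr, old_mst_state, new_mst_state);
 *		if (ret)
 *			return ret;
 *	}
 */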

/**
 * for_each_old_mst_mgr_in_state - iterate over all DP MST topology managers
 * in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
 * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old
 * state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all DRM DP MST topology managers in an atomic update,
 * tracking only the old state. This is useful in disable functions, where we
 * need the old state the hardware is still in.
 */
#define for_each_old_mst_mgr_in_state(__state, mgr, old_state, __i) \
	for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
		for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), NULL, (__i)))

/**
 * for_each_new_mst_mgr_in_state - iterate over all DP MST topology managers
 * in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
 * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new
 * state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all DRM DP MST topology managers in an atomic update,
 * tracking only the new state. This is useful in enable functions, where we
 * need the new state the hardware should be in when the atomic commit
 * operation has completed.
 */
#define for_each_new_mst_mgr_in_state(__state, mgr, new_state, __i) \
	for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
		for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), NULL, &(new_state), (__i)))

#endif /* _DRM_DP_MST_HELPER_H_ */