/*
 * Copyright 2014 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <subdev/bios.h>
#include <subdev/bios/init.h>
#include <subdev/i2c.h>

#include <nvif/event.h>
45 nvkm_dp_train_sense(struct lt_state
*lt
, bool pc
, u32 delay
)
47 struct nvkm_dp
*dp
= lt
->dp
;
50 if (dp
->dpcd
[DPCD_RC0E_AUX_RD_INTERVAL
])
51 mdelay(dp
->dpcd
[DPCD_RC0E_AUX_RD_INTERVAL
] * 4);
55 ret
= nvkm_rdaux(dp
->aux
, DPCD_LS02
, lt
->stat
, 6);
60 ret
= nvkm_rdaux(dp
->aux
, DPCD_LS0C
, <
->pc2stat
, 1);
63 OUTP_TRACE(&dp
->outp
, "status %6ph pc2 %02x",
64 lt
->stat
, lt
->pc2stat
);
66 OUTP_TRACE(&dp
->outp
, "status %6ph", lt
->stat
);
73 nvkm_dp_train_drive(struct lt_state
*lt
, bool pc
)
75 struct nvkm_dp
*dp
= lt
->dp
;
76 struct nvkm_ior
*ior
= dp
->outp
.ior
;
77 struct nvkm_bios
*bios
= ior
->disp
->engine
.subdev
.device
->bios
;
78 struct nvbios_dpout info
;
79 struct nvbios_dpcfg ocfg
;
80 u8 ver
, hdr
, cnt
, len
;
84 for (i
= 0; i
< ior
->dp
.nr
; i
++) {
85 u8 lane
= (lt
->stat
[4 + (i
>> 1)] >> ((i
& 1) * 4)) & 0xf;
86 u8 lpc2
= (lt
->pc2stat
>> (i
* 2)) & 0x3;
87 u8 lpre
= (lane
& 0x0c) >> 2;
88 u8 lvsw
= (lane
& 0x03) >> 0;
94 lpc2
= hipc
| DPCD_LC0F_LANE0_MAX_POST_CURSOR2_REACHED
;
96 lpre
= hipe
| DPCD_LC03_MAX_SWING_REACHED
; /* yes. */
97 lvsw
= hivs
= 3 - (lpre
& 3);
100 lvsw
= hivs
| DPCD_LC03_MAX_SWING_REACHED
;
103 lt
->conf
[i
] = (lpre
<< 3) | lvsw
;
104 lt
->pc2conf
[i
>> 1] |= lpc2
<< ((i
& 1) * 4);
106 OUTP_TRACE(&dp
->outp
, "config lane %d %02x %02x",
107 i
, lt
->conf
[i
], lpc2
);
109 data
= nvbios_dpout_match(bios
, dp
->outp
.info
.hasht
,
111 &ver
, &hdr
, &cnt
, &len
, &info
);
115 data
= nvbios_dpcfg_match(bios
, data
, lpc2
& 3, lvsw
& 3,
116 lpre
& 3, &ver
, &hdr
, &cnt
, &len
,
121 ior
->func
->dp
.drive(ior
, i
, ocfg
.pc
, ocfg
.dc
,
122 ocfg
.pe
, ocfg
.tx_pu
);
125 ret
= nvkm_wraux(dp
->aux
, DPCD_LC03(0), lt
->conf
, 4);
130 ret
= nvkm_wraux(dp
->aux
, DPCD_LC0F
, lt
->pc2conf
, 2);
139 nvkm_dp_train_pattern(struct lt_state
*lt
, u8 pattern
)
141 struct nvkm_dp
*dp
= lt
->dp
;
144 OUTP_TRACE(&dp
->outp
, "training pattern %d", pattern
);
145 dp
->outp
.ior
->func
->dp
.pattern(dp
->outp
.ior
, pattern
);
147 nvkm_rdaux(dp
->aux
, DPCD_LC02
, &sink_tp
, 1);
148 sink_tp
&= ~DPCD_LC02_TRAINING_PATTERN_SET
;
150 nvkm_wraux(dp
->aux
, DPCD_LC02
, &sink_tp
, 1);
154 nvkm_dp_train_eq(struct lt_state
*lt
)
156 bool eq_done
= false, cr_done
= true;
159 if (lt
->dp
->dpcd
[DPCD_RC02
] & DPCD_RC02_TPS3_SUPPORTED
)
160 nvkm_dp_train_pattern(lt
, 3);
162 nvkm_dp_train_pattern(lt
, 2);
166 nvkm_dp_train_drive(lt
, lt
->pc2
)) ||
167 nvkm_dp_train_sense(lt
, lt
->pc2
, 400))
170 eq_done
= !!(lt
->stat
[2] & DPCD_LS04_INTERLANE_ALIGN_DONE
);
171 for (i
= 0; i
< lt
->dp
->outp
.ior
->dp
.nr
&& eq_done
; i
++) {
172 u8 lane
= (lt
->stat
[i
>> 1] >> ((i
& 1) * 4)) & 0xf;
173 if (!(lane
& DPCD_LS02_LANE0_CR_DONE
))
175 if (!(lane
& DPCD_LS02_LANE0_CHANNEL_EQ_DONE
) ||
176 !(lane
& DPCD_LS02_LANE0_SYMBOL_LOCKED
))
179 } while (!eq_done
&& cr_done
&& ++tries
<= 5);
181 return eq_done
? 0 : -1;
185 nvkm_dp_train_cr(struct lt_state
*lt
)
187 bool cr_done
= false, abort
= false;
188 int voltage
= lt
->conf
[0] & DPCD_LC03_VOLTAGE_SWING_SET
;
191 nvkm_dp_train_pattern(lt
, 1);
194 if (nvkm_dp_train_drive(lt
, false) ||
195 nvkm_dp_train_sense(lt
, false, 100))
199 for (i
= 0; i
< lt
->dp
->outp
.ior
->dp
.nr
; i
++) {
200 u8 lane
= (lt
->stat
[i
>> 1] >> ((i
& 1) * 4)) & 0xf;
201 if (!(lane
& DPCD_LS02_LANE0_CR_DONE
)) {
203 if (lt
->conf
[i
] & DPCD_LC03_MAX_SWING_REACHED
)
209 if ((lt
->conf
[0] & DPCD_LC03_VOLTAGE_SWING_SET
) != voltage
) {
210 voltage
= lt
->conf
[0] & DPCD_LC03_VOLTAGE_SWING_SET
;
213 } while (!cr_done
&& !abort
&& ++tries
< 5);
215 return cr_done
? 0 : -1;
219 nvkm_dp_train_links(struct nvkm_dp
*dp
)
221 struct nvkm_ior
*ior
= dp
->outp
.ior
;
222 struct nvkm_disp
*disp
= dp
->outp
.disp
;
223 struct nvkm_subdev
*subdev
= &disp
->engine
.subdev
;
224 struct nvkm_bios
*bios
= subdev
->device
->bios
;
225 struct lt_state lt
= {
232 OUTP_DBG(&dp
->outp
, "training %d x %d MB/s",
233 ior
->dp
.nr
, ior
->dp
.bw
* 27);
235 /* Intersect misc. capabilities of the OR and sink. */
236 if (disp
->engine
.subdev
.device
->chipset
< 0xd0)
237 dp
->dpcd
[DPCD_RC02
] &= ~DPCD_RC02_TPS3_SUPPORTED
;
238 lt
.pc2
= dp
->dpcd
[DPCD_RC02
] & DPCD_RC02_TPS3_SUPPORTED
;
240 /* Set desired link configuration on the source. */
241 if ((lnkcmp
= lt
.dp
->info
.lnkcmp
)) {
242 if (dp
->version
< 0x30) {
243 while ((ior
->dp
.bw
* 2700) < nvbios_rd16(bios
, lnkcmp
))
245 lnkcmp
= nvbios_rd16(bios
, lnkcmp
+ 2);
247 while (ior
->dp
.bw
< nvbios_rd08(bios
, lnkcmp
))
249 lnkcmp
= nvbios_rd16(bios
, lnkcmp
+ 1);
252 nvbios_init(subdev
, lnkcmp
,
253 init
.outp
= &dp
->outp
.info
;
255 init
.link
= ior
->asy
.link
;
259 ret
= ior
->func
->dp
.links(ior
, dp
->aux
);
262 OUTP_ERR(&dp
->outp
, "train failed with %d", ret
);
268 ior
->func
->dp
.power(ior
, ior
->dp
.nr
);
270 /* Set desired link configuration on the sink. */
271 sink
[0] = ior
->dp
.bw
;
272 sink
[1] = ior
->dp
.nr
;
274 sink
[1] |= DPCD_LC01_ENHANCED_FRAME_EN
;
276 ret
= nvkm_wraux(dp
->aux
, DPCD_LC00_LINK_BW_SET
, sink
, 2);
280 /* Attempt to train the link in this configuration. */
281 memset(lt
.stat
, 0x00, sizeof(lt
.stat
));
282 ret
= nvkm_dp_train_cr(<
);
284 ret
= nvkm_dp_train_eq(<
);
285 nvkm_dp_train_pattern(<
, 0);
290 nvkm_dp_train_fini(struct nvkm_dp
*dp
)
292 /* Execute AfterLinkTraining script from DP Info table. */
293 nvbios_init(&dp
->outp
.disp
->engine
.subdev
, dp
->info
.script
[1],
294 init
.outp
= &dp
->outp
.info
;
295 init
.or = dp
->outp
.ior
->id
;
296 init
.link
= dp
->outp
.ior
->asy
.link
;
301 nvkm_dp_train_init(struct nvkm_dp
*dp
)
303 /* Execute EnableSpread/DisableSpread script from DP Info table. */
304 if (dp
->dpcd
[DPCD_RC03
] & DPCD_RC03_MAX_DOWNSPREAD
) {
305 nvbios_init(&dp
->outp
.disp
->engine
.subdev
, dp
->info
.script
[2],
306 init
.outp
= &dp
->outp
.info
;
307 init
.or = dp
->outp
.ior
->id
;
308 init
.link
= dp
->outp
.ior
->asy
.link
;
311 nvbios_init(&dp
->outp
.disp
->engine
.subdev
, dp
->info
.script
[3],
312 init
.outp
= &dp
->outp
.info
;
313 init
.or = dp
->outp
.ior
->id
;
314 init
.link
= dp
->outp
.ior
->asy
.link
;
318 /* Execute BeforeLinkTraining script from DP Info table. */
319 nvbios_init(&dp
->outp
.disp
->engine
.subdev
, dp
->info
.script
[0],
320 init
.outp
= &dp
->outp
.info
;
321 init
.or = dp
->outp
.ior
->id
;
322 init
.link
= dp
->outp
.ior
->asy
.link
;
326 static const struct dp_rates
{
330 } nvkm_dp_rates
[] = {
331 { 2160000, 0x14, 4 },
332 { 1080000, 0x0a, 4 },
333 { 1080000, 0x14, 2 },
344 nvkm_dp_train(struct nvkm_dp
*dp
, u32 dataKBps
)
346 struct nvkm_ior
*ior
= dp
->outp
.ior
;
347 const u8 sink_nr
= dp
->dpcd
[DPCD_RC02
] & DPCD_RC02_MAX_LANE_COUNT
;
348 const u8 sink_bw
= dp
->dpcd
[DPCD_RC01_MAX_LINK_RATE
];
349 const u8 outp_nr
= dp
->outp
.info
.dpconf
.link_nr
;
350 const u8 outp_bw
= dp
->outp
.info
.dpconf
.link_bw
;
351 const struct dp_rates
*failsafe
= NULL
, *cfg
;
355 /* Find the lowest configuration of the OR that can support
356 * the required link rate.
358 * We will refuse to program the OR to lower rates, even if
359 * link training fails at higher rates (or even if the sink
360 * can't support the rate at all, though the DD is supposed
361 * to prevent such situations from happening).
363 * Attempting to do so can cause the entire display to hang,
364 * and it's better to have a failed modeset than that.
366 for (cfg
= nvkm_dp_rates
; cfg
->rate
; cfg
++) {
367 if (cfg
->nr
<= outp_nr
&& cfg
->nr
<= outp_bw
)
369 if (failsafe
&& cfg
[1].rate
< dataKBps
)
373 if (WARN_ON(!failsafe
))
376 /* Ensure sink is not in a low-power state. */
377 if (!nvkm_rdaux(dp
->aux
, DPCD_SC00
, &pwr
, 1)) {
378 if ((pwr
& DPCD_SC00_SET_POWER
) != DPCD_SC00_SET_POWER_D0
) {
379 pwr
&= ~DPCD_SC00_SET_POWER
;
380 pwr
|= DPCD_SC00_SET_POWER_D0
;
381 nvkm_wraux(dp
->aux
, DPCD_SC00
, &pwr
, 1);
386 OUTP_DBG(&dp
->outp
, "training (min: %d x %d MB/s)",
387 failsafe
->nr
, failsafe
->bw
* 27);
388 nvkm_dp_train_init(dp
);
389 for (cfg
= nvkm_dp_rates
; ret
< 0 && cfg
<= failsafe
; cfg
++) {
390 /* Skip configurations not supported by both OR and sink. */
391 if ((cfg
->nr
> outp_nr
|| cfg
->bw
> outp_bw
||
392 cfg
->nr
> sink_nr
|| cfg
->bw
> sink_bw
)) {
395 OUTP_ERR(&dp
->outp
, "link rate unsupported by sink");
397 ior
->dp
.mst
= dp
->lt
.mst
;
398 ior
->dp
.ef
= dp
->dpcd
[DPCD_RC02
] & DPCD_RC02_ENHANCED_FRAME_CAP
;
399 ior
->dp
.bw
= cfg
->bw
;
400 ior
->dp
.nr
= cfg
->nr
;
402 /* Program selected link configuration. */
403 ret
= nvkm_dp_train_links(dp
);
405 nvkm_dp_train_fini(dp
);
407 OUTP_ERR(&dp
->outp
, "training failed");
409 OUTP_DBG(&dp
->outp
, "training done");
410 atomic_set(&dp
->lt
.done
, 1);
415 nvkm_dp_release(struct nvkm_outp
*outp
, struct nvkm_ior
*ior
)
417 struct nvkm_dp
*dp
= nvkm_dp(outp
);
419 /* Prevent link from being retrained if sink sends an IRQ. */
420 atomic_set(&dp
->lt
.done
, 0);
423 /* Execute DisableLT script from DP Info Table. */
424 nvbios_init(&ior
->disp
->engine
.subdev
, dp
->info
.script
[4],
425 init
.outp
= &dp
->outp
.info
;
427 init
.link
= ior
->arm
.link
;
432 nvkm_dp_acquire(struct nvkm_outp
*outp
)
434 struct nvkm_dp
*dp
= nvkm_dp(outp
);
435 struct nvkm_ior
*ior
= dp
->outp
.ior
;
436 struct nvkm_head
*head
;
444 mutex_lock(&dp
->mutex
);
446 /* Check that link configuration meets current requirements. */
447 list_for_each_entry(head
, &outp
->disp
->head
, head
) {
448 if (ior
->asy
.head
& (1 << head
->id
)) {
449 u32 khz
= (head
->asy
.hz
>> ior
->asy
.rgdiv
) / 1000;
450 datakbps
+= khz
* head
->asy
.or.depth
;
454 linkKBps
= ior
->dp
.bw
* 27000 * ior
->dp
.nr
;
455 dataKBps
= DIV_ROUND_UP(datakbps
, 8);
456 OUTP_DBG(&dp
->outp
, "data %d KB/s link %d KB/s mst %d->%d",
457 dataKBps
, linkKBps
, ior
->dp
.mst
, dp
->lt
.mst
);
458 if (linkKBps
< dataKBps
|| ior
->dp
.mst
!= dp
->lt
.mst
) {
459 OUTP_DBG(&dp
->outp
, "link requirements changed");
463 /* Check that link is still trained. */
464 ret
= nvkm_rdaux(dp
->aux
, DPCD_LS02
, stat
, 3);
467 "failed to read link status, assuming no sink");
471 if (stat
[2] & DPCD_LS04_INTERLANE_ALIGN_DONE
) {
472 for (i
= 0; i
< ior
->dp
.nr
; i
++) {
473 u8 lane
= (stat
[i
>> 1] >> ((i
& 1) * 4)) & 0x0f;
474 if (!(lane
& DPCD_LS02_LANE0_CR_DONE
) ||
475 !(lane
& DPCD_LS02_LANE0_CHANNEL_EQ_DONE
) ||
476 !(lane
& DPCD_LS02_LANE0_SYMBOL_LOCKED
)) {
478 "lane %d not equalised", lane
);
484 OUTP_DBG(&dp
->outp
, "no inter-lane alignment");
488 if (retrain
|| !atomic_read(&dp
->lt
.done
))
489 ret
= nvkm_dp_train(dp
, dataKBps
);
490 mutex_unlock(&dp
->mutex
);
495 nvkm_dp_enable(struct nvkm_dp
*dp
, bool enable
)
497 struct nvkm_i2c_aux
*aux
= dp
->aux
;
501 OUTP_DBG(&dp
->outp
, "aux power -> always");
502 nvkm_i2c_aux_monitor(aux
, true);
506 if (!nvkm_rdaux(aux
, DPCD_RC00_DPCD_REV
, dp
->dpcd
,
512 OUTP_DBG(&dp
->outp
, "aux power -> demand");
513 nvkm_i2c_aux_monitor(aux
, false);
517 atomic_set(&dp
->lt
.done
, 0);
521 nvkm_dp_hpd(struct nvkm_notify
*notify
)
523 const struct nvkm_i2c_ntfy_rep
*line
= notify
->data
;
524 struct nvkm_dp
*dp
= container_of(notify
, typeof(*dp
), hpd
);
525 struct nvkm_conn
*conn
= dp
->outp
.conn
;
526 struct nvkm_disp
*disp
= dp
->outp
.disp
;
527 struct nvif_notify_conn_rep_v0 rep
= {};
529 OUTP_DBG(&dp
->outp
, "HPD: %d", line
->mask
);
530 if (line
->mask
& NVKM_I2C_IRQ
) {
531 if (atomic_read(&dp
->lt
.done
))
532 dp
->outp
.func
->acquire(&dp
->outp
);
533 rep
.mask
|= NVIF_NOTIFY_CONN_V0_IRQ
;
535 nvkm_dp_enable(dp
, true);
538 if (line
->mask
& NVKM_I2C_UNPLUG
)
539 rep
.mask
|= NVIF_NOTIFY_CONN_V0_UNPLUG
;
540 if (line
->mask
& NVKM_I2C_PLUG
)
541 rep
.mask
|= NVIF_NOTIFY_CONN_V0_PLUG
;
543 nvkm_event_send(&disp
->hpd
, rep
.mask
, conn
->index
, &rep
, sizeof(rep
));
544 return NVKM_NOTIFY_KEEP
;
548 nvkm_dp_fini(struct nvkm_outp
*outp
)
550 struct nvkm_dp
*dp
= nvkm_dp(outp
);
551 nvkm_notify_put(&dp
->hpd
);
552 nvkm_dp_enable(dp
, false);
556 nvkm_dp_init(struct nvkm_outp
*outp
)
558 struct nvkm_dp
*dp
= nvkm_dp(outp
);
559 nvkm_notify_put(&dp
->outp
.conn
->hpd
);
560 nvkm_dp_enable(dp
, true);
561 nvkm_notify_get(&dp
->hpd
);
565 nvkm_dp_dtor(struct nvkm_outp
*outp
)
567 struct nvkm_dp
*dp
= nvkm_dp(outp
);
568 nvkm_notify_fini(&dp
->hpd
);
572 static const struct nvkm_outp_func
574 .dtor
= nvkm_dp_dtor
,
575 .init
= nvkm_dp_init
,
576 .fini
= nvkm_dp_fini
,
577 .acquire
= nvkm_dp_acquire
,
578 .release
= nvkm_dp_release
,
582 nvkm_dp_ctor(struct nvkm_disp
*disp
, int index
, struct dcb_output
*dcbE
,
583 struct nvkm_i2c_aux
*aux
, struct nvkm_dp
*dp
)
585 struct nvkm_device
*device
= disp
->engine
.subdev
.device
;
586 struct nvkm_bios
*bios
= device
->bios
;
587 struct nvkm_i2c
*i2c
= device
->i2c
;
592 ret
= nvkm_outp_ctor(&nvkm_dp_func
, disp
, index
, dcbE
, &dp
->outp
);
598 OUTP_ERR(&dp
->outp
, "no aux");
602 /* bios data is not optional */
603 data
= nvbios_dpout_match(bios
, dp
->outp
.info
.hasht
,
604 dp
->outp
.info
.hashm
, &dp
->version
,
605 &hdr
, &cnt
, &len
, &dp
->info
);
607 OUTP_ERR(&dp
->outp
, "no bios dp data");
611 OUTP_DBG(&dp
->outp
, "bios dp %02x %02x %02x %02x",
612 dp
->version
, hdr
, cnt
, len
);
614 /* hotplug detect, replaces gpio-based mechanism with aux events */
615 ret
= nvkm_notify_init(NULL
, &i2c
->event
, nvkm_dp_hpd
, true,
616 &(struct nvkm_i2c_ntfy_req
) {
617 .mask
= NVKM_I2C_PLUG
| NVKM_I2C_UNPLUG
|
621 sizeof(struct nvkm_i2c_ntfy_req
),
622 sizeof(struct nvkm_i2c_ntfy_rep
),
625 OUTP_ERR(&dp
->outp
, "error monitoring aux hpd: %d", ret
);
629 mutex_init(&dp
->mutex
);
630 atomic_set(&dp
->lt
.done
, 0);
635 nvkm_dp_new(struct nvkm_disp
*disp
, int index
, struct dcb_output
*dcbE
,
636 struct nvkm_outp
**poutp
)
638 struct nvkm_i2c
*i2c
= disp
->engine
.subdev
.device
->i2c
;
639 struct nvkm_i2c_aux
*aux
;
642 if (dcbE
->location
== 0)
643 aux
= nvkm_i2c_aux_find(i2c
, NVKM_I2C_AUX_CCB(dcbE
->i2c_index
));
645 aux
= nvkm_i2c_aux_find(i2c
, NVKM_I2C_AUX_EXT(dcbE
->extdev
));
647 if (!(dp
= kzalloc(sizeof(*dp
), GFP_KERNEL
)))
651 return nvkm_dp_ctor(disp
, index
, dcbE
, aux
, dp
);