/* drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c */
/*
 * Copyright 2014 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "dp.h"
#include "conn.h"
#include "head.h"
#include "ior.h"

#include <subdev/bios.h>
#include <subdev/bios/init.h>
#include <subdev/i2c.h>

#include <nvif/event.h>

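/* State tracked while training a link: cached copies of the sink's DPCD
 * link/adjust status, the drive settings to be written back to it, and the
 * post-cursor2 status/config bytes used when the sink supports TPS3.
 */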
struct lt_state {
	struct nvkm_dp *dp;
	u8  stat[6];
	u8  conf[4];
	bool pc2;
	u8  pc2stat;
	u8  pc2conf[2];
};

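/* Wait out the AUX read interval advertised by the sink (DPCD value in units
 * of 4ms, falling back to the caller-supplied delay in microseconds when it
 * is zero), then snapshot the link/adjust status registers into lt->stat.
 */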
static int
nvkm_dp_train_sense(struct lt_state *lt, bool pc, u32 delay)
{
	struct nvkm_dp *dp = lt->dp;
	int ret;

	if (dp->dpcd[DPCD_RC0E_AUX_RD_INTERVAL])
		mdelay(dp->dpcd[DPCD_RC0E_AUX_RD_INTERVAL] * 4);
	else
		udelay(delay);

	ret = nvkm_rdaux(dp->aux, DPCD_LS02, lt->stat, 6);
	if (ret)
		return ret;

	if (pc) {
		ret = nvkm_rdaux(dp->aux, DPCD_LS0C, &lt->pc2stat, 1);
		if (ret)
			lt->pc2stat = 0x00;
		OUTP_TRACE(&dp->outp, "status %6ph pc2 %02x",
			   lt->stat, lt->pc2stat);
	} else {
		OUTP_TRACE(&dp->outp, "status %6ph", lt->stat);
	}

	return 0;
}

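/* Turn the per-lane voltage swing / pre-emphasis adjustments requested by
 * the sink into new drive settings, clamping at the maximum levels and
 * flagging *_REACHED, then program the OR (via the VBIOS dpcfg tables where
 * present) and write the settings back to the sink.
 */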
static int
nvkm_dp_train_drive(struct lt_state *lt, bool pc)
{
	struct nvkm_dp *dp = lt->dp;
	struct nvkm_ior *ior = dp->outp.ior;
	struct nvkm_bios *bios = ior->disp->engine.subdev.device->bios;
	struct nvbios_dpout info;
	struct nvbios_dpcfg ocfg;
	u8  ver, hdr, cnt, len;
	u32 data;
	int ret, i;

	for (i = 0; i < ior->dp.nr; i++) {
		u8 lane = (lt->stat[4 + (i >> 1)] >> ((i & 1) * 4)) & 0xf;
		u8 lpc2 = (lt->pc2stat >> (i * 2)) & 0x3;
		u8 lpre = (lane & 0x0c) >> 2;
		u8 lvsw = (lane & 0x03) >> 0;
		u8 hivs = 3 - lpre;
		u8 hipe = 3;
		u8 hipc = 3;

		if (lpc2 >= hipc)
			lpc2 = hipc | DPCD_LC0F_LANE0_MAX_POST_CURSOR2_REACHED;
		if (lpre >= hipe) {
			lpre = hipe | DPCD_LC03_MAX_SWING_REACHED; /* yes. */
			lvsw = hivs = 3 - (lpre & 3);
		} else
		if (lvsw >= hivs) {
			lvsw = hivs | DPCD_LC03_MAX_SWING_REACHED;
		}

		lt->conf[i] = (lpre << 3) | lvsw;
		lt->pc2conf[i >> 1] |= lpc2 << ((i & 1) * 4);

		OUTP_TRACE(&dp->outp, "config lane %d %02x %02x",
			   i, lt->conf[i], lpc2);

		data = nvbios_dpout_match(bios, dp->outp.info.hasht,
					  dp->outp.info.hashm,
					  &ver, &hdr, &cnt, &len, &info);
		if (!data)
			continue;

		data = nvbios_dpcfg_match(bios, data, lpc2 & 3, lvsw & 3,
					  lpre & 3, &ver, &hdr, &cnt, &len,
					  &ocfg);
		if (!data)
			continue;

		ior->func->dp.drive(ior, i, ocfg.pc, ocfg.dc,
					    ocfg.pe, ocfg.tx_pu);
	}

	ret = nvkm_wraux(dp->aux, DPCD_LC03(0), lt->conf, 4);
	if (ret)
		return ret;

	if (pc) {
		ret = nvkm_wraux(dp->aux, DPCD_LC0F, lt->pc2conf, 2);
		if (ret)
			return ret;
	}

	return 0;
}

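/* Select a training pattern on both ends of the link: the source via the
 * IOR's pattern hook, the sink via DPCD TRAINING_PATTERN_SET.
 */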
static void
nvkm_dp_train_pattern(struct lt_state *lt, u8 pattern)
{
	struct nvkm_dp *dp = lt->dp;
	u8 sink_tp;

	OUTP_TRACE(&dp->outp, "training pattern %d", pattern);
	dp->outp.ior->func->dp.pattern(dp->outp.ior, pattern);

	nvkm_rdaux(dp->aux, DPCD_LC02, &sink_tp, 1);
	sink_tp &= ~DPCD_LC02_TRAINING_PATTERN_SET;
	sink_tp |= pattern;
	nvkm_wraux(dp->aux, DPCD_LC02, &sink_tp, 1);
}

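/* Channel equalisation: transmit TPS3 if the sink supports it (TPS2
 * otherwise) and keep adjusting drive settings until every active lane
 * reports EQ done and symbol lock with inter-lane alignment, clock recovery
 * is lost, or we run out of retries.
 */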
static int
nvkm_dp_train_eq(struct lt_state *lt)
{
	bool eq_done = false, cr_done = true;
	int tries = 0, i;

	if (lt->dp->dpcd[DPCD_RC02] & DPCD_RC02_TPS3_SUPPORTED)
		nvkm_dp_train_pattern(lt, 3);
	else
		nvkm_dp_train_pattern(lt, 2);

	do {
		if ((tries &&
		     nvkm_dp_train_drive(lt, lt->pc2)) ||
		    nvkm_dp_train_sense(lt, lt->pc2, 400))
			break;

		eq_done = !!(lt->stat[2] & DPCD_LS04_INTERLANE_ALIGN_DONE);
		for (i = 0; i < lt->dp->outp.ior->dp.nr && eq_done; i++) {
			u8 lane = (lt->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
			if (!(lane & DPCD_LS02_LANE0_CR_DONE))
				cr_done = false;
			if (!(lane & DPCD_LS02_LANE0_CHANNEL_EQ_DONE) ||
			    !(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED))
				eq_done = false;
		}
	} while (!eq_done && cr_done && ++tries <= 5);

	return eq_done ? 0 : -1;
}

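/* Clock recovery: transmit TPS1 and keep applying the sink's requested
 * drive adjustments until every active lane reports CR done.  Gives up if
 * maximum voltage swing is reached without success; the retry counter is
 * reset whenever the requested swing level changes.
 */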
static int
nvkm_dp_train_cr(struct lt_state *lt)
{
	bool cr_done = false, abort = false;
	int voltage = lt->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET;
	int tries = 0, i;

	nvkm_dp_train_pattern(lt, 1);

	do {
		if (nvkm_dp_train_drive(lt, false) ||
		    nvkm_dp_train_sense(lt, false, 100))
			break;

		cr_done = true;
		for (i = 0; i < lt->dp->outp.ior->dp.nr; i++) {
			u8 lane = (lt->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
			if (!(lane & DPCD_LS02_LANE0_CR_DONE)) {
				cr_done = false;
				if (lt->conf[i] & DPCD_LC03_MAX_SWING_REACHED)
					abort = true;
				break;
			}
		}

		if ((lt->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET) != voltage) {
			voltage = lt->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET;
			tries = 0;
		}
	} while (!cr_done && !abort && ++tries < 5);

	return cr_done ? 0 : -1;
}

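/* Program the selected lane count / link rate on both the source and the
 * sink (running the relevant VBIOS script for the OR), then attempt clock
 * recovery followed by channel equalisation at that configuration.
 */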
static int
nvkm_dp_train_links(struct nvkm_dp *dp)
{
	struct nvkm_ior *ior = dp->outp.ior;
	struct nvkm_disp *disp = dp->outp.disp;
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_bios *bios = subdev->device->bios;
	struct lt_state lt = {
		.dp = dp,
	};
	u32 lnkcmp;
	u8 sink[2];
	int ret;

	OUTP_DBG(&dp->outp, "training %d x %d MB/s",
		 ior->dp.nr, ior->dp.bw * 27);

	/* Intersect misc. capabilities of the OR and sink. */
	if (disp->engine.subdev.device->chipset < 0xd0)
		dp->dpcd[DPCD_RC02] &= ~DPCD_RC02_TPS3_SUPPORTED;
	lt.pc2 = dp->dpcd[DPCD_RC02] & DPCD_RC02_TPS3_SUPPORTED;

	/* Set desired link configuration on the source. */
	if ((lnkcmp = lt.dp->info.lnkcmp)) {
		if (dp->version < 0x30) {
			while ((ior->dp.bw * 2700) < nvbios_rd16(bios, lnkcmp))
				lnkcmp += 4;
			lnkcmp = nvbios_rd16(bios, lnkcmp + 2);
		} else {
			while (ior->dp.bw < nvbios_rd08(bios, lnkcmp))
				lnkcmp += 3;
			lnkcmp = nvbios_rd16(bios, lnkcmp + 1);
		}

		nvbios_init(subdev, lnkcmp,
			init.outp = &dp->outp.info;
			init.or   = ior->id;
			init.link = ior->asy.link;
		);
	}

	ret = ior->func->dp.links(ior, dp->aux);
	if (ret) {
		if (ret < 0) {
			OUTP_ERR(&dp->outp, "train failed with %d", ret);
			return ret;
		}
		return 0;
	}

	ior->func->dp.power(ior, ior->dp.nr);

	/* Set desired link configuration on the sink. */
	sink[0] = ior->dp.bw;
	sink[1] = ior->dp.nr;
	if (ior->dp.ef)
		sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN;

	ret = nvkm_wraux(dp->aux, DPCD_LC00_LINK_BW_SET, sink, 2);
	if (ret)
		return ret;

	/* Attempt to train the link in this configuration. */
	memset(lt.stat, 0x00, sizeof(lt.stat));
	ret = nvkm_dp_train_cr(&lt);
	if (ret == 0)
		ret = nvkm_dp_train_eq(&lt);
	nvkm_dp_train_pattern(&lt, 0);
	return ret;
}

static void
nvkm_dp_train_fini(struct nvkm_dp *dp)
{
	/* Execute AfterLinkTraining script from DP Info table. */
	nvbios_init(&dp->outp.disp->engine.subdev, dp->info.script[1],
		init.outp = &dp->outp.info;
		init.or   = dp->outp.ior->id;
		init.link = dp->outp.ior->asy.link;
	);
}

static void
nvkm_dp_train_init(struct nvkm_dp *dp)
{
	/* Execute EnableSpread/DisableSpread script from DP Info table. */
	if (dp->dpcd[DPCD_RC03] & DPCD_RC03_MAX_DOWNSPREAD) {
		nvbios_init(&dp->outp.disp->engine.subdev, dp->info.script[2],
			init.outp = &dp->outp.info;
			init.or   = dp->outp.ior->id;
			init.link = dp->outp.ior->asy.link;
		);
	} else {
		nvbios_init(&dp->outp.disp->engine.subdev, dp->info.script[3],
			init.outp = &dp->outp.info;
			init.or   = dp->outp.ior->id;
			init.link = dp->outp.ior->asy.link;
		);
	}

	/* Execute BeforeLinkTraining script from DP Info table. */
	nvbios_init(&dp->outp.disp->engine.subdev, dp->info.script[0],
		init.outp = &dp->outp.info;
		init.or   = dp->outp.ior->id;
		init.link = dp->outp.ior->asy.link;
	);
}

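/* Candidate link configurations, highest payload bandwidth first.  rate is
 * the usable (post-8b/10b) payload in KB/s, i.e. bw * 27000 * nr, where bw
 * is the DPCD link rate code in units of 270Mbps (RBR/HBR/HBR2).
 */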
static const struct dp_rates {
	u32 rate;
	u8  bw;
	u8  nr;
} nvkm_dp_rates[] = {
	{ 2160000, 0x14, 4 },
	{ 1080000, 0x0a, 4 },
	{ 1080000, 0x14, 2 },
	{  648000, 0x06, 4 },
	{  540000, 0x0a, 2 },
	{  540000, 0x14, 1 },
	{  324000, 0x06, 2 },
	{  270000, 0x0a, 1 },
	{  162000, 0x06, 1 },
	{}
};

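/* Pick the lowest OR-supported configuration that still covers the requested
 * payload (the "failsafe"), make sure the sink is powered up (D0), then walk
 * the rate table from the fastest supported entry down to the failsafe until
 * link training succeeds.
 */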
static int
nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
{
	struct nvkm_ior *ior = dp->outp.ior;
	const u8 sink_nr = dp->dpcd[DPCD_RC02] & DPCD_RC02_MAX_LANE_COUNT;
	const u8 sink_bw = dp->dpcd[DPCD_RC01_MAX_LINK_RATE];
	const u8 outp_nr = dp->outp.info.dpconf.link_nr;
	const u8 outp_bw = dp->outp.info.dpconf.link_bw;
	const struct dp_rates *failsafe = NULL, *cfg;
	int ret = -EINVAL;
	u8 pwr;

	/* Find the lowest configuration of the OR that can support
	 * the required link rate.
	 *
	 * We will refuse to program the OR to lower rates, even if
	 * link training fails at higher rates (or even if the sink
	 * can't support the rate at all, though the DD is supposed
	 * to prevent such situations from happening).
	 *
	 * Attempting to do so can cause the entire display to hang,
	 * and it's better to have a failed modeset than that.
	 */
	for (cfg = nvkm_dp_rates; cfg->rate; cfg++) {
		if (cfg->nr <= outp_nr && cfg->bw <= outp_bw)
			failsafe = cfg;
		if (failsafe && cfg[1].rate < dataKBps)
			break;
	}

	if (WARN_ON(!failsafe))
		return ret;

	/* Ensure sink is not in a low-power state. */
	if (!nvkm_rdaux(dp->aux, DPCD_SC00, &pwr, 1)) {
		if ((pwr & DPCD_SC00_SET_POWER) != DPCD_SC00_SET_POWER_D0) {
			pwr &= ~DPCD_SC00_SET_POWER;
			pwr |=  DPCD_SC00_SET_POWER_D0;
			nvkm_wraux(dp->aux, DPCD_SC00, &pwr, 1);
		}
	}

	/* Link training. */
	OUTP_DBG(&dp->outp, "training (min: %d x %d MB/s)",
		 failsafe->nr, failsafe->bw * 27);
	nvkm_dp_train_init(dp);
	for (cfg = nvkm_dp_rates; ret < 0 && cfg <= failsafe; cfg++) {
		/* Skip configurations not supported by both OR and sink. */
		if ((cfg->nr > outp_nr || cfg->bw > outp_bw ||
		     cfg->nr > sink_nr || cfg->bw > sink_bw)) {
			if (cfg != failsafe)
				continue;
			OUTP_ERR(&dp->outp, "link rate unsupported by sink");
		}
		ior->dp.mst = dp->lt.mst;
		ior->dp.ef = dp->dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP;
		ior->dp.bw = cfg->bw;
		ior->dp.nr = cfg->nr;

		/* Program selected link configuration. */
		ret = nvkm_dp_train_links(dp);
	}
	nvkm_dp_train_fini(dp);
	if (ret < 0)
		OUTP_ERR(&dp->outp, "training failed");
	else
		OUTP_DBG(&dp->outp, "training done");
	atomic_set(&dp->lt.done, 1);
	return ret;
}

static void
nvkm_dp_release(struct nvkm_outp *outp, struct nvkm_ior *ior)
{
	struct nvkm_dp *dp = nvkm_dp(outp);

	/* Prevent link from being retrained if sink sends an IRQ. */
	atomic_set(&dp->lt.done, 0);
	ior->dp.nr = 0;

	/* Execute DisableLT script from DP Info Table. */
	nvbios_init(&ior->disp->engine.subdev, dp->info.script[4],
		init.outp = &dp->outp.info;
		init.or   = ior->id;
		init.link = ior->arm.link;
	);
}

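/* Output acquire hook: check that the current link still satisfies the
 * payload/MST requirements and that the sink still reports all lanes
 * trained, and retrain at a suitable rate if either check fails.
 */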
static int
nvkm_dp_acquire(struct nvkm_outp *outp)
{
	struct nvkm_dp *dp = nvkm_dp(outp);
	struct nvkm_ior *ior = dp->outp.ior;
	struct nvkm_head *head;
	bool retrain = true;
	u32 datakbps = 0;
	u32 dataKBps;
	u32 linkKBps;
	u8  stat[3];
	int ret, i;

	mutex_lock(&dp->mutex);

	/* Check that link configuration meets current requirements. */
	list_for_each_entry(head, &outp->disp->head, head) {
		if (ior->asy.head & (1 << head->id)) {
			u32 khz = (head->asy.hz >> ior->asy.rgdiv) / 1000;
			datakbps += khz * head->asy.or.depth;
		}
	}

	linkKBps = ior->dp.bw * 27000 * ior->dp.nr;
	dataKBps = DIV_ROUND_UP(datakbps, 8);
	OUTP_DBG(&dp->outp, "data %d KB/s link %d KB/s mst %d->%d",
		 dataKBps, linkKBps, ior->dp.mst, dp->lt.mst);
	if (linkKBps < dataKBps || ior->dp.mst != dp->lt.mst) {
		OUTP_DBG(&dp->outp, "link requirements changed");
		goto done;
	}

	/* Check that link is still trained. */
	ret = nvkm_rdaux(dp->aux, DPCD_LS02, stat, 3);
	if (ret) {
		OUTP_DBG(&dp->outp,
			 "failed to read link status, assuming no sink");
		goto done;
	}

	if (stat[2] & DPCD_LS04_INTERLANE_ALIGN_DONE) {
		for (i = 0; i < ior->dp.nr; i++) {
			u8 lane = (stat[i >> 1] >> ((i & 1) * 4)) & 0x0f;
			if (!(lane & DPCD_LS02_LANE0_CR_DONE) ||
			    !(lane & DPCD_LS02_LANE0_CHANNEL_EQ_DONE) ||
			    !(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED)) {
				OUTP_DBG(&dp->outp,
					 "lane %d not equalised", i);
				goto done;
			}
		}
		retrain = false;
	} else {
		OUTP_DBG(&dp->outp, "no inter-lane alignment");
	}

done:
	if (retrain || !atomic_read(&dp->lt.done))
		ret = nvkm_dp_train(dp, dataKBps);
	mutex_unlock(&dp->mutex);
	return ret;
}

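/* Switch sink AUX power monitoring on or off.  When enabling, also try to
 * refresh the cached DPCD receiver capabilities; if that fails (or when
 * disabling), drop back to on-demand AUX power and mark the link untrained.
 */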
static void
nvkm_dp_enable(struct nvkm_dp *dp, bool enable)
{
	struct nvkm_i2c_aux *aux = dp->aux;

	if (enable) {
		if (!dp->present) {
			OUTP_DBG(&dp->outp, "aux power -> always");
			nvkm_i2c_aux_monitor(aux, true);
			dp->present = true;
		}

		if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, dp->dpcd,
				sizeof(dp->dpcd)))
			return;
	}

	if (dp->present) {
		OUTP_DBG(&dp->outp, "aux power -> demand");
		nvkm_i2c_aux_monitor(aux, false);
		dp->present = false;
	}

	atomic_set(&dp->lt.done, 0);
}

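/* AUX event handler: re-probe the sink's DPCD on plug/unplug, kick a
 * retrain through the acquire hook on sink IRQs if the link had been
 * trained, and forward the event to the connector's hotplug notifier.
 */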
static int
nvkm_dp_hpd(struct nvkm_notify *notify)
{
	const struct nvkm_i2c_ntfy_rep *line = notify->data;
	struct nvkm_dp *dp = container_of(notify, typeof(*dp), hpd);
	struct nvkm_conn *conn = dp->outp.conn;
	struct nvkm_disp *disp = dp->outp.disp;
	struct nvif_notify_conn_rep_v0 rep = {};

	OUTP_DBG(&dp->outp, "HPD: %d", line->mask);
	if (line->mask & NVKM_I2C_IRQ) {
		if (atomic_read(&dp->lt.done))
			dp->outp.func->acquire(&dp->outp);
		rep.mask |= NVIF_NOTIFY_CONN_V0_IRQ;
	} else {
		nvkm_dp_enable(dp, true);
	}

	if (line->mask & NVKM_I2C_UNPLUG)
		rep.mask |= NVIF_NOTIFY_CONN_V0_UNPLUG;
	if (line->mask & NVKM_I2C_PLUG)
		rep.mask |= NVIF_NOTIFY_CONN_V0_PLUG;

	nvkm_event_send(&disp->hpd, rep.mask, conn->index, &rep, sizeof(rep));
	return NVKM_NOTIFY_KEEP;
}

static void
nvkm_dp_fini(struct nvkm_outp *outp)
{
	struct nvkm_dp *dp = nvkm_dp(outp);
	nvkm_notify_put(&dp->hpd);
	nvkm_dp_enable(dp, false);
}

static void
nvkm_dp_init(struct nvkm_outp *outp)
{
	struct nvkm_dp *dp = nvkm_dp(outp);
	nvkm_notify_put(&dp->outp.conn->hpd);
	nvkm_dp_enable(dp, true);
	nvkm_notify_get(&dp->hpd);
}

static void *
nvkm_dp_dtor(struct nvkm_outp *outp)
{
	struct nvkm_dp *dp = nvkm_dp(outp);
	nvkm_notify_fini(&dp->hpd);
	return dp;
}

static const struct nvkm_outp_func
nvkm_dp_func = {
	.dtor = nvkm_dp_dtor,
	.init = nvkm_dp_init,
	.fini = nvkm_dp_fini,
	.acquire = nvkm_dp_acquire,
	.release = nvkm_dp_release,
};

static int
nvkm_dp_ctor(struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
	     struct nvkm_i2c_aux *aux, struct nvkm_dp *dp)
{
	struct nvkm_device *device = disp->engine.subdev.device;
	struct nvkm_bios *bios = device->bios;
	struct nvkm_i2c *i2c = device->i2c;
	u8  hdr, cnt, len;
	u32 data;
	int ret;

	ret = nvkm_outp_ctor(&nvkm_dp_func, disp, index, dcbE, &dp->outp);
	if (ret)
		return ret;

	dp->aux = aux;
	if (!dp->aux) {
		OUTP_ERR(&dp->outp, "no aux");
		return -EINVAL;
	}

	/* bios data is not optional */
	data = nvbios_dpout_match(bios, dp->outp.info.hasht,
				  dp->outp.info.hashm, &dp->version,
				  &hdr, &cnt, &len, &dp->info);
	if (!data) {
		OUTP_ERR(&dp->outp, "no bios dp data");
		return -EINVAL;
	}

	OUTP_DBG(&dp->outp, "bios dp %02x %02x %02x %02x",
		 dp->version, hdr, cnt, len);

	/* hotplug detect, replaces gpio-based mechanism with aux events */
	ret = nvkm_notify_init(NULL, &i2c->event, nvkm_dp_hpd, true,
			       &(struct nvkm_i2c_ntfy_req) {
					.mask = NVKM_I2C_PLUG | NVKM_I2C_UNPLUG |
						NVKM_I2C_IRQ,
					.port = dp->aux->id,
			       },
			       sizeof(struct nvkm_i2c_ntfy_req),
			       sizeof(struct nvkm_i2c_ntfy_rep),
			       &dp->hpd);
	if (ret) {
		OUTP_ERR(&dp->outp, "error monitoring aux hpd: %d", ret);
		return ret;
	}

	mutex_init(&dp->mutex);
	atomic_set(&dp->lt.done, 0);
	return 0;
}

int
nvkm_dp_new(struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
	    struct nvkm_outp **poutp)
{
	struct nvkm_i2c *i2c = disp->engine.subdev.device->i2c;
	struct nvkm_i2c_aux *aux;
	struct nvkm_dp *dp;

	if (dcbE->location == 0)
		aux = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_CCB(dcbE->i2c_index));
	else
		aux = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbE->extdev));

	if (!(dp = kzalloc(sizeof(*dp), GFP_KERNEL)))
		return -ENOMEM;
	*poutp = &dp->outp;

	return nvkm_dp_ctor(disp, index, dcbE, aux, dp);
}