/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "ior.h"

#include <subdev/timer.h>
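
/* Program the DP watermark level (6-bit field of 0x61c128) for the
 * link driven by this SOR.
 */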
void
g94_sor_dp_watermark(struct nvkm_ior *sor, int head, u8 watermark)
{
        struct nvkm_device *device = sor->disp->engine.subdev.device;
        const u32 loff = nv50_sor_link(sor);
        nvkm_mask(device, 0x61c128 + loff, 0x0000003f, watermark);
}
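
/* Program the DP transfer-unit size (TU, bits 8:2 of 0x61c10c) and the
 * VTUa/VTUf/VTUi active-symbol parameters packed into 0x61c128.
 */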
void
g94_sor_dp_activesym(struct nvkm_ior *sor, int head,
                     u8 TU, u8 VTUa, u8 VTUf, u8 VTUi)
{
        struct nvkm_device *device = sor->disp->engine.subdev.device;
        const u32 loff = nv50_sor_link(sor);
        nvkm_mask(device, 0x61c10c + loff, 0x000001fc, TU << 2);
        nvkm_mask(device, 0x61c128 + loff, 0x010f7f00, VTUa << 24 |
                                                       VTUf << 16 |
                                                       VTUi << 8);
}

void
g94_sor_dp_audio_sym(struct nvkm_ior *sor, int head, u16 h, u32 v)
{
        struct nvkm_device *device = sor->disp->engine.subdev.device;
        const u32 soff = nv50_ior_base(sor);
        nvkm_mask(device, 0x61c1e8 + soff, 0x0000ffff, h);
        nvkm_mask(device, 0x61c1ec + soff, 0x00ffffff, v);
}
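
/* Apply per-lane DP drive settings.  The logical lane index is remapped
 * through the SOR's lane map, dc and pe land in the per-lane byte of
 * 0x61c118/0x61c120, and the shared pu field in 0x61c130 is only ever
 * raised (or forced when programming lane 0).
 */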
static void
g94_sor_dp_drive(struct nvkm_ior *sor, int ln, int pc, int dc, int pe, int pu)
{
        struct nvkm_device *device = sor->disp->engine.subdev.device;
        const u32 loff = nv50_sor_link(sor);
        const u32 shift = sor->func->dp.lanes[ln] * 8;
        u32 data[3];

        data[0] = nvkm_rd32(device, 0x61c118 + loff) & ~(0x000000ff << shift);
        data[1] = nvkm_rd32(device, 0x61c120 + loff) & ~(0x000000ff << shift);
        data[2] = nvkm_rd32(device, 0x61c130 + loff);
        if ((data[2] & 0x0000ff00) < (pu << 8) || ln == 0)
                data[2] = (data[2] & ~0x0000ff00) | (pu << 8);
        nvkm_wr32(device, 0x61c118 + loff, data[0] | (dc << shift));
        nvkm_wr32(device, 0x61c120 + loff, data[1] | (pe << shift));
        nvkm_wr32(device, 0x61c130 + loff, data[2]);
}

void
g94_sor_dp_pattern(struct nvkm_ior *sor, int pattern)
{
        struct nvkm_device *device = sor->disp->engine.subdev.device;
        const u32 loff = nv50_sor_link(sor);
        nvkm_mask(device, 0x61c10c + loff, 0x0f000000, pattern << 24);
}
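
/* Power up the first 'nr' DP lanes.  The lane-enable mask is built from
 * the SOR's lane map, and completion is polled (for up to 2ms) by
 * waiting for bit 31 of 0x61c034 to clear.
 */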
void
g94_sor_dp_power(struct nvkm_ior *sor, int nr)
{
        struct nvkm_device *device = sor->disp->engine.subdev.device;
        const u32 soff = nv50_ior_base(sor);
        const u32 loff = nv50_sor_link(sor);
        u32 mask = 0, i;

        for (i = 0; i < nr; i++)
                mask |= 1 << sor->func->dp.lanes[i];

        nvkm_mask(device, 0x61c130 + loff, 0x0000000f, mask);
        nvkm_mask(device, 0x61c034 + soff, 0x80000000, 0x80000000);
        nvkm_msec(device, 2000,
                if (!(nvkm_rd32(device, 0x61c034 + soff) & 0x80000000))
                        break;
        );
}
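
/* Configure the main link: dpctrl carries a lane-enable mask at bits
 * 19:16 (plus enhanced framing at bit 14), while clksor selects the
 * higher link rate whenever dp.bw exceeds 0x06 (1.62Gb/s, with bw
 * expressed in 270MHz units as in the DPCD).
 */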
int
g94_sor_dp_links(struct nvkm_ior *sor, struct nvkm_i2c_aux *aux)
{
        struct nvkm_device *device = sor->disp->engine.subdev.device;
        const u32 soff = nv50_ior_base(sor);
        const u32 loff = nv50_sor_link(sor);
        u32 dpctrl = 0x00000000;
        u32 clksor = 0x00000000;

        dpctrl |= ((1 << sor->dp.nr) - 1) << 16;
        if (sor->dp.ef)
                dpctrl |= 0x00004000;
        if (sor->dp.bw > 0x06)
                clksor |= 0x00040000;

        nvkm_mask(device, 0x614300 + soff, 0x000c0000, clksor);
        nvkm_mask(device, 0x61c10c + loff, 0x001f4000, dpctrl);
        return 0;
}
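
/* The war_* functions below implement a clock-related workaround applied
 * around TMDS modesets on these chips; war_needed() decides from the
 * asserted protocol and the SOR clock-source bits of 0x614300 whether
 * the sequence has to run.
 */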
static bool
g94_sor_war_needed(struct nvkm_ior *sor)
{
        struct nvkm_device *device = sor->disp->engine.subdev.device;
        const u32 soff = nv50_ior_base(sor);

        if (sor->asy.proto == TMDS) {
                switch (nvkm_rd32(device, 0x614300 + soff) & 0x00030000) {
                case 0x00000000:
                case 0x00030000:
                        return true;
                default:
                        break;
                }
        }

        return false;
}

static void
g94_sor_war_update_sppll1(struct nvkm_disp *disp)
{
        struct nvkm_device *device = disp->engine.subdev.device;
        struct nvkm_ior *ior;
        bool used = false;
        u32 clksor;

        list_for_each_entry(ior, &disp->ior, head) {
                if (ior->type != SOR)
                        continue;

                clksor = nvkm_rd32(device, 0x614300 + nv50_ior_base(ior));
                switch (clksor & 0x03000000) {
                case 0x02000000:
                case 0x03000000:
                        used = true;
                        break;
                default:
                        break;
                }
        }

        if (used)
                return;

        nvkm_mask(device, 0x00e840, 0x80000000, 0x00000000);
}

static void
g94_sor_war_3(struct nvkm_ior *sor)
{
        struct nvkm_device *device = sor->disp->engine.subdev.device;
        const u32 soff = nv50_ior_base(sor);
        u32 sorpwr;

        if (!g94_sor_war_needed(sor))
                return;

        sorpwr = nvkm_rd32(device, 0x61c004 + soff);
        if (sorpwr & 0x00000001) {
                u32 seqctl = nvkm_rd32(device, 0x61c030 + soff);
                u32  pd_pc = (seqctl & 0x00000f00) >> 8;
                u32  pu_pc =  seqctl & 0x0000000f;

                nvkm_wr32(device, 0x61c040 + soff + pd_pc * 4, 0x1f008000);

                nvkm_msec(device, 2000,
                        if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000))
                                break;
                );
                nvkm_mask(device, 0x61c004 + soff, 0x80000001, 0x80000000);
                nvkm_msec(device, 2000,
                        if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000))
                                break;
                );

                nvkm_wr32(device, 0x61c040 + soff + pd_pc * 4, 0x00002000);
                nvkm_wr32(device, 0x61c040 + soff + pu_pc * 4, 0x1f000000);
        }

        nvkm_mask(device, 0x61c10c + soff, 0x00000001, 0x00000000);
        nvkm_mask(device, 0x614300 + soff, 0x03000000, 0x00000000);

        if (sorpwr & 0x00000001) {
                nvkm_mask(device, 0x61c004 + soff, 0x80000001, 0x80000001);
        }

        g94_sor_war_update_sppll1(sor->disp);
}

static void
g94_sor_war_2(struct nvkm_ior *sor)
{
        struct nvkm_device *device = sor->disp->engine.subdev.device;
        const u32 soff = nv50_ior_base(sor);

        if (!g94_sor_war_needed(sor))
                return;

        nvkm_mask(device, 0x00e840, 0x80000000, 0x80000000);
        nvkm_mask(device, 0x614300 + soff, 0x03000000, 0x03000000);
        nvkm_mask(device, 0x61c10c + soff, 0x00000001, 0x00000001);

        nvkm_mask(device, 0x61c00c + soff, 0x0f000000, 0x00000000);
        nvkm_mask(device, 0x61c008 + soff, 0xff000000, 0x14000000);
        nvkm_usec(device, 400, NVKM_DELAY);
        nvkm_mask(device, 0x61c008 + soff, 0xff000000, 0x00000000);
        nvkm_mask(device, 0x61c00c + soff, 0x0f000000, 0x01000000);

        if (nvkm_rd32(device, 0x61c004 + soff) & 0x00000001) {
                u32 seqctl = nvkm_rd32(device, 0x61c030 + soff);
                u32  pu_pc =  seqctl & 0x0000000f;
                nvkm_wr32(device, 0x61c040 + soff + pu_pc * 4, 0x1f008000);
        }
}
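
/* Decode a SOR's hardware state (armed or asserted, selected by which
 * state struct is passed in) from the 0x610794 control word: protocol,
 * link and owning head.
 */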
void
g94_sor_state(struct nvkm_ior *sor, struct nvkm_ior_state *state)
{
        struct nvkm_device *device = sor->disp->engine.subdev.device;
        const u32 coff = sor->id * 8 + (state == &sor->arm) * 4;
        u32 ctrl = nvkm_rd32(device, 0x610794 + coff);

        state->proto_evo = (ctrl & 0x00000f00) >> 8;
        switch (state->proto_evo) {
        case 0: state->proto = LVDS; state->link = 1; break;
        case 1: state->proto = TMDS; state->link = 1; break;
        case 2: state->proto = TMDS; state->link = 2; break;
        case 5: state->proto = TMDS; state->link = 3; break;
        case 8: state->proto =   DP; state->link = 1; break;
        case 9: state->proto =   DP; state->link = 2; break;
        default:
                state->proto = UNKNOWN;
                break;
        }

        state->head = ctrl & 0x00000003;
        nv50_pior_depth(sor, state, ctrl);
}
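
/* Per-SOR function table for G94.  Note the DP lane map { 2, 1, 0, 3 }:
 * it translates logical lane numbers into the per-lane register byte
 * used by the drive/power helpers above.
 */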
static const struct nvkm_ior_func
g94_sor = {
        .state = g94_sor_state,
        .power = nv50_sor_power,
        .clock = nv50_sor_clock,
        .war_2 = g94_sor_war_2,
        .war_3 = g94_sor_war_3,
        .dp = {
                .lanes = { 2, 1, 0, 3},
                .links = g94_sor_dp_links,
                .power = g94_sor_dp_power,
                .pattern = g94_sor_dp_pattern,
                .drive = g94_sor_dp_drive,
                .audio_sym = g94_sor_dp_audio_sym,
                .activesym = g94_sor_dp_activesym,
                .watermark = g94_sor_dp_watermark,
        },
};

int
g94_sor_new(struct nvkm_disp *disp, int id)
{
        return nv50_sor_new_(&g94_sor, disp, id);
}