/*
 * Copyright 2007-8 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"

#include "atom.h"
#include "atom-bits.h"
#include "atombios_encoders.h"
#include "bif/bif_4_1_d.h"
static void amdgpu_atombios_lookup_i2c_gpio_quirks(struct amdgpu_device *adev,
						   ATOM_GPIO_I2C_ASSIGMENT *gpio,
						   u8 index)
{

}
static struct amdgpu_i2c_bus_rec amdgpu_atombios_get_bus_rec_for_i2c_gpio(ATOM_GPIO_I2C_ASSIGMENT *gpio)
{
	struct amdgpu_i2c_bus_rec i2c;

	memset(&i2c, 0, sizeof(struct amdgpu_i2c_bus_rec));

	i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex);
	i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex);
	i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex);
	i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex);
	i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex);
	i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex);
	i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex);
	i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex);
	i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
	i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
	i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
	i2c.en_data_mask = (1 << gpio->ucDataEnShift);
	i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
	i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
	i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
	i2c.a_data_mask = (1 << gpio->ucDataA_Shift);

	if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
		i2c.hw_capable = true;
	else
		i2c.hw_capable = false;

	if (gpio->sucI2cId.ucAccess == 0xa0)
		i2c.mm_i2c = true;
	else
		i2c.mm_i2c = false;

	i2c.i2c_id = gpio->sucI2cId.ucAccess;

	if (i2c.mask_clk_reg)
		i2c.valid = true;
	else
		i2c.valid = false;

	return i2c;
}
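/*
 * Illustrative sketch (not part of the driver): with the record decoded
 * above, bit-banging one line follows the usual mask/enable/A-register
 * pattern, assuming the standard RREG32/WREG32 register accessors:
 *
 *	val = RREG32(i2c.a_data_reg) & ~i2c.a_data_mask;
 *	WREG32(i2c.a_data_reg, val);		(latch the data value low)
 *	val = RREG32(i2c.en_data_reg) | i2c.en_data_mask;
 *	WREG32(i2c.en_data_reg, val);		(enable the output driver)
 *
 * The real bit-banging implementation lives in amdgpu_i2c.c.
 */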
struct amdgpu_i2c_bus_rec amdgpu_atombios_lookup_i2c_gpio(struct amdgpu_device *adev,
							  uint8_t id)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	ATOM_GPIO_I2C_ASSIGMENT *gpio;
	struct amdgpu_i2c_bus_rec i2c;
	int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
	struct _ATOM_GPIO_I2C_INFO *i2c_info;
	uint16_t data_offset, size;
	int i, num_indices;

	memset(&i2c, 0, sizeof(struct amdgpu_i2c_bus_rec));
	i2c.valid = false;

	if (amdgpu_atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
		i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);

		num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
			sizeof(ATOM_GPIO_I2C_ASSIGMENT);

		gpio = &i2c_info->asGPIO_Info[0];
		for (i = 0; i < num_indices; i++) {
			amdgpu_atombios_lookup_i2c_gpio_quirks(adev, gpio, i);

			if (gpio->sucI2cId.ucAccess == id) {
				i2c = amdgpu_atombios_get_bus_rec_for_i2c_gpio(gpio);
				break;
			}
			gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
				((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT));
		}
	}

	return i2c;
}
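/*
 * Typical use (sketch): connector parsing finds an i2c id in a BIOS
 * record and resolves it to a bus record before registering the DDC
 * line, e.g.:
 *
 *	ddc_bus = amdgpu_atombios_lookup_i2c_gpio(adev, i2c_config->ucAccess);
 *	if (ddc_bus.valid)
 *		(use ddc_bus as the connector's DDC/aux channel)
 */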
void amdgpu_atombios_i2c_init(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	ATOM_GPIO_I2C_ASSIGMENT *gpio;
	struct amdgpu_i2c_bus_rec i2c;
	int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
	struct _ATOM_GPIO_I2C_INFO *i2c_info;
	uint16_t data_offset, size;
	int i, num_indices;
	char stmp[32];

	if (amdgpu_atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
		i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);

		num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
			sizeof(ATOM_GPIO_I2C_ASSIGMENT);

		gpio = &i2c_info->asGPIO_Info[0];
		for (i = 0; i < num_indices; i++) {
			amdgpu_atombios_lookup_i2c_gpio_quirks(adev, gpio, i);

			i2c = amdgpu_atombios_get_bus_rec_for_i2c_gpio(gpio);

			if (i2c.valid) {
				sprintf(stmp, "0x%x", i2c.i2c_id);
				adev->i2c_bus[i] = amdgpu_i2c_create(adev->ddev, &i2c, stmp);
			}
			gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
				((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT));
		}
	}
}
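/*
 * Note: this registers one i2c bus per GPIO_I2C_Info table entry at init
 * time, named by its hex i2c id ("0x%x"), whereas the lookup helper above
 * resolves a single id on demand.
 */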
struct amdgpu_gpio_rec
amdgpu_atombios_lookup_gpio(struct amdgpu_device *adev,
			    u8 id)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	struct amdgpu_gpio_rec gpio;
	int index = GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT);
	struct _ATOM_GPIO_PIN_LUT *gpio_info;
	ATOM_GPIO_PIN_ASSIGNMENT *pin;
	u16 data_offset, size;
	int i, num_indices;

	memset(&gpio, 0, sizeof(struct amdgpu_gpio_rec));
	gpio.valid = false;

	if (amdgpu_atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
		gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset);

		num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
			sizeof(ATOM_GPIO_PIN_ASSIGNMENT);

		pin = gpio_info->asGPIO_Pin;
		for (i = 0; i < num_indices; i++) {
			if (id == pin->ucGPIO_ID) {
				gpio.id = pin->ucGPIO_ID;
				gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex);
				gpio.shift = pin->ucGpioPinBitShift;
				gpio.mask = (1 << pin->ucGpioPinBitShift);
				gpio.valid = true;
				break;
			}
			pin = (ATOM_GPIO_PIN_ASSIGNMENT *)
				((u8 *)pin + sizeof(ATOM_GPIO_PIN_ASSIGNMENT));
		}
	}

	return gpio;
}
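/*
 * Sketch of consuming a looked-up pin, assuming the usual reg/mask
 * semantics of struct amdgpu_gpio_rec:
 *
 *	struct amdgpu_gpio_rec rec = amdgpu_atombios_lookup_gpio(adev, id);
 *	bool asserted = rec.valid && (RREG32(rec.reg) & rec.mask);
 */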
static struct amdgpu_hpd
amdgpu_atombios_get_hpd_info_from_gpio(struct amdgpu_device *adev,
				       struct amdgpu_gpio_rec *gpio)
{
	struct amdgpu_hpd hpd;
	u32 reg;

	memset(&hpd, 0, sizeof(struct amdgpu_hpd));

	reg = amdgpu_display_hpd_get_gpio_reg(adev);

	hpd.gpio = *gpio;
	if (gpio->reg == reg) {
		switch(gpio->mask) {
		case (1 << 0):
			hpd.hpd = AMDGPU_HPD_1;
			break;
		case (1 << 8):
			hpd.hpd = AMDGPU_HPD_2;
			break;
		case (1 << 16):
			hpd.hpd = AMDGPU_HPD_3;
			break;
		case (1 << 24):
			hpd.hpd = AMDGPU_HPD_4;
			break;
		case (1 << 26):
			hpd.hpd = AMDGPU_HPD_5;
			break;
		case (1 << 28):
			hpd.hpd = AMDGPU_HPD_6;
			break;
		default:
			hpd.hpd = AMDGPU_HPD_NONE;
			break;
		}
	} else
		hpd.hpd = AMDGPU_HPD_NONE;
	return hpd;
}
static const int object_connector_convert[] = {
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_DVII,
	DRM_MODE_CONNECTOR_DVII,
	DRM_MODE_CONNECTOR_DVID,
	DRM_MODE_CONNECTOR_DVID,
	DRM_MODE_CONNECTOR_VGA,
	DRM_MODE_CONNECTOR_Composite,
	DRM_MODE_CONNECTOR_SVIDEO,
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_9PinDIN,
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_HDMIA,
	DRM_MODE_CONNECTOR_HDMIB,
	DRM_MODE_CONNECTOR_LVDS,
	DRM_MODE_CONNECTOR_9PinDIN,
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_DisplayPort,
	DRM_MODE_CONNECTOR_eDP,
	DRM_MODE_CONNECTOR_Unknown
};
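/*
 * The table above maps ATOM connector object ids (the array index) to DRM
 * connector types; ids with no DRM equivalent map to
 * DRM_MODE_CONNECTOR_Unknown and are skipped during connector parsing.
 */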
bool amdgpu_atombios_has_dce_engine_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct atom_context *ctx = mode_info->atom_context;
	int index = GetIndexIntoMasterTable(DATA, Object_Header);
	u16 size, data_offset;
	u8 frev, crev;
	ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
	ATOM_OBJECT_HEADER *obj_header;

	if (!amdgpu_atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
		return false;

	if (crev < 2)
		return false;

	obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset);
	path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *)
	    (ctx->bios + data_offset +
	     le16_to_cpu(obj_header->usDisplayPathTableOffset));

	if (path_obj->ucNumOfDispPath)
		return true;
	else
		return false;
}
bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct atom_context *ctx = mode_info->atom_context;
	int index = GetIndexIntoMasterTable(DATA, Object_Header);
	u16 size, data_offset;
	u8 frev, crev;
	ATOM_CONNECTOR_OBJECT_TABLE *con_obj;
	ATOM_ENCODER_OBJECT_TABLE *enc_obj;
	ATOM_OBJECT_TABLE *router_obj;
	ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
	ATOM_OBJECT_HEADER *obj_header;
	int i, j, k, path_size, device_support;
	int connector_type;
	u16 conn_id, connector_object_id;
	struct amdgpu_i2c_bus_rec ddc_bus;
	struct amdgpu_router router;
	struct amdgpu_gpio_rec gpio;
	struct amdgpu_hpd hpd;

	if (!amdgpu_atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
		return false;

	if (crev < 2)
		return false;

	obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset);
	path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *)
	    (ctx->bios + data_offset +
	     le16_to_cpu(obj_header->usDisplayPathTableOffset));
	con_obj = (ATOM_CONNECTOR_OBJECT_TABLE *)
	    (ctx->bios + data_offset +
	     le16_to_cpu(obj_header->usConnectorObjectTableOffset));
	enc_obj = (ATOM_ENCODER_OBJECT_TABLE *)
	    (ctx->bios + data_offset +
	     le16_to_cpu(obj_header->usEncoderObjectTableOffset));
	router_obj = (ATOM_OBJECT_TABLE *)
		(ctx->bios + data_offset +
		 le16_to_cpu(obj_header->usRouterObjectTableOffset));
	device_support = le16_to_cpu(obj_header->usDeviceSupport);

	path_size = 0;
	for (i = 0; i < path_obj->ucNumOfDispPath; i++) {
		uint8_t *addr = (uint8_t *) path_obj->asDispPath;
		ATOM_DISPLAY_OBJECT_PATH *path;
		addr += path_size;
		path = (ATOM_DISPLAY_OBJECT_PATH *) addr;
		path_size += le16_to_cpu(path->usSize);

		if (device_support & le16_to_cpu(path->usDeviceTag)) {
			uint8_t con_obj_id, con_obj_num, con_obj_type;

			con_obj_id =
			    (le16_to_cpu(path->usConnObjectId) & OBJECT_ID_MASK)
			    >> OBJECT_ID_SHIFT;
			con_obj_num =
			    (le16_to_cpu(path->usConnObjectId) & ENUM_ID_MASK)
			    >> ENUM_ID_SHIFT;
			con_obj_type =
			    (le16_to_cpu(path->usConnObjectId) &
			     OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;

			/* Skip TV/CV support */
			if ((le16_to_cpu(path->usDeviceTag) ==
			     ATOM_DEVICE_TV1_SUPPORT) ||
			    (le16_to_cpu(path->usDeviceTag) ==
			     ATOM_DEVICE_CV_SUPPORT))
				continue;

			if (con_obj_id >= ARRAY_SIZE(object_connector_convert)) {
				DRM_ERROR("invalid con_obj_id %d for device tag 0x%04x\n",
					  con_obj_id, le16_to_cpu(path->usDeviceTag));
				continue;
			}

			connector_type =
				object_connector_convert[con_obj_id];
			connector_object_id = con_obj_id;

			if (connector_type == DRM_MODE_CONNECTOR_Unknown)
				continue;

			router.ddc_valid = false;
			router.cd_valid = false;
			for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
				uint8_t grph_obj_id, grph_obj_num, grph_obj_type;

				grph_obj_id =
				    (le16_to_cpu(path->usGraphicObjIds[j]) &
				     OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
				grph_obj_num =
				    (le16_to_cpu(path->usGraphicObjIds[j]) &
				     ENUM_ID_MASK) >> ENUM_ID_SHIFT;
				grph_obj_type =
				    (le16_to_cpu(path->usGraphicObjIds[j]) &
				     OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;

				if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) {
					for (k = 0; k < enc_obj->ucNumberOfObjects; k++) {
						u16 encoder_obj = le16_to_cpu(enc_obj->asObjects[k].usObjectID);
						if (le16_to_cpu(path->usGraphicObjIds[j]) == encoder_obj) {
							ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
								(ctx->bios + data_offset +
								 le16_to_cpu(enc_obj->asObjects[k].usRecordOffset));
							ATOM_ENCODER_CAP_RECORD *cap_record;
							u16 caps = 0;

							while (record->ucRecordSize > 0 &&
							       record->ucRecordType > 0 &&
							       record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
								switch (record->ucRecordType) {
								case ATOM_ENCODER_CAP_RECORD_TYPE:
									cap_record =(ATOM_ENCODER_CAP_RECORD *)
										record;
									caps = le16_to_cpu(cap_record->usEncoderCap);
									break;
								}
								record = (ATOM_COMMON_RECORD_HEADER *)
									((char *)record + record->ucRecordSize);
							}
							amdgpu_display_add_encoder(adev, encoder_obj,
										   le16_to_cpu(path->usDeviceTag),
										   caps);
						}
					}
				} else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) {
					for (k = 0; k < router_obj->ucNumberOfObjects; k++) {
						u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID);
						if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) {
							ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
								(ctx->bios + data_offset +
								 le16_to_cpu(router_obj->asObjects[k].usRecordOffset));
							ATOM_I2C_RECORD *i2c_record;
							ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
							ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path;
							ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *cd_path;
							ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table =
								(ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *)
								(ctx->bios + data_offset +
								 le16_to_cpu(router_obj->asObjects[k].usSrcDstTableOffset));
							u8 *num_dst_objs = (u8 *)
								((u8 *)router_src_dst_table + 1 +
								 (router_src_dst_table->ucNumberOfSrc * 2));
							u16 *dst_objs = (u16 *)(num_dst_objs + 1);
							int enum_id;

							router.router_id = router_obj_id;
							for (enum_id = 0; enum_id < (*num_dst_objs); enum_id++) {
								if (le16_to_cpu(path->usConnObjectId) ==
								    le16_to_cpu(dst_objs[enum_id]))
									break;
							}

							while (record->ucRecordSize > 0 &&
							       record->ucRecordType > 0 &&
							       record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
								switch (record->ucRecordType) {
								case ATOM_I2C_RECORD_TYPE:
									i2c_record =
										(ATOM_I2C_RECORD *)
										record;
									i2c_config =
										(ATOM_I2C_ID_CONFIG_ACCESS *)
										&i2c_record->sucI2cId;
									router.i2c_info =
										amdgpu_atombios_lookup_i2c_gpio(adev,
												       i2c_config->ucAccess);
									router.i2c_addr = i2c_record->ucI2CAddr >> 1;
									break;
								case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE:
									ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *)
										record;
									router.ddc_valid = true;
									router.ddc_mux_type = ddc_path->ucMuxType;
									router.ddc_mux_control_pin = ddc_path->ucMuxControlPin;
									router.ddc_mux_state = ddc_path->ucMuxState[enum_id];
									break;
								case ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE:
									cd_path = (ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *)
										record;
									router.cd_valid = true;
									router.cd_mux_type = cd_path->ucMuxType;
									router.cd_mux_control_pin = cd_path->ucMuxControlPin;
									router.cd_mux_state = cd_path->ucMuxState[enum_id];
									break;
								}
								record = (ATOM_COMMON_RECORD_HEADER *)
									((char *)record + record->ucRecordSize);
							}
						}
					}
				}
			}

			/* look up gpio for ddc, hpd */
			ddc_bus.valid = false;
			hpd.hpd = AMDGPU_HPD_NONE;
			if ((le16_to_cpu(path->usDeviceTag) &
			     (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) {
				for (j = 0; j < con_obj->ucNumberOfObjects; j++) {
					if (le16_to_cpu(path->usConnObjectId) ==
					    le16_to_cpu(con_obj->asObjects[j].usObjectID)) {
						ATOM_COMMON_RECORD_HEADER *record =
							(ATOM_COMMON_RECORD_HEADER *)
							(ctx->bios + data_offset +
							 le16_to_cpu(con_obj->asObjects[j].usRecordOffset));
						ATOM_I2C_RECORD *i2c_record;
						ATOM_HPD_INT_RECORD *hpd_record;
						ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;

						while (record->ucRecordSize > 0 &&
						       record->ucRecordType > 0 &&
						       record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
							switch (record->ucRecordType) {
							case ATOM_I2C_RECORD_TYPE:
								i2c_record =
									(ATOM_I2C_RECORD *)
									record;
								i2c_config =
									(ATOM_I2C_ID_CONFIG_ACCESS *)
									&i2c_record->sucI2cId;
								ddc_bus = amdgpu_atombios_lookup_i2c_gpio(adev,
													  i2c_config->ucAccess);
								break;
							case ATOM_HPD_INT_RECORD_TYPE:
								hpd_record =
									(ATOM_HPD_INT_RECORD *)
									record;
								gpio = amdgpu_atombios_lookup_gpio(adev,
												   hpd_record->ucHPDIntGPIOID);
								hpd = amdgpu_atombios_get_hpd_info_from_gpio(adev, &gpio);
								hpd.plugged_state = hpd_record->ucPlugged_PinState;
								break;
							}
							record = (ATOM_COMMON_RECORD_HEADER *)
								((char *)record + record->ucRecordSize);
						}
						break;
					}
				}
			}

			/* needed for aux chan transactions */
			ddc_bus.hpd = hpd.hpd;

			conn_id = le16_to_cpu(path->usConnObjectId);

			amdgpu_display_add_connector(adev,
						     conn_id,
						     le16_to_cpu(path->usDeviceTag),
						     connector_type, &ddc_bus,
						     connector_object_id,
						     &hpd,
						     &router);
		}
	}

	amdgpu_link_encoder_connector(adev->ddev);

	return true;
}
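/*
 * Layout walked above, for reference: the object header points at a
 * display-path table plus connector/encoder/router object tables, and
 * every object carries a list of variable-length records (i2c, HPD,
 * encoder caps, router mux selects) terminated by a zero-sized or
 * out-of-range record type.
 */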
union firmware_info {
	ATOM_FIRMWARE_INFO info;
	ATOM_FIRMWARE_INFO_V1_2 info_12;
	ATOM_FIRMWARE_INFO_V1_3 info_13;
	ATOM_FIRMWARE_INFO_V1_4 info_14;
	ATOM_FIRMWARE_INFO_V2_1 info_21;
	ATOM_FIRMWARE_INFO_V2_2 info_22;
};
int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
	uint8_t frev, crev;
	uint16_t data_offset;
	int ret = -EINVAL;

	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		int i;
		struct amdgpu_pll *ppll = &adev->clock.ppll[0];
		struct amdgpu_pll *spll = &adev->clock.spll;
		struct amdgpu_pll *mpll = &adev->clock.mpll;
		union firmware_info *firmware_info =
			(union firmware_info *)(mode_info->atom_context->bios +
						data_offset);
		/* pixel clocks */
		ppll->reference_freq =
			le16_to_cpu(firmware_info->info.usReferenceClock);
		ppll->reference_div = 0;

		ppll->pll_out_min =
			le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output);
		ppll->pll_out_max =
			le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);

		ppll->lcd_pll_out_min =
			le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
		if (ppll->lcd_pll_out_min == 0)
			ppll->lcd_pll_out_min = ppll->pll_out_min;
		ppll->lcd_pll_out_max =
			le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100;
		if (ppll->lcd_pll_out_max == 0)
			ppll->lcd_pll_out_max = ppll->pll_out_max;

		if (ppll->pll_out_min == 0)
			ppll->pll_out_min = 64800;

		ppll->pll_in_min =
			le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Input);
		ppll->pll_in_max =
			le16_to_cpu(firmware_info->info.usMaxPixelClockPLL_Input);

		ppll->min_post_div = 2;
		ppll->max_post_div = 0x7f;
		ppll->min_frac_feedback_div = 0;
		ppll->max_frac_feedback_div = 9;
		ppll->min_ref_div = 2;
		ppll->max_ref_div = 0x3ff;
		ppll->min_feedback_div = 4;
		ppll->max_feedback_div = 0xfff;

		for (i = 1; i < AMDGPU_MAX_PPLL; i++)
			adev->clock.ppll[i] = *ppll;

		/* system clock */
		spll->reference_freq =
			le16_to_cpu(firmware_info->info_21.usCoreReferenceClock);
		spll->reference_div = 0;

		spll->pll_out_min =
			le16_to_cpu(firmware_info->info.usMinEngineClockPLL_Output);
		spll->pll_out_max =
			le32_to_cpu(firmware_info->info.ulMaxEngineClockPLL_Output);

		if (spll->pll_out_min == 0)
			spll->pll_out_min = 64800;

		spll->pll_in_min =
			le16_to_cpu(firmware_info->info.usMinEngineClockPLL_Input);
		spll->pll_in_max =
			le16_to_cpu(firmware_info->info.usMaxEngineClockPLL_Input);

		spll->min_post_div = 1;
		spll->max_post_div = 1;
		spll->min_ref_div = 2;
		spll->max_ref_div = 0xff;
		spll->min_feedback_div = 4;
		spll->max_feedback_div = 0xff;

		/* memory clock */
		mpll->reference_freq =
			le16_to_cpu(firmware_info->info_21.usMemoryReferenceClock);
		mpll->reference_div = 0;

		mpll->pll_out_min =
			le16_to_cpu(firmware_info->info.usMinMemoryClockPLL_Output);
		mpll->pll_out_max =
			le32_to_cpu(firmware_info->info.ulMaxMemoryClockPLL_Output);

		if (mpll->pll_out_min == 0)
			mpll->pll_out_min = 64800;

		mpll->pll_in_min =
			le16_to_cpu(firmware_info->info.usMinMemoryClockPLL_Input);
		mpll->pll_in_max =
			le16_to_cpu(firmware_info->info.usMaxMemoryClockPLL_Input);

		adev->clock.default_sclk =
			le32_to_cpu(firmware_info->info.ulDefaultEngineClock);
		adev->clock.default_mclk =
			le32_to_cpu(firmware_info->info.ulDefaultMemoryClock);

		mpll->min_post_div = 1;
		mpll->max_post_div = 1;
		mpll->min_ref_div = 2;
		mpll->max_ref_div = 0xff;
		mpll->min_feedback_div = 4;
		mpll->max_feedback_div = 0xff;

		/* disp clock */
		adev->clock.default_dispclk =
			le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
		/* set a reasonable default for DP */
		if (adev->clock.default_dispclk < 53900) {
			DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
				 adev->clock.default_dispclk / 100);
			adev->clock.default_dispclk = 60000;
		}
		adev->clock.dp_extclk =
			le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
		adev->clock.current_dispclk = adev->clock.default_dispclk;

		adev->clock.max_pixel_clock = le16_to_cpu(firmware_info->info.usMaxPixelClock);
		if (adev->clock.max_pixel_clock == 0)
			adev->clock.max_pixel_clock = 40000;

		/* not technically a clock, but... */
		adev->mode_info.firmware_flags =
			le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess);

		ret = 0;
	}

	adev->pm.current_sclk = adev->clock.default_sclk;
	adev->pm.current_mclk = adev->clock.default_mclk;

	return ret;
}
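/*
 * Unit note for the checks above: AtomBIOS clock values are in 10 kHz
 * units, so the 53900 dispclk threshold is 539 MHz and the 60000 fallback
 * is 600 MHz, which is why the printout divides by 100 to get MHz.
 */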
union gfx_info {
	ATOM_GFX_INFO_V2_1 info;
};

int amdgpu_atombios_get_gfx_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, GFX_Info);
	uint8_t frev, crev;
	uint16_t data_offset;
	int ret = -EINVAL;

	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union gfx_info *gfx_info = (union gfx_info *)
			(mode_info->atom_context->bios + data_offset);

		adev->gfx.config.max_shader_engines = gfx_info->info.max_shader_engines;
		adev->gfx.config.max_tile_pipes = gfx_info->info.max_tile_pipes;
		adev->gfx.config.max_cu_per_sh = gfx_info->info.max_cu_per_sh;
		adev->gfx.config.max_sh_per_se = gfx_info->info.max_sh_per_se;
		adev->gfx.config.max_backends_per_se = gfx_info->info.max_backends_per_se;
		adev->gfx.config.max_texture_channel_caches =
			gfx_info->info.max_texture_channel_caches;

		ret = 0;
	}
	return ret;
}
union igp_info {
	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_9 info_9;
};
/*
 * Return vram width from integrated system info table, if available,
 * or 0 if not.
 */
int amdgpu_atombios_get_vram_width(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
	u16 data_offset, size;
	union igp_info *igp_info;
	u8 frev, crev;

	/* get any igp specific overrides */
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size,
					  &frev, &crev, &data_offset)) {
		igp_info = (union igp_info *)
			(mode_info->atom_context->bios + data_offset);
		switch (crev) {
		case 8:
		case 9:
			return igp_info->info_8.ucUMAChannelNumber * 64;
		default:
			return 0;
		}
	}

	return 0;
}
static void amdgpu_atombios_get_igp_ss_overrides(struct amdgpu_device *adev,
						 struct amdgpu_atom_ss *ss,
						 int id)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
	u16 data_offset, size;
	union igp_info *igp_info;
	u8 frev, crev;
	u16 percentage = 0, rate = 0;

	/* get any igp specific overrides */
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size,
					  &frev, &crev, &data_offset)) {
		igp_info = (union igp_info *)
			(mode_info->atom_context->bios + data_offset);
		switch (crev) {
		case 6:
			switch (id) {
			case ASIC_INTERNAL_SS_ON_TMDS:
				percentage = le16_to_cpu(igp_info->info_6.usDVISSPercentage);
				rate = le16_to_cpu(igp_info->info_6.usDVISSpreadRateIn10Hz);
				break;
			case ASIC_INTERNAL_SS_ON_HDMI:
				percentage = le16_to_cpu(igp_info->info_6.usHDMISSPercentage);
				rate = le16_to_cpu(igp_info->info_6.usHDMISSpreadRateIn10Hz);
				break;
			case ASIC_INTERNAL_SS_ON_LVDS:
				percentage = le16_to_cpu(igp_info->info_6.usLvdsSSPercentage);
				rate = le16_to_cpu(igp_info->info_6.usLvdsSSpreadRateIn10Hz);
				break;
			}
			break;
		case 7:
			switch (id) {
			case ASIC_INTERNAL_SS_ON_TMDS:
				percentage = le16_to_cpu(igp_info->info_7.usDVISSPercentage);
				rate = le16_to_cpu(igp_info->info_7.usDVISSpreadRateIn10Hz);
				break;
			case ASIC_INTERNAL_SS_ON_HDMI:
				percentage = le16_to_cpu(igp_info->info_7.usHDMISSPercentage);
				rate = le16_to_cpu(igp_info->info_7.usHDMISSpreadRateIn10Hz);
				break;
			case ASIC_INTERNAL_SS_ON_LVDS:
				percentage = le16_to_cpu(igp_info->info_7.usLvdsSSPercentage);
				rate = le16_to_cpu(igp_info->info_7.usLvdsSSpreadRateIn10Hz);
				break;
			}
			break;
		case 8:
			switch (id) {
			case ASIC_INTERNAL_SS_ON_TMDS:
				percentage = le16_to_cpu(igp_info->info_8.usDVISSPercentage);
				rate = le16_to_cpu(igp_info->info_8.usDVISSpreadRateIn10Hz);
				break;
			case ASIC_INTERNAL_SS_ON_HDMI:
				percentage = le16_to_cpu(igp_info->info_8.usHDMISSPercentage);
				rate = le16_to_cpu(igp_info->info_8.usHDMISSpreadRateIn10Hz);
				break;
			case ASIC_INTERNAL_SS_ON_LVDS:
				percentage = le16_to_cpu(igp_info->info_8.usLvdsSSPercentage);
				rate = le16_to_cpu(igp_info->info_8.usLvdsSSpreadRateIn10Hz);
				break;
			}
			break;
		case 9:
			switch (id) {
			case ASIC_INTERNAL_SS_ON_TMDS:
				percentage = le16_to_cpu(igp_info->info_9.usDVISSPercentage);
				rate = le16_to_cpu(igp_info->info_9.usDVISSpreadRateIn10Hz);
				break;
			case ASIC_INTERNAL_SS_ON_HDMI:
				percentage = le16_to_cpu(igp_info->info_9.usHDMISSPercentage);
				rate = le16_to_cpu(igp_info->info_9.usHDMISSpreadRateIn10Hz);
				break;
			case ASIC_INTERNAL_SS_ON_LVDS:
				percentage = le16_to_cpu(igp_info->info_9.usLvdsSSPercentage);
				rate = le16_to_cpu(igp_info->info_9.usLvdsSSpreadRateIn10Hz);
				break;
			}
			break;
		default:
			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
			break;
		}
		if (percentage)
			ss->percentage = percentage;
		if (rate)
			ss->rate = rate;
	}
}
union asic_ss_info {
	struct _ATOM_ASIC_INTERNAL_SS_INFO info;
	struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 info_2;
	struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3;
};

union asic_ss_assignment {
	struct _ATOM_ASIC_SS_ASSIGNMENT v1;
	struct _ATOM_ASIC_SS_ASSIGNMENT_V2 v2;
	struct _ATOM_ASIC_SS_ASSIGNMENT_V3 v3;
};
bool amdgpu_atombios_get_asic_ss_info(struct amdgpu_device *adev,
				      struct amdgpu_atom_ss *ss,
				      int id, u32 clock)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
	uint16_t data_offset, size;
	union asic_ss_info *ss_info;
	union asic_ss_assignment *ss_assign;
	uint8_t frev, crev;
	int i, num_indices;

	if (id == ASIC_INTERNAL_MEMORY_SS) {
		if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT))
			return false;
	}
	if (id == ASIC_INTERNAL_ENGINE_SS) {
		if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT))
			return false;
	}

	memset(ss, 0, sizeof(struct amdgpu_atom_ss));
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size,
					  &frev, &crev, &data_offset)) {

		ss_info =
			(union asic_ss_info *)(mode_info->atom_context->bios + data_offset);

		switch (frev) {
		case 1:
			num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
				sizeof(ATOM_ASIC_SS_ASSIGNMENT);

			ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info.asSpreadSpectrum[0]);
			for (i = 0; i < num_indices; i++) {
				if ((ss_assign->v1.ucClockIndication == id) &&
				    (clock <= le32_to_cpu(ss_assign->v1.ulTargetClockRange))) {
					ss->percentage =
						le16_to_cpu(ss_assign->v1.usSpreadSpectrumPercentage);
					ss->type = ss_assign->v1.ucSpreadSpectrumMode;
					ss->rate = le16_to_cpu(ss_assign->v1.usSpreadRateInKhz);
					ss->percentage_divider = 100;
					return true;
				}
				ss_assign = (union asic_ss_assignment *)
					((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT));
			}
			break;
		case 2:
			num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
				sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
			ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_2.asSpreadSpectrum[0]);
			for (i = 0; i < num_indices; i++) {
				if ((ss_assign->v2.ucClockIndication == id) &&
				    (clock <= le32_to_cpu(ss_assign->v2.ulTargetClockRange))) {
					ss->percentage =
						le16_to_cpu(ss_assign->v2.usSpreadSpectrumPercentage);
					ss->type = ss_assign->v2.ucSpreadSpectrumMode;
					ss->rate = le16_to_cpu(ss_assign->v2.usSpreadRateIn10Hz);
					ss->percentage_divider = 100;
					if ((crev == 2) &&
					    ((id == ASIC_INTERNAL_ENGINE_SS) ||
					     (id == ASIC_INTERNAL_MEMORY_SS)))
						ss->rate /= 100;
					return true;
				}
				ss_assign = (union asic_ss_assignment *)
					((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2));
			}
			break;
		case 3:
			num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
				sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
			ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_3.asSpreadSpectrum[0]);
			for (i = 0; i < num_indices; i++) {
				if ((ss_assign->v3.ucClockIndication == id) &&
				    (clock <= le32_to_cpu(ss_assign->v3.ulTargetClockRange))) {
					ss->percentage =
						le16_to_cpu(ss_assign->v3.usSpreadSpectrumPercentage);
					ss->type = ss_assign->v3.ucSpreadSpectrumMode;
					ss->rate = le16_to_cpu(ss_assign->v3.usSpreadRateIn10Hz);
					if (ss_assign->v3.ucSpreadSpectrumMode &
					    SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK)
						ss->percentage_divider = 1000;
					else
						ss->percentage_divider = 100;
					if ((id == ASIC_INTERNAL_ENGINE_SS) ||
					    (id == ASIC_INTERNAL_MEMORY_SS))
						ss->rate /= 100;
					if (adev->flags & AMD_IS_APU)
						amdgpu_atombios_get_igp_ss_overrides(adev, ss, id);
					return true;
				}
				ss_assign = (union asic_ss_assignment *)
					((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3));
			}
			break;
		default:
			DRM_ERROR("Unsupported ASIC_InternalSS_Info table: %d %d\n", frev, crev);
			break;
		}

	}
	return false;
}
union get_clock_dividers {
	struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS v1;
	struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2 v2;
	struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 v3;
	struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 v4;
	struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5 v5;
	struct _COMPUTE_GPU_CLOCK_INPUT_PARAMETERS_V1_6 v6_in;
	struct _COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 v6_out;
};
int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
				       u8 clock_type,
				       u32 clock,
				       bool strobe_mode,
				       struct atom_clock_dividers *dividers)
{
	union get_clock_dividers args;
	int index = GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL);
	u8 frev, crev;

	memset(&args, 0, sizeof(args));
	memset(dividers, 0, sizeof(struct atom_clock_dividers));

	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
		return -EINVAL;

	switch (crev) {
	case 2:
	case 3:
	case 5:
		/* r6xx, r7xx, evergreen, ni, si.
		 * TODO: add support for asic_type <= CHIP_RV770*/
		if (clock_type == COMPUTE_ENGINE_PLL_PARAM) {
			args.v3.ulClockParams = cpu_to_le32((clock_type << 24) | clock);

			amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);

			dividers->post_div = args.v3.ucPostDiv;
			dividers->enable_post_div = (args.v3.ucCntlFlag &
						     ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
			dividers->enable_dithen = (args.v3.ucCntlFlag &
						   ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
			dividers->whole_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv);
			dividers->frac_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDivFrac);
			dividers->ref_div = args.v3.ucRefDiv;
			dividers->vco_mode = (args.v3.ucCntlFlag &
					      ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
		} else {
			/* for SI we use ComputeMemoryClockParam for memory plls */
			if (adev->asic_type >= CHIP_TAHITI)
				return -EINVAL;
			args.v5.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
			if (strobe_mode)
				args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN;

			amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);

			dividers->post_div = args.v5.ucPostDiv;
			dividers->enable_post_div = (args.v5.ucCntlFlag &
						     ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
			dividers->enable_dithen = (args.v5.ucCntlFlag &
						   ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
			dividers->whole_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDiv);
			dividers->frac_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDivFrac);
			dividers->ref_div = args.v5.ucRefDiv;
			dividers->vco_mode = (args.v5.ucCntlFlag &
					      ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
		}
		break;
	case 4:
		/* fusion */
		args.v4.ulClock = cpu_to_le32(clock);	/* 10 khz */

		amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);

		dividers->post_divider = dividers->post_div = args.v4.ucPostDiv;
		dividers->real_clock = le32_to_cpu(args.v4.ulClock);
		break;
	case 6:
		/* COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, COMPUTE_GPUCLK_INPUT_FLAG_SCLK */
		args.v6_in.ulClock.ulComputeClockFlag = clock_type;
		args.v6_in.ulClock.ulClockFreq = cpu_to_le32(clock);	/* 10 khz */

		amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);

		dividers->whole_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDiv);
		dividers->frac_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDivFrac);
		dividers->ref_div = args.v6_out.ucPllRefDiv;
		dividers->post_div = args.v6_out.ucPllPostDiv;
		dividers->flags = args.v6_out.ucPllCntlFlag;
		dividers->real_clock = le32_to_cpu(args.v6_out.ulClock.ulClock);
		dividers->post_divider = args.v6_out.ulClock.ucPostDiv;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
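/*
 * Rough relationship between the returned dividers (a sketch; the exact
 * scale of the fractional feedback part depends on the PLL generation):
 *
 *	out_clk ~= ref_freq * (whole_fb_div + frac_fb_div / scale)
 *		   / (ref_div * post_div)
 */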
int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
					    u32 clock,
					    bool strobe_mode,
					    struct atom_mpll_param *mpll_param)
{
	COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 args;
	int index = GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam);
	u8 frev, crev;

	memset(&args, 0, sizeof(args));
	memset(mpll_param, 0, sizeof(struct atom_mpll_param));

	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
		return -EINVAL;

	switch (frev) {
	case 2:
		switch (crev) {
		case 1:
			/* SI */
			args.ulClock = cpu_to_le32(clock);	/* 10 khz */
			args.ucInputFlag = 0;
			if (strobe_mode)
				args.ucInputFlag |= MPLL_INPUT_FLAG_STROBE_MODE_EN;

			amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);

			mpll_param->clkfrac = le16_to_cpu(args.ulFbDiv.usFbDivFrac);
			mpll_param->clkf = le16_to_cpu(args.ulFbDiv.usFbDiv);
			mpll_param->post_div = args.ucPostDiv;
			mpll_param->dll_speed = args.ucDllSpeed;
			mpll_param->bwcntl = args.ucBWCntl;
			mpll_param->vco_mode =
				(args.ucPllCntlFlag & MPLL_CNTL_FLAG_VCO_MODE_MASK);
			mpll_param->yclk_sel =
				(args.ucPllCntlFlag & MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0;
			mpll_param->qdr =
				(args.ucPllCntlFlag & MPLL_CNTL_FLAG_QDR_ENABLE) ? 1 : 0;
			mpll_param->half_rate =
				(args.ucPllCntlFlag & MPLL_CNTL_FLAG_AD_HALF_RATE) ? 1 : 0;
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
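/*
 * The strobe_mode input flag above requests strobe mode from the VBIOS;
 * DPM code typically sets it for low GDDR5 memory clocks, where strobe
 * mode is needed for stable memory training.
 */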
void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
					     u32 eng_clock, u32 mem_clock)
{
	SET_ENGINE_CLOCK_PS_ALLOCATION args;
	int index = GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings);
	u32 tmp;

	memset(&args, 0, sizeof(args));

	tmp = eng_clock & SET_CLOCK_FREQ_MASK;
	tmp |= (COMPUTE_ENGINE_PLL_PARAM << 24);

	args.ulTargetEngineClock = cpu_to_le32(tmp);
	if (mem_clock)
		args.sReserved.ulClock = cpu_to_le32(mem_clock & SET_CLOCK_FREQ_MASK);

	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
}
void amdgpu_atombios_get_default_voltages(struct amdgpu_device *adev,
					  u16 *vddc, u16 *vddci, u16 *mvdd)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
	u8 frev, crev;
	u16 data_offset;
	union firmware_info *firmware_info;

	*vddc = 0;
	*vddci = 0;
	*mvdd = 0;

	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		firmware_info =
			(union firmware_info *)(mode_info->atom_context->bios +
						data_offset);
		*vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
		if ((frev == 2) && (crev >= 2)) {
			*vddci = le16_to_cpu(firmware_info->info_22.usBootUpVDDCIVoltage);
			*mvdd = le16_to_cpu(firmware_info->info_22.usBootUpMVDDCVoltage);
		}
	}
}
union set_voltage {
	struct _SET_VOLTAGE_PS_ALLOCATION alloc;
	struct _SET_VOLTAGE_PARAMETERS v1;
	struct _SET_VOLTAGE_PARAMETERS_V2 v2;
	struct _SET_VOLTAGE_PARAMETERS_V1_3 v3;
};
int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
				 u16 voltage_id, u16 *voltage)
{
	union set_voltage args;
	int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
	u8 frev, crev;

	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
		return -EINVAL;

	switch (crev) {
	case 1:
		return -EINVAL;
	case 2:
		args.v2.ucVoltageType = SET_VOLTAGE_GET_MAX_VOLTAGE;
		args.v2.ucVoltageMode = 0;
		args.v2.usVoltageLevel = 0;

		amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);

		*voltage = le16_to_cpu(args.v2.usVoltageLevel);
		break;
	case 3:
		args.v3.ucVoltageType = voltage_type;
		args.v3.ucVoltageMode = ATOM_GET_VOLTAGE_LEVEL;
		args.v3.usVoltageLevel = cpu_to_le16(voltage_id);

		amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);

		*voltage = le16_to_cpu(args.v3.usVoltageLevel);
		break;
	default:
		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
		return -EINVAL;
	}

	return 0;
}
int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *adev,
							  u16 *voltage,
							  u16 leakage_idx)
{
	return amdgpu_atombios_get_max_vddc(adev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage);
}
int amdgpu_atombios_get_leakage_id_from_vbios(struct amdgpu_device *adev,
					      u16 *leakage_id)
{
	union set_voltage args;
	int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
	u8 frev, crev;

	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
		return -EINVAL;

	switch (crev) {
	case 3:
	case 4:
		args.v3.ucVoltageType = 0;
		args.v3.ucVoltageMode = ATOM_GET_LEAKAGE_ID;
		args.v3.usVoltageLevel = 0;

		amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);

		*leakage_id = le16_to_cpu(args.v3.usVoltageLevel);
		break;
	default:
		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
		return -EINVAL;
	}

	return 0;
}
int amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(struct amdgpu_device *adev,
							     u16 *vddc, u16 *vddci,
							     u16 virtual_voltage_id,
							     u16 vbios_voltage_id)
{
	int index = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo);
	u8 frev, crev;
	u16 data_offset, size;
	int i, j;
	ATOM_ASIC_PROFILING_INFO_V2_1 *profile;
	u16 *leakage_bin, *vddc_id_buf, *vddc_buf, *vddci_id_buf, *vddci_buf;

	*vddc = 0;
	*vddci = 0;

	if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
					   &frev, &crev, &data_offset))
		return -EINVAL;

	profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *)
		(adev->mode_info.atom_context->bios + data_offset);

	switch (frev) {
	case 1:
		return -EINVAL;
	case 2:
		switch (crev) {
		case 1:
			if (size < sizeof(ATOM_ASIC_PROFILING_INFO_V2_1))
				return -EINVAL;
			leakage_bin = (u16 *)
				(adev->mode_info.atom_context->bios + data_offset +
				 le16_to_cpu(profile->usLeakageBinArrayOffset));
			vddc_id_buf = (u16 *)
				(adev->mode_info.atom_context->bios + data_offset +
				 le16_to_cpu(profile->usElbVDDC_IdArrayOffset));
			vddc_buf = (u16 *)
				(adev->mode_info.atom_context->bios + data_offset +
				 le16_to_cpu(profile->usElbVDDC_LevelArrayOffset));
			vddci_id_buf = (u16 *)
				(adev->mode_info.atom_context->bios + data_offset +
				 le16_to_cpu(profile->usElbVDDCI_IdArrayOffset));
			vddci_buf = (u16 *)
				(adev->mode_info.atom_context->bios + data_offset +
				 le16_to_cpu(profile->usElbVDDCI_LevelArrayOffset));

			if (profile->ucElbVDDC_Num > 0) {
				for (i = 0; i < profile->ucElbVDDC_Num; i++) {
					if (vddc_id_buf[i] == virtual_voltage_id) {
						for (j = 0; j < profile->ucLeakageBinNum; j++) {
							if (vbios_voltage_id <= leakage_bin[j]) {
								*vddc = vddc_buf[j * profile->ucElbVDDC_Num + i];
								break;
							}
						}
						break;
					}
				}
			}
			if (profile->ucElbVDDCI_Num > 0) {
				for (i = 0; i < profile->ucElbVDDCI_Num; i++) {
					if (vddci_id_buf[i] == virtual_voltage_id) {
						for (j = 0; j < profile->ucLeakageBinNum; j++) {
							if (vbios_voltage_id <= leakage_bin[j]) {
								*vddci = vddci_buf[j * profile->ucElbVDDCI_Num + i];
								break;
							}
						}
						break;
					}
				}
			}
			break;
		default:
			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
			return -EINVAL;
		}
		break;
	default:
		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
		return -EINVAL;
	}

	return 0;
}
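/*
 * Index math used above: vddc_buf is a flattened 2D array of voltage
 * levels, one row per leakage bin. With ucElbVDDC_Num == 4 ids, the
 * level for bin j and id i sits at vddc_buf[j * 4 + i].
 */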
union get_voltage_info {
	struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 in;
	struct _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 evv_out;
};
int amdgpu_atombios_get_voltage_evv(struct amdgpu_device *adev,
				    u16 virtual_voltage_id,
				    u16 *voltage)
{
	int index = GetIndexIntoMasterTable(COMMAND, GetVoltageInfo);
	int entry_id;
	u32 count = adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count;
	union get_voltage_info args;

	for (entry_id = 0; entry_id < count; entry_id++) {
		if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].v ==
		    virtual_voltage_id)
			break;
	}

	if (entry_id >= count)
		return -EINVAL;

	args.in.ucVoltageType = VOLTAGE_TYPE_VDDC;
	args.in.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
	args.in.usVoltageLevel = cpu_to_le16(virtual_voltage_id);
	args.in.ulSCLKFreq =
		cpu_to_le32(adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk);

	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);

	*voltage = le16_to_cpu(args.evv_out.usVoltageLevel);

	return 0;
}
union voltage_object_info {
	struct _ATOM_VOLTAGE_OBJECT_INFO v1;
	struct _ATOM_VOLTAGE_OBJECT_INFO_V2 v2;
	struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1 v3;
};

union voltage_object {
	struct _ATOM_VOLTAGE_OBJECT v1;
	struct _ATOM_VOLTAGE_OBJECT_V2 v2;
	union _ATOM_VOLTAGE_OBJECT_V3 v3;
};
static ATOM_VOLTAGE_OBJECT_V3 *amdgpu_atombios_lookup_voltage_object_v3(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *v3,
									u8 voltage_type, u8 voltage_mode)
{
	u32 size = le16_to_cpu(v3->sHeader.usStructureSize);
	u32 offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO_V3_1, asVoltageObj[0]);
	u8 *start = (u8 *)v3;

	while (offset < size) {
		ATOM_VOLTAGE_OBJECT_V3 *vo = (ATOM_VOLTAGE_OBJECT_V3 *)(start + offset);
		if ((vo->asGpioVoltageObj.sHeader.ucVoltageType == voltage_type) &&
		    (vo->asGpioVoltageObj.sHeader.ucVoltageMode == voltage_mode))
			return vo;
		offset += le16_to_cpu(vo->asGpioVoltageObj.sHeader.usSize);
	}
	return NULL;
}
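/*
 * Voltage objects are variable-length, so the walk above advances by each
 * object's usSize header field rather than a fixed stride, stopping at
 * the table's usStructureSize boundary.
 */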
int amdgpu_atombios_get_svi2_info(struct amdgpu_device *adev,
				  u8 voltage_type,
				  u8 *svd_gpio_id, u8 *svc_gpio_id)
{
	int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
	u8 frev, crev;
	u16 data_offset, size;
	union voltage_object_info *voltage_info;
	union voltage_object *voltage_object = NULL;

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
					  &frev, &crev, &data_offset)) {
		voltage_info = (union voltage_object_info *)
			(adev->mode_info.atom_context->bios + data_offset);

		switch (frev) {
		case 3:
			switch (crev) {
			case 1:
				voltage_object = (union voltage_object *)
					amdgpu_atombios_lookup_voltage_object_v3(&voltage_info->v3,
										 voltage_type,
										 VOLTAGE_OBJ_SVID2);
				if (voltage_object) {
					*svd_gpio_id = voltage_object->v3.asSVID2Obj.ucSVDGpioId;
					*svc_gpio_id = voltage_object->v3.asSVID2Obj.ucSVCGpioId;
				} else {
					return -EINVAL;
				}
				break;
			default:
				DRM_ERROR("unknown voltage object table\n");
				return -EINVAL;
			}
			break;
		default:
			DRM_ERROR("unknown voltage object table\n");
			return -EINVAL;
		}
	}
	return 0;
}
bool
amdgpu_atombios_is_voltage_gpio(struct amdgpu_device *adev,
				u8 voltage_type, u8 voltage_mode)
{
	int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
	u8 frev, crev;
	u16 data_offset, size;
	union voltage_object_info *voltage_info;

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
					  &frev, &crev, &data_offset)) {
		voltage_info = (union voltage_object_info *)
			(adev->mode_info.atom_context->bios + data_offset);

		switch (frev) {
		case 3:
			switch (crev) {
			case 1:
				if (amdgpu_atombios_lookup_voltage_object_v3(&voltage_info->v3,
									     voltage_type, voltage_mode))
					return true;
				break;
			default:
				DRM_ERROR("unknown voltage object table\n");
				return false;
			}
			break;
		default:
			DRM_ERROR("unknown voltage object table\n");
			return false;
		}
	}
	return false;
}
int amdgpu_atombios_get_voltage_table(struct amdgpu_device *adev,
				      u8 voltage_type, u8 voltage_mode,
				      struct atom_voltage_table *voltage_table)
{
	int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
	u8 frev, crev;
	u16 data_offset, size;
	int i;
	union voltage_object_info *voltage_info;
	union voltage_object *voltage_object = NULL;

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
					  &frev, &crev, &data_offset)) {
		voltage_info = (union voltage_object_info *)
			(adev->mode_info.atom_context->bios + data_offset);

		switch (frev) {
		case 3:
			switch (crev) {
			case 1:
				voltage_object = (union voltage_object *)
					amdgpu_atombios_lookup_voltage_object_v3(&voltage_info->v3,
										 voltage_type, voltage_mode);
				if (voltage_object) {
					ATOM_GPIO_VOLTAGE_OBJECT_V3 *gpio =
						&voltage_object->v3.asGpioVoltageObj;
					VOLTAGE_LUT_ENTRY_V2 *lut;
					if (gpio->ucGpioEntryNum > MAX_VOLTAGE_ENTRIES)
						return -EINVAL;
					lut = &gpio->asVolGpioLut[0];
					for (i = 0; i < gpio->ucGpioEntryNum; i++) {
						voltage_table->entries[i].value =
							le16_to_cpu(lut->usVoltageValue);
						voltage_table->entries[i].smio_low =
							le32_to_cpu(lut->ulVoltageId);
						lut = (VOLTAGE_LUT_ENTRY_V2 *)
							((u8 *)lut + sizeof(VOLTAGE_LUT_ENTRY_V2));
					}
					voltage_table->mask_low = le32_to_cpu(gpio->ulGpioMaskVal);
					voltage_table->count = gpio->ucGpioEntryNum;
					voltage_table->phase_delay = gpio->ucPhaseDelay;
					return 0;
				}
				break;
			default:
				DRM_ERROR("unknown voltage object table\n");
				return -EINVAL;
			}
			break;
		default:
			DRM_ERROR("unknown voltage object table\n");
			return -EINVAL;
		}
	}
	return -EINVAL;
}
union vram_info {
	struct _ATOM_VRAM_INFO_V3 v1_3;
	struct _ATOM_VRAM_INFO_V4 v1_4;
	struct _ATOM_VRAM_INFO_HEADER_V2_1 v2_1;
};

#define MEM_ID_MASK           0xff000000
#define MEM_ID_SHIFT          24
#define CLOCK_RANGE_MASK      0x00ffffff
#define CLOCK_RANGE_SHIFT     0
#define LOW_NIBBLE_MASK       0xf
#define DATA_EQU_PREV         0
#define DATA_FROM_TABLE       4
int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev,
				      u8 module_index,
				      struct atom_mc_reg_table *reg_table)
{
	int index = GetIndexIntoMasterTable(DATA, VRAM_Info);
	u8 frev, crev, num_entries, t_mem_id, num_ranges = 0;
	u32 i = 0, j;
	u16 data_offset, size;
	union vram_info *vram_info;

	memset(reg_table, 0, sizeof(struct atom_mc_reg_table));

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
					  &frev, &crev, &data_offset)) {
		vram_info = (union vram_info *)
			(adev->mode_info.atom_context->bios + data_offset);
		switch (frev) {
		case 1:
			DRM_ERROR("old table version %d, %d\n", frev, crev);
			return -EINVAL;
		case 2:
			switch (crev) {
			case 1:
				if (module_index < vram_info->v2_1.ucNumOfVRAMModule) {
					ATOM_INIT_REG_BLOCK *reg_block =
						(ATOM_INIT_REG_BLOCK *)
						((u8 *)vram_info + le16_to_cpu(vram_info->v2_1.usMemClkPatchTblOffset));
					ATOM_MEMORY_SETTING_DATA_BLOCK *reg_data =
						(ATOM_MEMORY_SETTING_DATA_BLOCK *)
						((u8 *)reg_block + (2 * sizeof(u16)) +
						 le16_to_cpu(reg_block->usRegIndexTblSize));
					ATOM_INIT_REG_INDEX_FORMAT *format = &reg_block->asRegIndexBuf[0];
					num_entries = (u8)((le16_to_cpu(reg_block->usRegIndexTblSize)) /
							   sizeof(ATOM_INIT_REG_INDEX_FORMAT)) - 1;
					if (num_entries > VBIOS_MC_REGISTER_ARRAY_SIZE)
						return -EINVAL;
					while (i < num_entries) {
						if (format->ucPreRegDataLength & ACCESS_PLACEHOLDER)
							break;
						reg_table->mc_reg_address[i].s1 =
							(u16)(le16_to_cpu(format->usRegIndex));
						reg_table->mc_reg_address[i].pre_reg_data =
							(u8)(format->ucPreRegDataLength);
						i++;
						format = (ATOM_INIT_REG_INDEX_FORMAT *)
							((u8 *)format + sizeof(ATOM_INIT_REG_INDEX_FORMAT));
					}
					reg_table->last = i;
					while ((le32_to_cpu(*(u32 *)reg_data) != END_OF_REG_DATA_BLOCK) &&
					       (num_ranges < VBIOS_MAX_AC_TIMING_ENTRIES)) {
						t_mem_id = (u8)((le32_to_cpu(*(u32 *)reg_data) & MEM_ID_MASK)
								>> MEM_ID_SHIFT);
						if (module_index == t_mem_id) {
							reg_table->mc_reg_table_entry[num_ranges].mclk_max =
								(u32)((le32_to_cpu(*(u32 *)reg_data) & CLOCK_RANGE_MASK)
								      >> CLOCK_RANGE_SHIFT);
							for (i = 0, j = 1; i < reg_table->last; i++) {
								if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_FROM_TABLE) {
									reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
										(u32)le32_to_cpu(*((u32 *)reg_data + j));
									j++;
								} else if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
									reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
										reg_table->mc_reg_table_entry[num_ranges].mc_data[i - 1];
								}
							}
							num_ranges++;
						}
						reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *)
							((u8 *)reg_data + le16_to_cpu(reg_block->usRegDataBlkSize));
					}
					if (le32_to_cpu(*(u32 *)reg_data) != END_OF_REG_DATA_BLOCK)
						return -EINVAL;
					reg_table->num_entries = num_ranges;
				} else
					return -EINVAL;
				break;
			default:
				DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
				return -EINVAL;
			}
			break;
		default:
			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
			return -EINVAL;
		}
		return 0;
	}
	return -EINVAL;
}
bool amdgpu_atombios_has_gpu_virtualization_table(struct amdgpu_device *adev)
{
	int index = GetIndexIntoMasterTable(DATA, GPUVirtualizationInfo);
	u8 frev, crev;
	u16 data_offset, size;

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
					  &frev, &crev, &data_offset))
		return true;

	return false;
}
void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock)
{
	uint32_t bios_6_scratch;

	bios_6_scratch = RREG32(mmBIOS_SCRATCH_6);

	if (lock) {
		bios_6_scratch |= ATOM_S6_CRITICAL_STATE;
		bios_6_scratch &= ~ATOM_S6_ACC_MODE;
	} else {
		bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE;
		bios_6_scratch |= ATOM_S6_ACC_MODE;
	}

	WREG32(mmBIOS_SCRATCH_6, bios_6_scratch);
}
void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev)
{
	uint32_t bios_2_scratch, bios_6_scratch;

	bios_2_scratch = RREG32(mmBIOS_SCRATCH_2);
	bios_6_scratch = RREG32(mmBIOS_SCRATCH_6);

	/* let the bios control the backlight */
	bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE;

	/* tell the bios not to handle mode switching */
	bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH;

	/* clear the vbios dpms state */
	bios_2_scratch &= ~ATOM_S2_DEVICE_DPMS_STATE;

	WREG32(mmBIOS_SCRATCH_2, bios_2_scratch);
	WREG32(mmBIOS_SCRATCH_6, bios_6_scratch);
}
void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
		adev->bios_scratch[i] = RREG32(mmBIOS_SCRATCH_0 + i);
}
void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev)
{
	int i;

	/*
	 * VBIOS will check ASIC_INIT_COMPLETE bit to decide if
	 * execute ASIC_Init posting via driver
	 */
	adev->bios_scratch[7] &= ~ATOM_S7_ASIC_INIT_COMPLETE_MASK;

	for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
		WREG32(mmBIOS_SCRATCH_0 + i, adev->bios_scratch[i]);
}
void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
					      bool hung)
{
	u32 tmp = RREG32(mmBIOS_SCRATCH_3);

	if (hung)
		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
	else
		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;

	WREG32(mmBIOS_SCRATCH_3, tmp);
}
/* Atom needs data in little endian format
 * so swap as appropriate when copying data to
 * or from atom. Note that atom operates on
 * dw units.
 */
void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
{
#ifdef __BIG_ENDIAN
	u8 src_tmp[20], dst_tmp[20];	/* used for byteswapping */
	u32 *dst32, *src32;
	int i;

	memcpy(src_tmp, src, num_bytes);
	src32 = (u32 *)src_tmp;
	dst32 = (u32 *)dst_tmp;
	if (to_le) {
		for (i = 0; i < ((num_bytes + 3) / 4); i++)
			dst32[i] = cpu_to_le32(src32[i]);
		memcpy(dst, dst_tmp, num_bytes);
	} else {
		u8 dws = num_bytes & ~3;
		for (i = 0; i < ((num_bytes + 3) / 4); i++)
			dst32[i] = le32_to_cpu(src32[i]);
		memcpy(dst, dst_tmp, dws);
		if (num_bytes % 4) {
			for (i = 0; i < (num_bytes % 4); i++)
				dst[dws+i] = dst_tmp[dws+i];
		}
	}
#else
	memcpy(dst, src, num_bytes);
#endif
}
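/*
 * Example of the behavior above: on little-endian kernels the copy is a
 * plain memcpy; on big-endian kernels a 4-byte value 0x11223344 destined
 * for atom is stored as the bytes 44 33 22 11, since atom always expects
 * little-endian dwords.
 */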
int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
	uint16_t data_offset;
	int usage_bytes = 0;
	struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;

	if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
		firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);

		DRM_DEBUG("atom firmware requested %08x %dkb\n",
			  le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
			  le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));

		usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
	}
	ctx->scratch_size_bytes = 0;
	if (usage_bytes == 0)
		usage_bytes = 20 * 1024;
	/* allocate some scratch memory */
	ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
	if (!ctx->scratch)
		return -ENOMEM;
	ctx->scratch_size_bytes = usage_bytes;
	return 0;
}