/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 *  Created on: Aug 30, 2016
 */
#include <linux/delay.h>

#include "dm_services.h"

#include "dc_dmub_srv.h"
static inline void submit_dmub_read_modify_write(
	struct dc_reg_helper_state *offload,
	const struct dc_context *ctx)
{
	struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write;
	bool gather = false;

	offload->should_burst_write =
			(offload->same_addr_count == (DMUB_READ_MODIFY_WRITE_SEQ__MAX - 1));
	cmd_buf->header.payload_bytes =
			sizeof(struct dmub_cmd_read_modify_write_sequence) * offload->reg_seq_count;

	gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
	ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;

	dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header);

	ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;

	memset(cmd_buf, 0, sizeof(*cmd_buf));

	offload->reg_seq_count = 0;
	offload->same_addr_count = 0;
}
static inline void submit_dmub_burst_write(
	struct dc_reg_helper_state *offload,
	const struct dc_context *ctx)
{
	struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write;
	bool gather = false;

	cmd_buf->header.payload_bytes =
			sizeof(uint32_t) * offload->reg_seq_count;

	gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
	ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;

	dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header);

	ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;

	memset(cmd_buf, 0, sizeof(*cmd_buf));

	offload->reg_seq_count = 0;
}
static inline void submit_dmub_reg_wait(
	struct dc_reg_helper_state *offload,
	const struct dc_context *ctx)
{
	struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait;
	bool gather = false;

	gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
	ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;

	dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header);

	memset(cmd_buf, 0, sizeof(*cmd_buf));
	offload->reg_seq_count = 0;

	ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;
}
struct dc_reg_value_masks {
	uint32_t value;
	uint32_t mask;
};

struct dc_reg_sequence {
	uint32_t addr;
	struct dc_reg_value_masks value_masks;
};
static inline void set_reg_field_value_masks(
	struct dc_reg_value_masks *field_value_mask,
	uint32_t value,
	uint32_t mask,
	uint8_t shift)
{
	field_value_mask->value = (field_value_mask->value & ~mask) | (mask & (value << shift));
	field_value_mask->mask = field_value_mask->mask | mask;
}
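/*
 * Worked example (illustrative values, not from a real register map):
 * packing a field value of 0x3 into a field with shift = 8 and
 * mask = 0x0000ff00 gives
 *
 *	value = (value & ~0x0000ff00) | (0x0000ff00 & (0x3 << 8)) = 0x00000300
 *	mask  = mask | 0x0000ff00
 *
 * so the eventual read-modify-write only touches bits 8..15 of the register.
 */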
static void set_reg_field_values(struct dc_reg_value_masks *field_value_mask,
		uint32_t addr, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		va_list ap)
{
	uint32_t shift, mask, field_value;
	int i = 1;

	/* gather all bits value/mask getting updated in this register */
	set_reg_field_value_masks(field_value_mask,
			field_value1, mask1, shift1);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t);

		set_reg_field_value_masks(field_value_mask,
				field_value, mask, shift);
		i++;
	}
}
static void dmub_flush_buffer_execute(
		struct dc_reg_helper_state *offload,
		const struct dc_context *ctx)
{
	submit_dmub_read_modify_write(offload, ctx);
	dc_dmub_srv_cmd_execute(ctx->dmub_srv);
}
static void dmub_flush_burst_write_buffer_execute(
		struct dc_reg_helper_state *offload,
		const struct dc_context *ctx)
{
	submit_dmub_burst_write(offload, ctx);
	dc_dmub_srv_cmd_execute(ctx->dmub_srv);
}
static bool dmub_reg_value_burst_set_pack(const struct dc_context *ctx, uint32_t addr,
		uint32_t reg_val)
{
	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
	struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write;

	/* flush command if buffer is full */
	if (offload->reg_seq_count == DMUB_BURST_WRITE_VALUES__MAX)
		dmub_flush_burst_write_buffer_execute(offload, ctx);

	if (offload->cmd_data.cmd_common.header.type == DMUB_CMD__REG_SEQ_BURST_WRITE &&
			addr != cmd_buf->addr) {
		dmub_flush_burst_write_buffer_execute(offload, ctx);
		return false;
	}

	cmd_buf->header.type = DMUB_CMD__REG_SEQ_BURST_WRITE;
	cmd_buf->header.sub_type = 0;
	cmd_buf->addr = addr;
	cmd_buf->write_values[offload->reg_seq_count] = reg_val;
	offload->reg_seq_count++;

	return true;
}
static uint32_t dmub_reg_value_pack(const struct dc_context *ctx, uint32_t addr,
		struct dc_reg_value_masks *field_value_mask)
{
	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
	struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write;
	struct dmub_cmd_read_modify_write_sequence *seq;

	/* flush command if buffer is full */
	if (offload->cmd_data.cmd_common.header.type != DMUB_CMD__REG_SEQ_BURST_WRITE &&
			offload->reg_seq_count == DMUB_READ_MODIFY_WRITE_SEQ__MAX)
		dmub_flush_buffer_execute(offload, ctx);

	if (offload->should_burst_write) {
		if (dmub_reg_value_burst_set_pack(ctx, addr, field_value_mask->value))
			return field_value_mask->value;
		else
			offload->should_burst_write = false;
	}

	cmd_buf->header.type = DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE;
	cmd_buf->header.sub_type = 0;
	seq = &cmd_buf->seq[offload->reg_seq_count];

	if (offload->reg_seq_count) {
		if (cmd_buf->seq[offload->reg_seq_count - 1].addr == addr)
			offload->same_addr_count++;
		else
			offload->same_addr_count = 0;
	}

	seq->addr = addr;
	seq->modify_mask = field_value_mask->mask;
	seq->modify_value = field_value_mask->value;
	offload->reg_seq_count++;

	return field_value_mask->value;
}
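/*
 * Note on the burst heuristic above (summary of the code, not a spec): once
 * DMUB_READ_MODIFY_WRITE_SEQ__MAX - 1 consecutive sequence entries target the
 * same address, submit_dmub_read_modify_write() sets should_burst_write, and
 * further values for that register are packed as the denser
 * DMUB_CMD__REG_SEQ_BURST_WRITE command instead of read-modify-write pairs.
 */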
static void dmub_reg_wait_done_pack(const struct dc_context *ctx, uint32_t addr,
		uint32_t mask, uint32_t shift, uint32_t condition_value, uint32_t time_out_us)
{
	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
	struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait;

	cmd_buf->header.type = DMUB_CMD__REG_REG_WAIT;
	cmd_buf->header.sub_type = 0;
	cmd_buf->reg_wait.addr = addr;
	cmd_buf->reg_wait.condition_field_value = mask & (condition_value << shift);
	cmd_buf->reg_wait.mask = mask;
	cmd_buf->reg_wait.time_out_us = time_out_us;
}
uint32_t generic_reg_update_ex(const struct dc_context *ctx,
		uint32_t addr, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		...)
{
	struct dc_reg_value_masks field_value_mask = {0};
	uint32_t reg_val;
	va_list ap;

	va_start(ap, field_value1);

	set_reg_field_values(&field_value_mask, addr, n, shift1, mask1,
			field_value1, ap);

	va_end(ap);

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress)
		return dmub_reg_value_pack(ctx, addr, &field_value_mask);
		/* todo: return void so we can decouple code running in driver from register states */

	/* mmio write directly */
	reg_val = dm_read_reg(ctx, addr);
	reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;
	dm_write_reg(ctx, addr, reg_val);

	return reg_val;
}
uint32_t generic_reg_set_ex(const struct dc_context *ctx,
		uint32_t addr, uint32_t reg_val, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		...)
{
	struct dc_reg_value_masks field_value_mask = {0};
	va_list ap;

	va_start(ap, field_value1);

	set_reg_field_values(&field_value_mask, addr, n, shift1, mask1,
			field_value1, ap);

	va_end(ap);

	/* mmio write directly */
	reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
		return dmub_reg_value_burst_set_pack(ctx, addr, reg_val);
		/* todo: return void so we can decouple code running in driver from register states */
	}

	dm_write_reg(ctx, addr, reg_val);

	return reg_val;
}
uint32_t dm_read_reg_func(
	const struct dc_context *ctx,
	uint32_t address,
	const char *func_name)
{
	uint32_t value;
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register read; address = 0\n");
		return 0;
	}
#endif

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
		/* register reads cannot be offloaded to DMUB; a read while
		 * gathering indicates a caller sequencing bug.
		 */
		ASSERT(false);
		return 0;
	}

	value = cgs_read_register(ctx->cgs_device, address);
	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

	return value;
}
uint32_t generic_reg_get(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift, uint32_t mask, uint32_t *field_value)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value = get_reg_field_value_ex(reg_val, mask, shift);
	return reg_val;
}
uint32_t generic_reg_get2(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	return reg_val;
}

uint32_t generic_reg_get3(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	return reg_val;
}

uint32_t generic_reg_get4(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	return reg_val;
}

uint32_t generic_reg_get5(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
	return reg_val;
}

uint32_t generic_reg_get6(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
		uint8_t shift6, uint32_t mask6, uint32_t *field_value6)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
	*field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
	return reg_val;
}

uint32_t generic_reg_get7(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
		uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
		uint8_t shift7, uint32_t mask7, uint32_t *field_value7)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
	*field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
	*field_value7 = get_reg_field_value_ex(reg_val, mask7, shift7);
	return reg_val;
}

uint32_t generic_reg_get8(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
		uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
		uint8_t shift7, uint32_t mask7, uint32_t *field_value7,
		uint8_t shift8, uint32_t mask8, uint32_t *field_value8)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
	*field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
	*field_value7 = get_reg_field_value_ex(reg_val, mask7, shift7);
	*field_value8 = get_reg_field_value_ex(reg_val, mask8, shift8);
	return reg_val;
}
/* note: a va_arg version of this is a pretty bad idea, since there is an output
 * parameter passed by pointer; the compiler cannot check for size match and it
 * is prone to stack-corruption type of bugs

uint32_t generic_reg_get(const struct dc_context *ctx,
		uint32_t addr, int n, ...)
{
	uint32_t shift, mask;
	uint32_t *field_value;
	uint32_t reg_val = dm_read_reg(ctx, addr);
	int i = 0;
	va_list ap;

	va_start(ap, n);
	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t *);
		*field_value = get_reg_field_value_ex(reg_val, mask, shift);
		i++;
	}
	va_end(ap);

	return reg_val;
}
*/
void generic_reg_wait(const struct dc_context *ctx,
	uint32_t addr, uint32_t shift, uint32_t mask, uint32_t condition_value,
	unsigned int delay_between_poll_us, unsigned int time_out_num_tries,
	const char *func_name, int line)
{
	uint32_t field_value;
	uint32_t reg_val;
	int i;

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
		dmub_reg_wait_done_pack(ctx, addr, mask, shift, condition_value,
				delay_between_poll_us * time_out_num_tries);
		return;
	}

	/*
	 * Something is terribly wrong if time out is > 3000ms.
	 * 3000ms is the maximum time needed for SMU to pass values back.
	 * This value comes from experiments.
	 */
	ASSERT(delay_between_poll_us * time_out_num_tries <= 3000000);

	for (i = 0; i <= time_out_num_tries; i++) {
		if (i) {
			if (delay_between_poll_us >= 1000)
				msleep(delay_between_poll_us/1000);
			else if (delay_between_poll_us > 0)
				udelay(delay_between_poll_us);
		}

		reg_val = dm_read_reg(ctx, addr);

		field_value = get_reg_field_value_ex(reg_val, mask, shift);

		if (field_value == condition_value) {
			if (i * delay_between_poll_us > 1000 &&
					!IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
				DC_LOG_DC("REG_WAIT taking a while: %dms in %s line:%d\n",
						delay_between_poll_us * i / 1000,
						func_name, line);
			return;
		}
	}

	DC_LOG_WARNING("REG_WAIT timeout %dus * %d tries - %s line:%d\n",
			delay_between_poll_us, time_out_num_tries,
			func_name, line);

	if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
		BREAK_TO_DEBUGGER();
}
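/*
 * Illustrative timeout math for generic_reg_wait(): polling every 100 us for
 * up to 1000 tries gives a worst-case wait of 100 ms, well under the 3000 ms
 * SMU ceiling asserted above.
 */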
void generic_write_indirect_reg(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index, uint32_t data)
{
	dm_write_reg(ctx, addr_index, index);
	dm_write_reg(ctx, addr_data, data);
}
uint32_t generic_read_indirect_reg(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index)
{
	uint32_t value = 0;

	// when reg read, there should not be any offload.
	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
		ASSERT(false);
	}

	dm_write_reg(ctx, addr_index, index);
	value = dm_read_reg(ctx, addr_data);

	return value;
}
uint32_t generic_indirect_reg_get(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index, int n,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		...)
{
	uint32_t shift, mask, *field_value;
	uint32_t value = 0;
	int i = 1;
	va_list ap;

	va_start(ap, field_value1);

	value = generic_read_indirect_reg(ctx, addr_index, addr_data, index);
	*field_value1 = get_reg_field_value_ex(value, mask1, shift1);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t *);

		*field_value = get_reg_field_value_ex(value, mask, shift);
		i++;
	}

	va_end(ap);

	return value;
}
uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index, uint32_t reg_val, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		...)
{
	uint32_t shift, mask, field_value;
	int i = 1;
	va_list ap;

	va_start(ap, field_value1);

	reg_val = set_reg_field_value_ex(reg_val, field_value1, mask1, shift1);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t);

		reg_val = set_reg_field_value_ex(reg_val, field_value, mask, shift);
		i++;
	}

	generic_write_indirect_reg(ctx, addr_index, addr_data, index, reg_val);
	va_end(ap);

	return reg_val;
}
void reg_sequence_start_gather(const struct dc_context *ctx)
{
	/* if reg sequence is supported and enabled, set flag to
	 * indicate that the REG_SET/REG_UPDATE macros should build a
	 * reg sequence command buffer rather than write MMIO directly.
	 */

	if (ctx->dmub_srv && ctx->dc->debug.dmub_offload_enabled) {
		struct dc_reg_helper_state *offload =
			&ctx->dmub_srv->reg_helper_offload;

		/* caller sequence mismatch; need to debug the caller. offload will not work!!! */
		ASSERT(!offload->gather_in_progress);

		offload->gather_in_progress = true;
	}
}
void reg_sequence_start_execute(const struct dc_context *ctx)
{
	struct dc_reg_helper_state *offload;

	if (!ctx->dmub_srv)
		return;

	offload = &ctx->dmub_srv->reg_helper_offload;

	if (offload && offload->gather_in_progress) {
		offload->gather_in_progress = false;
		offload->should_burst_write = false;
		switch (offload->cmd_data.cmd_common.header.type) {
		case DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE:
			submit_dmub_read_modify_write(offload, ctx);
			break;
		case DMUB_CMD__REG_REG_WAIT:
			submit_dmub_reg_wait(offload, ctx);
			break;
		case DMUB_CMD__REG_SEQ_BURST_WRITE:
			submit_dmub_burst_write(offload, ctx);
			break;
		default:
			return;
		}

		dc_dmub_srv_cmd_execute(ctx->dmub_srv);
	}
}
void reg_sequence_wait_done(const struct dc_context *ctx)
{
	/* callback to DM to poll for last submission done */
	struct dc_reg_helper_state *offload;

	if (!ctx->dmub_srv)
		return;

	offload = &ctx->dmub_srv->reg_helper_offload;

	if (offload &&
	    ctx->dc->debug.dmub_offload_enabled &&
	    !ctx->dc->debug.dmcub_emulation) {
		dc_dmub_srv_wait_idle(ctx->dmub_srv);
	}
}