/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 *  Created on: Aug 30, 2016
 */

#include <linux/delay.h>

#include "dm_services.h"

#include "dc.h"
#include "dc_dmub_srv.h"

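/*
 * DMUB register-sequence offload helpers.
 *
 * Each submit_dmub_*() helper below queues the gathered command buffer to
 * the DMUB service.  Note the save/clear/restore pattern around
 * reg_helper_offload.gather_in_progress: the flag is cleared while the
 * command is queued so that any register access made during queuing is not
 * itself gathered back into the very buffer being submitted.
 */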
static inline void submit_dmub_read_modify_write(
	struct dc_reg_helper_state *offload,
	const struct dc_context *ctx)
{
	struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write;
	bool gather = false;

	offload->should_burst_write =
			(offload->same_addr_count == (DMUB_READ_MODIFY_WRITE_SEQ__MAX - 1));
	cmd_buf->header.payload_bytes =
			sizeof(struct dmub_cmd_read_modify_write_sequence) * offload->reg_seq_count;

	gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
	ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;

	dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data);

	ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;

	memset(cmd_buf, 0, sizeof(*cmd_buf));

	offload->reg_seq_count = 0;
	offload->same_addr_count = 0;
}

static inline void submit_dmub_burst_write(
	struct dc_reg_helper_state *offload,
	const struct dc_context *ctx)
{
	struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write;
	bool gather = false;

	cmd_buf->header.payload_bytes =
			sizeof(uint32_t) * offload->reg_seq_count;

	gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
	ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;

	dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data);

	ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;

	memset(cmd_buf, 0, sizeof(*cmd_buf));

	offload->reg_seq_count = 0;
}

static inline void submit_dmub_reg_wait(
	struct dc_reg_helper_state *offload,
	const struct dc_context *ctx)
{
	struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait;
	bool gather = false;

	gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
	ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;

	dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data);

	memset(cmd_buf, 0, sizeof(*cmd_buf));
	offload->reg_seq_count = 0;

	ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;
}

struct dc_reg_value_masks {
	uint32_t value;
	uint32_t mask;
};

struct dc_reg_sequence {
	uint32_t addr;
	struct dc_reg_value_masks value_masks;
};

static inline void set_reg_field_value_masks(
	struct dc_reg_value_masks *field_value_mask,
	uint32_t value,
	uint32_t mask,
	uint8_t shift)
{
	ASSERT(mask != 0);

	field_value_mask->value = (field_value_mask->value & ~mask) | (mask & (value << shift));
	field_value_mask->mask = field_value_mask->mask | mask;
}

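/*
 * Illustrative example of the masking math above: packing field value 0x3
 * into a field with shift 4 and mask 0x000000F0 clears only that field's
 * bits in ->value, ORs in (0xF0 & (0x3 << 4)) == 0x30, and accumulates
 * 0xF0 into ->mask, so the eventual read-modify-write touches only the
 * fields that were actually packed.
 */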
static void set_reg_field_values(struct dc_reg_value_masks *field_value_mask,
		uint32_t addr, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		va_list ap)
{
	uint32_t shift, mask, field_value;
	int i = 1;

	/* gather all bits value/mask getting updated in this register */
	set_reg_field_value_masks(field_value_mask,
			field_value1, mask1, shift1);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t);

		set_reg_field_value_masks(field_value_mask,
				field_value, mask, shift);
		i++;
	}
}

static void dmub_flush_buffer_execute(
	struct dc_reg_helper_state *offload,
	const struct dc_context *ctx)
{
	submit_dmub_read_modify_write(offload, ctx);
	dc_dmub_srv_cmd_execute(ctx->dmub_srv);
}

static void dmub_flush_burst_write_buffer_execute(
	struct dc_reg_helper_state *offload,
	const struct dc_context *ctx)
{
	submit_dmub_burst_write(offload, ctx);
	dc_dmub_srv_cmd_execute(ctx->dmub_srv);
}

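/*
 * Pack a value into the burst-write command buffer.  Returns true when the
 * value was appended; returns false when the buffer was targeting a
 * different address and had to be flushed first, in which case the caller
 * falls back to read-modify-write packing.
 */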
static bool dmub_reg_value_burst_set_pack(const struct dc_context *ctx, uint32_t addr,
		uint32_t reg_val)
{
	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
	struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write;

	/* flush command if buffer is full */
	if (offload->reg_seq_count == DMUB_BURST_WRITE_VALUES__MAX)
		dmub_flush_burst_write_buffer_execute(offload, ctx);

	if (offload->cmd_data.cmd_common.header.type == DMUB_CMD__REG_SEQ_BURST_WRITE &&
			addr != cmd_buf->addr) {
		dmub_flush_burst_write_buffer_execute(offload, ctx);
		return false;
	}

	cmd_buf->header.type = DMUB_CMD__REG_SEQ_BURST_WRITE;
	cmd_buf->header.sub_type = 0;
	cmd_buf->addr = addr;
	cmd_buf->write_values[offload->reg_seq_count] = reg_val;
	offload->reg_seq_count++;

	return true;
}

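/*
 * Pack one register update into the read-modify-write sequence buffer.
 * same_addr_count tracks runs of consecutive writes to the same address;
 * once a run reaches DMUB_READ_MODIFY_WRITE_SEQ__MAX - 1 entries (see
 * submit_dmub_read_modify_write()), should_burst_write is set and later
 * writes are packed as a burst instead.
 */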
static uint32_t dmub_reg_value_pack(const struct dc_context *ctx, uint32_t addr,
		struct dc_reg_value_masks *field_value_mask)
{
	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
	struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write;
	struct dmub_cmd_read_modify_write_sequence *seq;

	/* flush command if buffer is full */
	if (offload->cmd_data.cmd_common.header.type != DMUB_CMD__REG_SEQ_BURST_WRITE &&
			offload->reg_seq_count == DMUB_READ_MODIFY_WRITE_SEQ__MAX)
		dmub_flush_buffer_execute(offload, ctx);

	if (offload->should_burst_write) {
		if (dmub_reg_value_burst_set_pack(ctx, addr, field_value_mask->value))
			return field_value_mask->value;
		else
			offload->should_burst_write = false;
	}

	/* pack commands */
	cmd_buf->header.type = DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE;
	cmd_buf->header.sub_type = 0;
	seq = &cmd_buf->seq[offload->reg_seq_count];

	if (offload->reg_seq_count) {
		if (cmd_buf->seq[offload->reg_seq_count - 1].addr == addr)
			offload->same_addr_count++;
		else
			offload->same_addr_count = 0;
	}

	seq->addr = addr;
	seq->modify_mask = field_value_mask->mask;
	seq->modify_value = field_value_mask->value;
	offload->reg_seq_count++;

	return field_value_mask->value;
}

static void dmub_reg_wait_done_pack(const struct dc_context *ctx, uint32_t addr,
		uint32_t mask, uint32_t shift, uint32_t condition_value, uint32_t time_out_us)
{
	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
	struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait;

	cmd_buf->header.type = DMUB_CMD__REG_REG_WAIT;
	cmd_buf->header.sub_type = 0;
	cmd_buf->reg_wait.addr = addr;
	cmd_buf->reg_wait.condition_field_value = mask & (condition_value << shift);
	cmd_buf->reg_wait.mask = mask;
	cmd_buf->reg_wait.time_out_us = time_out_us;
}

uint32_t generic_reg_update_ex(const struct dc_context *ctx,
		uint32_t addr, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		...)
{
	struct dc_reg_value_masks field_value_mask = {0};
	uint32_t reg_val;
	va_list ap;

	va_start(ap, field_value1);

	set_reg_field_values(&field_value_mask, addr, n, shift1, mask1,
			field_value1, ap);

	va_end(ap);

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress)
		return dmub_reg_value_pack(ctx, addr, &field_value_mask);
		/* todo: return void so we can decouple code running in driver from register states */

	/* mmio write directly */
	reg_val = dm_read_reg(ctx, addr);
	reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;
	dm_write_reg(ctx, addr, reg_val);
	return reg_val;
}

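/*
 * Illustrative call (normally reached through the REG_UPDATE-style macros,
 * which supply the per-field shift/mask constants): update two fields of
 * one register in a single read-modify-write:
 *
 *	generic_reg_update_ex(ctx, addr, 2,
 *			FIELD1__SHIFT, FIELD1_MASK, value1,
 *			FIELD2__SHIFT, FIELD2_MASK, value2);
 *
 * FIELD1__SHIFT etc. are placeholders for register-specific constants.
 */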
uint32_t generic_reg_set_ex(const struct dc_context *ctx,
		uint32_t addr, uint32_t reg_val, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		...)
{
	struct dc_reg_value_masks field_value_mask = {0};
	va_list ap;

	va_start(ap, field_value1);

	set_reg_field_values(&field_value_mask, addr, n, shift1, mask1,
			field_value1, ap);

	va_end(ap);

	/* mmio write directly */
	reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
		return dmub_reg_value_burst_set_pack(ctx, addr, reg_val);
		/* todo: return void so we can decouple code running in driver from register states */
	}

	dm_write_reg(ctx, addr, reg_val);
	return reg_val;
}

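/*
 * generic_reg_set_ex() differs from generic_reg_update_ex() in that the
 * caller supplies the starting reg_val, so no MMIO read is performed; the
 * packed fields are merged into that value and written out directly.
 */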
uint32_t generic_reg_get(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift, uint32_t mask, uint32_t *field_value)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value = get_reg_field_value_ex(reg_val, mask, shift);
	return reg_val;
}

uint32_t generic_reg_get2(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	return reg_val;
}

uint32_t generic_reg_get3(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	return reg_val;
}

uint32_t generic_reg_get4(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	return reg_val;
}

uint32_t generic_reg_get5(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
	return reg_val;
}

uint32_t generic_reg_get6(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
		uint8_t shift6, uint32_t mask6, uint32_t *field_value6)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
	*field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
	return reg_val;
}

uint32_t generic_reg_get7(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
		uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
		uint8_t shift7, uint32_t mask7, uint32_t *field_value7)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
	*field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
	*field_value7 = get_reg_field_value_ex(reg_val, mask7, shift7);
	return reg_val;
}

uint32_t generic_reg_get8(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
		uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
		uint8_t shift7, uint32_t mask7, uint32_t *field_value7,
		uint8_t shift8, uint32_t mask8, uint32_t *field_value8)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
	*field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
	*field_value7 = get_reg_field_value_ex(reg_val, mask7, shift7);
	*field_value8 = get_reg_field_value_ex(reg_val, mask8, shift8);
	return reg_val;
}

/* note: a va version of this is a pretty bad idea, since the output
 * parameters are passed by pointer; the compiler cannot check for a size
 * match and it is prone to stack-corruption bugs

uint32_t generic_reg_get(const struct dc_context *ctx,
		uint32_t addr, int n, ...)
{
	uint32_t shift, mask;
	uint32_t *field_value;
	uint32_t reg_val;
	int i = 0;

	reg_val = dm_read_reg(ctx, addr);

	va_list ap;
	va_start(ap, n);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t *);

		*field_value = get_reg_field_value_ex(reg_val, mask, shift);
		i++;
	}

	va_end(ap);

	return reg_val;
}
*/

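/*
 * Poll a register field until it equals condition_value, waiting
 * delay_between_poll_us between consecutive reads and giving up after
 * time_out_num_tries retries.  When a DMUB gather is in progress, the wait
 * is instead packed as a single reg-wait command whose overall timeout is
 * delay_between_poll_us * time_out_num_tries.
 */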
void generic_reg_wait(const struct dc_context *ctx,
	uint32_t addr, uint32_t shift, uint32_t mask, uint32_t condition_value,
	unsigned int delay_between_poll_us, unsigned int time_out_num_tries,
	const char *func_name, int line)
{
	uint32_t field_value;
	uint32_t reg_val;
	int i;

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
		dmub_reg_wait_done_pack(ctx, addr, mask, shift, condition_value,
				delay_between_poll_us * time_out_num_tries);
		return;
	}

	/*
	 * Something is terribly wrong if time out is > 3000ms.
	 * 3000ms is the maximum time needed for SMU to pass values back.
	 * This value comes from experiments.
	 */
	ASSERT(delay_between_poll_us * time_out_num_tries <= 3000000);

	for (i = 0; i <= time_out_num_tries; i++) {
		if (i) {
			if (delay_between_poll_us >= 1000)
				msleep(delay_between_poll_us / 1000);
			else if (delay_between_poll_us > 0)
				udelay(delay_between_poll_us);
		}

		reg_val = dm_read_reg(ctx, addr);

		field_value = get_reg_field_value_ex(reg_val, mask, shift);

		if (field_value == condition_value) {
			if (i * delay_between_poll_us > 1000 &&
					!IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
				DC_LOG_DC("REG_WAIT taking a while: %dms in %s line:%d\n",
						delay_between_poll_us * i / 1000,
						func_name, line);
			return;
		}
	}

	DC_LOG_WARNING("REG_WAIT timeout %dus * %d tries - %s line:%d\n",
			delay_between_poll_us, time_out_num_tries,
			func_name, line);

	if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
		BREAK_TO_DEBUGGER();
}

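/*
 * Indirect register access: some register banks are reached through an
 * index/data pair, where the target offset is first written to addr_index
 * and the payload is then written to (or read from) addr_data.
 */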
void generic_write_indirect_reg(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index, uint32_t data)
{
	dm_write_reg(ctx, addr_index, index);
	dm_write_reg(ctx, addr_data, data);
}

uint32_t generic_read_indirect_reg(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index)
{
	uint32_t value = 0;

	/* there should not be any offload in progress during a register read */
	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
		ASSERT(false);
	}

	dm_write_reg(ctx, addr_index, index);
	value = dm_read_reg(ctx, addr_data);
	return value;
}

uint32_t generic_indirect_reg_get(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index, int n,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		...)
{
	uint32_t shift, mask, *field_value;
	uint32_t value = 0;
	int i = 1;

	va_list ap;

	va_start(ap, field_value1);

	value = generic_read_indirect_reg(ctx, addr_index, addr_data, index);
	*field_value1 = get_reg_field_value_ex(value, mask1, shift1);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t *);

		*field_value = get_reg_field_value_ex(value, mask, shift);
		i++;
	}

	va_end(ap);

	return value;
}

uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index, uint32_t reg_val, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		...)
{
	uint32_t shift, mask, field_value;
	int i = 1;

	va_list ap;

	va_start(ap, field_value1);

	reg_val = set_reg_field_value_ex(reg_val, field_value1, mask1, shift1);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t);

		reg_val = set_reg_field_value_ex(reg_val, field_value, mask, shift);
		i++;
	}

	generic_write_indirect_reg(ctx, addr_index, addr_data, index, reg_val);
	va_end(ap);

	return reg_val;
}

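/*
 * Typical offload flow (sketch; actual call sites live elsewhere in DC):
 *
 *	reg_sequence_start_gather(ctx);
 *	... REG_SET / REG_UPDATE / REG_WAIT macros pack commands ...
 *	reg_sequence_start_execute(ctx);   // submit gathered commands to DMUB
 *	reg_sequence_wait_done(ctx);       // poll until DMUB is idle
 */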
void reg_sequence_start_gather(const struct dc_context *ctx)
{
	/* if reg sequence is supported and enabled, set flag to
	 * indicate we want to have the REG_SET, REG_UPDATE macros build a
	 * reg sequence command buffer rather than doing MMIO directly.
	 */
	if (ctx->dmub_srv && ctx->dc->debug.dmub_offload_enabled) {
		struct dc_reg_helper_state *offload =
			&ctx->dmub_srv->reg_helper_offload;

		/* caller sequence mismatch: need to debug the caller; offload will not work! */
		ASSERT(!offload->gather_in_progress);

		offload->gather_in_progress = true;
	}
}

void reg_sequence_start_execute(const struct dc_context *ctx)
{
	struct dc_reg_helper_state *offload;

	if (!ctx->dmub_srv)
		return;

	offload = &ctx->dmub_srv->reg_helper_offload;

	if (offload && offload->gather_in_progress) {
		offload->gather_in_progress = false;
		offload->should_burst_write = false;
		switch (offload->cmd_data.cmd_common.header.type) {
		case DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE:
			submit_dmub_read_modify_write(offload, ctx);
			break;
		case DMUB_CMD__REG_REG_WAIT:
			submit_dmub_reg_wait(offload, ctx);
			break;
		case DMUB_CMD__REG_SEQ_BURST_WRITE:
			submit_dmub_burst_write(offload, ctx);
			break;
		default:
			return;
		}

		dc_dmub_srv_cmd_execute(ctx->dmub_srv);
	}
}

void reg_sequence_wait_done(const struct dc_context *ctx)
{
	/* callback to DM to poll for last submission done */
	struct dc_reg_helper_state *offload;

	if (!ctx->dmub_srv)
		return;

	offload = &ctx->dmub_srv->reg_helper_offload;

	if (offload &&
	    ctx->dc->debug.dmub_offload_enabled &&
	    !ctx->dc->debug.dmcub_emulation) {
		dc_dmub_srv_wait_idle(ctx->dmub_srv);
	}
}