/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "amdgpu.h"
#include "df_v3_6.h"

#include "df/df_3_6_default.h"
#include "df/df_3_6_offset.h"
#include "df/df_3_6_sh_mask.h"
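
/*
 * Number of memory channels for each IntLvNumChan encoding read back via
 * df_v3_6_get_fb_channel_number(); zero entries correspond to encodings
 * this table does not map to a channel count.
 */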
static u32 df_v3_6_channel_number[] = {1, 2, 0, 4, 0, 8, 0,
				       16, 32, 0, 0, 0, 2, 4, 8};

/* init df format attrs */
AMDGPU_PMU_ATTR(event, "config:0-7");
AMDGPU_PMU_ATTR(instance, "config:8-15");
AMDGPU_PMU_ATTR(umask, "config:16-23");
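
/*
 * The format attributes above describe how a perf event config value is laid
 * out for this PMU: the DF event select lives in config bits 0-7, the
 * instance in bits 8-15 and the unit mask in bits 16-23.
 */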

/* df format attributes */
static struct attribute *df_v3_6_format_attrs[] = {
	&pmu_attr_event.attr,
	&pmu_attr_instance.attr,
	&pmu_attr_umask.attr,
	NULL
};

/* df format attribute group */
static struct attribute_group df_v3_6_format_attr_group = {
	.name = "format",
	.attrs = df_v3_6_format_attrs,
};

/* df event attrs */
AMDGPU_PMU_ATTR(cake0_pcsout_txdata,
		"event=0x7,instance=0x46,umask=0x2");
AMDGPU_PMU_ATTR(cake1_pcsout_txdata,
		"event=0x7,instance=0x47,umask=0x2");
AMDGPU_PMU_ATTR(cake0_pcsout_txmeta,
		"event=0x7,instance=0x46,umask=0x4");
AMDGPU_PMU_ATTR(cake1_pcsout_txmeta,
		"event=0x7,instance=0x47,umask=0x4");
AMDGPU_PMU_ATTR(cake0_ftiinstat_reqalloc,
		"event=0xb,instance=0x46,umask=0x4");
AMDGPU_PMU_ATTR(cake1_ftiinstat_reqalloc,
		"event=0xb,instance=0x47,umask=0x4");
AMDGPU_PMU_ATTR(cake0_ftiinstat_rspalloc,
		"event=0xb,instance=0x46,umask=0x8");
AMDGPU_PMU_ATTR(cake1_ftiinstat_rspalloc,
		"event=0xb,instance=0x47,umask=0x8");

/* df event attributes */
static struct attribute *df_v3_6_event_attrs[] = {
	&pmu_attr_cake0_pcsout_txdata.attr,
	&pmu_attr_cake1_pcsout_txdata.attr,
	&pmu_attr_cake0_pcsout_txmeta.attr,
	&pmu_attr_cake1_pcsout_txmeta.attr,
	&pmu_attr_cake0_ftiinstat_reqalloc.attr,
	&pmu_attr_cake1_ftiinstat_reqalloc.attr,
	&pmu_attr_cake0_ftiinstat_rspalloc.attr,
	&pmu_attr_cake1_ftiinstat_rspalloc.attr,
	NULL
};

/* df event attribute group */
static struct attribute_group df_v3_6_event_attr_group = {
	.name = "events",
	.attrs = df_v3_6_event_attrs
};

/* df event attr groups */
const struct attribute_group *df_v3_6_attr_groups[] = {
	&df_v3_6_format_attr_group,
	&df_v3_6_event_attr_group,
	NULL
};
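
/*
 * FICA (Fabric Indirect Config Access) registers are not memory mapped; they
 * are reached through the NBIO PCIe index/data pair, so the accesses below
 * are serialized with pcie_idx_lock.
 */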
static uint64_t df_v3_6_get_fica(struct amdgpu_device *adev,
				 uint32_t ficaa_val)
{
	unsigned long flags, address, data;
	uint32_t ficadl_val, ficadh_val;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
	WREG32(data, ficaa_val);

	WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataLo3);
	ficadl_val = RREG32(data);

	WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataHi3);
	ficadh_val = RREG32(data);

	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return (((ficadh_val & 0xFFFFFFFFFFFFFFFF) << 32) | ficadl_val);
}

static void df_v3_6_set_fica(struct amdgpu_device *adev, uint32_t ficaa_val,
			     uint32_t ficadl_val, uint32_t ficadh_val)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
	WREG32(data, ficaa_val);

	WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataLo3);
	WREG32(data, ficadl_val);

	WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataHi3);
	WREG32(data, ficadh_val);

	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/*
 * df_v3_6_perfmon_rreg - read perfmon lo and hi
 *
 * Required to be atomic. No MMIO method is provided, so the subsequent reads
 * for lo and hi have to preserve the df finite state machine.
 */
static void df_v3_6_perfmon_rreg(struct amdgpu_device *adev,
				 uint32_t lo_addr, uint32_t *lo_val,
				 uint32_t hi_addr, uint32_t *hi_val)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, lo_addr);
	*lo_val = RREG32(data);
	WREG32(address, hi_addr);
	*hi_val = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/*
 * df_v3_6_perfmon_wreg - write to perfmon lo and hi
 *
 * Required to be atomic. No MMIO method is provided, so no reads may occur
 * after the data writes, in order to preserve the data fabric's finite state
 * machine.
 */
static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr,
				 uint32_t lo_val, uint32_t hi_addr, uint32_t hi_val)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, lo_addr);
	WREG32(data, lo_val);
	WREG32(address, hi_addr);
	WREG32(data, hi_val);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/* same as perfmon_wreg but return status on write value check */
static int df_v3_6_perfmon_arm_with_status(struct amdgpu_device *adev,
					   uint32_t lo_addr, uint32_t lo_val,
					   uint32_t hi_addr, uint32_t hi_val)
{
	unsigned long flags, address, data;
	uint32_t lo_val_rb, hi_val_rb;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, lo_addr);
	WREG32(data, lo_val);
	WREG32(address, hi_addr);
	WREG32(data, hi_val);

	WREG32(address, lo_addr);
	lo_val_rb = RREG32(data);
	WREG32(address, hi_addr);
	hi_val_rb = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	if (!(lo_val == lo_val_rb && hi_val == hi_val_rb))
		return -EBUSY;

	return 0;
}

/*
 * retry arming counters every 100 usecs within a 1 millisecond interval.
 * if the retry still fails after the timeout, return an error.
 */
#define ARM_RETRY_USEC_TIMEOUT	1000
#define ARM_RETRY_USEC_INTERVAL	100
static int df_v3_6_perfmon_arm_with_retry(struct amdgpu_device *adev,
					  uint32_t lo_addr, uint32_t lo_val,
					  uint32_t hi_addr, uint32_t hi_val)
{
	int countdown = ARM_RETRY_USEC_TIMEOUT;

	while (countdown) {

		if (!df_v3_6_perfmon_arm_with_status(adev, lo_addr, lo_val,
						     hi_addr, hi_val))
			break;

		countdown -= ARM_RETRY_USEC_INTERVAL;
		udelay(ARM_RETRY_USEC_INTERVAL);
	}

	return countdown > 0 ? 0 : -ETIME;
}

/* get the number of df counters available */
static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct amdgpu_device *adev;
	struct drm_device *ddev;
	int i, count;

	ddev = dev_get_drvdata(dev);
	adev = ddev->dev_private;
	count = 0;

	for (i = 0; i < DF_V3_6_MAX_COUNTERS; i++) {
		if (adev->df_perfmon_config_assign_mask[i] == 0)
			count++;
	}

	return snprintf(buf, PAGE_SIZE, "%i\n", count);
}

/* device attr for available perfmon counters */
static DEVICE_ATTR(df_cntr_avail, S_IRUGO, df_v3_6_get_df_cntr_avail, NULL);

static void df_v3_6_query_hashes(struct amdgpu_device *adev)
{
	u32 tmp;

	adev->df.hash_status.hash_64k = false;
	adev->df.hash_status.hash_2m = false;
	adev->df.hash_status.hash_1g = false;

	if (adev->asic_type != CHIP_ARCTURUS)
		return;

	/* encoding for hash-enabled on Arcturus */
	if (adev->df.funcs->get_fb_channel_number(adev) == 0xe) {
		tmp = RREG32_SOC15(DF, 0, mmDF_CS_UMC_AON0_DfGlobalCtrl);
		adev->df.hash_status.hash_64k = REG_GET_FIELD(tmp,
						DF_CS_UMC_AON0_DfGlobalCtrl,
						GlbHashIntlvCtl64K);
		adev->df.hash_status.hash_2m = REG_GET_FIELD(tmp,
						DF_CS_UMC_AON0_DfGlobalCtrl,
						GlbHashIntlvCtl2M);
		adev->df.hash_status.hash_1g = REG_GET_FIELD(tmp,
						DF_CS_UMC_AON0_DfGlobalCtrl,
						GlbHashIntlvCtl1G);
	}
}

/* init perfmons */
static void df_v3_6_sw_init(struct amdgpu_device *adev)
{
	int i, ret;

	ret = device_create_file(adev->dev, &dev_attr_df_cntr_avail);
	if (ret)
		DRM_ERROR("failed to create file for available df counters\n");

	for (i = 0; i < AMDGPU_MAX_DF_PERFMONS; i++)
		adev->df_perfmon_config_assign_mask[i] = 0;

	df_v3_6_query_hashes(adev);
}

static void df_v3_6_sw_fini(struct amdgpu_device *adev)
{
	device_remove_file(adev->dev, &dev_attr_df_cntr_avail);
}

static void df_v3_6_enable_broadcast_mode(struct amdgpu_device *adev,
					  bool enable)
{
	u32 tmp;

	if (enable) {
		tmp = RREG32_SOC15(DF, 0, mmFabricConfigAccessControl);
		tmp &= ~FabricConfigAccessControl__CfgRegInstAccEn_MASK;
		WREG32_SOC15(DF, 0, mmFabricConfigAccessControl, tmp);
	} else
		WREG32_SOC15(DF, 0, mmFabricConfigAccessControl,
			     mmFabricConfigAccessControl_DEFAULT);
}

static u32 df_v3_6_get_fb_channel_number(struct amdgpu_device *adev)
{
	u32 tmp;

	tmp = RREG32_SOC15(DF, 0, mmDF_CS_UMC_AON0_DramBaseAddress0);
	tmp &= DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK;
	tmp >>= DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;

	return tmp;
}

static u32 df_v3_6_get_hbm_channel_number(struct amdgpu_device *adev)
{
	int fb_channel_number;

	fb_channel_number = adev->df.funcs->get_fb_channel_number(adev);
	if (fb_channel_number >= ARRAY_SIZE(df_v3_6_channel_number))
		fb_channel_number = 0;

	return df_v3_6_channel_number[fb_channel_number];
}

static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						     bool enable)
{
	u32 tmp;

	if (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG) {
		/* Put DF on broadcast mode */
		adev->df.funcs->enable_broadcast_mode(adev, true);

		if (enable) {
			tmp = RREG32_SOC15(DF, 0,
					mmDF_PIE_AON0_DfGlobalClkGater);
			tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
			tmp |= DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY;
			WREG32_SOC15(DF, 0,
					mmDF_PIE_AON0_DfGlobalClkGater, tmp);
		} else {
			tmp = RREG32_SOC15(DF, 0,
					mmDF_PIE_AON0_DfGlobalClkGater);
			tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
			tmp |= DF_V3_6_MGCG_DISABLE;
			WREG32_SOC15(DF, 0,
					mmDF_PIE_AON0_DfGlobalClkGater, tmp);
		}

		/* Exit broadcast mode */
		adev->df.funcs->enable_broadcast_mode(adev, false);
	}
}

static void df_v3_6_get_clockgating_state(struct amdgpu_device *adev,
					  u32 *flags)
{
	u32 tmp;

	/* AMD_CG_SUPPORT_DF_MGCG */
	tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
	if (tmp & DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY)
		*flags |= AMD_CG_SUPPORT_DF_MGCG;
}
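
/*
 * A perfmon counter assignment is identified by the low 24 bits of its config
 * value (event select, instance and unit mask as described by the format
 * attributes); those bits are what get stored in
 * df_perfmon_config_assign_mask[] when a counter is claimed.
 */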
/* get assigned df perfmon ctr as int */
static int df_v3_6_pmc_config_2_cntr(struct amdgpu_device *adev,
				     uint64_t config)
{
	int i;

	for (i = 0; i < DF_V3_6_MAX_COUNTERS; i++) {
		if ((config & 0x0FFFFFFUL) ==
					adev->df_perfmon_config_assign_mask[i])
			return i;
	}

	return -EINVAL;
}

/* get address based on counter assignment */
static void df_v3_6_pmc_get_addr(struct amdgpu_device *adev,
				 uint64_t config,
				 int is_ctrl,
				 uint32_t *lo_base_addr,
				 uint32_t *hi_base_addr)
{
	int target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);

	if (target_cntr < 0)
		return;

	switch (target_cntr) {

	case 0:
		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo4 : smnPerfMonCtrLo4;
		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi4 : smnPerfMonCtrHi4;
		break;
	case 1:
		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo5 : smnPerfMonCtrLo5;
		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi5 : smnPerfMonCtrHi5;
		break;
	case 2:
		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo6 : smnPerfMonCtrLo6;
		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi6 : smnPerfMonCtrHi6;
		break;
	case 3:
		*lo_base_addr = is_ctrl ? smnPerfMonCtlLo7 : smnPerfMonCtrLo7;
		*hi_base_addr = is_ctrl ? smnPerfMonCtlHi7 : smnPerfMonCtrHi7;
		break;
	}
}

/* get read counter address */
static void df_v3_6_pmc_get_read_settings(struct amdgpu_device *adev,
					  uint64_t config,
					  uint32_t *lo_base_addr,
					  uint32_t *hi_base_addr)
{
	df_v3_6_pmc_get_addr(adev, config, 0, lo_base_addr, hi_base_addr);
}
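
/*
 * The control value built below packs the fields decoded from config: the
 * unit mask goes in bits 8-11 of the lo word, the low two instance bits in
 * bits 6-7, the event select in bits 0-5, and bit 22 is set when arming the
 * counter; the remaining instance bits are split across the hi word.
 */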
/* get control counter settings i.e. address and values to set */
static int df_v3_6_pmc_get_ctrl_settings(struct amdgpu_device *adev,
					 uint64_t config,
					 uint32_t *lo_base_addr,
					 uint32_t *hi_base_addr,
					 uint32_t *lo_val,
					 uint32_t *hi_val)
{
	uint32_t eventsel, instance, unitmask;
	uint32_t instance_10, instance_5432, instance_76;

	df_v3_6_pmc_get_addr(adev, config, 1, lo_base_addr, hi_base_addr);

	if ((*lo_base_addr == 0) || (*hi_base_addr == 0)) {
		DRM_ERROR("[DF PMC] addressing not retrieved! Lo: %x, Hi: %x",
				*lo_base_addr, *hi_base_addr);
		return -ENXIO;
	}

	eventsel = DF_V3_6_GET_EVENT(config) & 0x3f;
	unitmask = DF_V3_6_GET_UNITMASK(config) & 0xf;
	instance = DF_V3_6_GET_INSTANCE(config);

	instance_10 = instance & 0x3;
	instance_5432 = (instance >> 2) & 0xf;
	instance_76 = (instance >> 6) & 0x3;

	*lo_val = (unitmask << 8) | (instance_10 << 6) | eventsel | (1 << 22);
	*hi_val = (instance_76 << 29) | instance_5432;

	DRM_DEBUG_DRIVER("config=%llx addr=%08x:%08x val=%08x:%08x",
		config, *lo_base_addr, *hi_base_addr, *lo_val, *hi_val);

	return 0;
}

/* add df performance counters for read */
static int df_v3_6_pmc_add_cntr(struct amdgpu_device *adev,
				uint64_t config)
{
	int i, target_cntr;

	target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);

	if (target_cntr >= 0)
		return 0;

	for (i = 0; i < DF_V3_6_MAX_COUNTERS; i++) {
		if (adev->df_perfmon_config_assign_mask[i] == 0U) {
			adev->df_perfmon_config_assign_mask[i] =
							config & 0x0FFFFFFUL;
			return 0;
		}
	}

	return -ENOSPC;
}
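
/*
 * Bit 31 of the assignment mask marks a counter whose arming failed at
 * pmc_start time; such "deferred" counters are re-armed from
 * df_v3_6_pmc_get_count() before their value is reported.
 */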
#define DEFERRED_ARM_MASK	(1 << 31)
static int df_v3_6_pmc_set_deferred(struct amdgpu_device *adev,
				    uint64_t config, bool is_deferred)
{
	int target_cntr;

	target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);

	if (target_cntr < 0)
		return -EINVAL;

	if (is_deferred)
		adev->df_perfmon_config_assign_mask[target_cntr] |=
							DEFERRED_ARM_MASK;
	else
		adev->df_perfmon_config_assign_mask[target_cntr] &=
							~DEFERRED_ARM_MASK;

	return 0;
}

static bool df_v3_6_pmc_is_deferred(struct amdgpu_device *adev,
				    uint64_t config)
{
	int target_cntr;

	target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);

	/*
	 * we never get target_cntr < 0 since this function is only called in
	 * pmc_count for now, but we should check anyway.
	 */
	return (target_cntr >= 0 &&
			(adev->df_perfmon_config_assign_mask[target_cntr]
			& DEFERRED_ARM_MASK));
}

/* release performance counter */
static void df_v3_6_pmc_release_cntr(struct amdgpu_device *adev,
				     uint64_t config)
{
	int target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);

	if (target_cntr >= 0)
		adev->df_perfmon_config_assign_mask[target_cntr] = 0ULL;
}

static void df_v3_6_reset_perfmon_cntr(struct amdgpu_device *adev,
				       uint64_t config)
{
	uint32_t lo_base_addr, hi_base_addr;

	df_v3_6_pmc_get_read_settings(adev, config, &lo_base_addr,
				      &hi_base_addr);

	if ((lo_base_addr == 0) || (hi_base_addr == 0))
		return;

	df_v3_6_perfmon_wreg(adev, lo_base_addr, 0, hi_base_addr, 0);
}

static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
			     int is_enable)
{
	uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
	int err = 0, ret = 0;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
		if (is_enable)
			return df_v3_6_pmc_add_cntr(adev, config);

		df_v3_6_reset_perfmon_cntr(adev, config);

		ret = df_v3_6_pmc_get_ctrl_settings(adev,
					config,
					&lo_base_addr,
					&hi_base_addr,
					&lo_val,
					&hi_val);

		if (ret)
			return ret;

		err = df_v3_6_perfmon_arm_with_retry(adev,
						     lo_base_addr,
						     lo_val,
						     hi_base_addr,
						     hi_val);

		if (err)
			ret = df_v3_6_pmc_set_deferred(adev, config, true);

		break;
	default:
		break;
	}

	return ret;
}

static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config,
			    int is_disable)
{
	uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
	int ret = 0;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
		ret = df_v3_6_pmc_get_ctrl_settings(adev,
					config,
					&lo_base_addr,
					&hi_base_addr,
					&lo_val,
					&hi_val);

		if (ret)
			return ret;

		df_v3_6_reset_perfmon_cntr(adev, config);

		if (is_disable)
			df_v3_6_pmc_release_cntr(adev, config);

		break;
	default:
		break;
	}

	return ret;
}
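
/*
 * Read back the counter value: re-arm a deferred counter first (and report 0
 * if that still fails), then read the lo/hi pair atomically and combine it
 * into a single 64-bit count, discarding values at or above
 * DF_V3_6_PERFMON_OVERFLOW.
 */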
static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
				  uint64_t config,
				  uint64_t *count)
{
	uint32_t lo_base_addr, hi_base_addr, lo_val = 0, hi_val = 0;
	*count = 0;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
		df_v3_6_pmc_get_read_settings(adev, config, &lo_base_addr,
					      &hi_base_addr);

		if ((lo_base_addr == 0) || (hi_base_addr == 0))
			return;

		/* rearm the counter or throw away count value on failure */
		if (df_v3_6_pmc_is_deferred(adev, config)) {
			int rearm_err = df_v3_6_perfmon_arm_with_status(adev,
							lo_base_addr, lo_val,
							hi_base_addr, hi_val);

			if (rearm_err)
				return;

			df_v3_6_pmc_set_deferred(adev, config, false);
		}

		df_v3_6_perfmon_rreg(adev, lo_base_addr, &lo_val,
				     hi_base_addr, &hi_val);

		*count = ((hi_val | 0ULL) << 32) | (lo_val | 0ULL);

		if (*count >= DF_V3_6_PERFMON_OVERFLOW)
			*count = 0;

		DRM_DEBUG_DRIVER("config=%llx addr=%08x:%08x val=%08x:%08x",
			 config, lo_base_addr, hi_base_addr, lo_val, hi_val);

		break;
	default:
		break;
	}
}

const struct amdgpu_df_funcs df_v3_6_funcs = {
	.sw_init = df_v3_6_sw_init,
	.sw_fini = df_v3_6_sw_fini,
	.enable_broadcast_mode = df_v3_6_enable_broadcast_mode,
	.get_fb_channel_number = df_v3_6_get_fb_channel_number,
	.get_hbm_channel_number = df_v3_6_get_hbm_channel_number,
	.update_medium_grain_clock_gating =
			df_v3_6_update_medium_grain_clock_gating,
	.get_clockgating_state = df_v3_6_get_clockgating_state,
	.pmc_start = df_v3_6_pmc_start,
	.pmc_stop = df_v3_6_pmc_stop,
	.pmc_get_count = df_v3_6_pmc_get_count,
	.get_fica = df_v3_6_get_fica,
	.set_fica = df_v3_6_set_fica
};