/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <core/client.h>
#include <core/option.h>

#include <nvif/class.h>
#include <nvif/if0002.h>
#include <nvif/if0003.h>
#include <nvif/ioctl.h>
#include <nvif/unpack.h>

static u8
nvkm_pm_count_perfdom(struct nvkm_pm *pm)
{
        struct nvkm_perfdom *dom;
        u8 domain_nr = 0;

        list_for_each_entry(dom, &pm->domains, head)
                domain_nr++;
        return domain_nr;
}

static u16
nvkm_perfdom_count_perfsig(struct nvkm_perfdom *dom)
{
        u16 signal_nr = 0;
        int i;

        if (dom) {
                for (i = 0; i < dom->signal_nr; i++) {
                        if (dom->signal[i].name)
                                signal_nr++;
                }
        }
        return signal_nr;
}

static struct nvkm_perfdom *
nvkm_perfdom_find(struct nvkm_pm *pm, int di)
{
        struct nvkm_perfdom *dom;
        int tmp = 0;

        list_for_each_entry(dom, &pm->domains, head) {
                if (tmp++ == di)
                        return dom;
        }
        return NULL;
}

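/* Look up signal 'si' within domain 'di' (or within *pdom if the caller has
 * already resolved the domain); returns NULL for unknown/unnamed signals.
 */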
static struct nvkm_perfsig *
nvkm_perfsig_find(struct nvkm_pm *pm, u8 di, u8 si, struct nvkm_perfdom **pdom)
{
        struct nvkm_perfdom *dom = *pdom;

        if (dom == NULL) {
                dom = nvkm_perfdom_find(pm, di);
                if (dom == NULL)
                        return NULL;
                *pdom = dom;
        }

        if (!dom->signal[si].name)
                return NULL;
        return &dom->signal[si];
}

static u8
nvkm_perfsig_count_perfsrc(struct nvkm_perfsig *sig)
{
        u8 source_nr = 0, i;

        for (i = 0; i < ARRAY_SIZE(sig->source); i++) {
                if (sig->source[i])
                        source_nr++;
        }
        return source_nr;
}

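/* Resolve source ID 'si' (as referenced by a signal) to its nvkm_perfsrc
 * entry on the global pm->sources list. Source IDs are 1-based, which is
 * why the walk below starts counting at 1.
 */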
static struct nvkm_perfsrc *
nvkm_perfsrc_find(struct nvkm_pm *pm, struct nvkm_perfsig *sig, int si)
{
        struct nvkm_perfsrc *src;
        bool found = false;
        int tmp = 1; /* Sources ID start from 1 */
        u8 i;

        for (i = 0; i < ARRAY_SIZE(sig->source) && sig->source[i]; i++) {
                if (sig->source[i] == si) {
                        found = true;
                        break;
                }
        }

        if (found) {
                list_for_each_entry(src, &pm->sources, head) {
                        if (tmp++ == si)
                                return src;
                }
        }

        return NULL;
}

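/* Program the source multiplexers referenced by a counter: for each signal
 * slot, set the source's enable bit (when required) and write the selected
 * value, taken from the upper 32 bits of ctr->source[i][j], into the mux
 * field described by the source's mask/shift.
 */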
static int
nvkm_perfsrc_enable(struct nvkm_pm *pm, struct nvkm_perfctr *ctr)
{
        struct nvkm_subdev *subdev = &pm->engine.subdev;
        struct nvkm_device *device = subdev->device;
        struct nvkm_perfdom *dom = NULL;
        struct nvkm_perfsig *sig;
        struct nvkm_perfsrc *src;
        u32 mask, value;
        int i, j;

        for (i = 0; i < 4; i++) {
                for (j = 0; j < 8 && ctr->source[i][j]; j++) {
                        sig = nvkm_perfsig_find(pm, ctr->domain,
                                                ctr->signal[i], &dom);
                        if (!sig)
                                return -EINVAL;

                        src = nvkm_perfsrc_find(pm, sig, ctr->source[i][j]);
                        if (!src)
                                return -EINVAL;

                        /* set enable bit if needed */
                        mask = value = 0x00000000;
                        if (src->enable)
                                mask = value = 0x80000000;
                        mask |= (src->mask << src->shift);
                        value |= ((ctr->source[i][j] >> 32) << src->shift);

                        /* enable the source */
                        nvkm_mask(device, src->addr, mask, value);
                        nvkm_debug(subdev,
                                   "enabled source %08x %08x %08x\n",
                                   src->addr, mask, value);
                }
        }
        return 0;
}

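/* Reverse of nvkm_perfsrc_enable(): clear the enable bit (when present) and
 * the mux field for every source referenced by the counter.
 */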
static int
nvkm_perfsrc_disable(struct nvkm_pm *pm, struct nvkm_perfctr *ctr)
{
        struct nvkm_subdev *subdev = &pm->engine.subdev;
        struct nvkm_device *device = subdev->device;
        struct nvkm_perfdom *dom = NULL;
        struct nvkm_perfsig *sig;
        struct nvkm_perfsrc *src;
        u32 mask;
        int i, j;

        for (i = 0; i < 4; i++) {
                for (j = 0; j < 8 && ctr->source[i][j]; j++) {
                        sig = nvkm_perfsig_find(pm, ctr->domain,
                                                ctr->signal[i], &dom);
                        if (!sig)
                                return -EINVAL;

                        src = nvkm_perfsrc_find(pm, sig, ctr->source[i][j]);
                        if (!src)
                                return -EINVAL;

                        /* unset enable bit if needed */
                        mask = 0x00000000;
                        if (src->enable)
                                mask = 0x80000000;
                        mask |= (src->mask << src->shift);

                        /* disable the source */
                        nvkm_mask(device, src->addr, mask, 0);
                        nvkm_debug(subdev, "disabled source %08x %08x\n",
                                   src->addr, mask);
                }
        }
        return 0;
}

/*******************************************************************************
 * Perfdom object classes
 ******************************************************************************/
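/* NVIF_PERFDOM_V0_INIT: start the counters bound to this perfdom object.
 * Each populated counter slot is handed to the hardware-specific init()
 * hook and its signal sources are enabled, then next() kicks off the first
 * sampling batch.
 */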
static int
nvkm_perfdom_init(struct nvkm_perfdom *dom, void *data, u32 size)
{
        union {
                struct nvif_perfdom_init none;
        } *args = data;
        struct nvkm_object *object = &dom->object;
        struct nvkm_pm *pm = dom->perfmon->pm;
        int ret = -ENOSYS, i;

        nvif_ioctl(object, "perfdom init size %d\n", size);
        if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
                nvif_ioctl(object, "perfdom init\n");
        } else
                return ret;

        for (i = 0; i < 4; i++) {
                if (dom->ctr[i]) {
                        dom->func->init(pm, dom, dom->ctr[i]);

                        /* enable sources */
                        nvkm_perfsrc_enable(pm, dom->ctr[i]);
                }
        }

        /* start next batch of counters for sampling */
        dom->func->next(pm, dom);
        return 0;
}

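/* NVIF_PERFDOM_V0_SAMPLE: latch the previous batch of counters on every
 * domain and advance to the next batch.
 */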
static int
nvkm_perfdom_sample(struct nvkm_perfdom *dom, void *data, u32 size)
{
        union {
                struct nvif_perfdom_sample none;
        } *args = data;
        struct nvkm_object *object = &dom->object;
        struct nvkm_pm *pm = dom->perfmon->pm;
        int ret = -ENOSYS;

        nvif_ioctl(object, "perfdom sample size %d\n", size);
        if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
                nvif_ioctl(object, "perfdom sample\n");
        } else
                return ret;
        pm->sequence++;

        /* sample previous batch of counters */
        list_for_each_entry(dom, &pm->domains, head)
                dom->func->next(pm, dom);

        return 0;
}

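/* NVIF_PERFDOM_V0_READ: read back the current counter values and the domain
 * clock; returns -EAGAIN if no sample has been captured yet (dom->clk is
 * still zero).
 */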
static int
nvkm_perfdom_read(struct nvkm_perfdom *dom, void *data, u32 size)
{
        union {
                struct nvif_perfdom_read_v0 v0;
        } *args = data;
        struct nvkm_object *object = &dom->object;
        struct nvkm_pm *pm = dom->perfmon->pm;
        int ret = -ENOSYS, i;

        nvif_ioctl(object, "perfdom read size %d\n", size);
        if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
                nvif_ioctl(object, "perfdom read vers %d\n", args->v0.version);
        } else
                return ret;

        for (i = 0; i < 4; i++) {
                if (dom->ctr[i])
                        dom->func->read(pm, dom, dom->ctr[i]);
        }

        if (!dom->clk)
                return -EAGAIN;

        for (i = 0; i < 4; i++)
                if (dom->ctr[i])
                        args->v0.ctr[i] = dom->ctr[i]->ctr;
        args->v0.clk = dom->clk;
        return 0;
}

static int
nvkm_perfdom_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
        struct nvkm_perfdom *dom = nvkm_perfdom(object);
        switch (mthd) {
        case NVIF_PERFDOM_V0_INIT:
                return nvkm_perfdom_init(dom, data, size);
        case NVIF_PERFDOM_V0_SAMPLE:
                return nvkm_perfdom_sample(dom, data, size);
        case NVIF_PERFDOM_V0_READ:
                return nvkm_perfdom_read(dom, data, size);
        default:
                break;
        }
        return -EINVAL;
}

static void *
nvkm_perfdom_dtor(struct nvkm_object *object)
{
        struct nvkm_perfdom *dom = nvkm_perfdom(object);
        struct nvkm_pm *pm = dom->perfmon->pm;
        int i;

        for (i = 0; i < 4; i++) {
                struct nvkm_perfctr *ctr = dom->ctr[i];
                if (ctr) {
                        nvkm_perfsrc_disable(pm, ctr);
                        if (ctr->head.next)
                                list_del(&ctr->head);
                }
                kfree(ctr);
        }

        return dom;
}

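/* Allocate a counter for slot 'slot', recording which domain signals and
 * sources it monitors, and queue it on the domain's counter list.
 */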
static int
nvkm_perfctr_new(struct nvkm_perfdom *dom, int slot, u8 domain,
                 struct nvkm_perfsig *signal[4], u64 source[4][8],
                 u16 logic_op, struct nvkm_perfctr **pctr)
{
        struct nvkm_perfctr *ctr;
        int i, j;

        if (!dom)
                return -EINVAL;

        ctr = *pctr = kzalloc(sizeof(*ctr), GFP_KERNEL);
        if (!ctr)
                return -ENOMEM;

        ctr->domain   = domain;
        ctr->logic_op = logic_op;
        ctr->slot     = slot;
        for (i = 0; i < 4; i++) {
                if (signal[i]) {
                        ctr->signal[i] = signal[i] - dom->signal;
                        for (j = 0; j < 8; j++)
                                ctr->source[i][j] = source[i][j];
                }
        }
        list_add_tail(&ctr->head, &dom->list);

        return 0;
}

static const struct nvkm_object_func
nvkm_perfdom = {
        .dtor = nvkm_perfdom_dtor,
        .mthd = nvkm_perfdom_mthd,
};

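/* Constructor for perfdom objects: validate the requested domain, signals
 * and sources from the nvif_perfdom_v0 arguments, allocate the counters,
 * then bind them to a new software perfdom instance.
 */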
static int
nvkm_perfdom_new_(struct nvkm_perfmon *perfmon,
                  const struct nvkm_oclass *oclass, void *data, u32 size,
                  struct nvkm_object **pobject)
{
        union {
                struct nvif_perfdom_v0 v0;
        } *args = data;
        struct nvkm_pm *pm = perfmon->pm;
        struct nvkm_object *parent = oclass->parent;
        struct nvkm_perfdom *sdom = NULL;
        struct nvkm_perfctr *ctr[4] = {};
        struct nvkm_perfdom *dom;
        int c, s, m;
        int ret = -ENOSYS;

        nvif_ioctl(parent, "create perfdom size %d\n", size);
        if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
                nvif_ioctl(parent, "create perfdom vers %d dom %d mode %02x\n",
                           args->v0.version, args->v0.domain, args->v0.mode);
        } else
                return ret;

        for (c = 0; c < ARRAY_SIZE(args->v0.ctr); c++) {
                struct nvkm_perfsig *sig[4] = {};
                u64 src[4][8] = {};

                for (s = 0; s < ARRAY_SIZE(args->v0.ctr[c].signal); s++) {
                        sig[s] = nvkm_perfsig_find(pm, args->v0.domain,
                                                   args->v0.ctr[c].signal[s],
                                                   &sdom);
                        if (args->v0.ctr[c].signal[s] && !sig[s])
                                return -EINVAL;

                        for (m = 0; m < 8; m++) {
                                src[s][m] = args->v0.ctr[c].source[s][m];
                                if (src[s][m] && !nvkm_perfsrc_find(pm, sig[s],
                                                                    src[s][m]))
                                        return -EINVAL;
                        }
                }

                ret = nvkm_perfctr_new(sdom, c, args->v0.domain, sig, src,
                                       args->v0.ctr[c].logic_op, &ctr[c]);
                if (ret)
                        return ret;
        }

        if (!(dom = kzalloc(sizeof(*dom), GFP_KERNEL)))
                return -ENOMEM;
        nvkm_object_ctor(&nvkm_perfdom, oclass, &dom->object);
        dom->perfmon = perfmon;
        *pobject = &dom->object;

        dom->func = sdom->func;
        dom->addr = sdom->addr;
        dom->mode = args->v0.mode;
        for (c = 0; c < ARRAY_SIZE(ctr); c++)
                dom->ctr[c] = ctr[c];
        return 0;
}

/*******************************************************************************
 * Perfmon object classes
 ******************************************************************************/
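/* NVIF_PERFMON_V0_QUERY_DOMAIN: iterator-style query. 'iter' selects the
 * domain to describe (0 means "start iteration"); on return it holds the
 * iterator for the next domain, or 0xff once the list is exhausted.
 */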
static int
nvkm_perfmon_mthd_query_domain(struct nvkm_perfmon *perfmon,
                               void *data, u32 size)
{
        union {
                struct nvif_perfmon_query_domain_v0 v0;
        } *args = data;
        struct nvkm_object *object = &perfmon->object;
        struct nvkm_pm *pm = perfmon->pm;
        struct nvkm_perfdom *dom;
        u8 domain_nr;
        int di, ret = -ENOSYS;

        nvif_ioctl(object, "perfmon query domain size %d\n", size);
        if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
                nvif_ioctl(object, "perfmon domain vers %d iter %02x\n",
                           args->v0.version, args->v0.iter);
                di = (args->v0.iter & 0xff) - 1;
        } else
                return ret;

        domain_nr = nvkm_pm_count_perfdom(pm);
        if (di >= (int)domain_nr)
                return -EINVAL;

        if (di >= 0) {
                dom = nvkm_perfdom_find(pm, di);
                if (dom == NULL)
                        return -EINVAL;

                args->v0.id        = di;
                args->v0.signal_nr = nvkm_perfdom_count_perfsig(dom);
                strncpy(args->v0.name, dom->name, sizeof(args->v0.name) - 1);

                /* Currently only global counters (PCOUNTER) are implemented
                 * but this will be different for local counters (MP). */
                args->v0.counter_nr = 4;
        }

        if (++di < domain_nr) {
                args->v0.iter = ++di;
                return 0;
        }

        args->v0.iter = 0xff;
        return 0;
}

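/* NVIF_PERFMON_V0_QUERY_SIGNAL: iterate over the signals of one domain.
 * Unnamed signals are skipped unless the "NvPmShowAll" config option is set,
 * and raw "/<domain>/<index>" names are reported when "NvPmUnnamed" is set.
 */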
static int
nvkm_perfmon_mthd_query_signal(struct nvkm_perfmon *perfmon,
                               void *data, u32 size)
{
        union {
                struct nvif_perfmon_query_signal_v0 v0;
        } *args = data;
        struct nvkm_object *object = &perfmon->object;
        struct nvkm_pm *pm = perfmon->pm;
        struct nvkm_device *device = pm->engine.subdev.device;
        struct nvkm_perfdom *dom;
        struct nvkm_perfsig *sig;
        const bool all = nvkm_boolopt(device->cfgopt, "NvPmShowAll", false);
        const bool raw = nvkm_boolopt(device->cfgopt, "NvPmUnnamed", all);
        int ret = -ENOSYS, si;

        nvif_ioctl(object, "perfmon query signal size %d\n", size);
        if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
                nvif_ioctl(object,
                           "perfmon query signal vers %d dom %d iter %04x\n",
                           args->v0.version, args->v0.domain, args->v0.iter);
                si = (args->v0.iter & 0xffff) - 1;
        } else
                return ret;

        dom = nvkm_perfdom_find(pm, args->v0.domain);
        if (dom == NULL || si >= (int)dom->signal_nr)
                return -EINVAL;

        if (si >= 0) {
                sig = &dom->signal[si];
                if (raw || !sig->name) {
                        snprintf(args->v0.name, sizeof(args->v0.name),
                                 "/%s/%02x", dom->name, si);
                } else {
                        strncpy(args->v0.name, sig->name,
                                sizeof(args->v0.name) - 1);
                }

                args->v0.signal = si;
                args->v0.source_nr = nvkm_perfsig_count_perfsrc(sig);
        }

        while (++si < dom->signal_nr) {
                if (all || dom->signal[si].name) {
                        args->v0.iter = ++si;
                        return 0;
                }
        }

        args->v0.iter = 0xffff;
        return 0;
}

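/* NVIF_PERFMON_V0_QUERY_SOURCE: iterate over the sources that can feed one
 * signal, reporting each source's name and mux mask.
 */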
static int
nvkm_perfmon_mthd_query_source(struct nvkm_perfmon *perfmon,
                               void *data, u32 size)
{
        union {
                struct nvif_perfmon_query_source_v0 v0;
        } *args = data;
        struct nvkm_object *object = &perfmon->object;
        struct nvkm_pm *pm = perfmon->pm;
        struct nvkm_perfdom *dom = NULL;
        struct nvkm_perfsig *sig;
        struct nvkm_perfsrc *src;
        u8 source_nr = 0;
        int si, ret = -ENOSYS;

        nvif_ioctl(object, "perfmon query source size %d\n", size);
        if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
                nvif_ioctl(object,
                           "perfmon source vers %d dom %d sig %02x iter %02x\n",
                           args->v0.version, args->v0.domain, args->v0.signal,
                           args->v0.iter);
                si = (args->v0.iter & 0xff) - 1;
        } else
                return ret;

        sig = nvkm_perfsig_find(pm, args->v0.domain, args->v0.signal, &dom);
        if (!sig)
                return -EINVAL;

        source_nr = nvkm_perfsig_count_perfsrc(sig);
        if (si >= (int)source_nr)
                return -EINVAL;

        if (si >= 0) {
                src = nvkm_perfsrc_find(pm, sig, sig->source[si]);
                if (!src)
                        return -EINVAL;

                args->v0.source = sig->source[si];
                args->v0.mask   = src->mask;
                strncpy(args->v0.name, src->name, sizeof(args->v0.name) - 1);
        }

        if (++si < source_nr) {
                args->v0.iter = ++si;
                return 0;
        }

        args->v0.iter = 0xff;
        return 0;
}

static int
nvkm_perfmon_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
        struct nvkm_perfmon *perfmon = nvkm_perfmon(object);
        switch (mthd) {
        case NVIF_PERFMON_V0_QUERY_DOMAIN:
                return nvkm_perfmon_mthd_query_domain(perfmon, data, size);
        case NVIF_PERFMON_V0_QUERY_SIGNAL:
                return nvkm_perfmon_mthd_query_signal(perfmon, data, size);
        case NVIF_PERFMON_V0_QUERY_SOURCE:
                return nvkm_perfmon_mthd_query_source(perfmon, data, size);
        default:
                break;
        }
        return -EINVAL;
}

static int
nvkm_perfmon_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
                       struct nvkm_object **pobject)
{
        struct nvkm_perfmon *perfmon = nvkm_perfmon(oclass->parent);
        return nvkm_perfdom_new_(perfmon, oclass, data, size, pobject);
}

static int
nvkm_perfmon_child_get(struct nvkm_object *object, int index,
                       struct nvkm_oclass *oclass)
{
        if (index == 0) {
                oclass->base.oclass = NVIF_CLASS_PERFDOM;
                oclass->base.minver = 0;
                oclass->base.maxver = 0;
                oclass->ctor = nvkm_perfmon_child_new;
                return 0;
        }
        return -EINVAL;
}

static void *
nvkm_perfmon_dtor(struct nvkm_object *object)
{
        struct nvkm_perfmon *perfmon = nvkm_perfmon(object);
        struct nvkm_pm *pm = perfmon->pm;
        mutex_lock(&pm->engine.subdev.mutex);
        if (pm->perfmon == &perfmon->object)
                pm->perfmon = NULL;
        mutex_unlock(&pm->engine.subdev.mutex);
        return perfmon;
}

static const struct nvkm_object_func
nvkm_perfmon = {
        .dtor = nvkm_perfmon_dtor,
        .mthd = nvkm_perfmon_mthd,
        .sclass = nvkm_perfmon_child_get,
};

static int
nvkm_perfmon_new(struct nvkm_pm *pm, const struct nvkm_oclass *oclass,
                 void *data, u32 size, struct nvkm_object **pobject)
{
        struct nvkm_perfmon *perfmon;

        if (!(perfmon = kzalloc(sizeof(*perfmon), GFP_KERNEL)))
                return -ENOMEM;
        nvkm_object_ctor(&nvkm_perfmon, oclass, &perfmon->object);
        perfmon->pm = pm;
        *pobject = &perfmon->object;
        return 0;
}

/*******************************************************************************
 * PPM engine/subdev functions
 ******************************************************************************/

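/* Only one perfmon client may own the PM engine at a time: the first object
 * created claims pm->perfmon under the subdev mutex, later ones get -EBUSY.
 */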
static int
nvkm_pm_oclass_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
                   void *data, u32 size, struct nvkm_object **pobject)
{
        struct nvkm_pm *pm = nvkm_pm(oclass->engine);
        int ret;

        ret = nvkm_perfmon_new(pm, oclass, data, size, pobject);
        if (ret)
                return ret;

        mutex_lock(&pm->engine.subdev.mutex);
        if (pm->perfmon == NULL)
                pm->perfmon = *pobject;
        ret = (pm->perfmon == *pobject) ? 0 : -EBUSY;
        mutex_unlock(&pm->engine.subdev.mutex);
        return ret;
}

static const struct nvkm_device_oclass
nvkm_pm_oclass = {
        .base.oclass = NVIF_CLASS_PERFMON,
        .base.minver = -1,
        .base.maxver = -1,
        .ctor = nvkm_pm_oclass_new,
};

static int
nvkm_pm_oclass_get(struct nvkm_oclass *oclass, int index,
                   const struct nvkm_device_oclass **class)
{
        if (index == 0) {
                oclass->base = nvkm_pm_oclass.base;
                *class = &nvkm_pm_oclass;
                return index;
        }
        return 1;
}

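/* Register the sources described by a specsrc table for one signal. Each
 * (register, shift) pair is added to the global pm->sources list only once;
 * the signal then stores 1-based indices into that list.
 */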
static int
nvkm_perfsrc_new(struct nvkm_pm *pm, struct nvkm_perfsig *sig,
                 const struct nvkm_specsrc *spec)
{
        const struct nvkm_specsrc *ssrc;
        const struct nvkm_specmux *smux;
        struct nvkm_perfsrc *src;
        u8 source_nr = 0;

        if (!spec) {
                /* No sources are defined for this signal. */
                return 0;
        }

        ssrc = spec;
        while (ssrc->name) {
                smux = ssrc->mux;
                while (smux->name) {
                        bool found = false;
                        u8 source_id = 0;
                        u32 len;

                        list_for_each_entry(src, &pm->sources, head) {
                                if (src->addr == ssrc->addr &&
                                    src->shift == smux->shift) {
                                        found = true;
                                        break;
                                }
                                source_id++;
                        }

                        if (!found) {
                                src = kzalloc(sizeof(*src), GFP_KERNEL);
                                if (!src)
                                        return -ENOMEM;

                                src->addr   = ssrc->addr;
                                src->mask   = smux->mask;
                                src->shift  = smux->shift;
                                src->enable = smux->enable;

                                len = strlen(ssrc->name) +
                                      strlen(smux->name) + 2;
                                src->name = kzalloc(len, GFP_KERNEL);
                                if (!src->name) {
                                        kfree(src);
                                        return -ENOMEM;
                                }
                                snprintf(src->name, len, "%s_%s", ssrc->name,
                                         smux->name);

                                list_add_tail(&src->head, &pm->sources);
                        }

                        sig->source[source_nr++] = source_id + 1;
                        smux++;
                }
                ssrc++;
        }

        return 0;
}

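/* Build the software perfdoms for one hardware unit type: for every unit
 * instance selected by 'mask' and every specdom entry, allocate a domain,
 * name it, record its signals and register their sources.
 */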
int
nvkm_perfdom_new(struct nvkm_pm *pm, const char *name, u32 mask,
                 u32 base, u32 size_unit, u32 size_domain,
                 const struct nvkm_specdom *spec)
{
        const struct nvkm_specdom *sdom;
        const struct nvkm_specsig *ssig;
        struct nvkm_perfdom *dom;
        int ret, i;

        for (i = 0; i == 0 || mask; i++) {
                u32 addr = base + (i * size_unit);
                if (i && !(mask & (1 << i)))
                        continue;
                mask &= ~(1 << i);

                sdom = spec;
                while (sdom->signal_nr) {
                        dom = kzalloc(struct_size(dom, signal, sdom->signal_nr),
                                      GFP_KERNEL);
                        if (!dom)
                                return -ENOMEM;

                        if (mask) {
                                snprintf(dom->name, sizeof(dom->name),
                                         "%s/%02x/%02x", name, i,
                                         (int)(sdom - spec));
                        } else {
                                snprintf(dom->name, sizeof(dom->name),
                                         "%s/%02x", name, (int)(sdom - spec));
                        }

                        list_add_tail(&dom->head, &pm->domains);
                        INIT_LIST_HEAD(&dom->list);
                        dom->func = sdom->func;
                        dom->addr = addr;
                        dom->signal_nr = sdom->signal_nr;

                        ssig = (sdom++)->signal;
                        while (ssig->name) {
                                struct nvkm_perfsig *sig =
                                        &dom->signal[ssig->signal];
                                sig->name = ssig->name;
                                ret = nvkm_perfsrc_new(pm, sig, ssig->source);
                                if (ret)
                                        return ret;
                                ssig++;
                        }

                        addr += size_domain;
                }
        }

        return 0;
}

static int
nvkm_pm_fini(struct nvkm_engine *engine, bool suspend)
{
        struct nvkm_pm *pm = nvkm_pm(engine);
        if (pm->func->fini)
                pm->func->fini(pm);
        return 0;
}

static void *
nvkm_pm_dtor(struct nvkm_engine *engine)
{
        struct nvkm_pm *pm = nvkm_pm(engine);
        struct nvkm_perfdom *dom, *next_dom;
        struct nvkm_perfsrc *src, *next_src;

        list_for_each_entry_safe(dom, next_dom, &pm->domains, head) {
                list_del(&dom->head);
                kfree(dom);
        }

        list_for_each_entry_safe(src, next_src, &pm->sources, head) {
                list_del(&src->head);
                kfree(src->name);
                kfree(src);
        }

        return pm;
}

static const struct nvkm_engine_func
nvkm_pm = {
        .dtor = nvkm_pm_dtor,
        .fini = nvkm_pm_fini,
        .base.sclass = nvkm_pm_oclass_get,
};

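/* Common constructor used by the per-generation PM implementations: hook up
 * the hardware-specific function table and register the engine.
 */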
int
nvkm_pm_ctor(const struct nvkm_pm_func *func, struct nvkm_device *device,
             int index, struct nvkm_pm *pm)
{
        pm->func = func;
        INIT_LIST_HEAD(&pm->domains);
        INIT_LIST_HEAD(&pm->sources);
        return nvkm_engine_ctor(&nvkm_pm, device, index, true, &pm->engine);
}