/* drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c */
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"

#include <core/client.h>
#include <core/option.h>

#include <nvif/class.h>
#include <nvif/if0002.h>
#include <nvif/if0003.h>
#include <nvif/ioctl.h>
#include <nvif/unpack.h>
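
/* Number of performance domains currently registered on this PM engine. */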
static u8
nvkm_pm_count_perfdom(struct nvkm_pm *pm)
{
	struct nvkm_perfdom *dom;
	u8 domain_nr = 0;

	list_for_each_entry(dom, &pm->domains, head)
		domain_nr++;

	return domain_nr;
}

static u16
nvkm_perfdom_count_perfsig(struct nvkm_perfdom *dom)
{
	u16 signal_nr = 0;
	int i;

	if (dom) {
		for (i = 0; i < dom->signal_nr; i++) {
			if (dom->signal[i].name)
				signal_nr++;
		}
	}
	return signal_nr;
}

static struct nvkm_perfdom *
nvkm_perfdom_find(struct nvkm_pm *pm, int di)
{
	struct nvkm_perfdom *dom;
	int tmp = 0;

	list_for_each_entry(dom, &pm->domains, head) {
		if (tmp++ == di)
			return dom;
	}
	return NULL;
}
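
/* Look up signal @si in domain @di.  On the first call *pdom may be NULL;
 * the resolved domain is cached back through @pdom so callers iterating
 * over several signals of the same domain only walk the list once.
 * Unused signal slots (no name) are treated as not found.
 */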
static struct nvkm_perfsig *
nvkm_perfsig_find(struct nvkm_pm *pm, u8 di, u8 si, struct nvkm_perfdom **pdom)
{
	struct nvkm_perfdom *dom = *pdom;

	if (dom == NULL) {
		dom = nvkm_perfdom_find(pm, di);
		if (dom == NULL)
			return NULL;
		*pdom = dom;
	}

	if (!dom->signal[si].name)
		return NULL;
	return &dom->signal[si];
}

static u8
nvkm_perfsig_count_perfsrc(struct nvkm_perfsig *sig)
{
	u8 source_nr = 0, i;

	for (i = 0; i < ARRAY_SIZE(sig->source); i++) {
		if (sig->source[i])
			source_nr++;
	}
	return source_nr;
}
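
/* Map a per-signal source ID back to its nvkm_perfsrc.  Source IDs are
 * 1-based indices into the global pm->sources list, and a source is only
 * returned if it is actually wired to @sig.
 */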
static struct nvkm_perfsrc *
nvkm_perfsrc_find(struct nvkm_pm *pm, struct nvkm_perfsig *sig, int si)
{
	struct nvkm_perfsrc *src;
	bool found = false;
	int tmp = 1; /* Sources ID start from 1 */
	u8 i;

	for (i = 0; i < ARRAY_SIZE(sig->source) && sig->source[i]; i++) {
		if (sig->source[i] == si) {
			found = true;
			break;
		}
	}

	if (found) {
		list_for_each_entry(src, &pm->sources, head) {
			if (tmp++ == si)
				return src;
		}
	}

	return NULL;
}
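
/* Program the multiplexers for every source used by @ctr.  The low 32 bits
 * of ctr->source[i][j] select the source, the high 32 bits carry the value
 * written into the mux field; nvkm_perfsrc_disable() below undoes this.
 */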
static int
nvkm_perfsrc_enable(struct nvkm_pm *pm, struct nvkm_perfctr *ctr)
{
	struct nvkm_subdev *subdev = &pm->engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_perfdom *dom = NULL;
	struct nvkm_perfsig *sig;
	struct nvkm_perfsrc *src;
	u32 mask, value;
	int i, j;

	for (i = 0; i < 4; i++) {
		for (j = 0; j < 8 && ctr->source[i][j]; j++) {
			sig = nvkm_perfsig_find(pm, ctr->domain,
						ctr->signal[i], &dom);
			if (!sig)
				return -EINVAL;

			src = nvkm_perfsrc_find(pm, sig, ctr->source[i][j]);
			if (!src)
				return -EINVAL;

			/* set enable bit if needed */
			mask = value = 0x00000000;
			if (src->enable)
				mask = value = 0x80000000;
			mask |= (src->mask << src->shift);
			value |= ((ctr->source[i][j] >> 32) << src->shift);

			/* enable the source */
			nvkm_mask(device, src->addr, mask, value);
			nvkm_debug(subdev,
				   "enabled source %08x %08x %08x\n",
				   src->addr, mask, value);
		}
	}
	return 0;
}

static int
nvkm_perfsrc_disable(struct nvkm_pm *pm, struct nvkm_perfctr *ctr)
{
	struct nvkm_subdev *subdev = &pm->engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_perfdom *dom = NULL;
	struct nvkm_perfsig *sig;
	struct nvkm_perfsrc *src;
	u32 mask;
	int i, j;

	for (i = 0; i < 4; i++) {
		for (j = 0; j < 8 && ctr->source[i][j]; j++) {
			sig = nvkm_perfsig_find(pm, ctr->domain,
						ctr->signal[i], &dom);
			if (!sig)
				return -EINVAL;

			src = nvkm_perfsrc_find(pm, sig, ctr->source[i][j]);
			if (!src)
				return -EINVAL;

			/* unset enable bit if needed */
			mask = 0x00000000;
			if (src->enable)
				mask = 0x80000000;
			mask |= (src->mask << src->shift);

			/* disable the source */
			nvkm_mask(device, src->addr, mask, 0);
			nvkm_debug(subdev, "disabled source %08x %08x\n",
				   src->addr, mask);
		}
	}
	return 0;
}
/*******************************************************************************
 * Perfdom object classes
 ******************************************************************************/
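/* NVIF_PERFDOM_V0_INIT: program each allocated counter, enable its sources,
 * and start the first batch of counters so a later SAMPLE/READ has data.
 */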
static int
nvkm_perfdom_init(struct nvkm_perfdom *dom, void *data, u32 size)
{
	union {
		struct nvif_perfdom_init none;
	} *args = data;
	struct nvkm_object *object = &dom->object;
	struct nvkm_pm *pm = dom->perfmon->pm;
	int ret = -ENOSYS, i;

	nvif_ioctl(object, "perfdom init size %d\n", size);
	if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
		nvif_ioctl(object, "perfdom init\n");
	} else
		return ret;

	for (i = 0; i < 4; i++) {
		if (dom->ctr[i]) {
			dom->func->init(pm, dom, dom->ctr[i]);

			/* enable sources */
			nvkm_perfsrc_enable(pm, dom->ctr[i]);
		}
	}

	/* start next batch of counters for sampling */
	dom->func->next(pm, dom);
	return 0;
}
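
/* NVIF_PERFDOM_V0_SAMPLE: advance every domain to its next counter batch;
 * pm->sequence is incremented once per sample pass.
 */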
static int
nvkm_perfdom_sample(struct nvkm_perfdom *dom, void *data, u32 size)
{
	union {
		struct nvif_perfdom_sample none;
	} *args = data;
	struct nvkm_object *object = &dom->object;
	struct nvkm_pm *pm = dom->perfmon->pm;
	int ret = -ENOSYS;

	nvif_ioctl(object, "perfdom sample size %d\n", size);
	if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
		nvif_ioctl(object, "perfdom sample\n");
	} else
		return ret;
	pm->sequence++;

	/* sample previous batch of counters */
	list_for_each_entry(dom, &pm->domains, head)
		dom->func->next(pm, dom);

	return 0;
}
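
/* NVIF_PERFDOM_V0_READ: read back the counter values.  Until a sample has
 * completed (dom->clk still zero) the call returns -EAGAIN.
 */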
static int
nvkm_perfdom_read(struct nvkm_perfdom *dom, void *data, u32 size)
{
	union {
		struct nvif_perfdom_read_v0 v0;
	} *args = data;
	struct nvkm_object *object = &dom->object;
	struct nvkm_pm *pm = dom->perfmon->pm;
	int ret = -ENOSYS, i;

	nvif_ioctl(object, "perfdom read size %d\n", size);
	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
		nvif_ioctl(object, "perfdom read vers %d\n", args->v0.version);
	} else
		return ret;

	for (i = 0; i < 4; i++) {
		if (dom->ctr[i])
			dom->func->read(pm, dom, dom->ctr[i]);
	}

	if (!dom->clk)
		return -EAGAIN;

	for (i = 0; i < 4; i++)
		if (dom->ctr[i])
			args->v0.ctr[i] = dom->ctr[i]->ctr;
	args->v0.clk = dom->clk;
	return 0;
}
static int
nvkm_perfdom_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
	struct nvkm_perfdom *dom = nvkm_perfdom(object);
	switch (mthd) {
	case NVIF_PERFDOM_V0_INIT:
		return nvkm_perfdom_init(dom, data, size);
	case NVIF_PERFDOM_V0_SAMPLE:
		return nvkm_perfdom_sample(dom, data, size);
	case NVIF_PERFDOM_V0_READ:
		return nvkm_perfdom_read(dom, data, size);
	default:
		break;
	}
	return -EINVAL;
}

static void *
nvkm_perfdom_dtor(struct nvkm_object *object)
{
	struct nvkm_perfdom *dom = nvkm_perfdom(object);
	struct nvkm_pm *pm = dom->perfmon->pm;
	int i;

	for (i = 0; i < 4; i++) {
		struct nvkm_perfctr *ctr = dom->ctr[i];
		if (ctr) {
			nvkm_perfsrc_disable(pm, ctr);
			if (ctr->head.next)
				list_del(&ctr->head);
		}
		kfree(ctr);
	}

	return dom;
}
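
/* Allocate a counter in @slot, recording the signal indices (relative to
 * dom->signal[]) and their source chains, and queue it on the domain.
 */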
static int
nvkm_perfctr_new(struct nvkm_perfdom *dom, int slot, u8 domain,
		 struct nvkm_perfsig *signal[4], u64 source[4][8],
		 u16 logic_op, struct nvkm_perfctr **pctr)
{
	struct nvkm_perfctr *ctr;
	int i, j;

	if (!dom)
		return -EINVAL;

	ctr = *pctr = kzalloc(sizeof(*ctr), GFP_KERNEL);
	if (!ctr)
		return -ENOMEM;

	ctr->domain = domain;
	ctr->logic_op = logic_op;
	ctr->slot = slot;
	for (i = 0; i < 4; i++) {
		if (signal[i]) {
			ctr->signal[i] = signal[i] - dom->signal;
			for (j = 0; j < 8; j++)
				ctr->source[i][j] = source[i][j];
		}
	}
	list_add_tail(&ctr->head, &dom->list);

	return 0;
}

static const struct nvkm_object_func
nvkm_perfdom = {
	.dtor = nvkm_perfdom_dtor,
	.mthd = nvkm_perfdom_mthd,
};
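
/* NVIF perfdom constructor: validate every requested signal/source pair
 * against the target domain, build up to four counters, then create the
 * object that owns them.
 */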
static int
nvkm_perfdom_new_(struct nvkm_perfmon *perfmon,
		  const struct nvkm_oclass *oclass, void *data, u32 size,
		  struct nvkm_object **pobject)
{
	union {
		struct nvif_perfdom_v0 v0;
	} *args = data;
	struct nvkm_pm *pm = perfmon->pm;
	struct nvkm_object *parent = oclass->parent;
	struct nvkm_perfdom *sdom = NULL;
	struct nvkm_perfctr *ctr[4] = {};
	struct nvkm_perfdom *dom;
	int c, s, m;
	int ret = -ENOSYS;

	nvif_ioctl(parent, "create perfdom size %d\n", size);
	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
		nvif_ioctl(parent, "create perfdom vers %d dom %d mode %02x\n",
			   args->v0.version, args->v0.domain, args->v0.mode);
	} else
		return ret;

	for (c = 0; c < ARRAY_SIZE(args->v0.ctr); c++) {
		struct nvkm_perfsig *sig[4] = {};
		u64 src[4][8] = {};

		for (s = 0; s < ARRAY_SIZE(args->v0.ctr[c].signal); s++) {
			sig[s] = nvkm_perfsig_find(pm, args->v0.domain,
						   args->v0.ctr[c].signal[s],
						   &sdom);
			if (args->v0.ctr[c].signal[s] && !sig[s])
				return -EINVAL;

			for (m = 0; m < 8; m++) {
				src[s][m] = args->v0.ctr[c].source[s][m];
				if (src[s][m] && !nvkm_perfsrc_find(pm, sig[s],
								    src[s][m]))
					return -EINVAL;
			}
		}

		ret = nvkm_perfctr_new(sdom, c, args->v0.domain, sig, src,
				       args->v0.ctr[c].logic_op, &ctr[c]);
		if (ret)
			return ret;
	}

	if (!sdom)
		return -EINVAL;

	if (!(dom = kzalloc(sizeof(*dom), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_object_ctor(&nvkm_perfdom, oclass, &dom->object);
	dom->perfmon = perfmon;
	*pobject = &dom->object;

	dom->func = sdom->func;
	dom->addr = sdom->addr;
	dom->mode = args->v0.mode;
	for (c = 0; c < ARRAY_SIZE(ctr); c++)
		dom->ctr[c] = ctr[c];
	return 0;
}
/*******************************************************************************
 * Perfmon object classes
 ******************************************************************************/
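/* The three QUERY_* methods share an iterator protocol: the caller passes
 * iter == 0 to start, each call returns at most one entry plus the cursor
 * for the next one, and an all-ones iterator (0xff/0xffff) marks the end.
 */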
static int
nvkm_perfmon_mthd_query_domain(struct nvkm_perfmon *perfmon,
			       void *data, u32 size)
{
	union {
		struct nvif_perfmon_query_domain_v0 v0;
	} *args = data;
	struct nvkm_object *object = &perfmon->object;
	struct nvkm_pm *pm = perfmon->pm;
	struct nvkm_perfdom *dom;
	u8 domain_nr;
	int di, ret = -ENOSYS;

	nvif_ioctl(object, "perfmon query domain size %d\n", size);
	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
		nvif_ioctl(object, "perfmon domain vers %d iter %02x\n",
			   args->v0.version, args->v0.iter);
		di = (args->v0.iter & 0xff) - 1;
	} else
		return ret;

	domain_nr = nvkm_pm_count_perfdom(pm);
	if (di >= (int)domain_nr)
		return -EINVAL;

	if (di >= 0) {
		dom = nvkm_perfdom_find(pm, di);
		if (dom == NULL)
			return -EINVAL;

		args->v0.id = di;
		args->v0.signal_nr = nvkm_perfdom_count_perfsig(dom);
		strncpy(args->v0.name, dom->name, sizeof(args->v0.name) - 1);

		/* Currently only global counters (PCOUNTER) are implemented
		 * but this will be different for local counters (MP). */
		args->v0.counter_nr = 4;
	}

	if (++di < domain_nr) {
		args->v0.iter = ++di;
		return 0;
	}

	args->v0.iter = 0xff;
	return 0;
}
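
/* Unnamed signals are normally skipped; the NvPmShowAll config option
 * exposes them, and NvPmUnnamed (defaulting to NvPmShowAll) reports raw
 * "/<domain>/<signal>" names instead of the symbolic ones.
 */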
static int
nvkm_perfmon_mthd_query_signal(struct nvkm_perfmon *perfmon,
			       void *data, u32 size)
{
	union {
		struct nvif_perfmon_query_signal_v0 v0;
	} *args = data;
	struct nvkm_object *object = &perfmon->object;
	struct nvkm_pm *pm = perfmon->pm;
	struct nvkm_device *device = pm->engine.subdev.device;
	struct nvkm_perfdom *dom;
	struct nvkm_perfsig *sig;
	const bool all = nvkm_boolopt(device->cfgopt, "NvPmShowAll", false);
	const bool raw = nvkm_boolopt(device->cfgopt, "NvPmUnnamed", all);
	int ret = -ENOSYS, si;

	nvif_ioctl(object, "perfmon query signal size %d\n", size);
	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
		nvif_ioctl(object,
			   "perfmon query signal vers %d dom %d iter %04x\n",
			   args->v0.version, args->v0.domain, args->v0.iter);
		si = (args->v0.iter & 0xffff) - 1;
	} else
		return ret;

	dom = nvkm_perfdom_find(pm, args->v0.domain);
	if (dom == NULL || si >= (int)dom->signal_nr)
		return -EINVAL;

	if (si >= 0) {
		sig = &dom->signal[si];
		if (raw || !sig->name) {
			snprintf(args->v0.name, sizeof(args->v0.name),
				 "/%s/%02x", dom->name, si);
		} else {
			strncpy(args->v0.name, sig->name,
				sizeof(args->v0.name) - 1);
		}

		args->v0.signal = si;
		args->v0.source_nr = nvkm_perfsig_count_perfsrc(sig);
	}

	while (++si < dom->signal_nr) {
		if (all || dom->signal[si].name) {
			args->v0.iter = ++si;
			return 0;
		}
	}

	args->v0.iter = 0xffff;
	return 0;
}
static int
nvkm_perfmon_mthd_query_source(struct nvkm_perfmon *perfmon,
			       void *data, u32 size)
{
	union {
		struct nvif_perfmon_query_source_v0 v0;
	} *args = data;
	struct nvkm_object *object = &perfmon->object;
	struct nvkm_pm *pm = perfmon->pm;
	struct nvkm_perfdom *dom = NULL;
	struct nvkm_perfsig *sig;
	struct nvkm_perfsrc *src;
	u8 source_nr = 0;
	int si, ret = -ENOSYS;

	nvif_ioctl(object, "perfmon query source size %d\n", size);
	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
		nvif_ioctl(object,
			   "perfmon source vers %d dom %d sig %02x iter %02x\n",
			   args->v0.version, args->v0.domain, args->v0.signal,
			   args->v0.iter);
		si = (args->v0.iter & 0xff) - 1;
	} else
		return ret;

	sig = nvkm_perfsig_find(pm, args->v0.domain, args->v0.signal, &dom);
	if (!sig)
		return -EINVAL;

	source_nr = nvkm_perfsig_count_perfsrc(sig);
	if (si >= (int)source_nr)
		return -EINVAL;

	if (si >= 0) {
		src = nvkm_perfsrc_find(pm, sig, sig->source[si]);
		if (!src)
			return -EINVAL;

		args->v0.source = sig->source[si];
		args->v0.mask = src->mask;
		strncpy(args->v0.name, src->name, sizeof(args->v0.name) - 1);
	}

	if (++si < source_nr) {
		args->v0.iter = ++si;
		return 0;
	}

	args->v0.iter = 0xff;
	return 0;
}

static int
nvkm_perfmon_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
	struct nvkm_perfmon *perfmon = nvkm_perfmon(object);
	switch (mthd) {
	case NVIF_PERFMON_V0_QUERY_DOMAIN:
		return nvkm_perfmon_mthd_query_domain(perfmon, data, size);
	case NVIF_PERFMON_V0_QUERY_SIGNAL:
		return nvkm_perfmon_mthd_query_signal(perfmon, data, size);
	case NVIF_PERFMON_V0_QUERY_SOURCE:
		return nvkm_perfmon_mthd_query_source(perfmon, data, size);
	default:
		break;
	}
	return -EINVAL;
}
static int
nvkm_perfmon_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
		       struct nvkm_object **pobject)
{
	struct nvkm_perfmon *perfmon = nvkm_perfmon(oclass->parent);
	return nvkm_perfdom_new_(perfmon, oclass, data, size, pobject);
}

static int
nvkm_perfmon_child_get(struct nvkm_object *object, int index,
		       struct nvkm_oclass *oclass)
{
	if (index == 0) {
		oclass->base.oclass = NVIF_CLASS_PERFDOM;
		oclass->base.minver = 0;
		oclass->base.maxver = 0;
		oclass->ctor = nvkm_perfmon_child_new;
		return 0;
	}
	return -EINVAL;
}

static void *
nvkm_perfmon_dtor(struct nvkm_object *object)
{
	struct nvkm_perfmon *perfmon = nvkm_perfmon(object);
	struct nvkm_pm *pm = perfmon->pm;
	mutex_lock(&pm->engine.subdev.mutex);
	if (pm->perfmon == &perfmon->object)
		pm->perfmon = NULL;
	mutex_unlock(&pm->engine.subdev.mutex);
	return perfmon;
}

static const struct nvkm_object_func
nvkm_perfmon = {
	.dtor = nvkm_perfmon_dtor,
	.mthd = nvkm_perfmon_mthd,
	.sclass = nvkm_perfmon_child_get,
};

static int
nvkm_perfmon_new(struct nvkm_pm *pm, const struct nvkm_oclass *oclass,
		 void *data, u32 size, struct nvkm_object **pobject)
{
	struct nvkm_perfmon *perfmon;

	if (!(perfmon = kzalloc(sizeof(*perfmon), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_object_ctor(&nvkm_perfmon, oclass, &perfmon->object);
	perfmon->pm = pm;
	*pobject = &perfmon->object;
	return 0;
}
/*******************************************************************************
 * PPM engine/subdev functions
 ******************************************************************************/
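
/* Only one client may own the PM engine at a time: the first perfmon object
 * created claims pm->perfmon, later attempts fail with -EBUSY.
 */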
static int
nvkm_pm_oclass_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
		   void *data, u32 size, struct nvkm_object **pobject)
{
	struct nvkm_pm *pm = nvkm_pm(oclass->engine);
	int ret;

	ret = nvkm_perfmon_new(pm, oclass, data, size, pobject);
	if (ret)
		return ret;

	mutex_lock(&pm->engine.subdev.mutex);
	if (pm->perfmon == NULL)
		pm->perfmon = *pobject;
	ret = (pm->perfmon == *pobject) ? 0 : -EBUSY;
	mutex_unlock(&pm->engine.subdev.mutex);
	return ret;
}

static const struct nvkm_device_oclass
nvkm_pm_oclass = {
	.base.oclass = NVIF_CLASS_PERFMON,
	.base.minver = -1,
	.base.maxver = -1,
	.ctor = nvkm_pm_oclass_new,
};

static int
nvkm_pm_oclass_get(struct nvkm_oclass *oclass, int index,
		   const struct nvkm_device_oclass **class)
{
	if (index == 0) {
		oclass->base = nvkm_pm_oclass.base;
		*class = &nvkm_pm_oclass;
		return index;
	}
	return 1;
}
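
/* Build the source list for @sig from its specification: one nvkm_perfsrc
 * is created per unique (register, shift) mux, named "<source>_<mux>" after
 * the spec entries, and its 1-based ID is recorded in sig->source[].
 */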
static int
nvkm_perfsrc_new(struct nvkm_pm *pm, struct nvkm_perfsig *sig,
		 const struct nvkm_specsrc *spec)
{
	const struct nvkm_specsrc *ssrc;
	const struct nvkm_specmux *smux;
	struct nvkm_perfsrc *src;
	u8 source_nr = 0;

	if (!spec) {
		/* No sources are defined for this signal. */
		return 0;
	}

	ssrc = spec;
	while (ssrc->name) {
		smux = ssrc->mux;
		while (smux->name) {
			bool found = false;
			u8 source_id = 0;
			u32 len;

			list_for_each_entry(src, &pm->sources, head) {
				if (src->addr == ssrc->addr &&
				    src->shift == smux->shift) {
					found = true;
					break;
				}
				source_id++;
			}

			if (!found) {
				src = kzalloc(sizeof(*src), GFP_KERNEL);
				if (!src)
					return -ENOMEM;

				src->addr = ssrc->addr;
				src->mask = smux->mask;
				src->shift = smux->shift;
				src->enable = smux->enable;

				len = strlen(ssrc->name) +
				      strlen(smux->name) + 2;
				src->name = kzalloc(len, GFP_KERNEL);
				if (!src->name) {
					kfree(src);
					return -ENOMEM;
				}
				snprintf(src->name, len, "%s_%s", ssrc->name,
					 smux->name);

				list_add_tail(&src->head, &pm->sources);
			}

			sig->source[source_nr++] = source_id + 1;
			smux++;
		}
		ssrc++;
	}

	return 0;
}
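
/* Instantiate performance domains from a chipset specification.  @mask
 * selects the units to instantiate (one copy of the spec per set bit,
 * spaced @size_unit apart); domains within a unit are @size_domain apart,
 * and each named signal gets its sources registered from the spec.
 */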
int
nvkm_perfdom_new(struct nvkm_pm *pm, const char *name, u32 mask,
		 u32 base, u32 size_unit, u32 size_domain,
		 const struct nvkm_specdom *spec)
{
	const struct nvkm_specdom *sdom;
	const struct nvkm_specsig *ssig;
	struct nvkm_perfdom *dom;
	int ret, i;

	for (i = 0; i == 0 || mask; i++) {
		u32 addr = base + (i * size_unit);
		if (i && !(mask & (1 << i)))
			continue;

		sdom = spec;
		while (sdom->signal_nr) {
			dom = kzalloc(sizeof(*dom) + sdom->signal_nr *
				      sizeof(*dom->signal), GFP_KERNEL);
			if (!dom)
				return -ENOMEM;

			if (mask) {
				snprintf(dom->name, sizeof(dom->name),
					 "%s/%02x/%02x", name, i,
					 (int)(sdom - spec));
			} else {
				snprintf(dom->name, sizeof(dom->name),
					 "%s/%02x", name, (int)(sdom - spec));
			}

			list_add_tail(&dom->head, &pm->domains);
			INIT_LIST_HEAD(&dom->list);
			dom->func = sdom->func;
			dom->addr = addr;
			dom->signal_nr = sdom->signal_nr;

			ssig = (sdom++)->signal;
			while (ssig->name) {
				struct nvkm_perfsig *sig =
					&dom->signal[ssig->signal];
				sig->name = ssig->name;
				ret = nvkm_perfsrc_new(pm, sig, ssig->source);
				if (ret)
					return ret;
				ssig++;
			}

			addr += size_domain;
		}

		mask &= ~(1 << i);
	}

	return 0;
}
static int
nvkm_pm_fini(struct nvkm_engine *engine, bool suspend)
{
	struct nvkm_pm *pm = nvkm_pm(engine);
	if (pm->func->fini)
		pm->func->fini(pm);
	return 0;
}

static void *
nvkm_pm_dtor(struct nvkm_engine *engine)
{
	struct nvkm_pm *pm = nvkm_pm(engine);
	struct nvkm_perfdom *dom, *next_dom;
	struct nvkm_perfsrc *src, *next_src;

	list_for_each_entry_safe(dom, next_dom, &pm->domains, head) {
		list_del(&dom->head);
		kfree(dom);
	}

	list_for_each_entry_safe(src, next_src, &pm->sources, head) {
		list_del(&src->head);
		kfree(src->name);
		kfree(src);
	}

	return pm;
}

static const struct nvkm_engine_func
nvkm_pm = {
	.dtor = nvkm_pm_dtor,
	.fini = nvkm_pm_fini,
	.base.sclass = nvkm_pm_oclass_get,
};
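
/* Shared constructor used by the chipset-specific PM implementations:
 * set up the domain/source lists and register the engine with the device.
 */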
int
nvkm_pm_ctor(const struct nvkm_pm_func *func, struct nvkm_device *device,
	     int index, struct nvkm_pm *pm)
{
	pm->func = func;
	INIT_LIST_HEAD(&pm->domains);
	INIT_LIST_HEAD(&pm->sources);
	return nvkm_engine_ctor(&nvkm_pm, device, index, true, &pm->engine);
}