/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"
#include <core/client.h>
#include <core/option.h>

#include <nvif/class.h>
#include <nvif/ioctl.h>
#include <nvif/unpack.h>
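
/* NVKM performance-monitoring (PM) engine.  Chipset code registers counter
 * domains built from static spec tables; each domain groups signals, and a
 * signal may be fed by one of several shared sources (multiplexers).  The
 * whole thing is exposed to userspace through NVIF perfmon/perfdom objects.
 */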
static u8
nvkm_pm_count_perfdom(struct nvkm_pm *pm)
{
	struct nvkm_perfdom *dom;
	u8 domain_nr = 0;

	list_for_each_entry(dom, &pm->domains, head)
		domain_nr++;
	return domain_nr;
}
static u16
nvkm_perfdom_count_perfsig(struct nvkm_perfdom *dom)
{
	u16 signal_nr = 0;
	int i;

	if (dom) {
		for (i = 0; i < dom->signal_nr; i++) {
			if (dom->signal[i].name)
				signal_nr++;
		}
	}
	return signal_nr;
}
static struct nvkm_perfdom *
nvkm_perfdom_find(struct nvkm_pm *pm, int di)
{
	struct nvkm_perfdom *dom;
	int tmp = 0;

	list_for_each_entry(dom, &pm->domains, head) {
		if (tmp++ == di)
			return dom;
	}
	return NULL;
}
static struct nvkm_perfsig *
nvkm_perfsig_find(struct nvkm_pm *pm, u8 di, u8 si, struct nvkm_perfdom **pdom)
{
	struct nvkm_perfdom *dom = *pdom;

	if (dom == NULL) {
		dom = nvkm_perfdom_find(pm, di);
		if (dom == NULL)
			return NULL;
		*pdom = dom;
	}

	if (!dom->signal[si].name)
		return NULL;
	return &dom->signal[si];
}
static u8
nvkm_perfsig_count_perfsrc(struct nvkm_perfsig *sig)
{
	u8 source_nr = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(sig->source); i++) {
		if (sig->source[i])
			source_nr++;
	}
	return source_nr;
}
static struct nvkm_perfsrc *
nvkm_perfsrc_find(struct nvkm_pm *pm, struct nvkm_perfsig *sig, int si)
{
	struct nvkm_perfsrc *src;
	bool found = false;
	int tmp = 1; /* source IDs start from 1 */
	int i;

	for (i = 0; i < ARRAY_SIZE(sig->source) && sig->source[i]; i++) {
		if (sig->source[i] == si) {
			found = true;
			break;
		}
	}

	if (found) {
		list_for_each_entry(src, &pm->sources, head) {
			if (tmp++ == si)
				return src;
		}
	}

	return NULL;
}
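
/* A counter's ctr->source entries pack two things into one u64: the low
 * 32 bits hold the source ID (a 1-based index into pm->sources), while the
 * high 32 bits carry the value to program into that source's mux field.
 */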
static int
nvkm_perfsrc_enable(struct nvkm_pm *pm, struct nvkm_perfctr *ctr)
{
	struct nvkm_subdev *subdev = &pm->engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_perfdom *dom = NULL;
	struct nvkm_perfsig *sig;
	struct nvkm_perfsrc *src;
	u32 mask, value;
	int i, j;

	for (i = 0; i < 4; i++) {
		for (j = 0; j < 8 && ctr->source[i][j]; j++) {
			sig = nvkm_perfsig_find(pm, ctr->domain,
						ctr->signal[i], &dom);
			if (!sig)
				return -EINVAL;

			src = nvkm_perfsrc_find(pm, sig, ctr->source[i][j]);
			if (!src)
				return -EINVAL;

			/* set enable bit if needed */
			mask = value = 0x00000000;
			if (src->enable)
				mask = value = 0x80000000;
			mask |= (src->mask << src->shift);
			value |= ((ctr->source[i][j] >> 32) << src->shift);

			/* enable the source */
			nvkm_mask(device, src->addr, mask, value);
			nvkm_debug(subdev,
				   "enabled source %08x %08x %08x\n",
				   src->addr, mask, value);
		}
	}
	return 0;
}
static int
nvkm_perfsrc_disable(struct nvkm_pm *pm, struct nvkm_perfctr *ctr)
{
	struct nvkm_subdev *subdev = &pm->engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_perfdom *dom = NULL;
	struct nvkm_perfsig *sig;
	struct nvkm_perfsrc *src;
	u32 mask;
	int i, j;

	for (i = 0; i < 4; i++) {
		for (j = 0; j < 8 && ctr->source[i][j]; j++) {
			sig = nvkm_perfsig_find(pm, ctr->domain,
						ctr->signal[i], &dom);
			if (!sig)
				return -EINVAL;

			src = nvkm_perfsrc_find(pm, sig, ctr->source[i][j]);
			if (!src)
				return -EINVAL;

			/* unset enable bit if needed */
			mask = 0x00000000;
			if (src->enable)
				mask = 0x80000000;
			mask |= (src->mask << src->shift);

			/* disable the source */
			nvkm_mask(device, src->addr, mask, 0);
			nvkm_debug(subdev, "disabled source %08x %08x\n",
				   src->addr, mask);
		}
	}
	return 0;
}
/*******************************************************************************
 * Perfdom object classes
 ******************************************************************************/
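
/* A perfdom object is created from a perfmon object and owns up to four
 * hardware counters.  INIT programs and enables those counters, SAMPLE
 * latches the next batch of values on every domain, and READ returns the
 * latched counter values together with the domain clock.
 */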
static int
nvkm_perfdom_init(struct nvkm_perfdom *dom, void *data, u32 size)
{
	union {
		struct nvif_perfdom_init none;
	} *args = data;
	struct nvkm_object *object = &dom->object;
	struct nvkm_pm *pm = dom->perfmon->pm;
	int ret, i;

	nvif_ioctl(object, "perfdom init size %d\n", size);
	if (nvif_unvers(args->none)) {
		nvif_ioctl(object, "perfdom init\n");
	} else
		return ret;

	for (i = 0; i < 4; i++) {
		if (dom->ctr[i]) {
			dom->func->init(pm, dom, dom->ctr[i]);

			/* enable sources */
			nvkm_perfsrc_enable(pm, dom->ctr[i]);
		}
	}

	/* start next batch of counters for sampling */
	dom->func->next(pm, dom);
	return 0;
}
static int
nvkm_perfdom_sample(struct nvkm_perfdom *dom, void *data, u32 size)
{
	union {
		struct nvif_perfdom_sample none;
	} *args = data;
	struct nvkm_object *object = &dom->object;
	struct nvkm_pm *pm = dom->perfmon->pm;
	int ret;

	nvif_ioctl(object, "perfdom sample size %d\n", size);
	if (nvif_unvers(args->none)) {
		nvif_ioctl(object, "perfdom sample\n");
	} else
		return ret;

	/* sample previous batch of counters */
	list_for_each_entry(dom, &pm->domains, head)
		dom->func->next(pm, dom);

	return 0;
}
static int
nvkm_perfdom_read(struct nvkm_perfdom *dom, void *data, u32 size)
{
	union {
		struct nvif_perfdom_read_v0 v0;
	} *args = data;
	struct nvkm_object *object = &dom->object;
	struct nvkm_pm *pm = dom->perfmon->pm;
	int ret, i;

	nvif_ioctl(object, "perfdom read size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(object, "perfdom read vers %d\n", args->v0.version);
	} else
		return ret;

	for (i = 0; i < 4; i++) {
		if (dom->ctr[i])
			dom->func->read(pm, dom, dom->ctr[i]);
	}

	if (!dom->clk)
		return -EAGAIN;

	for (i = 0; i < 4; i++)
		if (dom->ctr[i])
			args->v0.ctr[i] = dom->ctr[i]->ctr;
	args->v0.clk = dom->clk;
	return 0;
}
static int
nvkm_perfdom_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
	struct nvkm_perfdom *dom = nvkm_perfdom(object);
	switch (mthd) {
	case NVIF_PERFDOM_V0_INIT:
		return nvkm_perfdom_init(dom, data, size);
	case NVIF_PERFDOM_V0_SAMPLE:
		return nvkm_perfdom_sample(dom, data, size);
	case NVIF_PERFDOM_V0_READ:
		return nvkm_perfdom_read(dom, data, size);
	default:
		break;
	}
	return -EINVAL;
}
static void *
nvkm_perfdom_dtor(struct nvkm_object *object)
{
	struct nvkm_perfdom *dom = nvkm_perfdom(object);
	struct nvkm_pm *pm = dom->perfmon->pm;
	int i;

	for (i = 0; i < 4; i++) {
		struct nvkm_perfctr *ctr = dom->ctr[i];
		if (ctr) {
			nvkm_perfsrc_disable(pm, ctr);
			if (ctr->head.next)
				list_del(&ctr->head);
		}
		kfree(ctr);
	}

	return dom;
}
static int
nvkm_perfctr_new(struct nvkm_perfdom *dom, int slot, u8 domain,
		 struct nvkm_perfsig *signal[4], u64 source[4][8],
		 u16 logic_op, struct nvkm_perfctr **pctr)
{
	struct nvkm_perfctr *ctr;
	int i, j;

	if (!dom)
		return -EINVAL;

	ctr = *pctr = kzalloc(sizeof(*ctr), GFP_KERNEL);
	if (!ctr)
		return -ENOMEM;

	ctr->domain   = domain;
	ctr->logic_op = logic_op;
	ctr->slot     = slot;
	for (i = 0; i < 4; i++) {
		if (signal[i]) {
			ctr->signal[i] = signal[i] - dom->signal;
			for (j = 0; j < 8; j++)
				ctr->source[i][j] = source[i][j];
		}
	}
	list_add_tail(&ctr->head, &dom->list);

	return 0;
}
static const struct nvkm_object_func
nvkm_perfdom = {
	.dtor = nvkm_perfdom_dtor,
	.mthd = nvkm_perfdom_mthd,
};
static int
nvkm_perfdom_new_(struct nvkm_perfmon *perfmon,
		  const struct nvkm_oclass *oclass, void *data, u32 size,
		  struct nvkm_object **pobject)
{
	union {
		struct nvif_perfdom_v0 v0;
	} *args = data;
	struct nvkm_pm *pm = perfmon->pm;
	struct nvkm_object *parent = oclass->parent;
	struct nvkm_perfdom *sdom = NULL;
	struct nvkm_perfctr *ctr[4] = {};
	struct nvkm_perfdom *dom;
	int c, s, m;
	int ret;

	nvif_ioctl(parent, "create perfdom size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create perfdom vers %d dom %d mode %02x\n",
			   args->v0.version, args->v0.domain, args->v0.mode);
	} else
		return ret;

	for (c = 0; c < ARRAY_SIZE(args->v0.ctr); c++) {
		struct nvkm_perfsig *sig[4] = {};
		u64 src[4][8] = {};

		for (s = 0; s < ARRAY_SIZE(args->v0.ctr[c].signal); s++) {
			sig[s] = nvkm_perfsig_find(pm, args->v0.domain,
						   args->v0.ctr[c].signal[s],
						   &sdom);
			if (args->v0.ctr[c].signal[s] && !sig[s])
				return -EINVAL;

			for (m = 0; m < 8; m++) {
				src[s][m] = args->v0.ctr[c].source[s][m];
				if (src[s][m] && !nvkm_perfsrc_find(pm, sig[s],
								    src[s][m]))
					return -EINVAL;
			}
		}

		ret = nvkm_perfctr_new(sdom, c, args->v0.domain, sig, src,
				       args->v0.ctr[c].logic_op, &ctr[c]);
		if (ret)
			return ret;
	}

	if (!sdom)
		return -EINVAL;

	if (!(dom = kzalloc(sizeof(*dom), GFP_KERNEL)))
		return -ENOMEM;

	nvkm_object_ctor(&nvkm_perfdom, oclass, &dom->object);
	dom->perfmon = perfmon;
	*pobject = &dom->object;

	dom->func = sdom->func;
	dom->addr = sdom->addr;
	dom->mode = args->v0.mode;
	for (c = 0; c < ARRAY_SIZE(ctr); c++)
		dom->ctr[c] = ctr[c];
	return 0;
}
/*******************************************************************************
 * Perfmon object classes
 ******************************************************************************/
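
/* The perfmon object only implements enumeration: QUERY_DOMAIN, QUERY_SIGNAL
 * and QUERY_SOURCE walk the domain/signal/source lists using the "iter"
 * field as a 1-based cursor, returning 0xff (0xffff for signals) once the
 * enumeration is complete.
 */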
static int
nvkm_perfmon_mthd_query_domain(struct nvkm_perfmon *perfmon,
			       void *data, u32 size)
{
	union {
		struct nvif_perfmon_query_domain_v0 v0;
	} *args = data;
	struct nvkm_object *object = &perfmon->object;
	struct nvkm_pm *pm = perfmon->pm;
	struct nvkm_perfdom *dom;
	u8 domain_nr;
	int di, ret;

	nvif_ioctl(object, "perfmon query domain size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(object, "perfmon domain vers %d iter %02x\n",
			   args->v0.version, args->v0.iter);
		di = (args->v0.iter & 0xff) - 1;
	} else
		return ret;

	domain_nr = nvkm_pm_count_perfdom(pm);
	if (di >= (int)domain_nr)
		return -EINVAL;

	if (di >= 0) {
		dom = nvkm_perfdom_find(pm, di);
		if (dom == NULL)
			return -EINVAL;

		args->v0.signal_nr = nvkm_perfdom_count_perfsig(dom);
		strncpy(args->v0.name, dom->name, sizeof(args->v0.name));

		/* Currently only global counters (PCOUNTER) are implemented
		 * but this will be different for local counters (MP). */
		args->v0.counter_nr = 4;
	}

	if (++di < domain_nr) {
		args->v0.iter = ++di;
		return 0;
	}

	args->v0.iter = 0xff;
	return 0;
}
static int
nvkm_perfmon_mthd_query_signal(struct nvkm_perfmon *perfmon,
			       void *data, u32 size)
{
	union {
		struct nvif_perfmon_query_signal_v0 v0;
	} *args = data;
	struct nvkm_object *object = &perfmon->object;
	struct nvkm_pm *pm = perfmon->pm;
	struct nvkm_device *device = pm->engine.subdev.device;
	struct nvkm_perfdom *dom;
	struct nvkm_perfsig *sig;
	const bool all = nvkm_boolopt(device->cfgopt, "NvPmShowAll", false);
	const bool raw = nvkm_boolopt(device->cfgopt, "NvPmUnnamed", all);
	int ret, si;

	nvif_ioctl(object, "perfmon query signal size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(object,
			   "perfmon query signal vers %d dom %d iter %04x\n",
			   args->v0.version, args->v0.domain, args->v0.iter);
		si = (args->v0.iter & 0xffff) - 1;
	} else
		return ret;

	dom = nvkm_perfdom_find(pm, args->v0.domain);
	if (dom == NULL || si >= (int)dom->signal_nr)
		return -EINVAL;

	if (si >= 0) {
		sig = &dom->signal[si];
		if (raw || !sig->name) {
			snprintf(args->v0.name, sizeof(args->v0.name),
				 "/%s/%02x", dom->name, si);
		} else {
			strncpy(args->v0.name, sig->name,
				sizeof(args->v0.name));
		}

		args->v0.signal = si;
		args->v0.source_nr = nvkm_perfsig_count_perfsrc(sig);
	}

	while (++si < dom->signal_nr) {
		if (all || dom->signal[si].name) {
			args->v0.iter = ++si;
			return 0;
		}
	}

	args->v0.iter = 0xffff;
	return 0;
}
static int
nvkm_perfmon_mthd_query_source(struct nvkm_perfmon *perfmon,
			       void *data, u32 size)
{
	union {
		struct nvif_perfmon_query_source_v0 v0;
	} *args = data;
	struct nvkm_object *object = &perfmon->object;
	struct nvkm_pm *pm = perfmon->pm;
	struct nvkm_perfdom *dom = NULL;
	struct nvkm_perfsig *sig;
	struct nvkm_perfsrc *src;
	u8 source_nr = 0;
	int si, ret;

	nvif_ioctl(object, "perfmon query source size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(object,
			   "perfmon source vers %d dom %d sig %02x iter %02x\n",
			   args->v0.version, args->v0.domain, args->v0.signal,
			   args->v0.iter);
		si = (args->v0.iter & 0xff) - 1;
	} else
		return ret;

	sig = nvkm_perfsig_find(pm, args->v0.domain, args->v0.signal, &dom);
	if (!sig)
		return -EINVAL;

	source_nr = nvkm_perfsig_count_perfsrc(sig);
	if (si >= (int)source_nr)
		return -EINVAL;

	if (si >= 0) {
		src = nvkm_perfsrc_find(pm, sig, sig->source[si]);
		if (!src)
			return -EINVAL;

		args->v0.source = sig->source[si];
		args->v0.mask = src->mask;
		strncpy(args->v0.name, src->name, sizeof(args->v0.name));
	}

	if (++si < source_nr) {
		args->v0.iter = ++si;
		return 0;
	}

	args->v0.iter = 0xff;
	return 0;
}
static int
nvkm_perfmon_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
{
	struct nvkm_perfmon *perfmon = nvkm_perfmon(object);
	switch (mthd) {
	case NVIF_PERFMON_V0_QUERY_DOMAIN:
		return nvkm_perfmon_mthd_query_domain(perfmon, data, size);
	case NVIF_PERFMON_V0_QUERY_SIGNAL:
		return nvkm_perfmon_mthd_query_signal(perfmon, data, size);
	case NVIF_PERFMON_V0_QUERY_SOURCE:
		return nvkm_perfmon_mthd_query_source(perfmon, data, size);
	default:
		break;
	}
	return -EINVAL;
}
static int
nvkm_perfmon_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
		       struct nvkm_object **pobject)
{
	struct nvkm_perfmon *perfmon = nvkm_perfmon(oclass->parent);
	return nvkm_perfdom_new_(perfmon, oclass, data, size, pobject);
}
static int
nvkm_perfmon_child_get(struct nvkm_object *object, int index,
		       struct nvkm_oclass *oclass)
{
	if (index == 0) {
		oclass->base.oclass = NVIF_IOCTL_NEW_V0_PERFDOM;
		oclass->base.minver = 0;
		oclass->base.maxver = 0;
		oclass->ctor = nvkm_perfmon_child_new;
		return 0;
	}
	return -EINVAL;
}
static void *
nvkm_perfmon_dtor(struct nvkm_object *object)
{
	struct nvkm_perfmon *perfmon = nvkm_perfmon(object);
	struct nvkm_pm *pm = perfmon->pm;
	mutex_lock(&pm->engine.subdev.mutex);
	if (pm->perfmon == &perfmon->object)
		pm->perfmon = NULL;
	mutex_unlock(&pm->engine.subdev.mutex);
	return perfmon;
}
static const struct nvkm_object_func
nvkm_perfmon = {
	.dtor = nvkm_perfmon_dtor,
	.mthd = nvkm_perfmon_mthd,
	.sclass = nvkm_perfmon_child_get,
};
static int
nvkm_perfmon_new(struct nvkm_pm *pm, const struct nvkm_oclass *oclass,
		 void *data, u32 size, struct nvkm_object **pobject)
{
	struct nvkm_perfmon *perfmon;

	if (!(perfmon = kzalloc(sizeof(*perfmon), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_object_ctor(&nvkm_perfmon, oclass, &perfmon->object);
	perfmon->pm = pm;
	*pobject = &perfmon->object;
	return 0;
}
/*******************************************************************************
 * PPM engine/subdev functions
 ******************************************************************************/
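
/* Engine-level glue: only one perfmon object may be bound to the PM engine
 * at a time (enforced under the subdev mutex), and chipset code registers
 * its counter domains with nvkm_perfdom_new() from static spec tables.
 */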
static int
nvkm_pm_oclass_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
		   void *data, u32 size, struct nvkm_object **pobject)
{
	struct nvkm_pm *pm = nvkm_pm(oclass->engine);
	int ret;

	ret = nvkm_perfmon_new(pm, oclass, data, size, pobject);
	if (ret)
		return ret;

	mutex_lock(&pm->engine.subdev.mutex);
	if (pm->perfmon == NULL)
		pm->perfmon = *pobject;
	ret = (pm->perfmon == *pobject) ? 0 : -EBUSY;
	mutex_unlock(&pm->engine.subdev.mutex);
	return ret;
}
static const struct nvkm_device_oclass
nvkm_pm_oclass = {
	.base.oclass = NVIF_IOCTL_NEW_V0_PERFMON,
	.ctor = nvkm_pm_oclass_new,
};
static int
nvkm_pm_oclass_get(struct nvkm_oclass *oclass, int index,
		   const struct nvkm_device_oclass **class)
{
	if (index == 0) {
		oclass->base = nvkm_pm_oclass.base;
		*class = &nvkm_pm_oclass;
		return index;
	}
	return 1;
}
static int
nvkm_perfsrc_new(struct nvkm_pm *pm, struct nvkm_perfsig *sig,
		 const struct nvkm_specsrc *spec)
{
	const struct nvkm_specsrc *ssrc;
	const struct nvkm_specmux *smux;
	struct nvkm_perfsrc *src;
	u8 source_nr = 0;

	if (!spec) {
		/* No sources are defined for this signal. */
		return 0;
	}

	ssrc = spec;
	while (ssrc->name) {
		smux = ssrc->mux;
		while (smux->name) {
			bool found = false;
			u8 source_id = 0;
			u32 len;

			list_for_each_entry(src, &pm->sources, head) {
				if (src->addr == ssrc->addr &&
				    src->shift == smux->shift) {
					found = true;
					break;
				}
				source_id++;
			}

			if (!found) {
				src = kzalloc(sizeof(*src), GFP_KERNEL);
				if (!src)
					return -ENOMEM;

				src->addr   = ssrc->addr;
				src->mask   = smux->mask;
				src->shift  = smux->shift;
				src->enable = smux->enable;

				len = strlen(ssrc->name) +
				      strlen(smux->name) + 2;
				src->name = kzalloc(len, GFP_KERNEL);
				if (!src->name) {
					kfree(src);
					return -ENOMEM;
				}
				snprintf(src->name, len, "%s_%s", ssrc->name,
					 smux->name);

				list_add_tail(&src->head, &pm->sources);
			}

			sig->source[source_nr++] = source_id + 1;
			smux++;
		}
		ssrc++;
	}

	return 0;
}
int
nvkm_perfdom_new(struct nvkm_pm *pm, const char *name, u32 mask,
		 u32 base, u32 size_unit, u32 size_domain,
		 const struct nvkm_specdom *spec)
{
	const struct nvkm_specdom *sdom;
	const struct nvkm_specsig *ssig;
	struct nvkm_perfdom *dom;
	int ret, i;

	for (i = 0; i == 0 || mask; i++) {
		u32 addr = base + (i * size_unit);
		if (i && !(mask & (1 << i)))
			continue;
		mask &= ~(1 << i);

		sdom = spec;
		while (sdom->signal_nr) {
			dom = kzalloc(sizeof(*dom) + sdom->signal_nr *
				      sizeof(*dom->signal), GFP_KERNEL);
			if (!dom)
				return -ENOMEM;

			if (mask) {
				snprintf(dom->name, sizeof(dom->name),
					 "%s/%02x/%02x", name, i,
					 (int)(sdom - spec));
			} else {
				snprintf(dom->name, sizeof(dom->name),
					 "%s/%02x", name, (int)(sdom - spec));
			}

			list_add_tail(&dom->head, &pm->domains);
			INIT_LIST_HEAD(&dom->list);
			dom->func = sdom->func;
			dom->addr = addr;
			dom->signal_nr = sdom->signal_nr;

			ssig = (sdom++)->signal;
			while (ssig->name) {
				struct nvkm_perfsig *sig =
					&dom->signal[ssig->signal];
				sig->name = ssig->name;
				ret = nvkm_perfsrc_new(pm, sig, ssig->source);
				if (ret)
					return ret;
				ssig++;
			}

			addr += size_domain;
		}
	}

	return 0;
}
static int
nvkm_pm_fini(struct nvkm_engine *engine, bool suspend)
{
	struct nvkm_pm *pm = nvkm_pm(engine);
	if (pm->func->fini)
		pm->func->fini(pm);
	return 0;
}
static void *
nvkm_pm_dtor(struct nvkm_engine *engine)
{
	struct nvkm_pm *pm = nvkm_pm(engine);
	struct nvkm_perfdom *dom, *next_dom;
	struct nvkm_perfsrc *src, *next_src;

	list_for_each_entry_safe(dom, next_dom, &pm->domains, head) {
		list_del(&dom->head);
		kfree(dom);
	}

	list_for_each_entry_safe(src, next_src, &pm->sources, head) {
		list_del(&src->head);
		kfree(src->name);
		kfree(src);
	}

	return pm;
}
static const struct nvkm_engine_func
nvkm_pm = {
	.dtor = nvkm_pm_dtor,
	.fini = nvkm_pm_fini,
	.base.sclass = nvkm_pm_oclass_get,
};
int
nvkm_pm_ctor(const struct nvkm_pm_func *func, struct nvkm_device *device,
	     int index, struct nvkm_pm *pm)
{
	pm->func = func;
	INIT_LIST_HEAD(&pm->domains);
	INIT_LIST_HEAD(&pm->sources);
	return nvkm_engine_ctor(&nvkm_pm, device, index, 0, true, &pm->engine);
}