/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
27 #include <core/client.h>
28 #include <core/option.h>
29 #include <core/notify.h>
30 #include <core/parent.h>
31 #include <subdev/bios.h>
32 #include <subdev/fb.h>
33 #include <subdev/instmem.h>
35 #include <nvif/class.h>
36 #include <nvif/unpack.h>
/* Global registry of all nvkm devices; nv_devices_mutex guards both the
 * list itself and the uniqueness of device handles during create/destroy. */
static DEFINE_MUTEX(nv_devices_mutex);
static LIST_HEAD(nv_devices);
42 nvkm_device_find(u64 name
)
44 struct nvkm_device
*device
, *match
= NULL
;
45 mutex_lock(&nv_devices_mutex
);
46 list_for_each_entry(device
, &nv_devices
, head
) {
47 if (device
->handle
== name
) {
52 mutex_unlock(&nv_devices_mutex
);
57 nvkm_device_list(u64
*name
, int size
)
59 struct nvkm_device
*device
;
61 mutex_lock(&nv_devices_mutex
);
62 list_for_each_entry(device
, &nv_devices
, head
) {
64 name
[nr
- 1] = device
->handle
;
66 mutex_unlock(&nv_devices_mutex
);
70 /******************************************************************************
71 * nvkm_devobj (0x0080): class implementation
72 *****************************************************************************/
75 struct nvkm_parent base
;
76 struct nvkm_object
*subdev
[NVDEV_SUBDEV_NR
];
/* NV_DEVICE_V0_INFO method: fills the client-visible nv_device_info_v0
 * structure with platform (bus type), chipset family, chipset/revision ids
 * and RAM sizes.
 *
 * NOTE(review): this block was damaged in extraction.  The storage class /
 * return type line, the "union { ... } *args = data;" wrapper, several
 * switch-case labels, closing braces and the final return are missing —
 * restore them from the upstream file before building.
 */
nvkm_devobj_info(struct nvkm_object *object, void *data, u32 size)
	struct nvkm_device *device = nv_device(object);
	struct nvkm_fb *pfb = nvkm_fb(device);
	struct nvkm_instmem *imem = nvkm_instmem(device);
	/* NOTE(review): this field belongs inside the missing "args" union */
	struct nv_device_info_v0 v0;

	nv_ioctl(object, "device info size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nv_ioctl(object, "device info vers %d\n", args->v0.version);
	/* NOTE(review): unpack-failure else-branch missing here */

	/* classify how the device is attached (IGP/AGP/PCIe/PCI/SoC) */
	switch (device->chipset) {
	/* NOTE(review): IGP chipset case labels missing here */
		args->v0.platform = NV_DEVICE_INFO_V0_IGP;
	/* default: probe the PCI capability chain; no pdev implies SoC */
		if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP))
			args->v0.platform = NV_DEVICE_INFO_V0_AGP;
		if (pci_is_pcie(device->pdev))
			args->v0.platform = NV_DEVICE_INFO_V0_PCIE;
			args->v0.platform = NV_DEVICE_INFO_V0_PCI;
		args->v0.platform = NV_DEVICE_INFO_V0_SOC;

	/* map internal card_type to the API family constants */
	switch (device->card_type) {
	case NV_04: args->v0.family = NV_DEVICE_INFO_V0_TNT; break;
	/* NOTE(review): NV_10 case label missing before NV_11 */
	case NV_11: args->v0.family = NV_DEVICE_INFO_V0_CELSIUS; break;
	case NV_20: args->v0.family = NV_DEVICE_INFO_V0_KELVIN; break;
	case NV_30: args->v0.family = NV_DEVICE_INFO_V0_RANKINE; break;
	case NV_40: args->v0.family = NV_DEVICE_INFO_V0_CURIE; break;
	case NV_50: args->v0.family = NV_DEVICE_INFO_V0_TESLA; break;
	case NV_C0: args->v0.family = NV_DEVICE_INFO_V0_FERMI; break;
	case NV_E0: args->v0.family = NV_DEVICE_INFO_V0_KEPLER; break;
	case GM100: args->v0.family = NV_DEVICE_INFO_V0_MAXWELL; break;
	/* NOTE(review): default case missing here */

	args->v0.chipset = device->chipset;
	args->v0.revision = device->chiprev;
	/* ram_size is total VRAM; ram_user excludes instmem's reservation */
	if (pfb) args->v0.ram_size = args->v0.ram_user = pfb->ram->size;
	else args->v0.ram_size = args->v0.ram_user = 0;
	if (imem) args->v0.ram_user = args->v0.ram_user - imem->reserved;
	/* NOTE(review): final return and closing braces missing here */
149 nvkm_devobj_mthd(struct nvkm_object
*object
, u32 mthd
, void *data
, u32 size
)
152 case NV_DEVICE_V0_INFO
:
153 return nvkm_devobj_info(object
, data
, size
);
161 nvkm_devobj_rd08(struct nvkm_object
*object
, u64 addr
)
163 return nv_rd08(object
->engine
, addr
);
167 nvkm_devobj_rd16(struct nvkm_object
*object
, u64 addr
)
169 return nv_rd16(object
->engine
, addr
);
173 nvkm_devobj_rd32(struct nvkm_object
*object
, u64 addr
)
175 return nv_rd32(object
->engine
, addr
);
179 nvkm_devobj_wr08(struct nvkm_object
*object
, u64 addr
, u8 data
)
181 nv_wr08(object
->engine
, addr
, data
);
185 nvkm_devobj_wr16(struct nvkm_object
*object
, u64 addr
, u16 data
)
187 nv_wr16(object
->engine
, addr
, data
);
191 nvkm_devobj_wr32(struct nvkm_object
*object
, u64 addr
, u32 data
)
193 nv_wr32(object
->engine
, addr
, data
);
197 nvkm_devobj_map(struct nvkm_object
*object
, u64
*addr
, u32
*size
)
199 struct nvkm_device
*device
= nv_device(object
);
200 *addr
= nv_device_resource_start(device
, 0);
201 *size
= nv_device_resource_len(device
, 0);
205 static const u64 disable_map
[] = {
206 [NVDEV_SUBDEV_VBIOS
] = NV_DEVICE_V0_DISABLE_VBIOS
,
207 [NVDEV_SUBDEV_DEVINIT
] = NV_DEVICE_V0_DISABLE_CORE
,
208 [NVDEV_SUBDEV_GPIO
] = NV_DEVICE_V0_DISABLE_CORE
,
209 [NVDEV_SUBDEV_I2C
] = NV_DEVICE_V0_DISABLE_CORE
,
210 [NVDEV_SUBDEV_CLK
] = NV_DEVICE_V0_DISABLE_CORE
,
211 [NVDEV_SUBDEV_MXM
] = NV_DEVICE_V0_DISABLE_CORE
,
212 [NVDEV_SUBDEV_MC
] = NV_DEVICE_V0_DISABLE_CORE
,
213 [NVDEV_SUBDEV_BUS
] = NV_DEVICE_V0_DISABLE_CORE
,
214 [NVDEV_SUBDEV_TIMER
] = NV_DEVICE_V0_DISABLE_CORE
,
215 [NVDEV_SUBDEV_FB
] = NV_DEVICE_V0_DISABLE_CORE
,
216 [NVDEV_SUBDEV_LTC
] = NV_DEVICE_V0_DISABLE_CORE
,
217 [NVDEV_SUBDEV_IBUS
] = NV_DEVICE_V0_DISABLE_CORE
,
218 [NVDEV_SUBDEV_INSTMEM
] = NV_DEVICE_V0_DISABLE_CORE
,
219 [NVDEV_SUBDEV_MMU
] = NV_DEVICE_V0_DISABLE_CORE
,
220 [NVDEV_SUBDEV_BAR
] = NV_DEVICE_V0_DISABLE_CORE
,
221 [NVDEV_SUBDEV_VOLT
] = NV_DEVICE_V0_DISABLE_CORE
,
222 [NVDEV_SUBDEV_THERM
] = NV_DEVICE_V0_DISABLE_CORE
,
223 [NVDEV_SUBDEV_PMU
] = NV_DEVICE_V0_DISABLE_CORE
,
224 [NVDEV_SUBDEV_FUSE
] = NV_DEVICE_V0_DISABLE_CORE
,
225 [NVDEV_ENGINE_DMAOBJ
] = NV_DEVICE_V0_DISABLE_CORE
,
226 [NVDEV_ENGINE_PM
] = NV_DEVICE_V0_DISABLE_CORE
,
227 [NVDEV_ENGINE_FIFO
] = NV_DEVICE_V0_DISABLE_FIFO
,
228 [NVDEV_ENGINE_SW
] = NV_DEVICE_V0_DISABLE_FIFO
,
229 [NVDEV_ENGINE_GR
] = NV_DEVICE_V0_DISABLE_GR
,
230 [NVDEV_ENGINE_MPEG
] = NV_DEVICE_V0_DISABLE_MPEG
,
231 [NVDEV_ENGINE_ME
] = NV_DEVICE_V0_DISABLE_ME
,
232 [NVDEV_ENGINE_VP
] = NV_DEVICE_V0_DISABLE_VP
,
233 [NVDEV_ENGINE_CIPHER
] = NV_DEVICE_V0_DISABLE_CIPHER
,
234 [NVDEV_ENGINE_BSP
] = NV_DEVICE_V0_DISABLE_BSP
,
235 [NVDEV_ENGINE_MSPPP
] = NV_DEVICE_V0_DISABLE_MSPPP
,
236 [NVDEV_ENGINE_CE0
] = NV_DEVICE_V0_DISABLE_CE0
,
237 [NVDEV_ENGINE_CE1
] = NV_DEVICE_V0_DISABLE_CE1
,
238 [NVDEV_ENGINE_CE2
] = NV_DEVICE_V0_DISABLE_CE2
,
239 [NVDEV_ENGINE_VIC
] = NV_DEVICE_V0_DISABLE_VIC
,
240 [NVDEV_ENGINE_MSENC
] = NV_DEVICE_V0_DISABLE_MSENC
,
241 [NVDEV_ENGINE_DISP
] = NV_DEVICE_V0_DISABLE_DISP
,
242 [NVDEV_ENGINE_MSVLD
] = NV_DEVICE_V0_DISABLE_MSVLD
,
243 [NVDEV_ENGINE_SEC
] = NV_DEVICE_V0_DISABLE_SEC
,
244 [NVDEV_SUBDEV_NR
] = 0,
248 nvkm_devobj_dtor(struct nvkm_object
*object
)
250 struct nvkm_devobj
*devobj
= (void *)object
;
253 for (i
= NVDEV_SUBDEV_NR
- 1; i
>= 0; i
--)
254 nvkm_object_ref(NULL
, &devobj
->subdev
[i
]);
256 nvkm_parent_destroy(&devobj
->base
);
259 static struct nvkm_oclass
260 nvkm_devobj_oclass_super
= {
262 .ofuncs
= &(struct nvkm_ofuncs
) {
263 .dtor
= nvkm_devobj_dtor
,
264 .init
= _nvkm_parent_init
,
265 .fini
= _nvkm_parent_fini
,
266 .mthd
= nvkm_devobj_mthd
,
267 .map
= nvkm_devobj_map
,
268 .rd08
= nvkm_devobj_rd08
,
269 .rd16
= nvkm_devobj_rd16
,
270 .rd32
= nvkm_devobj_rd32
,
271 .wr08
= nvkm_devobj_wr08
,
272 .wr16
= nvkm_devobj_wr16
,
273 .wr32
= nvkm_devobj_wr32
,
/* Constructor for the 0x0080 device object.  Unpacks the client's
 * nv_device_v0 args, locates/identifies the physical device (reading
 * PMC BOOT0 and the strap register over a temporary mmio mapping when the
 * chipset is not yet known), maps the register aperture, then instantiates
 * every enabled subdev/engine, initialising devinit's dependencies early.
 *
 * NOTE(review): this block was damaged in extraction.  The storage class /
 * return type, the "union { ... } *args = data;" wrapper, local decls
 * (ret, i, c, boot0, strap, map), error paths, several case labels and
 * many closing braces are missing — restore from the upstream file.
 */
nvkm_devobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		 struct nvkm_oclass *oclass, void *data, u32 size,
		 struct nvkm_object **pobject)
	/* NOTE(review): belongs inside the missing "args" union */
	struct nv_device_v0 v0;
	struct nvkm_client *client = nv_client(parent);
	struct nvkm_device *device;
	struct nvkm_devobj *devobj;
	u64 disable, mmio_base, mmio_size;

	nv_ioctl(parent, "create device size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nv_ioctl(parent, "create device v%d device %016llx "
				 "disable %016llx debug0 %016llx\n",
			 args->v0.version, args->v0.device,
			 args->v0.disable, args->v0.debug0);
	/* NOTE(review): unpack-failure else-branch missing here */

	/* give priviledged clients register access */
	oclass = &nvkm_devobj_oclass_super;

	/* find the device subdev that matches what the client requested */
	device = nv_device(client->device);
	if (args->v0.device != ~0) {
		device = nvkm_device_find(args->v0.device);
		/* NOTE(review): NULL-device error check missing here */

	ret = nvkm_parent_create(parent, nv_object(device), oclass, 0,
				 /* NOTE(review): pclass argument line missing */
				 (1ULL << NVDEV_ENGINE_DMAOBJ) |
				 (1ULL << NVDEV_ENGINE_FIFO) |
				 (1ULL << NVDEV_ENGINE_DISP) |
				 (1ULL << NVDEV_ENGINE_PM), &devobj);
	*pobject = nv_object(devobj);
	/* NOTE(review): ret error check missing here */

	mmio_base = nv_device_resource_start(device, 0);
	mmio_size = nv_device_resource_len(device, 0);

	/* translate api disable mask into internal mapping */
	disable = args->v0.debug0;
	for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
		if (args->v0.disable & disable_map[i])
			disable |= (1ULL << i);

	/* identify the chipset, and determine classes of subdev/engines */
	if (!(args->v0.disable & NV_DEVICE_V0_DISABLE_IDENTIFY) &&
	    !device->card_type) {
		map = ioremap(mmio_base, 0x102000);
		/* NOTE(review): ioremap-failure check missing here */

		/* switch mmio to cpu's native endianness */
		if (ioread32_native(map + 0x000004) != 0x00000000)
		if (ioread32_native(map + 0x000004) == 0x00000000)
			iowrite32_native(0x01000001, map + 0x000004);

		/* read boot0 and strapping information */
		boot0 = ioread32_native(map + 0x000000);
		strap = ioread32_native(map + 0x101000);

		/* determine chipset and derive architecture from it */
		if ((boot0 & 0x1f000000) > 0) {
			device->chipset = (boot0 & 0x1ff00000) >> 20;
			device->chiprev = (boot0 & 0x000000ff);
			switch (device->chipset & 0x1f0) {
			/* NOTE(review): 0x010 case label missing here;
			 * 0x461 is a bitmask of NV1x chips that are NV_10 */
				if (0x461 & (1 << (device->chipset & 0xf)))
					device->card_type = NV_10;
					device->card_type = NV_11;
					device->chiprev = 0x00;
			case 0x020: device->card_type = NV_20; break;
			case 0x030: device->card_type = NV_30; break;
			/* NOTE(review): 0x040/0x050 etc. case labels missing */
			case 0x060: device->card_type = NV_40; break;
			case 0x0a0: device->card_type = NV_50; break;
			case 0x0d0: device->card_type = NV_C0; break;
			case 0x100: device->card_type = NV_E0; break;
			case 0x120: device->card_type = GM100; break;
		/* pre-NV10 parts encode the chipset differently in BOOT0 */
		if ((boot0 & 0xff00fff0) == 0x20004000) {
			if (boot0 & 0x00f00000)
				device->chipset = 0x05;
				device->chipset = 0x04;
			device->card_type = NV_04;

		/* pick the per-generation identify routine */
		switch (device->card_type) {
		case NV_04: ret = nv04_identify(device); break;
		/* NOTE(review): NV_10 case label missing before NV_11 */
		case NV_11: ret = nv10_identify(device); break;
		case NV_20: ret = nv20_identify(device); break;
		case NV_30: ret = nv30_identify(device); break;
		case NV_40: ret = nv40_identify(device); break;
		case NV_50: ret = nv50_identify(device); break;
		case NV_C0: ret = gf100_identify(device); break;
		case NV_E0: ret = gk104_identify(device); break;
		case GM100: ret = gm100_identify(device); break;
			nv_error(device, "unknown chipset, 0x%08x\n", boot0);

		nv_info(device, "BOOT0 : 0x%08x\n", boot0);
		nv_info(device, "Chipset: %s (NV%02X)\n",
			device->cname, device->chipset);
		nv_info(device, "Family : NV%02X\n", device->card_type);

		/* determine frequency of timing crystal */
		if ( device->card_type <= NV_10 || device->chipset < 0x17 ||
		    (device->chipset >= 0x20 && device->chipset < 0x25))
			/* NOTE(review): strap masking + switch header missing */
		case 0x00000000: device->crystal = 13500; break;
		case 0x00000040: device->crystal = 14318; break;
		case 0x00400000: device->crystal = 27000; break;
		case 0x00400040: device->crystal = 25000; break;

		nv_debug(device, "crystal freq: %dKHz\n", device->crystal);

	/* identify explicitly disabled: run with a stub device */
	if ( (args->v0.disable & NV_DEVICE_V0_DISABLE_IDENTIFY)) {
		device->cname = "NULL";
		device->oclass[NVDEV_SUBDEV_VBIOS] = &nvkm_bios_oclass;

	if (!(args->v0.disable & NV_DEVICE_V0_DISABLE_MMIO) &&
	    !nv_subdev(device)->mmio) {
		nv_subdev(device)->mmio = ioremap(mmio_base, mmio_size);
		if (!nv_subdev(device)->mmio) {
			nv_error(device, "unable to map device registers\n");

	/* ensure requested subsystems are available for use */
	for (i = 1, c = 1; i < NVDEV_SUBDEV_NR; i++) {
		if (!(oclass = device->oclass[i]) || (disable & (1ULL << i)))
			/* NOTE(review): "continue;" missing here */

		if (device->subdev[i]) {
			/* already created by another client: share it */
			nvkm_object_ref(device->subdev[i],
					&devobj->subdev[i]);
			ret = nvkm_object_ctor(nv_object(device), NULL, oclass,
					       NULL, i, &devobj->subdev[i]);
			/* NOTE(review): error handling missing here */

			device->subdev[i] = devobj->subdev[i];

		/* note: can't init *any* subdevs until devinit has been run
		 * due to not knowing exactly what the vbios init tables will
		 * mess with.  devinit also can't be run until all of its
		 * dependencies have been created.
		 *
		 * this code delays init of any subdev until all of devinit's
		 * dependencies have been created, and then initialises each
		 * subdev in turn as they're created.
		 */
		while (i >= NVDEV_SUBDEV_DEVINIT_LAST && c <= i) {
			struct nvkm_object *subdev = devobj->subdev[c++];
			if (subdev && !nv_iclass(subdev, NV_ENGINE_CLASS)) {
				ret = nvkm_object_inc(subdev);
				/* NOTE(review): error check missing here */
				atomic_dec(&nv_object(device)->usecount);
				nvkm_subdev_reset(subdev);
	/* NOTE(review): final return and closing braces missing here */
495 static struct nvkm_ofuncs
496 nvkm_devobj_ofuncs
= {
497 .ctor
= nvkm_devobj_ctor
,
498 .dtor
= nvkm_devobj_dtor
,
499 .init
= _nvkm_parent_init
,
500 .fini
= _nvkm_parent_fini
,
501 .mthd
= nvkm_devobj_mthd
,
504 /******************************************************************************
505 * nvkm_device: engine functions
506 *****************************************************************************/
511 struct nvkm_object
*device
= nv_object(obj
);
512 if (device
->engine
== NULL
) {
513 while (device
&& device
->parent
)
514 device
= device
->parent
;
516 device
= &nv_object(obj
)->engine
->subdev
.object
;
517 if (device
&& device
->parent
)
518 device
= device
->parent
;
520 #if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
521 if (unlikely(!device
))
522 nv_assert("BAD CAST -> NvDevice, 0x%08x\n", nv_hclass(obj
));
524 return (void *)device
;
527 static struct nvkm_oclass
528 nvkm_device_sclass
[] = {
529 { 0x0080, &nvkm_devobj_ofuncs
},
534 nvkm_device_event_ctor(struct nvkm_object
*object
, void *data
, u32 size
,
535 struct nvkm_notify
*notify
)
537 if (!WARN_ON(size
!= 0)) {
546 static const struct nvkm_event_func
547 nvkm_device_event_func
= {
548 .ctor
= nvkm_device_event_ctor
,
552 nvkm_device_fini(struct nvkm_object
*object
, bool suspend
)
554 struct nvkm_device
*device
= (void *)object
;
555 struct nvkm_object
*subdev
;
558 for (i
= NVDEV_SUBDEV_NR
- 1; i
>= 0; i
--) {
559 if ((subdev
= device
->subdev
[i
])) {
560 if (!nv_iclass(subdev
, NV_ENGINE_CLASS
)) {
561 ret
= nvkm_object_dec(subdev
, suspend
);
568 ret
= nvkm_acpi_fini(device
, suspend
);
570 for (; ret
&& i
< NVDEV_SUBDEV_NR
; i
++) {
571 if ((subdev
= device
->subdev
[i
])) {
572 if (!nv_iclass(subdev
, NV_ENGINE_CLASS
)) {
573 ret
= nvkm_object_inc(subdev
);
585 nvkm_device_init(struct nvkm_object
*object
)
587 struct nvkm_device
*device
= (void *)object
;
588 struct nvkm_object
*subdev
;
591 ret
= nvkm_acpi_init(device
);
595 for (i
= 0; i
< NVDEV_SUBDEV_NR
; i
++) {
596 if ((subdev
= device
->subdev
[i
])) {
597 if (!nv_iclass(subdev
, NV_ENGINE_CLASS
)) {
598 ret
= nvkm_object_inc(subdev
);
602 nvkm_subdev_reset(subdev
);
609 for (--i
; ret
&& i
>= 0; i
--) {
610 if ((subdev
= device
->subdev
[i
])) {
611 if (!nv_iclass(subdev
, NV_ENGINE_CLASS
))
612 nvkm_object_dec(subdev
, false);
617 nvkm_acpi_fini(device
, false);
622 nvkm_device_dtor(struct nvkm_object
*object
)
624 struct nvkm_device
*device
= (void *)object
;
626 nvkm_event_fini(&device
->event
);
628 mutex_lock(&nv_devices_mutex
);
629 list_del(&device
->head
);
630 mutex_unlock(&nv_devices_mutex
);
632 if (nv_subdev(device
)->mmio
)
633 iounmap(nv_subdev(device
)->mmio
);
635 nvkm_engine_destroy(&device
->engine
);
639 nv_device_resource_start(struct nvkm_device
*device
, unsigned int bar
)
641 if (nv_device_is_pci(device
)) {
642 return pci_resource_start(device
->pdev
, bar
);
644 struct resource
*res
;
645 res
= platform_get_resource(device
->platformdev
,
646 IORESOURCE_MEM
, bar
);
654 nv_device_resource_len(struct nvkm_device
*device
, unsigned int bar
)
656 if (nv_device_is_pci(device
)) {
657 return pci_resource_len(device
->pdev
, bar
);
659 struct resource
*res
;
660 res
= platform_get_resource(device
->platformdev
,
661 IORESOURCE_MEM
, bar
);
664 return resource_size(res
);
669 nv_device_get_irq(struct nvkm_device
*device
, bool stall
)
671 if (nv_device_is_pci(device
)) {
672 return device
->pdev
->irq
;
674 return platform_get_irq_byname(device
->platformdev
,
675 stall
? "stall" : "nonstall");
679 static struct nvkm_oclass
680 nvkm_device_oclass
= {
681 .handle
= NV_ENGINE(DEVICE
, 0x00),
682 .ofuncs
= &(struct nvkm_ofuncs
) {
683 .dtor
= nvkm_device_dtor
,
684 .init
= nvkm_device_init
,
685 .fini
= nvkm_device_fini
,
690 nvkm_device_create_(void *dev
, enum nv_bus_type type
, u64 name
,
691 const char *sname
, const char *cfg
, const char *dbg
,
692 int length
, void **pobject
)
694 struct nvkm_device
*device
;
697 mutex_lock(&nv_devices_mutex
);
698 list_for_each_entry(device
, &nv_devices
, head
) {
699 if (device
->handle
== name
)
703 ret
= nvkm_engine_create_(NULL
, NULL
, &nvkm_device_oclass
, true,
704 "DEVICE", "device", length
, pobject
);
713 case NVKM_BUS_PLATFORM
:
714 device
->platformdev
= dev
;
717 device
->handle
= name
;
718 device
->cfgopt
= cfg
;
719 device
->dbgopt
= dbg
;
720 device
->name
= sname
;
722 nv_subdev(device
)->debug
= nvkm_dbgopt(device
->dbgopt
, "DEVICE");
723 nv_engine(device
)->sclass
= nvkm_device_sclass
;
724 list_add(&device
->head
, &nv_devices
);
726 ret
= nvkm_event_init(&nvkm_device_event_func
, 1, 1, &device
->event
);
728 mutex_unlock(&nv_devices_mutex
);