1 /* fuc microcode for gf100 PGRAPH/HUB
3 * Copyright 2011 Red Hat Inc.
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
// HUB context data: bounds of the HUB mmio save/restore list, and the
// per-channel mmio override list fetched from the channel's context.
27 hub_mmio_list_head: .b32 #hub_mmio_list_base // first entry of HUB mmio ctx list
28 hub_mmio_list_tail: .b32 #hub_mmio_list_next // one past the last entry
38 chan_mmio_count: .b32 0 // entry count of per-channel mmio list (0 = disabled)
39 chan_mmio_address: .b32 0 // VM address of per-channel mmio list
// mmio list entry; NOTE(review): value looks like addr | (count-related
// field in the high bits): 0x0417e91c = 0x17e91c + 0x04000000 — confirm
// encoding against the mmctx_data() macro definition (not visible here)
45 .b32 0x0417e91c // 0x17e91c, 2
50 // reports an exception to the host
52 // In: $r15 error code (see os.h)
// stash the error code where the host can read it (CC_SCRATCH[5])
55 nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(5), 0, $r15)
// raise a host-visible interrupt; NOTE(review): $r15 presumably holds the
// INTR_UP bit to set by this point (intervening lines elided) — confirm
57 nv_iowr(NV_PGRAPH_FECS_INTR_UP_SET, 0, $r15)
60 // HUB fuc initialisation, executed by triggering ucode start, will
61 // fall through to main loop after completion.
//
// Communicates with the host via CC_SCRATCH:
65 //     31:31: set to signal completion
67 //   31:0: total PGRAPH context size
// read falcon capabilities (used for per-chipset setup)
74 nv_iord($r1, NV_PGRAPH_FECS_CAPS, 0)
// enable host access to the FECS FIFO interface
80 mov $r2 NV_PGRAPH_FECS_ACCESS_FIFO
81 nv_iowr(NV_PGRAPH_FECS_ACCESS, 0, $r2)
83 // setup i0 handler, and route all interrupts to it
88 nv_iowr(NV_PGRAPH_FECS_INTR_ROUTE, 0, $r2)
90 // route HUB_CHSW_PULSE to fuc interrupt 8
91 mov $r2 0x2003 // { HUB_CHSW_PULSE, ZERO } -> intr 8
92 nv_iowr(NV_PGRAPH_FECS_IROUTE, 0, $r2)
94 // not sure what these are, route them because NVIDIA does, and
95 // the IRQ handler will signal the host if we ever get one.. we
96 // may find out if/why we need to handle these if so..
98 mov $r2 0x2004 // { 0x04, ZERO } -> intr 9
99 nv_iowr(NV_PGRAPH_FECS_IROUTE, 1, $r2)
100 mov $r2 0x200b // { HUB_FIRMWARE_MTHD, ZERO } -> intr 10
101 nv_iowr(NV_PGRAPH_FECS_IROUTE, 2, $r2)
102 mov $r2 0x200c // { 0x0c, ZERO } -> intr 15
// NOTE(review): IROUTE index 7 maps this source to intr 15 — the
// index-to-interrupt mapping is a property of the IROUTE register pair,
// confirm against the FECS register documentation
103 nv_iowr(NV_PGRAPH_FECS_IROUTE, 7, $r2)
105 // enable all INTR_UP interrupts
107 nv_iowr(NV_PGRAPH_FECS_INTR_UP_EN, 0, $r3)
109 // enable fifo, ctxsw, 9, fwmthd, 15 interrupts
111 nv_iowr(NV_PGRAPH_FECS_INTR_EN_SET, 0, $r2)
113 // fifo level triggered, rest edge
114 mov $r2 NV_PGRAPH_FECS_INTR_MODE_FIFO_LEVEL
115 nv_iowr(NV_PGRAPH_FECS_INTR_MODE, 0, $r2)
120 // fetch enabled GPC/ROP counts
// NOTE(review): 0x409604 presumably packs GPC and ROP counts into
// separate bitfields (extraction lines elided) — confirm
121 nv_rd32($r14, 0x409604)
123 st b32 D[$r0 + #rop_count] $r1
125 st b32 D[$r0 + #gpc_count] $r15
127 // set BAR_REQMASK to GPC mask
131 nv_iowr(NV_PGRAPH_FECS_BAR_MASK0, 0, $r1)
132 nv_iowr(NV_PGRAPH_FECS_BAR_MASK1, 0, $r1)
134 // context size calculation, reserve first 256 bytes for use by fuc
144 // calculate size of mmio context data
145 ld b32 $r14 D[$r0 + #hub_mmio_list_head]
146 ld b32 $r15 D[$r0 + #hub_mmio_list_tail]
149 // set mmctx base addresses now so we don't have to do it later,
150 // they don't (currently) ever change
152 nv_iowr(NV_PGRAPH_FECS_MMCTX_SAVE_SWBASE, 0, $r4)
153 nv_iowr(NV_PGRAPH_FECS_MMCTX_LOAD_SWBASE, 0, $r4)
157 nv_iowr(NV_PGRAPH_FECS_MMCTX_LOAD_COUNT, 0, $r15) // wtf??
159 // strands, base offset needs to be aligned to 256 bytes
164 call(strand_ctx_init)
167 // initialise each GPC in sequence by passing in the offset of its
168 // context data in GPCn_CC_SCRATCH[1], and starting its FUC (which
169 // has previously been uploaded by the host) running.
171 // the GPC fuc init sequence will set GPCn_CC_SCRATCH[0] bit 31
172 // when it has completed, and return the size of its context data
173 // in GPCn_CC_SCRATCH[1]
// loop counter: number of GPCs still to initialise
175 ld b32 $r3 D[$r0 + #gpc_count]
178 // setup, and start GPC ucode running
// $r4 holds the current GPC's MMIO base; 0x804/0x10c/0x104/0x100 are
// offsets of CC_SCRATCH[1]/CPUCTL-ish regs within the GPC falcon
179 add b32 $r14 $r4 0x804
181 call(nv_wr32) // CC_SCRATCH[1] = ctx offset
182 add b32 $r14 $r4 0x10c
185 add b32 $r14 $r4 0x104
186 call(nv_wr32) // ENTRY
187 add b32 $r14 $r4 0x100
188 mov $r15 2 // CTRL_START_TRIGGER
189 call(nv_wr32) // CTRL
191 // wait for it to complete, and adjust context size
192 add b32 $r14 $r4 0x800 // GPCn_CC_SCRATCH[0] (completion flag)
197 add b32 $r14 $r4 0x804 // GPCn_CC_SCRATCH[1] (GPC ctx size)
212 // save context size, and tell host we're ready
213 nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(1), 0, $r1)
// set the completion bit (31:31) the host polls for
216 nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_SET(0), 0, $r1)
218 // Main program loop, very simple, sleeps until woken up by the interrupt
219 // handler, pulls a command from the queue and executes its handler
222 // sleep until we have something to do
230 // context switch, requested by GPU?
232 bra ne #main_not_ctx_switch
// fetch the currently-bound channel and the requested next channel
234 nv_iord($r1, NV_PGRAPH_FECS_CHAN_ADDR, 0)
235 nv_iord($r2, NV_PGRAPH_FECS_CHAN_NEXT, 0)
// no next channel: just save the previous one and go idle
240 bra e #chsw_prev_no_next
// make the incoming channel current
261 nv_iowr(NV_PGRAPH_FECS_CHAN_ADDR, 0, $r2)
270 // ack the context switch request
272 mov $r2 NV_PGRAPH_FECS_CHSW_ACK
273 nv_iowr(NV_PGRAPH_FECS_CHSW, 0, $r2)
277 // request to set current channel? (*not* a context switch)
280 bra ne #main_not_ctx_chan
285 // request to store current channel context?
288 bra ne #main_not_ctx_save
// unrecognised command: flag E_BAD_COMMAND back to the host
298 or $r15 E_BAD_COMMAND
// signal command completion to the host via CC_SCRATCH[0]
305 nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_SET(0), 0, $r2)
// interrupt handler: classifies pending FECS interrupts, queues work for
// main(), reports unhandled sources to the host, then acks everything.
322 // incoming fifo command?
323 nv_iord($r10, NV_PGRAPH_FECS_INTR, 0)
324 and $r11 $r10 NV_PGRAPH_FECS_INTR_FIFO
326 // queue incoming fifo command for later processing
// $r14/$r15 = command/data pair from the host's FIFO interface
328 nv_iord($r14, NV_PGRAPH_FECS_FIFO_CMD, 0)
329 nv_iord($r15, NV_PGRAPH_FECS_FIFO_DATA, 0)
// pop the entry so the host can submit the next one
333 nv_iowr(NV_PGRAPH_FECS_FIFO_ACK, 0, $r14)
335 // context switch request?
337 and $r11 $r10 NV_PGRAPH_FECS_INTR_CHSW
339 // enqueue a context switch for later processing
// firmware method interrupt?
346 and $r11 $r10 NV_PGRAPH_FECS_INTR_FWMTHD
348 // none we handle; report to host and ack
// report the trapped method's data and address via CC_SCRATCH[4]/[3]
349 nv_rd32($r15, NV_PGRAPH_TRAPPED_DATA_LO)
350 nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(4), 0, $r15)
351 nv_rd32($r15, NV_PGRAPH_TRAPPED_ADDR)
352 nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(3), 0, $r15)
353 // report the subchannel's object class via CC_SCRATCH[2]
355 imm32($r15, NV_PGRAPH_FE_OBJECT_TABLE(0))
358 nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(2), 0, $r15)
359 mov $r15 E_BAD_FWMTHD
// ack the trapped method; NOTE(review): 0x400144 write presumably
// releases the FE trap (intervening lines elided) — confirm
362 nv_wr32(0x400144, $r11)
364 // anything we didn't handle, bring it to the host's attention
366 mov $r11 0x504 // FIFO | CHSW | FWMTHD
370 nv_iowr(NV_PGRAPH_FECS_INTR_UP_SET, 0, $r11)
372 // ack, and wake up main()
374 nv_iowr(NV_PGRAPH_FECS_INTR_ACK, 0, $r10)
390 // Not real sure, but, MEM_CMD 7 will hang forever if this isn't done
// set 0x404160, then spin until the write is visible on readback
393 nv_wr32(0x404160, $r15)
395 nv_rd32($r15, 0x404160)
397 bra e #ctx_4160s_wait
400 // Without clearing again at end of xfer, some things cause PGRAPH
401 // to hang with STATUS=0x00000007 until it's cleared.. fbcon can
402 // still function with it set however...
// clear 0x404160 (ctx_4160c counterpart of the set above)
405 nv_wr32(0x404160, $r15)
409 // Again, not real sure
411 // In: $r15 value to set 0x404170 to
415 nv_wr32(0x404170, $r15)
418 // Waits for a ctx_4170s() call to complete
// poll 0x404170; NOTE(review): the completion condition tested on the
// readback value is in elided lines — confirm before modifying
421 nv_rd32($r15, 0x404170)
426 // Disables various things, waits a bit, and re-enables them..
428 // Not sure how exactly this helps, perhaps "ENABLE" is not such a
429 // good description for the bits we turn off? Anyways, without this,
430 // funny things happen.
// phase 1: keep POWER_* bits asserted but drop ENABLE_ROP/MAIN
433 mov $r14 NV_PGRAPH_FECS_RED_SWITCH_ENABLE_GPC
434 or $r14 NV_PGRAPH_FECS_RED_SWITCH_POWER_ROP
435 or $r14 NV_PGRAPH_FECS_RED_SWITCH_POWER_GPC
436 or $r14 NV_PGRAPH_FECS_RED_SWITCH_POWER_MAIN
437 nv_iowr(NV_PGRAPH_FECS_RED_SWITCH, 0, $r14)
// short delay loop, then phase 2: restore the ENABLE bits
441 bra ne #ctx_redswitch_delay
442 or $r14 NV_PGRAPH_FECS_RED_SWITCH_ENABLE_ROP
443 or $r14 NV_PGRAPH_FECS_RED_SWITCH_ENABLE_MAIN
444 nv_iowr(NV_PGRAPH_FECS_RED_SWITCH, 0, $r14)
447 // Not a clue what this is for, except that unless the value is 0x10, the
448 // strand context is saved (and presumably restored) incorrectly..
450 // In: $r15 value to set to (0x00/0x10 are used)
// mirror the value into HUB (FECS), ROP-side, and all-GPC copies
453 nv_iowr(NV_PGRAPH_FECS_UNK86C, 0, $r15)
454 nv_wr32(0x408a14, $r15)
455 nv_wr32(NV_PGRAPH_GPCX_GPCCS_UNK86C, $r15)
// ctx_mem - issue a FECS memory command and poll for completion
458 // In: $r15 NV_PGRAPH_FECS_MEM_CMD_*
460 nv_iowr(NV_PGRAPH_FECS_MEM_CMD, 0, $r15)
// busy-wait: MEM_CMD reads back non-zero while the command is in flight
462 nv_iord($r15, NV_PGRAPH_FECS_MEM_CMD, 0)
467 // ctx_load - loads a channel's ctxctl data, and selects its vm
469 // In: $r2 channel address
474 // switch to channel, somewhat magic in parts..
475 mov $r10 12 // DONE_UNK12
478 nv_iowr(0x409a24, 0, $r15)
// publish the channel to NEXT/MEM_CHAN, kick a LOAD_CHAN memory
// command, then make it the current channel
479 nv_iowr(NV_PGRAPH_FECS_CHAN_NEXT, 0, $r2)
480 nv_iowr(NV_PGRAPH_FECS_MEM_CHAN, 0, $r2)
481 mov $r15 NV_PGRAPH_FECS_MEM_CMD_LOAD_CHAN
483 nv_iowr(NV_PGRAPH_FECS_CHAN_ADDR, 0, $r2)
485 // load channel header, fetch PGRAPH context pointer
// xfer target = channel structure in VRAM
492 nv_iowr(NV_PGRAPH_FECS_MEM_BASE, 0, $r2)
493 imm32($r2, NV_PGRAPH_FECS_MEM_TARGET_UNK31)
494 or $r2 NV_PGRAPH_FECS_MEM_TARGET_AS_VRAM
495 nv_iowr(NV_PGRAPH_FECS_MEM_TARGET, 0, $r2)
496 mov $r1 0x10 // chan + 0x0210
498 sethi $r2 0x00020000 // 16 bytes
503 // update current context
// xfer_data[4]:xfer_data[0] = 64-bit context pointer read from the
// channel header; only the combined/shifted form is kept
504 ld b32 $r1 D[$r0 + #xfer_data + 4]
506 ld b32 $r2 D[$r0 + #xfer_data + 0]
509 st b32 D[$r0 + #ctx_current] $r1
511 // set transfer base to start of context, and fetch context header
513 nv_iowr(NV_PGRAPH_FECS_MEM_BASE, 0, $r1)
// context itself is accessed through the channel's VM, not raw VRAM
514 mov $r2 NV_PGRAPH_FECS_MEM_TARGET_AS_VM
515 nv_iowr(NV_PGRAPH_FECS_MEM_TARGET, 0, $r2)
517 sethi $r1 0x00060000 // 256 bytes
525 // ctx_chan - handler for HUB_SET_CHAN command, will set a channel as
526 //           the active channel for ctxctl, but not actually transfer
527 //           any context data. intended for use only during initial
528 //           context construction.
530 // In: $r2 channel address
// wait for the DONE_UNK12 signal, then issue MEM_CMD 5
537 mov $r10 12 // DONE_UNK12
539 mov $r15 5 // MEM_CMD 5 ???
546 // Execute per-context state overrides list
548 // Only executed on the first load of a channel. Might want to look into
549 // removing this and having the host directly modify the channel's context
550 // to change this state... The nouveau DRM already builds this list as
551 // it's definitely needed for NVIDIA's, so we may as well use it for now
553 // Input: $r1 mmio list length
556 // set transfer base to be the mmio list
557 ld b32 $r3 D[$r0 + #chan_mmio_address]
558 nv_iowr(NV_PGRAPH_FECS_MEM_BASE, 0, $r3)
562 // fetch next 256 bytes of mmio list if necessary
564 bra ne #ctx_mmio_pull
566 sethi $r5 0x00060000 // 256 bytes
570 // execute a single list entry
// each entry is a (register, value) pair in the xfer buffer
572 ld b32 $r14 D[$r4 + #xfer_data + 0x00]
573 ld b32 $r15 D[$r4 + #xfer_data + 0x04]
// loop until the whole list has been written
579 bra ne #ctx_mmio_loop
581 // set transfer base back to the current context
583 ld b32 $r3 D[$r0 + #ctx_current]
584 nv_iowr(NV_PGRAPH_FECS_MEM_BASE, 0, $r3)
586 // disable the mmio list now, we don't need/want to execute it again
587 st b32 D[$r0 + #chan_mmio_count] $r0
589 sethi $r1 0x00060000 // 256 bytes
594 // Transfer HUB context data between GPU and storage area
596 // In: $r2 channel address
597 //     $p1 clear on save, set on load
598 //     $p2 set if opposite direction done/will be done, so:
599 //         on save it means: "a load will follow this save"
600 //         on load it means: "a save preceded this load"
603 // according to mwk, some kind of wait for idle
// poke 0x409c08, then poll 0x409c00 until it reports idle
605 nv_iowr(0x409c08, 0, $r14)
607 nv_iord($r14, 0x409c00, 0)
609 bra ne #ctx_xfer_idle
// skip the pre-save work on a pure load; on save+load do the pre-load too
611 bra not $p1 #ctx_xfer_pre
612 bra $p2 #ctx_xfer_pre_load
619 bra not $p1 #ctx_xfer_exec
630 // fetch context pointer, and initiate xfer on all GPCs
632 ld b32 $r1 D[$r0 + #ctx_current]
635 nv_iowr(NV_PGRAPH_FECS_BAR, 0, $r2)
637 nv_wr32(0x41a500, $r1) // GPC_BCAST_WRCMD_DATA = ctx pointer
642 nv_wr32(0x41a504, $r15) // GPC_BCAST_WRCMD_CMD = GPC_XFER(type)
// strands: select all (mask 0x3f) and issue SAVE or LOAD depending on $p1
647 nv_iowr(NV_PGRAPH_FECS_STRAND_SELECT, 0x3f, $r2)
648 xbit $r2 $flags $p1 // SAVE/LOAD
649 add b32 $r2 NV_PGRAPH_FECS_STRAND_CMD_SAVE
650 nv_iowr(NV_PGRAPH_FECS_STRAND_CMD, 0x3f, $r2)
// mmctx transfer of the HUB mmio list (single list, whole range)
653 xbit $r10 $flags $p1 // direction
654 or $r10 6 // first, last
655 mov $r11 0 // base = 0
656 ld b32 $r12 D[$r0 + #hub_mmio_list_head]
657 ld b32 $r13 D[$r0 + #hub_mmio_list_tail]
658 mov $r14 0 // not multi
661 // wait for GPCs to all complete
662 mov $r10 8 // DONE_BAR
665 // wait for strand xfer to complete
// post-load fixups only apply to the load direction
669 bra $p1 #ctx_xfer_post
670 mov $r10 12 // DONE_UNK12
672 mov $r15 5 // MEM_CMD 5 ???
675 bra $p2 #ctx_xfer_done
// on first load, run the per-channel mmio override list (if any)
686 bra not $p1 #ctx_xfer_no_post_mmio
687 ld b32 $r1 D[$r0 + #chan_mmio_count]
689 bra e #ctx_xfer_no_post_mmio
692 ctx_xfer_no_post_mmio: