Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hub.fuc')
 -rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hub.fuc  696
 1 file changed, 696 insertions, 0 deletions
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hub.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hub.fuc
new file mode 100644
index 000000000000..87f99e38acbf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hub.fuc
@@ -0,0 +1,696 @@
+/* fuc microcode for gf100 PGRAPH/HUB
+ *
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifdef INCLUDE_DATA
+hub_mmio_list_head:	.b32 #hub_mmio_list_base
+hub_mmio_list_tail:	.b32 #hub_mmio_list_next
+
+gpc_count:		.b32 0
+rop_count:		.b32 0
+cmd_queue:		queue_init
+
+ctx_current:		.b32 0
+
+.align 256
+chan_data:
+chan_mmio_count:	.b32 0
+chan_mmio_address:	.b32 0
+
+.align 256
+xfer_data:		.skip 256
+
+hub_mmio_list_base:
+.b32 0x0417e91c // 0x17e91c, 2
+hub_mmio_list_next:
+#endif
+
+#ifdef INCLUDE_CODE
+// reports an exception to the host
+//
+// In: $r15 error code (see os.h)
+//
+error:
+	nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(5), 0, $r15)
+	mov $r15 1
+	nv_iowr(NV_PGRAPH_FECS_INTR_UP_SET, 0, $r15)
+	ret
+
+// HUB fuc initialisation, executed by triggering ucode start, will
+// fall through to main loop after completion.
+//
+// Output:
+//   CC_SCRATCH[0]:
+//     31:31: set to signal completion
+//   CC_SCRATCH[1]:
+//      31:0: total PGRAPH context size
+//
+init:
+	clear b32 $r0
+	mov $xdbase $r0
+
+	// setup stack
+	nv_iord($r1, NV_PGRAPH_FECS_CAPS, 0)
+	extr $r1 $r1 9:17
+	shl b32 $r1 8
+	mov $sp $r1
+
+	// enable fifo access
+	mov $r2 NV_PGRAPH_FECS_ACCESS_FIFO
+	nv_iowr(NV_PGRAPH_FECS_ACCESS, 0, $r2)
+
+	// setup i0 handler, and route all interrupts to it
+	mov $r1 #ih
+	mov $iv0 $r1
+
+	clear b32 $r2
+	nv_iowr(NV_PGRAPH_FECS_INTR_ROUTE, 0, $r2)
+
+	// route HUB_CHSW_PULSE to fuc interrupt 8
+	mov $r2 0x2003		// { HUB_CHSW_PULSE, ZERO } -> intr 8
+	nv_iowr(NV_PGRAPH_FECS_IROUTE, 0, $r2)
+
+	// not sure what these are, route them because NVIDIA does, and
+	// the IRQ handler will signal the host if we ever get one.. we
+	// may find out if/why we need to handle these if so..
+	//
+	mov $r2 0x2004		// { 0x04, ZERO } -> intr 9
+	nv_iowr(NV_PGRAPH_FECS_IROUTE, 1, $r2)
+	mov $r2 0x200b		// { HUB_FIRMWARE_MTHD, ZERO } -> intr 10
+	nv_iowr(NV_PGRAPH_FECS_IROUTE, 2, $r2)
+	mov $r2 0x200c		// { 0x0c, ZERO } -> intr 15
+	nv_iowr(NV_PGRAPH_FECS_IROUTE, 7, $r2)
+
+	// enable all INTR_UP interrupts
+	sub b32 $r3 $r0 1
+	nv_iowr(NV_PGRAPH_FECS_INTR_UP_EN, 0, $r3)
+
+	// enable fifo, ctxsw, 9, fwmthd, 15 interrupts
+	imm32($r2, 0x8704)
+	nv_iowr(NV_PGRAPH_FECS_INTR_EN_SET, 0, $r2)
+
+	// fifo level triggered, rest edge
+	mov $r2 NV_PGRAPH_FECS_INTR_MODE_FIFO_LEVEL
+	nv_iowr(NV_PGRAPH_FECS_INTR_MODE, 0, $r2)
+
+	// enable interrupts
+	bset $flags ie0
+
+	// fetch enabled GPC/ROP counts
+	nv_rd32($r14, 0x409604)
+	extr $r1 $r15 16:20
+	st b32 D[$r0 + #rop_count] $r1
+	and $r15 0x1f
+	st b32 D[$r0 + #gpc_count] $r15
+
+	// set BAR_REQMASK to GPC mask
+	mov $r1 1
+	shl b32 $r1 $r15
+	sub b32 $r1 1
+	nv_iowr(NV_PGRAPH_FECS_BAR_MASK0, 0, $r1)
+	nv_iowr(NV_PGRAPH_FECS_BAR_MASK1, 0, $r1)
+
+	// context size calculation, reserve first 256 bytes for use by fuc
+	mov $r1 256
+
+	//
+	mov $r15 2
+	call(ctx_4170s)
+	call(ctx_4170w)
+	mov $r15 0x10
+	call(ctx_86c)
+
+	// calculate size of mmio context data
+	ld b32 $r14 D[$r0 + #hub_mmio_list_head]
+	ld b32 $r15 D[$r0 + #hub_mmio_list_tail]
+	call(mmctx_size)
+
+	// set mmctx base addresses now so we don't have to do it later,
+	// they don't (currently) ever change
+	shr b32 $r4 $r1 8
+	nv_iowr(NV_PGRAPH_FECS_MMCTX_SAVE_SWBASE, 0, $r4)
+	nv_iowr(NV_PGRAPH_FECS_MMCTX_LOAD_SWBASE, 0, $r4)
+	add b32 $r3 0x1300
+	add b32 $r1 $r15
+	shr b32 $r15 2
+	nv_iowr(NV_PGRAPH_FECS_MMCTX_LOAD_COUNT, 0, $r15) // wtf??
+
+	// strands, base offset needs to be aligned to 256 bytes
+	shr b32 $r1 8
+	add b32 $r1 1
+	shl b32 $r1 8
+	mov b32 $r15 $r1
+	call(strand_ctx_init)
+	add b32 $r1 $r15
+
+	// initialise each GPC in sequence by passing in the offset of its
+	// context data in GPCn_CC_SCRATCH[1], and starting its FUC (which
+	// has previously been uploaded by the host) running.
+	//
+	// the GPC fuc init sequence will set GPCn_CC_SCRATCH[0] bit 31
+	// when it has completed, and return the size of its context data
+	// in GPCn_CC_SCRATCH[1]
+	//
+	ld b32 $r3 D[$r0 + #gpc_count]
+	imm32($r4, 0x502000)
+	init_gpc:
+		// setup, and start GPC ucode running
+		add b32 $r14 $r4 0x804
+		mov b32 $r15 $r1
+		call(nv_wr32)			// CC_SCRATCH[1] = ctx offset
+		add b32 $r14 $r4 0x10c
+		clear b32 $r15
+		call(nv_wr32)
+		add b32 $r14 $r4 0x104
+		call(nv_wr32)			// ENTRY
+		add b32 $r14 $r4 0x100
+		mov $r15 2			// CTRL_START_TRIGGER
+		call(nv_wr32)			// CTRL
+
+		// wait for it to complete, and adjust context size
+		add b32 $r14 $r4 0x800
+		init_gpc_wait:
+			call(nv_rd32)
+			xbit $r15 $r15 31
+			bra e #init_gpc_wait
+		add b32 $r14 $r4 0x804
+		call(nv_rd32)
+		add b32 $r1 $r15
+
+		// next!
+		add b32 $r4 0x8000
+		sub b32 $r3 1
+		bra ne #init_gpc
+
+	//
+	mov $r15 0
+	call(ctx_86c)
+	mov $r15 0
+	call(ctx_4170s)
+
+	// save context size, and tell host we're ready
+	nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(1), 0, $r1)
+	clear b32 $r1
+	bset $r1 31
+	nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_SET(0), 0, $r1)
+
+// Main program loop, very simple, sleeps until woken up by the interrupt
+// handler, pulls a command from the queue and executes its handler
+//
+main:
+	// sleep until we have something to do
+	bset $flags $p0
+	sleep $p0
+	mov $r13 #cmd_queue
+	call(queue_get)
+	bra $p1 #main
+
+	// context switch, requested by GPU?
+	cmpu b32 $r14 0x4001
+	bra ne #main_not_ctx_switch
+		trace_set(T_AUTO)
+		nv_iord($r1, NV_PGRAPH_FECS_CHAN_ADDR, 0)
+		nv_iord($r2, NV_PGRAPH_FECS_CHAN_NEXT, 0)
+
+		xbit $r3 $r1 31
+		bra e #chsw_no_prev
+			xbit $r3 $r2 31
+			bra e #chsw_prev_no_next
+				push $r2
+				mov b32 $r2 $r1
+				trace_set(T_SAVE)
+				bclr $flags $p1
+				bset $flags $p2
+				call(ctx_xfer)
+				trace_clr(T_SAVE);
+				pop $r2
+				trace_set(T_LOAD);
+				bset $flags $p1
+				call(ctx_xfer)
+				trace_clr(T_LOAD);
+				bra #chsw_done
+			chsw_prev_no_next:
+				push $r2
+				mov b32 $r2 $r1
+				bclr $flags $p1
+				bclr $flags $p2
+				call(ctx_xfer)
+				pop $r2
+				nv_iowr(NV_PGRAPH_FECS_CHAN_ADDR, 0, $r2)
+				bra #chsw_done
+		chsw_no_prev:
+			xbit $r3 $r2 31
+			bra e #chsw_done
+				bset $flags $p1
+				bclr $flags $p2
+				call(ctx_xfer)
+
+		// ack the context switch request
+		chsw_done:
+		mov $r2 NV_PGRAPH_FECS_CHSW_ACK
+		nv_iowr(NV_PGRAPH_FECS_CHSW, 0, $r2)
+		trace_clr(T_AUTO)
+		bra #main
+
+	// request to set current channel? (*not* a context switch)
+	main_not_ctx_switch:
+	cmpu b32 $r14 0x0001
+	bra ne #main_not_ctx_chan
+		mov b32 $r2 $r15
+		call(ctx_chan)
+		bra #main_done
+
+	// request to store current channel context?
+	main_not_ctx_chan:
+	cmpu b32 $r14 0x0002
+	bra ne #main_not_ctx_save
+		trace_set(T_SAVE)
+		bclr $flags $p1
+		bclr $flags $p2
+		call(ctx_xfer)
+		trace_clr(T_SAVE)
+		bra #main_done
+
+	main_not_ctx_save:
+		shl b32 $r15 $r14 16
+		or $r15 E_BAD_COMMAND
+		call(error)
+		bra #main
+
+	main_done:
+	clear b32 $r2
+	bset $r2 31
+	nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_SET(0), 0, $r2)
+	bra #main
+
+// interrupt handler
+ih:
+	push $r8
+	mov $r8 $flags
+	push $r8
+	push $r9
+	push $r10
+	push $r11
+	push $r13
+	push $r14
+	push $r15
+	clear b32 $r0
+
+	// incoming fifo command?
+	nv_iord($r10, NV_PGRAPH_FECS_INTR, 0)
+	and $r11 $r10 NV_PGRAPH_FECS_INTR_FIFO
+	bra e #ih_no_fifo
+		// queue incoming fifo command for later processing
+		mov $r13 #cmd_queue
+		nv_iord($r14, NV_PGRAPH_FECS_FIFO_CMD, 0)
+		nv_iord($r15, NV_PGRAPH_FECS_FIFO_DATA, 0)
+		call(queue_put)
+		add b32 $r11 0x400
+		mov $r14 1
+		nv_iowr(NV_PGRAPH_FECS_FIFO_ACK, 0, $r14)
+
+	// context switch request?
+	ih_no_fifo:
+	and $r11 $r10 NV_PGRAPH_FECS_INTR_CHSW
+	bra e #ih_no_ctxsw
+		// enqueue a context switch for later processing
+		mov $r13 #cmd_queue
+		mov $r14 0x4001
+		call(queue_put)
+
+	// firmware method?
+	ih_no_ctxsw:
+	and $r11 $r10 NV_PGRAPH_FECS_INTR_FWMTHD
+	bra e #ih_no_fwmthd
+		// none we handle; report to host and ack
+		nv_rd32($r15, NV_PGRAPH_TRAPPED_DATA_LO)
+		nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(4), 0, $r15)
+		nv_rd32($r15, NV_PGRAPH_TRAPPED_ADDR)
+		nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(3), 0, $r15)
+		extr $r14 $r15 16:18
+		shl b32 $r14 $r14 2
+		imm32($r15, NV_PGRAPH_FE_OBJECT_TABLE(0))
+		add b32 $r14 $r15
+		call(nv_rd32)
+		nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(2), 0, $r15)
+		mov $r15 E_BAD_FWMTHD
+		call(error)
+		mov $r11 0x100
+		nv_wr32(0x400144, $r11)
+
+	// anything we didn't handle, bring it to the host's attention
+	ih_no_fwmthd:
+	mov $r11 0x504	// FIFO | CHSW | FWMTHD
+	not b32 $r11
+	and $r11 $r10 $r11
+	bra e #ih_no_other
+		nv_iowr(NV_PGRAPH_FECS_INTR_UP_SET, 0, $r11)
+
+	// ack, and wake up main()
+	ih_no_other:
+	nv_iowr(NV_PGRAPH_FECS_INTR_ACK, 0, $r10)
+
+	pop $r15
+	pop $r14
+	pop $r13
+	pop $r11
+	pop $r10
+	pop $r9
+	pop $r8
+	mov $flags $r8
+	pop $r8
+	bclr $flags $p0
+	iret
+
+#if CHIPSET < GK100
+// Not real sure, but MEM_CMD 7 will hang forever if this isn't done
+ctx_4160s:
+	mov $r15 1
+	nv_wr32(0x404160, $r15)
+	ctx_4160s_wait:
+		nv_rd32($r15, 0x404160)
+		xbit $r15 $r15 4
+		bra e #ctx_4160s_wait
+	ret
+
+// Without clearing again at end of xfer, some things cause PGRAPH
+// to hang with STATUS=0x00000007 until it's cleared.. fbcon can
+// still function with it set however...
+ctx_4160c:
+	clear b32 $r15
+	nv_wr32(0x404160, $r15)
+	ret
+#endif
+
+// Again, not real sure
+//
+// In: $r15 value to set 0x404170 to
+//
+ctx_4170s:
+	or $r15 0x10
+	nv_wr32(0x404170, $r15)
+	ret
+
+// Waits for a ctx_4170s() call to complete
+//
+ctx_4170w:
+	nv_rd32($r15, 0x404170)
+	and $r15 0x10
+	bra ne #ctx_4170w
+	ret
+
+// Disables various things, waits a bit, and re-enables them..
+//
+// Not sure how exactly this helps, perhaps "ENABLE" is not such a
+// good description for the bits we turn off? Anyways, without this,
+// funny things happen.
+//
+ctx_redswitch:
+	mov $r14 NV_PGRAPH_FECS_RED_SWITCH_ENABLE_GPC
+	or $r14 NV_PGRAPH_FECS_RED_SWITCH_POWER_ROP
+	or $r14 NV_PGRAPH_FECS_RED_SWITCH_POWER_GPC
+	or $r14 NV_PGRAPH_FECS_RED_SWITCH_POWER_MAIN
+	nv_iowr(NV_PGRAPH_FECS_RED_SWITCH, 0, $r14)
+	mov $r15 8
+	ctx_redswitch_delay:
+		sub b32 $r15 1
+		bra ne #ctx_redswitch_delay
+	or $r14 NV_PGRAPH_FECS_RED_SWITCH_ENABLE_ROP
+	or $r14 NV_PGRAPH_FECS_RED_SWITCH_ENABLE_MAIN
+	nv_iowr(NV_PGRAPH_FECS_RED_SWITCH, 0, $r14)
+	ret
+
+// Not a clue what this is for, except that unless the value is 0x10, the
+// strand context is saved (and presumably restored) incorrectly..
+//
+// In: $r15 value to set to (0x00/0x10 are used)
+//
+ctx_86c:
+	nv_iowr(NV_PGRAPH_FECS_UNK86C, 0, $r15)
+	nv_wr32(0x408a14, $r15)
+	nv_wr32(NV_PGRAPH_GPCX_GPCCS_UNK86C, $r15)
+	ret
+
+// In: $r15 NV_PGRAPH_FECS_MEM_CMD_*
+ctx_mem:
+	nv_iowr(NV_PGRAPH_FECS_MEM_CMD, 0, $r15)
+	ctx_mem_wait:
+		nv_iord($r15, NV_PGRAPH_FECS_MEM_CMD, 0)
+		or $r15 $r15
+		bra ne #ctx_mem_wait
+	ret
+
+// ctx_load - loads a channel's ctxctl data, and selects its vm
+//
+// In: $r2 channel address
+//
+ctx_load:
+	trace_set(T_CHAN)
+
+	// switch to channel, somewhat magic in parts..
+	mov $r10 12		// DONE_UNK12
+	call(wait_donez)
+	clear b32 $r15
+	nv_iowr(0x409a24, 0, $r15)
+	nv_iowr(NV_PGRAPH_FECS_CHAN_NEXT, 0, $r2)
+	nv_iowr(NV_PGRAPH_FECS_MEM_CHAN, 0, $r2)
+	mov $r15 NV_PGRAPH_FECS_MEM_CMD_LOAD_CHAN
+	call(ctx_mem)
+	nv_iowr(NV_PGRAPH_FECS_CHAN_ADDR, 0, $r2)
+
+	// load channel header, fetch PGRAPH context pointer
+	mov $xtargets $r0
+	bclr $r2 31
+	shl b32 $r2 4
+	add b32 $r2 2
+
+	trace_set(T_LCHAN)
+	nv_iowr(NV_PGRAPH_FECS_MEM_BASE, 0, $r2)
+	imm32($r2, NV_PGRAPH_FECS_MEM_TARGET_UNK31)
+	or $r2 NV_PGRAPH_FECS_MEM_TARGET_AS_VRAM
+	nv_iowr(NV_PGRAPH_FECS_MEM_TARGET, 0, $r2)
+	mov $r1 0x10			// chan + 0x0210
+	mov $r2 #xfer_data
+	sethi $r2 0x00020000		// 16 bytes
+	xdld $r1 $r2
+	xdwait
+	trace_clr(T_LCHAN)
+
+	// update current context
+	ld b32 $r1 D[$r0 + #xfer_data + 4]
+	shl b32 $r1 24
+	ld b32 $r2 D[$r0 + #xfer_data + 0]
+	shr b32 $r2 8
+	or $r1 $r2
+	st b32 D[$r0 + #ctx_current] $r1
+
+	// set transfer base to start of context, and fetch context header
+	trace_set(T_LCTXH)
+	nv_iowr(NV_PGRAPH_FECS_MEM_BASE, 0, $r1)
+	mov $r2 NV_PGRAPH_FECS_MEM_TARGET_AS_VM
+	nv_iowr(NV_PGRAPH_FECS_MEM_TARGET, 0, $r2)
+	mov $r1 #chan_data
+	sethi $r1 0x00060000		// 256 bytes
+	xdld $r0 $r1
+	xdwait
+	trace_clr(T_LCTXH)
+
+	trace_clr(T_CHAN)
+	ret
+
+// ctx_chan - handler for HUB_SET_CHAN command, will set a channel as
+//            the active channel for ctxctl, but not actually transfer
+//            any context data. intended for use only during initial
+//            context construction.
+//
+// In: $r2 channel address
+//
+ctx_chan:
+#if CHIPSET < GK100
+	call(ctx_4160s)
+#endif
+	call(ctx_load)
+	mov $r10 12			// DONE_UNK12
+	call(wait_donez)
+	mov $r15 5			// MEM_CMD 5 ???
+	call(ctx_mem)
+#if CHIPSET < GK100
+	call(ctx_4160c)
+#endif
+	ret
+
+// Execute per-context state overrides list
+//
+// Only executed on the first load of a channel. Might want to look into
+// removing this and having the host directly modify the channel's context
+// to change this state... The nouveau DRM already builds this list as
+// it's definitely needed for NVIDIA's, so we may as well use it for now
+//
+// Input: $r1 mmio list length
+//
+ctx_mmio_exec:
+	// set transfer base to be the mmio list
+	ld b32 $r3 D[$r0 + #chan_mmio_address]
+	nv_iowr(NV_PGRAPH_FECS_MEM_BASE, 0, $r3)
+
+	clear b32 $r3
+	ctx_mmio_loop:
+		// fetch next 256 bytes of mmio list if necessary
+		and $r4 $r3 0xff
+		bra ne #ctx_mmio_pull
+			mov $r5 #xfer_data
+			sethi $r5 0x00060000	// 256 bytes
+			xdld $r3 $r5
+			xdwait
+
+		// execute a single list entry
+		ctx_mmio_pull:
+		ld b32 $r14 D[$r4 + #xfer_data + 0x00]
+		ld b32 $r15 D[$r4 + #xfer_data + 0x04]
+		call(nv_wr32)
+
+		// next!
+		add b32 $r3 8
+		sub b32 $r1 1
+		bra ne #ctx_mmio_loop
+
+	// set transfer base back to the current context
+	ctx_mmio_done:
+	ld b32 $r3 D[$r0 + #ctx_current]
+	nv_iowr(NV_PGRAPH_FECS_MEM_BASE, 0, $r3)
+
+	// disable the mmio list now, we don't need/want to execute it again
+	st b32 D[$r0 + #chan_mmio_count] $r0
+	mov $r1 #chan_data
+	sethi $r1 0x00060000	// 256 bytes
+	xdst $r0 $r1
+	xdwait
+	ret
+
+// Transfer HUB context data between GPU and storage area
+//
+// In: $r2 channel address
+//     $p1 clear on save, set on load
+//     $p2 set if opposite direction done/will be done, so:
+//         on save it means: "a load will follow this save"
+//         on load it means: "a save preceded this load"
+//
+ctx_xfer:
+	// according to mwk, some kind of wait for idle
+	mov $r14 4
+	nv_iowr(0x409c08, 0, $r14)
+	ctx_xfer_idle:
+		nv_iord($r14, 0x409c00, 0)
+		and $r14 0x2000
+		bra ne #ctx_xfer_idle
+
+	bra not $p1 #ctx_xfer_pre
+	bra $p2 #ctx_xfer_pre_load
+	ctx_xfer_pre:
+		mov $r15 0x10
+		call(ctx_86c)
+#if CHIPSET < GK100
+		call(ctx_4160s)
+#endif
+		bra not $p1 #ctx_xfer_exec
+
+	ctx_xfer_pre_load:
+		mov $r15 2
+		call(ctx_4170s)
+		call(ctx_4170w)
+		call(ctx_redswitch)
+		clear b32 $r15
+		call(ctx_4170s)
+		call(ctx_load)
+
+	// fetch context pointer, and initiate xfer on all GPCs
+	ctx_xfer_exec:
+	ld b32 $r1 D[$r0 + #ctx_current]
+
+	clear b32 $r2
+	nv_iowr(NV_PGRAPH_FECS_BAR, 0, $r2)
+
+	nv_wr32(0x41a500, $r1)	// GPC_BCAST_WRCMD_DATA = ctx pointer
+	xbit $r15 $flags $p1
+	xbit $r2 $flags $p2
+	shl b32 $r2 1
+	or $r15 $r2
+	nv_wr32(0x41a504, $r15)	// GPC_BCAST_WRCMD_CMD = GPC_XFER(type)
+
+	// strands
+	call(strand_pre)
+	clear b32 $r2
+	nv_iowr(NV_PGRAPH_FECS_STRAND_SELECT, 0x3f, $r2)
+	xbit $r2 $flags $p1	// SAVE/LOAD
+	add b32 $r2 NV_PGRAPH_FECS_STRAND_CMD_SAVE
+	nv_iowr(NV_PGRAPH_FECS_STRAND_CMD, 0x3f, $r2)
+
+	// mmio context
+	xbit $r10 $flags $p1	// direction
+	or $r10 6		// first, last
+	mov $r11 0		// base = 0
+	ld b32 $r12 D[$r0 + #hub_mmio_list_head]
+	ld b32 $r13 D[$r0 + #hub_mmio_list_tail]
+	mov $r14 0		// not multi
+	call(mmctx_xfer)
+
+	// wait for GPCs to all complete
+	mov $r10 8		// DONE_BAR
+	call(wait_doneo)
+
+	// wait for strand xfer to complete
+	call(strand_wait)
+
+	// post-op
+	bra $p1 #ctx_xfer_post
+		mov $r10 12		// DONE_UNK12
+		call(wait_donez)
+		mov $r15 5		// MEM_CMD 5 ???
+		call(ctx_mem)
+
+	bra $p2 #ctx_xfer_done
+	ctx_xfer_post:
+		mov $r15 2
+		call(ctx_4170s)
+		clear b32 $r15
+		call(ctx_86c)
+		call(strand_post)
+		call(ctx_4170w)
+		clear b32 $r15
+		call(ctx_4170s)
+
+		bra not $p1 #ctx_xfer_no_post_mmio
+		ld b32 $r1 D[$r0 + #chan_mmio_count]
+		or $r1 $r1
+		bra e #ctx_xfer_no_post_mmio
+			call(ctx_mmio_exec)
+
+		ctx_xfer_no_post_mmio:
+#if CHIPSET < GK100
+		call(ctx_4160c)
+#endif
+
+	ctx_xfer_done:
+	ret
+#endif
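For orientation, the comments in init and main above describe a simple host/ucode handshake: after the host starts the ucode, init sets CC_SCRATCH[0] bit 31 and leaves the total PGRAPH context size in CC_SCRATCH[1]; thereafter the interrupt handler queues FIFO_CMD/FIFO_DATA pairs, main() dispatches them (0x0001 = set channel, 0x0002 = save context; 0x4001 is queued internally by ih for GPU-requested switches), and main_done sets CC_SCRATCH[0] bit 31 again on completion. Below is a minimal C sketch of the host-side view of that protocol. It is not the nouveau DRM's actual code: the register offsets, the mmio_rd32/mmio_wr32 helpers, and the ack write are illustrative assumptions; only the bit-31 completion signal, the context size in CC_SCRATCH[1], and the command numbers come from the microcode itself.

```c
#include <stdint.h>
#include <stdbool.h>

/* Assumed offsets for illustration only; real values live in the driver. */
#define FECS_CC_SCRATCH(i)	(0x409800 + (i) * 4)
#define FECS_FIFO_DATA		0x409500
#define FECS_FIFO_CMD		0x409504

/* Command numbers taken from hub.fuc's main() dispatch. */
#define HUB_SET_CHAN		0x0001	/* main_not_ctx_switch path */
#define HUB_CTX_SAVE		0x0002	/* main_not_ctx_chan path */

/* Hypothetical MMIO accessors provided by the surrounding driver. */
uint32_t mmio_rd32(uint32_t addr);
void mmio_wr32(uint32_t addr, uint32_t data);

/* Wait for init: the ucode sets CC_SCRATCH[0] bit 31 when ready and
 * reports the total PGRAPH context size in CC_SCRATCH[1]. */
static bool hub_wait_ready(uint32_t *ctx_size)
{
	for (int i = 0; i < 2000000; i++) {
		if (mmio_rd32(FECS_CC_SCRATCH(0)) & 0x80000000) {
			*ctx_size = mmio_rd32(FECS_CC_SCRATCH(1));
			return true;
		}
	}
	return false;	/* timed out */
}

/* Submit one command: ih queues FIFO_CMD/FIFO_DATA for main(), which
 * sets CC_SCRATCH[0] bit 31 again when the handler finishes (main_done). */
static bool hub_send_cmd(uint32_t cmd, uint32_t data)
{
	/* Clear the previous completion bit first; exactly how the host
	 * acks it (SET/CLR alias, write-1-to-clear, ...) is assumed here. */
	mmio_wr32(FECS_CC_SCRATCH(0), 0x80000000);
	mmio_wr32(FECS_FIFO_DATA, data);
	mmio_wr32(FECS_FIFO_CMD, cmd);
	for (int i = 0; i < 2000000; i++) {
		if (mmio_rd32(FECS_CC_SCRATCH(0)) & 0x80000000)
			return true;
	}
	return false;
}
```

A typical sequence under these assumptions would be hub_wait_ready() once after upload, then hub_send_cmd(HUB_SET_CHAN, chan_addr) during initial context construction (the ctx_chan path, which selects a channel without transferring context data) and hub_send_cmd(HUB_CTX_SAVE, 0) to force a save; GPU-initiated channel switches never use this interface, since ih enqueues 0x4001 on the CHSW interrupt by itself.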