X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Fnvfx%2Fnvfx_fragprog.c;h=dbd7c77346512d4ae00afa0d1308c1a1ce3f59ab;hb=358795bc0bacdba9a36bb010ef18ee1b2d086f2d;hp=6772d9bd5160a28ded16642feb4ed0eb8c8cfc2f;hpb=ff20543c815a14fddad1efaaef1610454a1924d4;p=mesa.git

diff --git a/src/gallium/drivers/nvfx/nvfx_fragprog.c b/src/gallium/drivers/nvfx/nvfx_fragprog.c
index 6772d9bd516..dbd7c773465 100644
--- a/src/gallium/drivers/nvfx/nvfx_fragprog.c
+++ b/src/gallium/drivers/nvfx/nvfx_fragprog.c
@@ -1,87 +1,81 @@
+#include <float.h>
 #include "pipe/p_context.h"
 #include "pipe/p_defines.h"
 #include "pipe/p_state.h"
 #include "util/u_inlines.h"
+#include "util/u_debug.h"
 #include "pipe/p_shader_tokens.h"
 #include "tgsi/tgsi_parse.h"
 #include "tgsi/tgsi_util.h"
+#include "tgsi/tgsi_dump.h"
+#include "tgsi/tgsi_ureg.h"
 #include "nvfx_context.h"
 #include "nvfx_shader.h"
+#include "nvfx_resource.h"
 
-#define MAX_CONSTS 128
-#define MAX_IMM 32
-
 struct nvfx_fpc {
+	struct nvfx_pipe_fragment_program* pfp;
 	struct nvfx_fragment_program *fp;
 
-	uint attrib_map[PIPE_MAX_SHADER_INPUTS];
-
-	unsigned r_temps;
-	unsigned r_temps_discard;
-	struct nvfx_sreg r_result[PIPE_MAX_SHADER_OUTPUTS];
-	struct nvfx_sreg *r_temp;
+	unsigned max_temps;
+	unsigned long long r_temps;
+	unsigned long long r_temps_discard;
+	struct nvfx_reg r_result[PIPE_MAX_SHADER_OUTPUTS];
+	struct nvfx_reg *r_temp;
+	unsigned sprite_coord_temp;
 
 	int num_regs;
 
 	unsigned inst_offset;
 	unsigned have_const;
 
-	struct {
-		int pipe;
-		float vals[4];
-	} consts[MAX_CONSTS];
-	int nr_consts;
+	struct util_dynarray imm_data;
 
-	struct nvfx_sreg imm[MAX_IMM];
+	struct nvfx_reg* r_imm;
 	unsigned nr_imm;
+
+	unsigned char generic_to_slot[256]; /* semantic idx for each input semantic */
+
+	struct util_dynarray if_stack;
+	//struct util_dynarray loop_stack;
+	struct util_dynarray label_relocs;
 };

-static INLINE struct nvfx_sreg
+static INLINE struct nvfx_reg
 temp(struct nvfx_fpc *fpc)
 {
-	int idx = ffs(~fpc->r_temps) - 1;
+	int idx = __builtin_ctzll(~fpc->r_temps);
 
-	if (idx < 0) {
+	if (idx >= fpc->max_temps) {
 		NOUVEAU_ERR("out of temps!!\n");
 		assert(0);
-		return nvfx_sr(NVFXSR_TEMP, 0);
+		return nvfx_reg(NVFXSR_TEMP, 0);
 	}
 
-	fpc->r_temps |= (1 << idx);
-	fpc->r_temps_discard |= (1 << idx);
-	return nvfx_sr(NVFXSR_TEMP, idx);
+	fpc->r_temps |= (1ULL << idx);
+	fpc->r_temps_discard |= (1ULL << idx);
+	return nvfx_reg(NVFXSR_TEMP, idx);
 }
 
 static INLINE void
 release_temps(struct nvfx_fpc *fpc)
 {
 	fpc->r_temps &= ~fpc->r_temps_discard;
-	fpc->r_temps_discard = 0;
+	fpc->r_temps_discard = 0ULL;
 }
 
-static INLINE struct nvfx_sreg
-constant(struct nvfx_fpc *fpc, int pipe, float vals[4])
+static inline struct nvfx_reg
+nvfx_fp_imm(struct nvfx_fpc *fpc, float a, float b, float c, float d)
 {
-	int idx;
-
-	if (fpc->nr_consts == MAX_CONSTS)
-		assert(0);
-	idx = fpc->nr_consts++;
+	float v[4] = {a, b, c, d};
+	int idx = fpc->imm_data.size >> 4;
 
-	fpc->consts[idx].pipe = pipe;
-	if (pipe == -1)
-		memcpy(fpc->consts[idx].vals, vals, 4 * sizeof(float));
-	return nvfx_sr(NVFXSR_CONST, idx);
+	memcpy(util_dynarray_grow(&fpc->imm_data, sizeof(float) * 4), v, 4 * sizeof(float));
+	return nvfx_reg(NVFXSR_IMM, idx);
 }
 
-#define arith(cc,s,o,d,m,s0,s1,s2) \
-	nvfx_fp_arith((cc), (s), NVFX_FP_OP_OPCODE_##o, \
-			(d), (m), (s0), (s1), (s2))
-#define tex(cc,s,o,u,d,m,s0,s1,s2) \
-	nvfx_fp_tex((cc), (s), NVFX_FP_OP_OPCODE_##o, (u), \
-			(d), (m), (s0), none, none)
-
 static void
 grow_insns(struct nvfx_fpc *fpc, int size)
 {
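The hunk above switches the compiler's temporary-register bookkeeping from 32-bit to 64-bit masks: temp() takes the lowest clear bit of r_temps, and r_temps_discard collects the temporaries that release_temps() hands back after each instruction. A minimal, self-contained sketch of that allocation scheme (standalone names, not the driver's API; the patch caps max_temps at 48 on nv4x and 32 on nv3x):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t r_temps_used, r_temps_discard;

    static int alloc_temp(unsigned max_temps)
    {
            int idx = __builtin_ctzll(~r_temps_used);  /* lowest free register */
            assert(idx < (int)max_temps);              /* "out of temps!!" in the driver */
            r_temps_used    |= 1ULL << idx;
            r_temps_discard |= 1ULL << idx;            /* short-lived unless kept explicitly */
            return idx;
    }

    static void release_temps_sketch(void)
    {
            r_temps_used &= ~r_temps_discard;          /* free per-instruction temps in bulk */
            r_temps_discard = 0;
    }

@@ -92,44 +86,59 @@ grow_insns(struct nvfx_fpc *fpc, int 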
size) } static void -emit_src(struct nvfx_fpc *fpc, int pos, struct nvfx_sreg src) +emit_src(struct nvfx_fpc *fpc, int pos, struct nvfx_src src) { struct nvfx_fragment_program *fp = fpc->fp; uint32_t *hw = &fp->insn[fpc->inst_offset]; uint32_t sr = 0; - switch (src.type) { + switch (src.reg.type) { case NVFXSR_INPUT: sr |= (NVFX_FP_REG_TYPE_INPUT << NVFX_FP_REG_TYPE_SHIFT); - hw[0] |= (src.index << NVFX_FP_OP_INPUT_SRC_SHIFT); + hw[0] |= (src.reg.index << NVFX_FP_OP_INPUT_SRC_SHIFT); break; case NVFXSR_OUTPUT: sr |= NVFX_FP_REG_SRC_HALF; /* fall-through */ case NVFXSR_TEMP: sr |= (NVFX_FP_REG_TYPE_TEMP << NVFX_FP_REG_TYPE_SHIFT); - sr |= (src.index << NVFX_FP_REG_SRC_SHIFT); + sr |= (src.reg.index << NVFX_FP_REG_SRC_SHIFT); + break; + case NVFXSR_RELOCATED: + sr |= (NVFX_FP_REG_TYPE_TEMP << NVFX_FP_REG_TYPE_SHIFT); + sr |= (fpc->sprite_coord_temp << NVFX_FP_REG_SRC_SHIFT); + //printf("adding relocation at %x for %x\n", fpc->inst_offset, src.index); + util_dynarray_append(&fpc->fp->slot_relocations[src.reg.index], unsigned, fpc->inst_offset + pos + 1); + break; + case NVFXSR_IMM: + if (!fpc->have_const) { + grow_insns(fpc, 4); + hw = &fp->insn[fpc->inst_offset]; + fpc->have_const = 1; + } + + memcpy(&fp->insn[fpc->inst_offset + 4], + (float*)fpc->imm_data.data + src.reg.index * 4, + sizeof(uint32_t) * 4); + + sr |= (NVFX_FP_REG_TYPE_CONST << NVFX_FP_REG_TYPE_SHIFT); break; case NVFXSR_CONST: if (!fpc->have_const) { grow_insns(fpc, 4); + hw = &fp->insn[fpc->inst_offset]; fpc->have_const = 1; } - hw = &fp->insn[fpc->inst_offset]; - if (fpc->consts[src.index].pipe >= 0) { + { struct nvfx_fragment_program_data *fpd; fp->consts = realloc(fp->consts, ++fp->nr_consts * sizeof(*fpd)); fpd = &fp->consts[fp->nr_consts - 1]; fpd->offset = fpc->inst_offset + 4; - fpd->index = fpc->consts[src.index].pipe; + fpd->index = src.reg.index; memset(&fp->insn[fpd->offset], 0, sizeof(uint32_t) * 4); - } else { - memcpy(&fp->insn[fpc->inst_offset + 4], - fpc->consts[src.index].vals, - sizeof(uint32_t) * 4); } sr |= (NVFX_FP_REG_TYPE_CONST << NVFX_FP_REG_TYPE_SHIFT); @@ -156,7 +165,7 @@ emit_src(struct nvfx_fpc *fpc, int pos, struct nvfx_sreg src) } static void -emit_dst(struct nvfx_fpc *fpc, struct nvfx_sreg dst) +emit_dst(struct nvfx_fpc *fpc, struct nvfx_reg dst) { struct nvfx_fragment_program *fp = fpc->fp; uint32_t *hw = &fp->insn[fpc->inst_offset]; @@ -184,9 +193,7 @@ emit_dst(struct nvfx_fpc *fpc, struct nvfx_sreg dst) } static void -nvfx_fp_arith(struct nvfx_fpc *fpc, int sat, int op, - struct nvfx_sreg dst, int mask, - struct nvfx_sreg s0, struct nvfx_sreg s1, struct nvfx_sreg s2) +nvfx_fp_emit(struct nvfx_fpc *fpc, struct nvfx_insn insn) { struct nvfx_fragment_program *fp = fpc->fp; uint32_t *hw; @@ -197,68 +204,225 @@ nvfx_fp_arith(struct nvfx_fpc *fpc, int sat, int op, hw = &fp->insn[fpc->inst_offset]; memset(hw, 0, sizeof(uint32_t) * 4); - if (op == NVFX_FP_OP_OPCODE_KIL) - fp->fp_control |= NV34TCL_FP_CONTROL_USES_KIL; - hw[0] |= (op << NVFX_FP_OP_OPCODE_SHIFT); - hw[0] |= (mask << NVFX_FP_OP_OUTMASK_SHIFT); - hw[2] |= (dst.dst_scale << NVFX_FP_OP_DST_SCALE_SHIFT); + if (insn.op == NVFX_FP_OP_OPCODE_KIL) + fp->fp_control |= NV30_3D_FP_CONTROL_USES_KIL; + hw[0] |= (insn.op << NVFX_FP_OP_OPCODE_SHIFT); + hw[0] |= (insn.mask << NVFX_FP_OP_OUTMASK_SHIFT); + hw[2] |= (insn.scale << NVFX_FP_OP_DST_SCALE_SHIFT); - if (sat) + if (insn.sat) hw[0] |= NVFX_FP_OP_OUT_SAT; - if (dst.cc_update) + if (insn.cc_update) hw[0] |= NVFX_FP_OP_COND_WRITE_ENABLE; - hw[1] |= (dst.cc_test << NVFX_FP_OP_COND_SHIFT); - hw[1] 
|= ((dst.cc_swz[0] << NVFX_FP_OP_COND_SWZ_X_SHIFT) |
-		    (dst.cc_swz[1] << NVFX_FP_OP_COND_SWZ_Y_SHIFT) |
-		    (dst.cc_swz[2] << NVFX_FP_OP_COND_SWZ_Z_SHIFT) |
-		    (dst.cc_swz[3] << NVFX_FP_OP_COND_SWZ_W_SHIFT));
-
-	emit_dst(fpc, dst);
-	emit_src(fpc, 0, s0);
-	emit_src(fpc, 1, s1);
-	emit_src(fpc, 2, s2);
+	hw[1] |= (insn.cc_test << NVFX_FP_OP_COND_SHIFT);
+	hw[1] |= ((insn.cc_swz[0] << NVFX_FP_OP_COND_SWZ_X_SHIFT) |
+		  (insn.cc_swz[1] << NVFX_FP_OP_COND_SWZ_Y_SHIFT) |
+		  (insn.cc_swz[2] << NVFX_FP_OP_COND_SWZ_Z_SHIFT) |
+		  (insn.cc_swz[3] << NVFX_FP_OP_COND_SWZ_W_SHIFT));
+
+	if(insn.unit >= 0)
+	{
+		hw[0] |= (insn.unit << NVFX_FP_OP_TEX_UNIT_SHIFT);
+		fp->samplers |= (1 << insn.unit);
+	}
+
+	emit_dst(fpc, insn.dst);
+	emit_src(fpc, 0, insn.src[0]);
+	emit_src(fpc, 1, insn.src[1]);
+	emit_src(fpc, 2, insn.src[2]);
 }
 
+#define arith(s,o,d,m,s0,s1,s2) \
+	nvfx_insn((s), NVFX_FP_OP_OPCODE_##o, -1, \
+	(d), (m), (s0), (s1), (s2))
+
+#define tex(s,o,u,d,m,s0,s1,s2) \
+	nvfx_insn((s), NVFX_FP_OP_OPCODE_##o, (u), \
+	(d), (m), (s0), none, none)
+
+/* IF src.x != 0, as TGSI specifies */
 static void
-nvfx_fp_tex(struct nvfx_fpc *fpc, int sat, int op, int unit,
-	    struct nvfx_sreg dst, int mask,
-	    struct nvfx_sreg s0, struct nvfx_sreg s1, struct nvfx_sreg s2)
+nv40_fp_if(struct nvfx_fpc *fpc, struct nvfx_src src)
 {
-	struct nvfx_fragment_program *fp = fpc->fp;
+	const struct nvfx_src none = nvfx_src(nvfx_reg(NVFXSR_NONE, 0));
+	struct nvfx_insn insn = arith(0, MOV, none.reg, NVFX_FP_MASK_X, src, none, none);
+	uint32_t *hw;
+	insn.cc_update = 1;
+	nvfx_fp_emit(fpc, insn);
 
-	nvfx_fp_arith(fpc, sat, op, dst, mask, s0, s1, s2);
+	fpc->inst_offset = fpc->fp->insn_len;
+	grow_insns(fpc, 4);
+	hw = &fpc->fp->insn[fpc->inst_offset];
+	/* I really wonder why fp16 precision is used. Presumably the hardware ignores it? */
+	hw[0] = (NV40_FP_OP_BRA_OPCODE_IF << NVFX_FP_OP_OPCODE_SHIFT) |
+		NV40_FP_OP_OUT_NONE |
+		(NVFX_FP_PRECISION_FP16 << NVFX_FP_OP_PRECISION_SHIFT);
+	/* use a .xxxx swizzle so that only src[0].x is checked */
+	hw[1] = (0 << NVFX_FP_OP_COND_SWZ_X_SHIFT) |
+		(0 << NVFX_FP_OP_COND_SWZ_Y_SHIFT) |
+		(0 << NVFX_FP_OP_COND_SWZ_Z_SHIFT) |
+		(0 << NVFX_FP_OP_COND_SWZ_W_SHIFT) |
+		(NVFX_FP_OP_COND_NE << NVFX_FP_OP_COND_SHIFT);
+	hw[2] = 0; /* | NV40_FP_OP_OPCODE_IS_BRANCH | else_offset */
+	hw[3] = 0; /* | endif_offset */
+	util_dynarray_append(&fpc->if_stack, unsigned, fpc->inst_offset);
+}
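nv40_fp_if() cannot know the else/endif addresses when it emits the branch, so it leaves hw[2]/hw[3] zeroed and pushes the instruction's offset onto fpc->if_stack; the TGSI_OPCODE_ELSE and TGSI_OPCODE_ENDIF cases further down consult that entry (ELSE peeks at the top, ENDIF pops it) and patch the two target words once fp->insn_len has reached them. A stripped-down sketch of the same backpatching pattern (hypothetical buffers; the real code also ORs in NV40_FP_OP_OPCODE_IS_BRANCH):

    #include <stdint.h>

    static uint32_t code[1024];
    static unsigned if_stack[64], if_top, pc;

    static void emit_if(void)
    {
            code[pc + 2] = 0;                      /* else offset, unknown yet */
            code[pc + 3] = 0;                      /* endif offset, unknown yet */
            if_stack[if_top++] = pc;
            pc += 4;                               /* each instruction is four words */
    }

    static void emit_else(void)
    {
            code[if_stack[if_top - 1] + 2] = pc;   /* taken when the test fails */
    }

    static void emit_endif(void)
    {
            unsigned at = if_stack[--if_top];
            if (!code[at + 2])
                    code[at + 2] = pc;             /* IF with no ELSE */
            code[at + 3] = pc;
    }

 
-	fp->insn[fpc->inst_offset] |= (unit << NVFX_FP_OP_TEX_UNIT_SHIFT);
-	fp->samplers |= (1 << unit);
+/* unconditional CAL; the target offset is patched in by the label relocation pass */
+static void
+nv40_fp_cal(struct nvfx_fpc *fpc, unsigned target)
+{
+	struct nvfx_relocation reloc;
+	uint32_t *hw;
+	fpc->inst_offset = fpc->fp->insn_len;
+	grow_insns(fpc, 4);
+	hw = &fpc->fp->insn[fpc->inst_offset];
+	/* I really wonder why fp16 precision is used. Presumably the hardware ignores it? 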
*/
+	hw[0] = (NV40_FP_OP_BRA_OPCODE_CAL << NVFX_FP_OP_OPCODE_SHIFT);
+	/* the condition is always true, so the swizzle does not matter */
+	hw[1] = (NVFX_SWZ_IDENTITY << NVFX_FP_OP_COND_SWZ_ALL_SHIFT) |
+		(NVFX_FP_OP_COND_TR << NVFX_FP_OP_COND_SHIFT);
+	hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH; /* | call_offset */
+	hw[3] = 0;
+	reloc.target = target;
+	reloc.location = fpc->inst_offset + 2;
+	util_dynarray_append(&fpc->label_relocs, struct nvfx_relocation, reloc);
 }
-static INLINE struct nvfx_sreg
+static void
+nv40_fp_ret(struct nvfx_fpc *fpc)
+{
+	uint32_t *hw;
+	fpc->inst_offset = fpc->fp->insn_len;
+	grow_insns(fpc, 4);
+	hw = &fpc->fp->insn[fpc->inst_offset];
+	/* I really wonder why fp16 precision is used. Presumably the hardware ignores it? */
+	hw[0] = (NV40_FP_OP_BRA_OPCODE_RET << NVFX_FP_OP_OPCODE_SHIFT);
+	/* the condition is always true, so the swizzle does not matter */
+	hw[1] = (NVFX_SWZ_IDENTITY << NVFX_FP_OP_COND_SWZ_ALL_SHIFT) |
+		(NVFX_FP_OP_COND_TR << NVFX_FP_OP_COND_SHIFT);
+	hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH; /* RET takes no target offset */
+	hw[3] = 0;
+}
+
+static void
+nv40_fp_rep(struct nvfx_fpc *fpc, unsigned count, unsigned target)
+{
+	struct nvfx_relocation reloc;
+	uint32_t *hw;
+	fpc->inst_offset = fpc->fp->insn_len;
+	grow_insns(fpc, 4);
+	hw = &fpc->fp->insn[fpc->inst_offset];
+	/* I really wonder why fp16 precision is used. Presumably the hardware ignores it? */
+	hw[0] = (NV40_FP_OP_BRA_OPCODE_REP << NVFX_FP_OP_OPCODE_SHIFT) |
+		NV40_FP_OP_OUT_NONE |
+		(NVFX_FP_PRECISION_FP16 << NVFX_FP_OP_PRECISION_SHIFT);
+	/* the condition is always true, so the swizzle does not matter */
+	hw[1] = (NVFX_SWZ_IDENTITY << NVFX_FP_OP_COND_SWZ_ALL_SHIFT) |
+		(NVFX_FP_OP_COND_TR << NVFX_FP_OP_COND_SHIFT);
+	hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH |
+		(count << NV40_FP_OP_REP_COUNT1_SHIFT) |
+		(count << NV40_FP_OP_REP_COUNT2_SHIFT) |
+		(count << NV40_FP_OP_REP_COUNT3_SHIFT);
+	hw[3] = 0; /* | end_offset */
+	reloc.target = target;
+	reloc.location = fpc->inst_offset + 3;
+	util_dynarray_append(&fpc->label_relocs, struct nvfx_relocation, reloc);
+	//util_dynarray_append(&fpc->loop_stack, unsigned, target);
+}
+
+/* warning: this only works forward, and probably only if not inside any IF */
+static void
+nv40_fp_bra(struct nvfx_fpc *fpc, unsigned target)
+{
+	struct nvfx_relocation reloc;
+	uint32_t *hw;
+	fpc->inst_offset = fpc->fp->insn_len;
+	grow_insns(fpc, 4);
+	hw = &fpc->fp->insn[fpc->inst_offset];
+	/* I really wonder why fp16 precision is used. Presumably the hardware ignores it? */
+	hw[0] = (NV40_FP_OP_BRA_OPCODE_IF << NVFX_FP_OP_OPCODE_SHIFT) |
+		NV40_FP_OP_OUT_NONE |
+		(NVFX_FP_PRECISION_FP16 << NVFX_FP_OP_PRECISION_SHIFT);
+	/* the condition is always false, so we always branch to the else/endif targets */
+	hw[1] = (NVFX_SWZ_IDENTITY << NVFX_FP_OP_COND_SWZ_X_SHIFT) |
+		(NVFX_FP_OP_COND_FL << NVFX_FP_OP_COND_SHIFT);
+	hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH; /* | else_offset */
+	hw[3] = 0; /* | endif_offset */
+	reloc.target = target;
+	reloc.location = fpc->inst_offset + 2;
+	util_dynarray_append(&fpc->label_relocs, struct nvfx_relocation, reloc);
+	reloc.target = target;
+	reloc.location = fpc->inst_offset + 3;
+	util_dynarray_append(&fpc->label_relocs, struct nvfx_relocation, reloc);
+}
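CAL, REP and the (unused) BRA path all branch to instructions that have not been emitted yet, so instead of a real address each one appends an nvfx_relocation, with location naming the code word to patch and target naming the TGSI label, to fpc->label_relocs; nvfx_fragprog_translate() resolves them in a second pass once the code offset of every source instruction is known. A compact sketch of that fixup pass (assumed plain types, not the driver's):

    #include <stdint.h>

    struct label_reloc_sketch {
            unsigned location;   /* word of the code to patch */
            unsigned target;     /* TGSI instruction label */
    };

    static void resolve_labels(uint32_t *insn,
                               const struct label_reloc_sketch *r, unsigned n,
                               const unsigned *label_to_offset)
    {
            /* mirrors: fp->insn[reloc->location] |= insns[reloc->target] */
            for (unsigned i = 0; i < n; ++i)
                    insn[r[i].location] |= label_to_offset[r[i].target];
    }

+
+static void
+nv40_fp_brk(struct nvfx_fpc *fpc)
+{
+	uint32_t *hw;
+	fpc->inst_offset = fpc->fp->insn_len;
+	grow_insns(fpc, 4);
+	hw = &fpc->fp->insn[fpc->inst_offset];
+	/* I really wonder why fp16 precision is used. Presumably the hardware ignores it? 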
*/ + hw[0] = (NV40_FP_OP_BRA_OPCODE_BRK << NVFX_FP_OP_OPCODE_SHIFT) | + NV40_FP_OP_OUT_NONE; + /* Use .xxxx swizzle so that we check only src[0].x*/ + hw[1] = (NVFX_SWZ_IDENTITY << NVFX_FP_OP_COND_SWZ_X_SHIFT) | + (NVFX_FP_OP_COND_TR << NVFX_FP_OP_COND_SHIFT); + hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH; + hw[3] = 0; +} + +static INLINE struct nvfx_src tgsi_src(struct nvfx_fpc *fpc, const struct tgsi_full_src_register *fsrc) { - struct nvfx_sreg src = { 0 }; + struct nvfx_src src; switch (fsrc->Register.File) { case TGSI_FILE_INPUT: - src = nvfx_sr(NVFXSR_INPUT, - fpc->attrib_map[fsrc->Register.Index]); + if(fpc->pfp->info.input_semantic_name[fsrc->Register.Index] == TGSI_SEMANTIC_POSITION) { + assert(fpc->pfp->info.input_semantic_index[fsrc->Register.Index] == 0); + src.reg = nvfx_reg(NVFXSR_INPUT, NVFX_FP_OP_INPUT_SRC_POSITION); + } else if(fpc->pfp->info.input_semantic_name[fsrc->Register.Index] == TGSI_SEMANTIC_COLOR) { + if(fpc->pfp->info.input_semantic_index[fsrc->Register.Index] == 0) + src.reg = nvfx_reg(NVFXSR_INPUT, NVFX_FP_OP_INPUT_SRC_COL0); + else if(fpc->pfp->info.input_semantic_index[fsrc->Register.Index] == 1) + src.reg = nvfx_reg(NVFXSR_INPUT, NVFX_FP_OP_INPUT_SRC_COL1); + else + assert(0); + } else if(fpc->pfp->info.input_semantic_name[fsrc->Register.Index] == TGSI_SEMANTIC_FOG) { + assert(fpc->pfp->info.input_semantic_index[fsrc->Register.Index] == 0); + src.reg = nvfx_reg(NVFXSR_INPUT, NVFX_FP_OP_INPUT_SRC_FOGC); + } else if(fpc->pfp->info.input_semantic_name[fsrc->Register.Index] == TGSI_SEMANTIC_FACE) { + /* TODO: check this has the correct values */ + /* XXX: what do we do for nv30 here (assuming it lacks facing)?! */ + assert(fpc->pfp->info.input_semantic_index[fsrc->Register.Index] == 0); + src.reg = nvfx_reg(NVFXSR_INPUT, NV40_FP_OP_INPUT_SRC_FACING); + } else { + assert(fpc->pfp->info.input_semantic_name[fsrc->Register.Index] == TGSI_SEMANTIC_GENERIC); + src.reg = nvfx_reg(NVFXSR_RELOCATED, fpc->generic_to_slot[fpc->pfp->info.input_semantic_index[fsrc->Register.Index]]); + } break; case TGSI_FILE_CONSTANT: - src = constant(fpc, fsrc->Register.Index, NULL); + src.reg = nvfx_reg(NVFXSR_CONST, fsrc->Register.Index); break; case TGSI_FILE_IMMEDIATE: assert(fsrc->Register.Index < fpc->nr_imm); - src = fpc->imm[fsrc->Register.Index]; + src.reg = fpc->r_imm[fsrc->Register.Index]; break; case TGSI_FILE_TEMPORARY: - src = fpc->r_temp[fsrc->Register.Index]; + src.reg = fpc->r_temp[fsrc->Register.Index]; break; /* NV40 fragprog result regs are just temps, so this is simple */ case TGSI_FILE_OUTPUT: - src = fpc->r_result[fsrc->Register.Index]; + src.reg = fpc->r_result[fsrc->Register.Index]; break; default: NOUVEAU_ERR("bad src file\n"); + src.reg.index = 0; + src.reg.type = 0; break; } @@ -268,10 +432,13 @@ tgsi_src(struct nvfx_fpc *fpc, const struct tgsi_full_src_register *fsrc) src.swz[1] = fsrc->Register.SwizzleY; src.swz[2] = fsrc->Register.SwizzleZ; src.swz[3] = fsrc->Register.SwizzleW; + src.indirect = 0; + src.indirect_reg = 0; + src.indirect_swz = 0; return src; } -static INLINE struct nvfx_sreg +static INLINE struct nvfx_reg tgsi_dst(struct nvfx_fpc *fpc, const struct tgsi_full_dst_register *fdst) { switch (fdst->Register.File) { case TGSI_FILE_OUTPUT: @@ -279,10 +446,10 @@ tgsi_dst(struct nvfx_fpc *fpc, const struct tgsi_full_dst_register *fdst) { case TGSI_FILE_TEMPORARY: return fpc->r_temp[fdst->Register.Index]; case TGSI_FILE_NULL: - return nvfx_sr(NVFXSR_NONE, 0); + return nvfx_reg(NVFXSR_NONE, 0); default: NOUVEAU_ERR("bad dst file %d\n", fdst->Register.File); - 
return nvfx_sr(NVFXSR_NONE, 0); + return nvfx_reg(NVFXSR_NONE, 0); } } @@ -302,8 +469,10 @@ static boolean nvfx_fragprog_parse_instruction(struct nvfx_context* nvfx, struct nvfx_fpc *fpc, const struct tgsi_full_instruction *finst) { - const struct nvfx_sreg none = nvfx_sr(NVFXSR_NONE, 0); - struct nvfx_sreg src[3], dst, tmp; + const struct nvfx_src none = nvfx_src(nvfx_reg(NVFXSR_NONE, 0)); + struct nvfx_insn insn; + struct nvfx_src src[3], tmp; + struct nvfx_reg dst; int mask, sat, unit = 0; int ai = -1, ci = -1, ii = -1; int i; @@ -327,13 +496,26 @@ nvfx_fragprog_parse_instruction(struct nvfx_context* nvfx, struct nvfx_fpc *fpc, switch (fsrc->Register.File) { case TGSI_FILE_INPUT: - if (ai == -1 || ai == fsrc->Register.Index) { + if(fpc->pfp->info.input_semantic_name[fsrc->Register.Index] == TGSI_SEMANTIC_FOG && (0 + || fsrc->Register.SwizzleX == PIPE_SWIZZLE_ALPHA + || fsrc->Register.SwizzleY == PIPE_SWIZZLE_ALPHA + || fsrc->Register.SwizzleZ == PIPE_SWIZZLE_ALPHA + || fsrc->Register.SwizzleW == PIPE_SWIZZLE_ALPHA + )) { + /* hardware puts 0 in fogcoord.w, but GL/Gallium want 1 there */ + struct nvfx_src addend = nvfx_src(nvfx_fp_imm(fpc, 0, 0, 0, 1)); + addend.swz[0] = fsrc->Register.SwizzleX; + addend.swz[1] = fsrc->Register.SwizzleY; + addend.swz[2] = fsrc->Register.SwizzleZ; + addend.swz[3] = fsrc->Register.SwizzleW; + src[i] = nvfx_src(temp(fpc)); + nvfx_fp_emit(fpc, arith(0, ADD, src[i].reg, NVFX_FP_MASK_ALL, tgsi_src(fpc, fsrc), addend, none)); + } else if (ai == -1 || ai == fsrc->Register.Index) { ai = fsrc->Register.Index; src[i] = tgsi_src(fpc, fsrc); } else { - src[i] = temp(fpc); - arith(fpc, 0, MOV, src[i], NVFX_FP_MASK_ALL, - tgsi_src(fpc, fsrc), none, none); + src[i] = nvfx_src(temp(fpc)); + nvfx_fp_emit(fpc, arith(0, MOV, src[i].reg, NVFX_FP_MASK_ALL, tgsi_src(fpc, fsrc), none, none)); } break; case TGSI_FILE_CONSTANT: @@ -342,9 +524,8 @@ nvfx_fragprog_parse_instruction(struct nvfx_context* nvfx, struct nvfx_fpc *fpc, ci = fsrc->Register.Index; src[i] = tgsi_src(fpc, fsrc); } else { - src[i] = temp(fpc); - arith(fpc, 0, MOV, src[i], NVFX_FP_MASK_ALL, - tgsi_src(fpc, fsrc), none, none); + src[i] = nvfx_src(temp(fpc)); + nvfx_fp_emit(fpc, arith(0, MOV, src[i].reg, NVFX_FP_MASK_ALL, tgsi_src(fpc, fsrc), none, none)); } break; case TGSI_FILE_IMMEDIATE: @@ -353,9 +534,8 @@ nvfx_fragprog_parse_instruction(struct nvfx_context* nvfx, struct nvfx_fpc *fpc, ii = fsrc->Register.Index; src[i] = tgsi_src(fpc, fsrc); } else { - src[i] = temp(fpc); - arith(fpc, 0, MOV, src[i], NVFX_FP_MASK_ALL, - tgsi_src(fpc, fsrc), none, none); + src[i] = nvfx_src(temp(fpc)); + nvfx_fp_emit(fpc, arith(0, MOV, src[i].reg, NVFX_FP_MASK_ALL, tgsi_src(fpc, fsrc), none, none)); } break; case TGSI_FILE_TEMPORARY: @@ -378,277 +558,377 @@ nvfx_fragprog_parse_instruction(struct nvfx_context* nvfx, struct nvfx_fpc *fpc, switch (finst->Instruction.Opcode) { case TGSI_OPCODE_ABS: - arith(fpc, sat, MOV, dst, mask, abs(src[0]), none, none); + nvfx_fp_emit(fpc, arith(sat, MOV, dst, mask, abs(src[0]), none, none)); break; case TGSI_OPCODE_ADD: - arith(fpc, sat, ADD, dst, mask, src[0], src[1], none); + nvfx_fp_emit(fpc, arith(sat, ADD, dst, mask, src[0], src[1], none)); break; case TGSI_OPCODE_CMP: - tmp = nvfx_sr(NVFXSR_NONE, 0); - tmp.cc_update = 1; - arith(fpc, 0, MOV, tmp, 0xf, src[0], none, none); - dst.cc_test = NVFX_COND_GE; - arith(fpc, sat, MOV, dst, mask, src[2], none, none); - dst.cc_test = NVFX_COND_LT; - arith(fpc, sat, MOV, dst, mask, src[1], none, none); + insn = arith(0, MOV, none.reg, mask, src[0], 
none, none); + insn.cc_update = 1; + nvfx_fp_emit(fpc, insn); + + insn = arith(sat, MOV, dst, mask, src[2], none, none); + insn.cc_test = NVFX_COND_GE; + nvfx_fp_emit(fpc, insn); + + insn = arith(sat, MOV, dst, mask, src[1], none, none); + insn.cc_test = NVFX_COND_LT; + nvfx_fp_emit(fpc, insn); break; case TGSI_OPCODE_COS: - arith(fpc, sat, COS, dst, mask, src[0], none, none); + nvfx_fp_emit(fpc, arith(sat, COS, dst, mask, src[0], none, none)); break; case TGSI_OPCODE_DDX: if (mask & (NVFX_FP_MASK_Z | NVFX_FP_MASK_W)) { - tmp = temp(fpc); - arith(fpc, sat, DDX, tmp, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, - swz(src[0], Z, W, Z, W), none, none); - arith(fpc, 0, MOV, tmp, NVFX_FP_MASK_Z | NVFX_FP_MASK_W, - swz(tmp, X, Y, X, Y), none, none); - arith(fpc, sat, DDX, tmp, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, src[0], - none, none); - arith(fpc, 0, MOV, dst, mask, tmp, none, none); + tmp = nvfx_src(temp(fpc)); + nvfx_fp_emit(fpc, arith(sat, DDX, tmp.reg, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, swz(src[0], Z, W, Z, W), none, none)); + nvfx_fp_emit(fpc, arith(0, MOV, tmp.reg, NVFX_FP_MASK_Z | NVFX_FP_MASK_W, swz(tmp, X, Y, X, Y), none, none)); + nvfx_fp_emit(fpc, arith(sat, DDX, tmp.reg, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, src[0], none, none)); + nvfx_fp_emit(fpc, arith(0, MOV, dst, mask, tmp, none, none)); } else { - arith(fpc, sat, DDX, dst, mask, src[0], none, none); + nvfx_fp_emit(fpc, arith(sat, DDX, dst, mask, src[0], none, none)); } break; case TGSI_OPCODE_DDY: if (mask & (NVFX_FP_MASK_Z | NVFX_FP_MASK_W)) { - tmp = temp(fpc); - arith(fpc, sat, DDY, tmp, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, - swz(src[0], Z, W, Z, W), none, none); - arith(fpc, 0, MOV, tmp, NVFX_FP_MASK_Z | NVFX_FP_MASK_W, - swz(tmp, X, Y, X, Y), none, none); - arith(fpc, sat, DDY, tmp, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, src[0], - none, none); - arith(fpc, 0, MOV, dst, mask, tmp, none, none); + tmp = nvfx_src(temp(fpc)); + nvfx_fp_emit(fpc, arith(sat, DDY, tmp.reg, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, swz(src[0], Z, W, Z, W), none, none)); + nvfx_fp_emit(fpc, arith(0, MOV, tmp.reg, NVFX_FP_MASK_Z | NVFX_FP_MASK_W, swz(tmp, X, Y, X, Y), none, none)); + nvfx_fp_emit(fpc, arith(sat, DDY, tmp.reg, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, src[0], none, none)); + nvfx_fp_emit(fpc, arith(0, MOV, dst, mask, tmp, none, none)); } else { - arith(fpc, sat, DDY, dst, mask, src[0], none, none); + nvfx_fp_emit(fpc, arith(sat, DDY, dst, mask, src[0], none, none)); } break; + case TGSI_OPCODE_DP2: + tmp = nvfx_src(temp(fpc)); + nvfx_fp_emit(fpc, arith(0, MUL, tmp.reg, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, src[0], src[1], none)); + nvfx_fp_emit(fpc, arith(0, ADD, dst, mask, swz(tmp, X, X, X, X), swz(tmp, Y, Y, Y, Y), none)); + break; case TGSI_OPCODE_DP3: - arith(fpc, sat, DP3, dst, mask, src[0], src[1], none); + nvfx_fp_emit(fpc, arith(sat, DP3, dst, mask, src[0], src[1], none)); break; case TGSI_OPCODE_DP4: - arith(fpc, sat, DP4, dst, mask, src[0], src[1], none); + nvfx_fp_emit(fpc, arith(sat, DP4, dst, mask, src[0], src[1], none)); break; case TGSI_OPCODE_DPH: - tmp = temp(fpc); - arith(fpc, 0, DP3, tmp, NVFX_FP_MASK_X, src[0], src[1], none); - arith(fpc, sat, ADD, dst, mask, swz(tmp, X, X, X, X), - swz(src[1], W, W, W, W), none); + tmp = nvfx_src(temp(fpc)); + nvfx_fp_emit(fpc, arith(0, DP3, tmp.reg, NVFX_FP_MASK_X, src[0], src[1], none)); + nvfx_fp_emit(fpc, arith(sat, ADD, dst, mask, swz(tmp, X, X, X, X), swz(src[1], W, W, W, W), none)); break; case TGSI_OPCODE_DST: - arith(fpc, sat, DST, dst, mask, src[0], src[1], none); + nvfx_fp_emit(fpc, arith(sat, DST, dst, mask, src[0], 
src[1], none)); break; case TGSI_OPCODE_EX2: - arith(fpc, sat, EX2, dst, mask, src[0], none, none); + nvfx_fp_emit(fpc, arith(sat, EX2, dst, mask, src[0], none, none)); break; case TGSI_OPCODE_FLR: - arith(fpc, sat, FLR, dst, mask, src[0], none, none); + nvfx_fp_emit(fpc, arith(sat, FLR, dst, mask, src[0], none, none)); break; case TGSI_OPCODE_FRC: - arith(fpc, sat, FRC, dst, mask, src[0], none, none); + nvfx_fp_emit(fpc, arith(sat, FRC, dst, mask, src[0], none, none)); break; case TGSI_OPCODE_KILP: - arith(fpc, 0, KIL, none, 0, none, none, none); + nvfx_fp_emit(fpc, arith(0, KIL, none.reg, 0, none, none, none)); break; case TGSI_OPCODE_KIL: - dst = nvfx_sr(NVFXSR_NONE, 0); - dst.cc_update = 1; - arith(fpc, 0, MOV, dst, NVFX_FP_MASK_ALL, src[0], none, none); - dst.cc_update = 0; dst.cc_test = NVFX_COND_LT; - arith(fpc, 0, KIL, dst, 0, none, none, none); + insn = arith(0, MOV, none.reg, NVFX_FP_MASK_ALL, src[0], none, none); + insn.cc_update = 1; + nvfx_fp_emit(fpc, insn); + + insn = arith(0, KIL, none.reg, 0, none, none, none); + insn.cc_test = NVFX_COND_LT; + nvfx_fp_emit(fpc, insn); break; case TGSI_OPCODE_LG2: - arith(fpc, sat, LG2, dst, mask, src[0], none, none); + nvfx_fp_emit(fpc, arith(sat, LG2, dst, mask, src[0], none, none)); + break; + case TGSI_OPCODE_LIT: + if(!nvfx->is_nv4x) + nvfx_fp_emit(fpc, arith(sat, LIT_NV30, dst, mask, src[0], src[1], src[2])); + else { + /* we use FLT_MIN, so that log2 never gives -infinity, and thus multiplication by + * specular 0 always gives 0, so that ex2 gives 1, to satisfy the 0^0 = 1 requirement + * + * NOTE: if we start using half precision, we might need an fp16 FLT_MIN here instead + */ + struct nvfx_src maxs = nvfx_src(nvfx_fp_imm(fpc, 0, FLT_MIN, 0, 0)); + tmp = nvfx_src(temp(fpc)); + if (ci>= 0 || ii >= 0) { + nvfx_fp_emit(fpc, arith(0, MOV, tmp.reg, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, maxs, none, none)); + maxs = tmp; + } + nvfx_fp_emit(fpc, arith(0, MAX, tmp.reg, NVFX_FP_MASK_Y | NVFX_FP_MASK_W, swz(src[0], X, X, X, Y), swz(maxs, X, X, Y, Y), none)); + nvfx_fp_emit(fpc, arith(0, LG2, tmp.reg, NVFX_FP_MASK_W, swz(tmp, W, W, W, W), none, none)); + nvfx_fp_emit(fpc, arith(0, MUL, tmp.reg, NVFX_FP_MASK_W, swz(tmp, W, W, W, W), swz(src[0], W, W, W, W), none)); + nvfx_fp_emit(fpc, arith(sat, LITEX2_NV40, dst, mask, swz(tmp, Y, Y, W, W), none, none)); + } break; -// case TGSI_OPCODE_LIT: case TGSI_OPCODE_LRP: if(!nvfx->is_nv4x) - arith(fpc, sat, LRP_NV30, dst, mask, src[0], src[1], src[2]); + nvfx_fp_emit(fpc, arith(sat, LRP_NV30, dst, mask, src[0], src[1], src[2])); else { - tmp = temp(fpc); - arith(fpc, 0, MAD, tmp, mask, neg(src[0]), src[2], src[2]); - arith(fpc, sat, MAD, dst, mask, src[0], src[1], tmp); + tmp = nvfx_src(temp(fpc)); + nvfx_fp_emit(fpc, arith(0, MAD, tmp.reg, mask, neg(src[0]), src[2], src[2])); + nvfx_fp_emit(fpc, arith(sat, MAD, dst, mask, src[0], src[1], tmp)); } break; case TGSI_OPCODE_MAD: - arith(fpc, sat, MAD, dst, mask, src[0], src[1], src[2]); + nvfx_fp_emit(fpc, arith(sat, MAD, dst, mask, src[0], src[1], src[2])); break; case TGSI_OPCODE_MAX: - arith(fpc, sat, MAX, dst, mask, src[0], src[1], none); + nvfx_fp_emit(fpc, arith(sat, MAX, dst, mask, src[0], src[1], none)); break; case TGSI_OPCODE_MIN: - arith(fpc, sat, MIN, dst, mask, src[0], src[1], none); + nvfx_fp_emit(fpc, arith(sat, MIN, dst, mask, src[0], src[1], none)); break; case TGSI_OPCODE_MOV: - arith(fpc, sat, MOV, dst, mask, src[0], none, none); + nvfx_fp_emit(fpc, arith(sat, MOV, dst, mask, src[0], none, none)); break; case TGSI_OPCODE_MUL: - arith(fpc, 
sat, MUL, dst, mask, src[0], src[1], none); + nvfx_fp_emit(fpc, arith(sat, MUL, dst, mask, src[0], src[1], none)); + break; + case TGSI_OPCODE_NOP: break; case TGSI_OPCODE_POW: if(!nvfx->is_nv4x) - arith(fpc, sat, POW_NV30, dst, mask, src[0], src[1], none); + nvfx_fp_emit(fpc, arith(sat, POW_NV30, dst, mask, src[0], src[1], none)); else { - tmp = temp(fpc); - arith(fpc, 0, LG2, tmp, NVFX_FP_MASK_X, - swz(src[0], X, X, X, X), none, none); - arith(fpc, 0, MUL, tmp, NVFX_FP_MASK_X, swz(tmp, X, X, X, X), - swz(src[1], X, X, X, X), none); - arith(fpc, sat, EX2, dst, mask, - swz(tmp, X, X, X, X), none, none); + tmp = nvfx_src(temp(fpc)); + nvfx_fp_emit(fpc, arith(0, LG2, tmp.reg, NVFX_FP_MASK_X, swz(src[0], X, X, X, X), none, none)); + nvfx_fp_emit(fpc, arith(0, MUL, tmp.reg, NVFX_FP_MASK_X, swz(tmp, X, X, X, X), swz(src[1], X, X, X, X), none)); + nvfx_fp_emit(fpc, arith(sat, EX2, dst, mask, swz(tmp, X, X, X, X), none, none)); } break; case TGSI_OPCODE_RCP: - arith(fpc, sat, RCP, dst, mask, src[0], none, none); - break; - case TGSI_OPCODE_RET: - assert(0); + nvfx_fp_emit(fpc, arith(sat, RCP, dst, mask, src[0], none, none)); break; case TGSI_OPCODE_RFL: if(!nvfx->is_nv4x) - arith(fpc, 0, RFL_NV30, dst, mask, src[0], src[1], none); + nvfx_fp_emit(fpc, arith(0, RFL_NV30, dst, mask, src[0], src[1], none)); else { - tmp = temp(fpc); - arith(fpc, 0, DP3, tmp, NVFX_FP_MASK_X, src[0], src[0], none); - arith(fpc, 0, DP3, tmp, NVFX_FP_MASK_Y, src[0], src[1], none); - arith(fpc, 0, DIV, scale(tmp, 2X), NVFX_FP_MASK_Z, - swz(tmp, Y, Y, Y, Y), swz(tmp, X, X, X, X), none); - arith(fpc, sat, MAD, dst, mask, - swz(tmp, Z, Z, Z, Z), src[0], neg(src[1])); + tmp = nvfx_src(temp(fpc)); + nvfx_fp_emit(fpc, arith(0, DP3, tmp.reg, NVFX_FP_MASK_X, src[0], src[0], none)); + nvfx_fp_emit(fpc, arith(0, DP3, tmp.reg, NVFX_FP_MASK_Y, src[0], src[1], none)); + insn = arith(0, DIV, tmp.reg, NVFX_FP_MASK_Z, swz(tmp, Y, Y, Y, Y), swz(tmp, X, X, X, X), none); + insn.scale = NVFX_FP_OP_DST_SCALE_2X; + nvfx_fp_emit(fpc, insn); + nvfx_fp_emit(fpc, arith(sat, MAD, dst, mask, swz(tmp, Z, Z, Z, Z), src[0], neg(src[1]))); } break; case TGSI_OPCODE_RSQ: if(!nvfx->is_nv4x) - arith(fpc, sat, RSQ_NV30, dst, mask, abs(swz(src[0], X, X, X, X)), none, none); + nvfx_fp_emit(fpc, arith(sat, RSQ_NV30, dst, mask, abs(swz(src[0], X, X, X, X)), none, none)); else { - tmp = temp(fpc); - arith(fpc, 0, LG2, scale(tmp, INV_2X), NVFX_FP_MASK_X, - abs(swz(src[0], X, X, X, X)), none, none); - arith(fpc, sat, EX2, dst, mask, - neg(swz(tmp, X, X, X, X)), none, none); + tmp = nvfx_src(temp(fpc)); + insn = arith(0, LG2, tmp.reg, NVFX_FP_MASK_X, abs(swz(src[0], X, X, X, X)), none, none); + insn.scale = NVFX_FP_OP_DST_SCALE_INV_2X; + nvfx_fp_emit(fpc, insn); + nvfx_fp_emit(fpc, arith(sat, EX2, dst, mask, neg(swz(tmp, X, X, X, X)), none, none)); } break; case TGSI_OPCODE_SCS: /* avoid overwriting the source */ if(src[0].swz[NVFX_SWZ_X] != NVFX_SWZ_X) { - if (mask & NVFX_FP_MASK_X) { - arith(fpc, sat, COS, dst, NVFX_FP_MASK_X, - swz(src[0], X, X, X, X), none, none); - } - if (mask & NVFX_FP_MASK_Y) { - arith(fpc, sat, SIN, dst, NVFX_FP_MASK_Y, - swz(src[0], X, X, X, X), none, none); - } + if (mask & NVFX_FP_MASK_X) + nvfx_fp_emit(fpc, arith(sat, COS, dst, NVFX_FP_MASK_X, swz(src[0], X, X, X, X), none, none)); + if (mask & NVFX_FP_MASK_Y) + nvfx_fp_emit(fpc, arith(sat, SIN, dst, NVFX_FP_MASK_Y, swz(src[0], X, X, X, X), none, none)); } else { - if (mask & NVFX_FP_MASK_Y) { - arith(fpc, sat, SIN, dst, NVFX_FP_MASK_Y, - swz(src[0], X, X, X, X), none, none); - } - if 
(mask & NVFX_FP_MASK_X) { - arith(fpc, sat, COS, dst, NVFX_FP_MASK_X, - swz(src[0], X, X, X, X), none, none); - } + if (mask & NVFX_FP_MASK_Y) + nvfx_fp_emit(fpc, arith(sat, SIN, dst, NVFX_FP_MASK_Y, swz(src[0], X, X, X, X), none, none)); + if (mask & NVFX_FP_MASK_X) + nvfx_fp_emit(fpc, arith(sat, COS, dst, NVFX_FP_MASK_X, swz(src[0], X, X, X, X), none, none)); } break; case TGSI_OPCODE_SEQ: - arith(fpc, sat, SEQ, dst, mask, src[0], src[1], none); + nvfx_fp_emit(fpc, arith(sat, SEQ, dst, mask, src[0], src[1], none)); break; case TGSI_OPCODE_SFL: - arith(fpc, sat, SFL, dst, mask, src[0], src[1], none); + nvfx_fp_emit(fpc, arith(sat, SFL, dst, mask, src[0], src[1], none)); break; case TGSI_OPCODE_SGE: - arith(fpc, sat, SGE, dst, mask, src[0], src[1], none); + nvfx_fp_emit(fpc, arith(sat, SGE, dst, mask, src[0], src[1], none)); break; case TGSI_OPCODE_SGT: - arith(fpc, sat, SGT, dst, mask, src[0], src[1], none); + nvfx_fp_emit(fpc, arith(sat, SGT, dst, mask, src[0], src[1], none)); break; case TGSI_OPCODE_SIN: - arith(fpc, sat, SIN, dst, mask, src[0], none, none); + nvfx_fp_emit(fpc, arith(sat, SIN, dst, mask, src[0], none, none)); break; case TGSI_OPCODE_SLE: - arith(fpc, sat, SLE, dst, mask, src[0], src[1], none); + nvfx_fp_emit(fpc, arith(sat, SLE, dst, mask, src[0], src[1], none)); break; case TGSI_OPCODE_SLT: - arith(fpc, sat, SLT, dst, mask, src[0], src[1], none); + nvfx_fp_emit(fpc, arith(sat, SLT, dst, mask, src[0], src[1], none)); break; case TGSI_OPCODE_SNE: - arith(fpc, sat, SNE, dst, mask, src[0], src[1], none); + nvfx_fp_emit(fpc, arith(sat, SNE, dst, mask, src[0], src[1], none)); + break; + case TGSI_OPCODE_SSG: + { + struct nvfx_src minones = swz(nvfx_src(nvfx_fp_imm(fpc, -1, -1, -1, -1)), X, X, X, X); + + insn = arith(sat, MOV, dst, mask, src[0], none, none); + insn.cc_update = 1; + nvfx_fp_emit(fpc, insn); + + insn = arith(0, STR, dst, mask, none, none, none); + insn.cc_test = NVFX_COND_GT; + nvfx_fp_emit(fpc, insn); + + if(!sat) { + insn = arith(0, MOV, dst, mask, minones, none, none); + insn.cc_test = NVFX_COND_LT; + nvfx_fp_emit(fpc, insn); + } break; + } case TGSI_OPCODE_STR: - arith(fpc, sat, STR, dst, mask, src[0], src[1], none); + nvfx_fp_emit(fpc, arith(sat, STR, dst, mask, src[0], src[1], none)); break; case TGSI_OPCODE_SUB: - arith(fpc, sat, ADD, dst, mask, src[0], neg(src[1]), none); + nvfx_fp_emit(fpc, arith(sat, ADD, dst, mask, src[0], neg(src[1]), none)); break; case TGSI_OPCODE_TEX: - tex(fpc, sat, TEX, unit, dst, mask, src[0], none, none); + nvfx_fp_emit(fpc, tex(sat, TEX, unit, dst, mask, src[0], none, none)); break; - case TGSI_OPCODE_TXB: - tex(fpc, sat, TXB, unit, dst, mask, src[0], none, none); + case TGSI_OPCODE_TRUNC: + tmp = nvfx_src(temp(fpc)); + insn = arith(0, MOV, none.reg, mask, src[0], none, none); + insn.cc_update = 1; + nvfx_fp_emit(fpc, insn); + + nvfx_fp_emit(fpc, arith(0, FLR, tmp.reg, mask, abs(src[0]), none, none)); + nvfx_fp_emit(fpc, arith(sat, MOV, dst, mask, tmp, none, none)); + + insn = arith(sat, MOV, dst, mask, neg(tmp), none, none); + insn.cc_test = NVFX_COND_LT; + nvfx_fp_emit(fpc, insn); + break; + case TGSI_OPCODE_TXB: + nvfx_fp_emit(fpc, tex(sat, TXB, unit, dst, mask, src[0], none, none)); + break; + case TGSI_OPCODE_TXL: + if(nvfx->is_nv4x) + nvfx_fp_emit(fpc, tex(sat, TXL_NV40, unit, dst, mask, src[0], none, none)); + else /* unsupported on nv30, use TEX and hope they like it */ + nvfx_fp_emit(fpc, tex(sat, TEX, unit, dst, mask, src[0], none, none)); + break; + case TGSI_OPCODE_TXP: + nvfx_fp_emit(fpc, tex(sat, TXP, unit, 
dst, mask, src[0], none, none)); + break; + case TGSI_OPCODE_XPD: + tmp = nvfx_src(temp(fpc)); + nvfx_fp_emit(fpc, arith(0, MUL, tmp.reg, mask, swz(src[0], Z, X, Y, Y), swz(src[1], Y, Z, X, X), none)); + nvfx_fp_emit(fpc, arith(sat, MAD, dst, (mask & ~NVFX_FP_MASK_W), swz(src[0], Y, Z, X, X), swz(src[1], Z, X, Y, Y), neg(tmp))); break; - case TGSI_OPCODE_TXP: - tex(fpc, sat, TXP, unit, dst, mask, src[0], none, none); + + case TGSI_OPCODE_IF: + // MOVRC0 R31 (TR0.xyzw), R: + // IF (NE.xxxx) ELSE END + if(!nvfx->use_nv4x) + goto nv3x_cflow; + nv40_fp_if(fpc, src[0]); break; - case TGSI_OPCODE_XPD: - tmp = temp(fpc); - arith(fpc, 0, MUL, tmp, mask, - swz(src[0], Z, X, Y, Y), swz(src[1], Y, Z, X, X), none); - arith(fpc, sat, MAD, dst, (mask & ~NVFX_FP_MASK_W), - swz(src[0], Y, Z, X, X), swz(src[1], Z, X, Y, Y), - neg(tmp)); + + case TGSI_OPCODE_ELSE: + { + uint32_t *hw; + if(!nvfx->use_nv4x) + goto nv3x_cflow; + assert(util_dynarray_contains(&fpc->if_stack, unsigned)); + hw = &fpc->fp->insn[util_dynarray_top(&fpc->if_stack, unsigned)]; + hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH | fpc->fp->insn_len; break; - default: - NOUVEAU_ERR("invalid opcode %d\n", finst->Instruction.Opcode); - return FALSE; } - release_temps(fpc); - return TRUE; -} + case TGSI_OPCODE_ENDIF: + { + uint32_t *hw; + if(!nvfx->use_nv4x) + goto nv3x_cflow; + assert(util_dynarray_contains(&fpc->if_stack, unsigned)); + hw = &fpc->fp->insn[util_dynarray_pop(&fpc->if_stack, unsigned)]; + if(!hw[2]) + hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH | fpc->fp->insn_len; + hw[3] = fpc->fp->insn_len; + break; + } -static boolean -nvfx_fragprog_parse_decl_attrib(struct nvfx_context* nvfx, struct nvfx_fpc *fpc, - const struct tgsi_full_declaration *fdec) -{ - int hw; + case TGSI_OPCODE_BRA: + /* This can in limited cases be implemented with an IF with the else and endif labels pointing to the target */ + /* no state tracker uses this, so don't implement this for now */ + assert(0); + nv40_fp_bra(fpc, finst->Label.Label); + break; - switch (fdec->Semantic.Name) { - case TGSI_SEMANTIC_POSITION: - hw = NVFX_FP_OP_INPUT_SRC_POSITION; + case TGSI_OPCODE_BGNSUB: + case TGSI_OPCODE_ENDSUB: + /* nothing to do here */ break; - case TGSI_SEMANTIC_COLOR: - if (fdec->Semantic.Index == 0) { - hw = NVFX_FP_OP_INPUT_SRC_COL0; - } else - if (fdec->Semantic.Index == 1) { - hw = NVFX_FP_OP_INPUT_SRC_COL1; - } else { - NOUVEAU_ERR("bad colour semantic index\n"); - return FALSE; - } + + case TGSI_OPCODE_CAL: + if(!nvfx->use_nv4x) + goto nv3x_cflow; + nv40_fp_cal(fpc, finst->Label.Label); break; - case TGSI_SEMANTIC_FOG: - hw = NVFX_FP_OP_INPUT_SRC_FOGC; + + case TGSI_OPCODE_RET: + if(!nvfx->use_nv4x) + goto nv3x_cflow; + nv40_fp_ret(fpc); break; - case TGSI_SEMANTIC_GENERIC: - if (fdec->Semantic.Index <= 7) { - hw = NVFX_FP_OP_INPUT_SRC_TC(fdec->Semantic. 
- Index); - } else { - NOUVEAU_ERR("bad generic semantic index\n"); - return FALSE; + + case TGSI_OPCODE_BGNLOOP: + if(!nvfx->use_nv4x) + goto nv3x_cflow; + /* TODO: we should support using two nested REPs to allow a > 255 iteration count */ + nv40_fp_rep(fpc, 255, finst->Label.Label); + break; + + case TGSI_OPCODE_ENDLOOP: + break; + + case TGSI_OPCODE_BRK: + if(!nvfx->use_nv4x) + goto nv3x_cflow; + nv40_fp_brk(fpc); + break; + + case TGSI_OPCODE_CONT: + { + static int warned = 0; + if(!warned) { + NOUVEAU_ERR("Sorry, the continue keyword is not implemented: ignoring it.\n"); + warned = 1; } break; - default: - NOUVEAU_ERR("bad input semantic\n"); + } + + default: + NOUVEAU_ERR("invalid opcode %d\n", finst->Instruction.Opcode); return FALSE; } - fpc->attrib_map[fdec->Range.First] = hw; +out: + release_temps(fpc); return TRUE; +nv3x_cflow: + { + static int warned = 0; + if(!warned) { + NOUVEAU_ERR( + "Sorry, control flow instructions are not supported in hardware on nv3x: ignoring them\n" + "If rendering is incorrect, try to disable GLSL support in the application.\n"); + warned = 1; + } + } + goto out; } static boolean @@ -670,7 +950,7 @@ nvfx_fragprog_parse_decl_output(struct nvfx_context* nvfx, struct nvfx_fpc *fpc, case 2: hw = 3; break; case 3: hw = 4; break; } - if(hw > ((nvfx->is_nv4x) ? 4 : 2)) { + if(hw > ((nvfx->use_nv4x) ? 4 : 2)) { NOUVEAU_ERR("bad rcol index\n"); return FALSE; } @@ -680,8 +960,8 @@ nvfx_fragprog_parse_decl_output(struct nvfx_context* nvfx, struct nvfx_fpc *fpc, return FALSE; } - fpc->r_result[idx] = nvfx_sr(NVFXSR_OUTPUT, hw); - fpc->r_temps |= (1 << hw); + fpc->r_result[idx] = nvfx_reg(NVFXSR_OUTPUT, hw); + fpc->r_temps |= (1ULL << hw); return TRUE; } @@ -690,8 +970,20 @@ nvfx_fragprog_prepare(struct nvfx_context* nvfx, struct nvfx_fpc *fpc) { struct tgsi_parse_context p; int high_temp = -1, i; + struct util_semantic_set set; + unsigned num_texcoords = nvfx->use_nv4x ? 
10 : 8; - tgsi_parse_init(&p, fpc->fp->pipe.tokens); + fpc->fp->num_slots = util_semantic_set_from_program_file(&set, fpc->pfp->pipe.tokens, TGSI_FILE_INPUT); + if(fpc->fp->num_slots > num_texcoords) + return FALSE; + util_semantic_layout_from_set(fpc->fp->slot_to_generic, &set, 0, num_texcoords); + util_semantic_table_from_layout(fpc->generic_to_slot, fpc->fp->slot_to_generic, 0, num_texcoords); + + memset(fpc->fp->slot_to_fp_input, 0xff, sizeof(fpc->fp->slot_to_fp_input)); + + fpc->r_imm = CALLOC(fpc->pfp->info.immediate_count, sizeof(struct nvfx_reg)); + + tgsi_parse_init(&p, fpc->pfp->pipe.tokens); while (!tgsi_parse_end_of_tokens(&p)) { const union tgsi_full_token *tok = &p.FullToken; @@ -702,10 +994,6 @@ nvfx_fragprog_prepare(struct nvfx_context* nvfx, struct nvfx_fpc *fpc) const struct tgsi_full_declaration *fdec; fdec = &p.FullToken.FullDeclaration; switch (fdec->Declaration.File) { - case TGSI_FILE_INPUT: - if (!nvfx_fragprog_parse_decl_attrib(nvfx, fpc, fdec)) - goto out_err; - break; case TGSI_FILE_OUTPUT: if (!nvfx_fragprog_parse_decl_output(nvfx, fpc, fdec)) goto out_err; @@ -724,19 +1012,14 @@ nvfx_fragprog_prepare(struct nvfx_context* nvfx, struct nvfx_fpc *fpc) case TGSI_TOKEN_TYPE_IMMEDIATE: { struct tgsi_full_immediate *imm; - float vals[4]; imm = &p.FullToken.FullImmediate; assert(imm->Immediate.DataType == TGSI_IMM_FLOAT32); - assert(fpc->nr_imm < MAX_IMM); + assert(fpc->nr_imm < fpc->pfp->info.immediate_count); - vals[0] = imm->u[0].Float; - vals[1] = imm->u[1].Float; - vals[2] = imm->u[2].Float; - vals[3] = imm->u[3].Float; - fpc->imm[fpc->nr_imm++] = constant(fpc, -1, vals); - } + fpc->r_imm[fpc->nr_imm++] = nvfx_fp_imm(fpc, imm->u[0].Float, imm->u[1].Float, imm->u[2].Float, imm->u[3].Float); break; + } default: break; } @@ -744,40 +1027,75 @@ nvfx_fragprog_prepare(struct nvfx_context* nvfx, struct nvfx_fpc *fpc) tgsi_parse_free(&p); if (++high_temp) { - fpc->r_temp = CALLOC(high_temp, sizeof(struct nvfx_sreg)); + fpc->r_temp = CALLOC(high_temp, sizeof(struct nvfx_reg)); for (i = 0; i < high_temp; i++) fpc->r_temp[i] = temp(fpc); - fpc->r_temps_discard = 0; + fpc->r_temps_discard = 0ULL; } return TRUE; out_err: - if (fpc->r_temp) + if (fpc->r_temp) { FREE(fpc->r_temp); + fpc->r_temp = NULL; + } tgsi_parse_free(&p); return FALSE; } -static void +DEBUG_GET_ONCE_BOOL_OPTION(nvfx_dump_fp, "NVFX_DUMP_FP", FALSE) + +static struct nvfx_fragment_program* nvfx_fragprog_translate(struct nvfx_context *nvfx, - struct nvfx_fragment_program *fp) + struct nvfx_pipe_fragment_program *pfp, + boolean emulate_sprite_flipping) { struct tgsi_parse_context parse; struct nvfx_fpc *fpc = NULL; + struct util_dynarray insns; + struct nvfx_fragment_program* fp = NULL; + const int min_size = 4096; + + fp = CALLOC_STRUCT(nvfx_fragment_program); + if(!fp) + goto out_err; - fpc = CALLOC(1, sizeof(struct nvfx_fpc)); + fpc = CALLOC_STRUCT(nvfx_fpc); if (!fpc) - return; + goto out_err; + + fpc->max_temps = nvfx->use_nv4x ? 
48 : 32; + fpc->pfp = pfp; fpc->fp = fp; fpc->num_regs = 2; - if (!nvfx_fragprog_prepare(nvfx, fpc)) { - FREE(fpc); - return; + for (unsigned i = 0; i < pfp->info.num_properties; ++i) { + if (pfp->info.properties[i].name == TGSI_PROPERTY_FS_COORD_ORIGIN) { + if(pfp->info.properties[i].data[0]) + fp->coord_conventions |= NV30_3D_COORD_CONVENTIONS_ORIGIN_INVERTED; + } else if (pfp->info.properties[i].name == TGSI_PROPERTY_FS_COORD_PIXEL_CENTER) { + if(pfp->info.properties[i].data[0]) + fp->coord_conventions |= NV30_3D_COORD_CONVENTIONS_CENTER_INTEGER; + } } - tgsi_parse_init(&parse, fp->pipe.tokens); + if (!nvfx_fragprog_prepare(nvfx, fpc)) + goto out_err; + + tgsi_parse_init(&parse, pfp->pipe.tokens); + util_dynarray_init(&insns); + + if(emulate_sprite_flipping) + { + struct nvfx_reg reg = temp(fpc); + struct nvfx_src sprite_input = nvfx_src(nvfx_reg(NVFXSR_RELOCATED, fp->num_slots)); + struct nvfx_src imm = nvfx_src(nvfx_fp_imm(fpc, 1, -1, 0, 0)); + + fpc->sprite_coord_temp = reg.index; + fpc->r_temps_discard = 0ULL; + nvfx_fp_emit(fpc, arith(0, MAD, reg, NVFX_FP_MASK_ALL, sprite_input, swz(imm, X, Y, X, X), swz(imm, Z, X, Z, Z))); + } while (!tgsi_parse_end_of_tokens(&parse)) { tgsi_parse_token(&parse); @@ -787,6 +1105,7 @@ nvfx_fragprog_translate(struct nvfx_context *nvfx, { const struct tgsi_full_instruction *finst; + util_dynarray_append(&insns, unsigned, fp->insn_len); finst = &parse.FullToken.FullInstruction; if (!nvfx_fragprog_parse_instruction(nvfx, fpc, finst)) goto out_err; @@ -796,17 +1115,25 @@ nvfx_fragprog_translate(struct nvfx_context *nvfx, break; } } + util_dynarray_append(&insns, unsigned, fp->insn_len); + + for(unsigned i = 0; i < fpc->label_relocs.size; i += sizeof(struct nvfx_relocation)) + { + struct nvfx_relocation* label_reloc = (struct nvfx_relocation*)((char*)fpc->label_relocs.data + i); + fp->insn[label_reloc->location] |= ((unsigned*)insns.data)[label_reloc->target]; + } + util_dynarray_fini(&insns); if(!nvfx->is_nv4x) fp->fp_control |= (fpc->num_regs-1)/2; else - fp->fp_control |= fpc->num_regs << NV40TCL_FP_CONTROL_TEMP_COUNT_SHIFT; + fp->fp_control |= fpc->num_regs << NV40_3D_FP_CONTROL_TEMP_COUNT__SHIFT; /* Terminate final instruction */ if(fp->insn) - fp->insn[fpc->inst_offset] |= 0x00000001; + fp->insn[fpc->inst_offset] |= 0x00000001; - /* Append NOP + END instruction, may or may not be necessary. */ + /* Append NOP + END instruction for branches to the end of the program */ fpc->inst_offset = fp->insn_len; grow_insns(fpc, 4); fp->insn[fpc->inst_offset + 0] = 0x00000001; @@ -814,76 +1141,248 @@ nvfx_fragprog_translate(struct nvfx_context *nvfx, fp->insn[fpc->inst_offset + 2] = 0x00000000; fp->insn[fpc->inst_offset + 3] = 0x00000000; - fp->translated = TRUE; -out_err: + if(debug_get_option_nvfx_dump_fp()) + { + debug_printf("\n"); + tgsi_dump(pfp->pipe.tokens, 0); + + debug_printf("\n%s fragment program:\n", nvfx->is_nv4x ? 
"nv4x" : "nv3x"); + for (unsigned i = 0; i < fp->insn_len; i += 4) + debug_printf("%3u: %08x %08x %08x %08x\n", i >> 2, fp->insn[i], fp->insn[i + 1], fp->insn[i + 2], fp->insn[i + 3]); + debug_printf("\n"); + } + + fp->prog_size = (fp->insn_len * 4 + 63) & ~63; + + if(fp->prog_size >= min_size) + fp->progs_per_bo = 1; + else + fp->progs_per_bo = min_size / fp->prog_size; + fp->bo_prog_idx = fp->progs_per_bo - 1; + +out: tgsi_parse_free(&parse); - if (fpc->r_temp) - FREE(fpc->r_temp); - FREE(fpc); + if(fpc) + { + if (fpc->r_temp) + FREE(fpc->r_temp); + util_dynarray_fini(&fpc->if_stack); + util_dynarray_fini(&fpc->label_relocs); + util_dynarray_fini(&fpc->imm_data); + //util_dynarray_fini(&fpc->loop_stack); + FREE(fpc); + } + return fp; + +out_err: + _debug_printf("Error: failed to compile this fragment program:\n"); + tgsi_dump(pfp->pipe.tokens, 0); + + if(fp) + { + FREE(fp); + fp = NULL; + } + goto out; } static inline void nvfx_fp_memcpy(void* dst, const void* src, size_t len) { -#ifndef WORDS_BIGENDIAN +#ifndef PIPE_ARCH_BIG_ENDIAN memcpy(dst, src, len); #else size_t i; for(i = 0; i < len; i += 4) { - uint32_t v = (uint32_t*)((char*)src + i); + uint32_t v = *(uint32_t*)((char*)src + i); *(uint32_t*)((char*)dst + i) = (v >> 16) | (v << 16); } #endif } +/* The hardware only supports immediate constants inside the fragment program, + * and at least on nv30 doesn't support an indirect linkage table. + * + * Hence, we need to patch the fragment program itself both to update constants + * and update linkage. + * + * Using a single fragment program would entail unacceptable stalls if the GPU is + * already rendering with that fragment program. + * Thus, we instead use a "rotating queue" of buffer objects, each of which is + * packed with multiple versions of the same program. + * + * Whenever we need to patch something, we move to the next program and + * patch it. If all buffer objects are in use by the GPU, we allocate another one, + * expanding the queue. + * + * As an additional optimization, we record when all the programs have the + * current input slot configuration, and at that point we stop patching inputs. + * This happens, for instance, if a given fragment program is always used with + * the same vertex program (i.e. always with GLSL), or if the layouts match + * enough (non-GLSL). + * + * Note that instead of using multiple programs, we could push commands + * on the FIFO to patch a single program: it's not fully clear which option is + * faster, but my guess is that the current way is faster. + * + * We also track the previous slot assignments for each version and don't + * patch if they are the same (this could perhaps be removed). 
+ */ + void nvfx_fragprog_validate(struct nvfx_context *nvfx) { struct nouveau_channel* chan = nvfx->screen->base.channel; - struct nvfx_fragment_program *fp = nvfx->fragprog; - int update = 0; - int i; + struct nouveau_grobj *eng3d = nvfx->screen->eng3d; + struct nvfx_pipe_fragment_program *pfp = nvfx->fragprog; + struct nvfx_vertex_program* vp; + + // TODO: the multiplication by point_quad_rasterization is probably superfluous + unsigned sprite_coord_enable = nvfx->rasterizer->pipe.point_quad_rasterization * nvfx->rasterizer->pipe.sprite_coord_enable; + + boolean emulate_sprite_flipping = sprite_coord_enable && nvfx->rasterizer->pipe.sprite_coord_mode; + unsigned key = emulate_sprite_flipping; + struct nvfx_fragment_program* fp; - if (!fp->translated) + fp = pfp->fps[key]; + if (!fp) { - const int min_size = 4096; + fp = nvfx_fragprog_translate(nvfx, pfp, emulate_sprite_flipping); - nvfx_fragprog_translate(nvfx, fp); - if (!fp->translated) { - static unsigned dummy[8] = {1, 0, 0, 0, 1, 0, 0, 0}; - static int warned = 0; - if(!warned) + if(!fp) + { + if(!nvfx->dummy_fs) { - fprintf(stderr, "nvfx: failed to translate fragment program!\n"); - warned = 1; + struct ureg_program *ureg = ureg_create( TGSI_PROCESSOR_FRAGMENT ); + if (ureg) + { + ureg_END( ureg ); + nvfx->dummy_fs = ureg_create_shader_and_destroy( ureg, &nvfx->pipe ); + } + + if(!nvfx->dummy_fs) + { + _debug_printf("Error: unable to create a dummy fragment shader: aborting."); + abort(); + } } - /* use dummy program: we cannot fail here */ - fp->translated = TRUE; - fp->insn = malloc(sizeof(dummy)); - memcpy(fp->insn, dummy, sizeof(dummy)); - fp->insn_len = sizeof(dummy) / sizeof(dummy[0]); + fp = nvfx_fragprog_translate(nvfx, nvfx->dummy_fs, FALSE); + emulate_sprite_flipping = FALSE; + + if(!fp) + { + _debug_printf("Error: unable to compile even a dummy fragment shader: aborting."); + abort(); + } } - update = TRUE; - fp->prog_size = (fp->insn_len * 4 + 63) & ~63; + pfp->fps[key] = fp; + } + + vp = nvfx->hw_vertprog; + + if (fp->last_vp_id != vp->id || fp->last_sprite_coord_enable != sprite_coord_enable) { + int sprite_real_input = -1; + int sprite_reloc_input; + unsigned i; + fp->last_vp_id = vp->id; + fp->last_sprite_coord_enable = sprite_coord_enable; + + if(sprite_coord_enable) + { + sprite_real_input = vp->sprite_fp_input; + if(sprite_real_input < 0) + { + unsigned used_texcoords = 0; + for(unsigned i = 0; i < fp->num_slots; ++i) { + unsigned generic = fp->slot_to_generic[i]; + if((generic < 32) && !((1 << generic) & sprite_coord_enable)) + { + unsigned char slot_mask = vp->generic_to_fp_input[generic]; + if(slot_mask >= 0xf0) + used_texcoords |= 1 << ((slot_mask & 0xf) - NVFX_FP_OP_INPUT_SRC_TC0); + } + } + + sprite_real_input = NVFX_FP_OP_INPUT_SRC_TC(__builtin_ctz(~used_texcoords)); + } + + fp->point_sprite_control |= (1 << (sprite_real_input - NVFX_FP_OP_INPUT_SRC_TC0 + 8)); + } + else + fp->point_sprite_control = 0; - if(fp->prog_size >= min_size) - fp->progs_per_bo = 1; + if(emulate_sprite_flipping) + sprite_reloc_input = 0; else - fp->progs_per_bo = min_size / fp->prog_size; - fp->bo_prog_idx = fp->progs_per_bo - 1; - } + sprite_reloc_input = sprite_real_input; + + for(i = 0; i < fp->num_slots; ++i) { + unsigned generic = fp->slot_to_generic[i]; + if((generic < 32) && ((1 << generic) & sprite_coord_enable)) + { + if(fp->slot_to_fp_input[i] != sprite_reloc_input) + goto update_slots; + } + else + { + unsigned char slot_mask = vp->generic_to_fp_input[generic]; + if((slot_mask >> 4) & (slot_mask ^ 
fp->slot_to_fp_input[i])) + goto update_slots; + } + } - /* we must update constants even on "just" fragprog changes, because - we don't check whether the current constant buffer matches the latest - one bound to this fragment program */ - if (nvfx->dirty & (NVFX_NEW_FRAGCONST | NVFX_NEW_FRAGPROG)) - update = TRUE; + if(emulate_sprite_flipping) + { + if(fp->slot_to_fp_input[fp->num_slots] != sprite_real_input) + goto update_slots; + } - if(update) { + if(0) + { +update_slots: + /* optimization: we start updating from the slot we found the first difference in */ + for(; i < fp->num_slots; ++i) + { + unsigned generic = fp->slot_to_generic[i]; + if((generic < 32) && ((1 << generic) & sprite_coord_enable)) + fp->slot_to_fp_input[i] = sprite_reloc_input; + else + fp->slot_to_fp_input[i] = vp->generic_to_fp_input[generic] & 0xf; + } + + fp->slot_to_fp_input[fp->num_slots] = sprite_real_input; + + if(nvfx->is_nv4x) + { + fp->or = 0; + for(i = 0; i <= fp->num_slots; ++i) { + unsigned fp_input = fp->slot_to_fp_input[i]; + if(fp_input == NVFX_FP_OP_INPUT_SRC_TC(8)) + fp->or |= (1 << 12); + else if(fp_input == NVFX_FP_OP_INPUT_SRC_TC(9)) + fp->or |= (1 << 13); + else if(fp_input >= NVFX_FP_OP_INPUT_SRC_TC(0) && fp_input <= NVFX_FP_OP_INPUT_SRC_TC(7)) + fp->or |= (1 << (fp_input - NVFX_FP_OP_INPUT_SRC_TC0 + 14)); + } + } + + fp->progs_left_with_obsolete_slot_assignments = fp->progs; + goto update; + } + } + + /* We must update constants even on "just" fragprog changes, because + * we don't check whether the current constant buffer matches the latest + * one bound to this fragment program. + * Doing such a check would likely be a pessimization. + */ + if ((nvfx->hw_fragprog != fp) || (nvfx->dirty & (NVFX_NEW_FRAGPROG | NVFX_NEW_FRAGCONST))) { int offset; + uint32_t* fpmap; +update: ++fp->bo_prog_idx; if(fp->bo_prog_idx >= fp->progs_per_bo) { @@ -893,9 +1392,12 @@ nvfx_fragprog_validate(struct nvfx_context *nvfx) } else { - struct nvfx_fragment_program_bo* fpbo = os_malloc_aligned(sizeof(struct nvfx_fragment_program) + fp->prog_size * fp->progs_per_bo, 16); - char *map, *buf; + struct nvfx_fragment_program_bo* fpbo = os_malloc_aligned(sizeof(struct nvfx_fragment_program) + (fp->prog_size + 8) * fp->progs_per_bo, 16); + uint8_t* map; + uint8_t* buf; + fpbo->slots = (unsigned char*)&fpbo->insn[(fp->prog_size) * fp->progs_per_bo]; + memset(fpbo->slots, 0, 8 * fp->progs_per_bo); if(fp->fpbo) { fpbo->next = fp->fpbo->next; @@ -905,12 +1407,14 @@ nvfx_fragprog_validate(struct nvfx_context *nvfx) fpbo->next = fpbo; fp->fpbo = fpbo; fpbo->bo = 0; + fp->progs += fp->progs_per_bo; + fp->progs_left_with_obsolete_slot_assignments += fp->progs_per_bo; nouveau_bo_new(nvfx->screen->base.device, NOUVEAU_BO_VRAM | NOUVEAU_BO_MAP, 64, fp->prog_size * fp->progs_per_bo, &fpbo->bo); nouveau_bo_map(fpbo->bo, NOUVEAU_BO_NOSYNC); map = fpbo->bo->map; - buf = fpbo->insn; - for(int i = 0; i < fp->progs_per_bo; ++i) + buf = (uint8_t*)fpbo->insn; + for(unsigned i = 0; i < fp->progs_per_bo; ++i) { memcpy(buf, fp->insn, fp->insn_len * 4); nvfx_fp_memcpy(map, fp->insn, fp->insn_len * 4); @@ -922,15 +1426,14 @@ nvfx_fragprog_validate(struct nvfx_context *nvfx) } offset = fp->bo_prog_idx * fp->prog_size; + fpmap = (uint32_t*)((char*)fp->fpbo->bo->map + offset); if(nvfx->constbuf[PIPE_SHADER_FRAGMENT]) { struct pipe_resource* constbuf = nvfx->constbuf[PIPE_SHADER_FRAGMENT]; - // TODO: avoid using transfers, just directly the buffer - struct pipe_transfer* transfer; - // TODO: does this check make any sense, or should we do this 
unconditionally?
-		uint32_t* map = pipe_buffer_map(&nvfx->pipe, constbuf, PIPE_TRANSFER_READ, &transfer);
+		uint32_t* map = (uint32_t*)nvfx_buffer(constbuf)->data;
 		uint32_t* fpmap = (uint32_t*)((char*)fp->fpbo->bo->map + offset);
 		uint32_t* buf = (uint32_t*)((char*)fp->fpbo->insn + offset);
+		int i;
 		for (i = 0; i < fp->nr_consts; ++i) {
 			unsigned off = fp->consts[i].offset;
 			unsigned idx = fp->consts[i].index * 4;
@@ -941,49 +1444,112 @@ nvfx_fragprog_validate(struct nvfx_context *nvfx)
 				nvfx_fp_memcpy(&fpmap[off], &map[idx], 4 * sizeof(uint32_t));
 			}
 		}
-		pipe_buffer_unmap(&nvfx->pipe, constbuf, transfer);
 	}
-	}
-
-	if(update || (nvfx->dirty & NVFX_NEW_FRAGPROG)) {
-		int offset = fp->bo_prog_idx * fp->prog_size;
+
+	/* we only do this if we aren't sure that all program versions have the
+	 * current slot assignments, otherwise we just update constants for speed
+	 */
+	if(fp->progs_left_with_obsolete_slot_assignments) {
+		unsigned char* fpbo_slots = &fp->fpbo->slots[fp->bo_prog_idx * 8];
+		/* also relocate sprite coord slot, if any */
+		for(unsigned i = 0; i <= fp->num_slots; ++i) {
+			unsigned value = fp->slot_to_fp_input[i];
+			if(value != fpbo_slots[i]) {
+				unsigned* p;
+				unsigned* begin = (unsigned*)fp->slot_relocations[i].data;
+				unsigned* end = (unsigned*)((char*)fp->slot_relocations[i].data + fp->slot_relocations[i].size);
+				//printf("fp %p reloc slot %u/%u: %u -> %u\n", fp, i, fp->num_slots, fpbo_slots[i], value);
+				if(value == 0)
+				{
+					/* was relocated to an input, switch type to temporary */
+					for(p = begin; p != end; ++p) {
+						unsigned off = *p;
+						unsigned dw = fp->insn[off];
+						dw &=~ NVFX_FP_REG_TYPE_MASK;
+						//printf("reloc_tmp at %x\n", off);
+						nvfx_fp_memcpy(&fpmap[off], &dw, sizeof(dw));
+					}
+				} else {
+					if(!fpbo_slots[i])
+					{
+						/* was relocated to a temporary, switch type to input */
+						for(p = begin; p != end; ++p) {
+							unsigned off = *p;
+							unsigned dw = fp->insn[off];
+							//printf("reloc_in at %x\n", off);
+							dw |= NVFX_FP_REG_TYPE_INPUT << NVFX_FP_REG_TYPE_SHIFT;
+							nvfx_fp_memcpy(&fpmap[off], &dw, sizeof(dw));
+						}
+					}
+
+					/* set the correct input index */
+					for(p = begin; p != end; ++p) {
+						unsigned off = *p & ~3;
+						unsigned dw = fp->insn[off];
+						//printf("reloc&~3 at %x\n", off);
+						dw = (dw & ~NVFX_FP_OP_INPUT_SRC_MASK) | (value << NVFX_FP_OP_INPUT_SRC_SHIFT);
+						nvfx_fp_memcpy(&fpmap[off], &dw, sizeof(dw));
+					}
+				}
+				fpbo_slots[i] = value;
+			}
+		}
+		--fp->progs_left_with_obsolete_slot_assignments;
+	}
+
+	nvfx->hw_fragprog = fp;
+
 	MARK_RING(chan, 8, 1);
-	OUT_RING(chan, RING_3D(NV34TCL_FP_ACTIVE_PROGRAM, 1));
+	BEGIN_RING(chan, eng3d, NV30_3D_FP_ACTIVE_PROGRAM, 1);
 	OUT_RELOC(chan, fp->fpbo->bo, offset, NOUVEAU_BO_VRAM |
 		      NOUVEAU_BO_GART | NOUVEAU_BO_RD | NOUVEAU_BO_LOW |
-		      NOUVEAU_BO_OR, NV34TCL_FP_ACTIVE_PROGRAM_DMA0,
-		      NV34TCL_FP_ACTIVE_PROGRAM_DMA1);
-	OUT_RING(chan, RING_3D(NV34TCL_FP_CONTROL, 1));
+		      NOUVEAU_BO_OR, NV30_3D_FP_ACTIVE_PROGRAM_DMA0,
+		      NV30_3D_FP_ACTIVE_PROGRAM_DMA1);
+	BEGIN_RING(chan, eng3d, NV30_3D_FP_CONTROL, 1);
 	OUT_RING(chan, fp->fp_control);
 	if(!nvfx->is_nv4x) {
-		OUT_RING(chan, RING_3D(NV34TCL_FP_REG_CONTROL, 1));
+		BEGIN_RING(chan, eng3d, NV30_3D_FP_REG_CONTROL, 1);
 		OUT_RING(chan, (1<<16)|0x4);
-		OUT_RING(chan, RING_3D(NV34TCL_TX_UNITS_ENABLE, 1));
+		BEGIN_RING(chan, eng3d, NV30_3D_TEX_UNITS_ENABLE, 1);
 		OUT_RING(chan, fp->samplers);
 	}
 	}
+
+	{
+		unsigned pointsprite_control = fp->point_sprite_control | nvfx->rasterizer->pipe.point_quad_rasterization;
+		if(pointsprite_control != nvfx->hw_pointsprite_control)
+		{
+			BEGIN_RING(chan, eng3d, NV30_3D_POINT_SPRITE, 1);
+			
OUT_RING(chan, pointsprite_control); + nvfx->hw_pointsprite_control = pointsprite_control; + } + } + + nvfx->relocs_needed &=~ NVFX_RELOCATE_FRAGPROG; } void nvfx_fragprog_relocate(struct nvfx_context *nvfx) { struct nouveau_channel* chan = nvfx->screen->base.channel; - struct nvfx_fragment_program *fp = nvfx->fragprog; + struct nvfx_fragment_program *fp = nvfx->hw_fragprog; struct nouveau_bo* bo = fp->fpbo->bo; int offset = fp->bo_prog_idx * fp->prog_size; unsigned fp_flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_RD; // TODO: GART? fp_flags |= NOUVEAU_BO_DUMMY; MARK_RING(chan, 2, 2); - OUT_RELOC(chan, bo, RING_3D(NV34TCL_FP_ACTIVE_PROGRAM, 1), fp_flags, 0, 0); + OUT_RELOC(chan, bo, RING_3D(NV30_3D_FP_ACTIVE_PROGRAM, 1), fp_flags, 0, 0); OUT_RELOC(chan, bo, offset, fp_flags | NOUVEAU_BO_LOW | - NOUVEAU_BO_OR, NV34TCL_FP_ACTIVE_PROGRAM_DMA0, - NV34TCL_FP_ACTIVE_PROGRAM_DMA1); + NOUVEAU_BO_OR, NV30_3D_FP_ACTIVE_PROGRAM_DMA0, + NV30_3D_FP_ACTIVE_PROGRAM_DMA1); + nvfx->relocs_needed &=~ NVFX_RELOCATE_FRAGPROG; } void nvfx_fragprog_destroy(struct nvfx_context *nvfx, struct nvfx_fragment_program *fp) { + unsigned i; struct nvfx_fragment_program_bo* fpbo = fp->fpbo; if(fpbo) { @@ -992,13 +1558,66 @@ nvfx_fragprog_destroy(struct nvfx_context *nvfx, struct nvfx_fragment_program_bo* next = fpbo->next; nouveau_bo_unmap(fpbo->bo); nouveau_bo_ref(0, &fpbo->bo); - free(fpbo); + os_free_aligned(fpbo); fpbo = next; } while(fpbo != fp->fpbo); } + for(i = 0; i < Elements(fp->slot_relocations); ++i) + util_dynarray_fini(&fp->slot_relocations[i]); + if (fp->insn_len) FREE(fp->insn); } +static void * +nvfx_fp_state_create(struct pipe_context *pipe, + const struct pipe_shader_state *cso) +{ + struct nvfx_pipe_fragment_program *pfp; + + pfp = CALLOC(1, sizeof(struct nvfx_pipe_fragment_program)); + pfp->pipe.tokens = tgsi_dup_tokens(cso->tokens); + + tgsi_scan_shader(pfp->pipe.tokens, &pfp->info); + + return (void *)pfp; +} + +static void +nvfx_fp_state_bind(struct pipe_context *pipe, void *hwcso) +{ + struct nvfx_context *nvfx = nvfx_context(pipe); + + nvfx->fragprog = hwcso; + nvfx->dirty |= NVFX_NEW_FRAGPROG; +} + +static void +nvfx_fp_state_delete(struct pipe_context *pipe, void *hwcso) +{ + struct nvfx_context *nvfx = nvfx_context(pipe); + struct nvfx_pipe_fragment_program *pfp = hwcso; + unsigned i; + + for(i = 0; i < Elements(pfp->fps); ++i) + { + if(pfp->fps[i]) + { + nvfx_fragprog_destroy(nvfx, pfp->fps[i]); + FREE(pfp->fps[i]); + } + } + + FREE((void*)pfp->pipe.tokens); + FREE(pfp); +} + +void +nvfx_init_fragprog_functions(struct nvfx_context *nvfx) +{ + nvfx->pipe.create_fs_state = nvfx_fp_state_create; + nvfx->pipe.bind_fs_state = nvfx_fp_state_bind; + nvfx->pipe.delete_fs_state = nvfx_fp_state_delete; +}
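
For context, a state tracker drives the three hooks registered in nvfx_init_fragprog_functions() through the pipe_context vtable. The following is a hedged usage sketch rather than code from this patch; the context pointer and the TGSI tokens are assumed to come from elsewhere in Gallium:

    struct pipe_shader_state ss = { .tokens = tokens };  /* TGSI from the state tracker */

    void *fs = pipe->create_fs_state(pipe, &ss);  /* tgsi_dup_tokens + tgsi_scan_shader */
    pipe->bind_fs_state(pipe, fs);                /* sets nvfx->fragprog, flags NVFX_NEW_FRAGPROG */
    /* draw calls follow; nvfx_fragprog_validate() translates the program on
     * first use and re-patches constants/input slots as state changes */
    pipe->bind_fs_state(pipe, NULL);
    pipe->delete_fs_state(pipe, fs);              /* nvfx_fragprog_destroy() on each variant */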