#include <inttypes.h>
#include "pipe/p_state.h"
#include "util/u_format.h"
-#include "util/u_hash_table.h"
#include "util/u_hash.h"
#include "util/u_memory.h"
#include "util/u_pack_color.h"
bool point_coord_upper_left;
bool light_twoside;
uint8_t alpha_test_func;
+ uint8_t logicop_func;
uint32_t point_sprite_mask;
struct pipe_rt_blend_state blend;
}
static struct qreg
-add_uniform(struct vc4_compile *c,
- enum quniform_contents contents,
- uint32_t data)
-{
- uint32_t uniform = c->num_uniforms++;
- struct qreg u = { QFILE_UNIF, uniform };
-
- if (uniform >= c->uniform_array_size) {
- c->uniform_array_size = MAX2(MAX2(16, uniform + 1),
- c->uniform_array_size * 2);
-
- c->uniform_data = reralloc(c, c->uniform_data,
- uint32_t,
- c->uniform_array_size);
- c->uniform_contents = reralloc(c, c->uniform_contents,
- enum quniform_contents,
- c->uniform_array_size);
- }
-
- c->uniform_contents[uniform] = contents;
- c->uniform_data[uniform] = data;
-
- return u;
-}
+indirect_uniform_load(struct vc4_compile *c,
+                      struct tgsi_full_src_register *src, int swiz)
+{
+        struct tgsi_ind_register *indirect = &src->Indirect;
+        struct vc4_compiler_ubo_range *range = &c->ubo_ranges[indirect->ArrayID];
+        /* First indirect reference to this constant array: assign it a
+         * destination slot in the UBO we upload at draw time.
+         */
+        if (!range->used) {
+                range->used = true;
+                range->dst_offset = c->next_ubo_dst_offset;
+                c->next_ubo_dst_offset += range->size;
+                c->num_ubo_ranges++;
+        }
-static struct qreg
-get_temp_for_uniform(struct vc4_compile *c, enum quniform_contents contents,
- uint32_t data)
-{
- struct qreg u = add_uniform(c, contents, data);
- struct qreg t = qir_MOV(c, u);
- return t;
-}
+ assert(src->Register.Indirect);
+ assert(indirect->File == TGSI_FILE_ADDRESS);
-static struct qreg
-qir_uniform_ui(struct vc4_compile *c, uint32_t ui)
-{
- return get_temp_for_uniform(c, QUNIFORM_CONSTANT, ui);
-}
+ struct qreg addr_val = c->addr[indirect->Swizzle];
+ struct qreg indirect_offset =
+ qir_ADD(c, addr_val, qir_uniform_ui(c,
+ range->dst_offset +
+ (src->Register.Index * 16)+
+ swiz * 4));
+ indirect_offset = qir_MIN(c, indirect_offset, qir_uniform_ui(c, (range->dst_offset +
+ range->size - 4)));
-static struct qreg
-qir_uniform_f(struct vc4_compile *c, float f)
-{
- return qir_uniform_ui(c, fui(f));
+ qir_TEX_DIRECT(c, indirect_offset, qir_uniform(c, QUNIFORM_UBO_ADDR, 0));
+ struct qreg r4 = qir_TEX_RESULT(c);
+ c->num_texture_samples++;
+ return qir_MOV(c, r4);
}
static struct qreg
get_src(struct vc4_compile *c, unsigned tgsi_op,
- struct tgsi_src_register *src, int i)
+ struct tgsi_full_src_register *full_src, int i)
{
+ struct tgsi_src_register *src = &full_src->Register;
struct qreg r = c->undef;
uint32_t s = i;
abort();
}
- assert(!src->Indirect);
-
switch (src->File) {
case TGSI_FILE_NULL:
return r;
r = c->consts[src->Index * 4 + s];
break;
case TGSI_FILE_CONSTANT:
- r = get_temp_for_uniform(c, QUNIFORM_UNIFORM,
- src->Index * 4 + s);
+ if (src->Indirect) {
+ r = indirect_uniform_load(c, full_src, s);
+ } else {
+ r = qir_uniform(c, QUNIFORM_UNIFORM, src->Index * 4 + s);
+ }
break;
case TGSI_FILE_INPUT:
r = c->inputs[src->Index * 4 + s];
c->num_outputs = MAX2(c->num_outputs,
tgsi_dst->Index * 4 + i + 1);
break;
+ case TGSI_FILE_ADDRESS:
+ assert(tgsi_dst->Index == 0);
+ c->addr[i] = val;
+ break;
default:
fprintf(stderr, "unknown dst file %d\n", tgsi_dst->File);
abort();
}
}
+/* Clamps a float value to the [0.0, 1.0] range (saturate). */
+static inline struct qreg
+qir_SAT(struct vc4_compile *c, struct qreg val)
+{
+        return qir_FMAX(c,
+                        qir_FMIN(c, val, qir_uniform_f(c, 1.0)),
+                        qir_uniform_f(c, 0.0));
+}
+
static struct qreg
tgsi_to_qir_alu(struct vc4_compile *c,
struct tgsi_full_instruction *tgsi_inst,
struct tgsi_full_instruction *tgsi_inst,
enum qop op, struct qreg *src, int i)
{
- struct qreg src0_hi = qir_SHR(c, src[0 * 4 + i],
- qir_uniform_ui(c, 16));
- struct qreg src0_lo = qir_AND(c, src[0 * 4 + i],
- qir_uniform_ui(c, 0xffff));
- struct qreg src1_hi = qir_SHR(c, src[1 * 4 + i],
- qir_uniform_ui(c, 16));
- struct qreg src1_lo = qir_AND(c, src[1 * 4 + i],
- qir_uniform_ui(c, 0xffff));
+ struct qreg src0 = src[0 * 4 + i];
+ struct qreg src0_hi = qir_SHR(c, src0, qir_uniform_ui(c, 24));
+ struct qreg src1 = src[1 * 4 + i];
+ struct qreg src1_hi = qir_SHR(c, src1, qir_uniform_ui(c, 24));
- struct qreg hilo = qir_MUL24(c, src0_hi, src1_lo);
- struct qreg lohi = qir_MUL24(c, src0_lo, src1_hi);
- struct qreg lolo = qir_MUL24(c, src0_lo, src1_lo);
+ struct qreg hilo = qir_MUL24(c, src0_hi, src1);
+ struct qreg lohi = qir_MUL24(c, src0, src1_hi);
+ struct qreg lolo = qir_MUL24(c, src0, src1);
return qir_ADD(c, lolo, qir_SHL(c,
qir_ADD(c, hilo, lohi),
- qir_uniform_ui(c, 16)));
+ qir_uniform_ui(c, 24)));
+}
+
+static struct qreg
+tgsi_to_qir_umad(struct vc4_compile *c,
+ struct tgsi_full_instruction *tgsi_inst,
+ enum qop op, struct qreg *src, int i)
+{
+ return qir_ADD(c, tgsi_to_qir_umul(c, NULL, 0, src, i), src[2 * 4 + i]);
}
static struct qreg
src[2 * 4 + i]);
}
+static struct qreg
+tgsi_to_qir_ucmp(struct vc4_compile *c,
+ struct tgsi_full_instruction *tgsi_inst,
+ enum qop op, struct qreg *src, int i)
+{
+ qir_SF(c, src[0 * 4 + i]);
+ return qir_SEL_X_Y_ZC(c,
+ src[1 * 4 + i],
+ src[2 * 4 + i]);
+}
+
static struct qreg
tgsi_to_qir_mad(struct vc4_compile *c,
struct tgsi_full_instruction *tgsi_inst,
}
struct qreg texture_u[] = {
- add_uniform(c, QUNIFORM_TEXTURE_CONFIG_P0, unit),
- add_uniform(c, QUNIFORM_TEXTURE_CONFIG_P1, unit),
- add_uniform(c, QUNIFORM_CONSTANT, 0),
- add_uniform(c, QUNIFORM_CONSTANT, 0),
+ qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P0, unit),
+ qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P1, unit),
+ qir_uniform(c, QUNIFORM_CONSTANT, 0),
+ qir_uniform(c, QUNIFORM_CONSTANT, 0),
};
uint32_t next_texture_u = 0;
if (tgsi_inst->Texture.Texture == TGSI_TEXTURE_RECT ||
tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOWRECT) {
s = qir_FMUL(c, s,
- get_temp_for_uniform(c,
- QUNIFORM_TEXRECT_SCALE_X,
- unit));
+ qir_uniform(c, QUNIFORM_TEXRECT_SCALE_X, unit));
t = qir_FMUL(c, t,
- get_temp_for_uniform(c,
- QUNIFORM_TEXRECT_SCALE_Y,
- unit));
+ qir_uniform(c, QUNIFORM_TEXRECT_SCALE_Y, unit));
}
if (tgsi_inst->Texture.Texture == TGSI_TEXTURE_CUBE ||
tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
is_txl) {
- texture_u[2] = add_uniform(c, QUNIFORM_TEXTURE_CONFIG_P2,
+ texture_u[2] = qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P2,
unit | (is_txl << 16));
}
c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP ||
c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
- qir_TEX_R(c, get_temp_for_uniform(c, QUNIFORM_TEXTURE_BORDER_COLOR, unit),
+ qir_TEX_R(c, qir_uniform(c, QUNIFORM_TEXTURE_BORDER_COLOR, unit),
texture_u[next_texture_u++]);
}
if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP) {
- s = qir_FMIN(c, qir_FMAX(c, s, qir_uniform_f(c, 0.0)),
- qir_uniform_f(c, 1.0));
+ s = qir_SAT(c, s);
}
if (c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
- t = qir_FMIN(c, qir_FMAX(c, t, qir_uniform_f(c, 0.0)),
- qir_uniform_f(c, 1.0));
+ t = qir_SAT(c, t);
}
qir_TEX_T(c, t, texture_u[next_texture_u++]);
qir_uniform_f(c, -1.0));
}
+/* Compare to tgsi_to_qir_flr() for the floor logic. */
+static struct qreg
+tgsi_to_qir_arl(struct vc4_compile *c,
+                struct tgsi_full_instruction *tgsi_inst,
+                enum qop op, struct qreg *src, int i)
+{
+        struct qreg trunc = qir_FTOI(c, src[0 * 4 + i]);
+        struct qreg scaled = qir_SHL(c, trunc, qir_uniform_ui(c, 4));
+
+        qir_SF(c, qir_FSUB(c, src[0 * 4 + i], qir_ITOF(c, trunc)));
+
+        /* For a negative source with a nonzero fractional part, floor()
+         * is trunc - 1.  The address is scaled by 16 (<< 4), so back off
+         * by a full 16, not 4.
+         */
+        return qir_SEL_X_Y_NS(c, qir_SUB(c, scaled, qir_uniform_ui(c, 16)),
+                              scaled);
+}
+
+static struct qreg
+tgsi_to_qir_uarl(struct vc4_compile *c,
+ struct tgsi_full_instruction *tgsi_inst,
+ enum qop op, struct qreg *src, int i)
+{
+ return qir_SHL(c, src[0 * 4 + i], qir_uniform_ui(c, 4));
+}
+
+static struct qreg
+get_channel_from_vpm(struct vc4_compile *c,
+ struct qreg *vpm_reads,
+ uint8_t swiz,
+ const struct util_format_description *desc)
+{
+ const struct util_format_channel_description *chan =
+ &desc->channel[swiz];
+ struct qreg temp;
+
+ if (swiz > UTIL_FORMAT_SWIZZLE_W)
+ return get_swizzled_channel(c, vpm_reads, swiz);
+ else if (chan->size == 32 &&
+ chan->type == UTIL_FORMAT_TYPE_FLOAT) {
+ return get_swizzled_channel(c, vpm_reads, swiz);
+ } else if (chan->size == 32 &&
+ chan->type == UTIL_FORMAT_TYPE_SIGNED) {
+ if (chan->normalized) {
+ return qir_FMUL(c,
+ qir_ITOF(c, vpm_reads[swiz]),
+ qir_uniform_f(c,
+ 1.0 / 0x7fffffff));
+ } else {
+ return qir_ITOF(c, vpm_reads[swiz]);
+ }
+ } else if (chan->size == 8 &&
+ (chan->type == UTIL_FORMAT_TYPE_UNSIGNED ||
+ chan->type == UTIL_FORMAT_TYPE_SIGNED)) {
+ struct qreg vpm = vpm_reads[0];
+ if (chan->type == UTIL_FORMAT_TYPE_SIGNED) {
+ temp = qir_XOR(c, vpm, qir_uniform_ui(c, 0x80808080));
+ if (chan->normalized) {
+ return qir_FSUB(c, qir_FMUL(c,
+ qir_UNPACK_8_F(c, temp, swiz),
+ qir_uniform_f(c, 2.0)),
+ qir_uniform_f(c, 1.0));
+ } else {
+ return qir_FADD(c,
+ qir_ITOF(c,
+ qir_UNPACK_8_I(c, temp,
+ swiz)),
+ qir_uniform_f(c, -128.0));
+ }
+ } else {
+ if (chan->normalized) {
+ return qir_UNPACK_8_F(c, vpm, swiz);
+ } else {
+ return qir_ITOF(c, qir_UNPACK_8_I(c, vpm, swiz));
+ }
+ }
+ } else if (chan->size == 16 &&
+ (chan->type == UTIL_FORMAT_TYPE_UNSIGNED ||
+ chan->type == UTIL_FORMAT_TYPE_SIGNED)) {
+ struct qreg vpm = vpm_reads[swiz / 2];
+
+ /* Note that UNPACK_16F eats a half float, not ints, so we use
+ * UNPACK_16_I for all of these.
+ */
+ if (chan->type == UTIL_FORMAT_TYPE_SIGNED) {
+ temp = qir_ITOF(c, qir_UNPACK_16_I(c, vpm, swiz % 2));
+ if (chan->normalized) {
+ return qir_FMUL(c, temp,
+ qir_uniform_f(c, 1/32768.0f));
+ } else {
+ return temp;
+ }
+ } else {
+ /* UNPACK_16I sign-extends, so we have to emit ANDs. */
+ temp = vpm;
+ if (swiz == 1 || swiz == 3)
+ temp = qir_UNPACK_16_I(c, temp, 1);
+ temp = qir_AND(c, temp, qir_uniform_ui(c, 0xffff));
+ temp = qir_ITOF(c, temp);
+
+ if (chan->normalized) {
+ return qir_FMUL(c, temp,
+ qir_uniform_f(c, 1 / 65535.0));
+ } else {
+ return temp;
+ }
+ }
+ } else {
+ return c->undef;
+ }
+}
+
static void
emit_vertex_input(struct vc4_compile *c, int attr)
{
enum pipe_format format = c->vs_key->attr_formats[attr];
+ uint32_t attr_size = util_format_get_blocksize(format);
struct qreg vpm_reads[4];
- /* Right now, we're setting the VPM offsets to be 16 bytes wide every
- * time, so we always read 4 32-bit VPM entries.
- */
- for (int i = 0; i < 4; i++) {
- vpm_reads[i] = qir_get_temp(c);
- qir_emit(c, qir_inst(QOP_VPM_READ,
- vpm_reads[i],
- c->undef,
- c->undef));
+ c->vattr_sizes[attr] = align(attr_size, 4);
+ for (int i = 0; i < align(attr_size, 4) / 4; i++) {
+ struct qreg vpm = { QFILE_VPM, attr * 4 + i };
+ vpm_reads[i] = qir_MOV(c, vpm);
c->num_inputs++;
}
for (int i = 0; i < 4; i++) {
uint8_t swiz = desc->swizzle[i];
- struct qreg result;
+ struct qreg result = get_channel_from_vpm(c, vpm_reads,
+ swiz, desc);
- if (swiz > UTIL_FORMAT_SWIZZLE_W)
- result = get_swizzled_channel(c, vpm_reads, swiz);
- else if (desc->channel[swiz].size == 32 &&
- desc->channel[swiz].type == UTIL_FORMAT_TYPE_FLOAT) {
- result = get_swizzled_channel(c, vpm_reads, swiz);
- } else if (desc->channel[swiz].size == 8 &&
- (desc->channel[swiz].type == UTIL_FORMAT_TYPE_UNSIGNED ||
- desc->channel[swiz].type == UTIL_FORMAT_TYPE_SIGNED) &&
- desc->channel[swiz].normalized) {
- struct qreg vpm = vpm_reads[0];
- if (desc->channel[swiz].type == UTIL_FORMAT_TYPE_SIGNED)
- vpm = qir_XOR(c, vpm, qir_uniform_ui(c, 0x80808080));
- result = qir_UNPACK_8(c, vpm, swiz);
- } else {
+ if (result.file == QFILE_NULL) {
if (!format_warned) {
fprintf(stderr,
"vtx element %d unsupported type: %s\n",
}
result = qir_uniform_f(c, 0.0);
}
-
- if (desc->channel[swiz].normalized &&
- desc->channel[swiz].type == UTIL_FORMAT_TYPE_SIGNED) {
- result = qir_FSUB(c,
- qir_FMUL(c,
- result,
- qir_uniform_f(c, 2.0)),
- qir_uniform_f(c, 1.0));
- }
-
c->inputs[attr * 4 + i] = result;
}
}
c->output_semantics[decl_offset].swizzle = semantic_swizzle;
}
+static void
+add_array_info(struct vc4_compile *c, uint32_t array_id,
+ uint32_t start, uint32_t size)
+{
+ if (array_id >= c->ubo_ranges_array_size) {
+ c->ubo_ranges_array_size = MAX2(c->ubo_ranges_array_size * 2,
+ array_id + 1);
+ c->ubo_ranges = reralloc(c, c->ubo_ranges,
+ struct vc4_compiler_ubo_range,
+ c->ubo_ranges_array_size);
+ }
+
+ c->ubo_ranges[array_id].dst_offset = 0;
+ c->ubo_ranges[array_id].src_offset = start;
+ c->ubo_ranges[array_id].size = size;
+ c->ubo_ranges[array_id].used = false;
+}
+
static void
emit_tgsi_declaration(struct vc4_compile *c,
struct tgsi_full_declaration *decl)
}
break;
+
+ case TGSI_FILE_CONSTANT:
+ add_array_info(c,
+ decl->Array.ArrayID,
+ decl->Range.First * 16,
+ (decl->Range.Last -
+ decl->Range.First + 1) * 16);
+ break;
}
}
}
emit_tgsi_instruction(struct vc4_compile *c,
struct tgsi_full_instruction *tgsi_inst)
{
- struct {
+ static const struct {
enum qop op;
struct qreg (*func)(struct vc4_compile *c,
struct tgsi_full_instruction *tgsi_inst,
[TGSI_OPCODE_NOT] = { QOP_NOT, tgsi_to_qir_alu },
[TGSI_OPCODE_UMUL] = { 0, tgsi_to_qir_umul },
+ [TGSI_OPCODE_UMAD] = { 0, tgsi_to_qir_umad },
[TGSI_OPCODE_IDIV] = { 0, tgsi_to_qir_idiv },
[TGSI_OPCODE_INEG] = { 0, tgsi_to_qir_ineg },
[TGSI_OPCODE_ISLT] = { 0, tgsi_to_qir_islt },
[TGSI_OPCODE_CMP] = { 0, tgsi_to_qir_cmp },
+ [TGSI_OPCODE_UCMP] = { 0, tgsi_to_qir_ucmp },
[TGSI_OPCODE_MAD] = { 0, tgsi_to_qir_mad },
[TGSI_OPCODE_RCP] = { QOP_RCP, tgsi_to_qir_rcp },
[TGSI_OPCODE_RSQ] = { QOP_RSQ, tgsi_to_qir_rsq },
[TGSI_OPCODE_COS] = { 0, tgsi_to_qir_cos },
[TGSI_OPCODE_CLAMP] = { 0, tgsi_to_qir_clamp },
[TGSI_OPCODE_SSG] = { 0, tgsi_to_qir_ssg },
+ [TGSI_OPCODE_ARL] = { 0, tgsi_to_qir_arl },
+ [TGSI_OPCODE_UARL] = { 0, tgsi_to_qir_uarl },
};
static int asdf = 0;
uint32_t tgsi_op = tgsi_inst->Instruction.Opcode;
for (int i = 0; i < 4; i++) {
src_regs[4 * s + i] =
get_src(c, tgsi_inst->Instruction.Opcode,
- &tgsi_inst->Src[s].Register, i);
+ &tgsi_inst->Src[s], i);
}
}
}
case PIPE_BLENDFACTOR_CONST_COLOR:
return qir_FMUL(c, val,
- get_temp_for_uniform(c,
- QUNIFORM_BLEND_CONST_COLOR,
- channel));
+ qir_uniform(c, QUNIFORM_BLEND_CONST_COLOR,
+ channel));
case PIPE_BLENDFACTOR_CONST_ALPHA:
return qir_FMUL(c, val,
- get_temp_for_uniform(c,
- QUNIFORM_BLEND_CONST_COLOR,
- 3));
+ qir_uniform(c, QUNIFORM_BLEND_CONST_COLOR, 3));
case PIPE_BLENDFACTOR_ZERO:
return qir_uniform_f(c, 0.0);
case PIPE_BLENDFACTOR_INV_SRC_COLOR:
case PIPE_BLENDFACTOR_INV_CONST_COLOR:
return qir_FMUL(c, val,
qir_FSUB(c, qir_uniform_f(c, 1.0),
- get_temp_for_uniform(c,
- QUNIFORM_BLEND_CONST_COLOR,
- channel)));
+ qir_uniform(c,
+ QUNIFORM_BLEND_CONST_COLOR,
+ channel)));
case PIPE_BLENDFACTOR_INV_CONST_ALPHA:
return qir_FMUL(c, val,
qir_FSUB(c, qir_uniform_f(c, 1.0),
- get_temp_for_uniform(c,
- QUNIFORM_BLEND_CONST_COLOR,
- 3)));
+ qir_uniform(c,
+ QUNIFORM_BLEND_CONST_COLOR,
+ 3)));
default:
case PIPE_BLENDFACTOR_SRC1_COLOR:
return;
}
+ struct qreg clamped_src[4];
+ struct qreg clamped_dst[4];
+ for (int i = 0; i < 4; i++) {
+ clamped_src[i] = qir_SAT(c, src_color[i]);
+ clamped_dst[i] = qir_SAT(c, dst_color[i]);
+ }
+ src_color = clamped_src;
+ dst_color = clamped_dst;
+
struct qreg src_blend[4], dst_blend[4];
for (int i = 0; i < 3; i++) {
src_blend[i] = vc4_blend_channel(c,
alpha_test_discard(struct vc4_compile *c)
{
struct qreg src_alpha;
- struct qreg alpha_ref = get_temp_for_uniform(c, QUNIFORM_ALPHA_REF, 0);
+ struct qreg alpha_ref = qir_uniform(c, QUNIFORM_ALPHA_REF, 0);
if (!c->fs_key->alpha_test)
return;
}
}
+/* Applies the framebuffer logic op (fs_key->logicop_func) to the packed
+ * 8888 source (shader output) and destination (TLB) color values.
+ */
+static struct qreg
+vc4_logicop(struct vc4_compile *c, struct qreg src, struct qreg dst)
+{
+        switch (c->fs_key->logicop_func) {
+        case PIPE_LOGICOP_CLEAR:
+                return qir_uniform_f(c, 0.0);
+        case PIPE_LOGICOP_NOR:
+                return qir_NOT(c, qir_OR(c, src, dst));
+        case PIPE_LOGICOP_AND_INVERTED:
+                return qir_AND(c, qir_NOT(c, src), dst);
+        case PIPE_LOGICOP_COPY_INVERTED:
+                return qir_NOT(c, src);
+        case PIPE_LOGICOP_AND_REVERSE:
+                return qir_AND(c, src, qir_NOT(c, dst));
+        case PIPE_LOGICOP_INVERT:
+                return qir_NOT(c, dst);
+        case PIPE_LOGICOP_XOR:
+                return qir_XOR(c, src, dst);
+        case PIPE_LOGICOP_NAND:
+                return qir_NOT(c, qir_AND(c, src, dst));
+        case PIPE_LOGICOP_AND:
+                return qir_AND(c, src, dst);
+        case PIPE_LOGICOP_EQUIV:
+                return qir_NOT(c, qir_XOR(c, src, dst));
+        case PIPE_LOGICOP_NOOP:
+                return dst;
+        case PIPE_LOGICOP_OR_INVERTED:
+                return qir_OR(c, qir_NOT(c, src), dst);
+        case PIPE_LOGICOP_OR_REVERSE:
+                return qir_OR(c, src, qir_NOT(c, dst));
+        case PIPE_LOGICOP_OR:
+                return qir_OR(c, src, dst);
+        case PIPE_LOGICOP_SET:
+                return qir_uniform_ui(c, ~0);
+        case PIPE_LOGICOP_COPY:
+        default:
+                return src;
+        }
+}
+
static void
emit_frag_end(struct vc4_compile *c)
{
struct qreg tlb_read_color[4] = { c->undef, c->undef, c->undef, c->undef };
struct qreg dst_color[4] = { c->undef, c->undef, c->undef, c->undef };
struct qreg linear_dst_color[4] = { c->undef, c->undef, c->undef, c->undef };
+ struct qreg packed_dst_color = c->undef;
+
if (c->fs_key->blend.blend_enable ||
- c->fs_key->blend.colormask != 0xf) {
+ c->fs_key->blend.colormask != 0xf ||
+ c->fs_key->logicop_func != PIPE_LOGICOP_COPY) {
struct qreg r4 = qir_TLB_COLOR_READ(c);
for (int i = 0; i < 4; i++)
tlb_read_color[i] = qir_R4_UNPACK(c, r4, i);
linear_dst_color[i] = dst_color[i];
}
}
+
+ /* Save the packed value for logic ops. Can't reuse r4
+         * because other things might smash it (like sRGB)
+ */
+ packed_dst_color = qir_MOV(c, r4);
}
struct qreg blend_color[4];
blend_color[i] = qir_srgb_encode(c, blend_color[i]);
}
- /* If the bit isn't set in the color mask, then just return the
- * original dst color, instead.
- */
- for (int i = 0; i < 4; i++) {
- if (!(c->fs_key->blend.colormask & (1 << i))) {
- blend_color[i] = dst_color[i];
- }
- }
-
/* Debug: Sometimes you're getting a black output and just want to see
* if the FS is getting executed at all. Spam magenta into the color
* output.
qir_TLB_DISCARD_SETUP(c, c->discard);
if (c->fs_key->stencil_enabled) {
- qir_TLB_STENCIL_SETUP(c, add_uniform(c, QUNIFORM_STENCIL, 0));
+ qir_TLB_STENCIL_SETUP(c, qir_uniform(c, QUNIFORM_STENCIL, 0));
if (c->fs_key->stencil_twoside) {
- qir_TLB_STENCIL_SETUP(c, add_uniform(c, QUNIFORM_STENCIL, 1));
+ qir_TLB_STENCIL_SETUP(c, qir_uniform(c, QUNIFORM_STENCIL, 1));
}
if (c->fs_key->stencil_full_writemasks) {
- qir_TLB_STENCIL_SETUP(c, add_uniform(c, QUNIFORM_STENCIL, 2));
+ qir_TLB_STENCIL_SETUP(c, qir_uniform(c, QUNIFORM_STENCIL, 2));
}
}
qir_TLB_Z_WRITE(c, z);
}
- bool color_written = false;
+ struct qreg packed_color = c->undef;
for (int i = 0; i < 4; i++) {
- if (swizzled_outputs[i].file != QFILE_NULL)
- color_written = true;
+ if (swizzled_outputs[i].file == QFILE_NULL)
+ continue;
+ if (packed_color.file == QFILE_NULL) {
+ packed_color = qir_PACK_8888_F(c, swizzled_outputs[i]);
+ } else {
+ packed_color = qir_PACK_8_F(c,
+ packed_color,
+ swizzled_outputs[i],
+ i);
+ }
}
- struct qreg packed_color;
- if (color_written) {
- /* Fill in any undefined colors. The simulator will assertion
- * fail if we read something that wasn't written, and I don't
- * know what hardware does.
- */
- for (int i = 0; i < 4; i++) {
- if (swizzled_outputs[i].file == QFILE_NULL)
- swizzled_outputs[i] = qir_uniform_f(c, 0.0);
- }
- packed_color = qir_get_temp(c);
- qir_emit(c, qir_inst4(QOP_PACK_COLORS, packed_color,
- swizzled_outputs[0],
- swizzled_outputs[1],
- swizzled_outputs[2],
- swizzled_outputs[3]));
- } else {
+ if (packed_color.file == QFILE_NULL)
packed_color = qir_uniform_ui(c, 0);
+
+ if (c->fs_key->logicop_func != PIPE_LOGICOP_COPY) {
+ packed_color = vc4_logicop(c, packed_color, packed_dst_color);
+ }
+
+ /* If the bit isn't set in the color mask, then just return the
+ * original dst color, instead.
+ */
+ uint32_t colormask = 0xffffffff;
+ for (int i = 0; i < 4; i++) {
+ if (format_swiz[i] < 4 &&
+ !(c->fs_key->blend.colormask & (1 << format_swiz[i]))) {
+ colormask &= ~(0xff << (i * 8));
+ }
+ }
+ if (colormask != 0xffffffff) {
+ packed_color = qir_OR(c,
+ qir_AND(c, packed_color,
+ qir_uniform_ui(c, colormask)),
+ qir_AND(c, packed_dst_color,
+ qir_uniform_ui(c, ~colormask)));
}
qir_emit(c, qir_inst(QOP_TLB_COLOR_WRITE, c->undef,
for (int i = 0; i < 2; i++) {
struct qreg scale =
- add_uniform(c, QUNIFORM_VIEWPORT_X_SCALE + i, 0);
+ qir_uniform(c, QUNIFORM_VIEWPORT_X_SCALE + i, 0);
xyi[i] = qir_FTOI(c, qir_FMUL(c,
qir_FMUL(c,
- c->outputs[i],
+ c->outputs[c->output_position_index + i],
scale),
rcp_w));
}
static void
emit_zs_write(struct vc4_compile *c, struct qreg rcp_w)
{
- struct qreg zscale = add_uniform(c, QUNIFORM_VIEWPORT_Z_SCALE, 0);
- struct qreg zoffset = add_uniform(c, QUNIFORM_VIEWPORT_Z_OFFSET, 0);
+ struct qreg zscale = qir_uniform(c, QUNIFORM_VIEWPORT_Z_SCALE, 0);
+ struct qreg zoffset = qir_uniform(c, QUNIFORM_VIEWPORT_Z_OFFSET, 0);
- qir_VPM_WRITE(c, qir_FMUL(c, qir_FADD(c, qir_FMUL(c,
- c->outputs[2],
+ qir_VPM_WRITE(c, qir_FADD(c, qir_FMUL(c, qir_FMUL(c,
+ c->outputs[c->output_position_index + 2],
zscale),
- zoffset),
- rcp_w));
+ rcp_w),
+ zoffset));
}
static void
{
struct qreg point_size;
- if (c->output_point_size_index)
+ if (c->output_point_size_index != -1)
point_size = c->outputs[c->output_point_size_index + 3];
else
point_size = qir_uniform_f(c, 1.0);
if (c->num_inputs)
return;
- for (int i = 0; i < 4; i++) {
- qir_emit(c, qir_inst(QOP_VPM_READ,
- qir_get_temp(c),
- c->undef,
- c->undef));
- c->num_inputs++;
- }
+ c->vattr_sizes[0] = 4;
+ struct qreg vpm = { QFILE_VPM, 0 };
+ (void)qir_MOV(c, vpm);
+ c->num_inputs++;
}
static void
emit_ucp_clipdistance(struct vc4_compile *c)
{
- struct qreg *clipvertex;
-
+ unsigned cv;
if (c->output_clipvertex_index != -1)
- clipvertex = &c->outputs[c->output_clipvertex_index];
+ cv = c->output_clipvertex_index;
else if (c->output_position_index != -1)
- clipvertex = &c->outputs[c->output_position_index];
+ cv = c->output_position_index;
else
return;
plane,
TGSI_SWIZZLE_X);
+
struct qreg dist = qir_uniform_f(c, 0.0);
for (int i = 0; i < 4; i++) {
+ struct qreg pos_chan = c->outputs[cv + i];
struct qreg ucp =
- add_uniform(c, QUNIFORM_USER_CLIP_PLANE,
+ qir_uniform(c, QUNIFORM_USER_CLIP_PLANE,
plane * 4 + i);
- dist = qir_FADD(c, dist, qir_FMUL(c, clipvertex[i], ucp));
+ dist = qir_FADD(c, dist, qir_FMUL(c, pos_chan, ucp));
}
c->outputs[output_index] = dist;
struct vc4_varying_semantic *fs_inputs,
uint32_t num_fs_inputs)
{
- struct qreg rcp_w = qir_RCP(c, c->outputs[3]);
+ struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]);
emit_stub_vpm_read(c);
emit_ucp_clipdistance(c);
static void
emit_coord_end(struct vc4_compile *c)
{
- struct qreg rcp_w = qir_RCP(c, c->outputs[3]);
+ struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]);
emit_stub_vpm_read(c);
for (int i = 0; i < 4; i++)
- qir_VPM_WRITE(c, c->outputs[i]);
+ qir_VPM_WRITE(c, c->outputs[c->output_position_index + i]);
emit_scaled_viewport_write(c, rcp_w);
emit_zs_write(c, rcp_w);
int ret;
c->stage = stage;
+ for (int i = 0; i < 4; i++)
+ c->addr[i] = qir_uniform_f(c, 0.0);
+
c->shader_state = &key->shader_state->base;
+ c->program_id = key->shader_state->program_id;
+ c->variant_id = key->shader_state->compiled_variant_count++;
c->key = key;
switch (stage) {
assert(ret == TGSI_PARSE_OK);
if (vc4_debug & VC4_DEBUG_TGSI) {
- fprintf(stderr, "TGSI:\n");
+ fprintf(stderr, "%s prog %d/%d TGSI:\n",
+ qir_get_stage_name(c->stage),
+ c->program_id, c->variant_id);
tgsi_dump(tokens, 0);
}
}
tgsi_parse_free(&c->parser);
+ if (vc4_debug & VC4_DEBUG_QIR) {
+ fprintf(stderr, "%s prog %d/%d pre-opt QIR:\n",
+ qir_get_stage_name(c->stage),
+ c->program_id, c->variant_id);
+ qir_dump(c);
+ }
qir_optimize(c);
+ qir_lower_uniforms(c);
if (vc4_debug & VC4_DEBUG_QIR) {
- fprintf(stderr, "QIR:\n");
+ fprintf(stderr, "%s prog %d/%d QIR:\n",
+ qir_get_stage_name(c->stage),
+ c->program_id, c->variant_id);
qir_dump(c);
}
qir_reorder_uniforms(c);
vc4_generate_code(vc4, c);
if (vc4_debug & VC4_DEBUG_SHADERDB) {
- fprintf(stderr, "SHADER-DB: %s: %d instructions\n",
- qir_get_stage_name(c->stage), c->qpu_inst_count);
- fprintf(stderr, "SHADER-DB: %s: %d uniforms\n",
- qir_get_stage_name(c->stage), c->num_uniforms);
+ fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d instructions\n",
+ qir_get_stage_name(c->stage),
+ c->program_id, c->variant_id,
+ c->qpu_inst_count);
+ fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d uniforms\n",
+ qir_get_stage_name(c->stage),
+ c->program_id, c->variant_id,
+ c->num_uniforms);
}
return c;
vc4_shader_state_create(struct pipe_context *pctx,
const struct pipe_shader_state *cso)
{
+ struct vc4_context *vc4 = vc4_context(pctx);
struct vc4_uncompiled_shader *so = CALLOC_STRUCT(vc4_uncompiled_shader);
if (!so)
return NULL;
so->base.tokens = tgsi_transform_lowering(&lowering_config, cso->tokens, &info);
if (!so->base.tokens)
so->base.tokens = tgsi_dup_tokens(cso->tokens);
+ so->program_id = vc4->next_uncompiled_program_id++;
return so;
}
vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage,
struct vc4_key *key)
{
- struct util_hash_table *ht;
+ struct hash_table *ht;
uint32_t key_size;
if (stage == QSTAGE_FRAG) {
ht = vc4->fs_cache;
}
struct vc4_compiled_shader *shader;
- shader = util_hash_table_get(ht, key);
- if (shader)
- return shader;
+ struct hash_entry *entry = _mesa_hash_table_search(ht, key);
+ if (entry)
+ return entry->data;
struct vc4_compile *c = vc4_shader_tgsi_to_qir(vc4, stage, key);
shader = rzalloc(NULL, struct vc4_compiled_shader);
shader->program_id = vc4->next_compiled_program_id++;
if (stage == QSTAGE_FRAG) {
+ bool input_live[c->num_input_semantics];
+ struct simple_node *node;
+
+ memset(input_live, 0, sizeof(input_live));
+ foreach(node, &c->instructions) {
+ struct qinst *inst = (struct qinst *)node;
+ for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) {
+ if (inst->src[i].file == QFILE_VARY)
+ input_live[inst->src[i].index] = true;
+ }
+ }
+
shader->input_semantics = ralloc_array(shader,
struct vc4_varying_semantic,
c->num_input_semantics);
for (int i = 0; i < c->num_input_semantics; i++) {
struct vc4_varying_semantic *sem = &c->input_semantics[i];
+ if (!input_live[i])
+ continue;
+
/* Skip non-VS-output inputs. */
if (sem->semantic == (uint8_t)~0)
continue;
- if (sem->semantic == TGSI_SEMANTIC_COLOR)
+ if (sem->semantic == TGSI_SEMANTIC_COLOR ||
+ sem->semantic == TGSI_SEMANTIC_BCOLOR) {
shader->color_inputs |= (1 << shader->num_inputs);
+ }
+
shader->input_semantics[shader->num_inputs] = *sem;
shader->num_inputs++;
}
} else {
shader->num_inputs = c->num_inputs;
+
+ shader->vattr_offsets[0] = 0;
+ for (int i = 0; i < 8; i++) {
+ shader->vattr_offsets[i + 1] =
+ shader->vattr_offsets[i] + c->vattr_sizes[i];
+
+ if (c->vattr_sizes[i])
+ shader->vattrs_live |= (1 << i);
+ }
}
copy_uniform_state_to_shader(shader, c);
c->qpu_inst_count * sizeof(uint64_t),
"code");
+ /* Copy the compiler UBO range state to the compiled shader, dropping
+ * out arrays that were never referenced by an indirect load.
+ *
+ * (Note that QIR dead code elimination of an array access still
+ * leaves that array alive, though)
+ */
+        if (c->num_ubo_ranges) {
+                shader->num_ubo_ranges = c->num_ubo_ranges;
+                shader->ubo_ranges = ralloc_array(shader, struct vc4_ubo_range,
+                                                  c->num_ubo_ranges);
+                uint32_t j = 0;
+                for (int i = 0; i < c->ubo_ranges_array_size; i++) {
+                        struct vc4_compiler_ubo_range *range =
+                                &c->ubo_ranges[i];
+                        if (!range->used)
+                                continue;
+
+                        shader->ubo_ranges[j].dst_offset = range->dst_offset;
+                        shader->ubo_ranges[j].src_offset = range->src_offset;
+                        shader->ubo_ranges[j].size = range->size;
+                        /* Use the cached range pointer like the lines above. */
+                        shader->ubo_size += range->size;
+                        j++;
+                }
+        }
+
qir_compile_destroy(c);
struct vc4_key *dup_key;
- dup_key = malloc(key_size);
+ dup_key = ralloc_size(shader, key_size);
memcpy(dup_key, key, key_size);
- util_hash_table_set(ht, dup_key, shader);
+ _mesa_hash_table_insert(ht, dup_key, shader);
return shader;
}
VC4_DIRTY_RASTERIZER |
VC4_DIRTY_FRAGTEX |
VC4_DIRTY_TEXSTATE |
- VC4_DIRTY_PROG))) {
+ VC4_DIRTY_UNCOMPILED_FS))) {
return;
}
key->is_lines = (prim_mode >= PIPE_PRIM_LINES &&
prim_mode <= PIPE_PRIM_LINE_STRIP);
key->blend = vc4->blend->rt[0];
-
+ if (vc4->blend->logicop_enable) {
+ key->logicop_func = vc4->blend->logicop_func;
+ } else {
+ key->logicop_func = PIPE_LOGICOP_COPY;
+ }
if (vc4->framebuffer.cbufs[0])
key->color_format = vc4->framebuffer.cbufs[0]->format;
if (vc4->prog.fs == old_fs)
return;
+ vc4->dirty |= VC4_DIRTY_COMPILED_FS;
if (vc4->rasterizer->base.flatshade &&
old_fs && vc4->prog.fs->color_inputs != old_fs->color_inputs) {
vc4->dirty |= VC4_DIRTY_FLAT_SHADE_FLAGS;
VC4_DIRTY_VERTTEX |
VC4_DIRTY_TEXSTATE |
VC4_DIRTY_VTXSTATE |
- VC4_DIRTY_PROG))) {
+ VC4_DIRTY_UNCOMPILED_VS |
+ VC4_DIRTY_COMPILED_FS))) {
return;
}
vc4_update_compiled_vs(vc4, prim_mode);
}
-static unsigned
-fs_cache_hash(void *key)
+static uint32_t
+fs_cache_hash(const void *key)
{
return _mesa_hash_data(key, sizeof(struct vc4_fs_key));
}
-static unsigned
-vs_cache_hash(void *key)
+static uint32_t
+vs_cache_hash(const void *key)
{
return _mesa_hash_data(key, sizeof(struct vc4_vs_key));
}
-static int
-fs_cache_compare(void *key1, void *key2)
+static bool
+fs_cache_compare(const void *key1, const void *key2)
{
- return memcmp(key1, key2, sizeof(struct vc4_fs_key));
+ return memcmp(key1, key2, sizeof(struct vc4_fs_key)) == 0;
}
-static int
-vs_cache_compare(void *key1, void *key2)
+static bool
+vs_cache_compare(const void *key1, const void *key2)
{
- return memcmp(key1, key2, sizeof(struct vc4_vs_key));
-}
-
-struct delete_state {
- struct vc4_context *vc4;
- struct vc4_uncompiled_shader *shader_state;
-};
-
-static enum pipe_error
-fs_delete_from_cache(void *in_key, void *in_value, void *data)
-{
- struct delete_state *del = data;
- struct vc4_fs_key *key = in_key;
- struct vc4_compiled_shader *shader = in_value;
-
- if (key->base.shader_state == data) {
- util_hash_table_remove(del->vc4->fs_cache, key);
- vc4_bo_unreference(&shader->bo);
- ralloc_free(shader);
- }
-
- return 0;
+ return memcmp(key1, key2, sizeof(struct vc4_vs_key)) == 0;
}
-static enum pipe_error
-vs_delete_from_cache(void *in_key, void *in_value, void *data)
+static void
+delete_from_cache_if_matches(struct hash_table *ht,
+ struct hash_entry *entry,
+ struct vc4_uncompiled_shader *so)
{
- struct delete_state *del = data;
- struct vc4_vs_key *key = in_key;
- struct vc4_compiled_shader *shader = in_value;
+ const struct vc4_key *key = entry->key;
- if (key->base.shader_state == data) {
- util_hash_table_remove(del->vc4->vs_cache, key);
+ if (key->shader_state == so) {
+ struct vc4_compiled_shader *shader = entry->data;
+ _mesa_hash_table_remove(ht, entry);
vc4_bo_unreference(&shader->bo);
ralloc_free(shader);
}
-
- return 0;
}
static void
{
struct vc4_context *vc4 = vc4_context(pctx);
struct vc4_uncompiled_shader *so = hwcso;
- struct delete_state del;
- del.vc4 = vc4;
- del.shader_state = so;
- util_hash_table_foreach(vc4->fs_cache, fs_delete_from_cache, &del);
- util_hash_table_foreach(vc4->vs_cache, vs_delete_from_cache, &del);
+ struct hash_entry *entry;
+ hash_table_foreach(vc4->fs_cache, entry)
+ delete_from_cache_if_matches(vc4->fs_cache, entry, so);
+ hash_table_foreach(vc4->vs_cache, entry)
+ delete_from_cache_if_matches(vc4->vs_cache, entry, so);
if (so->twoside_tokens != so->base.tokens)
free((void *)so->twoside_tokens);
texture->u.tex.first_level, VC4_TEX_P0_MIPLVLS) |
VC4_SET_FIELD(texture->target == PIPE_TEXTURE_CUBE,
VC4_TEX_P0_CMMODE) |
- VC4_SET_FIELD(rsc->vc4_format & 7, VC4_TEX_P0_TYPE));
+ VC4_SET_FIELD(rsc->vc4_format & 15, VC4_TEX_P0_TYPE));
}
static void
(sampler->mag_img_filter == PIPE_TEX_MIPFILTER_NEAREST ||
sampler->min_img_filter == PIPE_TEX_MIPFILTER_NEAREST);
- cl_u32(&vc4->uniforms,
+ cl_aligned_u32(&vc4->uniforms,
VC4_SET_FIELD(rsc->vc4_format >> 4, VC4_TEX_P1_TYPE4) |
VC4_SET_FIELD(texture->texture->height0 & 2047,
VC4_TEX_P1_HEIGHT) |
struct pipe_sampler_view *texture = texstate->textures[unit];
struct vc4_resource *rsc = vc4_resource(texture->texture);
- cl_u32(&vc4->uniforms,
+ cl_aligned_u32(&vc4->uniforms,
VC4_SET_FIELD(VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE,
VC4_TEX_P2_PTYPE) |
VC4_SET_FIELD(rsc->cube_map_stride >> 12, VC4_TEX_P2_CMST) |
}
}
- cl_u32(&vc4->uniforms, uc.ui[0]);
+ cl_aligned_u32(&vc4->uniforms, uc.ui[0]);
}
static uint32_t
return fui(1.0f / dim);
}
+/* Uploads the shader's indirectly-addressed uniform ranges into a
+ * freshly allocated BO so the TMU can fetch them by byte offset.
+ *
+ * Returns NULL when the shader has no indirect uniform accesses
+ * (ubo_size == 0); otherwise the caller owns the returned BO reference.
+ */
+static struct vc4_bo *
+vc4_upload_ubo(struct vc4_context *vc4, struct vc4_compiled_shader *shader,
+ const uint32_t *gallium_uniforms)
+{
+ if (!shader->ubo_size)
+ return NULL;
+
+ struct vc4_bo *ubo = vc4_bo_alloc(vc4->screen, shader->ubo_size, "ubo");
+ /* dst_offset/src_offset are byte offsets (the compiler emits the
+ * TMU indirect address as dst_offset + index * 16 + swiz * 4), so
+ * do the pointer math in char units: uint32_t pointer arithmetic
+ * would silently scale both offsets by 4.
+ */
+ char *data = vc4_bo_map(ubo);
+ for (uint32_t i = 0; i < shader->num_ubo_ranges; i++) {
+ memcpy(data + shader->ubo_ranges[i].dst_offset,
+ (const char *)gallium_uniforms +
+ shader->ubo_ranges[i].src_offset,
+ shader->ubo_ranges[i].size);
+ }
+
+ return ubo;
+}
+
void
vc4_write_uniforms(struct vc4_context *vc4, struct vc4_compiled_shader *shader,
struct vc4_constbuf_stateobj *cb,
{
struct vc4_shader_uniform_info *uinfo = &shader->uniforms;
const uint32_t *gallium_uniforms = cb->cb[0].user_buffer;
+ struct vc4_bo *ubo = vc4_upload_ubo(vc4, shader, gallium_uniforms);
+
+ cl_ensure_space(&vc4->uniforms, (uinfo->count +
+ uinfo->num_texture_samples) * 4);
cl_start_shader_reloc(&vc4->uniforms, uinfo->num_texture_samples);
switch (uinfo->contents[i]) {
case QUNIFORM_CONSTANT:
- cl_u32(&vc4->uniforms, uinfo->data[i]);
+ cl_aligned_u32(&vc4->uniforms, uinfo->data[i]);
break;
case QUNIFORM_UNIFORM:
- cl_u32(&vc4->uniforms,
- gallium_uniforms[uinfo->data[i]]);
+ cl_aligned_u32(&vc4->uniforms,
+ gallium_uniforms[uinfo->data[i]]);
break;
case QUNIFORM_VIEWPORT_X_SCALE:
- cl_f(&vc4->uniforms, vc4->viewport.scale[0] * 16.0f);
+ cl_aligned_f(&vc4->uniforms, vc4->viewport.scale[0] * 16.0f);
break;
case QUNIFORM_VIEWPORT_Y_SCALE:
- cl_f(&vc4->uniforms, vc4->viewport.scale[1] * 16.0f);
+ cl_aligned_f(&vc4->uniforms, vc4->viewport.scale[1] * 16.0f);
break;
case QUNIFORM_VIEWPORT_Z_OFFSET:
- cl_f(&vc4->uniforms, vc4->viewport.translate[2]);
+ cl_aligned_f(&vc4->uniforms, vc4->viewport.translate[2]);
break;
case QUNIFORM_VIEWPORT_Z_SCALE:
- cl_f(&vc4->uniforms, vc4->viewport.scale[2]);
+ cl_aligned_f(&vc4->uniforms, vc4->viewport.scale[2]);
break;
case QUNIFORM_USER_CLIP_PLANE:
- cl_f(&vc4->uniforms,
- vc4->clip.ucp[uinfo->data[i] / 4][uinfo->data[i] % 4]);
+ cl_aligned_f(&vc4->uniforms,
+ vc4->clip.ucp[uinfo->data[i] / 4][uinfo->data[i] % 4]);
break;
case QUNIFORM_TEXTURE_CONFIG_P0:
write_texture_p2(vc4, texstate, uinfo->data[i]);
break;
+ case QUNIFORM_UBO_ADDR:
+ cl_aligned_reloc(vc4, &vc4->uniforms, ubo, 0);
+ break;
+
case QUNIFORM_TEXTURE_BORDER_COLOR:
write_texture_border_color(vc4, texstate, uinfo->data[i]);
break;
case QUNIFORM_TEXRECT_SCALE_X:
case QUNIFORM_TEXRECT_SCALE_Y:
- cl_u32(&vc4->uniforms,
- get_texrect_scale(texstate,
- uinfo->contents[i],
- uinfo->data[i]));
+ cl_aligned_u32(&vc4->uniforms,
+ get_texrect_scale(texstate,
+ uinfo->contents[i],
+ uinfo->data[i]));
break;
case QUNIFORM_BLEND_CONST_COLOR:
- cl_f(&vc4->uniforms,
- vc4->blend_color.color[uinfo->data[i]]);
+ cl_aligned_f(&vc4->uniforms,
+ CLAMP(vc4->blend_color.color[uinfo->data[i]], 0, 1));
break;
case QUNIFORM_STENCIL:
- cl_u32(&vc4->uniforms,
- vc4->zsa->stencil_uniforms[uinfo->data[i]] |
- (uinfo->data[i] <= 1 ?
- (vc4->stencil_ref.ref_value[uinfo->data[i]] << 8) :
- 0));
+ cl_aligned_u32(&vc4->uniforms,
+ vc4->zsa->stencil_uniforms[uinfo->data[i]] |
+ (uinfo->data[i] <= 1 ?
+ (vc4->stencil_ref.ref_value[uinfo->data[i]] << 8) :
+ 0));
break;
case QUNIFORM_ALPHA_REF:
- cl_f(&vc4->uniforms, vc4->zsa->base.alpha.ref_value);
+ cl_aligned_f(&vc4->uniforms,
+ vc4->zsa->base.alpha.ref_value);
break;
}
#if 0
{
struct vc4_context *vc4 = vc4_context(pctx);
vc4->prog.bind_fs = hwcso;
- vc4->prog.dirty |= VC4_SHADER_DIRTY_FP;
- vc4->dirty |= VC4_DIRTY_PROG;
+ vc4->dirty |= VC4_DIRTY_UNCOMPILED_FS;
}
static void
{
struct vc4_context *vc4 = vc4_context(pctx);
vc4->prog.bind_vs = hwcso;
- vc4->prog.dirty |= VC4_SHADER_DIRTY_VP;
- vc4->dirty |= VC4_DIRTY_PROG;
+ vc4->dirty |= VC4_DIRTY_UNCOMPILED_VS;
}
void
pctx->bind_fs_state = vc4_fp_state_bind;
pctx->bind_vs_state = vc4_vp_state_bind;
- vc4->fs_cache = util_hash_table_create(fs_cache_hash, fs_cache_compare);
- vc4->vs_cache = util_hash_table_create(vs_cache_hash, vs_cache_compare);
+ vc4->fs_cache = _mesa_hash_table_create(pctx, fs_cache_hash,
+ fs_cache_compare);
+ vc4->vs_cache = _mesa_hash_table_create(pctx, vs_cache_hash,
+ vs_cache_compare);
+}
+
+/* Tears down the per-context shader-variant caches, freeing every
+ * compiled FS and VS variant and its backing BO.
+ *
+ * NOTE(review): entries are removed while walking hash_table_foreach;
+ * this is assumed safe because _mesa_hash_table_remove() only marks the
+ * entry as deleted rather than rehashing -- confirm against
+ * util/hash_table.c.
+ */
+void
+vc4_program_fini(struct pipe_context *pctx)
+{
+ struct vc4_context *vc4 = vc4_context(pctx);
+
+ struct hash_entry *entry;
+ hash_table_foreach(vc4->fs_cache, entry) {
+ struct vc4_compiled_shader *shader = entry->data;
+ vc4_bo_unreference(&shader->bo);
+ ralloc_free(shader);
+ _mesa_hash_table_remove(vc4->fs_cache, entry);
+ }
+
+ hash_table_foreach(vc4->vs_cache, entry) {
+ struct vc4_compiled_shader *shader = entry->data;
+ vc4_bo_unreference(&shader->bo);
+ ralloc_free(shader);
+ _mesa_hash_table_remove(vc4->vs_cache, entry);
+ }
}