fs_reg reg;
if (var->data.location == VARYING_SLOT_POS) {
- reg = *emit_fragcoord_interpolation(var->data.pixel_center_integer,
- var->data.origin_upper_left);
+ reg = *emit_fragcoord_interpolation();
emit_percomp(bld, fs_inst(BRW_OPCODE_MOV, bld.dispatch_width(),
input, reg), 0xF);
} else if (var->data.location == VARYING_SLOT_LAYER) {
}
} else {
assert(type->is_scalar() || type->is_vector());
- this->outputs[*location] = *reg;
- this->output_components[*location] = type->vector_elements;
- *reg = offset(*reg, bld, 4);
- (*location)++;
+ unsigned num_elements = type->vector_elements;
+ if (type->is_double())
+ num_elements *= 2;
+ for (unsigned count = 0; count < num_elements; count += 4) {
+ this->outputs[*location] = *reg;
+ this->output_components[*location] = MIN2(4, num_elements - count);
+ *reg = offset(*reg, bld, 4);
+ (*location)++;
+ }
}
}
stride(byte_offset(retype(brw_vec1_grf(1, 0),
BRW_REGISTER_TYPE_UB), 28),
1, 8, 0),
- brw_imm_uv(0x76543210));
+ brw_imm_v(0x76543210));
/* A set bit in the pixel mask means the channel is enabled, but
* that is the opposite of gl_HelperInvocation so we need to invert
break;
case nir_op_fddy:
if (fs_key->high_quality_derivatives) {
- inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0],
- brw_imm_d(fs_key->render_to_fbo));
+ inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0]);
} else {
- inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0],
- brw_imm_d(fs_key->render_to_fbo));
+ inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0]);
}
inst->saturate = instr->dest.saturate;
break;
case nir_op_fddy_fine:
- inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0],
- brw_imm_d(fs_key->render_to_fbo));
+ inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0]);
inst->saturate = instr->dest.saturate;
break;
case nir_op_fddy_coarse:
- inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0],
- brw_imm_d(fs_key->render_to_fbo));
+ inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0]);
inst->saturate = instr->dest.saturate;
break;
case nir_op_extract_u8:
case nir_op_extract_i8: {
nir_const_value *byte = nir_src_as_const_value(instr->src[1].src);
+ assert(byte != NULL);
bld.emit(SHADER_OPCODE_EXTRACT_BYTE,
result, op[0], brw_imm_ud(byte->u32[0]));
break;
case nir_op_extract_u16:
case nir_op_extract_i16: {
nir_const_value *word = nir_src_as_const_value(instr->src[1].src);
+ assert(word != NULL);
bld.emit(SHADER_OPCODE_EXTRACT_WORD,
result, op[0], brw_imm_ud(word->u32[0]));
break;
}
fs_reg
-fs_visitor::get_nir_src(nir_src src)
+fs_visitor::get_nir_src(const nir_src &src)
{
fs_reg reg;
if (src.is_ssa) {
return retype(reg, BRW_REGISTER_TYPE_D);
}
+/**
+ * Return an IMM for constants; otherwise call get_nir_src() as normal.
+ */
+fs_reg
+fs_visitor::get_nir_src_imm(const nir_src &src)
+{
+ nir_const_value *val = nir_src_as_const_value(src);
+ return val ? fs_reg(brw_imm_d(val->i32[0])) : get_nir_src(src);
+}
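+
+/* Later hunks in this patch use this helper so that compile-time-constant
+ * texture LOD, bias and texel-fetch LOD sources become immediates, e.g.:
+ *
+ *    srcs[TEX_LOGICAL_SRC_LOD] =
+ *       retype(get_nir_src_imm(instr->src[i].src), BRW_REGISTER_TYPE_D);
+ */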
+
fs_reg
-fs_visitor::get_nir_dest(nir_dest dest)
+fs_visitor::get_nir_dest(const nir_dest &dest)
{
if (dest.is_ssa) {
const brw_reg_type reg_type =
*/
const bool is_point_size = (base_offset == 0);
- if (offset_const != NULL && vertex_const != NULL &&
+ /* TODO: figure out push input layout for invocations > 1 */
+ if (gs_prog_data->invocations == 1 &&
+ offset_const != NULL && vertex_const != NULL &&
4 * (base_offset + offset_const->u32[0]) < push_reg_count) {
int imm_offset = (base_offset + offset_const->u32[0]) * 4 +
vertex_const->u32[0] * push_reg_count;
/* This input was pushed into registers. */
if (is_point_size) {
/* gl_PointSize comes in .w */
- assert(imm_offset == 0);
bld.MOV(dst, fs_reg(ATTR, imm_offset + 3, dst.type));
} else {
for (unsigned i = 0; i < num_components; i++) {
fs_reg(ATTR, imm_offset + i, dst.type));
}
}
- } else {
- /* Resort to the pull model. Ensure the VUE handles are provided. */
- gs_prog_data->base.include_vue_handles = true;
+ return;
+ }
- unsigned first_icp_handle = gs_prog_data->include_primitive_id ? 3 : 2;
- fs_reg icp_handle;
+ /* Resort to the pull model. Ensure the VUE handles are provided. */
+ gs_prog_data->base.include_vue_handles = true;
+
+ unsigned first_icp_handle = gs_prog_data->include_primitive_id ? 3 : 2;
+ fs_reg icp_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
+ if (gs_prog_data->invocations == 1) {
if (vertex_const) {
/* The vertex index is constant; just select the proper URB handle. */
icp_handle =
fs_reg channel_offsets = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
fs_reg vertex_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
fs_reg icp_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
- icp_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
/* sequence = <7, 6, 5, 4, 3, 2, 1, 0> */
bld.MOV(sequence, fs_reg(brw_imm_v(0x76543210)));
fs_reg(icp_offset_bytes),
brw_imm_ud(nir->info.gs.vertices_in * REG_SIZE));
}
+ } else {
+ assert(gs_prog_data->invocations > 1);
- fs_inst *inst;
- if (offset_const) {
- /* Constant indexing - use global offset. */
- inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dst, icp_handle);
- inst->offset = base_offset + offset_const->u32[0];
- inst->base_mrf = -1;
- inst->mlen = 1;
- inst->regs_written = num_components;
+ if (vertex_const) {
+ assert(devinfo->gen >= 9 || vertex_const->i32[0] <= 5);
+ bld.MOV(icp_handle,
+ retype(brw_vec1_grf(first_icp_handle +
+ vertex_const->i32[0] / 8,
+ vertex_const->i32[0] % 8),
+ BRW_REGISTER_TYPE_UD));
} else {
- /* Indirect indexing - use per-slot offsets as well. */
- const fs_reg srcs[] = { icp_handle, get_nir_src(offset_src) };
- fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
- bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);
+ /* The vertex index is non-constant. We need to use indirect
+ * addressing to fetch the proper URB handle.
+ */
+ fs_reg icp_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
- inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, dst, payload);
- inst->offset = base_offset;
- inst->base_mrf = -1;
- inst->mlen = 2;
- inst->regs_written = num_components;
- }
+ /* Convert vertex_index to bytes (multiply by 4) */
+ bld.SHL(icp_offset_bytes,
+ retype(get_nir_src(vertex_src), BRW_REGISTER_TYPE_UD),
+ brw_imm_ud(2u));
- if (is_point_size) {
- /* Read the whole VUE header (because of alignment) and read .w. */
- fs_reg tmp = bld.vgrf(dst.type, 4);
- inst->dst = tmp;
- inst->regs_written = 4;
- bld.MOV(dst, offset(tmp, bld, 3));
+ /* Use first_icp_handle as the base offset. There is one DWord
+ * of URB handles per vertex, so inform the register allocator that
+ * we might read up to ceil(nir->info.gs.vertices_in / 8) registers.
+ */
+ bld.emit(SHADER_OPCODE_MOV_INDIRECT, icp_handle,
+ fs_reg(brw_vec8_grf(first_icp_handle, 0)),
+ fs_reg(icp_offset_bytes),
+ brw_imm_ud(DIV_ROUND_UP(nir->info.gs.vertices_in, 8) *
+ REG_SIZE));
}
}
+
+ fs_inst *inst;
+ if (offset_const) {
+ /* Constant indexing - use global offset. */
+ inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dst, icp_handle);
+ inst->offset = base_offset + offset_const->u32[0];
+ inst->base_mrf = -1;
+ inst->mlen = 1;
+ inst->regs_written = num_components;
+ } else {
+ /* Indirect indexing - use per-slot offsets as well. */
+ const fs_reg srcs[] = { icp_handle, get_nir_src(offset_src) };
+ fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
+ bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);
+
+ inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, dst, payload);
+ inst->offset = base_offset;
+ inst->base_mrf = -1;
+ inst->mlen = 2;
+ inst->regs_written = num_components;
+ }
+
+ if (is_point_size) {
+ /* Read the whole VUE header (because of alignment) and read .w. */
+ fs_reg tmp = bld.vgrf(dst.type, 4);
+ inst->dst = tmp;
+ inst->regs_written = 4;
+ bld.MOV(dst, offset(tmp, bld, 3));
+ }
}
fs_reg
return get_nir_src(*offset_src);
}
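+/* Helper sketch (summarizing the function below): reads num_components
+ * elements of dest.type from surf_index starting at offset_reg. For 64-bit
+ * destinations the untyped surface messages still move 32-bit channels, so
+ * e.g. a dvec3 read is split into two messages: the first reads 4 dwords and
+ * shuffles them into components x/y, the second reads 2 more dwords 16 bytes
+ * further along for component z.
+ */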
+static void
+do_untyped_vector_read(const fs_builder &bld,
+ const fs_reg dest,
+ const fs_reg surf_index,
+ const fs_reg offset_reg,
+ unsigned num_components)
+{
+ if (type_sz(dest.type) == 4) {
+ fs_reg read_result = emit_untyped_read(bld, surf_index, offset_reg,
+ 1 /* dims */,
+ num_components,
+ BRW_PREDICATE_NONE);
+ read_result.type = dest.type;
+ for (unsigned i = 0; i < num_components; i++)
+ bld.MOV(offset(dest, bld, i), offset(read_result, bld, i));
+ } else if (type_sz(dest.type) == 8) {
+ /* Reading a dvec, so we need to:
+ *
+ * 1. Multiply num_components by 2, to account for the fact that we
+ * need to read 64-bit components.
+ * 2. Shuffle the result of the load to form valid 64-bit elements
+ * 3. Emit a second load (for components z/w) if needed.
+ */
+ fs_reg read_offset = bld.vgrf(BRW_REGISTER_TYPE_UD);
+ bld.MOV(read_offset, offset_reg);
+
+ int iters = num_components <= 2 ? 1 : 2;
+
+ /* Load the dvec, the first iteration loads components x/y, the second
+ * iteration, if needed, loads components z/w
+ */
+ for (int it = 0; it < iters; it++) {
+ /* Compute number of components to read in this iteration */
+ int iter_components = MIN2(2, num_components);
+ num_components -= iter_components;
+
+ /* Read. Since this message reads 32-bit components, we need to
+ * read twice as many components.
+ */
+ fs_reg read_result = emit_untyped_read(bld, surf_index, read_offset,
+ 1 /* dims */,
+ iter_components * 2,
+ BRW_PREDICATE_NONE);
+
+ /* Shuffle the 32-bit load result into valid 64-bit data */
+ const fs_reg packed_result = bld.vgrf(dest.type, iter_components);
+ shuffle_32bit_load_result_to_64bit_data(
+ bld, packed_result, read_result, iter_components);
+
+ /* Move each component to its destination */
+ read_result = retype(read_result, BRW_REGISTER_TYPE_DF);
+ for (int c = 0; c < iter_components; c++) {
+ bld.MOV(offset(dest, bld, it * 2 + c),
+ offset(packed_result, bld, c));
+ }
+
+ bld.ADD(read_offset, read_offset, brw_imm_ud(16));
+ }
+ } else {
+ unreachable("Unsupported type");
+ }
+}
+
void
fs_visitor::nir_emit_vs_intrinsic(const fs_builder &bld,
nir_intrinsic_instr *instr)
brw_imm_ud(4 * REG_SIZE));
}
- if (indirect_offset.file == BAD_FILE) {
- /* Constant indexing - use global offset. */
- inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dst, icp_handle);
- inst->offset = imm_offset;
- inst->mlen = 1;
- inst->base_mrf = -1;
- inst->regs_written = instr->num_components;
- } else {
- /* Indirect indexing - use per-slot offsets as well. */
- const fs_reg srcs[] = { icp_handle, indirect_offset };
- fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
- bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);
+ /* We can only read two double components with each URB read, so
+ * we send two read messages in that case, each one loading up to
+ * two double components.
+ */
+ unsigned num_iterations = 1;
+ unsigned num_components = instr->num_components;
+ fs_reg orig_dst = dst;
+ if (type_sz(dst.type) == 8) {
+ if (instr->num_components > 2) {
+ num_iterations = 2;
+ num_components = 2;
+ }
- inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, dst, payload);
- inst->offset = imm_offset;
- inst->base_mrf = -1;
- inst->mlen = 2;
- inst->regs_written = instr->num_components;
+ fs_reg tmp = fs_reg(VGRF, alloc.allocate(4), dst.type);
+ dst = tmp;
}
- /* Copy the temporary to the destination to deal with writemasking.
- *
- * Also attempt to deal with gl_PointSize being in the .w component.
- */
- if (inst->offset == 0 && indirect_offset.file == BAD_FILE) {
- inst->dst = bld.vgrf(dst.type, 4);
- inst->regs_written = 4;
- bld.MOV(dst, offset(inst->dst, bld, 3));
+ for (unsigned iter = 0; iter < num_iterations; iter++) {
+ if (indirect_offset.file == BAD_FILE) {
+ /* Constant indexing - use global offset. */
+ inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dst, icp_handle);
+ inst->offset = imm_offset;
+ inst->mlen = 1;
+ inst->base_mrf = -1;
+ } else {
+ /* Indirect indexing - use per-slot offsets as well. */
+ const fs_reg srcs[] = { icp_handle, indirect_offset };
+ fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
+ bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);
+
+ inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, dst, payload);
+ inst->offset = imm_offset;
+ inst->base_mrf = -1;
+ inst->mlen = 2;
+ }
+ inst->regs_written = num_components * type_sz(dst.type) / 4;
+
+ /* If we are reading 64-bit data using 32-bit read messages we need to
+ * build proper 64-bit data elements by shuffling the low and high
+ * 32-bit components around like we do for other things like UBOs
+ * or SSBOs.
+ */
+ if (type_sz(dst.type) == 8) {
+ shuffle_32bit_load_result_to_64bit_data(
+ bld, dst, retype(dst, BRW_REGISTER_TYPE_F), num_components);
+
+ for (unsigned c = 0; c < num_components; c++) {
+ bld.MOV(offset(orig_dst, bld, iter * 2 + c),
+ offset(dst, bld, c));
+ }
+ }
+
+ /* Copy the temporary to the destination to deal with writemasking.
+ *
+ * Also attempt to deal with gl_PointSize being in the .w component.
+ */
+ if (inst->offset == 0 && indirect_offset.file == BAD_FILE) {
+ assert(type_sz(dst.type) < 8);
+ inst->dst = bld.vgrf(dst.type, 4);
+ inst->regs_written = 4;
+ bld.MOV(dst, offset(inst->dst, bld, 3));
+ }
+
+ /* If we are loading double data and we need a second read message,
+ * adjust the write offset
+ */
+ if (num_iterations > 1) {
+ num_components = instr->num_components - 2;
+ if (indirect_offset.file == BAD_FILE) {
+ imm_offset++;
+ } else {
+ fs_reg new_indirect = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
+ bld.ADD(new_indirect, indirect_offset, brw_imm_ud(1u));
+ indirect_offset = new_indirect;
+ }
+ }
}
break;
}
case nir_intrinsic_store_output:
case nir_intrinsic_store_per_vertex_output: {
fs_reg value = get_nir_src(instr->src[0]);
+ bool is_64bit = (instr->src[0].is_ssa ?
+ instr->src[0].ssa->bit_size : instr->src[0].reg.reg->bit_size) == 64;
fs_reg indirect_offset = get_indirect_offset(instr);
unsigned imm_offset = instr->const_index[0];
unsigned swiz = BRW_SWIZZLE_XYZW;
unsigned num_components = _mesa_fls(mask);
enum opcode opcode;
- if (mask != WRITEMASK_XYZW) {
- srcs[header_regs++] = brw_imm_ud(mask << 16);
- opcode = indirect_offset.file != BAD_FILE ?
- SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT :
- SHADER_OPCODE_URB_WRITE_SIMD8_MASKED;
- } else {
- opcode = indirect_offset.file != BAD_FILE ?
- SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT :
- SHADER_OPCODE_URB_WRITE_SIMD8;
+ /* We can only pack two 64-bit components in a single message, so send
+ * 2 messages if we have more components
+ */
+ unsigned num_iterations = 1;
+ unsigned iter_components = num_components;
+ if (is_64bit && instr->num_components > 2) {
+ num_iterations = 2;
+ iter_components = 2;
}
- for (unsigned i = 0; i < num_components; i++) {
- if (mask & (1 << i))
- srcs[header_regs + i] = offset(value, bld, BRW_GET_SWZ(swiz, i));
- }
+ /* 64-bit data needs to be shuffled before we can write it to the URB.
+ * We will use this temporary to shuffle the components in each
+ * iteration.
+ */
+ fs_reg tmp =
+ fs_reg(VGRF, alloc.allocate(2 * iter_components), value.type);
+
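+ /* Illustrative walk-through of the loop below: a dvec3 store with
+ * writemask XYZ takes two iterations. The first writes doubles X and Y as
+ * four 32-bit channels with an unmasked URB write; after mask >>= 2 and
+ * imm_offset++, the second writes double Z as two 32-bit channels using a
+ * masked write with a 32-bit mask of XY.
+ */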
+ for (unsigned iter = 0; iter < num_iterations; iter++) {
+ if (!is_64bit && mask != WRITEMASK_XYZW) {
+ srcs[header_regs++] = brw_imm_ud(mask << 16);
+ opcode = indirect_offset.file != BAD_FILE ?
+ SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT :
+ SHADER_OPCODE_URB_WRITE_SIMD8_MASKED;
+ } else if (is_64bit && ((mask & WRITEMASK_XY) != WRITEMASK_XY)) {
+ /* Expand the 64-bit mask to 32-bit channels. We only handle
+ * two channels in each iteration, so we only care about X/Y.
+ */
+ unsigned mask32 = 0;
+ if (mask & WRITEMASK_X)
+ mask32 |= WRITEMASK_XY;
+ if (mask & WRITEMASK_Y)
+ mask32 |= WRITEMASK_ZW;
+
+ /* If the mask does not include any of the channels X or Y there
+ * is nothing to do in this iteration. Move on to the next couple
+ * of 64-bit channels.
+ */
+ if (!mask32) {
+ mask >>= 2;
+ imm_offset++;
+ continue;
+ }
- unsigned mlen = header_regs + num_components;
+ srcs[header_regs++] = brw_imm_ud(mask32 << 16);
+ opcode = indirect_offset.file != BAD_FILE ?
+ SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT :
+ SHADER_OPCODE_URB_WRITE_SIMD8_MASKED;
+ } else {
+ opcode = indirect_offset.file != BAD_FILE ?
+ SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT :
+ SHADER_OPCODE_URB_WRITE_SIMD8;
+ }
- fs_reg payload =
- bld.vgrf(BRW_REGISTER_TYPE_UD, mlen);
- bld.LOAD_PAYLOAD(payload, srcs, mlen, header_regs);
+ for (unsigned i = 0; i < iter_components; i++) {
+ if (!(mask & (1 << i)))
+ continue;
- fs_inst *inst = bld.emit(opcode, bld.null_reg_ud(), payload);
- inst->offset = imm_offset;
- inst->mlen = mlen;
- inst->base_mrf = -1;
+ if (!is_64bit) {
+ srcs[header_regs + i] = offset(value, bld, BRW_GET_SWZ(swiz, i));
+ } else {
+ /* We need to shuffle the 64-bit data to match the layout
+ * expected by our 32-bit URB write messages. We use a temporary
+ * for that.
+ */
+ unsigned channel = BRW_GET_SWZ(swiz, iter * 2 + i);
+ shuffle_64bit_data_for_32bit_write(bld,
+ retype(offset(tmp, bld, 2 * i), BRW_REGISTER_TYPE_F),
+ retype(offset(value, bld, 2 * channel), BRW_REGISTER_TYPE_DF),
+ 1);
+
+ /* Now copy the data to the destination */
+ fs_reg dest = fs_reg(VGRF, alloc.allocate(2), value.type);
+ unsigned idx = 2 * i;
+ bld.MOV(dest, offset(tmp, bld, idx));
+ bld.MOV(offset(dest, bld, 1), offset(tmp, bld, idx + 1));
+ srcs[header_regs + idx] = dest;
+ srcs[header_regs + idx + 1] = offset(dest, bld, 1);
+ }
+ }
+
+ unsigned mlen =
+ header_regs + (is_64bit ? 2 * iter_components : iter_components);
+ fs_reg payload =
+ bld.vgrf(BRW_REGISTER_TYPE_UD, mlen);
+ bld.LOAD_PAYLOAD(payload, srcs, mlen, header_regs);
+
+ fs_inst *inst = bld.emit(opcode, bld.null_reg_ud(), payload);
+ inst->offset = imm_offset;
+ inst->mlen = mlen;
+ inst->base_mrf = -1;
+
+ /* If this is a 64-bit attribute, select the next two 64-bit channels
+ * to be handled in the next iteration.
+ */
+ if (is_64bit) {
+ mask >>= 2;
+ imm_offset++;
+ }
+ }
break;
}
if (imm_offset < max_push_slots) {
fs_reg src = fs_reg(ATTR, imm_offset / 2, dest.type);
for (int i = 0; i < instr->num_components; i++) {
- bld.MOV(offset(dest, bld, i),
- component(src, 4 * (imm_offset % 2) + i));
+ unsigned comp = 16 / type_sz(dest.type) * (imm_offset % 2) + i;
+ bld.MOV(offset(dest, bld, i), component(src, comp));
}
tes_prog_data->base.urb_read_length =
MAX2(tes_prog_data->base.urb_read_length,
fs_reg temp = vgrf(glsl_type::float_type);
bld.MUL(temp, offset(offset_src, bld, i), brw_imm_f(16.0f));
fs_reg itemp = vgrf(glsl_type::int_type);
- bld.MOV(itemp, temp); /* float to int */
+ /* float to int */
+ bld.MOV(itemp, temp);
/* Clamp the upper end of the range to +7/16.
* ARB_gpu_shader5 requires that we support a maximum offset
}
/* Read the vector */
- fs_reg read_result = emit_untyped_read(bld, surf_index, offset_reg,
- 1 /* dims */,
- instr->num_components,
- BRW_PREDICATE_NONE);
- read_result.type = dest.type;
- for (int i = 0; i < instr->num_components; i++)
- bld.MOV(offset(dest, bld, i), offset(read_result, bld, i));
-
+ do_untyped_vector_read(bld, dest, surf_index, offset_reg,
+ instr->num_components);
break;
}
/* Writemask */
unsigned writemask = instr->const_index[1];
+ /* get_nir_src() retypes to integer. Be wary of 64-bit types though
+ * since the untyped writes below operate in units of 32-bits, which
+ * means that we need to write twice as many components each time.
+ * Also, we have to shuffle 64-bit data to be in the appropriate layout
+ * expected by our 32-bit write messages.
+ */
+ unsigned type_size = 4;
+ unsigned bit_size = instr->src[0].is_ssa ?
+ instr->src[0].ssa->bit_size : instr->src[0].reg.reg->bit_size;
+ if (bit_size == 64) {
+ type_size = 8;
+ fs_reg tmp =
+ fs_reg(VGRF, alloc.allocate(alloc.sizes[val_reg.nr]), val_reg.type);
+ shuffle_64bit_data_for_32bit_write(
+ bld,
+ retype(tmp, BRW_REGISTER_TYPE_F),
+ retype(val_reg, BRW_REGISTER_TYPE_DF),
+ instr->num_components);
+ val_reg = tmp;
+ }
+
+ unsigned type_slots = type_size / 4;
+
/* Combine groups of consecutive enabled channels in one write
* message. We use ffs to find the first enabled channel and then ffs on
* the bit-inverse, down-shifted writemask to determine the length of
while (writemask) {
unsigned first_component = ffs(writemask) - 1;
unsigned length = ffs(~(writemask >> first_component)) - 1;
- fs_reg offset_reg;
+ /* We can't write more than 2 64-bit components at once. Limit the
+ * length of the write to what we can do and let the next iteration
+ * handle the rest
+ */
+ if (type_size > 4)
+ length = MIN2(2, length);
+
+ fs_reg offset_reg;
nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
if (const_offset) {
offset_reg = brw_imm_ud(instr->const_index[0] + const_offset->u32[0] +
- 4 * first_component);
+ type_size * first_component);
} else {
offset_reg = vgrf(glsl_type::uint_type);
bld.ADD(offset_reg,
retype(get_nir_src(instr->src[1]), BRW_REGISTER_TYPE_UD),
- brw_imm_ud(instr->const_index[0] + 4 * first_component));
+ brw_imm_ud(instr->const_index[0] + type_size * first_component));
}
emit_untyped_write(bld, surf_index, offset_reg,
- offset(val_reg, bld, first_component),
- 1 /* dims */, length,
+ offset(val_reg, bld, first_component * type_slots),
+ 1 /* dims */, length * type_slots,
BRW_PREDICATE_NONE);
/* Clear the bits in the writemask that we just wrote, then try
for (int i = 0; i < instr->num_components; i++)
VARYING_PULL_CONSTANT_LOAD(bld, offset(dest, bld, i), surf_index,
- base_offset, i * 4);
+ base_offset, i * type_sz(dest.type));
} else {
- fs_reg packed_consts = vgrf(glsl_type::float_type);
- packed_consts.type = dest.type;
+ /* Even if we are loading doubles, a pull constant load will load
+ * a 32-bit vec4, so we should only reserve vgrf space for that. If we
+ * need to load a full dvec4 we will have to emit 2 loads. This is
+ * similar to demote_pull_constants(), except that in that case we
+ * see individual accesses to each component of the vector and then
+ * we let CSE deal with duplicate loads. Here we see a vector access
+ * and we have to split it if necessary.
+ */
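+ /* For example, a dvec4 starting at a 16-byte-aligned offset is split
+ * into two pull constant loads: the first 16-byte block supplies
+ * components x/y and the next block supplies z/w.
+ */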
+ const unsigned type_size = type_sz(dest.type);
+ const fs_reg packed_consts = bld.vgrf(BRW_REGISTER_TYPE_F);
+ for (unsigned c = 0; c < instr->num_components;) {
+ const unsigned base = const_offset->u32[0] + c * type_size;
- struct brw_reg const_offset_reg = brw_imm_ud(const_offset->u32[0] & ~15);
- bld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD, packed_consts,
- surf_index, const_offset_reg);
+ /* Number of usable components in the next 16B-aligned load */
+ const unsigned count = MIN2(instr->num_components - c,
+ (16 - base % 16) / type_size);
- for (unsigned i = 0; i < instr->num_components; i++) {
- packed_consts.set_smear(const_offset->u32[0] % 16 / 4 + i);
+ bld.exec_all()
+ .emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD,
+ packed_consts, surf_index, brw_imm_ud(base & ~15));
- /* The std140 packing rules don't allow vectors to cross 16-byte
- * boundaries, and a reg is 32 bytes.
- */
- assert(packed_consts.subreg_offset < 32);
+ const fs_reg consts =
+ retype(byte_offset(packed_consts, base & 15), dest.type);
+
+ for (unsigned d = 0; d < count; d++)
+ bld.MOV(offset(dest, bld, c + d), component(consts, d));
- bld.MOV(dest, packed_consts);
- dest = offset(dest, bld, 1);
+ c += count;
}
}
break;
}
/* Read the vector */
- fs_reg read_result = emit_untyped_read(bld, surf_index, offset_reg,
- 1 /* dims */,
- instr->num_components,
- BRW_PREDICATE_NONE);
- read_result.type = dest.type;
- for (int i = 0; i < instr->num_components; i++)
- bld.MOV(offset(dest, bld, i), offset(read_result, bld, i));
+ do_untyped_vector_read(bld, dest, surf_index, offset_reg,
+ instr->num_components);
break;
}
for (unsigned j = 0; j < instr->num_components; j++) {
bld.MOV(offset(dest, bld, j), offset(src, bld, j));
}
+
+ if (type_sz(src.type) == 8) {
+ shuffle_32bit_load_result_to_64bit_data(bld,
+ dest,
+ retype(dest, BRW_REGISTER_TYPE_F),
+ instr->num_components);
+ }
+
break;
}
/* Writemask */
unsigned writemask = instr->const_index[0];
+ /* get_nir_src() retypes to integer. Be wary of 64-bit types though
+ * since the untyped writes below operate in units of 32-bits, which
+ * means that we need to write twice as many components each time.
+ * Also, we have to shuffle 64-bit data to be in the appropriate layout
+ * expected by our 32-bit write messages.
+ */
+ unsigned type_size = 4;
+ unsigned bit_size = instr->src[0].is_ssa ?
+ instr->src[0].ssa->bit_size : instr->src[0].reg.reg->bit_size;
+ if (bit_size == 64) {
+ type_size = 8;
+ fs_reg tmp =
+ fs_reg(VGRF, alloc.allocate(alloc.sizes[val_reg.nr]), val_reg.type);
+ shuffle_64bit_data_for_32bit_write(bld,
+ retype(tmp, BRW_REGISTER_TYPE_F),
+ retype(val_reg, BRW_REGISTER_TYPE_DF),
+ instr->num_components);
+ val_reg = tmp;
+ }
+
+ unsigned type_slots = type_size / 4;
+
/* Combine groups of consecutive enabled channels in one write
* message. We use ffs to find the first enabled channel and then ffs on
* the bit-inverse, down-shifted writemask to determine the length of
unsigned first_component = ffs(writemask) - 1;
unsigned length = ffs(~(writemask >> first_component)) - 1;
+ /* We can't write more than 2 64-bit components at once. Limit the
+ * length of the write to what we can do and let the next iteration
+ * handle the rest
+ */
+ if (type_size > 4)
+ length = MIN2(2, length);
+
fs_reg offset_reg;
nir_const_value *const_offset = nir_src_as_const_value(instr->src[2]);
if (const_offset) {
- offset_reg = brw_imm_ud(const_offset->u32[0] + 4 * first_component);
+ offset_reg = brw_imm_ud(const_offset->u32[0] +
+ type_size * first_component);
} else {
offset_reg = vgrf(glsl_type::uint_type);
bld.ADD(offset_reg,
retype(get_nir_src(instr->src[2]), BRW_REGISTER_TYPE_UD),
- brw_imm_ud(4 * first_component));
+ brw_imm_ud(type_size * first_component));
}
+
emit_untyped_write(bld, surf_index, offset_reg,
- offset(val_reg, bld, first_component),
- 1 /* dims */, length,
+ offset(val_reg, bld, first_component * type_slots),
+ 1 /* dims */, length * type_slots,
BRW_PREDICATE_NONE);
/* Clear the bits in the writemask that we just wrote, then try
assert(const_offset && "Indirect output stores not allowed");
new_dest = offset(new_dest, bld, const_offset->u32[0]);
- for (unsigned j = 0; j < instr->num_components; j++) {
+ unsigned num_components = instr->num_components;
+ unsigned bit_size = instr->src[0].is_ssa ?
+ instr->src[0].ssa->bit_size : instr->src[0].reg.reg->bit_size;
+ if (bit_size == 64) {
+ fs_reg tmp =
+ fs_reg(VGRF, alloc.allocate(2 * num_components),
+ BRW_REGISTER_TYPE_F);
+ shuffle_64bit_data_for_32bit_write(
+ bld, tmp, retype(src, BRW_REGISTER_TYPE_DF), num_components);
+ src = retype(tmp, src.type);
+ num_components *= 2;
+ }
+
+ for (unsigned j = 0; j < num_components; j++) {
bld.MOV(offset(new_dest, bld, j), offset(src, bld, j));
}
break;
fs_reg src = get_nir_src(instr->src[i].src);
switch (instr->src[i].src_type) {
case nir_tex_src_bias:
- srcs[TEX_LOGICAL_SRC_LOD] = retype(src, BRW_REGISTER_TYPE_F);
+ srcs[TEX_LOGICAL_SRC_LOD] =
+ retype(get_nir_src_imm(instr->src[i].src), BRW_REGISTER_TYPE_F);
break;
case nir_tex_src_comparitor:
srcs[TEX_LOGICAL_SRC_SHADOW_C] = retype(src, BRW_REGISTER_TYPE_F);
switch (instr->op) {
case nir_texop_txf:
case nir_texop_txf_ms:
+ case nir_texop_txf_ms_mcs:
case nir_texop_samples_identical:
srcs[TEX_LOGICAL_SRC_COORDINATE] = retype(src, BRW_REGISTER_TYPE_D);
break;
case nir_tex_src_lod:
switch (instr->op) {
case nir_texop_txs:
- srcs[TEX_LOGICAL_SRC_LOD] = retype(src, BRW_REGISTER_TYPE_UD);
+ srcs[TEX_LOGICAL_SRC_LOD] =
+ retype(get_nir_src_imm(instr->src[i].src), BRW_REGISTER_TYPE_UD);
break;
case nir_texop_txf:
- srcs[TEX_LOGICAL_SRC_LOD] = retype(src, BRW_REGISTER_TYPE_D);
+ srcs[TEX_LOGICAL_SRC_LOD] =
+ retype(get_nir_src_imm(instr->src[i].src), BRW_REGISTER_TYPE_D);
break;
default:
- srcs[TEX_LOGICAL_SRC_LOD] = retype(src, BRW_REGISTER_TYPE_F);
+ srcs[TEX_LOGICAL_SRC_LOD] =
+ retype(get_nir_src_imm(instr->src[i].src), BRW_REGISTER_TYPE_F);
break;
}
break;
break;
}
+ case nir_tex_src_ms_mcs:
+ assert(instr->op == nir_texop_txf_ms);
+ srcs[TEX_LOGICAL_SRC_MCS] = retype(src, BRW_REGISTER_TYPE_D);
+ break;
+
+ case nir_tex_src_plane: {
+ nir_const_value *const_plane =
+ nir_src_as_const_value(instr->src[i].src);
+ const uint32_t plane = const_plane->u32[0];
+ const uint32_t texture_index =
+ instr->texture_index +
+ stage_prog_data->binding_table.plane_start[plane] -
+ stage_prog_data->binding_table.texture_start;
+
+ srcs[TEX_LOGICAL_SRC_SURFACE] = brw_imm_ud(texture_index);
+ break;
+ }
+
default:
unreachable("unknown texture source");
}
}
- if (instr->op == nir_texop_txf_ms ||
- instr->op == nir_texop_samples_identical) {
+ if (srcs[TEX_LOGICAL_SRC_MCS].file == BAD_FILE &&
+ (instr->op == nir_texop_txf_ms ||
+ instr->op == nir_texop_samples_identical)) {
if (devinfo->gen >= 7 &&
key_tex->compressed_multisample_layout_mask & (1 << texture)) {
srcs[TEX_LOGICAL_SRC_MCS] =
else
opcode = SHADER_OPCODE_TXF_CMS_LOGICAL;
break;
+ case nir_texop_txf_ms_mcs:
+ opcode = SHADER_OPCODE_TXF_MCS_LOGICAL;
+ break;
case nir_texop_query_levels:
case nir_texop_txs:
opcode = SHADER_OPCODE_TXS_LOGICAL;
unreachable("unknown jump");
}
}
+
+/**
+ * This helper takes the result of a load operation that reads 32-bit elements
+ * in this format:
+ *
+ * x x x x x x x x
+ * y y y y y y y y
+ * z z z z z z z z
+ * w w w w w w w w
+ *
+ * and shuffles the data to get this:
+ *
+ * x y x y x y x y
+ * x y x y x y x y
+ * z w z w z w z w
+ * z w z w z w z w
+ *
+ * Which is exactly what we want if the load is reading 64-bit components
+ * like doubles, where x represents the low 32 bits of the x double component
+ * and y represents the high 32 bits of the x double component (likewise with
+ * z and w for double component y). The parameter @components represents
+ * the number of 64-bit components present in @src. This would typically be
+ * 2 at most, since we can only fit 2 double elements in the result of a
+ * vec4 load.
+ *
+ * Notice that @dst and @src can be the same register.
+ */
+void
+shuffle_32bit_load_result_to_64bit_data(const fs_builder &bld,
+ const fs_reg &dst,
+ const fs_reg &src,
+ uint32_t components)
+{
+ assert(type_sz(src.type) == 4);
+ assert(type_sz(dst.type) == 8);
+
+ /* A temporary that we will use to shuffle the 32-bit data of each
+ * component in the vector into valid 64-bit data. We can't write directly
+ * to dst because dst can be (and would usually be) the same as src
+ * and in that case the first MOV in the loop below would overwrite the
+ * data read in the second MOV.
+ */
+ fs_reg tmp = bld.vgrf(dst.type);
+
+ for (unsigned i = 0; i < components; i++) {
+ const fs_reg component_i = offset(src, bld, 2 * i);
+
+ bld.MOV(subscript(tmp, src.type, 0), component_i);
+ bld.MOV(subscript(tmp, src.type, 1), offset(component_i, bld, 1));
+
+ bld.MOV(offset(dst, bld, i), tmp);
+ }
+}
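+
+/* Typical use, as in do_untyped_vector_read() above:
+ *
+ *    shuffle_32bit_load_result_to_64bit_data(
+ *       bld, packed_result, read_result, iter_components);
+ *
+ * where read_result holds the raw 32-bit channels returned by
+ * emit_untyped_read() and packed_result is a VGRF of the 64-bit type.
+ */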
+
+/**
+ * This helper does the inverse operation of
+ * shuffle_32bit_load_result_to_64bit_data().
+ *
+ * We need to do this when we are going to use untyped write messages that
+ * operate with 32-bit components in order to arrange our 64-bit data to be
+ * in the expected layout.
+ *
+ * Notice that callers of this function, unlike in the case of the inverse
+ * operation, would typically need to call this with dst and src being
+ * different registers, since they would otherwise corrupt the original
+ * 64-bit data they are about to write. Because of this the function checks
+ * that the src and dst regions involved in the operation do not overlap.
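+ *
+ * For example, with @components == 1 a source laid out as
+ *
+ *    x y x y x y x y
+ *    x y x y x y x y
+ *
+ * (x = low 32 bits, y = high 32 bits of the double) is rearranged into
+ *
+ *    x x x x x x x x
+ *    y y y y y y y y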
+ */
+void
+shuffle_64bit_data_for_32bit_write(const fs_builder &bld,
+ const fs_reg &dst,
+ const fs_reg &src,
+ uint32_t components)
+{
+ assert(type_sz(src.type) == 8);
+ assert(type_sz(dst.type) == 4);
+
+ assert(!src.in_range(dst, 2 * components * bld.dispatch_width() / 8));
+
+ for (unsigned i = 0; i < components; i++) {
+ const fs_reg component_i = offset(src, bld, i);
+ bld.MOV(offset(dst, bld, 2 * i), subscript(component_i, dst.type, 0));
+ bld.MOV(offset(dst, bld, 2 * i + 1), subscript(component_i, dst.type, 1));
+ }
+}