#include "brw_eu.h"
#include "brw_wm.h"
#include "brw_fs.h"
+#include "brw_cs.h"
#include "brw_cfg.h"
#include "brw_dead_control_flow.h"
#include "main/uniforms.h"
negate == r.negate &&
abs == r.abs &&
!reladdr && !r.reladdr &&
- memcmp(&fixed_hw_reg, &r.fixed_hw_reg, sizeof(fixed_hw_reg)) == 0 &&
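+          /* fixed_hw_reg is only meaningful for HW_REG and IMM files. */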
+ ((file != HW_REG && file != IMM) ||
+ memcmp(&fixed_hw_reg, &r.fixed_hw_reg,
+ sizeof(fixed_hw_reg)) == 0) &&
stride == r.stride);
}
return MAX2(width * stride, 1) * type_sz(type);
}
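+/**
+ * Returns the number of scalar slots (32-bit components) occupied by a
+ * variable of the given GLSL type in the scalar backend's storage layout.
+ */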
-int
-fs_visitor::type_size(const struct glsl_type *type)
+extern "C" int
+type_size_scalar(const struct glsl_type *type)
{
unsigned int size, i;
case GLSL_TYPE_BOOL:
return type->components();
case GLSL_TYPE_ARRAY:
- return type_size(type->fields.array) * type->length;
+ return type_size_scalar(type->fields.array) * type->length;
case GLSL_TYPE_STRUCT:
size = 0;
for (i = 0; i < type->length; i++) {
- size += type_size(type->fields.structure[i].type);
+ size += type_size_scalar(type->fields.structure[i].type);
}
return size;
case GLSL_TYPE_SAMPLER:
case GLSL_TYPE_SUBROUTINE:
return 1;
case GLSL_TYPE_IMAGE:
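+      /* Images are passed in as a block of metadata uniforms (see struct
+       * brw_image_param).
+       */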
+ return BRW_IMAGE_PARAM_SIZE;
case GLSL_TYPE_VOID:
case GLSL_TYPE_ERROR:
case GLSL_TYPE_INTERFACE:
else
return 1;
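+   /* The sources of the logical surface opcodes below are laid out as:
+    * 0: surface coordinates, 1: operation-specific data, 2: surface
+    * index, 3: coordinate dimension count (IMM), 4: operation-specific
+    * argument (IMM).
+    */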
+ case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
+ case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
+ assert(src[3].file == IMM);
+ /* Surface coordinates. */
+ if (i == 0)
+ return src[3].fixed_hw_reg.dw1.ud;
+ /* Surface operation source (ignored for reads). */
+ else if (i == 1)
+ return 0;
+ else
+ return 1;
+
+ case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
+ case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
+ assert(src[3].file == IMM &&
+ src[4].file == IMM);
+ /* Surface coordinates. */
+ if (i == 0)
+ return src[3].fixed_hw_reg.dw1.ud;
+ /* Surface operation source. */
+ else if (i == 1)
+ return src[4].fixed_hw_reg.dw1.ud;
+ else
+ return 1;
+
+ case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
+ case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL: {
+ assert(src[3].file == IMM &&
+ src[4].file == IMM);
+ const unsigned op = src[4].fixed_hw_reg.dw1.ud;
+ /* Surface coordinates. */
+ if (i == 0)
+ return src[3].fixed_hw_reg.dw1.ud;
+ /* Surface operation source. */
+ else if (i == 1 && op == BRW_AOP_CMPWR)
+ return 2;
+ else if (i == 1 && (op == BRW_AOP_INC || op == BRW_AOP_DEC ||
+ op == BRW_AOP_PREDEC))
+ return 0;
+ else
+ return 1;
+ }
+
default:
return 1;
}
break;
case CS_OPCODE_CS_TERMINATE:
+ case SHADER_OPCODE_BARRIER:
return 1;
default:
case IMM:
return 1;
case GRF:
+ case ATTR:
case HW_REG:
return DIV_ROUND_UP(components_read(arg) *
src[arg].component_size(exec_size),
case SHADER_OPCODE_TXL:
case SHADER_OPCODE_TXS:
case SHADER_OPCODE_LOD:
+ case SHADER_OPCODE_SAMPLEINFO:
return 1;
case FS_OPCODE_FB_WRITE:
return 2;
+ case FS_OPCODE_GET_BUFFER_SIZE:
case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
case SHADER_OPCODE_GEN4_SCRATCH_READ:
return 1;
fs_visitor::vgrf(const glsl_type *const type)
{
int reg_width = dispatch_width / 8;
- return fs_reg(GRF, alloc.allocate(type_size(type) * reg_width),
+ return fs_reg(GRF, alloc.allocate(type_size_scalar(type) * reg_width),
brw_type_for_base_type(type));
}
this->param_size = v->param_size;
}
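+/**
+ * Store a vec4-sized block of uniform values: the first \p n components
+ * come from \p values and the remaining components of the slot are
+ * pointed at a shared zero constant, keeping the layout vec4-aligned.
+ * E.g. for a vec2, only param[param_offset] and param[param_offset + 1]
+ * carry real data.
+ */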
+void
+fs_visitor::setup_vec4_uniform_value(unsigned param_offset,
+ const gl_constant_value *values,
+ unsigned n)
+{
+ static const gl_constant_value zero = { 0 };
+
+ for (unsigned i = 0; i < n; ++i)
+ stage_prog_data->param[param_offset + i] = &values[i];
+
+ for (unsigned i = n; i < 4; ++i)
+ stage_prog_data->param[param_offset + i] = &zero;
+}
+
fs_reg *
fs_visitor::emit_fragcoord_interpolation(bool pixel_center_integer,
bool origin_upper_left)
bld.MOV(wpos, this->pixel_y);
} else {
fs_reg pixel_y = this->pixel_y;
- float offset = (pixel_center_integer ? 0.0 : 0.5);
+ float offset = (pixel_center_integer ? 0.0f : 0.5f);
if (flip) {
pixel_y.negate = true;
- offset += key->drawable_height - 1.0;
+ offset += key->drawable_height - 1.0f;
}
bld.ADD(wpos, pixel_y, fs_reg(offset));
return reg;
}
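+/**
+ * If \p src carries abs/negate modifiers, copy it into a fresh temporary
+ * and return that; otherwise return \p src unchanged.
+ */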
-void
-fs_visitor::resolve_source_modifiers(fs_reg *src)
+fs_reg
+fs_visitor::resolve_source_modifiers(const fs_reg &src)
{
- if (!src->abs && !src->negate)
- return;
+ if (!src.abs && !src.negate)
+ return src;
+
+ fs_reg temp = bld.vgrf(src.type);
+ bld.MOV(temp, src);
- fs_reg temp = bld.vgrf(src->type);
- bld.MOV(temp, *src);
- *src = temp;
+ return temp;
}
void
}
}
}
+
+ /* This may be updated in assign_urb_setup or assign_vs_urb_setup. */
+ this->first_non_payload_grf = payload.num_regs + prog_data->curb_read_length;
}
void
*/
struct brw_vue_map prev_stage_vue_map;
brw_compute_vue_map(devinfo, &prev_stage_vue_map,
- key->input_slots_valid);
+ key->input_slots_valid,
+ shader_prog->SeparateShader);
int first_slot = 2 * BRW_SF_URB_ENTRY_READ_OFFSET;
assert(prev_stage_vue_map.num_slots <= first_slot + 32);
for (int slot = first_slot; slot < prev_stage_vue_map.num_slots;
}
/* Each attribute is 4 setup channels, each of which is half a reg. */
- this->first_non_payload_grf =
- urb_start + prog_data->num_varying_inputs * 2;
+ this->first_non_payload_grf += prog_data->num_varying_inputs * 2;
}
void
count++;
/* Each attribute is 4 regs. */
- this->first_non_payload_grf =
- payload.num_regs + prog_data->curb_read_length + count * 4;
+ this->first_non_payload_grf += count * 4;
unsigned vue_entries =
MAX2(count, vs_prog_data->base.vue_map.num_slots);
inst->src[i].file = HW_REG;
inst->src[i].fixed_hw_reg =
- retype(brw_vec8_grf(grf, 0), inst->src[i].type);
+ stride(byte_offset(retype(brw_vec8_grf(grf, 0), inst->src[i].type),
+ inst->src[i].subreg_offset),
+ inst->exec_size * inst->src[i].stride,
+ inst->exec_size, inst->src[i].stride);
}
}
}
return progress;
}
-/*
- * Implements array access of uniforms by inserting a
- * PULL_CONSTANT_LOAD instruction.
+/**
+ * Assign UNIFORM file registers to either push constants or pull constants.
*
- * Unlike temporary GRF array access (where we don't support it due to
- * the difficulty of doing relative addressing on instruction
- * destinations), we could potentially do array access of uniforms
- * that were loaded in GRF space as push constants. In real-world
- * usage we've seen, though, the arrays being used are always larger
- * than we could load as push constants, so just always move all
- * uniform array access out to a pull constant buffer.
+ * We allow a fragment shader to have more than the specified minimum
+ * maximum number of fragment shader uniform components (64). If
+ * there are too many of these, they'd fill up all of register space.
+ * So, this will push some of them out to the pull constant buffer and
+ * update the program to load them. We also use pull constants for all
+ * indirect constant loads because we don't support indirect accesses in
+ * registers yet.
*/
void
-fs_visitor::move_uniform_array_access_to_pull_constants()
+fs_visitor::assign_constant_locations()
{
+ /* Only the first compile (SIMD8 mode) gets to decide on locations. */
if (dispatch_width != 8)
return;
+ unsigned int num_pull_constants = 0;
+
pull_constant_loc = ralloc_array(mem_ctx, int, uniforms);
memset(pull_constant_loc, -1, sizeof(pull_constant_loc[0]) * uniforms);
- /* Walk through and find array access of uniforms. Put a copy of that
- * uniform in the pull constant buffer.
+ bool is_live[uniforms];
+ memset(is_live, 0, sizeof(is_live));
+
+ /* First, we walk through the instructions and do two things:
+ *
+ * 1) Figure out which uniforms are live.
+ *
+    * 2) Find all indirect accesses of uniform arrays and flag them as needing
+ * to go into the pull constant buffer.
*
* Note that we don't move constant-indexed accesses to arrays. No
* testing has been done of the performance impact of this choice.
*/
foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
for (int i = 0 ; i < inst->sources; i++) {
- if (inst->src[i].file != UNIFORM || !inst->src[i].reladdr)
+ if (inst->src[i].file != UNIFORM)
continue;
- int uniform = inst->src[i].reg;
-
- /* If this array isn't already present in the pull constant buffer,
- * add it.
- */
- if (pull_constant_loc[uniform] == -1) {
- const gl_constant_value **values = &stage_prog_data->param[uniform];
-
- assert(param_size[uniform]);
-
- for (int j = 0; j < param_size[uniform]; j++) {
- pull_constant_loc[uniform + j] = stage_prog_data->nr_pull_params;
+ if (inst->src[i].reladdr) {
+ int uniform = inst->src[i].reg;
- stage_prog_data->pull_param[stage_prog_data->nr_pull_params++] =
- values[j];
+ /* If this array isn't already present in the pull constant buffer,
+ * add it.
+ */
+ if (pull_constant_loc[uniform] == -1) {
+ assert(param_size[uniform]);
+ for (int j = 0; j < param_size[uniform]; j++)
+ pull_constant_loc[uniform + j] = num_pull_constants++;
}
+ } else {
+         /* Mark the accessed uniform as live. */
+ int constant_nr = inst->src[i].reg + inst->src[i].reg_offset;
+ if (constant_nr >= 0 && constant_nr < (int) uniforms)
+ is_live[constant_nr] = true;
}
}
}
-}
-
-/**
- * Assign UNIFORM file registers to either push constants or pull constants.
- *
- * We allow a fragment shader to have more than the specified minimum
- * maximum number of fragment shader uniform components (64). If
- * there are too many of these, they'd fill up all of register space.
- * So, this will push some of them out to the pull constant buffer and
- * update the program to load them.
- */
-void
-fs_visitor::assign_constant_locations()
-{
- /* Only the first compile (SIMD8 mode) gets to decide on locations. */
- if (dispatch_width != 8)
- return;
-
- /* Find which UNIFORM registers are still in use. */
- bool is_live[uniforms];
- for (unsigned int i = 0; i < uniforms; i++) {
- is_live[i] = false;
- }
-
- foreach_block_and_inst(block, fs_inst, inst, cfg) {
- for (int i = 0; i < inst->sources; i++) {
- if (inst->src[i].file != UNIFORM)
- continue;
-
- int constant_nr = inst->src[i].reg + inst->src[i].reg_offset;
- if (constant_nr >= 0 && constant_nr < (int) uniforms)
- is_live[constant_nr] = true;
- }
- }
/* Only allow 16 registers (128 uniform components) as push constants.
*
} else {
/* Demote to a pull constant. */
push_constant_loc[i] = -1;
-
- int pull_index = stage_prog_data->nr_pull_params++;
- stage_prog_data->pull_param[pull_index] = stage_prog_data->param[i];
- pull_constant_loc[i] = pull_index;
+ pull_constant_loc[i] = num_pull_constants++;
}
}
stage_prog_data->nr_params = num_push_constants;
+ stage_prog_data->nr_pull_params = num_pull_constants;
/* Up until now, the param[] array has been indexed by reg + reg_offset
- * of UNIFORM registers. Condense it to only contain the uniforms we
- * chose to upload as push constants.
+ * of UNIFORM registers. Move pull constants into pull_param[] and
+ * condense param[] to only contain the uniforms we chose to push.
+ *
+    * NOTE: Because we are condensing the param[] array, we know that
+ * push_constant_loc[i] <= i and we can do it in one smooth loop without
+ * having to make a copy.
*/
for (unsigned int i = 0; i < uniforms; i++) {
- int remapped = push_constant_loc[i];
-
- if (remapped == -1)
- continue;
+ const gl_constant_value *value = stage_prog_data->param[i];
- assert(remapped <= (int)i);
- stage_prog_data->param[remapped] = stage_prog_data->param[i];
+ if (pull_constant_loc[i] != -1) {
+ stage_prog_data->pull_param[pull_constant_loc[i]] = value;
+ } else if (push_constant_loc[i] != -1) {
+ stage_prog_data->param[push_constant_loc[i]] = value;
+ }
}
}
continue;
/* Set up the annotation tracking for new generated instructions. */
- const fs_builder ibld = bld.annotate(inst->annotation, inst->ir)
- .at(block, inst);
+ const fs_builder ibld(this, block, inst);
fs_reg surf_index(stage_prog_data->binding_table.pull_constants_start);
fs_reg dst = vgrf(glsl_type::float_type);
inst->src[i].reladdr = NULL;
inst->src[i].stride = 1;
} else {
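+            /* The pull constant load fills a whole vec4 at once and is
+             * unaffected by channel enables, so emit it 8-wide with all
+             * channels forced on.
+             */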
+ const fs_builder ubld = ibld.exec_all().group(8, 0);
fs_reg offset = fs_reg((unsigned)(pull_index * 4) & ~15);
- ibld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD,
+ ubld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD,
dst, surf_index, offset);
inst->src[i].set_smear(pull_index & 3);
}
return false;
/* Look for a texturing instruction immediately before the final FB_WRITE. */
- fs_inst *fb_write = (fs_inst *) cfg->blocks[cfg->num_blocks - 1]->end();
+ bblock_t *block = cfg->blocks[cfg->num_blocks - 1];
+ fs_inst *fb_write = (fs_inst *)block->end();
assert(fb_write->eot);
assert(fb_write->opcode == FS_OPCODE_FB_WRITE);
assert(!tex_inst->eot); /* We can't get here twice */
assert((tex_inst->offset & (0xff << 24)) == 0);
+ const fs_builder ibld(this, block, tex_inst);
+
tex_inst->offset |= fb_write->target << 24;
tex_inst->eot = true;
- tex_inst->dst = bld.null_reg_ud();
+ tex_inst->dst = ibld.null_reg_ud();
fb_write->remove(cfg->blocks[cfg->num_blocks - 1]);
/* If a header is present, marking the eot is sufficient. Otherwise, we need
if (tex_inst->header_size != 0)
return true;
- fs_reg send_header = bld.vgrf(BRW_REGISTER_TYPE_F,
- load_payload->sources + 1);
+ fs_reg send_header = ibld.vgrf(BRW_REGISTER_TYPE_F,
+ load_payload->sources + 1);
fs_reg *new_sources =
ralloc_array(mem_ctx, fs_reg, load_payload->sources + 1);
{
int write_len = inst->regs_written;
int first_write_grf = inst->dst.reg;
- bool needs_dep[BRW_MAX_MRF];
+ bool needs_dep[BRW_MAX_MRF(devinfo->gen)];
assert(write_len < (int)sizeof(needs_dep) - 1);
memset(needs_dep, false, sizeof(needs_dep));
if (block->start() == scan_inst) {
for (int i = 0; i < write_len; i++) {
if (needs_dep[i])
- DEP_RESOLVE_MOV(bld.at(block, inst), first_write_grf + i);
+ DEP_RESOLVE_MOV(fs_builder(this, block, inst),
+ first_write_grf + i);
}
return;
}
if (reg >= first_write_grf &&
reg < first_write_grf + write_len &&
needs_dep[reg - first_write_grf]) {
- DEP_RESOLVE_MOV(bld.at(block, inst), reg);
+ DEP_RESOLVE_MOV(fs_builder(this, block, inst), reg);
needs_dep[reg - first_write_grf] = false;
if (scan_inst->exec_size == 16)
needs_dep[reg - first_write_grf + 1] = false;
{
int write_len = inst->regs_written;
int first_write_grf = inst->dst.reg;
- bool needs_dep[BRW_MAX_MRF];
+ bool needs_dep[BRW_MAX_MRF(devinfo->gen)];
assert(write_len < (int)sizeof(needs_dep) - 1);
memset(needs_dep, false, sizeof(needs_dep));
if (block->end() == scan_inst) {
for (int i = 0; i < write_len; i++) {
if (needs_dep[i])
- DEP_RESOLVE_MOV(bld.at(block, scan_inst), first_write_grf + i);
+ DEP_RESOLVE_MOV(fs_builder(this, block, scan_inst),
+ first_write_grf + i);
}
return;
}
scan_inst->dst.reg >= first_write_grf &&
scan_inst->dst.reg < first_write_grf + write_len &&
needs_dep[scan_inst->dst.reg - first_write_grf]) {
- DEP_RESOLVE_MOV(bld.at(block, scan_inst), scan_inst->dst.reg);
+ DEP_RESOLVE_MOV(fs_builder(this, block, scan_inst),
+ scan_inst->dst.reg);
needs_dep[scan_inst->dst.reg - first_write_grf] = false;
}
if (dst.file == MRF)
dst.reg = dst.reg & ~BRW_MRF_COMPR4;
- const fs_builder hbld = bld.exec_all().group(8, 0).at(block, inst);
+ const fs_builder ibld(this, block, inst);
+ const fs_builder hbld = ibld.exec_all().group(8, 0);
for (uint8_t i = 0; i < inst->header_size; i++) {
if (inst->src[i].file != BAD_FILE) {
dst = offset(dst, hbld, 1);
}
- const fs_builder ibld = bld.exec_all(inst->force_writemask_all)
- .group(inst->exec_size, inst->force_sechalf)
- .at(block, inst);
-
if (inst->dst.file == MRF && (inst->dst.reg & BRW_MRF_COMPR4) &&
inst->exec_size > 8) {
/* In this case, the payload portion of the LOAD_PAYLOAD isn't
{
bool progress = false;
- /* Gen8's MUL instruction can do a 32-bit x 32-bit -> 32-bit operation
- * directly, but Cherryview cannot.
- */
- if (devinfo->gen >= 8 && !devinfo->is_cherryview)
- return false;
-
foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
- if (inst->opcode != BRW_OPCODE_MUL ||
- inst->dst.is_accumulator() ||
- (inst->dst.type != BRW_REGISTER_TYPE_D &&
- inst->dst.type != BRW_REGISTER_TYPE_UD))
- continue;
+ const fs_builder ibld(this, block, inst);
- const fs_builder ibld = bld.at(block, inst);
+ if (inst->opcode == BRW_OPCODE_MUL) {
+ if (inst->dst.is_accumulator() ||
+ (inst->dst.type != BRW_REGISTER_TYPE_D &&
+ inst->dst.type != BRW_REGISTER_TYPE_UD))
+ continue;
- /* The MUL instruction isn't commutative. On Gen <= 6, only the low
- * 16-bits of src0 are read, and on Gen >= 7 only the low 16-bits of
- * src1 are used.
- *
- * If multiplying by an immediate value that fits in 16-bits, do a
- * single MUL instruction with that value in the proper location.
- */
- if (inst->src[1].file == IMM &&
- inst->src[1].fixed_hw_reg.dw1.ud < (1 << 16)) {
- if (devinfo->gen < 7) {
- fs_reg imm(GRF, alloc.allocate(dispatch_width / 8),
- inst->dst.type);
- ibld.MOV(imm, inst->src[1]);
- ibld.MUL(inst->dst, imm, inst->src[0]);
- } else {
- ibld.MUL(inst->dst, inst->src[0], inst->src[1]);
- }
- } else {
- /* Gen < 8 (and some Gen8+ low-power parts like Cherryview) cannot
- * do 32-bit integer multiplication in one instruction, but instead
- * must do a sequence (which actually calculates a 64-bit result):
- *
- * mul(8) acc0<1>D g3<8,8,1>D g4<8,8,1>D
- * mach(8) null g3<8,8,1>D g4<8,8,1>D
- * mov(8) g2<1>D acc0<8,8,1>D
- *
- * But on Gen > 6, the ability to use second accumulator register
- * (acc1) for non-float data types was removed, preventing a simple
- * implementation in SIMD16. A 16-channel result can be calculated by
- * executing the three instructions twice in SIMD8, once with quarter
- * control of 1Q for the first eight channels and again with 2Q for
- * the second eight channels.
- *
- * Which accumulator register is implicitly accessed (by AccWrEnable
- * for instance) is determined by the quarter control. Unfortunately
- * Ivybridge (and presumably Baytrail) has a hardware bug in which an
- * implicit accumulator access by an instruction with 2Q will access
- * acc1 regardless of whether the data type is usable in acc1.
- *
- * Specifically, the 2Q mach(8) writes acc1 which does not exist for
- * integer data types.
- *
- * Since we only want the low 32-bits of the result, we can do two
- * 32-bit x 16-bit multiplies (like the mul and mach are doing), and
- * adjust the high result and add them (like the mach is doing):
- *
- * mul(8) g7<1>D g3<8,8,1>D g4.0<8,8,1>UW
- * mul(8) g8<1>D g3<8,8,1>D g4.1<8,8,1>UW
- * shl(8) g9<1>D g8<8,8,1>D 16D
- * add(8) g2<1>D g7<8,8,1>D g8<8,8,1>D
- *
- * We avoid the shl instruction by realizing that we only want to add
- * the low 16-bits of the "high" result to the high 16-bits of the
- * "low" result and using proper regioning on the add:
- *
- * mul(8) g7<1>D g3<8,8,1>D g4.0<16,8,2>UW
- * mul(8) g8<1>D g3<8,8,1>D g4.1<16,8,2>UW
- * add(8) g7.1<2>UW g7.1<16,8,2>UW g8<16,8,2>UW
- *
- * Since it does not use the (single) accumulator register, we can
- * schedule multi-component multiplications much better.
+ /* Gen8's MUL instruction can do a 32-bit x 32-bit -> 32-bit
+ * operation directly, but CHV/BXT cannot.
*/
+ if (devinfo->gen >= 8 &&
+ !devinfo->is_cherryview && !devinfo->is_broxton)
+ continue;
- if (inst->conditional_mod && inst->dst.is_null()) {
- inst->dst = fs_reg(GRF, alloc.allocate(dispatch_width / 8),
- inst->dst.type);
- }
- fs_reg low = inst->dst;
- fs_reg high(GRF, alloc.allocate(dispatch_width / 8),
- inst->dst.type);
+ if (inst->src[1].file == IMM &&
+ inst->src[1].fixed_hw_reg.dw1.ud < (1 << 16)) {
+ /* The MUL instruction isn't commutative. On Gen <= 6, only the low
+ * 16-bits of src0 are read, and on Gen >= 7 only the low 16-bits of
+ * src1 are used.
+ *
+ * If multiplying by an immediate value that fits in 16-bits, do a
+ * single MUL instruction with that value in the proper location.
+ */
+ if (devinfo->gen < 7) {
+ fs_reg imm(GRF, alloc.allocate(dispatch_width / 8),
+ inst->dst.type);
+ ibld.MOV(imm, inst->src[1]);
+ ibld.MUL(inst->dst, imm, inst->src[0]);
+ } else {
+ ibld.MUL(inst->dst, inst->src[0], inst->src[1]);
+ }
+ } else {
+ /* Gen < 8 (and some Gen8+ low-power parts like Cherryview) cannot
+ * do 32-bit integer multiplication in one instruction, but instead
+ * must do a sequence (which actually calculates a 64-bit result):
+ *
+ * mul(8) acc0<1>D g3<8,8,1>D g4<8,8,1>D
+ * mach(8) null g3<8,8,1>D g4<8,8,1>D
+ * mov(8) g2<1>D acc0<8,8,1>D
+ *
+ * But on Gen > 6, the ability to use second accumulator register
+ * (acc1) for non-float data types was removed, preventing a simple
+ * implementation in SIMD16. A 16-channel result can be calculated by
+ * executing the three instructions twice in SIMD8, once with quarter
+ * control of 1Q for the first eight channels and again with 2Q for
+ * the second eight channels.
+ *
+ * Which accumulator register is implicitly accessed (by AccWrEnable
+ * for instance) is determined by the quarter control. Unfortunately
+ * Ivybridge (and presumably Baytrail) has a hardware bug in which an
+ * implicit accumulator access by an instruction with 2Q will access
+ * acc1 regardless of whether the data type is usable in acc1.
+ *
+ * Specifically, the 2Q mach(8) writes acc1 which does not exist for
+ * integer data types.
+ *
+ * Since we only want the low 32-bits of the result, we can do two
+ * 32-bit x 16-bit multiplies (like the mul and mach are doing), and
+ * adjust the high result and add them (like the mach is doing):
+ *
+ * mul(8) g7<1>D g3<8,8,1>D g4.0<8,8,1>UW
+ * mul(8) g8<1>D g3<8,8,1>D g4.1<8,8,1>UW
+ * shl(8) g9<1>D g8<8,8,1>D 16D
+ * add(8) g2<1>D g7<8,8,1>D g8<8,8,1>D
+ *
+ * We avoid the shl instruction by realizing that we only want to add
+ * the low 16-bits of the "high" result to the high 16-bits of the
+ * "low" result and using proper regioning on the add:
+ *
+ * mul(8) g7<1>D g3<8,8,1>D g4.0<16,8,2>UW
+ * mul(8) g8<1>D g3<8,8,1>D g4.1<16,8,2>UW
+ * add(8) g7.1<2>UW g7.1<16,8,2>UW g8<16,8,2>UW
+ *
+ * Since it does not use the (single) accumulator register, we can
+ * schedule multi-component multiplications much better.
+ */
- if (devinfo->gen >= 7) {
- fs_reg src1_0_w = inst->src[1];
- fs_reg src1_1_w = inst->src[1];
+ fs_reg orig_dst = inst->dst;
+ if (orig_dst.is_null() || orig_dst.file == MRF) {
+ inst->dst = fs_reg(GRF, alloc.allocate(dispatch_width / 8),
+                                  inst->dst.type);
+ }
+ fs_reg low = inst->dst;
+ fs_reg high(GRF, alloc.allocate(dispatch_width / 8),
+ inst->dst.type);
- if (inst->src[1].file == IMM) {
- src1_0_w.fixed_hw_reg.dw1.ud &= 0xffff;
- src1_1_w.fixed_hw_reg.dw1.ud >>= 16;
- } else {
- src1_0_w.type = BRW_REGISTER_TYPE_UW;
- if (src1_0_w.stride != 0) {
- assert(src1_0_w.stride == 1);
- src1_0_w.stride = 2;
+ if (devinfo->gen >= 7) {
+ fs_reg src1_0_w = inst->src[1];
+ fs_reg src1_1_w = inst->src[1];
+
+ if (inst->src[1].file == IMM) {
+ src1_0_w.fixed_hw_reg.dw1.ud &= 0xffff;
+ src1_1_w.fixed_hw_reg.dw1.ud >>= 16;
+ } else {
+ src1_0_w.type = BRW_REGISTER_TYPE_UW;
+ if (src1_0_w.stride != 0) {
+ assert(src1_0_w.stride == 1);
+ src1_0_w.stride = 2;
+ }
+
+ src1_1_w.type = BRW_REGISTER_TYPE_UW;
+ if (src1_1_w.stride != 0) {
+ assert(src1_1_w.stride == 1);
+ src1_1_w.stride = 2;
+ }
+ src1_1_w.subreg_offset += type_sz(BRW_REGISTER_TYPE_UW);
}
+ ibld.MUL(low, inst->src[0], src1_0_w);
+ ibld.MUL(high, inst->src[0], src1_1_w);
+ } else {
+ fs_reg src0_0_w = inst->src[0];
+ fs_reg src0_1_w = inst->src[0];
- src1_1_w.type = BRW_REGISTER_TYPE_UW;
- if (src1_1_w.stride != 0) {
- assert(src1_1_w.stride == 1);
- src1_1_w.stride = 2;
+ src0_0_w.type = BRW_REGISTER_TYPE_UW;
+ if (src0_0_w.stride != 0) {
+ assert(src0_0_w.stride == 1);
+ src0_0_w.stride = 2;
}
- src1_1_w.subreg_offset += type_sz(BRW_REGISTER_TYPE_UW);
- }
- ibld.MUL(low, inst->src[0], src1_0_w);
- ibld.MUL(high, inst->src[0], src1_1_w);
- } else {
- fs_reg src0_0_w = inst->src[0];
- fs_reg src0_1_w = inst->src[0];
- src0_0_w.type = BRW_REGISTER_TYPE_UW;
- if (src0_0_w.stride != 0) {
- assert(src0_0_w.stride == 1);
- src0_0_w.stride = 2;
- }
+ src0_1_w.type = BRW_REGISTER_TYPE_UW;
+ if (src0_1_w.stride != 0) {
+ assert(src0_1_w.stride == 1);
+ src0_1_w.stride = 2;
+ }
+ src0_1_w.subreg_offset += type_sz(BRW_REGISTER_TYPE_UW);
- src0_1_w.type = BRW_REGISTER_TYPE_UW;
- if (src0_1_w.stride != 0) {
- assert(src0_1_w.stride == 1);
- src0_1_w.stride = 2;
+ ibld.MUL(low, src0_0_w, inst->src[1]);
+ ibld.MUL(high, src0_1_w, inst->src[1]);
}
- src0_1_w.subreg_offset += type_sz(BRW_REGISTER_TYPE_UW);
- ibld.MUL(low, src0_0_w, inst->src[1]);
- ibld.MUL(high, src0_1_w, inst->src[1]);
- }
+ fs_reg dst = inst->dst;
+ dst.type = BRW_REGISTER_TYPE_UW;
+ dst.subreg_offset = 2;
+ dst.stride = 2;
- fs_reg dst = inst->dst;
- dst.type = BRW_REGISTER_TYPE_UW;
- dst.subreg_offset = 2;
- dst.stride = 2;
+ high.type = BRW_REGISTER_TYPE_UW;
+ high.stride = 2;
- high.type = BRW_REGISTER_TYPE_UW;
- high.stride = 2;
+ low.type = BRW_REGISTER_TYPE_UW;
+ low.subreg_offset = 2;
+ low.stride = 2;
- low.type = BRW_REGISTER_TYPE_UW;
- low.subreg_offset = 2;
- low.stride = 2;
+ ibld.ADD(dst, low, high);
- ibld.ADD(dst, low, high);
+ if (inst->conditional_mod || orig_dst.file == MRF) {
+ set_condmod(inst->conditional_mod,
+ ibld.MOV(orig_dst, inst->dst));
+ }
+ }
- if (inst->conditional_mod) {
- fs_reg null(retype(ibld.null_reg_f(), inst->dst.type));
- set_condmod(inst->conditional_mod,
- ibld.MOV(null, inst->dst));
+ } else if (inst->opcode == SHADER_OPCODE_MULH) {
+ /* Should have been lowered to 8-wide. */
+ assert(inst->exec_size <= 8);
+ const fs_reg acc = retype(brw_acc_reg(inst->exec_size),
+ inst->dst.type);
+ fs_inst *mul = ibld.MUL(acc, inst->src[0], inst->src[1]);
+ fs_inst *mach = ibld.MACH(inst->dst, inst->src[0], inst->src[1]);
+
+ if (devinfo->gen >= 8) {
+            /* Until Gen8, integer multiplies read 32-bits from one source
+             * and 16-bits from the other, relying on the MACH instruction
+             * to generate the high bits of the result.
+ *
+ * On Gen8, the multiply instruction does a full 32x32-bit
+ * multiply, but in order to do a 64-bit multiply we can simulate
+ * the previous behavior and then use a MACH instruction.
+ *
+ * FINISHME: Don't use source modifiers on src1.
+ */
+ assert(mul->src[1].type == BRW_REGISTER_TYPE_D ||
+ mul->src[1].type == BRW_REGISTER_TYPE_UD);
+ mul->src[1].type = (type_is_signed(mul->src[1].type) ?
+ BRW_REGISTER_TYPE_W : BRW_REGISTER_TYPE_UW);
+ mul->src[1].stride *= 2;
+
+ } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
+ inst->force_sechalf) {
+ /* Among other things the quarter control bits influence which
+ * accumulator register is used by the hardware for instructions
+ * that access the accumulator implicitly (e.g. MACH). A
+ * second-half instruction would normally map to acc1, which
+ * doesn't exist on Gen7 and up (the hardware does emulate it for
+ * floating-point instructions *only* by taking advantage of the
+ * extra precision of acc0 not normally used for floating point
+ * arithmetic).
+ *
+ * HSW and up are careful enough not to try to access an
+ * accumulator register that doesn't exist, but on earlier Gen7
+ * hardware we need to make sure that the quarter control bits are
+ * zero to avoid non-deterministic behaviour and emit an extra MOV
+ * to get the result masked correctly according to the current
+ * channel enables.
+ */
+ mach->force_sechalf = false;
+ mach->force_writemask_all = true;
+ mach->dst = ibld.vgrf(inst->dst.type);
+ ibld.MOV(inst->dst, mach->dst);
}
+ } else {
+ continue;
}
inst->remove(block);
}
}
+/**
+ * Initialize the header present in some typed and untyped surface
+ * messages.
+ */
+static fs_reg
+emit_surface_header(const fs_builder &bld, const fs_reg &sample_mask)
+{
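+   /* The header is a single register shared by all channels: zero it and
+    * write the sample mask into its last dword, where the dataport
+    * expects it.
+    */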
+ fs_builder ubld = bld.exec_all().group(8, 0);
+ const fs_reg dst = ubld.vgrf(BRW_REGISTER_TYPE_UD);
+ ubld.MOV(dst, fs_reg(0));
+ ubld.MOV(component(dst, 7), sample_mask);
+ return dst;
+}
+
+static void
+lower_surface_logical_send(const fs_builder &bld, fs_inst *inst, opcode op,
+ const fs_reg &sample_mask)
+{
+ /* Get the logical send arguments. */
+ const fs_reg &addr = inst->src[0];
+ const fs_reg &src = inst->src[1];
+ const fs_reg &surface = inst->src[2];
+ const UNUSED fs_reg &dims = inst->src[3];
+ const fs_reg &arg = inst->src[4];
+
+ /* Calculate the total number of components of the payload. */
+ const unsigned addr_sz = inst->components_read(0);
+ const unsigned src_sz = inst->components_read(1);
+ const unsigned header_sz = (sample_mask.file == BAD_FILE ? 0 : 1);
+ const unsigned sz = header_sz + addr_sz + src_sz;
+
+ /* Allocate space for the payload. */
+ fs_reg *const components = new fs_reg[sz];
+ const fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, sz);
+ unsigned n = 0;
+
+ /* Construct the payload. */
+ if (header_sz)
+ components[n++] = emit_surface_header(bld, sample_mask);
+
+ for (unsigned i = 0; i < addr_sz; i++)
+ components[n++] = offset(addr, bld, i);
+
+ for (unsigned i = 0; i < src_sz; i++)
+ components[n++] = offset(src, bld, i);
+
+ bld.LOAD_PAYLOAD(payload, components, sz, header_sz);
+
+ /* Update the original instruction. */
+ inst->opcode = op;
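+   /* mlen is counted in GRFs: each payload component occupies
+    * exec_size / 8 registers, plus one register for the optional header.
+    */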
+ inst->mlen = header_sz + (addr_sz + src_sz) * inst->exec_size / 8;
+ inst->header_size = header_sz;
+
+ inst->src[0] = payload;
+ inst->src[1] = surface;
+ inst->src[2] = arg;
+ inst->resize_sources(3);
+
+ delete[] components;
+}
+
bool
fs_visitor::lower_logical_sends()
{
bool progress = false;
foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
- const fs_builder ibld = bld.exec_all(inst->force_writemask_all)
- .group(inst->exec_size, inst->force_sechalf)
- .at(block, inst);
+ const fs_builder ibld(this, block, inst);
switch (inst->opcode) {
case FS_OPCODE_FB_WRITE_LOGICAL:
lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TG4_OFFSET);
break;
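+   /* Surface reads are unaffected by the sample mask, so they get a
+    * fully enabled one (0xffff); writes and atomics use the actual
+    * per-channel mask so that disabled channels don't touch memory.
+    */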
+ case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
+ lower_surface_logical_send(ibld, inst,
+ SHADER_OPCODE_UNTYPED_SURFACE_READ,
+ fs_reg(0xffff));
+ break;
+
+ case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
+ lower_surface_logical_send(ibld, inst,
+ SHADER_OPCODE_UNTYPED_SURFACE_WRITE,
+ ibld.sample_mask_reg());
+ break;
+
+ case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
+ lower_surface_logical_send(ibld, inst,
+ SHADER_OPCODE_UNTYPED_ATOMIC,
+ ibld.sample_mask_reg());
+ break;
+
+ case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
+ lower_surface_logical_send(ibld, inst,
+ SHADER_OPCODE_TYPED_SURFACE_READ,
+ fs_reg(0xffff));
+ break;
+
+ case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
+ lower_surface_logical_send(ibld, inst,
+ SHADER_OPCODE_TYPED_SURFACE_WRITE,
+ ibld.sample_mask_reg());
+ break;
+
+ case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL:
+ lower_surface_logical_send(ibld, inst,
+ SHADER_OPCODE_TYPED_ATOMIC,
+ ibld.sample_mask_reg());
+ break;
+
default:
continue;
}
const fs_inst *inst)
{
switch (inst->opcode) {
+ case BRW_OPCODE_MOV:
+ case BRW_OPCODE_SEL:
+ case BRW_OPCODE_NOT:
+ case BRW_OPCODE_AND:
+ case BRW_OPCODE_OR:
+ case BRW_OPCODE_XOR:
+ case BRW_OPCODE_SHR:
+ case BRW_OPCODE_SHL:
+ case BRW_OPCODE_ASR:
+ case BRW_OPCODE_CMP:
+ case BRW_OPCODE_CMPN:
+ case BRW_OPCODE_CSEL:
+ case BRW_OPCODE_F32TO16:
+ case BRW_OPCODE_F16TO32:
+ case BRW_OPCODE_BFREV:
+ case BRW_OPCODE_BFE:
+ case BRW_OPCODE_BFI1:
+ case BRW_OPCODE_BFI2:
+ case BRW_OPCODE_ADD:
+ case BRW_OPCODE_MUL:
+ case BRW_OPCODE_AVG:
+ case BRW_OPCODE_FRC:
+ case BRW_OPCODE_RNDU:
+ case BRW_OPCODE_RNDD:
+ case BRW_OPCODE_RNDE:
+ case BRW_OPCODE_RNDZ:
+ case BRW_OPCODE_LZD:
+ case BRW_OPCODE_FBH:
+ case BRW_OPCODE_FBL:
+ case BRW_OPCODE_CBIT:
+ case BRW_OPCODE_SAD2:
+ case BRW_OPCODE_MAD:
+ case BRW_OPCODE_LRP:
+ case SHADER_OPCODE_RCP:
+ case SHADER_OPCODE_RSQ:
+ case SHADER_OPCODE_SQRT:
+ case SHADER_OPCODE_EXP2:
+ case SHADER_OPCODE_LOG2:
+ case SHADER_OPCODE_POW:
+ case SHADER_OPCODE_INT_QUOTIENT:
+ case SHADER_OPCODE_INT_REMAINDER:
+ case SHADER_OPCODE_SIN:
+ case SHADER_OPCODE_COS: {
+ /* According to the PRMs:
+ * "A. In Direct Addressing mode, a source cannot span more than 2
+ * adjacent GRF registers.
+ * B. A destination cannot span more than 2 adjacent GRF registers."
+ *
+    * Look for the source or destination with the largest register region,
+    * which is the one that is going to limit the overall execution size of
+    * the instruction due to this rule.
+ */
+ unsigned reg_count = inst->regs_written;
+
+ for (unsigned i = 0; i < inst->sources; i++)
+ reg_count = MAX2(reg_count, (unsigned)inst->regs_read(i));
+
+ /* Calculate the maximum execution size of the instruction based on the
+ * factor by which it goes over the hardware limit of 2 GRFs.
+ */
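+      /* E.g. a 16-wide instruction whose largest region spans four GRFs
+       * is limited to 16 / DIV_ROUND_UP(4, 2) = 8 channels.
+       */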
+ return inst->exec_size / DIV_ROUND_UP(reg_count, 2);
+ }
+ case SHADER_OPCODE_MULH:
+ /* MULH is lowered to the MUL/MACH sequence using the accumulator, which
+ * is 8-wide on Gen7+.
+ */
+ return (devinfo->gen >= 7 ? 8 : inst->exec_size);
+
case FS_OPCODE_FB_WRITE_LOGICAL:
/* Gen6 doesn't support SIMD16 depth writes but we cannot handle them
* here.
else
return inst->exec_size;
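+   /* The typed surface dataport messages only support SIMD8 execution. */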
+ case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL:
+ case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
+ case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
+ return 8;
+
default:
return inst->exec_size;
}
const unsigned lower_width = get_lowered_simd_width(devinfo, inst);
if (lower_width != inst->exec_size) {
- /* Builder matching the original instruction. */
+ /* Builder matching the original instruction. We may also need to
+          * emit an instruction of width larger than the original, so set
+          * the execution size of the builder to the larger of the two for
+          * now to be sure that both cases can be handled.
+ */
const fs_builder ibld = bld.at(block, inst)
.exec_all(inst->force_writemask_all)
- .group(inst->exec_size, inst->force_sechalf);
+ .group(MAX2(inst->exec_size, lower_width),
+ inst->force_sechalf);
/* Split the copies in chunks of the execution width of either the
* original or the lowered instruction, whichever is lower.
split_inst.exec_size = lower_width;
split_inst.eot = inst->eot && i == n - 1;
- /* Set exec_all if the lowered width is higher than the original
- * to avoid breaking the compiler invariant that no control
- * flow-masked instruction is wider than the shader's
- * dispatch_width. Then transform the sources and destination and
- * emit the lowered instruction.
+ /* Select the correct channel enables for the i-th group, then
+ * transform the sources and destination and emit the lowered
+ * instruction.
*/
- const fs_builder lbld = ibld.exec_all(lower_width > inst->exec_size)
- .group(lower_width, i);
+ const fs_builder lbld = ibld.group(lower_width, i);
for (unsigned j = 0; j < inst->sources; j++) {
if (inst->src[j].file != BAD_FILE &&
assert(devinfo->gen >= 7);
payload.num_regs = 1;
+
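+   /* Reserve payload registers for the per-channel local invocation ID
+    * data; it must amount to a whole number of GRFs (8 dwords each),
+    * which the assert below checks.
+    */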
+ if (prog->SystemValuesRead & SYSTEM_BIT_LOCAL_INVOCATION_ID) {
+ const unsigned local_id_dwords =
+ brw_cs_prog_local_id_payload_dwords(prog, dispatch_width);
+ assert((local_id_dwords & 0x7) == 0);
+ const unsigned local_id_regs = local_id_dwords / 8;
+ payload.local_invocation_id_reg = payload.num_regs;
+ payload.num_regs += local_id_regs;
+ }
}
void
void
fs_visitor::optimize()
{
+ /* Start by validating the shader we currently have. */
+ validate();
+
/* bld is the common builder object pointing at the end of the program we
* used to translate it into i965 IR. For the optimization and lowering
* passes coming next, any code added after the end of the program without
* Ideally optimization passes wouldn't be part of the visitor so they
* wouldn't have access to bld at all, but they do, so just in case some
* pass forgets to ask for a location explicitly set it to NULL here to
- * make it trip.
+ * make it trip. The dispatch width is initialized to a bogus value to
+ * make sure that optimizations set the execution controls explicitly to
+ * match the code they are manipulating instead of relying on the defaults.
*/
- bld = bld.at(NULL, NULL);
+ bld = fs_builder(this, 64);
- split_virtual_grfs();
-
- move_uniform_array_access_to_pull_constants();
assign_constant_locations();
demote_pull_constants();
+ validate();
+
+ split_virtual_grfs();
+ validate();
+
#define OPT(pass, args...) ({ \
pass_num++; \
bool this_progress = pass(args); \
backend_shader::dump_instructions(filename); \
} \
\
+ validate(); \
+ \
progress = progress || this_progress; \
this_progress; \
})
OPT(lower_integer_multiplication);
lower_uniform_pull_constant_loads();
+
+ validate();
}
/**
struct gl_shader_program *prog,
unsigned *final_assembly_size)
{
- bool start_busy = false;
- double start_time = 0;
-
- if (unlikely(brw->perf_debug)) {
- start_busy = (brw->batch.last_bo &&
- drm_intel_bo_busy(brw->batch.last_bo));
- start_time = get_time();
- }
-
struct brw_shader *shader = NULL;
if (prog)
shader = (brw_shader *) prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
if (simd16_cfg)
prog_data->prog_offset_16 = g.generate_code(simd16_cfg, 16);
- if (unlikely(brw->perf_debug) && shader) {
- if (shader->compiled_once)
- brw_wm_debug_recompile(brw, prog, key);
- shader->compiled_once = true;
-
- if (start_busy && !drm_intel_bo_busy(brw->batch.last_bo)) {
- perf_debug("FS compile took %.03f ms and stalled the GPU\n",
- (get_time() - start_time) * 1000);
- }
- }
-
return g.get_assembly(final_assembly_size);
}
-extern "C" bool
-brw_fs_precompile(struct gl_context *ctx,
- struct gl_shader_program *shader_prog,
- struct gl_program *prog)
+fs_reg *
+fs_visitor::emit_cs_local_invocation_id_setup()
{
- struct brw_context *brw = brw_context(ctx);
- struct brw_wm_prog_key key;
-
- struct gl_fragment_program *fp = (struct gl_fragment_program *) prog;
- struct brw_fragment_program *bfp = brw_fragment_program(fp);
- bool program_uses_dfdy = fp->UsesDFdy;
+ assert(stage == MESA_SHADER_COMPUTE);
- memset(&key, 0, sizeof(key));
+ fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::uvec3_type));
- if (brw->gen < 6) {
- if (fp->UsesKill)
- key.iz_lookup |= IZ_PS_KILL_ALPHATEST_BIT;
+ struct brw_reg src =
+ brw_vec8_grf(payload.local_invocation_id_reg, 0);
+ src = retype(src, BRW_REGISTER_TYPE_UD);
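+   /* The payload delivers the three local invocation ID components as
+    * consecutive blocks of dispatch_width dwords each; move each block
+    * into one component of the uvec3 destination.
+    */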
+ bld.MOV(*reg, src);
+ src.nr += dispatch_width / 8;
+ bld.MOV(offset(*reg, bld, 1), src);
+ src.nr += dispatch_width / 8;
+ bld.MOV(offset(*reg, bld, 2), src);
- if (fp->Base.OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
- key.iz_lookup |= IZ_PS_COMPUTES_DEPTH_BIT;
+ return reg;
+}
- /* Just assume depth testing. */
- key.iz_lookup |= IZ_DEPTH_TEST_ENABLE_BIT;
- key.iz_lookup |= IZ_DEPTH_WRITE_ENABLE_BIT;
- }
+fs_reg *
+fs_visitor::emit_cs_work_group_id_setup()
+{
+ assert(stage == MESA_SHADER_COMPUTE);
- if (brw->gen < 6 || _mesa_bitcount_64(fp->Base.InputsRead &
- BRW_FS_VARYING_INPUT_MASK) > 16)
- key.input_slots_valid = fp->Base.InputsRead | VARYING_BIT_POS;
+ fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::uvec3_type));
- brw_setup_tex_for_precompile(brw, &key.tex, &fp->Base);
+ struct brw_reg r0_1(retype(brw_vec1_grf(0, 1), BRW_REGISTER_TYPE_UD));
+ struct brw_reg r0_6(retype(brw_vec1_grf(0, 6), BRW_REGISTER_TYPE_UD));
+ struct brw_reg r0_7(retype(brw_vec1_grf(0, 7), BRW_REGISTER_TYPE_UD));
- if (fp->Base.InputsRead & VARYING_BIT_POS) {
- key.drawable_height = ctx->DrawBuffer->Height;
- }
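+   /* The workgroup (thread group) ID comes in fixed dwords of the r0
+    * payload header: X in r0.1, Y in r0.6 and Z in r0.7.
+    */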
+ bld.MOV(*reg, r0_1);
+ bld.MOV(offset(*reg, bld, 1), r0_6);
+ bld.MOV(offset(*reg, bld, 2), r0_7);
- key.nr_color_regions = _mesa_bitcount_64(fp->Base.OutputsWritten &
- ~(BITFIELD64_BIT(FRAG_RESULT_DEPTH) |
- BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK)));
+ return reg;
+}
- if ((fp->Base.InputsRead & VARYING_BIT_POS) || program_uses_dfdy) {
- key.render_to_fbo = _mesa_is_user_fbo(ctx->DrawBuffer) ||
- key.nr_color_regions > 1;
- }
+const unsigned *
+brw_cs_emit(struct brw_context *brw,
+ void *mem_ctx,
+ const struct brw_cs_prog_key *key,
+ struct brw_cs_prog_data *prog_data,
+ struct gl_compute_program *cp,
+ struct gl_shader_program *prog,
+ unsigned *final_assembly_size)
+{
+ struct brw_shader *shader =
+ (struct brw_shader *) prog->_LinkedShaders[MESA_SHADER_COMPUTE];
- key.program_string_id = bfp->id;
+ if (unlikely(INTEL_DEBUG & DEBUG_CS))
+ brw_dump_ir("compute", prog, &shader->base, &cp->Base);
- uint32_t old_prog_offset = brw->wm.base.prog_offset;
- struct brw_wm_prog_data *old_prog_data = brw->wm.prog_data;
+ prog_data->local_size[0] = cp->LocalSize[0];
+ prog_data->local_size[1] = cp->LocalSize[1];
+ prog_data->local_size[2] = cp->LocalSize[2];
+ unsigned local_workgroup_size =
+ cp->LocalSize[0] * cp->LocalSize[1] * cp->LocalSize[2];
- bool success = brw_codegen_wm_prog(brw, shader_prog, bfp, &key);
+ cfg_t *cfg = NULL;
+ const char *fail_msg = NULL;
- brw->wm.base.prog_offset = old_prog_offset;
- brw->wm.prog_data = old_prog_data;
+ int st_index = -1;
+ if (INTEL_DEBUG & DEBUG_SHADER_TIME)
+ st_index = brw_get_shader_time_index(brw, prog, &cp->Base, ST_CS);
- return success;
-}
+ /* Now the main event: Visit the shader IR and generate our CS IR for it.
+ */
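+   /* A SIMDn program covers n invocations per hardware thread, so it can
+    * only be used if the local workgroup fits in n * max_cs_threads
+    * invocations.
+    */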
+ fs_visitor v8(brw->intelScreen->compiler, brw,
+ mem_ctx, MESA_SHADER_COMPUTE, key, &prog_data->base, prog,
+ &cp->Base, 8, st_index);
+ if (!v8.run_cs()) {
+ fail_msg = v8.fail_msg;
+ } else if (local_workgroup_size <= 8 * brw->max_cs_threads) {
+ cfg = v8.cfg;
+ prog_data->simd_size = 8;
+ }
-void
-brw_setup_tex_for_precompile(struct brw_context *brw,
- struct brw_sampler_prog_key_data *tex,
- struct gl_program *prog)
-{
- const bool has_shader_channel_select = brw->is_haswell || brw->gen >= 8;
- unsigned sampler_count = _mesa_fls(prog->SamplersUsed);
- for (unsigned i = 0; i < sampler_count; i++) {
- if (!has_shader_channel_select && (prog->ShadowSamplers & (1 << i))) {
- /* Assume DEPTH_TEXTURE_MODE is the default: X, X, X, 1 */
- tex->swizzles[i] =
- MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_ONE);
+ fs_visitor v16(brw->intelScreen->compiler, brw,
+ mem_ctx, MESA_SHADER_COMPUTE, key, &prog_data->base, prog,
+ &cp->Base, 16, st_index);
+ if (likely(!(INTEL_DEBUG & DEBUG_NO16)) &&
+ !fail_msg && !v8.simd16_unsupported &&
+ local_workgroup_size <= 16 * brw->max_cs_threads) {
+ /* Try a SIMD16 compile */
+ v16.import_uniforms(&v8);
+ if (!v16.run_cs()) {
+ perf_debug("SIMD16 shader failed to compile: %s", v16.fail_msg);
+ if (!cfg) {
+ fail_msg =
+ "Couldn't generate SIMD16 program and not "
+ "enough threads for SIMD8";
+ }
} else {
- /* Color sampler: assume no swizzling. */
- tex->swizzles[i] = SWIZZLE_XYZW;
+ cfg = v16.cfg;
+ prog_data->simd_size = 16;
}
}
+
+ if (unlikely(cfg == NULL)) {
+ assert(fail_msg);
+ prog->LinkStatus = false;
+ ralloc_strcat(&prog->InfoLog, fail_msg);
+ _mesa_problem(NULL, "Failed to compile compute shader: %s\n",
+ fail_msg);
+ return NULL;
+ }
+
+ fs_generator g(brw->intelScreen->compiler, brw,
+ mem_ctx, (void*) key, &prog_data->base, &cp->Base,
+ v8.promoted_constants, v8.runtime_check_aads_emit, "CS");
+ if (INTEL_DEBUG & DEBUG_CS) {
+ char *name = ralloc_asprintf(mem_ctx, "%s compute shader %d",
+ prog->Label ? prog->Label : "unnamed",
+ prog->Name);
+ g.enable_debug(name);
+ }
+
+ g.generate_code(cfg, prog_data->simd_size);
+
+ return g.get_assembly(final_assembly_size);
}