#include "glsl/ir_print_visitor.h"
gen8_fs_generator::gen8_fs_generator(struct brw_context *brw,
- struct brw_wm_compile *c,
+ void *mem_ctx,
+ const struct brw_wm_prog_key *key,
+ struct brw_wm_prog_data *prog_data,
struct gl_shader_program *shader_prog,
struct gl_fragment_program *fp,
bool dual_source_output)
- : gen8_generator(brw, shader_prog, fp ? &fp->Base : NULL, c), c(c), fp(fp),
- dual_source_output(dual_source_output)
+ /* NOTE(review): the generator now receives the pieces of the old
+  * brw_wm_compile (mem_ctx, key, prog_data) individually, so it no
+  * longer depends on the lifetime of the whole compile object. */
+ : gen8_generator(brw, shader_prog, fp ? &fp->Base : NULL, mem_ctx),
+ key(key), prog_data(prog_data),
+ fp(fp), dual_source_output(dual_source_output)
{
}
{
}
-void
-gen8_fs_generator::mark_surface_used(unsigned surf_index)
-{
- assert(surf_index < BRW_MAX_SURFACES);
-
- c->prog_data.base.binding_table.size_bytes =
- MAX2(c->prog_data.base.binding_table.size_bytes, (surf_index + 1) * 4);
-}
-
void
gen8_fs_generator::generate_fb_write(fs_inst *ir)
{
MOV_RAW(brw_message_reg(ir->base_mrf), brw_vec8_grf(0, 0));
gen8_set_exec_size(mov, BRW_EXECUTE_16);
- if (ir->target > 0 && c->key.replicate_alpha) {
+ if (ir->target > 0 && key->replicate_alpha) {
/* Set "Source0 Alpha Present to RenderTarget" bit in the header. */
- OR(vec1(retype(brw_message_reg(ir->base_mrf), BRW_REGISTER_TYPE_UD)),
- vec1(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD)),
- brw_imm_ud(1 << 11));
+ gen8_instruction *inst =
+ OR(get_element_ud(brw_message_reg(ir->base_mrf), 0),
+ vec1(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD)),
+ brw_imm_ud(1 << 11));
+ /* NOTE(review): mask control is disabled so the header bit gets set
+  * regardless of per-channel enables — confirm against the PRM. */
+ gen8_set_mask_control(inst, BRW_MASK_DISABLE);
}
if (ir->target > 0) {
/* Set the render target index for choosing BLEND_STATE. */
- MOV(retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, ir->base_mrf, 2),
- BRW_REGISTER_TYPE_UD),
- brw_imm_ud(ir->target));
+ /* Presumably MOV_RAW performs the unmasked/untyped move the header
+  * store needs — see the MOV_RAW helper. */
+ MOV_RAW(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, ir->base_mrf, 2),
+ brw_imm_ud(ir->target));
}
}
/* "Last Render Target Select" must be set on all writes to the last of
 * the render targets (if using MRT), or always for a single RT scenario.
 */
- if ((ir->target == c->key.nr_color_regions - 1) || !c->key.nr_color_regions)
+ if ((ir->target == key->nr_color_regions - 1) || !key->nr_color_regions)
msg_control |= (1 << 4); /* Last Render Target Select */
uint32_t surf_index =
- c->prog_data.binding_table.render_target_start + ir->target;
+ prog_data->binding_table.render_target_start + ir->target;
gen8_set_dp_message(brw, inst,
GEN6_SFID_DATAPORT_RENDER_CACHE,
ir->header_present,
ir->eot);
- mark_surface_used(surf_index);
+ brw_mark_surface_used(&prog_data->base, surf_index);
}
void
}
uint32_t surf_index =
- c->prog_data.base.binding_table.texture_start + ir->sampler;
+ prog_data->base.binding_table.texture_start + ir->sampler;
gen8_instruction *inst = next_inst(BRW_OPCODE_SEND);
gen8_set_dst(brw, inst, dst);
ir->header_present,
simd_mode);
- mark_surface_used(surf_index);
+ /* brw_mark_surface_used() replaces the local mark_surface_used();
+  * presumably the same binding-table size bookkeeping, now shared
+  * across shader stages — TODO confirm. */
+ brw_mark_surface_used(&prog_data->base, surf_index);
}
{
unsigned vstride, width;
- if (c->key.high_quality_derivatives) {
+ if (key->high_quality_derivatives) {
/* Produce accurate derivatives. */
vstride = BRW_VERTICAL_STRIDE_2;
width = BRW_WIDTH_2;
unsigned src1_swizzle;
unsigned src1_subnr;
- if (c->key.high_quality_derivatives) {
+ if (key->high_quality_derivatives) {
/* Produce accurate derivatives. */
hstride = BRW_HORIZONTAL_STRIDE_1;
src0_swizzle = BRW_SWIZZLE_XYXY;
}
void
-gen8_fs_generator::generate_scratch_write(fs_inst *inst, struct brw_reg dst)
+gen8_fs_generator::generate_scratch_write(fs_inst *ir, struct brw_reg src)
{
- assert(inst->mlen != 0);
- assert(!"TODO: Implement generate_scratch_write.");
+ MOV(retype(brw_message_reg(ir->base_mrf + 1), BRW_REGISTER_TYPE_UD),
+ retype(src, BRW_REGISTER_TYPE_UD));
+
+ struct brw_reg mrf =
+ retype(brw_message_reg(ir->base_mrf), BRW_REGISTER_TYPE_UD);
+
+ const int num_regs = dispatch_width / 8;
+
+ uint32_t msg_control;
+ /* NOTE(review): num_regs is dispatch_width / 8, so SIMD8 writes 2 OWords
+  * and SIMD16 writes 4 — assumes 16-byte OWords and 32-byte registers;
+  * confirm against the dataport documentation. */
+ if (num_regs == 1)
+ msg_control = BRW_DATAPORT_OWORD_BLOCK_2_OWORDS;
+ else
+ msg_control = BRW_DATAPORT_OWORD_BLOCK_4_OWORDS;
+
+ /* Set up the message header. This is g0, with g0.2 filled with
+ * the offset. We don't want to leave our offset around in g0 or
+ * it'll screw up texture samples, so set it up inside the message
+ * reg.
+ */
+ unsigned save_exec_size = default_state.exec_size;
+ default_state.exec_size = BRW_EXECUTE_8;
+
+ MOV_RAW(mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
+ /* set message header global offset field (reg 0, element 2) */
+ MOV_RAW(get_element_ud(mrf, 2), brw_imm_ud(ir->offset / 16));
+
+ struct brw_reg dst;
+ if (dispatch_width == 16)
+ dst = retype(vec16(brw_null_reg()), BRW_REGISTER_TYPE_UW);
+ else
+ dst = retype(vec8(brw_null_reg()), BRW_REGISTER_TYPE_UW);
+
+ /* NOTE(review): exec size is forced to 16 for the send even in SIMD8
+  * dispatch (the SIMD8 path builds a vec8 null dst above) — verify this
+  * is intended. */
+ default_state.exec_size = BRW_EXECUTE_16;
+
+ gen8_instruction *send = next_inst(BRW_OPCODE_SEND);
+ gen8_set_dst(brw, send, dst);
+ gen8_set_src0(brw, send, mrf);
+ gen8_set_dp_message(brw, send, GEN7_SFID_DATAPORT_DATA_CACHE,
+ 255, /* binding table index: stateless access */
+ GEN6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE,
+ msg_control,
+ 1 + num_regs, /* mlen */
+ 0, /* rlen */
+ true, /* header present */
+ false); /* EOT */
+
+ default_state.exec_size = save_exec_size;
}
void
-gen8_fs_generator::generate_scratch_read(fs_inst *inst, struct brw_reg dst)
+gen8_fs_generator::generate_scratch_read(fs_inst *ir, struct brw_reg dst)
{
- assert(inst->mlen != 0);
- assert(!"TODO: Implement generate_scratch_read.");
+ struct brw_reg mrf =
+ retype(brw_message_reg(ir->base_mrf), BRW_REGISTER_TYPE_UD);
+
+ const int num_regs = dispatch_width / 8;
+
+ uint32_t msg_control;
+ if (num_regs == 1)
+ msg_control = BRW_DATAPORT_OWORD_BLOCK_2_OWORDS;
+ else
+ msg_control = BRW_DATAPORT_OWORD_BLOCK_4_OWORDS;
+
+ unsigned save_exec_size = default_state.exec_size;
+ default_state.exec_size = BRW_EXECUTE_8;
+
+ MOV_RAW(mrf, retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD));
+ /* set message header global offset field (reg 0, element 2) */
+ MOV_RAW(get_element_ud(mrf, 2), brw_imm_ud(ir->offset / 16));
+
+ /* Header-only message: mlen is 1 (just the header in the MRF); the
+  * data comes back in rlen = num_regs registers. */
+ gen8_instruction *send = next_inst(BRW_OPCODE_SEND);
+ gen8_set_dst(brw, send, retype(dst, BRW_REGISTER_TYPE_UW));
+ gen8_set_src0(brw, send, mrf);
+ gen8_set_dp_message(brw, send, GEN7_SFID_DATAPORT_DATA_CACHE,
+ 255, /* binding table index: stateless access */
+ BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ,
+ msg_control,
+ 1, /* mlen */
+ num_regs, /* rlen */
+ true, /* header present */
+ false); /* EOT */
+
+ default_state.exec_size = save_exec_size;
}
void
-gen8_fs_generator::generate_scratch_read_gen7(fs_inst *inst, struct brw_reg dst)
+gen8_fs_generator::generate_scratch_read_gen7(fs_inst *ir, struct brw_reg dst)
{
- assert(inst->mlen != 0);
- assert(!"TODO: Implement generate_scratch_read_gen7.");
+ unsigned save_exec_size = default_state.exec_size;
+ gen8_instruction *send = next_inst(BRW_OPCODE_SEND);
+
+ int num_regs = dispatch_width / 8;
+
+ /* According to the docs, offset is "A 12-bit HWord offset into the memory
+ * Immediate Memory buffer as specified by binding table 0xFF." An HWORD
+ * is 32 bytes, which happens to be the size of a register.
+ */
+ int offset = ir->offset / REG_SIZE;
+
+ /* The HW requires that the header is present; this is to get the g0.5
+ * scratch offset.
+ */
+ gen8_set_src0(brw, send, brw_vec8_grf(0, 0));
+ gen8_set_dst(brw, send, retype(dst, BRW_REGISTER_TYPE_UW));
+ gen8_set_dp_scratch_message(brw, send,
+ false, /* scratch read */
+ false, /* OWords */
+ false, /* invalidate after read */
+ num_regs,
+ offset,
+ 1, /* mlen - just g0 */
+ num_regs, /* rlen */
+ true, /* header present */
+ false); /* EOT */
+
+ /* NOTE(review): save_exec_size is saved at the top and restored here,
+  * but default_state.exec_size is never modified in between — dead code
+  * unless an exec-size override is intended to be added. */
+ default_state.exec_size = save_exec_size;
}
void
false, /* no header */
BRW_SAMPLER_SIMD_MODE_SIMD4X2);
- mark_surface_used(surf_index);
+ brw_mark_surface_used(&prog_data->base, surf_index);
}
void
false, /* no header */
simd_mode);
- mark_surface_used(surf_index);
+ brw_mark_surface_used(&prog_data->base, surf_index);
}
/**
HALT();
}
-void
+/* NOTE(review): return type changed to bool — now reports whether any
+ * discard jumps were actually patched (false when none were recorded). */
+bool
gen8_fs_generator::patch_discard_jumps_to_fb_writes()
{
if (discard_halt_patches.is_empty())
- return;
+ return false;
/* There is a somewhat strange undocumented requirement of using
 * HALT, according to the simulator. If some channel has HALTed to
}
this->discard_halt_patches.make_empty();
+ return true;
}
/**
default_state.exec_size = save_exec_size;
}
+/**
+ * Change the register's data type from UD to HF, doubling the strides in order
+ * to compensate for halving the data type width.
+ * (UD = 32-bit unsigned dword; HF = 16-bit half-float.)
+ */
+static struct brw_reg
+ud_reg_to_hf(struct brw_reg r)
+{
+ assert(r.type == BRW_REGISTER_TYPE_UD);
+ r.type = BRW_REGISTER_TYPE_HF;
+
+ /* The BRW_*_STRIDE enums are defined so that incrementing the field
+ * doubles the real stride.
+ */
+ if (r.hstride != 0)
+ ++r.hstride;
+ if (r.vstride != 0)
+ ++r.vstride;
+
+ return r;
+}
+
+
void
-gen8_fs_generator::generate_code(exec_list *instructions)
+/* Emit code for GLSL's packHalf2x16: convert two floats (x, y) to half
+ * precision and pack them into the low/high words of each 32-bit dst
+ * channel. The doubled-stride HF view from ud_reg_to_hf() means each MOV
+ * touches only the low word of a channel — presumably why the SHL is
+ * needed to position y's bits first. */
+gen8_fs_generator::generate_pack_half_2x16_split(fs_inst *inst,
+ struct brw_reg dst,
+ struct brw_reg x,
+ struct brw_reg y)
{
- int last_native_inst_offset = next_inst_offset;
- const char *last_annotation_string = NULL;
- const void *last_annotation_ir = NULL;
+ assert(dst.type == BRW_REGISTER_TYPE_UD);
+ assert(x.type == BRW_REGISTER_TYPE_F);
+ assert(y.type == BRW_REGISTER_TYPE_F);
+ struct brw_reg dst_hf = ud_reg_to_hf(dst);
+
+ /* Give each 32-bit channel of dst the form below , where "." means
+ * unchanged.
+ * 0x....hhhh
+ */
+ MOV(dst_hf, y);
+
+ /* Now the form:
+ * 0xhhhh0000
+ */
+ SHL(dst, dst, brw_imm_ud(16u));
+
+ /* And, finally the form of packHalf2x16's output:
+ * 0xhhhhllll
+ */
+ MOV(dst_hf, x);
+}
+
+
+/* Emit code for GLSL's unpackHalf2x16 split opcodes: convert the low (X)
+ * or high (Y) half-float word of each 32-bit src channel to a float. */
+void
+gen8_fs_generator::generate_unpack_half_2x16_split(fs_inst *inst,
+ struct brw_reg dst,
+ struct brw_reg src)
+{
+ assert(dst.type == BRW_REGISTER_TYPE_F);
+ assert(src.type == BRW_REGISTER_TYPE_UD);
+
+ struct brw_reg src_hf = ud_reg_to_hf(src);
+
+ /* Each channel of src has the form of unpackHalf2x16's input: 0xhhhhllll.
+ * For the Y case, we wish to access only the upper word; therefore
+ * a 16-bit subregister offset is needed.
+ */
+ assert(inst->opcode == FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X ||
+ inst->opcode == FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y);
+ if (inst->opcode == FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y)
+ src_hf.subnr += 2;
+
+ MOV(dst, src_hf);
+}
+
+
+/* Emit an untyped atomic operation SEND via HSW+ data cache data port 1.
+ * Both atomic_op (a BRW_AOP_* code) and surf_index must be UD immediates
+ * (asserted below). */
+void
+gen8_fs_generator::generate_untyped_atomic(fs_inst *ir,
+ struct brw_reg dst,
+ struct brw_reg atomic_op,
+ struct brw_reg surf_index)
+{
+ assert(atomic_op.file == BRW_IMMEDIATE_VALUE &&
+ atomic_op.type == BRW_REGISTER_TYPE_UD &&
+ surf_index.file == BRW_IMMEDIATE_VALUE &&
+ surf_index.type == BRW_REGISTER_TYPE_UD);
+ assert((atomic_op.dw1.ud & ~0xf) == 0);
+
+ unsigned msg_control =
+ atomic_op.dw1.ud | /* Atomic Operation Type: BRW_AOP_* */
+ ((dispatch_width == 16 ? 0 : 1) << 4) | /* SIMD Mode */
+ (1 << 5); /* Return data expected */
+
+ gen8_instruction *inst = next_inst(BRW_OPCODE_SEND);
+ gen8_set_dst(brw, inst, retype(dst, BRW_REGISTER_TYPE_UD));
+ gen8_set_src0(brw, inst, brw_message_reg(ir->base_mrf));
+ gen8_set_dp_message(brw, inst, HSW_SFID_DATAPORT_DATA_CACHE_1,
+ surf_index.dw1.ud,
+ HSW_DATAPORT_DC_PORT1_UNTYPED_ATOMIC_OP,
+ msg_control,
+ ir->mlen,
+ dispatch_width / 8,
+ ir->header_present,
+ false);
+
+ brw_mark_surface_used(&prog_data->base, surf_index.dw1.ud);
+}
+
+
+/* Emit an untyped surface read SEND. The channel-mask bits in msg_control
+ * (0xe) leave only the R channel enabled, so a single dword per pixel is
+ * returned. surf_index must be a UD immediate. */
+void
+gen8_fs_generator::generate_untyped_surface_read(fs_inst *ir,
+ struct brw_reg dst,
+ struct brw_reg surf_index)
+{
+ assert(surf_index.file == BRW_IMMEDIATE_VALUE &&
+ surf_index.type == BRW_REGISTER_TYPE_UD);
+
+ unsigned msg_control = 0xe | /* Enable only the R channel */
+ ((dispatch_width == 16 ? 1 : 2) << 4); /* SIMD Mode */
+
+ gen8_instruction *inst = next_inst(BRW_OPCODE_SEND);
+ gen8_set_dst(brw, inst, retype(dst, BRW_REGISTER_TYPE_UD));
+ gen8_set_src0(brw, inst, brw_message_reg(ir->base_mrf));
+ gen8_set_dp_message(brw, inst, HSW_SFID_DATAPORT_DATA_CACHE_1,
+ surf_index.dw1.ud,
+ HSW_DATAPORT_DC_PORT1_UNTYPED_SURFACE_READ,
+ msg_control,
+ ir->mlen,
+ dispatch_width / 8,
+ ir->header_present,
+ false);
+
+ brw_mark_surface_used(&prog_data->base, surf_index.dw1.ud);
+}
+
+
+void
+gen8_fs_generator::generate_code(exec_list *instructions,
+ struct annotation_info *annotation)
+{
if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
if (prog) {
- printf("Native code for fragment shader %d (SIMD%d dispatch):\n",
+ fprintf(stderr,
+ "Native code for %s fragment shader %d (SIMD%d dispatch):\n",
+ shader_prog->Label ? shader_prog->Label : "unnamed",
shader_prog->Name, dispatch_width);
} else if (fp) {
- printf("Native code for fragment program %d (SIMD%d dispatch):\n",
- prog->Id, dispatch_width);
+ fprintf(stderr,
+ "Native code for fragment program %d (SIMD%d dispatch):\n",
+ prog->Id, dispatch_width);
} else {
- printf("Native code for blorp program (SIMD%d dispatch):\n",
- dispatch_width);
+ fprintf(stderr, "Native code for blorp program (SIMD%d dispatch):\n",
+ dispatch_width);
}
}
fs_inst *ir = (fs_inst *) node;
struct brw_reg src[3], dst;
- if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
- foreach_list(node, &cfg->block_list) {
- bblock_link *link = (bblock_link *)node;
- bblock_t *block = link->block;
-
- if (block->start == ir) {
- printf(" START B%d", block->block_num);
- foreach_list(predecessor_node, &block->parents) {
- bblock_link *predecessor_link =
- (bblock_link *)predecessor_node;
- bblock_t *predecessor_block = predecessor_link->block;
- printf(" <-B%d", predecessor_block->block_num);
- }
- printf("\n");
- }
- }
-
- if (last_annotation_ir != ir->ir) {
- last_annotation_ir = ir->ir;
- if (last_annotation_ir) {
- printf(" ");
- if (prog) {
- ((ir_instruction *) ir->ir)->print();
- } else if (prog) {
- const prog_instruction *fpi;
- fpi = (const prog_instruction *) ir->ir;
- printf("%d: ", (int)(fpi - prog->Instructions));
- _mesa_fprint_instruction_opt(stdout,
- fpi,
- 0, PROG_PRINT_DEBUG, NULL);
- }
- printf("\n");
- }
- }
- if (last_annotation_string != ir->annotation) {
- last_annotation_string = ir->annotation;
- if (last_annotation_string)
- printf(" %s\n", last_annotation_string);
- }
- }
+ /* The hand-rolled debug/IR dump above is replaced by the shared
+  * annotation machinery; disassembly happens later via dump_assembly(). */
+ if (unlikely(INTEL_DEBUG & DEBUG_WM))
+ annotate(brw, annotation, cfg, ir, next_inst_offset);
for (unsigned int i = 0; i < 3; i++) {
src[i] = brw_reg_from_fs_reg(&ir->src[i]);
default_state.predicate = ir->predicate;
default_state.predicate_inverse = ir->predicate_inverse;
default_state.saturate = ir->saturate;
+ default_state.mask_control = ir->force_writemask_all;
default_state.flag_subreg_nr = ir->flag_subreg;
if (dispatch_width == 16 && !ir->force_uncompressed)
else
default_state.exec_size = BRW_EXECUTE_8;
- /* fs_inst::force_sechalf is only used for original Gen4 code, so we
- * don't handle it. Add qtr_control to default_state if that changes.
- */
- assert(!ir->force_sechalf);
+ /* Select quarter control: whole register (1Q) for SIMD8 or forced-
+  * uncompressed instructions, second half (2Q) for force_sechalf,
+  * otherwise compressed SIMD16 (1H). */
+ if (ir->force_uncompressed || dispatch_width == 8)
+ default_state.qtr_control = GEN6_COMPRESSION_1Q;
+ else if (ir->force_sechalf)
+ default_state.qtr_control = GEN6_COMPRESSION_2Q;
+ else
+ default_state.qtr_control = GEN6_COMPRESSION_1H;
switch (ir->opcode) {
case BRW_OPCODE_MOV:
break;
case BRW_OPCODE_F32TO16:
- F32TO16(dst, src[0]);
+ /* A MOV with an HF operand performs the float<->half conversion —
+  * presumably Gen8 dropped the dedicated F32TO16/F16TO32 opcodes;
+  * confirm against the BDW PRM. */
+ MOV(retype(dst, BRW_REGISTER_TYPE_HF), src[0]);
break;
case BRW_OPCODE_F16TO32:
- F16TO32(dst, src[0]);
+ MOV(dst, retype(src[0], BRW_REGISTER_TYPE_HF));
break;
case BRW_OPCODE_CMP:
break;
case FS_OPCODE_DDY:
/* Make sure fp->UsesDFdy flag got set (otherwise there's no
- * guarantee that c->key.render_to_fbo is set).
+ * guarantee that key->render_to_fbo is set).
*/
assert(fp->UsesDFdy);
- generate_ddy(ir, dst, src[0], c->key.render_to_fbo);
+ generate_ddy(ir, dst, src[0], key->render_to_fbo);
break;
case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
break;
case SHADER_OPCODE_UNTYPED_ATOMIC:
- assert(!"XXX: Missing Gen8 scalar support for untyped atomics");
+ generate_untyped_atomic(ir, dst, src[0], src[1]);
break;
case SHADER_OPCODE_UNTYPED_SURFACE_READ:
- assert(!"XXX: Missing Gen8 scalar support for untyped surface reads");
+ generate_untyped_surface_read(ir, dst, src[0]);
break;
case FS_OPCODE_SET_SIMD4X2_OFFSET:
break;
case FS_OPCODE_PACK_HALF_2x16_SPLIT:
- assert(!"XXX: Missing Gen8 scalar support for PACK_HALF_2x16_SPLIT");
+ generate_pack_half_2x16_split(ir, dst, src[0], src[1]);
break;
case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X:
case FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y:
- assert(!"XXX: Missing Gen8 scalar support for UNPACK_HALF_2x16_SPLIT");
+ generate_unpack_half_2x16_split(ir, dst, src[0]);
break;
case FS_OPCODE_PLACEHOLDER_HALT:
/* This is the place where the final HALT needs to be inserted if
* we've emitted any discards. If not, this will emit no code.
*/
- patch_discard_jumps_to_fb_writes();
+ if (!patch_discard_jumps_to_fb_writes()) {
+ if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
+ /* Nothing was emitted for this instruction, so drop the
+  * annotation that annotate() just recorded for it —
+  * presumably to keep annotations aligned with real offsets. */
+ annotation->ann_count--;
+ }
+ }
break;
default:
}
abort();
}
-
- if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
- disassemble(stdout, last_native_inst_offset, next_inst_offset);
-
- foreach_list(node, &cfg->block_list) {
- bblock_link *link = (bblock_link *)node;
- bblock_t *block = link->block;
-
- if (block->end == ir) {
- printf(" END B%d", block->block_num);
- foreach_list(successor_node, &block->children) {
- bblock_link *successor_link =
- (bblock_link *)successor_node;
- bblock_t *successor_block = successor_link->block;
- printf(" ->B%d", successor_block->block_num);
- }
- printf("\n");
- }
- }
- }
-
- last_native_inst_offset = next_inst_offset;
- }
-
- if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
- printf("\n");
}
patch_jump_targets();
+ annotation_finalize(annotation, next_inst_offset);
}
const unsigned *
assert(simd8_instructions || simd16_instructions);
if (simd8_instructions) {
+ struct annotation_info annotation;
+ memset(&annotation, 0, sizeof(annotation));
+
dispatch_width = 8;
- generate_code(simd8_instructions);
+ generate_code(simd8_instructions, &annotation);
+
+ /* NOTE(review): annotation.ann is only freed under DEBUG_WM; confirm
+  * that annotate()/annotation_finalize() allocate nothing when the
+  * debug flag is unset, otherwise this leaks. */
+ if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
+ dump_assembly(store, annotation.ann_count, annotation.ann, brw, prog,
+ gen8_disassemble);
+ ralloc_free(annotation.ann);
+ }
}
if (simd16_instructions) {
/* Align to a 64-byte boundary. */
- while ((nr_inst * sizeof(gen8_instruction)) % 64)
+ while (next_inst_offset % 64)
NOP();
/* Save off the start of this SIMD16 program */
- c->prog_data.prog_offset_16 = nr_inst * sizeof(gen8_instruction);
+ prog_data->prog_offset_16 = next_inst_offset;
+
+ struct annotation_info annotation;
+ memset(&annotation, 0, sizeof(annotation));
dispatch_width = 16;
- generate_code(simd16_instructions);
+ generate_code(simd16_instructions, &annotation);
+
+ if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
+ dump_assembly(store, annotation.ann_count, annotation.ann,
+ brw, prog, gen8_disassemble);
+ ralloc_free(annotation.ann);
+ }
}
*assembly_size = next_inst_offset;