*/
c.header_position_offset = ATTR_SIZE;
- if (intel->is_ironlake)
+ if (intel->gen == 5)
delta = 3 * REG_SIZE;
else
delta = REG_SIZE;
c.nr_attrs = brw_count_bits(c.key.attrs);
- if (intel->is_ironlake)
+ if (intel->gen == 5)
c.nr_regs = (c.nr_attrs + 1) / 2 + 3; /* are vertices packed, or reg-aligned? */
else
c.nr_regs = (c.nr_attrs + 1) / 2 + 1; /* are vertices packed, or reg-aligned? */
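A standalone illustration of the register math above (not part of the patch; the helper name is hypothetical): each 256-bit GRF holds two 4-DW attributes, and the vertex header takes one register before Ironlake but three on gen5, which is where the +1/+3 comes from.

/* Hypothetical helper mirroring the nr_regs computation above. */
static unsigned
clip_vertex_regs(unsigned nr_attrs, int gen)
{
   unsigned header_regs = (gen == 5) ? 3 : 1; /* gen5 carries a larger vertex header */
   return (nr_attrs + 1) / 2 + header_regs;   /* two 4-DW attributes per register */
}
/* e.g. 8 attrs: (8+1)/2 + 1 = 5 regs pre-gen5, (8+1)/2 + 3 = 7 regs on gen5 */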
/* _NEW_TRANSFORM */
key.nr_userclip = brw_count_bits(ctx->Transform.ClipPlanesEnabled);
- if (intel->is_ironlake)
+ if (intel->gen == 5)
key.clip_mode = BRW_CLIPMODE_KERNEL_CLIP;
else
key.clip_mode = BRW_CLIPMODE_NORMAL;
*/
assert(key->nr_urb_entries % 2 == 0);
- /* Although up to 16 concurrent Clip threads are allowed on IGDNG,
+ /* Although up to 16 concurrent Clip threads are allowed on Ironlake,
* only 2 threads can output VUEs at a time.
*/
- if (intel->is_ironlake)
+ if (intel->gen == 5)
clip.thread4.max_threads = 16 - 1;
else
clip.thread4.max_threads = 2 - 1;
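The "16 - 1" and "2 - 1" literals follow the usual hardware convention of programming thread-count fields as (threads - 1); a minimal sketch, with a hypothetical helper name:

/* Clip max_threads field, encoded as (threads - 1), per the values above. */
static unsigned
clip_max_threads_field(int gen)
{
   unsigned threads = (gen == 5) ? 16 : 2; /* 16 on Ironlake, 2 otherwise */
   return threads - 1;
}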
for (j = 0; j < 3; j++) {
GLuint delta = c->nr_attrs*16 + 32;
- if (intel->is_ironlake)
+ if (intel->gen == 5)
delta = c->nr_attrs * 16 + 32 * 3;
brw_MOV(&c->func, byte_offset(c->reg.vertex[j], delta), brw_imm_f(0));
for (i = 0; i < c->nr_attrs; i++) {
GLuint delta = i*16 + 32;
- if (intel->is_ironlake)
+ if (intel->gen == 5)
delta = i * 16 + 32 * 3;
if (delta == c->offset[VERT_RESULT_EDGE]) {
if (i & 1) {
GLuint delta = i*16 + 32;
- if (intel->is_ironlake)
+ if (intel->gen == 5)
delta = i * 16 + 32 * 3;
brw_MOV(p, deref_4f(dest_ptr, delta), brw_imm_f(0));
MIN2(ctx->Const.FragmentProgram.MaxNativeParameters,
ctx->Const.FragmentProgram.MaxEnvParams);
- if (intel->is_ironlake || intel->is_g4x || intel->gen >= 6) {
+ if (intel->is_g4x || intel->gen >= 5) {
brw->CMD_VF_STATISTICS = CMD_VF_STATISTICS_GM45;
brw->CMD_PIPELINE_SELECT = CMD_PIPELINE_SELECT_GM45;
brw->has_surface_tile_offset = GL_TRUE;
}
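The rewritten condition is equivalent to the old one because is_ironlake held exactly when gen == 5. A quick standalone check of that equivalence (illustrative only; not driver code):

#include <assert.h>
#include <stdbool.h>

int main(void)
{
   /* is_g4x only occurs on gen 4 parts, so enumerate accordingly. */
   for (int gen = 2; gen <= 6; gen++) {
      for (int g4x = 0; g4x <= (gen == 4 ? 1 : 0); g4x++) {
         bool is_ironlake = (gen == 5);
         bool old_cond = is_ironlake || g4x || gen >= 6;
         bool new_cond = g4x || gen >= 5;
         assert(old_cond == new_cond);
      }
   }
   return 0;
}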
/* WM maximum threads is number of EUs times number of threads per EU. */
- if (intel->is_ironlake) {
+ if (intel->gen == 5) {
brw->urb.size = 1024;
brw->vs_max_threads = 72;
brw->wm_max_threads = 12 * 6;
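Spelling out the arithmetic behind the comment above: the 12 * 6 literal is 12 EUs times 6 threads per EU, i.e. 72 WM threads on Ironlake (a sketch; the constant names are mine):

enum { ILK_WM_EUS = 12, ILK_WM_THREADS_PER_EU = 6 };
static const int ilk_wm_max_threads = ILK_WM_EUS * ILK_WM_THREADS_PER_EU; /* 72 */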
#define BRW_SAMPLER_MESSAGE_SIMD8_LD 3
#define BRW_SAMPLER_MESSAGE_SIMD16_LD 3
-#define BRW_SAMPLER_MESSAGE_SAMPLE_IGDNG 0
-#define BRW_SAMPLER_MESSAGE_SAMPLE_BIAS_IGDNG 1
-#define BRW_SAMPLER_MESSAGE_SAMPLE_LOD_IGDNG 2
-#define BRW_SAMPLER_MESSAGE_SAMPLE_COMPARE_IGDNG 3
+#define BRW_SAMPLER_MESSAGE_SAMPLE_GEN5 0
+#define BRW_SAMPLER_MESSAGE_SAMPLE_BIAS_GEN5 1
+#define BRW_SAMPLER_MESSAGE_SAMPLE_LOD_GEN5 2
+#define BRW_SAMPLER_MESSAGE_SAMPLE_COMPARE_GEN5 3
-/* for IGDNG only */
+/* for GEN5 only */
#define BRW_SAMPLER_SIMD_MODE_SIMD4X2 0
#define BRW_SAMPLER_SIMD_MODE_SIMD8 1
#define BRW_SAMPLER_SIMD_MODE_SIMD16 2
OUT_RELOC(input->bo,
I915_GEM_DOMAIN_VERTEX, 0,
input->offset);
- if (intel->is_ironlake || intel->gen >= 6) {
+ if (intel->gen >= 5) {
OUT_RELOC(input->bo,
I915_GEM_DOMAIN_VERTEX, 0,
input->bo->size - 1);
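The size - 1 above reflects that gen5+ vertex buffer state takes an inclusive end address where older parts programmed that dword differently; a one-line sketch of the conversion (my naming, an assumption rather than patch text):

/* Inclusive end address of a buffer of `size` bytes starting at `start`. */
static unsigned vb_end_address(unsigned start, unsigned size) { return start + size - 1; }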
(0 << BRW_VE0_SRC_OFFSET_SHIFT));
}
- if (intel->is_ironlake || intel->gen >= 6)
+ if (intel->gen >= 5)
OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
(comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
(comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
struct intel_context *intel = &brw->intel;
brw_set_src1(insn, brw_imm_d(0));
- if (intel->is_ironlake) {
- insn->bits3.math_igdng.function = function;
- insn->bits3.math_igdng.int_type = integer_type;
- insn->bits3.math_igdng.precision = low_precision;
- insn->bits3.math_igdng.saturate = saturate;
- insn->bits3.math_igdng.data_type = dataType;
- insn->bits3.math_igdng.snapshot = 0;
- insn->bits3.math_igdng.header_present = 0;
- insn->bits3.math_igdng.response_length = response_length;
- insn->bits3.math_igdng.msg_length = msg_length;
- insn->bits3.math_igdng.end_of_thread = 0;
- insn->bits2.send_igdng.sfid = BRW_MESSAGE_TARGET_MATH;
- insn->bits2.send_igdng.end_of_thread = 0;
+ if (intel->gen == 5) {
+ insn->bits3.math_gen5.function = function;
+ insn->bits3.math_gen5.int_type = integer_type;
+ insn->bits3.math_gen5.precision = low_precision;
+ insn->bits3.math_gen5.saturate = saturate;
+ insn->bits3.math_gen5.data_type = dataType;
+ insn->bits3.math_gen5.snapshot = 0;
+ insn->bits3.math_gen5.header_present = 0;
+ insn->bits3.math_gen5.response_length = response_length;
+ insn->bits3.math_gen5.msg_length = msg_length;
+ insn->bits3.math_gen5.end_of_thread = 0;
+ insn->bits2.send_gen5.sfid = BRW_MESSAGE_TARGET_MATH;
+ insn->bits2.send_gen5.end_of_thread = 0;
} else {
insn->bits3.math.function = function;
insn->bits3.math.int_type = integer_type;
{
brw_set_src1(insn, brw_imm_d(0));
- insn->bits3.urb_igdng.opcode = 1;
- insn->bits3.urb_igdng.offset = offset;
- insn->bits3.urb_igdng.swizzle_control = swizzle_control;
- insn->bits3.urb_igdng.allocate = allocate;
- insn->bits3.urb_igdng.used = used;
- insn->bits3.urb_igdng.complete = complete;
- insn->bits3.urb_igdng.header_present = 1;
- insn->bits3.urb_igdng.response_length = response_length;
- insn->bits3.urb_igdng.msg_length = msg_length;
- insn->bits3.urb_igdng.end_of_thread = end_of_thread;
- insn->bits2.send_igdng.sfid = BRW_MESSAGE_TARGET_URB;
- insn->bits2.send_igdng.end_of_thread = end_of_thread;
+ insn->bits3.urb_gen5.opcode = 1;
+ insn->bits3.urb_gen5.offset = offset;
+ insn->bits3.urb_gen5.swizzle_control = swizzle_control;
+ insn->bits3.urb_gen5.allocate = allocate;
+ insn->bits3.urb_gen5.used = used;
+ insn->bits3.urb_gen5.complete = complete;
+ insn->bits3.urb_gen5.header_present = 1;
+ insn->bits3.urb_gen5.response_length = response_length;
+ insn->bits3.urb_gen5.msg_length = msg_length;
+ insn->bits3.urb_gen5.end_of_thread = end_of_thread;
+ insn->bits2.send_gen5.sfid = BRW_MESSAGE_TARGET_URB;
+ insn->bits2.send_gen5.end_of_thread = end_of_thread;
}
static void brw_set_urb_message( struct brw_context *brw,
struct intel_context *intel = &brw->intel;
brw_set_src1(insn, brw_imm_d(0));
- if (intel->is_ironlake || intel->gen >= 6) {
- insn->bits3.urb_igdng.opcode = 0; /* ? */
- insn->bits3.urb_igdng.offset = offset;
- insn->bits3.urb_igdng.swizzle_control = swizzle_control;
- insn->bits3.urb_igdng.allocate = allocate;
- insn->bits3.urb_igdng.used = used; /* ? */
- insn->bits3.urb_igdng.complete = complete;
- insn->bits3.urb_igdng.header_present = 1;
- insn->bits3.urb_igdng.response_length = response_length;
- insn->bits3.urb_igdng.msg_length = msg_length;
- insn->bits3.urb_igdng.end_of_thread = end_of_thread;
+ if (intel->gen >= 5) {
+ insn->bits3.urb_gen5.opcode = 0; /* ? */
+ insn->bits3.urb_gen5.offset = offset;
+ insn->bits3.urb_gen5.swizzle_control = swizzle_control;
+ insn->bits3.urb_gen5.allocate = allocate;
+ insn->bits3.urb_gen5.used = used; /* ? */
+ insn->bits3.urb_gen5.complete = complete;
+ insn->bits3.urb_gen5.header_present = 1;
+ insn->bits3.urb_gen5.response_length = response_length;
+ insn->bits3.urb_gen5.msg_length = msg_length;
+ insn->bits3.urb_gen5.end_of_thread = end_of_thread;
if (intel->gen >= 6) {
/* For SNB, the SFID bits moved to the condmod bits, and
* EOT stayed in bits3 above. Does the EOT bit setting
*/
insn->header.destreg__conditionalmod = BRW_MESSAGE_TARGET_URB;
} else {
- insn->bits2.send_igdng.sfid = BRW_MESSAGE_TARGET_URB;
- insn->bits2.send_igdng.end_of_thread = end_of_thread;
+ insn->bits2.send_gen5.sfid = BRW_MESSAGE_TARGET_URB;
+ insn->bits2.send_gen5.end_of_thread = end_of_thread;
}
} else {
insn->bits3.urb.opcode = 0; /* ? */
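Across these send-message helpers, the SFID field moves by generation; a hypothetical summary helper (not driver code) of what the branches above program:

/* Where each generation encodes the shared-function ID, per the code above. */
enum sfid_location { SFID_IN_BITS3, SFID_IN_BITS2, SFID_IN_CONDMOD };

static enum sfid_location
sfid_location_for_gen(int gen)
{
   if (gen >= 6)
      return SFID_IN_CONDMOD; /* SNB: SFID moved to the condmod bits */
   if (gen == 5)
      return SFID_IN_BITS2;   /* Ironlake: bits2.send_gen5.sfid */
   return SFID_IN_BITS3;      /* gen4: part of the bits3 descriptor */
}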
struct intel_context *intel = &brw->intel;
brw_set_src1(insn, brw_imm_d(0));
- if (intel->is_ironlake) {
- insn->bits3.dp_write_igdng.binding_table_index = binding_table_index;
- insn->bits3.dp_write_igdng.msg_control = msg_control;
- insn->bits3.dp_write_igdng.pixel_scoreboard_clear = pixel_scoreboard_clear;
- insn->bits3.dp_write_igdng.msg_type = msg_type;
- insn->bits3.dp_write_igdng.send_commit_msg = 0;
- insn->bits3.dp_write_igdng.header_present = 1;
- insn->bits3.dp_write_igdng.response_length = response_length;
- insn->bits3.dp_write_igdng.msg_length = msg_length;
- insn->bits3.dp_write_igdng.end_of_thread = end_of_thread;
- insn->bits2.send_igdng.sfid = BRW_MESSAGE_TARGET_DATAPORT_WRITE;
- insn->bits2.send_igdng.end_of_thread = end_of_thread;
+ if (intel->gen == 5) {
+ insn->bits3.dp_write_gen5.binding_table_index = binding_table_index;
+ insn->bits3.dp_write_gen5.msg_control = msg_control;
+ insn->bits3.dp_write_gen5.pixel_scoreboard_clear = pixel_scoreboard_clear;
+ insn->bits3.dp_write_gen5.msg_type = msg_type;
+ insn->bits3.dp_write_gen5.send_commit_msg = 0;
+ insn->bits3.dp_write_gen5.header_present = 1;
+ insn->bits3.dp_write_gen5.response_length = response_length;
+ insn->bits3.dp_write_gen5.msg_length = msg_length;
+ insn->bits3.dp_write_gen5.end_of_thread = end_of_thread;
+ insn->bits2.send_gen5.sfid = BRW_MESSAGE_TARGET_DATAPORT_WRITE;
+ insn->bits2.send_gen5.end_of_thread = end_of_thread;
} else {
insn->bits3.dp_write.binding_table_index = binding_table_index;
insn->bits3.dp_write.msg_control = msg_control;
struct intel_context *intel = &brw->intel;
brw_set_src1(insn, brw_imm_d(0));
- if (intel->is_ironlake) {
- insn->bits3.dp_read_igdng.binding_table_index = binding_table_index;
- insn->bits3.dp_read_igdng.msg_control = msg_control;
- insn->bits3.dp_read_igdng.msg_type = msg_type;
- insn->bits3.dp_read_igdng.target_cache = target_cache;
- insn->bits3.dp_read_igdng.header_present = 1;
- insn->bits3.dp_read_igdng.response_length = response_length;
- insn->bits3.dp_read_igdng.msg_length = msg_length;
- insn->bits3.dp_read_igdng.pad1 = 0;
- insn->bits3.dp_read_igdng.end_of_thread = end_of_thread;
- insn->bits2.send_igdng.sfid = BRW_MESSAGE_TARGET_DATAPORT_READ;
- insn->bits2.send_igdng.end_of_thread = end_of_thread;
+ if (intel->gen == 5) {
+ insn->bits3.dp_read_gen5.binding_table_index = binding_table_index;
+ insn->bits3.dp_read_gen5.msg_control = msg_control;
+ insn->bits3.dp_read_gen5.msg_type = msg_type;
+ insn->bits3.dp_read_gen5.target_cache = target_cache;
+ insn->bits3.dp_read_gen5.header_present = 1;
+ insn->bits3.dp_read_gen5.response_length = response_length;
+ insn->bits3.dp_read_gen5.msg_length = msg_length;
+ insn->bits3.dp_read_gen5.pad1 = 0;
+ insn->bits3.dp_read_gen5.end_of_thread = end_of_thread;
+ insn->bits2.send_gen5.sfid = BRW_MESSAGE_TARGET_DATAPORT_READ;
+ insn->bits2.send_gen5.end_of_thread = end_of_thread;
} else {
insn->bits3.dp_read.binding_table_index = binding_table_index; /*0:7*/
insn->bits3.dp_read.msg_control = msg_control; /*8:11*/
assert(eot == 0);
brw_set_src1(insn, brw_imm_d(0));
- if (intel->is_ironlake) {
- insn->bits3.sampler_igdng.binding_table_index = binding_table_index;
- insn->bits3.sampler_igdng.sampler = sampler;
- insn->bits3.sampler_igdng.msg_type = msg_type;
- insn->bits3.sampler_igdng.simd_mode = simd_mode;
- insn->bits3.sampler_igdng.header_present = header_present;
- insn->bits3.sampler_igdng.response_length = response_length;
- insn->bits3.sampler_igdng.msg_length = msg_length;
- insn->bits3.sampler_igdng.end_of_thread = eot;
- insn->bits2.send_igdng.sfid = BRW_MESSAGE_TARGET_SAMPLER;
- insn->bits2.send_igdng.end_of_thread = eot;
+ if (intel->gen == 5) {
+ insn->bits3.sampler_gen5.binding_table_index = binding_table_index;
+ insn->bits3.sampler_gen5.sampler = sampler;
+ insn->bits3.sampler_gen5.msg_type = msg_type;
+ insn->bits3.sampler_gen5.simd_mode = simd_mode;
+ insn->bits3.sampler_gen5.header_present = header_present;
+ insn->bits3.sampler_gen5.response_length = response_length;
+ insn->bits3.sampler_gen5.msg_length = msg_length;
+ insn->bits3.sampler_gen5.end_of_thread = eot;
+ insn->bits2.send_gen5.sfid = BRW_MESSAGE_TARGET_SAMPLER;
+ insn->bits2.send_gen5.end_of_thread = eot;
} else if (intel->is_g4x) {
insn->bits3.sampler_g4x.binding_table_index = binding_table_index;
insn->bits3.sampler_g4x.sampler = sampler;
struct brw_instruction *insn;
GLuint br = 1;
- if (intel->is_ironlake)
+ if (intel->gen == 5)
br = 2;
if (p->single_program_flow) {
struct intel_context *intel = &p->brw->intel;
GLuint br = 1;
- if (intel->is_ironlake)
+ if (intel->gen == 5)
br = 2;
if (p->single_program_flow) {
struct brw_instruction *insn;
GLuint br = 1;
- if (intel->is_ironlake)
+ if (intel->gen == 5)
br = 2;
if (p->single_program_flow)
struct brw_instruction *landing = &p->store[p->nr_insn];
GLuint jmpi = 1;
- if (intel->is_ironlake)
+ if (intel->gen == 5)
jmpi = 2;
assert(jmp_insn->header.opcode == BRW_OPCODE_JMPI);
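All four br/jmpi doublings above exist because Ironlake measures jump targets in 64-bit chunks, so each 128-bit instruction counts as two; a sketch with a hypothetical name:

/* Jump-offset scale factor, per the branches above (covers only the gens
 * this code handles). */
static int
brw_jump_scale_sketch(int gen)
{
   return (gen == 5) ? 2 : 1; /* gen5: two 64-bit chunks per instruction */
}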
*/
c.nr_attrs = brw_count_bits(c.key.attrs);
- if (intel->is_ironlake)
+ if (intel->gen == 5)
c.nr_regs = (c.nr_attrs + 1) / 2 + 3; /* are vertices packed, or reg-aligned? */
else
c.nr_regs = (c.nr_attrs + 1) / 2 + 1; /* are vertices packed, or reg-aligned? */
else
gs.thread4.max_threads = 0;
- if (intel->is_ironlake)
+ if (intel->gen == 5)
gs.thread4.rendering_enable = 1;
if (INTEL_DEBUG & DEBUG_STATS)
if (intel->gen >= 6)
len = 7;
- else if (intel->is_g4x || intel->is_ironlake)
+ else if (intel->is_g4x || intel->gen == 5)
len = 6;
else
len = 5;
OUT_BATCH(0);
OUT_BATCH(0);
- if (intel->is_g4x || intel->is_ironlake || intel->gen >= 6)
+ if (intel->is_g4x || intel->gen >= 5)
OUT_BATCH(0);
if (intel->gen >= 6)
((region->height - 1) << 19));
OUT_BATCH(0);
- if (intel->is_g4x || intel->is_ironlake || intel->gen >= 6)
+ if (intel->is_g4x || intel->gen >= 5)
OUT_BATCH(0);
if (intel->gen >= 6)
OUT_BATCH(1); /* Indirect object upper bound */
OUT_BATCH(1); /* Instruction access upper bound */
ADVANCE_BATCH();
- } else if (intel->is_ironlake) {
+ } else if (intel->gen == 5) {
BEGIN_BATCH(8);
OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (8 - 2));
OUT_BATCH(1); /* General state base address */
if (c->key.primitive == SF_UNFILLED_TRIS)
return;
- if (intel->is_ironlake)
+ if (intel->gen == 5)
jmpi = 2;
brw_push_insn_state(p);
if (c->key.primitive == SF_UNFILLED_TRIS)
return;
- if (intel->is_ironlake)
+ if (intel->gen == 5)
jmpi = 2;
brw_push_insn_state(p);
sf.thread3.dispatch_grf_start_reg = 3;
- if (intel->is_ironlake)
+ if (intel->gen == 5)
sf.thread3.urb_entry_read_offset = 3;
else
sf.thread3.urb_entry_read_offset = 1;
/* Each SF thread produces 1 PUE, and there can be up to 24 (Pre-Ironlake) or
* 48 (Ironlake) threads.
*/
- if (intel->is_ironlake)
+ if (intel->gen == 5)
chipset_max_threads = 48;
else
chipset_max_threads = 24;
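The cap matters because each in-flight SF thread needs a URB entry to hold its PUE; a sketch of the resulting limit (the helper name and the MIN step are my illustration, not shown in this hunk):

static unsigned
sf_max_threads_sketch(unsigned nr_sf_entries, int gen)
{
   unsigned chipset_max = (gen == 5) ? 48 : 24;
   return (nr_sf_entries < chipset_max) ? nr_sf_entries : chipset_max;
}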
struct
{
GLuint pad0:8;
- GLuint rendering_enable:1; /* for IGDNG */
+ GLuint rendering_enable:1; /* for Ironlake */
GLuint pad4:1;
GLuint stats_enable:1;
GLuint nr_urb_entries:7;
GLfloat global_depth_offset_constant;
GLfloat global_depth_offset_scale;
- /* for IGDNG only */
+ /* for Ironlake only */
struct {
GLuint pad0:1;
GLuint grf_reg_count_1:3;
GLuint end_of_thread:1;
GLuint pad1:1;
GLuint sfid:4;
- } send_igdng; /* for IGDNG only */
+ } send_gen5; /* for Ironlake only */
} bits2;
GLuint msg_length:4;
GLuint pad1:2;
GLuint end_of_thread:1;
- } math_igdng;
+ } math_gen5;
struct {
GLuint binding_table_index:8;
GLuint msg_length:4;
GLuint pad1:2;
GLuint end_of_thread:1;
- } sampler_igdng;
+ } sampler_gen5;
struct brw_urb_immediate urb;
GLuint msg_length:4;
GLuint pad1:2;
GLuint end_of_thread:1;
- } urb_igdng;
+ } urb_gen5;
struct {
GLuint binding_table_index:8;
GLuint msg_length:4;
GLuint pad1:2;
GLuint end_of_thread:1;
- } dp_read_igdng;
+ } dp_read_gen5;
struct {
GLuint binding_table_index:8;
GLuint msg_length:4;
GLuint pad1:2;
GLuint end_of_thread:1;
- } dp_write_igdng;
+ } dp_write_gen5;
struct {
GLuint pad:16;
GLuint msg_length:4;
GLuint pad1:2;
GLuint end_of_thread:1;
- } generic_igdng;
+ } generic_gen5;
GLint d;
GLuint ud;
switch (mt->target) {
case GL_TEXTURE_CUBE_MAP:
- if (intel->is_ironlake) {
+ if (intel->gen == 5) {
GLuint align_h = 2, align_w = 4;
GLuint level;
GLuint x = 0;
brw->urb.constrained = 0;
- if (intel->is_ironlake) {
+ if (intel->gen == 5) {
brw->urb.nr_vs_entries = 128;
brw->urb.nr_sf_entries = 48;
if (check_urb_layout(brw)) {
if (intel->gen >= 6)
mrf = 6;
- else if (intel->is_ironlake)
+ else if (intel->gen == 5)
mrf = 8;
else
mrf = 4;
if (intel->gen >= 6)
c->prog_data.urb_entry_size = (attributes_in_vue + 4 + 7) / 8;
- else if (intel->is_ironlake)
+ else if (intel->gen == 5)
c->prog_data.urb_entry_size = (attributes_in_vue + 6 + 3) / 4;
else
c->prog_data.urb_entry_size = (attributes_in_vue + 2 + 3) / 4;
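A worked example of the three sizings above, for attributes_in_vue = 8 (a standalone sketch; the added constants appear to cover the per-generation VUE header and the divisor rounds to the allocation granularity):

#include <assert.h>

static unsigned
urb_entry_size_sketch(unsigned attrs, int gen)
{
   if (gen >= 6)
      return (attrs + 4 + 7) / 8;
   if (gen == 5)
      return (attrs + 6 + 3) / 4;
   return (attrs + 2 + 3) / 4;
}

int main(void)
{
   assert(urb_entry_size_sketch(8, 6) == 2); /* (8+4+7)/8 = 2 */
   assert(urb_entry_size_sketch(8, 5) == 4); /* (8+6+3)/4 = 4 */
   assert(urb_entry_size_sketch(8, 4) == 3); /* (8+2+3)/4 = 3 */
   return 0;
}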
brw_MOV(p, offset(m0, 2), pos);
brw_MOV(p, offset(m0, 5), pos);
len_vertex_header = 4;
- } else if (intel->is_ironlake) {
+ } else if (intel->gen == 5) {
/* There are 20 DWs (D0-D19) in VUE header on Ironlake:
* dword 0-3 (m1) of the header is indices, point width, clip flags.
* dword 4-7 (m2) is the ndc position (set above)
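For reference, the header layout described in that comment as a sketch struct (only the dwords this hunk documents; the rest of the 20-DW header is left unspecified rather than guessed):

struct ilk_vue_header_sketch {
   unsigned dw0_3[4];   /* m1: indices, point width, clip flags */
   float    dw4_7[4];   /* m2: NDC position */
   unsigned dw8_19[12]; /* remainder of the 20-DW Ironlake header */
};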
loop_depth--;
- if (intel->is_ironlake)
+ if (intel->gen == 5)
br = 2;
inst0 = inst1 = brw_WHILE(p, loop_inst[loop_depth]);
*/
vs.thread1.single_program_flow = 0;
- if (intel->is_ironlake)
+ if (intel->gen == 5)
vs.thread1.binding_table_entry_count = 0; /* hardware requirement */
else
vs.thread1.binding_table_entry_count = key->nr_surfaces;
vs.thread3.urb_entry_read_offset = 0;
vs.thread3.const_urb_entry_read_offset = key->curbe_offset * 2;
- if (intel->is_ironlake) {
+ if (intel->gen == 5) {
switch (key->nr_urb_entries) {
case 8:
case 12:
/* No samplers for ARB_vp programs:
*/
- /* It has to be set to 0 for IGDNG
+ /* It has to be set to 0 for Ironlake
*/
vs.vs5.sampler_count = 0;
}
/* Pre-Ironlake, the 8-wide sampler always took u,v,r. */
- if (!intel->is_ironlake && c->dispatch_width == 8)
+ if (intel->gen < 5 && c->dispatch_width == 8)
nr_texcoords = 3;
/* For shadow comparisons, we have to supply u,v,r. */
/* Fill in the shadow comparison reference value. */
if (shadow) {
- if (intel->is_ironlake) {
+ if (intel->gen == 5) {
/* Fill in the cube map array index value. */
brw_MOV(p, brw_message_reg(cur_mrf), brw_imm_f(0));
cur_mrf += mrf_per_channel;
cur_mrf += mrf_per_channel;
}
- if (intel->is_ironlake) {
+ if (intel->gen == 5) {
if (shadow)
- msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_COMPARE_IGDNG;
+ msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_COMPARE_GEN5;
else
- msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_IGDNG;
+ msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_GEN5;
} else {
/* Note that G45 and older determines shadow compare and dispatch width
* from message length for most messages.
* undefined, and trust the execution mask to keep the undefined pixels
* from mattering.
*/
- if (c->dispatch_width == 16 || !intel->is_ironlake) {
- if (intel->is_ironlake)
- msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_BIAS_IGDNG;
+ if (c->dispatch_width == 16 || intel->gen < 5) {
+ if (intel->gen == 5)
+ msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_BIAS_GEN5;
else
msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_BIAS;
mrf_per_channel = 2;
dst_retyped = retype(vec16(dst[0]), BRW_REGISTER_TYPE_UW);
response_length = 8;
} else {
- msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_BIAS_IGDNG;
+ msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_BIAS_GEN5;
mrf_per_channel = 1;
dst_retyped = retype(vec8(dst[0]), BRW_REGISTER_TYPE_UW);
response_length = 4;
struct brw_instruction *inst0, *inst1;
GLuint br = 1;
- if (intel->is_ironlake)
+ if (intel->gen == 5)
br = 2;
assert(loop_depth > 0);
wm.thread1.depth_coef_urb_read_offset = 1;
wm.thread1.floating_point_mode = BRW_FLOATING_POINT_NON_IEEE_754;
- if (intel->is_ironlake)
+ if (intel->gen == 5)
wm.thread1.binding_table_entry_count = 0; /* hardware requirement */
else
wm.thread1.binding_table_entry_count = key->nr_surfaces;
wm.thread3.const_urb_entry_read_length = key->curb_entry_read_length;
wm.thread3.const_urb_entry_read_offset = key->curbe_offset * 2;
- if (intel->is_ironlake)
+ if (intel->gen == 5)
wm.wm4.sampler_count = 0; /* hardware requirement */
else
wm.wm4.sampler_count = (key->sampler_count + 1) / 4;
#define IS_ILD(devid) (devid == PCI_CHIP_ILD_G)
#define IS_ILM(devid) (devid == PCI_CHIP_ILM_G)
-#define IS_IGDNG(devid) (IS_ILD(devid) || IS_ILM(devid))
+#define IS_GEN5(devid) (IS_ILD(devid) || IS_ILM(devid))
#define IS_915(devid) (devid == PCI_CHIP_I915_G || \
devid == PCI_CHIP_E7221_G || \
#define IS_965(devid) (IS_GEN4(devid) || \
IS_G4X(devid) || \
- IS_IGDNG(devid) || \
+ IS_GEN5(devid) || \
IS_GEN6(devid))
#define IS_9XX(devid) (IS_915(devid) || \
chipset = "Intel(R) B43";
break;
case PCI_CHIP_ILD_G:
- chipset = "Intel(R) IGDNG_D";
+ chipset = "Intel(R) Ironlake Desktop";
break;
case PCI_CHIP_ILM_G:
- chipset = "Intel(R) IGDNG_M";
+ chipset = "Intel(R) Ironlake Mobile";
break;
default:
chipset = "Unknown Intel Chipset";
intel->gen = 6;
intel->needs_ff_sync = GL_TRUE;
intel->has_luminance_srgb = GL_TRUE;
+ } else if (IS_GEN5(intel->intelScreen->deviceID)) {
+ intel->gen = 5;
+ intel->needs_ff_sync = GL_TRUE;
+ intel->has_luminance_srgb = GL_TRUE;
} else if (IS_965(intel->intelScreen->deviceID)) {
intel->gen = 4;
+ if (IS_G4X(intel->intelScreen->deviceID)) {
+ intel->has_luminance_srgb = GL_TRUE;
+ intel->is_g4x = GL_TRUE;
+ }
} else if (IS_9XX(intel->intelScreen->deviceID)) {
intel->gen = 3;
if (IS_945(intel->intelScreen->deviceID)) {
intel->is_945 = GL_TRUE;
}
} else {
intel->gen = 2;
}
- if (IS_IGDNG(intel->intelScreen->deviceID)) {
- intel->is_ironlake = GL_TRUE;
- intel->needs_ff_sync = GL_TRUE;
- intel->has_luminance_srgb = GL_TRUE;
- } else if (IS_G4X(intel->intelScreen->deviceID)) {
- intel->has_luminance_srgb = GL_TRUE;
- intel->is_g4x = GL_TRUE;
- }
-
driParseConfigFiles(&intel->optionCache, &intelScreen->optionCache,
sPriv->myNum, (intel->gen >= 4) ? "i965" : "i915");
if (intelScreen->deviceID == PCI_CHIP_I865_G)
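Compressing the detection cascade above into one mapping (an illustrative sketch of the post-patch behavior, not driver code):

/* deviceID class -> gen, per the code above. */
static int
gen_for_device_sketch(int is_gen6, int is_gen5, int is_965, int is_9xx)
{
   if (is_gen6) return 6;
   if (is_gen5) return 5;  /* Ironlake: ILD/ILM ids */
   if (is_965)  return 4;  /* with is_g4x set for G4X parts */
   if (is_9xx)  return 3;  /* with is_945 set for 945-class parts */
   return 2;
}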
*/
int gen;
GLboolean needs_ff_sync;
- GLboolean is_ironlake;
GLboolean is_g4x;
GLboolean is_945;
GLboolean has_luminance_srgb;