Saves ~480 bytes of code.
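The shape of the change, for reference: each BRW_IS_IGDNG()/IS_IGDNG() call site re-derived the chipset from the PCI device ID, and the clip/GS compile structs additionally carried their own need_ff_sync copies of the same fact. The checks are now done once at context creation and cached as booleans on struct intel_context. A minimal sketch of the before/after idiom, condensed from the hunks below:

   /* Before: every call site expands a macro that inspects the device ID,
    * and each compiler struct caches its own derived flag.
    */
   if (BRW_IS_IGDNG(brw))
      c.nr_regs = (c.nr_attrs + 1) / 2 + 3;
   c.need_ff_sync = BRW_IS_IGDNG(brw);

   /* After: check the device ID once, when the context is created... */
   if (IS_IGDNG(intel->intelScreen->deviceID)) {
      intel->is_ironlake = GL_TRUE;
      intel->needs_ff_sync = GL_TRUE;
   }

   /* ...and test the cached flags everywhere else. */
   struct intel_context *intel = &brw->intel;
   if (intel->is_ironlake)
      c.nr_regs = (c.nr_attrs + 1) / 2 + 3;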
static void compile_clip_prog( struct brw_context *brw,
struct brw_clip_prog_key *key )
{
+ struct intel_context *intel = &brw->intel;
struct brw_clip_compile c;
const GLuint *program;
GLuint program_size;
c.func.single_program_flow = 1;
c.key = *key;
- c.need_ff_sync = BRW_IS_IGDNG(brw);
/* Need to locate the two positions present in vertex + header.
* These are currently hardcoded:
*/
c.header_position_offset = ATTR_SIZE;
- if (BRW_IS_IGDNG(brw))
+ if (intel->is_ironlake)
delta = 3 * REG_SIZE;
else
delta = REG_SIZE;
c.nr_attrs = brw_count_bits(c.key.attrs);
- if (BRW_IS_IGDNG(brw))
+ if (intel->is_ironlake)
c.nr_regs = (c.nr_attrs + 1) / 2 + 3; /* are vertices packed, or reg-aligned? */
else
c.nr_regs = (c.nr_attrs + 1) / 2 + 1; /* are vertices packed, or reg-aligned? */
*/
static void upload_clip_prog(struct brw_context *brw)
{
- GLcontext *ctx = &brw->intel.ctx;
+ struct intel_context *intel = &brw->intel;
+ GLcontext *ctx = &intel->ctx;
struct brw_clip_prog_key key;
memset(&key, 0, sizeof(key));
/* _NEW_TRANSFORM */
key.nr_userclip = brw_count_bits(ctx->Transform.ClipPlanesEnabled);
- if (BRW_IS_IGDNG(brw))
+ if (intel->is_ironlake)
key.clip_mode = BRW_CLIPMODE_KERNEL_CLIP;
else
key.clip_mode = BRW_CLIPMODE_NORMAL;
GLuint header_position_offset;
GLuint offset[VERT_ATTRIB_MAX];
- GLboolean need_ff_sync;
};
#define ATTR_SIZE (4*4)
static void brw_clip_line_alloc_regs( struct brw_clip_compile *c )
{
+ struct intel_context *intel = &c->func.brw->intel;
GLuint i = 0,j;
/* Register usage is static, precompute here:
i++;
}
- if (c->need_ff_sync) {
+ if (intel->needs_ff_sync) {
c->reg.ff_sync = retype(brw_vec1_grf(i, 0), BRW_REGISTER_TYPE_UD);
i++;
}
clip_unit_create_from_key(struct brw_context *brw,
struct brw_clip_unit_key *key)
{
+ struct intel_context *intel = &brw->intel;
struct brw_clip_unit_state clip;
dri_bo *bo;
- /* Although up to 16 concurrent Clip threads are allowed on IGDNG,
+ /* Although up to 16 concurrent Clip threads are allowed on Ironlake,
* only 2 threads can output VUEs at a time.
*/
- if (BRW_IS_IGDNG(brw))
+ if (intel->is_ironlake)
clip.thread4.max_threads = 16 - 1;
else
clip.thread4.max_threads = 2 - 1;
void brw_clip_tri_alloc_regs( struct brw_clip_compile *c,
GLuint nr_verts )
{
+ struct intel_context *intel = &c->func.brw->intel;
GLuint i = 0,j;
/* Register usage is static, precompute here:
for (j = 0; j < 3; j++) {
GLuint delta = c->nr_attrs*16 + 32;
- if (BRW_IS_IGDNG(c->func.brw))
+ if (intel->is_ironlake)
delta = c->nr_attrs * 16 + 32 * 3;
brw_MOV(&c->func, byte_offset(c->reg.vertex[j], delta), brw_imm_f(0));
i++;
}
- if (c->need_ff_sync) {
+ if (intel->needs_ff_sync) {
c->reg.ff_sync = retype(brw_vec1_grf(i, 0), BRW_REGISTER_TYPE_UD);
i++;
}
void brw_clip_ff_sync(struct brw_clip_compile *c)
{
- if (c->need_ff_sync) {
+ struct intel_context *intel = &c->func.brw->intel;
+
+ if (intel->needs_ff_sync) {
struct brw_compile *p = &c->func;
struct brw_instruction *need_ff_sync;
void brw_clip_init_ff_sync(struct brw_clip_compile *c)
{
- if (c->need_ff_sync) {
+ struct intel_context *intel = &c->func.brw->intel;
+
+ if (intel->needs_ff_sync) {
struct brw_compile *p = &c->func;
brw_MOV(p, c->reg.ff_sync, brw_imm_ud(0));
OUT_RELOC(input->bo,
I915_GEM_DOMAIN_VERTEX, 0,
input->offset);
- if (BRW_IS_IGDNG(brw)) {
+ if (intel->is_ironlake) {
OUT_RELOC(input->bo,
I915_GEM_DOMAIN_VERTEX, 0,
input->bo->size - 1);
(format << BRW_VE0_FORMAT_SHIFT) |
(0 << BRW_VE0_SRC_OFFSET_SHIFT));
- if (BRW_IS_IGDNG(brw))
+ if (intel->is_ironlake)
OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
(comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
(comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
GLboolean saturate,
GLuint dataType )
{
+ struct intel_context *intel = &brw->intel;
brw_set_src1(insn, brw_imm_d(0));
- if (BRW_IS_IGDNG(brw)) {
+ if (intel->is_ironlake) {
insn->bits3.math_igdng.function = function;
insn->bits3.math_igdng.int_type = integer_type;
insn->bits3.math_igdng.precision = low_precision;
GLuint offset,
GLuint swizzle_control )
{
+ struct intel_context *intel = &brw->intel;
brw_set_src1(insn, brw_imm_d(0));
- if (BRW_IS_IGDNG(brw)) {
+ if (intel->is_ironlake) {
insn->bits3.urb_igdng.opcode = 0; /* ? */
insn->bits3.urb_igdng.offset = offset;
insn->bits3.urb_igdng.swizzle_control = swizzle_control;
GLuint response_length,
GLuint end_of_thread )
{
+ struct intel_context *intel = &brw->intel;
brw_set_src1(insn, brw_imm_d(0));
- if (BRW_IS_IGDNG(brw)) {
+ if (intel->is_ironlake) {
insn->bits3.dp_write_igdng.binding_table_index = binding_table_index;
insn->bits3.dp_write_igdng.msg_control = msg_control;
insn->bits3.dp_write_igdng.pixel_scoreboard_clear = pixel_scoreboard_clear;
GLuint response_length,
GLuint end_of_thread )
{
+ struct intel_context *intel = &brw->intel;
brw_set_src1(insn, brw_imm_d(0));
- if (BRW_IS_IGDNG(brw)) {
+ if (intel->is_ironlake) {
insn->bits3.dp_read_igdng.binding_table_index = binding_table_index;
insn->bits3.dp_read_igdng.msg_control = msg_control;
insn->bits3.dp_read_igdng.msg_type = msg_type;
GLuint header_present,
GLuint simd_mode)
{
+ struct intel_context *intel = &brw->intel;
assert(eot == 0);
brw_set_src1(insn, brw_imm_d(0));
- if (BRW_IS_IGDNG(brw)) {
+ if (intel->is_ironlake) {
insn->bits3.sampler_igdng.binding_table_index = binding_table_index;
insn->bits3.sampler_igdng.sampler = sampler;
insn->bits3.sampler_igdng.msg_type = msg_type;
struct brw_instruction *brw_ELSE(struct brw_compile *p,
struct brw_instruction *if_insn)
{
+ struct intel_context *intel = &p->brw->intel;
struct brw_instruction *insn;
GLuint br = 1;
- if (BRW_IS_IGDNG(p->brw))
+ if (intel->is_ironlake)
br = 2;
if (p->single_program_flow) {
void brw_ENDIF(struct brw_compile *p,
struct brw_instruction *patch_insn)
{
+ struct intel_context *intel = &p->brw->intel;
GLuint br = 1;
- if (BRW_IS_IGDNG(p->brw))
+ if (intel->is_ironlake)
br = 2;
if (p->single_program_flow) {
struct brw_instruction *brw_WHILE(struct brw_compile *p,
struct brw_instruction *do_insn)
{
+ struct intel_context *intel = &p->brw->intel;
struct brw_instruction *insn;
GLuint br = 1;
- if (BRW_IS_IGDNG(p->brw))
+ if (intel->is_ironlake)
br = 2;
if (p->single_program_flow)
void brw_land_fwd_jump(struct brw_compile *p,
struct brw_instruction *jmp_insn)
{
+ struct intel_context *intel = &p->brw->intel;
struct brw_instruction *landing = &p->store[p->nr_insn];
GLuint jmpi = 1;
- if (BRW_IS_IGDNG(p->brw))
+ if (intel->is_ironlake)
jmpi = 2;
assert(jmp_insn->header.opcode == BRW_OPCODE_JMPI);
static void compile_gs_prog( struct brw_context *brw,
struct brw_gs_prog_key *key )
{
+ struct intel_context *intel = &brw->intel;
struct brw_gs_compile c;
const GLuint *program;
GLuint program_size;
memset(&c, 0, sizeof(c));
c.key = *key;
- c.need_ff_sync = BRW_IS_IGDNG(brw);
/* Need to locate the two positions present in vertex + header.
* These are currently hardcoded:
*/
c.nr_attrs = brw_count_bits(c.key.attrs);
- if (BRW_IS_IGDNG(brw))
+ if (intel->is_ironlake)
c.nr_regs = (c.nr_attrs + 1) / 2 + 3; /* are vertices packed, or reg-aligned? */
else
c.nr_regs = (c.nr_attrs + 1) / 2 + 1; /* are vertices packed, or reg-aligned? */
GLuint nr_attrs;
GLuint nr_regs;
GLuint nr_bytes;
- GLboolean need_ff_sync;
};
#define ATTR_SIZE (4*4)
void brw_gs_quads( struct brw_gs_compile *c, struct brw_gs_prog_key *key )
{
+ struct intel_context *intel = &c->func.brw->intel;
+
brw_gs_alloc_regs(c, 4);
/* Use polygons for correct edgeflag behaviour. Note that vertex 3
* is the PV for quads, but vertex 0 for polygons:
*/
- if (c->need_ff_sync)
+ if (intel->needs_ff_sync)
brw_gs_ff_sync(c, 1);
if (key->pv_first) {
brw_gs_emit_vue(c, c->reg.vertex[0], 0, ((_3DPRIM_POLYGON << 2) | R02_PRIM_START));
void brw_gs_quad_strip( struct brw_gs_compile *c, struct brw_gs_prog_key *key )
{
+ struct intel_context *intel = &c->func.brw->intel;
+
brw_gs_alloc_regs(c, 4);
- if (c->need_ff_sync)
+ if (intel->needs_ff_sync)
brw_gs_ff_sync(c, 1);
if (key->pv_first) {
brw_gs_emit_vue(c, c->reg.vertex[0], 0, ((_3DPRIM_POLYGON << 2) | R02_PRIM_START));
void brw_gs_tris( struct brw_gs_compile *c )
{
+ struct intel_context *intel = &c->func.brw->intel;
+
brw_gs_alloc_regs(c, 3);
- if (c->need_ff_sync)
+ if (intel->needs_ff_sync)
brw_gs_ff_sync(c, 1);
brw_gs_emit_vue(c, c->reg.vertex[0], 0, ((_3DPRIM_TRILIST << 2) | R02_PRIM_START));
brw_gs_emit_vue(c, c->reg.vertex[1], 0, (_3DPRIM_TRILIST << 2));
void brw_gs_lines( struct brw_gs_compile *c )
{
+ struct intel_context *intel = &c->func.brw->intel;
+
brw_gs_alloc_regs(c, 2);
- if (c->need_ff_sync)
+ if (intel->needs_ff_sync)
brw_gs_ff_sync(c, 1);
brw_gs_emit_vue(c, c->reg.vertex[0], 0, ((_3DPRIM_LINESTRIP << 2) | R02_PRIM_START));
brw_gs_emit_vue(c, c->reg.vertex[1], 1, ((_3DPRIM_LINESTRIP << 2) | R02_PRIM_END));
void brw_gs_points( struct brw_gs_compile *c )
{
+ struct intel_context *intel = &c->func.brw->intel;
+
brw_gs_alloc_regs(c, 1);
- if (c->need_ff_sync)
+ if (intel->needs_ff_sync)
brw_gs_ff_sync(c, 1);
brw_gs_emit_vue(c, c->reg.vertex[0], 1, ((_3DPRIM_POINTLIST << 2) | R02_PRIM_START | R02_PRIM_END));
}
static dri_bo *
gs_unit_create_from_key(struct brw_context *brw, struct brw_gs_unit_key *key)
{
+ struct intel_context *intel = &brw->intel;
struct brw_gs_unit_state gs;
dri_bo *bo;
else
gs.thread4.max_threads = 0;
- if (BRW_IS_IGDNG(brw))
+ if (intel->is_ironlake)
gs.thread4.rendering_enable = 1;
if (INTEL_DEBUG & DEBUG_STATS)
{
struct intel_context *intel = &brw->intel;
struct intel_region *region = brw->state.depth_region;
- unsigned int len = (BRW_IS_G4X(brw) || BRW_IS_IGDNG(brw)) ? 6 : 5;
+ unsigned int len = (BRW_IS_G4X(brw) || intel->is_ironlake) ? 6 : 5;
if (region == NULL) {
BEGIN_BATCH(len, IGNORE_CLIPRECTS);
OUT_BATCH(0);
OUT_BATCH(0);
- if (BRW_IS_G4X(brw) || BRW_IS_IGDNG(brw))
+ if (BRW_IS_G4X(brw) || intel->is_ironlake)
OUT_BATCH(0);
ADVANCE_BATCH();
((region->height - 1) << 19));
OUT_BATCH(0);
- if (BRW_IS_G4X(brw) || BRW_IS_IGDNG(brw))
+ if (BRW_IS_G4X(brw) || intel->is_ironlake)
OUT_BATCH(0);
ADVANCE_BATCH();
/* Output the structure (brw_state_base_address) directly to the
* batchbuffer, so we can emit relocations inline.
*/
- if (BRW_IS_IGDNG(brw)) {
+ if (intel->is_ironlake) {
BEGIN_BATCH(8, IGNORE_CLIPRECTS);
OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (8 - 2));
OUT_BATCH(1); /* General state base address */
static void do_flatshade_triangle( struct brw_sf_compile *c )
{
struct brw_compile *p = &c->func;
+ struct intel_context *intel = &p->brw->intel;
struct brw_reg ip = brw_ip_reg();
GLuint nr = brw_count_bits(c->key.attrs & VERT_RESULT_COLOR_BITS);
GLuint jmpi = 1;
if (c->key.primitive == SF_UNFILLED_TRIS)
return;
- if (BRW_IS_IGDNG(p->brw))
+ if (intel->is_ironlake)
jmpi = 2;
brw_push_insn_state(p);
static void do_flatshade_line( struct brw_sf_compile *c )
{
struct brw_compile *p = &c->func;
+ struct intel_context *intel = &p->brw->intel;
struct brw_reg ip = brw_ip_reg();
GLuint nr = brw_count_bits(c->key.attrs & VERT_RESULT_COLOR_BITS);
GLuint jmpi = 1;
if (c->key.primitive == SF_UNFILLED_TRIS)
return;
- if (BRW_IS_IGDNG(p->brw))
+ if (intel->is_ironlake)
jmpi = 2;
brw_push_insn_state(p);
sf_unit_create_from_key(struct brw_context *brw, struct brw_sf_unit_key *key,
dri_bo **reloc_bufs)
{
+ struct intel_context *intel = &brw->intel;
struct brw_sf_unit_state sf;
dri_bo *bo;
int chipset_max_threads;
sf.thread3.dispatch_grf_start_reg = 3;
- if (BRW_IS_IGDNG(brw))
+ if (intel->is_ironlake)
sf.thread3.urb_entry_read_offset = 3;
else
sf.thread3.urb_entry_read_offset = 1;
sf.thread4.nr_urb_entries = key->nr_urb_entries;
sf.thread4.urb_entry_allocation_size = key->sfsize - 1;
- /* Each SF thread produces 1 PUE, and there can be up to 24(Pre-IGDNG) or
- * 48(IGDNG) threads
+ /* Each SF thread produces 1 PUE, and there can be up to 24 (Pre-Ironlake) or
+ * 48 (Ironlake) threads.
*/
- if (BRW_IS_IGDNG(brw))
+ if (intel->is_ironlake)
chipset_max_threads = 48;
else
chipset_max_threads = 24;
switch (mt->target) {
case GL_TEXTURE_CUBE_MAP:
- if (IS_IGDNG(intel->intelScreen->deviceID)) {
+ if (intel->is_ironlake) {
GLuint align_h = 2, align_w = 4;
GLuint level;
GLuint x = 0;
*/
static void recalculate_urb_fence( struct brw_context *brw )
{
+ struct intel_context *intel = &brw->intel;
GLuint csize = brw->curbe.total_size;
GLuint vsize = brw->vs.prog_data->urb_entry_size;
GLuint sfsize = brw->sf.prog_data->urb_entry_size;
brw->urb.constrained = 0;
- if (BRW_IS_IGDNG(brw)) {
+ if (intel->is_ironlake) {
brw->urb.nr_vs_entries = 128;
brw->urb.nr_sf_entries = 48;
if (check_urb_layout(brw)) {
*/
static void brw_vs_alloc_regs( struct brw_vs_compile *c )
{
+ struct intel_context *intel = &c->func.brw->intel;
GLuint i, reg = 0, mrf;
int attributes_in_vue;
c->first_output = reg;
c->first_overflow_output = 0;
- if (BRW_IS_IGDNG(c->func.brw))
+ if (intel->is_ironlake)
mrf = 8;
else
mrf = 4;
*/
attributes_in_vue = MAX2(c->nr_outputs, c->nr_inputs);
- if (BRW_IS_IGDNG(c->func.brw))
+ if (intel->is_ironlake)
c->prog_data.urb_entry_size = (attributes_in_vue + 6 + 3) / 4;
else
c->prog_data.urb_entry_size = (attributes_in_vue + 2 + 3) / 4;
static void emit_vertex_write( struct brw_vs_compile *c)
{
struct brw_compile *p = &c->func;
+ struct intel_context *intel = &p->brw->intel;
struct brw_reg m0 = brw_message_reg(0);
struct brw_reg pos = c->regs[PROGRAM_OUTPUT][VERT_RESULT_HPOS];
struct brw_reg ndc;
brw_set_access_mode(p, BRW_ALIGN_1);
brw_MOV(p, offset(m0, 2), ndc);
- if (BRW_IS_IGDNG(p->brw)) {
- /* There are 20 DWs (D0-D19) in VUE vertex header on IGDNG */
+ if (intel->is_ironlake) {
+ /* There are 20 DWs (D0-D19) in VUE vertex header on Ironlake */
brw_MOV(p, offset(m0, 3), pos); /* a portion of vertex header */
- /* m4, m5 contain the distances from vertex to the user clip planeXXX.
- * Seems it is useless for us.
+ /* m4, m5 contain the distances from the vertex to the user clip planes
+ * (XXX: these seem to be unused by us).
#define MAX_LOOP_DEPTH 32
struct brw_compile *p = &c->func;
struct brw_context *brw = p->brw;
+ struct intel_context *intel = &brw->intel;
const GLuint nr_insns = c->vp->program.Base.NumInstructions;
GLuint insn, if_depth = 0, loop_depth = 0;
GLuint end_offset = 0;
loop_depth--;
- if (BRW_IS_IGDNG(brw))
+ if (intel->is_ironlake)
br = 2;
inst0 = inst1 = brw_WHILE(p, loop_inst[loop_depth]);
static dri_bo *
vs_unit_create_from_key(struct brw_context *brw, struct brw_vs_unit_key *key)
{
+ struct intel_context *intel = &brw->intel;
struct brw_vs_unit_state vs;
dri_bo *bo;
int chipset_max_threads;
*/
vs.thread1.single_program_flow = 0;
- if (BRW_IS_IGDNG(brw))
+ if (intel->is_ironlake)
vs.thread1.binding_table_entry_count = 0; /* hardware requirement */
else
vs.thread1.binding_table_entry_count = key->nr_surfaces;
vs.thread3.urb_entry_read_offset = 0;
vs.thread3.const_urb_entry_read_offset = key->curbe_offset * 2;
- if (BRW_IS_IGDNG(brw)) {
+ if (intel->is_ironlake) {
switch (key->nr_urb_entries) {
case 8:
case 12:
vs.thread4.urb_entry_allocation_size = key->urb_size - 1;
- if (BRW_IS_IGDNG(brw))
+ if (intel->is_ironlake)
chipset_max_threads = 72;
else if (BRW_IS_G4X(brw))
chipset_max_threads = 32;
GLboolean shadow)
{
struct brw_compile *p = &c->func;
+ struct intel_context *intel = &p->brw->intel;
struct brw_reg dst_retyped;
GLuint cur_mrf = 2, response_length;
GLuint i, nr_texcoords;
}
/* Pre-Ironlake, the 8-wide sampler always took u,v,r. */
- if (!BRW_IS_IGDNG(p->brw) && c->dispatch_width == 8)
+ if (!intel->is_ironlake && c->dispatch_width == 8)
nr_texcoords = 3;
/* For shadow comparisons, we have to supply u,v,r. */
/* Fill in the shadow comparison reference value. */
if (shadow) {
- if (BRW_IS_IGDNG(p->brw)) {
+ if (intel->is_ironlake) {
/* Fill in the cube map array index value. */
brw_MOV(p, brw_message_reg(cur_mrf), brw_imm_f(0));
cur_mrf += mrf_per_channel;
cur_mrf += mrf_per_channel;
}
- if (BRW_IS_IGDNG(p->brw)) {
+ if (intel->is_ironlake) {
if (shadow)
msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_COMPARE_IGDNG;
else
GLuint sampler)
{
struct brw_compile *p = &c->func;
+ struct intel_context *intel = &p->brw->intel;
GLuint msgLength;
GLuint msg_type;
GLuint mrf_per_channel;
* undefined, and trust the execution mask to keep the undefined pixels
* from mattering.
*/
- if (c->dispatch_width == 16 || !BRW_IS_IGDNG(p->brw)) {
- if (BRW_IS_IGDNG(p->brw))
+ if (c->dispatch_width == 16 || !intel->is_ironlake) {
+ if (intel->is_ironlake)
msg_type = BRW_SAMPLER_MESSAGE_SAMPLE_BIAS_IGDNG;
else
msg_type = BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_BIAS;
{
struct brw_compile *p = &c->func;
struct brw_context *brw = p->brw;
+ struct intel_context *intel = &brw->intel;
GLuint nr = 2;
GLuint channel;
brw_push_insn_state(p);
for (channel = 0; channel < 4; channel++) {
- if (c->dispatch_width == 16 && (BRW_IS_G4X(brw) || BRW_IS_IGDNG(brw))) {
+ if (c->dispatch_width == 16 && (BRW_IS_G4X(brw) || intel->is_ironlake)) {
/* By setting the high bit of the MRF register number, we indicate
* that we want COMPR4 mode - instead of doing the usual destination
* + 1 for the second half we get destination + 4.
static void brw_wm_emit_glsl(struct brw_context *brw, struct brw_wm_compile *c)
{
+ struct intel_context *intel = &brw->intel;
#define MAX_IF_DEPTH 32
#define MAX_LOOP_DEPTH 32
struct brw_instruction *if_inst[MAX_IF_DEPTH], *loop_inst[MAX_LOOP_DEPTH];
struct brw_instruction *inst0, *inst1;
GLuint br = 1;
- if (BRW_IS_IGDNG(brw))
+ if (intel->is_ironlake)
br = 2;
loop_depth--;
key->max_threads = 1;
else {
/* WM maximum threads is number of EUs times number of threads per EU. */
- if (BRW_IS_IGDNG(brw))
+ if (intel->is_ironlake)
key->max_threads = 12 * 6;
else if (BRW_IS_G4X(brw))
key->max_threads = 10 * 5;
wm_unit_create_from_key(struct brw_context *brw, struct brw_wm_unit_key *key,
dri_bo **reloc_bufs)
{
+ struct intel_context *intel = &brw->intel;
struct brw_wm_unit_state wm;
dri_bo *bo;
wm.thread1.depth_coef_urb_read_offset = 1;
wm.thread1.floating_point_mode = BRW_FLOATING_POINT_NON_IEEE_754;
- if (BRW_IS_IGDNG(brw))
+ if (intel->is_ironlake)
wm.thread1.binding_table_entry_count = 0; /* hardware requirement */
else
wm.thread1.binding_table_entry_count = key->nr_surfaces;
wm.thread3.const_urb_entry_read_length = key->curb_entry_read_length;
wm.thread3.const_urb_entry_read_offset = key->curbe_offset * 2;
- if (BRW_IS_IGDNG(brw))
+ if (intel->is_ironlake)
wm.wm4.sampler_count = 0; /* hardware requirement */
else
wm.wm4.sampler_count = (key->sampler_count + 1) / 4;
else
intel->gen = 2;
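+ /* Derive chipset-specific flags once, at context creation, instead of
+ * re-checking the PCI ID at every use.
+ */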
+ if (IS_IGDNG(intel->intelScreen->deviceID)) {
+ intel->is_ironlake = GL_TRUE;
+ intel->needs_ff_sync = GL_TRUE;
+ }
+
/* Dri stuff */
intel->hHWContext = driContextPriv->hHWContext;
intel->driFd = sPriv->fd;
* Generation number of the hardware: 2 is 8xx, 3 is 9xx pre-965, 4 is 965.
*/
int gen;
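+ /* Chipset flags cached at context creation: is_ironlake marks IGDNG
+ * hardware; needs_ff_sync is set when the clip and GS threads must
+ * emit FF_SYNC messages (true on Ironlake).
+ */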
+ GLboolean needs_ff_sync;
+ GLboolean is_ironlake;
struct intel_region *front_region;
struct intel_region *back_region;