if (ir->offset != NULL && ir->op != ir_txf)
inst->texture_offset = brw_texture_offset(ir->offset->as_constant());
- inst->sampler = texunit;
+ inst->sampler = sampler;
if (ir->shadow_comparitor)
inst->shadow_compare = true;
inst->header_present = ir->offset || intel->gen < 5;
inst->base_mrf = 2;
inst->mlen = inst->header_present + 1; /* always at least one */
- inst->sampler = texunit;
+ inst->sampler = sampler;
inst->dst = dst_reg(this, ir->type);
inst->shadow_compare = ir->shadow_comparitor != NULL;
static void
brw_upload_samplers(struct brw_context *brw)
{
struct gl_context *ctx = &brw->intel.ctx;
struct brw_sampler_state *samplers;
- int i;
- brw->sampler.count = 0;
- for (i = 0; i < BRW_MAX_TEX_UNIT; i++) {
- if (ctx->Texture.Unit[i]._ReallyEnabled)
- brw->sampler.count = i + 1;
- }
+ /* BRW_NEW_VERTEX_PROGRAM and BRW_NEW_FRAGMENT_PROGRAM */
+ struct gl_program *vs = (struct gl_program *) brw->vertex_program;
+ struct gl_program *fs = (struct gl_program *) brw->fragment_program;
+
+ GLbitfield SamplersUsed = vs->SamplersUsed | fs->SamplersUsed;
+
+ brw->sampler.count = _mesa_bitcount(SamplersUsed);
if (brw->sampler.count == 0)
return;
32, &brw->sampler.offset);
memset(samplers, 0, brw->sampler.count * sizeof(*samplers));
- for (i = 0; i < brw->sampler.count; i++) {
- if (ctx->Texture.Unit[i]._ReallyEnabled)
- brw_update_sampler_state(brw, i, i, &samplers[i]);
+ for (unsigned s = 0; s < brw->sampler.count; s++) {
+ if (SamplersUsed & (1 << s)) {
+ const unsigned unit = (fs->SamplersUsed & (1 << s)) ?
+ fs->SamplerUnits[s] : vs->SamplerUnits[s];
+ if (ctx->Texture.Unit[unit]._ReallyEnabled)
+ brw_update_sampler_state(brw, unit, s, &samplers[s]);
+ }
}
brw->state.dirty.cache |= CACHE_NEW_SAMPLER;
const struct brw_tracked_state brw_samplers = {
.dirty = {
.mesa = _NEW_TEXTURE,
- .brw = BRW_NEW_BATCH,
+ .brw = BRW_NEW_BATCH |
+ BRW_NEW_VERTEX_PROGRAM |
+ BRW_NEW_FRAGMENT_PROGRAM,
.cache = 0
},
.emit = brw_upload_samplers,
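(Illustration, not part of the patch.) The reworked logic above counts SAMPLER_STATE entries with _mesa_bitcount() over the union of the two stages' SamplersUsed masks, and for each used sampler slot resolves the GL texture unit through that stage's SamplerUnits[], letting the fragment program's mapping win when both stages use the slot. A standalone sketch of that lookup, with mock data standing in for the gl_program fields and __builtin_popcount() standing in for _mesa_bitcount():

#include <stdio.h>

struct mock_prog {
   unsigned SamplersUsed;           /* bit s set => sampler slot s is used */
   unsigned char SamplerUnits[16];  /* sampler slot -> GL texture unit     */
};

int main(void)
{
   struct mock_prog vs = { .SamplersUsed = 0x1, .SamplerUnits = { 2 } };
   struct mock_prog fs = { .SamplersUsed = 0x3, .SamplerUnits = { 2, 7 } };

   unsigned used = vs.SamplersUsed | fs.SamplersUsed;
   unsigned count = __builtin_popcount(used);        /* brw->sampler.count */

   for (unsigned s = 0; s < count; s++) {
      if (!(used & (1u << s)))
         continue;
      unsigned unit = (fs.SamplersUsed & (1u << s)) ? fs.SamplerUnits[s]
                                                    : vs.SamplerUnits[s];
      printf("SAMPLER_STATE %u <- texture unit %u\n", s, unit);
   }
   return 0;
}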
static void
brw_update_texture_surfaces(struct brw_context *brw)
{
- struct gl_context *ctx = &brw->intel.ctx;
+ struct intel_context *intel = &brw->intel;
+ struct gl_context *ctx = &intel->ctx;
+
+ /* BRW_NEW_VERTEX_PROGRAM and BRW_NEW_FRAGMENT_PROGRAM:
+ * Unfortunately, we're stuck using the gl_program structs until the
+ * ARB_fragment_program front-end gets converted to GLSL IR. These
+ * have the downside that SamplerUnits is split and only contains the
+ * mappings for samplers active in that stage.
+ */
+ struct gl_program *vs = (struct gl_program *) brw->vertex_program;
+ struct gl_program *fs = (struct gl_program *) brw->fragment_program;
- for (unsigned i = 0; i < BRW_MAX_TEX_UNIT; i++) {
- const struct gl_texture_unit *texUnit = &ctx->Texture.Unit[i];
- const GLuint surf = SURF_INDEX_TEXTURE(i);
+ unsigned num_samplers = _mesa_bitcount(vs->SamplersUsed | fs->SamplersUsed);
- /* _NEW_TEXTURE */
- if (texUnit->_ReallyEnabled) {
- brw->intel.vtbl.update_texture_surface(ctx, i, brw->wm.surf_offset, surf);
- } else {
- brw->wm.surf_offset[surf] = 0;
+ for (unsigned s = 0; s < num_samplers; s++) {
+ brw->vs.surf_offset[SURF_INDEX_VS_TEXTURE(s)] = 0;
+ brw->wm.surf_offset[SURF_INDEX_TEXTURE(s)] = 0;
+
+ if (vs->SamplersUsed & (1 << s)) {
+ const unsigned unit = vs->SamplerUnits[s];
+
+ /* _NEW_TEXTURE */
+ if (ctx->Texture.Unit[unit]._ReallyEnabled) {
+ intel->vtbl.update_texture_surface(ctx, unit,
+ brw->vs.surf_offset,
+ SURF_INDEX_VS_TEXTURE(s));
+ }
}
- /* For now, just mirror the texture setup to the VS slots. */
- brw->vs.surf_offset[SURF_INDEX_VS_TEXTURE(i)] =
- brw->wm.surf_offset[surf];
+ if (fs->SamplersUsed & (1 << s)) {
+ const unsigned unit = fs->SamplerUnits[s];
+
+ /* _NEW_TEXTURE */
+ if (ctx->Texture.Unit[unit]._ReallyEnabled) {
+ intel->vtbl.update_texture_surface(ctx, unit,
+ brw->wm.surf_offset,
+ SURF_INDEX_TEXTURE(s));
+ }
+ }
}
brw->state.dirty.brw |= BRW_NEW_SURFACES;
const struct brw_tracked_state brw_texture_surfaces = {
.dirty = {
.mesa = _NEW_TEXTURE,
- .brw = BRW_NEW_BATCH,
+ .brw = BRW_NEW_BATCH |
+ BRW_NEW_VERTEX_PROGRAM |
+ BRW_NEW_FRAGMENT_PROGRAM,
.cache = 0
},
.emit = brw_update_texture_surfaces,
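(Illustration, not part of the patch.) With this hunk the VS and FS get separate texture binding-table entries, each indexed by sampler slot rather than by GL texture unit: SURF_INDEX_VS_TEXTURE(s) is filled from vs->SamplerUnits and SURF_INDEX_TEXTURE(s) from fs->SamplerUnits, and a slot used by only one stage leaves the other stage's entry at 0. A rough standalone sketch of that flow, with mock_update_surface() as a hypothetical stand-in for intel->vtbl.update_texture_surface():

#include <stdio.h>

#define MAX_SLOTS 16

struct mock_prog {
   unsigned SamplersUsed;
   unsigned char SamplerUnits[MAX_SLOTS];
};

/* Hypothetical stand-in for intel->vtbl.update_texture_surface(): just
 * record a non-zero surface-state offset for the given table slot.
 */
static void mock_update_surface(unsigned *surf_offset, unsigned slot)
{
   surf_offset[slot] = 0x1000 + 0x40 * slot;
}

int main(void)
{
   /* VS samples one texture (unit 3); FS samples two (units 3 and 7). */
   struct mock_prog vs = { .SamplersUsed = 0x1, .SamplerUnits = { 3 } };
   struct mock_prog fs = { .SamplersUsed = 0x3, .SamplerUnits = { 3, 7 } };

   unsigned vs_surf[MAX_SLOTS] = { 0 };   /* brw->vs.surf_offset */
   unsigned wm_surf[MAX_SLOTS] = { 0 };   /* brw->wm.surf_offset */

   unsigned num = __builtin_popcount(vs.SamplersUsed | fs.SamplersUsed);

   for (unsigned s = 0; s < num; s++) {
      if (vs.SamplersUsed & (1u << s))
         mock_update_surface(vs_surf, s);  /* SURF_INDEX_VS_TEXTURE(s) */
      if (fs.SamplersUsed & (1u << s))
         mock_update_surface(wm_surf, s);  /* SURF_INDEX_TEXTURE(s)    */
   }

   for (unsigned s = 0; s < num; s++)
      printf("slot %u: vs_surf=0x%x wm_surf=0x%x\n", s, vs_surf[s], wm_surf[s]);
   return 0;
}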
static void
gen7_upload_samplers(struct brw_context *brw)
{
struct gl_context *ctx = &brw->intel.ctx;
struct gen7_sampler_state *samplers;
- int i;
- brw->sampler.count = 0;
- for (i = 0; i < BRW_MAX_TEX_UNIT; i++) {
- if (ctx->Texture.Unit[i]._ReallyEnabled)
- brw->sampler.count = i + 1;
- }
+ /* BRW_NEW_VERTEX_PROGRAM and BRW_NEW_FRAGMENT_PROGRAM */
+ struct gl_program *vs = (struct gl_program *) brw->vertex_program;
+ struct gl_program *fs = (struct gl_program *) brw->fragment_program;
+
+ GLbitfield SamplersUsed = vs->SamplersUsed | fs->SamplersUsed;
+
+ brw->sampler.count = _mesa_bitcount(SamplersUsed);
if (brw->sampler.count == 0)
return;
32, &brw->sampler.offset);
memset(samplers, 0, brw->sampler.count * sizeof(*samplers));
- for (i = 0; i < brw->sampler.count; i++) {
- if (ctx->Texture.Unit[i]._ReallyEnabled)
- gen7_update_sampler_state(brw, i, i, &samplers[i]);
+ for (unsigned s = 0; s < brw->sampler.count; s++) {
+ if (SamplersUsed & (1 << s)) {
+ const unsigned unit = (fs->SamplersUsed & (1 << s)) ?
+ fs->SamplerUnits[s] : vs->SamplerUnits[s];
+ if (ctx->Texture.Unit[unit]._ReallyEnabled)
+ gen7_update_sampler_state(brw, unit, s, &samplers[s]);
+ }
}
brw->state.dirty.cache |= CACHE_NEW_SAMPLER;
const struct brw_tracked_state gen7_samplers = {
.dirty = {
.mesa = _NEW_TEXTURE,
- .brw = BRW_NEW_BATCH,
+ .brw = BRW_NEW_BATCH |
+ BRW_NEW_VERTEX_PROGRAM |
+ BRW_NEW_FRAGMENT_PROGRAM,
.cache = 0
},
.emit = gen7_upload_samplers,
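(Illustration, not part of the patch.) The BRW_NEW_VERTEX_PROGRAM / BRW_NEW_FRAGMENT_PROGRAM bits added to the three atoms above are needed because the emit functions now read brw->vertex_program and brw->fragment_program, so they must re-run when either program changes, not only on _NEW_TEXTURE or a new batch. A hedged sketch of the gating idea, with check_dirty() as a hypothetical simplification of the driver's state-upload check rather than its exact code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct state_flags {
   uint32_t mesa;    /* _NEW_* bits      */
   uint32_t brw;     /* BRW_NEW_* bits   */
   uint32_t cache;   /* CACHE_NEW_* bits */
};

/* Hypothetical: an atom is (re)emitted whenever any of its declared dirty
 * bits intersects the currently dirty state.
 */
static bool check_dirty(const struct state_flags *dirty,
                        const struct state_flags *atom)
{
   return (dirty->mesa & atom->mesa) ||
          (dirty->brw & atom->brw) ||
          (dirty->cache & atom->cache);
}

int main(void)
{
   /* Made-up bit values, for illustration only. */
   const uint32_t NEW_TEXTURE = 1u << 0;
   const uint32_t NEW_BATCH = 1u << 0, NEW_FRAGMENT_PROGRAM = 1u << 1;

   struct state_flags samplers_atom = {
      .mesa = NEW_TEXTURE,
      .brw = NEW_BATCH | NEW_FRAGMENT_PROGRAM,
   };

   /* Only the fragment program changed: the atom still re-runs. */
   struct state_flags dirty = { .brw = NEW_FRAGMENT_PROGRAM };
   printf("re-emit samplers: %d\n", check_dirty(&dirty, &samplers_atom));
   return 0;
}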