uint32_t surf_offset[BRW_MAX_SURFACES];
} bind;
+ /** SAMPLER_STATE count and offset into the batchbuffer's state space */
+ struct {
+ GLuint count;
+ uint32_t offset;
+ } sampler;
+
struct {
struct brw_vs_prog_data *prog_data;
int8_t *constant_map; /* variable array following prog_data */
drm_intel_bo *scratch_bo;
- GLuint sampler_count;
- uint32_t sampler_offset;
-
/** Offset in the program cache to the WM program */
uint32_t prog_offset;
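
Note on the new pair of fields: brw->sampler.offset is the byte offset of the first SAMPLER_STATE entry within the current batchbuffer's state space, and brw->sampler.count is the number of contiguous entries that follow it. A minimal sketch of how a given texture unit's entry is addressed, assuming the driver's existing brw_context and brw_sampler_state types (the helper name is hypothetical, not part of the patch; it just restates the pointer arithmetic used by the relocation code further down):

    /* Hypothetical helper (not in the patch): byte offset of texture unit
     * `unit`'s SAMPLER_STATE entry inside the batch BO.  Entries are packed
     * back to back starting at brw->sampler.offset.
     */
    static uint32_t
    sampler_entry_offset(const struct brw_context *brw, GLuint unit)
    {
       return brw->sampler.offset + unit * sizeof(struct brw_sampler_state);
    }
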
extern const struct brw_tracked_state brw_polygon_stipple;
extern const struct brw_tracked_state brw_program_parameters;
extern const struct brw_tracked_state brw_recalculate_urb_fence;
+extern const struct brw_tracked_state brw_samplers;
extern const struct brw_tracked_state brw_sf_prog;
extern const struct brw_tracked_state brw_sf_unit;
extern const struct brw_tracked_state brw_sf_vp;
extern const struct brw_tracked_state brw_vs_unit;
extern const struct brw_tracked_state brw_wm_input_sizes;
extern const struct brw_tracked_state brw_wm_prog;
-extern const struct brw_tracked_state brw_wm_samplers;
extern const struct brw_tracked_state brw_renderbuffer_surfaces;
extern const struct brw_tracked_state brw_texture_surfaces;
extern const struct brw_tracked_state brw_binding_table;
&brw_renderbuffer_surfaces, /* must do before unit */
&brw_texture_surfaces, /* must do before unit */
&brw_binding_table,
- &brw_wm_samplers,
+
+ &brw_samplers,
/* These set up state for brw_psp_urb_cbs */
&brw_wm_unit,
&brw_texture_surfaces, /* must do before unit */
&brw_binding_table,
- &brw_wm_samplers,
+ &brw_samplers,
&gen6_sampler_state,
&gen6_vs_state,
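
For reference, the atoms being reordered in these lists all share the brw_tracked_state shape that the brw_samplers definition further down is initialized with: a set of dirty flags from the three flag spaces plus an emit callback. A simplified sketch inferred from that initializer (the real definition lives in the driver's state headers and may carry extra members):

    /* Simplified sketch of the atom type used in the lists above. */
    struct brw_tracked_state {
       struct {
          GLuint mesa;    /* _NEW_* flags, e.g. _NEW_TEXTURE     */
          GLuint brw;     /* BRW_NEW_* flags, e.g. BRW_NEW_BATCH */
          GLuint cache;   /* CACHE_NEW_* flags                   */
       } dirty;
       void (*emit)(struct brw_context *brw);
    };
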
brw->wm.sdc_offset[unit]) >> 5;
drm_intel_bo_emit_reloc(intel->batch.bo,
- brw->wm.sampler_offset +
+ brw->sampler.offset +
unit * sizeof(struct brw_sampler_state) +
offsetof(struct brw_sampler_state, ss2),
intel->batch.bo, brw->wm.sdc_offset[unit],
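
The relocation above patches the ss2 dword of one SAMPLER_STATE entry, the field that holds the default (border) color pointer: the kernel later rewrites that dword with the target BO's final GPU address plus the given target offset. The location of the patched dword, restating the arithmetic in the call as a sketch:

    /* Offset of the dword being patched, within the batch BO (restated):  */
    uint32_t patched_offset =
       brw->sampler.offset +                       /* start of the table   */
       unit * sizeof(struct brw_sampler_state) +   /* this unit's entry    */
       offsetof(struct brw_sampler_state, ss2);    /* border color pointer */
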
* FIXME: simplify all the different new texture state flags.
*/
static void
-brw_upload_wm_samplers(struct brw_context *brw)
+brw_upload_samplers(struct brw_context *brw)
{
struct gl_context *ctx = &brw->intel.ctx;
struct brw_sampler_state *samplers;
int i;
- brw->wm.sampler_count = 0;
+ brw->sampler.count = 0;
for (i = 0; i < BRW_MAX_TEX_UNIT; i++) {
if (ctx->Texture.Unit[i]._ReallyEnabled)
- brw->wm.sampler_count = i + 1;
+ brw->sampler.count = i + 1;
}
- if (brw->wm.sampler_count == 0)
+ if (brw->sampler.count == 0)
return;
samplers = brw_state_batch(brw, AUB_TRACE_SAMPLER_STATE,
- brw->wm.sampler_count * sizeof(*samplers),
- 32, &brw->wm.sampler_offset);
- memset(samplers, 0, brw->wm.sampler_count * sizeof(*samplers));
+ brw->sampler.count * sizeof(*samplers),
+ 32, &brw->sampler.offset);
+ memset(samplers, 0, brw->sampler.count * sizeof(*samplers));
- for (i = 0; i < brw->wm.sampler_count; i++) {
+ for (i = 0; i < brw->sampler.count; i++) {
if (ctx->Texture.Unit[i]._ReallyEnabled)
brw_update_sampler_state(brw, i, &samplers[i]);
}
brw->state.dirty.cache |= CACHE_NEW_SAMPLER;
}
-const struct brw_tracked_state brw_wm_samplers = {
+const struct brw_tracked_state brw_samplers = {
.dirty = {
.mesa = _NEW_TEXTURE,
.brw = BRW_NEW_BATCH,
.cache = 0
},
- .emit = brw_upload_wm_samplers,
+ .emit = brw_upload_samplers,
};
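
A note on the allocation pattern used here: brw_state_batch() carves the requested number of bytes out of the current batchbuffer's state space, returns a CPU pointer for filling the entries in, and writes the resulting byte offset through its last argument. Only that offset is kept in brw->sampler, which is also why BRW_NEW_BATCH appears in the dirty bits above: after a batch flush the old offset no longer refers to anything and the table is re-uploaded into the new batch. The requested alignment is tied to how the offset gets consumed, sketched below:

    /* Why the 32-byte alignment requested above matters: hardware pointers
     * to this table are stored with the low five bits dropped, e.g. in the
     * gen4 WM unit state further down:
     *
     *    sampler_state_pointer = (batch_bo_gpu_address + brw->sampler.offset) >> 5;
     *
     * so the offset must be a multiple of 32 for the shift to be lossless.
     */
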
wm->wm4.sampler_count = 0; /* hardware requirement */
else {
/* CACHE_NEW_SAMPLER */
- wm->wm4.sampler_count = (brw->wm.sampler_count + 1) / 4;
+ wm->wm4.sampler_count = (brw->sampler.count + 1) / 4;
}
- if (brw->wm.sampler_count) {
+ if (brw->sampler.count) {
/* reloc */
wm->wm4.sampler_state_pointer = (intel->batch.bo->offset +
- brw->wm.sampler_offset) >> 5;
+ brw->sampler.offset) >> 5;
} else {
wm->wm4.sampler_state_pointer = 0;
}
}
/* Emit sampler state relocation */
- if (brw->wm.sampler_count != 0) {
+ if (brw->sampler.count != 0) {
drm_intel_bo_emit_reloc(intel->batch.bo,
brw->wm.state_offset +
offsetof(struct brw_wm_unit_state, wm4),
- intel->batch.bo, (brw->wm.sampler_offset |
+ intel->batch.bo, (brw->sampler.offset |
wm->wm4.stats_enable |
(wm->wm4.sampler_count << 2)),
I915_GEM_DOMAIN_INSTRUCTION, 0);
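
Why the relocation delta is more than just the table offset: the kernel rewrites the whole wm4 dword as (batch BO GPU address + delta), so the low-order fields that share the dword with the pointer have to be folded into the delta or they would be clobbered. The pointer occupies the dword from bit 5 up (matching the >> 5 in the non-relocated path above), and the count field starts at bit 2, hence the << 2. Restated as a sketch:

    /* Delta passed for the wm4 dword (restating the call above):
     *     brw->sampler.offset             SAMPLER_STATE pointer, bits 31:5
     *   | (wm->wm4.sampler_count << 2)    count field, starting at bit 2
     *   | wm->wm4.stats_enable            statistics enable, in the low bits
     */
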
(4 - 2));
OUT_BATCH(0); /* VS */
OUT_BATCH(0); /* GS */
- OUT_BATCH(brw->wm.sampler_offset);
+ OUT_BATCH(brw->sampler.offset);
ADVANCE_BATCH();
}
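
For context on the packet whose tail is emitted above: on gen6 a single sampler-state-pointers command carries pointers for all three shader stages, which is why its body is four dwords and the VS/GS slots are written as zero (only the fragment shader has samplers at this point). Gen7 splits this into per-stage commands; the PS variant appears further down. Dword layout as emitted here, as a sketch:

    /* DW0: packet header | (4 - 2)   -- emitted just above this hunk */
    /* DW1: VS SAMPLER_STATE offset   (0, no VS samplers)             */
    /* DW2: GS SAMPLER_STATE offset   (0)                             */
    /* DW3: PS SAMPLER_STATE offset   (brw->sampler.offset)           */
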
dw2 |= GEN6_WM_FLOATING_POINT_MODE_ALT;
/* CACHE_NEW_SAMPLER */
- dw2 |= (ALIGN(brw->wm.sampler_count, 4) / 4) << GEN6_WM_SAMPLER_COUNT_SHIFT;
+ dw2 |= (ALIGN(brw->sampler.count, 4) / 4) << GEN6_WM_SAMPLER_COUNT_SHIFT;
dw4 |= (brw->wm.prog_data->first_curbe_grf <<
GEN6_WM_DISPATCH_START_GRF_SHIFT_0);
dw4 |= (brw->wm.prog_data->first_curbe_grf_16 <<
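
On the sampler-count encoding used by the gen6 packet above (and the gen7 one below): the hardware field counts samplers in groups of four, so the number of active units is rounded up to a multiple of four and then divided. A few worked values of the expression:

    /* ALIGN(x, 4) rounds x up to the next multiple of 4, so:
     *    ALIGN(1, 4) / 4 == 1      ALIGN(4, 4)  / 4 == 1
     *    ALIGN(5, 4) / 4 == 2      ALIGN(16, 4) / 4 == 4
     */
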
struct gen7_sampler_state *samplers;
int i;
- brw->wm.sampler_count = 0;
+ brw->sampler.count = 0;
for (i = 0; i < BRW_MAX_TEX_UNIT; i++) {
if (ctx->Texture.Unit[i]._ReallyEnabled)
- brw->wm.sampler_count = i + 1;
+ brw->sampler.count = i + 1;
}
- if (brw->wm.sampler_count == 0)
+ if (brw->sampler.count == 0)
return;
samplers = brw_state_batch(brw, AUB_TRACE_SAMPLER_STATE,
- brw->wm.sampler_count * sizeof(*samplers),
- 32, &brw->wm.sampler_offset);
- memset(samplers, 0, brw->wm.sampler_count * sizeof(*samplers));
+ brw->sampler.count * sizeof(*samplers),
+ 32, &brw->sampler.offset);
+ memset(samplers, 0, brw->sampler.count * sizeof(*samplers));
- for (i = 0; i < brw->wm.sampler_count; i++) {
+ for (i = 0; i < brw->sampler.count; i++) {
if (ctx->Texture.Unit[i]._ReallyEnabled)
gen7_update_sampler_state(brw, i, &samplers[i]);
}
/* CACHE_NEW_SAMPLER */
BEGIN_BATCH(2);
OUT_BATCH(_3DSTATE_SAMPLER_STATE_POINTERS_PS << 16 | (2 - 2));
- OUT_BATCH(brw->wm.sampler_offset);
+ OUT_BATCH(brw->sampler.offset);
ADVANCE_BATCH();
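
Unlike the four-dword gen6 command earlier, the gen7 update above is a two-dword, per-stage command: the header names the stage (_PS here) and the single payload dword is the table offset, so no zero placeholders for the other stages are needed. As a sketch:

    /* DW0: _3DSTATE_SAMPLER_STATE_POINTERS_PS header | (2 - 2) */
    /* DW1: PS SAMPLER_STATE offset (brw->sampler.offset)       */
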
/* CACHE_NEW_WM_PROG */
dw2 = dw4 = dw5 = 0;
- dw2 |= (ALIGN(brw->wm.sampler_count, 4) / 4) << GEN7_PS_SAMPLER_COUNT_SHIFT;
+ dw2 |= (ALIGN(brw->sampler.count, 4) / 4) << GEN7_PS_SAMPLER_COUNT_SHIFT;
/* Use ALT floating point mode for ARB fragment programs, because they
* require 0^0 == 1.