* This copies brw_stage_state::surf_offset[] into the indirect state section
* of the batchbuffer (allocated by brw_state_batch()).
*/
-void
+static void
brw_upload_binding_table(struct brw_context *brw,
GLbitfield brw_new_binding_table,
- struct brw_stage_state *stage_state,
- unsigned binding_table_entries,
- int shader_time_surf_index)
+ struct brw_stage_state *stage_state)
{
- if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
- gen7_create_shader_time_surface(brw, &stage_state->surf_offset[shader_time_surf_index]);
- }
+ /* CACHE_NEW_*_PROG */
+ struct brw_stage_prog_data *prog_data = stage_state->prog_data;
/* If there are no surfaces, skip making the binding table altogether. */
- if (binding_table_entries == 0) {
+ if (prog_data->binding_table.size_bytes == 0) {
if (stage_state->bind_bo_offset != 0) {
brw->state.dirty.brw |= brw_new_binding_table;
stage_state->bind_bo_offset = 0;
return;
}
- size_t table_size_in_bytes = binding_table_entries * sizeof(uint32_t);
+ if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
+ gen7_create_shader_time_surface(brw, &stage_state->surf_offset[prog_data->binding_table.shader_time_start]);
+ }
uint32_t *bind = brw_state_batch(brw, AUB_TRACE_BINDING_TABLE,
- table_size_in_bytes, 32,
+ prog_data->binding_table.size_bytes, 32,
&stage_state->bind_bo_offset);
/* BRW_NEW_SURFACES and BRW_NEW_*_CONSTBUF */
- memcpy(bind, stage_state->surf_offset, table_size_in_bytes);
+ memcpy(bind, stage_state->surf_offset, prog_data->binding_table.size_bytes);
brw->state.dirty.brw |= brw_new_binding_table;
}
static void
brw_vs_upload_binding_table(struct brw_context *brw)
{
- struct brw_stage_state *stage_state = &brw->vs.base;
- /* CACHE_NEW_VS_PROG */
- const struct brw_vec4_prog_data *prog_data = &brw->vs.prog_data->base;
-
- /* BRW_NEW_SURFACES and BRW_NEW_VS_CONSTBUF */
- brw_upload_binding_table(brw, BRW_NEW_VS_BINDING_TABLE, stage_state,
- prog_data->binding_table_size,
- SURF_INDEX_VEC4_SHADER_TIME);
+ brw_upload_binding_table(brw, BRW_NEW_VS_BINDING_TABLE, &brw->vs.base);
}
const struct brw_tracked_state brw_vs_binding_table = {
static void
brw_upload_wm_binding_table(struct brw_context *brw)
{
- struct brw_stage_state *stage_state = &brw->wm.base;
-
- /* BRW_NEW_SURFACES and CACHE_NEW_WM_PROG */
- brw_upload_binding_table(brw, BRW_NEW_PS_BINDING_TABLE, stage_state,
- brw->wm.prog_data->binding_table_size,
- SURF_INDEX_WM_SHADER_TIME);
+ brw_upload_binding_table(brw, BRW_NEW_PS_BINDING_TABLE, &brw->wm.base);
}
const struct brw_tracked_state brw_wm_binding_table = {
static void
brw_gs_upload_binding_table(struct brw_context *brw)
{
- struct brw_stage_state *stage_state = &brw->gs.base;
-
/* If there's no GS, skip changing anything. */
if (!brw->gs.prog_data)
return;
- /* CACHE_NEW_GS_PROG */
- const struct brw_vec4_prog_data *prog_data = &brw->gs.prog_data->base;
-
- /* BRW_NEW_SURFACES and BRW_NEW_GS_CONSTBUF */
- brw_upload_binding_table(brw, BRW_NEW_GS_BINDING_TABLE, stage_state,
- prog_data->binding_table_size,
- SURF_INDEX_VEC4_SHADER_TIME);
+ brw_upload_binding_table(brw, BRW_NEW_GS_BINDING_TABLE, &brw->gs.base);
}
const struct brw_tracked_state brw_gs_binding_table = {
struct exec_list *ir;
};
+/* Note: If adding fields that need anything besides a normal memcmp() for
+ * comparing them, be sure to go fix the stage-specific
+ * prog_data_compare().
+ */
+struct brw_stage_prog_data {
+ struct {
+ /** size of our binding table. */
+ uint32_t size_bytes;
+
+ /** @{
+ * surface indices for the various groups of surfaces
+ */
+ uint32_t pull_constants_start;
+ uint32_t texture_start;
+ uint32_t gather_texture_start;
+ uint32_t ubo_start;
+ uint32_t shader_time_start;
+ /** @} */
+ } binding_table;
+};
+
/* Data about a particular attempt to compile a program. Note that
* there can be many of these, each in a different GL state
* corresponding to a different brw_wm_prog_key struct, with different
* struct!
*/
struct brw_wm_prog_data {
+ struct brw_stage_prog_data base;
+
GLuint curb_read_length;
GLuint num_varying_inputs;
GLuint reg_blocks_16;
GLuint total_scratch;
- unsigned binding_table_size;
+ struct {
+ /** @{
+ * surface indices for the WM-specific surfaces
+ */
+ uint32_t render_target_start;
+ /** @} */
+ } binding_table;
GLuint nr_params; /**< number of float params/constants */
GLuint nr_pull_params;
* this struct!
*/
struct brw_vec4_prog_data {
+ struct brw_stage_prog_data base;
struct brw_vue_map vue_map;
/**
*/
GLuint urb_entry_size;
- unsigned binding_table_size;
-
/* These pointers must appear last. See brw_vec4_prog_data_compare(). */
const float **param;
const float **pull_param;
};
/**
- * Data shared between brw_context::vs and brw_context::gs
+ * Data shared between each programmable stage in the pipeline (vs, gs, and
+ * wm).
*/
struct brw_stage_state
{
+ struct brw_stage_prog_data *prog_data;
+
/**
* Optional scratch buffer used to store spilled register values and
* variably-indexed GRF arrays.
unsigned stride_dwords, unsigned offset_dwords);
void brw_upload_ubo_surfaces(struct brw_context *brw,
struct gl_shader *shader,
- uint32_t *surf_offsets);
+ struct brw_stage_state *stage_state,
+ struct brw_stage_prog_data *prog_data);
/* brw_surface_formats.c */
bool brw_is_hiz_depth_format(struct brw_context *ctx, gl_format format);
base_ir = inst->ir;
current_annotation = inst->annotation;
- fs_reg surf_index = fs_reg((unsigned)SURF_INDEX_FRAG_CONST_BUFFER);
+ fs_reg surf_index = fs_reg(c->prog_data.base.binding_table.pull_constants_start);
fs_reg temp = fs_reg(this, glsl_type::float_type);
exec_list list = VARYING_PULL_CONSTANT_LOAD(temp,
surf_index,
assert(!inst->src[i].reladdr);
fs_reg dst = fs_reg(this, glsl_type::float_type);
- fs_reg index = fs_reg((unsigned)SURF_INDEX_FRAG_CONST_BUFFER);
+ fs_reg index = fs_reg(c->prog_data.base.binding_table.pull_constants_start);
fs_reg offset = fs_reg((unsigned)(pull_index * 4) & ~15);
fs_inst *pull =
new(mem_ctx) fs_inst(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD,
}
}
+void
+fs_visitor::assign_binding_table_offsets()
+{
+ c->prog_data.binding_table.render_target_start = SURF_INDEX_DRAW(0);
+ c->prog_data.base.binding_table.texture_start = SURF_INDEX_TEXTURE(0);
+ c->prog_data.base.binding_table.ubo_start = SURF_INDEX_WM_UBO(0);
+ c->prog_data.base.binding_table.shader_time_start = SURF_INDEX_WM_SHADER_TIME;
+ c->prog_data.base.binding_table.gather_texture_start = SURF_INDEX_GATHER_TEXTURE(0);
+ c->prog_data.base.binding_table.pull_constants_start = SURF_INDEX_FRAG_CONST_BUFFER;
+
+ /* c->prog_data.base.binding_table.size_bytes will be set by mark_surface_used. */
+}
+
bool
fs_visitor::run()
{
sanity_param_count = fp->Base.Parameters->NumParameters;
uint32_t orig_nr_params = c->prog_data.nr_params;
+ assign_binding_table_offsets();
+
if (brw->gen >= 6)
setup_payload_gen6();
else
uint32_t const_offset);
bool run();
+ void assign_binding_table_offsets();
void setup_payload_gen4();
void setup_payload_gen6();
void assign_curb_setup();
{
assert(surf_index < BRW_MAX_WM_SURFACES);
- c->prog_data.binding_table_size =
- MAX2(c->prog_data.binding_table_size, surf_index + 1);
+ c->prog_data.base.binding_table.size_bytes =
+ MAX2(c->prog_data.base.binding_table.size_bytes, (surf_index + 1) * 4);
}
void
brw_pop_insn_state(p);
+ uint32_t surf_index =
+ c->prog_data.binding_table.render_target_start + inst->target;
brw_fb_WRITE(p,
dispatch_width,
inst->base_mrf,
implied_header,
msg_control,
- SURF_INDEX_DRAW(inst->target),
+ surf_index,
inst->mlen,
0,
eot,
inst->header_present);
- mark_surface_used(SURF_INDEX_DRAW(inst->target));
+ mark_surface_used(surf_index);
}
/* Computes the integer pixel x,y values from the origin.
src = retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW);
}
- uint32_t surface_index = inst->opcode == SHADER_OPCODE_TG4
- ? SURF_INDEX_GATHER_TEXTURE(inst->sampler)
- : SURF_INDEX_TEXTURE(inst->sampler);
+ uint32_t surface_index = (inst->opcode == SHADER_OPCODE_TG4
+ ? c->prog_data.base.binding_table.gather_texture_start
+ : c->prog_data.base.binding_table.texture_start) + inst->sampler;
brw_SAMPLE(p,
retype(dst, BRW_REGISTER_TYPE_UW),
*/
brw_MOV(p, payload_offset, offset);
brw_MOV(p, payload_value, value);
- brw_shader_time_add(p, payload, SURF_INDEX_WM_SHADER_TIME);
+ brw_shader_time_add(p, payload,
+ c->prog_data.base.binding_table.shader_time_start);
brw_pop_insn_state(p);
- mark_surface_used(SURF_INDEX_WM_SHADER_TIME);
+ mark_surface_used(c->prog_data.base.binding_table.shader_time_start);
}
void
*/
ir_constant *uniform_block = ir->operands[0]->as_constant();
ir_constant *const_offset = ir->operands[1]->as_constant();
- fs_reg surf_index = fs_reg((unsigned)SURF_INDEX_WM_UBO(uniform_block->value.u[0]));
+ fs_reg surf_index = fs_reg(c->prog_data.base.binding_table.ubo_start +
+ uniform_block->value.u[0]);
if (const_offset) {
fs_reg packed_consts = fs_reg(this, glsl_type::float_type);
packed_consts.type = result.type;
brw_upload_gs_ubo_surfaces(struct brw_context *brw)
{
struct gl_context *ctx = &brw->ctx;
- struct brw_stage_state *stage_state = &brw->gs.base;
/* _NEW_PROGRAM */
struct gl_shader_program *prog = ctx->Shader.CurrentGeometryProgram;
if (!prog)
return;
+ /* CACHE_NEW_GS_PROG */
brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_GEOMETRY],
- &stage_state->surf_offset[SURF_INDEX_VEC4_UBO(0)]);
+ &brw->gs.base, &brw->gs.prog_data->base.base);
}
const struct brw_tracked_state brw_gs_ubo_surfaces = {
.dirty = {
.mesa = _NEW_PROGRAM,
.brw = BRW_NEW_BATCH | BRW_NEW_UNIFORM_BUFFER,
- .cache = 0,
+ .cache = CACHE_NEW_GS_PROG,
},
.emit = brw_upload_gs_ubo_surfaces,
};
const struct gl_program *prog,
struct brw_stage_state *stage_state,
const struct brw_vec4_prog_data *prog_data);
-void
-brw_upload_binding_table(struct brw_context *brw,
- GLbitfield brw_new_binding_table,
- struct brw_stage_state *stage_state,
- unsigned binding_table_entries,
- int shader_time_surf_index);
/* gen7_vs_state.c */
void
emit(SHADER_OPCODE_SHADER_TIME_ADD, dst_reg(), src_reg(dst));
}
+void
+vec4_visitor::assign_binding_table_offsets()
+{
+ prog_data->base.binding_table.texture_start = SURF_INDEX_VEC4_TEXTURE(0);
+ prog_data->base.binding_table.ubo_start = SURF_INDEX_VEC4_UBO(0);
+ prog_data->base.binding_table.shader_time_start = SURF_INDEX_VEC4_SHADER_TIME;
+ prog_data->base.binding_table.gather_texture_start = SURF_INDEX_VEC4_GATHER_TEXTURE(0);
+ prog_data->base.binding_table.pull_constants_start = SURF_INDEX_VEC4_CONST_BUFFER;
+
+ /* prog_data->base.binding_table.size_bytes will be set by mark_surface_used. */
+}
+
bool
vec4_visitor::run()
{
if (INTEL_DEBUG & DEBUG_SHADER_TIME)
emit_shader_time_begin();
+ assign_binding_table_offsets();
+
emit_prolog();
/* Generate VS IR for main(). (the visitor only descends into
brw_vec4_prog_data_compare(const struct brw_vec4_prog_data *a,
const struct brw_vec4_prog_data *b)
{
- /* Compare all the struct up to the pointers. */
+ /* Compare all the struct (including the base) up to the pointers. */
if (memcmp(a, b, offsetof(struct brw_vec4_prog_data, param)))
return false;
bool run(void);
void fail(const char *msg, ...);
+ void assign_binding_table_offsets();
int virtual_grf_alloc(int size);
void setup_uniform_clipplane_values();
void setup_uniform_values(ir_variable *ir);
{
assert(surf_index < BRW_MAX_VEC4_SURFACES);
- prog_data->binding_table_size = MAX2(prog_data->binding_table_size,
- surf_index + 1);
+ prog_data->base.binding_table.size_bytes =
+ MAX2(prog_data->base.binding_table.size_bytes, (surf_index + 1) * 4);
}
void
break;
}
- uint32_t surface_index = inst->opcode == SHADER_OPCODE_TG4
- ? SURF_INDEX_VEC4_GATHER_TEXTURE(inst->sampler)
- : SURF_INDEX_VEC4_TEXTURE(inst->sampler);
+ uint32_t surface_index = (inst->opcode == SHADER_OPCODE_TG4
+ ? prog_data->base.binding_table.gather_texture_start
+ : prog_data->base.binding_table.texture_start) + inst->sampler;
brw_SAMPLE(p,
dst,
break;
case SHADER_OPCODE_SHADER_TIME_ADD:
- brw_shader_time_add(p, src[0], SURF_INDEX_VEC4_SHADER_TIME);
- mark_surface_used(SURF_INDEX_VEC4_SHADER_TIME);
+ brw_shader_time_add(p, src[0],
+ prog_data->base.binding_table.shader_time_start);
+ mark_surface_used(prog_data->base.binding_table.shader_time_start);
break;
case VS_OPCODE_UNPACK_FLAGS_SIMD4X2:
gp, &key);
assert(success);
}
+ brw->gs.base.prog_data = &brw->gs.prog_data->base.base;
+
if (memcmp(&brw->vs.prog_data->base.vue_map, &brw->vue_map_geom_out,
sizeof(brw->vue_map_geom_out)) != 0) {
brw->vue_map_geom_out = brw->gs.prog_data->base.vue_map;
src_reg packed_consts = src_reg(this, glsl_type::vec4_type);
packed_consts.type = result.type;
src_reg surf_index =
- src_reg(SURF_INDEX_VEC4_UBO(uniform_block->value.u[0]));
+ src_reg(prog_data->base.binding_table.ubo_start + uniform_block->value.u[0]);
if (const_offset_ir) {
offset = src_reg(const_offset / 16);
} else {
int base_offset)
{
int reg_offset = base_offset + orig_src.reg_offset;
- src_reg index = src_reg((unsigned)SURF_INDEX_VEC4_CONST_BUFFER);
+ src_reg index = src_reg(prog_data->base.binding_table.pull_constants_start);
src_reg offset = get_pull_constant_offset(inst, orig_src.reladdr, reg_offset);
vec4_instruction *load;
#endif
result = src_reg(this, glsl_type::vec4_type);
- src_reg surf_index = src_reg(unsigned(SURF_INDEX_VEC4_CONST_BUFFER));
+ src_reg surf_index = src_reg(unsigned(prog_data->base.binding_table.pull_constants_start));
vec4_instruction *load =
new(mem_ctx) vec4_instruction(this, VS_OPCODE_PULL_CONSTANT_LOAD,
dst_reg(result), surf_index, reladdr);
(void) success;
assert(success);
}
+ brw->vs.base.prog_data = &brw->vs.prog_data->base.base;
+
if (memcmp(&brw->vs.prog_data->base.vue_map, &brw->vue_map_geom_out,
sizeof(brw->vue_map_geom_out)) != 0) {
brw->vue_map_vs = brw->vs.prog_data->base.vue_map;
const struct brw_vec4_prog_data *prog_data)
{
int i;
+ uint32_t surf_index = prog_data->base.binding_table.pull_constants_start;
/* Updates the ParamaterValues[i] pointers for all parameters of the
* basic type of PROGRAM_STATE_VAR.
if (stage_state->const_bo) {
drm_intel_bo_unreference(stage_state->const_bo);
stage_state->const_bo = NULL;
- stage_state->surf_offset[SURF_INDEX_VEC4_CONST_BUFFER] = 0;
+ stage_state->surf_offset[surf_index] = 0;
brw->state.dirty.brw |= brw_new_constbuf;
}
return;
drm_intel_gem_bo_unmap_gtt(stage_state->const_bo);
- const int surf = SURF_INDEX_VEC4_CONST_BUFFER;
brw->vtbl.create_constant_surface(brw, stage_state->const_bo, 0, size,
- &stage_state->surf_offset[surf], false);
+ &stage_state->surf_offset[surf_index],
+ false);
brw->state.dirty.brw |= brw_new_constbuf;
}
static void
brw_upload_vs_ubo_surfaces(struct brw_context *brw)
{
- struct brw_stage_state *stage_state = &brw->vs.base;
-
struct gl_context *ctx = &brw->ctx;
/* _NEW_PROGRAM */
struct gl_shader_program *prog = ctx->Shader.CurrentVertexProgram;
if (!prog)
return;
+ /* CACHE_NEW_VS_PROG */
brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_VERTEX],
- &stage_state->surf_offset[SURF_INDEX_VEC4_UBO(0)]);
+ &brw->vs.base, &brw->vs.prog_data->base.base);
}
const struct brw_tracked_state brw_vs_ubo_surfaces = {
.dirty = {
.mesa = _NEW_PROGRAM,
.brw = BRW_NEW_BATCH | BRW_NEW_UNIFORM_BUFFER,
- .cache = 0,
+ .cache = CACHE_NEW_VS_PROG,
},
.emit = brw_upload_vs_ubo_surfaces,
};
const struct brw_wm_prog_data *a = in_a;
const struct brw_wm_prog_data *b = in_b;
- /* Compare all the struct up to the pointers. */
+ /* Compare all the struct (including the base) up to the pointers. */
if (memcmp(a, b, offsetof(struct brw_wm_prog_data, param)))
return false;
(void) success;
assert(success);
}
+ brw->wm.base.prog_data = &brw->wm.prog_data->base;
}
(struct brw_fragment_program *) brw->fragment_program;
struct gl_program_parameter_list *params = fp->program.Base.Parameters;
const int size = brw->wm.prog_data->nr_pull_params * sizeof(float);
- const int surf_index = SURF_INDEX_FRAG_CONST_BUFFER;
+ const int surf_index =
+ brw->wm.prog_data->base.binding_table.pull_constants_start;
float *constants;
unsigned int i;
drm_intel_bo *bo = NULL;
unsigned pitch_minus_1 = 0;
uint32_t multisampling_state = 0;
+ uint32_t surf_index =
+ brw->wm.prog_data->binding_table.render_target_start + unit;
/* _NEW_BUFFERS */
const struct gl_framebuffer *fb = ctx->DrawBuffer;
surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
- &brw->wm.base.surf_offset[SURF_INDEX_DRAW(unit)]);
+ &brw->wm.base.surf_offset[surf_index]);
if (fb->Visual.samples > 1) {
/* On Gen6, null render targets seem to cause GPU hangs when
if (bo) {
drm_intel_bo_emit_reloc(brw->batch.bo,
- brw->wm.base.surf_offset[SURF_INDEX_DRAW(unit)] + 4,
+ brw->wm.base.surf_offset[surf_index] + 4,
bo, 0,
I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
}
uint32_t format = 0;
/* _NEW_BUFFERS */
gl_format rb_format = _mesa_get_render_format(ctx, intel_rb_format(irb));
+ uint32_t surf_index =
+ brw->wm.prog_data->binding_table.render_target_start + unit;
assert(!layered);
region = irb->mt->region;
surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
- &brw->wm.base.surf_offset[SURF_INDEX_DRAW(unit)]);
+ &brw->wm.base.surf_offset[surf_index]);
format = brw->render_target_format[rb_format];
if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
}
drm_intel_bo_emit_reloc(brw->batch.bo,
- brw->wm.base.surf_offset[SURF_INDEX_DRAW(unit)] + 4,
+ brw->wm.base.surf_offset[surf_index] + 4,
region->bo,
surf[1] - region->bo->offset,
I915_GEM_DOMAIN_RENDER,
static void
update_stage_texture_surfaces(struct brw_context *brw,
const struct gl_program *prog,
- uint32_t *surf_offset,
+ struct brw_stage_state *stage_state,
bool for_gather)
{
if (!prog)
struct gl_context *ctx = &brw->ctx;
- unsigned num_samplers = _mesa_fls(prog->SamplersUsed);
+ uint32_t *surf_offset = stage_state->surf_offset;
+ if (for_gather)
+ surf_offset += stage_state->prog_data->binding_table.gather_texture_start;
+ else
+ surf_offset += stage_state->prog_data->binding_table.texture_start;
+ unsigned num_samplers = _mesa_fls(prog->SamplersUsed);
for (unsigned s = 0; s < num_samplers; s++) {
surf_offset[s] = 0;
struct gl_program *fs = (struct gl_program *) brw->fragment_program;
/* _NEW_TEXTURE */
- update_stage_texture_surfaces(brw, vs,
- brw->vs.base.surf_offset +
- SURF_INDEX_VEC4_TEXTURE(0),
- false);
- update_stage_texture_surfaces(brw, gs,
- brw->gs.base.surf_offset +
- SURF_INDEX_VEC4_TEXTURE(0),
- false);
- update_stage_texture_surfaces(brw, fs,
- brw->wm.base.surf_offset +
- SURF_INDEX_TEXTURE(0),
- false);
+ update_stage_texture_surfaces(brw, vs, &brw->vs.base, false);
+ update_stage_texture_surfaces(brw, gs, &brw->gs.base, false);
+ update_stage_texture_surfaces(brw, fs, &brw->wm.base, false);
/* emit alternate set of surface state for gather. this
* allows the surface format to be overriden for only the
* gather4 messages. */
if (vs && vs->UsesGather)
- update_stage_texture_surfaces(brw, vs,
- brw->vs.base.surf_offset +
- SURF_INDEX_VEC4_GATHER_TEXTURE(0),
- true);
+ update_stage_texture_surfaces(brw, vs, &brw->vs.base, true);
if (gs && gs->UsesGather)
- update_stage_texture_surfaces(brw, gs,
- brw->gs.base.surf_offset +
- SURF_INDEX_VEC4_GATHER_TEXTURE(0),
- true);
+ update_stage_texture_surfaces(brw, gs, &brw->gs.base, true);
if (fs && fs->UsesGather)
- update_stage_texture_surfaces(brw, fs,
- brw->wm.base.surf_offset +
- SURF_INDEX_GATHER_TEXTURE(0),
- true);
+ update_stage_texture_surfaces(brw, fs, &brw->wm.base, true);
brw->state.dirty.brw |= BRW_NEW_SURFACES;
}
void
brw_upload_ubo_surfaces(struct brw_context *brw,
struct gl_shader *shader,
- uint32_t *surf_offsets)
+ struct brw_stage_state *stage_state,
+ struct brw_stage_prog_data *prog_data)
{
struct gl_context *ctx = &brw->ctx;
if (!shader)
return;
+ uint32_t *surf_offsets =
+ &stage_state->surf_offset[prog_data->binding_table.ubo_start];
+
for (int i = 0; i < shader->NumUniformBlocks; i++) {
struct gl_uniform_buffer_binding *binding;
struct intel_buffer_object *intel_bo;
if (!prog)
return;
+ /* CACHE_NEW_WM_PROG */
brw_upload_ubo_surfaces(brw, prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
- &brw->wm.base.surf_offset[SURF_INDEX_WM_UBO(0)]);
+ &brw->wm.base, &brw->wm.prog_data->base);
}
const struct brw_tracked_state brw_wm_ubo_surfaces = {
.dirty = {
.mesa = _NEW_PROGRAM,
.brw = BRW_NEW_BATCH | BRW_NEW_UNIFORM_BUFFER,
- .cache = 0,
+ .cache = CACHE_NEW_WM_PROG,
},
.emit = brw_upload_wm_ubo_surfaces,
};
/* _NEW_BUFFERS */
const struct gl_framebuffer *fb = ctx->DrawBuffer;
+ uint32_t surf_index =
+ brw->wm.prog_data->binding_table.render_target_start + unit;
uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 8 * 4, 32,
- &brw->wm.base.surf_offset[SURF_INDEX_DRAW(unit)]);
+ &brw->wm.base.surf_offset[surf_index]);
memset(surf, 0, 8 * 4);
/* From the Ivybridge PRM, Volume 4, Part 1, page 65,
GLenum gl_target = rb->TexImage ?
rb->TexImage->TexObject->Target : GL_TEXTURE_2D;
- uint32_t surf_index = SURF_INDEX_DRAW(unit);
+ uint32_t surf_index =
+ brw->wm.prog_data->binding_table.render_target_start + unit;
uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 8 * 4, 32,
&brw->wm.base.surf_offset[surf_index]);