#include "blorp_priv.h"
#include "common/gen_device_info.h"
#include "common/gen_sample_positions.h"
-#include "intel_aub.h"
/**
* This file provides the blorp pipeline setup and execution functionality.
static void *
blorp_alloc_dynamic_state(struct blorp_batch *batch,
- enum aub_state_struct_type type,
uint32_t size,
uint32_t alignment,
uint32_t *offset);
}
uint32_t offset;
- void *state = blorp_alloc_dynamic_state(batch, AUB_TRACE_BLEND_STATE,
+ void *state = blorp_alloc_dynamic_state(batch,
GENX(BLEND_STATE_length) * 4,
64, &offset);
GENX(BLEND_STATE_pack)(NULL, state, &blend);
#endif
uint32_t offset;
- void *state = blorp_alloc_dynamic_state(batch, AUB_TRACE_CC_STATE,
+ void *state = blorp_alloc_dynamic_state(batch,
GENX(COLOR_CALC_STATE_length) * 4,
64, &offset);
GENX(COLOR_CALC_STATE_pack)(NULL, state, &cc);
GENX(3DSTATE_WM_DEPTH_STENCIL_pack)(NULL, dw, &ds);
#else
uint32_t offset;
- void *state = blorp_alloc_dynamic_state(batch, AUB_TRACE_DEPTH_STENCIL_STATE,
+ void *state = blorp_alloc_dynamic_state(batch,
GENX(DEPTH_STENCIL_STATE_length) * 4,
64, &offset);
GENX(DEPTH_STENCIL_STATE_pack)(NULL, state, &ds);
};
uint32_t offset;
- void *state = blorp_alloc_dynamic_state(batch, AUB_TRACE_SAMPLER_STATE,
+ void *state = blorp_alloc_dynamic_state(batch,
GENX(SAMPLER_STATE_length) * 4,
32, &offset);
GENX(SAMPLER_STATE_pack)(NULL, state, &sampler);
{
uint32_t cc_vp_offset;
- void *state = blorp_alloc_dynamic_state(batch, AUB_TRACE_CC_VP_STATE,
+ void *state = blorp_alloc_dynamic_state(batch,
GENX(CC_VIEWPORT_length) * 4, 32,
&cc_vp_offset);
static void *
blorp_alloc_dynamic_state(struct blorp_batch *batch,
- enum aub_state_struct_type type,
uint32_t size,
uint32_t alignment,
uint32_t *offset)
brw->shader_time.bo, 0, ISL_FORMAT_RAW,
brw->shader_time.bo->size, 1, true);
}
- uint32_t *bind = brw_state_batch(brw, AUB_TRACE_BINDING_TABLE,
- prog_data->binding_table.size_bytes,
- 32,
- &stage_state->bind_bo_offset);
+ uint32_t *bind =
+ brw_state_batch(brw, prog_data->binding_table.size_bytes,
+ 32, &stage_state->bind_bo_offset);
/* BRW_NEW_SURFACES and BRW_NEW_*_CONSTBUF */
memcpy(bind, stage_state->surf_offset,
/* BRW_NEW_VIEWPORT_COUNT */
const unsigned viewport_count = brw->clip.viewport_count;
- ccv = brw_state_batch(brw, AUB_TRACE_CC_VP_STATE,
- sizeof(*ccv) * viewport_count, 32,
+ ccv = brw_state_batch(brw, sizeof(*ccv) * viewport_count, 32,
&brw->cc.vp_offset);
/* _NEW_TRANSFORM */
struct gl_context *ctx = &brw->ctx;
struct brw_cc_unit_state *cc;
- cc = brw_state_batch(brw, AUB_TRACE_CC_STATE,
- sizeof(*cc), 64, &brw->cc.state_offset);
+ cc = brw_state_batch(brw, sizeof(*cc), 64, &brw->cc.state_offset);
memset(cc, 0, sizeof(*cc));
/* _NEW_STENCIL | _NEW_BUFFERS */
struct gl_context *ctx = &brw->ctx;
struct brw_clipper_viewport *vp;
- vp = brw_state_batch(brw, AUB_TRACE_CLIP_VP_STATE,
- sizeof(*vp), 32, &brw->clip.vp_offset);
+ vp = brw_state_batch(brw, sizeof(*vp), 32, &brw->clip.vp_offset);
const float maximum_post_clamp_delta = 4096;
float gbx = maximum_post_clamp_delta / ctx->ViewportArray[0].Width;
upload_clip_vp(brw);
- clip = brw_state_batch(brw, AUB_TRACE_CLIP_STATE,
- sizeof(*clip), 32, &brw->clip.state_offset);
+ clip = brw_state_batch(brw, sizeof(*clip), 32, &brw->clip.state_offset);
memset(clip, 0, sizeof(*clip));
/* BRW_NEW_PROGRAM_CACHE | BRW_NEW_CLIP_PROG_DATA */
#include "main/mtypes.h"
#include "brw_structs.h"
#include "compiler/brw_compiler.h"
-#include "intel_aub.h"
#include "isl/isl.h"
#include "blorp/blorp.h"
gen6_upload_push_constants(struct brw_context *brw,
const struct gl_program *prog,
const struct brw_stage_prog_data *prog_data,
- struct brw_stage_state *stage_state,
- enum aub_state_struct_type type);
+ struct brw_stage_state *stage_state);
bool
gen9_use_linear_1d_layout(const struct brw_context *brw,
{
struct brw_gs_unit_state *gs;
- gs = brw_state_batch(brw, AUB_TRACE_GS_STATE,
- sizeof(*gs), 32, &brw->ff_gs.state_offset);
+ gs = brw_state_batch(brw, sizeof(*gs), 32, &brw->ff_gs.state_offset);
memset(gs, 0, sizeof(*gs));
* format. This matches the sampler->BorderColor union exactly; just
* memcpy the values.
*/
- uint32_t *sdc = brw_state_batch(brw, AUB_TRACE_SAMPLER_DEFAULT_COLOR,
- 4 * 4, 64, sdc_offset);
+ uint32_t *sdc = brw_state_batch(brw, 4 * 4, 64, sdc_offset);
memcpy(sdc, color.ui, 4 * 4);
} else if (brw->is_haswell && (is_integer_format || is_stencil_sampling)) {
/* Haswell's integer border color support is completely insane:
* has the "Integer Surface Format" bit set. Even then, the
* arrangement of the RGBA data devolves into madness.
*/
- uint32_t *sdc = brw_state_batch(brw, AUB_TRACE_SAMPLER_DEFAULT_COLOR,
- 20 * 4, 512, sdc_offset);
+ uint32_t *sdc = brw_state_batch(brw, 20 * 4, 512, sdc_offset);
memset(sdc, 0, 20 * 4);
sdc = &sdc[16];
} else if (brw->gen == 5 || brw->gen == 6) {
struct gen5_sampler_default_color *sdc;
- sdc = brw_state_batch(brw, AUB_TRACE_SAMPLER_DEFAULT_COLOR,
- sizeof(*sdc), 32, sdc_offset);
+ sdc = brw_state_batch(brw, sizeof(*sdc), 32, sdc_offset);
memset(sdc, 0, sizeof(*sdc));
sdc->f[2] = color.f[2];
sdc->f[3] = color.f[3];
} else {
- float *sdc = brw_state_batch(brw, AUB_TRACE_SAMPLER_DEFAULT_COLOR,
- 4 * 4, 32, sdc_offset);
+ float *sdc = brw_state_batch(brw, 4 * 4, 32, sdc_offset);
memcpy(sdc, color.f, 4 * 4);
}
}
const int dwords = 4;
const int size_in_bytes = dwords * sizeof(uint32_t);
- uint32_t *sampler_state = brw_state_batch(brw, AUB_TRACE_SAMPLER_STATE,
+ uint32_t *sampler_state = brw_state_batch(brw,
sampler_count * size_in_bytes,
32, &stage_state->sampler_offset);
memset(sampler_state, 0, sampler_count * size_in_bytes);
float scale[3], translate[3];
const bool render_to_fbo = _mesa_is_user_fbo(ctx->DrawBuffer);
- sfv = brw_state_batch(brw, AUB_TRACE_SF_VP_STATE,
- sizeof(*sfv), 32, &brw->sf.vp_offset);
+ sfv = brw_state_batch(brw, sizeof(*sfv), 32, &brw->sf.vp_offset);
memset(sfv, 0, sizeof(*sfv));
/* Accessing the fields Width and Height of gl_framebuffer to produce the
int chipset_max_threads;
bool render_to_fbo = _mesa_is_user_fbo(ctx->DrawBuffer);
- sf = brw_state_batch(brw, AUB_TRACE_SF_STATE,
- sizeof(*sf), 64, &brw->sf.state_offset);
+ sf = brw_state_batch(brw, sizeof(*sf), 64, &brw->sf.state_offset);
memset(sf, 0, sizeof(*sf));
#define BRW_BATCH_STRUCT(brw, s) \
intel_batchbuffer_data(brw, (s), sizeof(*(s)), RENDER_RING)
-void *__brw_state_batch(struct brw_context *brw,
- enum aub_state_struct_type type,
- int size,
- int alignment,
- int index,
- uint32_t *out_offset);
-#define brw_state_batch(brw, type, size, alignment, out_offset) \
- __brw_state_batch(brw, type, size, alignment, 0, out_offset)
-
+void *brw_state_batch(struct brw_context *brw,
+ int size, int alignment, uint32_t *out_offset);
uint32_t brw_state_batch_size(struct brw_context *brw, uint32_t offset);
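/*
 * Illustrative sketch (not part of the patch): with the AUB_TRACE_* type
 * argument dropped, callers of brw_state_batch() pass only a size, an
 * alignment, and an out-offset. The helper and variable names below
 * (upload_example_state, example_offset) are hypothetical.
 */
static void
upload_example_state(struct brw_context *brw)
{
   uint32_t example_offset;
   uint32_t *state = brw_state_batch(brw, 4 * sizeof(uint32_t), 32,
                                     &example_offset);

   /* brw_state_batch() returns CPU-writable space allocated at the top of
    * the batchbuffer; example_offset receives its offset for use in later
    * state-pointer packets.
    */
   memset(state, 0, 4 * sizeof(uint32_t));
}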
/* brw_wm_surface_state.c */
* buffers in at the top of the batchbuffer.
*/
void *
-__brw_state_batch(struct brw_context *brw,
- enum aub_state_struct_type type,
- int size,
- int alignment,
- int index,
- uint32_t *out_offset)
-
+brw_state_batch(struct brw_context *brw,
+ int size,
+ int alignment,
+ uint32_t *out_offset)
{
struct intel_batchbuffer *batch = &brw->batch;
uint32_t offset;
struct brw_vs_unit_state *vs;
- vs = brw_state_batch(brw, AUB_TRACE_VS_STATE,
- sizeof(*vs), 32, &stage_state->state_offset);
+ vs = brw_state_batch(brw, sizeof(*vs), 32, &stage_state->state_offset);
memset(vs, 0, sizeof(*vs));
/* BRW_NEW_PROGRAM_CACHE | BRW_NEW_VS_PROG_DATA */
brw_wm_prog_data(brw->wm.base.prog_data);
struct brw_wm_unit_state *wm;
- wm = brw_state_batch(brw, AUB_TRACE_WM_STATE,
- sizeof(*wm), 32, &brw->wm.base.state_offset);
+ wm = brw_state_batch(brw, sizeof(*wm), 32, &brw->wm.base.state_offset);
memset(wm, 0, sizeof(*wm));
if (prog_data->dispatch_8 && prog_data->dispatch_16) {
clear_color = intel_miptree_get_isl_clear_color(brw, mt);
}
- void *state = __brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
- brw->isl_dev.ss.size,
- brw->isl_dev.ss.align,
- surf_index, surf_offset);
+ void *state = brw_state_batch(brw,
+ brw->isl_dev.ss.size,
+ brw->isl_dev.ss.align,
+ surf_offset);
isl_surf_fill_state(&brw->isl_dev, state, .surf = &surf, .view = &view,
.address = mt->bo->offset64 + offset,
unsigned pitch,
bool rw)
{
- uint32_t *dw = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
+ uint32_t *dw = brw_state_batch(brw,
brw->isl_dev.ss.size,
brw->isl_dev.ss.align,
out_offset);
drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
offset_bytes,
buffer_obj->Size - offset_bytes);
- uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
- out_offset);
+ uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
uint32_t pitch_minus_1 = 4*stride_dwords - 1;
size_t size_dwords = buffer_obj->Size / 4;
uint32_t buffer_size_minus_1, width, height, depth, surface_format;
drm_intel_bo *bo = NULL;
unsigned pitch_minus_1 = 0;
uint32_t multisampling_state = 0;
- uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
- out_offset);
+ uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
if (samples > 1) {
/* On Gen6, null render targets seem to cause GPU hangs when
}
}
- surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32, &offset);
+ surf = brw_state_batch(brw, 6 * 4, 32, &offset);
format = brw->render_target_format[rb_format];
if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
nr_draw_buffers = 1;
size = sizeof(*blend) * nr_draw_buffers;
- blend = brw_state_batch(brw, AUB_TRACE_BLEND_STATE,
- size, 64, &brw->cc.blend_state_offset);
+ blend = brw_state_batch(brw, size, 64, &brw->cc.blend_state_offset);
memset(blend, 0, size);
struct gl_context *ctx = &brw->ctx;
struct gen6_color_calc_state *cc;
- cc = brw_state_batch(brw, AUB_TRACE_CC_STATE,
- sizeof(*cc), 64, &brw->cc.state_offset);
+ cc = brw_state_batch(brw, sizeof(*cc), 64, &brw->cc.state_offset);
memset(cc, 0, sizeof(*cc));
/* _NEW_COLOR */
gen6_upload_push_constants(struct brw_context *brw,
const struct gl_program *prog,
const struct brw_stage_prog_data *prog_data,
- struct brw_stage_state *stage_state,
- enum aub_state_struct_type type)
+ struct brw_stage_state *stage_state)
{
struct gl_context *ctx = &brw->ctx;
gl_constant_value *param;
int i;
- param = brw_state_batch(brw, type,
+ param = brw_state_batch(brw,
prog_data->nr_params * sizeof(gl_constant_value),
32, &stage_state->push_const_offset);
/* _NEW_BUFFERS */
depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
- ds = brw_state_batch(brw, AUB_TRACE_DEPTH_STENCIL_STATE,
- sizeof(*ds), 64,
+ ds = brw_state_batch(brw, sizeof(*ds), 64,
&brw->cc.depth_stencil_state_offset);
memset(ds, 0, sizeof(*ds));
struct brw_stage_prog_data *prog_data = brw->gs.base.prog_data;
_mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_GEOMETRY);
- gen6_upload_push_constants(brw, &gp->program, prog_data, stage_state,
- AUB_TRACE_VS_CONSTANTS);
+ gen6_upload_push_constants(brw, &gp->program, prog_data, stage_state);
}
if (brw->gen >= 7)
/* BRW_NEW_VIEWPORT_COUNT */
const unsigned viewport_count = brw->clip.viewport_count;
- scissor = brw_state_batch(brw, AUB_TRACE_SCISSOR_STATE,
- sizeof(*scissor) * viewport_count, 32,
+ scissor = brw_state_batch(brw, sizeof(*scissor) * viewport_count, 32,
&scissor_state_offset);
/* _NEW_SCISSOR | _NEW_BUFFERS | _NEW_VIEWPORT */
* space for the binding table. Anyway, in this case we know that we only
* use BRW_MAX_SOL_BINDINGS surfaces at most.
*/
- bind = brw_state_batch(brw, AUB_TRACE_BINDING_TABLE,
- sizeof(uint32_t) * BRW_MAX_SOL_BINDINGS,
+ bind = brw_state_batch(brw, sizeof(uint32_t) * BRW_MAX_SOL_BINDINGS,
32, &brw->ff_gs.bind_bo_offset);
/* BRW_NEW_SURFACES */
/* Might want to calculate nr_surfaces first, to avoid taking up so much
* space for the binding table.
*/
- bind = brw_state_batch(brw, AUB_TRACE_BINDING_TABLE,
- sizeof(uint32_t) * BRW_MAX_SURFACES,
+ bind = brw_state_batch(brw, sizeof(uint32_t) * BRW_MAX_SURFACES,
32, &brw->gs.base.bind_bo_offset);
/* BRW_NEW_SURFACES */
const uint32_t fb_width = _mesa_geometric_width(ctx->DrawBuffer);
const uint32_t fb_height = _mesa_geometric_height(ctx->DrawBuffer);
- sfv = brw_state_batch(brw, AUB_TRACE_SF_VP_STATE,
- sizeof(*sfv) * viewport_count,
+ sfv = brw_state_batch(brw, sizeof(*sfv) * viewport_count,
32, &brw->sf.vp_offset);
memset(sfv, 0, sizeof(*sfv) * viewport_count);
- clv = brw_state_batch(brw, AUB_TRACE_CLIP_VP_STATE,
- sizeof(*clv) * viewport_count,
+ clv = brw_state_batch(brw, sizeof(*clv) * viewport_count,
32, &brw->clip.vp_offset);
if (render_to_fbo) {
const struct brw_stage_prog_data *prog_data = brw->vs.base.prog_data;
_mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_VERTEX);
- gen6_upload_push_constants(brw, &vp->program, prog_data, stage_state,
- AUB_TRACE_VS_CONSTANTS);
+ gen6_upload_push_constants(brw, &vp->program, prog_data, stage_state);
if (brw->gen >= 7) {
if (brw->gen == 7 && !brw->is_haswell && !brw->is_baytrail)
_mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_FRAGMENT);
- gen6_upload_push_constants(brw, &fp->program, prog_data,
- stage_state, AUB_TRACE_WM_CONSTANTS);
+ gen6_upload_push_constants(brw, &fp->program, prog_data, stage_state);
if (brw->gen >= 7) {
gen7_upload_constant_state(brw, &brw->wm.base, true,
return;
uint32_t offset;
- uint32_t *desc = (uint32_t*) brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
- 8 * 4, 64, &offset);
+ uint32_t *desc = (uint32_t*) brw_state_batch(brw, 8 * 4, 64, &offset);
struct brw_stage_state *stage_state = &brw->cs.base;
struct brw_stage_prog_data *prog_data = stage_state->prog_data;
struct brw_cs_prog_data *cs_prog_data = brw_cs_prog_data(prog_data);
brw->shader_time.bo->size, 1, true);
}
- uint32_t *bind = (uint32_t*) brw_state_batch(brw, AUB_TRACE_BINDING_TABLE,
- prog_data->binding_table.size_bytes,
- 32, &stage_state->bind_bo_offset);
+ uint32_t *bind = brw_state_batch(brw, prog_data->binding_table.size_bytes,
+ 32, &stage_state->bind_bo_offset);
uint32_t dwords = brw->gen < 8 ? 8 : 9;
BEGIN_BATCH(dwords);
brw_upload_cs_push_constants(struct brw_context *brw,
const struct gl_program *prog,
const struct brw_cs_prog_data *cs_prog_data,
- struct brw_stage_state *stage_state,
- enum aub_state_struct_type type)
+ struct brw_stage_state *stage_state)
{
struct gl_context *ctx = &brw->ctx;
const struct brw_stage_prog_data *prog_data =
gl_constant_value *param = (gl_constant_value*)
- brw_state_batch(brw, type, ALIGN(cs_prog_data->push.total.size, 64),
+ brw_state_batch(brw, ALIGN(cs_prog_data->push.total.size, 64),
64, &stage_state->push_const_offset);
assert(param);
_mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_COMPUTE);
brw_upload_cs_push_constants(brw, &cp->program, cs_prog_data,
- stage_state, AUB_TRACE_WM_CONSTANTS);
+ stage_state);
}
}
/* BRW_NEW_TES_PROG_DATA */
const struct brw_stage_prog_data *prog_data = brw->tes.base.prog_data;
_mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_TESS_EVAL);
- gen6_upload_push_constants(brw, &tep->program, prog_data, stage_state,
- AUB_TRACE_VS_CONSTANTS);
+ gen6_upload_push_constants(brw, &tep->program, prog_data, stage_state);
}
gen7_upload_constant_state(brw, stage_state, tep, _3DSTATE_CONSTANT_DS);
const struct brw_stage_prog_data *prog_data = brw->tcs.base.prog_data;
_mesa_shader_write_subroutine_indices(&brw->ctx, MESA_SHADER_TESS_CTRL);
- gen6_upload_push_constants(brw, &tcp->program, prog_data, stage_state,
- AUB_TRACE_VS_CONSTANTS);
+ gen6_upload_push_constants(brw, &tcp->program, prog_data, stage_state);
}
gen7_upload_constant_state(brw, stage_state, active, _3DSTATE_CONSTANT_HS);
const uint32_t fb_width = _mesa_geometric_width(ctx->DrawBuffer);
const uint32_t fb_height = _mesa_geometric_height(ctx->DrawBuffer);
- vp = brw_state_batch(brw, AUB_TRACE_SF_VP_STATE,
+ vp = brw_state_batch(brw,
sizeof(*vp) * viewport_count, 64,
&brw->sf.vp_offset);
/* Also assign to clip.vp_offset in case something uses it. */
* depth buffer's corresponding state for all render target surfaces,
* including null.
*/
- uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 8 * 4, 32,
- out_offset);
+ uint32_t *surf = brw_state_batch(brw, 8 * 4, 32, out_offset);
memset(surf, 0, 8 * 4);
/* From the Ivybridge PRM, Volume 4, Part 1, page 65,
nr_draw_buffers = 1;
int size = 4 + 8 * nr_draw_buffers;
- uint32_t *blend = brw_state_batch(brw, AUB_TRACE_BLEND_STATE,
- size, 64, &brw->cc.blend_state_offset);
+ uint32_t *blend =
+ brw_state_batch(brw, size, 64, &brw->cc.blend_state_offset);
memset(blend, 0, size);
/* OpenGL specification 3.3 (page 196), section 4.1.3 says:
#include "isl/isl.h"
static uint32_t *
-gen8_allocate_surface_state(struct brw_context *brw,
- uint32_t *out_offset, int index)
+gen8_allocate_surface_state(struct brw_context *brw, uint32_t *out_offset)
{
- uint32_t *surf = __brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
- 64, 64, index, out_offset);
+ uint32_t *surf = brw_state_batch(brw, 64, 64, out_offset);
memset(surf, 0, 64);
return surf;
}
unsigned samples,
uint32_t *out_offset)
{
- uint32_t *surf = gen8_allocate_surface_state(brw, out_offset, -1);
+ uint32_t *surf = gen8_allocate_surface_state(brw, out_offset);
surf[0] = BRW_SURFACE_NULL << BRW_SURFACE_TYPE_SHIFT |
ISL_FORMAT_B8G8R8A8_UNORM << BRW_SURFACE_FORMAT_SHIFT |
const uint32_t fb_width = _mesa_geometric_width(ctx->DrawBuffer);
const uint32_t fb_height = _mesa_geometric_height(ctx->DrawBuffer);
- float *vp = brw_state_batch(brw, AUB_TRACE_SF_VP_STATE,
+ float *vp = brw_state_batch(brw,
16 * 4 * viewport_count,
64, &brw->sf.vp_offset);
/* Also assign to clip.vp_offset in case something uses it. */
static void *
blorp_alloc_dynamic_state(struct blorp_batch *batch,
- enum aub_state_struct_type type,
uint32_t size,
uint32_t alignment,
uint32_t *offset)
assert(batch->blorp->driver_ctx == batch->driver_batch);
struct brw_context *brw = batch->driver_batch;
- return brw_state_batch(brw, type, size, alignment, offset);
+ return brw_state_batch(brw, size, alignment, offset);
}
static void
assert(batch->blorp->driver_ctx == batch->driver_batch);
struct brw_context *brw = batch->driver_batch;
- uint32_t *bt_map = brw_state_batch(brw, AUB_TRACE_BINDING_TABLE,
+ uint32_t *bt_map = brw_state_batch(brw,
num_entries * sizeof(uint32_t), 32,
bt_offset);
for (unsigned i = 0; i < num_entries; i++) {
- surface_maps[i] = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
+ surface_maps[i] = brw_state_batch(brw,
state_size, state_alignment,
&(surface_offsets)[i]);
bt_map[i] = surface_offsets[i];
struct brw_context *brw = batch->driver_batch;
uint32_t offset;
- void *data = brw_state_batch(brw, AUB_TRACE_VERTEX_BUFFER,
- size, 32, &offset);
+ void *data = brw_state_batch(brw, size, 32, &offset);
*addr = (struct blorp_address) {
.buffer = brw->batch.bo,