BEGIN_BATCH(7);
OUT_BATCH(_3DSTATE_PIPELINED_POINTERS << 16 | (7 - 2));
- OUT_RELOC(brw->batch.state_bo, 0, brw->vs.base.state_offset);
+ OUT_RELOC(brw->batch.state.bo, 0, brw->vs.base.state_offset);
if (brw->ff_gs.prog_active)
- OUT_RELOC(brw->batch.state_bo, 0, brw->ff_gs.state_offset | 1);
+ OUT_RELOC(brw->batch.state.bo, 0, brw->ff_gs.state_offset | 1);
else
OUT_BATCH(0);
- OUT_RELOC(brw->batch.state_bo, 0, brw->clip.state_offset | 1);
- OUT_RELOC(brw->batch.state_bo, 0, brw->sf.state_offset);
- OUT_RELOC(brw->batch.state_bo, 0, brw->wm.base.state_offset);
- OUT_RELOC(brw->batch.state_bo, 0, brw->cc.state_offset);
+ OUT_RELOC(brw->batch.state.bo, 0, brw->clip.state_offset | 1);
+ OUT_RELOC(brw->batch.state.bo, 0, brw->sf.state_offset);
+ OUT_RELOC(brw->batch.state.bo, 0, brw->wm.base.state_offset);
+ OUT_RELOC(brw->batch.state.bo, 0, brw->cc.state_offset);
ADVANCE_BATCH();
brw->ctx.NewDriverState |= BRW_NEW_PSP;
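Every hunk in this patch is the same mechanical rename: the batchbuffer's loose bo/map/cpu_map pointers are grouped into two sub-structs, one for the command buffer and one for the statebuffer. Judging purely by the accesses in the hunks below (brw->batch.batch.bo, brw->batch.state.map, and so on), the grouping amounts to something like the following sketch; the struct name here is illustrative, not taken from the header.

struct brw_growing_bo {
   struct brw_bo *bo;        /* the GEM buffer itself */
   uint32_t *map;            /* where commands/state get written */
   uint32_t *cpu_map;        /* malloc'd shadow copy on non-LLC systems,
                              * NULL when the bo is mapped directly */
};

struct intel_batchbuffer {
   struct brw_growing_bo batch;   /* command stream */
   struct brw_growing_bo state;   /* indirect state (surfaces, samplers, ...) */
   /* map_next, state_used, batch_relocs, state_relocs, ... stay as before */
};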
OUT_BATCH(0);
OUT_BATCH(mocs_wb << 16);
/* Surface state base address: */
- OUT_RELOC64(brw->batch.state_bo, 0, mocs_wb << 4 | 1);
+ OUT_RELOC64(brw->batch.state.bo, 0, mocs_wb << 4 | 1);
/* Dynamic state base address: */
- OUT_RELOC64(brw->batch.state_bo, 0, mocs_wb << 4 | 1);
+ OUT_RELOC64(brw->batch.state.bo, 0, mocs_wb << 4 | 1);
/* Indirect object base address: MEDIA_OBJECT data */
OUT_BATCH(mocs_wb << 4 | 1);
OUT_BATCH(0);
* BINDING_TABLE_STATE
* SURFACE_STATE
*/
- OUT_RELOC(brw->batch.state_bo, 0, 1);
+ OUT_RELOC(brw->batch.state.bo, 0, 1);
/* Dynamic state base address:
* SAMPLER_STATE
* SAMPLER_BORDER_COLOR_STATE
* Push constants (when INSTPM: CONSTANT_BUFFER Address Offset
* Disable is clear, which we rely on)
*/
- OUT_RELOC(brw->batch.state_bo, 0, 1);
+ OUT_RELOC(brw->batch.state.bo, 0, 1);
OUT_BATCH(1); /* Indirect object base address: MEDIA_OBJECT data */
BEGIN_BATCH(8);
OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (8 - 2));
OUT_BATCH(1); /* General state base address */
- OUT_RELOC(brw->batch.state_bo, 0, 1); /* Surface state base address */
+ OUT_RELOC(brw->batch.state.bo, 0, 1); /* Surface state base address */
OUT_BATCH(1); /* Indirect object base address */
OUT_RELOC(brw->cache.bo, 0, 1); /* Instruction base address */
OUT_BATCH(0xfffff001); /* General state upper bound */
BEGIN_BATCH(6);
OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (6 - 2));
OUT_BATCH(1); /* General state base address */
- OUT_RELOC(brw->batch.state_bo, 0, 1); /* Surface state base address */
+ OUT_RELOC(brw->batch.state.bo, 0, 1); /* Surface state base address */
OUT_BATCH(1); /* Indirect object base address */
OUT_BATCH(1); /* General state upper bound */
OUT_BATCH(1); /* Indirect object upper bound */
return address.offset + delta;
} else {
if (GEN_GEN < 6 && brw_ptr_in_state_buffer(batch, location)) {
- offset = (char *) location - (char *) brw->batch.state_map;
+ offset = (char *) location - (char *) brw->batch.state.map;
return brw_state_reloc(batch, offset, address.bo,
address.offset + delta,
address.reloc_flags);
assert(!brw_ptr_in_state_buffer(batch, location));
- offset = (char *) location - (char *) brw->batch.map;
+ offset = (char *) location - (char *) brw->batch.batch.map;
return brw_batch_reloc(batch, offset, address.bo,
address.offset + delta,
address.reloc_flags);
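This hunk routes a relocation to either the statebuffer's or the batchbuffer's reloc list depending on where the location pointer falls. The brw_ptr_in_state_buffer() check it relies on is presumably just a bounds test against the statebuffer mapping; a minimal sketch under that assumption:

static bool
brw_ptr_in_state_buffer(struct intel_batchbuffer *batch, void *p)
{
   /* True when p lies inside the statebuffer's CPU-visible mapping. */
   return (char *) p >= (char *) batch->state.map &&
          (char *) p < (char *) batch->state.map + batch->state.bo->size;
}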
clip.GuardbandClipTestEnable = true;
clip.ClipperViewportStatePointer =
- ro_bo(brw->batch.state_bo, brw->clip.vp_offset);
+ ro_bo(brw->batch.state.bo, brw->clip.vp_offset);
clip.ScreenSpaceViewportXMin = -1;
clip.ScreenSpaceViewportXMax = 1;
* domain.
*/
sf.SetupViewportStateOffset =
- ro_bo(brw->batch.state_bo, brw->sf.vp_offset);
+ ro_bo(brw->batch.state.bo, brw->sf.vp_offset);
sf.PointRasterizationRule = RASTRULE_UPPER_RIGHT;
if (stage_state->sampler_count)
wm.SamplerStatePointer =
- ro_bo(brw->batch.state_bo, stage_state->sampler_offset);
+ ro_bo(brw->batch.state.bo, stage_state->sampler_offset);
#if GEN_GEN == 5
if (wm_prog_data->prog_offset_2)
wm.GRFRegisterCount2 = wm_prog_data->reg_blocks_2;
vs.StatisticsEnable = false;
vs.SamplerStatePointer =
- ro_bo(brw->batch.state_bo, stage_state->sampler_offset);
+ ro_bo(brw->batch.state.bo, stage_state->sampler_offset);
#endif
#if GEN_GEN == 5
cc.StatisticsEnable = brw->stats_wm;
cc.CCViewportStatePointer =
- ro_bo(brw->batch.state_bo, brw->cc.vp_offset);
+ ro_bo(brw->batch.state.bo, brw->cc.vp_offset);
#else
/* _NEW_COLOR */
cc.BlendConstantColorRed = ctx->Color.BlendColorUnclamped[0];
}
#if GEN_GEN < 6
samp_st.BorderColorPointer =
- ro_bo(brw->batch.state_bo, border_color_offset);
+ ro_bo(brw->batch.state.bo, border_color_offset);
#else
samp_st.BorderColorPointer = border_color_offset;
#endif
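The ro_bo() calls in the hunks above wrap a buffer and an offset into the address value that the relocation path consumes (the address.bo / address.offset / address.reloc_flags fields seen earlier). Assuming that struct is named brw_address, which is a guess, the helper is presumably no more than:

static struct brw_address
ro_bo(struct brw_bo *bo, uint64_t offset)
{
   /* Read-only reference: no write flag requested in reloc_flags. */
   return (struct brw_address) { .bo = bo, .offset = offset };
}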
const struct gen_device_info *devinfo = &screen->devinfo;
if (!devinfo->has_llc) {
- batch->batch_cpu_map = malloc(BATCH_SZ);
- batch->map = batch->batch_cpu_map;
- batch->map_next = batch->map;
- batch->state_cpu_map = malloc(STATE_SZ);
- batch->state_map = batch->state_cpu_map;
+ batch->batch.cpu_map = malloc(BATCH_SZ);
+ batch->batch.map = batch->batch.cpu_map;
+ batch->map_next = batch->batch.map;
+ batch->state.cpu_map = malloc(STATE_SZ);
+ batch->state.map = batch->state.cpu_map;
}
init_reloc_list(&batch->batch_relocs, 250);
brw_bo_unreference(batch->last_bo);
batch->last_bo = NULL;
}
- batch->last_bo = batch->bo;
+ batch->last_bo = batch->batch.bo;
- batch->bo = brw_bo_alloc(bufmgr, "batchbuffer", BATCH_SZ, 4096);
- if (!batch->batch_cpu_map) {
- batch->map = brw_bo_map(brw, batch->bo, MAP_READ | MAP_WRITE);
+ batch->batch.bo = brw_bo_alloc(bufmgr, "batchbuffer", BATCH_SZ, 4096);
+ if (!batch->batch.cpu_map) {
+ batch->batch.map =
+ brw_bo_map(brw, batch->batch.bo, MAP_READ | MAP_WRITE);
}
- batch->map_next = batch->map;
+ batch->map_next = batch->batch.map;
- batch->state_bo = brw_bo_alloc(bufmgr, "statebuffer", STATE_SZ, 4096);
- batch->state_bo->kflags =
+ batch->state.bo = brw_bo_alloc(bufmgr, "statebuffer", STATE_SZ, 4096);
+ batch->state.bo->kflags =
can_do_exec_capture(screen) ? EXEC_OBJECT_CAPTURE : 0;
- if (!batch->state_cpu_map) {
- batch->state_map =
- brw_bo_map(brw, batch->state_bo, MAP_READ | MAP_WRITE);
+ if (!batch->state.cpu_map) {
+ batch->state.map =
+ brw_bo_map(brw, batch->state.bo, MAP_READ | MAP_WRITE);
}
/* Avoid making 0 a valid state offset - otherwise the decoder will try
*/
batch->state_used = 1;
- add_exec_bo(batch, batch->bo);
- assert(batch->bo->index == 0);
+ add_exec_bo(batch, batch->batch.bo);
+ assert(batch->batch.bo->index == 0);
batch->needs_sol_reset = false;
batch->state_base_address_emitted = false;
void
intel_batchbuffer_free(struct intel_batchbuffer *batch)
{
- free(batch->batch_cpu_map);
- free(batch->state_cpu_map);
+ free(batch->batch.cpu_map);
+ free(batch->state.cpu_map);
for (int i = 0; i < batch->exec_count; i++) {
brw_bo_unreference(batch->exec_bos[i]);
free(batch->validation_list);
brw_bo_unreference(batch->last_bo);
- brw_bo_unreference(batch->bo);
- brw_bo_unreference(batch->state_bo);
+ brw_bo_unreference(batch->batch.bo);
+ brw_bo_unreference(batch->state.bo);
if (batch->state_batch_sizes)
_mesa_hash_table_destroy(batch->state_batch_sizes, NULL);
}
const unsigned batch_used = USED_BATCH(*batch) * 4;
if (batch_used + sz >= BATCH_SZ && !batch->no_wrap) {
intel_batchbuffer_flush(brw);
- } else if (batch_used + sz >= batch->bo->size) {
+ } else if (batch_used + sz >= batch->batch.bo->size) {
const unsigned new_size =
- MIN2(batch->bo->size + batch->bo->size / 2, MAX_BATCH_SIZE);
- grow_buffer(brw, &batch->bo, &batch->map, &batch->batch_cpu_map,
- batch_used, new_size);
- batch->map_next = (void *) batch->map + batch_used;
- assert(batch_used + sz < batch->bo->size);
+ MIN2(batch->batch.bo->size + batch->batch.bo->size / 2,
+ MAX_BATCH_SIZE);
+ grow_buffer(brw, &batch->batch.bo, &batch->batch.map,
+ &batch->batch.cpu_map, batch_used, new_size);
+ batch->map_next = (void *) batch->batch.map + batch_used;
+ assert(batch_used + sz < batch->batch.bo->size);
}
/* The intel_batchbuffer_flush() calls above might have changed
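grow_buffer(), as called above, swaps in a larger bo and carries the already-emitted bytes over, keeping either the GEM mapping or the malloc'd shadow copy in sync. A heavily simplified sketch of that shape, assuming the argument order visible in the call sites and leaving out details such as fixing up the execbuf validation list:

static void
grow_buffer(struct brw_context *brw,
            struct brw_bo **bo_ptr, uint32_t **map_ptr, uint32_t **cpu_map_ptr,
            unsigned existing_bytes, unsigned new_size)
{
   struct brw_bo *new_bo =
      brw_bo_alloc(brw->bufmgr, "grown buffer", new_size, 4096);
   uint32_t *new_map;

   if (*cpu_map_ptr) {
      /* Non-LLC: grow the malloc'd shadow; the bo is only written at flush. */
      new_map = realloc(*cpu_map_ptr, new_size);
      *cpu_map_ptr = new_map;
   } else {
      /* LLC: map the new bo and copy over what has been emitted so far. */
      new_map = brw_bo_map(brw, new_bo, MAP_READ | MAP_WRITE);
      memcpy(new_map, *map_ptr, existing_bytes);
   }

   brw_bo_unreference(*bo_ptr);
   *bo_ptr = new_bo;
   *map_ptr = new_map;
}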
if (batch->ring != RENDER_RING)
return;
- uint32_t *batch_data = brw_bo_map(brw, batch->bo, MAP_READ);
- uint32_t *state = brw_bo_map(brw, batch->state_bo, MAP_READ);
+ uint32_t *batch_data = brw_bo_map(brw, batch->batch.bo, MAP_READ);
+ uint32_t *state = brw_bo_map(brw, batch->state.bo, MAP_READ);
if (batch_data == NULL || state == NULL) {
fprintf(stderr, "WARNING: failed to map batchbuffer/statebuffer\n");
return;
}
uint32_t *end = batch_data + USED_BATCH(*batch);
- uint32_t batch_gtt_offset = batch->bo->gtt_offset;
- uint32_t state_gtt_offset = batch->state_bo->gtt_offset;
+ uint32_t batch_gtt_offset = batch->batch.bo->gtt_offset;
+ uint32_t state_gtt_offset = batch->state.bo->gtt_offset;
int length;
bool color = INTEL_DEBUG & DEBUG_COLOR;
}
}
- brw_bo_unmap(batch->bo);
- brw_bo_unmap(batch->state_bo);
+ brw_bo_unmap(batch->batch.bo);
+ brw_bo_unmap(batch->state.bo);
}
#else
static void do_batch_dump(struct brw_context *brw) { }
brw->batch.exec_count = 0;
brw->batch.aperture_space = 0;
- brw_bo_unreference(brw->batch.state_bo);
+ brw_bo_unreference(brw->batch.state.bo);
/* Create a new batchbuffer and reset the associated state: */
intel_batchbuffer_reset_and_clear_render_cache(brw);
struct intel_batchbuffer *batch = &brw->batch;
int ret = 0;
- if (batch->batch_cpu_map) {
- void *bo_map = brw_bo_map(brw, batch->bo, MAP_WRITE);
- memcpy(bo_map, batch->batch_cpu_map, 4 * USED_BATCH(*batch));
+ if (batch->batch.cpu_map) {
+ void *bo_map = brw_bo_map(brw, batch->batch.bo, MAP_WRITE);
+ memcpy(bo_map, batch->batch.cpu_map, 4 * USED_BATCH(*batch));
}
- if (batch->state_cpu_map) {
- void *bo_map = brw_bo_map(brw, batch->state_bo, MAP_WRITE);
- memcpy(bo_map, batch->state_cpu_map, batch->state_used);
+ if (batch->state.cpu_map) {
+ void *bo_map = brw_bo_map(brw, batch->state.bo, MAP_WRITE);
+ memcpy(bo_map, batch->state.cpu_map, batch->state_used);
}
- brw_bo_unmap(batch->bo);
- brw_bo_unmap(batch->state_bo);
+ brw_bo_unmap(batch->batch.bo);
+ brw_bo_unmap(batch->state.bo);
if (!brw->screen->no_hw) {
/* The requirement for using I915_EXEC_NO_RELOC are:
uint32_t hw_ctx = batch->ring == RENDER_RING ? brw->hw_ctx : 0;
/* Set statebuffer relocations */
- const unsigned state_index = batch->state_bo->index;
+ const unsigned state_index = batch->state.bo->index;
if (state_index < batch->exec_count &&
- batch->exec_bos[state_index] == batch->state_bo) {
+ batch->exec_bos[state_index] == batch->state.bo) {
struct drm_i915_gem_exec_object2 *entry =
&batch->validation_list[state_index];
- assert(entry->handle == batch->state_bo->gem_handle);
+ assert(entry->handle == batch->state.bo->gem_handle);
entry->relocation_count = batch->state_relocs.reloc_count;
entry->relocs_ptr = (uintptr_t) batch->state_relocs.relocs;
}
/* Set batchbuffer relocations */
struct drm_i915_gem_exec_object2 *entry = &batch->validation_list[0];
- assert(entry->handle == batch->bo->gem_handle);
+ assert(entry->handle == batch->batch.bo->gem_handle);
entry->relocation_count = batch->batch_relocs.reloc_count;
entry->relocs_ptr = (uintptr_t) batch->batch_relocs.relocs;
intel_upload_finish(brw);
if (brw->throttle_batch[0] == NULL) {
- brw->throttle_batch[0] = brw->batch.bo;
+ brw->throttle_batch[0] = brw->batch.batch.bo;
brw_bo_reference(brw->throttle_batch[0]);
}
if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
fprintf(stderr, "waiting for idle\n");
- brw_bo_wait_rendering(brw->batch.bo);
+ brw_bo_wait_rendering(brw->batch.batch.bo);
}
/* Start a new batch buffer. */
struct brw_bo *target, uint32_t target_offset,
unsigned int reloc_flags)
{
- assert(batch_offset <= batch->bo->size - sizeof(uint32_t));
+ assert(batch_offset <= batch->batch.bo->size - sizeof(uint32_t));
return emit_reloc(batch, &batch->batch_relocs, batch_offset,
target, target_offset, reloc_flags);
struct brw_bo *target, uint32_t target_offset,
unsigned int reloc_flags)
{
- assert(state_offset <= batch->state_bo->size - sizeof(uint32_t));
+ assert(state_offset <= batch->state.bo->size - sizeof(uint32_t));
return emit_reloc(batch, &batch->state_relocs, state_offset,
target, target_offset, reloc_flags);
{
struct intel_batchbuffer *batch = &brw->batch;
- assert(size < batch->bo->size);
+ assert(size < batch->state.bo->size);
uint32_t offset = ALIGN(batch->state_used, alignment);
if (offset + size >= STATE_SZ && !batch->no_wrap) {
intel_batchbuffer_flush(brw);
offset = ALIGN(batch->state_used, alignment);
- } else if (offset + size >= batch->state_bo->size) {
+ } else if (offset + size >= batch->state.bo->size) {
const unsigned new_size =
- MIN2(batch->state_bo->size + batch->state_bo->size / 2,
+ MIN2(batch->state.bo->size + batch->state.bo->size / 2,
MAX_STATE_SIZE);
- grow_buffer(brw, &batch->state_bo, &batch->state_map,
- &batch->state_cpu_map, batch->state_used, new_size);
- assert(offset + size < batch->state_bo->size);
+ grow_buffer(brw, &batch->state.bo, &batch->state.map,
+ &batch->state.cpu_map, batch->state_used, new_size);
+ assert(offset + size < batch->state.bo->size);
}
if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
batch->state_used = offset + size;
*out_offset = offset;
- return batch->state_map + (offset >> 2);
+ return batch->state.map + (offset >> 2);
}
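For context, callers allocate pieces of indirect state through this function roughly like so; the size and alignment are illustrative:

uint32_t vp_offset;
uint32_t *vp = brw_state_batch(brw, 8 * sizeof(uint32_t), 32, &vp_offset);
/* Fill vp[0..7] with packed viewport state.  vp_offset can then be used with
 * ro_bo(brw->batch.state.bo, vp_offset) or emitted via OUT_RELOC() against
 * the statebuffer, as in the earlier hunks. */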
void