* data with different formats, which blorp does for stencil and depth
* data.
*/
- intel_batchbuffer_emit_mi_flush(brw);
+ brw_emit_mi_flush(brw);
retry:
intel_batchbuffer_require_space(brw, estimated_max_batch_usage, RENDER_RING);
/* Flush the sampler cache so any texturing from the destination is
* coherent.
*/
- intel_batchbuffer_emit_mi_flush(brw);
+ brw_emit_mi_flush(brw);
}
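The blorp call sites above follow the pattern used throughout this patch: emit a full flush before the operation so prior rendering is visible to it, and another afterwards so its results (here, the sampler cache) are coherent for later use. A minimal usage sketch with the renamed helper, assuming the driver's existing headers; the caller name is hypothetical:

    /* Hypothetical caller illustrating the flush-around-operation pattern
     * used at the call sites in this patch. */
    static void
    do_coherent_operation(struct brw_context *brw)
    {
       brw_emit_mi_flush(brw);   /* make prior rendering visible to the op */

       /* ... emit the blorp/blit/meta operation here ... */

       brw_emit_mi_flush(brw);   /* make the op's results visible to later texturing */
    }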
brw_hiz_op_params::brw_hiz_op_params(struct intel_mipmap_tree *mt,
* must be issued before the rectangle primitive used for the depth
* buffer clear operation.
*/
- intel_batchbuffer_emit_mi_flush(brw);
+ brw_emit_mi_flush(brw);
if (fb->MaxNumLayers > 0) {
for (unsigned layer = 0; layer < depth_irb->layer_count; layer++) {
* by a PIPE_CONTROL command with DEPTH_STALL bit set and Then
* followed by Depth FLUSH'
*/
- intel_batchbuffer_emit_mi_flush(brw);
+ brw_emit_mi_flush(brw);
}
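The quoted requirement maps onto a depth-stall PIPE_CONTROL followed by a depth cache flush; this hunk satisfies it with the heavier brw_emit_mi_flush(). A minimal sketch of just the mandated sequence, assuming the driver's existing brw_emit_pipe_control_flush() helper and PIPE_CONTROL_DEPTH_* flags:

    /* Sketch of the minimal sequence the PRM quote asks for after a depth
     * buffer clear; the patch uses a full MI flush instead. */
    brw_emit_pipe_control_flush(brw, PIPE_CONTROL_DEPTH_STALL);
    brw_emit_pipe_control_flush(brw, PIPE_CONTROL_DEPTH_CACHE_FLUSH);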
/* Now, the HiZ buffer contains data that needs to be resolved to the depth
void brw_emit_pipe_control_write(struct brw_context *brw, uint32_t flags,
drm_intel_bo *bo, uint32_t offset,
uint32_t imm_lower, uint32_t imm_upper);
-void intel_batchbuffer_emit_mi_flush(struct brw_context *brw);
-void intel_emit_post_sync_nonzero_flush(struct brw_context *brw);
-void intel_emit_depth_stall_flushes(struct brw_context *brw);
+void brw_emit_mi_flush(struct brw_context *brw);
+void brw_emit_post_sync_nonzero_flush(struct brw_context *brw);
+void brw_emit_depth_stall_flushes(struct brw_context *brw);
void gen7_emit_vs_workaround_flush(struct brw_context *brw);
void gen7_emit_cs_stall_flush(struct brw_context *brw);
* the GPU besides the draw code.
*/
if (brw->always_flush_cache) {
- intel_batchbuffer_emit_mi_flush(brw);
+ brw_emit_mi_flush(brw);
}
/* If indirect, emit a bunch of loads from the indirect BO. */
ADVANCE_BATCH();
if (brw->always_flush_cache) {
- intel_batchbuffer_emit_mi_flush(brw);
+ brw_emit_mi_flush(brw);
}
}
* write-flush must be issued before sending any DRAW commands on that
* render target.
*/
- intel_batchbuffer_emit_mi_flush(brw);
+ brw_emit_mi_flush(brw);
/* If we had to fall back to plain clear for any buffers, clear those now
* by calling into meta.
GLuint fbo, rbo;
struct rect rect;
- intel_batchbuffer_emit_mi_flush(brw);
+ brw_emit_mi_flush(brw);
_mesa_meta_begin(ctx, MESA_META_ALL);
.mirror_x = mirror_x, .mirror_y = mirror_y };
adjust_mip_level(dst_mt, dst_irb->mt_level, dst_irb->mt_layer, &dims);
- intel_batchbuffer_emit_mi_flush(brw);
+ brw_emit_mi_flush(brw);
_mesa_meta_begin(ctx, MESA_META_ALL);
brw_meta_stencil_blit(brw,
dst_mt, dst_irb->mt_level, dst_irb->mt_layer, &dims);
- intel_batchbuffer_emit_mi_flush(brw);
+ brw_emit_mi_flush(brw);
}
void
if (dst->stencil_mt)
dst = dst->stencil_mt;
- intel_batchbuffer_emit_mi_flush(brw);
+ brw_emit_mi_flush(brw);
_mesa_meta_begin(ctx, MESA_META_ALL);
_mesa_GenFramebuffers(1, &fbo);
GL_RENDERBUFFER, rbo);
brw_meta_stencil_blit(brw, dst, 0, 0, &dims);
- intel_batchbuffer_emit_mi_flush(brw);
+ brw_emit_mi_flush(brw);
_mesa_DeleteRenderbuffers(1, &rbo);
_mesa_DeleteFramebuffers(1, &fbo);
blit_bit = GL_COLOR_BUFFER_BIT;
}
- intel_batchbuffer_emit_mi_flush(brw);
+ brw_emit_mi_flush(brw);
_mesa_meta_begin(ctx, MESA_META_ALL);
_mesa_GenFramebuffers(2, fbos);
_mesa_meta_end(ctx);
- intel_batchbuffer_emit_mi_flush(brw);
+ brw_emit_mi_flush(brw);
}
* non-pipelined state that will need the PIPE_CONTROL workaround.
*/
if (brw->gen == 6) {
- intel_emit_depth_stall_flushes(brw);
+ brw_emit_depth_stall_flushes(brw);
}
unsigned int len;
const int group = PIPELINE_STATS_COUNTERS;
const int num_counters = ctx->PerfMonitor.Groups[group].NumCounters;
- intel_batchbuffer_emit_mi_flush(brw);
+ brw_emit_mi_flush(brw);
for (int i = 0; i < num_counters; i++) {
if (BITSET_TEST(monitor->base.ActiveCounters[group], i)) {
* The amount of batch space it takes to emit an MI_REPORT_PERF_COUNT snapshot,
* including the required PIPE_CONTROL flushes.
*
- * Sandybridge is the worst case scenario: intel_batchbuffer_emit_mi_flush
+ * Sandybridge is the worst case scenario: brw_emit_mi_flush
* expands to three PIPE_CONTROLs which are 4 DWords each. We have to flush
* before and after MI_REPORT_PERF_COUNT, so multiply by two. Finally, add
* the 3 DWords for MI_REPORT_PERF_COUNT itself.
int batch_used = brw->batch.used;
/* Reports apparently don't always get written unless we flush first. */
- intel_batchbuffer_emit_mi_flush(brw);
+ brw_emit_mi_flush(brw);
if (brw->gen == 5) {
/* Ironlake requires two MI_REPORT_PERF_COUNT commands to write all
}
/* Reports apparently don't always get written unless we flush after. */
- intel_batchbuffer_emit_mi_flush(brw);
+ brw_emit_mi_flush(brw);
(void) batch_used;
assert(brw->batch.used - batch_used <= MI_REPORT_PERF_COUNT_BATCH_DWORDS * 4);
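As a sanity check on the budget described in the comment above, the Sandybridge worst case works out to 2 flushes × 3 PIPE_CONTROLs × 4 DWords + 3 DWords = 27 DWords. The definition below simply restates that arithmetic; the real macro is defined earlier in this file:

    /* Worst case (Sandybridge): flush before and after the snapshot, each
     * flush expanding to three 4-DWord PIPE_CONTROLs, plus the 3-DWord
     * MI_REPORT_PERF_COUNT itself: 2 * (3 * 4) + 3 = 27 DWords. */
    #define MI_REPORT_PERF_COUNT_BATCH_DWORDS (2 * (3 * 4) + 3)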
* already flushed (e.g., via a preceding MI_FLUSH).
*/
void
-intel_emit_depth_stall_flushes(struct brw_context *brw)
+brw_emit_depth_stall_flushes(struct brw_context *brw)
{
assert(brw->gen >= 6 && brw->gen <= 9);
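For context, the body of this helper (only its name changes here) is roughly the depth-stall / depth-cache-flush / depth-stall triple the hardware documentation requires before depth buffer state changes; a sketch using the existing pipe-control helper:

    /* Approximate body of brw_emit_depth_stall_flushes(): a depth stall,
     * then a depth cache flush, then another depth stall. */
    brw_emit_pipe_control_flush(brw, PIPE_CONTROL_DEPTH_STALL);
    brw_emit_pipe_control_flush(brw, PIPE_CONTROL_DEPTH_CACHE_FLUSH);
    brw_emit_pipe_control_flush(brw, PIPE_CONTROL_DEPTH_STALL);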
* really our business. That leaves only stall at scoreboard.
*/
void
-intel_emit_post_sync_nonzero_flush(struct brw_context *brw)
+brw_emit_post_sync_nonzero_flush(struct brw_context *brw)
{
brw_emit_pipe_control_flush(brw,
PIPE_CONTROL_CS_STALL |
* This is also used for the always_flush_cache driconf debug option.
*/
void
-intel_batchbuffer_emit_mi_flush(struct brw_context *brw)
+brw_emit_mi_flush(struct brw_context *brw)
{
if (brw->batch.ring == BLT_RING && brw->gen >= 6) {
BEGIN_BATCH_BLT(4);
* Flush Enable =1, a PIPE_CONTROL with any non-zero
* post-sync-op is required.
*/
- intel_emit_post_sync_nonzero_flush(brw);
+ brw_emit_post_sync_nonzero_flush(brw);
}
}
brw_emit_pipe_control_flush(brw, flags);
return;
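The gen6 workaround helper renamed above issues a CS-stall/scoreboard-stall PIPE_CONTROL and then a second PIPE_CONTROL with a non-zero post-sync operation, which is what the quoted B-Spec requirement asks for. A rough sketch of the body, assuming a scratch/workaround BO on the context (the field name is an assumption here):

    /* Rough sketch of brw_emit_post_sync_nonzero_flush(). */
    brw_emit_pipe_control_flush(brw,
                                PIPE_CONTROL_CS_STALL |
                                PIPE_CONTROL_STALL_AT_SCOREBOARD);

    /* Any non-zero post-sync op satisfies the requirement; write an
     * immediate zero to a scratch BO (field name assumed). */
    brw_emit_pipe_control_write(brw, PIPE_CONTROL_WRITE_IMMEDIATE,
                                brw->workaround_bo, 0, 0, 0);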
if (brw->gen == 6)
- intel_emit_post_sync_nonzero_flush(brw);
+ brw_emit_post_sync_nonzero_flush(brw);
brw_upload_invariant_state(brw);
/* Emit Sandybridge workaround flushes on every primitive, for safety. */
if (brw->gen == 6)
- intel_emit_post_sync_nonzero_flush(brw);
+ brw_emit_post_sync_nonzero_flush(brw);
brw_upload_programs(brw, pipeline);
merge_ctx_state(brw, &state);
/* 3DSTATE_DEPTH_BUFFER */
{
- intel_emit_depth_stall_flushes(brw);
+ brw_emit_depth_stall_flushes(brw);
BEGIN_BATCH(7);
/* 3DSTATE_DEPTH_BUFFER dw0 */
gen6_blorp_emit_depth_disable(struct brw_context *brw,
const brw_blorp_params *params)
{
- intel_emit_depth_stall_flushes(brw);
+ brw_emit_depth_stall_flushes(brw);
BEGIN_BATCH(7);
OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (7 - 2));
uint32_t prog_offset = params->get_wm_prog(brw, &prog_data);
/* Emit workaround flushes when we switch from drawing to blorping. */
- intel_emit_post_sync_nonzero_flush(brw);
+ brw_emit_post_sync_nonzero_flush(brw);
gen6_emit_3dstate_multisample(brw, params->dst.num_samples);
gen6_emit_3dstate_sample_mask(brw,
*/
bool enable_hiz_ss = hiz || separate_stencil;
- intel_emit_depth_stall_flushes(brw);
+ brw_emit_depth_stall_flushes(brw);
irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
if (!irb)
write_primitives_generated(struct brw_context *brw,
drm_intel_bo *query_bo, int stream, int idx)
{
- intel_batchbuffer_emit_mi_flush(brw);
+ brw_emit_mi_flush(brw);
if (brw->gen >= 7 && stream > 0) {
brw_store_register_mem64(brw, query_bo,
write_xfb_primitives_written(struct brw_context *brw,
drm_intel_bo *bo, int stream, int idx)
{
- intel_batchbuffer_emit_mi_flush(brw);
+ brw_emit_mi_flush(brw);
if (brw->gen >= 7) {
brw_store_register_mem64(brw, bo, GEN7_SO_NUM_PRIMS_WRITTEN(stream), idx);
/* Emit a flush to make sure various parts of the pipeline are complete and
* we get an accurate value
*/
- intel_batchbuffer_emit_mi_flush(brw);
+ brw_emit_mi_flush(brw);
brw_store_register_mem64(brw, bo, reg, idx);
}
* simplicity, just do a full flush.
*/
struct brw_context *brw = brw_context(ctx);
- intel_batchbuffer_emit_mi_flush(brw);
+ brw_emit_mi_flush(brw);
}
* a workaround.
*/
if (brw->urb.gs_present && !gs_present)
- intel_batchbuffer_emit_mi_flush(brw);
+ brw_emit_mi_flush(brw);
brw->urb.gs_present = gs_present;
}
/* 3DSTATE_DEPTH_BUFFER */
{
- intel_emit_depth_stall_flushes(brw);
+ brw_emit_depth_stall_flushes(brw);
BEGIN_BATCH(7);
OUT_BATCH(GEN7_3DSTATE_DEPTH_BUFFER << 16 | (7 - 2));
static void
gen7_blorp_emit_depth_disable(struct brw_context *brw)
{
- intel_emit_depth_stall_flushes(brw);
+ brw_emit_depth_stall_flushes(brw);
BEGIN_BATCH(7);
OUT_BATCH(GEN7_3DSTATE_DEPTH_BUFFER << 16 | (7 - 2));
return;
}
- intel_emit_depth_stall_flushes(brw);
+ brw_emit_depth_stall_flushes(brw);
irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
if (!irb)
}
/* Flush any drawing so that the counters have the right values. */
- intel_batchbuffer_emit_mi_flush(brw);
+ brw_emit_mi_flush(brw);
/* Emit MI_STORE_REGISTER_MEM commands to write the values. */
for (int i = 0; i < streams; i++) {
(struct brw_transform_feedback_object *) obj;
/* Flush any drawing so that the counters have the right values. */
- intel_batchbuffer_emit_mi_flush(brw);
+ brw_emit_mi_flush(brw);
/* Save the SOL buffer offset register values. */
if (brw->gen < 8) {
return;
}
- intel_emit_depth_stall_flushes(brw);
+ brw_emit_depth_stall_flushes(brw);
/* _NEW_BUFFERS, _NEW_DEPTH, _NEW_STENCIL */
BEGIN_BATCH(8);
ADVANCE_BATCH_TILED(dst_y_tiled, src_y_tiled);
- intel_batchbuffer_emit_mi_flush(brw);
+ brw_emit_mi_flush(brw);
return true;
}
intel_batchbuffer_data(brw, src_bits, dwords * 4, BLT_RING);
- intel_batchbuffer_emit_mi_flush(brw);
+ brw_emit_mi_flush(brw);
return true;
}
OUT_BATCH(0xffffffff); /* white, but only alpha gets written */
ADVANCE_BATCH_TILED(dst_y_tiled, false);
- intel_batchbuffer_emit_mi_flush(brw);
+ brw_emit_mi_flush(brw);
}
* flush. Once again, we wish for a domain tracker in libdrm to cover
* usage inside of a batchbuffer.
*/
- intel_batchbuffer_emit_mi_flush(brw);
+ brw_emit_mi_flush(brw);
drm_intel_bo_unreference(intel_obj->range_map_bo[index]);
intel_obj->range_map_bo[index] = NULL;
* flush. Once again, we wish for a domain tracker in libdrm to cover
* usage inside of a batchbuffer.
*/
- intel_batchbuffer_emit_mi_flush(brw);
+ brw_emit_mi_flush(brw);
}
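The "domain tracker" comments above explain why these sites flush unconditionally: within a single batch there is no record of whether the GPU has already written the BO. A hedged sketch of the cheaper check a call site could use instead, relying on libdrm's drm_intel_bo_references(); the helper name is hypothetical:

    /* Hypothetical helper: only flush if the current batch actually
     * references (and so may have written) this BO. */
    static void
    flush_if_batch_references(struct brw_context *brw, drm_intel_bo *bo)
    {
       if (drm_intel_bo_references(brw->batch.bo, bo))
          brw_emit_mi_flush(brw);
    }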
void
OUT_BATCH(expected_value);
ADVANCE_BATCH();
- intel_batchbuffer_emit_mi_flush(brw);
+ brw_emit_mi_flush(brw);
/* Save the register's value back to the buffer. */
BEGIN_BATCH(3);
OUT_BATCH(expected_value);
ADVANCE_BATCH();
- intel_batchbuffer_emit_mi_flush(brw);
+ brw_emit_mi_flush(brw);
/* Save the register's value back to the buffer. */
BEGIN_BATCH(3);
offset * sizeof(uint32_t));
ADVANCE_BATCH();
- intel_batchbuffer_emit_mi_flush(brw);
+ brw_emit_mi_flush(brw);
/* Set OACONTROL back to zero (everything off). */
BEGIN_BATCH(3);
if (!_mesa_set_search(brw->render_cache, bo))
return;
- intel_batchbuffer_emit_mi_flush(brw);
+ brw_emit_mi_flush(brw);
}
/**
* rendered to via a PBO at any point, so it seems better to just
* flush here unconditionally.
*/
- intel_batchbuffer_emit_mi_flush(brw);
+ brw_emit_mi_flush(brw);
return;
}
assert(!fence->batch_bo);
assert(!fence->signalled);
- intel_batchbuffer_emit_mi_flush(brw);
+ brw_emit_mi_flush(brw);
fence->batch_bo = brw->batch.bo;
drm_intel_bo_reference(fence->batch_bo);
intel_batchbuffer_flush(brw);
* See the related comment in intelReadPixels() for a more detailed
* explanation.
*/
- intel_batchbuffer_emit_mi_flush(brw);
+ brw_emit_mi_flush(brw);
return;
}