#define emit( intel, state, size ) \
- intel_batchbuffer_data(intel, state, size, false)
+ intel_batchbuffer_data(intel, state, size)
static GLuint
get_dirty(struct i830_hw_state *state)
* batchbuffer fills up.
*/
intel_batchbuffer_require_space(intel,
- get_state_size(state) + INTEL_PRIM_EMIT_SIZE,
- false);
+ get_state_size(state) + INTEL_PRIM_EMIT_SIZE);
count = 0;
again:
aper_count = 0;
#define emit(intel, state, size ) \
- intel_batchbuffer_data(intel, state, size, false)
+ intel_batchbuffer_data(intel, state, size)
static GLuint
get_dirty(struct i915_hw_state *state)
* batchbuffer fills up.
*/
intel_batchbuffer_require_space(intel,
- get_state_size(state) + INTEL_PRIM_EMIT_SIZE,
- false);
+ get_state_size(state) + INTEL_PRIM_EMIT_SIZE);
count = 0;
again:
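/* A minimal usage sketch of the simplified emit() macro, assuming
 * state->Ctx is one of the cached packet arrays as in i915_vtbl.c; with
 * the i965 split this driver only covers gen2-5, which have a single
 * ring, so callers no longer pass an is_blit flag: */
emit(intel, state->Ctx, sizeof(state->Ctx));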
if (intel->batch.bo == NULL) {
intel->batch.reserved_space = BATCH_RESERVED;
intel->batch.state_batch_offset = intel->batch.bo->size;
intel->batch.used = 0;
- intel->batch.needs_sol_reset = false;
}
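/* Background, grounded in the lines removed above: needs_sol_reset only
 * existed to set I915_EXEC_GEN7_SOL_RESET, the gen7 execbuffer flag that
 * zeroes the transform-feedback write offsets; no gen2-5 part supports
 * it, so the reset path no longer has to clear the field. */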
void
}
if (!intel->intelScreen->no_hw) {
- int flags = I915_EXEC_RENDER;
- if (batch->needs_sol_reset)
- flags |= I915_EXEC_GEN7_SOL_RESET;
-
if (ret == 0) {
if (unlikely(INTEL_DEBUG & DEBUG_AUB) && intel->vtbl.annotate_aub)
intel->vtbl.annotate_aub(intel);
- if (intel->hw_ctx == NULL || batch->is_blit) {
- ret = drm_intel_bo_mrb_exec(batch->bo, 4 * batch->used, NULL, 0, 0,
- flags);
- } else {
- ret = drm_intel_gem_bo_context_exec(batch->bo, intel->hw_ctx,
- 4 * batch->used, flags);
- }
+ ret = drm_intel_bo_mrb_exec(batch->bo, 4 * batch->used, NULL, 0, 0,
+ I915_EXEC_RENDER);
}
}
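/* For reference, the libdrm call kept above is
 * drm_intel_bo_mrb_exec(bo, used_bytes, cliprects, num_cliprects, DR4,
 * flags): batch->used counts DWORDs, hence 4 * batch->used; the NULL/0/0
 * are the legacy cliprect arguments, and I915_EXEC_RENDER selects the
 * only ring gen2-5 hardware has. */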
void
intel_batchbuffer_data(struct intel_context *intel,
- const void *data, GLuint bytes, bool is_blit)
+ const void *data, GLuint bytes)
{
assert((bytes & 3) == 0);
- intel_batchbuffer_require_space(intel, bytes, is_blit);
+ intel_batchbuffer_require_space(intel, bytes);
__memcpy(intel->batch.map + intel->batch.used, data, bytes);
intel->batch.used += bytes >> 2;
}
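/* A hypothetical caller, illustrating the contract asserted above:
 * payloads must be a whole number of DWORDs, since used advances by
 * bytes >> 2. */
uint32_t payload[2] = { 0, 0 };
intel_batchbuffer_data(intel, payload, sizeof(payload));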
* intel_buffer_dword() calls.
*/
void intel_batchbuffer_data(struct intel_context *intel,
- const void *data, GLuint bytes, bool is_blit);
+ const void *data, GLuint bytes);
bool intel_batchbuffer_emit_reloc(struct intel_context *intel,
drm_intel_bo *buffer,
static INLINE void
intel_batchbuffer_require_space(struct intel_context *intel,
- GLuint sz, int is_blit)
+ GLuint sz)
{
-
- if (intel->gen >= 6 &&
- intel->batch.is_blit != is_blit && intel->batch.used) {
- intel_batchbuffer_flush(intel);
- }
-
- intel->batch.is_blit = is_blit;
-
#ifdef DEBUG
assert(sz < intel->maxBatchSize - BATCH_RESERVED);
#endif
}
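/* The deleted guard only mattered on gen6+, where blit commands execute
 * on a separate ring and mixing them with render commands in one batch
 * forced a flush; on gen2-5 everything shares a single ring, so a plain
 * size check is enough here. */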
static INLINE void
-intel_batchbuffer_begin(struct intel_context *intel, int n, bool is_blit)
+intel_batchbuffer_begin(struct intel_context *intel, int n)
{
- intel_batchbuffer_require_space(intel, n * 4, is_blit);
+ intel_batchbuffer_require_space(intel, n * 4);
intel->batch.emit = intel->batch.used;
#ifdef DEBUG
*/
#define BATCH_LOCALS
-#define BEGIN_BATCH(n) intel_batchbuffer_begin(intel, n, false)
-#define BEGIN_BATCH_BLT(n) intel_batchbuffer_begin(intel, n, true)
+#define BEGIN_BATCH(n) intel_batchbuffer_begin(intel, n)
#define OUT_BATCH(d) intel_batchbuffer_emit_dword(intel, d)
#define OUT_BATCH_F(f) intel_batchbuffer_emit_float(intel,f)
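/* A minimal sketch of the remaining emit pattern; MI_FLUSH is assumed to
 * be the single-DWORD flush command from intel_reg.h: */
BEGIN_BATCH(1);
OUT_BATCH(MI_FLUSH);
ADVANCE_BATCH();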
#define OUT_RELOC(buf, read_domains, write_domain, delta) do { \
}
}
-/**
- * Emits the packet for switching the blitter from X to Y tiled or back.
- *
- * This has to be called in a single BEGIN_BATCH_BLT_TILED() /
- * ADVANCE_BATCH_TILED(). This is because BCS_SWCTRL is saved and restored as
- * part of the power context, not a render context, and if the batchbuffer was
- * to get flushed between setting and blitting, or blitting and restoring, our
- * tiling state would leak into other unsuspecting applications (like the X
- * server).
- */
-static void
-set_blitter_tiling(struct intel_context *intel,
- bool dst_y_tiled, bool src_y_tiled)
-{
- assert(intel->gen >= 6);
-
- /* Idle the blitter before we update how tiling is interpreted. */
- OUT_BATCH(MI_FLUSH_DW);
- OUT_BATCH(0);
- OUT_BATCH(0);
- OUT_BATCH(0);
-
- OUT_BATCH(MI_LOAD_REGISTER_IMM | (3 - 2));
- OUT_BATCH(BCS_SWCTRL);
- OUT_BATCH((BCS_SWCTRL_DST_Y | BCS_SWCTRL_SRC_Y) << 16 |
- (dst_y_tiled ? BCS_SWCTRL_DST_Y : 0) |
- (src_y_tiled ? BCS_SWCTRL_SRC_Y : 0));
-}
-
-#define BEGIN_BATCH_BLT_TILED(n, dst_y_tiled, src_y_tiled) do { \
- BEGIN_BATCH_BLT(n + ((dst_y_tiled || src_y_tiled) ? 14 : 0)); \
- if (dst_y_tiled || src_y_tiled) \
- set_blitter_tiling(intel, dst_y_tiled, src_y_tiled); \
- } while (0)
-
-#define ADVANCE_BATCH_TILED(dst_y_tiled, src_y_tiled) do { \
- if (dst_y_tiled || src_y_tiled) \
- set_blitter_tiling(intel, false, false); \
- ADVANCE_BATCH(); \
- } while (0)
-
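/* Dropping the block above is safe because it could only run on gen6+
 * (note its assert); with those parts served by the i965 driver, Y-tiled
 * blit sources and destinations are simply rejected, as the hunk below
 * shows. */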
/**
* Implements a rectangular block transfer (blit) of pixels between two
* miptrees.
if (src_offset & 4095)
return false;
}
- if ((dst_y_tiled || src_y_tiled) && intel->gen < 6)
+ if (dst_y_tiled || src_y_tiled)
return false;
/* do space check before going any further */
if (pass >= 2)
return false;
- intel_batchbuffer_require_space(intel, 8 * 4, true);
+ intel_batchbuffer_require_space(intel, 8 * 4);
DBG("%s src:buf(%p)/%d+%d %d,%d dst:buf(%p)/%d+%d %d,%d sz:%dx%d\n",
__FUNCTION__,
src_buffer, src_pitch, src_offset, src_x, src_y,
assert(dst_x < dst_x2);
assert(dst_y < dst_y2);
- BEGIN_BATCH_BLT_TILED(8, dst_y_tiled, src_y_tiled);
+ BEGIN_BATCH(8);
OUT_BATCH(CMD | (8 - 2));
OUT_BATCH(BR13 | (uint16_t)dst_pitch);
I915_GEM_DOMAIN_RENDER, 0,
src_offset);
- ADVANCE_BATCH_TILED(dst_y_tiled, src_y_tiled);
+ ADVANCE_BATCH();
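/* Blitter packet headers encode "total DWORDs minus 2" in their length
 * field, which is why the 8-DWORD copy above is emitted as CMD | (8 - 2). */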
intel_batchbuffer_emit_mi_flush(intel);
intel_batchbuffer_flush(intel);
}
- BEGIN_BATCH_BLT(6);
+ BEGIN_BATCH(6);
OUT_BATCH(CMD | (6 - 2));
OUT_BATCH(BR13);
OUT_BATCH((y1 << 16) | x1);
intel_batchbuffer_require_space(intel,
(8 * 4) +
(3 * 4) +
- dwords * 4, true);
+ dwords * 4);
opcode = XY_SETUP_BLT_CMD;
if (cpp == 4)
if (dst_tiling != I915_TILING_NONE)
blit_cmd |= XY_DST_TILED;
- BEGIN_BATCH_BLT(8 + 3);
+ BEGIN_BATCH(8 + 3);
OUT_BATCH(opcode | (8 - 2));
OUT_BATCH(br13);
OUT_BATCH((0 << 16) | 0); /* clip x1, y1 */
OUT_BATCH(((y + h) << 16) | (x + w));
ADVANCE_BATCH();
- intel_batchbuffer_data(intel, src_bits, dwords * 4, true);
+ intel_batchbuffer_data(intel, src_bits, dwords * 4);
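/* dwords * 4 keeps the immediate payload DWORD-aligned, satisfying the
 * (bytes & 3) == 0 assertion in intel_batchbuffer_data(). */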
intel_batchbuffer_emit_mi_flush(intel);
intel_batchbuffer_flush(intel);
}
- bool dst_y_tiled = region->tiling == I915_TILING_Y;
-
- BEGIN_BATCH_BLT_TILED(6, dst_y_tiled, false);
+ BEGIN_BATCH(6);
OUT_BATCH(CMD | (6 - 2));
OUT_BATCH(BR13);
OUT_BATCH((y << 16) | x);
I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
0);
OUT_BATCH(0xffffffff); /* white, but only alpha gets written */
- ADVANCE_BATCH_TILED(dst_y_tiled, false);
+ ADVANCE_BATCH();
intel_batchbuffer_emit_mi_flush(intel);
}
#define BATCH_SZ (8192*sizeof(uint32_t))
uint32_t state_batch_offset;
- bool is_blit;
- bool needs_sol_reset;
};
/**
bool is_945;
bool has_swizzling;
- drm_intel_context *hw_ctx;
-
struct intel_batchbuffer batch;
drm_intel_bo *first_post_swapbuffers_batch;
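/* hw_ctx held the gen6+ hardware context handle passed to
 * drm_intel_gem_bo_context_exec() in the removed submission path; the
 * kernel does not expose hardware contexts on gen2-5, so the field goes
 * away with that path. */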