struct iris_screen *screen,
struct iris_vtable *vtbl,
struct pipe_debug_callback *dbg,
- struct iris_batch **all_batches,
- const char *name,
+ struct iris_batch *all_batches,
+ enum iris_batch_name name,
uint8_t engine)
{
batch->screen = screen;
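+ /* Gather pointers to every batch other than this one into other_batches. */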
memset(batch->other_batches, 0, sizeof(batch->other_batches));
for (int i = 0, j = 0; i < IRIS_BATCH_COUNT; i++) {
- if (all_batches[i] != batch)
- batch->other_batches[j++] = all_batches[i];
+ if (&all_batches[i] != batch)
+ batch->other_batches[j++] = &all_batches[i];
}
if (unlikely(INTEL_DEBUG)) {
return ret;
}
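+/* Map a batch name to a human-readable string, for debug output. */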
+static const char *
+batch_name_to_string(enum iris_batch_name name)
+{
+ const char *names[IRIS_BATCH_COUNT] = {
+ [IRIS_BATCH_RENDER] = "render",
+ [IRIS_BATCH_COMPUTE] = "compute",
+ };
+ return names[name];
+}
+
/**
* Flush the batch buffer, submitting it to the GPU and resetting it so
* we're ready to emit the next batch.
}
fprintf(stderr, "%19s:%-3d: %s batch [%u] flush with %5d+%5db (%0.1f%%) "
"(cmds), %4d BOs (%0.1fMb aperture)\n",
- file, line, batch->name, batch->hw_ctx_id,
+ file, line, batch_name_to_string(batch->name), batch->hw_ctx_id,
batch->primary_batch_size, second_bytes,
100.0f * bytes_for_commands / BATCH_SZ,
batch->exec_count,
/* Our target batch size - flush approximately at this point. */
#define BATCH_SZ (20 * 1024)
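+
+/** The batches a context maintains; these values also index iris_context::batches[]. */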
+enum iris_batch_name {
+ IRIS_BATCH_RENDER,
+ IRIS_BATCH_COMPUTE,
+};
+
#define IRIS_BATCH_COUNT 2
struct iris_address {
struct iris_vtable *vtbl;
struct pipe_debug_callback *dbg;
- /** The name of this batch for debug info (e.g. "render") */
- const char *name;
+ /** What batch is this? (e.g. IRIS_BATCH_RENDER/COMPUTE) */
+ enum iris_batch_name name;
/** Current batchbuffer being queued up. */
struct iris_bo *bo;
struct iris_screen *screen,
struct iris_vtable *vtbl,
struct pipe_debug_callback *dbg,
- struct iris_batch **other_batches,
- const char *name,
+ struct iris_batch *all_batches,
+ enum iris_batch_name name,
uint8_t ring);
void iris_chain_to_new_batch(struct iris_batch *batch);
void iris_batch_free(struct iris_batch *batch);
filter = BLORP_FILTER_NEAREST;
}
- struct iris_batch *batch = &ice->render_batch;
+ struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];
struct blorp_batch blorp_batch;
blorp_batch_init(&ice->blorp, &blorp_batch, batch, 0);
assert(src_box->depth == 1);
- struct iris_batch *batch = &ice->render_batch;
+ struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];
iris_batch_maybe_flush(batch, 1500);
if (remaining_entries < count) {
/* It's safe to flush because we're called outside of state upload. */
- if (iris_batch_references(&ice->render_batch, pool->bo))
- iris_batch_flush(&ice->render_batch);
- if (iris_batch_references(&ice->compute_batch, pool->bo))
- iris_batch_flush(&ice->compute_batch);
+ for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
+ if (iris_batch_references(&ice->batches[i], pool->bo))
+ iris_batch_flush(&ice->batches[i]);
+ }
iris_reset_border_color_pool(pool, pool->bo->bufmgr);
}
struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
assert(buffers != 0);
- struct iris_batch *batch = &ice->render_batch;
+ struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];
iris_batch_maybe_flush(batch, 1500);
struct iris_context *ice = (void *) ctx;
struct iris_resource *res = (void *) p_res;
- struct iris_batch *batch = &ice->render_batch;
+ struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];
const struct gen_device_info *devinfo = &batch->screen->devinfo;
iris_batch_maybe_flush(batch, 1500);
slab_destroy_child(&ice->transfer_pool);
- iris_batch_free(&ice->render_batch);
- iris_batch_free(&ice->compute_batch);
+ iris_batch_free(&ice->batches[IRIS_BATCH_RENDER]);
+ iris_batch_free(&ice->batches[IRIS_BATCH_COMPUTE]);
iris_destroy_binder(&ice->state.binder);
ralloc_free(ice);
genX_call(devinfo, init_state, ice);
genX_call(devinfo, init_blorp, ice);
- struct iris_batch *batches[IRIS_BATCH_COUNT] = {
- &ice->render_batch,
- &ice->compute_batch,
- };
- const char *batch_names[IRIS_BATCH_COUNT] = { "render", "compute", };
-
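+ /* Both batches currently use the render engine (I915_EXEC_RENDER). */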
for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
- iris_init_batch(batches[i], screen, &ice->vtbl, &ice->dbg,
- batches, batch_names[i], I915_EXEC_RENDER);
+ iris_init_batch(&ice->batches[i], screen, &ice->vtbl, &ice->dbg,
+ ice->batches, (enum iris_batch_name) i,
+ I915_EXEC_RENDER);
}
- ice->vtbl.init_render_context(screen, &ice->render_batch, &ice->vtbl,
- &ice->dbg);
- ice->vtbl.init_compute_context(screen, &ice->compute_batch, &ice->vtbl,
- &ice->dbg);
+ ice->vtbl.init_render_context(screen, &ice->batches[IRIS_BATCH_RENDER],
+ &ice->vtbl, &ice->dbg);
+ ice->vtbl.init_compute_context(screen, &ice->batches[IRIS_BATCH_COMPUTE],
+ &ice->vtbl, &ice->dbg);
return ctx;
}
struct blorp_context blorp;
- /** The main batch for rendering. */
- struct iris_batch render_batch;
-
- /** The batch for compute shader dispatch */
- struct iris_batch compute_batch;
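+ /** The batches owned by this context, indexed by enum iris_batch_name. */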
+ struct iris_batch batches[IRIS_BATCH_COUNT];
struct {
struct iris_uncompiled_shader *uncompiled[MESA_SHADER_STAGES];
iris_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
struct iris_context *ice = (struct iris_context *) ctx;
- struct iris_batch *batch = &ice->render_batch;
+ struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];
if (unlikely(INTEL_DEBUG & DEBUG_REEMIT))
ice->state.dirty |= ~0ull;
iris_launch_grid(struct pipe_context *ctx, const struct pipe_grid_info *grid)
{
struct iris_context *ice = (struct iris_context *) ctx;
- struct iris_batch *batch = &ice->compute_batch;
+ struct iris_batch *batch = &ice->batches[IRIS_BATCH_COMPUTE];
if (unlikely(INTEL_DEBUG & DEBUG_REEMIT))
ice->state.dirty |= ~0ull;
{
struct iris_screen *screen = (void *) ctx->screen;
struct iris_context *ice = (struct iris_context *)ctx;
- struct iris_batch *batch[IRIS_BATCH_COUNT] = {
- &ice->render_batch,
- &ice->compute_batch,
- };
/* XXX PIPE_FLUSH_DEFERRED */
- for (unsigned i = 0; i < ARRAY_SIZE(batch); i++)
- iris_batch_flush(batch[i]);
+ for (unsigned i = 0; i < IRIS_BATCH_COUNT; i++)
+ iris_batch_flush(&ice->batches[i]);
if (!out_fence)
return;
pipe_reference_init(&fence->ref, 1);
- for (unsigned b = 0; b < ARRAY_SIZE(batch); b++) {
- if (!check_syncpt(ctx->screen, batch[b]->last_syncpt))
+ for (unsigned b = 0; b < IRIS_BATCH_COUNT; b++) {
+ if (!check_syncpt(ctx->screen, ice->batches[b].last_syncpt))
continue;
iris_syncpt_reference(screen, &fence->syncpt[fence->count++],
- batch[b]->last_syncpt);
+ ice->batches[b].last_syncpt);
}
*out_fence = fence;
}
struct pipe_fence_handle *fence)
{
struct iris_context *ice = (struct iris_context *)ctx;
- struct iris_batch *batch[IRIS_BATCH_COUNT] = {
- &ice->render_batch,
- &ice->compute_batch,
- };
- for (unsigned b = 0; b < ARRAY_SIZE(batch); b++) {
+
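+ /* Make every batch wait on each of the fence's syncpoints. */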
+ for (unsigned b = 0; b < IRIS_BATCH_COUNT; b++) {
for (unsigned i = 0; i < fence->count; i++) {
- iris_batch_add_syncpt(batch[b], fence->syncpt[i],
+ iris_batch_add_syncpt(&ice->batches[b], fence->syncpt[i],
I915_EXEC_FENCE_WAIT);
}
}
{
struct iris_context *ice = (void *) ctx;
- if (ice->render_batch.contains_draw) {
- iris_emit_pipe_control_flush(&ice->render_batch,
+ if (ice->batches[IRIS_BATCH_RENDER].contains_draw) {
+ iris_emit_pipe_control_flush(&ice->batches[IRIS_BATCH_RENDER],
PIPE_CONTROL_DEPTH_CACHE_FLUSH |
PIPE_CONTROL_RENDER_TARGET_FLUSH |
PIPE_CONTROL_CS_STALL);
- iris_emit_pipe_control_flush(&ice->render_batch,
+ iris_emit_pipe_control_flush(&ice->batches[IRIS_BATCH_RENDER],
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
}
- if (ice->compute_batch.contains_draw) {
- iris_emit_pipe_control_flush(&ice->compute_batch,
+ if (ice->batches[IRIS_BATCH_COMPUTE].contains_draw) {
+ iris_emit_pipe_control_flush(&ice->batches[IRIS_BATCH_COMPUTE],
PIPE_CONTROL_CS_STALL);
- iris_emit_pipe_control_flush(&ice->compute_batch,
+ iris_emit_pipe_control_flush(&ice->batches[IRIS_BATCH_COMPUTE],
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
}
}
// XXX: don't unconditionally emit flushes in both engines; we don't
// even know if we're using e.g. the compute engine...
- if (ice->render_batch.contains_draw)
- iris_emit_pipe_control_flush(&ice->render_batch, bits);
- if (ice->compute_batch.contains_draw)
- iris_emit_pipe_control_flush(&ice->compute_batch, bits);
+ for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
+ if (ice->batches[i].contains_draw)
+ iris_emit_pipe_control_flush(&ice->batches[i], bits);
+ }
}
void
static void
mark_available(struct iris_context *ice, struct iris_query *q)
{
- struct iris_batch *batch = &ice->render_batch;
+ struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];
unsigned flags = PIPE_CONTROL_WRITE_IMMEDIATE;
unsigned offset = offsetof(struct iris_query_snapshots, snapshots_landed);
static void
write_value(struct iris_context *ice, struct iris_query *q, unsigned offset)
{
- struct iris_batch *batch = &ice->render_batch;
+ struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];
const struct gen_device_info *devinfo = &batch->screen->devinfo;
if (!iris_is_query_pipelined(q)) {
*/
iris_emit_pipe_control_flush(batch, PIPE_CONTROL_DEPTH_STALL);
}
- iris_pipelined_write(&ice->render_batch, q,
+ iris_pipelined_write(&ice->batches[IRIS_BATCH_RENDER], q,
PIPE_CONTROL_WRITE_DEPTH_COUNT |
PIPE_CONTROL_DEPTH_STALL,
offset);
case PIPE_QUERY_TIME_ELAPSED:
case PIPE_QUERY_TIMESTAMP:
case PIPE_QUERY_TIMESTAMP_DISJOINT:
- iris_pipelined_write(&ice->render_batch, q,
+ iris_pipelined_write(&ice->batches[IRIS_BATCH_RENDER], q,
PIPE_CONTROL_WRITE_TIMESTAMP,
offset);
break;
static void
gpr0_to_bool(struct iris_context *ice)
{
- struct iris_batch *batch = &ice->render_batch;
+ struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];
ice->vtbl.load_register_imm64(batch, CS_GPR(1), 1ull);
static void
calculate_result_on_gpu(struct iris_context *ice, struct iris_query *q)
{
- struct iris_batch *batch = &ice->render_batch;
+ struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];
ice->vtbl.load_register_mem64(batch, CS_GPR(1), q->bo,
offsetof(struct iris_query_snapshots, start));
const struct gen_device_info *devinfo = &screen->devinfo;
if (!q->ready) {
- if (iris_batch_references(&ice->render_batch, q->bo))
- iris_batch_flush(&ice->render_batch);
+ if (iris_batch_references(&ice->batches[IRIS_BATCH_RENDER], q->bo))
+ iris_batch_flush(&ice->batches[IRIS_BATCH_RENDER]);
if (!q->map->snapshots_landed) {
if (wait)
{
struct iris_context *ice = (void *) ctx;
struct iris_query *q = (void *) query;
- struct iris_batch *batch = &ice->render_batch;
+ struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];
const struct gen_device_info *devinfo = &batch->screen->devinfo;
unsigned snapshots_landed_offset =
offsetof(struct iris_query_snapshots, snapshots_landed);
(usage & PIPE_TRANSFER_MAP_DIRECTLY))
return NULL;
- if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
- iris_batch_references(&ice->render_batch, res->bo)) {
- iris_batch_flush(&ice->render_batch);
- }
-
- if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
- iris_batch_references(&ice->compute_batch, res->bo)) {
- iris_batch_flush(&ice->compute_batch);
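+ /* Unless the caller asked for an unsynchronized map, flush any batch that still references this BO. */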
+ if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
+ for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
+ if (iris_batch_references(&ice->batches[i], res->bo))
+ iris_batch_flush(&ice->batches[i]);
+ }
}
if ((usage & PIPE_TRANSFER_DONTBLOCK) && iris_bo_busy(res->bo))
* be set in this packet."
*/
// XXX: does this need to happen at 3DSTATE_BTP_PS time?
- iris_emit_pipe_control_flush(&ice->render_batch,
+ iris_emit_pipe_control_flush(&ice->batches[IRIS_BATCH_RENDER],
PIPE_CONTROL_RENDER_TARGET_FLUSH |
PIPE_CONTROL_STALL_AT_SCOREBOARD);
#endif