if (unlikely(INTEL_DEBUG & (DEBUG_BATCH | DEBUG_SUBMIT))) {
int bytes_for_commands = batch_bytes_used(batch);
+ int bytes_for_binder = batch->binder.insert_point;
if (batch->bo != batch->exec_bos[0])
bytes_for_commands += batch->primary_batch_size;
- fprintf(stderr, "%19s:%-3d: Batchbuffer flush with %5db (%0.1f%%), "
- "%4d BOs (%0.1fMb aperture)\n",
+ fprintf(stderr, "%19s:%-3d: Batchbuffer flush with %5db (%0.1f%%) "
+ "(cmds), %5db (%0.1f%%) (binder), %4d BOs (%0.1fMb aperture)\n",
file, line,
bytes_for_commands, 100.0f * bytes_for_commands / BATCH_SZ,
+ bytes_for_binder, 100.0f * bytes_for_binder / IRIS_BINDER_SIZE,
batch->exec_count,
(float) batch->aperture_space / (1024 * 1024));
dump_validation_list(batch);
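For reference, the updated message would look roughly like this (filename, line number, and all values are placeholders for illustration; the binder percentage assumes the 64kb size noted below):

        iris_draw.c:95 : Batchbuffer flush with  7168b (35.0%) (cmds),   960b (1.5%) (binder),   42 BOs (3.1Mb aperture)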
#include "iris_bufmgr.h"
#include "iris_context.h"
-/* 64kb */
-#define BINDER_SIZE (64 * 1024)
-
/**
* Reserve a block of space in the binder.
*/
/* If we can't fit all stages in the binder, flush the batch, which
 * will get us a new, empty binder.
 */
- if (binder->insert_point + size > BINDER_SIZE)
+ if (binder->insert_point + size > IRIS_BINDER_SIZE)
iris_batch_flush(batch);
uint32_t offset = binder->insert_point;
/* It had better fit now. */
- assert(offset + size <= BINDER_SIZE);
+ assert(offset + size <= IRIS_BINDER_SIZE);
binder->insert_point = align(binder->insert_point + size, 64);
iris_init_binder(struct iris_binder *binder, struct iris_bufmgr *bufmgr)
{
binder->bo =
- iris_bo_alloc(bufmgr, "binder", BINDER_SIZE, IRIS_MEMZONE_BINDER);
+ iris_bo_alloc(bufmgr, "binder", IRIS_BINDER_SIZE, IRIS_MEMZONE_BINDER);
binder->map = iris_bo_map(NULL, binder->bo, MAP_WRITE);
binder->insert_point = INIT_INSERT_POINT;
}
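The local BINDER_SIZE define removed above is presumably replaced by a shared IRIS_BINDER_SIZE constant in iris_binder.h, so that iris_batch.c can report binder usage as well. A minimal sketch of that declaration, assuming the size stays at 64kb:

/* iris_binder.h (sketch): binder size, shared with iris_batch.c. */
#define IRIS_BINDER_SIZE (64 * 1024)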
IRIS_DIRTY_BINDINGS_FS;
}
+ // XXX: don't do this unless things are dirty...
iris_binder_reserve_3d(batch, ice->shaders.prog);
ice->vtbl.upload_render_state(ice, batch, info);
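iris_binder_reserve_3d itself is not part of this excerpt. A rough, hypothetical sketch of how per-stage binding table reservations could be layered on iris_binder_reserve, assuming it takes the batch plus a size and returns the reserved offset as the hunk above suggests (the helper name and the per-stage surface counts are invented, the usual iris headers are assumed to be included, and the real function in the tree may differ):

/* Hypothetical sketch only, not the actual iris_binder_reserve_3d. */
static void
reserve_binding_tables_sketch(struct iris_batch *batch,
                              const unsigned *num_surfaces /* per stage; invented */)
{
   for (int stage = 0; stage < MESA_SHADER_STAGES; stage++) {
      if (num_surfaces[stage] == 0)
         continue;

      /* Each binding table entry is a 32-bit surface state offset. */
      uint32_t offset =
         iris_binder_reserve(batch, num_surfaces[stage] * sizeof(uint32_t));

      /* The returned offset (relative to the binder BO) would then be
       * programmed as this stage's binding table pointer.
       */
      (void) offset;
   }
}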