static void
intel_batchbuffer_reset(struct intel_batchbuffer *batch,
- drm_bacon_bufmgr *bufmgr,
+ struct brw_bufmgr *bufmgr,
bool has_llc);
static bool
void
intel_batchbuffer_init(struct intel_batchbuffer *batch,
- drm_bacon_bufmgr *bufmgr,
+ struct brw_bufmgr *bufmgr,
bool has_llc)
{
intel_batchbuffer_reset(batch, bufmgr, has_llc);
batch->exec_array_size = 100;
batch->exec_bos =
malloc(batch->exec_array_size * sizeof(batch->exec_bos[0]));
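+ /* validation_list mirrors exec_bos: entry i is the
+ * drm_i915_gem_exec_object2 the kernel sees for exec_bos[i].
+ */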
- batch->exec_objects =
- malloc(batch->exec_array_size * sizeof(batch->exec_objects[0]));
+ batch->validation_list =
+ malloc(batch->exec_array_size * sizeof(batch->validation_list[0]));
if (INTEL_DEBUG & DEBUG_BATCH) {
batch->state_batch_sizes =
static void
intel_batchbuffer_reset(struct intel_batchbuffer *batch,
- drm_bacon_bufmgr *bufmgr,
+ struct brw_bufmgr *bufmgr,
bool has_llc)
{
if (batch->last_bo != NULL) {
- drm_bacon_bo_unreference(batch->last_bo);
+ brw_bo_unreference(batch->last_bo);
batch->last_bo = NULL;
}
batch->last_bo = batch->bo;
- batch->bo = drm_bacon_bo_alloc(bufmgr, "batchbuffer", BATCH_SZ, 4096);
+ batch->bo = brw_bo_alloc(bufmgr, "batchbuffer", BATCH_SZ, 4096);
if (has_llc) {
- drm_bacon_bo_map(batch->bo, true);
- batch->map = batch->bo->virtual;
+ batch->map = brw_bo_map(NULL, batch->bo, MAP_READ | MAP_WRITE);
}
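+ /* Without LLC, batch->map instead points at a CPU-side shadow buffer,
+ * which is uploaded into the BO with brw_bo_subdata() at flush time.
+ */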
batch->map_next = batch->map;
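+ /* Drop references to any BOs added to the exec list since the save
+ * point, then restore the saved relocation count.
+ */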
for (int i = brw->batch.saved.exec_count;
i < brw->batch.exec_count; i++) {
if (brw->batch.exec_bos[i] != brw->batch.bo) {
- drm_bacon_bo_unreference(brw->batch.exec_bos[i]);
+ brw_bo_unreference(brw->batch.exec_bos[i]);
}
}
brw->batch.reloc_count = brw->batch.saved.reloc_count;
for (int i = 0; i < batch->exec_count; i++) {
if (batch->exec_bos[i] != batch->bo) {
- drm_bacon_bo_unreference(batch->exec_bos[i]);
+ brw_bo_unreference(batch->exec_bos[i]);
}
}
free(batch->relocs);
free(batch->exec_bos);
- free(batch->exec_objects);
+ free(batch->validation_list);
- drm_bacon_bo_unreference(batch->last_bo);
- drm_bacon_bo_unreference(batch->bo);
+ brw_bo_unreference(batch->last_bo);
+ brw_bo_unreference(batch->bo);
if (batch->state_batch_sizes)
_mesa_hash_table_destroy(batch->state_batch_sizes, NULL);
}
if (batch->ring != RENDER_RING)
return;
- int ret = drm_bacon_bo_map(batch->bo, false);
- if (ret != 0) {
+ void *map = brw_bo_map(brw, batch->bo, MAP_READ);
+ if (map == NULL) {
fprintf(stderr,
- "WARNING: failed to map batchbuffer (%s), "
- "dumping uploaded data instead.\n", strerror(ret));
+ "WARNING: failed to map batchbuffer, "
+ "dumping uploaded data instead.\n");
}
- uint32_t *data = batch->bo->virtual ? batch->bo->virtual : batch->map;
+ uint32_t *data = map ? map : batch->map;
uint32_t *end = data + USED_BATCH(*batch);
- uint32_t gtt_offset = batch->bo->virtual ? batch->bo->offset64 : 0;
+ uint32_t gtt_offset = map ? batch->bo->offset64 : 0;
int length;
bool color = INTEL_DEBUG & DEBUG_COLOR;
switch (gen_group_get_opcode(inst) >> 16) {
case _3DSTATE_PIPELINED_POINTERS:
- /* TODO: Decode Gen4-5 pipelined pointers */
+ /* Note: these Gen4-5 pointers are full relocations rather than
+ * offsets from the start of the batch. So we need to subtract
+ * gtt_offset (the start of the batch) to obtain an offset we
+ * can add to the map and get at the data.
+ */
+ decode_struct(brw, spec, "VS_STATE", data, gtt_offset,
+ (p[1] & ~0x1fu) - gtt_offset, color);
+ if (p[2] & 1) {
+ decode_struct(brw, spec, "GS_STATE", data, gtt_offset,
+ (p[2] & ~0x1fu) - gtt_offset, color);
+ }
+ if (p[3] & 1) {
+ decode_struct(brw, spec, "CLIP_STATE", data, gtt_offset,
+ (p[3] & ~0x1fu) - gtt_offset, color);
+ }
+ decode_struct(brw, spec, "SF_STATE", data, gtt_offset,
+ (p[4] & ~0x1fu) - gtt_offset, color);
+ decode_struct(brw, spec, "WM_STATE", data, gtt_offset,
+ (p[5] & ~0x1fu) - gtt_offset, color);
+ decode_struct(brw, spec, "COLOR_CALC_STATE", data, gtt_offset,
+ (p[6] & ~0x3fu) - gtt_offset, color);
break;
case _3DSTATE_BINDING_TABLE_POINTERS_VS:
case _3DSTATE_BINDING_TABLE_POINTERS_HS:
}
}
- if (ret == 0) {
- drm_bacon_bo_unmap(batch->bo);
+ if (map != NULL) {
+ brw_bo_unmap(batch->bo);
}
}
#else
/* Unreference any BOs held by the previous batch, and reset counts. */
for (int i = 0; i < brw->batch.exec_count; i++) {
if (brw->batch.exec_bos[i] != brw->batch.bo) {
- drm_bacon_bo_unreference(brw->batch.exec_bos[i]);
+ brw_bo_unreference(brw->batch.exec_bos[i]);
}
brw->batch.exec_bos[i] = NULL;
}
* would otherwise be stored in the context (which for all intents and
* purposes means everything).
*/
- if (brw->hw_ctx == NULL)
+ if (brw->hw_ctx == 0)
brw->ctx.NewDriverState |= BRW_NEW_CONTEXT;
brw->ctx.NewDriverState |= BRW_NEW_BATCH;
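+ /* Invalidate the cached index buffer state so it is re-emitted in the
+ * new batch.
+ */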
- brw->ib.type = -1;
+ brw->ib.index_size = -1;
/* We need to periodically reap the shader time results, because rollover
* happens every few seconds. We also want to see results every once in a
PIPE_CONTROL_CS_STALL);
}
}
-
- /* Mark that the current program cache BO has been used by the GPU.
- * It will be reallocated if we need to put new programs in for the
- * next batch.
- */
- brw->cache.bo_used_by_gpu = true;
}
static void
*/
if (brw->need_swap_throttle && brw->throttle_batch[0]) {
if (brw->throttle_batch[1]) {
- if (!brw->disable_throttling)
- drm_bacon_bo_wait_rendering(brw->throttle_batch[1]);
- drm_bacon_bo_unreference(brw->throttle_batch[1]);
+ if (!brw->disable_throttling) {
+ /* Stalling while we throttle the swap rate is common and expected
+ * here, so don't treat it as a performance problem.
+ */
+ brw_bo_wait_rendering(brw->throttle_batch[1]);
+ }
+ brw_bo_unreference(brw->throttle_batch[1]);
}
brw->throttle_batch[1] = brw->throttle_batch[0];
brw->throttle_batch[0] = NULL;
}
static void
-add_exec_bo(struct intel_batchbuffer *batch, drm_bacon_bo *bo)
+add_exec_bo(struct intel_batchbuffer *batch, struct brw_bo *bo)
{
if (bo != batch->bo) {
for (int i = 0; i < batch->exec_count; i++) {
return;
}
- drm_bacon_bo_reference(bo);
+ brw_bo_reference(bo);
}
if (batch->exec_count == batch->exec_array_size) {
batch->exec_bos =
realloc(batch->exec_bos,
batch->exec_array_size * sizeof(batch->exec_bos[0]));
- batch->exec_objects =
- realloc(batch->exec_objects,
- batch->exec_array_size * sizeof(batch->exec_objects[0]));
+ batch->validation_list =
+ realloc(batch->validation_list,
+ batch->exec_array_size * sizeof(batch->validation_list[0]));
}
struct drm_i915_gem_exec_object2 *validation_entry =
- &batch->exec_objects[batch->exec_count];
+ &batch->validation_list[batch->exec_count];
validation_entry->handle = bo->gem_handle;
if (bo == batch->bo) {
validation_entry->relocation_count = batch->reloc_count;
}
validation_entry->alignment = bo->align;
validation_entry->offset = bo->offset64;
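+ /* bo->kflags holds per-BO EXEC_OBJECT_* flags set at allocation time
+ * (e.g. 48-bit address support), passed straight through to the kernel.
+ */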
- validation_entry->flags = 0;
+ validation_entry->flags = bo->kflags;
validation_entry->rsvd1 = 0;
validation_entry->rsvd2 = 0;
static int
execbuffer(int fd,
struct intel_batchbuffer *batch,
- drm_bacon_context *ctx,
+ uint32_t ctx_id,
int used,
int in_fence,
int *out_fence,
int flags)
{
- uint32_t ctx_id = 0;
- drm_bacon_gem_context_get_id(ctx, &ctx_id);
-
struct drm_i915_gem_execbuffer2 execbuf = {
- .buffers_ptr = (uintptr_t) batch->exec_objects,
+ .buffers_ptr = (uintptr_t) batch->validation_list,
.buffer_count = batch->exec_count,
.batch_start_offset = 0,
.batch_len = used,
ret = -errno;
for (int i = 0; i < batch->exec_count; i++) {
- drm_bacon_bo *bo = batch->exec_bos[i];
+ struct brw_bo *bo = batch->exec_bos[i];
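+ /* Submitting the batch marks every BO on the exec list as busy. */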
bo->idle = false;
- /* Update drm_bacon_bo::offset64 */
- if (batch->exec_objects[i].offset != bo->offset64) {
+ /* Update brw_bo::offset64 */
+ if (batch->validation_list[i].offset != bo->offset64) {
DBG("BO %d migrated: 0x%" PRIx64 " -> 0x%llx\n",
- bo->gem_handle, bo->offset64, batch->exec_objects[i].offset);
- bo->offset64 = batch->exec_objects[i].offset;
+ bo->gem_handle, bo->offset64, batch->validation_list[i].offset);
+ bo->offset64 = batch->validation_list[i].offset;
}
}
int ret = 0;
if (brw->has_llc) {
- drm_bacon_bo_unmap(batch->bo);
+ brw_bo_unmap(batch->bo);
} else {
- ret = drm_bacon_bo_subdata(batch->bo, 0, 4 * USED_BATCH(*batch), batch->map);
+ ret = brw_bo_subdata(batch->bo, 0, 4 * USED_BATCH(*batch), batch->map);
if (ret == 0 && batch->state_batch_offset != batch->bo->size) {
- ret = drm_bacon_bo_subdata(batch->bo,
+ ret = brw_bo_subdata(batch->bo,
batch->state_batch_offset,
batch->bo->size - batch->state_batch_offset,
(char *)batch->map + batch->state_batch_offset);
flags |= I915_EXEC_GEN7_SOL_RESET;
if (ret == 0) {
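+ /* Only the render ring supports logical HW contexts; other rings use
+ * the default context (id 0).
+ */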
- void *hw_ctx = batch->ring != RENDER_RING ? NULL : brw->hw_ctx;
+ uint32_t hw_ctx = batch->ring == RENDER_RING ? brw->hw_ctx : 0;
/* Add the batch itself to the end of the validation list */
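+ /* (The kernel requires the batch buffer to be the last object in the
+ * execbuffer list.)
+ */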
add_exec_bo(batch, batch->bo);
if (brw->throttle_batch[0] == NULL) {
brw->throttle_batch[0] = brw->batch.bo;
- drm_bacon_bo_reference(brw->throttle_batch[0]);
+ brw_bo_reference(brw->throttle_batch[0]);
}
if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
fprintf(stderr, "waiting for idle\n");
- drm_bacon_bo_wait_rendering(brw->batch.bo);
+ brw_bo_wait_rendering(brw->batch.bo);
}
/* Start a new batch buffer. */
}
bool
-brw_batch_references(struct intel_batchbuffer *batch, drm_bacon_bo *bo)
+brw_batch_references(struct intel_batchbuffer *batch, struct brw_bo *bo)
{
for (int i = 0; i < batch->exec_count; i++) {
if (batch->exec_bos[i] == bo)
*/
uint64_t
brw_emit_reloc(struct intel_batchbuffer *batch, uint32_t batch_offset,
- drm_bacon_bo *target, uint32_t target_offset,
+ struct brw_bo *target, uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain)
{
+ uint64_t offset64;
+
if (batch->reloc_count == batch->reloc_array_size) {
batch->reloc_array_size *= 2;
batch->relocs = realloc(batch->relocs,
batch->reloc_count++;
+ /* Read target->offset64 exactly once (the volatile cast keeps gcc from
+ * reloading it), so the presumed_offset we record and the address we
+ * return below stay consistent even if another thread updates it.
+ */
+ offset64 = *((volatile uint64_t *)&target->offset64);
reloc->offset = batch_offset;
reloc->delta = target_offset;
reloc->target_handle = target->gem_handle;
reloc->read_domains = read_domains;
reloc->write_domain = write_domain;
- reloc->presumed_offset = target->offset64;
+ reloc->presumed_offset = offset64;
/* Using the old buffer offset, write in what the right data would be, in
* case the buffer doesn't move and we can short-circuit the relocation
* processing in the kernel
*/
- return target->offset64 + target_offset;
+ return offset64 + target_offset;
}
void
static void
load_sized_register_mem(struct brw_context *brw,
uint32_t reg,
- drm_bacon_bo *bo,
+ struct brw_bo *bo,
uint32_t read_domains, uint32_t write_domain,
uint32_t offset,
int size)
void
brw_load_register_mem(struct brw_context *brw,
uint32_t reg,
- drm_bacon_bo *bo,
+ struct brw_bo *bo,
uint32_t read_domains, uint32_t write_domain,
uint32_t offset)
{
void
brw_load_register_mem64(struct brw_context *brw,
uint32_t reg,
- drm_bacon_bo *bo,
+ struct brw_bo *bo,
uint32_t read_domains, uint32_t write_domain,
uint32_t offset)
{
*/
void
brw_store_register_mem32(struct brw_context *brw,
- drm_bacon_bo *bo, uint32_t reg, uint32_t offset)
+ struct brw_bo *bo, uint32_t reg, uint32_t offset)
{
assert(brw->gen >= 6);
*/
void
brw_store_register_mem64(struct brw_context *brw,
- drm_bacon_bo *bo, uint32_t reg, uint32_t offset)
+ struct brw_bo *bo, uint32_t reg, uint32_t offset)
{
assert(brw->gen >= 6);
* Write 32-bits of immediate data to a GPU memory buffer.
*/
void
-brw_store_data_imm32(struct brw_context *brw, drm_bacon_bo *bo,
+brw_store_data_imm32(struct brw_context *brw, struct brw_bo *bo,
uint32_t offset, uint32_t imm)
{
assert(brw->gen >= 6);
* Write 64-bits of immediate data to a GPU memory buffer.
*/
void
-brw_store_data_imm64(struct brw_context *brw, drm_bacon_bo *bo,
+brw_store_data_imm64(struct brw_context *brw, struct brw_bo *bo,
uint32_t offset, uint64_t imm)
{
assert(brw->gen >= 6);