#include "brw_defines.h"
#include "brw_state.h"
#include "common/gen_decoder.h"
+#include "common/gen_gem.h"
#include "util/hash_table.h"
#include <xf86drm.h>
-#include <i915_drm.h>
+#include "drm-uapi/i915_drm.h"
#define FILE_DEBUG_FLAG DEBUG_BUFMGR
static void
intel_batchbuffer_reset(struct brw_context *brw);
+static void
+brw_new_batch(struct brw_context *brw);
static void
dump_validation_list(struct intel_batchbuffer *batch)
}
static struct gen_batch_decode_bo
-decode_get_bo(void *v_brw, uint64_t address)
+decode_get_bo(void *v_brw, bool ppgtt, uint64_t address)
{
struct brw_context *brw = v_brw;
struct intel_batchbuffer *batch = &brw->batch;
}
static unsigned
-decode_get_state_size(void *v_brw, uint32_t offset_from_dsba)
+decode_get_state_size(void *v_brw, uint64_t address, uint64_t base_address)
{
struct brw_context *brw = v_brw;
struct intel_batchbuffer *batch = &brw->batch;
- struct hash_entry *entry =
- _mesa_hash_table_search(batch->state_batch_sizes,
- (void *) (uintptr_t) offset_from_dsba);
- return entry ? (uintptr_t) entry->data : 0;
-}
-
-static bool
-uint_key_compare(const void *a, const void *b)
-{
- return a == b;
-}
-
-static uint32_t
-uint_key_hash(const void *key)
-{
- return (uintptr_t) key;
+ unsigned size = (uintptr_t)
+ _mesa_hash_table_u64_search(batch->state_batch_sizes,
+ address - base_address);
+ return size;
}
static void
struct intel_batchbuffer *batch = &brw->batch;
const struct gen_device_info *devinfo = &screen->devinfo;
- batch->use_shadow_copy = !devinfo->has_llc;
+ if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
+ /* The shadow copy doesn't get relocs written, so state decoding fails. */
+ batch->use_shadow_copy = false;
+ } else
+ batch->use_shadow_copy = !devinfo->has_llc;
init_reloc_list(&batch->batch_relocs, 250);
init_reloc_list(&batch->state_relocs, 250);
if (INTEL_DEBUG & DEBUG_BATCH) {
batch->state_batch_sizes =
- _mesa_hash_table_create(NULL, uint_key_hash, uint_key_compare);
+ _mesa_hash_table_u64_create(NULL);
const unsigned decode_flags =
GEN_BATCH_DECODE_FULL |
static unsigned
add_exec_bo(struct intel_batchbuffer *batch, struct brw_bo *bo)
{
+ assert(bo->bufmgr == batch->batch.bo->bufmgr);
+
unsigned index = READ_ONCE(bo->index);
if (index < batch->exec_count && batch->exec_bos[index] == bo)
static void
recreate_growing_buffer(struct brw_context *brw,
struct brw_growing_bo *grow,
- const char *name, unsigned size)
+ const char *name, unsigned size,
+ enum brw_memory_zone memzone)
{
struct intel_screen *screen = brw->screen;
struct intel_batchbuffer *batch = &brw->batch;
struct brw_bufmgr *bufmgr = screen->bufmgr;
- grow->bo = brw_bo_alloc(bufmgr, name, size);
+ /* We can't grow buffers when using softpin, so just overallocate them. */
+ if (brw_using_softpin(bufmgr))
+ size *= 2;
+
+ grow->bo = brw_bo_alloc(bufmgr, name, size, memzone);
grow->bo->kflags |= can_do_exec_capture(screen) ? EXEC_OBJECT_CAPTURE : 0;
grow->partial_bo = NULL;
grow->partial_bo_map = NULL;
grow->partial_bytes = 0;
+ grow->memzone = memzone;
if (batch->use_shadow_copy)
grow->map = realloc(grow->map, grow->bo->size);
}
batch->last_bo = batch->batch.bo;
- recreate_growing_buffer(brw, &batch->batch, "batchbuffer", BATCH_SZ);
+ recreate_growing_buffer(brw, &batch->batch, "batchbuffer", BATCH_SZ,
+ BRW_MEMZONE_OTHER);
batch->map_next = batch->batch.map;
- recreate_growing_buffer(brw, &batch->state, "statebuffer", STATE_SZ);
+ recreate_growing_buffer(brw, &batch->state, "statebuffer", STATE_SZ,
+ BRW_MEMZONE_DYNAMIC);
/* Avoid making 0 a valid state offset - otherwise the decoder will try
* and decode data when we use offset 0 as a null pointer.
batch->needs_sol_reset = false;
batch->state_base_address_emitted = false;
- /* We don't know what ring the new batch will be sent to until we see the
- * first BEGIN_BATCH or BEGIN_BATCH_BLT. Mark it as unknown.
- */
- batch->ring = UNKNOWN_RING;
-
if (batch->state_batch_sizes)
- _mesa_hash_table_clear(batch->state_batch_sizes, NULL);
+ _mesa_hash_table_u64_clear(batch->state_batch_sizes, NULL);
}
static void
brw->batch.saved.exec_count = brw->batch.exec_count;
}
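+/* Returns true if the save point marks the very start of the batch, i.e.
+ * nothing had been emitted when the batch state was saved. */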
+bool
+intel_batchbuffer_saved_state_is_empty(struct brw_context *brw)
+{
+ struct intel_batchbuffer *batch = &brw->batch;
+ return (batch->saved.map_next == batch->batch.map);
+}
+
void
intel_batchbuffer_reset_to_saved(struct brw_context *brw)
{
brw->batch.map_next = brw->batch.saved.map_next;
if (USED_BATCH(brw->batch) == 0)
- brw->batch.ring = UNKNOWN_RING;
+ brw_new_batch(brw);
}
void
brw_bo_unreference(batch->batch.bo);
brw_bo_unreference(batch->state.bo);
if (batch->state_batch_sizes) {
- _mesa_hash_table_destroy(batch->state_batch_sizes, NULL);
+ _mesa_hash_table_u64_destroy(batch->state_batch_sizes, NULL);
gen_batch_decode_ctx_finish(&batch->decoder);
}
}
struct brw_bufmgr *bufmgr = brw->bufmgr;
struct brw_bo *bo = grow->bo;
+ /* We can't grow buffers that are softpinned, as the growing mechanism
+ * involves putting a larger buffer at the same gtt_offset, and we've
+ * only allocated the smaller amount of VMA. Without relocations, this
+ * simply won't work. This should never happen, however.
+ */
+ assert(!(bo->kflags & EXEC_OBJECT_PINNED));
+
perf_debug("Growing %s - ran out of space\n", bo->name);
if (grow->partial_bo) {
finish_growing_bos(grow);
}
- struct brw_bo *new_bo = brw_bo_alloc(bufmgr, bo->name, new_size);
+ struct brw_bo *new_bo =
+ brw_bo_alloc(bufmgr, bo->name, new_size, grow->memzone);
/* Copy existing data to the new larger buffer */
grow->partial_bo_map = grow->map;
}
void
-intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz,
- enum brw_gpu_ring ring)
+intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz)
{
- const struct gen_device_info *devinfo = &brw->screen->devinfo;
struct intel_batchbuffer *batch = &brw->batch;
- /* If we're switching rings, implicitly flush the batch. */
- if (unlikely(ring != brw->batch.ring) && brw->batch.ring != UNKNOWN_RING &&
- devinfo->gen >= 6) {
- intel_batchbuffer_flush(brw);
- }
-
const unsigned batch_used = USED_BATCH(*batch) * 4;
if (batch_used + sz >= BATCH_SZ && !batch->no_wrap) {
intel_batchbuffer_flush(brw);
batch->map_next = (void *) batch->batch.map + batch_used;
assert(batch_used + sz < batch->batch.bo->size);
}
-
- /* The intel_batchbuffer_flush() calls above might have changed
- * brw->batch.ring to UNKNOWN_RING, so we need to set it here at the end.
- */
- brw->batch.ring = ring;
}
/**
*/
if (INTEL_DEBUG & DEBUG_SHADER_TIME)
brw_collect_and_report_shader_time(brw);
+
+ intel_batchbuffer_maybe_noop(brw);
}
/**
*/
brw_emit_query_end(brw);
- if (brw->batch.ring == RENDER_RING) {
- /* Work around L3 state leaks into contexts set MI_RESTORE_INHIBIT which
- * assume that the L3 cache is configured according to the hardware
- * defaults. On Kernel 4.16+, we no longer need to do this.
- */
- if (devinfo->gen >= 7 &&
- !(brw->screen->kernel_features & KERNEL_ALLOWS_CONTEXT_ISOLATION))
- gen7_restore_default_l3_config(brw);
-
- if (devinfo->is_haswell) {
- /* From the Haswell PRM, Volume 2b, Command Reference: Instructions,
- * 3DSTATE_CC_STATE_POINTERS > "Note":
- *
- * "SW must program 3DSTATE_CC_STATE_POINTERS command at the end of every
- * 3D batch buffer followed by a PIPE_CONTROL with RC flush and CS stall."
- *
- * From the example in the docs, it seems to expect a regular pipe control
- * flush here as well. We may have done it already, but meh.
- *
- * See also WaAvoidRCZCounterRollover.
- */
- brw_emit_mi_flush(brw);
- BEGIN_BATCH(2);
- OUT_BATCH(_3DSTATE_CC_STATE_POINTERS << 16 | (2 - 2));
- OUT_BATCH(brw->cc.state_offset | 1);
- ADVANCE_BATCH();
- brw_emit_pipe_control_flush(brw, PIPE_CONTROL_RENDER_TARGET_FLUSH |
- PIPE_CONTROL_CS_STALL);
- }
+ /* Work around L3 state leaking into contexts that set MI_RESTORE_INHIBIT,
+ * which assume that the L3 cache is configured according to the hardware
+ * defaults.  On kernel 4.16+, we no longer need to do this.
+ */
+ if (devinfo->gen >= 7 &&
+ !(brw->screen->kernel_features & KERNEL_ALLOWS_CONTEXT_ISOLATION))
+ gen7_restore_default_l3_config(brw);
- /* Do not restore push constant packets during context restore. */
- if (devinfo->gen >= 7)
- gen10_emit_isp_disable(brw);
+ if (devinfo->is_haswell) {
+ /* From the Haswell PRM, Volume 2b, Command Reference: Instructions,
+ * 3DSTATE_CC_STATE_POINTERS > "Note":
+ *
+ * "SW must program 3DSTATE_CC_STATE_POINTERS command at the end of every
+ * 3D batch buffer followed by a PIPE_CONTROL with RC flush and CS stall."
+ *
+ * From the example in the docs, it seems to expect a regular pipe control
+ * flush here as well. We may have done it already, but meh.
+ *
+ * See also WaAvoidRCZCounterRollover.
+ */
+ brw_emit_mi_flush(brw);
+ BEGIN_BATCH(2);
+ OUT_BATCH(_3DSTATE_CC_STATE_POINTERS << 16 | (2 - 2));
+ OUT_BATCH(brw->cc.state_offset | 1);
+ ADVANCE_BATCH();
+ brw_emit_pipe_control_flush(brw, PIPE_CONTROL_RENDER_TARGET_FLUSH |
+ PIPE_CONTROL_CS_STALL);
}
+ /* Do not restore push constant packets during context restore. */
+ if (devinfo->gen >= 7)
+ gen10_emit_isp_disable(brw);
+
/* Emit MI_BATCH_BUFFER_END to finish our batch. Note that execbuf2
* requires our batch size to be QWord aligned, so we pad it out if
* necessary by emitting an extra MI_NOOP after the end.
*/
- intel_batchbuffer_require_space(brw, 8, brw->batch.ring);
+ intel_batchbuffer_require_space(brw, 8);
*brw->batch.map_next++ = MI_BATCH_BUFFER_END;
if (USED_BATCH(brw->batch) & 1) {
*brw->batch.map_next++ = MI_NOOP;
}
if (brw->need_flush_throttle) {
- __DRIscreen *dri_screen = brw->screen->driScrnPriv;
- drmCommandNone(dri_screen->fd, DRM_I915_GEM_THROTTLE);
+ drmCommandNone(brw->screen->fd, DRM_I915_GEM_THROTTLE);
brw->need_flush_throttle = false;
}
}
DBG("BO %d migrated: 0x%" PRIx64 " -> 0x%llx\n",
bo->gem_handle, bo->gtt_offset,
batch->validation_list[i].offset);
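+ /* Softpinned BOs own their addresses; the kernel must never move them,
+ * hence the assert below. */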
+ assert(!(bo->kflags & EXEC_OBJECT_PINNED));
bo->gtt_offset = batch->validation_list[i].offset;
}
}
static int
submit_batch(struct brw_context *brw, int in_fence_fd, int *out_fence_fd)
{
- const struct gen_device_info *devinfo = &brw->screen->devinfo;
- __DRIscreen *dri_screen = brw->screen->driScrnPriv;
struct intel_batchbuffer *batch = &brw->batch;
int ret = 0;
* To avoid stalling, execobject.offset should match the current
* address of that object within the active context.
*/
- int flags = I915_EXEC_NO_RELOC;
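+ /* With the BLT ring gone, every batch is submitted to the render ring,
+ * so the engine selection flag is now constant. */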
+ int flags = I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
- if (devinfo->gen >= 6 && batch->ring == BLT_RING) {
- flags |= I915_EXEC_BLT;
- } else {
- flags |= I915_EXEC_RENDER;
- }
if (batch->needs_sol_reset)
flags |= I915_EXEC_GEN7_SOL_RESET;
- uint32_t hw_ctx = batch->ring == RENDER_RING ? brw->hw_ctx : 0;
-
/* Set statebuffer relocations */
const unsigned state_index = batch->state.bo->index;
if (state_index < batch->exec_count &&
} else {
/* Move the batch to the end of the validation list */
struct drm_i915_gem_exec_object2 tmp;
+ struct brw_bo *tmp_bo;
const unsigned index = batch->exec_count - 1;
tmp = *entry;
*entry = batch->validation_list[index];
batch->validation_list[index] = tmp;
+
+ tmp_bo = batch->exec_bos[0];
+ batch->exec_bos[0] = batch->exec_bos[index];
+ batch->exec_bos[index] = tmp_bo;
}
- ret = execbuffer(dri_screen->fd, batch, hw_ctx,
+ ret = execbuffer(brw->screen->fd, batch, brw->hw_ctx,
4 * USED_BATCH(*batch),
in_fence_fd, out_fence_fd, flags);
if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
gen_print_batch(&batch->decoder, batch->batch.map,
4 * USED_BATCH(*batch),
- batch->batch.bo->gtt_offset);
+ batch->batch.bo->gtt_offset, false);
}
if (brw->ctx.Const.ResetStrategy == GL_LOSE_CONTEXT_ON_RESET_ARB)
bytes_for_commands, 100.0f * bytes_for_commands / BATCH_SZ,
bytes_for_state, 100.0f * bytes_for_state / STATE_SZ,
brw->batch.exec_count,
(float) brw->batch.aperture_space / (1024 * 1024),
brw->batch.batch_relocs.reloc_count,
brw->batch.state_relocs.reloc_count);
return ret;
}
-bool
-brw_batch_has_aperture_space(struct brw_context *brw, unsigned extra_space)
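+/* When the frontend has been put in no-op mode, begin an otherwise empty
+ * batch with MI_BATCH_BUFFER_END so that anything emitted after it is
+ * never executed by the hardware. */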
+void
+intel_batchbuffer_maybe_noop(struct brw_context *brw)
{
- return brw->batch.aperture_space + extra_space <=
- brw->screen->aperture_threshold;
+ if (!brw->frontend_noop || USED_BATCH(brw->batch) != 0)
+ return;
+
+ BEGIN_BATCH(1);
+ OUT_BATCH(MI_BATCH_BUFFER_END);
+ ADVANCE_BATCH();
}
bool
{
assert(target != NULL);
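+ /* Softpinned targets already have a fixed address, so no relocation is
+ * needed: just put them on the validation list (flagging a write if
+ * requested) and return the final canonical address. */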
+ if (target->kflags & EXEC_OBJECT_PINNED) {
+ brw_use_pinned_bo(batch, target, reloc_flags & RELOC_WRITE);
+ return gen_canonical_address(target->gtt_offset + target_offset);
+ }
+
+ unsigned int index = add_exec_bo(batch, target);
+ struct drm_i915_gem_exec_object2 *entry = &batch->validation_list[index];
+
if (rlist->reloc_count == rlist->reloc_array_size) {
rlist->reloc_array_size *= 2;
rlist->relocs = realloc(rlist->relocs,
sizeof(struct drm_i915_gem_relocation_entry));
}
- unsigned int index = add_exec_bo(batch, target);
- struct drm_i915_gem_exec_object2 *entry = &batch->validation_list[index];
-
if (reloc_flags & RELOC_32BIT) {
/* Restrict this buffer to the low 32 bits of the address space.
*
return entry->offset + target_offset;
}
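+/* Add a softpinned (EXEC_OBJECT_PINNED) BO to the batch's validation list
+ * without emitting a relocation, optionally flagging it as written. */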
+void
+brw_use_pinned_bo(struct intel_batchbuffer *batch, struct brw_bo *bo,
+ unsigned writable_flag)
+{
+ assert(bo->kflags & EXEC_OBJECT_PINNED);
+ assert((writable_flag & ~EXEC_OBJECT_WRITE) == 0);
+
+ unsigned int index = add_exec_bo(batch, bo);
+ struct drm_i915_gem_exec_object2 *entry = &batch->validation_list[index];
+ assert(entry->offset == bo->gtt_offset);
+
+ if (writable_flag)
+ entry->flags |= EXEC_OBJECT_WRITE;
+}
+
uint64_t
brw_batch_reloc(struct intel_batchbuffer *batch, uint32_t batch_offset,
struct brw_bo *target, uint32_t target_offset,
}
if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
- _mesa_hash_table_insert(batch->state_batch_sizes,
- (void *) (uintptr_t) offset,
- (void *) (uintptr_t) size);
+ _mesa_hash_table_u64_insert(batch->state_batch_sizes,
+ offset, (void *) (uintptr_t) size);
}
batch->state_used = offset + size;
void
intel_batchbuffer_data(struct brw_context *brw,
- const void *data, GLuint bytes, enum brw_gpu_ring ring)
+ const void *data, GLuint bytes)
{
assert((bytes & 3) == 0);
- intel_batchbuffer_require_space(brw, bytes, ring);
+ intel_batchbuffer_require_space(brw, bytes);
memcpy(brw->batch.map_next, data, bytes);
brw->batch.map_next += bytes >> 2;
}
* Copies a 32-bit register.
*/
void
-brw_load_register_reg(struct brw_context *brw, uint32_t src, uint32_t dest)
+brw_load_register_reg(struct brw_context *brw, uint32_t dest, uint32_t src)
{
assert(brw->screen->devinfo.gen >= 8 || brw->screen->devinfo.is_haswell);
* Copies a 64-bit register.
*/
void
-brw_load_register_reg64(struct brw_context *brw, uint32_t src, uint32_t dest)
+brw_load_register_reg64(struct brw_context *brw, uint32_t dest, uint32_t src)
{
assert(brw->screen->devinfo.gen >= 8 || brw->screen->devinfo.is_haswell);