#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
-#include "intel_reg.h"
#include "intel_bufmgr.h"
#include "intel_buffers.h"
#include "intel_fbo.h"
#include <i915_drm.h>
static void
-intel_batchbuffer_reset(struct brw_context *brw);
+intel_batchbuffer_reset(struct intel_batchbuffer *batch, dri_bufmgr *bufmgr,
+ bool has_llc);
void
-intel_batchbuffer_init(struct brw_context *brw)
+intel_batchbuffer_init(struct intel_batchbuffer *batch, dri_bufmgr *bufmgr,
+ bool has_llc)
{
- intel_batchbuffer_reset(brw);
+ intel_batchbuffer_reset(batch, bufmgr, has_llc);
- if (!brw->has_llc) {
- brw->batch.cpu_map = malloc(BATCH_SZ);
- brw->batch.map = brw->batch.cpu_map;
- brw->batch.map_next = brw->batch.cpu_map;
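+ /* Without LLC, CPU access to the batch BO is uncached and slow to
+ * read back, so build the batch in a malloc'd shadow buffer and
+ * upload it to the BO at flush time.
+ */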
+ if (!has_llc) {
+ batch->cpu_map = malloc(BATCH_SZ);
+ batch->map = batch->cpu_map;
+ batch->map_next = batch->cpu_map;
}
}
static void
-intel_batchbuffer_reset(struct brw_context *brw)
+intel_batchbuffer_reset(struct intel_batchbuffer *batch, dri_bufmgr *bufmgr,
+ bool has_llc)
{
- if (brw->batch.last_bo != NULL) {
- drm_intel_bo_unreference(brw->batch.last_bo);
- brw->batch.last_bo = NULL;
+ if (batch->last_bo != NULL) {
+ drm_intel_bo_unreference(batch->last_bo);
+ batch->last_bo = NULL;
}
- brw->batch.last_bo = brw->batch.bo;
+ batch->last_bo = batch->bo;
- brw_render_cache_set_clear(brw);
-
- brw->batch.bo = drm_intel_bo_alloc(brw->bufmgr, "batchbuffer",
- BATCH_SZ, 4096);
- if (brw->has_llc) {
- drm_intel_bo_map(brw->batch.bo, true);
- brw->batch.map = brw->batch.bo->virtual;
+ batch->bo = drm_intel_bo_alloc(bufmgr, "batchbuffer", BATCH_SZ, 4096);
+ if (has_llc) {
+ drm_intel_bo_map(batch->bo, true);
+ batch->map = batch->bo->virtual;
}
- brw->batch.map_next = brw->batch.map;
+ batch->map_next = batch->map;
- brw->batch.reserved_space = BATCH_RESERVED;
- brw->batch.state_batch_offset = brw->batch.bo->size;
- brw->batch.needs_sol_reset = false;
+ batch->reserved_space = BATCH_RESERVED;
+ batch->state_batch_offset = batch->bo->size;
+ batch->needs_sol_reset = false;
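+ /* A fresh batch has not emitted STATE_BASE_ADDRESS yet. */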
+ batch->state_base_address_emitted = false;
/* We don't know what ring the new batch will be sent to until we see the
* first BEGIN_BATCH or BEGIN_BATCH_BLT. Mark it as unknown.
*/
- brw->batch.ring = UNKNOWN_RING;
+ batch->ring = UNKNOWN_RING;
+}
+
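+/* Render cache tracking lives in brw_context, so clearing it stays in this
+ * brw-aware wrapper; intel_batchbuffer_reset() itself no longer needs one.
+ */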
+static void
+intel_batchbuffer_reset_and_clear_render_cache(struct brw_context *brw)
+{
+ intel_batchbuffer_reset(&brw->batch, brw->bufmgr, brw->has_llc);
+ brw_render_cache_set_clear(brw);
}
void
-intel_batchbuffer_free(struct brw_context *brw)
+intel_batchbuffer_free(struct intel_batchbuffer *batch)
{
- free(brw->batch.cpu_map);
- drm_intel_bo_unreference(brw->batch.last_bo);
- drm_intel_bo_unreference(brw->batch.bo);
+ free(batch->cpu_map);
+ drm_intel_bo_unreference(batch->last_bo);
+ drm_intel_bo_unreference(batch->bo);
}
void
#ifdef DEBUG
assert(sz < BATCH_SZ - BATCH_RESERVED);
#endif
- if (intel_batchbuffer_space(brw) < sz)
+ if (intel_batchbuffer_space(&brw->batch) < sz)
intel_batchbuffer_flush(brw);
enum brw_gpu_ring prev_ring = brw->batch.ring;
struct intel_batchbuffer *batch = &brw->batch;
int ret;
- decode = drm_intel_decode_context_alloc(brw->intelScreen->deviceID);
+ decode = drm_intel_decode_context_alloc(brw->screen->deviceID);
if (!decode)
return;
void
intel_batchbuffer_emit_render_ring_prelude(struct brw_context *brw)
{
- /* We may need to enable and snapshot OA counters. */
- brw_perf_monitor_new_batch(brw);
+ /* Currently unused. */
}
/**
{
/* Create a new batchbuffer and reset the associated state: */
drm_intel_gem_bo_clear_relocs(brw->batch.bo, 0);
- intel_batchbuffer_reset(brw);
+ intel_batchbuffer_reset_and_clear_render_cache(brw);
/* If the kernel supports hardware contexts, then most hardware state is
* preserved between batches; we only need to re-emit state that is required
*/
if (INTEL_DEBUG & DEBUG_SHADER_TIME)
brw_collect_and_report_shader_time(brw);
-
- if (INTEL_DEBUG & DEBUG_PERFMON)
- brw_dump_perf_monitors(brw);
}
/**
if (brw->gen >= 7)
gen7_restore_default_l3_config(brw);
- /* We may also need to snapshot and disable OA counters. */
- brw_perf_monitor_finish_batch(brw);
-
if (brw->is_haswell) {
/* From the Haswell PRM, Volume 2b, Command Reference: Instructions,
* 3DSTATE_CC_STATE_POINTERS > "Note":
}
if (brw->need_flush_throttle) {
- __DRIscreen *psp = brw->intelScreen->driScrnPriv;
- drmCommandNone(psp->fd, DRM_I915_GEM_THROTTLE);
+ __DRIscreen *dri_screen = brw->screen->driScrnPriv;
+ drmCommandNone(dri_screen->fd, DRM_I915_GEM_THROTTLE);
brw->need_flush_throttle = false;
}
}
/* TODO: Push this whole function into bufmgr.
*/
static int
-do_flush_locked(struct brw_context *brw)
+do_flush_locked(struct brw_context *brw, int in_fence_fd, int *out_fence_fd)
{
struct intel_batchbuffer *batch = &brw->batch;
int ret = 0;
}
}
- if (!brw->intelScreen->no_hw) {
+ if (!brw->screen->no_hw) {
int flags;
if (brw->gen >= 6 && batch->ring == BLT_RING) {
brw_annotate_aub(brw);
if (brw->hw_ctx == NULL || batch->ring != RENDER_RING) {
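+ /* Fence fds are only supported by the context-based execbuf path
+ * below, so they must not reach this fallback.
+ */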
+ assert(in_fence_fd == -1);
+ assert(out_fence_fd == NULL);
ret = drm_intel_bo_mrb_exec(batch->bo, 4 * USED_BATCH(*batch),
NULL, 0, 0, flags);
} else {
- ret = drm_intel_gem_bo_context_exec(batch->bo, brw->hw_ctx,
- 4 * USED_BATCH(*batch), flags);
+ ret = drm_intel_gem_bo_fence_exec(batch->bo, brw->hw_ctx,
+ 4 * USED_BATCH(*batch),
+ in_fence_fd, out_fence_fd,
+ flags);
}
}
if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
do_batch_dump(brw);
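+ /* For robustness contexts, poll for GPU resets after each submission
+ * so GL_LOSE_CONTEXT_ON_RESET_ARB status is reported promptly.
+ */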
+ if (brw->ctx.Const.ResetStrategy == GL_LOSE_CONTEXT_ON_RESET_ARB)
+ brw_check_for_reset(brw);
+
if (ret != 0) {
fprintf(stderr, "intel_do_flush_locked failed: %s\n", strerror(-ret));
exit(1);
return ret;
}
+/**
+ * The in_fence_fd is ignored if -1. Otherwise this function takes ownership
+ * of the fd.
+ *
+ * The out_fence_fd is ignored if NULL. Otherwise, the caller takes ownership
+ * of the returned fd.
+ */
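+/* Illustrative sketch only (not part of this patch): a caller that just
+ * wants a fence out might do
+ *
+ *    int fence_fd = -1;
+ *    if (_intel_batchbuffer_flush_fence(brw, -1, &fence_fd,
+ *                                       __FILE__, __LINE__) == 0) {
+ *       ... use fence_fd, then close(fence_fd) ...
+ *    }
+ */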
int
-_intel_batchbuffer_flush(struct brw_context *brw,
- const char *file, int line)
+_intel_batchbuffer_flush_fence(struct brw_context *brw,
+ int in_fence_fd, int *out_fence_fd,
+ const char *file, int line)
{
int ret;
brw_finish_batch(brw);
/* Mark the end of the buffer. */
- intel_batchbuffer_emit_dword(brw, MI_BATCH_BUFFER_END);
+ intel_batchbuffer_emit_dword(&brw->batch, MI_BATCH_BUFFER_END);
if (USED_BATCH(brw->batch) & 1) {
/* Round batchbuffer usage to 2 DWORDs. */
- intel_batchbuffer_emit_dword(brw, MI_NOOP);
+ intel_batchbuffer_emit_dword(&brw->batch, MI_NOOP);
}
intel_upload_finish(brw);
/* Check that we didn't just wrap our batchbuffer at a bad time. */
assert(!brw->no_batch_wrap);
- ret = do_flush_locked(brw);
+ ret = do_flush_locked(brw, in_fence_fd, out_fence_fd);
if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
fprintf(stderr, "waiting for idle\n");
/* This is the only way buffers get added to the validate list.
*/
uint32_t
-intel_batchbuffer_reloc(struct brw_context *brw,
+intel_batchbuffer_reloc(struct intel_batchbuffer *batch,
drm_intel_bo *buffer, uint32_t offset,
uint32_t read_domains, uint32_t write_domain,
uint32_t delta)
{
int ret;
- ret = drm_intel_bo_emit_reloc(brw->batch.bo, offset,
+ ret = drm_intel_bo_emit_reloc(batch->bo, offset,
buffer, delta,
read_domains, write_domain);
assert(ret == 0);
}
uint64_t
-intel_batchbuffer_reloc64(struct brw_context *brw,
+intel_batchbuffer_reloc64(struct intel_batchbuffer *batch,
drm_intel_bo *buffer, uint32_t offset,
uint32_t read_domains, uint32_t write_domain,
uint32_t delta)
{
- int ret = drm_intel_bo_emit_reloc(brw->batch.bo, offset,
+ int ret = drm_intel_bo_emit_reloc(batch->bo, offset,
buffer, delta,
read_domains, write_domain);
assert(ret == 0);
brw_store_data_imm32(struct brw_context *brw, drm_intel_bo *bo,
uint32_t offset, uint32_t imm)
{
- const int len = brw->gen >= 8 ? 4 : 3;
assert(brw->gen >= 6);
- BEGIN_BATCH(len);
- OUT_BATCH(MI_STORE_DATA_IMM | (len - 2));
- if (len > 3)
+ BEGIN_BATCH(4);
+ OUT_BATCH(MI_STORE_DATA_IMM | (4 - 2));
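+ /* The packet is 4 dwords on every gen: gen8+ spends two dwords on a
+ * 64-bit address, earlier gens on an MBZ dword plus a 32-bit address.
+ */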
+ if (brw->gen >= 8)
OUT_RELOC64(bo, I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
offset);
- else
+ else {
+ OUT_BATCH(0); /* MBZ */
OUT_RELOC(bo, I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
offset);
+ }
OUT_BATCH(imm);
ADVANCE_BATCH();
}
brw_store_data_imm64(struct brw_context *brw, drm_intel_bo *bo,
uint32_t offset, uint64_t imm)
{
- const int len = brw->gen >= 8 ? 5 : 4;
assert(brw->gen >= 6);
- BEGIN_BATCH(len);
- OUT_BATCH(MI_STORE_DATA_IMM | (len - 2));
- if (len > 4)
+ BEGIN_BATCH(5);
+ OUT_BATCH(MI_STORE_DATA_IMM | (5 - 2));
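+ /* Same layout as the 32-bit variant plus a second data dword: 5 dwords
+ * on every gen.
+ */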
+ if (brw->gen >= 8)
OUT_RELOC64(bo, I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
offset);
- else
+ else {
+ OUT_BATCH(0); /* MBZ */
OUT_RELOC(bo, I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
offset);
+ }
OUT_BATCH(imm & 0xffffffffu);
OUT_BATCH(imm >> 32);
ADVANCE_BATCH();