-/**************************************************************************
- *
+/*
* Copyright 2006 VMware, Inc.
* All Rights Reserved.
*
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
+ * distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
+ */
#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "intel_fbo.h"
#include "brw_context.h"
#include "brw_defines.h"
+#include "brw_state.h"
#include <xf86drm.h>
#include <i915_drm.h>
if (!brw->has_llc) {
brw->batch.cpu_map = malloc(BATCH_SZ);
brw->batch.map = brw->batch.cpu_map;
+ brw->batch.map_next = brw->batch.cpu_map;
}
}
drm_intel_bo_map(brw->batch.bo, true);
brw->batch.map = brw->batch.bo->virtual;
}
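+ /* Reset the write pointer to the start of the freshly mapped batch.
+ * USED_BATCH() presumably computes the number of DWords written as
+ * map_next - map, replacing the old batch.used counter (assumption; the
+ * macro is expected to live in intel_batchbuffer.h).
+ */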
+ brw->batch.map_next = brw->batch.map;
brw->batch.reserved_space = BATCH_RESERVED;
brw->batch.state_batch_offset = brw->batch.bo->size;
- brw->batch.used = 0;
brw->batch.needs_sol_reset = false;
/* We don't know what ring the new batch will be sent to until we see the
void
intel_batchbuffer_save_state(struct brw_context *brw)
{
- brw->batch.saved.used = brw->batch.used;
+ brw->batch.saved.map_next = brw->batch.map_next;
brw->batch.saved.reloc_count =
drm_intel_gem_bo_get_reloc_count(brw->batch.bo);
}
{
drm_intel_gem_bo_clear_relocs(brw->batch.bo, brw->batch.saved.reloc_count);
- brw->batch.used = brw->batch.saved.used;
- if (brw->batch.used == 0)
+ brw->batch.map_next = brw->batch.saved.map_next;
+ if (USED_BATCH(brw->batch) == 0)
brw->batch.ring = UNKNOWN_RING;
}
drm_intel_bo_unreference(brw->batch.bo);
}
+void
+intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz,
+ enum brw_gpu_ring ring)
+{
+ /* If we're switching rings, implicitly flush the batch. */
+ if (unlikely(ring != brw->batch.ring) && brw->batch.ring != UNKNOWN_RING &&
+ brw->gen >= 6) {
+ intel_batchbuffer_flush(brw);
+ }
+
+#ifdef DEBUG
+ assert(sz < BATCH_SZ - BATCH_RESERVED);
+#endif
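+ /* If the request doesn't fit in what remains of the current batch, flush so
+ * the new commands start in a fresh buffer.
+ */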
+ if (intel_batchbuffer_space(brw) < sz)
+ intel_batchbuffer_flush(brw);
+
+ enum brw_gpu_ring prev_ring = brw->batch.ring;
+ /* The intel_batchbuffer_flush() calls above might have changed
+ * brw->batch.ring to UNKNOWN_RING, so we need to set it here at the end.
+ */
+ brw->batch.ring = ring;
+
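+ /* First time this batch touches the render ring: let the driver emit any
+ * setup state that must precede rendering commands (assumption about what
+ * intel_batchbuffer_emit_render_ring_prelude() does).
+ */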
+ if (unlikely(prev_ring == UNKNOWN_RING && ring == RENDER_RING))
+ intel_batchbuffer_emit_render_ring_prelude(brw);
+}
+
static void
do_batch_dump(struct brw_context *brw)
{
drm_intel_decode_set_batch_pointer(decode,
batch->bo->virtual,
batch->bo->offset64,
- batch->used);
+ USED_BATCH(*batch));
} else {
fprintf(stderr,
"WARNING: failed to map batchbuffer (%s), "
drm_intel_decode_set_batch_pointer(decode,
batch->map,
batch->bo->offset64,
- batch->used);
+ USED_BATCH(*batch));
}
drm_intel_decode_set_output_file(decode, stderr);
brw_emit_query_end(brw);
if (brw->batch.ring == RENDER_RING) {
+ /* Work around L3 state leaking into contexts that run with MI_RESTORE_INHIBIT
+ * set, which assume that the L3 cache is configured according to the hardware
+ * defaults.
+ */
+ if (brw->gen >= 7)
+ gen7_restore_default_l3_config(brw);
+
/* We may also need to snapshot and disable OA counters. */
brw_perf_monitor_finish_batch(brw);
}
}
+/* Drop this once the resource streamer (RS) defines are pulled into libdrm. */
+#ifndef I915_EXEC_RESOURCE_STREAMER
+#define I915_EXEC_RESOURCE_STREAMER (1<<15)
+#endif
+
/* TODO: Push this whole function into bufmgr.
*/
static int
if (brw->has_llc) {
drm_intel_bo_unmap(batch->bo);
} else {
- ret = drm_intel_bo_subdata(batch->bo, 0, 4*batch->used, batch->map);
+ ret = drm_intel_bo_subdata(batch->bo, 0, 4 * USED_BATCH(*batch), batch->map);
if (ret == 0 && batch->state_batch_offset != batch->bo->size) {
ret = drm_intel_bo_subdata(batch->bo,
batch->state_batch_offset,
if (brw->gen >= 6 && batch->ring == BLT_RING) {
flags = I915_EXEC_BLT;
} else {
- flags = I915_EXEC_RENDER;
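+ /* When the hardware resource streamer is in use, also ask the kernel to
+ * enable it for this execbuf via the flag defined above.
+ */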
+ flags = I915_EXEC_RENDER |
+ (brw->use_resource_streamer ? I915_EXEC_RESOURCE_STREAMER : 0);
}
if (batch->needs_sol_reset)
flags |= I915_EXEC_GEN7_SOL_RESET;
brw_annotate_aub(brw);
if (brw->hw_ctx == NULL || batch->ring != RENDER_RING) {
- ret = drm_intel_bo_mrb_exec(batch->bo, 4 * batch->used, NULL, 0, 0,
- flags);
+ ret = drm_intel_bo_mrb_exec(batch->bo, 4 * USED_BATCH(*batch),
+ NULL, 0, 0, flags);
} else {
ret = drm_intel_gem_bo_context_exec(batch->bo, brw->hw_ctx,
- 4 * batch->used, flags);
+ 4 * USED_BATCH(*batch), flags);
}
}
{
int ret;
- if (brw->batch.used == 0)
+ if (USED_BATCH(brw->batch) == 0)
return 0;
if (brw->throttle_batch[0] == NULL) {
}
if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
- int bytes_for_commands = 4 * brw->batch.used;
+ int bytes_for_commands = 4 * USED_BATCH(brw->batch);
int bytes_for_state = brw->batch.bo->size - brw->batch.state_batch_offset;
int total_bytes = bytes_for_commands + bytes_for_state;
fprintf(stderr, "%s:%d: Batchbuffer flush with %4db (pkt) + "
/* Mark the end of the buffer. */
intel_batchbuffer_emit_dword(brw, MI_BATCH_BUFFER_END);
- if (brw->batch.used & 1) {
+ if (USED_BATCH(brw->batch) & 1) {
/* Round batchbuffer usage to 2 DWORDs. */
intel_batchbuffer_emit_dword(brw, MI_NOOP);
}
drm_intel_bo_wait_rendering(brw->batch.bo);
}
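+ /* Presumably rewinds the hardware binding table pool so the next batch
+ * starts allocating binding tables from the top of the pool again
+ * (assumption about gen7_reset_hw_bt_pool_offsets()).
+ */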
+ if (brw->use_resource_streamer)
+ gen7_reset_hw_bt_pool_offsets(brw);
+
/* Start a new batch buffer. */
brw_new_batch(brw);
*/
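+/* Callers now pass the byte offset within the batch at which the relocation
+ * is written (presumably 4 * USED_BATCH() at the point of emission), rather
+ * than this helper reading the old batch.used counter.
+ */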
uint32_t
intel_batchbuffer_reloc(struct brw_context *brw,
- drm_intel_bo *buffer,
+ drm_intel_bo *buffer, uint32_t offset,
uint32_t read_domains, uint32_t write_domain,
uint32_t delta)
{
int ret;
- ret = drm_intel_bo_emit_reloc(brw->batch.bo, 4*brw->batch.used,
+ ret = drm_intel_bo_emit_reloc(brw->batch.bo, offset,
buffer, delta,
read_domains, write_domain);
assert(ret == 0);
uint64_t
intel_batchbuffer_reloc64(struct brw_context *brw,
- drm_intel_bo *buffer,
+ drm_intel_bo *buffer, uint32_t offset,
uint32_t read_domains, uint32_t write_domain,
uint32_t delta)
{
- int ret = drm_intel_bo_emit_reloc(brw->batch.bo, 4*brw->batch.used,
+ int ret = drm_intel_bo_emit_reloc(brw->batch.bo, offset,
buffer, delta,
read_domains, write_domain);
assert(ret == 0);
{
assert((bytes & 3) == 0);
intel_batchbuffer_require_space(brw, bytes, ring);
- memcpy(brw->batch.map + brw->batch.used, data, bytes);
- brw->batch.used += bytes >> 2;
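+ /* map_next is presumably a uint32_t pointer, so advancing it by bytes >> 2
+ * moves it past the DWords just copied.
+ */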
+ memcpy(brw->batch.map_next, data, bytes);
+ brw->batch.map_next += bytes >> 2;
}
static void