struct pipe_reference reference;
struct pipe_fence_handle *gfx;
struct pipe_fence_handle *sdma;
+
+ /* If the context wasn't flushed at fence creation, this is non-NULL. */
+ struct {
+ struct r600_common_context *ctx;
+ unsigned ib_index;
+ } gfx_unflushed;
};
FREE(b->global_symbol_offsets);
FREE(b->relocs);
FREE(b->disasm_string);
+ FREE(b->llvm_ir_string);
}
/*
* pipe_context
*/
+/**
+ * Write an EOP event.
+ *
+ * \param event EVENT_TYPE_*
+ * \param event_flags Optional cache flush flags (TC)
+ * \param data_sel 1 = fence, 3 = timestamp
+ * \param buf Buffer
+ * \param va GPU address
+ * \param old_fence Previous fence value (for a bug workaround)
+ * \param new_fence Fence value to write for this event
+ */
+void r600_gfx_write_event_eop(struct r600_common_context *ctx,
+ unsigned event, unsigned event_flags,
+ unsigned data_sel,
+ struct r600_resource *buf, uint64_t va,
+ uint32_t old_fence, uint32_t new_fence)
+{
+ struct radeon_winsys_cs *cs = ctx->gfx.cs;
+ unsigned op = EVENT_TYPE(event) |
+ EVENT_INDEX(5) |
+ event_flags;
+
+ if (ctx->chip_class == CIK) {
+ /* Two EOP events are required to make all engines go idle
+ * (and to execute optional cache flushes) before the timestamp
+ * is written.
+ */
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
+ radeon_emit(cs, op);
+ radeon_emit(cs, va);
+ radeon_emit(cs, ((va >> 32) & 0xffff) | EOP_DATA_SEL(data_sel));
+ radeon_emit(cs, old_fence); /* immediate data */
+ radeon_emit(cs, 0); /* unused */
+ }
+
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
+ radeon_emit(cs, op);
+ radeon_emit(cs, va);
+ radeon_emit(cs, ((va >> 32) & 0xffff) | EOP_DATA_SEL(data_sel));
+ radeon_emit(cs, new_fence); /* immediate data */
+ radeon_emit(cs, 0); /* unused */
+
+ if (buf)
+ r600_emit_reloc(ctx, &ctx->gfx, buf, RADEON_USAGE_WRITE,
+ RADEON_PRIO_QUERY);
+}
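As a usage sketch (hypothetical call site: EVENT_TYPE_BOTTOM_OF_PIPE_TS, fence_buf and fence_va are assumptions, not part of this patch), a caller could emit a bottom-of-pipe fence write like this:

   /* Write fence value 1 to fence_va once all prior work has drained.
    * data_sel = 1 selects a 32-bit fence write (see the doc comment). */
   r600_gfx_write_event_eop(rctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS, 0,
                            1 /* fence */, fence_buf, fence_va,
                            0 /* old_fence */, 1 /* new_fence */);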
+
+unsigned r600_gfx_write_fence_dwords(struct r600_common_screen *screen)
+{
+ unsigned dwords = 6;
+
+ if (screen->chip_class == CIK)
+ dwords *= 2;
+
+ if (!screen->info.has_virtual_memory)
+ dwords += 2;
+
+ return dwords;
+}
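The count mirrors r600_gfx_write_event_eop: each EVENT_WRITE_EOP packet is 6 dwords (the PKT3 header plus 5 payload dwords), CIK emits the packet twice, and without virtual memory the relocation adds 2 more. A caller would reserve that space before emitting, e.g. (sketch, hypothetical call site):

   unsigned fence_dw = r600_gfx_write_fence_dwords(rctx->screen);
   /* Flush first if the current IB can't hold the fence packets. */
   if (!rctx->ws->cs_check_space(rctx->gfx.cs, fence_dw))
           rctx->gfx.flush(rctx, RADEON_FLUSH_ASYNC, NULL);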
+
+void r600_gfx_wait_fence(struct r600_common_context *ctx,
+ uint64_t va, uint32_t ref, uint32_t mask)
+{
+ struct radeon_winsys_cs *cs = ctx->gfx.cs;
+
+ radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
+ radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ radeon_emit(cs, ref); /* reference value */
+ radeon_emit(cs, mask); /* mask */
+ radeon_emit(cs, 4); /* poll interval */
+}
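WAIT_REG_MEM with MEM_SPACE(1) makes the CP poll the memory at va until (value & mask) == ref, so pairing it with the EOP write above gives GPU-side synchronization without a CPU round trip. A minimal sketch, reusing the hypothetical fence_buf/fence_va from the earlier example:

   /* Emit the fence, then stall subsequent GFX work until it lands. */
   r600_gfx_write_event_eop(rctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS, 0,
                            1, fence_buf, fence_va, 0, 1);
   r600_gfx_wait_fence(rctx, fence_va, 1, 0xffffffff);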
+
void r600_draw_rectangle(struct blitter_context *blitter,
int x1, int y1, int x2, int y2, float depth,
enum blitter_attrib_type type,
uint64_t vram = 0, gtt = 0;
if (dst) {
- if (dst->domains & RADEON_DOMAIN_VRAM)
- vram += dst->buf->size;
- else if (dst->domains & RADEON_DOMAIN_GTT)
- gtt += dst->buf->size;
+ vram += dst->vram_usage;
+ gtt += dst->gart_usage;
}
if (src) {
- if (src->domains & RADEON_DOMAIN_VRAM)
- vram += src->buf->size;
- else if (src->domains & RADEON_DOMAIN_GTT)
- gtt += src->buf->size;
+ vram += src->vram_usage;
+ gtt += src->gart_usage;
}
/* Flush the GFX IB if DMA depends on it. */
/* Flush if there's not enough space, or if the memory usage per IB
* is too large.
*/
if (!ctx->ws->cs_check_space(ctx->dma.cs, num_dw) ||
- !ctx->ws->cs_memory_below_limit(ctx->dma.cs, vram, gtt)) {
+ !radeon_cs_memory_below_limit(ctx->screen, ctx->dma.cs, vram, gtt)) {
ctx->dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
assert((num_dw + ctx->dma.cs->current.cdw) <= ctx->dma.cs->current.max_dw);
}
/* Flush the DMA IB when its memory usage exceeds the limit.
* It improves texture upload performance by keeping the DMA
* engine busy while uploads are being submitted.
*/
- if (rctx->ws->cs_query_memory_usage(rctx->dma.cs) > 64 * 1024 * 1024) {
+ if (cs->used_vram + cs->used_gart > 64 * 1024 * 1024) {
rctx->dma.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
return;
}
{
struct pipe_screen *screen = ctx->screen;
struct r600_common_context *rctx = (struct r600_common_context *)ctx;
+ struct radeon_winsys *ws = rctx->ws;
unsigned rflags = 0;
struct pipe_fence_handle *gfx_fence = NULL;
struct pipe_fence_handle *sdma_fence = NULL;
+ bool deferred_fence = false;
if (flags & PIPE_FLUSH_END_OF_FRAME)
rflags |= RADEON_FLUSH_END_OF_FRAME;
+ if (flags & PIPE_FLUSH_DEFERRED)
+ rflags |= RADEON_FLUSH_ASYNC;
if (rctx->dma.cs) {
rctx->dma.flush(rctx, rflags, fence ? &sdma_fence : NULL);
}
- rctx->gfx.flush(rctx, rflags, fence ? &gfx_fence : NULL);
+
+ if (!radeon_emitted(rctx->gfx.cs, rctx->initial_gfx_cs_size)) {
+ if (fence)
+ ws->fence_reference(&gfx_fence, rctx->last_gfx_fence);
+ if (!(rflags & RADEON_FLUSH_ASYNC))
+ ws->cs_sync_flush(rctx->gfx.cs);
+ } else {
+ /* Instead of flushing, create a deferred fence. Constraints:
+ * - The state tracker must allow a deferred flush.
+ * - The state tracker must request a fence.
+ * Thread safety in fence_finish must be ensured by the state tracker.
+ */
+ if (flags & PIPE_FLUSH_DEFERRED && fence) {
+ gfx_fence = ws->cs_get_next_fence(rctx->gfx.cs);
+ deferred_fence = true;
+ } else {
+ rctx->gfx.flush(rctx, rflags, fence ? &gfx_fence : NULL);
+ }
+ }
/* Both engines can signal out of order, so we need to keep both fences. */
- if (gfx_fence || sdma_fence) {
+ if (fence) {
struct r600_multi_fence *multi_fence =
CALLOC_STRUCT(r600_multi_fence);
if (!multi_fence)
return;
multi_fence->reference.count = 1;
+ /* If both fences are NULL, fence_finish will always return true. */
multi_fence->gfx = gfx_fence;
multi_fence->sdma = sdma_fence;
+ if (deferred_fence) {
+ multi_fence->gfx_unflushed.ctx = rctx;
+ multi_fence->gfx_unflushed.ib_index = rctx->num_gfx_cs_flushes;
+ }
+
screen->fence_reference(screen, fence, NULL);
*fence = (struct pipe_fence_handle*)multi_fence;
}
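From the state tracker's side, the deferred path looks roughly like this (a sketch with hypothetical variables, not code from this patch): the flush returns a fence without submitting the gfx IB, and r600_fence_finish submits it later if it is still pending when waited on.

   struct pipe_fence_handle *fence = NULL;
   pipe->flush(pipe, &fence, PIPE_FLUSH_DEFERRED);
   /* ... record more work, or hand the fence to another thread ... */
   screen->fence_finish(screen, pipe, fence, PIPE_TIMEOUT_INFINITE);
   screen->fence_reference(screen, &fence, NULL);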
void radeon_clear_saved_cs(struct radeon_saved_cs *saved)
{
- unsigned i;
-
FREE(saved->ib);
-
- for (i = 0; i < saved->bo_count; i++)
- pb_reference(&saved->bo_list[i].buf, NULL);
FREE(saved->bo_list);
memset(saved, 0, sizeof(*saved));
memset(&rctx->debug, 0, sizeof(rctx->debug));
}
+static void r600_set_device_reset_callback(struct pipe_context *ctx,
+ const struct pipe_device_reset_callback *cb)
+{
+ struct r600_common_context *rctx = (struct r600_common_context *)ctx;
+
+ if (cb)
+ rctx->device_reset_callback = *cb;
+ else
+ memset(&rctx->device_reset_callback, 0,
+ sizeof(rctx->device_reset_callback));
+}
+
+bool r600_check_device_reset(struct r600_common_context *rctx)
+{
+ enum pipe_reset_status status;
+
+ if (!rctx->device_reset_callback.reset)
+ return false;
+
+ if (!rctx->b.get_device_reset_status)
+ return false;
+
+ status = rctx->b.get_device_reset_status(&rctx->b);
+ if (status == PIPE_NO_RESET)
+ return false;
+
+ rctx->device_reset_callback.reset(rctx->device_reset_callback.data, status);
+ return true;
+}
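For reference, a state tracker would install the callback roughly as follows (hypothetical names; the driver then invokes reset(data, status) from r600_check_device_reset when the kernel reports a reset):

   static void notify_device_reset(void *data, enum pipe_reset_status status)
   {
           /* e.g. mark the API context as lost */
   }

   struct pipe_device_reset_callback cb = {
           .reset = notify_device_reset,
           .data = my_cookie, /* hypothetical user pointer */
   };
   pipe->set_device_reset_callback(pipe, &cb);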
+
bool r600_common_context_init(struct r600_common_context *rctx,
- struct r600_common_screen *rscreen)
+ struct r600_common_screen *rscreen,
+ unsigned context_flags)
{
- util_slab_create(&rctx->pool_transfers,
- sizeof(struct r600_transfer), 64,
- UTIL_SLAB_SINGLETHREADED);
+ slab_create_child(&rctx->pool_transfers, &rscreen->pool_transfers);
rctx->screen = rscreen;
rctx->ws = rscreen->ws;
rctx->b.transfer_map = u_transfer_map_vtbl;
rctx->b.transfer_flush_region = u_transfer_flush_region_vtbl;
rctx->b.transfer_unmap = u_transfer_unmap_vtbl;
- rctx->b.transfer_inline_write = u_default_transfer_inline_write;
- rctx->b.memory_barrier = r600_memory_barrier;
+ rctx->b.texture_subdata = u_default_texture_subdata;
+ rctx->b.memory_barrier = r600_memory_barrier;
rctx->b.flush = r600_flush_from_st;
rctx->b.set_debug_callback = r600_set_debug_callback;
+ /* evergreen_compute.c has a special codepath for global buffers.
+ * Everything else can use the direct path.
+ */
+ if ((rscreen->chip_class == EVERGREEN || rscreen->chip_class == CAYMAN) &&
+ (context_flags & PIPE_CONTEXT_COMPUTE_ONLY))
+ rctx->b.buffer_subdata = u_default_buffer_subdata;
+ else
+ rctx->b.buffer_subdata = r600_buffer_subdata;
+
if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 43) {
rctx->b.get_device_reset_status = r600_get_reset_status;
rctx->gpu_reset_counter =
RADEON_GPU_RESET_COUNTER);
}
- LIST_INITHEAD(&rctx->texture_buffers);
+ rctx->b.set_device_reset_callback = r600_set_device_reset_callback;
r600_init_context_texture_functions(rctx);
r600_init_viewport_functions(rctx);
r600_texture_reference(&rctx->dcc_stats[i].tex, NULL);
}
+ if (rctx->query_result_shader)
+ rctx->b.delete_compute_state(&rctx->b, rctx->query_result_shader);
+
if (rctx->gfx.cs)
rctx->ws->cs_destroy(rctx->gfx.cs);
if (rctx->dma.cs)
u_upload_destroy(rctx->uploader);
}
- util_slab_destroy(&rctx->pool_transfers);
+ slab_destroy_child(&rctx->pool_transfers);
if (rctx->allocator_zeroed_memory) {
u_suballocator_destroy(rctx->allocator_zeroed_memory);
}
+ rctx->ws->fence_reference(&rctx->last_gfx_fence, NULL);
rctx->ws->fence_reference(&rctx->last_sdma_fence, NULL);
}
-void r600_context_add_resource_size(struct pipe_context *ctx, struct pipe_resource *r)
-{
- struct r600_common_context *rctx = (struct r600_common_context *)ctx;
- struct r600_resource *rr = (struct r600_resource *)r;
-
- if (!r) {
- return;
- }
-
- /*
- * The idea is to compute a gross estimate of memory requirement of
- * each draw call. After each draw call, memory will be precisely
- * accounted. So the uncertainty is only on the current draw call.
- * In practice this gave very good estimate (+/- 10% of the target
- * memory limit).
- */
- if (rr->domains & RADEON_DOMAIN_VRAM)
- rctx->vram += rr->buf->size;
- else if (rr->domains & RADEON_DOMAIN_GTT)
- rctx->gtt += rr->buf->size;
-}
-
/*
* pipe_screen
*/
{ "notgsi", DBG_NO_TGSI, "Don't print the TGSI"},
{ "noasm", DBG_NO_ASM, "Don't print disassembled shaders"},
{ "preoptir", DBG_PREOPT_IR, "Print the LLVM IR before initial optimizations" },
+ { "checkir", DBG_CHECK_IR, "Enable additional sanity checks on shader IR" },
{ "testdma", DBG_TEST_DMA, "Invoke SDMA tests and exit." },
{ "mono", DBG_MONOLITHIC_SHADERS, "Use old-style monolithic shaders compiled on demand" },
{ "noce", DBG_NO_CE, "Disable the constant engine"},
{ "unsafemath", DBG_UNSAFE_MATH, "Enable unsafe math shader optimizations" },
+ { "nodccfb", DBG_NO_DCC_FB, "Disable separate DCC on the main framebuffer" },
DEBUG_NAMED_VALUE_END /* must be last */
};
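These names are parsed by debug_get_flags_option("R600_DEBUG", ...), so the new flags can be enabled from the environment, e.g. (glxgears is just an arbitrary GL client):

   R600_DEBUG=checkir,nodccfb glxgears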
if (rscreen->family <= CHIP_ARUBA) {
triple = "r600--";
} else {
- triple = "amdgcn--";
+ if (HAVE_LLVM < 0x0400) {
+ triple = "amdgcn--";
+ } else {
+ triple = "amdgcn-mesa-mesa3d";
+ }
}
switch(rscreen->family) {
/* Clang < 3.6 is missing Hainan in its list of
*max_threads_per_block = 256;
}
return sizeof(uint64_t);
+ case PIPE_COMPUTE_CAP_ADDRESS_BITS:
+ if (ret) {
+ uint32_t *address_bits = ret;
+ address_bits[0] = 32;
+ if (rscreen->chip_class >= SI)
+ address_bits[0] = 64;
+ }
+ return 1 * sizeof(uint32_t);
case PIPE_COMPUTE_CAP_MAX_GLOBAL_SIZE:
if (ret) {
* 4 * MAX_MEM_ALLOC_SIZE.
*/
*max_global_size = MIN2(4 * max_mem_alloc_size,
- rscreen->info.gart_size +
- rscreen->info.vram_size);
+ MAX2(rscreen->info.gart_size,
+ rscreen->info.vram_size));
}
return sizeof(uint64_t);
if (ret) {
uint64_t *max_mem_alloc_size = ret;
- /* XXX: The limit in older kernels is 256 MB. We
- * should add a query here for newer kernels.
- */
- *max_mem_alloc_size = 256 * 1024 * 1024;
+ *max_mem_alloc_size = rscreen->info.max_alloc_size;
}
return sizeof(uint64_t);
*subgroup_size = r600_wavefront_size(rscreen->family);
}
return sizeof(uint32_t);
+ case PIPE_COMPUTE_CAP_MAX_VARIABLE_THREADS_PER_BLOCK:
+ if (ret) {
+ uint64_t *max_variable_threads_per_block = ret;
+ if (rscreen->chip_class >= SI && HAVE_LLVM >= 0x309 &&
+ ir_type == PIPE_SHADER_IR_TGSI)
+ *max_variable_threads_per_block = SI_MAX_VARIABLE_THREADS_PER_BLOCK;
+ else
+ *max_variable_threads_per_block = 0;
+ }
+ return sizeof(uint64_t);
}
fprintf(stderr, "unknown PIPE_COMPUTE_CAP %d\n", param);
}
static boolean r600_fence_finish(struct pipe_screen *screen,
+ struct pipe_context *ctx,
struct pipe_fence_handle *fence,
uint64_t timeout)
{
struct radeon_winsys *rws = ((struct r600_common_screen*)screen)->ws;
struct r600_multi_fence *rfence = (struct r600_multi_fence *)fence;
+ struct r600_common_context *rctx =
+ ctx ? (struct r600_common_context*)ctx : NULL;
int64_t abs_timeout = os_time_get_absolute_timeout(timeout);
if (rfence->sdma) {
if (!rfence->gfx)
return true;
+ /* Flush the gfx IB if it hasn't been flushed yet. */
+ if (rctx &&
+ rfence->gfx_unflushed.ctx == rctx &&
+ rfence->gfx_unflushed.ib_index == rctx->num_gfx_cs_flushes) {
+ rctx->gfx.flush(rctx, timeout ? 0 : RADEON_FLUSH_ASYNC, NULL);
+ rfence->gfx_unflushed.ctx = NULL;
+
+ if (!timeout)
+ return false;
+
+ /* Recompute the timeout after all that. */
+ if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
+ int64_t time = os_time_get_nano();
+ timeout = abs_timeout > time ? abs_timeout - time : 0;
+ }
+ }
+
return rws->fence_wait(rws, rfence->gfx, timeout);
}
info->device_memory_evicted =
ws->query_value(ws, RADEON_NUM_BYTES_MOVED) / 1024;
- /* Just return the number of evicted 64KB pages. */
- info->nr_device_memory_evictions = info->device_memory_evicted / 64;
+
+ if (rscreen->info.drm_major == 3 && rscreen->info.drm_minor >= 4)
+ info->nr_device_memory_evictions =
+ ws->query_value(ws, RADEON_NUM_EVICTIONS);
+ else
+ /* Just return the number of evicted 64KB pages. */
+ info->nr_device_memory_evictions = info->device_memory_evicted / 64;
}
struct pipe_resource *r600_resource_create_common(struct pipe_screen *screen,
const struct pipe_resource *templ)
{
- struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
-
if (templ->target == PIPE_BUFFER) {
- return r600_buffer_create(screen, templ,
- rscreen->info.gart_page_size);
+ return r600_buffer_create(screen, templ, 256);
} else {
return r600_texture_create(screen, templ);
}
rscreen->chip_class = rscreen->info.chip_class;
rscreen->debug_flags = debug_get_flags_option("R600_DEBUG", common_debug_options, 0);
+ slab_create_parent(&rscreen->pool_transfers, sizeof(struct r600_transfer), 64);
+
rscreen->force_aniso = MIN2(16, debug_get_num_option("R600_TEX_ANISO", -1));
if (rscreen->force_aniso >= 0) {
printf("radeon: Forcing anisotropy filter to %ix\n",
printf("chip_class = %i\n", rscreen->info.chip_class);
printf("gart_size = %i MB\n", (int)DIV_ROUND_UP(rscreen->info.gart_size, 1024*1024));
printf("vram_size = %i MB\n", (int)DIV_ROUND_UP(rscreen->info.vram_size, 1024*1024));
+ printf("max_alloc_size = %i MB\n",
+ (int)DIV_ROUND_UP(rscreen->info.max_alloc_size, 1024*1024));
printf("has_virtual_memory = %i\n", rscreen->info.has_virtual_memory);
printf("gfx_ib_pad_with_type2 = %i\n", rscreen->info.gfx_ib_pad_with_type2);
printf("has_sdma = %i\n", rscreen->info.has_sdma);
printf("has_uvd = %i\n", rscreen->info.has_uvd);
+ printf("me_fw_version = %i\n", rscreen->info.me_fw_version);
+ printf("pfp_fw_version = %i\n", rscreen->info.pfp_fw_version);
+ printf("ce_fw_version = %i\n", rscreen->info.ce_fw_version);
printf("vce_fw_version = %i\n", rscreen->info.vce_fw_version);
printf("vce_harvest_config = %i\n", rscreen->info.vce_harvest_config);
printf("clock_crystal_freq = %i\n", rscreen->info.clock_crystal_freq);
pipe_mutex_destroy(rscreen->aux_context_lock);
rscreen->aux_context->destroy(rscreen->aux_context);
+ slab_destroy_parent(&rscreen->pool_transfers);
+
rscreen->ws->destroy(rscreen->ws);
FREE(rscreen);
}
}
}
+bool r600_extra_shader_checks(struct r600_common_screen *rscreen, unsigned processor)
+{
+ return (rscreen->debug_flags & DBG_CHECK_IR) ||
+ r600_can_dump_shader(rscreen, processor);
+}
+
void r600_screen_clear_buffer(struct r600_common_screen *rscreen, struct pipe_resource *dst,
uint64_t offset, uint64_t size, unsigned value,
enum r600_coherency coher)