unsigned event, unsigned event_flags,
unsigned data_sel,
struct r600_resource *buf, uint64_t va,
- uint32_t old_fence, uint32_t new_fence)
+ uint32_t new_fence, unsigned query_type)
{
struct radeon_winsys_cs *cs = ctx->gfx.cs;
unsigned op = EVENT_TYPE(event) |
EVENT_INDEX(5) |
event_flags;
+ unsigned sel = EOP_DATA_SEL(data_sel);
+
+ /* Wait for write confirmation before writing data, but don't send
+ * an interrupt. */
+ if (ctx->chip_class >= SI && data_sel != EOP_DATA_SEL_DISCARD)
+ sel |= EOP_INT_SEL(EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM);
if (ctx->chip_class >= GFX9) {
+ /* A ZPASS_DONE or PIXEL_STAT_DUMP_EVENT (of the DB occlusion
+ * counters) must immediately precede every timestamp event to
+ * prevent a GPU hang on GFX9.
+ *
+ * Occlusion queries don't need to do it here, because they
+ * always do ZPASS_DONE before the timestamp.
+ */
+ if (ctx->chip_class == GFX9 &&
+ query_type != PIPE_QUERY_OCCLUSION_COUNTER &&
+ query_type != PIPE_QUERY_OCCLUSION_PREDICATE) {
+ struct r600_resource *scratch = ctx->eop_bug_scratch;
+
+ assert(16 * ctx->screen->info.num_render_backends <=
+ scratch->b.b.width0);
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
+ radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
+ radeon_emit(cs, scratch->gpu_address);
+ radeon_emit(cs, scratch->gpu_address >> 32);
+
+ radeon_add_to_buffer_list(ctx, &ctx->gfx, scratch,
+ RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
+ }
+
radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, 6, 0));
radeon_emit(cs, op);
- radeon_emit(cs, EOP_DATA_SEL(data_sel));
+ radeon_emit(cs, sel);
radeon_emit(cs, va); /* address lo */
radeon_emit(cs, va >> 32); /* address hi */
radeon_emit(cs, new_fence); /* immediate data lo */
} else {
if (ctx->chip_class == CIK ||
ctx->chip_class == VI) {
+ struct r600_resource *scratch = ctx->eop_bug_scratch;
+ uint64_t va = scratch->gpu_address;
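+ /* This deliberately shadows the caller's va: the dummy first EOP
+ * write below lands in the scratch buffer, not at the fence address. */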
+
/* Two EOP events are required to make all engines go idle
* (and optional cache flushes executed) before the timestamp
* is written.
*/
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
radeon_emit(cs, op);
radeon_emit(cs, va);
- radeon_emit(cs, ((va >> 32) & 0xffff) | EOP_DATA_SEL(data_sel));
- radeon_emit(cs, old_fence); /* immediate data */
+ radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
+ radeon_emit(cs, 0); /* immediate data */
radeon_emit(cs, 0); /* unused */
+
+ radeon_add_to_buffer_list(ctx, &ctx->gfx, scratch,
+ RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
}
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
radeon_emit(cs, op);
radeon_emit(cs, va);
- radeon_emit(cs, ((va >> 32) & 0xffff) | EOP_DATA_SEL(data_sel));
+ radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
radeon_emit(cs, new_fence); /* immediate data */
radeon_emit(cs, 0); /* unused */
}
}
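+ /* Usage sketch (illustrative, not part of this change): timestamp
+ * emission now passes the query type so the GFX9 workaround above can
+ * be skipped for occlusion queries. Buffer and address are hypothetical:
+ *
+ * r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS, 0,
+ * EOP_DATA_SEL_TIMESTAMP, query_buf, va,
+ * 0, PIPE_QUERY_TIMESTAMP);
+ */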
void r600_draw_rectangle(struct blitter_context *blitter,
- int x1, int y1, int x2, int y2, float depth,
+ int x1, int y1, int x2, int y2,
+ float depth, unsigned num_instances,
enum blitter_attrib_type type,
- const union pipe_color_union *attrib)
+ const union blitter_attrib *attrib)
{
struct r600_common_context *rctx =
(struct r600_common_context*)util_blitter_get_pipe(blitter);
unsigned offset = 0;
float *vb;
- if (type == UTIL_BLITTER_ATTRIB_TEXCOORD) {
- util_blitter_draw_rectangle(blitter, x1, y1, x2, y2, depth, type, attrib);
- return;
- }
-
/* Some operations (like color resolve on r6xx) don't work
* with the conventional primitive types.
* One that works is PT_RECTLIST, which we use here. */
rctx->b.set_viewport_states(&rctx->b, 0, 1, &viewport);
/* Upload vertices. The hw rectangle has only 3 vertices,
- * I guess the 4th one is derived from the first 3.
+ * the 4th one is derived from the first 3.
* The vertex specification should match u_blitter's vertex element state. */
u_upload_alloc(rctx->b.stream_uploader, 0, sizeof(float) * 24,
rctx->screen->info.tcc_cache_line_size,
vb[18] = depth;
vb[19] = 1;
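+ /* Each vertex occupies 8 floats: position in [0..3], the optional
+ * color/texcoord attribute in [4..7] (hence the stride set below). */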
- if (attrib) {
- memcpy(vb+4, attrib->f, sizeof(float)*4);
- memcpy(vb+12, attrib->f, sizeof(float)*4);
- memcpy(vb+20, attrib->f, sizeof(float)*4);
+ switch (type) {
+ case UTIL_BLITTER_ATTRIB_COLOR:
+ memcpy(vb+4, attrib->color, sizeof(float)*4);
+ memcpy(vb+12, attrib->color, sizeof(float)*4);
+ memcpy(vb+20, attrib->color, sizeof(float)*4);
+ break;
+ case UTIL_BLITTER_ATTRIB_TEXCOORD_XYZW:
+ vb[6] = vb[14] = vb[22] = attrib->texcoord.z;
+ vb[7] = vb[15] = vb[23] = attrib->texcoord.w;
+ /* fall through */
+ case UTIL_BLITTER_ATTRIB_TEXCOORD_XY:
+ vb[4] = attrib->texcoord.x1;
+ vb[5] = attrib->texcoord.y1;
+ vb[12] = attrib->texcoord.x1;
+ vb[13] = attrib->texcoord.y2;
+ vb[20] = attrib->texcoord.x2;
+ vb[21] = attrib->texcoord.y1;
+ break;
+ default:; /* Nothing to do. */
}
/* draw */
- util_draw_vertex_buffer(&rctx->b, NULL, buf, blitter->vb_slot, offset,
- R600_PRIM_RECTANGLE_LIST, 3, 2);
+ struct pipe_vertex_buffer vbuffer = {};
+ vbuffer.buffer.resource = buf;
+ vbuffer.stride = 2 * 4 * sizeof(float); /* vertex size */
+ vbuffer.buffer_offset = offset;
+
+ rctx->b.set_vertex_buffers(&rctx->b, blitter->vb_slot, 1, &vbuffer);
+ util_draw_arrays_instanced(&rctx->b, R600_PRIM_RECTANGLE_LIST, 0, 3,
+ 0, num_instances);
pipe_resource_reference(&buf, NULL);
}
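+ /* Example of the new call convention (values hypothetical): a color
+ * blit would now be expressed as
+ *
+ * union blitter_attrib attrib;
+ * memcpy(attrib.color, color->f, sizeof(attrib.color));
+ * r600_draw_rectangle(blitter, x1, y1, x2, y2, depth, 1,
+ * UTIL_BLITTER_ATTRIB_COLOR, &attrib);
+ */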
r600_resume_queries(ctx);
}
+static void r600_add_fence_dependency(struct r600_common_context *rctx,
+ struct pipe_fence_handle *fence)
+{
+ struct radeon_winsys *ws = rctx->ws;
+
+ if (rctx->dma.cs)
+ ws->cs_add_fence_dependency(rctx->dma.cs, fence);
+ ws->cs_add_fence_dependency(rctx->gfx.cs, fence);
+}
+
+static void r600_fence_server_sync(struct pipe_context *ctx,
+ struct pipe_fence_handle *fence)
+{
+ struct r600_common_context *rctx = (struct r600_common_context *)ctx;
+ struct r600_multi_fence *rfence = (struct r600_multi_fence *)fence;
+
+ /* Only amdgpu needs to handle fence dependencies (for fence imports).
+ * radeon synchronizes all rings by default and will not implement
+ * fence imports.
+ */
+ if (rctx->screen->info.drm_major == 2)
+ return;
+
+ /* Only imported fences need to be handled by fence_server_sync,
+ * because the winsys handles synchronizations automatically for BOs
+ * within the process.
+ *
+ * Simply skip unflushed fences here, and the winsys will drop no-op
+ * dependencies (i.e. dependencies within the same ring).
+ */
+ if (rfence->gfx_unflushed.ctx)
+ return;
+
+ /* No unflushed commands will start execution before
+ * this fence dependency is signalled.
+ *
+ * Should we flush the context to allow more GPU parallelism?
+ */
+ if (rfence->sdma)
+ r600_add_fence_dependency(rctx, rfence->sdma);
+ if (rfence->gfx)
+ r600_add_fence_dependency(rctx, rfence->gfx);
+}
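+ /* Call-path sketch (illustrative): the state tracker invokes this hook
+ * with an imported fence before submitting dependent work, e.g.
+ *
+ * ctx->fence_server_sync(ctx, imported_fence);
+ *
+ * where imported_fence would typically come from a sync-file import. */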
+
static void r600_flush_from_st(struct pipe_context *ctx,
struct pipe_fence_handle **fence,
unsigned flags)
if (fence) {
struct r600_multi_fence *multi_fence =
CALLOC_STRUCT(r600_multi_fence);
- if (!multi_fence)
- return;
+ if (!multi_fence) {
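+ /* Allocation failed: drop the winsys fence references so they
+ * aren't leaked, then skip straight to the flush path below. */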
+ ws->fence_reference(&sdma_fence, NULL);
+ ws->fence_reference(&gfx_fence, NULL);
+ goto finish;
+ }
multi_fence->reference.count = 1;
/* If both fences are NULL, fence_finish will always return true. */
screen->fence_reference(screen, fence, NULL);
*fence = (struct pipe_fence_handle*)multi_fence;
}
-
+finish:
if (!(flags & PIPE_FLUSH_DEFERRED)) {
if (rctx->dma.cs)
ws->cs_sync_flush(rctx->dma.cs);
void radeon_save_cs(struct radeon_winsys *ws, struct radeon_winsys_cs *cs,
struct radeon_saved_cs *saved, bool get_buffer_list)
{
- void *buf;
+ uint32_t *buf;
unsigned i;
/* Save the IB chunks. */
rctx->b.memory_barrier = r600_memory_barrier;
rctx->b.flush = r600_flush_from_st;
rctx->b.set_debug_callback = r600_set_debug_callback;
+ rctx->b.fence_server_sync = r600_fence_server_sync;
rctx->dma_clear_buffer = r600_dma_clear_buffer_fallback;
/* evergreen_compute.c has a special codepath for global buffers.
r600_query_init(rctx);
cayman_init_msaa(&rctx->b);
+ if (rctx->chip_class == CIK ||
+ rctx->chip_class == VI ||
+ rctx->chip_class == GFX9) {
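+ /* ZPASS_DONE writes 16 bytes per render backend; this matches the
+ * assert in r600_gfx_write_event_eop. */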
+ rctx->eop_bug_scratch = (struct r600_resource*)
+ pipe_buffer_create(&rscreen->b, 0, PIPE_USAGE_DEFAULT,
+ 16 * rscreen->info.num_render_backends);
+ if (!rctx->eop_bug_scratch)
+ return false;
+ }
+
rctx->allocator_zeroed_memory =
u_suballocator_create(&rctx->b, rscreen->info.gart_page_size,
0, PIPE_USAGE_DEFAULT, 0, true);
}
rctx->ws->fence_reference(&rctx->last_gfx_fence, NULL);
rctx->ws->fence_reference(&rctx->last_sdma_fence, NULL);
+ r600_resource_reference(&rctx->eop_bug_scratch, NULL);
}
/*
{ "norbplus", DBG_NO_RB_PLUS, "Disable RB+." },
{ "sisched", DBG_SI_SCHED, "Enable LLVM SI Machine Instruction Scheduler." },
{ "mono", DBG_MONOLITHIC_SHADERS, "Use old-style monolithic shaders compiled on demand" },
- { "noce", DBG_NO_CE, "Disable the constant engine"},
{ "unsafemath", DBG_UNSAFE_MATH, "Enable unsafe math shader optimizations" },
{ "nodccfb", DBG_NO_DCC_FB, "Disable separate DCC on the main framebuffer" },
+ { "nodpbb", DBG_NO_DPBB, "Disable DPBB." },
+ { "nodfsm", DBG_NO_DFSM, "Disable DFSM." },
DEBUG_NAMED_VALUE_END /* must be last */
};
static void r600_disk_cache_create(struct r600_common_screen *rscreen)
{
/* Don't use the cache if shader dumping is enabled. */
- if (rscreen->debug_flags &
- (DBG_FS | DBG_VS | DBG_TCS | DBG_TES | DBG_GS | DBG_PS | DBG_CS))
+ if (rscreen->debug_flags & DBG_ALL_SHADERS)
return;
uint32_t mesa_timestamp;
}
#endif
if (res != -1) {
+ /* These flags affect shader compilation. */
+ uint64_t shader_debug_flags =
+ rscreen->debug_flags &
+ (DBG_FS_CORRECT_DERIVS_AFTER_KILL |
+ DBG_SI_SCHED |
+ DBG_UNSAFE_MATH);
+
rscreen->disk_shader_cache =
disk_cache_create(r600_get_family_name(rscreen),
timestamp_str,
- rscreen->debug_flags);
+ shader_debug_flags);
free(timestamp_str);
}
}
}
bool r600_common_screen_init(struct r600_common_screen *rscreen,
- struct radeon_winsys *ws, unsigned flags)
+ struct radeon_winsys *ws)
{
char family_name[32] = {}, llvm_string[32] = {}, kernel_version[128] = {};
struct utsname uname_data;
rscreen->family = rscreen->info.family;
rscreen->chip_class = rscreen->info.chip_class;
- rscreen->debug_flags = debug_get_flags_option("R600_DEBUG", common_debug_options, 0);
+ rscreen->debug_flags |= debug_get_flags_option("R600_DEBUG", common_debug_options, 0);
rscreen->has_rbplus = false;
rscreen->rbplus_allowed = false;
- /* Set the flag in debug_flags, so that the shader cache takes it
- * into account. */
- if (flags & PIPE_SCREEN_ENABLE_CORRECT_TGSI_DERIVATIVES_AFTER_KILL)
- rscreen->debug_flags |= DBG_FS_CORRECT_DERIVS_AFTER_KILL;
-
r600_disk_cache_create(rscreen);
slab_create_parent(&rscreen->pool_transfers, sizeof(struct r600_transfer), 64);
(void) mtx_init(&rscreen->gpu_load_mutex, mtx_plain);
if (rscreen->debug_flags & DBG_INFO) {
+ printf("pci (domain:bus:dev.func): %04x:%02x:%02x.%x\n",
+ rscreen->info.pci_domain, rscreen->info.pci_bus,
+ rscreen->info.pci_dev, rscreen->info.pci_func);
printf("pci_id = 0x%x\n", rscreen->info.pci_id);
printf("family = %i (%s)\n", rscreen->info.family,
r600_get_family_name(rscreen));
printf("chip_class = %i\n", rscreen->info.chip_class);
+ printf("pte_fragment_size = %u\n", rscreen->info.pte_fragment_size);
+ printf("gart_page_size = %u\n", rscreen->info.gart_page_size);
printf("gart_size = %i MB\n", (int)DIV_ROUND_UP(rscreen->info.gart_size, 1024*1024));
printf("vram_size = %i MB\n", (int)DIV_ROUND_UP(rscreen->info.vram_size, 1024*1024));
printf("vram_vis_size = %i MB\n", (int)DIV_ROUND_UP(rscreen->info.vram_vis_size, 1024*1024));
printf("max_alloc_size = %i MB\n",
(int)DIV_ROUND_UP(rscreen->info.max_alloc_size, 1024*1024));
+ printf("min_alloc_size = %u\n", rscreen->info.min_alloc_size);
+ printf("has_dedicated_vram = %u\n", rscreen->info.has_dedicated_vram);
printf("has_virtual_memory = %i\n", rscreen->info.has_virtual_memory);
printf("gfx_ib_pad_with_type2 = %i\n", rscreen->info.gfx_ib_pad_with_type2);
+ printf("has_hw_decode = %u\n", rscreen->info.has_hw_decode);
printf("num_sdma_rings = %i\n", rscreen->info.num_sdma_rings);
- printf("has_hw_decode = %i\n", rscreen->info.has_hw_decode);
+ printf("num_compute_rings = %u\n", rscreen->info.num_compute_rings);
+ printf("uvd_fw_version = %u\n", rscreen->info.uvd_fw_version);
+ printf("vce_fw_version = %u\n", rscreen->info.vce_fw_version);
printf("me_fw_version = %i\n", rscreen->info.me_fw_version);
+ printf("me_fw_feature = %i\n", rscreen->info.me_fw_feature);
printf("pfp_fw_version = %i\n", rscreen->info.pfp_fw_version);
+ printf("pfp_fw_feature = %i\n", rscreen->info.pfp_fw_feature);
printf("ce_fw_version = %i\n", rscreen->info.ce_fw_version);
- printf("vce_fw_version = %i\n", rscreen->info.vce_fw_version);
+ printf("ce_fw_feature = %i\n", rscreen->info.ce_fw_feature);
printf("vce_harvest_config = %i\n", rscreen->info.vce_harvest_config);
printf("clock_crystal_freq = %i\n", rscreen->info.clock_crystal_freq);
+ printf("tcc_cache_line_size = %u\n", rscreen->info.tcc_cache_line_size);
printf("drm = %i.%i.%i\n", rscreen->info.drm_major,
rscreen->info.drm_minor, rscreen->info.drm_patchlevel);
printf("has_userptr = %i\n", rscreen->info.has_userptr);
+ printf("has_syncobj = %u\n", rscreen->info.has_syncobj);
printf("r600_max_quad_pipes = %i\n", rscreen->info.r600_max_quad_pipes);
printf("max_shader_clock = %i\n", rscreen->info.max_shader_clock);
printf("num_tile_pipes = %i\n", rscreen->info.num_tile_pipes);
printf("pipe_interleave_bytes = %i\n", rscreen->info.pipe_interleave_bytes);
printf("enabled_rb_mask = 0x%x\n", rscreen->info.enabled_rb_mask);
+ printf("max_alignment = %u\n", (unsigned)rscreen->info.max_alignment);
}
return true;
}
bool r600_can_dump_shader(struct r600_common_screen *rscreen,
unsigned processor)
{
- switch (processor) {
- case PIPE_SHADER_VERTEX:
- return (rscreen->debug_flags & DBG_VS) != 0;
- case PIPE_SHADER_TESS_CTRL:
- return (rscreen->debug_flags & DBG_TCS) != 0;
- case PIPE_SHADER_TESS_EVAL:
- return (rscreen->debug_flags & DBG_TES) != 0;
- case PIPE_SHADER_GEOMETRY:
- return (rscreen->debug_flags & DBG_GS) != 0;
- case PIPE_SHADER_FRAGMENT:
- return (rscreen->debug_flags & DBG_PS) != 0;
- case PIPE_SHADER_COMPUTE:
- return (rscreen->debug_flags & DBG_CS) != 0;
- default:
- return false;
- }
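+ /* This relies on the per-stage DBG_* dump flags being defined as
+ * (1 << PIPE_SHADER_*), so the processor enum indexes them directly. */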
+ return rscreen->debug_flags & (1 << processor);
}
bool r600_extra_shader_checks(struct r600_common_screen *rscreen, unsigned processor)