rscreen->b.has_cp_dma = rscreen->b.info.drm_minor >= 27 &&
!(rscreen->b.debug_flags & DBG_NO_CP_DMA);
+ rscreen->b.barrier_flags.cp_to_L2 =
+ R600_CONTEXT_INV_VERTEX_CACHE |
+ R600_CONTEXT_INV_TEX_CACHE |
+ R600_CONTEXT_INV_CONST_CACHE;
+ rscreen->b.barrier_flags.compute_to_L2 = R600_CONTEXT_CS_PARTIAL_FLUSH | R600_CONTEXT_FLUSH_AND_INV;
+
rscreen->global_pool = compute_memory_pool_new(rscreen);
/* Create the auxiliary context. This must be done last. */
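The hunk above is r600g's screen setup (r600_pipe.c): it maps the new common fields onto the driver's existing flush bits. compute_to_L2 also carries R600_CONTEXT_FLUSH_AND_INV, presumably because evergreen-class compute writes global memory through the RAT (CB) caches, which a plain shader wait does not flush. The point of the new fields is that code shared between r600g and radeonsi can request the right driver-specific cache maintenance without knowing these bits. A minimal sketch of such a consumer, assuming the existing deferred-flush bitmask r600_common_context::flags (the helper name is invented for illustration):

    #include "r600_pipe_common.h"

    /* Hypothetical shared helper, not part of this patch: after the CP
     * has written a buffer (e.g. a CP DMA clear or copy), queue the
     * driver-specific invalidations so later reads through the L2
     * clients see the new contents.  The bits are emitted with the
     * next flush. */
    static void example_after_cp_write(struct r600_common_context *rctx)
    {
            rctx->flags |= rctx->screen->barrier_flags.cp_to_L2;
    }

The next hunk (r600_pipe_common.h) adds the fields themselves to r600_common_screen: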
*/
unsigned dirty_tex_descriptor_counter;
+ struct {
+ /* Context flags to set so that all writes from earlier jobs
+ * in the CP are seen by L2 clients.
+ */
+ unsigned cp_to_L2;
+
+ /* Context flags to set so that all writes from earlier
+ * compute jobs are seen by L2 clients.
+ */
+ unsigned compute_to_L2;
+ } barrier_flags;
+
void (*query_opaque_metadata)(struct r600_common_screen *rscreen,
struct r600_texture *rtex,
struct radeon_bo_metadata *md);
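The compute producer side is symmetrical. A hedged sketch of a dispatch epilogue under the same assumptions as above (illustrative only; the real call sites are not part of this patch):

    /* Hypothetical, not part of this patch: after a compute dispatch
     * that wrote GPU memory, make the writes visible to later L2
     * clients (draws, CP reads).  Emitted with the next flush. */
    static void example_after_compute_write(struct r600_common_context *rctx)
    {
            rctx->flags |= rctx->screen->barrier_flags.compute_to_L2;
    }

The radeonsi counterpart (si_pipe.c) fills the same fields with GCN cache bits: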
HAVE_LLVM < 0x0308 ||
(sscreen->b.debug_flags & DBG_MONOLITHIC_SHADERS) != 0;
+ sscreen->b.barrier_flags.cp_to_L2 = SI_CONTEXT_INV_SMEM_L1 |
+ SI_CONTEXT_INV_VMEM_L1 |
+ SI_CONTEXT_INV_GLOBAL_L2;
+ sscreen->b.barrier_flags.compute_to_L2 = SI_CONTEXT_CS_PARTIAL_FLUSH;
+
if (debug_get_bool_option("RADEON_DUMP_SHADERS", false))
sscreen->b.debug_flags |= DBG_FS | DBG_VS | DBG_GS | DBG_PS | DBG_CS;
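Note the asymmetry in the radeonsi values: cp_to_L2 invalidates the scalar L1, vector L1 and global L2 because readers may hold stale lines in caches the CP does not update, whereas compute_to_L2 is only SI_CONTEXT_CS_PARTIAL_FLUSH, since GCN compute writes already land in the shared L2 and only the shaders themselves need to be waited on (unlike r600, where the RAT path also needs a flush-and-invalidate). A simplified sketch of the emit side, modeled on radeonsi's existing cache-flush code; the macro names follow that code and should be read as an approximation, not as part of this patch:

    /* Sketch: CS_PARTIAL_FLUSH is a wait-for-compute event, not a
     * cache invalidation. */
    struct radeon_winsys_cs *cs = sctx->b.gfx.cs;

    if (sctx->b.flags & SI_CONTEXT_CS_PARTIAL_FLUSH) {
            radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
            radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) |
                            EVENT_INDEX(4));
    }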