ctx->b.streamout.suspended = true;
}
- /* flush is needed to avoid lockups on some chips with user fences
- * this will also flush the framebuffer cache
- */
+ /* flush the framebuffer cache */
ctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV |
R600_CONTEXT_FLUSH_AND_INV_CB |
R600_CONTEXT_FLUSH_AND_INV_DB |
ctx->initial_gfx_cs_size = ctx->b.rings.gfx.cs->cdw;
}
-void r600_context_emit_fence(struct r600_context *ctx, struct r600_resource *fence_bo, unsigned offset, unsigned value)
-{
- struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;
- uint64_t va;
-
- r600_need_cs_space(ctx, 10, FALSE);
-
- va = r600_resource_va(&ctx->screen->b.b, (void*)fence_bo);
- va = va + (offset << 2);
-
- /* Use of WAIT_UNTIL is deprecated on Cayman+ */
- if (ctx->b.family >= CHIP_CAYMAN) {
- cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
- cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4);
- } else {
- r600_write_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
- }
-
- cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0);
- cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5);
- cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL; /* ADDRESS_LO */
- /* DATA_SEL | INT_EN | ADDRESS_HI */
- cs->buf[cs->cdw++] = (1 << 29) | (0 << 24) | ((va >> 32UL) & 0xFF);
- cs->buf[cs->cdw++] = value; /* DATA_LO */
- cs->buf[cs->cdw++] = 0; /* DATA_HI */
- cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
- cs->buf[cs->cdw++] = r600_context_bo_reloc(&ctx->b, &ctx->b.rings.gfx, fence_bo, RADEON_USAGE_WRITE);
-}
-
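For reference on what the deleted helper did: EVENT_WRITE_EOP asks the CP to write `value` to the GPU address `va` once all prior work has drained, and the old fence code polled that location from the CPU. A hedged annotation of the packed dword marked "DATA_SEL | INT_EN | ADDRESS_HI" (field positions follow AMD's R6xx/R7xx documentation; the macro names below are illustrative, not taken from mesa headers):

    /* Encoding of dword 3 of EVENT_WRITE_EOP as emitted above: */
    #define EOP_DATA_SEL(x)  ((x) << 29)           /* 1 = write 32-bit DATA_LO */
    #define EOP_INT_SEL(x)   ((x) << 24)           /* 0 = no completion interrupt */
    #define EOP_ADDR_HI(va)  (((va) >> 32) & 0xFF) /* bits 39:32 of the address */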
/* The max number of bytes to copy per packet. */
#define CP_DMA_MAX_BYTE_COUNT ((1 << 21) - 8)
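The CP DMA byte-count field appears to be 21 bits wide, so one packet can move just under 2 MiB; subtracting 8 keeps the per-packet maximum 8-byte aligned (an inference from the constant, not stated here). A larger copy is split across packets, roughly:

    /* Illustrative sketch (helper name is hypothetical): packets needed to
     * copy @size bytes when each moves at most CP_DMA_MAX_BYTE_COUNT. */
    static unsigned cp_dma_num_packets(unsigned size)
    {
        return (size + CP_DMA_MAX_BYTE_COUNT - 1) / CP_DMA_MAX_BYTE_COUNT;
    }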
/*
* pipe_context
*/
-static struct r600_fence *r600_create_fence(struct r600_context *rctx)
-{
- struct r600_screen *rscreen = rctx->screen;
- struct r600_fence *fence = NULL;
-
- pipe_mutex_lock(rscreen->fences.mutex);
-
- if (!rscreen->fences.bo) {
- /* Create the shared buffer object */
- rscreen->fences.bo = (struct r600_resource*)
- pipe_buffer_create(&rscreen->b.b, PIPE_BIND_CUSTOM,
- PIPE_USAGE_STAGING, 4096);
- if (!rscreen->fences.bo) {
- R600_ERR("r600: failed to create bo for fence objects\n");
- goto out;
- }
- rscreen->fences.data = r600_buffer_map_sync_with_rings(&rctx->b, rscreen->fences.bo, PIPE_TRANSFER_READ_WRITE);
- }
-
- if (!LIST_IS_EMPTY(&rscreen->fences.pool)) {
- struct r600_fence *entry;
-
- /* Try to find a freed fence that has been signalled */
- LIST_FOR_EACH_ENTRY(entry, &rscreen->fences.pool, head) {
- if (rscreen->fences.data[entry->index] != 0) {
- LIST_DELINIT(&entry->head);
- fence = entry;
- break;
- }
- }
- }
-
- if (!fence) {
- /* Allocate a new fence */
- struct r600_fence_block *block;
- unsigned index;
-
- if ((rscreen->fences.next_index + 1) >= 1024) {
- R600_ERR("r600: too many concurrent fences\n");
- goto out;
- }
-
- index = rscreen->fences.next_index++;
-
- if (!(index % FENCE_BLOCK_SIZE)) {
- /* Allocate a new block */
- block = CALLOC_STRUCT(r600_fence_block);
- if (block == NULL)
- goto out;
-
- LIST_ADD(&block->head, &rscreen->fences.blocks);
- } else {
- block = LIST_ENTRY(struct r600_fence_block, rscreen->fences.blocks.next, head);
- }
-
- fence = &block->fences[index % FENCE_BLOCK_SIZE];
- fence->index = index;
- }
-
- pipe_reference_init(&fence->reference, 1);
-
- rscreen->fences.data[fence->index] = 0;
- r600_context_emit_fence(rctx, rscreen->fences.bo, fence->index, 1);
-
- /* Create a dummy BO so that fence_finish without a timeout can sleep waiting for completion */
- fence->sleep_bo = (struct r600_resource*)
- pipe_buffer_create(&rctx->screen->b.b, PIPE_BIND_CUSTOM,
- PIPE_USAGE_STAGING, 1);
- /* Add the fence as a dummy relocation. */
- r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, fence->sleep_bo, RADEON_USAGE_READWRITE);
-
-out:
- pipe_mutex_unlock(rscreen->fences.mutex);
- return fence;
-}
static void r600_flush(struct pipe_context *ctx,
		       struct pipe_fence_handle **fence,
		       unsigned flags)
{
struct r600_context *rctx = (struct r600_context *)ctx;
- struct r600_fence **rfence = (struct r600_fence**)fence;
unsigned fflags;
fflags = flags & PIPE_FLUSH_END_OF_FRAME ? RADEON_FLUSH_END_OF_FRAME : 0;
- if (rfence) {
- *rfence = r600_create_fence(rctx);
+ if (fence) {
+ *fence = rctx->b.ws->cs_create_fence(rctx->b.rings.gfx.cs);
}
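The fence returned here is now an opaque winsys handle created from the gfx CS before the flush, so it covers everything queued so far. A minimal caller-side sketch of the path this wires up, using the standard gallium entry points of this era (pre-2016 `fence_finish` signature, no context argument):

    /* Sketch: obtain a fence at flush time and wait for the GPU to finish. */
    struct pipe_fence_handle *fence = NULL;

    pipe->flush(pipe, &fence, PIPE_FLUSH_END_OF_FRAME);

    if (fence) {
        /* Blocks until all work submitted before the flush has completed. */
        screen->fence_finish(screen, fence, PIPE_TIMEOUT_INFINITE);
        screen->fence_reference(screen, &fence, NULL); /* drop our reference */
    }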
/* flush gfx & dma ring, order does not matter as only one can be live */
if (rctx->b.rings.dma.cs) {
compute_memory_pool_delete(rscreen->global_pool);
}
- if (rscreen->fences.bo) {
- struct r600_fence_block *entry, *tmp;
-
- LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, &rscreen->fences.blocks, head) {
- LIST_DEL(&entry->head);
- FREE(entry);
- }
-
- rscreen->b.ws->buffer_unmap(rscreen->fences.bo->cs_buf);
- pipe_resource_reference((struct pipe_resource**)&rscreen->fences.bo, NULL);
- }
if (rscreen->trace_bo) {
rscreen->b.ws->buffer_unmap(rscreen->trace_bo->cs_buf);
pipe_resource_reference((struct pipe_resource**)&rscreen->trace_bo, NULL);
}
- pipe_mutex_destroy(rscreen->fences.mutex);
rscreen->b.ws->destroy(rscreen->b.ws);
FREE(rscreen);
}
-static void r600_fence_reference(struct pipe_screen *pscreen,
- struct pipe_fence_handle **ptr,
- struct pipe_fence_handle *fence)
-{
- struct r600_fence **oldf = (struct r600_fence**)ptr;
- struct r600_fence *newf = (struct r600_fence*)fence;
-
- if (pipe_reference(&(*oldf)->reference, &newf->reference)) {
- struct r600_screen *rscreen = (struct r600_screen *)pscreen;
- pipe_mutex_lock(rscreen->fences.mutex);
- pipe_resource_reference((struct pipe_resource**)&(*oldf)->sleep_bo, NULL);
- LIST_ADDTAIL(&(*oldf)->head, &rscreen->fences.pool);
- pipe_mutex_unlock(rscreen->fences.mutex);
- }
-
- *ptr = fence;
-}
-
-static boolean r600_fence_signalled(struct pipe_screen *pscreen,
- struct pipe_fence_handle *fence)
-{
- struct r600_screen *rscreen = (struct r600_screen *)pscreen;
- struct r600_fence *rfence = (struct r600_fence*)fence;
-
- return rscreen->fences.data[rfence->index] != 0;
-}
-
-static boolean r600_fence_finish(struct pipe_screen *pscreen,
- struct pipe_fence_handle *fence,
- uint64_t timeout)
-{
- struct r600_screen *rscreen = (struct r600_screen *)pscreen;
- struct r600_fence *rfence = (struct r600_fence*)fence;
- int64_t start_time = 0;
- unsigned spins = 0;
-
- if (timeout != PIPE_TIMEOUT_INFINITE) {
- start_time = os_time_get();
-
- /* Convert to microseconds. */
- timeout /= 1000;
- }
-
- while (rscreen->fences.data[rfence->index] == 0) {
- /* Special-case infinite timeout - wait for the dummy BO to become idle */
- if (timeout == PIPE_TIMEOUT_INFINITE) {
- rscreen->b.ws->buffer_wait(rfence->sleep_bo->buf, RADEON_USAGE_READWRITE);
- break;
- }
-
- /* The dummy BO will be busy until the CS including the fence has completed, or
- * the GPU is reset. Don't bother continuing to spin when the BO is idle. */
- if (!rscreen->b.ws->buffer_is_busy(rfence->sleep_bo->buf, RADEON_USAGE_READWRITE))
- break;
-
- if (++spins % 256)
- continue;
-#ifdef PIPE_OS_UNIX
- sched_yield();
-#else
- os_time_sleep(10);
-#endif
- if (timeout != PIPE_TIMEOUT_INFINITE &&
- os_time_get() - start_time >= timeout) {
- break;
- }
- }
-
- return rscreen->fences.data[rfence->index] != 0;
-}
-
static uint64_t r600_get_timestamp(struct pipe_screen *screen)
{
struct r600_screen *rscreen = (struct r600_screen*)screen;
} else {
rscreen->b.b.is_format_supported = r600_is_format_supported;
}
- rscreen->b.b.fence_reference = r600_fence_reference;
- rscreen->b.b.fence_signalled = r600_fence_signalled;
- rscreen->b.b.fence_finish = r600_fence_finish;
rscreen->b.b.get_driver_query_info = r600_get_driver_query_info;
if (rscreen->b.info.has_uvd) {
rscreen->b.b.get_video_param = ruvd_get_video_param;
rscreen->has_cp_dma = rscreen->b.info.drm_minor >= 27 &&
!(rscreen->b.debug_flags & DBG_NO_CP_DMA);
- rscreen->fences.bo = NULL;
- rscreen->fences.data = NULL;
- rscreen->fences.next_index = 0;
- LIST_INITHEAD(&rscreen->fences.pool);
- LIST_INITHEAD(&rscreen->fences.blocks);
- pipe_mutex_init(rscreen->fences.mutex);
-
rscreen->global_pool = compute_memory_pool_new(rscreen);
rscreen->cs_count = 0;
struct pipe_viewport_state state;
};
-struct r600_pipe_fences {
- struct r600_resource *bo;
- unsigned *data;
- unsigned next_index;
- /* linked list of preallocated blocks */
- struct list_head blocks;
- /* linked list of freed fences */
- struct list_head pool;
- pipe_mutex mutex;
-};
-
/* This must start from 16. */
/* features */
#define DBG_NO_LLVM (1 << 17)
bool has_msaa;
bool has_cp_dma;
bool has_compressed_msaa_texturing;
- struct r600_pipe_fences fences;
/*for compute global memory binding, we allocate stuff here, instead of
* buffers.
uint32_t *buffer_constants;
};
-struct r600_fence {
- struct pipe_reference reference;
- unsigned index; /* in the shared bo */
- struct r600_resource *sleep_bo;
- struct list_head head;
-};
-
-#define FENCE_BLOCK_SIZE 16
-
-struct r600_fence_block {
- struct r600_fence fences[FENCE_BLOCK_SIZE];
- struct list_head head;
-};
-
struct r600_constbuf_state
{
struct r600_atom atom;
void r600_get_backend_mask(struct r600_context *ctx);
void r600_context_flush(struct r600_context *ctx, unsigned flags);
void r600_begin_new_cs(struct r600_context *ctx);
-void r600_context_emit_fence(struct r600_context *ctx, struct r600_resource *fence,
- unsigned offset, unsigned value);
void r600_flush_emit(struct r600_context *ctx);
void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw, boolean count_draw_in);
void r600_need_dma_space(struct r600_context *ctx, unsigned num_dw);
DEBUG_NAMED_VALUE_END /* must be last */
};
+static void r600_fence_reference(struct pipe_screen *screen,
+ struct pipe_fence_handle **ptr,
+ struct pipe_fence_handle *fence)
+{
+ struct radeon_winsys *rws = ((struct r600_common_screen*)screen)->ws;
+
+ rws->fence_reference(ptr, fence);
+}
+
+static boolean r600_fence_signalled(struct pipe_screen *screen,
+ struct pipe_fence_handle *fence)
+{
+ struct radeon_winsys *rws = ((struct r600_common_screen*)screen)->ws;
+
+ return rws->fence_wait(rws, fence, 0);
+}
+
+static boolean r600_fence_finish(struct pipe_screen *screen,
+ struct pipe_fence_handle *fence,
+ uint64_t timeout)
+{
+ struct radeon_winsys *rws = ((struct r600_common_screen*)screen)->ws;
+
+ return rws->fence_wait(rws, fence, timeout);
+}
+
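These wrappers assume the following shape for the new winsys fence hooks, inferred from the call sites in this patch (`cs_create_fence` on the CS, `fence_wait` and `fence_reference` on the winsys); see radeon_winsys.h for the authoritative declarations:

    /* Sketch of the inferred interface; other members elided. */
    struct radeon_winsys {
        /* Return a fence that signals once all work flushed from @cs so far
         * has completed on the GPU. */
        struct pipe_fence_handle *(*cs_create_fence)(struct radeon_winsys_cs *cs);

        /* Wait at most @timeout (in the units gallium passes through,
         * nanoseconds); @timeout == 0 only polls. Returns TRUE once the
         * fence has signalled. */
        boolean (*fence_wait)(struct radeon_winsys *ws,
                              struct pipe_fence_handle *fence,
                              uint64_t timeout);

        /* Reference-counted assignment: *dst = fence. */
        void (*fence_reference)(struct pipe_fence_handle **dst,
                                struct pipe_fence_handle *fence);
    };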
static bool r600_interpret_tiling(struct r600_common_screen *rscreen,
uint32_t tiling_config)
{
{
ws->query_info(ws, &rscreen->info);
+ rscreen->b.fence_finish = r600_fence_finish;
+ rscreen->b.fence_reference = r600_fence_reference;
+ rscreen->b.fence_signalled = r600_fence_signalled;
+
rscreen->ws = ws;
rscreen->family = rscreen->info.family;
rscreen->chip_class = rscreen->info.chip_class;
void r600_context_queries_resume(struct r600_context *ctx);
void r600_query_predication(struct r600_context *ctx, struct r600_query *query, int operation,
int flag_wait);
-void si_context_emit_fence(struct r600_context *ctx, struct r600_resource *fence,
- unsigned offset, unsigned value);
bool si_is_timer_query(unsigned type);
bool si_query_needs_begin(unsigned type);
/* Count in framebuffer cache flushes at the end of CS. */
num_dw += ctx->atoms.cache_flush->num_dw;
- /* Save 16 dwords for the fence mechanism. */
- num_dw += 16;
-
#if R600_TRACE_CS
if (ctx->screen->trace_bo) {
num_dw += R600_TRACE_CS_DWORDS;
R600_CONTEXT_INV_TEX_CACHE;
si_emit_cache_flush(&ctx->b, NULL);
- /* partial flush is needed to avoid lockups on some chips with user fences */
+ /* This partial flush avoided lockups with the old user fences; it is probably not needed anymore. */
cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4);
si_all_descriptors_begin_new_cs(ctx);
}
-void si_context_emit_fence(struct r600_context *ctx, struct r600_resource *fence_bo, unsigned offset, unsigned value)
-{
- struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;
- uint64_t va;
-
- si_need_cs_space(ctx, 10, FALSE);
-
- va = r600_resource_va(&ctx->screen->b.b, (void*)fence_bo);
- va = va + (offset << 2);
-
- cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
- cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4);
- cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0);
- cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5);
- cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL; /* ADDRESS_LO */
- /* DATA_SEL | INT_EN | ADDRESS_HI */
- cs->buf[cs->cdw++] = (1 << 29) | (0 << 24) | ((va >> 32UL) & 0xFF);
- cs->buf[cs->cdw++] = value; /* DATA_LO */
- cs->buf[cs->cdw++] = 0; /* DATA_HI */
- cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
- cs->buf[cs->cdw++] = r600_context_bo_reloc(&ctx->b, &ctx->b.rings.gfx, fence_bo, RADEON_USAGE_WRITE);
-}
-
static unsigned r600_query_read_result(char *map, unsigned start_index, unsigned end_index,
bool test_status_bit)
{
/*
* pipe_context
*/
-static struct r600_fence *r600_create_fence(struct r600_context *rctx)
-{
- struct r600_screen *rscreen = rctx->screen;
- struct r600_fence *fence = NULL;
-
- pipe_mutex_lock(rscreen->fences.mutex);
-
- if (!rscreen->fences.bo) {
- /* Create the shared buffer object */
- rscreen->fences.bo = r600_resource_create_custom(&rscreen->b.b,
- PIPE_USAGE_STAGING,
- 4096);
- if (!rscreen->fences.bo) {
- R600_ERR("r600: failed to create bo for fence objects\n");
- goto out;
- }
- rscreen->fences.data = rctx->b.ws->buffer_map(rscreen->fences.bo->cs_buf,
- rctx->b.rings.gfx.cs,
- PIPE_TRANSFER_READ_WRITE);
- }
-
- if (!LIST_IS_EMPTY(&rscreen->fences.pool)) {
- struct r600_fence *entry;
-
- /* Try to find a freed fence that has been signalled */
- LIST_FOR_EACH_ENTRY(entry, &rscreen->fences.pool, head) {
- if (rscreen->fences.data[entry->index] != 0) {
- LIST_DELINIT(&entry->head);
- fence = entry;
- break;
- }
- }
- }
-
- if (!fence) {
- /* Allocate a new fence */
- struct r600_fence_block *block;
- unsigned index;
-
- if ((rscreen->fences.next_index + 1) >= 1024) {
- R600_ERR("r600: too many concurrent fences\n");
- goto out;
- }
-
- index = rscreen->fences.next_index++;
-
- if (!(index % FENCE_BLOCK_SIZE)) {
- /* Allocate a new block */
- block = CALLOC_STRUCT(r600_fence_block);
- if (block == NULL)
- goto out;
-
- LIST_ADD(&block->head, &rscreen->fences.blocks);
- } else {
- block = LIST_ENTRY(struct r600_fence_block, rscreen->fences.blocks.next, head);
- }
-
- fence = &block->fences[index % FENCE_BLOCK_SIZE];
- fence->index = index;
- }
-
- pipe_reference_init(&fence->reference, 1);
-
- rscreen->fences.data[fence->index] = 0;
- si_context_emit_fence(rctx, rscreen->fences.bo, fence->index, 1);
-
- /* Create a dummy BO so that fence_finish without a timeout can sleep waiting for completion */
- fence->sleep_bo = r600_resource_create_custom(&rctx->screen->b.b, PIPE_USAGE_STAGING, 1);
-
- /* Add the fence as a dummy relocation. */
- r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, fence->sleep_bo, RADEON_USAGE_READWRITE);
-
-out:
- pipe_mutex_unlock(rscreen->fences.mutex);
- return fence;
-}
-
-
void radeonsi_flush(struct pipe_context *ctx, struct pipe_fence_handle **fence,
unsigned flags)
{
struct r600_context *rctx = (struct r600_context *)ctx;
- struct r600_fence **rfence = (struct r600_fence**)fence;
struct pipe_query *render_cond = NULL;
boolean render_cond_cond = FALSE;
unsigned render_cond_mode = 0;
- if (rfence)
- *rfence = r600_create_fence(rctx);
+ if (fence) {
+ *fence = rctx->b.ws->cs_create_fence(rctx->b.rings.gfx.cs);
+ }
/* Disable render condition. */
if (rctx->current_render_cond) {
r600_common_screen_cleanup(&rscreen->b);
- if (rscreen->fences.bo) {
- struct r600_fence_block *entry, *tmp;
-
- LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, &rscreen->fences.blocks, head) {
- LIST_DEL(&entry->head);
- FREE(entry);
- }
-
- rscreen->b.ws->buffer_unmap(rscreen->fences.bo->cs_buf);
- r600_resource_reference(&rscreen->fences.bo, NULL);
- }
-
#if R600_TRACE_CS
if (rscreen->trace_bo) {
rscreen->ws->buffer_unmap(rscreen->trace_bo->cs_buf);
}
#endif
- pipe_mutex_destroy(rscreen->fences.mutex);
-
rscreen->b.ws->destroy(rscreen->b.ws);
FREE(rscreen);
}
-static void r600_fence_reference(struct pipe_screen *pscreen,
- struct pipe_fence_handle **ptr,
- struct pipe_fence_handle *fence)
-{
- struct r600_fence **oldf = (struct r600_fence**)ptr;
- struct r600_fence *newf = (struct r600_fence*)fence;
-
- if (pipe_reference(&(*oldf)->reference, &newf->reference)) {
- struct r600_screen *rscreen = (struct r600_screen *)pscreen;
- pipe_mutex_lock(rscreen->fences.mutex);
- r600_resource_reference(&(*oldf)->sleep_bo, NULL);
- LIST_ADDTAIL(&(*oldf)->head, &rscreen->fences.pool);
- pipe_mutex_unlock(rscreen->fences.mutex);
- }
-
- *ptr = fence;
-}
-
-static boolean r600_fence_signalled(struct pipe_screen *pscreen,
- struct pipe_fence_handle *fence)
-{
- struct r600_screen *rscreen = (struct r600_screen *)pscreen;
- struct r600_fence *rfence = (struct r600_fence*)fence;
-
- return rscreen->fences.data[rfence->index] != 0;
-}
-
-static boolean r600_fence_finish(struct pipe_screen *pscreen,
- struct pipe_fence_handle *fence,
- uint64_t timeout)
-{
- struct r600_screen *rscreen = (struct r600_screen *)pscreen;
- struct r600_fence *rfence = (struct r600_fence*)fence;
- int64_t start_time = 0;
- unsigned spins = 0;
-
- if (timeout != PIPE_TIMEOUT_INFINITE) {
- start_time = os_time_get();
-
- /* Convert to microseconds. */
- timeout /= 1000;
- }
-
- while (rscreen->fences.data[rfence->index] == 0) {
- /* Special-case infinite timeout - wait for the dummy BO to become idle */
- if (timeout == PIPE_TIMEOUT_INFINITE) {
- rscreen->b.ws->buffer_wait(rfence->sleep_bo->buf, RADEON_USAGE_READWRITE);
- break;
- }
-
- /* The dummy BO will be busy until the CS including the fence has completed, or
- * the GPU is reset. Don't bother continuing to spin when the BO is idle. */
- if (!rscreen->b.ws->buffer_is_busy(rfence->sleep_bo->buf, RADEON_USAGE_READWRITE))
- break;
-
- if (++spins % 256)
- continue;
-#ifdef PIPE_OS_UNIX
- sched_yield();
-#else
- os_time_sleep(10);
-#endif
- if (timeout != PIPE_TIMEOUT_INFINITE &&
- os_time_get() - start_time >= timeout) {
- break;
- }
- }
-
- return rscreen->fences.data[rfence->index] != 0;
-}
-
static uint64_t r600_get_timestamp(struct pipe_screen *screen)
{
struct r600_screen *rscreen = (struct r600_screen*)screen;
rscreen->b.b.get_compute_param = r600_get_compute_param;
rscreen->b.b.get_timestamp = r600_get_timestamp;
rscreen->b.b.is_format_supported = si_is_format_supported;
- rscreen->b.b.fence_reference = r600_fence_reference;
- rscreen->b.b.fence_signalled = r600_fence_signalled;
- rscreen->b.b.fence_finish = r600_fence_finish;
if (rscreen->b.info.has_uvd) {
rscreen->b.b.get_video_param = ruvd_get_video_param;
rscreen->b.b.is_video_format_supported = ruvd_is_format_supported;
if (debug_get_bool_option("RADEON_DUMP_SHADERS", FALSE))
rscreen->b.debug_flags |= DBG_FS | DBG_VS | DBG_GS | DBG_PS | DBG_CS;
- rscreen->fences.bo = NULL;
- rscreen->fences.data = NULL;
- rscreen->fences.next_index = 0;
- LIST_INITHEAD(&rscreen->fences.pool);
- LIST_INITHEAD(&rscreen->fences.blocks);
- pipe_mutex_init(rscreen->fences.mutex);
-
#if R600_TRACE_CS
rscreen->cs_count = 0;
if (rscreen->info.drm_minor >= 28) {
struct si_pipe_compute;
-struct r600_pipe_fences {
- struct r600_resource *bo;
- unsigned *data;
- unsigned next_index;
- /* linked list of preallocated blocks */
- struct list_head blocks;
- /* linked list of freed fences */
- struct list_head pool;
- pipe_mutex mutex;
-};
-
struct r600_screen {
struct r600_common_screen b;
- struct r600_pipe_fences fences;
#if R600_TRACE_CS
struct r600_resource *trace_bo;
uint32_t *trace_ptr;
unsigned n_samplers;
};
-struct r600_fence {
- struct pipe_reference reference;
- unsigned index; /* in the shared bo */
- struct r600_resource *sleep_bo;
- struct list_head head;
-};
-
-#define FENCE_BLOCK_SIZE 16
-
-struct r600_fence_block {
- struct r600_fence fences[FENCE_BLOCK_SIZE];
- struct list_head head;
-};
-
#define SI_NUM_ATOMS(rctx) (sizeof((rctx)->atoms)/sizeof((rctx)->atoms.array[0]))
#define SI_NUM_SHADERS (PIPE_SHADER_FRAGMENT+1)