Thread offloading is sometimes not desirable, e.g. when mapping a buffer.
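The trade-off, sketched with the entry points this patch introduces: callers that only need CS space pass R300_FLUSH_ASYNC and let the winsys emit the ioctl from a worker thread, while callers that are about to wait on a buffer pass 0 so the ioctl is emitted in the calling thread first. This fragment is illustrative, not part of the patch; the condition is hypothetical, and r300_flush and R300_FLUSH_ASYNC are defined in the hunks below.

    /* Illustrative fragment, not part of the patch. */
    if (about_to_map_buffer) {
        /* Synchronous: emit the CS ioctl in this thread (joining any
         * pending worker) before we block on the buffer. */
        r300_flush(&r300->context, 0, NULL);
    } else {
        /* Asynchronous: the ioctl may be emitted from a worker thread. */
        r300_flush(&r300->context, R300_FLUSH_ASYNC, NULL);
    }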
/* Reserve CS space. */
if (dwords > (R300_MAX_CMDBUF_DWORDS - r300->cs->cdw)) {
- r300->context.flush(&r300->context, 0, NULL);
+ r300_flush(&r300->context, R300_FLUSH_ASYNC, NULL);
}
/* Emit clear packets. */
FREE(r300);
}
-void r300_flush_cb(void *data)
+static void r300_flush_callback(void *data, unsigned flags)
{
struct r300_context* const cs_context_copy = data;
- cs_context_copy->context.flush(&cs_context_copy->context, 0, NULL);
+ r300_flush(&cs_context_copy->context, flags, NULL);
}
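For context, this callback is what the winsys invokes when a buffer operation forces a flush; the same pattern appears verbatim in the buffer-mapping hunks further down. A sketch using names from this patch (the enclosing function itself is hypothetical):

    static void example_wait_for_bo(struct radeon_drm_cs *cs,
                                    struct radeon_bo *bo)
    {
        if (radeon_bo_is_referenced_by_cs(cs, bo)) {
            /* Synchronous flush: we are about to block on this buffer. */
            cs->flush_cs(cs->flush_data, 0);
        }
        radeon_bo_wait((struct r300_winsys_bo*)bo);
    }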
#define R300_INIT_ATOM(atomname, atomsize) \
r300_init_render_functions(r300);
r300_init_states(&r300->context);
- rws->cs_set_flush(r300->cs, r300_flush_cb, r300);
+ rws->cs_set_flush(r300->cs, r300_flush_callback, r300);
/* The KIL opcode needs the first texture unit to be enabled
* on r3xx-r4xx. In order to calm down the CS checker, we bind this
struct pipe_context* r300_create_context(struct pipe_screen* screen,
void *priv);
-void r300_flush_cb(void *data);
-
/* Context initialization. */
struct draw_stage* r300_draw_stage(struct r300_context* r300);
void r300_init_blit_functions(struct r300_context *r300);
void r300_decompress_zmask_locked_unsafe(struct r300_context *r300);
void r300_decompress_zmask_locked(struct r300_context *r300);
+/* r300_flush.c */
+void r300_flush(struct pipe_context *pipe,
+ unsigned flags,
+ struct pipe_fence_handle **fence);
+
/* r300_hyperz.c */
void r300_update_hyperz_state(struct r300_context* r300);
if (flushed)
return FALSE;
- r300->context.flush(&r300->context, 0, NULL);
+ r300_flush(&r300->context, R300_FLUSH_ASYNC, NULL);
flushed = TRUE;
goto validate;
}
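The hunk above is the usual validate-and-retry pattern. Reconstructed from its context lines (illustrative only; the real function does more work per retry):

    boolean flushed = FALSE;
    validate:
    if (!r300->rws->cs_validate(r300->cs)) {
        if (flushed)
            return FALSE;   /* a second failure means the CS cannot fit */
        /* Validation only needs CS space, not GPU completion, so the
         * flush may be asynchronous. */
        r300_flush(&r300->context, R300_FLUSH_ASYNC, NULL);
        flushed = TRUE;
        goto validate;
    }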
#include "r300_cs.h"
#include "r300_emit.h"
-static void r300_flush(struct pipe_context* pipe,
- unsigned flags,
- struct pipe_fence_handle** fence)
+
+void r300_flush(struct pipe_context *pipe,
+ unsigned flags,
+ struct pipe_fence_handle **fence)
{
struct r300_context *r300 = r300_context(pipe);
struct r300_atom *atom;
r500_emit_index_bias(r300, 0);
r300->flush_counter++;
- r300->rws->cs_flush(r300->cs);
+ r300->rws->cs_flush(r300->cs, flags);
r300->dirty_hw = 0;
/* New kitchen sink, baby. */
* and we cannot emit an empty CS. We must write some regs then. */
CS_LOCALS(r300);
OUT_CS_REG(RB3D_COLOR_CHANNEL_MASK, 0);
- r300->rws->cs_flush(r300->cs);
+ r300->rws->cs_flush(r300->cs, flags);
} else {
/* Even if hw is not dirty, we should at least reset the CS in case
* the space checking failed for the first draw operation. */
- r300->rws->cs_flush(r300->cs);
+ r300->rws->cs_flush(r300->cs, flags);
}
}
+}
- if (flags & PIPE_FLUSH_FRAME) {
- r300->rws->cs_sync_flush(r300->cs);
- }
+static void r300_flush_wrapped(struct pipe_context *pipe,
+ unsigned flags,
+ struct pipe_fence_handle **fence)
+{
+ /* Don't forward the pipe flags; they are PIPE_FLUSH_* bits, not R300_FLUSH_*. */
+ r300_flush(pipe, 0, fence);
}
void r300_init_flush_functions(struct r300_context* r300)
{
- r300->context.flush = r300_flush;
+ r300->context.flush = r300_flush_wrapped;
}
/* Reserve requested CS space. */
if (cs_dwords > (R300_MAX_CMDBUF_DWORDS - r300->cs->cdw)) {
- r300->context.flush(&r300->context, 0, NULL);
+ r300_flush(&r300->context, R300_FLUSH_ASYNC, NULL);
flushed = TRUE;
}
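Note the two flag namespaces this leaves: pipe_context::flush still receives Gallium's PIPE_FLUSH_* bits, which r300_flush_wrapped deliberately drops, while internal callers hand R300_FLUSH_* straight to the winsys. Illustrative:

    /* State-tracker path: pipe flags are ignored by the wrapper. */
    r300->context.flush(&r300->context, 0, NULL);
    /* Internal path: winsys flags pass through unchanged. */
    r300_flush(&r300->context, R300_FLUSH_ASYNC, NULL);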
transfer->box.x, transfer->box.y, transfer->box.z,
&r300transfer->linear_texture->b.b.b, 0, &src_box);
+ /* XXX remove this. */
ctx->flush(ctx, 0, NULL);
}
if (!trans->linear_texture) {
/* Oh crap, the thing can't create the texture.
* Let's flush and try again. */
- ctx->flush(ctx, 0, NULL);
+ r300_flush(ctx, 0, NULL);
trans->linear_texture = r300_resource(
ctx->screen->resource_create(ctx->screen,
assert(!trans->linear_texture->tex.microtile &&
!trans->linear_texture->tex.macrotile[0]);
- /* Set the stride.
- *
- * Even though we are using an internal texture for this,
- * the transfer level, box and usage parameters still reflect
- * the arguments received to get_transfer. We just do the
- * right thing internally.
- */
+ /* Set the stride. */
trans->transfer.stride =
trans->linear_texture->tex.stride_in_bytes[0];
r300_copy_from_tiled_texture(ctx, trans);
/* Always referenced in the blit. */
- ctx->flush(ctx, 0, NULL);
+ r300_flush(ctx, 0, NULL);
}
return &trans->transfer;
}
trans->transfer.stride = tex->tex.stride_in_bytes[level];
trans->offset = r300_texture_get_offset(tex, level, box->z);
- if (referenced_cs)
- ctx->flush(ctx, PIPE_FLUSH_RENDER_CACHE, NULL);
+ if (referenced_cs &&
+ !(usage & PIPE_TRANSFER_UNSYNCHRONIZED))
+ r300_flush(ctx, 0, NULL);
return &trans->transfer;
}
return NULL;
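These transfer hunks are the motivating case from the commit message: a map must not race the CS ioctl, so every flush on this path passes flags = 0. A condensed view of what that buys, based on the winsys hunks below (illustrative):

    /* Driver side: synchronous flush before the map. */
    r300_flush(ctx, 0, NULL);
    /* Winsys side, inside cs_flush(cs, 0):
     *   radeon_drm_cs_sync_flush(cs);        joins any pending worker
     *   radeon_drm_cs_emit_ioctl(cs->csc);   emits in the calling thread
     * After this, radeon_bo_wait can block on a submitted CS. */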
#include "pipe/p_state.h"
#define R300_MAX_CMDBUF_DWORDS (16 * 1024)
+#define R300_FLUSH_ASYNC (1 << 0)
struct winsys_handle;
struct r300_winsys_screen;
* Flush a command stream.
*
* \param cs A command stream to flush.
+ * \param flags  R300_FLUSH_ASYNC or 0.
*/
- void (*cs_flush)(struct r300_winsys_cs *cs);
-
- /**
- * Wait until the last flush is completed.
- *
- * \param cs A command stream.
- */
- void (*cs_sync_flush)(struct r300_winsys_cs *cs);
+ void (*cs_flush)(struct r300_winsys_cs *cs, unsigned flags);
/**
* Set a flush callback which is called from winsys when flush is
* \param user A user pointer that will be passed to the flush callback.
*/
void (*cs_set_flush)(struct r300_winsys_cs *cs,
- void (*flush)(void *),
+ void (*flush)(void *ctx, unsigned flags),
void *user);
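Putting the new interface together (names from this patch; the ordering mirrors radeon_drm_cs_flush below):

    /* Sketch of the updated winsys usage. */
    rws->cs_set_flush(cs, r300_flush_callback, r300); /* callback now takes flags */
    rws->cs_flush(cs, R300_FLUSH_ASYNC); /* ioctl may be emitted from a worker */
    rws->cs_flush(cs, 0);                /* joins the worker, then emits inline */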
/**
/* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
if (flags & PB_USAGE_DONTBLOCK) {
if (radeon_bo_is_referenced_by_cs(cs, bo)) {
- cs->flush_cs(cs->flush_data);
+ cs->flush_cs(cs->flush_data, R300_FLUSH_ASYNC);
return NULL;
}
}
} else {
if (radeon_bo_is_referenced_by_cs(cs, bo)) {
- cs->flush_cs(cs->flush_data);
+ cs->flush_cs(cs->flush_data, 0);
+ } else {
+ /* Try to avoid busy-waiting in radeon_bo_wait. */
+ if (p_atomic_read(&bo->num_active_ioctls))
+ radeon_drm_cs_sync_flush(cs);
}
radeon_bo_wait((struct r300_winsys_bo*)bo);
/* Tiling determines how DRM treats the buffer data.
* We must flush CS when changing it if the buffer is referenced. */
if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
- radeon_drm_cs_flush(rcs);
- radeon_drm_cs_sync_flush(rcs);
+ cs->flush_cs(cs->flush_data, 0);
}
while (p_atomic_read(&bo->num_active_ioctls)) {
return NULL;
}
-void radeon_drm_cs_sync_flush(struct r300_winsys_cs *rcs)
+void radeon_drm_cs_sync_flush(struct radeon_drm_cs *cs)
{
- struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
-
/* Wait for any pending ioctl to complete. */
if (cs->thread) {
pipe_thread_wait(cs->thread);
DEBUG_GET_ONCE_BOOL_OPTION(thread, "RADEON_THREAD", TRUE)
-void radeon_drm_cs_flush(struct r300_winsys_cs *rcs)
+static void radeon_drm_cs_flush(struct r300_winsys_cs *rcs, unsigned flags)
{
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
struct radeon_cs_context *tmp;
- radeon_drm_cs_sync_flush(rcs);
+ radeon_drm_cs_sync_flush(cs);
/* If the CS is not empty, emit it in a newly-spawned thread. */
if (cs->base.cdw) {
for (i = 0; i < crelocs; i++)
p_atomic_inc(&cs->csc->relocs_bo[i]->num_active_ioctls);
- if (cs->ws->num_cpus > 1 && debug_get_option_thread()) {
+ if (cs->ws->num_cpus > 1 && debug_get_option_thread() &&
+ (flags & R300_FLUSH_ASYNC)) {
cs->thread = pipe_thread_create(radeon_drm_cs_emit_ioctl, cs->csc);
assert(cs->thread);
} else {
static void radeon_drm_cs_destroy(struct r300_winsys_cs *rcs)
{
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
- radeon_drm_cs_sync_flush(rcs);
+ radeon_drm_cs_sync_flush(cs);
radeon_cs_context_cleanup(&cs->csc1);
radeon_cs_context_cleanup(&cs->csc2);
p_atomic_dec(&cs->ws->num_cs);
}
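Condensed from the hunks above, the resulting control flow of radeon_drm_cs_flush (simplified; relocation bookkeeping and the csc1/csc2 swap omitted): every flush first joins the previous worker, so at most one CS ioctl per context is in flight at a time.

    radeon_drm_cs_sync_flush(cs);      /* join the previous async emit */
    if (cs->base.cdw) {
        if (cs->ws->num_cpus > 1 && debug_get_option_thread() &&
            (flags & R300_FLUSH_ASYNC)) {
            /* Offload the ioctl to a worker thread. */
            cs->thread = pipe_thread_create(radeon_drm_cs_emit_ioctl, cs->csc);
        } else {
            /* Emit in the calling thread; nothing left to wait for. */
            radeon_drm_cs_emit_ioctl(cs->csc);
        }
    }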
static void radeon_drm_cs_set_flush(struct r300_winsys_cs *rcs,
- void (*flush)(void *), void *user)
+ void (*flush)(void *ctx, unsigned flags),
+ void *user)
{
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
cs->flush_cs = flush;
ws->base.cs_validate = radeon_drm_cs_validate;
ws->base.cs_write_reloc = radeon_drm_cs_write_reloc;
ws->base.cs_flush = radeon_drm_cs_flush;
- ws->base.cs_sync_flush = radeon_drm_cs_sync_flush;
ws->base.cs_set_flush = radeon_drm_cs_set_flush;
ws->base.cs_is_buffer_referenced = radeon_bo_is_referenced;
}
struct radeon_drm_winsys *ws;
/* Flush CS. */
- void (*flush_cs)(void *);
+ void (*flush_cs)(void *ctx, unsigned flags);
void *flush_data;
pipe_thread thread;
return bo->num_cs_references;
}
-void radeon_drm_cs_flush(struct r300_winsys_cs *rcs);
-void radeon_drm_cs_sync_flush(struct r300_winsys_cs *rcs);
+void radeon_drm_cs_sync_flush(struct radeon_drm_cs *cs);
void radeon_drm_cs_init_functions(struct radeon_drm_winsys *ws);
#endif