* Skip changing the flags otherwise. */
if (tex->tex.macrotile[tex->surface_level] !=
tex->tex.macrotile[level]) {
- /* Tiling determines how DRM treats the buffer data.
- * We must flush CS when changing it if the buffer is referenced. */
- if (r300->rws->cs_is_buffer_referenced(r300->cs, tex->cs_buf))
- r300->context.flush(&r300->context, 0, NULL);
-
- r300->rws->buffer_set_tiling(tex->buf,
+ r300->rws->buffer_set_tiling(tex->buf, r300->cs,
tex->tex.microtile, tex->tex.macrotile[level],
tex->tex.stride_in_bytes[0]);
tex->cs_buf = rws->buffer_get_cs_handle(tex->buf);
- rws->buffer_set_tiling(tex->buf,
+ rws->buffer_set_tiling(tex->buf, NULL,
tex->tex.microtile, tex->tex.macrotile[0],
tex->tex.stride_in_bytes[0]);
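Note on the two hunks above: when the tiling of an existing texture changes, the driver now hands its command stream to the winsys, which flushes it only if the buffer is actually referenced; at texture creation, NULL is passed instead, because a freshly allocated BO cannot be referenced by any CS yet, so no flush is ever needed on that path.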
* Set tiling flags describing the memory layout of a buffer object.
*
* \param buf A winsys buffer object to set the flags for.
+ * \param cs A command stream to flush if the buffer is referenced by it.
* \param microtile A microtile flag.
* \param macrotile A macrotile flag.
* \param stride A stride of the buffer in bytes, for texturing.
* \note microtile and macrotile are not bitmasks!
*/
void (*buffer_set_tiling)(struct r300_winsys_bo *buf,
+ struct r300_winsys_cs *cs,
enum r300_buffer_tiling microtile,
enum r300_buffer_tiling macrotile,
unsigned stride);
};
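The \note above is easy to miss: microtile and macrotile are discrete enumerators, not OR-able bits, so an implementation compares them with == and translates each value to the matching DRM flag. A minimal sketch of that translation, assuming the RADEON_BO_FLAGS_* names from libdrm_radeon (the square-tile branch is truncated in the hunk below, so its flag name here is an assumption):

/* Sketch only: map the enum values onto the DRM tiling bitmask.
 * Testing "microtile & R300_BUFFER_TILED" would be wrong, because
 * these are plain enum values, not bitmasks. */
uint32_t tiling_flags = 0;
if (microtile == R300_BUFFER_TILED)
    tiling_flags |= RADEON_BO_FLAGS_MICRO_TILE;
else if (microtile == R300_BUFFER_SQUARETILED)
    tiling_flags |= RADEON_BO_FLAGS_MICRO_TILE_SQUARE; /* assumed name */
if (macrotile == R300_BUFFER_TILED)
    tiling_flags |= RADEON_BO_FLAGS_MACRO_TILE;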
static void radeon_bo_set_tiling(struct r300_winsys_bo *_buf,
+ struct r300_winsys_cs *rcs,
enum r300_buffer_tiling microtiled,
enum r300_buffer_tiling macrotiled,
uint32_t pitch)
{
struct radeon_bo *bo = get_radeon_bo(pb_buffer(_buf));
+ struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
struct drm_radeon_gem_set_tiling args = {0};
+ /* Tiling determines how DRM treats the buffer data.
+ * We must flush the CS when changing it if the buffer is referenced. */
+ if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
+ radeon_drm_cs_flush(rcs);
+ radeon_drm_cs_sync_flush(rcs);
+ }
+
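+ /* Also wait for any CS ioctls already in flight that may still
+ * reference this buffer: the flush path increments num_active_ioctls
+ * for every relocated BO and the emission thread decrements it once
+ * the ioctl has returned. */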
+ while (p_atomic_read(&bo->num_active_ioctls)) {
+ sched_yield();
+ }
+
if (microtiled == R300_BUFFER_TILED)
args.tiling_flags |= RADEON_BO_FLAGS_MICRO_TILE;
else if (microtiled == R300_BUFFER_SQUARETILED)
OUT_CS(&cs->base, index * RELOC_DWORDS);
}
-static PIPE_THREAD_ROUTINE(radeon_drm_cs_emit_async, param)
+static PIPE_THREAD_ROUTINE(radeon_drm_cs_emit_ioctl, param)
{
struct radeon_cs_context *csc = (struct radeon_cs_context*)param;
unsigned i;
return NULL;
}
-static void radeon_drm_cs_sync_flush(struct r300_winsys_cs *rcs)
+void radeon_drm_cs_sync_flush(struct r300_winsys_cs *rcs)
{
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
DEBUG_GET_ONCE_BOOL_OPTION(thread, "RADEON_THREAD", TRUE)
-static void radeon_drm_cs_emit(struct r300_winsys_cs *rcs)
+void radeon_drm_cs_flush(struct r300_winsys_cs *rcs)
{
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
struct radeon_cs_context *tmp;
p_atomic_inc(&cs->csc->relocs_bo[i]->num_active_ioctls);
if (debug_get_option_thread()) {
- cs->thread = pipe_thread_create(radeon_drm_cs_emit_async, cs->csc);
+ cs->thread = pipe_thread_create(radeon_drm_cs_emit_ioctl, cs->csc);
assert(cs->thread);
} else {
- radeon_drm_cs_emit_async(cs->csc);
+ radeon_drm_cs_emit_ioctl(cs->csc);
}
}
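For context, the RADEON_THREAD debug option declared above (default TRUE) selects between these two branches. With the thread enabled the CS ioctl runs asynchronously, which is exactly why callers that need completion, such as the new radeon_bo_set_tiling path, must pair radeon_drm_cs_flush with radeon_drm_cs_sync_flush; running with RADEON_THREAD=0 forces the synchronous branch.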
ws->base.cs_add_reloc = radeon_drm_cs_add_reloc;
ws->base.cs_validate = radeon_drm_cs_validate;
ws->base.cs_write_reloc = radeon_drm_cs_write_reloc;
- ws->base.cs_flush = radeon_drm_cs_emit;
+ ws->base.cs_flush = radeon_drm_cs_flush;
ws->base.cs_sync_flush = radeon_drm_cs_sync_flush;
ws->base.cs_set_flush = radeon_drm_cs_set_flush;
ws->base.cs_is_buffer_referenced = radeon_bo_is_referenced;
return bo->num_cs_references;
}
+void radeon_drm_cs_flush(struct r300_winsys_cs *rcs);
+void radeon_drm_cs_sync_flush(struct r300_winsys_cs *rcs);
void radeon_drm_cs_init_functions(struct radeon_drm_winsys *ws);
#endif
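These two declarations are what let radeon_drm_bo.c drive the CS directly from radeon_bo_set_tiling, and they are why radeon_drm_cs_flush and radeon_drm_cs_sync_flush lose their static qualifiers in the hunks above.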