This updates all occurrences of cs_add_reloc to pass an explicit buffer priority.
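A minimal sketch of the call pattern before and after this change (the ws, cs, and buf names below are placeholders, not identifiers taken from the patch):

    /* Old signature: usage and domain only. */
    ws->cs_add_reloc(cs, buf, RADEON_USAGE_READ, RADEON_DOMAIN_GTT);

    /* New signature: a buffer priority is appended as the last argument;
     * call sites with no particular priority use RADEON_PRIO_MIN. */
    ws->cs_add_reloc(cs, buf, RADEON_USAGE_READ, RADEON_DOMAIN_GTT,
                     RADEON_PRIO_MIN);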
assert(tex && tex->buf && "cbuf is marked, but NULL!");
r300->rws->cs_add_reloc(r300->cs, tex->cs_buf,
RADEON_USAGE_READWRITE,
- r300_surface(fb->cbufs[i])->domain);
+ r300_surface(fb->cbufs[i])->domain,
+ tex->b.b.nr_samples > 1 ?
+ RADEON_PRIO_COLOR_BUFFER_MSAA :
+ RADEON_PRIO_COLOR_BUFFER);
}
/* ...depth buffer... */
if (fb->zsbuf) {
assert(tex && tex->buf && "zsbuf is marked, but NULL!");
r300->rws->cs_add_reloc(r300->cs, tex->cs_buf,
RADEON_USAGE_READWRITE,
- r300_surface(fb->zsbuf)->domain);
+ r300_surface(fb->zsbuf)->domain,
+ tex->b.b.nr_samples > 1 ?
+ RADEON_PRIO_DEPTH_BUFFER_MSAA :
+ RADEON_PRIO_DEPTH_BUFFER);
}
}
/* The AA resolve buffer. */
if (aa->dest) {
r300->rws->cs_add_reloc(r300->cs, aa->dest->cs_buf,
RADEON_USAGE_WRITE,
- aa->dest->domain);
+ aa->dest->domain,
+ RADEON_PRIO_COLOR_BUFFER);
}
}
if (r300->textures_state.dirty) {
tex = r300_resource(texstate->sampler_views[i]->base.texture);
r300->rws->cs_add_reloc(r300->cs, tex->cs_buf, RADEON_USAGE_READ,
- tex->domain);
+ tex->domain, RADEON_PRIO_SHADER_TEXTURE_RO);
}
}
/* ...occlusion query buffer... */
if (r300->query_current)
r300->rws->cs_add_reloc(r300->cs, r300->query_current->cs_buf,
- RADEON_USAGE_WRITE, RADEON_DOMAIN_GTT);
+ RADEON_USAGE_WRITE, RADEON_DOMAIN_GTT,
+ RADEON_PRIO_MIN);
/* ...vertex buffer for SWTCL path... */
if (r300->vbo_cs)
r300->rws->cs_add_reloc(r300->cs, r300->vbo_cs,
- RADEON_USAGE_READ, RADEON_DOMAIN_GTT);
+ RADEON_USAGE_READ, RADEON_DOMAIN_GTT,
+ RADEON_PRIO_MIN);
/* ...vertex buffers for HWTCL path... */
if (do_validate_vertex_buffers && r300->vertex_arrays_dirty) {
struct pipe_vertex_buffer *vbuf = r300->vertex_buffer;
r300->rws->cs_add_reloc(r300->cs, r300_resource(buf)->cs_buf,
RADEON_USAGE_READ,
- r300_resource(buf)->domain);
+ r300_resource(buf)->domain,
+ RADEON_PRIO_SHADER_BUFFER_RO);
}
}
/* ...and index buffer for HWTCL path. */
if (index_buffer)
r300->rws->cs_add_reloc(r300->cs, r300_resource(index_buffer)->cs_buf,
RADEON_USAGE_READ,
- r300_resource(index_buffer)->domain);
+ r300_resource(index_buffer)->domain,
+ RADEON_PRIO_MIN);
/* Now do the validation (flush is called inside cs_validate on failure). */
if (!r300->rws->cs_validate(r300->cs)) {
rctx->rings.gfx.flush(rctx, RADEON_FLUSH_ASYNC);
}
}
- return rctx->ws->cs_add_reloc(ring->cs, rbo->cs_buf, usage, rbo->domains) * 4;
+ return rctx->ws->cs_add_reloc(ring->cs, rbo->cs_buf, usage,
+ rbo->domains, RADEON_PRIO_MIN) * 4;
}
static INLINE void r600_emit_reloc(struct r600_common_context *rctx,
{
int reloc_idx;
- reloc_idx = dec->ws->cs_add_reloc(dec->cs, cs_buf, usage, domain);
+ reloc_idx = dec->ws->cs_add_reloc(dec->cs, cs_buf, usage, domain,
+ RADEON_PRIO_MIN);
set_reg(dec, RUVD_GPCOM_VCPU_DATA0, off);
set_reg(dec, RUVD_GPCOM_VCPU_DATA1, reloc_idx * 4);
set_reg(dec, RUVD_GPCOM_VCPU_CMD, cmd << 1);
#ifndef RADEON_VCE_H
#define RADEON_VCE_H
-#define RVCE_RELOC(buf, usage, domain) (enc->ws->cs_add_reloc(enc->cs, (buf), (usage), domain))
+#define RVCE_RELOC(buf, usage, domain) (enc->ws->cs_add_reloc(enc->cs, (buf), (usage), domain, RADEON_PRIO_MIN))
#define RVCE_CS(value) (enc->cs->buf[enc->cs->cdw++] = (value))
#define RVCE_BEGIN(cmd) { uint32_t *begin = &enc->cs->buf[enc->cs->cdw++]; RVCE_CS(cmd)
static unsigned radeon_drm_cs_add_reloc(struct radeon_winsys_cs *rcs,
struct radeon_winsys_cs_handle *buf,
enum radeon_bo_usage usage,
- enum radeon_bo_domain domains)
+ enum radeon_bo_domain domains,
+ enum radeon_bo_priority priority)
{
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
struct radeon_bo *bo = (struct radeon_bo*)buf;
enum radeon_bo_domain added_domains;
- unsigned index = radeon_add_reloc(cs, bo, usage, domains, 0, &added_domains);
+ unsigned index = radeon_add_reloc(cs, bo, usage, domains, priority, &added_domains);
if (added_domains & RADEON_DOMAIN_GTT)
cs->csc->used_gart += bo->base.size;
RADEON_DOMAIN_GTT);
/* Add the fence as a dummy relocation. */
cs->ws->base.cs_add_reloc(rcs, cs->ws->base.buffer_get_cs_handle(fence),
- RADEON_USAGE_READWRITE, RADEON_DOMAIN_GTT);
+ RADEON_USAGE_READWRITE, RADEON_DOMAIN_GTT,
+ RADEON_PRIO_MIN);
return (struct pipe_fence_handle*)fence;
}
unsigned (*cs_add_reloc)(struct radeon_winsys_cs *cs,
struct radeon_winsys_cs_handle *buf,
enum radeon_bo_usage usage,
- enum radeon_bo_domain domain/*,
- enum radeon_bo_priority priority*/);
+ enum radeon_bo_domain domain,
+ enum radeon_bo_priority priority);
/**
* Return TRUE if there is enough memory in VRAM and GTT for the relocs