I have moved 'last_flush' and 'binding' from r600_bo to winsys/radeon.
The other members are now part of r600_resource.
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
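In the new layout the driver talks to the winsys through the pb_buffer and radeon_winsys_cs_handle embedded in r600_resource, instead of going through an r600_bo wrapper. A minimal sketch of the resulting access pattern, assuming the buffer_map signature used throughout this patch (the helper itself is hypothetical):

	/* Hypothetical helper: map a resource's buffer through the winsys,
	 * where r600_bo_map() used to be called. */
	static void *r600_resource_map(struct r600_pipe_context *rctx,
				       struct r600_resource *res, unsigned usage)
	{
		return rctx->ws->buffer_map(res->buf, rctx->ctx.cs, usage);
	}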
struct r600_pipe_sampler_view *view = CALLOC_STRUCT(r600_pipe_sampler_view);
struct r600_pipe_resource_state *rstate;
struct r600_resource_texture *tmp = (struct r600_resource_texture*)texture;
- struct r600_resource *rbuffer;
unsigned format, endian;
uint32_t word4 = 0, yuv_format = 0, pitch = 0;
unsigned char swizzle[4], array_mode = 0, tile_type = 0;
- struct r600_bo *bo[2];
unsigned height, depth;
if (view == NULL)
word4 |= S_030010_NUM_FORMAT_ALL(V_030010_SQ_NUM_FORMAT_INT);
}
- rbuffer = &tmp->resource;
- bo[0] = rbuffer->bo;
- bo[1] = rbuffer->bo;
-
height = texture->height0;
depth = texture->depth0;
depth = texture->array_size;
}
- rstate->bo[0] = bo[0];
- rstate->bo[1] = bo[1];
+ rstate->bo[0] = &tmp->resource;
+ rstate->bo[1] = &tmp->resource;
rstate->bo_usage[0] = RADEON_USAGE_READ;
rstate->bo_usage[1] = RADEON_USAGE_READ;
const struct pipe_framebuffer_state *state, int cb)
{
struct r600_resource_texture *rtex;
- struct r600_resource *rbuffer;
struct r600_surface *surf;
unsigned level = state->cbufs[cb]->u.tex.level;
unsigned pitch, slice;
unsigned offset;
unsigned tile_type;
const struct util_format_description *desc;
- struct r600_bo *bo[3];
int i;
unsigned blend_clamp = 0, blend_bypass = 0;
rtex = rtex->flushed_depth_texture;
}
- rbuffer = &rtex->resource;
- bo[0] = rbuffer->bo;
- bo[1] = rbuffer->bo;
- bo[2] = rbuffer->bo;
-
	/* XXX quite sure dx10+ hw doesn't need any offset hacks */
offset = r600_texture_get_offset((struct r600_resource_texture *)state->cbufs[cb]->texture,
level, state->cbufs[cb]->u.tex.first_layer);
format = r600_translate_colorformat(surf->base.format);
swap = r600_translate_colorswap(surf->base.format);
- if (rbuffer->b.b.b.usage == PIPE_USAGE_STAGING) {
+ if (rtex->resource.b.b.b.usage == PIPE_USAGE_STAGING) {
endian = ENDIAN_NONE;
} else {
endian = r600_colorformat_endian_swap(format);
/* FIXME handle enabling of CB beyond BASE8 which has different offset */
r600_pipe_state_add_reg(rstate,
R_028C60_CB_COLOR0_BASE + cb * 0x3C,
- offset >> 8, 0xFFFFFFFF, bo[0], RADEON_USAGE_READWRITE);
+ offset >> 8, 0xFFFFFFFF, &rtex->resource, RADEON_USAGE_READWRITE);
r600_pipe_state_add_reg(rstate,
R_028C78_CB_COLOR0_DIM + cb * 0x3C,
0x0, 0xFFFFFFFF, NULL, 0);
r600_pipe_state_add_reg(rstate,
R_028C70_CB_COLOR0_INFO + cb * 0x3C,
- color_info, 0xFFFFFFFF, bo[0], RADEON_USAGE_READWRITE);
+ color_info, 0xFFFFFFFF, &rtex->resource, RADEON_USAGE_READWRITE);
r600_pipe_state_add_reg(rstate,
R_028C64_CB_COLOR0_PITCH + cb * 0x3C,
S_028C64_PITCH_TILE_MAX(pitch),
r600_pipe_state_add_reg(rstate,
R_028C74_CB_COLOR0_ATTRIB + cb * 0x3C,
S_028C74_NON_DISP_TILING_ORDER(tile_type),
- 0xFFFFFFFF, bo[0], RADEON_USAGE_READWRITE);
+ 0xFFFFFFFF, &rtex->resource, RADEON_USAGE_READWRITE);
}
static void evergreen_db(struct r600_pipe_context *rctx, struct r600_pipe_state *rstate,
format = r600_translate_dbformat(rtex->real_format);
r600_pipe_state_add_reg(rstate, R_028048_DB_Z_READ_BASE,
- offset >> 8, 0xFFFFFFFF, rtex->resource.bo, RADEON_USAGE_READWRITE);
+ offset >> 8, 0xFFFFFFFF, &rtex->resource, RADEON_USAGE_READWRITE);
r600_pipe_state_add_reg(rstate, R_028050_DB_Z_WRITE_BASE,
- offset >> 8, 0xFFFFFFFF, rtex->resource.bo, RADEON_USAGE_READWRITE);
+ offset >> 8, 0xFFFFFFFF, &rtex->resource, RADEON_USAGE_READWRITE);
r600_pipe_state_add_reg(rstate, R_028008_DB_DEPTH_VIEW, 0x00000000, 0xFFFFFFFF, NULL, 0);
if (rtex->stencil) {
r600_texture_get_offset(rtex->stencil, level, first_layer);
r600_pipe_state_add_reg(rstate, R_02804C_DB_STENCIL_READ_BASE,
- stencil_offset >> 8, 0xFFFFFFFF, rtex->stencil->resource.bo, RADEON_USAGE_READWRITE);
+ stencil_offset >> 8, 0xFFFFFFFF, &rtex->stencil->resource, RADEON_USAGE_READWRITE);
r600_pipe_state_add_reg(rstate, R_028054_DB_STENCIL_WRITE_BASE,
- stencil_offset >> 8, 0xFFFFFFFF, rtex->stencil->resource.bo, RADEON_USAGE_READWRITE);
+ stencil_offset >> 8, 0xFFFFFFFF, &rtex->stencil->resource, RADEON_USAGE_READWRITE);
r600_pipe_state_add_reg(rstate, R_028044_DB_STENCIL_INFO,
- 1, 0xFFFFFFFF, rtex->stencil->resource.bo, RADEON_USAGE_READWRITE);
+ 1, 0xFFFFFFFF, &rtex->stencil->resource, RADEON_USAGE_READWRITE);
} else {
r600_pipe_state_add_reg(rstate, R_028044_DB_STENCIL_INFO,
0, 0xFFFFFFFF, NULL, RADEON_USAGE_READWRITE);
r600_pipe_state_add_reg(rstate, R_028040_DB_Z_INFO,
S_028040_ARRAY_MODE(rtex->array_mode[level]) | S_028040_FORMAT(format),
- 0xFFFFFFFF, rtex->resource.bo, RADEON_USAGE_READWRITE);
+ 0xFFFFFFFF, &rtex->resource, RADEON_USAGE_READWRITE);
r600_pipe_state_add_reg(rstate, R_028058_DB_DEPTH_SIZE,
S_028058_PITCH_TILE_MAX(pitch),
0xFFFFFFFF, NULL, 0);
unsigned offset, unsigned stride,
enum radeon_bo_usage usage)
{
- rstate->bo[0] = rbuffer->bo;
+ rstate->bo[0] = rbuffer;
rstate->bo_usage[0] = usage;
rstate->val[0] = offset;
- rstate->val[1] = rbuffer->bo_size - offset - 1;
+ rstate->val[1] = rbuffer->buf->size - offset - 1;
rstate->val[2] = S_030008_ENDIAN_SWAP(r600_endian_swap(32)) |
S_030008_STRIDE(stride);
}
#include "../../winsys/radeon/drm/radeon_winsys.h"
#include "util/u_double_list.h"
+#include "util/u_vbuf_mgr.h"
#define R600_ERR(fmt, args...) \
fprintf(stderr, "EE %s:%d %s - "fmt, __FILE__, __LINE__, __func__, ##args)
enum radeon_family r600_get_family(struct radeon *rw);
enum chip_class r600_get_family_class(struct radeon *radeon);
-/* r600_bo.c */
-struct r600_bo;
-struct radeon_winsys_cs;
+struct r600_resource {
+ struct u_vbuf_resource b;
-struct r600_bo *r600_bo(struct radeon *radeon,
- unsigned size, unsigned alignment,
- unsigned binding, unsigned usage);
-struct r600_bo *r600_bo_handle(struct radeon *radeon, struct winsys_handle *whandle,
- unsigned *stride, unsigned *array_mode);
-void *r600_bo_map(struct radeon *radeon, struct r600_bo *bo, struct radeon_winsys_cs *cs, unsigned usage);
-void r600_bo_unmap(struct radeon *radeon, struct r600_bo *bo);
-boolean r600_bo_get_winsys_handle(struct radeon *radeon, struct r600_bo *pb_bo,
- unsigned stride, struct winsys_handle *whandle);
-
-void r600_bo_destroy(struct r600_bo *bo);
-
-/* this relies on the pipe_reference being the first member of r600_bo */
-static INLINE void r600_bo_reference(struct r600_bo **dst, struct r600_bo *src)
-{
- struct r600_bo *old = *dst;
-
- if (pipe_reference((struct pipe_reference *)(*dst), (struct pipe_reference *)src)) {
- r600_bo_destroy(old);
- }
- *dst = src;
-}
+ /* Winsys objects. */
+ struct pb_buffer *buf;
+ struct radeon_winsys_cs_handle *cs_buf;
+ /* Resource state. */
+ unsigned domains;
+};
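Because u_vbuf_resource begins with a pipe_resource, reference counting no longer needs the hand-rolled r600_bo_reference; the rest of this patch simply casts and uses pipe_resource_reference, e.g.:

	/* Drop a reference; the cast is valid because the pipe_resource is
	 * the first member of r600_resource (via u_vbuf_resource). */
	pipe_resource_reference((struct pipe_resource**)&shader->bo, NULL);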
/* R600/R700 STATES */
#define R600_GROUP_MAX 16
u32 value;
u32 mask;
struct r600_block *block;
- struct r600_bo *bo;
+ struct r600_resource *bo;
enum radeon_bo_usage bo_usage;
u32 id;
};
struct r600_pipe_resource_state {
unsigned id;
u32 val[8];
- struct r600_bo *bo[2];
+ struct r600_resource *bo[2];
enum radeon_bo_usage bo_usage[2];
};
#define R600_BLOCK_STATUS_RESOURCE_VERTEX (1 << 3)
struct r600_block_reloc {
- struct r600_bo *bo;
+ struct r600_resource *bo;
enum radeon_bo_usage bo_usage;
unsigned flush_flags;
unsigned flush_mask;
/* The buffer where query results are stored. It's used as a ring;
 * data blocks for the current query are stored sequentially from
 * results_start to results_end, wrapping at the buffer end */
- struct r600_bo *buffer;
+ struct r600_resource *buffer;
unsigned buffer_size;
/* linked list of queries */
struct list_head list;
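The wrap implied by the ring comment above keeps results_end inside the buffer. A sketch of the cursor advance, using the field names above (the helper itself is hypothetical):

	/* Hypothetical: advance the ring write cursor, wrapping at the end. */
	static unsigned r600_query_next_end(struct r600_query *query)
	{
		return (query->results_end + query->result_size) % query->buffer_size;
	}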
struct r600_context {
struct radeon *radeon;
+ struct r600_screen *screen;
struct radeon_winsys_cs *cs;
struct r600_range *range;
unsigned init_dwords;
unsigned creloc;
- struct r600_bo **bo;
+ struct r600_resource **bo;
u32 *pm4;
unsigned pm4_cdwords;
u32 vgt_index_type;
u32 vgt_draw_initiator;
u32 indices_bo_offset;
- struct r600_bo *indices;
+ struct r600_resource *indices;
};
void r600_get_backend_mask(struct r600_context *ctx);
-int r600_context_init(struct r600_context *ctx, struct radeon *radeon);
+int r600_context_init(struct r600_context *ctx, struct r600_screen *screen, struct radeon *radeon);
void r600_context_fini(struct r600_context *ctx);
void r600_context_pipe_state_set(struct r600_context *ctx, struct r600_pipe_state *state);
void r600_context_pipe_state_set_ps_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, unsigned rid);
void r600_context_queries_resume(struct r600_context *ctx, boolean flushed);
void r600_query_predication(struct r600_context *ctx, struct r600_query *query, int operation,
int flag_wait);
-void r600_context_emit_fence(struct r600_context *ctx, struct r600_bo *fence,
+void r600_context_emit_fence(struct r600_context *ctx, struct r600_resource *fence,
unsigned offset, unsigned value);
void r600_context_flush_all(struct r600_context *ctx, unsigned flush_flags);
void r600_context_flush_dest_caches(struct r600_context *ctx);
-int evergreen_context_init(struct r600_context *ctx, struct radeon *radeon);
+int evergreen_context_init(struct r600_context *ctx, struct r600_screen *screen, struct radeon *radeon);
void evergreen_context_draw(struct r600_context *ctx, const struct r600_draw *draw);
void evergreen_context_flush_dest_caches(struct r600_context *ctx);
void evergreen_context_pipe_state_set_ps_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, unsigned rid);
struct r600_pipe_state *state,
u32 offset, u32 value, u32 mask,
u32 range_id, u32 block_id,
- struct r600_bo *bo,
+ struct r600_resource *bo,
enum radeon_bo_usage usage);
void r600_pipe_state_add_reg_noblock(struct r600_pipe_state *state,
u32 offset, u32 value, u32 mask,
- struct r600_bo *bo,
+ struct r600_resource *bo,
enum radeon_bo_usage usage);
#define r600_pipe_state_add_reg(state, offset, value, mask, bo, usage) _r600_pipe_state_add_reg(&rctx->ctx, state, offset, value, mask, CTX_RANGE_ID(offset), CTX_BLOCK_ID(offset), bo, usage)
}
static inline void r600_pipe_state_mod_reg_bo(struct r600_pipe_state *state,
- u32 value, struct r600_bo *bo,
+ u32 value, struct r600_resource *bo,
enum radeon_bo_usage usage)
{
state->regs[state->nregs].value = value;
ve->fs_size = bc.ndw*4;
- /* use PIPE_BIND_VERTEX_BUFFER so we use the cache buffer manager */
- ve->fetch_shader = r600_bo(rctx->radeon, ve->fs_size, 256, PIPE_BIND_VERTEX_BUFFER, PIPE_USAGE_IMMUTABLE);
+ ve->fetch_shader = (struct r600_resource*)
+ pipe_buffer_create(rctx->context.screen,
+ PIPE_BIND_CUSTOM,
+ PIPE_USAGE_IMMUTABLE, ve->fs_size);
if (ve->fetch_shader == NULL) {
r600_bytecode_clear(&bc);
return -ENOMEM;
}
- bytecode = r600_bo_map(rctx->radeon, ve->fetch_shader, rctx->ctx.cs, PIPE_TRANSFER_WRITE);
+ bytecode = rctx->ws->buffer_map(ve->fetch_shader->buf, rctx->ctx.cs, PIPE_TRANSFER_WRITE);
if (bytecode == NULL) {
r600_bytecode_clear(&bc);
- r600_bo_reference(&ve->fetch_shader, NULL);
+ pipe_resource_reference((struct pipe_resource**)&ve->fetch_shader, NULL);
return -ENOMEM;
}
memcpy(bytecode, bc.bytecode, ve->fs_size);
}
- r600_bo_unmap(rctx->radeon, ve->fetch_shader);
+ rctx->ws->buffer_unmap(ve->fetch_shader->buf);
r600_bytecode_clear(&bc);
if (rctx->chip_class >= EVERGREEN)
struct r600_screen *rscreen = (struct r600_screen*)screen;
struct r600_resource *rbuffer = r600_resource(buf);
- if (rbuffer->bo) {
- r600_bo_reference(&rbuffer->bo, NULL);
- }
- rbuffer->bo = NULL;
+ pb_reference(&rbuffer->buf, NULL);
util_slab_free(&rscreen->pool_buffers, rbuffer);
}
if (rbuffer->b.user_ptr)
return (uint8_t*)rbuffer->b.user_ptr + transfer->box.x;
- data = r600_bo_map(rctx->screen->radeon, rbuffer->bo, rctx->ctx.cs, transfer->usage);
+ data = rctx->ws->buffer_map(rbuffer->buf, rctx->ctx.cs, transfer->usage);
if (!data)
return NULL;
if (rbuffer->b.user_ptr)
return;
- if (rbuffer->bo)
- r600_bo_unmap(rctx->screen->radeon, rbuffer->bo);
+ rctx->ws->buffer_unmap(rbuffer->buf);
}
static void r600_buffer_transfer_flush_region(struct pipe_context *pipe,
unsigned layer_stride)
{
struct r600_pipe_context *rctx = (struct r600_pipe_context*)pipe;
- struct radeon *radeon = rctx->screen->radeon;
struct r600_resource *rbuffer = r600_resource(resource);
uint8_t *map = NULL;
assert(rbuffer->b.user_ptr == NULL);
- map = r600_bo_map(radeon, rbuffer->bo, rctx->ctx.cs,
- PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD | usage);
+ map = rctx->ws->buffer_map(rbuffer->buf, rctx->ctx.cs,
+ PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD | usage);
memcpy(map + box->x, data, box->width);
- if (rbuffer->bo)
- r600_bo_unmap(radeon, rbuffer->bo);
+ rctx->ws->buffer_unmap(rbuffer->buf);
}
static const struct u_resource_vtbl r600_buffer_vtbl =
r600_buffer_transfer_inline_write /* transfer_inline_write */
};
+bool r600_init_resource(struct r600_screen *rscreen,
+ struct r600_resource *res,
+ unsigned size, unsigned alignment,
+ unsigned bind, unsigned usage)
+{
+ uint32_t initial_domain, domains;
+
+	/* Staging resources participate in transfers and blits only
+ * and are used for uploads and downloads from regular
+ * resources. We generate them internally for some transfers.
+ */
+ if (usage == PIPE_USAGE_STAGING) {
+ domains = RADEON_DOMAIN_GTT;
+ initial_domain = RADEON_DOMAIN_GTT;
+ } else {
+ domains = RADEON_DOMAIN_GTT | RADEON_DOMAIN_VRAM;
+
+ switch(usage) {
+ case PIPE_USAGE_DYNAMIC:
+ case PIPE_USAGE_STREAM:
+ case PIPE_USAGE_STAGING:
+ initial_domain = RADEON_DOMAIN_GTT;
+ break;
+ case PIPE_USAGE_DEFAULT:
+ case PIPE_USAGE_STATIC:
+ case PIPE_USAGE_IMMUTABLE:
+ default:
+ initial_domain = RADEON_DOMAIN_VRAM;
+ break;
+ }
+ }
+
+ res->buf = rscreen->ws->buffer_create(rscreen->ws, size, alignment, bind, initial_domain);
+ if (!res->buf) {
+ return false;
+ }
+
+ res->cs_buf = rscreen->ws->buffer_get_cs_handle(res->buf);
+ res->domains = domains;
+ return true;
+}
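Callers allocate the CPU-side r600_resource themselves and free it if this helper fails, since no winsys buffer has been created at that point; mirroring the buffer path below:

	/* Usage sketch, mirroring r600_buffer_create() below. */
	if (!r600_init_resource(rscreen, rbuffer, templ->width0, alignment,
				templ->bind, templ->usage)) {
		FREE(rbuffer);
		return NULL;
	}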
+
struct pipe_resource *r600_buffer_create(struct pipe_screen *screen,
const struct pipe_resource *templ)
{
struct r600_screen *rscreen = (struct r600_screen*)screen;
struct r600_resource *rbuffer;
- struct r600_bo *bo;
/* XXX We probably want a different alignment for buffers and textures. */
unsigned alignment = 4096;
rbuffer->b.b.b.screen = screen;
rbuffer->b.b.vtbl = &r600_buffer_vtbl;
rbuffer->b.user_ptr = NULL;
- rbuffer->size = rbuffer->b.b.b.width0;
- rbuffer->bo_size = rbuffer->size;
-
- bo = r600_bo(rscreen->radeon,
- rbuffer->b.b.b.width0,
- alignment, rbuffer->b.b.b.bind,
- rbuffer->b.b.b.usage);
- if (bo == NULL) {
+ if (!r600_init_resource(rscreen, rbuffer, templ->width0, alignment, templ->bind, templ->usage)) {
FREE(rbuffer);
return NULL;
}
- rbuffer->bo = bo;
return &rbuffer->b.b.b;
}
rbuffer->b.b.b.array_size = 1;
rbuffer->b.b.b.flags = 0;
rbuffer->b.user_ptr = ptr;
- rbuffer->bo = NULL;
- rbuffer->bo_size = 0;
+ rbuffer->buf = NULL;
return &rbuffer->b.b.b;
}
*/
static struct r600_fence *r600_create_fence(struct r600_pipe_context *ctx)
{
- struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
struct r600_fence *fence = NULL;
if (!ctx->fences.bo) {
/* Create the shared buffer object */
- ctx->fences.bo = r600_bo(ctx->radeon, 4096, 0, 0, 0);
+ ctx->fences.bo = (struct r600_resource*)
+ pipe_buffer_create(&ctx->screen->screen, PIPE_BIND_CUSTOM,
+ PIPE_USAGE_STAGING, 4096);
if (!ctx->fences.bo) {
R600_ERR("r600: failed to create bo for fence objects\n");
return NULL;
}
- ctx->fences.data = r600_bo_map(ctx->radeon, ctx->fences.bo, rctx->ctx.cs,
- PIPE_TRANSFER_UNSYNCHRONIZED | PIPE_TRANSFER_WRITE);
+ ctx->fences.data = ctx->ws->buffer_map(ctx->fences.bo->buf, ctx->ctx.cs,
+ PIPE_TRANSFER_WRITE);
}
if (!LIST_IS_EMPTY(&ctx->fences.pool)) {
FREE(entry);
}
- r600_bo_unmap(rctx->radeon, rctx->fences.bo);
- r600_bo_reference(&rctx->fences.bo, NULL);
+ rctx->ws->buffer_unmap(rctx->fences.bo->buf);
+ pipe_resource_reference((struct pipe_resource**)&rctx->fences.bo, NULL);
}
r600_update_num_contexts(rctx->screen, -1);
/* Easy accessing of screen/winsys. */
rctx->screen = rscreen;
+ rctx->ws = rscreen->ws;
rctx->radeon = rscreen->radeon;
rctx->family = r600_get_family(rctx->radeon);
rctx->chip_class = r600_get_family_class(rctx->radeon);
case R600:
case R700:
r600_init_state_functions(rctx);
- if (r600_context_init(&rctx->ctx, rctx->radeon)) {
+ if (r600_context_init(&rctx->ctx, rctx->screen, rctx->radeon)) {
r600_destroy_context(&rctx->context);
return NULL;
}
case EVERGREEN:
case CAYMAN:
evergreen_init_state_functions(rctx);
- if (evergreen_context_init(&rctx->ctx, rctx->radeon)) {
+ if (evergreen_context_init(&rctx->ctx, rctx->screen, rctx->radeon)) {
r600_destroy_context(&rctx->context);
return NULL;
}
{
unsigned count;
struct pipe_vertex_element elements[PIPE_MAX_ATTRIBS];
- struct u_vbuf_elements *vmgr_elements;
- struct r600_bo *fetch_shader;
+ struct u_vbuf_elements *vmgr_elements;
+ struct r600_resource *fetch_shader;
unsigned fs_size;
struct r600_pipe_state rstate;
	/* if the offset is too big for the fetch instruction we need to alternate
struct r600_pipe_shader {
struct r600_shader shader;
struct r600_pipe_state rstate;
- struct r600_bo *bo;
- struct r600_bo *bo_fetch;
+ struct r600_resource *bo;
+ struct r600_resource *bo_fetch;
struct r600_vertex_element vertex_elements;
struct tgsi_token *tokens;
};
};
struct r600_pipe_fences {
- struct r600_bo *bo;
+ struct r600_resource *bo;
unsigned *data;
unsigned next_index;
/* linked list of preallocated blocks */
enum chip_class chip_class;
void *custom_dsa_flush;
struct r600_screen *screen;
+ struct radeon_winsys *ws;
struct radeon *radeon;
struct r600_pipe_state *states[R600_PIPE_NSTATES];
struct r600_context ctx;
void r600_flush_depth_textures(struct r600_pipe_context *rctx);
/* r600_buffer.c */
+bool r600_init_resource(struct r600_screen *rscreen,
+ struct r600_resource *res,
+ unsigned size, unsigned alignment,
+ unsigned bind, unsigned usage);
struct pipe_resource *r600_buffer_create(struct pipe_screen *screen,
const struct pipe_resource *templ);
struct pipe_resource *r600_user_buffer_create(struct pipe_screen *screen,
struct pipe_resource *staging_texture;
};
-/* This gets further specialized into either buffer or texture
- * structures. Use the vtbl struct to choose between the two
- * underlying implementations.
- */
-struct r600_resource {
- struct u_vbuf_resource b;
- struct r600_bo *bo;
- u32 size;
- unsigned bo_size;
-};
-
struct r600_resource_texture {
struct r600_resource resource;
/* copy new shader */
if (shader->bo == NULL) {
- /* use PIPE_BIND_VERTEX_BUFFER so we use the cache buffer manager */
- shader->bo = r600_bo(rctx->radeon, rshader->bc.ndw * 4, 4096, PIPE_BIND_VERTEX_BUFFER, PIPE_USAGE_IMMUTABLE);
+ shader->bo = (struct r600_resource*)
+ pipe_buffer_create(ctx->screen, PIPE_BIND_CUSTOM, PIPE_USAGE_IMMUTABLE, rshader->bc.ndw * 4);
if (shader->bo == NULL) {
return -ENOMEM;
}
- ptr = (uint32_t*)r600_bo_map(rctx->radeon, shader->bo, rctx->ctx.cs, PIPE_TRANSFER_WRITE);
+ ptr = (uint32_t*)rctx->ws->buffer_map(shader->bo->buf, rctx->ctx.cs, PIPE_TRANSFER_WRITE);
if (R600_BIG_ENDIAN) {
for (i = 0; i < rshader->bc.ndw; ++i) {
ptr[i] = bswap_32(rshader->bc.bytecode[i]);
} else {
memcpy(ptr, rshader->bc.bytecode, rshader->bc.ndw * sizeof(*ptr));
}
- r600_bo_unmap(rctx->radeon, shader->bo);
+ rctx->ws->buffer_unmap(shader->bo->buf);
}
/* build state */
switch (rshader->processor_type) {
void r600_pipe_shader_destroy(struct pipe_context *ctx, struct r600_pipe_shader *shader)
{
- r600_bo_reference(&shader->bo, NULL);
+ pipe_resource_reference((struct pipe_resource**)&shader->bo, NULL);
r600_bytecode_clear(&shader->shader.bc);
memset(&shader->shader,0,sizeof(struct r600_shader));
struct r600_pipe_sampler_view *view = CALLOC_STRUCT(r600_pipe_sampler_view);
struct r600_pipe_resource_state *rstate;
struct r600_resource_texture *tmp = (struct r600_resource_texture*)texture;
- struct r600_resource *rbuffer;
unsigned format, endian;
uint32_t word4 = 0, yuv_format = 0, pitch = 0;
unsigned char swizzle[4], array_mode = 0, tile_type = 0;
- struct r600_bo *bo[2];
unsigned width, height, depth, offset_level, last_level;
if (view == NULL)
word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
}
- rbuffer = &tmp->resource;
- bo[0] = rbuffer->bo;
- bo[1] = rbuffer->bo;
-
offset_level = state->u.tex.first_level;
last_level = state->u.tex.last_level - offset_level;
width = u_minify(texture->width0, offset_level);
depth = texture->array_size;
}
- rstate->bo[0] = bo[0];
- rstate->bo[1] = bo[1];
+ rstate->bo[0] = &tmp->resource;
+ rstate->bo[1] = &tmp->resource;
rstate->bo_usage[0] = RADEON_USAGE_READ;
rstate->bo_usage[1] = RADEON_USAGE_READ;
const struct pipe_framebuffer_state *state, int cb)
{
struct r600_resource_texture *rtex;
- struct r600_resource *rbuffer;
struct r600_surface *surf;
unsigned level = state->cbufs[cb]->u.tex.level;
unsigned pitch, slice;
unsigned format, swap, ntype, endian;
unsigned offset;
const struct util_format_description *desc;
- struct r600_bo *bo[3];
int i;
surf = (struct r600_surface *)state->cbufs[cb];
rtex = rtex->flushed_depth_texture;
}
- rbuffer = &rtex->resource;
- bo[0] = rbuffer->bo;
- bo[1] = rbuffer->bo;
- bo[2] = rbuffer->bo;
-
	/* XXX quite sure dx10+ hw doesn't need any offset hacks */
offset = r600_texture_get_offset(rtex,
level, state->cbufs[cb]->u.tex.first_layer);
format = r600_translate_colorformat(surf->base.format);
swap = r600_translate_colorswap(surf->base.format);
- if(rbuffer->b.b.b.usage == PIPE_USAGE_STAGING) {
+ if(rtex->resource.b.b.b.usage == PIPE_USAGE_STAGING) {
endian = ENDIAN_NONE;
} else {
endian = r600_colorformat_endian_swap(format);
r600_pipe_state_add_reg(rstate,
R_028040_CB_COLOR0_BASE + cb * 4,
- offset >> 8, 0xFFFFFFFF, bo[0], RADEON_USAGE_READWRITE);
+ offset >> 8, 0xFFFFFFFF, &rtex->resource, RADEON_USAGE_READWRITE);
r600_pipe_state_add_reg(rstate,
R_0280A0_CB_COLOR0_INFO + cb * 4,
- color_info, 0xFFFFFFFF, bo[0], RADEON_USAGE_READWRITE);
+ color_info, 0xFFFFFFFF, &rtex->resource, RADEON_USAGE_READWRITE);
r600_pipe_state_add_reg(rstate,
R_028060_CB_COLOR0_SIZE + cb * 4,
S_028060_PITCH_TILE_MAX(pitch) |
0x00000000, 0xFFFFFFFF, NULL, 0);
r600_pipe_state_add_reg(rstate,
R_0280E0_CB_COLOR0_FRAG + cb * 4,
- 0, 0xFFFFFFFF, bo[1], RADEON_USAGE_READWRITE);
+ 0, 0xFFFFFFFF, &rtex->resource, RADEON_USAGE_READWRITE);
r600_pipe_state_add_reg(rstate,
R_0280C0_CB_COLOR0_TILE + cb * 4,
- 0, 0xFFFFFFFF, bo[2], RADEON_USAGE_READWRITE);
+ 0, 0xFFFFFFFF, &rtex->resource, RADEON_USAGE_READWRITE);
r600_pipe_state_add_reg(rstate,
R_028100_CB_COLOR0_MASK + cb * 4,
0x00000000, 0xFFFFFFFF, NULL, 0);
const struct pipe_framebuffer_state *state)
{
struct r600_resource_texture *rtex;
- struct r600_resource *rbuffer;
struct r600_surface *surf;
unsigned level;
unsigned pitch, slice, format;
surf = (struct r600_surface *)state->zsbuf;
rtex = (struct r600_resource_texture*)state->zsbuf->texture;
- rbuffer = &rtex->resource;
-
	/* XXX quite sure dx10+ hw doesn't need any offset hacks */
offset = r600_texture_get_offset((struct r600_resource_texture *)state->zsbuf->texture,
level, state->zsbuf->u.tex.first_layer);
format = r600_translate_dbformat(state->zsbuf->texture->format);
r600_pipe_state_add_reg(rstate, R_02800C_DB_DEPTH_BASE,
- offset >> 8, 0xFFFFFFFF, rbuffer->bo, RADEON_USAGE_READWRITE);
+ offset >> 8, 0xFFFFFFFF, &rtex->resource, RADEON_USAGE_READWRITE);
r600_pipe_state_add_reg(rstate, R_028000_DB_DEPTH_SIZE,
S_028000_PITCH_TILE_MAX(pitch) | S_028000_SLICE_TILE_MAX(slice),
0xFFFFFFFF, NULL, 0);
r600_pipe_state_add_reg(rstate, R_028004_DB_DEPTH_VIEW, 0x00000000, 0xFFFFFFFF, NULL, 0);
r600_pipe_state_add_reg(rstate, R_028010_DB_DEPTH_INFO,
S_028010_ARRAY_MODE(rtex->array_mode[level]) | S_028010_FORMAT(format),
- 0xFFFFFFFF, rbuffer->bo, RADEON_USAGE_READWRITE);
+ 0xFFFFFFFF, &rtex->resource, RADEON_USAGE_READWRITE);
r600_pipe_state_add_reg(rstate, R_028D34_DB_PREFETCH_LIMIT,
(surf->aligned_height / 8) - 1, 0xFFFFFFFF, NULL, 0);
}
enum radeon_bo_usage usage)
{
rstate->val[0] = offset;
- rstate->bo[0] = rbuffer->bo;
+ rstate->bo[0] = rbuffer;
rstate->bo_usage[0] = usage;
- rstate->val[1] = rbuffer->bo_size - offset - 1;
+ rstate->val[1] = rbuffer->buf->size - offset - 1;
rstate->val[2] = S_038008_ENDIAN_SWAP(r600_endian_swap(32)) |
S_038008_STRIDE(stride);
}
rctx->states[rstate->id] = NULL;
}
for (int i = 0; i < rstate->nregs; i++) {
- r600_bo_reference(&rstate->regs[i].bo, NULL);
+ pipe_resource_reference((struct pipe_resource**)&rstate->regs[i].bo, NULL);
}
free(rstate);
}
if (rctx->vertex_elements == state)
rctx->vertex_elements = NULL;
- r600_bo_reference(&v->fetch_shader, NULL);
+ pipe_resource_reference((struct pipe_resource**)&v->fetch_shader, NULL);
u_vbuf_destroy_vertex_elements(rctx->vbuf_mgr, v->vmgr_elements);
FREE(state);
}
0xFFFFFFFF, NULL, 0);
r600_pipe_state_add_reg(&rctx->vs_const_buffer,
R_028980_ALU_CONST_CACHE_VS_0,
- offset >> 8, 0xFFFFFFFF, rbuffer->bo, RADEON_USAGE_READ);
+ offset >> 8, 0xFFFFFFFF, rbuffer, RADEON_USAGE_READ);
r600_context_pipe_state_set(&rctx->ctx, &rctx->vs_const_buffer);
rstate = &rctx->vs_const_buffer_resource[index];
0xFFFFFFFF, NULL, 0);
r600_pipe_state_add_reg(&rctx->ps_const_buffer,
R_028940_ALU_CONST_CACHE_PS_0,
- offset >> 8, 0xFFFFFFFF, rbuffer->bo, RADEON_USAGE_READ);
+ offset >> 8, 0xFFFFFFFF, rbuffer, RADEON_USAGE_READ);
r600_context_pipe_state_set(&rctx->ctx, &rctx->ps_const_buffer);
rstate = &rctx->ps_const_buffer_resource[index];
rdraw.indices = NULL;
if (draw.index_buffer) {
rbuffer = (struct r600_resource*)draw.index_buffer;
- rdraw.indices = rbuffer->bo;
+ rdraw.indices = rbuffer;
rdraw.indices_bo_offset = draw.index_buffer_offset;
}
struct r600_pipe_state *state,
u32 offset, u32 value, u32 mask,
u32 range_id, u32 block_id,
- struct r600_bo *bo,
+ struct r600_resource *bo,
enum radeon_bo_usage usage)
{
struct r600_range *range;
void r600_pipe_state_add_reg_noblock(struct r600_pipe_state *state,
u32 offset, u32 value, u32 mask,
- struct r600_bo *bo,
+ struct r600_resource *bo,
enum radeon_bo_usage usage)
{
if (bo) assert(usage);
{
struct r600_resource_texture *rtex = (struct r600_resource_texture*)ptex;
struct r600_resource *resource = &rtex->resource;
- struct radeon *radeon = ((struct r600_screen*)screen)->radeon;
+ struct r600_screen *rscreen = (struct r600_screen*)screen;
- return r600_bo_get_winsys_handle(radeon, resource->bo,
- rtex->pitch_in_bytes[0], whandle);
+ return rscreen->ws->buffer_get_handle(resource->buf,
+ rtex->pitch_in_bytes[0], whandle);
}
static void r600_texture_destroy(struct pipe_screen *screen,
if (rtex->flushed_depth_texture)
pipe_resource_reference((struct pipe_resource **)&rtex->flushed_depth_texture, NULL);
- if (resource->bo) {
- r600_bo_reference(&resource->bo, NULL);
- }
+ pb_reference(&resource->buf, NULL);
FREE(rtex);
}
unsigned array_mode,
unsigned pitch_in_bytes_override,
unsigned max_buffer_size,
- struct r600_bo *bo,
+ struct pb_buffer *buf,
boolean alloc_bo)
{
struct r600_resource_texture *rtex;
struct r600_resource *resource;
- struct radeon *radeon = ((struct r600_screen*)screen)->radeon;
+ struct r600_screen *rscreen = (struct r600_screen*)screen;
rtex = CALLOC_STRUCT(r600_resource_texture);
if (rtex == NULL)
resource->b.b.vtbl = &r600_texture_vtbl;
pipe_reference_init(&resource->b.b.b.reference, 1);
resource->b.b.b.screen = screen;
- resource->bo = bo;
rtex->pitch_override = pitch_in_bytes_override;
rtex->real_format = base->format;
rtex->size = stencil_offset + rtex->stencil->size;
}
- resource->size = rtex->size;
-
/* Now create the backing buffer. */
- if (!resource->bo && alloc_bo) {
+ if (!buf && alloc_bo) {
struct pipe_resource *ptex = &rtex->resource.b.b.b;
unsigned base_align = r600_get_base_alignment(screen, ptex->format, array_mode);
- resource->bo = r600_bo(radeon, rtex->size, base_align, base->bind, base->usage);
- if (!resource->bo) {
+ if (!r600_init_resource(rscreen, resource, rtex->size, base_align, base->bind, base->usage)) {
pipe_resource_reference((struct pipe_resource**)&rtex->stencil, NULL);
FREE(rtex);
return NULL;
}
+ } else if (buf) {
+ resource->buf = buf;
+ resource->cs_buf = rscreen->ws->buffer_get_cs_handle(buf);
+ resource->domains = RADEON_DOMAIN_GTT | RADEON_DOMAIN_VRAM;
}
- if (rtex->stencil)
- rtex->stencil->resource.bo = rtex->resource.bo;
+ if (rtex->stencil) {
+ rtex->stencil->resource.buf = rtex->resource.buf;
+ rtex->stencil->resource.cs_buf = rtex->resource.cs_buf;
+ rtex->stencil->resource.domains = rtex->resource.domains;
+ }
return rtex;
}
FREE(surface);
}
-
struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen,
const struct pipe_resource *templ,
struct winsys_handle *whandle)
{
- struct radeon *rw = ((struct r600_screen*)screen)->radeon;
- struct r600_bo *bo = NULL;
+ struct r600_screen *rscreen = (struct r600_screen*)screen;
+ struct pb_buffer *buf = NULL;
unsigned stride = 0;
unsigned array_mode = 0;
+ enum radeon_bo_layout micro, macro;
/* Support only 2D textures without mipmaps */
if ((templ->target != PIPE_TEXTURE_2D && templ->target != PIPE_TEXTURE_RECT) ||
templ->depth0 != 1 || templ->last_level != 0)
return NULL;
- bo = r600_bo_handle(rw, whandle, &stride, &array_mode);
- if (bo == NULL) {
+ buf = rscreen->ws->buffer_from_handle(rscreen->ws, whandle, &stride, NULL);
+ if (!buf)
return NULL;
- }
+
+	rscreen->ws->buffer_get_tiling(buf, &micro, &macro);
+
+ if (macro == RADEON_LAYOUT_TILED)
+ array_mode = V_0280A0_ARRAY_2D_TILED_THIN1;
+ else if (micro == RADEON_LAYOUT_TILED)
+ array_mode = V_0280A0_ARRAY_1D_TILED_THIN1;
+ else
+ array_mode = 0;
return (struct pipe_resource *)r600_texture_create_object(screen, templ, array_mode,
- stride, 0, bo, FALSE);
+ stride, 0, buf, FALSE);
}
int r600_texture_depth_flush(struct pipe_context *ctx,
{
struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
- struct r600_bo *bo;
+ struct pb_buffer *buf;
enum pipe_format format = transfer->resource->format;
- struct radeon *radeon = rctx->screen->radeon;
unsigned offset = 0;
char *map;
if (rtransfer->staging_texture) {
- bo = ((struct r600_resource *)rtransfer->staging_texture)->bo;
+ buf = ((struct r600_resource *)rtransfer->staging_texture)->buf;
} else {
struct r600_resource_texture *rtex = (struct r600_resource_texture*)transfer->resource;
if (rtex->flushed_depth_texture)
- bo = ((struct r600_resource *)rtex->flushed_depth_texture)->bo;
+ buf = ((struct r600_resource *)rtex->flushed_depth_texture)->buf;
else
- bo = ((struct r600_resource *)transfer->resource)->bo;
+ buf = ((struct r600_resource *)transfer->resource)->buf;
offset = rtransfer->offset +
transfer->box.y / util_format_get_blockheight(format) * transfer->stride +
transfer->box.x / util_format_get_blockwidth(format) * util_format_get_blocksize(format);
}
- if (!(map = r600_bo_map(radeon, bo, rctx->ctx.cs, transfer->usage))) {
+ if (!(map = rctx->ws->buffer_map(buf, rctx->ctx.cs, transfer->usage))) {
return NULL;
}
struct pipe_transfer* transfer)
{
struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
- struct radeon *radeon = ((struct r600_screen*)ctx->screen)->radeon;
- struct r600_bo *bo;
+ struct r600_pipe_context *rctx = (struct r600_pipe_context*)ctx;
+ struct pb_buffer *buf;
if (rtransfer->staging_texture) {
- bo = ((struct r600_resource *)rtransfer->staging_texture)->bo;
+ buf = ((struct r600_resource *)rtransfer->staging_texture)->buf;
} else {
struct r600_resource_texture *rtex = (struct r600_resource_texture*)transfer->resource;
if (rtex->flushed_depth_texture) {
- bo = ((struct r600_resource *)rtex->flushed_depth_texture)->bo;
+ buf = ((struct r600_resource *)rtex->flushed_depth_texture)->buf;
} else {
- bo = ((struct r600_resource *)transfer->resource)->bo;
+ buf = ((struct r600_resource *)transfer->resource)->buf;
}
}
- r600_bo_unmap(radeon, bo);
+ rctx->ws->buffer_unmap(buf);
}
void r600_init_surface_functions(struct r600_pipe_context *r600)
C_SOURCES := \
evergreen_hw_context.c \
- r600_bo.c \
r600_drm.c \
r600_hw_context.c
-
return r600_context_add_block(ctx, r600_loop_consts, nreg, PKT3_SET_LOOP_CONST, EVERGREEN_LOOP_CONST_OFFSET);
}
-int evergreen_context_init(struct r600_context *ctx, struct radeon *radeon)
+int evergreen_context_init(struct r600_context *ctx, struct r600_screen *screen, struct radeon *radeon)
{
int r;
memset(ctx, 0, sizeof(struct r600_context));
ctx->radeon = radeon;
+ ctx->screen = screen;
LIST_INITHEAD(&ctx->query_list);
void evergreen_context_flush_dest_caches(struct r600_context *ctx)
{
- struct r600_bo *cb[12];
- struct r600_bo *db;
+ struct r600_resource *cb[12];
+ struct r600_resource *db;
if (!(ctx->flags & R600_CONTEXT_DST_CACHES_DIRTY))
return;
+++ /dev/null
-/*
- * Copyright 2010 Dave Airlie
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * on the rights to use, copy, modify, merge, publish, distribute, sub
- * license, and/or sell copies of the Software, and to permit persons to whom
- * the Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Dave Airlie
- */
-#include "r600_priv.h"
-#include "r600d.h"
-#include "state_tracker/drm_driver.h"
-
-struct r600_bo *r600_bo(struct radeon *radeon,
- unsigned size, unsigned alignment,
- unsigned binding, unsigned usage)
-{
- struct r600_bo *bo;
- struct pb_buffer *pb;
- uint32_t initial_domain, domains;
-
- /* Staging resources particpate in transfers and blits only
- * and are used for uploads and downloads from regular
- * resources. We generate them internally for some transfers.
- */
- if (usage == PIPE_USAGE_STAGING) {
- domains = RADEON_DOMAIN_GTT;
- initial_domain = RADEON_DOMAIN_GTT;
- } else {
- domains = RADEON_DOMAIN_GTT | RADEON_DOMAIN_VRAM;
-
- switch(usage) {
- case PIPE_USAGE_DYNAMIC:
- case PIPE_USAGE_STREAM:
- case PIPE_USAGE_STAGING:
- initial_domain = RADEON_DOMAIN_GTT;
- break;
- case PIPE_USAGE_DEFAULT:
- case PIPE_USAGE_STATIC:
- case PIPE_USAGE_IMMUTABLE:
- default:
- initial_domain = RADEON_DOMAIN_VRAM;
- break;
- }
- }
-
- pb = radeon->ws->buffer_create(radeon->ws, size, alignment, binding, initial_domain);
- if (!pb) {
- return NULL;
- }
-
- bo = calloc(1, sizeof(struct r600_bo));
- bo->domains = domains;
- bo->buf = pb;
- bo->cs_buf = radeon->ws->buffer_get_cs_handle(pb);
-
- pipe_reference_init(&bo->reference, 1);
- return bo;
-}
-
-struct r600_bo *r600_bo_handle(struct radeon *radeon, struct winsys_handle *whandle,
- unsigned *stride, unsigned *array_mode)
-{
- struct pb_buffer *pb;
- struct r600_bo *bo = calloc(1, sizeof(struct r600_bo));
-
- pb = bo->buf = radeon->ws->buffer_from_handle(radeon->ws, whandle, stride, NULL);
- if (!pb) {
- free(bo);
- return NULL;
- }
-
- pipe_reference_init(&bo->reference, 1);
- bo->domains = RADEON_DOMAIN_GTT | RADEON_DOMAIN_VRAM;
- bo->cs_buf = radeon->ws->buffer_get_cs_handle(pb);
-
- if (stride)
- *stride = whandle->stride;
-
- if (array_mode) {
- enum radeon_bo_layout micro, macro;
-
-	radeon->ws->buffer_get_tiling(bo->buf, &micro, &macro);
-
- if (macro == RADEON_LAYOUT_TILED)
- *array_mode = V_0280A0_ARRAY_2D_TILED_THIN1;
- else if (micro == RADEON_LAYOUT_TILED)
- *array_mode = V_0280A0_ARRAY_1D_TILED_THIN1;
- else
- *array_mode = 0;
- }
- return bo;
-}
-
-void *r600_bo_map(struct radeon *radeon, struct r600_bo *bo, struct radeon_winsys_cs *cs, unsigned usage)
-{
- return radeon->ws->buffer_map(bo->buf, cs, usage);
-}
-
-void r600_bo_unmap(struct radeon *radeon, struct r600_bo *bo)
-{
- radeon->ws->buffer_unmap(bo->buf);
-}
-
-void r600_bo_destroy(struct r600_bo *bo)
-{
- pb_reference(&bo->buf, NULL);
- free(bo);
-}
-
-boolean r600_bo_get_winsys_handle(struct radeon *radeon, struct r600_bo *bo,
- unsigned stride, struct winsys_handle *whandle)
-{
- return radeon->ws->buffer_get_handle(bo->buf, stride, whandle);
-}
* Jerome Glisse
*/
#include "r600_priv.h"
+#include "r600_pipe.h"
#include "r600d.h"
#include "util/u_memory.h"
#include <errno.h>
/* Get backends mask */
void r600_get_backend_mask(struct r600_context *ctx)
{
- struct r600_bo * buffer;
- u32 * results;
+ struct r600_resource *buffer;
+ u32 *results;
unsigned num_backends = ctx->radeon->info.r600_num_backends;
unsigned i, mask = 0;
/* otherwise backup path for older kernels */
/* create buffer for event data */
- buffer = r600_bo(ctx->radeon, ctx->max_db*16, 1, 0,
- PIPE_USAGE_STAGING);
+ buffer = (struct r600_resource*)
+ pipe_buffer_create(&ctx->screen->screen, PIPE_BIND_CUSTOM,
+ PIPE_USAGE_STAGING, ctx->max_db*16);
if (!buffer)
goto err;
/* initialize buffer with zeroes */
- results = r600_bo_map(ctx->radeon, buffer, ctx->cs, PIPE_TRANSFER_WRITE);
+ results = ctx->screen->ws->buffer_map(buffer->buf, ctx->cs, PIPE_TRANSFER_WRITE);
if (results) {
memset(results, 0, ctx->max_db * 4 * 4);
- r600_bo_unmap(ctx->radeon, buffer);
+ ctx->screen->ws->buffer_unmap(buffer->buf);
/* emit EVENT_WRITE for ZPASS_DONE */
ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
r600_context_flush(ctx, 0);
/* analyze results */
- results = r600_bo_map(ctx->radeon, buffer, ctx->cs, PIPE_TRANSFER_READ);
+ results = ctx->screen->ws->buffer_map(buffer->buf, ctx->cs, PIPE_TRANSFER_READ);
if (results) {
for(i = 0; i < ctx->max_db; i++) {
/* at least highest bit will be set if backend is used */
if (results[i*4 + 1])
mask |= (1<<i);
}
- r600_bo_unmap(ctx->radeon, buffer);
+ ctx->screen->ws->buffer_unmap(buffer->buf);
}
}
- r600_bo_reference(&buffer, NULL);
+ pipe_resource_reference((struct pipe_resource**)&buffer, NULL);
if (mask != 0) {
ctx->backend_mask = mask;
block = range->blocks[i];
if (block) {
for (int k = 1; k <= block->nbo; k++)
- r600_bo_reference(&block->reloc[k].bo, NULL);
+ pipe_resource_reference((struct pipe_resource**)&block->reloc[k].bo, NULL);
free(block);
}
}
range->blocks[CTX_BLOCK_ID(offset)] = NULL;
}
for (int k = 1; k <= block->nbo; k++) {
- r600_bo_reference(&block->reloc[k].bo, NULL);
+ pipe_resource_reference((struct pipe_resource**)&block->reloc[k].bo, NULL);
}
free(block);
}
return 0;
}
-int r600_context_init(struct r600_context *ctx, struct radeon *radeon)
+int r600_context_init(struct r600_context *ctx, struct r600_screen *screen, struct radeon *radeon)
{
int r;
memset(ctx, 0, sizeof(struct r600_context));
ctx->radeon = radeon;
+ ctx->screen = screen;
LIST_INITHEAD(&ctx->query_list);
}
void r600_context_bo_flush(struct r600_context *ctx, unsigned flush_flags,
- unsigned flush_mask, struct r600_bo *bo)
+ unsigned flush_mask, struct r600_resource *bo)
{
/* if bo has already been flushed */
- if (!(~bo->last_flush & flush_flags)) {
- bo->last_flush &= flush_mask;
+ if (!(~bo->cs_buf->last_flush & flush_flags)) {
+ bo->cs_buf->last_flush &= flush_mask;
return;
}
G_0085F0_DB_ACTION_ENA(flush_flags))) {
if (ctx->flags & R600_CONTEXT_CHECK_EVENT_FLUSH) {
/* the rv670 seems to fail fbo-generatemipmap unless we flush the CB1 dest base ena */
- if ((bo->binding & BO_BOUND_TEXTURE) &&
+ if ((bo->cs_buf->binding & BO_BOUND_TEXTURE) &&
(flush_flags & S_0085F0_CB_ACTION_ENA(1))) {
if ((ctx->radeon->family == CHIP_RV670) ||
(ctx->radeon->family == CHIP_RS780) ||
ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_NOP, 0, ctx->predicate_drawing);
ctx->pm4[ctx->pm4_cdwords++] = r600_context_bo_reloc(ctx, bo, RADEON_USAGE_WRITE);
}
- bo->last_flush = (bo->last_flush | flush_flags) & flush_mask;
+ bo->cs_buf->last_flush = (bo->cs_buf->last_flush | flush_flags) & flush_mask;
}
void r600_context_reg(struct r600_context *ctx,
if (block->pm4_bo_index[id]) {
/* find relocation */
reloc_id = block->pm4_bo_index[id];
- r600_bo_reference(&block->reloc[reloc_id].bo, reg->bo);
+	pipe_resource_reference((struct pipe_resource**)&block->reloc[reloc_id].bo, &reg->bo->b.b.b);
block->reloc[reloc_id].bo_usage = reg->bo_usage;
/* always force dirty for relocs for now */
dirty |= R600_BLOCK_STATUS_DIRTY;
if (state == NULL) {
block->status &= ~(R600_BLOCK_STATUS_ENABLED | R600_BLOCK_STATUS_RESOURCE_DIRTY);
if (block->reloc[1].bo)
- block->reloc[1].bo->binding &= ~BO_BOUND_TEXTURE;
+ block->reloc[1].bo->cs_buf->binding &= ~BO_BOUND_TEXTURE;
- r600_bo_reference(&block->reloc[1].bo, NULL);
- r600_bo_reference(&block->reloc[2].bo, NULL);
+ pipe_resource_reference((struct pipe_resource**)&block->reloc[1].bo, NULL);
+ pipe_resource_reference((struct pipe_resource**)&block->reloc[2].bo, NULL);
LIST_DELINIT(&block->list);
LIST_DELINIT(&block->enable_list);
return;
/* VERTEX RESOURCE: we pretend there are 2 bos to relocate so
 * we have a single case for VERTEX & TEXTURE resources
*/
- r600_bo_reference(&block->reloc[1].bo, state->bo[0]);
+ pipe_resource_reference((struct pipe_resource**)&block->reloc[1].bo, &state->bo[0]->b.b.b);
block->reloc[1].bo_usage = state->bo_usage[0];
- r600_bo_reference(&block->reloc[2].bo, NULL);
+ pipe_resource_reference((struct pipe_resource**)&block->reloc[2].bo, NULL);
} else {
/* TEXTURE RESOURCE */
- r600_bo_reference(&block->reloc[1].bo, state->bo[0]);
+ pipe_resource_reference((struct pipe_resource**)&block->reloc[1].bo, &state->bo[0]->b.b.b);
block->reloc[1].bo_usage = state->bo_usage[0];
- r600_bo_reference(&block->reloc[2].bo, state->bo[1]);
+ pipe_resource_reference((struct pipe_resource**)&block->reloc[2].bo, &state->bo[1]->b.b.b);
block->reloc[2].bo_usage = state->bo_usage[1];
- state->bo[0]->binding |= BO_BOUND_TEXTURE;
+ state->bo[0]->cs_buf->binding |= BO_BOUND_TEXTURE;
}
if (is_vertex)
r600_context_pipe_state_set_sampler_border(ctx, state, offset);
}
-struct r600_bo *r600_context_reg_bo(struct r600_context *ctx, unsigned offset)
+struct r600_resource *r600_context_reg_bo(struct r600_context *ctx, unsigned offset)
{
struct r600_range *range;
struct r600_block *block;
void r600_context_flush_dest_caches(struct r600_context *ctx)
{
- struct r600_bo *cb[8];
- struct r600_bo *db;
+ struct r600_resource *cb[8];
+ struct r600_resource *db;
int i;
if (!(ctx->flags & R600_CONTEXT_DST_CACHES_DIRTY))
/* restart */
for (int i = 0; i < ctx->creloc; i++) {
- ctx->bo[i]->last_flush = 0;
- r600_bo_reference(&ctx->bo[i], NULL);
+ ctx->bo[i]->cs_buf->last_flush = 0;
+ pipe_resource_reference((struct pipe_resource**)&ctx->bo[i], NULL);
}
ctx->creloc = 0;
ctx->pm4_dirty_cdwords = 0;
}
}
-void r600_context_emit_fence(struct r600_context *ctx, struct r600_bo *fence_bo, unsigned offset, unsigned value)
+void r600_context_emit_fence(struct r600_context *ctx, struct r600_resource *fence_bo, unsigned offset, unsigned value)
{
unsigned ndwords = 10;
u32 *results, *current_result;
if (wait)
- results = r600_bo_map(ctx->radeon, query->buffer, ctx->cs, PIPE_TRANSFER_READ);
+ results = ctx->screen->ws->buffer_map(query->buffer->buf, ctx->cs, PIPE_TRANSFER_READ);
else
- results = r600_bo_map(ctx->radeon, query->buffer, ctx->cs, PIPE_TRANSFER_DONTBLOCK | PIPE_TRANSFER_READ);
+ results = ctx->screen->ws->buffer_map(query->buffer->buf, ctx->cs, PIPE_TRANSFER_DONTBLOCK | PIPE_TRANSFER_READ);
if (!results)
return FALSE;
}
query->results_start = query->results_end;
- r600_bo_unmap(ctx->radeon, query->buffer);
+ ctx->screen->ws->buffer_unmap(query->buffer->buf);
return TRUE;
}
u32 *results;
int i;
- results = r600_bo_map(ctx->radeon, query->buffer, ctx->cs, PIPE_TRANSFER_WRITE);
+ results = ctx->screen->ws->buffer_map(query->buffer->buf, ctx->cs, PIPE_TRANSFER_WRITE);
if (results) {
results = (u32*)((char*)results + query->results_end);
memset(results, 0, query->result_size);
results[(i * 4)+3] = 0x80000000;
}
}
- r600_bo_unmap(ctx->radeon, query->buffer);
+ ctx->screen->ws->buffer_unmap(query->buffer->buf);
}
}
* being written by the gpu, hence staging is probably a good
* usage pattern.
*/
- query->buffer = r600_bo(ctx->radeon, query->buffer_size, 1, 0,
- PIPE_USAGE_STAGING);
+ query->buffer = (struct r600_resource*)
+ pipe_buffer_create(&ctx->screen->screen, PIPE_BIND_CUSTOM, PIPE_USAGE_STAGING, query->buffer_size);
if (!query->buffer) {
free(query);
return NULL;
void r600_context_query_destroy(struct r600_context *ctx, struct r600_query *query)
{
- r600_bo_reference(&query->buffer, NULL);
+ pipe_resource_reference((struct pipe_resource**)&query->buffer, NULL);
LIST_DELINIT(&query->list);
free(query);
}
#define BO_BOUND_TEXTURE 1
-struct r600_bo {
- struct pipe_reference reference; /* this must be the first member for the r600_bo_reference inline to work */
- /* DO NOT MOVE THIS ^ */
- struct pb_buffer *buf;
- struct radeon_winsys_cs_handle *cs_buf;
- unsigned domains;
- unsigned last_flush;
- unsigned binding;
-};
-
/*
* r600_hw_context.c
*/
void r600_context_bo_flush(struct r600_context *ctx, unsigned flush_flags,
- unsigned flush_mask, struct r600_bo *rbo);
-struct r600_bo *r600_context_reg_bo(struct r600_context *ctx, unsigned offset);
+ unsigned flush_mask, struct r600_resource *rbo);
+struct r600_resource *r600_context_reg_bo(struct r600_context *ctx, unsigned offset);
int r600_context_add_block(struct r600_context *ctx, const struct r600_reg *reg, unsigned nreg,
unsigned opcode, unsigned offset_base);
void r600_context_pipe_state_set_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, struct r600_block *block);
void r600_init_cs(struct r600_context *ctx);
int r600_resource_init(struct r600_context *ctx, struct r600_range *range, unsigned offset, unsigned nblocks, unsigned stride, struct r600_reg *reg, int nreg, unsigned offset_base);
-static INLINE unsigned r600_context_bo_reloc(struct r600_context *ctx, struct r600_bo *rbo,
+static INLINE unsigned r600_context_bo_reloc(struct r600_context *ctx, struct r600_resource *rbo,
enum radeon_bo_usage usage)
{
enum radeon_bo_domain rd = usage & RADEON_USAGE_READ ? rbo->domains : 0;
if (reloc_index >= ctx->creloc)
ctx->creloc = reloc_index+1;
- r600_bo_reference(&ctx->bo[reloc_index], rbo);
+ pipe_resource_reference((struct pipe_resource**)&ctx->bo[reloc_index], &rbo->b.b.b);
return reloc_index * 4;
}
struct radeon_bo {
struct pb_buffer base;
+
+ unsigned last_flush;
+ unsigned binding;
+
struct radeon_bomgr *mgr;
struct radeon_drm_winsys *rws;
};
struct winsys_handle;
-struct radeon_winsys_cs_handle; /* for write_reloc etc. */
+
+struct radeon_winsys_cs_handle { /* for write_reloc etc. */
+ struct {
+ struct pb_buffer base;
+ } _private;
+
+ unsigned last_flush;
+ unsigned binding;
+};
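radeon_winsys_cs_handle deliberately mirrors the head of struct radeon_bo above, so the driver can reach last_flush and binding through res->cs_buf without seeing the full radeon_bo, and the winsys can hand out handles by casting. A sketch under that layout assumption (the real getter is not part of this hunk):

	/* Hypothetical buffer_get_cs_handle, valid only while the heads of
	 * radeon_bo and radeon_winsys_cs_handle stay layout-compatible. */
	static struct radeon_winsys_cs_handle *get_cs_handle(struct pb_buffer *buf)
	{
		return (struct radeon_winsys_cs_handle*)buf;
	}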
struct radeon_winsys_cs {
unsigned cdw; /* Number of used dwords. */