if (transfer->usage & PIPE_TRANSFER_WRITE) {
write = 1;
}
- data = radeon_ws_bo_map(rscreen->rw, rbuffer->r.bo, transfer->usage, rctx);
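+	/* hand the winsys the pipe context so a blocking map can flush pending work via pipe->flush() */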
+ data = radeon_ws_bo_map(rscreen->rw, rbuffer->r.bo, transfer->usage, pipe);
if (!data)
return NULL;
{
struct r600_context *rctx = r600_context(ctx);
-	struct r600_query *rquery = NULL;
+#if 0
+ static int dc = 0;
+ char dname[256];
+#endif
/* flush upload buffers */
u_upload_flush(rctx->upload_vb);
/* suspend queries */
r600_queries_suspend(ctx);
+
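+	/* debugging aid, disabled by default: dump the first two command streams as .bof files */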
+#if 0
+ sprintf(dname, "gallium-%08d.bof", dc);
+ if (dc < 2) {
+ radeon_ctx_dump_bof(rctx->ctx, dname);
+ R600_ERR("dumped %s\n", dname);
+ }
+ dc++;
+#endif
+
radeon_ctx_submit(rctx->ctx);
-	LIST_FOR_EACH_ENTRY(rquery, &rctx->query_list, list) {
-		r600_queries_resume(ctx);
-	}
+	/* resume queries */
+	r600_queries_resume(ctx);
-void r600_flush_ctx(void *data)
-{
- struct r600_context *rctx = data;
-
- rctx->context.flush(&rctx->context, 0, NULL);
-}
-
struct pipe_context *r600_create_context(struct pipe_screen *screen, void *priv)
{
struct r600_context *rctx = CALLOC_STRUCT(r600_context);
u32 *results;
int i;
- results = radeon_ws_bo_map(rscreen->rw, rquery->buffer, 0, r600_context(ctx));
+ results = radeon_ws_bo_map(rscreen->rw, rquery->buffer, 0, ctx);
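+	/* each result is a start/end pair of 64-bit counters stored as low/high dwords */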
for (i = 0; i < rquery->num_results; i += 4) {
start = (u64)results[i] | (u64)results[i + 1] << 32;
end = (u64)results[i + 2] | (u64)results[i + 3] << 32;
if (rpshader->bo == NULL) {
return -ENOMEM;
}
- data = radeon_ws_bo_map(rscreen->rw, rpshader->bo, 0, rctx);
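+	/* map the shader BO and copy in the bytecode (bc.ndw 32-bit words) */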
+	data = radeon_ws_bo_map(rscreen->rw, rpshader->bo, 0, ctx);
+	if (data == NULL) {
+		return -ENOMEM;
+	}
memcpy(data, rshader->bc.bytecode, rshader->bc.ndw * 4);
radeon_ws_bo_unmap(rscreen->rw, rpshader->bo);
/* build state */
	/* unreference the old surfaces and reference the new ones */
rstate->id = R600_PIPE_STATE_FRAMEBUFFER;
for (int i = 0; i < rctx->framebuffer.nr_cbufs; i++) {
+ pipe_surface_reference(&rctx->framebuffer.cbufs[i], NULL);
+ }
+ for (int i = 0; i < state->nr_cbufs; i++) {
pipe_surface_reference(&rctx->framebuffer.cbufs[i], state->cbufs[i]);
}
pipe_surface_reference(&rctx->framebuffer.zsbuf, state->zsbuf);
transfer->box.y / util_format_get_blockheight(format) * transfer->stride +
transfer->box.x / util_format_get_blockwidth(format) * util_format_get_blocksize(format);
}
- map = radeon_ws_bo_map(radeon, bo, 0, r600_context(ctx));
+ map = radeon_ws_bo_map(radeon, bo, 0, ctx);
if (!map) {
return NULL;
}
#define R600_QUERY_SIZE 1
#define R600_QUERY_PM4 128
-void r600_flush_ctx(void *data);
#endif
unsigned flags, void *ctx)
{
struct radeon_bo_pb *buf = radeon_bo_pb(_buf);
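+	/* ctx, when non-NULL, is the pipe_context supplied by the driver's map calls */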
+ struct pipe_context *pctx = ctx;
if (flags & PB_USAGE_UNSYNCHRONIZED) {
		if (!buf->bo->data && radeon_bo_map(buf->mgr->radeon, buf->bo)) {
			return NULL;
		}
		return buf->bo->data;
	}
- if (p_atomic_read(&buf->bo->reference.count) > 1) {
- if (flags & PB_USAGE_DONTBLOCK) {
- return NULL;
- }
- if (ctx) {
- r600_flush_ctx(ctx);
- }
+ if (flags & PB_USAGE_DONTBLOCK) {
+ return NULL;
+ }
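+	/* flush so commands still referencing this BO are submitted before the map blocks */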
+ if (ctx) {
+ pctx->flush(pctx, 0, NULL);
}
if (flags & PB_USAGE_DONTBLOCK) {