}
/* emit begin query */
- va = r600_resource_va(ctx->b.screen, (void*)query->buffer.buf);
- va += query->buffer.results_end;
+ va = query->buffer.buf->gpu_address + query->buffer.results_end;
switch (query->type) {
case PIPE_QUERY_OCCLUSION_COUNTER:
ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw, FALSE);
}
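The pattern is the same in every hunk of this patch: each per-use lookup through r600_resource_va() becomes a read of the gpu_address field cached on struct r600_resource. A minimal sketch of where that field would be populated, assuming it is set once at buffer allocation (e.g. in r600_init_resource(); buffer_get_virtual_address() is the winsys call this code base already uses, but treat the exact call site as an assumption):

    /* Cache the GPU virtual address once at allocation so the emit
     * paths below can read res->gpu_address instead of calling back
     * into the winsys for every packet. */
    res->gpu_address = rscreen->ws->buffer_get_virtual_address(res->cs_buf);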
- va = r600_resource_va(ctx->b.screen, (void*)query->buffer.buf);
+ va = query->buffer.buf->gpu_address;
+
/* emit end query */
switch (query->type) {
case PIPE_QUERY_OCCLUSION_COUNTER:
/* emit predicate packets for all data blocks */
for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
unsigned results_base = 0;
- uint64_t va = r600_resource_va(ctx->b.screen, &qbuf->buf->b.b);
+ uint64_t va = qbuf->buf->gpu_address;
while (results_base < qbuf->results_end) {
radeon_emit(cs, PKT3(PKT3_SET_PREDICATION, 1, 0));
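Each data block that feeds the predicate lives in whichever buffer of the query chain holds it, so the address must be re-derived per qbuf; after this change that is just the chain element's own cached gpu_address. For context, the loop body continues roughly as sketched below (recalled shape, not part of this hunk):

    /* One SET_PREDICATION packet per result block; every packet after
     * the first ORs in PREDICATION_CONTINUE so the CP accumulates the
     * per-block results into one predicate. */
    radeon_emit(cs, va + results_base);
    radeon_emit(cs, op | (((va + results_base) >> 32) & 0xFF));
    r600_emit_reloc(ctx, &ctx->rings.gfx, qbuf->buf,
                    RADEON_USAGE_READ, RADEON_PRIO_MIN);
    results_base += query->result_size;
    op |= PREDICATION_CONTINUE;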
uint32_t *results;
unsigned num_backends = ctx->screen->info.r600_num_backends;
unsigned i, mask = 0;
- uint64_t va;
/* if backend_map query is supported by the kernel */
if (ctx->screen->info.r600_backend_map_valid) {
PIPE_USAGE_STAGING, ctx->max_db*16);
if (!buffer)
goto err;
- va = r600_resource_va(ctx->b.screen, (void*)buffer);
/* initialize buffer with zeroes */
results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
/* emit EVENT_WRITE for ZPASS_DONE */
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
- radeon_emit(cs, va);
- radeon_emit(cs, va >> 32);
+ radeon_emit(cs, buffer->gpu_address);
+ radeon_emit(cs, buffer->gpu_address >> 32);
r600_emit_reloc(ctx, &ctx->rings.gfx, buffer, RADEON_USAGE_WRITE, RADEON_PRIO_MIN);
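For this backend-map fallback the GPU writes one ZPASS_DONE result pair per DB into the staging buffer, and the CPU then derives the mask of enabled backends from which slots became nonzero. A sketch of that readback, assuming the 16-bytes-per-DB layout implied by the ctx->max_db*16 allocation above:

    /* Any DB whose "end" counter landed nonzero is a live backend;
     * the 4-dword stride matches the 16 bytes allocated per DB. */
    for (i = 0; i < ctx->max_db; i++) {
        if (results[i*4 + 1])
            mask |= (1 << i);
    }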
t[i]->b.buffer_size) >> 2); /* BUFFER_SIZE (in DW) */
radeon_emit(cs, stride_in_dw[i]); /* VTX_STRIDE (in DW) */
} else {
- uint64_t va = r600_resource_va(rctx->b.screen,
- (void*)t[i]->b.buffer);
+ uint64_t va = r600_resource(t[i]->b.buffer)->gpu_address;
update_flags |= SURFACE_BASE_UPDATE_STRMOUT(i);
}
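On these chips the streamout base registers store a 256-byte-aligned address, so what actually gets written is va >> 8, and the SURFACE_BASE_UPDATE event flagged here makes the hardware latch the new base. Roughly (the register selector helper is illustrative, not taken from this hunk):

    /* The base register holds address bits [39:8], hence the shift.
     * strmout_buffer_base_reg() is a hypothetical helper standing in
     * for the per-buffer register offset computation. */
    r600_write_context_reg(cs, strmout_buffer_base_reg(i), va >> 8);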
if (rctx->streamout.append_bitmask & (1 << i)) {
- uint64_t va = r600_resource_va(rctx->b.screen,
- (void*)t[i]->buf_filled_size) +
+ uint64_t va = t[i]->buf_filled_size->gpu_address +
t[i]->buf_filled_size_offset;
/* Append. */
if (!t[i])
continue;
- va = r600_resource_va(rctx->b.screen,
- (void*)t[i]->buf_filled_size) + t[i]->buf_filled_size_offset;
+ va = t[i]->buf_filled_size->gpu_address + t[i]->buf_filled_size_offset;
radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_NONE) |
}
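The filled-size save path in the hunk above follows the same pattern: STRMOUT_BUFFER_UPDATE carries a 64-bit destination address, which is now the cached gpu_address plus the per-target offset instead of a fresh lookup. The full packet, roughly (recalled shape, completing the control dword cut off above):

    /* Store the hardware's filled-size counter for buffer i to memory
     * at va; the destination is split into lo/hi dwords. */
    radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
    radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
                STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_NONE) |
                STRMOUT_STORE_BUFFER_FILLED_SIZE);
    radeon_emit(cs, va);        /* DST_ADDRESS_LO */
    radeon_emit(cs, va >> 32);  /* DST_ADDRESS_HI */
    radeon_emit(cs, 0);         /* unused */
    radeon_emit(cs, 0);         /* unused */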
/* update colorbuffer state bits */
- rtex->cmask.base_address_reg =
- r600_resource_va(&rscreen->b, &rtex->cmask_buffer->b.b) >> 8;
+ rtex->cmask.base_address_reg = rtex->cmask_buffer->gpu_address >> 8;
if (rscreen->chip_class >= SI)
rtex->cb_color_info |= SI_S_028C70_FAST_CLEAR(1);
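The CMASK base registers store a 256-byte-aligned address, which is why both this hunk and the texture-creation hunk below shift the cached address right by 8. A one-line sketch of the invariant the encoding relies on (the alignment guarantee comes from the allocator, not from this patch):

    /* base_address_reg carries address bits [39:8] only, so the cached
     * virtual address must be 256-byte aligned. */
    assert(rtex->cmask_buffer->gpu_address % 256 == 0);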
struct r600_texture *rtex;
struct r600_resource *resource;
struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
- uint64_t va;
rtex = CALLOC_STRUCT(r600_texture);
if (rtex == NULL)
}
/* Initialize the CMASK base register value. */
- va = r600_resource_va(&rscreen->b, &rtex->resource.b.b);
- rtex->cmask.base_address_reg = (va + rtex->cmask.offset) >> 8;
+ rtex->cmask.base_address_reg =
+ (rtex->resource.gpu_address + rtex->cmask.offset) >> 8;
if (rscreen->debug_flags & DBG_VM) {
fprintf(stderr, "VM start=0x%"PRIX64" end=0x%"PRIX64" | Texture %ix%ix%i, %i levels, %i samples, %s\n",
- r600_resource_va(screen, &rtex->resource.b.b),
- r600_resource_va(screen, &rtex->resource.b.b) + rtex->resource.buf->size,
+ rtex->resource.gpu_address,
+ rtex->resource.gpu_address + rtex->resource.buf->size,
base->width0, base->height0, util_max_layer(base, 0)+1, base->last_level+1,
base->nr_samples ? base->nr_samples : 1, util_format_short_name(base->format));
}
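For reference, the helper these hunks retire presumably reduced to a per-call winsys round trip, along the lines of the sketch below (recalled shape; treat the exact body as an assumption). That round trip is precisely what reading the cached gpu_address field avoids:

    uint64_t r600_resource_va(struct pipe_screen *screen,
                              struct pipe_resource *resource)
    {
        struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
        struct r600_resource *rresource = (struct r600_resource*)resource;

        /* Every caller paid this winsys call on every use. */
        return rscreen->ws->buffer_get_virtual_address(rresource->cs_buf);
    }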