unsigned width0, unsigned height0)
{
- struct pipe_context *ctx = view->base.context;
struct r600_texture *tmp = (struct r600_texture*)view->base.texture;
uint64_t va;
int stride = util_format_get_blocksize(view->base.format);
swizzle_res = r600_get_swizzle_combined(desc->swizzle, swizzle, TRUE);
- va = r600_resource_va(ctx->screen, view->base.texture) + offset;
+ va = tmp->resource.gpu_address + offset;
view->tex_resource = &tmp->resource;
view->skip_mip_address_reloc = true;
} else if (texture->target == PIPE_TEXTURE_CUBE_ARRAY)
depth = texture->array_size / 6;
- va = r600_resource_va(ctx->screen, texture);
+ va = tmp->resource.gpu_address;
view->tex_resource = &tmp->resource;
view->tex_resource_words[0] = (S_030000_DIM(r600_tex_dim(texture->target, texture->nr_samples)) |
endian = ENDIAN_NONE;
}
- surf->cb_color_base =
- r600_resource_va(rctx->b.b.screen, pipe_buffer) >> 8;
+ surf->cb_color_base = r600_resource(pipe_buffer)->gpu_address >> 8;
surf->cb_color_pitch = (pitch / 8) - 1;
{
struct r600_screen *rscreen = rctx->screen;
struct r600_texture *rtex = (struct r600_texture*)surf->base.texture;
- struct pipe_resource *pipe_tex = surf->base.texture;
unsigned level = surf->base.u.tex.level;
unsigned pitch, slice;
unsigned color_info, color_attrib, color_dim = 0, color_view;
color_info |= S_028C70_COMPRESSION(1);
}
- base_offset = r600_resource_va(rctx->b.b.screen, pipe_tex);
+ base_offset = rtex->resource.gpu_address;
/* XXX handle enabling of CB beyond BASE8 which has different offset */
surf->cb_color_base = (base_offset + offset) >> 8;
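For reference, a minimal sketch (not part of the patch; the helper name and the assert are hypothetical) of why the base-address writes here and throughout the rest of the series shift by 8: registers such as CB_COLOR*_BASE, DB_*_BASE and SQ_PGM_START_* take a 256-byte-aligned GPU virtual address expressed in 256-byte units, so the cached gpu_address plus any byte offset is divided by 256 before being programmed.

#include <assert.h>
#include <stdint.h>

/* Hypothetical helper, for illustration only: convert a cached GPU virtual
 * address plus a byte offset into the 256-byte units expected by the
 * *_BASE / *_START registers written in this patch. */
static inline uint32_t va_to_256b_units(uint64_t gpu_address, uint64_t byte_offset)
{
	uint64_t va = gpu_address + byte_offset;

	assert((va & 0xffull) == 0); /* the hardware expects 256-byte alignment */
	return (uint32_t)(va >> 8);
}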
struct r600_surface *surf)
{
struct r600_screen *rscreen = rctx->screen;
- struct pipe_screen *screen = &rscreen->b.b;
struct r600_texture *rtex = (struct r600_texture*)surf->base.texture;
uint64_t offset;
unsigned level, pitch, slice, format, array_mode;
format = r600_translate_dbformat(surf->base.format);
assert(format != ~0);
- offset = r600_resource_va(screen, surf->base.texture);
+ offset = rtex->resource.gpu_address;
offset += rtex->surface.level[level].offset;
pitch = (rtex->surface.level[level].nblk_x / 8) - 1;
slice = (rtex->surface.level[level].nblk_x * rtex->surface.level[level].nblk_y) / 64;
stile_split = eg_tile_split(stile_split);
stencil_offset = rtex->surface.stencil_level[level].offset;
- stencil_offset += r600_resource_va(screen, surf->base.texture);
+ stencil_offset += rtex->resource.gpu_address;
surf->db_stencil_base = stencil_offset >> 8;
surf->db_stencil_info = S_028044_FORMAT(V_028044_STENCIL_8) |
/* use htile only for first level */
if (rtex->htile_buffer && !level) {
- uint64_t va = r600_resource_va(&rctx->screen->b.b, &rtex->htile_buffer->b.b);
+ uint64_t va = rtex->htile_buffer->gpu_address;
surf->db_htile_data_base = va >> 8;
surf->db_htile_surface = S_028ABC_HTILE_WIDTH(1) |
S_028ABC_HTILE_HEIGHT(1) |
rbuffer = (struct r600_resource*)vb->buffer;
assert(rbuffer);
- va = r600_resource_va(&rctx->screen->b.b, &rbuffer->b.b);
- va += vb->buffer_offset;
+ va = rbuffer->gpu_address + vb->buffer_offset;
/* fetch resources start at index 992 */
radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 8, 0) | pkt_flags);
rbuffer = (struct r600_resource*)cb->buffer;
assert(rbuffer);
- va = r600_resource_va(&rctx->screen->b.b, &rbuffer->b.b);
- va += cb->buffer_offset;
+ va = rbuffer->gpu_address + cb->buffer_offset;
if (!gs_ring_buffer) {
r600_write_context_reg_flag(cs, reg_alu_constbuf_size + buffer_index * 4,
struct r600_fetch_shader *shader = (struct r600_fetch_shader*)state->cso;
r600_write_context_reg(cs, R_0288A4_SQ_PGM_START_FS,
- (r600_resource_va(rctx->b.b.screen, &shader->buffer->b.b) + shader->offset) >> 8);
+ (shader->buffer->gpu_address + shader->offset) >> 8);
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, shader->buffer,
RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA));
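As a side note, the "After that, the NOP relocation packet must be emitted" comments in the SQ_PGM_START hunks below refer to the same pattern visible in the fetch-shader code just above. A sketch of that emission, reusing the names from the surrounding driver code (rctx, cs, shader->bo) and assuming the usual READ usage and shader-data priority; switching from r600_resource_va() to the cached gpu_address does not remove the need for this relocation.

	/* Sketch: the register now gets the cached address, but the winsys/kernel
	 * still has to be told that the shader buffer is referenced by this CS. */
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, shader->bo,
					      RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA));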
static void evergreen_emit_gs_rings(struct r600_context *rctx, struct r600_atom *a)
{
- struct pipe_screen *screen = rctx->b.b.screen;
struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
struct r600_gs_rings_state *state = (struct r600_gs_rings_state*)a;
struct r600_resource *rbuffer;
if (state->enable) {
rbuffer = (struct r600_resource*)state->esgs_ring.buffer;
r600_write_config_reg(cs, R_008C40_SQ_ESGS_RING_BASE,
- (r600_resource_va(screen, &rbuffer->b.b)) >> 8);
+ rbuffer->gpu_address >> 8);
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rbuffer,
RADEON_USAGE_READWRITE,
rbuffer = (struct r600_resource*)state->gsvs_ring.buffer;
r600_write_config_reg(cs, R_008C48_SQ_GSVS_RING_BASE,
- (r600_resource_va(screen, &rbuffer->b.b)) >> 8);
+ rbuffer->gpu_address >> 8);
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rbuffer,
RADEON_USAGE_READWRITE,
r600_store_context_reg(cb, R_02884C_SQ_PGM_EXPORTS_PS, exports_ps);
r600_store_context_reg_seq(cb, R_028840_SQ_PGM_START_PS, 2);
- r600_store_value(cb, r600_resource_va(ctx->screen, (void *)shader->bo) >> 8);
+ r600_store_value(cb, shader->bo->gpu_address >> 8);
r600_store_value(cb, /* R_028844_SQ_PGM_RESOURCES_PS */
S_028844_NUM_GPRS(rshader->bc.ngpr) |
S_028844_PRIME_CACHE_ON_DRAW(1) |
S_028890_NUM_GPRS(rshader->bc.ngpr) |
S_028890_STACK_SIZE(rshader->bc.nstack));
r600_store_context_reg(cb, R_02888C_SQ_PGM_START_ES,
- r600_resource_va(ctx->screen, (void *)shader->bo) >> 8);
+ shader->bo->gpu_address >> 8);
/* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */
}
S_028878_NUM_GPRS(rshader->bc.ngpr) |
S_028878_STACK_SIZE(rshader->bc.nstack));
r600_store_context_reg(cb, R_028874_SQ_PGM_START_GS,
- r600_resource_va(ctx->screen, (void *)shader->bo) >> 8);
+ shader->bo->gpu_address >> 8);
/* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */
}
}
r600_store_context_reg(cb, R_02885C_SQ_PGM_START_VS,
- r600_resource_va(ctx->screen, (void *)shader->bo) >> 8);
+ shader->bo->gpu_address >> 8);
/* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */
shader->pa_cl_vs_out_cntl =
bank_w = eg_bank_wh(rsrc->surface.bankw);
mt_aspect = eg_macro_tile_aspect(rsrc->surface.mtilea);
tile_split = eg_tile_split(rsrc->surface.tile_split);
- base += r600_resource_va(&rctx->screen->b.b, src);
- addr += r600_resource_va(&rctx->screen->b.b, dst);
+ base += rsrc->resource.gpu_address;
+ addr += rdst->resource.gpu_address;
} else {
/* L2T */
array_mode = evergreen_array_mode(dst_mode);
bank_w = eg_bank_wh(rdst->surface.bankw);
mt_aspect = eg_macro_tile_aspect(rdst->surface.mtilea);
tile_split = eg_tile_split(rdst->surface.tile_split);
- base += r600_resource_va(&rctx->screen->b.b, dst);
- addr += r600_resource_va(&rctx->screen->b.b, src);
+ base += rdst->resource.gpu_address;
+ addr += rsrc->resource.gpu_address;
}
size = (copy_height * pitch) / 4;
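Taken together, every hunk in this patch replaces an on-the-fly r600_resource_va() lookup with a read of the virtual address cached in struct r600_resource. Below is a minimal, self-contained sketch of that caching pattern (the struct and function names are hypothetical, not the actual Mesa code): the address is obtained once, when the buffer object backing the resource is created, and every later state-emission path simply reads the stored value.

#include <stdint.h>

/* Hypothetical stand-ins for the driver types, for illustration only. */
struct sketch_buffer { uint64_t va; };

struct sketch_resource {
	struct sketch_buffer *buf;  /* backing buffer object */
	uint64_t gpu_address;       /* VA cached once at creation time */
};

/* Cache the address when the resource is created ... */
static void sketch_resource_init(struct sketch_resource *res, struct sketch_buffer *buf)
{
	res->buf = buf;
	res->gpu_address = buf->va; /* in the driver this would come from the winsys */
}

/* ... so emission code can use it directly, as the hunks above now do. */
static uint32_t sketch_emit_base_reg(const struct sketch_resource *res, uint64_t offset)
{
	return (uint32_t)((res->gpu_address + offset) >> 8);
}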