*/
#include "r600_formats.h"
#include "r600_shader.h"
+#include "r600_query.h"
#include "evergreend.h"
#include "pipe/p_shader_tokens.h"
return r600_translate_dbformat(format) != ~0U;
}
-boolean evergreen_is_format_supported(struct pipe_screen *screen,
- enum pipe_format format,
- enum pipe_texture_target target,
- unsigned sample_count,
- unsigned usage)
+bool evergreen_is_format_supported(struct pipe_screen *screen,
+ enum pipe_format format,
+ enum pipe_texture_target target,
+ unsigned sample_count,
+ unsigned storage_sample_count,
+ unsigned usage)
{
struct r600_screen *rscreen = (struct r600_screen*)screen;
unsigned retval = 0;
if (target >= PIPE_MAX_TEXTURE_TYPES) {
R600_ERR("r600: unsupported texture type %d\n", target);
- return FALSE;
+ return false;
}
- if (!util_format_is_supported(format, usage))
- return FALSE;
+ if (MAX2(1, sample_count) != MAX2(1, storage_sample_count))
+ return false;
if (sample_count > 1) {
if (!rscreen->has_msaa)
- return FALSE;
+ return false;
switch (sample_count) {
case 2:
case 8:
break;
default:
- return FALSE;
+ return false;
}
}
S_028A0C_REPEAT_COUNT(state->line_stipple_factor) : 0;
rs->pa_cl_clip_cntl =
S_028810_DX_CLIP_SPACE_DEF(state->clip_halfz) |
- S_028810_ZCLIP_NEAR_DISABLE(!state->depth_clip) |
- S_028810_ZCLIP_FAR_DISABLE(!state->depth_clip) |
+ S_028810_ZCLIP_NEAR_DISABLE(!state->depth_clip_near) |
+ S_028810_ZCLIP_FAR_DISABLE(!state->depth_clip_far) |
S_028810_DX_LINEAR_ATTR_CLIP_ENA(1) |
S_028810_DX_RASTERIZATION_KILL(state->rasterizer_discard);
rs->multisample_enable = state->multisample;
unsigned max_aniso = rscreen->force_aniso >= 0 ? rscreen->force_aniso
: state->max_anisotropy;
unsigned max_aniso_ratio = r600_tex_aniso_filter(max_aniso);
+ float max_lod = state->max_lod;
if (!ss) {
return NULL;
}
+ /* If the min_mip_filter is NONE, then the texture has no mipmapping and
+ * MIP_FILTER will also be set to NONE. However, if more than one LOD is
+ * configured, then the texture lookup seems to fail for some specific texture
+ * formats. Forcing the number of LODs to one in this case fixes it. */
+ if (state->min_mip_filter == PIPE_TEX_MIPFILTER_NONE)
+ max_lod = state->min_lod;
+
ss->border_color_use = sampler_state_needs_border_color(state);
/* R_03C000_SQ_TEX_SAMPLER_WORD0_0 */
/* R_03C004_SQ_TEX_SAMPLER_WORD1_0 */
ss->tex_sampler_words[1] =
S_03C004_MIN_LOD(S_FIXED(CLAMP(state->min_lod, 0, 15), 8)) |
- S_03C004_MAX_LOD(S_FIXED(CLAMP(state->max_lod, 0, 15), 8));
+ S_03C004_MAX_LOD(S_FIXED(CLAMP(max_lod, 0, 15), 8));
/* R_03C008_SQ_TEX_SAMPLER_WORD2_0 */
ss->tex_sampler_words[2] =
S_03C008_LOD_BIAS(S_FIXED(CLAMP(state->lod_bias, -16, 16), 8)) |
unsigned char swizzle[4];
bool uncached;
bool force_swizzle;
+ bool size_in_bytes;
};
static void evergreen_fill_buffer_resource_words(struct r600_context *rctx,
S_030008_ENDIAN_SWAP(endian);
tex_resource_words[3] = swizzle_res | S_03000C_UNCACHED(params->uncached);
/*
- * in theory dword 4 is for number of elements, for use with resinfo,
- * but it seems to utterly fail to work, the amd gpu shader analyser
+ * dword 4 is for number of elements, for use with resinfo,
+ * although the amd gpu shader analyser
* uses a const buffer to store the element sizes for buffer txq
*/
- tex_resource_words[4] = 0;
+ tex_resource_words[4] = params->size_in_bytes ? params->size : (params->size / stride);
+
tex_resource_words[5] = tex_resource_words[6] = 0;
tex_resource_words[7] = S_03001C_TYPE(V_03001C_SQ_TEX_VTX_VALID_BUFFER);
}
view->tex_resource = &tmp->resource;
if (tmp->resource.gpu_address)
- LIST_ADDTAIL(&view->list, &rctx->texture_buffers);
+ list_addtail(&view->list, &rctx->texture_buffers);
return &view->base;
}
}
nbanks = eg_num_banks(rscreen->b.info.r600_num_banks);
- if (params->target == PIPE_TEXTURE_1D_ARRAY) {
- height = 1;
- depth = texture->array_size;
- } else if (params->target == PIPE_TEXTURE_2D_ARRAY) {
- depth = texture->array_size;
- } else if (params->target == PIPE_TEXTURE_CUBE_ARRAY)
- depth = texture->array_size / 6;
va = tmp->resource.gpu_address;
/* array type views and views into array types need to use layer offset */
dim = r600_tex_dim(tmp, params->target, texture->nr_samples);
+
+ if (dim == V_030000_SQ_TEX_DIM_1D_ARRAY) {
+ height = 1;
+ depth = texture->array_size;
+ } else if (dim == V_030000_SQ_TEX_DIM_2D_ARRAY ||
+ dim == V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA) {
+ depth = texture->array_size;
+ } else if (dim == V_030000_SQ_TEX_DIM_CUBEMAP)
+ depth = texture->array_size / 6;
+
tex_resource_words[0] = (S_030000_DIM(dim) |
S_030000_PITCH((pitch / 8) - 1) |
S_030000_TEX_WIDTH(width - 1));
static void evergreen_emit_config_state(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
struct r600_config_state *a = (struct r600_config_state*)atom;
radeon_set_config_reg_seq(cs, R_008C04_SQ_GPR_RESOURCE_MGMT_1, 3);
static void evergreen_emit_clip_state(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
struct pipe_clip_state *state = &rctx->clip_state.state;
radeon_set_context_reg_seq(cs, R_0285BC_PA_CL_UCP0_X, 6*4);
surf->cb_color_view = 0;
/* Set the buffer range the GPU will have access to: */
- util_range_add(&r600_resource(pipe_buffer)->valid_buffer_range,
+ util_range_add(pipe_buffer, &r600_resource(pipe_buffer)->valid_buffer_range,
0, pipe_buffer->width0);
}
struct r600_surface *surf;
struct r600_texture *rtex;
uint32_t i, log_samples;
-
+ uint32_t target_mask = 0;
/* Flush TC when changing the framebuffer state, because the only
* client not using TC that can change textures is the framebuffer.
* Other places don't typically have to flush TC.
if (!surf)
continue;
+ target_mask |= (0xf << (i * 4));
+
rtex = (struct r600_texture*)surf->base.texture;
r600_context_add_resource_size(ctx, state->cbufs[i]->texture);
r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
}
- if (rctx->cb_misc_state.nr_cbufs != state->nr_cbufs) {
+ if (rctx->cb_misc_state.nr_cbufs != state->nr_cbufs ||
+ rctx->cb_misc_state.bound_cbufs_target_mask != target_mask) {
+ rctx->cb_misc_state.bound_cbufs_target_mask = target_mask;
rctx->cb_misc_state.nr_cbufs = state->nr_cbufs;
r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
}
}
/* 8xMSAA */
-static uint32_t sample_locs_8x[] = {
+static const uint32_t sample_locs_8x[] = {
FILL_SREG(-1, 1, 1, 5, 3, -5, 5, 3),
FILL_SREG(-7, -1, -3, -7, 7, -3, -5, 7),
FILL_SREG(-1, 1, 1, 5, 3, -5, 5, 3),
static void evergreen_emit_msaa_state(struct r600_context *rctx, int nr_samples, int ps_iter_samples)
{
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
unsigned max_dist = 0;
switch (nr_samples) {
}
static void evergreen_emit_image_state(struct r600_context *rctx, struct r600_atom *atom,
- int immed_id_base, int res_id_base, int offset)
+ int immed_id_base, int res_id_base, int offset, uint32_t pkt_flags)
{
struct r600_image_state *state = (struct r600_image_state *)atom;
struct pipe_framebuffer_state *fb_state = &rctx->framebuffer.state;
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
struct r600_texture *rtex;
struct r600_resource *resource;
- uint32_t pkt_flags = 0;
int i;
for (i = 0; i < R600_MAX_IMAGES; i++) {
unsigned reloc, immed_reloc;
int idx = i + offset;
- idx += fb_state->nr_cbufs + (rctx->dual_src_blend ? 1 : 0);
+ if (!pkt_flags)
+ idx += fb_state->nr_cbufs + (rctx->dual_src_blend ? 1 : 0);
if (!image->base.resource)
continue;
RADEON_USAGE_READWRITE,
RADEON_PRIO_SHADER_RW_BUFFER);
- radeon_set_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + idx * 0x3C, 13);
+ if (pkt_flags)
+ radeon_compute_set_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + idx * 0x3C, 13);
+ else
+ radeon_set_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + idx * 0x3C, 13);
radeon_emit(cs, image->cb_color_base); /* R_028C60_CB_COLOR0_BASE */
radeon_emit(cs, image->cb_color_pitch); /* R_028C64_CB_COLOR0_PITCH */
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C84_CB_COLOR0_FMASK */
radeon_emit(cs, reloc);
- radeon_set_context_reg(cs, R_028B9C_CB_IMMED0_BASE + (idx * 4), resource->immed_buffer->gpu_address >> 8);
+ if (pkt_flags)
+ radeon_compute_set_context_reg(cs, R_028B9C_CB_IMMED0_BASE + (idx * 4), resource->immed_buffer->gpu_address >> 8);
+ else
+ radeon_set_context_reg(cs, R_028B9C_CB_IMMED0_BASE + (idx * 4), resource->immed_buffer->gpu_address >> 8);
+
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /**/
radeon_emit(cs, immed_reloc);
{
evergreen_emit_image_state(rctx, atom,
R600_IMAGE_IMMED_RESOURCE_OFFSET,
- R600_IMAGE_REAL_RESOURCE_OFFSET, 0);
+ R600_IMAGE_REAL_RESOURCE_OFFSET, 0, 0);
+}
+
+static void evergreen_emit_compute_image_state(struct r600_context *rctx, struct r600_atom *atom)
+{
+ evergreen_emit_image_state(rctx, atom,
+ EG_FETCH_CONSTANTS_OFFSET_CS + R600_IMAGE_IMMED_RESOURCE_OFFSET,
+ EG_FETCH_CONSTANTS_OFFSET_CS + R600_IMAGE_REAL_RESOURCE_OFFSET,
+ 0, RADEON_CP_PACKET3_COMPUTE_MODE);
}
static void evergreen_emit_fragment_buffer_state(struct r600_context *rctx, struct r600_atom *atom)
int offset = util_bitcount(rctx->fragment_images.enabled_mask);
evergreen_emit_image_state(rctx, atom,
R600_IMAGE_IMMED_RESOURCE_OFFSET,
- R600_IMAGE_REAL_RESOURCE_OFFSET, offset);
+ R600_IMAGE_REAL_RESOURCE_OFFSET, offset, 0);
+}
+
+static void evergreen_emit_compute_buffer_state(struct r600_context *rctx, struct r600_atom *atom)
+{
+ int offset = util_bitcount(rctx->compute_images.enabled_mask);
+ evergreen_emit_image_state(rctx, atom,
+ EG_FETCH_CONSTANTS_OFFSET_CS + R600_IMAGE_IMMED_RESOURCE_OFFSET,
+ EG_FETCH_CONSTANTS_OFFSET_CS + R600_IMAGE_REAL_RESOURCE_OFFSET,
+ offset, RADEON_CP_PACKET3_COMPUTE_MODE);
}
static void evergreen_emit_framebuffer_state(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
struct pipe_framebuffer_state *state = &rctx->framebuffer.state;
unsigned nr_cbufs = state->nr_cbufs;
unsigned i, tl, br;
if (tex->cmask_buffer && tex->cmask_buffer != &tex->resource) {
cmask_reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
tex->cmask_buffer, RADEON_USAGE_READWRITE,
- RADEON_PRIO_CMASK);
+ RADEON_PRIO_SEPARATE_META);
} else {
cmask_reloc = reloc;
}
if (rctx->b.chip_class == EVERGREEN) {
evergreen_emit_msaa_state(rctx, rctx->framebuffer.nr_samples, rctx->ps_iter_samples);
} else {
- unsigned sc_mode_cntl_1 =
- EG_S_028A4C_FORCE_EOV_CNTDWN_ENABLE(1) |
- EG_S_028A4C_FORCE_EOV_REZ_ENABLE(1);
-
- if (rctx->framebuffer.nr_samples > 1)
- cayman_emit_msaa_sample_locs(cs, rctx->framebuffer.nr_samples);
- cayman_emit_msaa_config(cs, rctx->framebuffer.nr_samples,
- rctx->ps_iter_samples, 0, sc_mode_cntl_1);
+ cayman_emit_msaa_state(cs, rctx->framebuffer.nr_samples,
+ rctx->ps_iter_samples, 0);
}
}
static void evergreen_emit_polygon_offset(struct r600_context *rctx, struct r600_atom *a)
{
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
struct r600_poly_offset_state *state = (struct r600_poly_offset_state*)a;
float offset_units = state->offset_units;
float offset_scale = state->offset_scale;
pa_su_poly_offset_db_fmt_cntl);
}
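+/* Build the CB_TARGET_MASK bits that enable writes to the image and buffer
+ * RATs: image RATs come first, buffer RATs follow them, and the whole block
+ * sits after the bound colorbuffers (hence the final shift by nr_cbufs * 4). */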
+uint32_t evergreen_construct_rat_mask(struct r600_context *rctx, struct r600_cb_misc_state *a,
+ unsigned nr_cbufs)
+{
+ unsigned base_mask = 0;
+ unsigned dirty_mask = a->image_rat_enabled_mask;
+ while (dirty_mask) {
+ unsigned idx = u_bit_scan(&dirty_mask);
+ base_mask |= (0xf << (idx * 4));
+ }
+ unsigned offset = util_last_bit(a->image_rat_enabled_mask);
+ dirty_mask = a->buffer_rat_enabled_mask;
+ while (dirty_mask) {
+ unsigned idx = u_bit_scan(&dirty_mask);
+ base_mask |= (0xf << (idx + offset) * 4);
+ }
+ return base_mask << (nr_cbufs * 4);
+}
+
static void evergreen_emit_cb_misc_state(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
struct r600_cb_misc_state *a = (struct r600_cb_misc_state*)atom;
- unsigned fb_colormask = (1ULL << ((unsigned)a->nr_cbufs * 4)) - 1;
- unsigned ps_colormask = (1ULL << ((unsigned)a->nr_ps_color_outputs * 4)) - 1;
- unsigned rat_colormask = ((1ULL << ((unsigned)(a->nr_image_rats + a->nr_buffer_rats) * 4)) - 1) << (a->nr_cbufs * 4);
+ unsigned fb_colormask = a->bound_cbufs_target_mask;
+ unsigned ps_colormask = a->ps_color_export_mask;
+ unsigned rat_colormask = evergreen_construct_rat_mask(rctx, a, a->nr_cbufs);
radeon_set_context_reg_seq(cs, R_028238_CB_TARGET_MASK, 2);
radeon_emit(cs, (a->blend_colormask & fb_colormask) | rat_colormask); /* R_028238_CB_TARGET_MASK */
/* This must match the used export instructions exactly.
static void evergreen_emit_db_state(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
struct r600_db_state *a = (struct r600_db_state*)atom;
if (a->rsurf && a->rsurf->db_htile_surface) {
radeon_set_context_reg(cs, R_028AC8_DB_PRELOAD_CONTROL, a->rsurf->db_preload_control);
radeon_set_context_reg(cs, R_028014_DB_HTILE_DATA_BASE, a->rsurf->db_htile_data_base);
reloc_idx = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, &rtex->resource,
- RADEON_USAGE_READWRITE, RADEON_PRIO_HTILE);
+ RADEON_USAGE_READWRITE, RADEON_PRIO_SEPARATE_META);
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, reloc_idx);
} else {
static void evergreen_emit_db_misc_state(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
struct r600_db_misc_state *a = (struct r600_db_misc_state*)atom;
unsigned db_render_control = 0;
unsigned db_count_control = 0;
unsigned resource_offset,
unsigned pkt_flags)
{
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
uint32_t dirty_mask = state->dirty_mask;
while (dirty_mask) {
unsigned reg_alu_const_cache,
unsigned pkt_flags)
{
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
uint32_t dirty_mask = state->dirty_mask;
while (dirty_mask) {
va = rbuffer->gpu_address + cb->buffer_offset;
- if (!gs_ring_buffer) {
+ if (buffer_index < R600_MAX_HW_CONST_BUFFERS) {
radeon_set_context_reg_flag(cs, reg_alu_constbuf_size + buffer_index * 4,
DIV_ROUND_UP(cb->buffer_size, 256), pkt_flags);
radeon_set_context_reg_flag(cs, reg_alu_const_cache + buffer_index * 4, va >> 8,
pkt_flags);
+ radeon_emit(cs, PKT3(PKT3_NOP, 0, 0) | pkt_flags);
+ radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
+ RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER));
}
- radeon_emit(cs, PKT3(PKT3_NOP, 0, 0) | pkt_flags);
- radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
- RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER));
-
radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 8, 0) | pkt_flags);
radeon_emit(cs, (buffer_id_base + buffer_index) * 8);
radeon_emit(cs, va); /* RESOURCEi_WORD0 */
- radeon_emit(cs, rbuffer->b.b.width0 - cb->buffer_offset - 1); /* RESOURCEi_WORD1 */
+ radeon_emit(cs, cb->buffer_size - 1); /* RESOURCEi_WORD1 */
radeon_emit(cs, /* RESOURCEi_WORD2 */
S_030008_ENDIAN_SWAP(gs_ring_buffer ? ENDIAN_NONE : r600_endian_swap(32)) |
S_030008_STRIDE(gs_ring_buffer ? 4 : 16) |
0);
}
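+/* Program the per-stage scratch (temp) rings for every hardware shader stage
+ * that reported a scratch space requirement. */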
+void evergreen_setup_scratch_buffers(struct r600_context *rctx)
+{
+ static const struct {
+ unsigned ring_base;
+ unsigned item_size;
+ unsigned ring_size;
+ } regs[EG_NUM_HW_STAGES] = {
+ [R600_HW_STAGE_PS] = { R_008C68_SQ_PSTMP_RING_BASE, R_028914_SQ_PSTMP_RING_ITEMSIZE, R_008C6C_SQ_PSTMP_RING_SIZE },
+ [R600_HW_STAGE_VS] = { R_008C60_SQ_VSTMP_RING_BASE, R_028910_SQ_VSTMP_RING_ITEMSIZE, R_008C64_SQ_VSTMP_RING_SIZE },
+ [R600_HW_STAGE_GS] = { R_008C58_SQ_GSTMP_RING_BASE, R_02890C_SQ_GSTMP_RING_ITEMSIZE, R_008C5C_SQ_GSTMP_RING_SIZE },
+ [R600_HW_STAGE_ES] = { R_008C50_SQ_ESTMP_RING_BASE, R_028908_SQ_ESTMP_RING_ITEMSIZE, R_008C54_SQ_ESTMP_RING_SIZE },
+ [EG_HW_STAGE_LS] = { R_008E10_SQ_LSTMP_RING_BASE, R_028830_SQ_LSTMP_RING_ITEMSIZE, R_008E14_SQ_LSTMP_RING_SIZE },
+ [EG_HW_STAGE_HS] = { R_008E18_SQ_HSTMP_RING_BASE, R_028834_SQ_HSTMP_RING_ITEMSIZE, R_008E1C_SQ_HSTMP_RING_SIZE }
+ };
+
+ for (unsigned i = 0; i < EG_NUM_HW_STAGES; i++) {
+ struct r600_pipe_shader *stage = rctx->hw_shader_stages[i].shader;
+
+ if (stage && unlikely(stage->scratch_space_needed)) {
+ r600_setup_scratch_area_for_shader(rctx, stage,
+ &rctx->scratch_buffers[i], regs[i].ring_base, regs[i].item_size, regs[i].ring_size);
+ }
+ }
+}
+
static void evergreen_emit_sampler_views(struct r600_context *rctx,
struct r600_samplerview_state *state,
unsigned resource_id_base, unsigned pkt_flags)
{
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
uint32_t dirty_mask = state->dirty_mask;
while (dirty_mask) {
static void evergreen_emit_tes_sampler_views(struct r600_context *rctx, struct r600_atom *atom)
{
+ if (!rctx->tes_shader)
+ return;
evergreen_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_TESS_EVAL].views,
EG_FETCH_CONSTANTS_OFFSET_VS + R600_MAX_CONST_BUFFERS, 0);
}
static void evergreen_emit_cs_sampler_views(struct r600_context *rctx, struct r600_atom *atom)
{
evergreen_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_COMPUTE].views,
- EG_FETCH_CONSTANTS_OFFSET_CS + 2, RADEON_CP_PACKET3_COMPUTE_MODE);
+ EG_FETCH_CONSTANTS_OFFSET_CS + R600_MAX_CONST_BUFFERS, RADEON_CP_PACKET3_COMPUTE_MODE);
+}
+
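+/* Border colors for pure-integer formats are converted to the equivalent
+ * normalized float, stencil-only formats keep just the stencil value scaled
+ * to [0,1], and everything else is passed through unchanged. */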
+static void evergreen_convert_border_color(union pipe_color_union *in,
+ union pipe_color_union *out,
+ enum pipe_format format)
+{
+ if (util_format_is_pure_integer(format) &&
+ !util_format_is_depth_or_stencil(format)) {
+ const struct util_format_description *d = util_format_description(format);
+
+ for (int i = 0; i < d->nr_channels; ++i) {
+ int cs = d->channel[i].size;
+ if (d->channel[i].type == UTIL_FORMAT_TYPE_SIGNED)
+ out->f[i] = (double)(in->i[i]) / ((1ul << (cs - 1)) - 1);
+ else if (d->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED)
+ out->f[i] = (double)(in->ui[i]) / ((1ul << cs) - 1);
+ else
+ out->f[i] = 0;
+ }
+
+ } else {
+ switch (format) {
+ case PIPE_FORMAT_X24S8_UINT:
+ case PIPE_FORMAT_X32_S8X24_UINT:
+ out->f[0] = (double)(in->ui[0]) / 255.0;
+ out->f[1] = out->f[2] = out->f[3] = 0.0f;
+ break;
+ default:
+ memcpy(out->f, in->f, 4 * sizeof(float));
+ }
+ }
}
static void evergreen_emit_sampler_states(struct r600_context *rctx,
unsigned border_index_reg,
unsigned pkt_flags)
{
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
uint32_t dirty_mask = texinfo->states.dirty_mask;
+ union pipe_color_union border_color = {{0,0,0,1}};
+ union pipe_color_union *border_color_ptr = &border_color;
while (dirty_mask) {
struct r600_pipe_sampler_state *rstate;
rstate = texinfo->states.states[i];
assert(rstate);
+ if (rstate->border_color_use) {
+ struct r600_pipe_sampler_view *rview = texinfo->views.views[i];
+ if (rview) {
+ evergreen_convert_border_color(&rstate->border_color,
+ &border_color, rview->base.format);
+ border_color_ptr = &border_color;
+ } else {
+ border_color_ptr = &rstate->border_color;
+ }
+ }
+
radeon_emit(cs, PKT3(PKT3_SET_SAMPLER, 3, 0) | pkt_flags);
radeon_emit(cs, (resource_id_base + i) * 3);
radeon_emit_array(cs, rstate->tex_sampler_words, 3);
if (rstate->border_color_use) {
radeon_set_config_reg_seq(cs, border_index_reg, 5);
radeon_emit(cs, i);
- radeon_emit_array(cs, rstate->border_color.ui, 4);
+ radeon_emit_array(cs, border_color_ptr->ui, 4);
}
}
texinfo->states.dirty_mask = 0;
static void evergreen_emit_tes_sampler_states(struct r600_context *rctx, struct r600_atom *atom)
{
+ if (!rctx->tes_shader)
+ return;
evergreen_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_TESS_EVAL], 18,
R_00A414_TD_VS_SAMPLER0_BORDER_INDEX, 0);
}
static void cayman_emit_sample_mask(struct r600_context *rctx, struct r600_atom *a)
{
struct r600_sample_mask *s = (struct r600_sample_mask*)a;
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
uint16_t mask = s->sample_mask;
radeon_set_context_reg_seq(cs, CM_R_028C38_PA_SC_AA_MASK_X0Y0_X1Y0, 2);
static void evergreen_emit_vertex_fetch_shader(struct r600_context *rctx, struct r600_atom *a)
{
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
struct r600_cso_state *state = (struct r600_cso_state*)a;
struct r600_fetch_shader *shader = (struct r600_fetch_shader*)state->cso;
static void evergreen_emit_shader_stages(struct r600_context *rctx, struct r600_atom *a)
{
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
struct r600_shader_stages_state *state = (struct r600_shader_stages_state*)a;
uint32_t v = 0, v2 = 0, primid = 0, tf_param = 0;
static void evergreen_emit_gs_rings(struct r600_context *rctx, struct r600_atom *a)
{
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
struct r600_gs_rings_state *state = (struct r600_gs_rings_state*)a;
struct r600_resource *rbuffer;
spi_baryc_cntl |= spi_baryc_enable_bit[k];
have_perspective |= k < 3;
have_linear |= !(k < 3);
+ if (rshader->input[i].uses_interpolate_at_centroid) {
+ k = eg_get_interpolator_index(
+ rshader->input[i].interpolate,
+ TGSI_INTERPOLATE_LOC_CENTROID);
+ spi_baryc_cntl |= spi_baryc_enable_bit[k];
+ }
}
}
exports_ps |= 1;
}
- num_cout = rshader->nr_ps_color_exports;
+ num_cout = rshader->ps_export_highest + 1;
exports_ps |= S_02884C_EXPORT_COLORS(num_cout);
if (!exports_ps) {
exports_ps = 2;
}
shader->nr_ps_color_outputs = num_cout;
+ shader->ps_color_export_mask = rshader->ps_color_export_mask;
if (ninterp == 0) {
ninterp = 1;
have_perspective = TRUE;
unsigned pitch,
unsigned bpp)
{
- struct radeon_winsys_cs *cs = rctx->b.dma.cs;
+ struct radeon_cmdbuf *cs = rctx->b.dma.cs;
struct r600_texture *rsrc = (struct r600_texture*)src;
struct r600_texture *rdst = (struct r600_texture*)dst;
unsigned array_mode, lbpp, pitch_tile_max, slice_tile_max, size;
size = (cheight * pitch) / 4;
/* emit reloc before writing cs so that cs is always in consistent state */
radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, &rsrc->resource,
- RADEON_USAGE_READ, RADEON_PRIO_SDMA_TEXTURE);
+ RADEON_USAGE_READ, 0);
radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, &rdst->resource,
- RADEON_USAGE_WRITE, RADEON_PRIO_SDMA_TEXTURE);
+ RADEON_USAGE_WRITE, 0);
radeon_emit(cs, DMA_PACKET(DMA_PACKET_COPY, sub_cmd, size));
radeon_emit(cs, base >> 8);
radeon_emit(cs, (detile << 31) | (array_mode << 27) |
goto fallback;
}
+ if (rctx->cmd_buf_is_compute) {
+ rctx->b.gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
+ rctx->cmd_buf_is_compute = false;
+ }
+
if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
evergreen_dma_copy_buffer(rctx, dst, src, dst_x, src_box->x, src_box->width);
return;
memcpy(rctx->tess_state, default_outer_level, sizeof(float) * 4);
memcpy(rctx->tess_state+4, default_inner_level, sizeof(float) * 2);
- rctx->tess_state_dirty = true;
+ rctx->driver_consts[PIPE_SHADER_TESS_CTRL].tcs_default_levels_dirty = true;
}
static void evergreen_setup_immed_buffer(struct r600_context *rctx,
{
struct r600_context *rctx = (struct r600_context *)ctx;
struct r600_atomic_buffer_state *astate;
- int i, idx;
+ unsigned i, idx;
astate = &rctx->atomic_buffer_state;
if (!buffers || !buffers[idx].buffer) {
pipe_resource_reference(&abuf->buffer, NULL);
- astate->enabled_mask &= ~(1 << i);
continue;
}
buf = &buffers[idx];
pipe_resource_reference(&abuf->buffer, buf->buffer);
abuf->buffer_offset = buf->buffer_offset;
abuf->buffer_size = buf->buffer_size;
- astate->enabled_mask |= (1 << i);
}
}
static void evergreen_set_shader_buffers(struct pipe_context *ctx,
enum pipe_shader_type shader, unsigned start_slot,
unsigned count,
- const struct pipe_shader_buffer *buffers)
+ const struct pipe_shader_buffer *buffers,
+ unsigned writable_bitmask)
{
struct r600_context *rctx = (struct r600_context *)ctx;
struct r600_image_state *istate = NULL;
struct r600_tex_color_info color;
struct eg_buf_res_params buf_params;
struct r600_resource *resource;
- int i, idx;
+ unsigned i, idx;
unsigned old_mask;
- if (shader != PIPE_SHADER_FRAGMENT && count == 0)
+ if (shader != PIPE_SHADER_FRAGMENT &&
+ shader != PIPE_SHADER_COMPUTE && count == 0)
return;
- assert(shader == PIPE_SHADER_FRAGMENT);
- istate = &rctx->fragment_buffers;
+ if (shader == PIPE_SHADER_FRAGMENT)
+ istate = &rctx->fragment_buffers;
+ else if (shader == PIPE_SHADER_COMPUTE)
+ istate = &rctx->compute_buffers;
old_mask = istate->enabled_mask;
for (i = start_slot, idx = 0; i < start_slot + count; i++, idx++) {
buf_params.swizzle[3] = PIPE_SWIZZLE_W;
buf_params.force_swizzle = true;
buf_params.uncached = 1;
+ buf_params.size_in_bytes = true;
evergreen_fill_buffer_resource_words(rctx, &resource->b.b,
&buf_params,
&rview->skip_mip_address_reloc,
if (old_mask != istate->enabled_mask)
r600_mark_atom_dirty(rctx, &rctx->framebuffer.atom);
- if (rctx->cb_misc_state.nr_buffer_rats != util_bitcount(istate->enabled_mask)) {
- rctx->cb_misc_state.nr_buffer_rats = util_bitcount(istate->enabled_mask);
+ /* record the enabled buffer RATs so the CB target mask can be rebuilt */
+ if (rctx->cb_misc_state.buffer_rat_enabled_mask != istate->enabled_mask) {
+ rctx->cb_misc_state.buffer_rat_enabled_mask = istate->enabled_mask;
r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
}
- r600_mark_atom_dirty(rctx, &istate->atom);
+ if (shader == PIPE_SHADER_FRAGMENT)
+ r600_mark_atom_dirty(rctx, &istate->atom);
}
static void evergreen_set_shader_images(struct pipe_context *ctx,
const struct pipe_image_view *images)
{
struct r600_context *rctx = (struct r600_context *)ctx;
- int i;
+ unsigned i;
struct r600_image_view *rview;
struct pipe_resource *image;
struct r600_resource *resource;
unsigned old_mask;
struct r600_image_state *istate = NULL;
int idx;
- if (shader != PIPE_SHADER_FRAGMENT && count == 0)
+ if (shader != PIPE_SHADER_FRAGMENT && shader != PIPE_SHADER_COMPUTE && count == 0)
return;
- istate = &rctx->fragment_images;
+ if (shader == PIPE_SHADER_FRAGMENT)
+ istate = &rctx->fragment_images;
+ else if (shader == PIPE_SHADER_COMPUTE)
+ istate = &rctx->compute_images;
+
+ assert (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE);
- assert (shader == PIPE_SHADER_FRAGMENT);
old_mask = istate->enabled_mask;
for (i = start_slot, idx = 0; i < start_slot + count; i++, idx++) {
unsigned res_type;
if (!images || !images[idx].resource) {
pipe_resource_reference((struct pipe_resource **)&rview->base.resource, NULL);
istate->enabled_mask &= ~(1 << i);
+ istate->compressed_colortex_mask &= ~(1 << i);
+ istate->compressed_depthtex_mask &= ~(1 << i);
continue;
}
if (old_mask != istate->enabled_mask)
r600_mark_atom_dirty(rctx, &rctx->framebuffer.atom);
- if (rctx->cb_misc_state.nr_image_rats != util_bitcount(istate->enabled_mask)) {
- rctx->cb_misc_state.nr_image_rats = util_bitcount(istate->enabled_mask);
+ if (rctx->cb_misc_state.image_rat_enabled_mask != istate->enabled_mask) {
+ rctx->cb_misc_state.image_rat_enabled_mask = istate->enabled_mask;
r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
}
- r600_mark_atom_dirty(rctx, &istate->atom);
+ if (shader == PIPE_SHADER_FRAGMENT)
+ r600_mark_atom_dirty(rctx, &istate->atom);
+}
+
+static void evergreen_get_pipe_constant_buffer(struct r600_context *rctx,
+ enum pipe_shader_type shader, uint slot,
+ struct pipe_constant_buffer *cbuf)
+{
+ struct r600_constbuf_state *state = &rctx->constbuf_state[shader];
+ struct pipe_constant_buffer *cb;
+ cbuf->user_buffer = NULL;
+
+ cb = &state->cb[slot];
+
+ cbuf->buffer_size = cb->buffer_size;
+ pipe_resource_reference(&cbuf->buffer, cb->buffer);
+}
+
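+/* Reconstruct pipe_shader_buffer bindings from the programmed resource words:
+ * the buffer offset is recovered by subtracting the resource GPU address from
+ * the base address in words 0/2, and the size comes from word 1. */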
+static void evergreen_get_shader_buffers(struct r600_context *rctx,
+ enum pipe_shader_type shader,
+ uint start_slot, uint count,
+ struct pipe_shader_buffer *sbuf)
+{
+ assert(shader == PIPE_SHADER_COMPUTE);
+ int idx, i;
+ struct r600_image_state *istate = &rctx->compute_buffers;
+ struct r600_image_view *rview;
+
+ for (i = start_slot, idx = 0; i < start_slot + count; i++, idx++) {
+
+ rview = &istate->views[i];
+
+ pipe_resource_reference(&sbuf[idx].buffer, rview->base.resource);
+ if (rview->base.resource) {
+ uint64_t rview_va = ((struct r600_resource *)rview->base.resource)->gpu_address;
+
+ uint64_t prog_va = rview->resource_words[0];
+
+ prog_va += ((uint64_t)G_030008_BASE_ADDRESS_HI(rview->resource_words[2])) << 32;
+ prog_va -= rview_va;
+
+ sbuf[idx].buffer_offset = prog_va & 0xffffffff;
+ sbuf[idx].buffer_size = rview->resource_words[1] + 1;
+ } else {
+ sbuf[idx].buffer_offset = 0;
+ sbuf[idx].buffer_size = 0;
+ }
+ }
+}
+
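+/* Snapshot the compute bindings (shader, const buffer 0 and the first SSBOs)
+ * that the query-buffer-object resolve path clobbers, so they can be restored. */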
+static void evergreen_save_qbo_state(struct pipe_context *ctx, struct r600_qbo_state *st)
+{
+ struct r600_context *rctx = (struct r600_context *)ctx;
+ st->saved_compute = rctx->cs_shader_state.shader;
+
+ /* save constant buffer 0 */
+ evergreen_get_pipe_constant_buffer(rctx, PIPE_SHADER_COMPUTE, 0, &st->saved_const0);
+ /* save the first 3 ssbos */
+ evergreen_get_shader_buffers(rctx, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo);
}
+
void evergreen_init_state_functions(struct r600_context *rctx)
{
unsigned id = 1;
}
r600_init_atom(rctx, &rctx->framebuffer.atom, id++, evergreen_emit_framebuffer_state, 0);
r600_init_atom(rctx, &rctx->fragment_images.atom, id++, evergreen_emit_fragment_image_state, 0);
+ r600_init_atom(rctx, &rctx->compute_images.atom, id++, evergreen_emit_compute_image_state, 0);
r600_init_atom(rctx, &rctx->fragment_buffers.atom, id++, evergreen_emit_fragment_buffer_state, 0);
+ r600_init_atom(rctx, &rctx->compute_buffers.atom, id++, evergreen_emit_compute_buffer_state, 0);
/* shader const */
r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_VERTEX].atom, id++, evergreen_emit_vs_constant_buffers, 0);
r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_GEOMETRY].atom, id++, evergreen_emit_gs_constant_buffers, 0);
else
rctx->b.b.get_sample_position = cayman_get_sample_position;
rctx->b.dma_copy = evergreen_dma_copy;
+ rctx->b.save_qbo_state = evergreen_save_qbo_state;
evergreen_init_compute_state_functions(rctx);
}
unsigned input_vertex_size, output_vertex_size;
unsigned input_patch_size, pervertex_output_patch_size, output_patch_size;
unsigned output_patch0_offset, perpatch_output_offset, lds_size;
- uint32_t values[16];
+ uint32_t values[8];
unsigned num_waves;
unsigned num_pipes = rctx->screen->b.info.r600_max_quad_pipes;
unsigned wave_divisor = (16 * num_pipes);
if (rctx->lds_alloc != 0 &&
rctx->last_ls == ls &&
- !rctx->tess_state_dirty &&
rctx->last_num_tcs_input_cp == num_tcs_input_cp &&
rctx->last_tcs == tcs)
return;
rctx->lds_alloc = (lds_size | (num_waves << 14));
- memcpy(&values[8], rctx->tess_state, 6 * sizeof(float));
- values[14] = 0;
- values[15] = 0;
-
- rctx->tess_state_dirty = false;
rctx->last_ls = ls;
rctx->last_tcs = tcs;
rctx->last_num_tcs_input_cp = num_tcs_input_cp;
constbuf.user_buffer = values;
- constbuf.buffer_size = 16 * 4;
+ constbuf.buffer_size = 8 * 4;
rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_VERTEX,
R600_LDS_INFO_CONST_BUFFER, &constbuf);
}
void evergreen_set_ls_hs_config(struct r600_context *rctx,
- struct radeon_winsys_cs *cs,
+ struct radeon_cmdbuf *cs,
uint32_t ls_hs_config)
{
radeon_set_context_reg(cs, R_028B58_VGT_LS_HS_CONFIG, ls_hs_config);
}
void evergreen_set_lds_alloc(struct r600_context *rctx,
- struct radeon_winsys_cs *cs,
+ struct radeon_cmdbuf *cs,
uint32_t lds_alloc)
{
radeon_set_context_reg(cs, R_0288E8_SQ_LDS_ALLOC, lds_alloc);
void eg_trace_emit(struct r600_context *rctx)
{
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
unsigned reloc;
if (rctx->b.chip_class < EVERGREEN)
struct r600_resource *resource,
uint32_t pkt_flags)
{
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
unsigned reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
resource,
RADEON_USAGE_READ,
struct r600_resource *resource,
uint32_t pkt_flags)
{
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
uint32_t event = EVENT_TYPE_PS_DONE;
uint32_t base_reg_0 = R_02872C_GDS_APPEND_COUNT_0;
uint32_t reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
uint64_t dst_offset = resource->gpu_address + (atomic->start * 4);
uint32_t reg_val = (base_reg_0 + atomic->hw_idx * 4) >> 2;
+ if (pkt_flags == RADEON_CP_PACKET3_COMPUTE_MODE)
+ event = EVENT_TYPE_CS_DONE;
+
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOS, 3, 0) | pkt_flags);
radeon_emit(cs, EVENT_TYPE(event) | EVENT_INDEX(6));
radeon_emit(cs, (dst_offset) & 0xffffffff);
struct r600_resource *resource,
uint32_t pkt_flags)
{
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
uint32_t event = EVENT_TYPE_PS_DONE;
uint32_t reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
resource,
RADEON_PRIO_SHADER_RW_BUFFER);
uint64_t dst_offset = resource->gpu_address + (atomic->start * 4);
+ if (pkt_flags == RADEON_CP_PACKET3_COMPUTE_MODE)
+ event = EVENT_TYPE_CS_DONE;
+
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOS, 3, 0) | pkt_flags);
radeon_emit(cs, EVENT_TYPE(event) | EVENT_INDEX(6));
radeon_emit(cs, (dst_offset) & 0xffffffff);
struct r600_resource *resource,
uint32_t pkt_flags)
{
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
unsigned reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
resource,
RADEON_USAGE_READ,
radeon_emit(cs, reloc);
}
-bool evergreen_emit_atomic_buffer_setup(struct r600_context *rctx,
- struct r600_shader_atomic *combined_atomics,
- uint8_t *atomic_used_mask_p)
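+/* Collect the atomic counters referenced by the bound shader stages (or, for
+ * a compute dispatch, just the CS shader) into combined_atomics and return the
+ * used-counter mask through atomic_used_mask_p. */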
+void evergreen_emit_atomic_buffer_setup_count(struct r600_context *rctx,
+ struct r600_pipe_shader *cs_shader,
+ struct r600_shader_atomic *combined_atomics,
+ uint8_t *atomic_used_mask_p)
{
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
- struct r600_atomic_buffer_state *astate = &rctx->atomic_buffer_state;
- unsigned pkt_flags = 0;
uint8_t atomic_used_mask = 0;
int i, j, k;
+ bool is_compute = cs_shader != NULL;
- for (i = 0; i < EG_NUM_HW_STAGES; i++) {
+ for (i = 0; i < (is_compute ? 1 : EG_NUM_HW_STAGES); i++) {
uint8_t num_atomic_stage;
struct r600_pipe_shader *pshader;
- pshader = rctx->hw_shader_stages[i].shader;
+ if (is_compute)
+ pshader = cs_shader;
+ else
+ pshader = rctx->hw_shader_stages[i].shader;
if (!pshader)
continue;
}
}
}
+ *atomic_used_mask_p = atomic_used_mask;
+}
+
+void evergreen_emit_atomic_buffer_setup(struct r600_context *rctx,
+ bool is_compute,
+ struct r600_shader_atomic *combined_atomics,
+ uint8_t atomic_used_mask)
+{
+ struct r600_atomic_buffer_state *astate = &rctx->atomic_buffer_state;
+ unsigned pkt_flags = 0;
+ uint32_t mask;
+
+ if (is_compute)
+ pkt_flags = RADEON_CP_PACKET3_COMPUTE_MODE;
+
+ mask = atomic_used_mask;
+ if (!mask)
+ return;
- uint32_t mask = atomic_used_mask;
while (mask) {
unsigned atomic_index = u_bit_scan(&mask);
struct r600_shader_atomic *atomic = &combined_atomics[atomic_index];
else
evergreen_emit_set_append_cnt(rctx, atomic, resource, pkt_flags);
}
- *atomic_used_mask_p = atomic_used_mask;
- return true;
}
void evergreen_emit_atomic_buffer_save(struct r600_context *rctx,
+ bool is_compute,
struct r600_shader_atomic *combined_atomics,
uint8_t *atomic_used_mask_p)
{
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
struct r600_atomic_buffer_state *astate = &rctx->atomic_buffer_state;
uint32_t pkt_flags = 0;
uint32_t event = EVENT_TYPE_PS_DONE;
- uint32_t mask = astate->enabled_mask;
+ uint32_t mask;
uint64_t dst_offset;
unsigned reloc;
+ if (is_compute)
+ pkt_flags = RADEON_CP_PACKET3_COMPUTE_MODE;
+
mask = *atomic_used_mask_p;
if (!mask)
return;
evergreen_emit_event_write_eos(rctx, atomic, resource, pkt_flags);
}
+ if (pkt_flags == RADEON_CP_PACKET3_COMPUTE_MODE)
+ event = EVENT_TYPE_CS_DONE;
+
++rctx->append_fence_id;
reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
r600_resource(rctx->append_fence),