return FALSE;
/* R11G11B10 is broken on R6xx. */
- if (rscreen->chip_class == R600 &&
+ if (rscreen->b.chip_class == R600 &&
format == PIPE_FORMAT_R11G11B10_FLOAT)
return FALSE;
}
}
- if ((usage & PIPE_BIND_SAMPLER_VIEW) &&
- r600_is_sampler_format_supported(screen, format)) {
- retval |= PIPE_BIND_SAMPLER_VIEW;
+ if (usage & PIPE_BIND_SAMPLER_VIEW) {
+ if (target == PIPE_BUFFER) {
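+ /* Buffer views are set up with vertex data formats (see the buffer sampler-view code), so check vertex format support here. */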
+ if (r600_is_vertex_format_supported(format))
+ retval |= PIPE_BIND_SAMPLER_VIEW;
+ } else {
+ if (r600_is_sampler_format_supported(screen, format))
+ retval |= PIPE_BIND_SAMPLER_VIEW;
+ }
}
if ((usage & (PIPE_BIND_RENDER_TARGET |
static void r600_emit_polygon_offset(struct r600_context *rctx, struct r600_atom *a)
{
- struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
struct r600_poly_offset_state *state = (struct r600_poly_offset_state*)a;
float offset_units = state->offset_units;
float offset_scale = state->offset_scale;
}
r600_write_context_reg_seq(cs, R_028E00_PA_SU_POLY_OFFSET_FRONT_SCALE, 4);
- r600_write_value(cs, fui(offset_scale));
- r600_write_value(cs, fui(offset_units));
- r600_write_value(cs, fui(offset_scale));
- r600_write_value(cs, fui(offset_units));
+ radeon_emit(cs, fui(offset_scale));
+ radeon_emit(cs, fui(offset_units));
+ radeon_emit(cs, fui(offset_scale));
+ radeon_emit(cs, fui(offset_units));
}
static uint32_t r600_get_blend_control(const struct pipe_blend_state *state, unsigned i)
r600_init_command_buffer(&blend->buffer_no_blend, 20);
/* R600 does not support per-MRT blends */
- if (rctx->family > CHIP_R600)
+ if (rctx->b.family > CHIP_R600)
color_control |= S_028808_PER_MRT_BLEND(1);
if (state->logicop_enable) {
r600_store_context_reg(&blend->buffer, R_028804_CB_BLEND_CONTROL,
r600_get_blend_control(state, 0));
- if (rctx->family > CHIP_R600) {
+ if (rctx->b.family > CHIP_R600) {
r600_store_context_reg_seq(&blend->buffer, R_028780_CB_BLEND0_CONTROL, 8);
for (int i = 0; i < 8; i++) {
r600_store_value(&blend->buffer, r600_get_blend_control(state, i));
sc_mode_cntl = S_028A4C_MSAA_ENABLE(state->multisample) |
S_028A4C_LINE_STIPPLE_ENABLE(state->line_stipple_enable) |
S_028A4C_FORCE_EOV_CNTDWN_ENABLE(1);
- if (rctx->chip_class >= R700) {
+ if (rctx->b.chip_class >= R700) {
sc_mode_cntl |= S_028A4C_FORCE_EOV_REZ_ENABLE(1) |
S_028A4C_R700_ZMM_LINE_OFFSET(1) |
S_028A4C_R700_VPORT_SCISSOR_ENABLE(state->scissor);
r600_store_context_reg(&rs->buffer, R_0286D4_SPI_INTERP_CONTROL_0, spi_interp);
r600_store_context_reg(&rs->buffer, R_028A4C_PA_SC_MODE_CNTL, sc_mode_cntl);
r600_store_context_reg(&rs->buffer, R_028C08_PA_SU_VTX_CNTL,
- S_028C08_PIX_CENTER_HALF(state->gl_rasterization_rules) |
+ S_028C08_PIX_CENTER_HALF(state->half_pixel_center) |
S_028C08_QUANT_MODE(V_028C08_X_1_256TH));
r600_store_context_reg(&rs->buffer, R_028DFC_PA_SU_POLY_OFFSET_CLAMP, fui(state->offset_clamp));
r600_store_context_reg(&rs->buffer, R_028814_PA_SU_SC_MODE_CNTL,
uint64_t va;
int stride = util_format_get_blocksize(view->base.format);
unsigned format, num_format, format_comp, endian;
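+ /* Byte offset and size of the buffer range covered by this view. */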
+ unsigned offset = view->base.u.buf.first_element * stride;
+ unsigned size = (view->base.u.buf.last_element - view->base.u.buf.first_element + 1) * stride;
r600_vertex_data_type(view->base.format,
&format, &num_format, &format_comp,
&endian);
- va = r600_resource_va(ctx->screen, view->base.texture);
+ va = r600_resource_va(ctx->screen, view->base.texture) + offset;
view->tex_resource = &tmp->resource;
view->skip_mip_address_reloc = true;
view->tex_resource_words[0] = va;
- view->tex_resource_words[1] = width0 - 1;
+ view->tex_resource_words[1] = size - 1;
view->tex_resource_words[2] = S_038008_BASE_ADDRESS_HI(va >> 32UL) |
S_038008_STRIDE(stride) |
S_038008_DATA_FORMAT(format) |
static void r600_emit_clip_state(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
struct pipe_clip_state *state = &rctx->clip_state.state;
r600_write_context_reg_seq(cs, R_028E20_PA_CL_UCP0_X, 6*4);
- r600_write_array(cs, 6*4, (unsigned*)state);
+ radeon_emit_array(cs, (unsigned*)state, 6*4);
}
static void r600_set_polygon_stipple(struct pipe_context *ctx,
static void r600_emit_scissor_state(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
struct pipe_scissor_state *state = &rctx->scissor.scissor;
- if (rctx->chip_class != R600 || rctx->scissor.enable) {
+ if (rctx->b.chip_class != R600 || rctx->scissor.enable) {
r600_write_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL, 2);
- r600_write_value(cs, S_028240_TL_X(state->minx) | S_028240_TL_Y(state->miny) |
+ radeon_emit(cs, S_028240_TL_X(state->minx) | S_028240_TL_Y(state->miny) |
S_028240_WINDOW_OFFSET_DISABLE(1));
- r600_write_value(cs, S_028244_BR_X(state->maxx) | S_028244_BR_Y(state->maxy));
+ radeon_emit(cs, S_028244_BR_X(state->maxx) | S_028244_BR_Y(state->maxy));
} else {
r600_write_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL, 2);
- r600_write_value(cs, S_028240_TL_X(0) | S_028240_TL_Y(0) |
+ radeon_emit(cs, S_028240_TL_X(0) | S_028240_TL_Y(0) |
S_028240_WINDOW_OFFSET_DISABLE(1));
- r600_write_value(cs, S_028244_BR_X(8192) | S_028244_BR_Y(8192));
+ radeon_emit(cs, S_028244_BR_X(8192) | S_028244_BR_Y(8192));
}
}
-static void r600_set_scissor_state(struct pipe_context *ctx,
- const struct pipe_scissor_state *state)
+static void r600_set_scissor_states(struct pipe_context *ctx,
+ unsigned start_slot,
+ unsigned num_scissors,
+ const struct pipe_scissor_state *state)
{
struct r600_context *rctx = (struct r600_context *)ctx;
rctx->scissor.scissor = *state;
- if (rctx->chip_class == R600 && !rctx->scissor.enable)
+ if (rctx->b.chip_class == R600 && !rctx->scissor.enable)
return;
rctx->scissor.atom.dirty = true;
buffer.target = PIPE_BUFFER;
buffer.format = PIPE_FORMAT_R8_UNORM;
buffer.bind = PIPE_BIND_CUSTOM;
- buffer.usage = PIPE_USAGE_STATIC;
+ buffer.usage = PIPE_USAGE_DEFAULT;
buffer.flags = 0;
buffer.width0 = size;
buffer.height0 = 1;
buffer.array_size = 1;
return (struct r600_resource*)
- r600_buffer_create(&rscreen->screen, &buffer, alignment);
+ r600_buffer_create(&rscreen->b.b, &buffer, alignment);
}
static void r600_init_color_surface(struct r600_context *rctx,
unsigned level = surf->base.u.tex.level;
unsigned pitch, slice;
unsigned color_info;
+ unsigned color_view;
unsigned format, swap, ntype, endian;
unsigned offset;
const struct util_format_description *desc;
bool blend_bypass = 0, blend_clamp = 1;
if (rtex->is_depth && !rtex->is_flushing_texture && !r600_can_read_depth(rtex)) {
- r600_init_flushed_depth_texture(&rctx->context, surf->base.texture, NULL);
+ r600_init_flushed_depth_texture(&rctx->b.b, surf->base.texture, NULL);
rtex = rtex->flushed_depth_texture;
assert(rtex);
}
offset = rtex->surface.level[level].offset;
- if (rtex->surface.level[level].mode < RADEON_SURF_MODE_1D) {
+ if (rtex->surface.level[level].mode == RADEON_SURF_MODE_LINEAR) {
+ assert(surf->base.u.tex.first_layer == surf->base.u.tex.last_layer);
offset += rtex->surface.level[level].slice_size *
- surf->base.u.tex.first_layer;
- }
+ surf->base.u.tex.first_layer;
+ color_view = 0;
+ } else
+ color_view = S_028080_SLICE_START(surf->base.u.tex.first_layer) |
+ S_028080_SLICE_MAX(surf->base.u.tex.last_layer);
+
pitch = rtex->surface.level[level].nblk_x / 8 - 1;
slice = (rtex->surface.level[level].nblk_x * rtex->surface.level[level].nblk_y) / 64;
if (slice) {
/* EXPORT_NORM is an optimization that can be enabled for better
* performance in certain cases
*/
- if (rctx->chip_class == R600) {
+ if (rctx->b.chip_class == R600) {
/* EXPORT_NORM can be enabled if:
* - 11-bit or smaller UNORM/SNORM/SRGB
* - BLEND_CLAMP is enabled
pipe_resource_reference((struct pipe_resource**)&surf->cb_buffer_fmask,
&rtex->resource.b.b);
- if (rtex->cmask_size) {
- surf->cb_color_cmask = rtex->cmask_offset >> 8;
- surf->cb_color_mask |= S_028100_CMASK_BLOCK_MAX(rtex->cmask_slice_tile_max);
+ if (rtex->cmask.size) {
+ surf->cb_color_cmask = rtex->cmask.offset >> 8;
+ surf->cb_color_mask |= S_028100_CMASK_BLOCK_MAX(rtex->cmask.slice_tile_max);
- if (rtex->fmask_size) {
+ if (rtex->fmask.size) {
color_info |= S_0280A0_TILE_MODE(V_0280A0_FRAG_ENABLE);
- surf->cb_color_fmask = rtex->fmask_offset >> 8;
- surf->cb_color_mask |= S_028100_FMASK_TILE_MAX(slice);
+ surf->cb_color_fmask = rtex->fmask.offset >> 8;
+ surf->cb_color_mask |= S_028100_FMASK_TILE_MAX(rtex->fmask.slice_tile_max);
} else { /* cmask only */
color_info |= S_0280A0_TILE_MODE(V_0280A0_CLEAR_ENABLE);
}
struct r600_cmask_info cmask;
struct r600_fmask_info fmask;
- r600_texture_get_cmask_info(rscreen, rtex, &cmask);
- r600_texture_get_fmask_info(rscreen, rtex, 8, &fmask);
+ r600_texture_get_cmask_info(&rscreen->b, rtex, &cmask);
+ r600_texture_get_fmask_info(&rscreen->b, rtex, 8, &fmask);
/* CMASK. */
if (!rctx->dummy_cmask ||
rctx->dummy_cmask = r600_buffer_create_helper(rscreen, cmask.size, cmask.alignment);
/* Set the contents to 0xCC. */
- ptr = pipe_buffer_map(&rctx->context, &rctx->dummy_cmask->b.b, PIPE_TRANSFER_WRITE, &transfer);
+ ptr = pipe_buffer_map(&rctx->b.b, &rctx->dummy_cmask->b.b, PIPE_TRANSFER_WRITE, &transfer);
memset(ptr, 0xCC, cmask.size);
- pipe_buffer_unmap(&rctx->context, transfer);
+ pipe_buffer_unmap(&rctx->b.b, transfer);
}
pipe_resource_reference((struct pipe_resource**)&surf->cb_buffer_cmask,
&rctx->dummy_cmask->b.b);
surf->cb_color_cmask = 0;
surf->cb_color_fmask = 0;
surf->cb_color_mask = S_028100_CMASK_BLOCK_MAX(cmask.slice_tile_max) |
- S_028100_FMASK_TILE_MAX(slice);
+ S_028100_FMASK_TILE_MAX(fmask.slice_tile_max);
}
surf->cb_color_info = color_info;
-
- if (rtex->surface.level[level].mode < RADEON_SURF_MODE_1D) {
- surf->cb_color_view = 0;
- } else {
- surf->cb_color_view = S_028080_SLICE_START(surf->base.u.tex.first_layer) |
- S_028080_SLICE_MAX(surf->base.u.tex.last_layer);
- }
-
+ surf->cb_color_view = color_view;
surf->color_initialized = true;
}
surf->htile_enabled = 0;
/* use htile only for first level */
- if (rtex->htile && !level) {
- uint64_t va = r600_resource_va(&rctx->screen->screen, &rtex->htile->b.b);
+ if (rtex->htile_buffer && !level) {
+ uint64_t va = r600_resource_va(&rctx->screen->b.b, &rtex->htile_buffer->b.b);
surf->htile_enabled = 1;
surf->db_htile_data_base = va >> 8;
surf->db_htile_surface = S_028D24_HTILE_WIDTH(1) |
S_028D24_HTILE_HEIGHT(1) |
+ S_028D24_FULL_CACHE(1) |
S_028D24_LINEAR(1);
/* preload is not working properly on r6xx/r7xx */
surf->db_depth_info |= S_028010_TILE_SURFACE_ENABLE(1);
unsigned i;
if (rctx->framebuffer.state.nr_cbufs) {
- rctx->flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
-
- if (rctx->chip_class >= R700 &&
- rctx->framebuffer.state.cbufs[0]->texture->nr_samples > 1) {
- rctx->flags |= R600_CONTEXT_FLUSH_AND_INV_CB_META;
- }
+ rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
+ rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_CB |
+ R600_CONTEXT_FLUSH_AND_INV_CB_META;
}
if (rctx->framebuffer.state.zsbuf) {
- rctx->flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
+ rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
+ rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_DB;
+
+ rtex = (struct r600_texture*)rctx->framebuffer.state.zsbuf->texture;
+ if (rctx->b.chip_class >= R700 && rtex->htile_buffer) {
+ rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_DB_META;
+ }
}
/* Set the new state. */
util_copy_framebuffer_state(&rctx->framebuffer.state, state);
rctx->framebuffer.export_16bpc = state->nr_cbufs != 0;
- rctx->framebuffer.cb0_is_integer = state->nr_cbufs &&
+ rctx->framebuffer.cb0_is_integer = state->nr_cbufs && state->cbufs[0] &&
util_format_is_pure_integer(state->cbufs[0]->format);
rctx->framebuffer.compressed_cb_mask = 0;
rctx->framebuffer.is_msaa_resolve = state->nr_cbufs == 2 &&
+ state->cbufs[0] && state->cbufs[1] &&
state->cbufs[0]->texture->nr_samples > 1 &&
state->cbufs[1]->texture->nr_samples <= 1;
-
- if (state->nr_cbufs)
- rctx->framebuffer.nr_samples = state->cbufs[0]->texture->nr_samples;
- else if (state->zsbuf)
- rctx->framebuffer.nr_samples = state->zsbuf->texture->nr_samples;
- else
- rctx->framebuffer.nr_samples = 0;
+ rctx->framebuffer.nr_samples = util_framebuffer_get_num_samples(state);
/* Colorbuffers. */
for (i = 0; i < state->nr_cbufs; i++) {
/* The resolve buffer must have CMASK and FMASK to prevent hardlocks on R6xx. */
- bool force_cmask_fmask = rctx->chip_class == R600 &&
+ bool force_cmask_fmask = rctx->b.chip_class == R600 &&
rctx->framebuffer.is_msaa_resolve &&
i == 1;
surf = (struct r600_surface*)state->cbufs[i];
+ if (!surf)
+ continue;
+
rtex = (struct r600_texture*)surf->base.texture;
r600_context_add_resource_size(ctx, state->cbufs[i]->texture);
rctx->framebuffer.export_16bpc = false;
}
- if (rtex->fmask_size && rtex->cmask_size) {
+ if (rtex->fmask.size && rtex->cmask.size) {
rctx->framebuffer.compressed_cb_mask |= 1 << i;
}
}
/* Update alpha-test state dependencies.
* Alpha-test is done on the first colorbuffer only. */
if (state->nr_cbufs) {
+ bool alphatest_bypass = false;
+
surf = (struct r600_surface*)state->cbufs[0];
- if (rctx->alphatest_state.bypass != surf->alphatest_bypass) {
- rctx->alphatest_state.bypass = surf->alphatest_bypass;
+ if (surf) {
+ alphatest_bypass = surf->alphatest_bypass;
+ }
+
+ if (rctx->alphatest_state.bypass != alphatest_bypass) {
+ rctx->alphatest_state.bypass = alphatest_bypass;
rctx->alphatest_state.atom.dirty = true;
}
}
rctx->alphatest_state.atom.dirty = true;
}
- r600_update_db_shader_control(rctx);
-
/* Calculate the CS size. */
rctx->framebuffer.atom.num_dw =
10 /*COLOR_INFO*/ + 4 /*SCISSOR*/ + 3 /*SHADER_CONTROL*/ + 8 /*MSAA*/;
if (rctx->framebuffer.state.nr_cbufs) {
- rctx->framebuffer.atom.num_dw += 6 * (2 + rctx->framebuffer.state.nr_cbufs);
- rctx->framebuffer.atom.num_dw += 6 * rctx->framebuffer.state.nr_cbufs; /* relocs */
-
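+ /* 15 dwords per colorbuffer: BASE, FRAG and TILE register writes plus their relocation NOPs; 3*(2+n) covers the SIZE, VIEW and MASK register sequences. */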
+ rctx->framebuffer.atom.num_dw += 15 * rctx->framebuffer.state.nr_cbufs;
+ rctx->framebuffer.atom.num_dw += 3 * (2 + rctx->framebuffer.state.nr_cbufs);
}
if (rctx->framebuffer.state.zsbuf) {
- rctx->framebuffer.atom.num_dw += 18;
- } else if (rctx->screen->info.drm_minor >= 18) {
+ rctx->framebuffer.atom.num_dw += 16;
+ } else if (rctx->screen->b.info.drm_minor >= 18) {
rctx->framebuffer.atom.num_dw += 3;
}
- if (rctx->family > CHIP_R600 && rctx->family < CHIP_RV770) {
+ if (rctx->b.family > CHIP_R600 && rctx->b.family < CHIP_RV770) {
rctx->framebuffer.atom.num_dw += 2;
}
(((s2x) & 0xf) << 16) | (((s2y) & 0xf) << 20) | \
(((s3x) & 0xf) << 24) | (((s3y) & 0xf) << 28))
+
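+/* Each sample_locs dword packs eight 4-bit signed sample offsets (1/16th-pixel units) via FILL_SREG. */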
+static uint32_t sample_locs_2x[] = {
+ FILL_SREG(-4, 4, 4, -4, -4, 4, 4, -4),
+ FILL_SREG(-4, 4, 4, -4, -4, 4, 4, -4),
+};
+static unsigned max_dist_2x = 4;
+
+static uint32_t sample_locs_4x[] = {
+ FILL_SREG(-2, -2, 2, 2, -6, 6, 6, -6),
+ FILL_SREG(-2, -2, 2, 2, -6, 6, 6, -6),
+};
+static unsigned max_dist_4x = 6;
+static uint32_t sample_locs_8x[] = {
+ FILL_SREG(-1, 1, 1, 5, 3, -5, 5, 3),
+ FILL_SREG(-7, -1, -3, -7, 7, -3, -5, 7),
+};
+static unsigned max_dist_8x = 7;
+
+static void r600_get_sample_position(struct pipe_context *ctx,
+ unsigned sample_count,
+ unsigned sample_index,
+ float *out_value)
+{
+ int offset, index;
+ struct {
+ int idx:4;
+ } val;
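+ /* Sign-extend each 4-bit offset through the bitfield and map it to a [0,1) sub-pixel position. */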
+ switch (sample_count) {
+ case 1:
+ default:
+ out_value[0] = out_value[1] = 0.5;
+ break;
+ case 2:
+ offset = 4 * (sample_index * 2);
+ val.idx = (sample_locs_2x[0] >> offset) & 0xf;
+ out_value[0] = (float)(val.idx + 8) / 16.0f;
+ val.idx = (sample_locs_2x[0] >> (offset + 4)) & 0xf;
+ out_value[1] = (float)(val.idx + 8) / 16.0f;
+ break;
+ case 4:
+ offset = 4 * (sample_index * 2);
+ val.idx = (sample_locs_4x[0] >> offset) & 0xf;
+ out_value[0] = (float)(val.idx + 8) / 16.0f;
+ val.idx = (sample_locs_4x[0] >> (offset + 4)) & 0xf;
+ out_value[1] = (float)(val.idx + 8) / 16.0f;
+ break;
+ case 8:
+ offset = 4 * (sample_index % 4 * 2);
+ index = (sample_index / 4);
+ val.idx = (sample_locs_8x[index] >> offset) & 0xf;
+ out_value[0] = (float)(val.idx + 8) / 16.0f;
+ val.idx = (sample_locs_8x[index] >> (offset + 4)) & 0xf;
+ out_value[1] = (float)(val.idx + 8) / 16.0f;
+ break;
+ }
+}
+
static void r600_emit_msaa_state(struct r600_context *rctx, int nr_samples)
{
- static uint32_t sample_locs_2x[] = {
- FILL_SREG(-4, 4, 4, -4, -4, 4, 4, -4),
- FILL_SREG(-4, 4, 4, -4, -4, 4, 4, -4),
- };
- static unsigned max_dist_2x = 4;
- static uint32_t sample_locs_4x[] = {
- FILL_SREG(-2, -2, 2, 2, -6, 6, 6, -6),
- FILL_SREG(-2, -2, 2, 2, -6, 6, 6, -6),
- };
- static unsigned max_dist_4x = 6;
- static uint32_t sample_locs_8x[] = {
- FILL_SREG(-1, 1, 1, 5, 3, -5, 5, 3),
- FILL_SREG(-7, -1, -3, -7, 7, -3, -5, 7),
- };
- static unsigned max_dist_8x = 7;
-
- struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
unsigned max_dist = 0;
- if (rctx->family == CHIP_R600) {
+ if (rctx->b.family == CHIP_R600) {
switch (nr_samples) {
default:
nr_samples = 0;
break;
case 8:
r600_write_config_reg_seq(cs, R_008B48_PA_SC_AA_SAMPLE_LOCS_8S_WD0, 2);
- r600_write_value(cs, sample_locs_8x[0]); /* R_008B48_PA_SC_AA_SAMPLE_LOCS_8S_WD0 */
- r600_write_value(cs, sample_locs_8x[1]); /* R_008B4C_PA_SC_AA_SAMPLE_LOCS_8S_WD1 */
+ radeon_emit(cs, sample_locs_8x[0]); /* R_008B48_PA_SC_AA_SAMPLE_LOCS_8S_WD0 */
+ radeon_emit(cs, sample_locs_8x[1]); /* R_008B4C_PA_SC_AA_SAMPLE_LOCS_8S_WD1 */
max_dist = max_dist_8x;
break;
}
switch (nr_samples) {
default:
r600_write_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2);
- r600_write_value(cs, 0); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */
- r600_write_value(cs, 0); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */
+ radeon_emit(cs, 0); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */
+ radeon_emit(cs, 0); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */
nr_samples = 0;
break;
case 2:
r600_write_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2);
- r600_write_value(cs, sample_locs_2x[0]); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */
- r600_write_value(cs, sample_locs_2x[1]); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */
+ radeon_emit(cs, sample_locs_2x[0]); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */
+ radeon_emit(cs, sample_locs_2x[1]); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */
max_dist = max_dist_2x;
break;
case 4:
r600_write_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2);
- r600_write_value(cs, sample_locs_4x[0]); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */
- r600_write_value(cs, sample_locs_4x[1]); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */
+ radeon_emit(cs, sample_locs_4x[0]); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */
+ radeon_emit(cs, sample_locs_4x[1]); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */
max_dist = max_dist_4x;
break;
case 8:
r600_write_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2);
- r600_write_value(cs, sample_locs_8x[0]); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */
- r600_write_value(cs, sample_locs_8x[1]); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */
+ radeon_emit(cs, sample_locs_8x[0]); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */
+ radeon_emit(cs, sample_locs_8x[1]); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */
max_dist = max_dist_8x;
break;
}
if (nr_samples > 1) {
r600_write_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2);
- r600_write_value(cs, S_028C00_LAST_PIXEL(1) |
+ radeon_emit(cs, S_028C00_LAST_PIXEL(1) |
S_028C00_EXPAND_LINE_WIDTH(1)); /* R_028C00_PA_SC_LINE_CNTL */
- r600_write_value(cs, S_028C04_MSAA_NUM_SAMPLES(util_logbase2(nr_samples)) |
+ radeon_emit(cs, S_028C04_MSAA_NUM_SAMPLES(util_logbase2(nr_samples)) |
S_028C04_MAX_SAMPLE_DIST(max_dist)); /* R_028C04_PA_SC_AA_CONFIG */
} else {
r600_write_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2);
- r600_write_value(cs, S_028C00_LAST_PIXEL(1)); /* R_028C00_PA_SC_LINE_CNTL */
- r600_write_value(cs, 0); /* R_028C04_PA_SC_AA_CONFIG */
+ radeon_emit(cs, S_028C00_LAST_PIXEL(1)); /* R_028C00_PA_SC_LINE_CNTL */
+ radeon_emit(cs, 0); /* R_028C04_PA_SC_AA_CONFIG */
}
}
static void r600_emit_framebuffer_state(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
struct pipe_framebuffer_state *state = &rctx->framebuffer.state;
unsigned nr_cbufs = state->nr_cbufs;
struct r600_surface **cb = (struct r600_surface**)&state->cbufs[0];
/* Colorbuffers. */
r600_write_context_reg_seq(cs, R_0280A0_CB_COLOR0_INFO, 8);
for (i = 0; i < nr_cbufs; i++) {
- r600_write_value(cs, cb[i]->cb_color_info);
+ radeon_emit(cs, cb[i] ? cb[i]->cb_color_info : 0);
}
/* set CB_COLOR1_INFO for possible dual-src blending */
- if (i == 1) {
- r600_write_value(cs, cb[0]->cb_color_info);
+ if (i == 1 && cb[0]) {
+ radeon_emit(cs, cb[0]->cb_color_info);
i++;
}
for (; i < 8; i++) {
- r600_write_value(cs, 0);
+ radeon_emit(cs, 0);
}
if (nr_cbufs) {
- /* COLOR_BASE */
- r600_write_context_reg_seq(cs, R_028040_CB_COLOR0_BASE, nr_cbufs);
- for (i = 0; i < nr_cbufs; i++) {
- r600_write_value(cs, cb[i]->cb_color_base);
- }
-
- /* relocations */
for (i = 0; i < nr_cbufs; i++) {
- unsigned reloc = r600_context_bo_reloc(rctx,
- &rctx->rings.gfx,
- (struct r600_resource*)cb[i]->base.texture,
- RADEON_USAGE_READWRITE);
- r600_write_value(cs, PKT3(PKT3_NOP, 0, 0));
- r600_write_value(cs, reloc);
+ unsigned reloc;
+
+ if (!cb[i])
+ continue;
+
+ /* COLOR_BASE */
+ r600_write_context_reg(cs, R_028040_CB_COLOR0_BASE + i*4, cb[i]->cb_color_base);
+
+ reloc = r600_context_bo_reloc(&rctx->b,
+ &rctx->b.rings.gfx,
+ (struct r600_resource*)cb[i]->base.texture,
+ RADEON_USAGE_READWRITE);
+ radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
+ radeon_emit(cs, reloc);
+
+ /* FMASK */
+ r600_write_context_reg(cs, R_0280E0_CB_COLOR0_FRAG + i*4, cb[i]->cb_color_fmask);
+
+ reloc = r600_context_bo_reloc(&rctx->b,
+ &rctx->b.rings.gfx,
+ cb[i]->cb_buffer_fmask,
+ RADEON_USAGE_READWRITE);
+ radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
+ radeon_emit(cs, reloc);
+
+ /* CMASK */
+ r600_write_context_reg(cs, R_0280C0_CB_COLOR0_TILE + i*4, cb[i]->cb_color_cmask);
+
+ reloc = r600_context_bo_reloc(&rctx->b,
+ &rctx->b.rings.gfx,
+ cb[i]->cb_buffer_cmask,
+ RADEON_USAGE_READWRITE);
+ radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
+ radeon_emit(cs, reloc);
}
r600_write_context_reg_seq(cs, R_028060_CB_COLOR0_SIZE, nr_cbufs);
for (i = 0; i < nr_cbufs; i++) {
- r600_write_value(cs, cb[i]->cb_color_size);
+ radeon_emit(cs, cb[i] ? cb[i]->cb_color_size : 0);
}
r600_write_context_reg_seq(cs, R_028080_CB_COLOR0_VIEW, nr_cbufs);
for (i = 0; i < nr_cbufs; i++) {
- r600_write_value(cs, cb[i]->cb_color_view);
+ radeon_emit(cs, cb[i] ? cb[i]->cb_color_view : 0);
}
r600_write_context_reg_seq(cs, R_028100_CB_COLOR0_MASK, nr_cbufs);
for (i = 0; i < nr_cbufs; i++) {
- r600_write_value(cs, cb[i]->cb_color_mask);
- }
-
- /* FMASK. */
- r600_write_context_reg_seq(cs, R_0280E0_CB_COLOR0_FRAG, nr_cbufs);
- for (i = 0; i < nr_cbufs; i++) {
- r600_write_value(cs, cb[i]->cb_color_fmask);
- }
- /* relocations */
- for (i = 0; i < nr_cbufs; i++) {
- unsigned reloc = r600_context_bo_reloc(rctx,
- &rctx->rings.gfx,
- cb[i]->cb_buffer_fmask,
- RADEON_USAGE_READWRITE);
- r600_write_value(cs, PKT3(PKT3_NOP, 0, 0));
- r600_write_value(cs, reloc);
- }
-
- /* CMASK. */
- r600_write_context_reg_seq(cs, R_0280C0_CB_COLOR0_TILE, nr_cbufs);
- for (i = 0; i < nr_cbufs; i++) {
- r600_write_value(cs, cb[i]->cb_color_cmask);
- }
- /* relocations */
- for (i = 0; i < nr_cbufs; i++) {
- unsigned reloc = r600_context_bo_reloc(rctx,
- &rctx->rings.gfx,
- cb[i]->cb_buffer_cmask,
- RADEON_USAGE_READWRITE);
- r600_write_value(cs, PKT3(PKT3_NOP, 0, 0));
- r600_write_value(cs, reloc);
+ radeon_emit(cs, cb[i] ? cb[i]->cb_color_mask : 0);
}
sbu |= SURFACE_BASE_UPDATE_COLOR_NUM(nr_cbufs);
}
/* SURFACE_BASE_UPDATE */
- if (rctx->family > CHIP_R600 && rctx->family < CHIP_RV770 && sbu) {
- r600_write_value(cs, PKT3(PKT3_SURFACE_BASE_UPDATE, 0, 0));
- r600_write_value(cs, sbu);
+ if (rctx->b.family > CHIP_R600 && rctx->b.family < CHIP_RV770 && sbu) {
+ radeon_emit(cs, PKT3(PKT3_SURFACE_BASE_UPDATE, 0, 0));
+ radeon_emit(cs, sbu);
sbu = 0;
}
/* Zbuffer. */
if (state->zsbuf) {
struct r600_surface *surf = (struct r600_surface*)state->zsbuf;
- unsigned reloc = r600_context_bo_reloc(rctx,
- &rctx->rings.gfx,
+ unsigned reloc = r600_context_bo_reloc(&rctx->b,
+ &rctx->b.rings.gfx,
(struct r600_resource*)state->zsbuf->texture,
RADEON_USAGE_READWRITE);
surf->pa_su_poly_offset_db_fmt_cntl);
r600_write_context_reg_seq(cs, R_028000_DB_DEPTH_SIZE, 2);
- r600_write_value(cs, surf->db_depth_size); /* R_028000_DB_DEPTH_SIZE */
- r600_write_value(cs, surf->db_depth_view); /* R_028004_DB_DEPTH_VIEW */
+ radeon_emit(cs, surf->db_depth_size); /* R_028000_DB_DEPTH_SIZE */
+ radeon_emit(cs, surf->db_depth_view); /* R_028004_DB_DEPTH_VIEW */
r600_write_context_reg_seq(cs, R_02800C_DB_DEPTH_BASE, 2);
- r600_write_value(cs, surf->db_depth_base); /* R_02800C_DB_DEPTH_BASE */
- r600_write_value(cs, surf->db_depth_info); /* R_028010_DB_DEPTH_INFO */
+ radeon_emit(cs, surf->db_depth_base); /* R_02800C_DB_DEPTH_BASE */
+ radeon_emit(cs, surf->db_depth_info); /* R_028010_DB_DEPTH_INFO */
- r600_write_value(cs, PKT3(PKT3_NOP, 0, 0));
- r600_write_value(cs, reloc);
+ radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
+ radeon_emit(cs, reloc);
r600_write_context_reg(cs, R_028D34_DB_PREFETCH_LIMIT, surf->db_prefetch_limit);
sbu |= SURFACE_BASE_UPDATE_DEPTH;
- } else if (rctx->screen->info.drm_minor >= 18) {
+ } else if (rctx->screen->b.info.drm_minor >= 18) {
/* DRM 2.6.18 allows the INVALID format to disable depth/stencil.
* Older kernels are out of luck. */
r600_write_context_reg(cs, R_028010_DB_DEPTH_INFO, S_028010_FORMAT(V_028010_DEPTH_INVALID));
}
/* SURFACE_BASE_UPDATE */
- if (rctx->family > CHIP_R600 && rctx->family < CHIP_RV770 && sbu) {
- r600_write_value(cs, PKT3(PKT3_SURFACE_BASE_UPDATE, 0, 0));
- r600_write_value(cs, sbu);
+ if (rctx->b.family > CHIP_R600 && rctx->b.family < CHIP_RV770 && sbu) {
+ radeon_emit(cs, PKT3(PKT3_SURFACE_BASE_UPDATE, 0, 0));
+ radeon_emit(cs, sbu);
sbu = 0;
}
/* Framebuffer dimensions. */
r600_write_context_reg_seq(cs, R_028204_PA_SC_WINDOW_SCISSOR_TL, 2);
- r600_write_value(cs, S_028240_TL_X(0) | S_028240_TL_Y(0) |
+ radeon_emit(cs, S_028240_TL_X(0) | S_028240_TL_Y(0) |
S_028240_WINDOW_OFFSET_DISABLE(1)); /* R_028204_PA_SC_WINDOW_SCISSOR_TL */
- r600_write_value(cs, S_028244_BR_X(state->width) |
+ radeon_emit(cs, S_028244_BR_X(state->width) |
S_028244_BR_Y(state->height)); /* R_028208_PA_SC_WINDOW_SCISSOR_BR */
if (rctx->framebuffer.is_msaa_resolve) {
static void r600_emit_cb_misc_state(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
struct r600_cb_misc_state *a = (struct r600_cb_misc_state*)atom;
if (G_028808_SPECIAL_OP(a->cb_color_control) == V_028808_SPECIAL_RESOLVE_BOX) {
r600_write_context_reg_seq(cs, R_028238_CB_TARGET_MASK, 2);
- if (rctx->chip_class == R600) {
- r600_write_value(cs, 0xff); /* R_028238_CB_TARGET_MASK */
- r600_write_value(cs, 0xff); /* R_02823C_CB_SHADER_MASK */
+ if (rctx->b.chip_class == R600) {
+ radeon_emit(cs, 0xff); /* R_028238_CB_TARGET_MASK */
+ radeon_emit(cs, 0xff); /* R_02823C_CB_SHADER_MASK */
} else {
- r600_write_value(cs, 0xf); /* R_028238_CB_TARGET_MASK */
- r600_write_value(cs, 0xf); /* R_02823C_CB_SHADER_MASK */
+ radeon_emit(cs, 0xf); /* R_028238_CB_TARGET_MASK */
+ radeon_emit(cs, 0xf); /* R_02823C_CB_SHADER_MASK */
}
r600_write_context_reg(cs, R_028808_CB_COLOR_CONTROL, a->cb_color_control);
} else {
unsigned multiwrite = a->multiwrite && a->nr_cbufs > 1;
r600_write_context_reg_seq(cs, R_028238_CB_TARGET_MASK, 2);
- r600_write_value(cs, a->blend_colormask & fb_colormask); /* R_028238_CB_TARGET_MASK */
+ radeon_emit(cs, a->blend_colormask & fb_colormask); /* R_028238_CB_TARGET_MASK */
/* Always enable the first color output to make sure alpha-test works even without one. */
- r600_write_value(cs, 0xf | (multiwrite ? fb_colormask : ps_colormask)); /* R_02823C_CB_SHADER_MASK */
+ radeon_emit(cs, 0xf | (multiwrite ? fb_colormask : ps_colormask)); /* R_02823C_CB_SHADER_MASK */
r600_write_context_reg(cs, R_028808_CB_COLOR_CONTROL,
a->cb_color_control |
S_028808_MULTIWRITE_ENABLE(multiwrite));
static void r600_emit_db_state(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
struct r600_db_state *a = (struct r600_db_state*)atom;
if (a->rsurf && a->rsurf->htile_enabled) {
struct r600_texture *rtex = (struct r600_texture *)a->rsurf->base.texture;
unsigned reloc_idx;
- r600_write_context_reg(cs, R_02802C_DB_DEPTH_CLEAR, fui(rtex->depth_clear));
+ r600_write_context_reg(cs, R_02802C_DB_DEPTH_CLEAR, fui(rtex->depth_clear_value));
r600_write_context_reg(cs, R_028D24_DB_HTILE_SURFACE, a->rsurf->db_htile_surface);
r600_write_context_reg(cs, R_028014_DB_HTILE_DATA_BASE, a->rsurf->db_htile_data_base);
- reloc_idx = r600_context_bo_reloc(rctx, &rctx->rings.gfx, rtex->htile, RADEON_USAGE_READWRITE);
+ reloc_idx = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rtex->htile_buffer, RADEON_USAGE_READWRITE);
cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
cs->buf[cs->cdw++] = reloc_idx;
} else {
static void r600_emit_db_misc_state(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
struct r600_db_misc_state *a = (struct r600_db_misc_state*)atom;
unsigned db_render_control = 0;
unsigned db_render_override =
S_028D10_FORCE_HIS_ENABLE1(V_028D10_FORCE_DISABLE);
if (a->occlusion_query_enabled) {
- if (rctx->chip_class >= R700) {
+ if (rctx->b.chip_class >= R700) {
db_render_control |= S_028D0C_R700_PERFECT_ZPASS_COUNTS(1);
}
db_render_override |= S_028D10_NOOP_CULL_DISABLE(1);
}
r600_write_context_reg_seq(cs, R_028D0C_DB_RENDER_CONTROL, 2);
- r600_write_value(cs, db_render_control); /* R_028D0C_DB_RENDER_CONTROL */
- r600_write_value(cs, db_render_override); /* R_028D10_DB_RENDER_OVERRIDE */
+ radeon_emit(cs, db_render_control); /* R_028D0C_DB_RENDER_CONTROL */
+ radeon_emit(cs, db_render_override); /* R_028D10_DB_RENDER_OVERRIDE */
r600_write_context_reg(cs, R_02880C_DB_SHADER_CONTROL, a->db_shader_control);
}
static void r600_emit_config_state(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
struct r600_config_state *a = (struct r600_config_state*)atom;
r600_write_config_reg(cs, R_008C04_SQ_GPR_RESOURCE_MGMT_1, a->sq_gpr_resource_mgmt_1);
+ r600_write_config_reg(cs, R_008C08_SQ_GPR_RESOURCE_MGMT_2, a->sq_gpr_resource_mgmt_2);
}
static void r600_emit_vertex_buffers(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
uint32_t dirty_mask = rctx->vertex_buffer_state.dirty_mask;
while (dirty_mask) {
offset = vb->buffer_offset;
/* fetch resources start at index 320 */
- r600_write_value(cs, PKT3(PKT3_SET_RESOURCE, 7, 0));
- r600_write_value(cs, (320 + buffer_index) * 7);
- r600_write_value(cs, offset); /* RESOURCEi_WORD0 */
- r600_write_value(cs, rbuffer->buf->size - offset - 1); /* RESOURCEi_WORD1 */
- r600_write_value(cs, /* RESOURCEi_WORD2 */
+ radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 7, 0));
+ radeon_emit(cs, (320 + buffer_index) * 7);
+ radeon_emit(cs, offset); /* RESOURCEi_WORD0 */
+ radeon_emit(cs, rbuffer->buf->size - offset - 1); /* RESOURCEi_WORD1 */
+ radeon_emit(cs, /* RESOURCEi_WORD2 */
S_038008_ENDIAN_SWAP(r600_endian_swap(32)) |
S_038008_STRIDE(vb->stride));
- r600_write_value(cs, 0); /* RESOURCEi_WORD3 */
- r600_write_value(cs, 0); /* RESOURCEi_WORD4 */
- r600_write_value(cs, 0); /* RESOURCEi_WORD5 */
- r600_write_value(cs, 0xc0000000); /* RESOURCEi_WORD6 */
+ radeon_emit(cs, 0); /* RESOURCEi_WORD3 */
+ radeon_emit(cs, 0); /* RESOURCEi_WORD4 */
+ radeon_emit(cs, 0); /* RESOURCEi_WORD5 */
+ radeon_emit(cs, 0xc0000000); /* RESOURCEi_WORD6 */
- r600_write_value(cs, PKT3(PKT3_NOP, 0, 0));
- r600_write_value(cs, r600_context_bo_reloc(rctx, &rctx->rings.gfx, rbuffer, RADEON_USAGE_READ));
+ radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
+ radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rbuffer, RADEON_USAGE_READ));
}
}
unsigned reg_alu_constbuf_size,
unsigned reg_alu_const_cache)
{
- struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
uint32_t dirty_mask = state->dirty_mask;
while (dirty_mask) {
struct r600_resource *rbuffer;
unsigned offset;
unsigned buffer_index = ffs(dirty_mask) - 1;
-
+ unsigned gs_ring_buffer = (buffer_index == R600_GS_RING_CONST_BUFFER);
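+ /* The GS ring constant buffer is not a real ALU const buffer: skip the ALU const registers and fetch it with a 4-byte stride and no endian swap. */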
cb = &state->cb[buffer_index];
rbuffer = (struct r600_resource*)cb->buffer;
assert(rbuffer);
offset = cb->buffer_offset;
- r600_write_context_reg(cs, reg_alu_constbuf_size + buffer_index * 4,
- ALIGN_DIVUP(cb->buffer_size >> 4, 16));
- r600_write_context_reg(cs, reg_alu_const_cache + buffer_index * 4, offset >> 8);
+ if (!gs_ring_buffer) {
+ r600_write_context_reg(cs, reg_alu_constbuf_size + buffer_index * 4,
+ ALIGN_DIVUP(cb->buffer_size >> 4, 16));
+ r600_write_context_reg(cs, reg_alu_const_cache + buffer_index * 4, offset >> 8);
+ }
- r600_write_value(cs, PKT3(PKT3_NOP, 0, 0));
- r600_write_value(cs, r600_context_bo_reloc(rctx, &rctx->rings.gfx, rbuffer, RADEON_USAGE_READ));
+ radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
+ radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rbuffer, RADEON_USAGE_READ));
- r600_write_value(cs, PKT3(PKT3_SET_RESOURCE, 7, 0));
- r600_write_value(cs, (buffer_id_base + buffer_index) * 7);
- r600_write_value(cs, offset); /* RESOURCEi_WORD0 */
- r600_write_value(cs, rbuffer->buf->size - offset - 1); /* RESOURCEi_WORD1 */
- r600_write_value(cs, /* RESOURCEi_WORD2 */
- S_038008_ENDIAN_SWAP(r600_endian_swap(32)) |
- S_038008_STRIDE(16));
- r600_write_value(cs, 0); /* RESOURCEi_WORD3 */
- r600_write_value(cs, 0); /* RESOURCEi_WORD4 */
- r600_write_value(cs, 0); /* RESOURCEi_WORD5 */
- r600_write_value(cs, 0xc0000000); /* RESOURCEi_WORD6 */
+ radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 7, 0));
+ radeon_emit(cs, (buffer_id_base + buffer_index) * 7);
+ radeon_emit(cs, offset); /* RESOURCEi_WORD0 */
+ radeon_emit(cs, rbuffer->buf->size - offset - 1); /* RESOURCEi_WORD1 */
+ radeon_emit(cs, /* RESOURCEi_WORD2 */
+ S_038008_ENDIAN_SWAP(gs_ring_buffer ? ENDIAN_NONE : r600_endian_swap(32)) |
+ S_038008_STRIDE(gs_ring_buffer ? 4 : 16));
+ radeon_emit(cs, 0); /* RESOURCEi_WORD3 */
+ radeon_emit(cs, 0); /* RESOURCEi_WORD4 */
+ radeon_emit(cs, 0); /* RESOURCEi_WORD5 */
+ radeon_emit(cs, 0xc0000000); /* RESOURCEi_WORD6 */
- r600_write_value(cs, PKT3(PKT3_NOP, 0, 0));
- r600_write_value(cs, r600_context_bo_reloc(rctx, &rctx->rings.gfx, rbuffer, RADEON_USAGE_READ));
+ radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
+ radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rbuffer, RADEON_USAGE_READ));
dirty_mask &= ~(1 << buffer_index);
}
struct r600_samplerview_state *state,
unsigned resource_id_base)
{
- struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
uint32_t dirty_mask = state->dirty_mask;
while (dirty_mask) {
rview = state->views[resource_index];
assert(rview);
- r600_write_value(cs, PKT3(PKT3_SET_RESOURCE, 7, 0));
- r600_write_value(cs, (resource_id_base + resource_index) * 7);
- r600_write_array(cs, 7, rview->tex_resource_words);
+ radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 7, 0));
+ radeon_emit(cs, (resource_id_base + resource_index) * 7);
+ radeon_emit_array(cs, rview->tex_resource_words, 7);
- reloc = r600_context_bo_reloc(rctx, &rctx->rings.gfx, rview->tex_resource,
+ reloc = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rview->tex_resource,
RADEON_USAGE_READ);
- r600_write_value(cs, PKT3(PKT3_NOP, 0, 0));
- r600_write_value(cs, reloc);
- r600_write_value(cs, PKT3(PKT3_NOP, 0, 0));
- r600_write_value(cs, reloc);
+ radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
+ radeon_emit(cs, reloc);
+ radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
+ radeon_emit(cs, reloc);
}
state->dirty_mask = 0;
}
unsigned resource_id_base,
unsigned border_color_reg)
{
- struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
uint32_t dirty_mask = texinfo->states.dirty_mask;
while (dirty_mask) {
}
}
- r600_write_value(cs, PKT3(PKT3_SET_SAMPLER, 3, 0));
- r600_write_value(cs, (resource_id_base + i) * 3);
- r600_write_array(cs, 3, rstate->tex_sampler_words);
+ radeon_emit(cs, PKT3(PKT3_SET_SAMPLER, 3, 0));
+ radeon_emit(cs, (resource_id_base + i) * 3);
+ radeon_emit_array(cs, rstate->tex_sampler_words, 3);
if (rstate->border_color_use) {
unsigned offset;
offset = border_color_reg;
offset += i * 16;
r600_write_config_reg_seq(cs, offset, 4);
- r600_write_array(cs, 4, rstate->border_color.ui);
+ radeon_emit_array(cs, rstate->border_color.ui, 4);
}
}
texinfo->states.dirty_mask = 0;
static void r600_emit_seamless_cube_map(struct r600_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
unsigned tmp;
tmp = S_009508_DISABLE_CUBE_ANISO(1) |
struct r600_sample_mask *s = (struct r600_sample_mask*)a;
uint8_t mask = s->sample_mask;
- r600_write_context_reg(rctx->rings.gfx.cs, R_028C48_PA_SC_AA_MASK,
+ r600_write_context_reg(rctx->b.rings.gfx.cs, R_028C48_PA_SC_AA_MASK,
mask | (mask << 8) | (mask << 16) | (mask << 24));
}
static void r600_emit_vertex_fetch_shader(struct r600_context *rctx, struct r600_atom *a)
{
- struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
struct r600_cso_state *state = (struct r600_cso_state*)a;
struct r600_fetch_shader *shader = (struct r600_fetch_shader*)state->cso;
r600_write_context_reg(cs, R_028894_SQ_PGM_START_FS, shader->offset >> 8);
- r600_write_value(cs, PKT3(PKT3_NOP, 0, 0));
- r600_write_value(cs, r600_context_bo_reloc(rctx, &rctx->rings.gfx, shader->buffer, RADEON_USAGE_READ));
+ radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
+ radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, shader->buffer, RADEON_USAGE_READ));
}
-void r600_init_state_functions(struct r600_context *rctx)
+static void r600_emit_shader_stages(struct r600_context *rctx, struct r600_atom *a)
{
- unsigned id = 4;
+ struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct r600_shader_stages_state *state = (struct r600_shader_stages_state*)a;
- /* !!!
- * To avoid GPU lockup registers must be emited in a specific order
- * (no kidding ...). The order below is important and have been
- * partialy infered from analyzing fglrx command stream.
- *
- * Don't reorder atom without carefully checking the effect (GPU lockup
- * or piglit regression).
- * !!!
- */
+ uint32_t v2 = 0, primid = 0;
- r600_init_atom(rctx, &rctx->framebuffer.atom, id++, r600_emit_framebuffer_state, 0);
+ if (state->geom_enable) {
+ uint32_t cut_val;
- /* shader const */
- r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_VERTEX].atom, id++, r600_emit_vs_constant_buffers, 0);
- r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_GEOMETRY].atom, id++, r600_emit_gs_constant_buffers, 0);
- r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_FRAGMENT].atom, id++, r600_emit_ps_constant_buffers, 0);
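+ /* Pick the smallest cut value that accommodates the shader's maximum output vertex count. */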
+ if (rctx->gs_shader->current->shader.gs_max_out_vertices <= 128)
+ cut_val = V_028A40_GS_CUT_128;
+ else if (rctx->gs_shader->current->shader.gs_max_out_vertices <= 256)
+ cut_val = V_028A40_GS_CUT_256;
+ else if (rctx->gs_shader->current->shader.gs_max_out_vertices <= 512)
+ cut_val = V_028A40_GS_CUT_512;
+ else
+ cut_val = V_028A40_GS_CUT_1024;
- /* sampler must be emited before TA_CNTL_AUX otherwise DISABLE_CUBE_WRAP change
- * does not take effect (TA_CNTL_AUX emited by r600_emit_seamless_cube_map)
- */
- r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_VERTEX].states.atom, id++, r600_emit_vs_sampler_states, 0);
- r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY].states.atom, id++, r600_emit_gs_sampler_states, 0);
- r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT].states.atom, id++, r600_emit_ps_sampler_states, 0);
- /* resource */
- r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_VERTEX].views.atom, id++, r600_emit_vs_sampler_views, 0);
- r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY].views.atom, id++, r600_emit_gs_sampler_views, 0);
- r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT].views.atom, id++, r600_emit_ps_sampler_views, 0);
- r600_init_atom(rctx, &rctx->vertex_buffer_state.atom, id++, r600_emit_vertex_buffers, 0);
+ v2 = S_028A40_MODE(V_028A40_GS_SCENARIO_G) |
+ S_028A40_CUT_MODE(cut_val);
- r600_init_atom(rctx, &rctx->vgt_state.atom, id++, r600_emit_vgt_state, 6);
- r600_init_atom(rctx, &rctx->vgt2_state.atom, id++, r600_emit_vgt2_state, 3);
+ if (rctx->gs_shader->current->shader.gs_prim_id_input)
+ primid = 1;
+ }
- r600_init_atom(rctx, &rctx->seamless_cube_map.atom, id++, r600_emit_seamless_cube_map, 3);
- r600_init_atom(rctx, &rctx->sample_mask.atom, id++, r600_emit_sample_mask, 3);
- rctx->sample_mask.sample_mask = ~0;
+ r600_write_context_reg(cs, R_028A40_VGT_GS_MODE, v2);
+ r600_write_context_reg(cs, R_028A84_VGT_PRIMITIVEID_EN, primid);
+}
- r600_init_atom(rctx, &rctx->alphatest_state.atom, id++, r600_emit_alphatest_state, 6);
- r600_init_atom(rctx, &rctx->blend_color.atom, id++, r600_emit_blend_color, 6);
- r600_init_atom(rctx, &rctx->blend_state.atom, id++, r600_emit_cso_state, 0);
- r600_init_atom(rctx, &rctx->cb_misc_state.atom, id++, r600_emit_cb_misc_state, 7);
- r600_init_atom(rctx, &rctx->clip_misc_state.atom, id++, r600_emit_clip_misc_state, 6);
- r600_init_atom(rctx, &rctx->clip_state.atom, id++, r600_emit_clip_state, 26);
- r600_init_atom(rctx, &rctx->db_misc_state.atom, id++, r600_emit_db_misc_state, 7);
- r600_init_atom(rctx, &rctx->db_state.atom, id++, r600_emit_db_state, 11);
- r600_init_atom(rctx, &rctx->dsa_state.atom, id++, r600_emit_cso_state, 0);
- r600_init_atom(rctx, &rctx->poly_offset_state.atom, id++, r600_emit_polygon_offset, 6);
- r600_init_atom(rctx, &rctx->rasterizer_state.atom, id++, r600_emit_cso_state, 0);
- r600_init_atom(rctx, &rctx->scissor.atom, id++, r600_emit_scissor_state, 4);
- r600_init_atom(rctx, &rctx->config_state.atom, id++, r600_emit_config_state, 3);
- r600_init_atom(rctx, &rctx->stencil_ref.atom, id++, r600_emit_stencil_ref, 4);
- r600_init_atom(rctx, &rctx->viewport.atom, id++, r600_emit_viewport_state, 8);
- r600_init_atom(rctx, &rctx->vertex_fetch_shader.atom, id++, r600_emit_vertex_fetch_shader, 5);
+static void r600_emit_gs_rings(struct r600_context *rctx, struct r600_atom *a)
+{
+ struct pipe_screen *screen = rctx->b.b.screen;
+ struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct r600_gs_rings_state *state = (struct r600_gs_rings_state*)a;
+ struct r600_resource *rbuffer;
- rctx->context.create_blend_state = r600_create_blend_state;
- rctx->context.create_depth_stencil_alpha_state = r600_create_dsa_state;
- rctx->context.create_rasterizer_state = r600_create_rs_state;
- rctx->context.create_sampler_state = r600_create_sampler_state;
- rctx->context.create_sampler_view = r600_create_sampler_view;
- rctx->context.set_framebuffer_state = r600_set_framebuffer_state;
- rctx->context.set_polygon_stipple = r600_set_polygon_stipple;
- rctx->context.set_scissor_state = r600_set_scissor_state;
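+ /* Idle the 3D engine and flush the VGT before and after reprogramming the ESGS/GSVS ring registers. */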
+ r600_write_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH));
+
+ if (state->enable) {
+ rbuffer =(struct r600_resource*)state->esgs_ring.buffer;
+ r600_write_config_reg(cs, R_008C40_SQ_ESGS_RING_BASE,
+ (r600_resource_va(screen, &rbuffer->b.b)) >> 8);
+ radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
+ radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rbuffer, RADEON_USAGE_READWRITE));
+ r600_write_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE,
+ state->esgs_ring.buffer_size >> 8);
+
+ rbuffer =(struct r600_resource*)state->gsvs_ring.buffer;
+ r600_write_config_reg(cs, R_008C48_SQ_GSVS_RING_BASE,
+ (r600_resource_va(screen, &rbuffer->b.b)) >> 8);
+ radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
+ radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rbuffer, RADEON_USAGE_READWRITE));
+ r600_write_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE,
+ state->gsvs_ring.buffer_size >> 8);
+ } else {
+ r600_write_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE, 0);
+ r600_write_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE, 0);
+ }
+
+ r600_write_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH));
}
/* Adjust GPR allocation on R6xx/R7xx */
bool r600_adjust_gprs(struct r600_context *rctx)
{
unsigned num_ps_gprs = rctx->ps_shader->current->shader.bc.ngpr;
- unsigned num_vs_gprs = rctx->vs_shader->current->shader.bc.ngpr;
+ unsigned num_vs_gprs, num_es_gprs, num_gs_gprs;
unsigned new_num_ps_gprs = num_ps_gprs;
- unsigned new_num_vs_gprs = num_vs_gprs;
+ unsigned new_num_vs_gprs, new_num_es_gprs, new_num_gs_gprs;
unsigned cur_num_ps_gprs = G_008C04_NUM_PS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_1);
unsigned cur_num_vs_gprs = G_008C04_NUM_VS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_1);
+ unsigned cur_num_gs_gprs = G_008C08_NUM_GS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_2);
+ unsigned cur_num_es_gprs = G_008C08_NUM_ES_GPRS(rctx->config_state.sq_gpr_resource_mgmt_2);
unsigned def_num_ps_gprs = rctx->default_ps_gprs;
unsigned def_num_vs_gprs = rctx->default_vs_gprs;
+ unsigned def_num_gs_gprs = 0;
+ unsigned def_num_es_gprs = 0;
unsigned def_num_clause_temp_gprs = rctx->r6xx_num_clause_temp_gprs;
/* hardware will reserve twice num_clause_temp_gprs */
- unsigned max_gprs = def_num_ps_gprs + def_num_vs_gprs + def_num_clause_temp_gprs * 2;
- unsigned tmp;
+ unsigned max_gprs = def_num_gs_gprs + def_num_es_gprs + def_num_ps_gprs + def_num_vs_gprs + def_num_clause_temp_gprs * 2;
+ unsigned tmp, tmp2;
+
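+ /* With a GS, the API vertex shader runs on the ES stage and the GS copy shader occupies the hardware VS stage. */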
+ if (rctx->gs_shader) {
+ num_es_gprs = rctx->vs_shader->current->shader.bc.ngpr;
+ num_gs_gprs = rctx->gs_shader->current->shader.bc.ngpr;
+ num_vs_gprs = rctx->gs_shader->current->gs_copy_shader->shader.bc.ngpr;
+ } else {
+ num_es_gprs = 0;
+ num_gs_gprs = 0;
+ num_vs_gprs = rctx->vs_shader->current->shader.bc.ngpr;
+ }
+ new_num_vs_gprs = num_vs_gprs;
+ new_num_es_gprs = num_es_gprs;
+ new_num_gs_gprs = num_gs_gprs;
/* the sum of all SQ_GPR_RESOURCE_MGMT*.NUM_*_GPRS must <= to max_gprs */
- if (new_num_ps_gprs > cur_num_ps_gprs || new_num_vs_gprs > cur_num_vs_gprs) {
+ if (new_num_ps_gprs > cur_num_ps_gprs || new_num_vs_gprs > cur_num_vs_gprs ||
+ new_num_es_gprs > cur_num_es_gprs || new_num_gs_gprs > cur_num_gs_gprs) {
/* try to switch back to the defaults */
- if (new_num_ps_gprs > def_num_ps_gprs || new_num_vs_gprs > def_num_vs_gprs) {
+ if (new_num_ps_gprs > def_num_ps_gprs || new_num_vs_gprs > def_num_vs_gprs ||
+ new_num_gs_gprs > def_num_gs_gprs || new_num_es_gprs > def_num_es_gprs) {
/* always privilege vs stage so that at worst we have the
* pixel stage producing wrong output (not the vertex
* stage) */
- new_num_ps_gprs = max_gprs - (new_num_vs_gprs + def_num_clause_temp_gprs * 2);
+ new_num_ps_gprs = max_gprs - ((new_num_vs_gprs - new_num_es_gprs - new_num_gs_gprs) + def_num_clause_temp_gprs * 2);
new_num_vs_gprs = num_vs_gprs;
+ new_num_gs_gprs = num_gs_gprs;
+ new_num_es_gprs = num_es_gprs;
} else {
new_num_ps_gprs = def_num_ps_gprs;
new_num_vs_gprs = def_num_vs_gprs;
+ new_num_es_gprs = def_num_es_gprs;
+ new_num_gs_gprs = def_num_gs_gprs;
}
} else {
return true;
* it will lockup. So in this case just discard the draw command
* and don't change the current gprs repartitions.
*/
- if (num_ps_gprs > new_num_ps_gprs || num_vs_gprs > new_num_vs_gprs) {
- R600_ERR("ps & vs shader require too many register (%d + %d) "
+ if (num_ps_gprs > new_num_ps_gprs || num_vs_gprs > new_num_vs_gprs ||
+ num_gs_gprs > new_num_gs_gprs || num_es_gprs > new_num_es_gprs) {
+ R600_ERR("shaders require too many register (%d + %d + %d + %d) "
"for a combined maximum of %d\n",
- num_ps_gprs, num_vs_gprs, max_gprs);
+ num_ps_gprs, num_vs_gprs, num_es_gprs, num_gs_gprs, max_gprs);
return false;
}
tmp = S_008C04_NUM_PS_GPRS(new_num_ps_gprs) |
S_008C04_NUM_VS_GPRS(new_num_vs_gprs) |
S_008C04_NUM_CLAUSE_TEMP_GPRS(def_num_clause_temp_gprs);
- if (rctx->config_state.sq_gpr_resource_mgmt_1 != tmp) {
+
+ tmp2 = S_008C08_NUM_ES_GPRS(new_num_es_gprs) |
+ S_008C08_NUM_GS_GPRS(new_num_gs_gprs);
+ if (rctx->config_state.sq_gpr_resource_mgmt_1 != tmp || rctx->config_state.sq_gpr_resource_mgmt_2 != tmp2) {
rctx->config_state.sq_gpr_resource_mgmt_1 = tmp;
+ rctx->config_state.sq_gpr_resource_mgmt_2 = tmp2;
rctx->config_state.atom.dirty = true;
- rctx->flags |= R600_CONTEXT_WAIT_3D_IDLE;
+ rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
}
return true;
}
r600_init_command_buffer(cb, 256);
/* R6xx requires this packet at the start of each command buffer */
- if (rctx->chip_class == R600) {
+ if (rctx->b.chip_class == R600) {
r600_store_value(cb, PKT3(PKT3_START_3D_CMDBUF, 0, 0));
r600_store_value(cb, 0);
}
r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0));
r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
- family = rctx->family;
+ family = rctx->b.family;
ps_prio = 0;
vs_prio = 1;
gs_prio = 2;
num_es_stack_entries = 16;
break;
case CHIP_RV770:
- num_ps_gprs = 192;
+ num_ps_gprs = 130;
num_vs_gprs = 56;
num_temp_gprs = 4;
- num_gs_gprs = 0;
- num_es_gprs = 0;
- num_ps_threads = 188;
+ num_gs_gprs = 31;
+ num_es_gprs = 31;
+ num_ps_threads = 180;
num_vs_threads = 60;
- num_gs_threads = 0;
- num_es_threads = 0;
- num_ps_stack_entries = 256;
- num_vs_stack_entries = 256;
- num_gs_stack_entries = 0;
- num_es_stack_entries = 0;
+ num_gs_threads = 4;
+ num_es_threads = 4;
+ num_ps_stack_entries = 128;
+ num_vs_stack_entries = 128;
+ num_gs_stack_entries = 128;
+ num_es_stack_entries = 128;
break;
case CHIP_RV730:
case CHIP_RV740:
num_temp_gprs = 4;
num_gs_gprs = 0;
num_es_gprs = 0;
- num_ps_threads = 188;
+ num_ps_threads = 180;
num_vs_threads = 60;
- num_gs_threads = 0;
- num_es_threads = 0;
+ num_gs_threads = 4;
+ num_es_threads = 4;
num_ps_stack_entries = 128;
num_vs_stack_entries = 128;
num_gs_stack_entries = 0;
num_temp_gprs = 4;
num_gs_gprs = 0;
num_es_gprs = 0;
- num_ps_threads = 144;
+ num_ps_threads = 136;
num_vs_threads = 48;
- num_gs_threads = 0;
- num_es_threads = 0;
+ num_gs_threads = 4;
+ num_es_threads = 4;
num_ps_stack_entries = 128;
num_vs_stack_entries = 128;
num_gs_stack_entries = 0;
r600_store_config_reg(cb, R_009714_VC_ENHANCE, 0);
- if (rctx->chip_class >= R700) {
+ if (rctx->b.chip_class >= R700) {
r600_store_config_reg(cb, R_008D8C_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0x00004000);
r600_store_config_reg(cb, R_009830_DB_DEBUG, 0);
r600_store_config_reg(cb, R_009838_DB_WATERMARKS, 0x00420204);
r600_store_value(cb, 0); /* R_0286E0_SPI_FOG_FUNC_SCALE */
r600_store_value(cb, 0); /* R_0286E4_SPI_FOG_FUNC_BIAS */
- r600_store_context_reg_seq(cb, R_028D2C_DB_SRESULTS_COMPARE_STATE1, 2);
+ r600_store_context_reg_seq(cb, R_028D28_DB_SRESULTS_COMPARE_STATE0, 3);
+ r600_store_value(cb, 0); /* R_028D28_DB_SRESULTS_COMPARE_STATE0 */
r600_store_value(cb, 0); /* R_028D2C_DB_SRESULTS_COMPARE_STATE1 */
r600_store_value(cb, 0); /* R_028D30_DB_PRELOAD_CONTROL */
r600_store_context_reg(cb, R_028200_PA_SC_WINDOW_OFFSET, 0);
r600_store_context_reg(cb, R_02820C_PA_SC_CLIPRECT_RULE, 0xFFFF);
- if (rctx->chip_class >= R700) {
+ if (rctx->b.chip_class >= R700) {
r600_store_context_reg(cb, R_028230_PA_SC_EDGERULE, 0xAAAAAAAA);
}
r600_store_value(cb, 0); /* R_028240_PA_SC_GENERIC_SCISSOR_TL */
r600_store_value(cb, S_028244_BR_X(8192) | S_028244_BR_Y(8192)); /* R_028244_PA_SC_GENERIC_SCISSOR_BR */
- r600_store_context_reg_seq(cb, R_0288CC_SQ_PGM_CF_OFFSET_PS, 2);
+ r600_store_context_reg_seq(cb, R_0288CC_SQ_PGM_CF_OFFSET_PS, 5);
r600_store_value(cb, 0); /* R_0288CC_SQ_PGM_CF_OFFSET_PS */
r600_store_value(cb, 0); /* R_0288D0_SQ_PGM_CF_OFFSET_VS */
+ r600_store_value(cb, 0); /* R_0288D4_SQ_PGM_CF_OFFSET_GS */
+ r600_store_value(cb, 0); /* R_0288D8_SQ_PGM_CF_OFFSET_ES */
+ r600_store_value(cb, 0); /* R_0288DC_SQ_PGM_CF_OFFSET_FS */
r600_store_context_reg(cb, R_0288E0_SQ_VTX_SEMANTIC_CLEAR, ~0);
r600_store_value(cb, 0); /* R_028404_VGT_MIN_VTX_INDX */
r600_store_context_reg(cb, R_0288A4_SQ_PGM_RESOURCES_FS, 0);
- r600_store_context_reg(cb, R_0288DC_SQ_PGM_CF_OFFSET_FS, 0);
- if (rctx->chip_class == R700 && rctx->screen->has_streamout)
+ if (rctx->b.chip_class == R700 && rctx->screen->b.has_streamout)
r600_store_context_reg(cb, R_028354_SX_SURFACE_SYNC, S_028354_SURFACE_SYNC_MASK(0xf));
r600_store_context_reg(cb, R_028800_DB_DEPTH_CONTROL, 0);
- if (rctx->screen->has_streamout) {
+ if (rctx->screen->b.has_streamout) {
r600_store_context_reg(cb, R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET, 0);
}
r600_store_loop_const(cb, R_03E200_SQ_LOOP_CONST_0, 0x1000FFF);
r600_store_loop_const(cb, R_03E200_SQ_LOOP_CONST_0 + (32 * 4), 0x1000FFF);
+ r600_store_loop_const(cb, R_03E200_SQ_LOOP_CONST_0 + (64 * 4), 0x1000FFF);
}
-void r600_pipe_shader_ps(struct pipe_context *ctx, struct r600_pipe_shader *shader)
+void r600_update_ps_state(struct pipe_context *ctx, struct r600_pipe_shader *shader)
{
struct r600_context *rctx = (struct r600_context *)ctx;
- struct r600_pipe_state *rstate = &shader->rstate;
+ struct r600_command_buffer *cb = &shader->command_buffer;
struct r600_shader *rshader = &shader->shader;
unsigned i, exports_ps, num_cout, spi_ps_in_control_0, spi_input_z, spi_ps_in_control_1, db_shader_control;
int pos_index = -1, face_index = -1;
unsigned z_export = 0, stencil_export = 0;
unsigned sprite_coord_enable = rctx->rasterizer ? rctx->rasterizer->sprite_coord_enable : 0;
- rstate->nregs = 0;
+ if (!cb->buf) {
+ r600_init_command_buffer(cb, 64);
+ } else {
+ cb->num_dw = 0;
+ }
+ r600_store_context_reg_seq(cb, R_028644_SPI_PS_INPUT_CNTL_0, rshader->ninput);
for (i = 0; i < rshader->ninput; i++) {
if (rshader->input[i].name == TGSI_SEMANTIC_POSITION)
pos_index = i;
tmp |= S_028644_SEL_LINEAR(1);
}
- r600_pipe_state_add_reg(rstate, R_028644_SPI_PS_INPUT_CNTL_0 + i * 4,
- tmp);
+ r600_store_value(cb, tmp);
}
db_shader_control = 0;
S_0286CC_POSITION_CENTROID(rshader->input[pos_index].centroid) |
S_0286CC_POSITION_ADDR(rshader->input[pos_index].gpr) |
S_0286CC_BARYC_SAMPLE_CNTL(1));
- spi_input_z |= 1;
+ spi_input_z |= S_0286D8_PROVIDE_Z_TO_SPI(1);
}
spi_ps_in_control_1 = 0;
}
/* HW bug in original R600 */
- if (rctx->family == CHIP_R600)
+ if (rctx->b.family == CHIP_R600)
ufi = 1;
- r600_pipe_state_add_reg(rstate, R_0286CC_SPI_PS_IN_CONTROL_0, spi_ps_in_control_0);
- r600_pipe_state_add_reg(rstate, R_0286D0_SPI_PS_IN_CONTROL_1, spi_ps_in_control_1);
- r600_pipe_state_add_reg(rstate, R_0286D8_SPI_INPUT_Z, spi_input_z);
- r600_pipe_state_add_reg_bo(rstate,
- R_028840_SQ_PGM_START_PS,
- 0, shader->bo, RADEON_USAGE_READ);
- r600_pipe_state_add_reg(rstate,
- R_028850_SQ_PGM_RESOURCES_PS,
- S_028850_NUM_GPRS(rshader->bc.ngpr) |
- S_028850_STACK_SIZE(rshader->bc.nstack) |
- S_028850_UNCACHED_FIRST_INST(ufi));
- r600_pipe_state_add_reg(rstate,
- R_028854_SQ_PGM_EXPORTS_PS,
- exports_ps);
+ r600_store_context_reg_seq(cb, R_0286CC_SPI_PS_IN_CONTROL_0, 2);
+ r600_store_value(cb, spi_ps_in_control_0); /* R_0286CC_SPI_PS_IN_CONTROL_0 */
+ r600_store_value(cb, spi_ps_in_control_1); /* R_0286D0_SPI_PS_IN_CONTROL_1 */
+
+ r600_store_context_reg(cb, R_0286D8_SPI_INPUT_Z, spi_input_z);
+
+ r600_store_context_reg_seq(cb, R_028850_SQ_PGM_RESOURCES_PS, 2);
+ r600_store_value(cb, /* R_028850_SQ_PGM_RESOURCES_PS*/
+ S_028850_NUM_GPRS(rshader->bc.ngpr) |
+ S_028850_STACK_SIZE(rshader->bc.nstack) |
+ S_028850_UNCACHED_FIRST_INST(ufi));
+ r600_store_value(cb, exports_ps); /* R_028854_SQ_PGM_EXPORTS_PS */
+
+ r600_store_context_reg(cb, R_028840_SQ_PGM_START_PS, 0);
+ /* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */
+
/* only set some bits here, the other bits are set in the dsa state */
shader->db_shader_control = db_shader_control;
shader->ps_depth_export = z_export | stencil_export;
shader->flatshade = rctx->rasterizer->flatshade;
}
-void r600_pipe_shader_vs(struct pipe_context *ctx, struct r600_pipe_shader *shader)
+void r600_update_vs_state(struct pipe_context *ctx, struct r600_pipe_shader *shader)
{
- struct r600_context *rctx = (struct r600_context *)ctx;
- struct r600_pipe_state *rstate = &shader->rstate;
+ struct r600_command_buffer *cb = &shader->command_buffer;
struct r600_shader *rshader = &shader->shader;
unsigned spi_vs_out_id[10] = {};
unsigned i, tmp, nparams = 0;
- /* clear previous register */
- rstate->nregs = 0;
-
for (i = 0; i < rshader->noutput; i++) {
if (rshader->output[i].spi_sid) {
tmp = rshader->output[i].spi_sid << ((nparams & 3) * 8);
}
}
+ r600_init_command_buffer(cb, 32);
+
+ r600_store_context_reg_seq(cb, R_028614_SPI_VS_OUT_ID_0, 10);
for (i = 0; i < 10; i++) {
- r600_pipe_state_add_reg(rstate,
- R_028614_SPI_VS_OUT_ID_0 + i * 4,
- spi_vs_out_id[i]);
+ r600_store_value(cb, spi_vs_out_id[i]);
}
/* Certain attributes (position, psize, etc.) don't count as params.
if (nparams < 1)
nparams = 1;
- r600_pipe_state_add_reg(rstate,
- R_0286C4_SPI_VS_OUT_CONFIG,
- S_0286C4_VS_EXPORT_COUNT(nparams - 1));
- r600_pipe_state_add_reg(rstate,
- R_028868_SQ_PGM_RESOURCES_VS,
- S_028868_NUM_GPRS(rshader->bc.ngpr) |
- S_028868_STACK_SIZE(rshader->bc.nstack));
- r600_pipe_state_add_reg_bo(rstate,
- R_028858_SQ_PGM_START_VS,
- 0, shader->bo, RADEON_USAGE_READ);
+ r600_store_context_reg(cb, R_0286C4_SPI_VS_OUT_CONFIG,
+ S_0286C4_VS_EXPORT_COUNT(nparams - 1));
+ r600_store_context_reg(cb, R_028868_SQ_PGM_RESOURCES_VS,
+ S_028868_NUM_GPRS(rshader->bc.ngpr) |
+ S_028868_STACK_SIZE(rshader->bc.nstack));
+ r600_store_context_reg(cb, R_028858_SQ_PGM_START_VS, 0);
+ /* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */
shader->pa_cl_vs_out_cntl =
S_02881C_VS_OUT_CCDIST0_VEC_ENA((rshader->clip_dist_write & 0x0F) != 0) |
S_02881C_USE_VTX_POINT_SIZE(rshader->vs_out_point_size);
}
+static unsigned r600_conv_prim_to_gs_out(unsigned mode)
+{
+ static const int prim_conv[] = {
+ V_028A6C_OUTPRIM_TYPE_POINTLIST,
+ V_028A6C_OUTPRIM_TYPE_LINESTRIP,
+ V_028A6C_OUTPRIM_TYPE_LINESTRIP,
+ V_028A6C_OUTPRIM_TYPE_LINESTRIP,
+ V_028A6C_OUTPRIM_TYPE_TRISTRIP,
+ V_028A6C_OUTPRIM_TYPE_TRISTRIP,
+ V_028A6C_OUTPRIM_TYPE_TRISTRIP,
+ V_028A6C_OUTPRIM_TYPE_TRISTRIP,
+ V_028A6C_OUTPRIM_TYPE_TRISTRIP,
+ V_028A6C_OUTPRIM_TYPE_TRISTRIP,
+ V_028A6C_OUTPRIM_TYPE_LINESTRIP,
+ V_028A6C_OUTPRIM_TYPE_LINESTRIP,
+ V_028A6C_OUTPRIM_TYPE_TRISTRIP,
+ V_028A6C_OUTPRIM_TYPE_TRISTRIP,
+ V_028A6C_OUTPRIM_TYPE_TRISTRIP
+ };
+ assert(mode < Elements(prim_conv));
+
+ return prim_conv[mode];
+}
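As a quick sanity check of the mapping above (the table is indexed by the gallium PIPE_PRIM_* value of the draw), two illustrative assertions that are not part of the patch:

/* Plain triangles (PIPE_PRIM_TRIANGLES == 4) come out of the GS as strips: */
assert(r600_conv_prim_to_gs_out(PIPE_PRIM_TRIANGLES) == V_028A6C_OUTPRIM_TYPE_TRISTRIP);
/* Line strips stay line strips: */
assert(r600_conv_prim_to_gs_out(PIPE_PRIM_LINE_STRIP) == V_028A6C_OUTPRIM_TYPE_LINESTRIP);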
+
+void r600_update_gs_state(struct pipe_context *ctx, struct r600_pipe_shader *shader)
+{
+ struct r600_context *rctx = (struct r600_context *)ctx;
+ struct r600_command_buffer *cb = &shader->command_buffer;
+ struct r600_shader *rshader = &shader->shader;
+ struct r600_shader *cp_shader = &shader->gs_copy_shader->shader;
+ unsigned gsvs_itemsize =
+ (cp_shader->ring_item_size * rshader->gs_max_out_vertices) >> 2;
+
+ r600_init_command_buffer(cb, 64);
+
+ /* VGT_GS_MODE is written by r600_emit_shader_stages */
+ r600_store_context_reg(cb, R_028AB8_VGT_VTX_CNT_EN, 1);
+
+ if (rctx->b.chip_class >= R700) {
+ r600_store_context_reg(cb, R_028B38_VGT_GS_MAX_VERT_OUT,
+ S_028B38_MAX_VERT_OUT(rshader->gs_max_out_vertices));
+ }
+ r600_store_context_reg(cb, R_028A6C_VGT_GS_OUT_PRIM_TYPE,
+ r600_conv_prim_to_gs_out(rshader->gs_output_prim));
+
+ r600_store_context_reg_seq(cb, R_0288C8_SQ_GS_VERT_ITEMSIZE, 4);
+ r600_store_value(cb, cp_shader->ring_item_size >> 2);
+ r600_store_value(cb, 0);
+ r600_store_value(cb, 0);
+ r600_store_value(cb, 0);
+
+ r600_store_context_reg(cb, R_0288A8_SQ_ESGS_RING_ITEMSIZE,
+ (rshader->ring_item_size) >> 2);
+
+ r600_store_context_reg(cb, R_0288AC_SQ_GSVS_RING_ITEMSIZE,
+ gsvs_itemsize);
+
+ /* FIXME calculate these values somehow ??? */
+ r600_store_config_reg_seq(cb, R_0088C8_VGT_GS_PER_ES, 2);
+ r600_store_value(cb, 0x80); /* GS_PER_ES */
+ r600_store_value(cb, 0x100); /* ES_PER_GS */
+ r600_store_config_reg_seq(cb, R_0088E8_VGT_GS_PER_VS, 1);
+ r600_store_value(cb, 0x2); /* GS_PER_VS */
+
+ r600_store_context_reg(cb, R_02887C_SQ_PGM_RESOURCES_GS,
+ S_02887C_NUM_GPRS(rshader->bc.ngpr) |
+ S_02887C_STACK_SIZE(rshader->bc.nstack));
+ r600_store_context_reg(cb, R_02886C_SQ_PGM_START_GS,
+ r600_resource_va(ctx->screen, (void *)shader->bo) >> 8);
+ /* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */
+}
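To make the ring item sizes above concrete, a worked example with assumed numbers (not taken from the patch): if the GS copy shader writes 16 bytes per vertex and the geometry shader declares at most 6 output vertices, then

/* Assumed values for illustration only:                              */
/*   cp_shader->ring_item_size    = 16 bytes                          */
/*   rshader->gs_max_out_vertices = 6                                 */
/*   SQ_GS_VERT_ITEMSIZE          = 16 >> 2       = 4 dwords          */
/*   SQ_GSVS_RING_ITEMSIZE        = (16 * 6) >> 2 = 24 dwords         */

i.e. the >> 2 shifts convert the byte-based item sizes into the dword units the registers expect.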
+
+void r600_update_es_state(struct pipe_context *ctx, struct r600_pipe_shader *shader)
+{
+ struct r600_command_buffer *cb = &shader->command_buffer;
+ struct r600_shader *rshader = &shader->shader;
+
+ r600_init_command_buffer(cb, 32);
+
+ r600_store_context_reg(cb, R_028890_SQ_PGM_RESOURCES_ES,
+ S_028890_NUM_GPRS(rshader->bc.ngpr) |
+ S_028890_STACK_SIZE(rshader->bc.nstack));
+ r600_store_context_reg(cb, R_028880_SQ_PGM_START_ES,
+ r600_resource_va(ctx->screen, (void *)shader->bo) >> 8);
+ /* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */
+}
+

void *r600_create_resolve_blend(struct r600_context *rctx)
{
struct pipe_blend_state blend;
blend.rt[i].alpha_src_factor = PIPE_BLENDFACTOR_ZERO;
blend.rt[i].alpha_dst_factor = PIPE_BLENDFACTOR_ZERO;
}
- return r600_create_blend_state_mode(&rctx->context, &blend, V_028808_SPECIAL_RESOLVE_BOX);
+ return r600_create_blend_state_mode(&rctx->b.b, &blend, V_028808_SPECIAL_RESOLVE_BOX);
}
void *r700_create_resolve_blend(struct r600_context *rctx)
memset(&blend, 0, sizeof(blend));
blend.independent_blend_enable = true;
blend.rt[0].colormask = 0xf;
- return r600_create_blend_state_mode(&rctx->context, &blend, V_028808_SPECIAL_RESOLVE_BOX);
+ return r600_create_blend_state_mode(&rctx->b.b, &blend, V_028808_SPECIAL_RESOLVE_BOX);
}
void *r600_create_decompress_blend(struct r600_context *rctx)
memset(&blend, 0, sizeof(blend));
blend.independent_blend_enable = true;
blend.rt[0].colormask = 0xf;
- return r600_create_blend_state_mode(&rctx->context, &blend, V_028808_SPECIAL_EXPAND_SAMPLES);
+ return r600_create_blend_state_mode(&rctx->b.b, &blend, V_028808_SPECIAL_EXPAND_SAMPLES);
}
void *r600_create_db_flush_dsa(struct r600_context *rctx)
struct pipe_depth_stencil_alpha_state dsa;
boolean quirk = false;
- if (rctx->family == CHIP_RV610 || rctx->family == CHIP_RV630 ||
- rctx->family == CHIP_RV620 || rctx->family == CHIP_RV635)
+ if (rctx->b.family == CHIP_RV610 || rctx->b.family == CHIP_RV630 ||
+ rctx->b.family == CHIP_RV620 || rctx->b.family == CHIP_RV635)
quirk = true;
memset(&dsa, 0, sizeof(dsa));
dsa.stencil[0].writemask = 0xff;
}
- return rctx->context.create_depth_stencil_alpha_state(&rctx->context, &dsa);
+ return rctx->b.b.create_depth_stencil_alpha_state(&rctx->b.b, &dsa);
}
void r600_update_db_shader_control(struct r600_context * rctx)
{
- bool dual_export = rctx->framebuffer.export_16bpc &&
- !rctx->ps_shader->current->ps_depth_export;
+ bool dual_export;
+ unsigned db_shader_control;
- unsigned db_shader_control = rctx->ps_shader->current->db_shader_control |
- S_02880C_DUAL_EXPORT_ENABLE(dual_export);
+ if (!rctx->ps_shader) {
+ return;
+ }
+
+ dual_export = rctx->framebuffer.export_16bpc &&
+ !rctx->ps_shader->current->ps_depth_export;
+
+ db_shader_control = rctx->ps_shader->current->db_shader_control |
+ S_02880C_DUAL_EXPORT_ENABLE(dual_export);
/* When alpha test is enabled we can't trust the hw to make the proper
* decision on the order in which ztest should be run related to fragment
unsigned pitch,
unsigned bpp)
{
- struct radeon_winsys_cs *cs = rctx->rings.dma.cs;
+ struct radeon_winsys_cs *cs = rctx->b.rings.dma.cs;
struct r600_texture *rsrc = (struct r600_texture*)src;
struct r600_texture *rdst = (struct r600_texture*)dst;
unsigned array_mode, lbpp, pitch_tile_max, slice_tile_max, size;
uint64_t base, addr;
	/* make sure that the dma ring is the only one active */
- rctx->rings.gfx.flush(rctx, RADEON_FLUSH_ASYNC);
+ rctx->b.rings.gfx.flush(rctx, RADEON_FLUSH_ASYNC);
dst_mode = rdst->surface.level[dst_level].mode;
src_mode = rsrc->surface.level[src_level].mode;
cheight = cheight > copy_height ? copy_height : cheight;
size = (cheight * pitch) >> 2;
	/* emit reloc before writing cs so that cs is always in a consistent state */
- r600_context_bo_reloc(rctx, &rctx->rings.dma, &rsrc->resource, RADEON_USAGE_READ);
- r600_context_bo_reloc(rctx, &rctx->rings.dma, &rdst->resource, RADEON_USAGE_WRITE);
+ r600_context_bo_reloc(&rctx->b, &rctx->b.rings.dma, &rsrc->resource, RADEON_USAGE_READ);
+ r600_context_bo_reloc(&rctx->b, &rctx->b.rings.dma, &rdst->resource, RADEON_USAGE_WRITE);
cs->buf[cs->cdw++] = DMA_PACKET(DMA_PACKET_COPY, 1, 0, size);
cs->buf[cs->cdw++] = base >> 8;
cs->buf[cs->cdw++] = (detile << 31) | (array_mode << 27) |
return TRUE;
}
-boolean r600_dma_blit(struct pipe_context *ctx,
- struct pipe_resource *dst,
- unsigned dst_level,
- unsigned dst_x, unsigned dst_y, unsigned dst_z,
- struct pipe_resource *src,
- unsigned src_level,
- const struct pipe_box *src_box)
+static boolean r600_dma_blit(struct pipe_context *ctx,
+ struct pipe_resource *dst,
+ unsigned dst_level,
+ unsigned dst_x, unsigned dst_y, unsigned dst_z,
+ struct pipe_resource *src,
+ unsigned src_level,
+ const struct pipe_box *src_box)
{
struct r600_context *rctx = (struct r600_context *)ctx;
struct r600_texture *rsrc = (struct r600_texture*)src;
struct r600_texture *rdst = (struct r600_texture*)dst;
unsigned dst_pitch, src_pitch, bpp, dst_mode, src_mode, copy_height;
unsigned src_w, dst_w;
+ unsigned src_x, src_y;
- if (rctx->rings.dma.cs == NULL) {
+ if (rctx->b.rings.dma.cs == NULL) {
return FALSE;
}
+
+ if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
+ r600_dma_copy(rctx, dst, src, dst_x, src_box->x, src_box->width);
+ return TRUE;
+ }
+
if (src->format != dst->format) {
return FALSE;
}
+ src_x = util_format_get_nblocksx(src->format, src_box->x);
+ dst_x = util_format_get_nblocksx(src->format, dst_x);
+ src_y = util_format_get_nblocksy(src->format, src_box->y);
+ dst_y = util_format_get_nblocksy(src->format, dst_y);
+
bpp = rdst->surface.bpe;
dst_pitch = rdst->surface.level[dst_level].pitch_bytes;
src_pitch = rsrc->surface.level[src_level].pitch_bytes;
*/
src_offset= rsrc->surface.level[src_level].offset;
src_offset += rsrc->surface.level[src_level].slice_size * src_box->z;
- src_offset += src_box->y * src_pitch + src_box->x * bpp;
+ src_offset += src_y * src_pitch + src_x * bpp;
dst_offset = rdst->surface.level[dst_level].offset;
dst_offset += rdst->surface.level[dst_level].slice_size * dst_z;
dst_offset += dst_y * dst_pitch + dst_x * bpp;
r600_dma_copy(rctx, dst, src, dst_offset, src_offset, size);
} else {
return r600_dma_copy_tile(rctx, dst, dst_level, dst_x, dst_y, dst_z,
- src, src_level, src_box->x, src_box->y, src_box->z,
+ src, src_level, src_x, src_y, src_box->z,
copy_height, dst_pitch, bpp);
}
return TRUE;
}
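The util_format_get_nblocks{x,y} conversions added above are what make this path correct for block-compressed formats; a hedged example with assumed values:

/* Illustration only, not part of the patch: copying from pixel (16, 8) of a
 * DXT1 source (4x4 blocks, surface.bpe == 8 bytes per block):
 *   src_x = util_format_get_nblocksx(PIPE_FORMAT_DXT1_RGB, 16) = 4
 *   src_y = util_format_get_nblocksy(PIPE_FORMAT_DXT1_RGB,  8) = 2
 * so the slice-relative byte offset becomes
 *   src_offset += 2 * src_pitch + 4 * 8
 * i.e. the DMA engine is addressed in blocks and bytes, never in pixels. */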
+
+void r600_init_state_functions(struct r600_context *rctx)
+{
+ unsigned id = 4;
+
+	/* !!!
+	 * To avoid GPU lockups, registers must be emitted in a specific order
+	 * (no kidding ...). The order below is important and has been
+	 * partially inferred from analyzing the fglrx command stream.
+	 *
+	 * Don't reorder atoms without carefully checking the effect (GPU lockup
+	 * or piglit regression).
+	 * !!!
+	 */
+
+ r600_init_atom(rctx, &rctx->framebuffer.atom, id++, r600_emit_framebuffer_state, 0);
+
+ /* shader const */
+ r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_VERTEX].atom, id++, r600_emit_vs_constant_buffers, 0);
+ r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_GEOMETRY].atom, id++, r600_emit_gs_constant_buffers, 0);
+ r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_FRAGMENT].atom, id++, r600_emit_ps_constant_buffers, 0);
+
+	/* Sampler states must be emitted before TA_CNTL_AUX, otherwise the DISABLE_CUBE_WRAP
+	 * change does not take effect (TA_CNTL_AUX is emitted by r600_emit_seamless_cube_map).
+	 */
+ r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_VERTEX].states.atom, id++, r600_emit_vs_sampler_states, 0);
+ r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY].states.atom, id++, r600_emit_gs_sampler_states, 0);
+ r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT].states.atom, id++, r600_emit_ps_sampler_states, 0);
+ /* resource */
+ r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_VERTEX].views.atom, id++, r600_emit_vs_sampler_views, 0);
+ r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY].views.atom, id++, r600_emit_gs_sampler_views, 0);
+ r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT].views.atom, id++, r600_emit_ps_sampler_views, 0);
+ r600_init_atom(rctx, &rctx->vertex_buffer_state.atom, id++, r600_emit_vertex_buffers, 0);
+
+ r600_init_atom(rctx, &rctx->vgt_state.atom, id++, r600_emit_vgt_state, 7);
+
+ r600_init_atom(rctx, &rctx->seamless_cube_map.atom, id++, r600_emit_seamless_cube_map, 3);
+ r600_init_atom(rctx, &rctx->sample_mask.atom, id++, r600_emit_sample_mask, 3);
+ rctx->sample_mask.sample_mask = ~0;
+
+ r600_init_atom(rctx, &rctx->alphatest_state.atom, id++, r600_emit_alphatest_state, 6);
+ r600_init_atom(rctx, &rctx->blend_color.atom, id++, r600_emit_blend_color, 6);
+ r600_init_atom(rctx, &rctx->blend_state.atom, id++, r600_emit_cso_state, 0);
+ r600_init_atom(rctx, &rctx->cb_misc_state.atom, id++, r600_emit_cb_misc_state, 7);
+ r600_init_atom(rctx, &rctx->clip_misc_state.atom, id++, r600_emit_clip_misc_state, 6);
+ r600_init_atom(rctx, &rctx->clip_state.atom, id++, r600_emit_clip_state, 26);
+ r600_init_atom(rctx, &rctx->db_misc_state.atom, id++, r600_emit_db_misc_state, 7);
+ r600_init_atom(rctx, &rctx->db_state.atom, id++, r600_emit_db_state, 11);
+ r600_init_atom(rctx, &rctx->dsa_state.atom, id++, r600_emit_cso_state, 0);
+ r600_init_atom(rctx, &rctx->poly_offset_state.atom, id++, r600_emit_polygon_offset, 6);
+ r600_init_atom(rctx, &rctx->rasterizer_state.atom, id++, r600_emit_cso_state, 0);
+ r600_init_atom(rctx, &rctx->scissor.atom, id++, r600_emit_scissor_state, 4);
+ r600_init_atom(rctx, &rctx->config_state.atom, id++, r600_emit_config_state, 3);
+ r600_init_atom(rctx, &rctx->stencil_ref.atom, id++, r600_emit_stencil_ref, 4);
+ r600_init_atom(rctx, &rctx->viewport.atom, id++, r600_emit_viewport_state, 8);
+ r600_init_atom(rctx, &rctx->vertex_fetch_shader.atom, id++, r600_emit_vertex_fetch_shader, 5);
+ rctx->atoms[id++] = &rctx->b.streamout.begin_atom;
+ r600_init_atom(rctx, &rctx->vertex_shader.atom, id++, r600_emit_shader, 23);
+ r600_init_atom(rctx, &rctx->pixel_shader.atom, id++, r600_emit_shader, 0);
+ r600_init_atom(rctx, &rctx->geometry_shader.atom, id++, r600_emit_shader, 0);
+ r600_init_atom(rctx, &rctx->export_shader.atom, id++, r600_emit_shader, 0);
+ r600_init_atom(rctx, &rctx->shader_stages.atom, id++, r600_emit_shader_stages, 0);
+ r600_init_atom(rctx, &rctx->gs_rings.atom, id++, r600_emit_gs_rings, 0);
+
+ rctx->b.b.create_blend_state = r600_create_blend_state;
+ rctx->b.b.create_depth_stencil_alpha_state = r600_create_dsa_state;
+ rctx->b.b.create_rasterizer_state = r600_create_rs_state;
+ rctx->b.b.create_sampler_state = r600_create_sampler_state;
+ rctx->b.b.create_sampler_view = r600_create_sampler_view;
+ rctx->b.b.set_framebuffer_state = r600_set_framebuffer_state;
+ rctx->b.b.set_polygon_stipple = r600_set_polygon_stipple;
+ rctx->b.b.set_scissor_states = r600_set_scissor_states;
+ rctx->b.b.get_sample_position = r600_get_sample_position;
+ rctx->b.dma_copy = r600_dma_blit;
+}
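For context on why the ids above are assigned in this strict order: the draw path walks rctx->atoms[] by increasing id and emits whatever is dirty, so the atom order is exactly the register emission order the comment at the top of this function warns about. A minimal sketch of that loop, with R600_NUM_ATOMS and the dirty-flag handling assumed from the surrounding driver rather than shown in this patch:

static void r600_emit_dirty_atoms(struct r600_context *rctx)
{
	unsigned i;

	for (i = 0; i < R600_NUM_ATOMS; i++) {
		struct r600_atom *atom = rctx->atoms[i];

		if (!atom || !atom->dirty)
			continue;

		/* e.g. r600_emit_cso_state, r600_emit_shader, r600_emit_gs_rings, ... */
		atom->emit(rctx, atom);
		atom->dirty = false;
	}
}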
+/* this function must be last */