*/
#include "si_pipe.h"
-#include "si_shader.h"
#include "sid.h"
#include "radeon/r600_cs.h"
+#include "radeon/r600_query.h"
#include "util/u_dual_blend.h"
#include "util/u_format.h"
#include "util/u_format_s3tc.h"
#include "util/u_memory.h"
-#include "util/u_pstipple.h"
#include "util/u_resource.h"
/* Initialize an external atom (owned by ../radeon). */
{
struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
struct si_state_blend *blend = sctx->queued.named.blend;
- uint32_t cb_target_mask, i;
+ /* CB_COLORn_INFO.FORMAT=INVALID should disable unbound colorbuffers,
+ * but you never know. */
+ uint32_t cb_target_mask = sctx->framebuffer.colorbuf_enabled_4bit;
+ unsigned i;
- /* CB_COLORn_INFO.FORMAT=INVALID disables empty colorbuffer slots. */
if (blend)
- cb_target_mask = blend->cb_target_mask;
- else
- cb_target_mask = 0xffffffff;
+ cb_target_mask &= blend->cb_target_mask;
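+ /* colorbuf_enabled_4bit has 0xf set for every bound colorbuffer (see
+ * the framebuffer state setter below), so unbound slots stay masked
+ * off here regardless of the blend state.
+ */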
/* Avoid a hang that happens when dual source blending is enabled
* but there are not enough color outputs. This is undefined behavior,
if (sctx->b.family == CHIP_STONEY) {
unsigned spi_shader_col_format =
sctx->ps_shader.cso ?
- sctx->ps_shader.current->key.ps.epilog.spi_shader_col_format : 0;
+ sctx->ps_shader.current->key.part.ps.epilog.spi_shader_col_format : 0;
unsigned sx_ps_downconvert = 0;
unsigned sx_blend_opt_epsilon = 0;
unsigned sx_blend_opt_control = 0;
S_028760_ALPHA_COMB_FCN(V_028760_OPT_COMB_BLEND_DISABLED);
/* Only set dual source blending for MRT0 to avoid a hang. */
- if (i >= 1 && blend->dual_src_blend)
+ if (i >= 1 && blend->dual_src_blend) {
+ /* Vulkan does this for dual source blending. */
+ if (i == 1)
+ blend_cntl |= S_028780_ENABLE(1);
+
+ si_pm4_set_reg(pm4, R_028780_CB_BLEND0_CONTROL + i * 4, blend_cntl);
continue;
+ }
/* Only addition and subtraction equations are supported with
* dual source blending.
(eqRGB == PIPE_BLEND_MIN || eqRGB == PIPE_BLEND_MAX ||
eqA == PIPE_BLEND_MIN || eqA == PIPE_BLEND_MAX)) {
assert(!"Unsupported equation for dual source blending");
+ si_pm4_set_reg(pm4, R_028780_CB_BLEND0_CONTROL + i * 4, blend_cntl);
continue;
}
- if (!state->rt[j].colormask)
- continue;
-
/* cb_render_state will disable unused ones */
blend->cb_target_mask |= (unsigned)state->rt[j].colormask << (4 * i);
- if (!state->rt[j].blend_enable) {
+ if (!state->rt[j].colormask || !state->rt[j].blend_enable) {
si_pm4_set_reg(pm4, R_028780_CB_BLEND0_CONTROL + i * 4, blend_cntl);
continue;
}
}
if (sctx->b.family == CHIP_STONEY) {
+ /* Disable RB+ blend optimizations for dual source blending.
+ * Vulkan does this.
+ */
+ if (blend->dual_src_blend) {
+ for (int i = 0; i < 8; i++) {
+ sx_mrt_blend_opt[i] =
+ S_028760_COLOR_COMB_FCN(V_028760_OPT_COMB_NONE) |
+ S_028760_ALPHA_COMB_FCN(V_028760_OPT_COMB_NONE);
+ }
+ }
+
for (int i = 0; i < 8; i++)
si_pm4_set_reg(pm4, R_028760_SX_MRT0_BLEND_OPT + i * 4,
sx_mrt_blend_opt[i]);
static void si_emit_clip_regs(struct si_context *sctx, struct r600_atom *atom)
{
struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct si_shader *vs = si_get_vs_state(sctx);
struct tgsi_shader_info *info = si_get_vs_info(sctx);
+ struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
unsigned window_space =
info->properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
unsigned clipdist_mask =
info->writes_clipvertex ? SIX_BITS : info->clipdist_writemask;
- unsigned total_mask = clipdist_mask | (info->culldist_writemask << info->num_written_clipdistance);
+ unsigned ucp_mask = clipdist_mask ? 0 : rs->clip_plane_enable & SIX_BITS;
+ unsigned culldist_mask = info->culldist_writemask << info->num_written_clipdistance;
+ unsigned total_mask;
+ bool misc_vec_ena;
+
+ if (vs->key.opt.hw_vs.clip_disable) {
+ assert(!info->culldist_writemask);
+ clipdist_mask = 0;
+ culldist_mask = 0;
+ }
+ total_mask = clipdist_mask | culldist_mask;
+
+ /* Clip distances on points have no effect, so they need to be
+ * implemented as cull distances. This applies to the clipvertex case
+ * as well.
+ *
+ * Setting this for primitives other than points should have no adverse
+ * effects.
+ */
+ clipdist_mask &= rs->clip_plane_enable;
+ culldist_mask |= clipdist_mask;
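+ /* Example: a VS writing two clip distances with clip planes 0-1
+ * enabled gives clipdist_mask = 0x3; OR-ing that into culldist_mask
+ * means the same distances also cull points, where clipping alone
+ * would do nothing.
+ */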
+
+ misc_vec_ena = info->writes_psize || info->writes_edgeflag ||
+ info->writes_layer || info->writes_viewport_index;
radeon_set_context_reg(cs, R_02881C_PA_CL_VS_OUT_CNTL,
S_02881C_USE_VTX_POINT_SIZE(info->writes_psize) |
S_02881C_USE_VTX_VIEWPORT_INDX(info->writes_viewport_index) |
S_02881C_VS_OUT_CCDIST0_VEC_ENA((total_mask & 0x0F) != 0) |
S_02881C_VS_OUT_CCDIST1_VEC_ENA((total_mask & 0xF0) != 0) |
- S_02881C_VS_OUT_MISC_VEC_ENA(info->writes_psize ||
- info->writes_edgeflag ||
- info->writes_layer ||
- info->writes_viewport_index) |
- S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(1) |
- (sctx->queued.named.rasterizer->clip_plane_enable &
- clipdist_mask) | (info->culldist_writemask << 8));
+ S_02881C_VS_OUT_MISC_VEC_ENA(misc_vec_ena) |
+ S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(misc_vec_ena) |
+ clipdist_mask | (culldist_mask << 8));
radeon_set_context_reg(cs, R_028810_PA_CL_CLIP_CNTL,
- sctx->queued.named.rasterizer->pa_cl_clip_cntl |
- (clipdist_mask ? 0 :
- sctx->queued.named.rasterizer->clip_plane_enable & SIX_BITS) |
+ rs->pa_cl_clip_cntl |
+ ucp_mask |
S_028810_CLIP_DISABLE(window_space));
/* reuse needs to be set off if we write oViewport */
{
struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
- if (!rs || !rs->uses_poly_offset || !sctx->framebuffer.state.zsbuf)
+ if (!rs || !rs->uses_poly_offset || !sctx->framebuffer.state.zsbuf) {
+ si_pm4_bind_state(sctx, poly_offset, NULL);
return;
+ }
+ /* Use the user format, not db_render_format, so that the polygon
+ * offset behaves as expected by applications.
+ */
switch (sctx->framebuffer.state.zsbuf->texture->format) {
case PIPE_FORMAT_Z16_UNORM:
si_pm4_bind_state(sctx, poly_offset, &rs->pm4_poly_offset[0]);
if (!state)
return;
- if (sctx->framebuffer.nr_samples > 1 &&
- (!old_rs || old_rs->multisample_enable != rs->multisample_enable)) {
+ if (!old_rs || old_rs->multisample_enable != rs->multisample_enable) {
si_mark_atom_dirty(sctx, &sctx->db_render_state);
- if (sctx->b.family >= CHIP_POLARIS10)
+ /* Update the small primitive filter workaround if necessary. */
+ if (sctx->b.family >= CHIP_POLARIS10 &&
+ sctx->framebuffer.nr_samples > 1)
si_mark_atom_dirty(sctx, &sctx->msaa_sample_locs.atom);
}
si_mark_atom_dirty(sctx, &sctx->db_render_state);
}
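+/* Called via sctx->b.save_qbo_state so the shared query-buffer-object
+ * code in r600_query can save the compute state it is about to clobber:
+ * the bound program, constant buffer 0 and shader buffers 0-2.
+ */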
+static void si_save_qbo_state(struct pipe_context *ctx, struct r600_qbo_state *st)
+{
+ struct si_context *sctx = (struct si_context*)ctx;
+
+ st->saved_compute = sctx->cs_shader_state.program;
+
+ si_get_pipe_constant_buffer(sctx, PIPE_SHADER_COMPUTE, 0, &st->saved_const0);
+ si_get_shader_buffers(sctx, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo);
+}
+
static void si_emit_db_render_state(struct si_context *sctx, struct r600_atom *state)
{
struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
S_028010_DISABLE_SMEM_EXPCLEAR_OPTIMIZATION(sctx->db_stencil_disable_expclear) |
S_028010_DECOMPRESS_Z_ON_FLUSH(sctx->framebuffer.nr_samples >= 4));
- db_shader_control = S_02880C_ALPHA_TO_MASK_DISABLE(sctx->framebuffer.cb0_is_integer) |
- sctx->ps_db_shader_control;
+ db_shader_control = sctx->ps_db_shader_control;
/* Bug workaround for smoothing (overrasterization) on SI. */
if (sctx->b.chip_class == SI && sctx->smoothing_enabled) {
}
/* Disable the gl_SampleMask fragment shader output if MSAA is disabled. */
- if (sctx->framebuffer.nr_samples <= 1 || (rs && !rs->multisample_enable))
+ if (!rs || !rs->multisample_enable)
db_shader_control &= C_02880C_MASK_EXPORT_ENABLE;
if (sctx->b.family == CHIP_STONEY &&
case PIPE_FORMAT_Z16_UNORM:
return V_008F14_IMG_DATA_FORMAT_16;
case PIPE_FORMAT_X24S8_UINT:
+ case PIPE_FORMAT_S8X24_UINT:
+ /*
+ * Implemented as an 8_8_8_8 data format to fix texture
+ * gathers in stencil sampling. This affects at least
+ * GL45-CTS.texture_cube_map_array.sampling on VI.
+ */
+ return V_008F14_IMG_DATA_FORMAT_8_8_8_8;
case PIPE_FORMAT_Z24X8_UNORM:
case PIPE_FORMAT_Z24_UNORM_S8_UINT:
return V_008F14_IMG_DATA_FORMAT_8_24;
case PIPE_FORMAT_X8Z24_UNORM:
- case PIPE_FORMAT_S8X24_UINT:
case PIPE_FORMAT_S8_UINT_Z24_UNORM:
return V_008F14_IMG_DATA_FORMAT_24_8;
case PIPE_FORMAT_S8_UINT:
const struct util_format_description *desc,
int first_non_void)
{
- unsigned type;
int i;
if (desc->format == PIPE_FORMAT_R11G11B10_FLOAT)
return V_008F0C_BUF_DATA_FORMAT_10_11_11;
assert(first_non_void >= 0);
- type = desc->channel[first_non_void].type;
-
- if (type == UTIL_FORMAT_TYPE_FIXED)
- return V_008F0C_BUF_DATA_FORMAT_INVALID;
if (desc->nr_channels == 4 &&
desc->channel[0].size == 10 &&
}
break;
case 32:
- /* From the Southern Islands ISA documentation about MTBUF:
- * 'Memory reads of data in memory that is 32 or 64 bits do not
- * undergo any format conversion.'
- */
- if (type != UTIL_FORMAT_TYPE_FLOAT &&
- !desc->channel[first_non_void].pure_integer)
- return V_008F0C_BUF_DATA_FORMAT_INVALID;
-
switch (desc->nr_channels) {
case 1:
return V_008F0C_BUF_DATA_FORMAT_32;
switch (desc->channel[first_non_void].type) {
case UTIL_FORMAT_TYPE_SIGNED:
- if (desc->channel[first_non_void].normalized)
- return V_008F0C_BUF_NUM_FORMAT_SNORM;
- else if (desc->channel[first_non_void].pure_integer)
+ case UTIL_FORMAT_TYPE_FIXED:
+ if (desc->channel[first_non_void].size >= 32 ||
+ desc->channel[first_non_void].pure_integer)
return V_008F0C_BUF_NUM_FORMAT_SINT;
+ else if (desc->channel[first_non_void].normalized)
+ return V_008F0C_BUF_NUM_FORMAT_SNORM;
else
return V_008F0C_BUF_NUM_FORMAT_SSCALED;
break;
case UTIL_FORMAT_TYPE_UNSIGNED:
- if (desc->channel[first_non_void].normalized)
- return V_008F0C_BUF_NUM_FORMAT_UNORM;
- else if (desc->channel[first_non_void].pure_integer)
+ if (desc->channel[first_non_void].size >= 32 ||
+ desc->channel[first_non_void].pure_integer)
return V_008F0C_BUF_NUM_FORMAT_UINT;
+ else if (desc->channel[first_non_void].normalized)
+ return V_008F0C_BUF_NUM_FORMAT_UNORM;
else
return V_008F0C_BUF_NUM_FORMAT_USCALED;
break;
}
}
-static bool si_is_vertex_format_supported(struct pipe_screen *screen, enum pipe_format format)
+static unsigned si_is_vertex_format_supported(struct pipe_screen *screen,
+ enum pipe_format format,
+ unsigned usage)
{
const struct util_format_description *desc;
int first_non_void;
unsigned data_format;
+ assert((usage & ~(PIPE_BIND_SHADER_IMAGE |
+ PIPE_BIND_SAMPLER_VIEW |
+ PIPE_BIND_VERTEX_BUFFER)) == 0);
+
desc = util_format_description(format);
+
+ /* There are no native 8_8_8 or 16_16_16 data formats, and we currently
+ * select 8_8_8_8 and 16_16_16_16 instead. This works reasonably well
+ * for read-only access (with caveats surrounding bounds checks), but
+ * obviously fails for the write access we have to implement for
+ * shader images. Luckily, OpenGL doesn't expect this to be supported,
+ * so the only impact is on PBO uploads / downloads, which shouldn't
+ * be expected to be fast for GL_RGB anyway.
+ */
+ if (desc->block.bits == 3 * 8 ||
+ desc->block.bits == 3 * 16) {
+ if (usage & (PIPE_BIND_SHADER_IMAGE | PIPE_BIND_SAMPLER_VIEW)) {
+ usage &= ~(PIPE_BIND_SHADER_IMAGE | PIPE_BIND_SAMPLER_VIEW);
+ if (!usage)
+ return 0;
+ }
+ }
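+ /* e.g. PIPE_FORMAT_R8G8B8_UNORM (block.bits == 24) loses the
+ * sampler view / image bits here and can only remain supported
+ * as a vertex buffer format.
+ */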
+
first_non_void = util_format_get_first_non_void_channel(format);
data_format = si_translate_buffer_dataformat(screen, desc, first_non_void);
- return data_format != V_008F0C_BUF_DATA_FORMAT_INVALID;
+ if (data_format == V_008F0C_BUF_DATA_FORMAT_INVALID)
+ return 0;
+
+ return usage;
}
static bool si_is_colorbuffer_format_supported(enum pipe_format format)
if (usage & (PIPE_BIND_SAMPLER_VIEW |
PIPE_BIND_SHADER_IMAGE)) {
if (target == PIPE_BUFFER) {
- if (si_is_vertex_format_supported(screen, format))
- retval |= usage & (PIPE_BIND_SAMPLER_VIEW |
- PIPE_BIND_SHADER_IMAGE);
+ retval |= si_is_vertex_format_supported(
+ screen, format, usage & (PIPE_BIND_SAMPLER_VIEW |
+ PIPE_BIND_SHADER_IMAGE));
} else {
if (si_is_sampler_format_supported(screen, format))
retval |= usage & (PIPE_BIND_SAMPLER_VIEW |
retval |= PIPE_BIND_DEPTH_STENCIL;
}
- if ((usage & PIPE_BIND_VERTEX_BUFFER) &&
- si_is_vertex_format_supported(screen, format)) {
- retval |= PIPE_BIND_VERTEX_BUFFER;
+ if (usage & PIPE_BIND_VERTEX_BUFFER) {
+ retval |= si_is_vertex_format_supported(screen, format,
+ PIPE_BIND_VERTEX_BUFFER);
}
if ((usage & PIPE_BIND_LINEAR) &&
if (sctx->b.chip_class >= VI) {
unsigned max_uncompressed_block_size = 2;
- if (rtex->surface.nsamples > 1) {
+ if (rtex->resource.b.b.nr_samples > 1) {
if (rtex->surface.bpe == 1)
max_uncompressed_block_size = 0;
else if (rtex->surface.bpe == 2)
uint64_t z_offs, s_offs;
uint32_t db_htile_data_base, db_htile_surface;
- format = si_translate_dbformat(rtex->resource.b.b.format);
+ format = si_translate_dbformat(rtex->db_render_format);
if (format == V_028040_Z_INVALID) {
R600_ERR("Invalid DB format: %d, disabling DB.\n", rtex->resource.b.b.format);
z_offs += rtex->surface.level[level].offset;
s_offs += rtex->surface.stencil_level[level].offset;
- db_depth_info = S_02803C_ADDR5_SWIZZLE_MASK(1);
+ db_depth_info = S_02803C_ADDR5_SWIZZLE_MASK(!rtex->tc_compatible_htile);
z_info = S_028040_FORMAT(format);
if (rtex->resource.b.b.nr_samples > 1) {
*/
if (rtex->resource.b.b.nr_samples <= 1)
s_info |= S_028044_ALLOW_EXPCLEAR(1);
- } else
- /* Use all of the htile_buffer for depth if there's no stencil. */
+ } else if (!rtex->tc_compatible_htile) {
+ /* Use all of the htile_buffer for depth if there's no stencil.
+ * This must not be set when TC-compatible HTILE is enabled
+ * due to a hw bug.
+ */
s_info |= S_028044_TILE_STENCIL_DISABLE(1);
+ }
uint64_t va = rtex->htile_buffer->gpu_address;
db_htile_data_base = va >> 8;
db_htile_surface = S_028ABC_FULL_CACHE(1);
+
+ if (rtex->tc_compatible_htile) {
+ db_htile_surface |= S_028ABC_TC_COMPATIBLE(1);
+
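+ /* The DECOMPRESS_ON_N_ZPLANES threshold (how many Z planes a
+ * compressed tile may hold before it must be decompressed) depends
+ * on the sample count.
+ */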
+ switch (rtex->resource.b.b.nr_samples) {
+ case 0:
+ case 1:
+ z_info |= S_028040_DECOMPRESS_ON_N_ZPLANES(5);
+ break;
+ case 2:
+ case 4:
+ z_info |= S_028040_DECOMPRESS_ON_N_ZPLANES(3);
+ break;
+ case 8:
+ z_info |= S_028040_DECOMPRESS_ON_N_ZPLANES(2);
+ break;
+ default:
+ assert(0);
+ }
+ }
} else {
db_htile_data_base = 0;
db_htile_surface = 0;
struct pipe_constant_buffer constbuf = {0};
struct r600_surface *surf = NULL;
struct r600_texture *rtex;
- bool old_cb0_is_integer = sctx->framebuffer.cb0_is_integer;
bool old_any_dst_linear = sctx->framebuffer.any_dst_linear;
unsigned old_nr_samples = sctx->framebuffer.nr_samples;
int i;
si_dec_framebuffer_counters(&sctx->framebuffer.state);
util_copy_framebuffer_state(&sctx->framebuffer.state, state);
+ sctx->framebuffer.colorbuf_enabled_4bit = 0;
sctx->framebuffer.spi_shader_col_format = 0;
sctx->framebuffer.spi_shader_col_format_alpha = 0;
sctx->framebuffer.spi_shader_col_format_blend = 0;
sctx->framebuffer.compressed_cb_mask = 0;
sctx->framebuffer.nr_samples = util_framebuffer_get_num_samples(state);
sctx->framebuffer.log_samples = util_logbase2(sctx->framebuffer.nr_samples);
- sctx->framebuffer.cb0_is_integer = state->nr_cbufs && state->cbufs[0] &&
- util_format_is_pure_integer(state->cbufs[0]->format);
sctx->framebuffer.any_dst_linear = false;
- if (sctx->framebuffer.cb0_is_integer != old_cb0_is_integer)
- si_mark_atom_dirty(sctx, &sctx->db_render_state);
-
for (i = 0; i < state->nr_cbufs; i++) {
if (!state->cbufs[i])
continue;
si_initialize_color_surface(sctx, surf);
}
+ sctx->framebuffer.colorbuf_enabled_4bit |= 0xf << (i * 4);
sctx->framebuffer.spi_shader_col_format |=
surf->spi_shader_col_format << (i * 4);
sctx->framebuffer.spi_shader_col_format_alpha |=
if (surf->color_is_int8)
sctx->framebuffer.color_is_int8 |= 1 << i;
- if (rtex->fmask.size && rtex->cmask.size) {
+ if (rtex->fmask.size) {
sctx->framebuffer.compressed_cb_mask |= 1 << i;
}
- if (surf->level_info->mode == RADEON_SURF_MODE_LINEAR_ALIGNED)
+ if (rtex->surface.is_linear)
sctx->framebuffer.any_dst_linear = true;
r600_context_add_resource_size(ctx, surf->base.texture);
if (state->zsbuf) {
surf = (struct r600_surface*)state->zsbuf;
+ rtex = (struct r600_texture*)surf->base.texture;
if (!surf->depth_initialized) {
si_init_depth_surface(sctx, surf);
/* Colorbuffers. */
for (i = 0; i < nr_cbufs; i++) {
+ const struct radeon_surf_level *level_info;
unsigned pitch_tile_max, slice_tile_max, tile_mode_index;
unsigned cb_color_base, cb_color_fmask, cb_color_attrib;
unsigned cb_color_pitch, cb_color_slice, cb_color_fmask_slice;
}
tex = (struct r600_texture *)cb->base.texture;
+ level_info = &tex->surface.level[cb->base.u.tex.level];
radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
&tex->resource, RADEON_USAGE_READWRITE,
- tex->surface.nsamples > 1 ?
+ tex->resource.b.b.nr_samples > 1 ?
RADEON_PRIO_COLOR_BUFFER_MSAA :
RADEON_PRIO_COLOR_BUFFER);
RADEON_PRIO_DCC);
/* Compute mutable surface parameters. */
- pitch_tile_max = cb->level_info->nblk_x / 8 - 1;
- slice_tile_max = cb->level_info->nblk_x *
- cb->level_info->nblk_y / 64 - 1;
+ pitch_tile_max = level_info->nblk_x / 8 - 1;
+ slice_tile_max = level_info->nblk_x *
+ level_info->nblk_y / 64 - 1;
tile_mode_index = si_tile_mode_index(tex, cb->base.u.tex.level, false);
- cb_color_base = (tex->resource.gpu_address + cb->level_info->offset) >> 8;
+ cb_color_base = (tex->resource.gpu_address + level_info->offset) >> 8;
cb_color_pitch = S_028C64_TILE_MAX(pitch_tile_max);
cb_color_slice = S_028C68_TILE_MAX(slice_tile_max);
cb_color_attrib = cb->cb_color_attrib |
cb_color_info = cb->cb_color_info | tex->cb_color_info;
- if (tex->dcc_offset && cb->level_info->dcc_enabled) {
+ if (tex->dcc_offset && cb->base.u.tex.level < tex->surface.num_dcc_levels) {
bool is_msaa_resolve_dst = state->cbufs[0] &&
state->cbufs[0]->texture->nr_samples > 1 &&
state->cbufs[1] == &cb->base &&
/* On Polaris, the small primitive filter uses the sample locations
* even when MSAA is off, so we need to make sure they're set to 0.
*/
- if ((nr_samples > 1 || sctx->b.family >= CHIP_POLARIS10) &&
+ if (sctx->b.family >= CHIP_POLARIS10)
+ nr_samples = MAX2(nr_samples, 1);
+
+ if (nr_samples >= 1 &&
(nr_samples != sctx->msaa_sample_locs.nr_samples)) {
sctx->msaa_sample_locs.nr_samples = nr_samples;
cayman_emit_msaa_sample_locs(cs, nr_samples);
{
const struct util_format_description *desc;
int first_non_void;
- uint64_t va;
unsigned stride;
unsigned num_records;
unsigned num_format, data_format;
desc = util_format_description(format);
first_non_void = util_format_get_first_non_void_channel(format);
stride = desc->block.bits / 8;
- va = buf->gpu_address + offset;
num_format = si_translate_buffer_numformat(&screen->b.b, desc, first_non_void);
data_format = si_translate_buffer_dataformat(&screen->b.b, desc, first_non_void);
if (screen->b.chip_class >= VI)
num_records *= stride;
- state[4] = va;
- state[5] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
- S_008F04_STRIDE(stride);
+ state[4] = 0;
+ state[5] = S_008F04_STRIDE(stride);
state[6] = num_records;
state[7] = S_008F0C_DST_SEL_X(si_map_swizzle(desc->swizzle[0])) |
S_008F0C_DST_SEL_Y(si_map_swizzle(desc->swizzle[1])) |
if (desc->colorspace == UTIL_FORMAT_COLORSPACE_ZS) {
const unsigned char swizzle_xxxx[4] = {0, 0, 0, 0};
const unsigned char swizzle_yyyy[4] = {1, 1, 1, 1};
+ const unsigned char swizzle_wwww[4] = {3, 3, 3, 3};
switch (pipe_format) {
case PIPE_FORMAT_S8_UINT_Z24_UNORM:
- case PIPE_FORMAT_X24S8_UINT:
case PIPE_FORMAT_X32_S8X24_UINT:
case PIPE_FORMAT_X8Z24_UNORM:
util_format_compose_swizzles(swizzle_yyyy, state_swizzle, swizzle);
break;
+ case PIPE_FORMAT_X24S8_UINT:
+ /*
+ * X24S8 is implemented as an 8_8_8_8 data format, to
+ * fix texture gathers. This affects at least
+ * GL45-CTS.texture_cube_map_array.sampling on VI.
+ */
+ util_format_compose_swizzles(swizzle_wwww, state_swizzle, swizzle);
+ break;
default:
util_format_compose_swizzles(swizzle_xxxx, state_swizzle, swizzle);
}
view->base.reference.count = 1;
view->base.context = ctx;
- /* NULL resource, obey swizzle (only ZERO and ONE make sense). */
- if (!texture) {
- view->state[3] = S_008F1C_DST_SEL_X(si_map_swizzle(state->swizzle_r)) |
- S_008F1C_DST_SEL_Y(si_map_swizzle(state->swizzle_g)) |
- S_008F1C_DST_SEL_Z(si_map_swizzle(state->swizzle_b)) |
- S_008F1C_DST_SEL_W(si_map_swizzle(state->swizzle_a)) |
- S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D);
- return &view->base;
- }
-
+ assert(texture);
pipe_resource_reference(&view->base.texture, texture);
if (state->format == PIPE_FORMAT_X24S8_UINT ||
state->u.buf.offset,
state->u.buf.size,
view->state);
-
- LIST_ADDTAIL(&view->list, &sctx->b.texture_buffers);
return &view->base;
}
surflevel = tmp->surface.level;
if (tmp->db_compatible) {
+ if (!view->is_stencil_sampler)
+ pipe_format = tmp->db_render_format;
+
switch (pipe_format) {
case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
pipe_format = PIPE_FORMAT_Z32_FLOAT;
{
struct si_sampler_view *view = (struct si_sampler_view *)state;
- if (state->texture && state->texture->target == PIPE_BUFFER)
- LIST_DELINIT(&view->list);
-
pipe_resource_reference(&state->texture, NULL);
FREE(view);
}
}
}
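+ /* Tag sampler states in debug builds so si_delete_sampler_state can
+ * assert that it is handed a live si_sampler_state.
+ */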
+#ifdef DEBUG
+ rstate->magic = SI_SAMPLER_STATE_MAGIC;
+#endif
rstate->val[0] = (S_008F30_CLAMP_X(si_tex_wrap(state->wrap_s)) |
S_008F30_CLAMP_Y(si_tex_wrap(state->wrap_t)) |
S_008F30_CLAMP_Z(si_tex_wrap(state->wrap_r)) |
static void si_delete_sampler_state(struct pipe_context *ctx, void *state)
{
+#ifdef DEBUG
+ struct si_sampler_state *s = state;
+
+ assert(s->magic == SI_SAMPLER_STATE_MAGIC);
+ s->magic = 0;
+#endif
free(state);
}
const struct pipe_vertex_element *elements)
{
struct si_vertex_element *v = CALLOC_STRUCT(si_vertex_element);
+ bool used[SI_NUM_VERTEX_BUFFERS] = {};
int i;
assert(count <= SI_MAX_ATTRIBS);
v->count = count;
for (i = 0; i < count; ++i) {
const struct util_format_description *desc;
+ const struct util_format_channel_description *channel;
unsigned data_format, num_format;
int first_non_void;
+ unsigned vbo_index = elements[i].vertex_buffer_index;
+
+ if (vbo_index >= SI_NUM_VERTEX_BUFFERS) {
+ FREE(v);
+ return NULL;
+ }
+
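+ /* Record the first element that references each vertex buffer:
+ * one bit per element in first_vb_use_mask.
+ */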
+ if (!used[vbo_index]) {
+ v->first_vb_use_mask |= 1 << i;
+ used[vbo_index] = true;
+ }
desc = util_format_description(elements[i].src_format);
first_non_void = util_format_get_first_non_void_channel(elements[i].src_format);
data_format = si_translate_buffer_dataformat(ctx->screen, desc, first_non_void);
num_format = si_translate_buffer_numformat(ctx->screen, desc, first_non_void);
+ channel = first_non_void >= 0 ? &desc->channel[first_non_void] : NULL;
v->rsrc_word3[i] = S_008F0C_DST_SEL_X(si_map_swizzle(desc->swizzle[0])) |
S_008F0C_DST_SEL_Y(si_map_swizzle(desc->swizzle[1])) |
S_008F0C_NUM_FORMAT(num_format) |
S_008F0C_DATA_FORMAT(data_format);
v->format_size[i] = desc->block.bits / 8;
+
+ /* The hardware always treats the 2-bit alpha channel as
+ * unsigned, so a shader workaround is needed.
+ */
+ if (data_format == V_008F0C_BUF_DATA_FORMAT_2_10_10_10) {
+ if (num_format == V_008F0C_BUF_NUM_FORMAT_SNORM) {
+ v->fix_fetch |= (uint64_t)SI_FIX_FETCH_A2_SNORM << (4 * i);
+ } else if (num_format == V_008F0C_BUF_NUM_FORMAT_SSCALED) {
+ v->fix_fetch |= (uint64_t)SI_FIX_FETCH_A2_SSCALED << (4 * i);
+ } else if (num_format == V_008F0C_BUF_NUM_FORMAT_SINT) {
+ /* This isn't actually used in OpenGL. */
+ v->fix_fetch |= (uint64_t)SI_FIX_FETCH_A2_SINT << (4 * i);
+ }
+ } else if (channel && channel->type == UTIL_FORMAT_TYPE_FIXED) {
+ if (desc->swizzle[3] == PIPE_SWIZZLE_1)
+ v->fix_fetch |= (uint64_t)SI_FIX_FETCH_RGBX_32_FIXED << (4 * i);
+ else
+ v->fix_fetch |= (uint64_t)SI_FIX_FETCH_RGBA_32_FIXED << (4 * i);
+ } else if (channel && channel->size == 32 && !channel->pure_integer) {
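+ /* The hardware fetches 32-bit data without format conversion, so
+ * SNORM/UNORM/SSCALED/USCALED conversion has to be done in the
+ * shader; RGBX variants additionally need w forced to 1.
+ */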
+ if (channel->type == UTIL_FORMAT_TYPE_SIGNED) {
+ if (channel->normalized) {
+ if (desc->swizzle[3] == PIPE_SWIZZLE_1)
+ v->fix_fetch |= (uint64_t)SI_FIX_FETCH_RGBX_32_SNORM << (4 * i);
+ else
+ v->fix_fetch |= (uint64_t)SI_FIX_FETCH_RGBA_32_SNORM << (4 * i);
+ } else {
+ v->fix_fetch |= (uint64_t)SI_FIX_FETCH_RGBA_32_SSCALED << (4 * i);
+ }
+ } else if (channel->type == UTIL_FORMAT_TYPE_UNSIGNED) {
+ if (channel->normalized) {
+ if (desc->swizzle[3] == PIPE_SWIZZLE_1)
+ v->fix_fetch |= (uint64_t)SI_FIX_FETCH_RGBX_32_UNORM << (4 * i);
+ else
+ v->fix_fetch |= (uint64_t)SI_FIX_FETCH_RGBA_32_UNORM << (4 * i);
+ } else {
+ v->fix_fetch |= (uint64_t)SI_FIX_FETCH_RGBA_32_USCALED << (4 * i);
+ }
+ }
+ }
+
+ /* We work around the fact that 8_8_8 and 16_16_16 data formats
+ * do not exist by using the corresponding 4-component formats.
+ * This requires a fixup of the descriptor for bounds checks.
+ */
+ if (desc->block.bits == 3 * 8 ||
+ desc->block.bits == 3 * 16) {
+ v->fix_size3 |= (desc->block.bits / 24) << (2 * i);
+ }
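+
+ /* Packing: fix_fetch holds one 4-bit SI_FIX_FETCH_* code per element
+ * (a uint64_t covers 16 elements), and fix_size3 holds 2 bits per
+ * element: 1 for 8_8_8 formats and 2 for 16_16_16 (block.bits / 24).
+ */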
}
memcpy(v->elements, elements, sizeof(struct pipe_vertex_element) * count);
for (i = 0; i < count; i++) {
const struct pipe_vertex_buffer *src = buffers + i;
struct pipe_vertex_buffer *dsti = dst + i;
+ struct pipe_resource *buf = src->buffer;
- pipe_resource_reference(&dsti->buffer, src->buffer);
+ pipe_resource_reference(&dsti->buffer, buf);
dsti->buffer_offset = src->buffer_offset;
dsti->stride = src->stride;
- r600_context_add_resource_size(ctx, src->buffer);
+ r600_context_add_resource_size(ctx, buf);
+ if (buf)
+ r600_resource(buf)->bind_history |= PIPE_BIND_VERTEX_BUFFER;
}
} else {
for (i = 0; i < count; i++) {
struct si_context *sctx = (struct si_context *)ctx;
if (ib) {
- pipe_resource_reference(&sctx->index_buffer.buffer, ib->buffer);
+ struct pipe_resource *buf = ib->buffer;
+
+ pipe_resource_reference(&sctx->index_buffer.buffer, buf);
memcpy(&sctx->index_buffer, ib, sizeof(*ib));
- r600_context_add_resource_size(ctx, ib->buffer);
+ r600_context_add_resource_size(ctx, buf);
+ if (buf)
+ r600_resource(buf)->bind_history |= PIPE_BIND_INDEX_BUFFER;
} else {
pipe_resource_reference(&sctx->index_buffer.buffer, NULL);
}
pipe_resource_reference(&cb.buffer, NULL);
}
-static void si_texture_barrier(struct pipe_context *ctx)
+static void si_texture_barrier(struct pipe_context *ctx, unsigned flags)
{
struct si_context *sctx = (struct si_context *)ctx;
sctx->b.flags |= SI_CONTEXT_INV_VMEM_L1 |
SI_CONTEXT_INV_GLOBAL_L2 |
- SI_CONTEXT_FLUSH_AND_INV_CB |
- SI_CONTEXT_CS_PARTIAL_FLUSH;
+ SI_CONTEXT_FLUSH_AND_INV_CB;
}
+/* This only ensures coherency for shader image/buffer stores. */
static void si_memory_barrier(struct pipe_context *ctx, unsigned flags)
{
struct si_context *sctx = (struct si_context *)ctx;
}
if (flags & PIPE_BARRIER_INDEX_BUFFER) {
- sctx->b.flags |= SI_CONTEXT_INV_VMEM_L1;
-
- /* Indices are read through TC L2 since VI. */
+ /* Indices are read through TC L2 since VI.
+ * L1 isn't used.
+ */
if (sctx->screen->b.chip_class <= CIK)
- sctx->b.flags |= SI_CONTEXT_INV_GLOBAL_L2;
+ sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
}
if (flags & PIPE_BARRIER_FRAMEBUFFER)
sctx->b.flags |= SI_CONTEXT_FLUSH_AND_INV_FRAMEBUFFER;
- if (flags & (PIPE_BARRIER_MAPPED_BUFFER |
- PIPE_BARRIER_FRAMEBUFFER |
- PIPE_BARRIER_INDIRECT_BUFFER)) {
- /* Not sure if INV_GLOBAL_L2 is the best thing here.
- *
- * We need to make sure that TC L1 & L2 are written back to
- * memory, because neither CPU accesses nor CB fetches consider
- * TC, but there's no need to invalidate any TC cache lines. */
- sctx->b.flags |= SI_CONTEXT_INV_GLOBAL_L2;
- }
+ if (flags & (PIPE_BARRIER_FRAMEBUFFER |
+ PIPE_BARRIER_INDIRECT_BUFFER))
+ sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
}
static void *si_create_blend_custom(struct si_context *sctx, unsigned mode)
si_init_external_atom(sctx, &sctx->b.scissors.atom, &sctx->atoms.s.scissors);
si_init_external_atom(sctx, &sctx->b.viewports.atom, &sctx->atoms.s.viewports);
- si_init_atom(sctx, &sctx->cache_flush, &sctx->atoms.s.cache_flush, si_emit_cache_flush);
si_init_atom(sctx, &sctx->framebuffer.atom, &sctx->atoms.s.framebuffer, si_emit_framebuffer_state);
si_init_atom(sctx, &sctx->msaa_sample_locs.atom, &sctx->atoms.s.msaa_sample_locs, si_emit_msaa_sample_locs);
si_init_atom(sctx, &sctx->db_render_state, &sctx->atoms.s.db_render_state, si_emit_db_render_state);
sctx->b.b.set_active_query_state = si_set_active_query_state;
sctx->b.set_occlusion_query_state = si_set_occlusion_query_state;
+ sctx->b.save_qbo_state = si_save_qbo_state;
sctx->b.need_gfx_cs_space = si_need_gfx_cs_space;
sctx->b.b.draw_vbo = si_draw_vbo;
raster_config_1 = 0x0000002a;
break;
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
raster_config = 0x16000012;
raster_config_1 = 0x00000000;
break;
si_pm4_set_reg(pm4, R_028AC0_DB_SRESULTS_COMPARE_STATE0, 0x0);
si_pm4_set_reg(pm4, R_028AC4_DB_SRESULTS_COMPARE_STATE1, 0x0);
si_pm4_set_reg(pm4, R_028AC8_DB_PRELOAD_CONTROL, 0x0);
- si_pm4_set_reg(pm4, R_02800C_DB_RENDER_OVERRIDE,
- S_02800C_FORCE_HIS_ENABLE0(V_02800C_FORCE_DISABLE) |
- S_02800C_FORCE_HIS_ENABLE1(V_02800C_FORCE_DISABLE));
+ si_pm4_set_reg(pm4, R_02800C_DB_RENDER_OVERRIDE, 0);
si_pm4_set_reg(pm4, R_028400_VGT_MAX_VTX_INDX, ~0);
si_pm4_set_reg(pm4, R_028404_VGT_MIN_VTX_INDX, 0);
si_pm4_set_reg(pm4, R_028408_VGT_INDX_OFFSET, 0);
if (sctx->b.chip_class >= CIK) {
+ /* If this is 0, Bonaire can hang even if GS isn't being used.
+ * Other chips are unaffected. These are suboptimal values,
+ * but we don't use on-chip GS.
+ */
+ si_pm4_set_reg(pm4, R_028A44_VGT_GS_ONCHIP_CNTL,
+ S_028A44_ES_VERTS_PER_SUBGRP(64) |
+ S_028A44_GS_PRIMS_PER_SUBGRP(4));
+
si_pm4_set_reg(pm4, R_00B51C_SPI_SHADER_PGM_RSRC3_LS, S_00B51C_CU_EN(0xffff));
si_pm4_set_reg(pm4, R_00B41C_SPI_SHADER_PGM_RSRC3_HS, 0);
si_pm4_set_reg(pm4, R_00B31C_SPI_SHADER_PGM_RSRC3_ES, S_00B31C_CU_EN(0xffff));