util_blitter_save_vertex_elements(rctx->blitter, rctx->vertex_fetch_shader.cso);
util_blitter_save_vertex_shader(rctx->blitter, rctx->vs_shader);
util_blitter_save_geometry_shader(rctx->blitter, rctx->gs_shader);
+ util_blitter_save_tessctrl_shader(rctx->blitter, rctx->tcs_shader);
+ util_blitter_save_tesseval_shader(rctx->blitter, rctx->tes_shader);
util_blitter_save_so_targets(rctx->blitter, rctx->b.streamout.num_targets,
(struct pipe_stream_output_target**)rctx->b.streamout.targets);
util_blitter_save_rasterizer(rctx->blitter, rctx->rasterizer_state.cso);
if (op & R600_SAVE_FRAGMENT_STATE) {
- util_blitter_save_viewport(rctx->blitter, &rctx->viewport[0].state);
- util_blitter_save_scissor(rctx->blitter, &rctx->scissor[0].scissor);
+ util_blitter_save_viewport(rctx->blitter, &rctx->viewport.state[0]);
+ util_blitter_save_scissor(rctx->blitter, &rctx->scissor.scissor[0]);
util_blitter_save_fragment_shader(rctx->blitter, rctx->ps_shader);
util_blitter_save_blend(rctx->blitter, rctx->blend_state.cso);
util_blitter_save_depth_stencil_alpha(rctx->blitter, rctx->dsa_state.cso);
util_blitter_save_fragment_sampler_views(rctx->blitter,
util_last_bit(rctx->samplers[PIPE_SHADER_FRAGMENT].views.enabled_mask),
(struct pipe_sampler_view**)rctx->samplers[PIPE_SHADER_FRAGMENT].views.views);
}
- if ((op & R600_DISABLE_RENDER_COND) && rctx->b.current_render_cond) {
- util_blitter_save_render_condition(rctx->blitter,
- rctx->b.current_render_cond,
- rctx->b.current_render_cond_cond,
- rctx->b.current_render_cond_mode);
- }
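+ /* Instead of saving and restoring the render condition, just force it
+ * off; r600_blitter_end() switches it back on. */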
+ if (op & R600_DISABLE_RENDER_COND)
+ rctx->b.render_cond_force_off = true;
}
static void r600_blitter_end(struct pipe_context *ctx)
{
struct r600_context *rctx = (struct r600_context *)ctx;
- r600_resume_nontimer_queries(&rctx->b);
+
+ rctx->b.render_cond_force_off = false;
+ r600_resume_nontimer_queries(&rctx->b);
}
static unsigned u_max_sample(struct pipe_resource *r)
static void r600_blit_decompress_depth_in_place(struct r600_context *rctx,
struct r600_texture *texture,
+ bool is_stencil_sampler,
unsigned first_level, unsigned last_level,
unsigned first_layer, unsigned last_layer)
{
struct pipe_surface *zsurf, surf_tmpl = {{0}};
unsigned layer, max_layer, checked_last_layer, level;
+ unsigned *dirty_level_mask;
/* Enable decompression in DB_RENDER_CONTROL */
- rctx->db_misc_state.flush_depthstencil_in_place = true;
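+ /* Depth and stencil are now flushed in place separately, each with its
+ * own dirty-level mask. */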
+ if (is_stencil_sampler) {
+ rctx->db_misc_state.flush_stencil_inplace = true;
+ dirty_level_mask = &texture->stencil_dirty_level_mask;
+ } else {
+ rctx->db_misc_state.flush_depth_inplace = true;
+ dirty_level_mask = &texture->dirty_level_mask;
+ }
r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
surf_tmpl.format = texture->resource.b.b.format;
for (level = first_level; level <= last_level; level++) {
- if (!(texture->dirty_level_mask & (1 << level)))
+ if (!(*dirty_level_mask & (1 << level)))
continue;
surf_tmpl.u.tex.level = level;
/* The texture will always be dirty if some layers or samples aren't flushed.
* I don't think this case occurs often though. */
if (first_layer == 0 && last_layer == max_layer) {
- texture->dirty_level_mask &= ~(1 << level);
+ *dirty_level_mask &= ~(1 << level);
}
}
/* Disable decompression in DB_RENDER_CONTROL */
- rctx->db_misc_state.flush_depthstencil_in_place = false;
+ rctx->db_misc_state.flush_depth_inplace = false;
+ rctx->db_misc_state.flush_stencil_inplace = false;
r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
}
while (depth_texture_mask) {
struct pipe_sampler_view *view;
+ struct r600_pipe_sampler_view *rview;
struct r600_texture *tex;
i = u_bit_scan(&depth_texture_mask);
view = &textures->views[i]->base;
assert(view);
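+ /* The r600 view records whether it samples the stencil plane. */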
+ rview = (struct r600_pipe_sampler_view*)view;
tex = (struct r600_texture *)view->texture;
assert(tex->is_depth && !tex->is_flushing_texture);
if (rctx->b.chip_class >= EVERGREEN ||
r600_can_read_depth(tex)) {
r600_blit_decompress_depth_in_place(rctx, tex,
+ rview->is_stencil_sampler,
view->u.tex.first_level, view->u.tex.last_level,
0, util_max_layer(&tex->resource.b.b, view->u.tex.first_level));
} else {
if (rtex->is_depth && !rtex->is_flushing_texture) {
if (rctx->b.chip_class >= EVERGREEN ||
r600_can_read_depth(rtex)) {
- r600_blit_decompress_depth_in_place(rctx, rtex,
+ r600_blit_decompress_depth_in_place(rctx, rtex, false,
level, level,
first_layer, last_layer);
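+ /* If the surface also carries a stencil buffer, decompress that
+ * plane in place as well. */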
+ if (rtex->surface.flags & RADEON_SURF_SBUFFER) {
+ r600_blit_decompress_depth_in_place(rctx, rtex, true,
+ level, level,
+ first_layer, last_layer);
+ }
} else {
if (!r600_init_flushed_depth_texture(ctx, tex, NULL))
return false; /* error */
* Can we somehow flush the index buffer cache? Starting a new IB seems
* to do the trick. */
if (rctx->b.chip_class <= R700)
- rctx->b.rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ rctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
}
/**
* Global buffers are not really resources, they are actually offsets
* into a single global resource (r600_screen::global_pool). This means
- * they don't have their own cs_buf handle, so they cannot be passed
+ * they don't have their own buf handle, so they cannot be passed
* to r600_copy_buffer() and must be handled separately.
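+ *
+ * As an illustration (assuming the compute_memory_pool layout): an item
+ * placed in the pool starts at byte offset item->start_in_dw * 4 inside
+ * pool->bo, so a copy must address pool->bo at that offset rather than
+ * a standalone buffer.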
*/
static void r600_copy_global_buffer(struct pipe_context *ctx,
src = (struct pipe_resource *)pool->bo;
} else {
if (item->real_buffer == NULL) {
- item->real_buffer = (struct r600_resource*)
+ item->real_buffer =
r600_compute_buffer_alloc_vram(pool->screen,
item->size_in_dw * 4);
}
dst = (struct pipe_resource *)pool->bo;
} else {
if (item->real_buffer == NULL) {
- item->real_buffer = (struct r600_resource*)
+ item->real_buffer =
r600_compute_buffer_alloc_vram(pool->screen,
item->size_in_dw * 4);
}
} else {
uint32_t *map = r600_buffer_map_sync_with_rings(&rctx->b, r600_resource(dst),
PIPE_TRANSFER_WRITE);
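+ /* The mapping starts at the buffer base; step to the fill offset
+ * (map counts dwords, offset is in bytes). */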
+ map += offset / 4;
size /= 4;
for (unsigned i = 0; i < size; i++)
*map++ = value;
util_blitter_default_dst_texture(&dst_templ, dst, dst_level, dstz);
util_blitter_default_src_texture(&src_templ, src, src_level);
- if (util_format_is_compressed(src->format)) {
+ if (util_format_is_compressed(src->format) ||
+ util_format_is_compressed(dst->format)) {
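+ /* Blit compressed textures through an uncompressed format with the
+ * same block size, so the raw blocks are copied unchanged. */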
unsigned blocksize = util_format_get_blocksize(src->format);
if (blocksize == 8)