#include "compute_memory_pool.h"
#include "evergreen_compute.h"
#include "util/u_surface.h"
-#include "util/u_format.h"
+#include "util/format/u_format.h"
#include "evergreend.h"
enum r600_blitter_op /* bitmask */
{
struct r600_context *rctx = (struct r600_context *)ctx;
+ if (rctx->cmd_buf_is_compute) {
+ rctx->b.gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
+ rctx->cmd_buf_is_compute = false;
+ }
+
util_blitter_save_vertex_buffer_slot(rctx->blitter, rctx->vertex_buffer_state.vb);
util_blitter_save_vertex_elements(rctx->blitter, rctx->vertex_fetch_shader.cso);
util_blitter_save_vertex_shader(rctx->blitter, rctx->vs_shader);
util_blitter_save_rasterizer(rctx->blitter, rctx->rasterizer_state.cso);
if (op & R600_SAVE_FRAGMENT_STATE) {
- util_blitter_save_viewport(rctx->blitter, &rctx->viewport.state[0]);
- util_blitter_save_scissor(rctx->blitter, &rctx->scissor.scissor[0]);
+ util_blitter_save_viewport(rctx->blitter, &rctx->b.viewports.states[0]);
+ util_blitter_save_scissor(rctx->blitter, &rctx->b.scissors.states[0]);
util_blitter_save_fragment_shader(rctx->blitter, rctx->ps_shader);
util_blitter_save_blend(rctx->blitter, rctx->blend_state.cso);
util_blitter_save_depth_stencil_alpha(rctx->blitter, rctx->dsa_state.cso);
rview = (struct r600_pipe_sampler_view*)view;
tex = (struct r600_texture *)view->texture;
- assert(tex->is_depth && !tex->is_flushing_texture);
+ assert(tex->db_compatible);
- if (rctx->b.chip_class >= EVERGREEN ||
- r600_can_read_depth(tex)) {
+ if (r600_can_sample_zs(tex, rview->is_stencil_sampler)) {
r600_blit_decompress_depth_in_place(rctx, tex,
rview->is_stencil_sampler,
view->u.tex.first_level, view->u.tex.last_level,
}
}
+/* Decompress all depth textures currently bound as shader images.
+ *
+ * Walks images->compressed_depthtex_mask (one bit per image slot) and
+ * decompresses the single mip level referenced by each image view.  When
+ * the texture can be sampled as Z/S directly (r600_can_sample_zs), the
+ * depth data is decompressed in place; otherwise it is blit-decompressed
+ * into the flushed copy, covering every layer and sample of that level.
+ * Note only the depth plane is handled here (stencil flag passed as false).
+ */
+void r600_decompress_depth_images(struct r600_context *rctx,
+ struct r600_image_state *images)
+{
+ unsigned i;
+ unsigned depth_texture_mask = images->compressed_depthtex_mask;
+
+ while (depth_texture_mask) {
+ struct r600_image_view *view;
+ struct r600_texture *tex;
+
+ /* Pop the lowest set bit = next image slot with a compressed depth tex. */
+ i = u_bit_scan(&depth_texture_mask);
+
+ view = &images->views[i];
+ assert(view);
+
+ tex = (struct r600_texture *)view->base.resource;
+ /* Only DB-compatible (depth) textures may appear in this mask. */
+ assert(tex->db_compatible);
+
+ if (r600_can_sample_zs(tex, false)) {
+ /* Image views reference exactly one level, so first == last. */
+ r600_blit_decompress_depth_in_place(rctx, tex,
+ false,
+ view->base.u.tex.level,
+ view->base.u.tex.level,
+ 0, util_max_layer(&tex->resource.b.b, view->base.u.tex.level));
+ } else {
+ /* Fallback: decompress into the flushed texture copy; all
+ * layers and all samples of the level are covered. */
+ r600_blit_decompress_depth(&rctx->b.b, tex, NULL,
+ view->base.u.tex.level,
+ view->base.u.tex.level,
+ 0, util_max_layer(&tex->resource.b.b, view->base.u.tex.level),
+ 0, u_max_sample(&tex->resource.b.b));
+ }
+ }
+}
+
static void r600_blit_decompress_color(struct pipe_context *ctx,
struct r600_texture *rtex,
unsigned first_level, unsigned last_level,
}
}
+/* Decompress all color textures currently bound as shader images.
+ *
+ * Walks images->compressed_colortex_mask (one bit per image slot) and
+ * runs the color decompress blit over the mip level and layer range that
+ * each image view references, so shader image access sees plain
+ * (non-fast-cleared) color data.
+ */
+void r600_decompress_color_images(struct r600_context *rctx,
+ struct r600_image_state *images)
+{
+ unsigned i;
+ unsigned mask = images->compressed_colortex_mask;
+
+ while (mask) {
+ struct r600_image_view *view;
+ struct r600_texture *tex;
+
+ /* Pop the lowest set bit = next image slot with a compressed color tex. */
+ i = u_bit_scan(&mask);
+
+ view = &images->views[i];
+ assert(view);
+
+ tex = (struct r600_texture *)view->base.resource;
+ /* Only textures carrying a CMASK can be in this mask. */
+ assert(tex->cmask.size);
+
+ /* Image views reference exactly one level, so first == last level. */
+ r600_blit_decompress_color(&rctx->b.b, tex,
+ view->base.u.tex.level, view->base.u.tex.level,
+ view->base.u.tex.first_layer,
+ view->base.u.tex.last_layer);
+ }
+}
+
/* Helper for decompressing a portion of a color or depth resource before
* blitting if any decompression is needed.
* The driver doesn't decompress resources automatically while u_blitter is
struct r600_context *rctx = (struct r600_context *)ctx;
struct r600_texture *rtex = (struct r600_texture*)tex;
- if (rtex->is_depth && !rtex->is_flushing_texture) {
- if (rctx->b.chip_class >= EVERGREEN ||
- r600_can_read_depth(rtex)) {
+ if (rtex->db_compatible) {
+ if (r600_can_sample_zs(rtex, false)) {
r600_blit_decompress_depth_in_place(rctx, rtex, false,
level, level,
first_layer, last_layer);
- if (rtex->surface.flags & RADEON_SURF_SBUFFER) {
+ if (rtex->surface.has_stencil) {
r600_blit_decompress_depth_in_place(rctx, rtex, true,
level, level,
first_layer, last_layer);
}
static void r600_clear(struct pipe_context *ctx, unsigned buffers,
+ const struct pipe_scissor_state *scissor_state,
const union pipe_color_union *color,
double depth, unsigned stencil)
{
* array are clear to different value. To simplify code just
* disable fast clear for texture array.
*/
- /* Only use htile for first level */
- if (rtex->htile_buffer && !level &&
+ if (r600_htile_enabled(rtex, level) &&
fb->zsbuf->u.tex.first_layer == 0 &&
fb->zsbuf->u.tex.last_layer == util_max_layer(&rtex->resource.b.b, level)) {
if (rtex->depth_clear_value != depth) {
r600_blitter_begin(ctx, R600_CLEAR);
util_blitter_clear(rctx->blitter, fb->width, fb->height,
util_framebuffer_get_num_layers(fb),
- buffers, color, depth, stencil);
+ buffers, color, depth, stencil,
+ util_framebuffer_get_num_samples(fb) > 1);
r600_blitter_end(ctx);
/* disable fast clear */
struct pipe_surface *dst,
const union pipe_color_union *color,
unsigned dstx, unsigned dsty,
- unsigned width, unsigned height)
+ unsigned width, unsigned height,
+ bool render_condition_enabled)
{
struct r600_context *rctx = (struct r600_context *)ctx;
- r600_blitter_begin(ctx, R600_CLEAR_SURFACE);
+ r600_blitter_begin(ctx, R600_CLEAR_SURFACE |
+ (render_condition_enabled ? 0 : R600_DISABLE_RENDER_COND));
util_blitter_clear_render_target(rctx->blitter, dst, color,
dstx, dsty, width, height);
r600_blitter_end(ctx);
double depth,
unsigned stencil,
unsigned dstx, unsigned dsty,
- unsigned width, unsigned height)
+ unsigned width, unsigned height,
+ bool render_condition_enabled)
{
struct r600_context *rctx = (struct r600_context *)ctx;
- r600_blitter_begin(ctx, R600_CLEAR_SURFACE);
+ r600_blitter_begin(ctx, R600_CLEAR_SURFACE |
+ (render_condition_enabled ? 0 : R600_DISABLE_RENDER_COND));
util_blitter_clear_depth_stencil(rctx->blitter, dst, clear_flags, depth, stencil,
dstx, dsty, width, height);
r600_blitter_end(ctx);
} else {
util_resource_copy_region(ctx, dst, 0, dstx, 0, 0, src, 0, src_box);
}
-
- /* The index buffer (VGT) doesn't seem to see the result of the copying.
- * Can we somehow flush the index buffer cache? Starting a new IB seems
- * to do the trick. */
- if (rctx->b.chip_class <= R700)
- rctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
}
/**
}
static void r600_clear_buffer(struct pipe_context *ctx, struct pipe_resource *dst,
- unsigned offset, unsigned size, unsigned value,
- bool is_framebuffer)
+ uint64_t offset, uint64_t size, unsigned value,
+ enum r600_coherency coher)
{
struct r600_context *rctx = (struct r600_context*)ctx;
if (rctx->screen->b.has_cp_dma &&
rctx->b.chip_class >= EVERGREEN &&
offset % 4 == 0 && size % 4 == 0) {
- evergreen_cp_dma_clear_buffer(rctx, dst, offset, size, value);
+ evergreen_cp_dma_clear_buffer(rctx, dst, offset, size, value, coher);
} else if (rctx->screen->b.has_streamout && offset % 4 == 0 && size % 4 == 0) {
union pipe_color_union clear_value;
clear_value.ui[0] = value;
src_heightFL = u_minify(src->height0, src_level);
util_blitter_default_dst_texture(&dst_templ, dst, dst_level, dstz);
- util_blitter_default_src_texture(&src_templ, src, src_level);
+ util_blitter_default_src_texture(rctx->blitter, &src_templ, src, src_level);
if (util_format_is_compressed(src->format) ||
util_format_is_compressed(dst->format)) {
dst_templ.format = PIPE_FORMAT_R8_UNORM;
src_templ.format = PIPE_FORMAT_R8_UNORM;
break;
- case 2:
+ case 2:
dst_templ.format = PIPE_FORMAT_R8G8_UNORM;
src_templ.format = PIPE_FORMAT_R8G8_UNORM;
break;
dst_templ.format = PIPE_FORMAT_R8G8B8A8_UNORM;
src_templ.format = PIPE_FORMAT_R8G8B8A8_UNORM;
break;
- case 8:
- dst_templ.format = PIPE_FORMAT_R16G16B16A16_UINT;
- src_templ.format = PIPE_FORMAT_R16G16B16A16_UINT;
- break;
- case 16:
- dst_templ.format = PIPE_FORMAT_R32G32B32A32_UINT;
- src_templ.format = PIPE_FORMAT_R32G32B32A32_UINT;
- break;
+ case 8:
+ dst_templ.format = PIPE_FORMAT_R16G16B16A16_UINT;
+ src_templ.format = PIPE_FORMAT_R16G16B16A16_UINT;
+ break;
+ case 16:
+ dst_templ.format = PIPE_FORMAT_R32G32B32A32_UINT;
+ src_templ.format = PIPE_FORMAT_R32G32B32A32_UINT;
+ break;
default:
fprintf(stderr, "Unhandled format %s with blocksize %u\n",
util_format_short_name(src->format), blocksize);
}
}
- dst_view = r600_create_surface_custom(ctx, dst, &dst_templ, dst_width, dst_height);
+ dst_view = r600_create_surface_custom(ctx, dst, &dst_templ,
+ /* we don't care about these two for r600g */
+ dst->width0, dst->height0,
+ dst_width, dst_height);
if (rctx->b.chip_class >= EVERGREEN) {
src_view = evergreen_create_sampler_view_custom(ctx, src, &src_templ,
pipe_sampler_view_reference(&src_view, NULL);
}
-/* For MSAA integer resolving to work, we change the format to NORM using this function. */
-static enum pipe_format int_to_norm_format(enum pipe_format format)
-{
- switch (format) {
-#define REPLACE_FORMAT_SIGN(format,sign) \
- case PIPE_FORMAT_##format##_##sign##INT: \
- return PIPE_FORMAT_##format##_##sign##NORM
-#define REPLACE_FORMAT(format) \
- REPLACE_FORMAT_SIGN(format, U); \
- REPLACE_FORMAT_SIGN(format, S)
-
- REPLACE_FORMAT_SIGN(B10G10R10A2, U);
- REPLACE_FORMAT(R8);
- REPLACE_FORMAT(R8G8);
- REPLACE_FORMAT(R8G8B8X8);
- REPLACE_FORMAT(R8G8B8A8);
- REPLACE_FORMAT(A8);
- REPLACE_FORMAT(I8);
- REPLACE_FORMAT(L8);
- REPLACE_FORMAT(L8A8);
- REPLACE_FORMAT(R16);
- REPLACE_FORMAT(R16G16);
- REPLACE_FORMAT(R16G16B16X16);
- REPLACE_FORMAT(R16G16B16A16);
- REPLACE_FORMAT(A16);
- REPLACE_FORMAT(I16);
- REPLACE_FORMAT(L16);
- REPLACE_FORMAT(L16A16);
-
-#undef REPLACE_FORMAT
-#undef REPLACE_FORMAT_SIGN
- default:
- return format;
- }
-}
-
static bool do_hardware_msaa_resolve(struct pipe_context *ctx,
const struct pipe_blit_info *info)
{
struct r600_texture *dst = (struct r600_texture*)info->dst.resource;
unsigned dst_width = u_minify(info->dst.resource->width0, info->dst.level);
unsigned dst_height = u_minify(info->dst.resource->height0, info->dst.level);
- enum pipe_format format = int_to_norm_format(info->dst.format);
+ enum pipe_format format = info->src.format;
unsigned sample_mask =
rctx->b.chip_class == CAYMAN ? ~0 :
((1ull << MAX2(1, info->src.resource->nr_samples)) - 1);
-
- if (info->src.resource->nr_samples > 1 &&
- info->dst.resource->nr_samples <= 1 &&
- util_max_layer(info->src.resource, 0) == 0 &&
- util_max_layer(info->dst.resource, info->dst.level) == 0 &&
- info->dst.format == info->src.format &&
- !util_format_is_pure_integer(format) &&
- !util_format_is_depth_or_stencil(format) &&
+ struct pipe_resource *tmp, templ;
+ struct pipe_blit_info blit;
+
+ /* Check basic requirements for hw resolve. */
+ if (!(info->src.resource->nr_samples > 1 &&
+ info->dst.resource->nr_samples <= 1 &&
+ !util_format_is_pure_integer(format) &&
+ !util_format_is_depth_or_stencil(format) &&
+ util_max_layer(info->src.resource, 0) == 0))
+ return false;
+
+ /* Check the remaining requirements for hw resolve. */
+ if (util_max_layer(info->dst.resource, info->dst.level) == 0 &&
+ util_is_format_compatible(util_format_description(info->src.format),
+ util_format_description(info->dst.format)) &&
!info->scissor_enable &&
(info->mask & PIPE_MASK_RGBA) == PIPE_MASK_RGBA &&
dst_width == info->src.resource->width0 &&
info->src.box.width == dst_width &&
info->src.box.height == dst_height &&
info->src.box.depth == 1 &&
- dst->surface.level[info->dst.level].mode >= RADEON_SURF_MODE_1D &&
+ dst->surface.u.legacy.level[info->dst.level].mode >= RADEON_SURF_MODE_1D &&
(!dst->cmask.size || !dst->dirty_level_mask) /* dst cannot be fast-cleared */) {
r600_blitter_begin(ctx, R600_COLOR_RESOLVE |
(info->render_condition_enable ? 0 : R600_DISABLE_RENDER_COND));
r600_blitter_end(ctx);
return true;
}
- return false;
+
+ /* Shader-based resolve is VERY SLOW. Instead, resolve into
+ * a temporary texture and blit.
+ */
+ memset(&templ, 0, sizeof(templ));
+ templ.target = PIPE_TEXTURE_2D;
+ templ.format = info->src.resource->format;
+ templ.width0 = info->src.resource->width0;
+ templ.height0 = info->src.resource->height0;
+ templ.depth0 = 1;
+ templ.array_size = 1;
+ templ.usage = PIPE_USAGE_DEFAULT;
+ templ.flags = R600_RESOURCE_FLAG_FORCE_TILING;
+
+ tmp = ctx->screen->resource_create(ctx->screen, &templ);
+ if (!tmp)
+ return false;
+
+ /* resolve */
+ r600_blitter_begin(ctx, R600_COLOR_RESOLVE |
+ (info->render_condition_enable ? 0 : R600_DISABLE_RENDER_COND));
+ util_blitter_custom_resolve_color(rctx->blitter, tmp, 0, 0,
+ info->src.resource, info->src.box.z,
+ sample_mask, rctx->custom_blend_resolve,
+ format);
+ r600_blitter_end(ctx);
+
+ /* blit */
+ blit = *info;
+ blit.src.resource = tmp;
+ blit.src.box.z = 0;
+
+ r600_blitter_begin(ctx, R600_BLIT |
+ (info->render_condition_enable ? 0 : R600_DISABLE_RENDER_COND));
+ util_blitter_blit(rctx->blitter, &blit);
+ r600_blitter_end(ctx);
+
+ pipe_resource_reference(&tmp, NULL);
+ return true;
}
static void r600_blit(struct pipe_context *ctx,
const struct pipe_blit_info *info)
{
struct r600_context *rctx = (struct r600_context*)ctx;
+ struct r600_texture *rdst = (struct r600_texture *)info->dst.resource;
if (do_hardware_msaa_resolve(ctx, info)) {
return;
}
+ /* Using SDMA for copying to a linear texture in GTT is much faster.
+ * This improves DRI PRIME performance.
+ *
+ * resource_copy_region can't do this yet, because dma_copy calls it
+ * on failure (recursion).
+ */
+ if (rdst->surface.u.legacy.level[info->dst.level].mode ==
+ RADEON_SURF_MODE_LINEAR_ALIGNED &&
+ rctx->b.dma_copy &&
+ util_can_blit_via_copy_region(info, false)) {
+ rctx->b.dma_copy(ctx, info->dst.resource, info->dst.level,
+ info->dst.box.x, info->dst.box.y,
+ info->dst.box.z,
+ info->src.resource, info->src.level,
+ &info->src.box);
+ return;
+ }
+
assert(util_blitter_is_blit_supported(rctx->blitter, info));
/* The driver doesn't decompress resources automatically while