 }
 }
-void
+bool
 fd5_blitter_blit(struct fd_context *ctx, const struct pipe_blit_info *info)
 {
     struct fd_batch *batch;
     if (!can_do_blit(info)) {
-        fd_blitter_blit(ctx, info);
-        return;
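+        /* tell the caller to use the fallback path instead: */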
+        return false;
     }
     batch = fd_bc_alloc_batch(&ctx->screen->batch_cache, ctx, true);
     batch->needs_flush = true;
     fd_batch_flush(batch, false, false);
+
+    return true;
 }
 unsigned
 #include "freedreno_context.h"
-void fd5_blitter_blit(struct fd_context *ctx, const struct pipe_blit_info *info);
+bool fd5_blitter_blit(struct fd_context *ctx, const struct pipe_blit_info *info);
 unsigned fd5_tile_mode(const struct pipe_resource *tmpl);
 #endif /* FD5_BLIT_H_ */
     struct fd_context *ctx = fd_context(pctx);
     if (!can_do_blit(info)) {
-        fd_blitter_pipe_begin(ctx, info->render_condition_enable, false, FD_STAGE_BLIT);
         fd_blitter_blit(ctx, info);
-        fd_blitter_pipe_end(ctx);
         return;
     }
     src_templ->swizzle_a = PIPE_SWIZZLE_W;
 }
-void
+bool
 fd_blitter_blit(struct fd_context *ctx, const struct pipe_blit_info *info)
 {
     struct pipe_resource *dst = info->dst.resource;
     struct pipe_context *pipe = &ctx->base;
     struct pipe_surface *dst_view, dst_templ;
     struct pipe_sampler_view src_templ, *src_view;
+    bool discard = false;
+
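+    /* If the blit fully overwrites the destination level (and no scissor or
+     * blending is involved), its previous contents can be discarded:
+     */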
+    if (!info->scissor_enable && !info->alpha_blend) {
+        discard = util_texrange_covers_whole_level(info->dst.resource,
+                info->dst.level, info->dst.box.x, info->dst.box.y,
+                info->dst.box.z, info->dst.box.width,
+                info->dst.box.height, info->dst.box.depth);
+    }
+
+    fd_blitter_pipe_begin(ctx, info->render_condition_enable, discard, FD_STAGE_BLIT);
     /* Initialize the surface. */
     default_dst_texture(&dst_templ, dst, info->dst.level,
     pipe_surface_reference(&dst_view, NULL);
     pipe_sampler_view_reference(&src_view, NULL);
+
+    fd_blitter_pipe_end(ctx);
+
+    /* The fallback blitter must never fail: */
+    return true;
 }
 /**
 #include "freedreno_context.h"
-void fd_blitter_blit(struct fd_context *ctx, const struct pipe_blit_info *info);
+bool fd_blitter_blit(struct fd_context *ctx, const struct pipe_blit_info *info);
 void fd_resource_copy_region(struct pipe_context *pctx,
         struct pipe_resource *dst,
     slab_create_child(&ctx->transfer_pool, &screen->transfer_pool);
-    if (!ctx->blit)
-        ctx->blit = fd_blitter_blit;
-
     fd_draw_init(pctx);
     fd_resource_context_init(pctx);
     fd_query_context_init(pctx);
     void (*query_set_stage)(struct fd_batch *batch, enum fd_render_stage stage);
     /* blitter: */
-    void (*blit)(struct fd_context *ctx, const struct pipe_blit_info *info);
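+    /* returns true if the blit was handled, false to use the fallback path: */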
+    bool (*blit)(struct fd_context *ctx, const struct pipe_blit_info *info);
     /* simple gpu "memcpy": */
     void (*mem_to_mem)(struct fd_ringbuffer *ring, struct pipe_resource *dst,
 static void
 do_blit(struct fd_context *ctx, const struct pipe_blit_info *blit, bool fallback)
 {
+    struct pipe_context *pctx = &ctx->base;
+
     /* TODO size threshold too?? */
     if (!fallback) {
         /* do blit on gpu: */
-        fd_blitter_pipe_begin(ctx, false, true, FD_STAGE_BLIT);
-        ctx->blit(ctx, blit);
-        fd_blitter_pipe_end(ctx);
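+        /* go through pctx->blit(), which handles the hw vs. fallback decision itself: */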
+        pctx->blit(pctx, blit);
     } else {
         /* do blit on cpu: */
-        util_resource_copy_region(&ctx->base,
+        util_resource_copy_region(pctx,
                 blit->dst.resource, blit->dst.level, blit->dst.box.x,
                 blit->dst.box.y, blit->dst.box.z,
                 blit->src.resource, blit->src.level, &blit->src.box);
 {
     struct fd_context *ctx = fd_context(pctx);
     struct pipe_blit_info info = *blit_info;
-    bool discard = false;
     if (info.render_condition_enable && !fd_render_condition_check(pctx))
         return;
-    if (!info.scissor_enable && !info.alpha_blend) {
-        discard = util_texrange_covers_whole_level(info.dst.resource,
-                info.dst.level, info.dst.box.x, info.dst.box.y,
-                info.dst.box.z, info.dst.box.width,
-                info.dst.box.height, info.dst.box.depth);
-    }
-
     if (util_try_blit_via_copy_region(pctx, &info)) {
         return; /* done */
     }
         return;
     }
-    fd_blitter_pipe_begin(ctx, info.render_condition_enable, discard, FD_STAGE_BLIT);
-    ctx->blit(ctx, &info);
-    fd_blitter_pipe_end(ctx);
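+    /* Try the hw-specific blit first; if it can't handle this blit, fall
+     * back to the generic u_blitter based path:
+     */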
+    if (!(ctx->blit && ctx->blit(ctx, &info)))
+        fd_blitter_blit(ctx, &info);
 }
 void