X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fintel%2Fblorp%2Fblorp_blit.c;h=3aac0abf274c1612e807d0150c531a6d5b81fc1d;hb=HEAD;hp=27af21f2548198696a92b036a142faf6b982b724;hpb=21943c35f75d90f464f8495f5282037e1c7c79d0;p=mesa.git diff --git a/src/intel/blorp/blorp_blit.c b/src/intel/blorp/blorp_blit.c index 27af21f2548..3aac0abf274 100644 --- a/src/intel/blorp/blorp_blit.c +++ b/src/intel/blorp/blorp_blit.c @@ -21,27 +21,19 @@ * IN THE SOFTWARE. */ -#include "compiler/nir/nir_builder.h" +#include "blorp_nir_builder.h" +#include "compiler/nir/nir_format_convert.h" #include "blorp_priv.h" -#include "brw_meta_util.h" + +#include "util/format_rgb9e5.h" +/* header-only include needed for _mesa_unorm_to_float and friends. */ +#include "mesa/main/format_utils.h" +#include "util/u_math.h" #define FILE_DEBUG_FLAG DEBUG_BLORP -/** - * Enum to specify the order of arguments in a sampler message - */ -enum sampler_message_arg -{ - SAMPLER_MESSAGE_ARG_U_FLOAT, - SAMPLER_MESSAGE_ARG_V_FLOAT, - SAMPLER_MESSAGE_ARG_U_INT, - SAMPLER_MESSAGE_ARG_V_INT, - SAMPLER_MESSAGE_ARG_R_INT, - SAMPLER_MESSAGE_ARG_SI_INT, - SAMPLER_MESSAGE_ARG_MCS_INT, - SAMPLER_MESSAGE_ARG_ZERO_INT, -}; +static const bool split_blorp_blit_debug = false; struct brw_blorp_blit_vars { /* Input values from brw_blorp_wm_inputs */ @@ -51,21 +43,13 @@ struct brw_blorp_blit_vars { nir_variable *v_src_z; nir_variable *v_src_offset; nir_variable *v_dst_offset; - - /* gl_FragCoord */ - nir_variable *frag_coord; - - /* gl_FragColor */ - nir_variable *color_out; + nir_variable *v_src_inv_size; }; static void brw_blorp_blit_vars_init(nir_builder *b, struct brw_blorp_blit_vars *v, const struct brw_blorp_blit_prog_key *key) { - /* Blended and scaled blits never use pixel discard. */ - assert(!key->use_kill || !(key->blend && key->blit_scaled)); - #define LOAD_INPUT(name, type)\ v->v_##name = BLORP_CREATE_NIR_INPUT(b->shader, name, type); @@ -75,17 +59,9 @@ brw_blorp_blit_vars_init(nir_builder *b, struct brw_blorp_blit_vars *v, LOAD_INPUT(src_z, glsl_uint_type()) LOAD_INPUT(src_offset, glsl_vector_type(GLSL_TYPE_UINT, 2)) LOAD_INPUT(dst_offset, glsl_vector_type(GLSL_TYPE_UINT, 2)) + LOAD_INPUT(src_inv_size, glsl_vector_type(GLSL_TYPE_FLOAT, 2)) #undef LOAD_INPUT - - v->frag_coord = nir_variable_create(b->shader, nir_var_shader_in, - glsl_vec4_type(), "gl_FragCoord"); - v->frag_coord->data.location = VARYING_SLOT_POS; - v->frag_coord->data.origin_upper_left = true; - - v->color_out = nir_variable_create(b->shader, nir_var_shader_out, - glsl_vec4_type(), "gl_FragColor"); - v->color_out->data.location = FRAG_RESULT_COLOR; } static nir_ssa_def * @@ -93,7 +69,7 @@ blorp_blit_get_frag_coords(nir_builder *b, const struct brw_blorp_blit_prog_key *key, struct brw_blorp_blit_vars *v) { - nir_ssa_def *coord = nir_f2i(b, nir_load_var(b, v->frag_coord)); + nir_ssa_def *coord = nir_f2i32(b, nir_load_frag_coord(b)); /* Account for destination surface intratile offset * @@ -129,7 +105,7 @@ blorp_blit_apply_transform(nir_builder *b, nir_ssa_def *src_pos, nir_ssa_def *mul = nir_vec2(b, nir_channel(b, coord_transform, 0), nir_channel(b, coord_transform, 2)); - return nir_ffma(b, src_pos, mul, offset); + return nir_fadd(b, nir_fmul(b, src_pos, mul), offset); } static inline void @@ -170,8 +146,6 @@ blorp_create_nir_tex_instr(nir_builder *b, struct brw_blorp_blit_vars *v, tex->is_shadow = false; /* Blorp only has one texture and it's bound at unit 0 */ - tex->texture = NULL; - tex->sampler = NULL; tex->texture_index = 0; tex->sampler_index = 0; @@ -194,10 
+168,18 @@ blorp_create_nir_tex_instr(nir_builder *b, struct brw_blorp_blit_vars *v, static nir_ssa_def * blorp_nir_tex(nir_builder *b, struct brw_blorp_blit_vars *v, - nir_ssa_def *pos, nir_alu_type dst_type) + const struct brw_blorp_blit_prog_key *key, nir_ssa_def *pos) { + if (key->need_src_offset) + pos = nir_fadd(b, pos, nir_i2f32(b, nir_load_var(b, v->v_src_offset))); + + /* If the sampler requires normalized coordinates, we need to compensate. */ + if (key->src_coords_normalized) + pos = nir_fmul(b, pos, nir_load_var(b, v->v_src_inv_size)); + nir_tex_instr *tex = - blorp_create_nir_tex_instr(b, v, nir_texop_tex, pos, 2, dst_type); + blorp_create_nir_tex_instr(b, v, nir_texop_tex, pos, 2, + key->texture_data_type); assert(pos->num_components == 2); tex->sampler_dim = GLSL_SAMPLER_DIM_2D; @@ -254,7 +236,8 @@ blorp_nir_txf_ms(nir_builder *b, struct brw_blorp_blit_vars *v, } static nir_ssa_def * -blorp_nir_txf_ms_mcs(nir_builder *b, struct brw_blorp_blit_vars *v, nir_ssa_def *pos) +blorp_blit_txf_ms_mcs(nir_builder *b, struct brw_blorp_blit_vars *v, + nir_ssa_def *pos) { nir_tex_instr *tex = blorp_create_nir_tex_instr(b, v, nir_texop_txf_ms_mcs, @@ -267,25 +250,6 @@ blorp_nir_txf_ms_mcs(nir_builder *b, struct brw_blorp_blit_vars *v, nir_ssa_def return &tex->dest.ssa; } -static nir_ssa_def * -nir_mask_shift_or(struct nir_builder *b, nir_ssa_def *dst, nir_ssa_def *src, - uint32_t src_mask, int src_left_shift) -{ - nir_ssa_def *masked = nir_iand(b, src, nir_imm_int(b, src_mask)); - - nir_ssa_def *shifted; - if (src_left_shift > 0) { - shifted = nir_ishl(b, masked, nir_imm_int(b, src_left_shift)); - } else if (src_left_shift < 0) { - shifted = nir_ushr(b, masked, nir_imm_int(b, -src_left_shift)); - } else { - assert(src_left_shift == 0); - shifted = masked; - } - - return nir_ior(b, dst, shifted); -} - /** * Emit code to compensate for the difference between Y and W tiling. 
* @@ -589,15 +553,16 @@ static inline int count_trailing_one_bits(unsigned value) #ifdef HAVE___BUILTIN_CTZ return __builtin_ctz(~value); #else - return _mesa_bitcount(value & ~(value + 1)); + return util_bitcount(value & ~(value + 1)); #endif } static nir_ssa_def * -blorp_nir_manual_blend_average(nir_builder *b, struct brw_blorp_blit_vars *v, - nir_ssa_def *pos, unsigned tex_samples, - enum isl_aux_usage tex_aux_usage, - nir_alu_type dst_type) +blorp_nir_combine_samples(nir_builder *b, struct brw_blorp_blit_vars *v, + nir_ssa_def *pos, unsigned tex_samples, + enum isl_aux_usage tex_aux_usage, + nir_alu_type dst_type, + enum blorp_filter filter) { /* If non-null, this is the outer-most if statement */ nir_if *outer_if = NULL; @@ -606,8 +571,37 @@ blorp_nir_manual_blend_average(nir_builder *b, struct brw_blorp_blit_vars *v, nir_local_variable_create(b->impl, glsl_vec4_type(), "color"); nir_ssa_def *mcs = NULL; - if (tex_aux_usage == ISL_AUX_USAGE_MCS) - mcs = blorp_nir_txf_ms_mcs(b, v, pos); + if (isl_aux_usage_has_mcs(tex_aux_usage)) + mcs = blorp_blit_txf_ms_mcs(b, v, pos); + + nir_op combine_op; + switch (filter) { + case BLORP_FILTER_AVERAGE: + assert(dst_type == nir_type_float); + combine_op = nir_op_fadd; + break; + + case BLORP_FILTER_MIN_SAMPLE: + switch (dst_type) { + case nir_type_int: combine_op = nir_op_imin; break; + case nir_type_uint: combine_op = nir_op_umin; break; + case nir_type_float: combine_op = nir_op_fmin; break; + default: unreachable("Invalid dst_type"); + } + break; + + case BLORP_FILTER_MAX_SAMPLE: + switch (dst_type) { + case nir_type_int: combine_op = nir_op_imax; break; + case nir_type_uint: combine_op = nir_op_umax; break; + case nir_type_float: combine_op = nir_op_fmax; break; + default: unreachable("Invalid dst_type"); + } + break; + + default: + unreachable("Invalid filter"); + } /* We add together samples using a binary tree structure, e.g. for 4x MSAA: * @@ -641,7 +635,7 @@ blorp_nir_manual_blend_average(nir_builder *b, struct brw_blorp_blit_vars *v, nir_ssa_def *texture_data[5]; unsigned stack_depth = 0; for (unsigned i = 0; i < tex_samples; ++i) { - assert(stack_depth == _mesa_bitcount(i)); /* Loop invariant */ + assert(stack_depth == util_bitcount(i)); /* Loop invariant */ /* Push sample i onto the stack */ assert(stack_depth < ARRAY_SIZE(texture_data)); @@ -651,7 +645,7 @@ blorp_nir_manual_blend_average(nir_builder *b, struct brw_blorp_blit_vars *v, nir_imm_int(b, i)); texture_data[stack_depth++] = blorp_nir_txf_ms(b, v, ms_pos, mcs, dst_type); - if (i == 0 && tex_aux_usage == ISL_AUX_USAGE_MCS) { + if (i == 0 && isl_aux_usage_has_mcs(tex_aux_usage)) { /* The Ivy Bridge PRM, Vol4 Part1 p27 (Multisample Control Surface) * suggests an optimization: * @@ -665,6 +659,11 @@ blorp_nir_manual_blend_average(nir_builder *b, struct brw_blorp_blit_vars *v, * ld2dms are equivalent (since all samples are on sample slice 0). * Since we have already sampled from sample 0, all we need to do is * skip the remaining fetches and averaging if MCS is zero. + * + * It's also trivial to detect when the MCS has the magic clear color + * value. In this case, the txf we did on sample 0 will return the + * clear color and we can skip the remaining fetches just like we do + * when MCS == 0. 
*/ nir_ssa_def *mcs_zero = nir_ieq(b, nir_channel(b, mcs, 0), nir_imm_int(b, 0)); @@ -672,9 +671,11 @@ blorp_nir_manual_blend_average(nir_builder *b, struct brw_blorp_blit_vars *v, mcs_zero = nir_iand(b, mcs_zero, nir_ieq(b, nir_channel(b, mcs, 1), nir_imm_int(b, 0))); } + nir_ssa_def *mcs_clear = + blorp_nir_mcs_is_clear_color(b, mcs, tex_samples); nir_if *if_stmt = nir_if_create(b->shader); - if_stmt->condition = nir_src_for_ssa(mcs_zero); + if_stmt->condition = nir_src_for_ssa(nir_ior(b, mcs_zero, mcs_clear)); nir_cf_node_insert(b->cursor, &if_stmt->cf_node); b->cursor = nir_after_cf_list(&if_stmt->then_list); @@ -688,18 +689,22 @@ blorp_nir_manual_blend_average(nir_builder *b, struct brw_blorp_blit_vars *v, assert(stack_depth >= 2); --stack_depth; - assert(dst_type == nir_type_float); texture_data[stack_depth - 1] = - nir_fadd(b, texture_data[stack_depth - 1], - texture_data[stack_depth]); + nir_build_alu(b, combine_op, + texture_data[stack_depth - 1], + texture_data[stack_depth], + NULL, NULL); } } /* We should have just 1 sample on the stack now. */ assert(stack_depth == 1); - texture_data[0] = nir_fmul(b, texture_data[0], - nir_imm_float(b, 1.0 / tex_samples)); + if (filter == BLORP_FILTER_AVERAGE) { + assert(dst_type == nir_type_float); + texture_data[0] = nir_fmul(b, texture_data[0], + nir_imm_float(b, 1.0 / tex_samples)); + } nir_store_var(b, color, texture_data[0], 0xf); @@ -709,18 +714,6 @@ blorp_nir_manual_blend_average(nir_builder *b, struct brw_blorp_blit_vars *v, return nir_load_var(b, color); } -static inline nir_ssa_def * -nir_imm_vec2(nir_builder *build, float x, float y) -{ - nir_const_value v; - - memset(&v, 0, sizeof(v)); - v.f32[0] = x; - v.f32[1] = y; - - return nir_build_imm(build, 4, 32, v); -} - static nir_ssa_def * blorp_nir_manual_blend_bilinear(nir_builder *b, nir_ssa_def *pos, unsigned tex_samples, @@ -760,7 +753,7 @@ blorp_nir_manual_blend_bilinear(nir_builder *b, nir_ssa_def *pos, nir_ssa_def *sample_off = nir_imm_vec2(b, sample_off_x, sample_off_y); nir_ssa_def *sample_coords = nir_fadd(b, pos_xy, sample_off); - nir_ssa_def *sample_coords_int = nir_f2i(b, sample_coords); + nir_ssa_def *sample_coords_int = nir_f2i32(b, sample_coords); /* The MCS value we fetch has to match up with the pixel that we're * sampling from. Since we sample from different pixels in each @@ -768,14 +761,22 @@ blorp_nir_manual_blend_bilinear(nir_builder *b, nir_ssa_def *pos, * here inside the loop after computing the pixel coordinates. */ nir_ssa_def *mcs = NULL; - if (key->tex_aux_usage == ISL_AUX_USAGE_MCS) - mcs = blorp_nir_txf_ms_mcs(b, v, sample_coords_int); + if (isl_aux_usage_has_mcs(key->tex_aux_usage)) + mcs = blorp_blit_txf_ms_mcs(b, v, sample_coords_int); /* Compute sample index and map the sample index to a sample number. * Sample index layout shows the numbering of slots in a rectangular * grid of samples with in a pixel. Sample number layout shows the * rectangular grid of samples roughly corresponding to the real sample * locations with in a pixel. 
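+ *
+ * (Illustrative note, derived from the nibble table used below for the
+ * 8x MSAA case: sample index i maps to sample number
+ * (0x64210573 >> (4 * i)) & 0xf, so indices 0..7 map to sample numbers
+ * 3, 7, 5, 0, 1, 2, 4, 6.)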
+ * + * In the case of 2x MSAA, the layout of sample indices is reversed from + * the layout of sample numbers: + * + * sample index layout : --------- sample number layout : --------- + * | 0 | 1 | | 1 | 0 | + * --------- --------- + * * In case of 4x MSAA, layout of sample indices matches the layout of * sample numbers: * --------- @@ -817,9 +818,11 @@ blorp_nir_manual_blend_bilinear(nir_builder *b, nir_ssa_def *pos, nir_ssa_def *sample = nir_fdot2(b, frac, nir_imm_vec2(b, key->x_scale, key->x_scale * key->y_scale)); - sample = nir_f2i(b, sample); + sample = nir_f2i32(b, sample); - if (tex_samples == 8) { + if (tex_samples == 2) { + sample = nir_isub(b, nir_imm_int(b, 1), sample); + } else if (tex_samples == 8) { sample = nir_iand(b, nir_ishr(b, nir_imm_int(b, 0x64210573), nir_ishl(b, sample, nir_imm_int(b, 2))), nir_imm_int(b, 0xf)); @@ -851,6 +854,176 @@ blorp_nir_manual_blend_bilinear(nir_builder *b, nir_ssa_def *pos, frac_y); } +/** Perform a color bit-cast operation + * + * For copy operations involving CCS, we may need to use different formats for + * the source and destination surfaces. The two formats must both be UINT + * formats and must have the same size but may have different bit layouts. + * For instance, we may be copying from R8G8B8A8_UINT to R32_UINT or R32_UINT + * to R16G16_UINT. This function generates code to shuffle bits around to get + * us from one to the other. + */ +static nir_ssa_def * +bit_cast_color(struct nir_builder *b, nir_ssa_def *color, + const struct brw_blorp_blit_prog_key *key) +{ + if (key->src_format == key->dst_format) + return color; + + const struct isl_format_layout *src_fmtl = + isl_format_get_layout(key->src_format); + const struct isl_format_layout *dst_fmtl = + isl_format_get_layout(key->dst_format); + + /* They must be formats with the same bit size */ + assert(src_fmtl->bpb == dst_fmtl->bpb); + + if (src_fmtl->bpb <= 32) { + assert(src_fmtl->channels.r.type == ISL_UINT || + src_fmtl->channels.r.type == ISL_UNORM); + assert(dst_fmtl->channels.r.type == ISL_UINT || + dst_fmtl->channels.r.type == ISL_UNORM); + + nir_ssa_def *packed = nir_imm_int(b, 0); + for (unsigned c = 0; c < 4; c++) { + if (src_fmtl->channels_array[c].bits == 0) + continue; + + const unsigned chan_start_bit = src_fmtl->channels_array[c].start_bit; + const unsigned chan_bits = src_fmtl->channels_array[c].bits; + + nir_ssa_def *chan = nir_channel(b, color, c); + if (src_fmtl->channels_array[c].type == ISL_UNORM) + chan = nir_format_float_to_unorm(b, chan, &chan_bits); + + packed = nir_ior(b, packed, nir_shift(b, chan, chan_start_bit)); + } + + nir_ssa_def *chans[4] = { }; + for (unsigned c = 0; c < 4; c++) { + if (dst_fmtl->channels_array[c].bits == 0) { + chans[c] = nir_imm_int(b, 0); + continue; + } + + const unsigned chan_start_bit = dst_fmtl->channels_array[c].start_bit; + const unsigned chan_bits = dst_fmtl->channels_array[c].bits; + chans[c] = nir_iand(b, nir_shift(b, packed, -(int)chan_start_bit), + nir_imm_int(b, BITFIELD_MASK(chan_bits))); + + if (dst_fmtl->channels_array[c].type == ISL_UNORM) + chans[c] = nir_format_unorm_to_float(b, chans[c], &chan_bits); + } + color = nir_vec(b, chans, 4); + } else { + /* This path only supports UINT formats */ + assert(src_fmtl->channels.r.type == ISL_UINT); + assert(dst_fmtl->channels.r.type == ISL_UINT); + + const unsigned src_bpc = src_fmtl->channels.r.bits; + const unsigned dst_bpc = dst_fmtl->channels.r.bits; + + assert(src_fmtl->channels.g.bits == 0 || + src_fmtl->channels.g.bits == src_fmtl->channels.r.bits); + 
assert(src_fmtl->channels.b.bits == 0 || + src_fmtl->channels.b.bits == src_fmtl->channels.r.bits); + assert(src_fmtl->channels.a.bits == 0 || + src_fmtl->channels.a.bits == src_fmtl->channels.r.bits); + assert(dst_fmtl->channels.g.bits == 0 || + dst_fmtl->channels.g.bits == dst_fmtl->channels.r.bits); + assert(dst_fmtl->channels.b.bits == 0 || + dst_fmtl->channels.b.bits == dst_fmtl->channels.r.bits); + assert(dst_fmtl->channels.a.bits == 0 || + dst_fmtl->channels.a.bits == dst_fmtl->channels.r.bits); + + /* Restrict to only the channels we actually have */ + const unsigned src_channels = + isl_format_get_num_channels(key->src_format); + color = nir_channels(b, color, (1 << src_channels) - 1); + + color = nir_format_bitcast_uvec_unmasked(b, color, src_bpc, dst_bpc); + } + + /* Blorp likes to assume that colors are vec4s */ + nir_ssa_def *u = nir_ssa_undef(b, 1, 32); + nir_ssa_def *chans[4] = { u, u, u, u }; + for (unsigned i = 0; i < color->num_components; i++) + chans[i] = nir_channel(b, color, i); + return nir_vec4(b, chans[0], chans[1], chans[2], chans[3]); +} + +static nir_ssa_def * +select_color_channel(struct nir_builder *b, nir_ssa_def *color, + nir_alu_type data_type, + enum isl_channel_select chan) +{ + if (chan == ISL_CHANNEL_SELECT_ZERO) { + return nir_imm_int(b, 0); + } else if (chan == ISL_CHANNEL_SELECT_ONE) { + switch (data_type) { + case nir_type_int: + case nir_type_uint: + return nir_imm_int(b, 1); + case nir_type_float: + return nir_imm_float(b, 1); + default: + unreachable("Invalid data type"); + } + } else { + assert((unsigned)(chan - ISL_CHANNEL_SELECT_RED) < 4); + return nir_channel(b, color, chan - ISL_CHANNEL_SELECT_RED); + } +} + +static nir_ssa_def * +swizzle_color(struct nir_builder *b, nir_ssa_def *color, + struct isl_swizzle swizzle, nir_alu_type data_type) +{ + return nir_vec4(b, + select_color_channel(b, color, data_type, swizzle.r), + select_color_channel(b, color, data_type, swizzle.g), + select_color_channel(b, color, data_type, swizzle.b), + select_color_channel(b, color, data_type, swizzle.a)); +} + +static nir_ssa_def * +convert_color(struct nir_builder *b, nir_ssa_def *color, + const struct brw_blorp_blit_prog_key *key) +{ + /* All of our color conversions end up generating a single-channel color + * value that we need to write out. + */ + nir_ssa_def *value; + + if (key->dst_format == ISL_FORMAT_R24_UNORM_X8_TYPELESS) { + /* The destination image is bound as R32_UINT but the data needs to be + * in R24_UNORM_X8_TYPELESS. The bottom 24 are the actual data and the + * top 8 need to be zero. We can accomplish this by simply multiplying + * by a factor to scale things down. + */ + unsigned factor = (1 << 24) - 1; + value = nir_fsat(b, nir_channel(b, color, 0)); + value = nir_f2i32(b, nir_fmul(b, value, nir_imm_float(b, factor))); + } else if (key->dst_format == ISL_FORMAT_L8_UNORM_SRGB) { + value = nir_format_linear_to_srgb(b, nir_channel(b, color, 0)); + } else if (key->dst_format == ISL_FORMAT_R8G8B8_UNORM_SRGB) { + value = nir_format_linear_to_srgb(b, color); + } else if (key->dst_format == ISL_FORMAT_R9G9B9E5_SHAREDEXP) { + value = nir_format_pack_r9g9b9e5(b, color); + } else { + unreachable("Unsupported format conversion"); + } + + nir_ssa_def *out_comps[4]; + for (unsigned i = 0; i < 4; i++) { + if (i < value->num_components) + out_comps[i] = nir_channel(b, value, i); + else + out_comps[i] = nir_ssa_undef(b, 1, 32); + } + return nir_vec(b, out_comps, 4); +} + /** * Generator for WM programs used in BLORP blits. 
* @@ -1008,18 +1181,6 @@ brw_blorp_build_nir_shader(struct blorp_context *blorp, void *mem_ctx, assert(key->persample_msaa_dispatch); } - if (key->blend) { - /* We are blending, which means we won't have an opportunity to - * translate the tiling and sample count for the texture surface. So - * the surface state for the texture must be configured with the correct - * tiling and sample count. - */ - assert(!key->src_tiled_w); - assert(key->tex_samples == key->src_samples); - assert(key->tex_layout == key->src_layout); - assert(key->tex_samples > 0); - } - if (key->persample_msaa_dispatch) { /* It only makes sense to do persample dispatch if the render target is * configured as multisampled. @@ -1038,7 +1199,7 @@ brw_blorp_build_nir_shader(struct blorp_context *blorp, void *mem_ctx, (key->dst_samples <= 1)); nir_builder b; - nir_builder_init_simple_shader(&b, mem_ctx, MESA_SHADER_FRAGMENT, NULL); + blorp_nir_init_shader(&b, mem_ctx, MESA_SHADER_FRAGMENT, NULL); struct brw_blorp_blit_vars v; brw_blorp_blit_vars_init(&b, &v, key); @@ -1073,6 +1234,20 @@ brw_blorp_build_nir_shader(struct blorp_context *blorp, void *mem_ctx, key->dst_layout); } + nir_ssa_def *comp = NULL; + if (key->dst_rgb) { + /* The destination image is bound as a red texture three times as wide + * as the actual image. Our shader is effectively running one color + * component at a time. We need to save off the component and adjust + * the destination position. + */ + assert(dst_pos->num_components == 2); + nir_ssa_def *dst_x = nir_channel(&b, dst_pos, 0); + comp = nir_umod(&b, dst_x, nir_imm_int(&b, 3)); + dst_pos = nir_vec2(&b, nir_idiv(&b, dst_x, nir_imm_int(&b, 3)), + nir_channel(&b, dst_pos, 1)); + } + /* Now (X, Y, S) = decode_msaa(dst_samples, detile(dst_tiling, offset)). * * That is: X, Y and S now contain the true coordinates and sample index of @@ -1081,12 +1256,10 @@ brw_blorp_build_nir_shader(struct blorp_context *blorp, void *mem_ctx, * If we need to kill pixels that are outside the destination rectangle, * now is the time to do it. */ - if (key->use_kill) { - assert(!(key->blend && key->blit_scaled)); + if (key->use_kill) blorp_nir_discard_if_outside_rect(&b, dst_pos, &v); - } - src_pos = blorp_blit_apply_transform(&b, nir_i2f(&b, dst_pos), &v); + src_pos = blorp_blit_apply_transform(&b, nir_i2f32(&b, dst_pos), &v); if (dst_pos->num_components == 3) { /* The sample coordinate is an integer that we want left alone but * blorp_blit_apply_transform() blindly applies the transform to all @@ -1107,11 +1280,88 @@ brw_blorp_build_nir_shader(struct blorp_context *blorp, void *mem_ctx, * that we want to texture from. Exception: if we are blending, then S is * irrelevant, because we are going to fetch all samples. */ - if (key->blend && !key->blit_scaled) { + switch (key->filter) { + case BLORP_FILTER_NONE: + case BLORP_FILTER_NEAREST: + case BLORP_FILTER_SAMPLE_0: + /* We're going to use texelFetch, so we need integers */ + if (src_pos->num_components == 2) { + src_pos = nir_f2i32(&b, src_pos); + } else { + assert(src_pos->num_components == 3); + src_pos = nir_vec3(&b, nir_channel(&b, nir_f2i32(&b, src_pos), 0), + nir_channel(&b, nir_f2i32(&b, src_pos), 1), + nir_channel(&b, src_pos, 2)); + } + + /* We aren't blending, which means we just want to fetch a single + * sample from the source surface. The address that we want to fetch + * from is related to the X, Y and S values according to the formula: + * + * (X, Y, S) = decode_msaa(src_samples, detile(src_tiling, offset)). 
+ * + * If the actual tiling and sample count of the source surface are + * not the same as the configuration of the texture, then we need to + * adjust the coordinates to compensate for the difference. + */ + if (tex_tiled_w != key->src_tiled_w || + key->tex_samples != key->src_samples || + key->tex_layout != key->src_layout) { + src_pos = blorp_nir_encode_msaa(&b, src_pos, key->src_samples, + key->src_layout); + /* Now (X, Y, S) = detile(src_tiling, offset) */ + if (tex_tiled_w != key->src_tiled_w) + src_pos = blorp_nir_retile_w_to_y(&b, src_pos); + /* Now (X, Y, S) = detile(tex_tiling, offset) */ + src_pos = blorp_nir_decode_msaa(&b, src_pos, key->tex_samples, + key->tex_layout); + } + + if (key->need_src_offset) + src_pos = nir_iadd(&b, src_pos, nir_load_var(&b, v.v_src_offset)); + + /* Now (X, Y, S) = decode_msaa(tex_samples, detile(tex_tiling, offset)). + * + * In other words: X, Y, and S now contain values which, when passed to + * the texturing unit, will cause data to be read from the correct + * memory location. So we can fetch the texel now. + */ + if (key->src_samples == 1) { + color = blorp_nir_txf(&b, &v, src_pos, key->texture_data_type); + } else { + nir_ssa_def *mcs = NULL; + if (isl_aux_usage_has_mcs(key->tex_aux_usage)) + mcs = blorp_blit_txf_ms_mcs(&b, &v, src_pos); + + color = blorp_nir_txf_ms(&b, &v, src_pos, mcs, key->texture_data_type); + } + break; + + case BLORP_FILTER_BILINEAR: + assert(!key->src_tiled_w); + assert(key->tex_samples == key->src_samples); + assert(key->tex_layout == key->src_layout); + + if (key->src_samples == 1) { + color = blorp_nir_tex(&b, &v, key, src_pos); + } else { + assert(!key->use_kill); + color = blorp_nir_manual_blend_bilinear(&b, src_pos, key->src_samples, + key, &v); + } + break; + + case BLORP_FILTER_AVERAGE: + case BLORP_FILTER_MIN_SAMPLE: + case BLORP_FILTER_MAX_SAMPLE: + assert(!key->src_tiled_w); + assert(key->tex_samples == key->src_samples); + assert(key->tex_layout == key->src_layout); + /* Resolves (effecively) use texelFetch, so we need integers and we * don't care about the sample index if we got one. */ - src_pos = nir_f2i(&b, nir_channels(&b, src_pos, 0x3)); + src_pos = nir_f2i32(&b, nir_channels(&b, src_pos, 0x3)); if (devinfo->gen == 6) { /* Because gen6 only supports 4x interleved MSAA, we can do all the @@ -1121,75 +1371,64 @@ brw_blorp_build_nir_shader(struct blorp_context *blorp, void *mem_ctx, * representing the four samples that maxe up a pixel. So we need * to multiply our X and Y coordinates each by 2 and then add 1. */ - src_pos = nir_ishl(&b, src_pos, nir_imm_int(&b, 1)); - src_pos = nir_iadd(&b, src_pos, nir_imm_int(&b, 1)); - src_pos = nir_i2f(&b, src_pos); - color = blorp_nir_tex(&b, &v, src_pos, key->texture_data_type); + assert(key->src_coords_normalized); + assert(key->filter == BLORP_FILTER_AVERAGE); + src_pos = nir_fadd(&b, + nir_i2f32(&b, src_pos), + nir_imm_float(&b, 0.5f)); + color = blorp_nir_tex(&b, &v, key, src_pos); } else { /* Gen7+ hardware doesn't automaticaly blend. 
*/ - color = blorp_nir_manual_blend_average(&b, &v, src_pos, key->src_samples, - key->tex_aux_usage, - key->texture_data_type); + color = blorp_nir_combine_samples(&b, &v, src_pos, key->src_samples, + key->tex_aux_usage, + key->texture_data_type, + key->filter); } - } else if (key->blend && key->blit_scaled) { - assert(!key->use_kill); - color = blorp_nir_manual_blend_bilinear(&b, src_pos, key->src_samples, key, &v); - } else { - if (key->bilinear_filter) { - color = blorp_nir_tex(&b, &v, src_pos, key->texture_data_type); - } else { - /* We're going to use texelFetch, so we need integers */ - if (src_pos->num_components == 2) { - src_pos = nir_f2i(&b, src_pos); - } else { - assert(src_pos->num_components == 3); - src_pos = nir_vec3(&b, nir_channel(&b, nir_f2i(&b, src_pos), 0), - nir_channel(&b, nir_f2i(&b, src_pos), 1), - nir_channel(&b, src_pos, 2)); - } + break; - /* We aren't blending, which means we just want to fetch a single - * sample from the source surface. The address that we want to fetch - * from is related to the X, Y and S values according to the formula: - * - * (X, Y, S) = decode_msaa(src_samples, detile(src_tiling, offset)). - * - * If the actual tiling and sample count of the source surface are - * not the same as the configuration of the texture, then we need to - * adjust the coordinates to compensate for the difference. - */ - if (tex_tiled_w != key->src_tiled_w || - key->tex_samples != key->src_samples || - key->tex_layout != key->src_layout) { - src_pos = blorp_nir_encode_msaa(&b, src_pos, key->src_samples, - key->src_layout); - /* Now (X, Y, S) = detile(src_tiling, offset) */ - if (tex_tiled_w != key->src_tiled_w) - src_pos = blorp_nir_retile_w_to_y(&b, src_pos); - /* Now (X, Y, S) = detile(tex_tiling, offset) */ - src_pos = blorp_nir_decode_msaa(&b, src_pos, key->tex_samples, - key->tex_layout); - } + default: + unreachable("Invalid blorp filter"); + } - if (key->need_src_offset) - src_pos = nir_iadd(&b, src_pos, nir_load_var(&b, v.v_src_offset)); + if (!isl_swizzle_is_identity(key->src_swizzle)) { + color = swizzle_color(&b, color, key->src_swizzle, + key->texture_data_type); + } - /* Now (X, Y, S) = decode_msaa(tex_samples, detile(tex_tiling, offset)). - * - * In other words: X, Y, and S now contain values which, when passed to - * the texturing unit, will cause data to be read from the correct - * memory location. So we can fetch the texel now. - */ - if (key->src_samples == 1) { - color = blorp_nir_txf(&b, &v, src_pos, key->texture_data_type); - } else { - nir_ssa_def *mcs = NULL; - if (key->tex_aux_usage == ISL_AUX_USAGE_MCS) - mcs = blorp_nir_txf_ms_mcs(&b, &v, src_pos); + if (!isl_swizzle_is_identity(key->dst_swizzle)) { + color = swizzle_color(&b, color, isl_swizzle_invert(key->dst_swizzle), + nir_type_int); + } - color = blorp_nir_txf_ms(&b, &v, src_pos, mcs, key->texture_data_type); - } - } + if (key->format_bit_cast) { + assert(isl_swizzle_is_identity(key->src_swizzle)); + assert(isl_swizzle_is_identity(key->dst_swizzle)); + color = bit_cast_color(&b, color, key); + } else if (key->dst_format) { + color = convert_color(&b, color, key); + } else if (key->uint32_to_sint) { + /* Normally the hardware will take care of converting values from/to + * the source and destination formats. But a few cases need help. + * + * The Skylake PRM, volume 07, page 658 has a programming note: + * + * "When using SINT or UINT rendertarget surface formats, Blending + * must be DISABLED. 
The Pre-Blend Color Clamp Enable and Color + * Clamp Range fields are ignored, and an implied clamp to the + * rendertarget surface format is performed." + * + * For UINT to SINT blits, our sample operation gives us a uint32_t, + * but our render target write expects a signed int32_t number. If we + * simply passed the value along, the hardware would interpret a value + * with bit 31 set as a negative value, clamping it to the largest + * negative number the destination format could represent. But the + * actual source value is a positive number, so we want to clamp it + * to INT_MAX. To fix this, we explicitly take min(color, INT_MAX). + */ + color = nir_umin(&b, color, nir_imm_int(&b, INT32_MAX)); + } else if (key->sint32_to_uint) { + /* Similar to above, but clamping negative numbers to zero. */ + color = nir_imax(&b, color, nir_imm_int(&b, 0)); } if (key->dst_rgb) { @@ -1199,8 +1438,6 @@ brw_blorp_build_nir_shader(struct blorp_context *blorp, void *mem_ctx, * from the source color and write that to destination red. */ assert(dst_pos->num_components == 2); - nir_ssa_def *comp = - nir_umod(&b, nir_channel(&b, dst_pos, 0), nir_imm_int(&b, 3)); nir_ssa_def *color_component = nir_bcsel(&b, nir_ieq(&b, comp, nir_imm_int(&b, 0)), @@ -1213,43 +1450,69 @@ brw_blorp_build_nir_shader(struct blorp_context *blorp, void *mem_ctx, color = nir_vec4(&b, color_component, u, u, u); } - nir_store_var(&b, v.color_out, color, 0xf); + if (key->dst_usage == ISL_SURF_USAGE_RENDER_TARGET_BIT) { + nir_variable *color_out = + nir_variable_create(b.shader, nir_var_shader_out, + glsl_vec4_type(), "gl_FragColor"); + color_out->data.location = FRAG_RESULT_COLOR; + nir_store_var(&b, color_out, color, 0xf); + } else if (key->dst_usage == ISL_SURF_USAGE_DEPTH_BIT) { + nir_variable *depth_out = + nir_variable_create(b.shader, nir_var_shader_out, + glsl_float_type(), "gl_FragDepth"); + depth_out->data.location = FRAG_RESULT_DEPTH; + nir_store_var(&b, depth_out, nir_channel(&b, color, 0), 0x1); + } else if (key->dst_usage == ISL_SURF_USAGE_STENCIL_BIT) { + nir_variable *stencil_out = + nir_variable_create(b.shader, nir_var_shader_out, + glsl_int_type(), "gl_FragStencilRef"); + stencil_out->data.location = FRAG_RESULT_STENCIL; + nir_store_var(&b, stencil_out, nir_channel(&b, color, 0), 0x1); + } else { + unreachable("Invalid destination usage"); + } return b.shader; } -static void -brw_blorp_get_blit_kernel(struct blorp_context *blorp, +static bool +brw_blorp_get_blit_kernel(struct blorp_batch *batch, struct blorp_params *params, const struct brw_blorp_blit_prog_key *prog_key) { - if (blorp->lookup_shader(blorp, prog_key, sizeof(*prog_key), + struct blorp_context *blorp = batch->blorp; + + if (blorp->lookup_shader(batch, prog_key, sizeof(*prog_key), ¶ms->wm_prog_kernel, ¶ms->wm_prog_data)) - return; + return true; void *mem_ctx = ralloc_context(NULL); const unsigned *program; - unsigned program_size; struct brw_wm_prog_data prog_data; nir_shader *nir = brw_blorp_build_nir_shader(blorp, mem_ctx, prog_key); + nir->info.name = ralloc_strdup(nir, "BLORP-blit"); + struct brw_wm_prog_key wm_key; brw_blorp_init_wm_prog_key(&wm_key); - wm_key.tex.compressed_multisample_layout_mask = - prog_key->tex_aux_usage == ISL_AUX_USAGE_MCS; - wm_key.tex.msaa_16 = prog_key->tex_samples == 16; + wm_key.base.tex.compressed_multisample_layout_mask = + isl_aux_usage_has_mcs(prog_key->tex_aux_usage); + wm_key.base.tex.msaa_16 = prog_key->tex_samples == 16; wm_key.multisample_fbo = prog_key->rt_samples > 1; program = blorp_compile_fs(blorp, mem_ctx, 
nir, &wm_key, false, - &prog_data, &program_size); + &prog_data); - blorp->upload_shader(blorp, prog_key, sizeof(*prog_key), - program, program_size, - &prog_data.base, sizeof(prog_data), - ¶ms->wm_prog_kernel, ¶ms->wm_prog_data); + bool result = + blorp->upload_shader(batch, MESA_SHADER_FRAGMENT, + prog_key, sizeof(*prog_key), + program, prog_data.base.program_size, + &prog_data.base, sizeof(prog_data), + ¶ms->wm_prog_kernel, ¶ms->wm_prog_data); ralloc_free(mem_ctx); + return result; } static void @@ -1300,10 +1563,15 @@ surf_get_intratile_offset_px(struct brw_blorp_surface_info *info, } } -static void -surf_convert_to_single_slice(const struct isl_device *isl_dev, - struct brw_blorp_surface_info *info) +void +blorp_surf_convert_to_single_slice(const struct isl_device *isl_dev, + struct brw_blorp_surface_info *info) { + bool ok UNUSED; + + /* It would be insane to try and do this on a compressed surface */ + assert(info->aux_usage == ISL_AUX_USAGE_NONE); + /* Just bail if we have nothing to do. */ if (info->surf.dim == ISL_SURF_DIM_2D && info->view.base_level == 0 && info->view.base_array_layer == 0 && @@ -1321,42 +1589,25 @@ surf_convert_to_single_slice(const struct isl_device *isl_dev, else layer = info->view.base_array_layer; - uint32_t x_offset_sa, y_offset_sa; - isl_surf_get_image_offset_sa(&info->surf, info->view.base_level, - layer, z, &x_offset_sa, &y_offset_sa); - uint32_t byte_offset; - isl_tiling_get_intratile_offset_sa(isl_dev, info->surf.tiling, - info->surf.format, info->surf.row_pitch, - x_offset_sa, y_offset_sa, - &byte_offset, - &info->tile_x_sa, &info->tile_y_sa); + isl_surf_get_image_surf(isl_dev, &info->surf, + info->view.base_level, layer, z, + &info->surf, + &byte_offset, &info->tile_x_sa, &info->tile_y_sa); info->addr.offset += byte_offset; - const uint32_t slice_width_px = - minify(info->surf.logical_level0_px.width, info->view.base_level); - const uint32_t slice_height_px = - minify(info->surf.logical_level0_px.height, info->view.base_level); - uint32_t tile_x_px, tile_y_px; surf_get_intratile_offset_px(info, &tile_x_px, &tile_y_px); - struct isl_surf_init_info init_info = { - .dim = ISL_SURF_DIM_2D, - .format = info->surf.format, - .width = slice_width_px + tile_x_px, - .height = slice_height_px + tile_y_px, - .depth = 1, - .levels = 1, - .array_len = 1, - .samples = info->surf.samples, - .min_pitch = info->surf.row_pitch, - .usage = info->surf.usage, - .tiling_flags = 1 << info->surf.tiling, - }; - - isl_surf_init_s(isl_dev, &info->surf, &init_info); - assert(info->surf.row_pitch == init_info.min_pitch); + /* Instead of using the X/Y Offset fields in RENDER_SURFACE_STATE, we place + * the image at the tile boundary and offset our sampling or rendering. + * For this reason, we need to grow the image by the offset to ensure that + * the hardware doesn't think we've gone past the edge. + */ + info->surf.logical_level0_px.w += tile_x_px; + info->surf.logical_level0_px.h += tile_y_px; + info->surf.phys_level0_sa.w += info->tile_x_sa; + info->surf.phys_level0_sa.h += info->tile_y_sa; /* The view is also different now. 
*/ info->view.base_level = 0; @@ -1366,28 +1617,28 @@ surf_convert_to_single_slice(const struct isl_device *isl_dev, info->z_offset = 0; } -static void -surf_fake_interleaved_msaa(const struct isl_device *isl_dev, - struct brw_blorp_surface_info *info) +void +blorp_surf_fake_interleaved_msaa(const struct isl_device *isl_dev, + struct brw_blorp_surface_info *info) { assert(info->surf.msaa_layout == ISL_MSAA_LAYOUT_INTERLEAVED); /* First, we need to convert it to a simple 1-level 1-layer 2-D surface */ - surf_convert_to_single_slice(isl_dev, info); + blorp_surf_convert_to_single_slice(isl_dev, info); info->surf.logical_level0_px = info->surf.phys_level0_sa; info->surf.samples = 1; info->surf.msaa_layout = ISL_MSAA_LAYOUT_NONE; } -static void -surf_retile_w_to_y(const struct isl_device *isl_dev, - struct brw_blorp_surface_info *info) +void +blorp_surf_retile_w_to_y(const struct isl_device *isl_dev, + struct brw_blorp_surface_info *info) { assert(info->surf.tiling == ISL_TILING_W); /* First, we need to convert it to a simple 1-level 1-layer 2-D surface */ - surf_convert_to_single_slice(isl_dev, info); + blorp_surf_convert_to_single_slice(isl_dev, info); /* On gen7+, we don't have interleaved multisampling for color render * targets so we have to fake it. @@ -1396,7 +1647,7 @@ surf_retile_w_to_y(const struct isl_device *isl_dev, */ if (isl_dev->info->gen > 6 && info->surf.msaa_layout == ISL_MSAA_LAYOUT_INTERLEAVED) { - surf_fake_interleaved_msaa(isl_dev, info); + blorp_surf_fake_interleaved_msaa(isl_dev, info); } if (isl_dev->info->gen == 6) { @@ -1421,18 +1672,165 @@ surf_retile_w_to_y(const struct isl_device *isl_dev, info->tile_y_sa /= 2; } -static void -do_blorp_blit(struct blorp_batch *batch, - struct blorp_params *params, - struct brw_blorp_blit_prog_key *wm_prog_key, - float src_x0, float src_y0, - float src_x1, float src_y1, - float dst_x0, float dst_y0, - float dst_x1, float dst_y1, - bool mirror_x, bool mirror_y) +static bool +can_shrink_surface(const struct brw_blorp_surface_info *surf) +{ + /* The current code doesn't support offsets into the aux buffers. This + * should be possible, but we need to make sure the offset is page + * aligned for both the surface and the aux buffer surface. Generally + * this mean using the page aligned offset for the aux buffer. + * + * Currently the cases where we must split the blit are limited to cases + * where we don't have a aux buffer. + */ + if (surf->aux_addr.buffer != NULL) + return false; + + /* We can't support splitting the blit for gen <= 7, because the qpitch + * size is calculated by the hardware based on the surface height for + * gen <= 7. In gen >= 8, the qpitch is controlled by the driver. + */ + if (surf->surf.msaa_layout == ISL_MSAA_LAYOUT_ARRAY) + return false; + + return true; +} + +static unsigned +get_max_surface_size(const struct gen_device_info *devinfo, + const struct brw_blorp_surface_info *surf) +{ + const unsigned max = devinfo->gen >= 7 ? 
16384 : 8192; + if (split_blorp_blit_debug && can_shrink_surface(surf)) + return max >> 4; /* A smaller restriction when debug is enabled */ + else + return max; +} + +struct blt_axis { + double src0, src1, dst0, dst1; + bool mirror; +}; + +struct blt_coords { + struct blt_axis x, y; +}; + +static enum isl_format +get_red_format_for_rgb_format(enum isl_format format) +{ + const struct isl_format_layout *fmtl = isl_format_get_layout(format); + + switch (fmtl->channels.r.bits) { + case 8: + switch (fmtl->channels.r.type) { + case ISL_UNORM: + return ISL_FORMAT_R8_UNORM; + case ISL_SNORM: + return ISL_FORMAT_R8_SNORM; + case ISL_UINT: + return ISL_FORMAT_R8_UINT; + case ISL_SINT: + return ISL_FORMAT_R8_SINT; + default: + unreachable("Invalid 8-bit RGB channel type"); + } + case 16: + switch (fmtl->channels.r.type) { + case ISL_UNORM: + return ISL_FORMAT_R16_UNORM; + case ISL_SNORM: + return ISL_FORMAT_R16_SNORM; + case ISL_SFLOAT: + return ISL_FORMAT_R16_FLOAT; + case ISL_UINT: + return ISL_FORMAT_R16_UINT; + case ISL_SINT: + return ISL_FORMAT_R16_SINT; + default: + unreachable("Invalid 8-bit RGB channel type"); + } + case 32: + switch (fmtl->channels.r.type) { + case ISL_SFLOAT: + return ISL_FORMAT_R32_FLOAT; + case ISL_UINT: + return ISL_FORMAT_R32_UINT; + case ISL_SINT: + return ISL_FORMAT_R32_SINT; + default: + unreachable("Invalid 8-bit RGB channel type"); + } + default: + unreachable("Invalid number of red channel bits"); + } +} + +void +surf_fake_rgb_with_red(const struct isl_device *isl_dev, + struct brw_blorp_surface_info *info) +{ + blorp_surf_convert_to_single_slice(isl_dev, info); + + info->surf.logical_level0_px.width *= 3; + info->surf.phys_level0_sa.width *= 3; + info->tile_x_sa *= 3; + + enum isl_format red_format = + get_red_format_for_rgb_format(info->view.format); + + assert(isl_format_get_layout(red_format)->channels.r.type == + isl_format_get_layout(info->view.format)->channels.r.type); + assert(isl_format_get_layout(red_format)->channels.r.bits == + isl_format_get_layout(info->view.format)->channels.r.bits); + + info->surf.format = info->view.format = red_format; +} + +enum blit_shrink_status { + BLIT_NO_SHRINK = 0, + BLIT_SRC_WIDTH_SHRINK = (1 << 0), + BLIT_DST_WIDTH_SHRINK = (1 << 1), + BLIT_SRC_HEIGHT_SHRINK = (1 << 2), + BLIT_DST_HEIGHT_SHRINK = (1 << 3), +}; + +/* Try to blit. If the surface parameters exceed the size allowed by hardware, + * then enum blit_shrink_status will be returned. If BLIT_NO_SHRINK is + * returned, then the blit was successful. + */ +static enum blit_shrink_status +try_blorp_blit(struct blorp_batch *batch, + struct blorp_params *params, + struct brw_blorp_blit_prog_key *wm_prog_key, + struct blt_coords *coords) { const struct gen_device_info *devinfo = batch->blorp->isl_dev->info; + if (params->dst.surf.usage & ISL_SURF_USAGE_DEPTH_BIT) { + if (devinfo->gen >= 7) { + /* We can render as depth on Gen5 but there's no real advantage since + * it doesn't support MSAA or HiZ. On Gen4, we can't always render + * to depth due to issues with depth buffers and mip-mapping. On + * Gen6, we can do everything but we have weird offsetting for HiZ + * and stencil. It's easier to just render using the color pipe + * on those platforms. 
+ */ + wm_prog_key->dst_usage = ISL_SURF_USAGE_DEPTH_BIT; + } else { + wm_prog_key->dst_usage = ISL_SURF_USAGE_RENDER_TARGET_BIT; + } + } else if (params->dst.surf.usage & ISL_SURF_USAGE_STENCIL_BIT) { + assert(params->dst.surf.format == ISL_FORMAT_R8_UINT); + if (devinfo->gen >= 9) { + wm_prog_key->dst_usage = ISL_SURF_USAGE_STENCIL_BIT; + } else { + wm_prog_key->dst_usage = ISL_SURF_USAGE_RENDER_TARGET_BIT; + } + } else { + wm_prog_key->dst_usage = ISL_SURF_USAGE_RENDER_TARGET_BIT; + } + if (isl_format_has_sint_channel(params->src.view.format)) { wm_prog_key->texture_data_type = nir_type_int; } else if (isl_format_has_uint_channel(params->src.view.format)) { @@ -1456,17 +1854,38 @@ do_blorp_blit(struct blorp_batch *batch, /* Round floating point values to nearest integer to avoid "off by one texel" * kind of errors when blitting. */ - params->x0 = params->wm_inputs.discard_rect.x0 = roundf(dst_x0); - params->y0 = params->wm_inputs.discard_rect.y0 = roundf(dst_y0); - params->x1 = params->wm_inputs.discard_rect.x1 = roundf(dst_x1); - params->y1 = params->wm_inputs.discard_rect.y1 = roundf(dst_y1); + params->x0 = params->wm_inputs.discard_rect.x0 = round(coords->x.dst0); + params->y0 = params->wm_inputs.discard_rect.y0 = round(coords->y.dst0); + params->x1 = params->wm_inputs.discard_rect.x1 = round(coords->x.dst1); + params->y1 = params->wm_inputs.discard_rect.y1 = round(coords->y.dst1); brw_blorp_setup_coord_transform(¶ms->wm_inputs.coord_transform[0], - src_x0, src_x1, dst_x0, dst_x1, mirror_x); + coords->x.src0, coords->x.src1, + coords->x.dst0, coords->x.dst1, + coords->x.mirror); brw_blorp_setup_coord_transform(¶ms->wm_inputs.coord_transform[1], - src_y0, src_y1, dst_y0, dst_y1, mirror_y); + coords->y.src0, coords->y.src1, + coords->y.dst0, coords->y.dst1, + coords->y.mirror); + + + if (devinfo->gen == 4) { + /* The MinLOD and MinimumArrayElement don't work properly for cube maps. + * Convert them to a single slice on gen4. 
+ */ + if (params->dst.surf.usage & ISL_SURF_USAGE_CUBE_BIT) { + blorp_surf_convert_to_single_slice(batch->blorp->isl_dev, ¶ms->dst); + wm_prog_key->need_dst_offset = true; + } + + if (params->src.surf.usage & ISL_SURF_USAGE_CUBE_BIT) { + blorp_surf_convert_to_single_slice(batch->blorp->isl_dev, ¶ms->src); + wm_prog_key->need_src_offset = true; + } + } if (devinfo->gen > 6 && + !isl_surf_usage_is_depth_or_stencil(wm_prog_key->dst_usage) && params->dst.surf.msaa_layout == ISL_MSAA_LAYOUT_INTERLEAVED) { assert(params->dst.surf.samples > 1); @@ -1489,13 +1908,14 @@ do_blorp_blit(struct blorp_batch *batch, params->x1 = ALIGN(params->x1, 2) * px_size_sa.width; params->y1 = ALIGN(params->y1, 2) * px_size_sa.height; - surf_fake_interleaved_msaa(batch->blorp->isl_dev, ¶ms->dst); + blorp_surf_fake_interleaved_msaa(batch->blorp->isl_dev, ¶ms->dst); wm_prog_key->use_kill = true; wm_prog_key->need_dst_offset = true; } - if (params->dst.surf.tiling == ISL_TILING_W) { + if (params->dst.surf.tiling == ISL_TILING_W && + wm_prog_key->dst_usage != ISL_SURF_USAGE_STENCIL_BIT) { /* We must modify the rectangle we send through the rendering pipeline * (and the size and x/y offset of the destination surface), to account * for the fact that we are mapping it as Y-tiled when it is in fact @@ -1550,7 +1970,7 @@ do_blorp_blit(struct blorp_batch *batch, params->y1 = ALIGN(params->y1, y_align) / 2; /* Retile the surface to Y-tiled */ - surf_retile_w_to_y(batch->blorp->isl_dev, ¶ms->dst); + blorp_surf_retile_w_to_y(batch->blorp->isl_dev, ¶ms->dst); wm_prog_key->dst_tiled_w = true; wm_prog_key->use_kill = true; @@ -1576,7 +1996,7 @@ do_blorp_blit(struct blorp_batch *batch, * * TODO: what if this makes the texture size too large? */ - surf_retile_w_to_y(batch->blorp->isl_dev, ¶ms->src); + blorp_surf_retile_w_to_y(batch->blorp->isl_dev, ¶ms->src); wm_prog_key->src_tiled_w = true; wm_prog_key->need_src_offset = true; @@ -1605,6 +2025,72 @@ do_blorp_blit(struct blorp_batch *batch, params->num_samples = params->dst.surf.samples; + if ((wm_prog_key->filter == BLORP_FILTER_AVERAGE || + wm_prog_key->filter == BLORP_FILTER_BILINEAR) && + batch->blorp->isl_dev->info->gen <= 6) { + /* Gen4-5 don't support non-normalized texture coordinates */ + wm_prog_key->src_coords_normalized = true; + params->wm_inputs.src_inv_size[0] = + 1.0f / minify(params->src.surf.logical_level0_px.width, + params->src.view.base_level); + params->wm_inputs.src_inv_size[1] = + 1.0f / minify(params->src.surf.logical_level0_px.height, + params->src.view.base_level); + } + + if (isl_format_get_layout(params->dst.view.format)->bpb % 3 == 0) { + /* We can't render to RGB formats natively because they aren't a + * power-of-two size. Instead, we fake them by using a red format + * with the same channel type and size and emitting shader code to + * only write one channel at a time. 
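+       *
+       * (Illustrative: with the destination faked as a red surface three
+       * times as wide, destination pixel X = 5 covers red texels 15..17.
+       * The blit shader recovers the channel as comp = X' % 3 and the real
+       * pixel as X = X' / 3, so X' = 16 writes the green channel of
+       * pixel 5.)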
+ */ + params->x0 *= 3; + params->x1 *= 3; + + /* If it happens to be sRGB, we need to force a conversion */ + if (params->dst.view.format == ISL_FORMAT_R8G8B8_UNORM_SRGB) + wm_prog_key->dst_format = ISL_FORMAT_R8G8B8_UNORM_SRGB; + + surf_fake_rgb_with_red(batch->blorp->isl_dev, ¶ms->dst); + + wm_prog_key->dst_rgb = true; + wm_prog_key->need_dst_offset = true; + } else if (isl_format_is_rgbx(params->dst.view.format)) { + /* We can handle RGBX formats easily enough by treating them as RGBA */ + params->dst.view.format = + isl_format_rgbx_to_rgba(params->dst.view.format); + } else if (params->dst.view.format == ISL_FORMAT_R24_UNORM_X8_TYPELESS && + wm_prog_key->dst_usage != ISL_SURF_USAGE_DEPTH_BIT) { + wm_prog_key->dst_format = params->dst.view.format; + params->dst.view.format = ISL_FORMAT_R32_UINT; + } else if (params->dst.view.format == ISL_FORMAT_A4B4G4R4_UNORM) { + params->dst.view.swizzle = + isl_swizzle_compose(params->dst.view.swizzle, + ISL_SWIZZLE(ALPHA, RED, GREEN, BLUE)); + params->dst.view.format = ISL_FORMAT_B4G4R4A4_UNORM; + } else if (params->dst.view.format == ISL_FORMAT_L8_UNORM_SRGB) { + wm_prog_key->dst_format = params->dst.view.format; + params->dst.view.format = ISL_FORMAT_R8_UNORM; + } else if (params->dst.view.format == ISL_FORMAT_R9G9B9E5_SHAREDEXP) { + wm_prog_key->dst_format = params->dst.view.format; + params->dst.view.format = ISL_FORMAT_R32_UINT; + } + + if (devinfo->gen <= 7 && !devinfo->is_haswell && + !isl_swizzle_is_identity(params->src.view.swizzle)) { + wm_prog_key->src_swizzle = params->src.view.swizzle; + params->src.view.swizzle = ISL_SWIZZLE_IDENTITY; + } else { + wm_prog_key->src_swizzle = ISL_SWIZZLE_IDENTITY; + } + + if (!isl_swizzle_supports_rendering(devinfo, params->dst.view.swizzle)) { + wm_prog_key->dst_swizzle = params->dst.view.swizzle; + params->dst.view.swizzle = ISL_SWIZZLE_IDENTITY; + } else { + wm_prog_key->dst_swizzle = ISL_SWIZZLE_IDENTITY; + } + if (params->src.tile_x_sa || params->src.tile_y_sa) { assert(wm_prog_key->need_src_offset); surf_get_intratile_offset_px(¶ms->src, @@ -1626,9 +2112,208 @@ do_blorp_blit(struct blorp_batch *batch, /* For some texture types, we need to pass the layer through the sampler. 
*/ params->wm_inputs.src_z = params->src.z_offset; - brw_blorp_get_blit_kernel(batch->blorp, params, wm_prog_key); + if (!brw_blorp_get_blit_kernel(batch, params, wm_prog_key)) + return 0; + + if (!blorp_ensure_sf_program(batch, params)) + return 0; + + unsigned result = 0; + unsigned max_src_surface_size = get_max_surface_size(devinfo, ¶ms->src); + if (params->src.surf.logical_level0_px.width > max_src_surface_size) + result |= BLIT_SRC_WIDTH_SHRINK; + if (params->src.surf.logical_level0_px.height > max_src_surface_size) + result |= BLIT_SRC_HEIGHT_SHRINK; + + unsigned max_dst_surface_size = get_max_surface_size(devinfo, ¶ms->dst); + if (params->dst.surf.logical_level0_px.width > max_dst_surface_size) + result |= BLIT_DST_WIDTH_SHRINK; + if (params->dst.surf.logical_level0_px.height > max_dst_surface_size) + result |= BLIT_DST_HEIGHT_SHRINK; + + if (result == 0) { + if (wm_prog_key->dst_usage == ISL_SURF_USAGE_DEPTH_BIT) { + params->depth = params->dst; + memset(¶ms->dst, 0, sizeof(params->dst)); + } else if (wm_prog_key->dst_usage == ISL_SURF_USAGE_STENCIL_BIT) { + params->stencil = params->dst; + params->stencil_mask = 0xff; + memset(¶ms->dst, 0, sizeof(params->dst)); + } - batch->blorp->exec(batch, params); + batch->blorp->exec(batch, params); + } + + return result; +} + +/* Adjust split blit source coordinates for the current destination + * coordinates. + */ +static void +adjust_split_source_coords(const struct blt_axis *orig, + struct blt_axis *split_coords, + double scale) +{ + /* When scale is greater than 0, then we are growing from the start, so + * src0 uses delta0, and src1 uses delta1. When scale is less than 0, the + * source range shrinks from the end. In that case src0 is adjusted by + * delta1, and src1 is adjusted by delta0. + */ + double delta0 = scale * (split_coords->dst0 - orig->dst0); + double delta1 = scale * (split_coords->dst1 - orig->dst1); + split_coords->src0 = orig->src0 + (scale >= 0.0 ? delta0 : delta1); + split_coords->src1 = orig->src1 + (scale >= 0.0 ? delta1 : delta0); +} + +static struct isl_extent2d +get_px_size_sa(const struct isl_surf *surf) +{ + static const struct isl_extent2d one_to_one = { .w = 1, .h = 1 }; + + if (surf->msaa_layout != ISL_MSAA_LAYOUT_INTERLEAVED) + return one_to_one; + else + return isl_get_interleaved_msaa_px_size_sa(surf->samples); +} + +static void +shrink_surface_params(const struct isl_device *dev, + struct brw_blorp_surface_info *info, + double *x0, double *x1, double *y0, double *y1) +{ + uint32_t byte_offset, x_offset_sa, y_offset_sa, size; + struct isl_extent2d px_size_sa; + int adjust; + + blorp_surf_convert_to_single_slice(dev, info); + + px_size_sa = get_px_size_sa(&info->surf); + + /* Because this gets called after we lower compressed images, the tile + * offsets may be non-zero and we need to incorporate them in our + * calculations. 
+ */ + x_offset_sa = (uint32_t)*x0 * px_size_sa.w + info->tile_x_sa; + y_offset_sa = (uint32_t)*y0 * px_size_sa.h + info->tile_y_sa; + isl_tiling_get_intratile_offset_sa(info->surf.tiling, + info->surf.format, info->surf.row_pitch_B, + x_offset_sa, y_offset_sa, + &byte_offset, + &info->tile_x_sa, &info->tile_y_sa); + + info->addr.offset += byte_offset; + + adjust = (int)info->tile_x_sa / px_size_sa.w - (int)*x0; + *x0 += adjust; + *x1 += adjust; + info->tile_x_sa = 0; + + adjust = (int)info->tile_y_sa / px_size_sa.h - (int)*y0; + *y0 += adjust; + *y1 += adjust; + info->tile_y_sa = 0; + + size = MIN2((uint32_t)ceil(*x1), info->surf.logical_level0_px.width); + info->surf.logical_level0_px.width = size; + info->surf.phys_level0_sa.width = size * px_size_sa.w; + + size = MIN2((uint32_t)ceil(*y1), info->surf.logical_level0_px.height); + info->surf.logical_level0_px.height = size; + info->surf.phys_level0_sa.height = size * px_size_sa.h; +} + +static void +do_blorp_blit(struct blorp_batch *batch, + const struct blorp_params *orig_params, + struct brw_blorp_blit_prog_key *wm_prog_key, + const struct blt_coords *orig) +{ + struct blorp_params params; + struct blt_coords blit_coords; + struct blt_coords split_coords = *orig; + double w = orig->x.dst1 - orig->x.dst0; + double h = orig->y.dst1 - orig->y.dst0; + double x_scale = (orig->x.src1 - orig->x.src0) / w; + double y_scale = (orig->y.src1 - orig->y.src0) / h; + if (orig->x.mirror) + x_scale = -x_scale; + if (orig->y.mirror) + y_scale = -y_scale; + + enum blit_shrink_status shrink = BLIT_NO_SHRINK; + if (split_blorp_blit_debug) { + if (can_shrink_surface(&orig_params->src)) + shrink |= BLIT_SRC_WIDTH_SHRINK | BLIT_SRC_HEIGHT_SHRINK; + if (can_shrink_surface(&orig_params->dst)) + shrink |= BLIT_DST_WIDTH_SHRINK | BLIT_DST_HEIGHT_SHRINK; + } + + bool x_done, y_done; + do { + params = *orig_params; + blit_coords = split_coords; + + if (shrink & (BLIT_SRC_WIDTH_SHRINK | BLIT_SRC_HEIGHT_SHRINK)) { + shrink_surface_params(batch->blorp->isl_dev, ¶ms.src, + &blit_coords.x.src0, &blit_coords.x.src1, + &blit_coords.y.src0, &blit_coords.y.src1); + wm_prog_key->need_src_offset = false; + } + + if (shrink & (BLIT_DST_WIDTH_SHRINK | BLIT_DST_HEIGHT_SHRINK)) { + shrink_surface_params(batch->blorp->isl_dev, ¶ms.dst, + &blit_coords.x.dst0, &blit_coords.x.dst1, + &blit_coords.y.dst0, &blit_coords.y.dst1); + wm_prog_key->need_dst_offset = false; + } + + enum blit_shrink_status result = + try_blorp_blit(batch, ¶ms, wm_prog_key, &blit_coords); + + if (result & (BLIT_SRC_WIDTH_SHRINK | BLIT_SRC_HEIGHT_SHRINK)) + assert(can_shrink_surface(&orig_params->src)); + + if (result & (BLIT_DST_WIDTH_SHRINK | BLIT_DST_HEIGHT_SHRINK)) + assert(can_shrink_surface(&orig_params->dst)); + + if (result & (BLIT_SRC_WIDTH_SHRINK | BLIT_DST_WIDTH_SHRINK)) { + w /= 2.0; + assert(w >= 1.0); + split_coords.x.dst1 = MIN2(split_coords.x.dst0 + w, orig->x.dst1); + adjust_split_source_coords(&orig->x, &split_coords.x, x_scale); + } + if (result & (BLIT_SRC_HEIGHT_SHRINK | BLIT_DST_HEIGHT_SHRINK)) { + h /= 2.0; + assert(h >= 1.0); + split_coords.y.dst1 = MIN2(split_coords.y.dst0 + h, orig->y.dst1); + adjust_split_source_coords(&orig->y, &split_coords.y, y_scale); + } + + if (result) { + /* We may get less bits set on result than we had already, so make + * sure we remember all the ways in which a resize is required. 
+ */ + shrink |= result; + continue; + } + + y_done = (orig->y.dst1 - split_coords.y.dst1 < 0.5); + x_done = y_done && (orig->x.dst1 - split_coords.x.dst1 < 0.5); + if (x_done) { + break; + } else if (y_done) { + split_coords.x.dst0 += w; + split_coords.x.dst1 = MIN2(split_coords.x.dst0 + w, orig->x.dst1); + split_coords.y.dst0 = orig->y.dst0; + split_coords.y.dst1 = MIN2(split_coords.y.dst0 + h, orig->y.dst1); + adjust_split_source_coords(&orig->x, &split_coords.x, x_scale); + } else { + split_coords.y.dst0 += h; + split_coords.y.dst1 = MIN2(split_coords.y.dst0 + h, orig->y.dst1); + adjust_split_source_coords(&orig->y, &split_coords.y, y_scale); + } + } while (true); } void @@ -1643,11 +2328,27 @@ blorp_blit(struct blorp_batch *batch, float src_x1, float src_y1, float dst_x0, float dst_y0, float dst_x1, float dst_y1, - GLenum filter, bool mirror_x, bool mirror_y) + enum blorp_filter filter, + bool mirror_x, bool mirror_y) { struct blorp_params params; blorp_params_init(¶ms); + /* We cannot handle combined depth and stencil. */ + if (src_surf->surf->usage & ISL_SURF_USAGE_STENCIL_BIT) + assert(src_surf->surf->format == ISL_FORMAT_R8_UINT); + if (dst_surf->surf->usage & ISL_SURF_USAGE_STENCIL_BIT) + assert(dst_surf->surf->format == ISL_FORMAT_R8_UINT); + + if (dst_surf->surf->usage & ISL_SURF_USAGE_STENCIL_BIT) { + assert(src_surf->surf->usage & ISL_SURF_USAGE_STENCIL_BIT); + /* Prior to Broadwell, we can't render to R8_UINT */ + if (batch->blorp->isl_dev->info->gen < 8) { + src_format = ISL_FORMAT_R8_UNORM; + dst_format = ISL_FORMAT_R8_UNORM; + } + } + brw_blorp_surface_info_init(batch->blorp, ¶ms.src, src_surf, src_level, src_layer, src_format, false); brw_blorp_surface_info_init(batch->blorp, ¶ms.dst, dst_surf, dst_level, @@ -1656,15 +2357,20 @@ blorp_blit(struct blorp_batch *batch, params.src.view.swizzle = src_swizzle; params.dst.view.swizzle = dst_swizzle; + const struct isl_format_layout *src_fmtl = + isl_format_get_layout(params.src.view.format); + struct brw_blorp_blit_prog_key wm_prog_key = { - .shader_type = BLORP_SHADER_TYPE_BLIT + .shader_type = BLORP_SHADER_TYPE_BLIT, + .filter = filter, + .sint32_to_uint = src_fmtl->channels.r.bits == 32 && + isl_format_has_sint_channel(params.src.view.format) && + isl_format_has_uint_channel(params.dst.view.format), + .uint32_to_sint = src_fmtl->channels.r.bits == 32 && + isl_format_has_uint_channel(params.src.view.format) && + isl_format_has_sint_channel(params.dst.view.format), }; - /* Scaled blitting or not. */ - wm_prog_key.blit_scaled = - ((dst_x1 - dst_x0) == (src_x1 - src_x0) && - (dst_y1 - dst_y0) == (src_y1 - src_y0)) ? false : true; - /* Scaling factors used for bilinear filtering in multisample scaled * blits. */ @@ -1674,26 +2380,6 @@ blorp_blit(struct blorp_batch *batch, wm_prog_key.x_scale = 2.0f; wm_prog_key.y_scale = params.src.surf.samples / wm_prog_key.x_scale; - if (filter == GL_LINEAR && - params.src.surf.samples <= 1 && params.dst.surf.samples <= 1) - wm_prog_key.bilinear_filter = true; - - if ((params.src.surf.usage & ISL_SURF_USAGE_DEPTH_BIT) == 0 && - (params.src.surf.usage & ISL_SURF_USAGE_STENCIL_BIT) == 0 && - !isl_format_has_int_channel(params.src.surf.format) && - params.src.surf.samples > 1 && params.dst.surf.samples <= 1) { - /* We are downsampling a non-integer color buffer, so blend. - * - * Regarding integer color buffers, the OpenGL ES 3.2 spec says: - * - * "If the source formats are integer types or stencil values, a - * single sample's value is selected for each pixel." 
-       *
-       * This implies we should not blend in that case.
-       */
-      wm_prog_key.blend = true;
-   }
-
    params.wm_inputs.rect_grid.x1 =
       minify(params.src.surf.logical_level0_px.width, src_level) *
       wm_prog_key.x_scale - 1.0f;
@@ -1701,47 +2387,198 @@ blorp_blit(struct blorp_batch *batch,
       minify(params.src.surf.logical_level0_px.height, src_level) *
       wm_prog_key.y_scale - 1.0f;
 
-   do_blorp_blit(batch, &params, &wm_prog_key,
-                 src_x0, src_y0, src_x1, src_y1,
-                 dst_x0, dst_y0, dst_x1, dst_y1,
-                 mirror_x, mirror_y);
+   struct blt_coords coords = {
+      .x = {
+         .src0 = src_x0,
+         .src1 = src_x1,
+         .dst0 = dst_x0,
+         .dst1 = dst_x1,
+         .mirror = mirror_x
+      },
+      .y = {
+         .src0 = src_y0,
+         .src1 = src_y1,
+         .dst0 = dst_y0,
+         .dst1 = dst_y1,
+         .mirror = mirror_y
+      }
+   };
+
+   do_blorp_blit(batch, &params, &wm_prog_key, &coords);
 }
 
 static enum isl_format
-get_copy_format_for_bpb(unsigned bpb)
+get_copy_format_for_bpb(const struct isl_device *isl_dev, unsigned bpb)
 {
-   /* The choice of UNORM and UINT formats is very intentional here. Most of
-    * the time, we want to use a UINT format to avoid any rounding error in
-    * the blit. For stencil blits, R8_UINT is required by the hardware.
+   /* The choice of UNORM and UINT formats is very intentional here. Most
+    * of the time, we want to use a UINT format to avoid any rounding error
+    * in the blit. For stencil blits, R8_UINT is required by the hardware.
    * (It's the only format allowed in conjunction with W-tiling.) Also we
    * intentionally use the 4-channel formats whenever we can. This is so
-    * that, when we do a RGB <-> RGBX copy, the two formats will line up even
-    * though one of them is 3/4 the size of the other. The choice of UNORM
-    * vs. UINT is also very intentional because Haswell doesn't handle 8 or
-    * 16-bit RGB UINT formats at all so we have to use UNORM there.
+    * that, when we do a RGB <-> RGBX copy, the two formats will line up
+    * even though one of them is 3/4 the size of the other. The choice of
+    * UNORM vs. UINT is also very intentional because we don't have 8 or
+    * 16-bit RGB UINT formats until Sky Lake so we have to use UNORM there.
    * Fortunately, the only time we should ever use two different formats in
    * the table below is for RGB -> RGBA blits and so we will never have any
    * UNORM/UINT mismatch.
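A quick worked example of the RGB <-> RGBX pairing described in the comment above (a standalone check, not driver code; the strings simply mirror the pre-Skylake table that follows): a 24 bpb RGB surface and a 32 bpb RGBX surface both end up with 8-bit channels, so texel N of one lines up channel-for-channel with texel N of the other, and likewise for the 48/64 bpb pair.

#include <stdio.h>

int
main(void)
{
   const struct { const char *copy_format; int channels, channel_bits; } f[] = {
      { "R8G8B8_UNORM (24 bpb RGB)",         3,  8 },
      { "R8G8B8A8_UNORM (32 bpb RGBX)",      4,  8 },
      { "R16G16B16_UNORM (48 bpb RGB)",      3, 16 },
      { "R16G16B16A16_UNORM (64 bpb RGBX)",  4, 16 },
   };

   for (unsigned i = 0; i < sizeof(f) / sizeof(f[0]); i++) {
      printf("%-34s bpb = %2d, bits per channel = %d\n",
             f[i].copy_format, f[i].channels * f[i].channel_bits,
             f[i].channel_bits);
   }
   return 0;
}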
    */
-   switch (bpb) {
-   case 8:  return ISL_FORMAT_R8_UINT;
-   case 16: return ISL_FORMAT_R8G8_UINT;
-   case 24: return ISL_FORMAT_R8G8B8_UNORM;
-   case 32: return ISL_FORMAT_R8G8B8A8_UNORM;
-   case 48: return ISL_FORMAT_R16G16B16_UNORM;
-   case 64: return ISL_FORMAT_R16G16B16A16_UNORM;
-   case 96: return ISL_FORMAT_R32G32B32_UINT;
-   case 128:return ISL_FORMAT_R32G32B32A32_UINT;
+   if (ISL_DEV_GEN(isl_dev) >= 9) {
+      switch (bpb) {
+      case 8:  return ISL_FORMAT_R8_UINT;
+      case 16: return ISL_FORMAT_R8G8_UINT;
+      case 24: return ISL_FORMAT_R8G8B8_UINT;
+      case 32: return ISL_FORMAT_R8G8B8A8_UINT;
+      case 48: return ISL_FORMAT_R16G16B16_UINT;
+      case 64: return ISL_FORMAT_R16G16B16A16_UINT;
+      case 96: return ISL_FORMAT_R32G32B32_UINT;
+      case 128:return ISL_FORMAT_R32G32B32A32_UINT;
+      default:
+         unreachable("Unknown format bpb");
+      }
+   } else {
+      switch (bpb) {
+      case 8:  return ISL_FORMAT_R8_UINT;
+      case 16: return ISL_FORMAT_R8G8_UINT;
+      case 24: return ISL_FORMAT_R8G8B8_UNORM;
+      case 32: return ISL_FORMAT_R8G8B8A8_UNORM;
+      case 48: return ISL_FORMAT_R16G16B16_UNORM;
+      case 64: return ISL_FORMAT_R16G16B16A16_UNORM;
+      case 96: return ISL_FORMAT_R32G32B32_UINT;
+      case 128:return ISL_FORMAT_R32G32B32A32_UINT;
+      default:
+         unreachable("Unknown format bpb");
+      }
+   }
+}
+
+/** Returns a UINT format that is CCS-compatible with the given format
+ *
+ * The PRMs say absolutely nothing about how render compression works. The
+ * only thing they provide is a list of formats on which it is and is not
+ * supported. Empirical testing indicates that the compression is only based
+ * on the bit-layout of the format and the channel encoding doesn't matter.
+ * So, while texture views don't work in general, you can create a view as
+ * long as the bit-layouts of the formats are the same.
+ *
+ * Fortunately, for every render compression capable format, the UINT format
+ * with the same bit layout also supports render compression. This means that
+ * we only need to handle UINT formats for copy operations. In order to do
+ * copies between formats with different bit layouts, we attach both with a
+ * UINT format and use bit_cast_color() to generate code to do the bit-cast
+ * operation between the two bit layouts.
+ */
+static enum isl_format
+get_ccs_compatible_copy_format(const struct isl_format_layout *fmtl)
+{
+   switch (fmtl->format) {
+   case ISL_FORMAT_R32G32B32A32_FLOAT:
+   case ISL_FORMAT_R32G32B32A32_SINT:
+   case ISL_FORMAT_R32G32B32A32_UINT:
+   case ISL_FORMAT_R32G32B32A32_UNORM:
+   case ISL_FORMAT_R32G32B32A32_SNORM:
+   case ISL_FORMAT_R32G32B32X32_FLOAT:
+      return ISL_FORMAT_R32G32B32A32_UINT;
+
+   case ISL_FORMAT_R16G16B16A16_UNORM:
+   case ISL_FORMAT_R16G16B16A16_SNORM:
+   case ISL_FORMAT_R16G16B16A16_SINT:
+   case ISL_FORMAT_R16G16B16A16_UINT:
+   case ISL_FORMAT_R16G16B16A16_FLOAT:
+   case ISL_FORMAT_R16G16B16X16_UNORM:
+   case ISL_FORMAT_R16G16B16X16_FLOAT:
+      return ISL_FORMAT_R16G16B16A16_UINT;
+
+   case ISL_FORMAT_R32G32_FLOAT:
+   case ISL_FORMAT_R32G32_SINT:
+   case ISL_FORMAT_R32G32_UINT:
+   case ISL_FORMAT_R32G32_UNORM:
+   case ISL_FORMAT_R32G32_SNORM:
+      return ISL_FORMAT_R32G32_UINT;
+
+   case ISL_FORMAT_B8G8R8A8_UNORM:
+   case ISL_FORMAT_B8G8R8A8_UNORM_SRGB:
+   case ISL_FORMAT_R8G8B8A8_UNORM:
+   case ISL_FORMAT_R8G8B8A8_UNORM_SRGB:
+   case ISL_FORMAT_R8G8B8A8_SNORM:
+   case ISL_FORMAT_R8G8B8A8_SINT:
+   case ISL_FORMAT_R8G8B8A8_UINT:
+   case ISL_FORMAT_B8G8R8X8_UNORM:
+   case ISL_FORMAT_B8G8R8X8_UNORM_SRGB:
+   case ISL_FORMAT_R8G8B8X8_UNORM:
+   case ISL_FORMAT_R8G8B8X8_UNORM_SRGB:
+      return ISL_FORMAT_R8G8B8A8_UINT;
+
+   case ISL_FORMAT_R16G16_UNORM:
+   case ISL_FORMAT_R16G16_SNORM:
+   case ISL_FORMAT_R16G16_SINT:
+   case ISL_FORMAT_R16G16_UINT:
+   case ISL_FORMAT_R16G16_FLOAT:
+      return ISL_FORMAT_R16G16_UINT;
+
+   case ISL_FORMAT_R32_SINT:
+   case ISL_FORMAT_R32_UINT:
+   case ISL_FORMAT_R32_FLOAT:
+   case ISL_FORMAT_R32_UNORM:
+   case ISL_FORMAT_R32_SNORM:
+      return ISL_FORMAT_R32_UINT;
+
+   case ISL_FORMAT_B10G10R10A2_UNORM:
+   case ISL_FORMAT_B10G10R10A2_UNORM_SRGB:
+   case ISL_FORMAT_R10G10B10A2_UNORM:
+   case ISL_FORMAT_R10G10B10A2_UNORM_SRGB:
+   case ISL_FORMAT_R10G10B10_FLOAT_A2_UNORM:
+   case ISL_FORMAT_R10G10B10A2_UINT:
+      return ISL_FORMAT_R10G10B10A2_UINT;
+
+   case ISL_FORMAT_R16_UNORM:
+   case ISL_FORMAT_R16_SNORM:
+   case ISL_FORMAT_R16_SINT:
+   case ISL_FORMAT_R16_UINT:
+   case ISL_FORMAT_R16_FLOAT:
+      return ISL_FORMAT_R16_UINT;
+
+   case ISL_FORMAT_R8G8_UNORM:
+   case ISL_FORMAT_R8G8_SNORM:
+   case ISL_FORMAT_R8G8_SINT:
+   case ISL_FORMAT_R8G8_UINT:
+      return ISL_FORMAT_R8G8_UINT;
+
+   case ISL_FORMAT_B5G5R5X1_UNORM:
+   case ISL_FORMAT_B5G5R5X1_UNORM_SRGB:
+   case ISL_FORMAT_B5G5R5A1_UNORM:
+   case ISL_FORMAT_B5G5R5A1_UNORM_SRGB:
+      return ISL_FORMAT_B5G5R5A1_UNORM;
+
+   case ISL_FORMAT_A4B4G4R4_UNORM:
+   case ISL_FORMAT_B4G4R4A4_UNORM:
+   case ISL_FORMAT_B4G4R4A4_UNORM_SRGB:
+      return ISL_FORMAT_B4G4R4A4_UNORM;
+
+   case ISL_FORMAT_B5G6R5_UNORM:
+   case ISL_FORMAT_B5G6R5_UNORM_SRGB:
+      return ISL_FORMAT_B5G6R5_UNORM;
+
+   case ISL_FORMAT_A1B5G5R5_UNORM:
+      return ISL_FORMAT_A1B5G5R5_UNORM;
+
+   case ISL_FORMAT_A8_UNORM:
+   case ISL_FORMAT_R8_UNORM:
+   case ISL_FORMAT_R8_SNORM:
+   case ISL_FORMAT_R8_SINT:
+   case ISL_FORMAT_R8_UINT:
+      return ISL_FORMAT_R8_UINT;
+
    default:
-      unreachable("Unknown format bpb");
+      unreachable("Not a compressible format");
    }
 }
 
-static void
-surf_convert_to_uncompressed(const struct isl_device *isl_dev,
-                             struct brw_blorp_surface_info *info,
-                             uint32_t *x, uint32_t *y,
-                             uint32_t *width, uint32_t *height)
+void
+blorp_surf_convert_to_uncompressed(const struct isl_device *isl_dev,
+                                   struct brw_blorp_surface_info *info,
+                                   uint32_t *x, uint32_t *y,
+                                   uint32_t *width, uint32_t *height)
 {
    const struct isl_format_layout *fmtl =
       isl_format_get_layout(info->surf.format);
@@ -1753,9 +2590,9 @@ surf_convert_to_uncompressed(const struct isl_device *isl_dev,
    * ones with the same bpb) and divide x, y, width, and height by the
    * block size.
    */
-   surf_convert_to_single_slice(isl_dev, info);
+   blorp_surf_convert_to_single_slice(isl_dev, info);
 
-   if (width || height) {
+   if (width && height) {
 #ifndef NDEBUG
       uint32_t right_edge_px = info->tile_x_sa + *x + *width;
       uint32_t bottom_edge_px = info->tile_y_sa + *y + *height;
@@ -1768,20 +2605,15 @@ surf_convert_to_uncompressed(const struct isl_device *isl_dev,
      *height = DIV_ROUND_UP(*height, fmtl->bh);
    }
 
-   assert(*x % fmtl->bw == 0);
-   assert(*y % fmtl->bh == 0);
-   *x /= fmtl->bw;
-   *y /= fmtl->bh;
-
-   info->surf.logical_level0_px.width =
-      DIV_ROUND_UP(info->surf.logical_level0_px.width, fmtl->bw);
-   info->surf.logical_level0_px.height =
-      DIV_ROUND_UP(info->surf.logical_level0_px.height, fmtl->bh);
+   if (x && y) {
+      assert(*x % fmtl->bw == 0);
+      assert(*y % fmtl->bh == 0);
+      *x /= fmtl->bw;
+      *y /= fmtl->bh;
+   }
 
-   assert(info->surf.phys_level0_sa.width % fmtl->bw == 0);
-   assert(info->surf.phys_level0_sa.height % fmtl->bh == 0);
-   info->surf.phys_level0_sa.width /= fmtl->bw;
-   info->surf.phys_level0_sa.height /= fmtl->bh;
+   info->surf.logical_level0_px = isl_surf_get_logical_level0_el(&info->surf);
+   info->surf.phys_level0_sa = isl_surf_get_phys_level0_el(&info->surf);
 
    assert(info->tile_x_sa % fmtl->bw == 0);
    assert(info->tile_y_sa % fmtl->bh == 0);
@@ -1789,41 +2621,7 @@ surf_convert_to_uncompressed(const struct isl_device *isl_dev,
    info->tile_y_sa /= fmtl->bh;
 
    /* It's now an uncompressed surface so we need an uncompressed format */
-   info->surf.format = get_copy_format_for_bpb(fmtl->bpb);
-}
-
-static void
-surf_fake_rgb_with_red(const struct isl_device *isl_dev,
-                       struct brw_blorp_surface_info *info,
-                       uint32_t *x, uint32_t *width)
-{
-   surf_convert_to_single_slice(isl_dev, info);
-
-   info->surf.logical_level0_px.width *= 3;
-   info->surf.phys_level0_sa.width *= 3;
-   *x *= 3;
-   *width *= 3;
-
-   enum isl_format red_format;
-   switch (info->view.format) {
-   case ISL_FORMAT_R8G8B8_UNORM:
-      red_format = ISL_FORMAT_R8_UNORM;
-      break;
-   case ISL_FORMAT_R16G16B16_UNORM:
-      red_format = ISL_FORMAT_R16_UNORM;
-      break;
-   case ISL_FORMAT_R32G32B32_UINT:
-      red_format = ISL_FORMAT_R32_UINT;
-      break;
-   default:
-      unreachable("Invalid RGB copy destination format");
-   }
-   assert(isl_format_get_layout(red_format)->channels.r.type ==
-          isl_format_get_layout(info->view.format)->channels.r.type);
-   assert(isl_format_get_layout(red_format)->channels.r.bits ==
-          isl_format_get_layout(info->view.format)->channels.r.bits);
-
-   info->surf.format = info->view.format = red_format;
+   info->surf.format = get_copy_format_for_bpb(isl_dev, fmtl->bpb);
 }
 
 void
@@ -1836,6 +2634,7 @@ blorp_copy(struct blorp_batch *batch,
            uint32_t dst_x, uint32_t dst_y,
            uint32_t src_width, uint32_t src_height)
 {
+   const struct isl_device *isl_dev = batch->blorp->isl_dev;
    struct blorp_params params;
 
    if (src_width == 0 || src_height == 0)
@@ -1848,7 +2647,10 @@ blorp_copy(struct blorp_batch *batch,
                                dst_layer, ISL_FORMAT_UNSUPPORTED, true);
 
    struct brw_blorp_blit_prog_key wm_prog_key = {
-      .shader_type = BLORP_SHADER_TYPE_BLIT
+      .shader_type = BLORP_SHADER_TYPE_BLIT,
+      .filter = BLORP_FILTER_NONE,
+      .need_src_offset = src_surf->tile_x_sa || src_surf->tile_y_sa,
+      .need_dst_offset = dst_surf->tile_x_sa || dst_surf->tile_y_sa,
    };
 
    const struct isl_format_layout *src_fmtl =
@@ -1856,17 +2658,121 @@
       isl_format_get_layout(params.dst.surf.format);
 
-   params.src.view.format = get_copy_format_for_bpb(src_fmtl->bpb);
+   assert(params.src.aux_usage == ISL_AUX_USAGE_NONE ||
+          params.src.aux_usage == ISL_AUX_USAGE_HIZ ||
+          params.src.aux_usage == ISL_AUX_USAGE_HIZ_CCS_WT ||
+          params.src.aux_usage == ISL_AUX_USAGE_MCS ||
+          params.src.aux_usage == ISL_AUX_USAGE_MCS_CCS ||
+          params.src.aux_usage == ISL_AUX_USAGE_CCS_E ||
+          params.src.aux_usage == ISL_AUX_USAGE_GEN12_CCS_E ||
+          params.src.aux_usage == ISL_AUX_USAGE_STC_CCS);
+
+   if (isl_aux_usage_has_hiz(params.src.aux_usage)) {
+      /* In order to use HiZ, we have to use the real format for the source.
+       * Depth <-> Color copies are not allowed.
+       */
+      params.src.view.format = params.src.surf.format;
+      params.dst.view.format = params.src.surf.format;
+   } else if ((params.dst.surf.usage & ISL_SURF_USAGE_DEPTH_BIT) &&
+              isl_dev->info->gen >= 7) {
+      /* On Gen7 and higher, we use actual depth writes for blits into depth
+       * buffers so we need the real format.
+       */
+      params.src.view.format = params.dst.surf.format;
+      params.dst.view.format = params.dst.surf.format;
+   } else if (params.dst.aux_usage == ISL_AUX_USAGE_CCS_E ||
+              params.dst.aux_usage == ISL_AUX_USAGE_GEN12_CCS_E) {
+      params.dst.view.format = get_ccs_compatible_copy_format(dst_fmtl);
+      if (params.src.aux_usage == ISL_AUX_USAGE_CCS_E ||
+          params.src.aux_usage == ISL_AUX_USAGE_GEN12_CCS_E) {
+         params.src.view.format = get_ccs_compatible_copy_format(src_fmtl);
+      } else if (src_fmtl->bpb == dst_fmtl->bpb) {
+         params.src.view.format = params.dst.view.format;
+      } else {
+         params.src.view.format =
+            get_copy_format_for_bpb(isl_dev, src_fmtl->bpb);
+      }
+   } else if (params.src.aux_usage == ISL_AUX_USAGE_CCS_E ||
+              params.src.aux_usage == ISL_AUX_USAGE_GEN12_CCS_E) {
+      params.src.view.format = get_ccs_compatible_copy_format(src_fmtl);
+      if (src_fmtl->bpb == dst_fmtl->bpb) {
+         params.dst.view.format = params.src.view.format;
+      } else {
+         params.dst.view.format =
+            get_copy_format_for_bpb(isl_dev, dst_fmtl->bpb);
+      }
+   } else {
+      params.dst.view.format = get_copy_format_for_bpb(isl_dev, dst_fmtl->bpb);
+      params.src.view.format = get_copy_format_for_bpb(isl_dev, src_fmtl->bpb);
+   }
+
+   if (params.src.aux_usage == ISL_AUX_USAGE_CCS_E) {
+      /* It's safe to do a blorp_copy between things which are sRGB with CCS_E
+       * enabled even though CCS_E doesn't technically do sRGB on SKL because
+       * we stomp everything to UINT anyway. The one thing we have to be
+       * careful of is clear colors. Because fast clear colors for sRGB on
+       * gen9 are encoded as the float values between format conversion and
+       * sRGB curve application, a given clear color float will convert to the
+       * same bits regardless of whether the format is UNORM or sRGB.
+       * Therefore, we can handle sRGB without any special cases.
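When the two view formats chosen above end up different, the shader key below switches on format_bit_cast and the blit shader repacks the raw bits from the source layout into the destination layout (the bit_cast_color() helper mentioned in the comment further up). A rough standalone illustration of that idea, not the NIR that BLORP actually emits: one 32-bit texel read as R16G16_UINT and rewritten as R8G8B8A8_UINT.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
   const uint16_t src_rg[2] = { 0x1234, 0xabcd };  /* R16G16_UINT texel */

   /* Gather the texel into one 32-bit value, R in the low bits. */
   uint32_t bits = (uint32_t)src_rg[0] | ((uint32_t)src_rg[1] << 16);

   /* Scatter the same bits into four 8-bit channels. */
   uint8_t dst_rgba[4];
   for (int i = 0; i < 4; i++)
      dst_rgba[i] = (bits >> (8 * i)) & 0xff;

   printf("R16G16_UINT   texel: 0x%04x 0x%04x\n", src_rg[0], src_rg[1]);
   printf("R8G8B8A8_UINT texel: 0x%02x 0x%02x 0x%02x 0x%02x\n",
          dst_rgba[0], dst_rgba[1], dst_rgba[2], dst_rgba[3]);
   return 0;
}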
+       */
+      UNUSED enum isl_format linear_src_format =
+         isl_format_srgb_to_linear(src_surf->surf->format);
+      assert(isl_formats_are_ccs_e_compatible(batch->blorp->isl_dev->info,
+                                              linear_src_format,
+                                              params.src.view.format));
+      uint32_t packed[4];
+      isl_color_value_pack(&params.src.clear_color,
+                           linear_src_format, packed);
+      isl_color_value_unpack(&params.src.clear_color,
+                             params.src.view.format, packed);
+   }
+
+   if (params.dst.aux_usage == ISL_AUX_USAGE_CCS_E) {
+      /* See above where we handle linear_src_format */
+      UNUSED enum isl_format linear_dst_format =
+         isl_format_srgb_to_linear(dst_surf->surf->format);
+      assert(isl_formats_are_ccs_e_compatible(batch->blorp->isl_dev->info,
+                                              linear_dst_format,
+                                              params.dst.view.format));
+      uint32_t packed[4];
+      isl_color_value_pack(&params.dst.clear_color,
+                           linear_dst_format, packed);
+      isl_color_value_unpack(&params.dst.clear_color,
+                             params.dst.view.format, packed);
+   }
+
+   if (params.src.view.format != params.dst.view.format) {
+      enum isl_format src_cast_format = params.src.view.format;
+      enum isl_format dst_cast_format = params.dst.view.format;
+
+      /* The BLORP bitcast code gets confused by RGB formats. Just treat them
+       * as RGBA and then everything will be happy. This is perfectly safe
+       * because BLORP likes to treat things as if they have vec4 colors all
+       * the time anyway.
+       */
+      if (isl_format_get_layout(src_cast_format)->bpb % 3 == 0)
+         src_cast_format = isl_format_rgb_to_rgba(src_cast_format);
+      if (isl_format_get_layout(dst_cast_format)->bpb % 3 == 0)
+         dst_cast_format = isl_format_rgb_to_rgba(dst_cast_format);
+
+      if (src_cast_format != dst_cast_format) {
+         wm_prog_key.format_bit_cast = true;
+         wm_prog_key.src_format = src_cast_format;
+         wm_prog_key.dst_format = dst_cast_format;
+      }
+   }
+
    if (src_fmtl->bw > 1 || src_fmtl->bh > 1) {
-      surf_convert_to_uncompressed(batch->blorp->isl_dev, &params.src,
-                                   &src_x, &src_y, &src_width, &src_height);
+      blorp_surf_convert_to_uncompressed(batch->blorp->isl_dev, &params.src,
+                                         &src_x, &src_y,
+                                         &src_width, &src_height);
       wm_prog_key.need_src_offset = true;
    }
 
-   params.dst.view.format = get_copy_format_for_bpb(dst_fmtl->bpb);
    if (dst_fmtl->bw > 1 || dst_fmtl->bh > 1) {
-      surf_convert_to_uncompressed(batch->blorp->isl_dev, &params.dst,
-                                   &dst_x, &dst_y, NULL, NULL);
+      blorp_surf_convert_to_uncompressed(batch->blorp->isl_dev, &params.dst,
+                                         &dst_x, &dst_y, NULL, NULL);
       wm_prog_key.need_dst_offset = true;
    }
 
@@ -1876,15 +2782,142 @@ blorp_copy(struct blorp_batch *batch,
    uint32_t dst_width = src_width;
    uint32_t dst_height = src_height;
 
-   if (dst_fmtl->bpb % 3 == 0) {
-      surf_fake_rgb_with_red(batch->blorp->isl_dev, &params.dst,
-                             &dst_x, &dst_width);
-      wm_prog_key.dst_rgb = true;
-      wm_prog_key.need_dst_offset = true;
+   struct blt_coords coords = {
+      .x = {
+         .src0 = src_x,
+         .src1 = src_x + src_width,
+         .dst0 = dst_x,
+         .dst1 = dst_x + dst_width,
+         .mirror = false
+      },
+      .y = {
+         .src0 = src_y,
+         .src1 = src_y + src_height,
+         .dst0 = dst_y,
+         .dst1 = dst_y + dst_height,
+         .mirror = false
+      }
+   };
+
+   do_blorp_blit(batch, &params, &wm_prog_key, &coords);
+}
+
+static enum isl_format
+isl_format_for_size(unsigned size_B)
+{
+   switch (size_B) {
+   case 1:  return ISL_FORMAT_R8_UINT;
+   case 2:  return ISL_FORMAT_R8G8_UINT;
+   case 4:  return ISL_FORMAT_R8G8B8A8_UINT;
+   case 8:  return ISL_FORMAT_R16G16B16A16_UINT;
+   case 16: return ISL_FORMAT_R32G32B32A32_UINT;
+   default:
+      unreachable("Not a power-of-two format size");
    }
+}
 
-   do_blorp_blit(batch, &params, &wm_prog_key,
-                 src_x, src_y, src_x + src_width, src_y + src_height,
-                 dst_x, dst_y, dst_x + dst_width, dst_y + dst_height,
-                 false, false);
+/**
+ * Returns the greatest common divisor of a and b that is a power of two.
+ */
+static uint64_t
+gcd_pow2_u64(uint64_t a, uint64_t b)
+{
+   assert(a > 0 || b > 0);
+
+   unsigned a_log2 = ffsll(a) - 1;
+   unsigned b_log2 = ffsll(b) - 1;
+
+   /* If either a or b is 0, then a_log2 or b_log2 will be UINT_MAX in which
+    * case the MIN2() will take the other one. If both are 0 then we will
+    * hit the assert above.
+    */
+   return 1 << MIN2(a_log2, b_log2);
+}
+
+static void
+do_buffer_copy(struct blorp_batch *batch,
+               struct blorp_address *src,
+               struct blorp_address *dst,
+               int width, int height, int block_size)
+{
+   /* The actual format we pick doesn't matter as blorp will throw it away.
+    * The only thing that actually matters is the size.
+    */
+   enum isl_format format = isl_format_for_size(block_size);
+
+   UNUSED bool ok;
+   struct isl_surf surf;
+   ok = isl_surf_init(batch->blorp->isl_dev, &surf,
+                      .dim = ISL_SURF_DIM_2D,
+                      .format = format,
+                      .width = width,
+                      .height = height,
+                      .depth = 1,
+                      .levels = 1,
+                      .array_len = 1,
+                      .samples = 1,
+                      .row_pitch_B = width * block_size,
+                      .usage = ISL_SURF_USAGE_TEXTURE_BIT |
+                               ISL_SURF_USAGE_RENDER_TARGET_BIT,
+                      .tiling_flags = ISL_TILING_LINEAR_BIT);
+   assert(ok);
+
+   struct blorp_surf src_blorp_surf = {
+      .surf = &surf,
+      .addr = *src,
+   };
+
+   struct blorp_surf dst_blorp_surf = {
+      .surf = &surf,
+      .addr = *dst,
+   };
+
+   blorp_copy(batch, &src_blorp_surf, 0, 0, &dst_blorp_surf, 0, 0,
+              0, 0, 0, 0, width, height);
+}
+
+void
+blorp_buffer_copy(struct blorp_batch *batch,
+                  struct blorp_address src,
+                  struct blorp_address dst,
+                  uint64_t size)
+{
+   const struct gen_device_info *devinfo = batch->blorp->isl_dev->info;
+   uint64_t copy_size = size;
+
+   /* This is the maximum possible width/height our HW can handle */
+   uint64_t max_surface_dim = 1 << (devinfo->gen >= 7 ? 14 : 13);
+
+   /* First, we compute the biggest format that can be used with the
+    * given offsets and size.
+    */
+   int bs = 16;
+   bs = gcd_pow2_u64(bs, src.offset);
+   bs = gcd_pow2_u64(bs, dst.offset);
+   bs = gcd_pow2_u64(bs, size);
+
+   /* First, we make a bunch of max-sized copies */
+   uint64_t max_copy_size = max_surface_dim * max_surface_dim * bs;
+   while (copy_size >= max_copy_size) {
+      do_buffer_copy(batch, &src, &dst, max_surface_dim, max_surface_dim, bs);
+      copy_size -= max_copy_size;
+      src.offset += max_copy_size;
+      dst.offset += max_copy_size;
+   }
+
+   /* Now make a max-width copy */
+   uint64_t height = copy_size / (max_surface_dim * bs);
+   assert(height < max_surface_dim);
+   if (height != 0) {
+      uint64_t rect_copy_size = height * max_surface_dim * bs;
+      do_buffer_copy(batch, &src, &dst, max_surface_dim, height, bs);
+      copy_size -= rect_copy_size;
+      src.offset += rect_copy_size;
+      dst.offset += rect_copy_size;
+   }
+
+   /* Finally, make a small copy to finish it off */
+   if (copy_size != 0) {
+      do_buffer_copy(batch, &src, &dst, copy_size / bs, 1, bs);
+   }
 }
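blorp_buffer_copy() above reduces an arbitrary linear copy to at most three kinds of rectangle copies: whole max_surface_dim x max_surface_dim blocks, one max-width rectangle, and a final single row, with the texel size chosen as the largest power of two dividing both offsets and the size. The plain C sketch below walks through the same arithmetic; the 16384 limit matches the gen >= 7 case, the offsets and size are made-up numbers, and gcd_pow2() is only a stand-in for the ffsll()-based gcd_pow2_u64() above.

#include <stdint.h>
#include <stdio.h>

static uint64_t
gcd_pow2(uint64_t a, uint64_t b)
{
   /* Largest power of two dividing both a and b (assumes a, b > 0). */
   uint64_t g = 1;
   while (a % (g * 2) == 0 && b % (g * 2) == 0)
      g *= 2;
   return g;
}

int
main(void)
{
   const uint64_t max_dim = 1 << 14;          /* gen >= 7 surface limit */
   uint64_t src_offset = 208, dst_offset = 96, size = 75000000;

   uint64_t bs = 16;                          /* widest copy format, in bytes */
   bs = gcd_pow2(bs, src_offset);
   bs = gcd_pow2(bs, dst_offset);
   bs = gcd_pow2(bs, size);
   printf("block size = %llu bytes\n", (unsigned long long)bs);

   uint64_t left = size;
   const uint64_t max_copy = max_dim * max_dim * bs;

   while (left >= max_copy) {                 /* full max_dim x max_dim blocks */
      printf("copy %llu x %llu texels\n",
             (unsigned long long)max_dim, (unsigned long long)max_dim);
      left -= max_copy;
   }

   uint64_t height = left / (max_dim * bs);   /* one max-width rectangle */
   if (height != 0) {
      printf("copy %llu x %llu texels\n",
             (unsigned long long)max_dim, (unsigned long long)height);
      left -= height * max_dim * bs;
   }

   if (left != 0)                             /* final partial row */
      printf("copy %llu x 1 texels\n", (unsigned long long)(left / bs));

   return 0;
}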