diff --git a/src/freedreno/vulkan/tu_clear_blit.c b/src/freedreno/vulkan/tu_clear_blit.c
index 0a6fc2a933c..1d25147a6cf 100644
--- a/src/freedreno/vulkan/tu_clear_blit.c
+++ b/src/freedreno/vulkan/tu_clear_blit.c
@@ -16,178 +16,10 @@
 #include "util/format_srgb.h"
 #include "util/u_half.h"
-/* helper functions previously in tu_formats.c */
-
-static uint32_t
-tu_pack_mask(int bits)
-{
-   assert(bits <= 32);
-   return (1ull << bits) - 1;
-}
-
 static uint32_t
 tu_pack_float32_for_unorm(float val, int bits)
 {
-   const uint32_t max = tu_pack_mask(bits);
-   if (val < 0.0f)
-      return 0;
-   else if (val > 1.0f)
-      return max;
-   else
-      return _mesa_lroundevenf(val * (float) max);
-}
-
-static uint32_t
-tu_pack_float32_for_snorm(float val, int bits)
-{
-   const int32_t max = tu_pack_mask(bits - 1);
-   int32_t tmp;
-   if (val < -1.0f)
-      tmp = -max;
-   else if (val > 1.0f)
-      tmp = max;
-   else
-      tmp = _mesa_lroundevenf(val * (float) max);
-
-   return tmp & tu_pack_mask(bits);
-}
-
-static uint32_t
-tu_pack_float32_for_uscaled(float val, int bits)
-{
-   const uint32_t max = tu_pack_mask(bits);
-   if (val < 0.0f)
-      return 0;
-   else if (val > (float) max)
-      return max;
-   else
-      return (uint32_t) val;
-}
-
-static uint32_t
-tu_pack_float32_for_sscaled(float val, int bits)
-{
-   const int32_t max = tu_pack_mask(bits - 1);
-   const int32_t min = -max - 1;
-   int32_t tmp;
-   if (val < (float) min)
-      tmp = min;
-   else if (val > (float) max)
-      tmp = max;
-   else
-      tmp = (int32_t) val;
-
-   return tmp & tu_pack_mask(bits);
-}
-
-static uint32_t
-tu_pack_uint32_for_uint(uint32_t val, int bits)
-{
-   return val & tu_pack_mask(bits);
-}
-
-static uint32_t
-tu_pack_int32_for_sint(int32_t val, int bits)
-{
-   return val & tu_pack_mask(bits);
-}
-
-static uint32_t
-tu_pack_float32_for_sfloat(float val, int bits)
-{
-   assert(bits == 16 || bits == 32);
-   return bits == 16 ? 
util_float_to_half(val) : fui(val); -} - -union tu_clear_component_value { - float float32; - int32_t int32; - uint32_t uint32; -}; - -static uint32_t -tu_pack_clear_component_value(union tu_clear_component_value val, - const struct util_format_channel_description *ch) -{ - uint32_t packed; - - switch (ch->type) { - case UTIL_FORMAT_TYPE_UNSIGNED: - /* normalized, scaled, or pure integer */ - if (ch->normalized) - packed = tu_pack_float32_for_unorm(val.float32, ch->size); - else if (ch->pure_integer) - packed = tu_pack_uint32_for_uint(val.uint32, ch->size); - else - packed = tu_pack_float32_for_uscaled(val.float32, ch->size); - break; - case UTIL_FORMAT_TYPE_SIGNED: - /* normalized, scaled, or pure integer */ - if (ch->normalized) - packed = tu_pack_float32_for_snorm(val.float32, ch->size); - else if (ch->pure_integer) - packed = tu_pack_int32_for_sint(val.int32, ch->size); - else - packed = tu_pack_float32_for_sscaled(val.float32, ch->size); - break; - case UTIL_FORMAT_TYPE_FLOAT: - packed = tu_pack_float32_for_sfloat(val.float32, ch->size); - break; - default: - unreachable("unexpected channel type"); - packed = 0; - break; - } - - assert((packed & tu_pack_mask(ch->size)) == packed); - return packed; -} - -static const struct util_format_channel_description * -tu_get_format_channel_description(const struct util_format_description *desc, - int comp) -{ - switch (desc->swizzle[comp]) { - case PIPE_SWIZZLE_X: - return &desc->channel[0]; - case PIPE_SWIZZLE_Y: - return &desc->channel[1]; - case PIPE_SWIZZLE_Z: - return &desc->channel[2]; - case PIPE_SWIZZLE_W: - return &desc->channel[3]; - default: - return NULL; - } -} - -static union tu_clear_component_value -tu_get_clear_component_value(const VkClearValue *val, int comp, - enum util_format_colorspace colorspace) -{ - assert(comp < 4); - - union tu_clear_component_value tmp; - switch (colorspace) { - case UTIL_FORMAT_COLORSPACE_ZS: - assert(comp < 2); - if (comp == 0) - tmp.float32 = val->depthStencil.depth; - else - tmp.uint32 = val->depthStencil.stencil; - break; - case UTIL_FORMAT_COLORSPACE_SRGB: - if (comp < 3) { - tmp.float32 = util_format_linear_to_srgb_float(val->color.float32[comp]); - break; - } - default: - assert(comp < 4); - tmp.uint32 = val->color.uint32[comp]; - break; - } - - return tmp; + return _mesa_lroundevenf(CLAMP(val, 0.0f, 1.0f) * (float) ((1 << bits) - 1)); } /* r2d_ = BLIT_OP_SCALE operations */ @@ -275,10 +107,10 @@ r2d_coords(struct tu_cs *cs, return; tu_cs_emit_regs(cs, - A6XX_GRAS_2D_SRC_TL_X(.x = src->x), - A6XX_GRAS_2D_SRC_BR_X(.x = src->x + extent->width - 1), - A6XX_GRAS_2D_SRC_TL_Y(.y = src->y), - A6XX_GRAS_2D_SRC_BR_Y(.y = src->y + extent->height - 1)); + A6XX_GRAS_2D_SRC_TL_X(src->x), + A6XX_GRAS_2D_SRC_BR_X(src->x + extent->width - 1), + A6XX_GRAS_2D_SRC_TL_Y(src->y), + A6XX_GRAS_2D_SRC_BR_Y(src->y + extent->height - 1)); } static void @@ -323,7 +155,7 @@ r2d_clear_value(struct tu_cs *cs, VkFormat format, const VkClearValue *val) linear = util_format_linear_to_srgb_float(val->color.float32[i]); if (ch->type == UTIL_FORMAT_TYPE_SIGNED) - clear_value[i] = tu_pack_float32_for_snorm(linear, 8); + clear_value[i] = _mesa_lroundevenf(CLAMP(linear, -1.0f, 1.0f) * 127.0f); else clear_value[i] = tu_pack_float32_for_unorm(linear, 8); } else if (ifmt == R2D_FLOAT16) { @@ -346,11 +178,14 @@ r2d_src(struct tu_cmd_buffer *cmd, struct tu_cs *cs, const struct tu_image_view *iview, uint32_t layer, - bool linear_filter) + VkFilter filter) { + uint32_t src_info = iview->SP_PS_2D_SRC_INFO; + if (filter != 
VK_FILTER_NEAREST) + src_info |= A6XX_SP_PS_2D_SRC_INFO_FILTER; + tu_cs_emit_pkt4(cs, REG_A6XX_SP_PS_2D_SRC_INFO, 5); - tu_cs_emit(cs, iview->SP_PS_2D_SRC_INFO | - COND(linear_filter, A6XX_SP_PS_2D_SRC_INFO_FILTER)); + tu_cs_emit(cs, src_info); tu_cs_emit(cs, iview->SP_PS_2D_SRC_SIZE); tu_cs_image_ref_2d(cs, iview, layer, true); @@ -405,32 +240,39 @@ r2d_dst_buffer(struct tu_cs *cs, VkFormat vk_format, uint64_t va, uint32_t pitch .srgb = vk_format_is_srgb(vk_format)), A6XX_RB_2D_DST_LO((uint32_t) va), A6XX_RB_2D_DST_HI(va >> 32), - A6XX_RB_2D_DST_SIZE(.pitch = pitch)); + A6XX_RB_2D_DST_PITCH(pitch)); } static void r2d_setup_common(struct tu_cmd_buffer *cmd, struct tu_cs *cs, VkFormat vk_format, + VkImageAspectFlags aspect_mask, enum a6xx_rotation rotation, bool clear, - uint8_t mask, + bool ubwc, bool scissor) { enum a6xx_format format = tu6_base_format(vk_format); enum a6xx_2d_ifmt ifmt = format_to_ifmt(format); uint32_t unknown_8c01 = 0; - if (format == FMT6_Z24_UNORM_S8_UINT_AS_R8G8B8A8) { - /* preserve depth channels */ - if (mask == 0x8) - unknown_8c01 = 0x00084001; + if ((vk_format == VK_FORMAT_D24_UNORM_S8_UINT || + vk_format == VK_FORMAT_X8_D24_UNORM_PACK32) && ubwc) { + format = FMT6_Z24_UNORM_S8_UINT_AS_R8G8B8A8; + } + + /* note: the only format with partial clearing is D24S8 */ + if (vk_format == VK_FORMAT_D24_UNORM_S8_UINT) { /* preserve stencil channel */ - if (mask == 0x7) + if (aspect_mask == VK_IMAGE_ASPECT_DEPTH_BIT) unknown_8c01 = 0x08000041; + /* preserve depth channels */ + if (aspect_mask == VK_IMAGE_ASPECT_STENCIL_BIT) + unknown_8c01 = 0x00084001; } - tu_cs_emit_pkt4(cs, REG_A6XX_RB_UNKNOWN_8C01, 1); + tu_cs_emit_pkt4(cs, REG_A6XX_RB_2D_UNKNOWN_8C01, 1); tu_cs_emit(cs, unknown_8c01); uint32_t blit_cntl = A6XX_RB_2D_BLIT_CNTL( @@ -452,7 +294,7 @@ r2d_setup_common(struct tu_cmd_buffer *cmd, if (format == FMT6_10_10_10_2_UNORM_DEST) format = FMT6_16_16_16_16_FLOAT; - tu_cs_emit_regs(cs, A6XX_SP_2D_SRC_FORMAT( + tu_cs_emit_regs(cs, A6XX_SP_2D_DST_FORMAT( .sint = vk_format_is_sint(vk_format), .uint = vk_format_is_uint(vk_format), .color_format = format, @@ -464,13 +306,14 @@ static void r2d_setup(struct tu_cmd_buffer *cmd, struct tu_cs *cs, VkFormat vk_format, + VkImageAspectFlags aspect_mask, enum a6xx_rotation rotation, bool clear, - uint8_t mask) + bool ubwc) { tu_emit_cache_flush_ccu(cmd, cs, TU_CMD_CCU_SYSMEM); - r2d_setup_common(cmd, cs, vk_format, rotation, clear, mask, false); + r2d_setup_common(cmd, cs, vk_format, aspect_mask, rotation, clear, ubwc, false); } static void @@ -482,15 +325,62 @@ r2d_run(struct tu_cmd_buffer *cmd, struct tu_cs *cs) /* r3d_ = shader path operations */ +void +tu_init_clear_blit_shaders(struct tu6_global *global) +{ +#define MOV(args...) { .cat1 = { .opc_cat = 1, .src_type = TYPE_S32, .dst_type = TYPE_S32, args } } +#define CAT2(op, args...) { .cat2 = { .opc_cat = 2, .opc = (op) & 63, .full = 1, args } } +#define CAT3(op, args...) { .cat3 = { .opc_cat = 3, .opc = (op) & 63, args } } + + static const instr_t vs_code[] = { + /* r0.xyz = r0.w ? c1.xyz : c0.xyz + * r1.xy = r0.w ? 
c1.zw : c0.zw + * r0.w = 1.0f + */ + CAT3(OPC_SEL_B32, .repeat = 2, .dst = 0, + .c1 = {.src1_c = 1, .src1 = 4}, .src1_r = 1, + .src2 = 3, + .c2 = {.src3_c = 1, .dummy = 1, .src3 = 0}), + CAT3(OPC_SEL_B32, .repeat = 1, .dst = 4, + .c1 = {.src1_c = 1, .src1 = 6}, .src1_r = 1, + .src2 = 3, + .c2 = {.src3_c = 1, .dummy = 1, .src3 = 2}), + MOV(.dst = 3, .src_im = 1, .fim_val = 1.0f ), + { .cat0 = { .opc = OPC_END } }, + }; + + static const instr_t fs_blit[] = { + /* " bary.f (ei)r63.x, 0, r0.x" note the blob doesn't have this in its + * blit path (its not clear what allows it to not have it) + */ + CAT2(OPC_BARY_F, .ei = 1, .full = 1, .dst = 63 * 4, .src1_im = 1), + { .cat0 = { .opc = OPC_END } }, + }; + + memcpy(&global->shaders[GLOBAL_SH_VS], vs_code, sizeof(vs_code)); + memcpy(&global->shaders[GLOBAL_SH_FS_BLIT], fs_blit, sizeof(fs_blit)); + + for (uint32_t num_rts = 0; num_rts <= MAX_RTS; num_rts++) { + instr_t *code = global->shaders[GLOBAL_SH_FS_CLEAR0 + num_rts]; + for (uint32_t i = 0; i < num_rts; i++) { + /* (rpt3)mov.s32s32 r0.x, (r)c[i].x */ + *code++ = (instr_t) MOV(.repeat = 3, .dst = i * 4, .src_c = 1, .src_r = 1, .src = i * 4); + } + *code++ = (instr_t) { .cat0 = { .opc = OPC_END } }; + } +} + static void -r3d_pipeline(struct tu_cmd_buffer *cmd, struct tu_cs *cs, bool blit, uint32_t num_rts) +r3d_common(struct tu_cmd_buffer *cmd, struct tu_cs *cs, bool blit, uint32_t num_rts, + bool layered_clear) { + struct ir3_const_state dummy_const_state = {}; struct ir3_shader dummy_shader = {}; struct ir3_shader_variant vs = { .type = MESA_SHADER_VERTEX, .instrlen = 1, - .constlen = 2, + .constlen = 4, .info.max_reg = 1, .inputs_count = 1, .inputs[0] = { @@ -508,12 +398,18 @@ r3d_pipeline(struct tu_cmd_buffer *cmd, struct tu_cs *cs, bool blit, uint32_t nu .regid = regid(1, 0), }, .shader = &dummy_shader, + .const_state = &dummy_const_state, }; + if (layered_clear) { + vs.outputs[1].slot = VARYING_SLOT_LAYER; + vs.outputs[1].regid = regid(1, 1); + vs.outputs_count = 2; + } struct ir3_shader_variant fs = { .type = MESA_SHADER_FRAGMENT, .instrlen = 1, /* max of 9 instructions with num_rts = 8 */ - .constlen = num_rts, + .constlen = align(num_rts, 4), .info.max_reg = MAX2(num_rts, 1) - 1, .total_in = blit ? 2 : 0, .num_samp = blit ? 1 : 0, @@ -536,72 +432,33 @@ r3d_pipeline(struct tu_cmd_buffer *cmd, struct tu_cs *cs, bool blit, uint32_t nu .cmd = 4, }, .shader = &dummy_shader, + .const_state = &dummy_const_state, }; - static const instr_t vs_code[] = { - /* r0.xyz = r0.w ? c1.xyz : c0.xyz - * r1.xy = r0.w ? 
c1.zw : c0.zw - * r0.w = 1.0f - */ - { .cat3 = { - .opc_cat = 3, .opc = OPC_SEL_B32 & 63, .repeat = 2, .dst = 0, - .c1 = {.src1_c = 1, .src1 = 4}, .src1_r = 1, - .src2 = 3, - .c2 = {.src3_c = 1, .dummy = 1, .src3 = 0}, - } }, - { .cat3 = { - .opc_cat = 3, .opc = OPC_SEL_B32 & 63, .repeat = 1, .dst = 4, - .c1 = {.src1_c = 1, .src1 = 6}, .src1_r = 1, - .src2 = 3, - .c2 = {.src3_c = 1, .dummy = 1, .src3 = 2}, - } }, - { .cat1 = { .opc_cat = 1, .src_type = TYPE_F32, .dst_type = TYPE_F32, .dst = 3, - .src_im = 1, .fim_val = 1.0f } }, - { .cat0 = { .opc = OPC_END } }, - }; -#define FS_OFFSET (16 * sizeof(instr_t)) - STATIC_ASSERT(sizeof(vs_code) <= FS_OFFSET); - - /* shaders */ - struct ts_cs_memory shaders = { }; - VkResult result = tu_cs_alloc(&cmd->sub_cs, 2, 16 * sizeof(instr_t), &shaders); - assert(result == VK_SUCCESS); - - memcpy(shaders.map, vs_code, sizeof(vs_code)); - - instr_t *fs_code = (instr_t*) ((uint8_t*) shaders.map + FS_OFFSET); - for (uint32_t i = 0; i < num_rts; i++) { - /* (rpt3)mov.s32s32 r0.x, (r)c[i].x */ - *fs_code++ = (instr_t) { .cat1 = { - .opc_cat = 1, .src_type = TYPE_S32, .dst_type = TYPE_S32, - .repeat = 3, .dst = i * 4, .src_c = 1, .src_r = 1, .src = i * 4 - } }; - } - - /* " bary.f (ei)r63.x, 0, r0.x" note the blob doesn't have this in its - * blit path (its not clear what allows it to not have it) - */ - if (blit) { - *fs_code++ = (instr_t) { .cat2 = { - .opc_cat = 2, .opc = OPC_BARY_F & 63, .ei = 1, .full = 1, - .dst = regid(63, 0), .src1_im = 1 - } }; - } - *fs_code++ = (instr_t) { .cat0 = { .opc = OPC_END } }; - /* note: assumed <= 16 instructions (MAX_RTS is 8) */ - - tu_cs_emit_regs(cs, A6XX_HLSQ_UPDATE_CNTL(0x7ffff)); - - tu6_emit_xs_config(cs, MESA_SHADER_VERTEX, &vs, shaders.iova); + tu_cs_emit_regs(cs, A6XX_HLSQ_INVALIDATE_CMD( + .vs_state = true, + .hs_state = true, + .ds_state = true, + .gs_state = true, + .fs_state = true, + .cs_state = true, + .gfx_ibo = true, + .cs_ibo = true, + .gfx_shared_const = true, + .gfx_bindless = 0x1f, + .cs_bindless = 0x1f)); + + tu6_emit_xs_config(cs, MESA_SHADER_VERTEX, &vs, global_iova(cmd, shaders[GLOBAL_SH_VS])); tu6_emit_xs_config(cs, MESA_SHADER_TESS_CTRL, NULL, 0); tu6_emit_xs_config(cs, MESA_SHADER_TESS_EVAL, NULL, 0); tu6_emit_xs_config(cs, MESA_SHADER_GEOMETRY, NULL, 0); - tu6_emit_xs_config(cs, MESA_SHADER_FRAGMENT, &fs, shaders.iova + FS_OFFSET); + tu6_emit_xs_config(cs, MESA_SHADER_FRAGMENT, &fs, + global_iova(cmd, shaders[blit ? GLOBAL_SH_FS_BLIT : (GLOBAL_SH_FS_CLEAR0 + num_rts)])); tu_cs_emit_regs(cs, A6XX_PC_PRIMITIVE_CNTL_0()); tu_cs_emit_regs(cs, A6XX_VFD_CONTROL_0()); - tu6_emit_vpc(cs, &vs, NULL, &fs, NULL); + tu6_emit_vpc(cs, &vs, NULL, NULL, NULL, &fs, 0, false); /* REPL_MODE for varying with RECTLIST (2 vertices only) */ tu_cs_emit_regs(cs, A6XX_VPC_VARYING_INTERP_MODE(0, 0)); @@ -614,16 +471,19 @@ r3d_pipeline(struct tu_cmd_buffer *cmd, struct tu_cs *cs, bool blit, uint32_t nu .persp_division_disable = 1, .vp_xform_disable = 1, .vp_clip_code_ignore = 1, - .clip_disable = 1), - A6XX_GRAS_UNKNOWN_8001(0)); + .clip_disable = 1)); tu_cs_emit_regs(cs, A6XX_GRAS_SU_CNTL()); // XXX msaa enable? 
tu_cs_emit_regs(cs, - A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0(.x = 0, .y = 0), - A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0(.x = 0x7fff, .y = 0x7fff)); + A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL(0, .x = 0, .y = 0), + A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR(0, .x = 0x7fff, .y = 0x7fff)); + tu_cs_emit_regs(cs, + A6XX_GRAS_SC_SCREEN_SCISSOR_TL(0, .x = 0, .y = 0), + A6XX_GRAS_SC_SCREEN_SCISSOR_BR(0, .x = 0x7fff, .y = 0x7fff)); + tu_cs_emit_regs(cs, - A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0(.x = 0, .y = 0), - A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0(.x = 0x7fff, .y = 0x7fff)); + A6XX_VFD_INDEX_OFFSET(), + A6XX_VFD_INSTANCE_START_OFFSET()); } static void @@ -704,9 +564,9 @@ r3d_src_common(struct tu_cmd_buffer *cmd, const uint32_t *tex_const, uint32_t offset_base, uint32_t offset_ubwc, - bool linear_filter) + VkFilter filter) { - struct ts_cs_memory texture = { }; + struct tu_cs_memory texture = { }; VkResult result = tu_cs_alloc(&cmd->sub_cs, 2, /* allocate space for a sampler too */ A6XX_TEX_CONST_DWORDS, &texture); @@ -721,8 +581,8 @@ r3d_src_common(struct tu_cmd_buffer *cmd, texture.map[8] = ubwc_addr >> 32; texture.map[A6XX_TEX_CONST_DWORDS + 0] = - A6XX_TEX_SAMP_0_XY_MAG(linear_filter ? A6XX_TEX_LINEAR : A6XX_TEX_NEAREST) | - A6XX_TEX_SAMP_0_XY_MIN(linear_filter ? A6XX_TEX_LINEAR : A6XX_TEX_NEAREST) | + A6XX_TEX_SAMP_0_XY_MAG(tu6_tex_filter(filter, false)) | + A6XX_TEX_SAMP_0_XY_MIN(tu6_tex_filter(filter, false)) | A6XX_TEX_SAMP_0_WRAP_S(A6XX_TEX_CLAMP_TO_EDGE) | A6XX_TEX_SAMP_0_WRAP_T(A6XX_TEX_CLAMP_TO_EDGE) | A6XX_TEX_SAMP_0_WRAP_R(A6XX_TEX_CLAMP_TO_EDGE) | @@ -764,12 +624,12 @@ r3d_src(struct tu_cmd_buffer *cmd, struct tu_cs *cs, const struct tu_image_view *iview, uint32_t layer, - bool linear_filter) + VkFilter filter) { r3d_src_common(cmd, cs, iview->descriptor, iview->layer_size * layer, iview->ubwc_layer_size * layer, - linear_filter); + filter); } static void @@ -794,7 +654,6 @@ r3d_src_buffer(struct tu_cmd_buffer *cmd, A6XX_TEX_CONST_0_SWIZ_W(vk_format == VK_FORMAT_R8_UNORM ? 
A6XX_TEX_X : A6XX_TEX_W); desc[1] = A6XX_TEX_CONST_1_WIDTH(width) | A6XX_TEX_CONST_1_HEIGHT(height); desc[2] = - A6XX_TEX_CONST_2_FETCHSIZE(tu6_fetchsize(vk_format)) | A6XX_TEX_CONST_2_PITCH(pitch) | A6XX_TEX_CONST_2_TYPE(A6XX_TEX_2D); desc[3] = 0; @@ -803,7 +662,7 @@ r3d_src_buffer(struct tu_cmd_buffer *cmd, for (uint32_t i = 6; i < A6XX_TEX_CONST_DWORDS; i++) desc[i] = 0; - r3d_src_common(cmd, cs, desc, 0, 0, false); + r3d_src_common(cmd, cs, desc, 0, 0, VK_FILTER_NEAREST); } static void @@ -840,23 +699,48 @@ r3d_dst_buffer(struct tu_cs *cs, VkFormat vk_format, uint64_t va, uint32_t pitch tu_cs_emit_regs(cs, A6XX_RB_RENDER_CNTL()); } +static uint8_t +aspect_write_mask(VkFormat vk_format, VkImageAspectFlags aspect_mask) +{ + uint8_t mask = 0xf; + assert(aspect_mask); + /* note: the only format with partial writing is D24S8, + * clear/blit uses the _AS_R8G8B8A8 format to access it + */ + if (vk_format == VK_FORMAT_D24_UNORM_S8_UINT) { + if (aspect_mask == VK_IMAGE_ASPECT_DEPTH_BIT) + mask = 0x7; + if (aspect_mask == VK_IMAGE_ASPECT_STENCIL_BIT) + mask = 0x8; + } + return mask; +} + static void r3d_setup(struct tu_cmd_buffer *cmd, struct tu_cs *cs, VkFormat vk_format, + VkImageAspectFlags aspect_mask, enum a6xx_rotation rotation, bool clear, - uint8_t mask) + bool ubwc) { + enum a6xx_format format = tu6_base_format(vk_format); + + if ((vk_format == VK_FORMAT_D24_UNORM_S8_UINT || + vk_format == VK_FORMAT_X8_D24_UNORM_PACK32) && ubwc) { + format = FMT6_Z24_UNORM_S8_UINT_AS_R8G8B8A8; + } + if (!cmd->state.pass) { tu_emit_cache_flush_ccu(cmd, cs, TU_CMD_CCU_SYSMEM); - tu6_emit_window_scissor(cs, 0, 0, 0x7fff, 0x7fff); + tu6_emit_window_scissor(cs, 0, 0, 0x3fff, 0x3fff); } tu_cs_emit_regs(cs, A6XX_GRAS_BIN_CONTROL(.dword = 0xc00000)); tu_cs_emit_regs(cs, A6XX_RB_BIN_CONTROL(.dword = 0xc00000)); - r3d_pipeline(cmd, cs, !clear, clear ? 1 : 0); + r3d_common(cmd, cs, !clear, clear ? 
1 : 0, false); tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_OUTPUT_CNTL0, 2); tu_cs_emit(cs, A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID(0xfc) | @@ -887,11 +771,12 @@ r3d_setup(struct tu_cmd_buffer *cmd, tu_cs_emit_regs(cs, A6XX_SP_FS_RENDER_COMPONENTS(.rt0 = 0xf)); tu_cs_emit_regs(cs, A6XX_SP_FS_MRT_REG(0, - .color_format = tu6_base_format(vk_format), + .color_format = format, .color_sint = vk_format_is_sint(vk_format), .color_uint = vk_format_is_uint(vk_format))); - tu_cs_emit_regs(cs, A6XX_RB_MRT_CONTROL(0, .component_enable = mask)); + tu_cs_emit_regs(cs, A6XX_RB_MRT_CONTROL(0, + .component_enable = aspect_write_mask(vk_format, aspect_mask))); tu_cs_emit_regs(cs, A6XX_RB_SRGB_CNTL(vk_format_is_srgb(vk_format))); tu_cs_emit_regs(cs, A6XX_SP_SRGB_CNTL(vk_format_is_srgb(vk_format))); } @@ -920,7 +805,7 @@ struct blit_ops { struct tu_cs *cs, const struct tu_image_view *iview, uint32_t layer, - bool linear_filter); + VkFilter filter); void (*src_buffer)(struct tu_cmd_buffer *cmd, struct tu_cs *cs, VkFormat vk_format, uint64_t va, uint32_t pitch, @@ -930,9 +815,10 @@ struct blit_ops { void (*setup)(struct tu_cmd_buffer *cmd, struct tu_cs *cs, VkFormat vk_format, + VkImageAspectFlags aspect_mask, enum a6xx_rotation rotation, bool clear, - uint8_t mask); + bool ubwc); void (*run)(struct tu_cmd_buffer *cmd, struct tu_cs *cs); }; @@ -969,13 +855,46 @@ coords(const struct blit_ops *ops, ops->coords(cs, (const VkOffset2D*) dst, (const VkOffset2D*) src, (const VkExtent2D*) extent); } +static VkFormat +copy_format(VkFormat format, VkImageAspectFlags aspect_mask, bool copy_buffer) +{ + if (vk_format_is_compressed(format)) { + switch (vk_format_get_blocksize(format)) { + case 1: return VK_FORMAT_R8_UINT; + case 2: return VK_FORMAT_R16_UINT; + case 4: return VK_FORMAT_R32_UINT; + case 8: return VK_FORMAT_R32G32_UINT; + case 16:return VK_FORMAT_R32G32B32A32_UINT; + default: + unreachable("unhandled format size"); + } + } + + switch (format) { + case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM: + if (aspect_mask == VK_IMAGE_ASPECT_PLANE_1_BIT) + return VK_FORMAT_R8G8_UNORM; + /* fallthrough */ + case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM: + return VK_FORMAT_R8_UNORM; + case VK_FORMAT_D24_UNORM_S8_UINT: + if (aspect_mask == VK_IMAGE_ASPECT_STENCIL_BIT && copy_buffer) + return VK_FORMAT_R8_UNORM; + /* fallthrough */ + default: + return format; + case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32: + return VK_FORMAT_R32_UINT; + } +} + static void -tu_image_view_blit2(struct tu_image_view *iview, - struct tu_image *image, - VkFormat format, - const VkImageSubresourceLayers *subres, - uint32_t layer, - bool stencil_read) +tu_image_view_copy_blit(struct tu_image_view *iview, + struct tu_image *image, + VkFormat format, + const VkImageSubresourceLayers *subres, + uint32_t layer, + bool stencil_read) { VkImageAspectFlags aspect_mask = subres->aspectMask; @@ -998,7 +917,19 @@ tu_image_view_blit2(struct tu_image_view *iview, .baseArrayLayer = subres->baseArrayLayer + layer, .layerCount = 1, }, - }); + }, false); +} + +static void +tu_image_view_copy(struct tu_image_view *iview, + struct tu_image *image, + VkFormat format, + const VkImageSubresourceLayers *subres, + uint32_t layer, + bool stencil_read) +{ + format = copy_format(format, subres->aspectMask, false); + tu_image_view_copy_blit(iview, image, format, subres, layer, stencil_read); } static void @@ -1007,7 +938,7 @@ tu_image_view_blit(struct tu_image_view *iview, const VkImageSubresourceLayers *subres, uint32_t layer) { - tu_image_view_blit2(iview, image, image->vk_format, subres, layer, false); + 
tu_image_view_copy_blit(iview, image, image->vk_format, subres, layer, false); } static void @@ -1051,15 +982,6 @@ tu6_blit_image(struct tu_cmd_buffer *cmd, layers = info->dstSubresource.layerCount; } - uint8_t mask = 0xf; - if (dst_image->vk_format == VK_FORMAT_D24_UNORM_S8_UINT) { - assert(info->srcSubresource.aspectMask == info->dstSubresource.aspectMask); - if (info->dstSubresource.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT) - mask = 0x7; - if (info->dstSubresource.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT) - mask = 0x8; - } - /* BC1_RGB_* formats need to have their last components overriden with 1 * when sampling, which is normally handled with the texture descriptor * swizzle. The 2d path can't handle that, so use the 3d path. @@ -1070,14 +992,16 @@ tu6_blit_image(struct tu_cmd_buffer *cmd, if (dst_image->samples > 1 || src_image->vk_format == VK_FORMAT_BC1_RGB_UNORM_BLOCK || - src_image->vk_format == VK_FORMAT_BC1_RGB_SRGB_BLOCK) + src_image->vk_format == VK_FORMAT_BC1_RGB_SRGB_BLOCK || + filter == VK_FILTER_CUBIC_EXT) ops = &r3d_ops; /* TODO: shader path fails some of blit_image.all_formats.generate_mipmaps.* tests, * figure out why (should be able to pass all tests with only shader path) */ - ops->setup(cmd, cs, dst_image->vk_format, rotate[mirror_y][mirror_x], false, mask); + ops->setup(cmd, cs, dst_image->vk_format, info->dstSubresource.aspectMask, + rotate[mirror_y][mirror_x], false, dst_image->layout[0].ubwc); if (ops == &r3d_ops) { r3d_coords_raw(cs, (float[]) { @@ -1093,10 +1017,10 @@ tu6_blit_image(struct tu_cmd_buffer *cmd, A6XX_GRAS_2D_DST_BR(.x = MAX2(info->dstOffsets[0].x, info->dstOffsets[1].x) - 1, .y = MAX2(info->dstOffsets[0].y, info->dstOffsets[1].y) - 1)); tu_cs_emit_regs(cs, - A6XX_GRAS_2D_SRC_TL_X(.x = MIN2(info->srcOffsets[0].x, info->srcOffsets[1].x)), - A6XX_GRAS_2D_SRC_BR_X(.x = MAX2(info->srcOffsets[0].x, info->srcOffsets[1].x) - 1), - A6XX_GRAS_2D_SRC_TL_Y(.y = MIN2(info->srcOffsets[0].y, info->srcOffsets[1].y)), - A6XX_GRAS_2D_SRC_BR_Y(.y = MAX2(info->srcOffsets[0].y, info->srcOffsets[1].y) - 1)); + A6XX_GRAS_2D_SRC_TL_X(MIN2(info->srcOffsets[0].x, info->srcOffsets[1].x)), + A6XX_GRAS_2D_SRC_BR_X(MAX2(info->srcOffsets[0].x, info->srcOffsets[1].x) - 1), + A6XX_GRAS_2D_SRC_TL_Y(MIN2(info->srcOffsets[0].y, info->srcOffsets[1].y)), + A6XX_GRAS_2D_SRC_BR_Y(MAX2(info->srcOffsets[0].y, info->srcOffsets[1].y) - 1)); } struct tu_image_view dst, src; @@ -1105,7 +1029,7 @@ tu6_blit_image(struct tu_cmd_buffer *cmd, for (uint32_t i = 0; i < layers; i++) { ops->dst(cs, &dst, i); - ops->src(cmd, cs, &src, i, filter == VK_FILTER_LINEAR); + ops->src(cmd, cs, &src, i, filter); ops->run(cmd, cs); } } @@ -1132,21 +1056,6 @@ tu_CmdBlitImage(VkCommandBuffer commandBuffer, tu6_blit_image(cmd, src_image, dst_image, pRegions + i, filter); } -static VkFormat -copy_format(VkFormat format) -{ - switch (vk_format_get_blocksizebits(format)) { - case 8: return VK_FORMAT_R8_UINT; - case 16: return VK_FORMAT_R16_UINT; - case 32: return VK_FORMAT_R32_UINT; - case 64: return VK_FORMAT_R32G32_UINT; - case 96: return VK_FORMAT_R32G32B32_UINT; - case 128:return VK_FORMAT_R32G32B32A32_UINT; - default: - unreachable("unhandled format size"); - } -} - static void copy_compressed(VkFormat format, VkOffset3D *offset, @@ -1181,47 +1090,36 @@ tu_copy_buffer_to_image(struct tu_cmd_buffer *cmd, { struct tu_cs *cs = &cmd->cs; uint32_t layers = MAX2(info->imageExtent.depth, info->imageSubresource.layerCount); - VkFormat dst_format = dst_image->vk_format; - VkFormat src_format = dst_image->vk_format; + 
VkFormat src_format = + copy_format(dst_image->vk_format, info->imageSubresource.aspectMask, true); const struct blit_ops *ops = &r2d_ops; - uint8_t mask = 0xf; - - if (dst_image->vk_format == VK_FORMAT_D24_UNORM_S8_UINT) { - switch (info->imageSubresource.aspectMask) { - case VK_IMAGE_ASPECT_STENCIL_BIT: - src_format = VK_FORMAT_R8_UNORM; /* changes how src buffer is interpreted */ - mask = 0x8; - ops = &r3d_ops; - break; - case VK_IMAGE_ASPECT_DEPTH_BIT: - mask = 0x7; - break; - } + /* special case for buffer to stencil */ + if (dst_image->vk_format == VK_FORMAT_D24_UNORM_S8_UINT && + info->imageSubresource.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT) { + ops = &r3d_ops; } + /* TODO: G8_B8R8_2PLANE_420_UNORM Y plane has different hardware format, + * which matters for UBWC. buffer_to_image/etc can fail because of this + */ + VkOffset3D offset = info->imageOffset; VkExtent3D extent = info->imageExtent; uint32_t src_width = info->bufferRowLength ?: extent.width; uint32_t src_height = info->bufferImageHeight ?: extent.height; - if (dst_format == VK_FORMAT_E5B9G9R9_UFLOAT_PACK32 || vk_format_is_compressed(src_format)) { - assert(src_format == dst_format); - copy_compressed(dst_format, &offset, &extent, &src_width, &src_height); - src_format = dst_format = copy_format(dst_format); - } + copy_compressed(dst_image->vk_format, &offset, &extent, &src_width, &src_height); uint32_t pitch = src_width * vk_format_get_blocksize(src_format); uint32_t layer_size = src_height * pitch; - /* note: the src_va/pitch alignment of 64 is for 2D engine, - * it is also valid for 1cpp format with shader path (stencil aspect path) - */ - - ops->setup(cmd, cs, dst_format, ROTATE_0, false, mask); + ops->setup(cmd, cs, + copy_format(dst_image->vk_format, info->imageSubresource.aspectMask, false), + info->imageSubresource.aspectMask, ROTATE_0, false, dst_image->layout[0].ubwc); struct tu_image_view dst; - tu_image_view_blit2(&dst, dst_image, dst_format, &info->imageSubresource, offset.z, false); + tu_image_view_copy(&dst, dst_image, dst_image->vk_format, &info->imageSubresource, offset.z, false); for (uint32_t i = 0; i < layers; i++) { ops->dst(cs, &dst, i); @@ -1272,13 +1170,12 @@ tu_copy_image_to_buffer(struct tu_cmd_buffer *cmd, { struct tu_cs *cs = &cmd->cs; uint32_t layers = MAX2(info->imageExtent.depth, info->imageSubresource.layerCount); - VkFormat src_format = src_image->vk_format; - VkFormat dst_format = src_image->vk_format; + VkFormat dst_format = + copy_format(src_image->vk_format, info->imageSubresource.aspectMask, true); bool stencil_read = false; if (src_image->vk_format == VK_FORMAT_D24_UNORM_S8_UINT && info->imageSubresource.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT) { - dst_format = VK_FORMAT_R8_UNORM; stencil_read = true; } @@ -1288,26 +1185,18 @@ tu_copy_image_to_buffer(struct tu_cmd_buffer *cmd, uint32_t dst_width = info->bufferRowLength ?: extent.width; uint32_t dst_height = info->bufferImageHeight ?: extent.height; - if (dst_format == VK_FORMAT_E5B9G9R9_UFLOAT_PACK32 || vk_format_is_compressed(dst_format)) { - assert(src_format == dst_format); - copy_compressed(dst_format, &offset, &extent, &dst_width, &dst_height); - src_format = dst_format = copy_format(dst_format); - } + copy_compressed(src_image->vk_format, &offset, &extent, &dst_width, &dst_height); uint32_t pitch = dst_width * vk_format_get_blocksize(dst_format); uint32_t layer_size = pitch * dst_height; - /* note: the dst_va/pitch alignment of 64 is for 2D engine, - * it is also valid for 1cpp format with shader path (stencil aspect) - */ - 
- ops->setup(cmd, cs, dst_format, ROTATE_0, false, 0xf); + ops->setup(cmd, cs, dst_format, VK_IMAGE_ASPECT_COLOR_BIT, ROTATE_0, false, false); struct tu_image_view src; - tu_image_view_blit2(&src, src_image, src_format, &info->imageSubresource, offset.z, stencil_read); + tu_image_view_copy(&src, src_image, src_image->vk_format, &info->imageSubresource, offset.z, stencil_read); for (uint32_t i = 0; i < layers; i++) { - ops->src(cmd, cs, &src, i, false); + ops->src(cmd, cs, &src, i, VK_FILTER_NEAREST); uint64_t dst_va = tu_buffer_iova(dst_buffer) + info->bufferOffset + layer_size * i; if ((dst_va & 63) || (pitch & 63)) { @@ -1371,7 +1260,7 @@ is_swapped_format(VkFormat format) static bool image_is_r8g8(struct tu_image *image) { - return image->layout.cpp == 2 && + return image->layout[0].cpp == 2 && vk_format_get_nr_components(image->vk_format) == 2; } @@ -1384,19 +1273,9 @@ tu_copy_image_to_image(struct tu_cmd_buffer *cmd, const struct blit_ops *ops = &r2d_ops; struct tu_cs *cs = &cmd->cs; - uint8_t mask = 0xf; - if (dst_image->vk_format == VK_FORMAT_D24_UNORM_S8_UINT) { - if (info->dstSubresource.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT) - mask = 0x7; - if (info->dstSubresource.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT) - mask = 0x8; - } - if (dst_image->samples > 1) ops = &r3d_ops; - assert(info->srcSubresource.aspectMask == info->dstSubresource.aspectMask); - VkFormat format = VK_FORMAT_UNDEFINED; VkOffset3D src_offset = info->srcOffset; VkOffset3D dst_offset = info->dstOffset; @@ -1421,10 +1300,8 @@ tu_copy_image_to_image(struct tu_cmd_buffer *cmd, copy_compressed(src_image->vk_format, &src_offset, &extent, NULL, NULL); copy_compressed(dst_image->vk_format, &dst_offset, NULL, NULL, NULL); - VkFormat dst_format = vk_format_is_compressed(dst_image->vk_format) ? - copy_format(dst_image->vk_format) : dst_image->vk_format; - VkFormat src_format = vk_format_is_compressed(src_image->vk_format) ? - copy_format(src_image->vk_format) : src_image->vk_format; + VkFormat dst_format = copy_format(dst_image->vk_format, info->dstSubresource.aspectMask, false); + VkFormat src_format = copy_format(src_image->vk_format, info->srcSubresource.aspectMask, false); bool use_staging_blit = false; @@ -1433,12 +1310,12 @@ tu_copy_image_to_image(struct tu_cmd_buffer *cmd, * the same as a blit. */ format = src_format; - } else if (!src_image->layout.tile_mode) { + } else if (!src_image->layout[0].tile_mode) { /* If an image is linear, we can always safely reinterpret it with the * other image's format and then do a regular blit. */ format = dst_format; - } else if (!dst_image->layout.tile_mode) { + } else if (!dst_image->layout[0].tile_mode) { format = src_format; } else if (image_is_r8g8(src_image) != image_is_r8g8(dst_image)) { /* We can't currently copy r8g8 images to/from other cpp=2 images, @@ -1451,9 +1328,9 @@ tu_copy_image_to_image(struct tu_cmd_buffer *cmd, * to/from it. */ use_staging_blit = true; - } else if (!src_image->layout.ubwc) { + } else if (!src_image->layout[0].ubwc) { format = dst_format; - } else if (!dst_image->layout.ubwc) { + } else if (!dst_image->layout[0].ubwc) { format = src_format; } else { /* Both formats use UBWC and so neither can be reinterpreted. 
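
Note on the reinterpretation logic above: the format/staging decision in
tu_copy_image_to_image reduces to a small predicate. The sketch below is
illustrative only; the struct img_props and the helper needs_staging_blit
are names invented for this note, not part of the driver.

   #include <stdbool.h>

   struct img_props {
      bool tiled;   /* layout[0].tile_mode != TILE6_LINEAR */
      bool ubwc;    /* layout[0].ubwc */
      bool r8g8;    /* cpp == 2 with exactly two components */
   };

   /* True when the copy must go through a linear staging image because
    * neither side can safely be reinterpreted in the other's format;
    * mirrors the decision tree in the hunk above. */
   static bool
   needs_staging_blit(struct img_props src, struct img_props dst,
                      bool same_format)
   {
      if (same_format)
         return false;   /* same format: the copy is just a blit */
      if (!src.tiled || !dst.tiled)
         return false;   /* a linear image can always be reinterpreted */
      if (src.r8g8 != dst.r8g8)
         return true;    /* r8g8 tiling is incompatible with other cpp=2 layouts */
      if (!src.ubwc || !dst.ubwc)
         return false;   /* the non-UBWC side can be reinterpreted */
      return true;       /* both UBWC: neither format can be reinterpreted */
   }
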
@@ -1465,8 +1342,8 @@ tu_copy_image_to_image(struct tu_cmd_buffer *cmd, struct tu_image_view dst, src; if (use_staging_blit) { - tu_image_view_blit2(&dst, dst_image, dst_format, &info->dstSubresource, dst_offset.z, false); - tu_image_view_blit2(&src, src_image, src_format, &info->srcSubresource, src_offset.z, false); + tu_image_view_copy(&dst, dst_image, dst_format, &info->dstSubresource, dst_offset.z, false); + tu_image_view_copy(&src, src_image, src_format, &info->srcSubresource, src_offset.z, false); struct tu_image staging_image = { .vk_format = src_format, @@ -1488,10 +1365,10 @@ tu_copy_image_to_image(struct tu_cmd_buffer *cmd, VkOffset3D staging_offset = { 0 }; - staging_image.layout.tile_mode = TILE6_LINEAR; - staging_image.layout.ubwc = false; + staging_image.layout[0].tile_mode = TILE6_LINEAR; + staging_image.layout[0].ubwc = false; - fdl6_layout(&staging_image.layout, + fdl6_layout(&staging_image.layout[0], vk_format_to_pipe_format(staging_image.vk_format), staging_image.samples, staging_image.extent.width, @@ -1503,7 +1380,7 @@ tu_copy_image_to_image(struct tu_cmd_buffer *cmd, NULL); VkResult result = tu_get_scratch_bo(cmd->device, - staging_image.layout.size, + staging_image.layout[0].size, &staging_image.bo); if (result != VK_SUCCESS) { cmd->record_result = result; @@ -1514,14 +1391,14 @@ tu_copy_image_to_image(struct tu_cmd_buffer *cmd, MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE); struct tu_image_view staging; - tu_image_view_blit2(&staging, &staging_image, src_format, - &staging_subresource, 0, false); + tu_image_view_copy(&staging, &staging_image, src_format, + &staging_subresource, 0, false); - ops->setup(cmd, cs, src_format, ROTATE_0, false, mask); + ops->setup(cmd, cs, src_format, VK_IMAGE_ASPECT_COLOR_BIT, ROTATE_0, false, false); coords(ops, cs, &staging_offset, &src_offset, &extent); for (uint32_t i = 0; i < info->extent.depth; i++) { - ops->src(cmd, cs, &src, i, false); + ops->src(cmd, cs, &src, i, VK_FILTER_NEAREST); ops->dst(cs, &staging, i); ops->run(cmd, cs); } @@ -1532,26 +1409,28 @@ tu_copy_image_to_image(struct tu_cmd_buffer *cmd, tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS); tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE); - tu_image_view_blit2(&staging, &staging_image, dst_format, - &staging_subresource, 0, false); + tu_image_view_copy(&staging, &staging_image, dst_format, + &staging_subresource, 0, false); - ops->setup(cmd, cs, dst_format, ROTATE_0, false, mask); + ops->setup(cmd, cs, dst_format, info->dstSubresource.aspectMask, + ROTATE_0, false, dst_image->layout[0].ubwc); coords(ops, cs, &dst_offset, &staging_offset, &extent); for (uint32_t i = 0; i < info->extent.depth; i++) { - ops->src(cmd, cs, &staging, i, false); + ops->src(cmd, cs, &staging, i, VK_FILTER_NEAREST); ops->dst(cs, &dst, i); ops->run(cmd, cs); } } else { - tu_image_view_blit2(&dst, dst_image, format, &info->dstSubresource, dst_offset.z, false); - tu_image_view_blit2(&src, src_image, format, &info->srcSubresource, src_offset.z, false); + tu_image_view_copy(&dst, dst_image, format, &info->dstSubresource, dst_offset.z, false); + tu_image_view_copy(&src, src_image, format, &info->srcSubresource, src_offset.z, false); - ops->setup(cmd, cs, format, ROTATE_0, false, mask); + ops->setup(cmd, cs, format, info->dstSubresource.aspectMask, + ROTATE_0, false, dst_image->layout[0].ubwc); coords(ops, cs, &dst_offset, &src_offset, &extent); for (uint32_t i = 0; i < info->extent.depth; i++) { - ops->src(cmd, cs, &src, i, false); + ops->src(cmd, cs, &src, i, VK_FILTER_NEAREST); ops->dst(cs, &dst, 
i); ops->run(cmd, cs); } @@ -1590,7 +1469,7 @@ copy_buffer(struct tu_cmd_buffer *cmd, VkFormat format = block_size == 4 ? VK_FORMAT_R32_UINT : VK_FORMAT_R8_UNORM; uint64_t blocks = size / block_size; - ops->setup(cmd, cs, format, ROTATE_0, false, 0xf); + ops->setup(cmd, cs, format, VK_IMAGE_ASPECT_COLOR_BIT, ROTATE_0, false, false); while (blocks) { uint32_t src_x = (src_va & 63) / block_size; @@ -1642,7 +1521,7 @@ tu_CmdUpdateBuffer(VkCommandBuffer commandBuffer, tu_bo_list_add(&cmd->bo_list, buffer->bo, MSM_SUBMIT_BO_WRITE); - struct ts_cs_memory tmp; + struct tu_cs_memory tmp; VkResult result = tu_cs_alloc(&cmd->sub_cs, DIV_ROUND_UP(dataSize, 64), 64, &tmp); if (result != VK_SUCCESS) { cmd->record_result = result; @@ -1673,7 +1552,7 @@ tu_CmdFillBuffer(VkCommandBuffer commandBuffer, uint64_t dst_va = tu_buffer_iova(buffer) + dstOffset; uint32_t blocks = fillSize / 4; - ops->setup(cmd, cs, VK_FORMAT_R32_UINT, ROTATE_0, true, 0xf); + ops->setup(cmd, cs, VK_FORMAT_R32_UINT, VK_IMAGE_ASPECT_COLOR_BIT, ROTATE_0, true, false); ops->clear_value(cs, VK_FORMAT_R32_UINT, &(VkClearValue){.color = {.uint32[0] = data}}); while (blocks) { @@ -1707,7 +1586,8 @@ tu_CmdResolveImage(VkCommandBuffer commandBuffer, tu_bo_list_add(&cmd->bo_list, src_image->bo, MSM_SUBMIT_BO_READ); tu_bo_list_add(&cmd->bo_list, dst_image->bo, MSM_SUBMIT_BO_WRITE); - ops->setup(cmd, cs, dst_image->vk_format, ROTATE_0, false, 0xf); + ops->setup(cmd, cs, dst_image->vk_format, VK_IMAGE_ASPECT_COLOR_BIT, + ROTATE_0, false, dst_image->layout[0].ubwc); for (uint32_t i = 0; i < regionCount; ++i) { const VkImageResolve *info = &pRegions[i]; @@ -1723,7 +1603,7 @@ tu_CmdResolveImage(VkCommandBuffer commandBuffer, tu_image_view_blit(&src, src_image, &info->srcSubresource, info->srcOffset.z); for (uint32_t i = 0; i < layers; i++) { - ops->src(cmd, cs, &src, i, false); + ops->src(cmd, cs, &src, i, VK_FILTER_NEAREST); ops->dst(cs, &dst, i); ops->run(cmd, cs); } @@ -1745,11 +1625,12 @@ tu_resolve_sysmem(struct tu_cmd_buffer *cmd, assert(src->image->vk_format == dst->image->vk_format); - ops->setup(cmd, cs, dst->image->vk_format, ROTATE_0, false, 0xf); + ops->setup(cmd, cs, dst->image->vk_format, VK_IMAGE_ASPECT_COLOR_BIT, + ROTATE_0, false, dst->ubwc_enabled); ops->coords(cs, &rect->offset, &rect->offset, &rect->extent); for (uint32_t i = 0; i < layers; i++) { - ops->src(cmd, cs, src, i, false); + ops->src(cmd, cs, src, i, VK_FILTER_NEAREST); ops->dst(cs, dst, i); ops->run(cmd, cs); } @@ -1773,18 +1654,9 @@ clear_image(struct tu_cmd_buffer *cmd, assert(range->baseArrayLayer == 0); } - uint8_t mask = 0xf; - if (image->vk_format == VK_FORMAT_D24_UNORM_S8_UINT) { - mask = 0; - if (range->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) - mask |= 0x7; - if (range->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) - mask |= 0x8; - } - const struct blit_ops *ops = image->samples > 1 ? 
&r3d_ops : &r2d_ops; - ops->setup(cmd, cs, format, ROTATE_0, true, mask); + ops->setup(cmd, cs, format, range->aspectMask, ROTATE_0, true, image->layout[0].ubwc); ops->clear_value(cs, image->vk_format, clear_value); for (unsigned j = 0; j < level_count; j++) { @@ -1797,7 +1669,7 @@ clear_image(struct tu_cmd_buffer *cmd, }); struct tu_image_view dst; - tu_image_view_blit2(&dst, image, format, &(VkImageSubresourceLayers) { + tu_image_view_copy_blit(&dst, image, format, &(VkImageSubresourceLayers) { .aspectMask = range->aspectMask, .mipLevel = range->baseMipLevel + j, .baseArrayLayer = range->baseArrayLayer, @@ -1899,18 +1771,11 @@ tu_clear_sysmem_attachments_2d(struct tu_cmd_buffer *cmd, if (a == VK_ATTACHMENT_UNUSED) continue; - uint8_t mask = 0xf; - if (cmd->state.pass->attachments[a].format == VK_FORMAT_D24_UNORM_S8_UINT) { - if (!(attachments[j].aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT)) - mask &= ~0x7; - if (!(attachments[j].aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT)) - mask &= ~0x8; - } - const struct tu_image_view *iview = cmd->state.framebuffer->attachments[a].attachment; - ops->setup(cmd, cs, iview->image->vk_format, ROTATE_0, true, mask); + ops->setup(cmd, cs, iview->image->vk_format, attachments[j].aspectMask, + ROTATE_0, true, iview->ubwc_enabled); ops->clear_value(cs, iview->image->vk_format, &attachments[j].clearValue); /* Wait for the flushes we triggered manually to complete */ @@ -1953,6 +1818,7 @@ tu_clear_sysmem_attachments(struct tu_cmd_buffer *cmd, uint32_t clear_rts = 0, clear_components = 0, num_rts = 0, b; bool z_clear = false; bool s_clear = false; + bool layered_clear = false; uint32_t max_samples = 1; for (uint32_t i = 0; i < attachment_count; i++) { @@ -1993,7 +1859,25 @@ tu_clear_sysmem_attachments(struct tu_cmd_buffer *cmd, return; } - /* TODO: this path doesn't take into account multilayer rendering */ + /* This clear path behaves like a draw, needs the same flush as tu_draw */ + tu_emit_cache_flush_renderpass(cmd, cs); + + /* disable all draw states so they don't interfere + * TODO: use and re-use draw states for this path + * we have to disable draw states individually to preserve + * input attachment states, because a secondary command buffer + * won't be able to restore them + */ + tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * (TU_DRAW_STATE_COUNT - 2)); + for (uint32_t i = 0; i < TU_DRAW_STATE_COUNT; i++) { + if (i == TU_DRAW_STATE_INPUT_ATTACHMENTS_GMEM || + i == TU_DRAW_STATE_INPUT_ATTACHMENTS_SYSMEM) + continue; + tu_cs_emit(cs, CP_SET_DRAW_STATE__0_GROUP_ID(i) | + CP_SET_DRAW_STATE__0_DISABLE); + tu_cs_emit_qw(cs, 0); + } + cmd->state.dirty |= TU_CMD_DIRTY_DRAW_STATE; tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_OUTPUT_CNTL0, 2); tu_cs_emit(cs, A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID(0xfc) | @@ -2009,7 +1893,12 @@ tu_clear_sysmem_attachments(struct tu_cmd_buffer *cmd, tu_cs_emit(cs, 0); } - r3d_pipeline(cmd, cs, false, num_rts); + for (uint32_t i = 0; i < rect_count; i++) { + if (rects[i].baseArrayLayer || rects[i].layerCount > 1) + layered_clear = true; + } + + r3d_common(cmd, cs, false, num_rts, layered_clear); tu_cs_emit_regs(cs, A6XX_SP_FS_RENDER_COMPONENTS(.dword = clear_components)); @@ -2054,97 +1943,77 @@ tu_clear_sysmem_attachments(struct tu_cmd_buffer *cmd, tu_cs_emit_array(cs, clear_value[b], 4); for (uint32_t i = 0; i < rect_count; i++) { - r3d_coords_raw(cs, (float[]) { - rects[i].rect.offset.x, rects[i].rect.offset.y, - z_clear_val, 1.0f, - rects[i].rect.offset.x + rects[i].rect.extent.width, - rects[i].rect.offset.y + rects[i].rect.extent.height, - 
z_clear_val, 1.0f - }); - r3d_run(cmd, cs); + for (uint32_t layer = 0; layer < rects[i].layerCount; layer++) { + r3d_coords_raw(cs, (float[]) { + rects[i].rect.offset.x, rects[i].rect.offset.y, + z_clear_val, uif(rects[i].baseArrayLayer + layer), + rects[i].rect.offset.x + rects[i].rect.extent.width, + rects[i].rect.offset.y + rects[i].rect.extent.height, + z_clear_val, 1.0f, + }); + r3d_run(cmd, cs); + } } - - cmd->state.dirty |= TU_CMD_DIRTY_PIPELINE | - TU_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK | - TU_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK | - TU_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE | - TU_CMD_DIRTY_DYNAMIC_VIEWPORT | - TU_CMD_DIRTY_DYNAMIC_SCISSOR; } -/** - * Pack a VkClearValue into a 128-bit buffer. format is respected except - * for the component order. The components are always packed in WZYX order, - * because gmem is tiled and tiled formats always have WZYX swap - */ static void -pack_gmem_clear_value(const VkClearValue *val, VkFormat format, uint32_t buf[4]) +pack_gmem_clear_value(const VkClearValue *val, VkFormat format, uint32_t clear_value[4]) { - const struct util_format_description *desc = vk_format_description(format); + enum pipe_format pformat = vk_format_to_pipe_format(format); switch (format) { - case VK_FORMAT_B10G11R11_UFLOAT_PACK32: - buf[0] = float3_to_r11g11b10f(val->color.float32); + case VK_FORMAT_X8_D24_UNORM_PACK32: + case VK_FORMAT_D24_UNORM_S8_UINT: + clear_value[0] = tu_pack_float32_for_unorm(val->depthStencil.depth, 24) | + val->depthStencil.stencil << 24; return; - case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32: - buf[0] = float3_to_rgb9e5(val->color.float32); + case VK_FORMAT_D16_UNORM: + clear_value[0] = tu_pack_float32_for_unorm(val->depthStencil.depth, 16); + return; + case VK_FORMAT_D32_SFLOAT: + clear_value[0] = fui(val->depthStencil.depth); + return; + case VK_FORMAT_S8_UINT: + clear_value[0] = val->depthStencil.stencil; return; + /* these formats use a different base format when tiled + * the same format can be used for both because GMEM is always in WZYX order + */ + case VK_FORMAT_R5G5B5A1_UNORM_PACK16: + case VK_FORMAT_B5G5R5A1_UNORM_PACK16: + pformat = PIPE_FORMAT_B5G5R5A1_UNORM; default: break; } - assert(desc && desc->layout == UTIL_FORMAT_LAYOUT_PLAIN); - - /* S8_UINT is special and has no depth */ - const int max_components = - format == VK_FORMAT_S8_UINT ? 
2 : desc->nr_channels; - - int buf_offset = 0; - int bit_shift = 0; - for (int comp = 0; comp < max_components; comp++) { - const struct util_format_channel_description *ch = - tu_get_format_channel_description(desc, comp); - if (!ch) { - assert((format == VK_FORMAT_S8_UINT && comp == 0) || - (format == VK_FORMAT_X8_D24_UNORM_PACK32 && comp == 1)); - continue; - } + VkClearColorValue color; - union tu_clear_component_value v = tu_get_clear_component_value( - val, comp, desc->colorspace); - - /* move to the next uint32_t when there is not enough space */ - assert(ch->size <= 32); - if (bit_shift + ch->size > 32) { - buf_offset++; - bit_shift = 0; - } - - if (bit_shift == 0) - buf[buf_offset] = 0; + /** + * GMEM is tiled and wants the components in WZYX order, + * apply swizzle to the color before packing, to counteract + * deswizzling applied by packing functions + */ + pipe_swizzle_4f(color.float32, val->color.float32, + util_format_description(pformat)->swizzle); - buf[buf_offset] |= tu_pack_clear_component_value(v, ch) << bit_shift; - bit_shift += ch->size; - } + util_format_pack_rgba(pformat, clear_value, color.uint32, 1); } static void tu_emit_clear_gmem_attachment(struct tu_cmd_buffer *cmd, struct tu_cs *cs, uint32_t attachment, - uint8_t component_mask, + VkImageAspectFlags mask, const VkClearValue *value) { VkFormat vk_format = cmd->state.pass->attachments[attachment].format; - /* note: component_mask is 0x7 for depth and 0x8 for stencil - * because D24S8 is cleared with AS_R8G8B8A8 format - */ + tu_cs_emit_pkt4(cs, REG_A6XX_RB_BLIT_DST_INFO, 1); tu_cs_emit(cs, A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(tu6_base_format(vk_format))); - tu_cs_emit_pkt4(cs, REG_A6XX_RB_BLIT_INFO, 1); - tu_cs_emit(cs, A6XX_RB_BLIT_INFO_GMEM | A6XX_RB_BLIT_INFO_CLEAR_MASK(component_mask)); + tu_cs_emit_regs(cs, A6XX_RB_BLIT_INFO(.gmem = 1, + .clear_mask = aspect_write_mask(vk_format, mask))); tu_cs_emit_pkt4(cs, REG_A6XX_RB_BLIT_BASE_GMEM, 1); tu_cs_emit(cs, cmd->state.pass->attachments[attachment].gmem_offset); @@ -2192,15 +2061,7 @@ tu_clear_gmem_attachments(struct tu_cmd_buffer *cmd, if (a == VK_ATTACHMENT_UNUSED) continue; - unsigned clear_mask = 0xf; - if (cmd->state.pass->attachments[a].format == VK_FORMAT_D24_UNORM_S8_UINT) { - if (!(attachments[j].aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT)) - clear_mask &= ~0x7; - if (!(attachments[j].aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT)) - clear_mask &= ~0x8; - } - - tu_emit_clear_gmem_attachment(cmd, cs, a, clear_mask, + tu_emit_clear_gmem_attachment(cmd, cs, a, attachments[j].aspectMask, &attachments[j].clearValue); } } @@ -2235,23 +2096,16 @@ tu_clear_sysmem_attachment(struct tu_cmd_buffer *cmd, const struct tu_image_view *iview = fb->attachments[a].attachment; const struct tu_render_pass_attachment *attachment = &cmd->state.pass->attachments[a]; - uint8_t mask = 0; - if (attachment->clear_mask == VK_IMAGE_ASPECT_COLOR_BIT) - mask = 0xf; - if (attachment->clear_mask & VK_IMAGE_ASPECT_DEPTH_BIT) - mask |= 0x7; - if (attachment->clear_mask & VK_IMAGE_ASPECT_STENCIL_BIT) - mask |= 0x8; - - if (!mask) + if (!attachment->clear_mask) return; const struct blit_ops *ops = &r2d_ops; if (attachment->samples > 1) ops = &r3d_ops; - ops->setup(cmd, cs, attachment->format, ROTATE_0, true, mask); + ops->setup(cmd, cs, attachment->format, attachment->clear_mask, ROTATE_0, + true, iview->ubwc_enabled); ops->coords(cs, &info->renderArea.offset, NULL, &info->renderArea.extent); ops->clear_value(cs, attachment->format, &info->pClearValues[a]); @@ -2289,21 +2143,13 @@ 
tu_clear_gmem_attachment(struct tu_cmd_buffer *cmd, { const struct tu_render_pass_attachment *attachment = &cmd->state.pass->attachments[a]; - unsigned clear_mask = 0; - - if (attachment->clear_mask == VK_IMAGE_ASPECT_COLOR_BIT) - clear_mask = 0xf; - if (attachment->clear_mask & VK_IMAGE_ASPECT_DEPTH_BIT) - clear_mask |= 0x7; - if (attachment->clear_mask & VK_IMAGE_ASPECT_STENCIL_BIT) - clear_mask |= 0x8; - if (!clear_mask) + if (!attachment->clear_mask) return; tu_cs_emit_regs(cs, A6XX_RB_MSAA_CNTL(tu_msaa_samples(attachment->samples))); - tu_emit_clear_gmem_attachment(cmd, cs, a, clear_mask, + tu_emit_clear_gmem_attachment(cmd, cs, a, attachment->clear_mask, &info->pClearValues[a]); } @@ -2392,10 +2238,10 @@ tu_store_gmem_attachment(struct tu_cmd_buffer *cmd, uint32_t a, uint32_t gmem_a) { - const struct tu_tiling_config *tiling = &cmd->state.tiling_config; - const VkRect2D *render_area = &tiling->render_area; + const struct tu_framebuffer *fb = cmd->state.framebuffer; + const VkRect2D *render_area = &cmd->state.render_area; struct tu_render_pass_attachment *dst = &cmd->state.pass->attachments[a]; - struct tu_image_view *iview = cmd->state.framebuffer->attachments[a].attachment; + struct tu_image_view *iview = fb->attachments[a].attachment; struct tu_render_pass_attachment *src = &cmd->state.pass->attachments[gmem_a]; if (!dst->store) @@ -2431,7 +2277,8 @@ tu_store_gmem_attachment(struct tu_cmd_buffer *cmd, return; } - r2d_setup_common(cmd, cs, dst->format, ROTATE_0, false, 0xf, true); + r2d_setup_common(cmd, cs, dst->format, VK_IMAGE_ASPECT_COLOR_BIT, + ROTATE_0, false, iview->ubwc_enabled, true); r2d_dst(cs, iview, 0); r2d_coords(cs, &render_area->offset, &render_area->offset, &render_area->extent); @@ -2448,7 +2295,7 @@ tu_store_gmem_attachment(struct tu_cmd_buffer *cmd, A6XX_SP_PS_2D_SRC_SIZE( .width = 0x3fff, .height = 0x3fff), A6XX_SP_PS_2D_SRC_LO(cmd->device->physical_device->gmem_base + src->gmem_offset), A6XX_SP_PS_2D_SRC_HI(), - A6XX_SP_PS_2D_SRC_PITCH(.pitch = tiling->tile0.extent.width * src->cpp)); + A6XX_SP_PS_2D_SRC_PITCH(.pitch = fb->tile0.width * src->cpp)); /* sync GMEM writes with CACHE. */ tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);
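
Note on the packing rewrite at the top of this diff: the old branchy
helpers collapse into one clamp-scale-round expression per case. Below is a
minimal standalone sketch of the two cases that survive (unorm, and the
8-bit snorm inlined into r2d_clear_value), assuming C99 lrintf under the
default round-to-nearest-even mode as a stand-in for _mesa_lroundevenf,
and 0 < bits < 32; the helper names are invented for this note.

   #include <math.h>
   #include <stdint.h>

   /* Unorm: clamp to [0, 1], scale to the field maximum, round to even.
    * pack_unorm(0.5f, 8) == 128; pack_unorm(1.0f, 24) == 0xffffff. */
   static uint32_t
   pack_unorm(float val, int bits)
   {
      const float max = (float) ((1u << bits) - 1);
      val = val < 0.0f ? 0.0f : (val > 1.0f ? 1.0f : val);   /* CLAMP(val, 0, 1) */
      return (uint32_t) lrintf(val * max);
   }

   /* 8-bit snorm: clamp to [-1, 1] and scale by 127; a negative result
    * wraps into the low 8 bits on truncation, matching what the removed
    * tu_pack_float32_for_snorm(val, 8) produced with its explicit mask. */
   static uint8_t
   pack_snorm8(float val)
   {
      val = val < -1.0f ? -1.0f : (val > 1.0f ? 1.0f : val);
      return (uint8_t) (int32_t) lrintf(val * 127.0f);
   }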
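
Note on the D24S8 special cases: in GMEM (and through the _AS_R8G8B8A8
view) depth occupies the low three bytes and stencil the top byte, which is
why aspect_write_mask() maps the depth aspect to component mask 0x7 and the
stencil aspect to 0x8. A worked sketch of the matching clear-value packing
done by pack_gmem_clear_value (the helper name here is invented):

   #include <math.h>
   #include <stdint.h>

   /* 24-bit unorm depth in bits 0..23, stencil in bits 24..31. */
   static uint32_t
   pack_d24s8_clear(float depth, uint8_t stencil)
   {
      depth = depth < 0.0f ? 0.0f : (depth > 1.0f ? 1.0f : depth);
      uint32_t d24 = (uint32_t) lrintf(depth * (float) 0xffffff);
      return d24 | ((uint32_t) stencil << 24);
   }

For example, pack_d24s8_clear(1.0f, 0x80) returns 0x80ffffff.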
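
Note on the compressed-copy path: copy_format() now maps a compressed
format to an uncompressed uint format of the same block size (for example
VK_FORMAT_BC1_RGB_UNORM_BLOCK, with 8-byte 4x4 blocks, becomes
VK_FORMAT_R32G32_UINT), while copy_compressed() (whose body lies outside
these hunks) rescales offsets and extents to block units, so a 64x64 BC1
region ends up copied as a 16x16 R32G32_UINT region.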