1 /*
2 * Copyright 2019-2020 Valve Corporation
3 * SPDX-License-Identifier: MIT
4 *
5 * Authors:
6 * Jonathan Marek <jonathan@marek.ca>
7 */
8
9 #include "tu_private.h"
10
11 #include "tu_cs.h"
12 #include "vk_format.h"
13
14 #include "util/format_r11g11b10f.h"
15 #include "util/format_rgb9e5.h"
16 #include "util/format_srgb.h"
17 #include "util/u_half.h"
18
19 static uint32_t
20 tu_pack_float32_for_unorm(float val, int bits)
21 {
22 return _mesa_lroundevenf(CLAMP(val, 0.0f, 1.0f) * (float) ((1 << bits) - 1));
23 }
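/* Illustrative only: packing 0.5f into 8 bits gives
 * _mesa_lroundevenf(0.5f * 255.0f) = _mesa_lroundevenf(127.5f) = 128, since
 * round-half-to-even picks the even neighbor; out-of-range inputs are clamped
 * first, so tu_pack_float32_for_unorm(2.0f, 8) == 255.
 */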
24
25 /* r2d_ = BLIT_OP_SCALE operations */
26
27 static enum a6xx_2d_ifmt
28 format_to_ifmt(enum a6xx_format fmt)
29 {
30 switch (fmt) {
31 case FMT6_A8_UNORM:
32 case FMT6_8_UNORM:
33 case FMT6_8_SNORM:
34 case FMT6_8_8_UNORM:
35 case FMT6_8_8_SNORM:
36 case FMT6_8_8_8_8_UNORM:
37 case FMT6_8_8_8_X8_UNORM:
38 case FMT6_8_8_8_8_SNORM:
39 case FMT6_4_4_4_4_UNORM:
40 case FMT6_5_5_5_1_UNORM:
41 case FMT6_5_6_5_UNORM:
42 case FMT6_Z24_UNORM_S8_UINT:
43 case FMT6_Z24_UNORM_S8_UINT_AS_R8G8B8A8:
44 return R2D_UNORM8;
45
46 case FMT6_32_UINT:
47 case FMT6_32_SINT:
48 case FMT6_32_32_UINT:
49 case FMT6_32_32_SINT:
50 case FMT6_32_32_32_32_UINT:
51 case FMT6_32_32_32_32_SINT:
52 return R2D_INT32;
53
54 case FMT6_16_UINT:
55 case FMT6_16_SINT:
56 case FMT6_16_16_UINT:
57 case FMT6_16_16_SINT:
58 case FMT6_16_16_16_16_UINT:
59 case FMT6_16_16_16_16_SINT:
60 case FMT6_10_10_10_2_UINT:
61 return R2D_INT16;
62
63 case FMT6_8_UINT:
64 case FMT6_8_SINT:
65 case FMT6_8_8_UINT:
66 case FMT6_8_8_SINT:
67 case FMT6_8_8_8_8_UINT:
68 case FMT6_8_8_8_8_SINT:
69 return R2D_INT8;
70
71 case FMT6_16_UNORM:
72 case FMT6_16_SNORM:
73 case FMT6_16_16_UNORM:
74 case FMT6_16_16_SNORM:
75 case FMT6_16_16_16_16_UNORM:
76 case FMT6_16_16_16_16_SNORM:
77 case FMT6_32_FLOAT:
78 case FMT6_32_32_FLOAT:
79 case FMT6_32_32_32_32_FLOAT:
80 return R2D_FLOAT32;
81
82 case FMT6_16_FLOAT:
83 case FMT6_16_16_FLOAT:
84 case FMT6_16_16_16_16_FLOAT:
85 case FMT6_11_11_10_FLOAT:
86 case FMT6_10_10_10_2_UNORM:
87 case FMT6_10_10_10_2_UNORM_DEST:
88 return R2D_FLOAT16;
89
90 default:
91 unreachable("bad format");
92 return 0;
93 }
94 }
95
96 static void
97 r2d_coords(struct tu_cs *cs,
98 const VkOffset2D *dst,
99 const VkOffset2D *src,
100 const VkExtent2D *extent)
101 {
102 tu_cs_emit_regs(cs,
103 A6XX_GRAS_2D_DST_TL(.x = dst->x, .y = dst->y),
104 A6XX_GRAS_2D_DST_BR(.x = dst->x + extent->width - 1, .y = dst->y + extent->height - 1));
105
106 if (!src)
107 return;
108
109 tu_cs_emit_regs(cs,
110 A6XX_GRAS_2D_SRC_TL_X(.x = src->x),
111 A6XX_GRAS_2D_SRC_BR_X(.x = src->x + extent->width - 1),
112 A6XX_GRAS_2D_SRC_TL_Y(.y = src->y),
113 A6XX_GRAS_2D_SRC_BR_Y(.y = src->y + extent->height - 1));
114 }
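/* Note on the coordinate convention above: the BR registers appear to be
 * inclusive, so a 32x32 blit at offset (0, 0) is programmed as TL = (0, 0),
 * BR = (31, 31) rather than (32, 32).
 */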
115
116 static void
117 r2d_clear_value(struct tu_cs *cs, VkFormat format, const VkClearValue *val)
118 {
119 uint32_t clear_value[4] = {};
120
121 switch (format) {
122 case VK_FORMAT_X8_D24_UNORM_PACK32:
123 case VK_FORMAT_D24_UNORM_S8_UINT:
124 /* cleared as r8g8b8a8_unorm using special format */
125 clear_value[0] = tu_pack_float32_for_unorm(val->depthStencil.depth, 24);
126 clear_value[1] = clear_value[0] >> 8;
127 clear_value[2] = clear_value[0] >> 16;
128 clear_value[3] = val->depthStencil.stencil;
129 break;
130 case VK_FORMAT_D16_UNORM:
131 case VK_FORMAT_D32_SFLOAT:
132 /* R2D_FLOAT32 */
133 clear_value[0] = fui(val->depthStencil.depth);
134 break;
135 case VK_FORMAT_S8_UINT:
136 clear_value[0] = val->depthStencil.stencil;
137 break;
138 case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
139 /* cleared as UINT32 */
140 clear_value[0] = float3_to_rgb9e5(val->color.float32);
141 break;
142 default:
143 assert(!vk_format_is_depth_or_stencil(format));
144 const struct util_format_description *desc = vk_format_description(format);
145 enum a6xx_2d_ifmt ifmt = format_to_ifmt(tu6_base_format(format));
146
147 assert(desc && (desc->layout == UTIL_FORMAT_LAYOUT_PLAIN ||
148 format == VK_FORMAT_B10G11R11_UFLOAT_PACK32));
149
150 for (unsigned i = 0; i < desc->nr_channels; i++) {
151 const struct util_format_channel_description *ch = &desc->channel[i];
152 if (ifmt == R2D_UNORM8) {
153 float linear = val->color.float32[i];
154 if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB && i < 3)
155 linear = util_format_linear_to_srgb_float(val->color.float32[i]);
156
157 if (ch->type == UTIL_FORMAT_TYPE_SIGNED)
158 clear_value[i] = _mesa_lroundevenf(CLAMP(linear, -1.0f, 1.0f) * 127.0f);
159 else
160 clear_value[i] = tu_pack_float32_for_unorm(linear, 8);
161 } else if (ifmt == R2D_FLOAT16) {
162 clear_value[i] = util_float_to_half(val->color.float32[i]);
163 } else {
164 assert(ifmt == R2D_FLOAT32 || ifmt == R2D_INT32 ||
165 ifmt == R2D_INT16 || ifmt == R2D_INT8);
166 clear_value[i] = val->color.uint32[i];
167 }
168 }
169 break;
170 }
171
172 tu_cs_emit_pkt4(cs, REG_A6XX_RB_2D_SRC_SOLID_C0, 4);
173 tu_cs_emit_array(cs, clear_value, 4);
174 }
175
176 static void
177 r2d_src(struct tu_cmd_buffer *cmd,
178 struct tu_cs *cs,
179 const struct tu_image_view *iview,
180 uint32_t layer,
181 VkFilter filter)
182 {
183 uint32_t src_info = iview->SP_PS_2D_SRC_INFO;
184 if (filter != VK_FILTER_NEAREST)
185 src_info |= A6XX_SP_PS_2D_SRC_INFO_FILTER;
186
187 tu_cs_emit_pkt4(cs, REG_A6XX_SP_PS_2D_SRC_INFO, 5);
188 tu_cs_emit(cs, src_info);
189 tu_cs_emit(cs, iview->SP_PS_2D_SRC_SIZE);
190 tu_cs_image_ref_2d(cs, iview, layer, true);
191
192 tu_cs_emit_pkt4(cs, REG_A6XX_SP_PS_2D_SRC_FLAGS_LO, 3);
193 tu_cs_image_flag_ref(cs, iview, layer);
194 }
195
196 static void
197 r2d_src_buffer(struct tu_cmd_buffer *cmd,
198 struct tu_cs *cs,
199 VkFormat vk_format,
200 uint64_t va, uint32_t pitch,
201 uint32_t width, uint32_t height)
202 {
203 struct tu_native_format format = tu6_format_texture(vk_format, TILE6_LINEAR);
204
205 tu_cs_emit_regs(cs,
206 A6XX_SP_PS_2D_SRC_INFO(
207 .color_format = format.fmt,
208 .color_swap = format.swap,
209 .srgb = vk_format_is_srgb(vk_format),
210 .unk20 = 1,
211 .unk22 = 1),
212 A6XX_SP_PS_2D_SRC_SIZE(.width = width, .height = height),
213 A6XX_SP_PS_2D_SRC_LO((uint32_t) va),
214 A6XX_SP_PS_2D_SRC_HI(va >> 32),
215 A6XX_SP_PS_2D_SRC_PITCH(.pitch = pitch));
216 }
217
218 static void
219 r2d_dst(struct tu_cs *cs, const struct tu_image_view *iview, uint32_t layer)
220 {
221 assert(iview->image->samples == 1);
222
223 tu_cs_emit_pkt4(cs, REG_A6XX_RB_2D_DST_INFO, 4);
224 tu_cs_emit(cs, iview->RB_2D_DST_INFO);
225 tu_cs_image_ref_2d(cs, iview, layer, false);
226
227 tu_cs_emit_pkt4(cs, REG_A6XX_RB_2D_DST_FLAGS_LO, 3);
228 tu_cs_image_flag_ref(cs, iview, layer);
229 }
230
231 static void
232 r2d_dst_buffer(struct tu_cs *cs, VkFormat vk_format, uint64_t va, uint32_t pitch)
233 {
234 struct tu_native_format format = tu6_format_color(vk_format, TILE6_LINEAR);
235
236 tu_cs_emit_regs(cs,
237 A6XX_RB_2D_DST_INFO(
238 .color_format = format.fmt,
239 .color_swap = format.swap,
240 .srgb = vk_format_is_srgb(vk_format)),
241 A6XX_RB_2D_DST_LO((uint32_t) va),
242 A6XX_RB_2D_DST_HI(va >> 32),
243 A6XX_RB_2D_DST_SIZE(.pitch = pitch));
244 }
245
246 static void
247 r2d_setup_common(struct tu_cmd_buffer *cmd,
248 struct tu_cs *cs,
249 VkFormat vk_format,
250 enum a6xx_rotation rotation,
251 bool clear,
252 uint8_t mask,
253 bool scissor)
254 {
255 enum a6xx_format format = tu6_base_format(vk_format);
256 enum a6xx_2d_ifmt ifmt = format_to_ifmt(format);
257 uint32_t unknown_8c01 = 0;
258
259 if (format == FMT6_Z24_UNORM_S8_UINT_AS_R8G8B8A8) {
260 /* preserve depth channels */
261 if (mask == 0x8)
262 unknown_8c01 = 0x00084001;
263 /* preserve stencil channel */
264 if (mask == 0x7)
265 unknown_8c01 = 0x08000041;
266 }
267
268 tu_cs_emit_pkt4(cs, REG_A6XX_RB_UNKNOWN_8C01, 1);
269 tu_cs_emit(cs, unknown_8c01);
270
271 uint32_t blit_cntl = A6XX_RB_2D_BLIT_CNTL(
272 .scissor = scissor,
273 .rotate = rotation,
274 .solid_color = clear,
275 .d24s8 = format == FMT6_Z24_UNORM_S8_UINT_AS_R8G8B8A8 && !clear,
276 .color_format = format,
277 .mask = 0xf,
278 .ifmt = vk_format_is_srgb(vk_format) ? R2D_UNORM8_SRGB : ifmt,
279 ).value;
280
281 tu_cs_emit_pkt4(cs, REG_A6XX_RB_2D_BLIT_CNTL, 1);
282 tu_cs_emit(cs, blit_cntl);
283
284 tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_2D_BLIT_CNTL, 1);
285 tu_cs_emit(cs, blit_cntl);
286
287 if (format == FMT6_10_10_10_2_UNORM_DEST)
288 format = FMT6_16_16_16_16_FLOAT;
289
290 tu_cs_emit_regs(cs, A6XX_SP_2D_SRC_FORMAT(
291 .sint = vk_format_is_sint(vk_format),
292 .uint = vk_format_is_uint(vk_format),
293 .color_format = format,
294 .srgb = vk_format_is_srgb(vk_format),
295 .mask = 0xf));
296 }
297
298 static void
299 r2d_setup(struct tu_cmd_buffer *cmd,
300 struct tu_cs *cs,
301 VkFormat vk_format,
302 enum a6xx_rotation rotation,
303 bool clear,
304 uint8_t mask)
305 {
306 tu_emit_cache_flush_ccu(cmd, cs, TU_CMD_CCU_SYSMEM);
307
308 r2d_setup_common(cmd, cs, vk_format, rotation, clear, mask, false);
309 }
310
311 static void
312 r2d_run(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
313 {
314 tu_cs_emit_pkt7(cs, CP_BLIT, 1);
315 tu_cs_emit(cs, CP_BLIT_0_OP(BLIT_OP_SCALE));
316 }
317
318 /* r3d_ = shader path operations */
319
320 static void
321 r3d_common(struct tu_cmd_buffer *cmd, struct tu_cs *cs, bool blit, uint32_t num_rts,
322 bool layered_clear)
323 {
324 struct ir3_const_state dummy_const_state = {};
325 struct ir3_shader dummy_shader = {};
326
327 struct ir3_shader_variant vs = {
328 .type = MESA_SHADER_VERTEX,
329 .instrlen = 1,
330 .constlen = 4,
331 .info.max_reg = 1,
332 .inputs_count = 1,
333 .inputs[0] = {
334 .slot = SYSTEM_VALUE_VERTEX_ID,
335 .regid = regid(0, 3),
336 .sysval = true,
337 },
338 .outputs_count = blit ? 2 : 1,
339 .outputs[0] = {
340 .slot = VARYING_SLOT_POS,
341 .regid = regid(0, 0),
342 },
343 .outputs[1] = {
344 .slot = VARYING_SLOT_VAR0,
345 .regid = regid(1, 0),
346 },
347 .shader = &dummy_shader,
348 .const_state = &dummy_const_state,
349 };
350 if (layered_clear) {
351 vs = (struct ir3_shader_variant) {
352 .type = MESA_SHADER_VERTEX,
353 .instrlen = 1,
354 .info.max_reg = 0,
355 .shader = &dummy_shader,
356 .const_state = &dummy_const_state,
357 };
358 }
359
360 struct ir3_shader_variant fs = {
361 .type = MESA_SHADER_FRAGMENT,
362 .instrlen = 1, /* max of 9 instructions with num_rts = 8 */
363 .constlen = align(num_rts, 4),
364 .info.max_reg = MAX2(num_rts, 1) - 1,
365 .total_in = blit ? 2 : 0,
366 .num_samp = blit ? 1 : 0,
367 .inputs_count = blit ? 2 : 0,
368 .inputs[0] = {
369 .slot = VARYING_SLOT_VAR0,
370 .inloc = 0,
371 .compmask = 3,
372 .bary = true,
373 },
374 .inputs[1] = {
375 .slot = SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL,
376 .regid = regid(0, 0),
377 .sysval = 1,
378 },
379 .num_sampler_prefetch = blit ? 1 : 0,
380 .sampler_prefetch[0] = {
381 .src = 0,
382 .wrmask = 0xf,
383 .cmd = 4,
384 },
385 .shader = &dummy_shader,
386 .const_state = &dummy_const_state,
387 };
388
389 struct ir3_shader_variant gs_shader = {
390 .type = MESA_SHADER_GEOMETRY,
391 .instrlen = 1,
392 .constlen = 4,
393 .info.max_reg = 1,
394 .inputs_count = 1,
395 .inputs[0] = {
396 .slot = SYSTEM_VALUE_GS_HEADER_IR3,
397 .regid = regid(0, 0),
398 .sysval = true,
399 },
400 .outputs_count = 3,
401 .outputs[0] = {
402 .slot = VARYING_SLOT_POS,
403 .regid = regid(0, 0),
404 },
405 .outputs[1] = {
406 .slot = VARYING_SLOT_LAYER,
407 .regid = regid(1, 1),
408 },
409 .outputs[2] = {
410 .slot = VARYING_SLOT_GS_VERTEX_FLAGS_IR3,
411 .regid = regid(1, 0),
412 },
413 .shader = &dummy_shader,
414 .const_state = &dummy_const_state,
415 }, *gs = layered_clear ? &gs_shader : NULL;
416
417
418 #define MOV(args...) { .cat1 = { .opc_cat = 1, .src_type = TYPE_F32, .dst_type = TYPE_F32, args } }
419 #define CAT2(op, args...) { .cat2 = { .opc_cat = 2, .opc = (op) & 63, .full = 1, args } }
420 #define CAT3(op, args...) { .cat3 = { .opc_cat = 3, .opc = (op) & 63, args } }
421
422 static const instr_t vs_code[] = {
423 /* r0.xyz = r0.w ? c1.xyz : c0.xyz
424 * r1.xy = r0.w ? c1.zw : c0.zw
425 * r0.w = 1.0f
426 */
427 CAT3(OPC_SEL_B32, .repeat = 2, .dst = 0,
428 .c1 = {.src1_c = 1, .src1 = 4}, .src1_r = 1,
429 .src2 = 3,
430 .c2 = {.src3_c = 1, .dummy = 1, .src3 = 0}),
431 CAT3(OPC_SEL_B32, .repeat = 1, .dst = 4,
432 .c1 = {.src1_c = 1, .src1 = 6}, .src1_r = 1,
433 .src2 = 3,
434 .c2 = {.src3_c = 1, .dummy = 1, .src3 = 2}),
435 MOV(.dst = 3, .src_im = 1, .fim_val = 1.0f ),
436 { .cat0 = { .opc = OPC_END } },
437 };
438
439 static const instr_t vs_layered[] = {
440 { .cat0 = { .opc = OPC_CHMASK } },
441 { .cat0 = { .opc = OPC_CHSH } },
442 };
443
444 static const instr_t gs_code[16] = {
445 /* (sy)(ss)(nop3)shr.b r0.w, r0.x, 16 (extract local_id) */
446 CAT2(OPC_SHR_B, .dst = 3, .src1 = 0, .src2_im = 1, .src2 = 16,
447 .src1_r = 1, .src2_r = 1, .ss = 1, .sync = 1),
448 /* x = (local_id & 1) ? c1.x : c0.x */
449 CAT2(OPC_AND_B, .dst = 0, .src1 = 3, .src2_im = 1, .src2 = 1),
450 /* y = (local_id & 2) ? c1.y : c0.y */
451 CAT2(OPC_AND_B, .dst = 1, .src1 = 3, .src2_im = 1, .src2 = 2),
452 /* pred = (local_id >= 4), used by OPC_KILL */
453 CAT2(OPC_CMPS_S, .dst = REG_P0 * 4, .cond = IR3_COND_GE, .src1 = 3, .src2_im = 1, .src2 = 4),
454 /* vertex_flags_out = (local_id == 0) ? 4 : 0 - first vertex flag */
455 CAT2(OPC_CMPS_S, .dst = 4, .cond = IR3_COND_EQ, .src1 = 3, .src2_im = 1, .src2 = 0),
456
457 MOV(.dst = 2, .src_c = 1, .src = 2), /* depth clear value from c0.z */
458 MOV(.dst = 3, .src_im = 1, .fim_val = 1.0f),
459 MOV(.dst = 5, .src_c = 1, .src = 3), /* layer id from c0.w */
460
461 /* (rpt1)sel.b32 r0.x, (r)c1.x, (r)r0.x, (r)c0.x */
462 CAT3(OPC_SEL_B32, .repeat = 1, .dst = 0,
463 .c1 = {.src1_c = 1, .src1 = 4, .dummy = 4}, .src1_r = 1,
464 .src2 = 0,
465 .c2 = {.src3_c = 1, .dummy = 1, .src3 = 0}),
466
467 CAT2(OPC_SHL_B, .dst = 4, .src1 = 4, .src2_im = 1, .src2 = 2),
468
469 { .cat0 = { .opc = OPC_KILL } },
470 { .cat0 = { .opc = OPC_END, .ss = 1, .sync = 1 } },
471 };
472 #define FS_OFFSET (16 * sizeof(instr_t))
473 #define GS_OFFSET (32 * sizeof(instr_t))
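   /* Layout note: within the allocation below the VS is copied at offset 0,
    * the FS at FS_OFFSET (16 instructions in) and, for layered clears, the GS
    * at GS_OFFSET (32 instructions in); each shader is assumed to fit in 16
    * instructions (see the note after the FS is emitted).
    */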
474
475 /* shaders */
476 struct tu_cs_memory shaders = { };
477 VkResult result = tu_cs_alloc(&cmd->sub_cs, 2 + layered_clear,
478 16 * sizeof(instr_t), &shaders);
479 assert(result == VK_SUCCESS);
480
481 if (layered_clear) {
482 memcpy(shaders.map, vs_layered, sizeof(vs_layered));
483 memcpy((uint8_t*) shaders.map + GS_OFFSET, gs_code, sizeof(gs_code));
484 } else {
485 memcpy(shaders.map, vs_code, sizeof(vs_code));
486 }
487
488 instr_t *fs_code = (instr_t*) ((uint8_t*) shaders.map + FS_OFFSET);
489 for (uint32_t i = 0; i < num_rts; i++) {
490 /* (rpt3)mov.s32s32 r0.x, (r)c[i].x */
491 *fs_code++ = (instr_t) { .cat1 = {
492 .opc_cat = 1, .src_type = TYPE_S32, .dst_type = TYPE_S32,
493 .repeat = 3, .dst = i * 4, .src_c = 1, .src_r = 1, .src = i * 4
494 } };
495 }
496
497 /* " bary.f (ei)r63.x, 0, r0.x" note the blob doesn't have this in its
498 * blit path (its not clear what allows it to not have it)
499 */
500 if (blit) {
501 *fs_code++ = (instr_t) { .cat2 = {
502 .opc_cat = 2, .opc = OPC_BARY_F & 63, .ei = 1, .full = 1,
503 .dst = regid(63, 0), .src1_im = 1
504 } };
505 }
506 *fs_code++ = (instr_t) { .cat0 = { .opc = OPC_END } };
507 /* note: assumed <= 16 instructions (MAX_RTS is 8) */
508
509 tu_cs_emit_regs(cs, A6XX_HLSQ_UPDATE_CNTL(0x7ffff));
510
511 tu6_emit_xs_config(cs, MESA_SHADER_VERTEX, &vs, shaders.iova);
512 tu6_emit_xs_config(cs, MESA_SHADER_TESS_CTRL, NULL, 0);
513 tu6_emit_xs_config(cs, MESA_SHADER_TESS_EVAL, NULL, 0);
514 tu6_emit_xs_config(cs, MESA_SHADER_GEOMETRY, gs, shaders.iova + GS_OFFSET);
515 tu6_emit_xs_config(cs, MESA_SHADER_FRAGMENT, &fs, shaders.iova + FS_OFFSET);
516
517 tu_cs_emit_regs(cs, A6XX_PC_PRIMITIVE_CNTL_0());
518 tu_cs_emit_regs(cs, A6XX_VFD_CONTROL_0());
519
520 tu6_emit_vpc(cs, &vs, NULL, NULL, gs, &fs);
521
522 /* REPL_MODE for varying with RECTLIST (2 vertices only) */
523 tu_cs_emit_regs(cs, A6XX_VPC_VARYING_INTERP_MODE(0, 0));
524 tu_cs_emit_regs(cs, A6XX_VPC_VARYING_PS_REPL_MODE(0, 2 << 2 | 1 << 0));
525
526 tu6_emit_fs_inputs(cs, &fs);
527
528 tu_cs_emit_regs(cs,
529 A6XX_GRAS_CL_CNTL(
530 .persp_division_disable = 1,
531 .vp_xform_disable = 1,
532 .vp_clip_code_ignore = 1,
533 .clip_disable = 1),
534 A6XX_GRAS_UNKNOWN_8001(0));
535 tu_cs_emit_regs(cs, A6XX_GRAS_SU_CNTL()); // XXX msaa enable?
536
537 tu_cs_emit_regs(cs,
538 A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0(.x = 0, .y = 0),
539 A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0(.x = 0x7fff, .y = 0x7fff));
540 tu_cs_emit_regs(cs,
541 A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0(.x = 0, .y = 0),
542 A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0(.x = 0x7fff, .y = 0x7fff));
543
544 tu_cs_emit_regs(cs,
545 A6XX_VFD_INDEX_OFFSET(),
546 A6XX_VFD_INSTANCE_START_OFFSET());
547 }
548
549 static void
550 r3d_coords_raw(struct tu_cs *cs, bool gs, const float *coords)
551 {
552 tu_cs_emit_pkt7(cs, CP_LOAD_STATE6_GEOM, 3 + 8);
553 tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(0) |
554 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
555 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
556 CP_LOAD_STATE6_0_STATE_BLOCK(gs ? SB6_GS_SHADER : SB6_VS_SHADER) |
557 CP_LOAD_STATE6_0_NUM_UNIT(2));
558 tu_cs_emit(cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
559 tu_cs_emit(cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
560 tu_cs_emit_array(cs, (const uint32_t *) coords, 8);
561 }
562
563 static void
564 r3d_coords(struct tu_cs *cs,
565 const VkOffset2D *dst,
566 const VkOffset2D *src,
567 const VkExtent2D *extent)
568 {
569 int32_t src_x1 = src ? src->x : 0;
570 int32_t src_y1 = src ? src->y : 0;
571 r3d_coords_raw(cs, false, (float[]) {
572 dst->x, dst->y,
573 src_x1, src_y1,
574 dst->x + extent->width, dst->y + extent->height,
575 src_x1 + extent->width, src_y1 + extent->height,
576 });
577 }
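/* Sketch of how this feeds the non-layered blit VS built in r3d_common(): the
 * 8 floats uploaded by r3d_coords_raw() become consts
 * c0.xyzw = (dst.x, dst.y, src.x, src.y) and
 * c1.xyzw = (dst.x2, dst.y2, src.x2, src.y2), and the VS selects between c0
 * and c1 based on the vertex id, so the two RECTLIST vertices get opposite
 * corners for both the position and the texcoord varying.
 */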
578
579 static void
580 r3d_clear_value(struct tu_cs *cs, VkFormat format, const VkClearValue *val)
581 {
582 tu_cs_emit_pkt7(cs, CP_LOAD_STATE6_FRAG, 3 + 4);
583 tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(0) |
584 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
585 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
586 CP_LOAD_STATE6_0_STATE_BLOCK(SB6_FS_SHADER) |
587 CP_LOAD_STATE6_0_NUM_UNIT(1));
588 tu_cs_emit(cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
589 tu_cs_emit(cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
590 switch (format) {
591 case VK_FORMAT_X8_D24_UNORM_PACK32:
592 case VK_FORMAT_D24_UNORM_S8_UINT: {
593 /* cleared as r8g8b8a8_unorm using special format */
594 uint32_t tmp = tu_pack_float32_for_unorm(val->depthStencil.depth, 24);
595 tu_cs_emit(cs, fui((tmp & 0xff) / 255.0f));
596 tu_cs_emit(cs, fui((tmp >> 8 & 0xff) / 255.0f));
597 tu_cs_emit(cs, fui((tmp >> 16 & 0xff) / 255.0f));
598 tu_cs_emit(cs, fui((val->depthStencil.stencil & 0xff) / 255.0f));
599 } break;
600 case VK_FORMAT_D16_UNORM:
601 case VK_FORMAT_D32_SFLOAT:
602 tu_cs_emit(cs, fui(val->depthStencil.depth));
603 tu_cs_emit(cs, 0);
604 tu_cs_emit(cs, 0);
605 tu_cs_emit(cs, 0);
606 break;
607 case VK_FORMAT_S8_UINT:
608 tu_cs_emit(cs, val->depthStencil.stencil & 0xff);
609 tu_cs_emit(cs, 0);
610 tu_cs_emit(cs, 0);
611 tu_cs_emit(cs, 0);
612 break;
613 default:
614 /* as color formats use clear value as-is */
615 assert(!vk_format_is_depth_or_stencil(format));
616 tu_cs_emit_array(cs, val->color.uint32, 4);
617 break;
618 }
619 }
620
621 static void
622 r3d_src_common(struct tu_cmd_buffer *cmd,
623 struct tu_cs *cs,
624 const uint32_t *tex_const,
625 uint32_t offset_base,
626 uint32_t offset_ubwc,
627 VkFilter filter)
628 {
629 struct tu_cs_memory texture = { };
630 VkResult result = tu_cs_alloc(&cmd->sub_cs,
631 2, /* allocate space for a sampler too */
632 A6XX_TEX_CONST_DWORDS, &texture);
633 assert(result == VK_SUCCESS);
634
635 memcpy(texture.map, tex_const, A6XX_TEX_CONST_DWORDS * 4);
636
637 /* patch addresses for layer offset */
638 *(uint64_t*) (texture.map + 4) += offset_base;
639 uint64_t ubwc_addr = (texture.map[7] | (uint64_t) texture.map[8] << 32) + offset_ubwc;
640 texture.map[7] = ubwc_addr;
641 texture.map[8] = ubwc_addr >> 32;
642
643 texture.map[A6XX_TEX_CONST_DWORDS + 0] =
644 A6XX_TEX_SAMP_0_XY_MAG(tu6_tex_filter(filter, false)) |
645 A6XX_TEX_SAMP_0_XY_MIN(tu6_tex_filter(filter, false)) |
646 A6XX_TEX_SAMP_0_WRAP_S(A6XX_TEX_CLAMP_TO_EDGE) |
647 A6XX_TEX_SAMP_0_WRAP_T(A6XX_TEX_CLAMP_TO_EDGE) |
648 A6XX_TEX_SAMP_0_WRAP_R(A6XX_TEX_CLAMP_TO_EDGE) |
649 0x60000; /* XXX used by blob, doesn't seem necessary */
650 texture.map[A6XX_TEX_CONST_DWORDS + 1] =
651 0x1 | /* XXX used by blob, doesn't seem necessary */
652 A6XX_TEX_SAMP_1_UNNORM_COORDS |
653 A6XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR;
654 texture.map[A6XX_TEX_CONST_DWORDS + 2] = 0;
655 texture.map[A6XX_TEX_CONST_DWORDS + 3] = 0;
656
657 tu_cs_emit_pkt7(cs, CP_LOAD_STATE6_FRAG, 3);
658 tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(0) |
659 CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
660 CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
661 CP_LOAD_STATE6_0_STATE_BLOCK(SB6_FS_TEX) |
662 CP_LOAD_STATE6_0_NUM_UNIT(1));
663 tu_cs_emit_qw(cs, texture.iova + A6XX_TEX_CONST_DWORDS * 4);
664
665 tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_TEX_SAMP_LO, 2);
666 tu_cs_emit_qw(cs, texture.iova + A6XX_TEX_CONST_DWORDS * 4);
667
668 tu_cs_emit_pkt7(cs, CP_LOAD_STATE6_FRAG, 3);
669 tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(0) |
670 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
671 CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
672 CP_LOAD_STATE6_0_STATE_BLOCK(SB6_FS_TEX) |
673 CP_LOAD_STATE6_0_NUM_UNIT(1));
674 tu_cs_emit_qw(cs, texture.iova);
675
676 tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_TEX_CONST_LO, 2);
677 tu_cs_emit_qw(cs, texture.iova);
678
679 tu_cs_emit_regs(cs, A6XX_SP_FS_TEX_COUNT(1));
680 }
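/* Layout note: the single allocation above holds the texture descriptor in
 * its first A6XX_TEX_CONST_DWORDS dwords and the sampler state right after
 * it, which is why the sampler pointers use
 * texture.iova + A6XX_TEX_CONST_DWORDS * 4 while the descriptor pointers use
 * texture.iova itself.
 */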
681
682 static void
683 r3d_src(struct tu_cmd_buffer *cmd,
684 struct tu_cs *cs,
685 const struct tu_image_view *iview,
686 uint32_t layer,
687 VkFilter filter)
688 {
689 r3d_src_common(cmd, cs, iview->descriptor,
690 iview->layer_size * layer,
691 iview->ubwc_layer_size * layer,
692 filter);
693 }
694
695 static void
696 r3d_src_buffer(struct tu_cmd_buffer *cmd,
697 struct tu_cs *cs,
698 VkFormat vk_format,
699 uint64_t va, uint32_t pitch,
700 uint32_t width, uint32_t height)
701 {
702 uint32_t desc[A6XX_TEX_CONST_DWORDS];
703
704 struct tu_native_format format = tu6_format_texture(vk_format, TILE6_LINEAR);
705
706 desc[0] =
707 COND(vk_format_is_srgb(vk_format), A6XX_TEX_CONST_0_SRGB) |
708 A6XX_TEX_CONST_0_FMT(format.fmt) |
709 A6XX_TEX_CONST_0_SWAP(format.swap) |
710 A6XX_TEX_CONST_0_SWIZ_X(A6XX_TEX_X) |
711 // XXX to swizzle into .w for stencil buffer_to_image
712 A6XX_TEX_CONST_0_SWIZ_Y(vk_format == VK_FORMAT_R8_UNORM ? A6XX_TEX_X : A6XX_TEX_Y) |
713 A6XX_TEX_CONST_0_SWIZ_Z(vk_format == VK_FORMAT_R8_UNORM ? A6XX_TEX_X : A6XX_TEX_Z) |
714 A6XX_TEX_CONST_0_SWIZ_W(vk_format == VK_FORMAT_R8_UNORM ? A6XX_TEX_X : A6XX_TEX_W);
715 desc[1] = A6XX_TEX_CONST_1_WIDTH(width) | A6XX_TEX_CONST_1_HEIGHT(height);
716 desc[2] =
717 A6XX_TEX_CONST_2_PITCH(pitch) |
718 A6XX_TEX_CONST_2_TYPE(A6XX_TEX_2D);
719 desc[3] = 0;
720 desc[4] = va;
721 desc[5] = va >> 32;
722 for (uint32_t i = 6; i < A6XX_TEX_CONST_DWORDS; i++)
723 desc[i] = 0;
724
725 r3d_src_common(cmd, cs, desc, 0, 0, VK_FILTER_NEAREST);
726 }
727
728 static void
729 r3d_dst(struct tu_cs *cs, const struct tu_image_view *iview, uint32_t layer)
730 {
731 tu6_emit_msaa(cs, iview->image->samples); /* TODO: move to setup */
732
733 tu_cs_emit_pkt4(cs, REG_A6XX_RB_MRT_BUF_INFO(0), 6);
734 tu_cs_emit(cs, iview->RB_MRT_BUF_INFO);
735 tu_cs_image_ref(cs, iview, layer);
736 tu_cs_emit(cs, 0);
737
738 tu_cs_emit_pkt4(cs, REG_A6XX_RB_MRT_FLAG_BUFFER(0), 3);
739 tu_cs_image_flag_ref(cs, iview, layer);
740
741 tu_cs_emit_regs(cs, A6XX_RB_RENDER_CNTL(.flag_mrts = iview->ubwc_enabled));
742 }
743
744 static void
745 r3d_dst_buffer(struct tu_cs *cs, VkFormat vk_format, uint64_t va, uint32_t pitch)
746 {
747 struct tu_native_format format = tu6_format_color(vk_format, TILE6_LINEAR);
748
749 tu6_emit_msaa(cs, 1); /* TODO: move to setup */
750
751 tu_cs_emit_regs(cs,
752 A6XX_RB_MRT_BUF_INFO(0, .color_format = format.fmt, .color_swap = format.swap),
753 A6XX_RB_MRT_PITCH(0, pitch),
754 A6XX_RB_MRT_ARRAY_PITCH(0, 0),
755 A6XX_RB_MRT_BASE_LO(0, (uint32_t) va),
756 A6XX_RB_MRT_BASE_HI(0, va >> 32),
757 A6XX_RB_MRT_BASE_GMEM(0, 0));
758
759 tu_cs_emit_regs(cs, A6XX_RB_RENDER_CNTL());
760 }
761
762 static void
763 r3d_setup(struct tu_cmd_buffer *cmd,
764 struct tu_cs *cs,
765 VkFormat vk_format,
766 enum a6xx_rotation rotation,
767 bool clear,
768 uint8_t mask)
769 {
770 if (!cmd->state.pass) {
771 tu_emit_cache_flush_ccu(cmd, cs, TU_CMD_CCU_SYSMEM);
772 tu6_emit_window_scissor(cs, 0, 0, 0x7fff, 0x7fff);
773 }
774
775 tu_cs_emit_regs(cs, A6XX_GRAS_BIN_CONTROL(.dword = 0xc00000));
776 tu_cs_emit_regs(cs, A6XX_RB_BIN_CONTROL(.dword = 0xc00000));
777
778 r3d_common(cmd, cs, !clear, clear ? 1 : 0, false);
779
780 tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_OUTPUT_CNTL0, 2);
781 tu_cs_emit(cs, A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID(0xfc) |
782 A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID(0xfc) |
783 0xfc000000);
784 tu_cs_emit(cs, A6XX_SP_FS_OUTPUT_CNTL1_MRT(1));
785
786 tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_OUTPUT_REG(0), 1);
787 tu_cs_emit(cs, A6XX_SP_FS_OUTPUT_REG_REGID(0));
788
789 tu_cs_emit_regs(cs,
790 A6XX_RB_FS_OUTPUT_CNTL0(),
791 A6XX_RB_FS_OUTPUT_CNTL1(.mrt = 1));
792
793 tu_cs_emit_regs(cs, A6XX_SP_BLEND_CNTL());
794 tu_cs_emit_regs(cs, A6XX_RB_BLEND_CNTL(.sample_mask = 0xffff));
795 tu_cs_emit_regs(cs, A6XX_RB_ALPHA_CONTROL());
796
797 tu_cs_emit_regs(cs, A6XX_RB_DEPTH_PLANE_CNTL());
798 tu_cs_emit_regs(cs, A6XX_RB_DEPTH_CNTL());
799 tu_cs_emit_regs(cs, A6XX_GRAS_SU_DEPTH_PLANE_CNTL());
800 tu_cs_emit_regs(cs, A6XX_RB_STENCIL_CONTROL());
801 tu_cs_emit_regs(cs, A6XX_RB_STENCILMASK());
802 tu_cs_emit_regs(cs, A6XX_RB_STENCILWRMASK());
803 tu_cs_emit_regs(cs, A6XX_RB_STENCILREF());
804
805 tu_cs_emit_regs(cs, A6XX_RB_RENDER_COMPONENTS(.rt0 = 0xf));
806 tu_cs_emit_regs(cs, A6XX_SP_FS_RENDER_COMPONENTS(.rt0 = 0xf));
807
808 tu_cs_emit_regs(cs, A6XX_SP_FS_MRT_REG(0,
809 .color_format = tu6_base_format(vk_format),
810 .color_sint = vk_format_is_sint(vk_format),
811 .color_uint = vk_format_is_uint(vk_format)));
812
813 tu_cs_emit_regs(cs, A6XX_RB_MRT_CONTROL(0, .component_enable = mask));
814 tu_cs_emit_regs(cs, A6XX_RB_SRGB_CNTL(vk_format_is_srgb(vk_format)));
815 tu_cs_emit_regs(cs, A6XX_SP_SRGB_CNTL(vk_format_is_srgb(vk_format)));
816 }
817
818 static void
819 r3d_run(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
820 {
821 tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 3);
822 tu_cs_emit(cs, CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(DI_PT_RECTLIST) |
823 CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_AUTO_INDEX) |
824 CP_DRAW_INDX_OFFSET_0_VIS_CULL(IGNORE_VISIBILITY));
825 tu_cs_emit(cs, 1); /* instance count */
826 tu_cs_emit(cs, 2); /* vertex count */
827 }
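/* Note: with DI_PT_RECTLIST the hardware expands the two opposite corners
 * emitted by the blit VS into a screen-aligned rectangle, which is why a
 * vertex count of 2 is enough here.
 */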
828
829 /* blit ops - common interface for 2d/shader paths */
830
831 struct blit_ops {
832 void (*coords)(struct tu_cs *cs,
833 const VkOffset2D *dst,
834 const VkOffset2D *src,
835 const VkExtent2D *extent);
836 void (*clear_value)(struct tu_cs *cs, VkFormat format, const VkClearValue *val);
837 void (*src)(
838 struct tu_cmd_buffer *cmd,
839 struct tu_cs *cs,
840 const struct tu_image_view *iview,
841 uint32_t layer,
842 VkFilter filter);
843 void (*src_buffer)(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
844 VkFormat vk_format,
845 uint64_t va, uint32_t pitch,
846 uint32_t width, uint32_t height);
847 void (*dst)(struct tu_cs *cs, const struct tu_image_view *iview, uint32_t layer);
848 void (*dst_buffer)(struct tu_cs *cs, VkFormat vk_format, uint64_t va, uint32_t pitch);
849 void (*setup)(struct tu_cmd_buffer *cmd,
850 struct tu_cs *cs,
851 VkFormat vk_format,
852 enum a6xx_rotation rotation,
853 bool clear,
854 uint8_t mask);
855 void (*run)(struct tu_cmd_buffer *cmd, struct tu_cs *cs);
856 };
857
858 static const struct blit_ops r2d_ops = {
859 .coords = r2d_coords,
860 .clear_value = r2d_clear_value,
861 .src = r2d_src,
862 .src_buffer = r2d_src_buffer,
863 .dst = r2d_dst,
864 .dst_buffer = r2d_dst_buffer,
865 .setup = r2d_setup,
866 .run = r2d_run,
867 };
868
869 static const struct blit_ops r3d_ops = {
870 .coords = r3d_coords,
871 .clear_value = r3d_clear_value,
872 .src = r3d_src,
873 .src_buffer = r3d_src_buffer,
874 .dst = r3d_dst,
875 .dst_buffer = r3d_dst_buffer,
876 .setup = r3d_setup,
877 .run = r3d_run,
878 };
879
880 /* passthrough: set 2D coords from 3D offsets/extent */
881 static void
882 coords(const struct blit_ops *ops,
883 struct tu_cs *cs,
884 const VkOffset3D *dst,
885 const VkOffset3D *src,
886 const VkExtent3D *extent)
887 {
888 ops->coords(cs, (const VkOffset2D*) dst, (const VkOffset2D*) src, (const VkExtent2D*) extent);
889 }
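/* Note: the casts above rely on VkOffset3D/VkExtent3D having x, y and width,
 * height as their leading members, so reinterpreting them as the 2D variants
 * is safe; the z/depth members are simply ignored.
 */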
890
891 static void
892 tu_image_view_blit2(struct tu_image_view *iview,
893 struct tu_image *image,
894 VkFormat format,
895 const VkImageSubresourceLayers *subres,
896 uint32_t layer,
897 bool stencil_read)
898 {
899 VkImageAspectFlags aspect_mask = subres->aspectMask;
900
901 /* always use the AS_R8G8B8A8 format for these */
902 if (format == VK_FORMAT_D24_UNORM_S8_UINT ||
903 format == VK_FORMAT_X8_D24_UNORM_PACK32) {
904 aspect_mask = VK_IMAGE_ASPECT_COLOR_BIT;
905 }
906
907 tu_image_view_init(iview, &(VkImageViewCreateInfo) {
908 .image = tu_image_to_handle(image),
909 .viewType = VK_IMAGE_VIEW_TYPE_2D,
910 .format = format,
911 /* image_to_buffer from d24s8 with stencil aspect mask writes out to r8 */
912 .components.r = stencil_read ? VK_COMPONENT_SWIZZLE_A : VK_COMPONENT_SWIZZLE_R,
913 .subresourceRange = {
914 .aspectMask = aspect_mask,
915 .baseMipLevel = subres->mipLevel,
916 .levelCount = 1,
917 .baseArrayLayer = subres->baseArrayLayer + layer,
918 .layerCount = 1,
919 },
920 });
921 }
922
923 static void
924 tu_image_view_blit(struct tu_image_view *iview,
925 struct tu_image *image,
926 const VkImageSubresourceLayers *subres,
927 uint32_t layer)
928 {
929 tu_image_view_blit2(iview, image, image->vk_format, subres, layer, false);
930 }
931
932 static void
933 tu6_blit_image(struct tu_cmd_buffer *cmd,
934 struct tu_image *src_image,
935 struct tu_image *dst_image,
936 const VkImageBlit *info,
937 VkFilter filter)
938 {
939 const struct blit_ops *ops = &r2d_ops;
940 struct tu_cs *cs = &cmd->cs;
941 uint32_t layers;
942
943 /* the 2D blit can't mirror from coordinates alone, so mirroring is done via rotation */
944 static const enum a6xx_rotation rotate[2][2] = {
945 {ROTATE_0, ROTATE_HFLIP},
946 {ROTATE_VFLIP, ROTATE_180},
947 };
948
949 bool mirror_x = (info->srcOffsets[1].x < info->srcOffsets[0].x) !=
950 (info->dstOffsets[1].x < info->dstOffsets[0].x);
951 bool mirror_y = (info->srcOffsets[1].y < info->srcOffsets[0].y) !=
952 (info->dstOffsets[1].y < info->dstOffsets[0].y);
953 bool mirror_z = (info->srcOffsets[1].z < info->srcOffsets[0].z) !=
954 (info->dstOffsets[1].z < info->dstOffsets[0].z);
955
956 if (mirror_z) {
957 tu_finishme("blit z mirror\n");
958 return;
959 }
960
961 if (info->srcOffsets[1].z - info->srcOffsets[0].z !=
962 info->dstOffsets[1].z - info->dstOffsets[0].z) {
963 tu_finishme("blit z filter\n");
964 return;
965 }
966
967 layers = info->srcOffsets[1].z - info->srcOffsets[0].z;
968 if (info->dstSubresource.layerCount > 1) {
969 assert(layers <= 1);
970 layers = info->dstSubresource.layerCount;
971 }
972
973 uint8_t mask = 0xf;
974 if (dst_image->vk_format == VK_FORMAT_D24_UNORM_S8_UINT) {
975 assert(info->srcSubresource.aspectMask == info->dstSubresource.aspectMask);
976 if (info->dstSubresource.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT)
977 mask = 0x7;
978 if (info->dstSubresource.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT)
979 mask = 0x8;
980 }
981
982 /* BC1_RGB_* formats need to have their last component overridden with 1
983 * when sampling, which is normally handled with the texture descriptor
984 * swizzle. The 2d path can't handle that, so use the 3d path.
985 *
986 * TODO: we could use RB_2D_BLIT_CNTL::MASK to make these formats work with
987 * the 2d path.
988 */
989
990 if (dst_image->samples > 1 ||
991 src_image->vk_format == VK_FORMAT_BC1_RGB_UNORM_BLOCK ||
992 src_image->vk_format == VK_FORMAT_BC1_RGB_SRGB_BLOCK ||
993 filter == VK_FILTER_CUBIC_EXT)
994 ops = &r3d_ops;
995
996 /* TODO: shader path fails some of blit_image.all_formats.generate_mipmaps.* tests,
997 * figure out why (should be able to pass all tests with only shader path)
998 */
999
1000 ops->setup(cmd, cs, dst_image->vk_format, rotate[mirror_y][mirror_x], false, mask);
1001
1002 if (ops == &r3d_ops) {
1003 r3d_coords_raw(cs, false, (float[]) {
1004 info->dstOffsets[0].x, info->dstOffsets[0].y,
1005 info->srcOffsets[0].x, info->srcOffsets[0].y,
1006 info->dstOffsets[1].x, info->dstOffsets[1].y,
1007 info->srcOffsets[1].x, info->srcOffsets[1].y
1008 });
1009 } else {
1010 tu_cs_emit_regs(cs,
1011 A6XX_GRAS_2D_DST_TL(.x = MIN2(info->dstOffsets[0].x, info->dstOffsets[1].x),
1012 .y = MIN2(info->dstOffsets[0].y, info->dstOffsets[1].y)),
1013 A6XX_GRAS_2D_DST_BR(.x = MAX2(info->dstOffsets[0].x, info->dstOffsets[1].x) - 1,
1014 .y = MAX2(info->dstOffsets[0].y, info->dstOffsets[1].y) - 1));
1015 tu_cs_emit_regs(cs,
1016 A6XX_GRAS_2D_SRC_TL_X(.x = MIN2(info->srcOffsets[0].x, info->srcOffsets[1].x)),
1017 A6XX_GRAS_2D_SRC_BR_X(.x = MAX2(info->srcOffsets[0].x, info->srcOffsets[1].x) - 1),
1018 A6XX_GRAS_2D_SRC_TL_Y(.y = MIN2(info->srcOffsets[0].y, info->srcOffsets[1].y)),
1019 A6XX_GRAS_2D_SRC_BR_Y(.y = MAX2(info->srcOffsets[0].y, info->srcOffsets[1].y) - 1));
1020 }
1021
1022 struct tu_image_view dst, src;
1023 tu_image_view_blit(&dst, dst_image, &info->dstSubresource, info->dstOffsets[0].z);
1024 tu_image_view_blit(&src, src_image, &info->srcSubresource, info->srcOffsets[0].z);
1025
1026 for (uint32_t i = 0; i < layers; i++) {
1027 ops->dst(cs, &dst, i);
1028 ops->src(cmd, cs, &src, i, filter);
1029 ops->run(cmd, cs);
1030 }
1031 }
1032
1033 void
1034 tu_CmdBlitImage(VkCommandBuffer commandBuffer,
1035 VkImage srcImage,
1036 VkImageLayout srcImageLayout,
1037 VkImage dstImage,
1038 VkImageLayout dstImageLayout,
1039 uint32_t regionCount,
1040 const VkImageBlit *pRegions,
1041 VkFilter filter)
1042
1043 {
1044 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
1045 TU_FROM_HANDLE(tu_image, src_image, srcImage);
1046 TU_FROM_HANDLE(tu_image, dst_image, dstImage);
1047
1048 tu_bo_list_add(&cmd->bo_list, src_image->bo, MSM_SUBMIT_BO_READ);
1049 tu_bo_list_add(&cmd->bo_list, dst_image->bo, MSM_SUBMIT_BO_WRITE);
1050
1051 for (uint32_t i = 0; i < regionCount; ++i)
1052 tu6_blit_image(cmd, src_image, dst_image, pRegions + i, filter);
1053 }
1054
1055 static VkFormat
1056 copy_format(VkFormat format)
1057 {
1058 switch (vk_format_get_blocksize(format)) {
1059 case 1: return VK_FORMAT_R8_UINT;
1060 case 2: return VK_FORMAT_R16_UINT;
1061 case 4: return VK_FORMAT_R32_UINT;
1062 case 8: return VK_FORMAT_R32G32_UINT;
1063 case 12:return VK_FORMAT_R32G32B32_UINT;
1064 case 16:return VK_FORMAT_R32G32B32A32_UINT;
1065 default:
1066 unreachable("unhandled format size");
1067 }
1068 }
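/* For illustration: a 4-byte format such as VK_FORMAT_R8G8B8A8_UNORM is
 * copied as VK_FORMAT_R32_UINT, and an 8-byte BC1 block as
 * VK_FORMAT_R32G32_UINT; the copy only cares about the block size, not the
 * channel layout.
 */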
1069
1070 static void
1071 copy_compressed(VkFormat format,
1072 VkOffset3D *offset,
1073 VkExtent3D *extent,
1074 uint32_t *width,
1075 uint32_t *height)
1076 {
1077 if (!vk_format_is_compressed(format))
1078 return;
1079
1080 uint32_t block_width = vk_format_get_blockwidth(format);
1081 uint32_t block_height = vk_format_get_blockheight(format);
1082
1083 offset->x /= block_width;
1084 offset->y /= block_height;
1085
1086 if (extent) {
1087 extent->width = DIV_ROUND_UP(extent->width, block_width);
1088 extent->height = DIV_ROUND_UP(extent->height, block_height);
1089 }
1090 if (width)
1091 *width = DIV_ROUND_UP(*width, block_width);
1092 if (height)
1093 *height = DIV_ROUND_UP(*height, block_height);
1094 }
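/* Example (illustrative): for a BC1 image with 4x4 blocks, a copy at
 * imageOffset (8, 4) with extent 13x16 becomes offset (2, 1) and extent 4x4
 * in block units, since DIV_ROUND_UP(13, 4) == 4.
 */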
1095
1096 static void
1097 tu_copy_buffer_to_image(struct tu_cmd_buffer *cmd,
1098 struct tu_buffer *src_buffer,
1099 struct tu_image *dst_image,
1100 const VkBufferImageCopy *info)
1101 {
1102 struct tu_cs *cs = &cmd->cs;
1103 uint32_t layers = MAX2(info->imageExtent.depth, info->imageSubresource.layerCount);
1104 VkFormat dst_format = dst_image->vk_format;
1105 VkFormat src_format = dst_image->vk_format;
1106 const struct blit_ops *ops = &r2d_ops;
1107
1108 uint8_t mask = 0xf;
1109
1110 if (dst_image->vk_format == VK_FORMAT_D24_UNORM_S8_UINT) {
1111 switch (info->imageSubresource.aspectMask) {
1112 case VK_IMAGE_ASPECT_STENCIL_BIT:
1113 src_format = VK_FORMAT_R8_UNORM; /* changes how src buffer is interpreted */
1114 mask = 0x8;
1115 ops = &r3d_ops;
1116 break;
1117 case VK_IMAGE_ASPECT_DEPTH_BIT:
1118 mask = 0x7;
1119 break;
1120 }
1121 }
1122
1123 VkOffset3D offset = info->imageOffset;
1124 VkExtent3D extent = info->imageExtent;
1125 uint32_t src_width = info->bufferRowLength ?: extent.width;
1126 uint32_t src_height = info->bufferImageHeight ?: extent.height;
1127
1128 if (dst_format == VK_FORMAT_E5B9G9R9_UFLOAT_PACK32 || vk_format_is_compressed(src_format)) {
1129 assert(src_format == dst_format);
1130 copy_compressed(dst_format, &offset, &extent, &src_width, &src_height);
1131 src_format = dst_format = copy_format(dst_format);
1132 }
1133
1134 uint32_t pitch = src_width * vk_format_get_blocksize(src_format);
1135 uint32_t layer_size = src_height * pitch;
1136
1137 ops->setup(cmd, cs, dst_format, ROTATE_0, false, mask);
1138
1139 struct tu_image_view dst;
1140 tu_image_view_blit2(&dst, dst_image, dst_format, &info->imageSubresource, offset.z, false);
1141
1142 for (uint32_t i = 0; i < layers; i++) {
1143 ops->dst(cs, &dst, i);
1144
1145 uint64_t src_va = tu_buffer_iova(src_buffer) + info->bufferOffset + layer_size * i;
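      /* Assumption (not stated here): the blitter appears to want a 64-byte
       * aligned base address and pitch, so unaligned copies fall back to one
       * blit per row below, rounding the address down to 64 bytes and
       * compensating with an x offset in texels.
       */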
1146 if ((src_va & 63) || (pitch & 63)) {
1147 for (uint32_t y = 0; y < extent.height; y++) {
1148 uint32_t x = (src_va & 63) / vk_format_get_blocksize(src_format);
1149 ops->src_buffer(cmd, cs, src_format, src_va & ~63, pitch,
1150 x + extent.width, 1);
1151 ops->coords(cs, &(VkOffset2D){offset.x, offset.y + y}, &(VkOffset2D){x},
1152 &(VkExtent2D) {extent.width, 1});
1153 ops->run(cmd, cs);
1154 src_va += pitch;
1155 }
1156 } else {
1157 ops->src_buffer(cmd, cs, src_format, src_va, pitch, extent.width, extent.height);
1158 coords(ops, cs, &offset, &(VkOffset3D){}, &extent);
1159 ops->run(cmd, cs);
1160 }
1161 }
1162 }
1163
1164 void
1165 tu_CmdCopyBufferToImage(VkCommandBuffer commandBuffer,
1166 VkBuffer srcBuffer,
1167 VkImage dstImage,
1168 VkImageLayout dstImageLayout,
1169 uint32_t regionCount,
1170 const VkBufferImageCopy *pRegions)
1171 {
1172 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
1173 TU_FROM_HANDLE(tu_image, dst_image, dstImage);
1174 TU_FROM_HANDLE(tu_buffer, src_buffer, srcBuffer);
1175
1176 tu_bo_list_add(&cmd->bo_list, src_buffer->bo, MSM_SUBMIT_BO_READ);
1177 tu_bo_list_add(&cmd->bo_list, dst_image->bo, MSM_SUBMIT_BO_WRITE);
1178
1179 for (unsigned i = 0; i < regionCount; ++i)
1180 tu_copy_buffer_to_image(cmd, src_buffer, dst_image, pRegions + i);
1181 }
1182
1183 static void
1184 tu_copy_image_to_buffer(struct tu_cmd_buffer *cmd,
1185 struct tu_image *src_image,
1186 struct tu_buffer *dst_buffer,
1187 const VkBufferImageCopy *info)
1188 {
1189 struct tu_cs *cs = &cmd->cs;
1190 uint32_t layers = MAX2(info->imageExtent.depth, info->imageSubresource.layerCount);
1191 VkFormat src_format = src_image->vk_format;
1192 VkFormat dst_format = src_image->vk_format;
1193 bool stencil_read = false;
1194
1195 if (src_image->vk_format == VK_FORMAT_D24_UNORM_S8_UINT &&
1196 info->imageSubresource.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT) {
1197 dst_format = VK_FORMAT_R8_UNORM;
1198 stencil_read = true;
1199 }
1200
1201 const struct blit_ops *ops = stencil_read ? &r3d_ops : &r2d_ops;
1202 VkOffset3D offset = info->imageOffset;
1203 VkExtent3D extent = info->imageExtent;
1204 uint32_t dst_width = info->bufferRowLength ?: extent.width;
1205 uint32_t dst_height = info->bufferImageHeight ?: extent.height;
1206
1207 if (dst_format == VK_FORMAT_E5B9G9R9_UFLOAT_PACK32 || vk_format_is_compressed(dst_format)) {
1208 assert(src_format == dst_format);
1209 copy_compressed(dst_format, &offset, &extent, &dst_width, &dst_height);
1210 src_format = dst_format = copy_format(dst_format);
1211 }
1212
1213 uint32_t pitch = dst_width * vk_format_get_blocksize(dst_format);
1214 uint32_t layer_size = pitch * dst_height;
1215
1216 ops->setup(cmd, cs, dst_format, ROTATE_0, false, 0xf);
1217
1218 struct tu_image_view src;
1219 tu_image_view_blit2(&src, src_image, src_format, &info->imageSubresource, offset.z, stencil_read);
1220
1221 for (uint32_t i = 0; i < layers; i++) {
1222 ops->src(cmd, cs, &src, i, VK_FILTER_NEAREST);
1223
1224 uint64_t dst_va = tu_buffer_iova(dst_buffer) + info->bufferOffset + layer_size * i;
1225 if ((dst_va & 63) || (pitch & 63)) {
1226 for (uint32_t y = 0; y < extent.height; y++) {
1227 uint32_t x = (dst_va & 63) / vk_format_get_blocksize(dst_format);
1228 ops->dst_buffer(cs, dst_format, dst_va & ~63, 0);
1229 ops->coords(cs, &(VkOffset2D) {x}, &(VkOffset2D){offset.x, offset.y + y},
1230 &(VkExtent2D) {extent.width, 1});
1231 ops->run(cmd, cs);
1232 dst_va += pitch;
1233 }
1234 } else {
1235 ops->dst_buffer(cs, dst_format, dst_va, pitch);
1236 coords(ops, cs, &(VkOffset3D) {0, 0}, &offset, &extent);
1237 ops->run(cmd, cs);
1238 }
1239 }
1240 }
1241
1242 void
1243 tu_CmdCopyImageToBuffer(VkCommandBuffer commandBuffer,
1244 VkImage srcImage,
1245 VkImageLayout srcImageLayout,
1246 VkBuffer dstBuffer,
1247 uint32_t regionCount,
1248 const VkBufferImageCopy *pRegions)
1249 {
1250 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
1251 TU_FROM_HANDLE(tu_image, src_image, srcImage);
1252 TU_FROM_HANDLE(tu_buffer, dst_buffer, dstBuffer);
1253
1254 tu_bo_list_add(&cmd->bo_list, src_image->bo, MSM_SUBMIT_BO_READ);
1255 tu_bo_list_add(&cmd->bo_list, dst_buffer->bo, MSM_SUBMIT_BO_WRITE);
1256
1257 for (unsigned i = 0; i < regionCount; ++i)
1258 tu_copy_image_to_buffer(cmd, src_image, dst_buffer, pRegions + i);
1259 }
1260
1261 /* Tiled formats don't support swapping, which means that we can't support
1262 * formats that require a non-WZYX swap like B8G8R8A8 natively. Also, some
1263 * formats like B5G5R5A1 have a separate linear-only format when sampling.
1264 * Currently we fake support for tiled swapped formats and use the unswapped
1265 * format instead, but this means that reinterpreting copies to and from
1266 * swapped formats can't be performed correctly unless we can swizzle the
1267 * components by reinterpreting the other image as the "correct" swapped
1268 * format, i.e. only when the other image is linear.
1269 */
1270
1271 static bool
1272 is_swapped_format(VkFormat format)
1273 {
1274 struct tu_native_format linear = tu6_format_texture(format, TILE6_LINEAR);
1275 struct tu_native_format tiled = tu6_format_texture(format, TILE6_3);
1276 return linear.fmt != tiled.fmt || linear.swap != tiled.swap;
1277 }
1278
1279 /* R8G8_* formats have a different tiling layout than other cpp=2 formats, and
1280 * therefore R8G8 images can't be reinterpreted as non-R8G8 images (and vice
1281 * versa). This should mirror the logic in fdl6_layout.
1282 */
1283 static bool
1284 image_is_r8g8(struct tu_image *image)
1285 {
1286 return image->layout.cpp == 2 &&
1287 vk_format_get_nr_components(image->vk_format) == 2;
1288 }
1289
1290 static void
1291 tu_copy_image_to_image(struct tu_cmd_buffer *cmd,
1292 struct tu_image *src_image,
1293 struct tu_image *dst_image,
1294 const VkImageCopy *info)
1295 {
1296 const struct blit_ops *ops = &r2d_ops;
1297 struct tu_cs *cs = &cmd->cs;
1298
1299 uint8_t mask = 0xf;
1300 if (dst_image->vk_format == VK_FORMAT_D24_UNORM_S8_UINT) {
1301 if (info->dstSubresource.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT)
1302 mask = 0x7;
1303 if (info->dstSubresource.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT)
1304 mask = 0x8;
1305 }
1306
1307 if (dst_image->samples > 1)
1308 ops = &r3d_ops;
1309
1310 assert(info->srcSubresource.aspectMask == info->dstSubresource.aspectMask);
1311
1312 VkFormat format = VK_FORMAT_UNDEFINED;
1313 VkOffset3D src_offset = info->srcOffset;
1314 VkOffset3D dst_offset = info->dstOffset;
1315 VkExtent3D extent = info->extent;
1316
1317 /* From the Vulkan 1.2.140 spec, section 19.3 "Copying Data Between
1318 * Images":
1319 *
1320 * When copying between compressed and uncompressed formats the extent
1321 * members represent the texel dimensions of the source image and not
1322 * the destination. When copying from a compressed image to an
1323 * uncompressed image the image texel dimensions written to the
1324 * uncompressed image will be source extent divided by the compressed
1325 * texel block dimensions. When copying from an uncompressed image to a
1326 * compressed image the image texel dimensions written to the compressed
1327 * image will be the source extent multiplied by the compressed texel
1328 * block dimensions.
1329 *
1330 * This means we only have to adjust the extent if the source image is
1331 * compressed.
1332 */
1333 copy_compressed(src_image->vk_format, &src_offset, &extent, NULL, NULL);
1334 copy_compressed(dst_image->vk_format, &dst_offset, NULL, NULL, NULL);
1335
1336 VkFormat dst_format = vk_format_is_compressed(dst_image->vk_format) ?
1337 copy_format(dst_image->vk_format) : dst_image->vk_format;
1338 VkFormat src_format = vk_format_is_compressed(src_image->vk_format) ?
1339 copy_format(src_image->vk_format) : src_image->vk_format;
1340
1341 bool use_staging_blit = false;
1342
1343 if (src_format == dst_format) {
1344 /* Images that share a format can always be copied directly because it's
1345 * the same as a blit.
1346 */
1347 format = src_format;
1348 } else if (!src_image->layout.tile_mode) {
1349 /* If an image is linear, we can always safely reinterpret it with the
1350 * other image's format and then do a regular blit.
1351 */
1352 format = dst_format;
1353 } else if (!dst_image->layout.tile_mode) {
1354 format = src_format;
1355 } else if (image_is_r8g8(src_image) != image_is_r8g8(dst_image)) {
1356 /* We can't currently copy r8g8 images to/from other cpp=2 images,
1357 * due to the different tile layout.
1358 */
1359 use_staging_blit = true;
1360 } else if (is_swapped_format(src_format) ||
1361 is_swapped_format(dst_format)) {
1362 /* If either format has a non-identity swap, then we can't copy
1363 * to/from it.
1364 */
1365 use_staging_blit = true;
1366 } else if (!src_image->layout.ubwc) {
1367 format = dst_format;
1368 } else if (!dst_image->layout.ubwc) {
1369 format = src_format;
1370 } else {
1371 /* Both formats use UBWC and so neither can be reinterpreted.
1372 * TODO: We could do an in-place decompression of the dst instead.
1373 */
1374 use_staging_blit = true;
1375 }
1376
1377 struct tu_image_view dst, src;
1378
1379 if (use_staging_blit) {
1380 tu_image_view_blit2(&dst, dst_image, dst_format, &info->dstSubresource, dst_offset.z, false);
1381 tu_image_view_blit2(&src, src_image, src_format, &info->srcSubresource, src_offset.z, false);
1382
1383 struct tu_image staging_image = {
1384 .vk_format = src_format,
1385 .type = src_image->type,
1386 .tiling = VK_IMAGE_TILING_LINEAR,
1387 .extent = extent,
1388 .level_count = 1,
1389 .layer_count = info->srcSubresource.layerCount,
1390 .samples = src_image->samples,
1391 .bo_offset = 0,
1392 };
1393
1394 VkImageSubresourceLayers staging_subresource = {
1395 .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
1396 .mipLevel = 0,
1397 .baseArrayLayer = 0,
1398 .layerCount = info->srcSubresource.layerCount,
1399 };
1400
1401 VkOffset3D staging_offset = { 0 };
1402
1403 staging_image.layout.tile_mode = TILE6_LINEAR;
1404 staging_image.layout.ubwc = false;
1405
1406 fdl6_layout(&staging_image.layout,
1407 vk_format_to_pipe_format(staging_image.vk_format),
1408 staging_image.samples,
1409 staging_image.extent.width,
1410 staging_image.extent.height,
1411 staging_image.extent.depth,
1412 staging_image.level_count,
1413 staging_image.layer_count,
1414 staging_image.type == VK_IMAGE_TYPE_3D,
1415 NULL);
1416
1417 VkResult result = tu_get_scratch_bo(cmd->device,
1418 staging_image.layout.size,
1419 &staging_image.bo);
1420 if (result != VK_SUCCESS) {
1421 cmd->record_result = result;
1422 return;
1423 }
1424
1425 tu_bo_list_add(&cmd->bo_list, staging_image.bo,
1426 MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
1427
1428 struct tu_image_view staging;
1429 tu_image_view_blit2(&staging, &staging_image, src_format,
1430 &staging_subresource, 0, false);
1431
1432 ops->setup(cmd, cs, src_format, ROTATE_0, false, mask);
1433 coords(ops, cs, &staging_offset, &src_offset, &extent);
1434
1435 for (uint32_t i = 0; i < info->extent.depth; i++) {
1436 ops->src(cmd, cs, &src, i, VK_FILTER_NEAREST);
1437 ops->dst(cs, &staging, i);
1438 ops->run(cmd, cs);
1439 }
1440
1441 /* When executed by the user there has to be a pipeline barrier here,
1442 * but since we're doing it manually we'll have to flush ourselves.
1443 */
1444 tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS);
1445 tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);
1446
1447 tu_image_view_blit2(&staging, &staging_image, dst_format,
1448 &staging_subresource, 0, false);
1449
1450 ops->setup(cmd, cs, dst_format, ROTATE_0, false, mask);
1451 coords(ops, cs, &dst_offset, &staging_offset, &extent);
1452
1453 for (uint32_t i = 0; i < info->extent.depth; i++) {
1454 ops->src(cmd, cs, &staging, i, VK_FILTER_NEAREST);
1455 ops->dst(cs, &dst, i);
1456 ops->run(cmd, cs);
1457 }
1458 } else {
1459 tu_image_view_blit2(&dst, dst_image, format, &info->dstSubresource, dst_offset.z, false);
1460 tu_image_view_blit2(&src, src_image, format, &info->srcSubresource, src_offset.z, false);
1461
1462 ops->setup(cmd, cs, format, ROTATE_0, false, mask);
1463 coords(ops, cs, &dst_offset, &src_offset, &extent);
1464
1465 for (uint32_t i = 0; i < info->extent.depth; i++) {
1466 ops->src(cmd, cs, &src, i, VK_FILTER_NEAREST);
1467 ops->dst(cs, &dst, i);
1468 ops->run(cmd, cs);
1469 }
1470 }
1471 }
1472
1473 void
1474 tu_CmdCopyImage(VkCommandBuffer commandBuffer,
1475 VkImage srcImage,
1476 VkImageLayout srcImageLayout,
1477 VkImage destImage,
1478 VkImageLayout destImageLayout,
1479 uint32_t regionCount,
1480 const VkImageCopy *pRegions)
1481 {
1482 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
1483 TU_FROM_HANDLE(tu_image, src_image, srcImage);
1484 TU_FROM_HANDLE(tu_image, dst_image, destImage);
1485
1486 tu_bo_list_add(&cmd->bo_list, src_image->bo, MSM_SUBMIT_BO_READ);
1487 tu_bo_list_add(&cmd->bo_list, dst_image->bo, MSM_SUBMIT_BO_WRITE);
1488
1489 for (uint32_t i = 0; i < regionCount; ++i)
1490 tu_copy_image_to_image(cmd, src_image, dst_image, pRegions + i);
1491 }
1492
1493 static void
1494 copy_buffer(struct tu_cmd_buffer *cmd,
1495 uint64_t dst_va,
1496 uint64_t src_va,
1497 uint64_t size,
1498 uint32_t block_size)
1499 {
1500 const struct blit_ops *ops = &r2d_ops;
1501 struct tu_cs *cs = &cmd->cs;
1502 VkFormat format = block_size == 4 ? VK_FORMAT_R32_UINT : VK_FORMAT_R8_UNORM;
1503 uint64_t blocks = size / block_size;
1504
1505 ops->setup(cmd, cs, format, ROTATE_0, false, 0xf);
1506
1507 while (blocks) {
1508 uint32_t src_x = (src_va & 63) / block_size;
1509 uint32_t dst_x = (dst_va & 63) / block_size;
1510 uint32_t width = MIN2(MIN2(blocks, 0x4000 - src_x), 0x4000 - dst_x);
1511
1512 ops->src_buffer(cmd, cs, format, src_va & ~63, 0, src_x + width, 1);
1513 ops->dst_buffer( cs, format, dst_va & ~63, 0);
1514 ops->coords(cs, &(VkOffset2D) {dst_x}, &(VkOffset2D) {src_x}, &(VkExtent2D) {width, 1});
1515 ops->run(cmd, cs);
1516
1517 src_va += width * block_size;
1518 dst_va += width * block_size;
1519 blocks -= width;
1520 }
1521 }
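/* Illustrative note: copy_buffer() above treats the copy as a 1-texel-high
 * blit of R8_UNORM (block_size 1) or R32_UINT (block_size 4) texels; each
 * pass handles at most 0x4000 texels minus the sub-64-byte misalignment of
 * either address, which is assumed to be the maximum blit width.
 */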
1522
1523 void
1524 tu_CmdCopyBuffer(VkCommandBuffer commandBuffer,
1525 VkBuffer srcBuffer,
1526 VkBuffer dstBuffer,
1527 uint32_t regionCount,
1528 const VkBufferCopy *pRegions)
1529 {
1530 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
1531 TU_FROM_HANDLE(tu_buffer, src_buffer, srcBuffer);
1532 TU_FROM_HANDLE(tu_buffer, dst_buffer, dstBuffer);
1533
1534 tu_bo_list_add(&cmd->bo_list, src_buffer->bo, MSM_SUBMIT_BO_READ);
1535 tu_bo_list_add(&cmd->bo_list, dst_buffer->bo, MSM_SUBMIT_BO_WRITE);
1536
1537 for (unsigned i = 0; i < regionCount; ++i) {
1538 copy_buffer(cmd,
1539 tu_buffer_iova(dst_buffer) + pRegions[i].dstOffset,
1540 tu_buffer_iova(src_buffer) + pRegions[i].srcOffset,
1541 pRegions[i].size, 1);
1542 }
1543 }
1544
1545 void
1546 tu_CmdUpdateBuffer(VkCommandBuffer commandBuffer,
1547 VkBuffer dstBuffer,
1548 VkDeviceSize dstOffset,
1549 VkDeviceSize dataSize,
1550 const void *pData)
1551 {
1552 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
1553 TU_FROM_HANDLE(tu_buffer, buffer, dstBuffer);
1554
1555 tu_bo_list_add(&cmd->bo_list, buffer->bo, MSM_SUBMIT_BO_WRITE);
1556
1557 struct tu_cs_memory tmp;
1558 VkResult result = tu_cs_alloc(&cmd->sub_cs, DIV_ROUND_UP(dataSize, 64), 64, &tmp);
1559 if (result != VK_SUCCESS) {
1560 cmd->record_result = result;
1561 return;
1562 }
1563
1564 memcpy(tmp.map, pData, dataSize);
1565 copy_buffer(cmd, tu_buffer_iova(buffer) + dstOffset, tmp.iova, dataSize, 4);
1566 }
1567
1568 void
1569 tu_CmdFillBuffer(VkCommandBuffer commandBuffer,
1570 VkBuffer dstBuffer,
1571 VkDeviceSize dstOffset,
1572 VkDeviceSize fillSize,
1573 uint32_t data)
1574 {
1575 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
1576 TU_FROM_HANDLE(tu_buffer, buffer, dstBuffer);
1577 const struct blit_ops *ops = &r2d_ops;
1578 struct tu_cs *cs = &cmd->cs;
1579
1580 tu_bo_list_add(&cmd->bo_list, buffer->bo, MSM_SUBMIT_BO_WRITE);
1581
1582 if (fillSize == VK_WHOLE_SIZE)
1583 fillSize = buffer->size - dstOffset;
1584
1585 uint64_t dst_va = tu_buffer_iova(buffer) + dstOffset;
1586 uint32_t blocks = fillSize / 4;
1587
1588 ops->setup(cmd, cs, VK_FORMAT_R32_UINT, ROTATE_0, true, 0xf);
1589 ops->clear_value(cs, VK_FORMAT_R32_UINT, &(VkClearValue){.color = {.uint32[0] = data}});
1590
1591 while (blocks) {
1592 uint32_t dst_x = (dst_va & 63) / 4;
1593 uint32_t width = MIN2(blocks, 0x4000 - dst_x);
1594
1595 ops->dst_buffer(cs, VK_FORMAT_R32_UINT, dst_va & ~63, 0);
1596 ops->coords(cs, &(VkOffset2D) {dst_x}, NULL, &(VkExtent2D) {width, 1});
1597 ops->run(cmd, cs);
1598
1599 dst_va += width * 4;
1600 blocks -= width;
1601 }
1602 }
1603
1604 void
1605 tu_CmdResolveImage(VkCommandBuffer commandBuffer,
1606 VkImage srcImage,
1607 VkImageLayout srcImageLayout,
1608 VkImage dstImage,
1609 VkImageLayout dstImageLayout,
1610 uint32_t regionCount,
1611 const VkImageResolve *pRegions)
1612 {
1613 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
1614 TU_FROM_HANDLE(tu_image, src_image, srcImage);
1615 TU_FROM_HANDLE(tu_image, dst_image, dstImage);
1616 const struct blit_ops *ops = &r2d_ops;
1617 struct tu_cs *cs = &cmd->cs;
1618
1619 tu_bo_list_add(&cmd->bo_list, src_image->bo, MSM_SUBMIT_BO_READ);
1620 tu_bo_list_add(&cmd->bo_list, dst_image->bo, MSM_SUBMIT_BO_WRITE);
1621
1622 ops->setup(cmd, cs, dst_image->vk_format, ROTATE_0, false, 0xf);
1623
1624 for (uint32_t i = 0; i < regionCount; ++i) {
1625 const VkImageResolve *info = &pRegions[i];
1626 uint32_t layers = MAX2(info->extent.depth, info->dstSubresource.layerCount);
1627
1628 assert(info->srcSubresource.layerCount == info->dstSubresource.layerCount);
1629 /* TODO: aspect masks possible ? */
1630
1631 coords(ops, cs, &info->dstOffset, &info->srcOffset, &info->extent);
1632
1633 struct tu_image_view dst, src;
1634 tu_image_view_blit(&dst, dst_image, &info->dstSubresource, info->dstOffset.z);
1635 tu_image_view_blit(&src, src_image, &info->srcSubresource, info->srcOffset.z);
1636
1637 for (uint32_t i = 0; i < layers; i++) {
1638 ops->src(cmd, cs, &src, i, VK_FILTER_NEAREST);
1639 ops->dst(cs, &dst, i);
1640 ops->run(cmd, cs);
1641 }
1642 }
1643 }
1644
1645 void
1646 tu_resolve_sysmem(struct tu_cmd_buffer *cmd,
1647 struct tu_cs *cs,
1648 struct tu_image_view *src,
1649 struct tu_image_view *dst,
1650 uint32_t layers,
1651 const VkRect2D *rect)
1652 {
1653 const struct blit_ops *ops = &r2d_ops;
1654
1655 tu_bo_list_add(&cmd->bo_list, src->image->bo, MSM_SUBMIT_BO_READ);
1656 tu_bo_list_add(&cmd->bo_list, dst->image->bo, MSM_SUBMIT_BO_WRITE);
1657
1658 assert(src->image->vk_format == dst->image->vk_format);
1659
1660 ops->setup(cmd, cs, dst->image->vk_format, ROTATE_0, false, 0xf);
1661 ops->coords(cs, &rect->offset, &rect->offset, &rect->extent);
1662
1663 for (uint32_t i = 0; i < layers; i++) {
1664 ops->src(cmd, cs, src, i, VK_FILTER_NEAREST);
1665 ops->dst(cs, dst, i);
1666 ops->run(cmd, cs);
1667 }
1668 }
1669
1670 static void
1671 clear_image(struct tu_cmd_buffer *cmd,
1672 struct tu_image *image,
1673 const VkClearValue *clear_value,
1674 const VkImageSubresourceRange *range)
1675 {
1676 uint32_t level_count = tu_get_levelCount(image, range);
1677 uint32_t layer_count = tu_get_layerCount(image, range);
1678 struct tu_cs *cs = &cmd->cs;
1679 VkFormat format = image->vk_format;
1680 if (format == VK_FORMAT_E5B9G9R9_UFLOAT_PACK32)
1681 format = VK_FORMAT_R32_UINT;
1682
1683 if (image->type == VK_IMAGE_TYPE_3D) {
1684 assert(layer_count == 1);
1685 assert(range->baseArrayLayer == 0);
1686 }
1687
1688 uint8_t mask = 0xf;
1689 if (image->vk_format == VK_FORMAT_D24_UNORM_S8_UINT) {
1690 mask = 0;
1691 if (range->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT)
1692 mask |= 0x7;
1693 if (range->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT)
1694 mask |= 0x8;
1695 }
1696
1697 const struct blit_ops *ops = image->samples > 1 ? &r3d_ops : &r2d_ops;
1698
1699 ops->setup(cmd, cs, format, ROTATE_0, true, mask);
1700 ops->clear_value(cs, image->vk_format, clear_value);
1701
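/* Walk each requested mip level; for 3D images the per-level layer count is
 * the minified depth, since depth slices are cleared individually.
 */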
1702 for (unsigned j = 0; j < level_count; j++) {
1703 if (image->type == VK_IMAGE_TYPE_3D)
1704 layer_count = u_minify(image->extent.depth, range->baseMipLevel + j);
1705
1706 ops->coords(cs, &(VkOffset2D){}, NULL, &(VkExtent2D) {
1707 u_minify(image->extent.width, range->baseMipLevel + j),
1708 u_minify(image->extent.height, range->baseMipLevel + j)
1709 });
1710
1711 struct tu_image_view dst;
1712 tu_image_view_blit2(&dst, image, format, &(VkImageSubresourceLayers) {
1713 .aspectMask = range->aspectMask,
1714 .mipLevel = range->baseMipLevel + j,
1715 .baseArrayLayer = range->baseArrayLayer,
1716 .layerCount = 1,
1717 }, 0, false);
1718
1719 for (uint32_t i = 0; i < layer_count; i++) {
1720 ops->dst(cs, &dst, i);
1721 ops->run(cmd, cs);
1722 }
1723 }
1724 }
1725
1726 void
1727 tu_CmdClearColorImage(VkCommandBuffer commandBuffer,
1728 VkImage image_h,
1729 VkImageLayout imageLayout,
1730 const VkClearColorValue *pColor,
1731 uint32_t rangeCount,
1732 const VkImageSubresourceRange *pRanges)
1733 {
1734 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
1735 TU_FROM_HANDLE(tu_image, image, image_h);
1736
1737 tu_bo_list_add(&cmd->bo_list, image->bo, MSM_SUBMIT_BO_WRITE);
1738
1739 for (unsigned i = 0; i < rangeCount; i++)
1740 clear_image(cmd, image, (const VkClearValue*) pColor, pRanges + i);
1741 }
1742
1743 void
1744 tu_CmdClearDepthStencilImage(VkCommandBuffer commandBuffer,
1745 VkImage image_h,
1746 VkImageLayout imageLayout,
1747 const VkClearDepthStencilValue *pDepthStencil,
1748 uint32_t rangeCount,
1749 const VkImageSubresourceRange *pRanges)
1750 {
1751 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
1752 TU_FROM_HANDLE(tu_image, image, image_h);
1753
1754 tu_bo_list_add(&cmd->bo_list, image->bo, MSM_SUBMIT_BO_WRITE);
1755
1756 for (unsigned i = 0; i < rangeCount; i++)
1757 clear_image(cmd, image, (const VkClearValue*) pDepthStencil, pRanges + i);
1758 }
1759
1760 static void
1761 tu_clear_sysmem_attachments_2d(struct tu_cmd_buffer *cmd,
1762 uint32_t attachment_count,
1763 const VkClearAttachment *attachments,
1764 uint32_t rect_count,
1765 const VkClearRect *rects)
1766 {
1767 const struct tu_subpass *subpass = cmd->state.subpass;
1768 /* note: we cannot use the shader path here; there is a special shader
1769  * path in tu_clear_sysmem_attachments() instead
1770  */
1771 const struct blit_ops *ops = &r2d_ops;
1772 struct tu_cs *cs = &cmd->draw_cs;
1773
1774 for (uint32_t j = 0; j < attachment_count; j++) {
1775 /* The vulkan spec, section 17.2 "Clearing Images Inside a Render
1776 * Pass Instance" says that:
1777 *
1778 * Unlike other clear commands, vkCmdClearAttachments executes as
1779 * a drawing command, rather than a transfer command, with writes
1780 * performed by it executing in rasterization order. Clears to
1781 * color attachments are executed as color attachment writes, by
1782 * the VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT stage.
1783 * Clears to depth/stencil attachments are executed as depth
1784 * writes and writes by the
1785 * VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT and
1786 * VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT stages.
1787 *
1788 * However, the 2d path here is executed the same way as a
1789 * transfer command, using the CCU color cache exclusively with
1790 * a special depth-as-color format for depth clears. This means that
1791 * we can't rely on the normal pipeline barrier mechanism here, and
1792 * have to manually flush whenever using a different cache domain
1793 * from what the 3d path would've used. This happens when we clear
1794 * depth/stencil, since normally depth attachments use CCU depth, but
1795 * we clear it using a special depth-as-color format. Since the clear
1796 * potentially uses a different attachment state we also need to
1797 * invalidate color beforehand and flush it afterwards.
1798 */
1799
1800 uint32_t a;
1801 if (attachments[j].aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
1802 a = subpass->color_attachments[attachments[j].colorAttachment].attachment;
1803 tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS);
1804 } else {
1805 a = subpass->depth_stencil_attachment.attachment;
1806 tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS);
1807 tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS);
1808 tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_COLOR);
1809 }
1810
1811 if (a == VK_ATTACHMENT_UNUSED)
1812 continue;
1813
1814 uint8_t mask = 0xf;
1815 if (cmd->state.pass->attachments[a].format == VK_FORMAT_D24_UNORM_S8_UINT) {
1816 if (!(attachments[j].aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT))
1817 mask &= ~0x7;
1818 if (!(attachments[j].aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT))
1819 mask &= ~0x8;
1820 }
1821
1822 const struct tu_image_view *iview =
1823 cmd->state.framebuffer->attachments[a].attachment;
1824
1825 ops->setup(cmd, cs, iview->image->vk_format, ROTATE_0, true, mask);
1826 ops->clear_value(cs, iview->image->vk_format, &attachments[j].clearValue);
1827
1828 /* Wait for the flushes we triggered manually to complete */
1829 tu_cs_emit_wfi(cs);
1830
1831 for (uint32_t i = 0; i < rect_count; i++) {
1832 ops->coords(cs, &rects[i].rect.offset, NULL, &rects[i].rect.extent);
1833 for (uint32_t layer = 0; layer < rects[i].layerCount; layer++) {
1834 ops->dst(cs, iview, rects[i].baseArrayLayer + layer);
1835 ops->run(cmd, cs);
1836 }
1837 }
1838
1839 if (attachments[j].aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
1840 tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS);
1841 tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_COLOR);
1842 } else {
1843 /* sync color into depth */
1844 tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS);
1845 tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_DEPTH);
1846 }
1847 }
1848 }
1849
1850 static void
1851 tu_clear_sysmem_attachments(struct tu_cmd_buffer *cmd,
1852 uint32_t attachment_count,
1853 const VkClearAttachment *attachments,
1854 uint32_t rect_count,
1855 const VkClearRect *rects)
1856 {
1857 /* the shader path here is special: it avoids changing MRT/etc state */
1858 const struct tu_render_pass *pass = cmd->state.pass;
1859 const struct tu_subpass *subpass = cmd->state.subpass;
1860 const uint32_t mrt_count = subpass->color_count;
1861 struct tu_cs *cs = &cmd->draw_cs;
1862 uint32_t clear_value[MAX_RTS][4];
1863 float z_clear_val = 0.0f;
1864 uint8_t s_clear_val = 0;
1865 uint32_t clear_rts = 0, clear_components = 0, num_rts = 0, b;
1866 bool z_clear = false;
1867 bool s_clear = false;
1868 bool layered_clear = false;
1869 uint32_t max_samples = 1;
1870
1871 for (uint32_t i = 0; i < attachment_count; i++) {
1872 uint32_t a;
1873 if (attachments[i].aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
1874 uint32_t c = attachments[i].colorAttachment;
1875 a = subpass->color_attachments[c].attachment;
1876 if (a == VK_ATTACHMENT_UNUSED)
1877 continue;
1878
1879 clear_rts |= 1 << c;
1880 clear_components |= 0xf << (c * 4);
1881 memcpy(clear_value[c], &attachments[i].clearValue, 4 * sizeof(uint32_t));
1882 } else {
1883 a = subpass->depth_stencil_attachment.attachment;
1884 if (a == VK_ATTACHMENT_UNUSED)
1885 continue;
1886
1887 if (attachments[i].aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) {
1888 z_clear = true;
1889 z_clear_val = attachments[i].clearValue.depthStencil.depth;
1890 }
1891
1892 if (attachments[i].aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) {
1893 s_clear = true;
1894 s_clear_val = attachments[i].clearValue.depthStencil.stencil & 0xff;
1895 }
1896 }
1897
1898 max_samples = MAX2(max_samples, pass->attachments[a].samples);
1899 }
1900
1901 /* Prefer the 2D path for clears. It can't handle MSAA or separate
1902  * depth/stencil, and it needs a known framebuffer.
1903  */
1904 if (max_samples == 1 && cmd->state.framebuffer) {
1905 tu_clear_sysmem_attachments_2d(cmd, attachment_count, attachments, rect_count, rects);
1906 return;
1907 }
1908
1909 /* This clear path behaves like a draw, needs the same flush as tu_draw */
1910 tu_emit_cache_flush_renderpass(cmd, cs);
1911
1912 /* Disable all draw states so they don't interfere.
1913  * TODO: use and re-use draw states for this path.
1914  * We have to disable the draw states individually to preserve the
1915  * input attachment states, because a secondary command buffer
1916  * won't be able to restore them.
1917  */
1918 tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * (TU_DRAW_STATE_COUNT - 2));
1919 for (uint32_t i = 0; i < TU_DRAW_STATE_COUNT; i++) {
1920 if (i == TU_DRAW_STATE_INPUT_ATTACHMENTS_GMEM ||
1921 i == TU_DRAW_STATE_INPUT_ATTACHMENTS_SYSMEM)
1922 continue;
1923 tu_cs_emit(cs, CP_SET_DRAW_STATE__0_GROUP_ID(i) |
1924 CP_SET_DRAW_STATE__0_DISABLE);
1925 tu_cs_emit_qw(cs, 0);
1926 }
1927 cmd->state.dirty |= TU_CMD_DIRTY_DRAW_STATE;
1928
1929 tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_OUTPUT_CNTL0, 2);
1930 tu_cs_emit(cs, A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID(0xfc) |
1931 A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID(0xfc) |
1932 0xfc000000);
1933 tu_cs_emit(cs, A6XX_SP_FS_OUTPUT_CNTL1_MRT(mrt_count));
1934
1935 tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_OUTPUT_REG(0), mrt_count);
1936 for (uint32_t i = 0; i < mrt_count; i++) {
1937 if (clear_rts & (1 << i))
1938 tu_cs_emit(cs, A6XX_SP_FS_OUTPUT_REG_REGID(num_rts++ * 4));
1939 else
1940 tu_cs_emit(cs, 0);
1941 }
1942
1943 for (uint32_t i = 0; i < rect_count; i++) {
1944 if (rects[i].baseArrayLayer || rects[i].layerCount > 1)
1945 layered_clear = true;
1946 }
1947
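/* Layered clears take a different draw path: the target layer is passed in
 * the vertex data (uif(baseArrayLayer + layer) below) and each layer is
 * drawn as a single point with the GS enabled, instead of the usual
 * r3d_run() quad.
 */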
1948 r3d_common(cmd, cs, false, num_rts, layered_clear);
1949
1950 tu_cs_emit_regs(cs,
1951 A6XX_SP_FS_RENDER_COMPONENTS(.dword = clear_components));
1952 tu_cs_emit_regs(cs,
1953 A6XX_RB_RENDER_COMPONENTS(.dword = clear_components));
1954
1955 tu_cs_emit_regs(cs,
1956 A6XX_RB_FS_OUTPUT_CNTL0(),
1957 A6XX_RB_FS_OUTPUT_CNTL1(.mrt = mrt_count));
1958
1959 tu_cs_emit_regs(cs, A6XX_SP_BLEND_CNTL());
1960 tu_cs_emit_regs(cs, A6XX_RB_BLEND_CNTL(.independent_blend = 1, .sample_mask = 0xffff));
1961 tu_cs_emit_regs(cs, A6XX_RB_ALPHA_CONTROL());
1962 for (uint32_t i = 0; i < mrt_count; i++) {
1963 tu_cs_emit_regs(cs, A6XX_RB_MRT_CONTROL(i,
1964 .component_enable = COND(clear_rts & (1 << i), 0xf)));
1965 }
1966
1967 tu_cs_emit_regs(cs, A6XX_RB_DEPTH_PLANE_CNTL());
1968 tu_cs_emit_regs(cs, A6XX_RB_DEPTH_CNTL(
1969 .z_enable = z_clear,
1970 .z_write_enable = z_clear,
1971 .zfunc = FUNC_ALWAYS));
1972 tu_cs_emit_regs(cs, A6XX_GRAS_SU_DEPTH_PLANE_CNTL());
1973 tu_cs_emit_regs(cs, A6XX_RB_STENCIL_CONTROL(
1974 .stencil_enable = s_clear,
1975 .func = FUNC_ALWAYS,
1976 .zpass = STENCIL_REPLACE));
1977 tu_cs_emit_regs(cs, A6XX_RB_STENCILMASK(.mask = 0xff));
1978 tu_cs_emit_regs(cs, A6XX_RB_STENCILWRMASK(.wrmask = 0xff));
1979 tu_cs_emit_regs(cs, A6XX_RB_STENCILREF(.ref = s_clear_val));
1980
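/* Upload the clear colors as FS constants (ST6_CONSTANTS into
 * SB6_FS_SHADER), one vec4 per cleared render target, in for_each_bit
 * order (matching the packed RT outputs programmed above).
 */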
1981 tu_cs_emit_pkt7(cs, CP_LOAD_STATE6_FRAG, 3 + 4 * num_rts);
1982 tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(0) |
1983 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
1984 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
1985 CP_LOAD_STATE6_0_STATE_BLOCK(SB6_FS_SHADER) |
1986 CP_LOAD_STATE6_0_NUM_UNIT(num_rts));
1987 tu_cs_emit(cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
1988 tu_cs_emit(cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
1989 for_each_bit(b, clear_rts)
1990 tu_cs_emit_array(cs, clear_value[b], 4);
1991
1992 for (uint32_t i = 0; i < rect_count; i++) {
1993 for (uint32_t layer = 0; layer < rects[i].layerCount; layer++) {
1994 r3d_coords_raw(cs, layered_clear, (float[]) {
1995 rects[i].rect.offset.x, rects[i].rect.offset.y,
1996 z_clear_val, uif(rects[i].baseArrayLayer + layer),
1997 rects[i].rect.offset.x + rects[i].rect.extent.width,
1998 rects[i].rect.offset.y + rects[i].rect.extent.height,
1999 z_clear_val, 1.0f,
2000 });
2001
2002 if (layered_clear) {
2003 tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 3);
2004 tu_cs_emit(cs, CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(DI_PT_POINTLIST) |
2005 CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_AUTO_INDEX) |
2006 CP_DRAW_INDX_OFFSET_0_VIS_CULL(IGNORE_VISIBILITY) |
2007 CP_DRAW_INDX_OFFSET_0_GS_ENABLE);
2008 tu_cs_emit(cs, 1); /* instance count */
2009 tu_cs_emit(cs, 1); /* vertex count */
2010 } else {
2011 r3d_run(cmd, cs);
2012 }
2013 }
2014 }
2015 }
2016
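/* Pack a clear value into the raw in-GMEM representation of the format.
 * Depth/stencil values are packed by hand; color values go through
 * util_format_pack_rgba() after a swizzle (see the comment below).
 */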
2017 static void
2018 pack_gmem_clear_value(const VkClearValue *val, VkFormat format, uint32_t clear_value[4])
2019 {
2020 enum pipe_format pformat = vk_format_to_pipe_format(format);
2021
2022 switch (format) {
2023 case VK_FORMAT_X8_D24_UNORM_PACK32:
2024 case VK_FORMAT_D24_UNORM_S8_UINT:
2025 clear_value[0] = tu_pack_float32_for_unorm(val->depthStencil.depth, 24) |
2026 val->depthStencil.stencil << 24;
2027 return;
2028 case VK_FORMAT_D16_UNORM:
2029 clear_value[0] = tu_pack_float32_for_unorm(val->depthStencil.depth, 16);
2030 return;
2031 case VK_FORMAT_D32_SFLOAT:
2032 clear_value[0] = fui(val->depthStencil.depth);
2033 return;
2034 case VK_FORMAT_S8_UINT:
2035 clear_value[0] = val->depthStencil.stencil;
2036 return;
2037 /* These formats use a different base format when tiled; the same
2038  * packing format can be used for both because GMEM is always in WZYX order.
2039  */
2040 case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
2041 case VK_FORMAT_B5G5R5A1_UNORM_PACK16:
2042 pformat = PIPE_FORMAT_B5G5R5A1_UNORM;
2043 default:
2044 break;
2045 }
2046
2047 VkClearColorValue color;
2048
2049 /*
2050  * GMEM is tiled and wants the components in WZYX order;
2051  * apply the swizzle to the color before packing to counteract the
2052  * deswizzling applied by the packing functions.
2053  */
2054 pipe_swizzle_4f(color.float32, val->color.float32,
2055 util_format_description(pformat)->swizzle);
2056
2057 util_format_pack_rgba(pformat, clear_value, color.uint32, 1);
2058 }
2059
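/* Emit a GMEM clear of a single attachment using the CP_EVENT_WRITE::BLIT
 * path; the blit scissor is expected to be programmed by the caller.
 */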
2060 static void
2061 tu_emit_clear_gmem_attachment(struct tu_cmd_buffer *cmd,
2062 struct tu_cs *cs,
2063 uint32_t attachment,
2064 uint8_t component_mask,
2065 const VkClearValue *value)
2066 {
2067 VkFormat vk_format = cmd->state.pass->attachments[attachment].format;
2068 /* note: component_mask is 0x7 for depth and 0x8 for stencil
2069 * because D24S8 is cleared with AS_R8G8B8A8 format
2070 */
2071
2072 tu_cs_emit_pkt4(cs, REG_A6XX_RB_BLIT_DST_INFO, 1);
2073 tu_cs_emit(cs, A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(tu6_base_format(vk_format)));
2074
2075 tu_cs_emit_pkt4(cs, REG_A6XX_RB_BLIT_INFO, 1);
2076 tu_cs_emit(cs, A6XX_RB_BLIT_INFO_GMEM | A6XX_RB_BLIT_INFO_CLEAR_MASK(component_mask));
2077
2078 tu_cs_emit_pkt4(cs, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
2079 tu_cs_emit(cs, cmd->state.pass->attachments[attachment].gmem_offset);
2080
2081 tu_cs_emit_pkt4(cs, REG_A6XX_RB_UNKNOWN_88D0, 1);
2082 tu_cs_emit(cs, 0);
2083
2084 uint32_t clear_vals[4] = {};
2085 pack_gmem_clear_value(value, vk_format, clear_vals);
2086
2087 tu_cs_emit_pkt4(cs, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0, 4);
2088 tu_cs_emit_array(cs, clear_vals, 4);
2089
2090 tu6_emit_event_write(cmd, cs, BLIT);
2091 }
2092
2093 static void
2094 tu_clear_gmem_attachments(struct tu_cmd_buffer *cmd,
2095 uint32_t attachment_count,
2096 const VkClearAttachment *attachments,
2097 uint32_t rect_count,
2098 const VkClearRect *rects)
2099 {
2100 const struct tu_subpass *subpass = cmd->state.subpass;
2101 struct tu_cs *cs = &cmd->draw_cs;
2102
2103 /* TODO: swap the loops for smaller cmdstream */
2104 for (unsigned i = 0; i < rect_count; i++) {
2105 unsigned x1 = rects[i].rect.offset.x;
2106 unsigned y1 = rects[i].rect.offset.y;
2107 unsigned x2 = x1 + rects[i].rect.extent.width - 1;
2108 unsigned y2 = y1 + rects[i].rect.extent.height - 1;
2109
2110 tu_cs_emit_pkt4(cs, REG_A6XX_RB_BLIT_SCISSOR_TL, 2);
2111 tu_cs_emit(cs, A6XX_RB_BLIT_SCISSOR_TL_X(x1) | A6XX_RB_BLIT_SCISSOR_TL_Y(y1));
2112 tu_cs_emit(cs, A6XX_RB_BLIT_SCISSOR_BR_X(x2) | A6XX_RB_BLIT_SCISSOR_BR_Y(y2));
2113
2114 for (unsigned j = 0; j < attachment_count; j++) {
2115 uint32_t a;
2116 if (attachments[j].aspectMask & VK_IMAGE_ASPECT_COLOR_BIT)
2117 a = subpass->color_attachments[attachments[j].colorAttachment].attachment;
2118 else
2119 a = subpass->depth_stencil_attachment.attachment;
2120
2121 if (a == VK_ATTACHMENT_UNUSED)
2122 continue;
2123
2124 unsigned clear_mask = 0xf;
2125 if (cmd->state.pass->attachments[a].format == VK_FORMAT_D24_UNORM_S8_UINT) {
2126 if (!(attachments[j].aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT))
2127 clear_mask &= ~0x7;
2128 if (!(attachments[j].aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT))
2129 clear_mask &= ~0x8;
2130 }
2131
2132 tu_emit_clear_gmem_attachment(cmd, cs, a, clear_mask,
2133 &attachments[j].clearValue);
2134 }
2135 }
2136 }
2137
2138 void
2139 tu_CmdClearAttachments(VkCommandBuffer commandBuffer,
2140 uint32_t attachmentCount,
2141 const VkClearAttachment *pAttachments,
2142 uint32_t rectCount,
2143 const VkClearRect *pRects)
2144 {
2145 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2146 struct tu_cs *cs = &cmd->draw_cs;
2147
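/* It is not yet known whether this render pass will execute in GMEM or
 * sysmem mode, so emit both clear variants under conditional execution and
 * let the selected render mode pick one.
 */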
2148 tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_GMEM);
2149 tu_clear_gmem_attachments(cmd, attachmentCount, pAttachments, rectCount, pRects);
2150 tu_cond_exec_end(cs);
2151
2152 tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_SYSMEM);
2153 tu_clear_sysmem_attachments(cmd, attachmentCount, pAttachments, rectCount, pRects);
2154 tu_cond_exec_end(cs);
2155 }
2156
2157 void
2158 tu_clear_sysmem_attachment(struct tu_cmd_buffer *cmd,
2159 struct tu_cs *cs,
2160 uint32_t a,
2161 const VkRenderPassBeginInfo *info)
2162 {
2163 const struct tu_framebuffer *fb = cmd->state.framebuffer;
2164 const struct tu_image_view *iview = fb->attachments[a].attachment;
2165 const struct tu_render_pass_attachment *attachment =
2166 &cmd->state.pass->attachments[a];
2167 uint8_t mask = 0;
2168
2169 if (attachment->clear_mask == VK_IMAGE_ASPECT_COLOR_BIT)
2170 mask = 0xf;
2171 if (attachment->clear_mask & VK_IMAGE_ASPECT_DEPTH_BIT)
2172 mask |= 0x7;
2173 if (attachment->clear_mask & VK_IMAGE_ASPECT_STENCIL_BIT)
2174 mask |= 0x8;
2175
2176 if (!mask)
2177 return;
2178
2179 const struct blit_ops *ops = &r2d_ops;
2180 if (attachment->samples > 1)
2181 ops = &r3d_ops;
2182
2183 ops->setup(cmd, cs, attachment->format, ROTATE_0, true, mask);
2184 ops->coords(cs, &info->renderArea.offset, NULL, &info->renderArea.extent);
2185 ops->clear_value(cs, attachment->format, &info->pClearValues[a]);
2186
2187 /* Wait for any flushes at the beginning of the renderpass to complete */
2188 tu_cs_emit_wfi(cs);
2189
2190 for (uint32_t i = 0; i < fb->layers; i++) {
2191 ops->dst(cs, iview, i);
2192 ops->run(cmd, cs);
2193 }
2194
2195 /* The spec doesn't explicitly say, but presumably the initial renderpass
2196 * clear is considered part of the renderpass, and therefore barriers
2197 * aren't required inside the subpass/renderpass. Therefore we need to
2198 * flush CCU color into CCU depth here, just like with
2199 * vkCmdClearAttachments(). Note that because this only happens at the
2200 * beginning of a renderpass, and renderpass writes are considered
2201 * "incoherent", we shouldn't have to worry about syncing depth into color
2202 * beforehand as depth should already be flushed.
2203 */
2204 if (vk_format_is_depth_or_stencil(attachment->format)) {
2205 tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS);
2206 tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_DEPTH);
2207 } else {
2208 tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS);
2209 tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_COLOR);
2210 }
2211 }
2212
2213 void
2214 tu_clear_gmem_attachment(struct tu_cmd_buffer *cmd,
2215 struct tu_cs *cs,
2216 uint32_t a,
2217 const VkRenderPassBeginInfo *info)
2218 {
2219 const struct tu_render_pass_attachment *attachment =
2220 &cmd->state.pass->attachments[a];
2221 unsigned clear_mask = 0;
2222
2223 if (attachment->clear_mask == VK_IMAGE_ASPECT_COLOR_BIT)
2224 clear_mask = 0xf;
2225 if (attachment->clear_mask & VK_IMAGE_ASPECT_DEPTH_BIT)
2226 clear_mask |= 0x7;
2227 if (attachment->clear_mask & VK_IMAGE_ASPECT_STENCIL_BIT)
2228 clear_mask |= 0x8;
2229
2230 if (!clear_mask)
2231 return;
2232
2233 tu_cs_emit_regs(cs, A6XX_RB_MSAA_CNTL(tu_msaa_samples(attachment->samples)));
2234
2235 tu_emit_clear_gmem_attachment(cmd, cs, a, clear_mask,
2236 &info->pClearValues[a]);
2237 }
2238
2239 static void
2240 tu_emit_blit(struct tu_cmd_buffer *cmd,
2241 struct tu_cs *cs,
2242 const struct tu_image_view *iview,
2243 const struct tu_render_pass_attachment *attachment,
2244 bool resolve)
2245 {
2246 tu_cs_emit_regs(cs,
2247 A6XX_RB_MSAA_CNTL(tu_msaa_samples(attachment->samples)));
2248
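/* For attachment loads (resolve=false) the GMEM and unk0 bits are set and
 * the blit reads sysmem into GMEM; for stores/resolves (resolve=true) they
 * are cleared and the blit writes GMEM back out to sysmem.
 */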
2249 tu_cs_emit_regs(cs, A6XX_RB_BLIT_INFO(
2250 .unk0 = !resolve,
2251 .gmem = !resolve,
2252 /* "integer" bit disables msaa resolve averaging */
2253 .integer = vk_format_is_int(attachment->format)));
2254
2255 tu_cs_emit_pkt4(cs, REG_A6XX_RB_BLIT_DST_INFO, 4);
2256 tu_cs_emit(cs, iview->RB_BLIT_DST_INFO);
2257 tu_cs_image_ref_2d(cs, iview, 0, false);
2258
2259 tu_cs_emit_pkt4(cs, REG_A6XX_RB_BLIT_FLAG_DST_LO, 3);
2260 tu_cs_image_flag_ref(cs, iview, 0);
2261
2262 tu_cs_emit_regs(cs,
2263 A6XX_RB_BLIT_BASE_GMEM(attachment->gmem_offset));
2264
2265 tu6_emit_event_write(cmd, cs, BLIT);
2266 }
2267
2268 static bool
2269 blit_can_resolve(VkFormat format)
2270 {
2271 const struct util_format_description *desc = vk_format_description(format);
2272
2273 /* blit event can only do resolve for simple cases:
2274 * averaging samples as unsigned integers or choosing only one sample
2275 */
2276 if (vk_format_is_snorm(format) || vk_format_is_srgb(format))
2277 return false;
2278
2279 /* Can't do formats with channel sizes larger than 10 bits.
2280  * Note: this includes all float formats.
2281  * Note 2: single-channel integer formats seem OK.
2282  */
2283 if (desc->channel[0].size > 10)
2284 return false;
2285
2286 switch (format) {
2287 /* For unknown reasons the blit event can't MSAA-resolve these formats when
2288  * tiled, likely because they have a different layout from other cpp=2 formats.
2289  */
2290 case VK_FORMAT_R8G8_UNORM:
2291 case VK_FORMAT_R8G8_UINT:
2292 case VK_FORMAT_R8G8_SINT:
2293 /* TODO: this one should be able to work? */
2294 case VK_FORMAT_D24_UNORM_S8_UINT:
2295 return false;
2296 default:
2297 break;
2298 }
2299
2300 return true;
2301 }
2302
2303 void
2304 tu_load_gmem_attachment(struct tu_cmd_buffer *cmd,
2305 struct tu_cs *cs,
2306 uint32_t a,
2307 bool force_load)
2308 {
2309 const struct tu_image_view *iview =
2310 cmd->state.framebuffer->attachments[a].attachment;
2311 const struct tu_render_pass_attachment *attachment =
2312 &cmd->state.pass->attachments[a];
2313
2314 if (attachment->load || force_load)
2315 tu_emit_blit(cmd, cs, iview, attachment, false);
2316 }
2317
2318 void
2319 tu_store_gmem_attachment(struct tu_cmd_buffer *cmd,
2320 struct tu_cs *cs,
2321 uint32_t a,
2322 uint32_t gmem_a)
2323 {
2324 const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
2325 const VkRect2D *render_area = &tiling->render_area;
2326 struct tu_render_pass_attachment *dst = &cmd->state.pass->attachments[a];
2327 struct tu_image_view *iview = cmd->state.framebuffer->attachments[a].attachment;
2328 struct tu_render_pass_attachment *src = &cmd->state.pass->attachments[gmem_a];
2329
2330 if (!dst->store)
2331 return;
2332
2333 uint32_t x1 = render_area->offset.x;
2334 uint32_t y1 = render_area->offset.y;
2335 uint32_t x2 = x1 + render_area->extent.width;
2336 uint32_t y2 = y1 + render_area->extent.height;
2337 /* x2/y2 can be unaligned if equal to the size of the image, since the
2338  * blit will then write into padding space.
2339  * The one exception is linear levels, which don't have the
2340  * required y padding in the layout (except for the last level).
2341  */
2342 bool need_y2_align =
2343 y2 != iview->extent.height || iview->need_y2_align;
2344
2345 bool unaligned =
2346 x1 % GMEM_ALIGN_W || (x2 % GMEM_ALIGN_W && x2 != iview->extent.width) ||
2347 y1 % GMEM_ALIGN_H || (y2 % GMEM_ALIGN_H && need_y2_align);
2348
2349 /* use fast path when render area is aligned, except for unsupported resolve cases */
2350 if (!unaligned && (a == gmem_a || blit_can_resolve(dst->format))) {
2351 tu_emit_blit(cmd, cs, iview, src, true);
2352 return;
2353 }
2354
2355 if (dst->samples > 1) {
2356 /* We would probably need to use the shader path in this case;
2357  * we need a testcase which actually fails because of this.
2358  */
2359 tu_finishme("unaligned store of msaa attachment\n");
2360 return;
2361 }
2362
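/* Slow path: use the 2D blitter with GMEM itself as the source, read as a
 * TILE6_2 texture at gmem_base + gmem_offset, and write the attachment out
 * in its sysmem layout.
 */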
2363 r2d_setup_common(cmd, cs, dst->format, ROTATE_0, false, 0xf, true);
2364 r2d_dst(cs, iview, 0);
2365 r2d_coords(cs, &render_area->offset, &render_area->offset, &render_area->extent);
2366
2367 tu_cs_emit_regs(cs,
2368 A6XX_SP_PS_2D_SRC_INFO(
2369 .color_format = tu6_format_texture(src->format, TILE6_2).fmt,
2370 .tile_mode = TILE6_2,
2371 .srgb = vk_format_is_srgb(src->format),
2372 .samples = tu_msaa_samples(src->samples),
2373 .samples_average = !vk_format_is_int(src->format),
2374 .unk20 = 1,
2375 .unk22 = 1),
2376 /* note: src size does not matter when not scaling */
2377 A6XX_SP_PS_2D_SRC_SIZE( .width = 0x3fff, .height = 0x3fff),
2378 A6XX_SP_PS_2D_SRC_LO(cmd->device->physical_device->gmem_base + src->gmem_offset),
2379 A6XX_SP_PS_2D_SRC_HI(),
2380 A6XX_SP_PS_2D_SRC_PITCH(.pitch = tiling->tile0.extent.width * src->cpp));
2381
2382 /* sync GMEM writes with CACHE. */
2383 tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);
2384
2385 /* Wait for CACHE_INVALIDATE to land */
2386 tu_cs_emit_wfi(cs);
2387
2388 tu_cs_emit_pkt7(cs, CP_BLIT, 1);
2389 tu_cs_emit(cs, CP_BLIT_0_OP(BLIT_OP_SCALE));
2390
2391 /* CP_BLIT writes to the CCU, unlike CP_EVENT_WRITE::BLIT which writes to
2392 * sysmem, and we generally assume that GMEM renderpasses leave their
2393 * results in sysmem, so we need to flush manually here.
2394 */
2395 tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS);
2396 }