turnip: enable VK_FORMAT_S8_UINT as stencil format
src/freedreno/vulkan/tu_clear_blit.c
/*
 * Copyright 2019-2020 Valve Corporation
 * SPDX-License-Identifier: MIT
 *
 * Authors:
 *    Jonathan Marek <jonathan@marek.ca>
 */

#include "tu_private.h"

#include "tu_cs.h"
#include "vk_format.h"

#include "util/format_r11g11b10f.h"
#include "util/format_rgb9e5.h"
#include "util/format_srgb.h"
#include "util/u_half.h"

/* helper functions previously in tu_formats.c */

static uint32_t
tu_pack_mask(int bits)
{
   assert(bits <= 32);
   return (1ull << bits) - 1;
}
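
/* e.g. tu_pack_mask(8) == 0xff; the 1ull keeps the bits == 32 case well
 * defined, since 1u << 32 would be undefined behavior.
 */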

static uint32_t
tu_pack_float32_for_unorm(float val, int bits)
{
   const uint32_t max = tu_pack_mask(bits);
   if (val < 0.0f)
      return 0;
   else if (val > 1.0f)
      return max;
   else
      return _mesa_lroundevenf(val * (float) max);
}

static uint32_t
tu_pack_float32_for_snorm(float val, int bits)
{
   const int32_t max = tu_pack_mask(bits - 1);
   int32_t tmp;
   if (val < -1.0f)
      tmp = -max;
   else if (val > 1.0f)
      tmp = max;
   else
      tmp = _mesa_lroundevenf(val * (float) max);

   return tmp & tu_pack_mask(bits);
}
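
/* negative values come out in two's complement form, e.g. packing -1.0f
 * with bits == 8 gives max = 127, tmp = -127, and -127 & 0xff == 0x81.
 */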

static uint32_t
tu_pack_float32_for_uscaled(float val, int bits)
{
   const uint32_t max = tu_pack_mask(bits);
   if (val < 0.0f)
      return 0;
   else if (val > (float) max)
      return max;
   else
      return (uint32_t) val;
}

static uint32_t
tu_pack_float32_for_sscaled(float val, int bits)
{
   const int32_t max = tu_pack_mask(bits - 1);
   const int32_t min = -max - 1;
   int32_t tmp;
   if (val < (float) min)
      tmp = min;
   else if (val > (float) max)
      tmp = max;
   else
      tmp = (int32_t) val;

   return tmp & tu_pack_mask(bits);
}

static uint32_t
tu_pack_uint32_for_uint(uint32_t val, int bits)
{
   return val & tu_pack_mask(bits);
}

static uint32_t
tu_pack_int32_for_sint(int32_t val, int bits)
{
   return val & tu_pack_mask(bits);
}

static uint32_t
tu_pack_float32_for_sfloat(float val, int bits)
{
   assert(bits == 16 || bits == 32);
   return bits == 16 ? util_float_to_half(val) : fui(val);
}
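
/* e.g. tu_pack_float32_for_sfloat(1.0f, 16) == 0x3c00 (IEEE half) and
 * tu_pack_float32_for_sfloat(1.0f, 32) == 0x3f800000.
 */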

union tu_clear_component_value {
   float float32;
   int32_t int32;
   uint32_t uint32;
};

static uint32_t
tu_pack_clear_component_value(union tu_clear_component_value val,
                              const struct util_format_channel_description *ch)
{
   uint32_t packed;

   switch (ch->type) {
   case UTIL_FORMAT_TYPE_UNSIGNED:
      /* normalized, scaled, or pure integer */
      if (ch->normalized)
         packed = tu_pack_float32_for_unorm(val.float32, ch->size);
      else if (ch->pure_integer)
         packed = tu_pack_uint32_for_uint(val.uint32, ch->size);
      else
         packed = tu_pack_float32_for_uscaled(val.float32, ch->size);
      break;
   case UTIL_FORMAT_TYPE_SIGNED:
      /* normalized, scaled, or pure integer */
      if (ch->normalized)
         packed = tu_pack_float32_for_snorm(val.float32, ch->size);
      else if (ch->pure_integer)
         packed = tu_pack_int32_for_sint(val.int32, ch->size);
      else
         packed = tu_pack_float32_for_sscaled(val.float32, ch->size);
      break;
   case UTIL_FORMAT_TYPE_FLOAT:
      packed = tu_pack_float32_for_sfloat(val.float32, ch->size);
      break;
   default:
      unreachable("unexpected channel type");
      packed = 0;
      break;
   }

   assert((packed & tu_pack_mask(ch->size)) == packed);
   return packed;
}

static const struct util_format_channel_description *
tu_get_format_channel_description(const struct util_format_description *desc,
                                  int comp)
{
   switch (desc->swizzle[comp]) {
   case PIPE_SWIZZLE_X:
      return &desc->channel[0];
   case PIPE_SWIZZLE_Y:
      return &desc->channel[1];
   case PIPE_SWIZZLE_Z:
      return &desc->channel[2];
   case PIPE_SWIZZLE_W:
      return &desc->channel[3];
   default:
      return NULL;
   }
}

static union tu_clear_component_value
tu_get_clear_component_value(const VkClearValue *val, int comp,
                             enum util_format_colorspace colorspace)
{
   assert(comp < 4);

   union tu_clear_component_value tmp;
   switch (colorspace) {
   case UTIL_FORMAT_COLORSPACE_ZS:
      assert(comp < 2);
      if (comp == 0)
         tmp.float32 = val->depthStencil.depth;
      else
         tmp.uint32 = val->depthStencil.stencil;
      break;
   case UTIL_FORMAT_COLORSPACE_SRGB:
      if (comp < 3) {
         tmp.float32 = util_format_linear_to_srgb_float(val->color.float32[comp]);
         break;
      }
      /* fall through: the alpha channel is not sRGB-encoded */
   default:
      assert(comp < 4);
      tmp.uint32 = val->color.uint32[comp];
      break;
   }

   return tmp;
}

/* r2d_ = BLIT_OP_SCALE operations */

static enum a6xx_2d_ifmt
format_to_ifmt(enum a6xx_format fmt)
{
   switch (fmt) {
   case FMT6_A8_UNORM:
   case FMT6_8_UNORM:
   case FMT6_8_SNORM:
   case FMT6_8_8_UNORM:
   case FMT6_8_8_SNORM:
   case FMT6_8_8_8_8_UNORM:
   case FMT6_8_8_8_X8_UNORM:
   case FMT6_8_8_8_8_SNORM:
   case FMT6_4_4_4_4_UNORM:
   case FMT6_5_5_5_1_UNORM:
   case FMT6_5_6_5_UNORM:
   case FMT6_Z24_UNORM_S8_UINT:
   case FMT6_Z24_UNORM_S8_UINT_AS_R8G8B8A8:
      return R2D_UNORM8;

   case FMT6_32_UINT:
   case FMT6_32_SINT:
   case FMT6_32_32_UINT:
   case FMT6_32_32_SINT:
   case FMT6_32_32_32_32_UINT:
   case FMT6_32_32_32_32_SINT:
      return R2D_INT32;

   case FMT6_16_UINT:
   case FMT6_16_SINT:
   case FMT6_16_16_UINT:
   case FMT6_16_16_SINT:
   case FMT6_16_16_16_16_UINT:
   case FMT6_16_16_16_16_SINT:
   case FMT6_10_10_10_2_UINT:
      return R2D_INT16;

   case FMT6_8_UINT:
   case FMT6_8_SINT:
   case FMT6_8_8_UINT:
   case FMT6_8_8_SINT:
   case FMT6_8_8_8_8_UINT:
   case FMT6_8_8_8_8_SINT:
      return R2D_INT8;

   case FMT6_16_UNORM:
   case FMT6_16_SNORM:
   case FMT6_16_16_UNORM:
   case FMT6_16_16_SNORM:
   case FMT6_16_16_16_16_UNORM:
   case FMT6_16_16_16_16_SNORM:
   case FMT6_32_FLOAT:
   case FMT6_32_32_FLOAT:
   case FMT6_32_32_32_32_FLOAT:
      return R2D_FLOAT32;

   case FMT6_16_FLOAT:
   case FMT6_16_16_FLOAT:
   case FMT6_16_16_16_16_FLOAT:
   case FMT6_11_11_10_FLOAT:
   case FMT6_10_10_10_2_UNORM:
   case FMT6_10_10_10_2_UNORM_DEST:
      return R2D_FLOAT16;

   default:
      unreachable("bad format");
      return 0;
   }
}

static void
r2d_coords(struct tu_cs *cs,
           const VkOffset2D *dst,
           const VkOffset2D *src,
           const VkExtent2D *extent)
{
   /* the BR coordinates are inclusive, hence the -1 */
   tu_cs_emit_regs(cs,
                   A6XX_GRAS_2D_DST_TL(.x = dst->x, .y = dst->y),
                   A6XX_GRAS_2D_DST_BR(.x = dst->x + extent->width - 1, .y = dst->y + extent->height - 1));

   if (!src)
      return;

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_2D_SRC_TL_X(.x = src->x),
                   A6XX_GRAS_2D_SRC_BR_X(.x = src->x + extent->width - 1),
                   A6XX_GRAS_2D_SRC_TL_Y(.y = src->y),
                   A6XX_GRAS_2D_SRC_BR_Y(.y = src->y + extent->height - 1));
}

static void
r2d_clear_value(struct tu_cs *cs, VkFormat format, const VkClearValue *val)
{
   uint32_t clear_value[4] = {};

   switch (format) {
   case VK_FORMAT_X8_D24_UNORM_PACK32:
   case VK_FORMAT_D24_UNORM_S8_UINT:
      /* cleared as r8g8b8a8_unorm using special format */
      clear_value[0] = tu_pack_float32_for_unorm(val->depthStencil.depth, 24);
      clear_value[1] = clear_value[0] >> 8;
      clear_value[2] = clear_value[0] >> 16;
      clear_value[3] = val->depthStencil.stencil;
      break;
   case VK_FORMAT_D16_UNORM:
   case VK_FORMAT_D32_SFLOAT:
      /* R2D_FLOAT32 */
      clear_value[0] = fui(val->depthStencil.depth);
      break;
   case VK_FORMAT_S8_UINT:
      clear_value[0] = val->depthStencil.stencil;
      break;
   case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
      /* cleared as UINT32 */
      clear_value[0] = float3_to_rgb9e5(val->color.float32);
      break;
   default:
      assert(!vk_format_is_depth_or_stencil(format));
      const struct util_format_description *desc = vk_format_description(format);
      enum a6xx_2d_ifmt ifmt = format_to_ifmt(tu6_base_format(format));

      assert(desc && (desc->layout == UTIL_FORMAT_LAYOUT_PLAIN ||
                      format == VK_FORMAT_B10G11R11_UFLOAT_PACK32));

      for (unsigned i = 0; i < desc->nr_channels; i++) {
         const struct util_format_channel_description *ch = &desc->channel[i];
         if (ifmt == R2D_UNORM8) {
            float linear = val->color.float32[i];
            if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB && i < 3)
               linear = util_format_linear_to_srgb_float(val->color.float32[i]);

            if (ch->type == UTIL_FORMAT_TYPE_SIGNED)
               clear_value[i] = tu_pack_float32_for_snorm(linear, 8);
            else
               clear_value[i] = tu_pack_float32_for_unorm(linear, 8);
         } else if (ifmt == R2D_FLOAT16) {
            clear_value[i] = util_float_to_half(val->color.float32[i]);
         } else {
            assert(ifmt == R2D_FLOAT32 || ifmt == R2D_INT32 ||
                   ifmt == R2D_INT16 || ifmt == R2D_INT8);
            clear_value[i] = val->color.uint32[i];
         }
      }
      break;
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_2D_SRC_SOLID_C0, 4);
   tu_cs_emit_array(cs, clear_value, 4);
}
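
/* a worked example for the D24S8 case above: depth 1.0 and stencil 0x80
 * pack to 0xffffff, giving clear_value = { 0xffffff, 0xffff, 0xff, 0x80 };
 * presumably the blitter consumes only the low byte of each UNORM8 channel,
 * so the written texel bytes are ff ff ff 80.
 */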

static void
r2d_src(struct tu_cmd_buffer *cmd,
        struct tu_cs *cs,
        const struct tu_image_view *iview,
        uint32_t layer,
        bool linear_filter)
{
   tu_cs_emit_pkt4(cs, REG_A6XX_SP_PS_2D_SRC_INFO, 5);
   tu_cs_emit(cs, iview->SP_PS_2D_SRC_INFO |
              COND(linear_filter, A6XX_SP_PS_2D_SRC_INFO_FILTER));
   tu_cs_emit(cs, iview->SP_PS_2D_SRC_SIZE);
   tu_cs_image_ref_2d(cs, iview, layer, true);

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_PS_2D_SRC_FLAGS_LO, 3);
   tu_cs_image_flag_ref(cs, iview, layer);
}

static void
r2d_src_buffer(struct tu_cmd_buffer *cmd,
               struct tu_cs *cs,
               VkFormat vk_format,
               uint64_t va, uint32_t pitch,
               uint32_t width, uint32_t height)
{
   struct tu_native_format format = tu6_format_texture(vk_format, TILE6_LINEAR);

   tu_cs_emit_regs(cs,
                   A6XX_SP_PS_2D_SRC_INFO(
                      .color_format = format.fmt,
                      .color_swap = format.swap,
                      .srgb = vk_format_is_srgb(vk_format),
                      .unk20 = 1,
                      .unk22 = 1),
                   A6XX_SP_PS_2D_SRC_SIZE(.width = width, .height = height),
                   A6XX_SP_PS_2D_SRC_LO((uint32_t) va),
                   A6XX_SP_PS_2D_SRC_HI(va >> 32),
                   A6XX_SP_PS_2D_SRC_PITCH(.pitch = pitch));
}

static void
r2d_dst(struct tu_cs *cs, const struct tu_image_view *iview, uint32_t layer)
{
   assert(iview->image->samples == 1);

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_2D_DST_INFO, 4);
   tu_cs_emit(cs, iview->RB_2D_DST_INFO);
   tu_cs_image_ref_2d(cs, iview, layer, false);

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_2D_DST_FLAGS_LO, 3);
   tu_cs_image_flag_ref(cs, iview, layer);
}

static void
r2d_dst_buffer(struct tu_cs *cs, VkFormat vk_format, uint64_t va, uint32_t pitch)
{
   struct tu_native_format format = tu6_format_color(vk_format, TILE6_LINEAR);

   tu_cs_emit_regs(cs,
                   A6XX_RB_2D_DST_INFO(
                      .color_format = format.fmt,
                      .color_swap = format.swap,
                      .srgb = vk_format_is_srgb(vk_format)),
                   A6XX_RB_2D_DST_LO((uint32_t) va),
                   A6XX_RB_2D_DST_HI(va >> 32),
                   A6XX_RB_2D_DST_SIZE(.pitch = pitch));
}

static void
r2d_setup_common(struct tu_cmd_buffer *cmd,
                 struct tu_cs *cs,
                 VkFormat vk_format,
                 enum a6xx_rotation rotation,
                 bool clear,
                 uint8_t mask,
                 bool scissor)
{
   enum a6xx_format format = tu6_base_format(vk_format);
   enum a6xx_2d_ifmt ifmt = format_to_ifmt(format);
   uint32_t unknown_8c01 = 0;

   if (format == FMT6_Z24_UNORM_S8_UINT_AS_R8G8B8A8) {
      /* preserve depth channels */
      if (mask == 0x8)
         unknown_8c01 = 0x00084001;
      /* preserve stencil channel */
      if (mask == 0x7)
         unknown_8c01 = 0x08000041;
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_UNKNOWN_8C01, 1);
   tu_cs_emit(cs, unknown_8c01);

   uint32_t blit_cntl = A6XX_RB_2D_BLIT_CNTL(
      .scissor = scissor,
      .rotate = rotation,
      .solid_color = clear,
      .d24s8 = format == FMT6_Z24_UNORM_S8_UINT_AS_R8G8B8A8 && !clear,
      .color_format = format,
      .mask = 0xf,
      .ifmt = vk_format_is_srgb(vk_format) ? R2D_UNORM8_SRGB : ifmt,
      ).value;

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_2D_BLIT_CNTL, 1);
   tu_cs_emit(cs, blit_cntl);

   tu_cs_emit_pkt4(cs, REG_A6XX_GRAS_2D_BLIT_CNTL, 1);
   tu_cs_emit(cs, blit_cntl);

   if (format == FMT6_10_10_10_2_UNORM_DEST)
      format = FMT6_16_16_16_16_FLOAT;

   tu_cs_emit_regs(cs, A6XX_SP_2D_SRC_FORMAT(
                      .sint = vk_format_is_sint(vk_format),
                      .uint = vk_format_is_uint(vk_format),
                      .color_format = format,
                      .srgb = vk_format_is_srgb(vk_format),
                      .mask = 0xf));
}

static void
r2d_setup(struct tu_cmd_buffer *cmd,
          struct tu_cs *cs,
          VkFormat vk_format,
          enum a6xx_rotation rotation,
          bool clear,
          uint8_t mask)
{
   const struct tu_physical_device *phys_dev = cmd->device->physical_device;

   /* TODO: flushing with barriers instead of blindly always flushing */
   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);
   tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_COLOR, false);
   tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_DEPTH, false);
   tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);

   tu_cs_emit_wfi(cs);
   tu_cs_emit_regs(cs,
                   A6XX_RB_CCU_CNTL(.offset = phys_dev->ccu_offset_bypass));

   r2d_setup_common(cmd, cs, vk_format, rotation, clear, mask, false);
}

static void
r2d_run(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   tu_cs_emit_pkt7(cs, CP_BLIT, 1);
   tu_cs_emit(cs, CP_BLIT_0_OP(BLIT_OP_SCALE));

   /* TODO: flushing with barriers instead of blindly always flushing */
   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);
   tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
}
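
/* CP_BLIT with BLIT_OP_SCALE kicks off the 2D operation using all the state
 * programmed by the r2d_* helpers above; the trailing flushes make the
 * result visible to later work (see the TODO about using barriers instead).
 */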

/* r3d_ = shader path operations */

static void
r3d_pipeline(struct tu_cmd_buffer *cmd, struct tu_cs *cs, bool blit, uint32_t num_rts)
{
   static const instr_t vs_code[] = {
      /* r0.xyz = r0.w ? c1.xyz : c0.xyz
       * r1.xy = r0.w ? c1.zw : c0.zw
       * r0.w = 1.0f
       */
      { .cat3 = {
         .opc_cat = 3, .opc = OPC_SEL_B32 & 63, .repeat = 2, .dst = 0,
         .c1 = {.src1_c = 1, .src1 = 4}, .src1_r = 1,
         .src2 = 3,
         .c2 = {.src3_c = 1, .dummy = 1, .src3 = 0},
      } },
      { .cat3 = {
         .opc_cat = 3, .opc = OPC_SEL_B32 & 63, .repeat = 1, .dst = 4,
         .c1 = {.src1_c = 1, .src1 = 6}, .src1_r = 1,
         .src2 = 3,
         .c2 = {.src3_c = 1, .dummy = 1, .src3 = 2},
      } },
      { .cat1 = { .opc_cat = 1, .src_type = TYPE_F32, .dst_type = TYPE_F32, .dst = 3,
                  .src_im = 1, .fim_val = 1.0f } },
      { .cat0 = { .opc = OPC_END } },
   };
#define FS_OFFSET (16 * sizeof(instr_t))
   STATIC_ASSERT(sizeof(vs_code) <= FS_OFFSET);

   /* vs inputs: only vtx id in r0.w */
   tu_cs_emit_pkt4(cs, REG_A6XX_VFD_CONTROL_0, 7);
   tu_cs_emit(cs, 0x00000000);
   tu_cs_emit(cs, 0xfcfcfc00 | A6XX_VFD_CONTROL_1_REGID4VTX(3));
   tu_cs_emit(cs, 0x0000fcfc);
   tu_cs_emit(cs, 0xfcfcfcfc);
   tu_cs_emit(cs, 0x000000fc);
   tu_cs_emit(cs, 0x0000fcfc);
   tu_cs_emit(cs, 0x00000000);

   /* vs outputs: position in r0.xyzw, blit coords in r1.xy */
   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_VAR_DISABLE(0), 4);
   tu_cs_emit(cs, blit ? 0xffffffcf : 0xffffffff);
   tu_cs_emit(cs, 0xffffffff);
   tu_cs_emit(cs, 0xffffffff);
   tu_cs_emit(cs, 0xffffffff);

   tu_cs_emit_regs(cs, A6XX_SP_VS_OUT_REG(0,
                      .a_regid = 0, .a_compmask = 0xf,
                      .b_regid = 4, .b_compmask = 0x3));
   tu_cs_emit_regs(cs, A6XX_SP_VS_VPC_DST_REG(0, .outloc0 = 0, .outloc1 = 4));

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_CNTL_0, 1);
   tu_cs_emit(cs, 0xff00ff00 |
              COND(blit, A6XX_VPC_CNTL_0_VARYING) |
              A6XX_VPC_CNTL_0_NUMNONPOSVAR(blit ? 8 : 0));

   tu_cs_emit_regs(cs, A6XX_VPC_PACK(
                      .positionloc = 0,
                      .psizeloc = 0xff,
                      .stride_in_vpc = blit ? 6 : 4));
   tu_cs_emit_regs(cs, A6XX_SP_PRIMITIVE_CNTL(.vsout = blit ? 2 : 1));
   tu_cs_emit_regs(cs,
                   A6XX_PC_PRIMITIVE_CNTL_0(),
                   A6XX_PC_PRIMITIVE_CNTL_1(.stride_in_vpc = blit ? 6 : 4));

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_VARYING_INTERP_MODE(0), 8);
   tu_cs_emit(cs, blit ? 0xe000 : 0); // I think this can just be 0
   for (uint32_t i = 1; i < 8; i++)
      tu_cs_emit(cs, 0);

   tu_cs_emit_pkt4(cs, REG_A6XX_VPC_VARYING_PS_REPL_MODE(0), 8);
   for (uint32_t i = 0; i < 8; i++)
      tu_cs_emit(cs, 0x99999999);

   /* fs inputs: none, prefetch in blit case */
   tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_PREFETCH_CNTL, 1 + blit);
   tu_cs_emit(cs, A6XX_SP_FS_PREFETCH_CNTL_COUNT(blit) |
              A6XX_SP_FS_PREFETCH_CNTL_UNK4(0xfc) |
              0x7000);
   if (blit) {
      tu_cs_emit(cs, A6XX_SP_FS_PREFETCH_CMD_SRC(4) |
                 A6XX_SP_FS_PREFETCH_CMD_SAMP_ID(0) |
                 A6XX_SP_FS_PREFETCH_CMD_TEX_ID(0) |
                 A6XX_SP_FS_PREFETCH_CMD_DST(0) |
                 A6XX_SP_FS_PREFETCH_CMD_WRMASK(0xf) |
                 A6XX_SP_FS_PREFETCH_CMD_CMD(0x4));
   }

   tu_cs_emit_pkt4(cs, REG_A6XX_HLSQ_CONTROL_1_REG, 5);
   tu_cs_emit(cs, 0x3); // XXX blob uses 3 in blit path
   tu_cs_emit(cs, 0xfcfcfcfc);
   tu_cs_emit(cs, A6XX_HLSQ_CONTROL_3_REG_BARY_IJ_PIXEL(blit ? 0 : 0xfc) |
              A6XX_HLSQ_CONTROL_3_REG_BARY_IJ_CENTROID(0xfc) |
              0xfc00fc00);
   tu_cs_emit(cs, 0xfcfcfcfc);
   tu_cs_emit(cs, 0xfcfc);

   tu_cs_emit_regs(cs, A6XX_HLSQ_UNKNOWN_B980(blit ? 3 : 1));
   tu_cs_emit_regs(cs, A6XX_GRAS_CNTL(.varying = blit));
   tu_cs_emit_regs(cs,
                   A6XX_RB_RENDER_CONTROL0(.varying = blit, .unk10 = blit),
                   A6XX_RB_RENDER_CONTROL1());

   tu_cs_emit_regs(cs, A6XX_RB_SAMPLE_CNTL());
   tu_cs_emit_regs(cs, A6XX_GRAS_UNKNOWN_8101());
   tu_cs_emit_regs(cs, A6XX_GRAS_SAMPLE_CNTL());

   /* shaders */
   struct ts_cs_memory shaders = { };
   VkResult result = tu_cs_alloc(&cmd->sub_cs, 2, 16 * sizeof(instr_t), &shaders);
   assert(result == VK_SUCCESS);

   memcpy(shaders.map, vs_code, sizeof(vs_code));

   instr_t *fs = (instr_t*) ((uint8_t*) shaders.map + FS_OFFSET);
   for (uint32_t i = 0; i < num_rts; i++) {
      /* (rpt3)mov.s32s32 r0.x, (r)c[i].x */
      fs[i] = (instr_t) { .cat1 = { .opc_cat = 1, .src_type = TYPE_S32, .dst_type = TYPE_S32,
                                    .repeat = 3, .dst = i * 4, .src_c = 1, .src_r = 1, .src = i * 4 } };
   }
   fs[num_rts] = (instr_t) { .cat0 = { .opc = OPC_END } };
   /* note: assumed <= 16 instructions (MAX_RTS is 8) */

   tu_cs_emit_regs(cs, A6XX_HLSQ_UPDATE_CNTL(0x7ffff));
   tu_cs_emit_regs(cs,
                   A6XX_HLSQ_VS_CNTL(.constlen = 8, .enabled = true),
                   A6XX_HLSQ_HS_CNTL(),
                   A6XX_HLSQ_DS_CNTL(),
                   A6XX_HLSQ_GS_CNTL());
   tu_cs_emit_regs(cs, A6XX_HLSQ_FS_CNTL(.constlen = 4 * num_rts, .enabled = true));

   tu_cs_emit_regs(cs,
                   A6XX_SP_VS_CONFIG(.enabled = true),
                   A6XX_SP_VS_INSTRLEN(1));
   tu_cs_emit_regs(cs, A6XX_SP_HS_CONFIG());
   tu_cs_emit_regs(cs, A6XX_SP_DS_CONFIG());
   tu_cs_emit_regs(cs, A6XX_SP_GS_CONFIG());
   tu_cs_emit_regs(cs,
                   A6XX_SP_FS_CONFIG(.enabled = true, .ntex = blit, .nsamp = blit),
                   A6XX_SP_FS_INSTRLEN(1));

   tu_cs_emit_regs(cs, A6XX_SP_VS_CTRL_REG0(
                      .threadsize = FOUR_QUADS,
                      .fullregfootprint = 2,
                      .mergedregs = true));
   tu_cs_emit_regs(cs, A6XX_SP_FS_CTRL_REG0(
                      .varying = blit,
                      .threadsize = FOUR_QUADS,
                      /* could this be 0 in !blit && !num_rts case ? */
                      .fullregfootprint = MAX2(1, num_rts),
                      .mergedregs = true)); /* note: tu_pipeline also sets 0x1000000 bit */

   tu_cs_emit_regs(cs, A6XX_SP_IBO_COUNT(0));

   tu_cs_emit_pkt7(cs, CP_LOAD_STATE6_GEOM, 3);
   tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(0) |
              CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
              CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
              CP_LOAD_STATE6_0_STATE_BLOCK(SB6_VS_SHADER) |
              CP_LOAD_STATE6_0_NUM_UNIT(1));
   tu_cs_emit_qw(cs, shaders.iova);

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_VS_OBJ_START_LO, 2);
   tu_cs_emit_qw(cs, shaders.iova);

   tu_cs_emit_pkt7(cs, CP_LOAD_STATE6_FRAG, 3);
   tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(0) |
              CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
              CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
              CP_LOAD_STATE6_0_STATE_BLOCK(SB6_FS_SHADER) |
              CP_LOAD_STATE6_0_NUM_UNIT(1));
   tu_cs_emit_qw(cs, shaders.iova + FS_OFFSET);

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_OBJ_START_LO, 2);
   tu_cs_emit_qw(cs, shaders.iova + FS_OFFSET);

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_CL_CNTL(
                      .persp_division_disable = 1,
                      .vp_xform_disable = 1,
                      .vp_clip_code_ignore = 1,
                      .clip_disable = 1),
                   A6XX_GRAS_UNKNOWN_8001(0));
   tu_cs_emit_regs(cs, A6XX_GRAS_SU_CNTL()); // XXX msaa enable?

   tu_cs_emit_regs(cs,
                   A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0(.x = 0, .y = 0),
                   A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0(.x = 0x7fff, .y = 0x7fff));
   tu_cs_emit_regs(cs,
                   A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0(.x = 0, .y = 0),
                   A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0(.x = 0x7fff, .y = 0x7fff));
}

static void
r3d_coords_raw(struct tu_cs *cs, const float *coords)
{
   tu_cs_emit_pkt7(cs, CP_LOAD_STATE6_GEOM, 3 + 8);
   tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(0) |
              CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
              CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
              CP_LOAD_STATE6_0_STATE_BLOCK(SB6_VS_SHADER) |
              CP_LOAD_STATE6_0_NUM_UNIT(2));
   tu_cs_emit(cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
   tu_cs_emit(cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
   tu_cs_emit_array(cs, (const uint32_t *) coords, 8);
}
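
/* the 8 floats load VS constants c0.xyzw and c1.xyzw: c0 holds the first
 * dst/src corner and c1 the second, and the sel.b32 instructions in vs_code
 * pick between them based on the vertex id in r0.w to expand the rectlist.
 */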

static void
r3d_coords(struct tu_cs *cs,
           const VkOffset2D *dst,
           const VkOffset2D *src,
           const VkExtent2D *extent)
{
   int32_t src_x1 = src ? src->x : 0;
   int32_t src_y1 = src ? src->y : 0;
   r3d_coords_raw(cs, (float[]) {
      dst->x, dst->y,
      src_x1, src_y1,
      dst->x + extent->width, dst->y + extent->height,
      src_x1 + extent->width, src_y1 + extent->height,
   });
}

static void
r3d_clear_value(struct tu_cs *cs, VkFormat format, const VkClearValue *val)
{
   tu_cs_emit_pkt7(cs, CP_LOAD_STATE6_FRAG, 3 + 4);
   tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(0) |
              CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
              CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
              CP_LOAD_STATE6_0_STATE_BLOCK(SB6_FS_SHADER) |
              CP_LOAD_STATE6_0_NUM_UNIT(1));
   tu_cs_emit(cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
   tu_cs_emit(cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
   switch (format) {
   case VK_FORMAT_X8_D24_UNORM_PACK32:
   case VK_FORMAT_D24_UNORM_S8_UINT: {
      /* cleared as r8g8b8a8_unorm using special format */
      uint32_t tmp = tu_pack_float32_for_unorm(val->depthStencil.depth, 24);
      tu_cs_emit(cs, fui((tmp & 0xff) / 255.0f));
      tu_cs_emit(cs, fui((tmp >> 8 & 0xff) / 255.0f));
      tu_cs_emit(cs, fui((tmp >> 16 & 0xff) / 255.0f));
      tu_cs_emit(cs, fui((val->depthStencil.stencil & 0xff) / 255.0f));
   } break;
   case VK_FORMAT_D16_UNORM:
   case VK_FORMAT_D32_SFLOAT:
      tu_cs_emit(cs, fui(val->depthStencil.depth));
      tu_cs_emit(cs, 0);
      tu_cs_emit(cs, 0);
      tu_cs_emit(cs, 0);
      break;
   case VK_FORMAT_S8_UINT:
      tu_cs_emit(cs, val->depthStencil.stencil & 0xff);
      tu_cs_emit(cs, 0);
      tu_cs_emit(cs, 0);
      tu_cs_emit(cs, 0);
      break;
   default:
      /* as color formats use clear value as-is */
      assert(!vk_format_is_depth_or_stencil(format));
      tu_cs_emit_array(cs, val->color.uint32, 4);
      break;
   }
}

static void
r3d_src_common(struct tu_cmd_buffer *cmd,
               struct tu_cs *cs,
               const uint32_t *tex_const,
               uint32_t offset_base,
               uint32_t offset_ubwc,
               bool linear_filter)
{
   struct ts_cs_memory texture = { };
   VkResult result = tu_cs_alloc(&cmd->sub_cs,
                                 2, /* allocate space for a sampler too */
                                 A6XX_TEX_CONST_DWORDS, &texture);
   assert(result == VK_SUCCESS);

   memcpy(texture.map, tex_const, A6XX_TEX_CONST_DWORDS * 4);

   /* patch addresses for layer offset */
   *(uint64_t*) (texture.map + 4) += offset_base;
   uint64_t ubwc_addr = (texture.map[7] | (uint64_t) texture.map[8] << 32) + offset_ubwc;
   texture.map[7] = ubwc_addr;
   texture.map[8] = ubwc_addr >> 32;

   texture.map[A6XX_TEX_CONST_DWORDS + 0] =
      A6XX_TEX_SAMP_0_XY_MAG(linear_filter ? A6XX_TEX_LINEAR : A6XX_TEX_NEAREST) |
      A6XX_TEX_SAMP_0_XY_MIN(linear_filter ? A6XX_TEX_LINEAR : A6XX_TEX_NEAREST) |
      A6XX_TEX_SAMP_0_WRAP_S(A6XX_TEX_CLAMP_TO_EDGE) |
      A6XX_TEX_SAMP_0_WRAP_T(A6XX_TEX_CLAMP_TO_EDGE) |
      A6XX_TEX_SAMP_0_WRAP_R(A6XX_TEX_CLAMP_TO_EDGE) |
      0x60000; /* XXX used by blob, doesn't seem necessary */
   texture.map[A6XX_TEX_CONST_DWORDS + 1] =
      0x1 | /* XXX used by blob, doesn't seem necessary */
      A6XX_TEX_SAMP_1_UNNORM_COORDS |
      A6XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR;
   texture.map[A6XX_TEX_CONST_DWORDS + 2] = 0;
   texture.map[A6XX_TEX_CONST_DWORDS + 3] = 0;

   tu_cs_emit_pkt7(cs, CP_LOAD_STATE6_FRAG, 3);
   tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(0) |
              CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
              CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
              CP_LOAD_STATE6_0_STATE_BLOCK(SB6_FS_TEX) |
              CP_LOAD_STATE6_0_NUM_UNIT(1));
   tu_cs_emit_qw(cs, texture.iova + A6XX_TEX_CONST_DWORDS * 4);

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_TEX_SAMP_LO, 2);
   tu_cs_emit_qw(cs, texture.iova + A6XX_TEX_CONST_DWORDS * 4);

   tu_cs_emit_pkt7(cs, CP_LOAD_STATE6_FRAG, 3);
   tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(0) |
              CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
              CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
              CP_LOAD_STATE6_0_STATE_BLOCK(SB6_FS_TEX) |
              CP_LOAD_STATE6_0_NUM_UNIT(1));
   tu_cs_emit_qw(cs, texture.iova);

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_TEX_CONST_LO, 2);
   tu_cs_emit_qw(cs, texture.iova);

   tu_cs_emit_regs(cs, A6XX_SP_FS_TEX_COUNT(1));
}

static void
r3d_src(struct tu_cmd_buffer *cmd,
        struct tu_cs *cs,
        const struct tu_image_view *iview,
        uint32_t layer,
        bool linear_filter)
{
   r3d_src_common(cmd, cs, iview->descriptor,
                  iview->layer_size * layer,
                  iview->ubwc_layer_size * layer,
                  linear_filter);
}

static void
r3d_src_buffer(struct tu_cmd_buffer *cmd,
               struct tu_cs *cs,
               VkFormat vk_format,
               uint64_t va, uint32_t pitch,
               uint32_t width, uint32_t height)
{
   uint32_t desc[A6XX_TEX_CONST_DWORDS];

   struct tu_native_format format = tu6_format_texture(vk_format, TILE6_LINEAR);

   desc[0] =
      COND(vk_format_is_srgb(vk_format), A6XX_TEX_CONST_0_SRGB) |
      A6XX_TEX_CONST_0_FMT(format.fmt) |
      A6XX_TEX_CONST_0_SWAP(format.swap) |
      A6XX_TEX_CONST_0_SWIZ_X(A6XX_TEX_X) |
      // XXX to swizzle into .w for stencil buffer_to_image
      A6XX_TEX_CONST_0_SWIZ_Y(vk_format == VK_FORMAT_R8_UNORM ? A6XX_TEX_X : A6XX_TEX_Y) |
      A6XX_TEX_CONST_0_SWIZ_Z(vk_format == VK_FORMAT_R8_UNORM ? A6XX_TEX_X : A6XX_TEX_Z) |
      A6XX_TEX_CONST_0_SWIZ_W(vk_format == VK_FORMAT_R8_UNORM ? A6XX_TEX_X : A6XX_TEX_W);
   desc[1] = A6XX_TEX_CONST_1_WIDTH(width) | A6XX_TEX_CONST_1_HEIGHT(height);
   desc[2] =
      A6XX_TEX_CONST_2_FETCHSIZE(tu6_fetchsize(vk_format)) |
      A6XX_TEX_CONST_2_PITCH(pitch) |
      A6XX_TEX_CONST_2_TYPE(A6XX_TEX_2D);
   desc[3] = 0;
   desc[4] = va;
   desc[5] = va >> 32;
   for (uint32_t i = 6; i < A6XX_TEX_CONST_DWORDS; i++)
      desc[i] = 0;

   r3d_src_common(cmd, cs, desc, 0, 0, false);
}

static void
r3d_dst(struct tu_cs *cs, const struct tu_image_view *iview, uint32_t layer)
{
   tu6_emit_msaa(cs, iview->image->samples); /* TODO: move to setup */

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_MRT_BUF_INFO(0), 6);
   tu_cs_emit(cs, iview->RB_MRT_BUF_INFO);
   tu_cs_image_ref(cs, iview, layer);
   tu_cs_emit(cs, 0);

   tu_cs_emit_pkt4(cs, REG_A6XX_RB_MRT_FLAG_BUFFER(0), 3);
   tu_cs_image_flag_ref(cs, iview, layer);

   tu_cs_emit_regs(cs, A6XX_RB_RENDER_CNTL(.flag_mrts = iview->ubwc_enabled));
}

static void
r3d_dst_buffer(struct tu_cs *cs, VkFormat vk_format, uint64_t va, uint32_t pitch)
{
   struct tu_native_format format = tu6_format_color(vk_format, TILE6_LINEAR);

   tu6_emit_msaa(cs, 1); /* TODO: move to setup */

   tu_cs_emit_regs(cs,
                   A6XX_RB_MRT_BUF_INFO(0, .color_format = format.fmt, .color_swap = format.swap),
                   A6XX_RB_MRT_PITCH(0, pitch),
                   A6XX_RB_MRT_ARRAY_PITCH(0, 0),
                   A6XX_RB_MRT_BASE_LO(0, (uint32_t) va),
                   A6XX_RB_MRT_BASE_HI(0, va >> 32),
                   A6XX_RB_MRT_BASE_GMEM(0, 0));

   tu_cs_emit_regs(cs, A6XX_RB_RENDER_CNTL());
}

static void
r3d_setup(struct tu_cmd_buffer *cmd,
          struct tu_cs *cs,
          VkFormat vk_format,
          enum a6xx_rotation rotation,
          bool clear,
          uint8_t mask)
{
   const struct tu_physical_device *phys_dev = cmd->device->physical_device;

   if (!cmd->state.pass) {
      /* TODO: flushing with barriers instead of blindly always flushing */
      tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
      tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);
      tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_COLOR, false);
      tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_DEPTH, false);
      tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);

      tu_cs_emit_regs(cs,
                      A6XX_RB_CCU_CNTL(.offset = phys_dev->ccu_offset_bypass));

      tu6_emit_window_scissor(cs, 0, 0, 0x7fff, 0x7fff);
   }
   tu_cs_emit_regs(cs, A6XX_GRAS_BIN_CONTROL(.dword = 0xc00000));
   tu_cs_emit_regs(cs, A6XX_RB_BIN_CONTROL(.dword = 0xc00000));

   r3d_pipeline(cmd, cs, !clear, clear ? 1 : 0);

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_OUTPUT_CNTL0, 2);
   tu_cs_emit(cs, A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID(0xfc) |
              A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID(0xfc) |
              0xfc000000);
   tu_cs_emit(cs, A6XX_SP_FS_OUTPUT_CNTL1_MRT(1));

   tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_OUTPUT_REG(0), 1);
   tu_cs_emit(cs, A6XX_SP_FS_OUTPUT_REG_REGID(0));

   tu_cs_emit_regs(cs,
                   A6XX_RB_FS_OUTPUT_CNTL0(),
                   A6XX_RB_FS_OUTPUT_CNTL1(.mrt = 1));

   tu_cs_emit_regs(cs, A6XX_SP_BLEND_CNTL());
   tu_cs_emit_regs(cs, A6XX_RB_BLEND_CNTL(.sample_mask = 0xffff));
   tu_cs_emit_regs(cs, A6XX_RB_ALPHA_CONTROL());

   tu_cs_emit_regs(cs, A6XX_RB_DEPTH_PLANE_CNTL());
   tu_cs_emit_regs(cs, A6XX_RB_DEPTH_CNTL());
   tu_cs_emit_regs(cs, A6XX_GRAS_SU_DEPTH_PLANE_CNTL());
   tu_cs_emit_regs(cs, A6XX_RB_STENCIL_CONTROL());
   tu_cs_emit_regs(cs, A6XX_RB_STENCILMASK());
   tu_cs_emit_regs(cs, A6XX_RB_STENCILWRMASK());
   tu_cs_emit_regs(cs, A6XX_RB_STENCILREF());

   tu_cs_emit_regs(cs, A6XX_RB_RENDER_COMPONENTS(.rt0 = 0xf));
   tu_cs_emit_regs(cs, A6XX_SP_FS_RENDER_COMPONENTS(.rt0 = 0xf));

   tu_cs_emit_regs(cs, A6XX_SP_FS_MRT_REG(0,
                      .color_format = tu6_base_format(vk_format),
                      .color_sint = vk_format_is_sint(vk_format),
                      .color_uint = vk_format_is_uint(vk_format)));

   tu_cs_emit_regs(cs, A6XX_RB_MRT_CONTROL(0, .component_enable = mask));
   tu_cs_emit_regs(cs, A6XX_RB_SRGB_CNTL(vk_format_is_srgb(vk_format)));
   tu_cs_emit_regs(cs, A6XX_SP_SRGB_CNTL(vk_format_is_srgb(vk_format)));
}

static void
r3d_run(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 3);
   tu_cs_emit(cs, CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(DI_PT_RECTLIST) |
              CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_AUTO_INDEX) |
              CP_DRAW_INDX_OFFSET_0_VIS_CULL(IGNORE_VISIBILITY));
   tu_cs_emit(cs, 1); /* instance count */
   tu_cs_emit(cs, 2); /* vertex count */

   if (!cmd->state.pass) {
      /* TODO: flushing with barriers instead of blindly always flushing */
      tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
      tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);
      tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
   }
}

/* blit ops - common interface for 2d/shader paths */

struct blit_ops {
   void (*coords)(struct tu_cs *cs,
                  const VkOffset2D *dst,
                  const VkOffset2D *src,
                  const VkExtent2D *extent);
   void (*clear_value)(struct tu_cs *cs, VkFormat format, const VkClearValue *val);
   void (*src)(struct tu_cmd_buffer *cmd,
               struct tu_cs *cs,
               const struct tu_image_view *iview,
               uint32_t layer,
               bool linear_filter);
   void (*src_buffer)(struct tu_cmd_buffer *cmd, struct tu_cs *cs,
                      VkFormat vk_format,
                      uint64_t va, uint32_t pitch,
                      uint32_t width, uint32_t height);
   void (*dst)(struct tu_cs *cs, const struct tu_image_view *iview, uint32_t layer);
   void (*dst_buffer)(struct tu_cs *cs, VkFormat vk_format, uint64_t va, uint32_t pitch);
   void (*setup)(struct tu_cmd_buffer *cmd,
                 struct tu_cs *cs,
                 VkFormat vk_format,
                 enum a6xx_rotation rotation,
                 bool clear,
                 uint8_t mask);
   void (*run)(struct tu_cmd_buffer *cmd, struct tu_cs *cs);
};

static const struct blit_ops r2d_ops = {
   .coords = r2d_coords,
   .clear_value = r2d_clear_value,
   .src = r2d_src,
   .src_buffer = r2d_src_buffer,
   .dst = r2d_dst,
   .dst_buffer = r2d_dst_buffer,
   .setup = r2d_setup,
   .run = r2d_run,
};

static const struct blit_ops r3d_ops = {
   .coords = r3d_coords,
   .clear_value = r3d_clear_value,
   .src = r3d_src,
   .src_buffer = r3d_src_buffer,
   .dst = r3d_dst,
   .dst_buffer = r3d_dst_buffer,
   .setup = r3d_setup,
   .run = r3d_run,
};
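
/* every call site below follows the same pattern: ops->setup() once per
 * operation (plus ops->clear_value() when clearing), then per region
 * ops->coords() and, for each layer, ops->src()/ops->dst() followed by
 * ops->run().
 */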

/* passthrough set coords from 3D extents */
static void
coords(const struct blit_ops *ops,
       struct tu_cs *cs,
       const VkOffset3D *dst,
       const VkOffset3D *src,
       const VkExtent3D *extent)
{
   ops->coords(cs, (const VkOffset2D*) dst, (const VkOffset2D*) src, (const VkExtent2D*) extent);
}

static void
tu_image_view_blit2(struct tu_image_view *iview,
                    struct tu_image *image,
                    VkFormat format,
                    const VkImageSubresourceLayers *subres,
                    uint32_t layer,
                    bool stencil_read)
{
   VkImageAspectFlags aspect_mask = subres->aspectMask;

   /* always use the AS_R8G8B8A8 format for these */
   if (format == VK_FORMAT_D24_UNORM_S8_UINT ||
       format == VK_FORMAT_X8_D24_UNORM_PACK32) {
      aspect_mask = VK_IMAGE_ASPECT_COLOR_BIT;
   }

   tu_image_view_init(iview, &(VkImageViewCreateInfo) {
      .image = tu_image_to_handle(image),
      .viewType = VK_IMAGE_VIEW_TYPE_2D,
      .format = format,
      /* image_to_buffer from d24s8 with stencil aspect mask writes out to r8 */
      .components.r = stencil_read ? VK_COMPONENT_SWIZZLE_A : VK_COMPONENT_SWIZZLE_R,
      .subresourceRange = {
         .aspectMask = aspect_mask,
         .baseMipLevel = subres->mipLevel,
         .levelCount = 1,
         .baseArrayLayer = subres->baseArrayLayer + layer,
         .layerCount = 1,
      },
   });
}

static void
tu_image_view_blit(struct tu_image_view *iview,
                   struct tu_image *image,
                   const VkImageSubresourceLayers *subres,
                   uint32_t layer)
{
   tu_image_view_blit2(iview, image, image->vk_format, subres, layer, false);
}

static void
tu6_blit_image(struct tu_cmd_buffer *cmd,
               struct tu_image *src_image,
               struct tu_image *dst_image,
               const VkImageBlit *info,
               VkFilter filter)
{
   const struct blit_ops *ops = &r2d_ops;
   struct tu_cs *cs = &cmd->cs;
   uint32_t layers;

   /* 2D blit can't do rotation mirroring from just coordinates */
   static const enum a6xx_rotation rotate[2][2] = {
      {ROTATE_0, ROTATE_HFLIP},
      {ROTATE_VFLIP, ROTATE_180},
   };

   bool mirror_x = (info->srcOffsets[1].x < info->srcOffsets[0].x) !=
                   (info->dstOffsets[1].x < info->dstOffsets[0].x);
   bool mirror_y = (info->srcOffsets[1].y < info->srcOffsets[0].y) !=
                   (info->dstOffsets[1].y < info->dstOffsets[0].y);
   bool mirror_z = (info->srcOffsets[1].z < info->srcOffsets[0].z) !=
                   (info->dstOffsets[1].z < info->dstOffsets[0].z);

   if (mirror_z) {
      tu_finishme("blit z mirror\n");
      return;
   }

   if (info->srcOffsets[1].z - info->srcOffsets[0].z !=
       info->dstOffsets[1].z - info->dstOffsets[0].z) {
      tu_finishme("blit z filter\n");
      return;
   }

   layers = info->srcOffsets[1].z - info->srcOffsets[0].z;
   if (info->dstSubresource.layerCount > 1) {
      assert(layers <= 1);
      layers = info->dstSubresource.layerCount;
   }

   uint8_t mask = 0xf;
   if (dst_image->vk_format == VK_FORMAT_D24_UNORM_S8_UINT) {
      assert(info->srcSubresource.aspectMask == info->dstSubresource.aspectMask);
      if (info->dstSubresource.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT)
         mask = 0x7;
      if (info->dstSubresource.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT)
         mask = 0x8;
   }

   if (dst_image->samples > 1)
      ops = &r3d_ops;

   /* TODO: shader path fails some of blit_image.all_formats.generate_mipmaps.* tests,
    * figure out why (should be able to pass all tests with only shader path)
    */

   ops->setup(cmd, cs, dst_image->vk_format, rotate[mirror_y][mirror_x], false, mask);

   if (ops == &r3d_ops) {
      r3d_coords_raw(cs, (float[]) {
         info->dstOffsets[0].x, info->dstOffsets[0].y,
         info->srcOffsets[0].x, info->srcOffsets[0].y,
         info->dstOffsets[1].x, info->dstOffsets[1].y,
         info->srcOffsets[1].x, info->srcOffsets[1].y
      });
   } else {
      tu_cs_emit_regs(cs,
                      A6XX_GRAS_2D_DST_TL(.x = MIN2(info->dstOffsets[0].x, info->dstOffsets[1].x),
                                          .y = MIN2(info->dstOffsets[0].y, info->dstOffsets[1].y)),
                      A6XX_GRAS_2D_DST_BR(.x = MAX2(info->dstOffsets[0].x, info->dstOffsets[1].x) - 1,
                                          .y = MAX2(info->dstOffsets[0].y, info->dstOffsets[1].y) - 1));
      tu_cs_emit_regs(cs,
                      A6XX_GRAS_2D_SRC_TL_X(.x = MIN2(info->srcOffsets[0].x, info->srcOffsets[1].x)),
                      A6XX_GRAS_2D_SRC_BR_X(.x = MAX2(info->srcOffsets[0].x, info->srcOffsets[1].x) - 1),
                      A6XX_GRAS_2D_SRC_TL_Y(.y = MIN2(info->srcOffsets[0].y, info->srcOffsets[1].y)),
                      A6XX_GRAS_2D_SRC_BR_Y(.y = MAX2(info->srcOffsets[0].y, info->srcOffsets[1].y) - 1));
   }

   struct tu_image_view dst, src;
   tu_image_view_blit(&dst, dst_image, &info->dstSubresource, info->dstOffsets[0].z);
   tu_image_view_blit(&src, src_image, &info->srcSubresource, info->srcOffsets[0].z);

   for (uint32_t i = 0; i < layers; i++) {
      ops->dst(cs, &dst, i);
      ops->src(cmd, cs, &src, i, filter == VK_FILTER_LINEAR);
      ops->run(cmd, cs);
   }
}
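
/* in the 2D path above the rect corners are sorted with MIN2/MAX2 and any
 * mirroring is expressed through the rotate[][] table instead, since the
 * blitter can't infer a flip from reversed coordinates; the shader path
 * just passes the raw corners through.
 */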

void
tu_CmdBlitImage(VkCommandBuffer commandBuffer,
                VkImage srcImage,
                VkImageLayout srcImageLayout,
                VkImage dstImage,
                VkImageLayout dstImageLayout,
                uint32_t regionCount,
                const VkImageBlit *pRegions,
                VkFilter filter)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_image, src_image, srcImage);
   TU_FROM_HANDLE(tu_image, dst_image, dstImage);

   tu_bo_list_add(&cmd->bo_list, src_image->bo, MSM_SUBMIT_BO_READ);
   tu_bo_list_add(&cmd->bo_list, dst_image->bo, MSM_SUBMIT_BO_WRITE);

   for (uint32_t i = 0; i < regionCount; ++i)
      tu6_blit_image(cmd, src_image, dst_image, pRegions + i, filter);
}

static VkFormat
copy_format(VkFormat format)
{
   switch (vk_format_get_blocksizebits(format)) {
   case 8:   return VK_FORMAT_R8_UINT;
   case 16:  return VK_FORMAT_R16_UINT;
   case 32:  return VK_FORMAT_R32_UINT;
   case 64:  return VK_FORMAT_R32G32_UINT;
   case 96:  return VK_FORMAT_R32G32B32_UINT;
   case 128: return VK_FORMAT_R32G32B32A32_UINT;
   default:
      unreachable("unhandled format size");
   }
}
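
/* e.g. VK_FORMAT_R8G8B8A8_UNORM is 32 bits per block and is copied as
 * VK_FORMAT_R32_UINT; compressed formats also end up here after
 * copy_compressed() reduces them to one block per texel.
 */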

static void
copy_compressed(VkFormat format,
                VkOffset3D *offset,
                VkExtent3D *extent,
                uint32_t *pitch,
                uint32_t *layer_size)
{
   if (!vk_format_is_compressed(format))
      return;

   uint32_t block_width = vk_format_get_blockwidth(format);
   uint32_t block_height = vk_format_get_blockheight(format);

   offset->x /= block_width;
   offset->y /= block_height;

   if (extent) {
      extent->width = DIV_ROUND_UP(extent->width, block_width);
      extent->height = DIV_ROUND_UP(extent->height, block_height);
   }
   if (pitch)
      *pitch /= block_width;
   if (layer_size)
      *layer_size /= (block_width * block_height);
}
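
/* e.g. for an ETC2-style 4x4 block format, offset (8, 4) becomes (2, 1) in
 * block units and a 16-texel-wide extent becomes 4 blocks wide.
 */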

static void
tu_copy_buffer_to_image(struct tu_cmd_buffer *cmd,
                        struct tu_buffer *src_buffer,
                        struct tu_image *dst_image,
                        const VkBufferImageCopy *info)
{
   struct tu_cs *cs = &cmd->cs;
   uint32_t layers = MAX2(info->imageExtent.depth, info->imageSubresource.layerCount);
   VkFormat dst_format = dst_image->vk_format;
   VkFormat src_format = dst_image->vk_format;
   const struct blit_ops *ops = &r2d_ops;

   uint8_t mask = 0xf;

   if (dst_image->vk_format == VK_FORMAT_D24_UNORM_S8_UINT) {
      switch (info->imageSubresource.aspectMask) {
      case VK_IMAGE_ASPECT_STENCIL_BIT:
         src_format = VK_FORMAT_R8_UNORM; /* changes how src buffer is interpreted */
         mask = 0x8;
         ops = &r3d_ops;
         break;
      case VK_IMAGE_ASPECT_DEPTH_BIT:
         mask = 0x7;
         break;
      }
   }

   VkOffset3D offset = info->imageOffset;
   VkExtent3D extent = info->imageExtent;
   uint32_t pitch =
      (info->bufferRowLength ?: extent.width) * vk_format_get_blocksize(src_format);
   uint32_t layer_size = (info->bufferImageHeight ?: extent.height) * pitch;

   if (dst_format == VK_FORMAT_E5B9G9R9_UFLOAT_PACK32 || vk_format_is_compressed(src_format)) {
      assert(src_format == dst_format);
      copy_compressed(dst_format, &offset, &extent, &pitch, &layer_size);
      src_format = dst_format = copy_format(dst_format);
   }

   /* note: the src_va/pitch alignment of 64 is for 2D engine,
    * it is also valid for 1cpp format with shader path (stencil aspect path)
    */

   ops->setup(cmd, cs, dst_format, ROTATE_0, false, mask);

   struct tu_image_view dst;
   tu_image_view_blit2(&dst, dst_image, dst_format, &info->imageSubresource, offset.z, false);

   for (uint32_t i = 0; i < layers; i++) {
      ops->dst(cs, &dst, i);

      uint64_t src_va = tu_buffer_iova(src_buffer) + info->bufferOffset + layer_size * i;
      if ((src_va & 63) || (pitch & 63)) {
         for (uint32_t y = 0; y < extent.height; y++) {
            uint32_t x = (src_va & 63) / vk_format_get_blocksize(src_format);
            ops->src_buffer(cmd, cs, src_format, src_va & ~63, pitch,
                            x + extent.width, 1);
            ops->coords(cs, &(VkOffset2D){offset.x, offset.y + y}, &(VkOffset2D){x},
                        &(VkExtent2D) {extent.width, 1});
            ops->run(cmd, cs);
            src_va += pitch;
         }
      } else {
         ops->src_buffer(cmd, cs, src_format, src_va, pitch, extent.width, extent.height);
         coords(ops, cs, &offset, &(VkOffset3D){}, &extent);
         ops->run(cmd, cs);
      }
   }
}

void
tu_CmdCopyBufferToImage(VkCommandBuffer commandBuffer,
                        VkBuffer srcBuffer,
                        VkImage dstImage,
                        VkImageLayout dstImageLayout,
                        uint32_t regionCount,
                        const VkBufferImageCopy *pRegions)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_image, dst_image, dstImage);
   TU_FROM_HANDLE(tu_buffer, src_buffer, srcBuffer);

   tu_bo_list_add(&cmd->bo_list, src_buffer->bo, MSM_SUBMIT_BO_READ);
   tu_bo_list_add(&cmd->bo_list, dst_image->bo, MSM_SUBMIT_BO_WRITE);

   for (unsigned i = 0; i < regionCount; ++i)
      tu_copy_buffer_to_image(cmd, src_buffer, dst_image, pRegions + i);
}

static void
tu_copy_image_to_buffer(struct tu_cmd_buffer *cmd,
                        struct tu_image *src_image,
                        struct tu_buffer *dst_buffer,
                        const VkBufferImageCopy *info)
{
   struct tu_cs *cs = &cmd->cs;
   uint32_t layers = MAX2(info->imageExtent.depth, info->imageSubresource.layerCount);
   VkFormat src_format = src_image->vk_format;
   VkFormat dst_format = src_image->vk_format;
   bool stencil_read = false;

   if (src_image->vk_format == VK_FORMAT_D24_UNORM_S8_UINT &&
       info->imageSubresource.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT) {
      dst_format = VK_FORMAT_R8_UNORM;
      stencil_read = true;
   }

   const struct blit_ops *ops = stencil_read ? &r3d_ops : &r2d_ops;
   VkOffset3D offset = info->imageOffset;
   VkExtent3D extent = info->imageExtent;
   uint32_t pitch = (info->bufferRowLength ?: extent.width) * vk_format_get_blocksize(dst_format);
   uint32_t layer_size = (info->bufferImageHeight ?: extent.height) * pitch;

   if (dst_format == VK_FORMAT_E5B9G9R9_UFLOAT_PACK32 || vk_format_is_compressed(dst_format)) {
      assert(src_format == dst_format);
      copy_compressed(dst_format, &offset, &extent, &pitch, &layer_size);
      src_format = dst_format = copy_format(dst_format);
   }

   /* note: the dst_va/pitch alignment of 64 is for 2D engine,
    * it is also valid for 1cpp format with shader path (stencil aspect)
    */

   ops->setup(cmd, cs, dst_format, ROTATE_0, false, 0xf);

   struct tu_image_view src;
   tu_image_view_blit2(&src, src_image, src_format, &info->imageSubresource, offset.z, stencil_read);

   for (uint32_t i = 0; i < layers; i++) {
      ops->src(cmd, cs, &src, i, false);

      uint64_t dst_va = tu_buffer_iova(dst_buffer) + info->bufferOffset + layer_size * i;
      if ((dst_va & 63) || (pitch & 63)) {
         for (uint32_t y = 0; y < extent.height; y++) {
            uint32_t x = (dst_va & 63) / vk_format_get_blocksize(dst_format);
            ops->dst_buffer(cs, dst_format, dst_va & ~63, 0);
            ops->coords(cs, &(VkOffset2D) {x}, &(VkOffset2D){offset.x, offset.y + y},
                        &(VkExtent2D) {extent.width, 1});
            ops->run(cmd, cs);
            dst_va += pitch;
         }
      } else {
         ops->dst_buffer(cs, dst_format, dst_va, pitch);
         coords(ops, cs, &(VkOffset3D) {0, 0}, &offset, &extent);
         ops->run(cmd, cs);
      }
   }
}

void
tu_CmdCopyImageToBuffer(VkCommandBuffer commandBuffer,
                        VkImage srcImage,
                        VkImageLayout srcImageLayout,
                        VkBuffer dstBuffer,
                        uint32_t regionCount,
                        const VkBufferImageCopy *pRegions)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_image, src_image, srcImage);
   TU_FROM_HANDLE(tu_buffer, dst_buffer, dstBuffer);

   tu_bo_list_add(&cmd->bo_list, src_image->bo, MSM_SUBMIT_BO_READ);
   tu_bo_list_add(&cmd->bo_list, dst_buffer->bo, MSM_SUBMIT_BO_WRITE);

   for (unsigned i = 0; i < regionCount; ++i)
      tu_copy_image_to_buffer(cmd, src_image, dst_buffer, pRegions + i);
}

static void
tu_copy_image_to_image(struct tu_cmd_buffer *cmd,
                       struct tu_image *src_image,
                       struct tu_image *dst_image,
                       const VkImageCopy *info)
{
   const struct blit_ops *ops = &r2d_ops;
   struct tu_cs *cs = &cmd->cs;

   uint8_t mask = 0xf;
   if (dst_image->vk_format == VK_FORMAT_D24_UNORM_S8_UINT) {
      if (info->dstSubresource.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT)
         mask = 0x7;
      if (info->dstSubresource.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT)
         mask = 0x8;
   }

   if (dst_image->samples > 1)
      ops = &r3d_ops;

   assert(info->srcSubresource.aspectMask == info->dstSubresource.aspectMask);

   VkFormat format = VK_FORMAT_UNDEFINED;
   VkOffset3D src_offset = info->srcOffset;
   VkOffset3D dst_offset = info->dstOffset;
   VkExtent3D extent = info->extent;

   /* TODO: should check (ubwc || (tile_mode && swap)) instead */
   if (src_image->layout.tile_mode && src_image->vk_format != VK_FORMAT_E5B9G9R9_UFLOAT_PACK32)
      format = src_image->vk_format;

   if (dst_image->layout.tile_mode && dst_image->vk_format != VK_FORMAT_E5B9G9R9_UFLOAT_PACK32) {
      if (format != VK_FORMAT_UNDEFINED && format != dst_image->vk_format) {
         /* can be clever in some cases but in some cases we need an
          * intermediate linear buffer
          */
         tu_finishme("image copy between two tiled/ubwc images\n");
         return;
      }
      format = dst_image->vk_format;
   }

   if (format == VK_FORMAT_UNDEFINED)
      format = copy_format(src_image->vk_format);

   copy_compressed(src_image->vk_format, &src_offset, &extent, NULL, NULL);
   copy_compressed(dst_image->vk_format, &dst_offset, NULL, NULL, NULL);

   ops->setup(cmd, cs, format, ROTATE_0, false, mask);
   coords(ops, cs, &dst_offset, &src_offset, &extent);

   struct tu_image_view dst, src;
   tu_image_view_blit2(&dst, dst_image, format, &info->dstSubresource, dst_offset.z, false);
   tu_image_view_blit2(&src, src_image, format, &info->srcSubresource, src_offset.z, false);

   for (uint32_t i = 0; i < info->extent.depth; i++) {
      ops->src(cmd, cs, &src, i, false);
      ops->dst(cs, &dst, i);
      ops->run(cmd, cs);
   }
}

void
tu_CmdCopyImage(VkCommandBuffer commandBuffer,
                VkImage srcImage,
                VkImageLayout srcImageLayout,
                VkImage destImage,
                VkImageLayout destImageLayout,
                uint32_t regionCount,
                const VkImageCopy *pRegions)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_image, src_image, srcImage);
   TU_FROM_HANDLE(tu_image, dst_image, destImage);

   tu_bo_list_add(&cmd->bo_list, src_image->bo, MSM_SUBMIT_BO_READ);
   tu_bo_list_add(&cmd->bo_list, dst_image->bo, MSM_SUBMIT_BO_WRITE);

   for (uint32_t i = 0; i < regionCount; ++i)
      tu_copy_image_to_image(cmd, src_image, dst_image, pRegions + i);
}

static void
copy_buffer(struct tu_cmd_buffer *cmd,
            uint64_t dst_va,
            uint64_t src_va,
            uint64_t size,
            uint32_t block_size)
{
   const struct blit_ops *ops = &r2d_ops;
   struct tu_cs *cs = &cmd->cs;
   VkFormat format = block_size == 4 ? VK_FORMAT_R32_UINT : VK_FORMAT_R8_UNORM;
   uint64_t blocks = size / block_size;

   ops->setup(cmd, cs, format, ROTATE_0, false, 0xf);

   while (blocks) {
      uint32_t src_x = (src_va & 63) / block_size;
      uint32_t dst_x = (dst_va & 63) / block_size;
      uint32_t width = MIN2(MIN2(blocks, 0x4000 - src_x), 0x4000 - dst_x);

      ops->src_buffer(cmd, cs, format, src_va & ~63, 0, src_x + width, 1);
      ops->dst_buffer(cs, format, dst_va & ~63, 0);
      ops->coords(cs, &(VkOffset2D) {dst_x}, &(VkOffset2D) {src_x}, &(VkExtent2D) {width, 1});
      ops->run(cmd, cs);

      src_va += width * block_size;
      dst_va += width * block_size;
      blocks -= width;
   }
}
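
/* each pass of the loop above copies up to 16384 (0x4000) blocks as a
 * one-row blit: the low 6 address bits are re-expressed as the src_x/dst_x
 * offsets so the base addresses stay 64-byte aligned, which the 2D engine
 * appears to require (see the alignment notes in the copy functions above).
 */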

void
tu_CmdCopyBuffer(VkCommandBuffer commandBuffer,
                 VkBuffer srcBuffer,
                 VkBuffer dstBuffer,
                 uint32_t regionCount,
                 const VkBufferCopy *pRegions)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, src_buffer, srcBuffer);
   TU_FROM_HANDLE(tu_buffer, dst_buffer, dstBuffer);

   tu_bo_list_add(&cmd->bo_list, src_buffer->bo, MSM_SUBMIT_BO_READ);
   tu_bo_list_add(&cmd->bo_list, dst_buffer->bo, MSM_SUBMIT_BO_WRITE);

   for (unsigned i = 0; i < regionCount; ++i) {
      copy_buffer(cmd,
                  tu_buffer_iova(dst_buffer) + pRegions[i].dstOffset,
                  tu_buffer_iova(src_buffer) + pRegions[i].srcOffset,
                  pRegions[i].size, 1);
   }
}

void
tu_CmdUpdateBuffer(VkCommandBuffer commandBuffer,
                   VkBuffer dstBuffer,
                   VkDeviceSize dstOffset,
                   VkDeviceSize dataSize,
                   const void *pData)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, dstBuffer);

   tu_bo_list_add(&cmd->bo_list, buffer->bo, MSM_SUBMIT_BO_WRITE);

   struct ts_cs_memory tmp;
   VkResult result = tu_cs_alloc(&cmd->sub_cs, DIV_ROUND_UP(dataSize, 64), 64, &tmp);
   if (result != VK_SUCCESS) {
      cmd->record_result = result;
      return;
   }

   memcpy(tmp.map, pData, dataSize);
   copy_buffer(cmd, tu_buffer_iova(buffer) + dstOffset, tmp.iova, dataSize, 4);
}

void
tu_CmdFillBuffer(VkCommandBuffer commandBuffer,
                 VkBuffer dstBuffer,
                 VkDeviceSize dstOffset,
                 VkDeviceSize fillSize,
                 uint32_t data)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, dstBuffer);
   const struct blit_ops *ops = &r2d_ops;
   struct tu_cs *cs = &cmd->cs;

   tu_bo_list_add(&cmd->bo_list, buffer->bo, MSM_SUBMIT_BO_WRITE);

   if (fillSize == VK_WHOLE_SIZE)
      fillSize = buffer->size - dstOffset;

   uint64_t dst_va = tu_buffer_iova(buffer) + dstOffset;
   uint32_t blocks = fillSize / 4;

   ops->setup(cmd, cs, VK_FORMAT_R32_UINT, ROTATE_0, true, 0xf);
   ops->clear_value(cs, VK_FORMAT_R32_UINT, &(VkClearValue){.color = {.uint32[0] = data}});

   while (blocks) {
      uint32_t dst_x = (dst_va & 63) / 4;
      uint32_t width = MIN2(blocks, 0x4000 - dst_x);

      ops->dst_buffer(cs, VK_FORMAT_R32_UINT, dst_va & ~63, 0);
      ops->coords(cs, &(VkOffset2D) {dst_x}, NULL, &(VkExtent2D) {width, 1});
      ops->run(cmd, cs);

      dst_va += width * 4;
      blocks -= width;
   }
}

void
tu_CmdResolveImage(VkCommandBuffer commandBuffer,
                   VkImage srcImage,
                   VkImageLayout srcImageLayout,
                   VkImage dstImage,
                   VkImageLayout dstImageLayout,
                   uint32_t regionCount,
                   const VkImageResolve *pRegions)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
   TU_FROM_HANDLE(tu_image, src_image, srcImage);
   TU_FROM_HANDLE(tu_image, dst_image, dstImage);
   const struct blit_ops *ops = &r2d_ops;
   struct tu_cs *cs = &cmd->cs;

   tu_bo_list_add(&cmd->bo_list, src_image->bo, MSM_SUBMIT_BO_READ);
   tu_bo_list_add(&cmd->bo_list, dst_image->bo, MSM_SUBMIT_BO_WRITE);

   ops->setup(cmd, cs, dst_image->vk_format, ROTATE_0, false, 0xf);

   for (uint32_t i = 0; i < regionCount; ++i) {
      const VkImageResolve *info = &pRegions[i];
      uint32_t layers = MAX2(info->extent.depth, info->dstSubresource.layerCount);

      assert(info->srcSubresource.layerCount == info->dstSubresource.layerCount);
      /* TODO: aspect masks possible ? */

      coords(ops, cs, &info->dstOffset, &info->srcOffset, &info->extent);

      struct tu_image_view dst, src;
      tu_image_view_blit(&dst, dst_image, &info->dstSubresource, info->dstOffset.z);
      tu_image_view_blit(&src, src_image, &info->srcSubresource, info->srcOffset.z);

      for (uint32_t layer = 0; layer < layers; layer++) {
         ops->src(cmd, cs, &src, layer, false);
         ops->dst(cs, &dst, layer);
         ops->run(cmd, cs);
      }
   }
}

void
tu_resolve_sysmem(struct tu_cmd_buffer *cmd,
                  struct tu_cs *cs,
                  struct tu_image_view *src,
                  struct tu_image_view *dst,
                  uint32_t layers,
                  const VkRect2D *rect)
{
   const struct blit_ops *ops = &r2d_ops;

   tu_bo_list_add(&cmd->bo_list, src->image->bo, MSM_SUBMIT_BO_READ);
   tu_bo_list_add(&cmd->bo_list, dst->image->bo, MSM_SUBMIT_BO_WRITE);

   assert(src->image->vk_format == dst->image->vk_format);

   ops->setup(cmd, cs, dst->image->vk_format, ROTATE_0, false, 0xf);
   ops->coords(cs, &rect->offset, &rect->offset, &rect->extent);

   for (uint32_t i = 0; i < layers; i++) {
      ops->src(cmd, cs, src, i, false);
      ops->dst(cs, dst, i);
      ops->run(cmd, cs);
   }
}
1673
1674 static void
1675 clear_image(struct tu_cmd_buffer *cmd,
1676 struct tu_image *image,
1677 const VkClearValue *clear_value,
1678 const VkImageSubresourceRange *range)
1679 {
1680 uint32_t level_count = tu_get_levelCount(image, range);
1681 uint32_t layer_count = tu_get_layerCount(image, range);
1682 struct tu_cs *cs = &cmd->cs;
1683 VkFormat format = image->vk_format;
1684 if (format == VK_FORMAT_E5B9G9R9_UFLOAT_PACK32)
1685 format = VK_FORMAT_R32_UINT;
1686
1687 if (image->type == VK_IMAGE_TYPE_3D) {
1688 assert(layer_count == 1);
1689 assert(range->baseArrayLayer == 0);
1690 }
1691
1692 uint8_t mask = 0xf;
1693 if (image->vk_format == VK_FORMAT_D24_UNORM_S8_UINT) {
1694 mask = 0;
1695 if (range->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT)
1696 mask |= 0x7;
1697 if (range->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT)
1698 mask |= 0x8;
1699 }
1700
1701 const struct blit_ops *ops = image->samples > 1 ? &r3d_ops : &r2d_ops;
1702
1703 ops->setup(cmd, cs, format, ROTATE_0, true, mask);
1704 ops->clear_value(cs, image->vk_format, clear_value);
1705
1706 for (unsigned j = 0; j < level_count; j++) {
1707 if (image->type == VK_IMAGE_TYPE_3D)
1708 layer_count = u_minify(image->extent.depth, range->baseMipLevel + j);
1709
1710 ops->coords(cs, &(VkOffset2D){}, NULL, &(VkExtent2D) {
1711 u_minify(image->extent.width, range->baseMipLevel + j),
1712 u_minify(image->extent.height, range->baseMipLevel + j)
1713 });
1714
1715 struct tu_image_view dst;
1716 tu_image_view_blit2(&dst, image, format, &(VkImageSubresourceLayers) {
1717 .aspectMask = range->aspectMask,
1718 .mipLevel = range->baseMipLevel + j,
1719 .baseArrayLayer = range->baseArrayLayer,
1720 .layerCount = 1,
1721 }, 0, false);
1722
1723 for (uint32_t i = 0; i < layer_count; i++) {
1724 ops->dst(cs, &dst, i);
1725 ops->run(cmd, cs);
1726 }
1727 }
1728 }
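/* A minimal sketch (illustrative only, never called by the driver) of the
* aspect-to-component-mask convention used by clear_image() and the
* attachment-clear paths below for VK_FORMAT_D24_UNORM_S8_UINT: viewed as
* R8G8B8A8, depth occupies the RGB channels and stencil the A channel.
*/
static inline uint8_t
d24s8_clear_mask_sketch(VkImageAspectFlags aspects)
{
uint8_t mask = 0;
if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
mask |= 0x7; /* R, G, B carry the 24 depth bits */
if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT)
mask |= 0x8; /* A carries the 8 stencil bits */
return mask;
}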
1729
1730 void
1731 tu_CmdClearColorImage(VkCommandBuffer commandBuffer,
1732 VkImage image_h,
1733 VkImageLayout imageLayout,
1734 const VkClearColorValue *pColor,
1735 uint32_t rangeCount,
1736 const VkImageSubresourceRange *pRanges)
1737 {
1738 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
1739 TU_FROM_HANDLE(tu_image, image, image_h);
1740
1741 tu_bo_list_add(&cmd->bo_list, image->bo, MSM_SUBMIT_BO_WRITE);
1742
1743 for (unsigned i = 0; i < rangeCount; i++)
1744 clear_image(cmd, image, (const VkClearValue*) pColor, pRanges + i);
1745 }
1746
1747 void
1748 tu_CmdClearDepthStencilImage(VkCommandBuffer commandBuffer,
1749 VkImage image_h,
1750 VkImageLayout imageLayout,
1751 const VkClearDepthStencilValue *pDepthStencil,
1752 uint32_t rangeCount,
1753 const VkImageSubresourceRange *pRanges)
1754 {
1755 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
1756 TU_FROM_HANDLE(tu_image, image, image_h);
1757
1758 tu_bo_list_add(&cmd->bo_list, image->bo, MSM_SUBMIT_BO_WRITE);
1759
1760 for (unsigned i = 0; i < rangeCount; i++)
1761 clear_image(cmd, image, (const VkClearValue*) pDepthStencil, pRanges + i);
1762 }
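/* For context, a hypothetical application-side call that reaches
* tu_CmdClearColorImage() above (a sketch; names and values are
* illustrative, and the image is assumed to already be in the
* TRANSFER_DST layout).
*/
static void
clear_color_usage_sketch(VkCommandBuffer cmd_buf, VkImage image)
{
const VkClearColorValue color = { .float32 = { 0.0f, 0.0f, 0.0f, 1.0f } };
const VkImageSubresourceRange range = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = VK_REMAINING_MIP_LEVELS,
.baseArrayLayer = 0,
.layerCount = VK_REMAINING_ARRAY_LAYERS,
};
vkCmdClearColorImage(cmd_buf, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
&color, 1, &range);
}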
1763
1764 static void
1765 tu_clear_sysmem_attachments_2d(struct tu_cmd_buffer *cmd,
1766 uint32_t attachment_count,
1767 const VkClearAttachment *attachments,
1768 uint32_t rect_count,
1769 const VkClearRect *rects)
1770 {
1771 const struct tu_subpass *subpass = cmd->state.subpass;
1772 /* note: cannot use the shader path here; the special shader path
1773 * lives in tu_clear_sysmem_attachments()
1774 */
1775 const struct blit_ops *ops = &r2d_ops;
1776 struct tu_cs *cs = &cmd->draw_cs;
1777
1778 for (uint32_t j = 0; j < attachment_count; j++) {
1779 uint32_t a;
1780 if (attachments[j].aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
1781 a = subpass->color_attachments[attachments[j].colorAttachment].attachment;
1782 } else {
1783 a = subpass->depth_stencil_attachment.attachment;
1784
1785 /* sync depth into color */
1786 tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);
1787 /* also flush color to avoid losing contents from invalidate */
1788 tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
1789 tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_COLOR, false);
1790 }
1791
1792 if (a == VK_ATTACHMENT_UNUSED)
1793 continue;
1794
1795 uint8_t mask = 0xf;
1796 if (cmd->state.pass->attachments[a].format == VK_FORMAT_D24_UNORM_S8_UINT) {
1797 if (!(attachments[j].aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT))
1798 mask &= ~0x7;
1799 if (!(attachments[j].aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT))
1800 mask &= ~0x8;
1801 }
1802
1803 const struct tu_image_view *iview =
1804 cmd->state.framebuffer->attachments[a].attachment;
1805
1806 ops->setup(cmd, cs, iview->image->vk_format, ROTATE_0, true, mask);
1807 ops->clear_value(cs, iview->image->vk_format, &attachments[j].clearValue);
1808
1809 for (uint32_t i = 0; i < rect_count; i++) {
1810 ops->coords(cs, &rects[i].rect.offset, NULL, &rects[i].rect.extent);
1811 for (uint32_t layer = 0; layer < rects[i].layerCount; layer++) {
1812 ops->dst(cs, iview, rects[i].baseArrayLayer + layer);
1813 ops->run(cmd, cs);
1814 }
1815 }
1816
1817 if (attachments[j].aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
1818 /* does not use CCU - flush
1819 * note: a cache invalidate might be needed too, it's just not covered by test cases
1820 */
1821 if (attachments[j].colorAttachment > 0)
1822 tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
1823 } else {
1824 /* sync color into depth */
1825 tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
1826 tu6_emit_event_write(cmd, cs, PC_CCU_INVALIDATE_DEPTH, false);
1827 }
1828 }
1829 }
1830
1831 static void
1832 tu_clear_sysmem_attachments(struct tu_cmd_buffer *cmd,
1833 uint32_t attachment_count,
1834 const VkClearAttachment *attachments,
1835 uint32_t rect_count,
1836 const VkClearRect *rects)
1837 {
1838 /* the shader path here is special, it avoids changing MRT/etc state */
1839 const struct tu_render_pass *pass = cmd->state.pass;
1840 const struct tu_subpass *subpass = cmd->state.subpass;
1841 const uint32_t mrt_count = subpass->color_count;
1842 struct tu_cs *cs = &cmd->draw_cs;
1843 uint32_t clear_value[MAX_RTS][4];
1844 float z_clear_val = 0.0f;
1845 uint8_t s_clear_val = 0;
1846 uint32_t clear_rts = 0, num_rts = 0, b;
1847 bool z_clear = false;
1848 bool s_clear = false;
1849 uint32_t max_samples = 1;
1850
1851 for (uint32_t i = 0; i < attachment_count; i++) {
1852 uint32_t a;
1853 if (attachments[i].aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
1854 uint32_t c = attachments[i].colorAttachment;
1855 a = subpass->color_attachments[c].attachment;
1856 if (a == VK_ATTACHMENT_UNUSED)
1857 continue;
1858
1859 clear_rts |= 1 << c;
1860 memcpy(clear_value[c], &attachments[i].clearValue, 4 * sizeof(uint32_t));
1861 } else {
1862 a = subpass->depth_stencil_attachment.attachment;
1863 if (a == VK_ATTACHMENT_UNUSED)
1864 continue;
1865
1866 if (attachments[i].aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) {
1867 z_clear = true;
1868 z_clear_val = attachments[i].clearValue.depthStencil.depth;
1869 }
1870
1871 if (attachments[i].aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) {
1872 s_clear = true;
1873 s_clear_val = attachments[i].clearValue.depthStencil.stencil & 0xff;
1874 }
1875 }
1876
1877 max_samples = MAX2(max_samples, pass->attachments[a].samples);
1878 }
1879
1880 /* prefer the 2D path for clears:
1881 * the 2D path can't clear separate depth/stencil or MSAA, and needs a known framebuffer
1882 */
1883 if (max_samples == 1 && cmd->state.framebuffer) {
1884 tu_clear_sysmem_attachments_2d(cmd, attachment_count, attachments, rect_count, rects);
1885 return;
1886 }
1887
1888 /* TODO: this path doesn't take into account multilayer rendering */
1889
1890 tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_OUTPUT_CNTL0, 2);
1891 tu_cs_emit(cs, A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID(0xfc) |
1892 A6XX_SP_FS_OUTPUT_CNTL0_SAMPMASK_REGID(0xfc) |
1893 0xfc000000);
1894 tu_cs_emit(cs, A6XX_SP_FS_OUTPUT_CNTL1_MRT(mrt_count));
1895
1896 tu_cs_emit_pkt4(cs, REG_A6XX_SP_FS_OUTPUT_REG(0), mrt_count);
1897 for (uint32_t i = 0; i < mrt_count; i++) {
1898 if (clear_rts & (1 << i))
1899 tu_cs_emit(cs, A6XX_SP_FS_OUTPUT_REG_REGID(num_rts++ * 4));
1900 else
1901 tu_cs_emit(cs, 0);
1902 }
1903
1904 r3d_pipeline(cmd, cs, false, num_rts);
1905
1906 tu_cs_emit_regs(cs,
1907 A6XX_RB_FS_OUTPUT_CNTL0(),
1908 A6XX_RB_FS_OUTPUT_CNTL1(.mrt = mrt_count));
1909
1910 tu_cs_emit_regs(cs, A6XX_SP_BLEND_CNTL());
1911 tu_cs_emit_regs(cs, A6XX_RB_BLEND_CNTL(.independent_blend = 1, .sample_mask = 0xffff));
1912 tu_cs_emit_regs(cs, A6XX_RB_ALPHA_CONTROL());
1913 for (uint32_t i = 0; i < mrt_count; i++) {
1914 tu_cs_emit_regs(cs, A6XX_RB_MRT_CONTROL(i,
1915 .component_enable = COND(clear_rts & (1 << i), 0xf)));
1916 }
1917
1918 tu_cs_emit_regs(cs, A6XX_RB_DEPTH_PLANE_CNTL());
1919 tu_cs_emit_regs(cs, A6XX_RB_DEPTH_CNTL(
1920 .z_enable = z_clear,
1921 .z_write_enable = z_clear,
1922 .zfunc = FUNC_ALWAYS));
1923 tu_cs_emit_regs(cs, A6XX_GRAS_SU_DEPTH_PLANE_CNTL());
1924 tu_cs_emit_regs(cs, A6XX_RB_STENCIL_CONTROL(
1925 .stencil_enable = s_clear,
1926 .func = FUNC_ALWAYS,
1927 .zpass = VK_STENCIL_OP_REPLACE)); /* VK and adreno enum values match for REPLACE */
1928 tu_cs_emit_regs(cs, A6XX_RB_STENCILMASK(.mask = 0xff));
1929 tu_cs_emit_regs(cs, A6XX_RB_STENCILWRMASK(.wrmask = 0xff));
1930 tu_cs_emit_regs(cs, A6XX_RB_STENCILREF(.ref = s_clear_val));
1931 /* upload one vec4 of packed clear color per cleared MRT as FS constants */
1932 tu_cs_emit_pkt7(cs, CP_LOAD_STATE6_FRAG, 3 + 4 * num_rts);
1933 tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(0) |
1934 CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
1935 CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
1936 CP_LOAD_STATE6_0_STATE_BLOCK(SB6_FS_SHADER) |
1937 CP_LOAD_STATE6_0_NUM_UNIT(num_rts));
1938 tu_cs_emit(cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
1939 tu_cs_emit(cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
1940 for_each_bit(b, clear_rts)
1941 tu_cs_emit_array(cs, clear_value[b], 4);
1942
1943 for (uint32_t i = 0; i < rect_count; i++) {
1944 r3d_coords_raw(cs, (float[]) {
1945 rects[i].rect.offset.x, rects[i].rect.offset.y,
1946 z_clear_val, 1.0f,
1947 rects[i].rect.offset.x + rects[i].rect.extent.width,
1948 rects[i].rect.offset.y + rects[i].rect.extent.height,
1949 z_clear_val, 1.0f
1950 });
1951 r3d_run(cmd, cs);
1952 }
1953
1954 cmd->state.dirty |= TU_CMD_DIRTY_PIPELINE |
1955 TU_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
1956 TU_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK |
1957 TU_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE |
1958 TU_CMD_DIRTY_DYNAMIC_VIEWPORT |
1959 TU_CMD_DIRTY_DYNAMIC_SCISSOR;
1960 }
1961
1962 /**
1963 * Pack a VkClearValue into a 128-bit buffer. The format is respected except
1964 * for the component order: components are always packed in WZYX order,
1965 * because GMEM is tiled and tiled formats always use the WZYX swap.
1966 */
1967 static void
1968 pack_gmem_clear_value(const VkClearValue *val, VkFormat format, uint32_t buf[4])
1969 {
1970 const struct util_format_description *desc = vk_format_description(format);
1971
1972 switch (format) {
1973 case VK_FORMAT_B10G11R11_UFLOAT_PACK32:
1974 buf[0] = float3_to_r11g11b10f(val->color.float32);
1975 return;
1976 case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32:
1977 buf[0] = float3_to_rgb9e5(val->color.float32);
1978 return;
1979 default:
1980 break;
1981 }
1982
1983 assert(desc && desc->layout == UTIL_FORMAT_LAYOUT_PLAIN);
1984
1985 /* S8_UINT is special and has no depth */
1986 const int max_components =
1987 format == VK_FORMAT_S8_UINT ? 2 : desc->nr_channels;
1988
1989 int buf_offset = 0;
1990 int bit_shift = 0;
1991 for (int comp = 0; comp < max_components; comp++) {
1992 const struct util_format_channel_description *ch =
1993 tu_get_format_channel_description(desc, comp);
1994 if (!ch) {
1995 assert((format == VK_FORMAT_S8_UINT && comp == 0) ||
1996 (format == VK_FORMAT_X8_D24_UNORM_PACK32 && comp == 1));
1997 continue;
1998 }
1999
2000 union tu_clear_component_value v = tu_get_clear_component_value(
2001 val, comp, desc->colorspace);
2002
2003 /* move to the next uint32_t when there is not enough space */
2004 assert(ch->size <= 32);
2005 if (bit_shift + ch->size > 32) {
2006 buf_offset++;
2007 bit_shift = 0;
2008 }
2009
2010 if (bit_shift == 0)
2011 buf[buf_offset] = 0;
2012
2013 buf[buf_offset] |= tu_pack_clear_component_value(v, ch) << bit_shift;
2014 bit_shift += ch->size;
2015 }
2016 }
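/* Worked example (a sketch, not part of the driver): packing an R8G8B8A8
* unorm clear color with the function above. Channels land at increasing
* bit offsets, so (1.0, 0.5, 0.0, 1.0) packs to 0xff0080ff
* (from high to low bits: A=0xff, B=0x00, G=0x80, R=0xff; 127.5 rounds
* to even, giving 0x80 for the green channel).
*/
static void
pack_gmem_clear_value_example(void)
{
const VkClearValue val = { .color = { .float32 = { 1.0f, 0.5f, 0.0f, 1.0f } } };
uint32_t buf[4] = {};
pack_gmem_clear_value(&val, VK_FORMAT_R8G8B8A8_UNORM, buf);
assert(buf[0] == 0xff0080ff);
}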
2017
2018 static void
2019 tu_emit_clear_gmem_attachment(struct tu_cmd_buffer *cmd,
2020 struct tu_cs *cs,
2021 uint32_t attachment,
2022 uint8_t component_mask,
2023 const VkClearValue *value)
2024 {
2025 VkFormat vk_format = cmd->state.pass->attachments[attachment].format;
2026 /* note: component_mask is 0x7 for depth and 0x8 for stencil
2027 * because D24S8 is cleared with AS_R8G8B8A8 format
2028 */
2029
2030 tu_cs_emit_pkt4(cs, REG_A6XX_RB_BLIT_DST_INFO, 1);
2031 tu_cs_emit(cs, A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(tu6_base_format(vk_format)));
2032
2033 tu_cs_emit_pkt4(cs, REG_A6XX_RB_BLIT_INFO, 1);
2034 tu_cs_emit(cs, A6XX_RB_BLIT_INFO_GMEM | A6XX_RB_BLIT_INFO_CLEAR_MASK(component_mask));
2035
2036 tu_cs_emit_pkt4(cs, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
2037 tu_cs_emit(cs, cmd->state.pass->attachments[attachment].gmem_offset);
2038
2039 tu_cs_emit_pkt4(cs, REG_A6XX_RB_UNKNOWN_88D0, 1);
2040 tu_cs_emit(cs, 0);
2041
2042 uint32_t clear_vals[4] = {};
2043 pack_gmem_clear_value(value, vk_format, clear_vals);
2044
2045 tu_cs_emit_pkt4(cs, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0, 4);
2046 tu_cs_emit_array(cs, clear_vals, 4);
2047
2048 tu6_emit_event_write(cmd, cs, BLIT, false);
2049 }
2050
2051 static void
2052 tu_clear_gmem_attachments(struct tu_cmd_buffer *cmd,
2053 uint32_t attachment_count,
2054 const VkClearAttachment *attachments,
2055 uint32_t rect_count,
2056 const VkClearRect *rects)
2057 {
2058 const struct tu_subpass *subpass = cmd->state.subpass;
2059 struct tu_cs *cs = &cmd->draw_cs;
2060
2061 /* TODO: swap the loops for smaller cmdstream */
2062 for (unsigned i = 0; i < rect_count; i++) {
2063 unsigned x1 = rects[i].rect.offset.x;
2064 unsigned y1 = rects[i].rect.offset.y;
2065 unsigned x2 = x1 + rects[i].rect.extent.width - 1;
2066 unsigned y2 = y1 + rects[i].rect.extent.height - 1;
2067
2068 tu_cs_emit_pkt4(cs, REG_A6XX_RB_BLIT_SCISSOR_TL, 2);
2069 tu_cs_emit(cs, A6XX_RB_BLIT_SCISSOR_TL_X(x1) | A6XX_RB_BLIT_SCISSOR_TL_Y(y1));
2070 tu_cs_emit(cs, A6XX_RB_BLIT_SCISSOR_BR_X(x2) | A6XX_RB_BLIT_SCISSOR_BR_Y(y2));
2071
2072 for (unsigned j = 0; j < attachment_count; j++) {
2073 uint32_t a;
2074 if (attachments[j].aspectMask & VK_IMAGE_ASPECT_COLOR_BIT)
2075 a = subpass->color_attachments[attachments[j].colorAttachment].attachment;
2076 else
2077 a = subpass->depth_stencil_attachment.attachment;
2078
2079 if (a == VK_ATTACHMENT_UNUSED)
2080 continue;
2081
2082 unsigned clear_mask = 0xf;
2083 if (cmd->state.pass->attachments[a].format == VK_FORMAT_D24_UNORM_S8_UINT) {
2084 if (!(attachments[j].aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT))
2085 clear_mask &= ~0x7;
2086 if (!(attachments[j].aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT))
2087 clear_mask &= ~0x8;
2088 }
2089
2090 tu_emit_clear_gmem_attachment(cmd, cs, a, clear_mask,
2091 &attachments[j].clearValue);
2092 }
2093 }
2094 }
2095
2096 void
2097 tu_CmdClearAttachments(VkCommandBuffer commandBuffer,
2098 uint32_t attachmentCount,
2099 const VkClearAttachment *pAttachments,
2100 uint32_t rectCount,
2101 const VkClearRect *pRects)
2102 {
2103 TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2104 struct tu_cs *cs = &cmd->draw_cs;
2105 /* emit both paths; CP_COND_EXEC runs only the one matching the render mode */
2106 tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_GMEM);
2107 tu_clear_gmem_attachments(cmd, attachmentCount, pAttachments, rectCount, pRects);
2108 tu_cond_exec_end(cs);
2109
2110 tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_SYSMEM);
2111 tu_clear_sysmem_attachments(cmd, attachmentCount, pAttachments, rectCount, pRects);
2112 tu_cond_exec_end(cs);
2113 }
2114
2115 void
2116 tu_clear_sysmem_attachment(struct tu_cmd_buffer *cmd,
2117 struct tu_cs *cs,
2118 uint32_t a,
2119 const VkRenderPassBeginInfo *info)
2120 {
2121 const struct tu_framebuffer *fb = cmd->state.framebuffer;
2122 const struct tu_image_view *iview = fb->attachments[a].attachment;
2123 const struct tu_render_pass_attachment *attachment =
2124 &cmd->state.pass->attachments[a];
2125 uint8_t mask = 0;
2126
2127 if (attachment->clear_mask == VK_IMAGE_ASPECT_COLOR_BIT)
2128 mask = 0xf;
2129 if (attachment->clear_mask & VK_IMAGE_ASPECT_DEPTH_BIT)
2130 mask |= 0x7;
2131 if (attachment->clear_mask & VK_IMAGE_ASPECT_STENCIL_BIT)
2132 mask |= 0x8;
2133
2134 if (!mask)
2135 return;
2136
2137 const struct blit_ops *ops = &r2d_ops;
2138 if (attachment->samples > 1)
2139 ops = &r3d_ops;
2140
2141 ops->setup(cmd, cs, attachment->format, ROTATE_0, true, mask);
2142 ops->coords(cs, &info->renderArea.offset, NULL, &info->renderArea.extent);
2143 ops->clear_value(cs, attachment->format, &info->pClearValues[a]);
2144
2145 for (uint32_t i = 0; i < fb->layers; i++) {
2146 ops->dst(cs, iview, i);
2147 ops->run(cmd, cs);
2148 }
2149 }
2150
2151 void
2152 tu_clear_gmem_attachment(struct tu_cmd_buffer *cmd,
2153 struct tu_cs *cs,
2154 uint32_t a,
2155 const VkRenderPassBeginInfo *info)
2156 {
2157 const struct tu_render_pass_attachment *attachment =
2158 &cmd->state.pass->attachments[a];
2159 unsigned clear_mask = 0;
2160
2161 if (attachment->clear_mask == VK_IMAGE_ASPECT_COLOR_BIT)
2162 clear_mask = 0xf;
2163 if (attachment->clear_mask & VK_IMAGE_ASPECT_DEPTH_BIT)
2164 clear_mask |= 0x7;
2165 if (attachment->clear_mask & VK_IMAGE_ASPECT_STENCIL_BIT)
2166 clear_mask |= 0x8;
2167
2168 if (!clear_mask)
2169 return;
2170
2171 tu_cs_emit_regs(cs, A6XX_RB_MSAA_CNTL(tu_msaa_samples(attachment->samples)));
2172
2173 tu_emit_clear_gmem_attachment(cmd, cs, a, clear_mask,
2174 &info->pClearValues[a]);
2175 }
2176
2177 static void
2178 tu_emit_blit(struct tu_cmd_buffer *cmd,
2179 struct tu_cs *cs,
2180 const struct tu_image_view *iview,
2181 const struct tu_render_pass_attachment *attachment,
2182 bool resolve)
2183 {
2184 tu_cs_emit_regs(cs,
2185 A6XX_RB_MSAA_CNTL(tu_msaa_samples(attachment->samples)));
2186
2187 tu_cs_emit_regs(cs, A6XX_RB_BLIT_INFO(
2188 .unk0 = !resolve,
2189 .gmem = !resolve,
2190 /* "integer" bit disables msaa resolve averaging */
2191 .integer = vk_format_is_int(attachment->format)));
2192
2193 tu_cs_emit_pkt4(cs, REG_A6XX_RB_BLIT_DST_INFO, 4);
2194 tu_cs_emit(cs, iview->RB_BLIT_DST_INFO);
2195 tu_cs_image_ref_2d(cs, iview, 0, false);
2196
2197 tu_cs_emit_pkt4(cs, REG_A6XX_RB_BLIT_FLAG_DST_LO, 3);
2198 tu_cs_image_flag_ref(cs, iview, 0);
2199
2200 tu_cs_emit_regs(cs,
2201 A6XX_RB_BLIT_BASE_GMEM(attachment->gmem_offset));
2202
2203 tu6_emit_event_write(cmd, cs, BLIT, false);
2204 }
2205
2206 static bool
2207 blit_can_resolve(VkFormat format)
2208 {
2209 const struct util_format_description *desc = vk_format_description(format);
2210
2211 /* blit event can only do resolve for simple cases:
2212 * averaging samples as unsigned integers or choosing only one sample
2213 */
2214 if (vk_format_is_snorm(format) || vk_format_is_srgb(format))
2215 return false;
2216
2217 /* can't do formats with channel sizes larger than 10 bits
2218 * note: this includes all float formats
2219 * note2: single-channel integer formats seem OK
2220 */
2221 if (desc->channel[0].size > 10)
2222 return false;
2223
2224 switch (format) {
2225 /* for unknown reasons the blit event can't MSAA-resolve these formats when tiled,
2226 * likely related to these formats having a different layout from other cpp=2 formats
2227 */
2228 case VK_FORMAT_R8G8_UNORM:
2229 case VK_FORMAT_R8G8_UINT:
2230 case VK_FORMAT_R8G8_SINT:
2231 /* TODO: this one should be able to work? */
2232 case VK_FORMAT_D24_UNORM_S8_UINT:
2233 return false;
2234 default:
2235 break;
2236 }
2237
2238 return true;
2239 }
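/* Sample decisions implied by the rules above (a sketch, not driver code): */
static void
blit_can_resolve_examples(void)
{
assert(blit_can_resolve(VK_FORMAT_R8G8B8A8_UNORM)); /* small unorm channels: OK */
assert(!blit_can_resolve(VK_FORMAT_R8G8B8A8_SRGB)); /* srgb: rejected */
assert(!blit_can_resolve(VK_FORMAT_R16G16B16A16_SFLOAT)); /* >10-bit (float) channels */
assert(!blit_can_resolve(VK_FORMAT_R8G8_UNORM)); /* on the explicit exception list */
}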
2240
2241 void
2242 tu_load_gmem_attachment(struct tu_cmd_buffer *cmd,
2243 struct tu_cs *cs,
2244 uint32_t a,
2245 bool force_load)
2246 {
2247 const struct tu_image_view *iview =
2248 cmd->state.framebuffer->attachments[a].attachment;
2249 const struct tu_render_pass_attachment *attachment =
2250 &cmd->state.pass->attachments[a];
2251
2252 if (attachment->load || force_load)
2253 tu_emit_blit(cmd, cs, iview, attachment, false);
2254 }
2255
2256 void
2257 tu_store_gmem_attachment(struct tu_cmd_buffer *cmd,
2258 struct tu_cs *cs,
2259 uint32_t a,
2260 uint32_t gmem_a)
2261 {
2262 const struct tu_tiling_config *tiling = &cmd->state.tiling_config;
2263 const VkRect2D *render_area = &tiling->render_area;
2264 struct tu_render_pass_attachment *dst = &cmd->state.pass->attachments[a];
2265 struct tu_image_view *iview = cmd->state.framebuffer->attachments[a].attachment;
2266 struct tu_render_pass_attachment *src = &cmd->state.pass->attachments[gmem_a];
2267
2268 if (!dst->store)
2269 return;
2270
2271 uint32_t x1 = render_area->offset.x;
2272 uint32_t y1 = render_area->offset.y;
2273 uint32_t x2 = x1 + render_area->extent.width;
2274 uint32_t y2 = y1 + render_area->extent.height;
2275 /* x2/y2 can be unaligned if equal to the size of the image,
2276 * since the blit will then write into padding space;
2277 * the one exception is linear levels, which don't have the
2278 * required y padding in the layout (except for the last level)
2279 */
2280 bool need_y2_align =
2281 y2 != iview->extent.height || iview->need_y2_align;
2282
2283 bool unaligned =
2284 x1 % GMEM_ALIGN_W || (x2 % GMEM_ALIGN_W && x2 != iview->extent.width) ||
2285 y1 % GMEM_ALIGN_H || (y2 % GMEM_ALIGN_H && need_y2_align);
2286
2287 /* use fast path when render area is aligned, except for unsupported resolve cases */
2288 if (!unaligned && (a == gmem_a || blit_can_resolve(dst->format))) {
2289 tu_emit_blit(cmd, cs, iview, src, true);
2290 return;
2291 }
2292
2293 if (dst->samples > 1) {
2294 /* presumably the shader path is needed in this case;
2295 * we still need a testcase which fails because of this to confirm
2296 */
2297 tu_finishme("unaligned store of msaa attachment\n");
2298 return;
2299 }
2300
2301 r2d_setup_common(cmd, cs, dst->format, ROTATE_0, false, 0xf, true);
2302 r2d_dst(cs, iview, 0);
2303 r2d_coords(cs, &render_area->offset, &render_area->offset, &render_area->extent);
2304
2305 tu_cs_emit_regs(cs,
2306 A6XX_SP_PS_2D_SRC_INFO(
2307 .color_format = tu6_format_texture(src->format, TILE6_2).fmt,
2308 .tile_mode = TILE6_2,
2309 .srgb = vk_format_is_srgb(src->format),
2310 .samples = tu_msaa_samples(src->samples),
2311 .samples_average = !vk_format_is_int(src->format),
2312 .unk20 = 1,
2313 .unk22 = 1),
2314 /* note: src size does not matter when not scaling */
2315 A6XX_SP_PS_2D_SRC_SIZE( .width = 0x3fff, .height = 0x3fff),
2316 A6XX_SP_PS_2D_SRC_LO(cmd->device->physical_device->gmem_base + src->gmem_offset),
2317 A6XX_SP_PS_2D_SRC_HI(),
2318 A6XX_SP_PS_2D_SRC_PITCH(.pitch = tiling->tile0.extent.width * src->cpp));
2319
2320 /* sync GMEM writes with CACHE */
2321 tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
2322
2323 tu_cs_emit_pkt7(cs, CP_BLIT, 1);
2324 tu_cs_emit(cs, CP_BLIT_0_OP(BLIT_OP_SCALE));
2325
2326 /* TODO: flush with barriers instead of blindly always flushing */
2327 tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS, true);
2328 tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_DEPTH_TS, true);
2329 tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE, false);
2330 }
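/* The "unaligned" test above, restated as a standalone predicate for
* clarity (a sketch only; GMEM_ALIGN_W/H are the same macros this file
* already uses). A full-surface store is always aligned because
* x2 == width and, barring the linear-level exception, y2 == height are
* allowed to hang over into padding.
*/
static bool
store_area_aligned_sketch(uint32_t x1, uint32_t y1, uint32_t x2, uint32_t y2,
const struct tu_image_view *iview, bool need_y2_align)
{
return !(x1 % GMEM_ALIGN_W ||
(x2 % GMEM_ALIGN_W && x2 != iview->extent.width) ||
y1 % GMEM_ALIGN_H ||
(y2 % GMEM_ALIGN_H && need_y2_align));
}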