util: rename PIPE_ARCH_*_ENDIAN to UTIL_ARCH_*_ENDIAN
[mesa.git] / src / gallium / drivers / radeonsi / si_clear.c
1 /*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * on the rights to use, copy, modify, merge, publish, distribute, sub
9 * license, and/or sell copies of the Software, and to permit persons to whom
10 * the Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25 #include "si_pipe.h"
26 #include "sid.h"
27
28 #include "util/u_format.h"
29 #include "util/u_pack_color.h"
30 #include "util/u_surface.h"
31
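/* Blitter state-save sets for si_blitter_begin(): a framebuffer clear only
 * needs fragment state saved/restored, while clearing a single surface also
 * saves the currently bound framebuffer.
 */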
32 enum {
33 SI_CLEAR = SI_SAVE_FRAGMENT_STATE,
34 SI_CLEAR_SURFACE = SI_SAVE_FRAMEBUFFER | SI_SAVE_FRAGMENT_STATE,
35 };
36
37 static void si_alloc_separate_cmask(struct si_screen *sscreen,
38 struct si_texture *tex)
39 {
40 /* CMASK for MSAA is either allocated in advance or always disabled
41 * by the "nofmask" option.
42 */
43 if (tex->cmask_buffer || !tex->surface.cmask_size ||
44 tex->buffer.b.b.nr_samples >= 2)
45 return;
46
47 tex->cmask_buffer =
48 si_aligned_buffer_create(&sscreen->b,
49 SI_RESOURCE_FLAG_UNMAPPABLE,
50 PIPE_USAGE_DEFAULT,
51 tex->surface.cmask_size,
52 tex->surface.cmask_alignment);
53 if (tex->cmask_buffer == NULL)
54 return;
55
56 tex->cmask_base_address_reg = tex->cmask_buffer->gpu_address >> 8;
57 tex->cb_color_info |= S_028C70_FAST_CLEAR(1);
58
59 p_atomic_inc(&sscreen->compressed_colortex_counter);
60 }
61
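/* Pack the clear color for the given surface format into the texture's
 * color_clear_value words. Returns true if the stored value changed, so the
 * caller knows the CB clear color registers must be re-emitted.
 */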
62 static bool si_set_clear_color(struct si_texture *tex,
63 enum pipe_format surface_format,
64 const union pipe_color_union *color)
65 {
66 union util_color uc;
67
68 memset(&uc, 0, sizeof(uc));
69
70 if (tex->surface.bpe == 16) {
71 /* DCC fast clear only:
72 * CLEAR_WORD0 = R = G = B
73 * CLEAR_WORD1 = A
74 */
75 assert(color->ui[0] == color->ui[1] &&
76 color->ui[0] == color->ui[2]);
77 uc.ui[0] = color->ui[0];
78 uc.ui[1] = color->ui[3];
79 } else if (util_format_is_pure_uint(surface_format)) {
80 util_format_write_4ui(surface_format, color->ui, 0, &uc, 0, 0, 0, 1, 1);
81 } else if (util_format_is_pure_sint(surface_format)) {
82 util_format_write_4i(surface_format, color->i, 0, &uc, 0, 0, 0, 1, 1);
83 } else {
84 util_pack_color(color->f, surface_format, &uc);
85 }
86
87 if (memcmp(tex->color_clear_value, &uc, 2 * sizeof(uint32_t)) == 0)
88 return false;
89
90 memcpy(tex->color_clear_value, &uc, 2 * sizeof(uint32_t));
91 return true;
92 }
93
94 /** Linearize and convert luminance/intensity to red. */
95 enum pipe_format si_simplify_cb_format(enum pipe_format format)
96 {
97 format = util_format_linear(format);
98 format = util_format_luminance_to_red(format);
99 return util_format_intensity_to_red(format);
100 }
101
102 bool vi_alpha_is_on_msb(struct si_screen *sscreen, enum pipe_format format)
103 {
104 format = si_simplify_cb_format(format);
105 const struct util_format_description *desc = util_format_description(format);
106
107 /* Formats with 3 channels can't have alpha. */
108 if (desc->nr_channels == 3)
109 return true; /* same as xxxA; is any value OK here? */
110
111 if (sscreen->info.chip_class >= GFX10 && desc->nr_channels == 1)
112 return desc->swizzle[3] == PIPE_SWIZZLE_X;
113
114 return si_translate_colorswap(format, false) <= 1;
115 }
116
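/* Compute the DCC clear code for a fast clear. On success, *clear_value is
 * one of the DCC_CLEAR_COLOR_* codes and *eliminate_needed reports whether an
 * ELIMINATE_FAST_CLEAR pass is still required (i.e. the clear color isn't a
 * 0/1 combination that DCC can encode directly). Returns false if a DCC fast
 * clear isn't possible at all.
 */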
117 static bool vi_get_fast_clear_parameters(struct si_screen *sscreen,
118 enum pipe_format base_format,
119 enum pipe_format surface_format,
120 const union pipe_color_union *color,
121 uint32_t* clear_value,
122 bool *eliminate_needed)
123 {
124 /* If we want to clear without needing a fast clear eliminate step, we
125 * can set color and alpha independently to 0 or 1 (or 0/max for integer
126 * formats).
127 */
128 bool values[4] = {}; /* whether to clear to 0 or 1 */
129 bool color_value = false; /* clear color to 0 or 1 */
130 bool alpha_value = false; /* clear alpha to 0 or 1 */
131 int alpha_channel; /* index of the alpha component */
132 bool has_color = false;
133 bool has_alpha = false;
134
135 const struct util_format_description *desc =
136 util_format_description(si_simplify_cb_format(surface_format));
137
138 /* 128-bit fast clear with different R,G,B values is unsupported. */
139 if (desc->block.bits == 128 &&
140 (color->ui[0] != color->ui[1] ||
141 color->ui[0] != color->ui[2]))
142 return false;
143
144 *eliminate_needed = true;
145 *clear_value = DCC_CLEAR_COLOR_REG;
146
147 if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN)
148 return true; /* need ELIMINATE_FAST_CLEAR */
149
150 bool base_alpha_is_on_msb = vi_alpha_is_on_msb(sscreen, base_format);
151 bool surf_alpha_is_on_msb = vi_alpha_is_on_msb(sscreen, surface_format);
152
153 /* Formats with 3 channels can't have alpha. */
154 if (desc->nr_channels == 3)
155 alpha_channel = -1;
156 else if (surf_alpha_is_on_msb)
157 alpha_channel = desc->nr_channels - 1;
158 else
159 alpha_channel = 0;
160
161 for (int i = 0; i < 4; ++i) {
162 if (desc->swizzle[i] >= PIPE_SWIZZLE_0)
163 continue;
164
165 if (desc->channel[i].pure_integer &&
166 desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
167 /* Use the maximum value for clamping the clear color. */
168 int max = u_bit_consecutive(0, desc->channel[i].size - 1);
169
170 values[i] = color->i[i] != 0;
171 if (color->i[i] != 0 && MIN2(color->i[i], max) != max)
172 return true; /* need ELIMINATE_FAST_CLEAR */
173 } else if (desc->channel[i].pure_integer &&
174 desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED) {
175 /* Use the maximum value for clamping the clear color. */
176 unsigned max = u_bit_consecutive(0, desc->channel[i].size);
177
178 values[i] = color->ui[i] != 0U;
179 if (color->ui[i] != 0U && MIN2(color->ui[i], max) != max)
180 return true; /* need ELIMINATE_FAST_CLEAR */
181 } else {
182 values[i] = color->f[i] != 0.0F;
183 if (color->f[i] != 0.0F && color->f[i] != 1.0F)
184 return true; /* need ELIMINATE_FAST_CLEAR */
185 }
186
187 if (desc->swizzle[i] == alpha_channel) {
188 alpha_value = values[i];
189 has_alpha = true;
190 } else {
191 color_value = values[i];
192 has_color = true;
193 }
194 }
195
196 /* If alpha isn't present, make it the same as color, and vice versa. */
197 if (!has_alpha)
198 alpha_value = color_value;
199 else if (!has_color)
200 color_value = alpha_value;
201
202 if (color_value != alpha_value &&
203 base_alpha_is_on_msb != surf_alpha_is_on_msb)
204 return true; /* require ELIMINATE_FAST_CLEAR */
205
206 /* Check if all color values are equal if they are present. */
207 for (int i = 0; i < 4; ++i) {
208 if (desc->swizzle[i] <= PIPE_SWIZZLE_W &&
209 desc->swizzle[i] != alpha_channel &&
210 values[i] != color_value)
211 return true; /* require ELIMINATE_FAST_CLEAR */
212 }
213
214 /* This doesn't need ELIMINATE_FAST_CLEAR.
215 * On chips predating Raven2, the DCC clear codes and the CB clear
216 * color registers must match.
217 */
218 *eliminate_needed = false;
219
220 if (color_value) {
221 if (alpha_value)
222 *clear_value = DCC_CLEAR_COLOR_1111;
223 else
224 *clear_value = DCC_CLEAR_COLOR_1110;
225 } else {
226 if (alpha_value)
227 *clear_value = DCC_CLEAR_COLOR_0001;
228 else
229 *clear_value = DCC_CLEAR_COLOR_0000;
230 }
231 return true;
232 }
233
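/* Clear the DCC metadata of one mip level to the given clear code.
 * Returns false if this level can't be fast-cleared.
 */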
234 bool vi_dcc_clear_level(struct si_context *sctx,
235 struct si_texture *tex,
236 unsigned level, unsigned clear_value)
237 {
238 struct pipe_resource *dcc_buffer;
239 uint64_t dcc_offset, clear_size;
240
241 assert(vi_dcc_enabled(tex, level));
242
243 if (tex->dcc_separate_buffer) {
244 dcc_buffer = &tex->dcc_separate_buffer->b.b;
245 dcc_offset = 0;
246 } else {
247 dcc_buffer = &tex->buffer.b.b;
248 dcc_offset = tex->surface.dcc_offset;
249 }
250
251 if (sctx->chip_class >= GFX9) {
252 /* Mipmap level clears aren't implemented. */
253 if (tex->buffer.b.b.last_level > 0)
254 return false;
255
256 /* 4x and 8x MSAA need a sophisticated compute shader for
257 * the clear. See AMDVLK. */
258 if (tex->buffer.b.b.nr_storage_samples >= 4)
259 return false;
260
261 clear_size = tex->surface.dcc_size;
262 } else {
263 unsigned num_layers = util_num_layers(&tex->buffer.b.b, level);
264
265 /* If this is 0, fast clear isn't possible. (can occur with MSAA) */
266 if (!tex->surface.u.legacy.level[level].dcc_fast_clear_size)
267 return false;
268
269 /* Layered 4x and 8x MSAA DCC fast clears need to clear
270 * dcc_fast_clear_size bytes for each layer. A compute shader
271 * would be more efficient than separate per-layer clear operations.
272 */
273 if (tex->buffer.b.b.nr_storage_samples >= 4 && num_layers > 1)
274 return false;
275
276 dcc_offset += tex->surface.u.legacy.level[level].dcc_offset;
277 clear_size = tex->surface.u.legacy.level[level].dcc_fast_clear_size *
278 num_layers;
279 }
280
281 si_clear_buffer(sctx, dcc_buffer, dcc_offset, clear_size,
282 &clear_value, 4, SI_COHERENCY_CB_META, false);
283 return true;
284 }
285
286 /* Set the same micro tile mode as the destination of the last MSAA resolve.
287 * This allows hitting the MSAA resolve fast path, which requires that both
288 * src and dst micro tile modes match.
289 */
290 static void si_set_optimal_micro_tile_mode(struct si_screen *sscreen,
291 struct si_texture *tex)
292 {
293 if (sscreen->info.chip_class >= GFX10 ||
294 tex->buffer.b.is_shared ||
295 tex->buffer.b.b.nr_samples <= 1 ||
296 tex->surface.micro_tile_mode == tex->last_msaa_resolve_target_micro_mode)
297 return;
298
299 assert(sscreen->info.chip_class >= GFX9 ||
300 tex->surface.u.legacy.level[0].mode == RADEON_SURF_MODE_2D);
301 assert(tex->buffer.b.b.last_level == 0);
302
303 if (sscreen->info.chip_class >= GFX9) {
304 /* 4K or larger tiles only. 0 is linear. 1-3 are 256B tiles. */
305 assert(tex->surface.u.gfx9.surf.swizzle_mode >= 4);
306
307 /* If you do swizzle_mode % 4, you'll get:
308 * 0 = Depth
309 * 1 = Standard
310 * 2 = Displayable
311 * 3 = Rotated
312 *
313 * Depth-sample order isn't allowed:
314 */
315 assert(tex->surface.u.gfx9.surf.swizzle_mode % 4 != 0);
316
317 switch (tex->last_msaa_resolve_target_micro_mode) {
318 case RADEON_MICRO_MODE_DISPLAY:
319 tex->surface.u.gfx9.surf.swizzle_mode &= ~0x3;
320 tex->surface.u.gfx9.surf.swizzle_mode += 2; /* D */
321 break;
322 case RADEON_MICRO_MODE_THIN:
323 tex->surface.u.gfx9.surf.swizzle_mode &= ~0x3;
324 tex->surface.u.gfx9.surf.swizzle_mode += 1; /* S */
325 break;
326 case RADEON_MICRO_MODE_ROTATED:
327 tex->surface.u.gfx9.surf.swizzle_mode &= ~0x3;
328 tex->surface.u.gfx9.surf.swizzle_mode += 3; /* R */
329 break;
330 default: /* depth */
331 assert(!"unexpected micro mode");
332 return;
333 }
334 } else if (sscreen->info.chip_class >= GFX7) {
335 /* These magic numbers were copied from addrlib. It doesn't use
336 * any definitions for them either. They are all 2D_TILED_THIN1
337 * modes with different bpp and micro tile mode.
338 */
339 switch (tex->last_msaa_resolve_target_micro_mode) {
340 case RADEON_MICRO_MODE_DISPLAY:
341 tex->surface.u.legacy.tiling_index[0] = 10;
342 break;
343 case RADEON_MICRO_MODE_THIN:
344 tex->surface.u.legacy.tiling_index[0] = 14;
345 break;
346 case RADEON_MICRO_MODE_ROTATED:
347 tex->surface.u.legacy.tiling_index[0] = 28;
348 break;
349 default: /* depth, thick */
350 assert(!"unexpected micro mode");
351 return;
352 }
353 } else { /* GFX6 */
354 switch (tex->last_msaa_resolve_target_micro_mode) {
355 case RADEON_MICRO_MODE_DISPLAY:
356 switch (tex->surface.bpe) {
357 case 1:
358 tex->surface.u.legacy.tiling_index[0] = 10;
359 break;
360 case 2:
361 tex->surface.u.legacy.tiling_index[0] = 11;
362 break;
363 default: /* 4, 8 */
364 tex->surface.u.legacy.tiling_index[0] = 12;
365 break;
366 }
367 break;
368 case RADEON_MICRO_MODE_THIN:
369 switch (tex->surface.bpe) {
370 case 1:
371 tex->surface.u.legacy.tiling_index[0] = 14;
372 break;
373 case 2:
374 tex->surface.u.legacy.tiling_index[0] = 15;
375 break;
376 case 4:
377 tex->surface.u.legacy.tiling_index[0] = 16;
378 break;
379 default: /* 8, 16 */
380 tex->surface.u.legacy.tiling_index[0] = 17;
381 break;
382 }
383 break;
384 default: /* depth, thick */
385 assert(!"unexpected micro mode");
386 return;
387 }
388 }
389
390 tex->surface.micro_tile_mode = tex->last_msaa_resolve_target_micro_mode;
391
392 p_atomic_inc(&sscreen->dirty_tex_counter);
393 }
394
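/* Try to fast-clear the bound color buffers by writing DCC and/or CMASK
 * metadata instead of drawing. Any buffer fully handled here is removed from
 * *buffers so the caller skips the draw-based clear for it.
 */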
395 static void si_do_fast_color_clear(struct si_context *sctx,
396 unsigned *buffers,
397 const union pipe_color_union *color)
398 {
399 struct pipe_framebuffer_state *fb = &sctx->framebuffer.state;
400 int i;
401
402 /* This function is broken on big-endian, so just disable this path for now. */
403 #if UTIL_ARCH_BIG_ENDIAN
404 return;
405 #endif
406
407 if (sctx->render_cond)
408 return;
409
410 for (i = 0; i < fb->nr_cbufs; i++) {
411 struct si_texture *tex;
412 unsigned clear_bit = PIPE_CLEAR_COLOR0 << i;
413
414 if (!fb->cbufs[i])
415 continue;
416
417 /* if this colorbuffer is not being cleared */
418 if (!(*buffers & clear_bit))
419 continue;
420
421 unsigned level = fb->cbufs[i]->u.tex.level;
422 if (level > 0)
423 continue;
424
425 tex = (struct si_texture *)fb->cbufs[i]->texture;
426
427 /* TODO: GFX9: Implement DCC fast clear for level 0 of
428 * mipmapped textures. Mipmapped DCC has to clear a rectangular
429 * area of DCC for level 0 (because the whole miptree is
430 * organized in a 2D plane).
431 */
432 if (sctx->chip_class >= GFX9 &&
433 tex->buffer.b.b.last_level > 0)
434 continue;
435
436 /* the clear is allowed if all layers are bound */
437 if (fb->cbufs[i]->u.tex.first_layer != 0 ||
438 fb->cbufs[i]->u.tex.last_layer != util_max_layer(&tex->buffer.b.b, 0)) {
439 continue;
440 }
441
442 /* only supported on tiled surfaces */
443 if (tex->surface.is_linear) {
444 continue;
445 }
446
447 /* shared textures can't use fast clear without an explicit flush,
448 * because there is no way to communicate the clear color among
449 * all clients
450 */
451 if (tex->buffer.b.is_shared &&
452 !(tex->buffer.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
453 continue;
454
455 if (sctx->chip_class <= GFX8 &&
456 tex->surface.u.legacy.level[0].mode == RADEON_SURF_MODE_1D &&
457 !sctx->screen->info.htile_cmask_support_1d_tiling)
458 continue;
459
460 /* Use a slow clear for small surfaces where the cost of
461 * the eliminate pass can be higher than the benefit of fast
462 * clear. The closed driver does this, but the numbers may differ.
463 *
464 * This helps on both dGPUs and APUs, even small APUs like Mullins.
465 */
466 bool too_small = tex->buffer.b.b.nr_samples <= 1 &&
467 tex->buffer.b.b.width0 *
468 tex->buffer.b.b.height0 <= 512 * 512;
469 bool eliminate_needed = false;
470 bool fmask_decompress_needed = false;
471
472 /* Fast clear is the most appropriate place to enable DCC for
473 * displayable surfaces.
474 */
475 if (sctx->family == CHIP_STONEY && !too_small) {
476 vi_separate_dcc_try_enable(sctx, tex);
477
478 /* On Stoney, RB+ isn't supported with a CMASK-only clear,
479 * so all clears are considered to be hypothetically slow
480 * clears, which is weighed when determining whether to
481 * enable separate DCC.
482 */
483 if (tex->dcc_gather_statistics) /* only for Stoney */
484 tex->num_slow_clears++;
485 }
486
487 /* Try to clear DCC first, otherwise try CMASK. */
488 if (vi_dcc_enabled(tex, 0)) {
489 uint32_t reset_value;
490
491 if (sctx->screen->debug_flags & DBG(NO_DCC_CLEAR))
492 continue;
493
494 if (!vi_get_fast_clear_parameters(sctx->screen,
495 tex->buffer.b.b.format,
496 fb->cbufs[i]->format,
497 color, &reset_value,
498 &eliminate_needed))
499 continue;
500
501 if (eliminate_needed && too_small)
502 continue;
503
504 /* TODO: This DCC+CMASK clear doesn't work with MSAA. */
505 if (tex->buffer.b.b.nr_samples >= 2 && tex->cmask_buffer &&
506 eliminate_needed)
507 continue;
508
509 if (!vi_dcc_clear_level(sctx, tex, 0, reset_value))
510 continue;
511
512 tex->separate_dcc_dirty = true;
513
514 /* DCC fast clear with MSAA should clear CMASK to 0xC. */
515 if (tex->buffer.b.b.nr_samples >= 2 && tex->cmask_buffer) {
516 uint32_t clear_value = 0xCCCCCCCC;
517 si_clear_buffer(sctx, &tex->cmask_buffer->b.b,
518 tex->surface.cmask_offset, tex->surface.cmask_size,
519 &clear_value, 4, SI_COHERENCY_CB_META, false);
520 fmask_decompress_needed = true;
521 }
522 } else {
523 if (too_small)
524 continue;
525
526 /* 128-bit formats are unsupported. */
527 if (tex->surface.bpe > 8) {
528 continue;
529 }
530
531 /* RB+ doesn't work with CMASK fast clear on Stoney. */
532 if (sctx->family == CHIP_STONEY)
533 continue;
534
535 /* ensure CMASK is enabled */
536 si_alloc_separate_cmask(sctx->screen, tex);
537 if (!tex->cmask_buffer)
538 continue;
539
540 /* Do the fast clear. */
541 uint32_t clear_value = 0;
542 si_clear_buffer(sctx, &tex->cmask_buffer->b.b,
543 tex->surface.cmask_offset, tex->surface.cmask_size,
544 &clear_value, 4, SI_COHERENCY_CB_META, false);
545 eliminate_needed = true;
546 }
547
548 if ((eliminate_needed || fmask_decompress_needed) &&
549 !(tex->dirty_level_mask & (1 << level))) {
550 tex->dirty_level_mask |= 1 << level;
551 p_atomic_inc(&sctx->screen->compressed_colortex_counter);
552 }
553
554 /* We can change the micro tile mode before a full clear. */
555 si_set_optimal_micro_tile_mode(sctx->screen, tex);
556
557 *buffers &= ~clear_bit;
558
559 /* Chips with DCC constant encoding don't need to set the clear
560 * color registers for DCC clear values 0 and 1.
561 */
562 if (sctx->screen->info.has_dcc_constant_encode && !eliminate_needed)
563 continue;
564
565 if (si_set_clear_color(tex, fb->cbufs[i]->format, color)) {
566 sctx->framebuffer.dirty_cbufs |= 1 << i;
567 si_mark_atom_dirty(sctx, &sctx->atoms.s.framebuffer);
568 }
569 }
570 }
571
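/* pipe_context::clear - fast-clear color buffers where possible, set up
 * register-based depth/stencil clears for HTILE, then let the blitter handle
 * the remaining buffers.
 */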
572 static void si_clear(struct pipe_context *ctx, unsigned buffers,
573 const union pipe_color_union *color,
574 double depth, unsigned stencil)
575 {
576 struct si_context *sctx = (struct si_context *)ctx;
577 struct pipe_framebuffer_state *fb = &sctx->framebuffer.state;
578 struct pipe_surface *zsbuf = fb->zsbuf;
579 struct si_texture *zstex =
580 zsbuf ? (struct si_texture*)zsbuf->texture : NULL;
581
582 if (buffers & PIPE_CLEAR_COLOR) {
583 si_do_fast_color_clear(sctx, &buffers, color);
584 if (!buffers)
585 return; /* all buffers have been fast cleared */
586
587 /* These buffers cannot use fast clear; make sure to disable expansion. */
588 for (unsigned i = 0; i < fb->nr_cbufs; i++) {
589 struct si_texture *tex;
590
591 /* If not clearing this buffer, skip. */
592 if (!(buffers & (PIPE_CLEAR_COLOR0 << i)) || !fb->cbufs[i])
593 continue;
594
595 tex = (struct si_texture *)fb->cbufs[i]->texture;
596 if (tex->surface.fmask_size == 0)
597 tex->dirty_level_mask &= ~(1 << fb->cbufs[i]->u.tex.level);
598 }
599 }
600
601 if (zstex &&
602 zsbuf->u.tex.first_layer == 0 &&
603 zsbuf->u.tex.last_layer == util_max_layer(&zstex->buffer.b.b, 0)) {
604 /* TC-compatible HTILE only supports depth clears to 0 or 1. */
605 if (buffers & PIPE_CLEAR_DEPTH &&
606 si_htile_enabled(zstex, zsbuf->u.tex.level, PIPE_MASK_Z) &&
607 (!zstex->tc_compatible_htile ||
608 depth == 0 || depth == 1)) {
609 /* Need to disable EXPCLEAR temporarily if clearing
610 * to a new value. */
611 if (!zstex->depth_cleared || zstex->depth_clear_value != depth) {
612 sctx->db_depth_disable_expclear = true;
613 }
614
615 if (zstex->depth_clear_value != (float)depth) {
616 /* Update DB_DEPTH_CLEAR. */
617 zstex->depth_clear_value = depth;
618 sctx->framebuffer.dirty_zsbuf = true;
619 si_mark_atom_dirty(sctx, &sctx->atoms.s.framebuffer);
620 }
621 sctx->db_depth_clear = true;
622 si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
623 }
624
625 /* TC-compatible HTILE only supports stencil clears to 0. */
626 if (buffers & PIPE_CLEAR_STENCIL &&
627 si_htile_enabled(zstex, zsbuf->u.tex.level, PIPE_MASK_S) &&
628 (!zstex->tc_compatible_htile || stencil == 0)) {
629 stencil &= 0xff;
630
631 /* Need to disable EXPCLEAR temporarily if clearing
632 * to a new value. */
633 if (!zstex->stencil_cleared || zstex->stencil_clear_value != stencil) {
634 sctx->db_stencil_disable_expclear = true;
635 }
636
637 if (zstex->stencil_clear_value != (uint8_t)stencil) {
638 /* Update DB_STENCIL_CLEAR. */
639 zstex->stencil_clear_value = stencil;
640 sctx->framebuffer.dirty_zsbuf = true;
641 si_mark_atom_dirty(sctx, &sctx->atoms.s.framebuffer);
642 }
643 sctx->db_stencil_clear = true;
644 si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
645 }
646
647 /* TODO: Find out what's wrong here. Fast depth clear leads to
648 * corruption in ARK: Survival Evolved, but that may just be
649 * a coincidence and the root cause is elsewhere.
650 *
651 * The corruption can be fixed by putting the DB flush before
652 * or after the depth clear. (surprisingly)
653 *
654 * https://bugs.freedesktop.org/show_bug.cgi?id=102955 (apitrace)
655 *
656 * This hack decreases back-to-back ClearDepth performance.
657 */
658 if ((sctx->db_depth_clear || sctx->db_stencil_clear) &&
659 sctx->screen->options.clear_db_cache_before_clear)
660 sctx->flags |= SI_CONTEXT_FLUSH_AND_INV_DB;
661 }
662
663 si_blitter_begin(sctx, SI_CLEAR);
664 util_blitter_clear(sctx->blitter, fb->width, fb->height,
665 util_framebuffer_get_num_layers(fb),
666 buffers, color, depth, stencil,
667 sctx->framebuffer.nr_samples > 1);
668 si_blitter_end(sctx);
669
670 if (sctx->db_depth_clear) {
671 sctx->db_depth_clear = false;
672 sctx->db_depth_disable_expclear = false;
673 zstex->depth_cleared = true;
674 si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
675 }
676
677 if (sctx->db_stencil_clear) {
678 sctx->db_stencil_clear = false;
679 sctx->db_stencil_disable_expclear = false;
680 zstex->stencil_cleared = true;
681 si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
682 }
683 }
684
685 static void si_clear_render_target(struct pipe_context *ctx,
686 struct pipe_surface *dst,
687 const union pipe_color_union *color,
688 unsigned dstx, unsigned dsty,
689 unsigned width, unsigned height,
690 bool render_condition_enabled)
691 {
692 struct si_context *sctx = (struct si_context *)ctx;
693 struct si_texture *sdst = (struct si_texture*)dst->texture;
694
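/* Non-MSAA surfaces without DCC take the compute-based clear path;
 * everything else goes through the blitter.
 */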
695 if (dst->texture->nr_samples <= 1 && !sdst->surface.dcc_offset) {
696 si_compute_clear_render_target(ctx, dst, color, dstx, dsty, width,
697 height, render_condition_enabled);
698 return;
699 }
700
701 si_blitter_begin(sctx, SI_CLEAR_SURFACE |
702 (render_condition_enabled ? 0 : SI_DISABLE_RENDER_COND));
703 util_blitter_clear_render_target(sctx->blitter, dst, color,
704 dstx, dsty, width, height);
705 si_blitter_end(sctx);
706 }
707
708 static void si_clear_depth_stencil(struct pipe_context *ctx,
709 struct pipe_surface *dst,
710 unsigned clear_flags,
711 double depth,
712 unsigned stencil,
713 unsigned dstx, unsigned dsty,
714 unsigned width, unsigned height,
715 bool render_condition_enabled)
716 {
717 struct si_context *sctx = (struct si_context *)ctx;
718
719 si_blitter_begin(sctx, SI_CLEAR_SURFACE |
720 (render_condition_enabled ? 0 : SI_DISABLE_RENDER_COND));
721 util_blitter_clear_depth_stencil(sctx->blitter, dst, clear_flags, depth, stencil,
722 dstx, dsty, width, height);
723 si_blitter_end(sctx);
724 }
725
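/* pipe_context::clear_texture - unpack the packed clear value according to
 * the texture format and forward it to the depth/stencil or render-target
 * clear path.
 */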
726 static void si_clear_texture(struct pipe_context *pipe,
727 struct pipe_resource *tex,
728 unsigned level,
729 const struct pipe_box *box,
730 const void *data)
731 {
732 struct pipe_screen *screen = pipe->screen;
733 struct si_texture *stex = (struct si_texture*)tex;
734 struct pipe_surface tmpl = {{0}};
735 struct pipe_surface *sf;
736 const struct util_format_description *desc =
737 util_format_description(tex->format);
738
739 tmpl.format = tex->format;
740 tmpl.u.tex.first_layer = box->z;
741 tmpl.u.tex.last_layer = box->z + box->depth - 1;
742 tmpl.u.tex.level = level;
743 sf = pipe->create_surface(pipe, tex, &tmpl);
744 if (!sf)
745 return;
746
747 if (stex->is_depth) {
748 unsigned clear;
749 float depth;
750 uint8_t stencil = 0;
751
752 /* Depth is always present. */
753 clear = PIPE_CLEAR_DEPTH;
754 desc->unpack_z_float(&depth, 0, data, 0, 1, 1);
755
756 if (stex->surface.has_stencil) {
757 clear |= PIPE_CLEAR_STENCIL;
758 desc->unpack_s_8uint(&stencil, 0, data, 0, 1, 1);
759 }
760
761 si_clear_depth_stencil(pipe, sf, clear, depth, stencil,
762 box->x, box->y,
763 box->width, box->height, false);
764 } else {
765 union pipe_color_union color;
766
767 /* pipe_color_union requires the full vec4 representation. */
768 if (util_format_is_pure_uint(tex->format))
769 desc->unpack_rgba_uint(color.ui, 0, data, 0, 1, 1);
770 else if (util_format_is_pure_sint(tex->format))
771 desc->unpack_rgba_sint(color.i, 0, data, 0, 1, 1);
772 else
773 desc->unpack_rgba_float(color.f, 0, data, 0, 1, 1);
774
775 if (screen->is_format_supported(screen, tex->format,
776 tex->target, 0, 0,
777 PIPE_BIND_RENDER_TARGET)) {
778 si_clear_render_target(pipe, sf, &color,
779 box->x, box->y,
780 box->width, box->height, false);
781 } else {
782 /* Software fallback - just for R9G9B9E5_FLOAT */
783 util_clear_render_target(pipe, sf, &color,
784 box->x, box->y,
785 box->width, box->height);
786 }
787 }
788 pipe_surface_reference(&sf, NULL);
789 }
790
791 void si_init_clear_functions(struct si_context *sctx)
792 {
793 sctx->b.clear_render_target = si_clear_render_target;
794 sctx->b.clear_texture = si_clear_texture;
795
796 if (sctx->has_graphics) {
797 sctx->b.clear = si_clear;
798 sctx->b.clear_depth_stencil = si_clear_depth_stencil;
799 }
800 }