radeonsi: factor out metadata import
[mesa.git] / src/gallium/drivers/radeon/r600_texture.c
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Jerome Glisse
25 * Corbin Simpson
26 */
27 #include "r600_pipe_common.h"
28 #include "r600_cs.h"
29 #include "r600_query.h"
30 #include "util/u_format.h"
31 #include "util/u_memory.h"
32 #include "util/u_pack_color.h"
33 #include "util/u_surface.h"
34 #include "os/os_time.h"
35 #include <errno.h>
36 #include <inttypes.h>
37
38 static void r600_texture_discard_cmask(struct r600_common_screen *rscreen,
39 struct r600_texture *rtex);
40 static enum radeon_surf_mode
41 r600_choose_tiling(struct r600_common_screen *rscreen,
42 const struct pipe_resource *templ);
43
44
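/* Check whether a copy can be handled by the SDMA engine and prepare both
 * textures for it: the destination CMASK is discarded when the whole level
 * is overwritten, and a dirty source is decompressed first. Returns false
 * when the 3D (blit) path must be used instead.
 */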
45 bool r600_prepare_for_dma_blit(struct r600_common_context *rctx,
46 struct r600_texture *rdst,
47 unsigned dst_level, unsigned dstx,
48 unsigned dsty, unsigned dstz,
49 struct r600_texture *rsrc,
50 unsigned src_level,
51 const struct pipe_box *src_box)
52 {
53 if (!rctx->dma.cs)
54 return false;
55
56 if (rdst->surface.bpe != rsrc->surface.bpe)
57 return false;
58
59 	/* MSAA: SDMA can't blit multisampled resources. */
60 if (rsrc->resource.b.b.nr_samples > 1 ||
61 rdst->resource.b.b.nr_samples > 1)
62 return false;
63
64 /* Depth-stencil surfaces:
65 * When dst is linear, the DB->CB copy preserves HTILE.
66 * When dst is tiled, the 3D path must be used to update HTILE.
67 */
68 if (rsrc->is_depth || rdst->is_depth)
69 return false;
70
71 /* DCC as:
72 * src: Use the 3D path. DCC decompression is expensive.
73 * dst: Use the 3D path to compress the pixels with DCC.
74 */
75 if (vi_dcc_enabled(rsrc, src_level) ||
76 vi_dcc_enabled(rdst, dst_level))
77 return false;
78
79 /* CMASK as:
80 * src: Both texture and SDMA paths need decompression. Use SDMA.
81 * dst: If overwriting the whole texture, discard CMASK and use
82 * SDMA. Otherwise, use the 3D path.
83 */
84 if (rdst->cmask.size && rdst->dirty_level_mask & (1 << dst_level)) {
85 /* The CMASK clear is only enabled for the first level. */
86 assert(dst_level == 0);
87 if (!util_texrange_covers_whole_level(&rdst->resource.b.b, dst_level,
88 dstx, dsty, dstz, src_box->width,
89 src_box->height, src_box->depth))
90 return false;
91
92 r600_texture_discard_cmask(rctx->screen, rdst);
93 }
94
95 /* All requirements are met. Prepare textures for SDMA. */
96 if (rsrc->cmask.size && rsrc->dirty_level_mask & (1 << src_level))
97 rctx->b.flush_resource(&rctx->b, &rsrc->resource.b.b);
98
99 assert(!(rsrc->dirty_level_mask & (1 << src_level)));
100 assert(!(rdst->dirty_level_mask & (1 << dst_level)));
101
102 return true;
103 }
104
105 /* Same as resource_copy_region, except that both upsampling and downsampling are allowed. */
106 static void r600_copy_region_with_blit(struct pipe_context *pipe,
107 struct pipe_resource *dst,
108 unsigned dst_level,
109 unsigned dstx, unsigned dsty, unsigned dstz,
110 struct pipe_resource *src,
111 unsigned src_level,
112 const struct pipe_box *src_box)
113 {
114 struct pipe_blit_info blit;
115
116 memset(&blit, 0, sizeof(blit));
117 blit.src.resource = src;
118 blit.src.format = src->format;
119 blit.src.level = src_level;
120 blit.src.box = *src_box;
121 blit.dst.resource = dst;
122 blit.dst.format = dst->format;
123 blit.dst.level = dst_level;
124 blit.dst.box.x = dstx;
125 blit.dst.box.y = dsty;
126 blit.dst.box.z = dstz;
127 blit.dst.box.width = src_box->width;
128 blit.dst.box.height = src_box->height;
129 blit.dst.box.depth = src_box->depth;
130 blit.mask = util_format_get_mask(src->format) &
131 util_format_get_mask(dst->format);
132 blit.filter = PIPE_TEX_FILTER_NEAREST;
133
134 if (blit.mask) {
135 pipe->blit(pipe, &blit);
136 }
137 }
138
139 /* Copy from a full GPU texture to a transfer's staging one. */
140 static void r600_copy_to_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
141 {
142 struct r600_common_context *rctx = (struct r600_common_context*)ctx;
143 struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
144 struct pipe_resource *dst = &rtransfer->staging->b.b;
145 struct pipe_resource *src = transfer->resource;
146
147 if (src->nr_samples > 1) {
148 r600_copy_region_with_blit(ctx, dst, 0, 0, 0, 0,
149 src, transfer->level, &transfer->box);
150 return;
151 }
152
153 rctx->dma_copy(ctx, dst, 0, 0, 0, 0, src, transfer->level,
154 &transfer->box);
155 }
156
157 /* Copy from a transfer's staging texture to a full GPU one. */
158 static void r600_copy_from_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
159 {
160 struct r600_common_context *rctx = (struct r600_common_context*)ctx;
161 struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
162 struct pipe_resource *dst = transfer->resource;
163 struct pipe_resource *src = &rtransfer->staging->b.b;
164 struct pipe_box sbox;
165
166 u_box_3d(0, 0, 0, transfer->box.width, transfer->box.height, transfer->box.depth, &sbox);
167
168 if (dst->nr_samples > 1) {
169 r600_copy_region_with_blit(ctx, dst, transfer->level,
170 transfer->box.x, transfer->box.y, transfer->box.z,
171 src, 0, &sbox);
172 return;
173 }
174
175 rctx->dma_copy(ctx, dst, transfer->level,
176 transfer->box.x, transfer->box.y, transfer->box.z,
177 src, 0, &sbox);
178 }
179
180 static unsigned r600_texture_get_offset(struct r600_common_screen *rscreen,
181 struct r600_texture *rtex, unsigned level,
182 const struct pipe_box *box,
183 unsigned *stride,
184 unsigned *layer_stride)
185 {
186 if (rscreen->chip_class >= GFX9) {
187 *stride = rtex->surface.u.gfx9.surf_pitch * rtex->surface.bpe;
188 *layer_stride = rtex->surface.u.gfx9.surf_slice_size;
189
190 if (!box)
191 return 0;
192
193 /* Each texture is an array of slices. Each slice is an array
194 * of mipmap levels. */
195 return box->z * rtex->surface.u.gfx9.surf_slice_size +
196 rtex->surface.u.gfx9.offset[level] +
197 (box->y / rtex->surface.blk_h *
198 rtex->surface.u.gfx9.surf_pitch +
199 box->x / rtex->surface.blk_w) * rtex->surface.bpe;
200 } else {
201 *stride = rtex->surface.u.legacy.level[level].nblk_x *
202 rtex->surface.bpe;
203 *layer_stride = rtex->surface.u.legacy.level[level].slice_size;
204
205 if (!box)
206 return rtex->surface.u.legacy.level[level].offset;
207
208 /* Each texture is an array of mipmap levels. Each level is
209 * an array of slices. */
210 return rtex->surface.u.legacy.level[level].offset +
211 box->z * rtex->surface.u.legacy.level[level].slice_size +
212 (box->y / rtex->surface.blk_h *
213 rtex->surface.u.legacy.level[level].nblk_x +
214 box->x / rtex->surface.blk_w) * rtex->surface.bpe;
215 }
216 }
217
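/* Compute the layout of a surface (tiling mode, HTILE/DCC/scanout flags)
 * for the given resource template via the winsys, and apply the pitch and
 * offset overrides used when importing buffers.
 */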
218 static int r600_init_surface(struct r600_common_screen *rscreen,
219 struct radeon_surf *surface,
220 const struct pipe_resource *ptex,
221 enum radeon_surf_mode array_mode,
222 unsigned pitch_in_bytes_override,
223 unsigned offset,
224 bool is_imported,
225 bool is_scanout,
226 bool is_flushed_depth,
227 bool tc_compatible_htile)
228 {
229 const struct util_format_description *desc =
230 util_format_description(ptex->format);
231 bool is_depth, is_stencil;
232 int r;
233 unsigned i, bpe, flags = 0;
234
235 is_depth = util_format_has_depth(desc);
236 is_stencil = util_format_has_stencil(desc);
237
238 if (rscreen->chip_class >= EVERGREEN && !is_flushed_depth &&
239 ptex->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
240 bpe = 4; /* stencil is allocated separately on evergreen */
241 } else {
242 bpe = util_format_get_blocksize(ptex->format);
243 assert(util_is_power_of_two(bpe));
244 }
245
246 if (!is_flushed_depth && is_depth) {
247 flags |= RADEON_SURF_ZBUFFER;
248
249 if (tc_compatible_htile &&
250 (rscreen->chip_class >= GFX9 ||
251 array_mode == RADEON_SURF_MODE_2D)) {
252 /* TC-compatible HTILE only supports Z32_FLOAT.
253 * GFX9 also supports Z16_UNORM.
254 * On VI, promote Z16 to Z32. DB->CB copies will convert
255 * the format for transfers.
256 */
257 if (rscreen->chip_class == VI)
258 bpe = 4;
259
260 flags |= RADEON_SURF_TC_COMPATIBLE_HTILE;
261 }
262
263 if (is_stencil)
264 flags |= RADEON_SURF_SBUFFER;
265 }
266
267 if (rscreen->chip_class >= VI &&
268 (ptex->flags & R600_RESOURCE_FLAG_DISABLE_DCC ||
269 ptex->format == PIPE_FORMAT_R9G9B9E5_FLOAT))
270 flags |= RADEON_SURF_DISABLE_DCC;
271
272 if (ptex->bind & PIPE_BIND_SCANOUT || is_scanout) {
273 /* This should catch bugs in gallium users setting incorrect flags. */
274 assert(ptex->nr_samples <= 1 &&
275 ptex->array_size == 1 &&
276 ptex->depth0 == 1 &&
277 ptex->last_level == 0 &&
278 !(flags & RADEON_SURF_Z_OR_SBUFFER));
279
280 flags |= RADEON_SURF_SCANOUT;
281 }
282
283 if (ptex->bind & PIPE_BIND_SHARED)
284 flags |= RADEON_SURF_SHAREABLE;
285 if (is_imported)
286 flags |= RADEON_SURF_IMPORTED | RADEON_SURF_SHAREABLE;
287 if (!(ptex->flags & R600_RESOURCE_FLAG_FORCE_TILING))
288 flags |= RADEON_SURF_OPTIMIZE_FOR_SPACE;
289
290 r = rscreen->ws->surface_init(rscreen->ws, ptex, flags, bpe,
291 array_mode, surface);
292 if (r) {
293 return r;
294 }
295
296 if (rscreen->chip_class >= GFX9) {
297 assert(!pitch_in_bytes_override ||
298 pitch_in_bytes_override == surface->u.gfx9.surf_pitch * bpe);
299 surface->u.gfx9.surf_offset = offset;
300 } else {
301 if (pitch_in_bytes_override &&
302 pitch_in_bytes_override != surface->u.legacy.level[0].nblk_x * bpe) {
303 			/* The old DDX on Evergreen overestimates the alignment for 1D
304 			 * surfaces; only one level exists for those, so override level 0.
305 			 */
306 surface->u.legacy.level[0].nblk_x = pitch_in_bytes_override / bpe;
307 surface->u.legacy.level[0].slice_size = pitch_in_bytes_override *
308 surface->u.legacy.level[0].nblk_y;
309 }
310
311 if (offset) {
312 for (i = 0; i < ARRAY_SIZE(surface->u.legacy.level); ++i)
313 surface->u.legacy.level[i].offset += offset;
314 }
315 }
316 return 0;
317 }
318
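/* Fill BO metadata with the tiling parameters of the surface so that other
 * processes importing the buffer can reconstruct its layout.
 */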
319 static void r600_texture_init_metadata(struct r600_common_screen *rscreen,
320 struct r600_texture *rtex,
321 struct radeon_bo_metadata *metadata)
322 {
323 struct radeon_surf *surface = &rtex->surface;
324
325 memset(metadata, 0, sizeof(*metadata));
326
327 if (rscreen->chip_class >= GFX9) {
328 metadata->u.gfx9.swizzle_mode = surface->u.gfx9.surf.swizzle_mode;
329 } else {
330 metadata->u.legacy.microtile = surface->u.legacy.level[0].mode >= RADEON_SURF_MODE_1D ?
331 RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
332 metadata->u.legacy.macrotile = surface->u.legacy.level[0].mode >= RADEON_SURF_MODE_2D ?
333 RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
334 metadata->u.legacy.pipe_config = surface->u.legacy.pipe_config;
335 metadata->u.legacy.bankw = surface->u.legacy.bankw;
336 metadata->u.legacy.bankh = surface->u.legacy.bankh;
337 metadata->u.legacy.tile_split = surface->u.legacy.tile_split;
338 metadata->u.legacy.mtilea = surface->u.legacy.mtilea;
339 metadata->u.legacy.num_banks = surface->u.legacy.num_banks;
340 metadata->u.legacy.stride = surface->u.legacy.level[0].nblk_x * surface->bpe;
341 metadata->u.legacy.scanout = (surface->flags & RADEON_SURF_SCANOUT) != 0;
342 }
343 }
344
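/* Inverse of r600_texture_init_metadata: derive the array mode, scanout
 * flag and (pre-GFX9) tiling parameters from imported BO metadata. This is
 * the helper factored out of the texture import path by this change.
 */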
345 static void r600_surface_import_metadata(struct r600_common_screen *rscreen,
346 struct radeon_surf *surf,
347 struct radeon_bo_metadata *metadata,
348 enum radeon_surf_mode *array_mode,
349 bool *is_scanout)
350 {
351 if (rscreen->chip_class >= GFX9) {
352 if (metadata->u.gfx9.swizzle_mode > 0)
353 *array_mode = RADEON_SURF_MODE_2D;
354 else
355 *array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
356
357 *is_scanout = metadata->u.gfx9.swizzle_mode == 0 ||
358 metadata->u.gfx9.swizzle_mode % 4 == 2;
359 } else {
360 surf->u.legacy.pipe_config = metadata->u.legacy.pipe_config;
361 surf->u.legacy.bankw = metadata->u.legacy.bankw;
362 surf->u.legacy.bankh = metadata->u.legacy.bankh;
363 surf->u.legacy.tile_split = metadata->u.legacy.tile_split;
364 surf->u.legacy.mtilea = metadata->u.legacy.mtilea;
365 surf->u.legacy.num_banks = metadata->u.legacy.num_banks;
366
367 if (metadata->u.legacy.macrotile == RADEON_LAYOUT_TILED)
368 *array_mode = RADEON_SURF_MODE_2D;
369 else if (metadata->u.legacy.microtile == RADEON_LAYOUT_TILED)
370 *array_mode = RADEON_SURF_MODE_1D;
371 else
372 *array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
373
374 *is_scanout = metadata->u.legacy.scanout;
375 }
376 }
377
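/* Expand fast color clears (CMASK/DCC) into the base texture by flushing
 * the resource; the aux context lock is taken when running on the screen's
 * aux context.
 */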
378 static void r600_eliminate_fast_color_clear(struct r600_common_context *rctx,
379 struct r600_texture *rtex)
380 {
381 struct r600_common_screen *rscreen = rctx->screen;
382 struct pipe_context *ctx = &rctx->b;
383
384 if (ctx == rscreen->aux_context)
385 mtx_lock(&rscreen->aux_context_lock);
386
387 ctx->flush_resource(ctx, &rtex->resource.b.b);
388 ctx->flush(ctx, NULL, 0);
389
390 if (ctx == rscreen->aux_context)
391 mtx_unlock(&rscreen->aux_context_lock);
392 }
393
394 static void r600_texture_discard_cmask(struct r600_common_screen *rscreen,
395 struct r600_texture *rtex)
396 {
397 if (!rtex->cmask.size)
398 return;
399
400 assert(rtex->resource.b.b.nr_samples <= 1);
401
402 /* Disable CMASK. */
403 memset(&rtex->cmask, 0, sizeof(rtex->cmask));
404 rtex->cmask.base_address_reg = rtex->resource.gpu_address >> 8;
405 rtex->dirty_level_mask = 0;
406
407 if (rscreen->chip_class >= SI)
408 rtex->cb_color_info &= ~SI_S_028C70_FAST_CLEAR(1);
409 else
410 rtex->cb_color_info &= ~EG_S_028C70_FAST_CLEAR(1);
411
412 if (rtex->cmask_buffer != &rtex->resource)
413 r600_resource_reference(&rtex->cmask_buffer, NULL);
414
415 /* Notify all contexts about the change. */
416 p_atomic_inc(&rscreen->dirty_tex_counter);
417 p_atomic_inc(&rscreen->compressed_colortex_counter);
418 }
419
420 static bool r600_can_disable_dcc(struct r600_texture *rtex)
421 {
422 /* We can't disable DCC if it can be written by another process. */
423 return rtex->dcc_offset &&
424 (!rtex->resource.b.is_shared ||
425 !(rtex->resource.external_usage & PIPE_HANDLE_USAGE_WRITE));
426 }
427
428 static bool r600_texture_discard_dcc(struct r600_common_screen *rscreen,
429 struct r600_texture *rtex)
430 {
431 if (!r600_can_disable_dcc(rtex))
432 return false;
433
434 assert(rtex->dcc_separate_buffer == NULL);
435
436 /* Disable DCC. */
437 rtex->dcc_offset = 0;
438
439 /* Notify all contexts about the change. */
440 p_atomic_inc(&rscreen->dirty_tex_counter);
441 return true;
442 }
443
444 /**
445  * Disable DCC for the texture (first decompress, then discard metadata).
446  *
447  * There is an unresolved multi-context synchronization issue between
448 * screen::aux_context and the current context. If applications do this with
449 * multiple contexts, it's already undefined behavior for them and we don't
450 * have to worry about that. The scenario is:
451 *
452 * If context 1 disables DCC and context 2 has queued commands that write
453 * to the texture via CB with DCC enabled, and the order of operations is
454 * as follows:
455 * context 2 queues draw calls rendering to the texture, but doesn't flush
456 * context 1 disables DCC and flushes
457 * context 1 & 2 reset descriptors and FB state
458 * context 2 flushes (new compressed tiles written by the draw calls)
459 * context 1 & 2 read garbage, because DCC is disabled, yet there are
460  *   compressed tiles
461 *
462 * \param rctx the current context if you have one, or rscreen->aux_context
463 * if you don't.
464 */
465 bool r600_texture_disable_dcc(struct r600_common_context *rctx,
466 struct r600_texture *rtex)
467 {
468 struct r600_common_screen *rscreen = rctx->screen;
469
470 if (!r600_can_disable_dcc(rtex))
471 return false;
472
473 if (&rctx->b == rscreen->aux_context)
474 mtx_lock(&rscreen->aux_context_lock);
475
476 /* Decompress DCC. */
477 rctx->decompress_dcc(&rctx->b, rtex);
478 rctx->b.flush(&rctx->b, NULL, 0);
479
480 if (&rctx->b == rscreen->aux_context)
481 mtx_unlock(&rscreen->aux_context_lock);
482
483 return r600_texture_discard_dcc(rscreen, rtex);
484 }
485
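/* Reallocate the texture with an additional bind flag (e.g. PIPE_BIND_SHARED
 * or PIPE_BIND_LINEAR), copy the contents over unless invalidate_storage is
 * set, and transplant the new storage into the existing r600_texture.
 * No-op on pre-SI chips and on shared resources.
 */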
486 static void r600_reallocate_texture_inplace(struct r600_common_context *rctx,
487 struct r600_texture *rtex,
488 unsigned new_bind_flag,
489 bool invalidate_storage)
490 {
491 struct pipe_screen *screen = rctx->b.screen;
492 struct r600_texture *new_tex;
493 struct pipe_resource templ = rtex->resource.b.b;
494 unsigned i;
495
496 templ.bind |= new_bind_flag;
497
498 /* r600g doesn't react to dirty_tex_descriptor_counter */
499 if (rctx->chip_class < SI)
500 return;
501
502 if (rtex->resource.b.is_shared)
503 return;
504
505 if (new_bind_flag == PIPE_BIND_LINEAR) {
506 if (rtex->surface.is_linear)
507 return;
508
509 /* This fails with MSAA, depth, and compressed textures. */
510 if (r600_choose_tiling(rctx->screen, &templ) !=
511 RADEON_SURF_MODE_LINEAR_ALIGNED)
512 return;
513 }
514
515 new_tex = (struct r600_texture*)screen->resource_create(screen, &templ);
516 if (!new_tex)
517 return;
518
519 /* Copy the pixels to the new texture. */
520 if (!invalidate_storage) {
521 for (i = 0; i <= templ.last_level; i++) {
522 struct pipe_box box;
523
524 u_box_3d(0, 0, 0,
525 u_minify(templ.width0, i), u_minify(templ.height0, i),
526 util_max_layer(&templ, i) + 1, &box);
527
528 rctx->dma_copy(&rctx->b, &new_tex->resource.b.b, i, 0, 0, 0,
529 &rtex->resource.b.b, i, &box);
530 }
531 }
532
533 if (new_bind_flag == PIPE_BIND_LINEAR) {
534 r600_texture_discard_cmask(rctx->screen, rtex);
535 r600_texture_discard_dcc(rctx->screen, rtex);
536 }
537
538 /* Replace the structure fields of rtex. */
539 rtex->resource.b.b.bind = templ.bind;
540 pb_reference(&rtex->resource.buf, new_tex->resource.buf);
541 rtex->resource.gpu_address = new_tex->resource.gpu_address;
542 rtex->resource.vram_usage = new_tex->resource.vram_usage;
543 rtex->resource.gart_usage = new_tex->resource.gart_usage;
544 rtex->resource.bo_size = new_tex->resource.bo_size;
545 rtex->resource.bo_alignment = new_tex->resource.bo_alignment;
546 rtex->resource.domains = new_tex->resource.domains;
547 rtex->resource.flags = new_tex->resource.flags;
548 rtex->size = new_tex->size;
549 rtex->db_render_format = new_tex->db_render_format;
550 rtex->db_compatible = new_tex->db_compatible;
551 rtex->can_sample_z = new_tex->can_sample_z;
552 rtex->can_sample_s = new_tex->can_sample_s;
553 rtex->surface = new_tex->surface;
554 rtex->fmask = new_tex->fmask;
555 rtex->cmask = new_tex->cmask;
556 rtex->cb_color_info = new_tex->cb_color_info;
557 rtex->last_msaa_resolve_target_micro_mode = new_tex->last_msaa_resolve_target_micro_mode;
558 rtex->htile_offset = new_tex->htile_offset;
559 rtex->tc_compatible_htile = new_tex->tc_compatible_htile;
560 rtex->depth_cleared = new_tex->depth_cleared;
561 rtex->stencil_cleared = new_tex->stencil_cleared;
562 rtex->non_disp_tiling = new_tex->non_disp_tiling;
563 rtex->dcc_gather_statistics = new_tex->dcc_gather_statistics;
564 rtex->framebuffers_bound = new_tex->framebuffers_bound;
565
566 if (new_bind_flag == PIPE_BIND_LINEAR) {
567 assert(!rtex->htile_offset);
568 assert(!rtex->cmask.size);
569 assert(!rtex->fmask.size);
570 assert(!rtex->dcc_offset);
571 assert(!rtex->is_depth);
572 }
573
574 r600_texture_reference(&new_tex, NULL);
575
576 p_atomic_inc(&rctx->screen->dirty_tex_counter);
577 }
578
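/* Export a resource as a winsys handle: move suballocated storage into a
 * shareable buffer, disable DCC for external write access, eliminate fast
 * clears when no explicit flush is used, write the BO metadata and mark the
 * resource as shared.
 */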
579 static boolean r600_texture_get_handle(struct pipe_screen* screen,
580 struct pipe_context *ctx,
581 struct pipe_resource *resource,
582 struct winsys_handle *whandle,
583 unsigned usage)
584 {
585 struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
586 struct r600_common_context *rctx;
587 struct r600_resource *res = (struct r600_resource*)resource;
588 struct r600_texture *rtex = (struct r600_texture*)resource;
589 struct radeon_bo_metadata metadata;
590 bool update_metadata = false;
591 unsigned stride, offset, slice_size;
592
593 ctx = threaded_context_unwrap_sync(ctx);
594 rctx = (struct r600_common_context*)(ctx ? ctx : rscreen->aux_context);
595
596 if (resource->target != PIPE_BUFFER) {
597 /* This is not supported now, but it might be required for OpenCL
598 * interop in the future.
599 */
600 if (resource->nr_samples > 1 || rtex->is_depth)
601 return false;
602
603 /* Move a suballocated texture into a non-suballocated allocation. */
604 if (rscreen->ws->buffer_is_suballocated(res->buf) ||
605 rtex->surface.tile_swizzle) {
606 assert(!res->b.is_shared);
607 r600_reallocate_texture_inplace(rctx, rtex,
608 PIPE_BIND_SHARED, false);
609 rctx->b.flush(&rctx->b, NULL, 0);
610 assert(res->b.b.bind & PIPE_BIND_SHARED);
611 assert(res->flags & RADEON_FLAG_NO_SUBALLOC);
612 assert(rtex->surface.tile_swizzle == 0);
613 }
614
615 /* Since shader image stores don't support DCC on VI,
616 * disable it for external clients that want write
617 * access.
618 */
619 if (usage & PIPE_HANDLE_USAGE_WRITE && rtex->dcc_offset) {
620 if (r600_texture_disable_dcc(rctx, rtex))
621 update_metadata = true;
622 }
623
624 if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) &&
625 (rtex->cmask.size || rtex->dcc_offset)) {
626 /* Eliminate fast clear (both CMASK and DCC) */
627 r600_eliminate_fast_color_clear(rctx, rtex);
628
629 /* Disable CMASK if flush_resource isn't going
630 * to be called.
631 */
632 if (rtex->cmask.size)
633 r600_texture_discard_cmask(rscreen, rtex);
634 }
635
636 /* Set metadata. */
637 if (!res->b.is_shared || update_metadata) {
638 r600_texture_init_metadata(rscreen, rtex, &metadata);
639 if (rscreen->query_opaque_metadata)
640 rscreen->query_opaque_metadata(rscreen, rtex,
641 &metadata);
642
643 rscreen->ws->buffer_set_metadata(res->buf, &metadata);
644 }
645
646 if (rscreen->chip_class >= GFX9) {
647 offset = rtex->surface.u.gfx9.surf_offset;
648 stride = rtex->surface.u.gfx9.surf_pitch *
649 rtex->surface.bpe;
650 slice_size = rtex->surface.u.gfx9.surf_slice_size;
651 } else {
652 offset = rtex->surface.u.legacy.level[0].offset;
653 stride = rtex->surface.u.legacy.level[0].nblk_x *
654 rtex->surface.bpe;
655 slice_size = rtex->surface.u.legacy.level[0].slice_size;
656 }
657 } else {
658 /* Move a suballocated buffer into a non-suballocated allocation. */
659 if (rscreen->ws->buffer_is_suballocated(res->buf)) {
660 assert(!res->b.is_shared);
661
662 /* Allocate a new buffer with PIPE_BIND_SHARED. */
663 struct pipe_resource templ = res->b.b;
664 templ.bind |= PIPE_BIND_SHARED;
665
666 struct pipe_resource *newb =
667 screen->resource_create(screen, &templ);
668 if (!newb)
669 return false;
670
671 /* Copy the old buffer contents to the new one. */
672 struct pipe_box box;
673 u_box_1d(0, newb->width0, &box);
674 rctx->b.resource_copy_region(&rctx->b, newb, 0, 0, 0, 0,
675 &res->b.b, 0, &box);
676 /* Move the new buffer storage to the old pipe_resource. */
677 r600_replace_buffer_storage(&rctx->b, &res->b.b, newb);
678 pipe_resource_reference(&newb, NULL);
679
680 assert(res->b.b.bind & PIPE_BIND_SHARED);
681 assert(res->flags & RADEON_FLAG_NO_SUBALLOC);
682 }
683
684 /* Buffers */
685 offset = 0;
686 stride = 0;
687 slice_size = 0;
688 }
689
690 if (res->b.is_shared) {
691 /* USAGE_EXPLICIT_FLUSH must be cleared if at least one user
692 * doesn't set it.
693 */
694 res->external_usage |= usage & ~PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
695 if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
696 res->external_usage &= ~PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
697 } else {
698 res->b.is_shared = true;
699 res->external_usage = usage;
700 }
701
702 return rscreen->ws->buffer_get_handle(res->buf, stride, offset,
703 slice_size, whandle);
704 }
705
706 static void r600_texture_destroy(struct pipe_screen *screen,
707 struct pipe_resource *ptex)
708 {
709 struct r600_texture *rtex = (struct r600_texture*)ptex;
710 struct r600_resource *resource = &rtex->resource;
711
712 r600_texture_reference(&rtex->flushed_depth_texture, NULL);
713
714 if (rtex->cmask_buffer != &rtex->resource) {
715 r600_resource_reference(&rtex->cmask_buffer, NULL);
716 }
717 pb_reference(&resource->buf, NULL);
718 r600_resource_reference(&rtex->dcc_separate_buffer, NULL);
719 r600_resource_reference(&rtex->last_dcc_separate_buffer, NULL);
720 FREE(rtex);
721 }
722
723 static const struct u_resource_vtbl r600_texture_vtbl;
724
725 /* The number of samples can be specified independently of the texture. */
726 void r600_texture_get_fmask_info(struct r600_common_screen *rscreen,
727 struct r600_texture *rtex,
728 unsigned nr_samples,
729 struct r600_fmask_info *out)
730 {
731 /* FMASK is allocated like an ordinary texture. */
732 struct pipe_resource templ = rtex->resource.b.b;
733 struct radeon_surf fmask = {};
734 unsigned flags, bpe;
735
736 memset(out, 0, sizeof(*out));
737
738 if (rscreen->chip_class >= GFX9) {
739 out->alignment = rtex->surface.u.gfx9.fmask_alignment;
740 out->size = rtex->surface.u.gfx9.fmask_size;
741 return;
742 }
743
744 templ.nr_samples = 1;
745 flags = rtex->surface.flags | RADEON_SURF_FMASK;
746
747 if (rscreen->chip_class <= CAYMAN) {
748 /* Use the same parameters and tile mode. */
749 fmask.u.legacy.bankw = rtex->surface.u.legacy.bankw;
750 fmask.u.legacy.bankh = rtex->surface.u.legacy.bankh;
751 fmask.u.legacy.mtilea = rtex->surface.u.legacy.mtilea;
752 fmask.u.legacy.tile_split = rtex->surface.u.legacy.tile_split;
753
754 if (nr_samples <= 4)
755 fmask.u.legacy.bankh = 4;
756 }
757
758 switch (nr_samples) {
759 case 2:
760 case 4:
761 bpe = 1;
762 break;
763 case 8:
764 bpe = 4;
765 break;
766 default:
767 R600_ERR("Invalid sample count for FMASK allocation.\n");
768 return;
769 }
770
771 	/* Overallocate FMASK on R600-R700 to fix colorbuffer corruption.
772 	 * The overallocation could be avoided by writing a separate FMASK
773 	 * allocator specifically for R600-R700 ASICs. */
774 if (rscreen->chip_class <= R700) {
775 bpe *= 2;
776 }
777
778 if (rscreen->ws->surface_init(rscreen->ws, &templ, flags, bpe,
779 RADEON_SURF_MODE_2D, &fmask)) {
780 R600_ERR("Got error in surface_init while allocating FMASK.\n");
781 return;
782 }
783
784 assert(fmask.u.legacy.level[0].mode == RADEON_SURF_MODE_2D);
785
786 out->slice_tile_max = (fmask.u.legacy.level[0].nblk_x * fmask.u.legacy.level[0].nblk_y) / 64;
787 if (out->slice_tile_max)
788 out->slice_tile_max -= 1;
789
790 out->tile_mode_index = fmask.u.legacy.tiling_index[0];
791 out->pitch_in_pixels = fmask.u.legacy.level[0].nblk_x;
792 out->bank_height = fmask.u.legacy.bankh;
793 out->tile_swizzle = fmask.tile_swizzle;
794 out->alignment = MAX2(256, fmask.surf_alignment);
795 out->size = fmask.surf_size;
796 }
797
798 static void r600_texture_allocate_fmask(struct r600_common_screen *rscreen,
799 struct r600_texture *rtex)
800 {
801 r600_texture_get_fmask_info(rscreen, rtex,
802 rtex->resource.b.b.nr_samples, &rtex->fmask);
803
804 rtex->fmask.offset = align64(rtex->size, rtex->fmask.alignment);
805 rtex->size = rtex->fmask.offset + rtex->fmask.size;
806 }
807
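/* Compute the CMASK size and alignment for R600-Cayman from the CMASK cache
 * size and the pipe configuration.
 */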
808 void r600_texture_get_cmask_info(struct r600_common_screen *rscreen,
809 struct r600_texture *rtex,
810 struct r600_cmask_info *out)
811 {
812 unsigned cmask_tile_width = 8;
813 unsigned cmask_tile_height = 8;
814 unsigned cmask_tile_elements = cmask_tile_width * cmask_tile_height;
815 unsigned element_bits = 4;
816 unsigned cmask_cache_bits = 1024;
817 unsigned num_pipes = rscreen->info.num_tile_pipes;
818 unsigned pipe_interleave_bytes = rscreen->info.pipe_interleave_bytes;
819
820 unsigned elements_per_macro_tile = (cmask_cache_bits / element_bits) * num_pipes;
821 unsigned pixels_per_macro_tile = elements_per_macro_tile * cmask_tile_elements;
822 unsigned sqrt_pixels_per_macro_tile = sqrt(pixels_per_macro_tile);
823 unsigned macro_tile_width = util_next_power_of_two(sqrt_pixels_per_macro_tile);
824 unsigned macro_tile_height = pixels_per_macro_tile / macro_tile_width;
825
826 unsigned pitch_elements = align(rtex->resource.b.b.width0, macro_tile_width);
827 unsigned height = align(rtex->resource.b.b.height0, macro_tile_height);
828
829 unsigned base_align = num_pipes * pipe_interleave_bytes;
830 unsigned slice_bytes =
831 ((pitch_elements * height * element_bits + 7) / 8) / cmask_tile_elements;
832
833 assert(macro_tile_width % 128 == 0);
834 assert(macro_tile_height % 128 == 0);
835
836 out->slice_tile_max = ((pitch_elements * height) / (128*128)) - 1;
837 out->alignment = MAX2(256, base_align);
838 out->size = (util_max_layer(&rtex->resource.b.b, 0) + 1) *
839 align(slice_bytes, base_align);
840 }
841
842 static void si_texture_get_cmask_info(struct r600_common_screen *rscreen,
843 struct r600_texture *rtex,
844 struct r600_cmask_info *out)
845 {
846 unsigned pipe_interleave_bytes = rscreen->info.pipe_interleave_bytes;
847 unsigned num_pipes = rscreen->info.num_tile_pipes;
848 unsigned cl_width, cl_height;
849
850 if (rscreen->chip_class >= GFX9) {
851 out->alignment = rtex->surface.u.gfx9.cmask_alignment;
852 out->size = rtex->surface.u.gfx9.cmask_size;
853 return;
854 }
855
856 switch (num_pipes) {
857 case 2:
858 cl_width = 32;
859 cl_height = 16;
860 break;
861 case 4:
862 cl_width = 32;
863 cl_height = 32;
864 break;
865 case 8:
866 cl_width = 64;
867 cl_height = 32;
868 break;
869 case 16: /* Hawaii */
870 cl_width = 64;
871 cl_height = 64;
872 break;
873 default:
874 assert(0);
875 return;
876 }
877
878 unsigned base_align = num_pipes * pipe_interleave_bytes;
879
880 unsigned width = align(rtex->resource.b.b.width0, cl_width*8);
881 unsigned height = align(rtex->resource.b.b.height0, cl_height*8);
882 unsigned slice_elements = (width * height) / (8*8);
883
884 /* Each element of CMASK is a nibble. */
885 unsigned slice_bytes = slice_elements / 2;
886
887 out->slice_tile_max = (width * height) / (128*128);
888 if (out->slice_tile_max)
889 out->slice_tile_max -= 1;
890
891 out->alignment = MAX2(256, base_align);
892 out->size = (util_max_layer(&rtex->resource.b.b, 0) + 1) *
893 align(slice_bytes, base_align);
894 }
895
896 static void r600_texture_allocate_cmask(struct r600_common_screen *rscreen,
897 struct r600_texture *rtex)
898 {
899 if (rscreen->chip_class >= SI) {
900 si_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
901 } else {
902 r600_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
903 }
904
905 rtex->cmask.offset = align64(rtex->size, rtex->cmask.alignment);
906 rtex->size = rtex->cmask.offset + rtex->cmask.size;
907
908 if (rscreen->chip_class >= SI)
909 rtex->cb_color_info |= SI_S_028C70_FAST_CLEAR(1);
910 else
911 rtex->cb_color_info |= EG_S_028C70_FAST_CLEAR(1);
912 }
913
914 static void r600_texture_alloc_cmask_separate(struct r600_common_screen *rscreen,
915 struct r600_texture *rtex)
916 {
917 if (rtex->cmask_buffer)
918 return;
919
920 assert(rtex->cmask.size == 0);
921
922 if (rscreen->chip_class >= SI) {
923 si_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
924 } else {
925 r600_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
926 }
927
928 rtex->cmask_buffer = (struct r600_resource *)
929 r600_aligned_buffer_create(&rscreen->b,
930 R600_RESOURCE_FLAG_UNMAPPABLE,
931 PIPE_USAGE_DEFAULT,
932 rtex->cmask.size,
933 rtex->cmask.alignment);
934 if (rtex->cmask_buffer == NULL) {
935 rtex->cmask.size = 0;
936 return;
937 }
938
939 /* update colorbuffer state bits */
940 rtex->cmask.base_address_reg = rtex->cmask_buffer->gpu_address >> 8;
941
942 if (rscreen->chip_class >= SI)
943 rtex->cb_color_info |= SI_S_028C70_FAST_CLEAR(1);
944 else
945 rtex->cb_color_info |= EG_S_028C70_FAST_CLEAR(1);
946
947 p_atomic_inc(&rscreen->compressed_colortex_counter);
948 }
949
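/* Compute the HTILE buffer size and alignment for pre-GFX9 chips; leaves
 * htile_size at 0 on known-broken kernel/ASIC combinations.
 */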
950 static void r600_texture_get_htile_size(struct r600_common_screen *rscreen,
951 struct r600_texture *rtex)
952 {
953 unsigned cl_width, cl_height, width, height;
954 unsigned slice_elements, slice_bytes, pipe_interleave_bytes, base_align;
955 unsigned num_pipes = rscreen->info.num_tile_pipes;
956
957 assert(rscreen->chip_class <= VI);
958
959 rtex->surface.htile_size = 0;
960
961 if (rscreen->chip_class <= EVERGREEN &&
962 rscreen->info.drm_major == 2 && rscreen->info.drm_minor < 26)
963 return;
964
965 /* HW bug on R6xx. */
966 if (rscreen->chip_class == R600 &&
967 (rtex->resource.b.b.width0 > 7680 ||
968 rtex->resource.b.b.height0 > 7680))
969 return;
970
971 /* HTILE is broken with 1D tiling on old kernels and CIK. */
972 if (rscreen->chip_class >= CIK &&
973 rtex->surface.u.legacy.level[0].mode == RADEON_SURF_MODE_1D &&
974 rscreen->info.drm_major == 2 && rscreen->info.drm_minor < 38)
975 return;
976
977 /* Overalign HTILE on P2 configs to work around GPU hangs in
978 * piglit/depthstencil-render-miplevels 585.
979 *
980 * This has been confirmed to help Kabini & Stoney, where the hangs
981 * are always reproducible. I think I have seen the test hang
982 * on Carrizo too, though it was very rare there.
983 */
984 if (rscreen->chip_class >= CIK && num_pipes < 4)
985 num_pipes = 4;
986
987 switch (num_pipes) {
988 case 1:
989 cl_width = 32;
990 cl_height = 16;
991 break;
992 case 2:
993 cl_width = 32;
994 cl_height = 32;
995 break;
996 case 4:
997 cl_width = 64;
998 cl_height = 32;
999 break;
1000 case 8:
1001 cl_width = 64;
1002 cl_height = 64;
1003 break;
1004 case 16:
1005 cl_width = 128;
1006 cl_height = 64;
1007 break;
1008 default:
1009 assert(0);
1010 return;
1011 }
1012
1013 width = align(rtex->resource.b.b.width0, cl_width * 8);
1014 height = align(rtex->resource.b.b.height0, cl_height * 8);
1015
1016 slice_elements = (width * height) / (8 * 8);
1017 slice_bytes = slice_elements * 4;
1018
1019 pipe_interleave_bytes = rscreen->info.pipe_interleave_bytes;
1020 base_align = num_pipes * pipe_interleave_bytes;
1021
1022 rtex->surface.htile_alignment = base_align;
1023 rtex->surface.htile_size =
1024 (util_max_layer(&rtex->resource.b.b, 0) + 1) *
1025 align(slice_bytes, base_align);
1026 }
1027
1028 static void r600_texture_allocate_htile(struct r600_common_screen *rscreen,
1029 struct r600_texture *rtex)
1030 {
1031 if (rscreen->chip_class <= VI && !rtex->tc_compatible_htile)
1032 r600_texture_get_htile_size(rscreen, rtex);
1033
1034 if (!rtex->surface.htile_size)
1035 return;
1036
1037 rtex->htile_offset = align(rtex->size, rtex->surface.htile_alignment);
1038 rtex->size = rtex->htile_offset + rtex->surface.htile_size;
1039 }
1040
1041 void r600_print_texture_info(struct r600_common_screen *rscreen,
1042 struct r600_texture *rtex, FILE *f)
1043 {
1044 int i;
1045
1046 /* Common parameters. */
1047 fprintf(f, " Info: npix_x=%u, npix_y=%u, npix_z=%u, blk_w=%u, "
1048 "blk_h=%u, array_size=%u, last_level=%u, "
1049 "bpe=%u, nsamples=%u, flags=0x%x, %s\n",
1050 rtex->resource.b.b.width0, rtex->resource.b.b.height0,
1051 rtex->resource.b.b.depth0, rtex->surface.blk_w,
1052 rtex->surface.blk_h,
1053 rtex->resource.b.b.array_size, rtex->resource.b.b.last_level,
1054 rtex->surface.bpe, rtex->resource.b.b.nr_samples,
1055 rtex->surface.flags, util_format_short_name(rtex->resource.b.b.format));
1056
1057 if (rscreen->chip_class >= GFX9) {
1058 fprintf(f, " Surf: size=%"PRIu64", slice_size=%"PRIu64", "
1059 "alignment=%u, swmode=%u, epitch=%u, pitch=%u\n",
1060 rtex->surface.surf_size,
1061 rtex->surface.u.gfx9.surf_slice_size,
1062 rtex->surface.surf_alignment,
1063 rtex->surface.u.gfx9.surf.swizzle_mode,
1064 rtex->surface.u.gfx9.surf.epitch,
1065 rtex->surface.u.gfx9.surf_pitch);
1066
1067 if (rtex->fmask.size) {
1068 fprintf(f, " FMASK: offset=%"PRIu64", size=%"PRIu64", "
1069 "alignment=%u, swmode=%u, epitch=%u\n",
1070 rtex->fmask.offset,
1071 rtex->surface.u.gfx9.fmask_size,
1072 rtex->surface.u.gfx9.fmask_alignment,
1073 rtex->surface.u.gfx9.fmask.swizzle_mode,
1074 rtex->surface.u.gfx9.fmask.epitch);
1075 }
1076
1077 if (rtex->cmask.size) {
1078 fprintf(f, " CMask: offset=%"PRIu64", size=%"PRIu64", "
1079 "alignment=%u, rb_aligned=%u, pipe_aligned=%u\n",
1080 rtex->cmask.offset,
1081 rtex->surface.u.gfx9.cmask_size,
1082 rtex->surface.u.gfx9.cmask_alignment,
1083 rtex->surface.u.gfx9.cmask.rb_aligned,
1084 rtex->surface.u.gfx9.cmask.pipe_aligned);
1085 }
1086
1087 if (rtex->htile_offset) {
1088 fprintf(f, " HTile: offset=%"PRIu64", size=%"PRIu64", alignment=%u, "
1089 "rb_aligned=%u, pipe_aligned=%u\n",
1090 rtex->htile_offset,
1091 rtex->surface.htile_size,
1092 rtex->surface.htile_alignment,
1093 rtex->surface.u.gfx9.htile.rb_aligned,
1094 rtex->surface.u.gfx9.htile.pipe_aligned);
1095 }
1096
1097 if (rtex->dcc_offset) {
1098 fprintf(f, " DCC: offset=%"PRIu64", size=%"PRIu64", "
1099 "alignment=%u, pitch_max=%u, num_dcc_levels=%u\n",
1100 rtex->dcc_offset, rtex->surface.dcc_size,
1101 rtex->surface.dcc_alignment,
1102 rtex->surface.u.gfx9.dcc_pitch_max,
1103 rtex->surface.num_dcc_levels);
1104 }
1105
1106 if (rtex->surface.u.gfx9.stencil_offset) {
1107 fprintf(f, " Stencil: offset=%"PRIu64", swmode=%u, epitch=%u\n",
1108 rtex->surface.u.gfx9.stencil_offset,
1109 rtex->surface.u.gfx9.stencil.swizzle_mode,
1110 rtex->surface.u.gfx9.stencil.epitch);
1111 }
1112 return;
1113 }
1114
1115 fprintf(f, " Layout: size=%"PRIu64", alignment=%u, bankw=%u, "
1116 "bankh=%u, nbanks=%u, mtilea=%u, tilesplit=%u, pipeconfig=%u, scanout=%u\n",
1117 rtex->surface.surf_size, rtex->surface.surf_alignment, rtex->surface.u.legacy.bankw,
1118 rtex->surface.u.legacy.bankh, rtex->surface.u.legacy.num_banks, rtex->surface.u.legacy.mtilea,
1119 rtex->surface.u.legacy.tile_split, rtex->surface.u.legacy.pipe_config,
1120 (rtex->surface.flags & RADEON_SURF_SCANOUT) != 0);
1121
1122 if (rtex->fmask.size)
1123 fprintf(f, " FMask: offset=%"PRIu64", size=%"PRIu64", alignment=%u, pitch_in_pixels=%u, "
1124 "bankh=%u, slice_tile_max=%u, tile_mode_index=%u\n",
1125 rtex->fmask.offset, rtex->fmask.size, rtex->fmask.alignment,
1126 rtex->fmask.pitch_in_pixels, rtex->fmask.bank_height,
1127 rtex->fmask.slice_tile_max, rtex->fmask.tile_mode_index);
1128
1129 if (rtex->cmask.size)
1130 fprintf(f, " CMask: offset=%"PRIu64", size=%"PRIu64", alignment=%u, "
1131 "slice_tile_max=%u\n",
1132 rtex->cmask.offset, rtex->cmask.size, rtex->cmask.alignment,
1133 rtex->cmask.slice_tile_max);
1134
1135 if (rtex->htile_offset)
1136 fprintf(f, " HTile: offset=%"PRIu64", size=%"PRIu64", "
1137 "alignment=%u, TC_compatible = %u\n",
1138 rtex->htile_offset, rtex->surface.htile_size,
1139 rtex->surface.htile_alignment,
1140 rtex->tc_compatible_htile);
1141
1142 if (rtex->dcc_offset) {
1143 fprintf(f, " DCC: offset=%"PRIu64", size=%"PRIu64", alignment=%u\n",
1144 rtex->dcc_offset, rtex->surface.dcc_size,
1145 rtex->surface.dcc_alignment);
1146 for (i = 0; i <= rtex->resource.b.b.last_level; i++)
1147 fprintf(f, " DCCLevel[%i]: enabled=%u, offset=%"PRIu64", "
1148 "fast_clear_size=%"PRIu64"\n",
1149 i, i < rtex->surface.num_dcc_levels,
1150 rtex->surface.u.legacy.level[i].dcc_offset,
1151 rtex->surface.u.legacy.level[i].dcc_fast_clear_size);
1152 }
1153
1154 for (i = 0; i <= rtex->resource.b.b.last_level; i++)
1155 fprintf(f, " Level[%i]: offset=%"PRIu64", slice_size=%"PRIu64", "
1156 "npix_x=%u, npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
1157 "mode=%u, tiling_index = %u\n",
1158 i, rtex->surface.u.legacy.level[i].offset,
1159 rtex->surface.u.legacy.level[i].slice_size,
1160 u_minify(rtex->resource.b.b.width0, i),
1161 u_minify(rtex->resource.b.b.height0, i),
1162 u_minify(rtex->resource.b.b.depth0, i),
1163 rtex->surface.u.legacy.level[i].nblk_x,
1164 rtex->surface.u.legacy.level[i].nblk_y,
1165 rtex->surface.u.legacy.level[i].mode,
1166 rtex->surface.u.legacy.tiling_index[i]);
1167
1168 if (rtex->surface.flags & RADEON_SURF_SBUFFER) {
1169 fprintf(f, " StencilLayout: tilesplit=%u\n",
1170 rtex->surface.u.legacy.stencil_tile_split);
1171 for (i = 0; i <= rtex->resource.b.b.last_level; i++) {
1172 fprintf(f, " StencilLevel[%i]: offset=%"PRIu64", "
1173 "slice_size=%"PRIu64", npix_x=%u, "
1174 "npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
1175 "mode=%u, tiling_index = %u\n",
1176 i, rtex->surface.u.legacy.stencil_level[i].offset,
1177 rtex->surface.u.legacy.stencil_level[i].slice_size,
1178 u_minify(rtex->resource.b.b.width0, i),
1179 u_minify(rtex->resource.b.b.height0, i),
1180 u_minify(rtex->resource.b.b.depth0, i),
1181 rtex->surface.u.legacy.stencil_level[i].nblk_x,
1182 rtex->surface.u.legacy.stencil_level[i].nblk_y,
1183 rtex->surface.u.legacy.stencil_level[i].mode,
1184 rtex->surface.u.legacy.stencil_tiling_index[i]);
1185 }
1186 }
1187 }
1188
1189 /* Common processing for r600_texture_create and r600_texture_from_handle */
1190 static struct r600_texture *
1191 r600_texture_create_object(struct pipe_screen *screen,
1192 const struct pipe_resource *base,
1193 struct pb_buffer *buf,
1194 struct radeon_surf *surface)
1195 {
1196 struct r600_texture *rtex;
1197 struct r600_resource *resource;
1198 struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
1199
1200 rtex = CALLOC_STRUCT(r600_texture);
1201 if (!rtex)
1202 return NULL;
1203
1204 resource = &rtex->resource;
1205 resource->b.b = *base;
1206 resource->b.b.next = NULL;
1207 resource->b.vtbl = &r600_texture_vtbl;
1208 pipe_reference_init(&resource->b.b.reference, 1);
1209 resource->b.b.screen = screen;
1210
1211 /* don't include stencil-only formats which we don't support for rendering */
1212 rtex->is_depth = util_format_has_depth(util_format_description(rtex->resource.b.b.format));
1213
1214 rtex->surface = *surface;
1215 rtex->size = rtex->surface.surf_size;
1216
1217 rtex->tc_compatible_htile = rtex->surface.htile_size != 0 &&
1218 (rtex->surface.flags &
1219 RADEON_SURF_TC_COMPATIBLE_HTILE);
1220
1221 /* TC-compatible HTILE:
1222 * - VI only supports Z32_FLOAT.
1223 * - GFX9 only supports Z32_FLOAT and Z16_UNORM. */
1224 if (rtex->tc_compatible_htile) {
1225 if (rscreen->chip_class >= GFX9 &&
1226 base->format == PIPE_FORMAT_Z16_UNORM)
1227 rtex->db_render_format = base->format;
1228 else
1229 rtex->db_render_format = PIPE_FORMAT_Z32_FLOAT;
1230 } else {
1231 rtex->db_render_format = base->format;
1232 }
1233
1234 /* Tiled depth textures utilize the non-displayable tile order.
1235 * This must be done after r600_setup_surface.
1236 * Applies to R600-Cayman. */
1237 rtex->non_disp_tiling = rtex->is_depth && rtex->surface.u.legacy.level[0].mode >= RADEON_SURF_MODE_1D;
1238 /* Applies to GCN. */
1239 rtex->last_msaa_resolve_target_micro_mode = rtex->surface.micro_tile_mode;
1240
1241 /* Disable separate DCC at the beginning. DRI2 doesn't reuse buffers
1242 * between frames, so the only thing that can enable separate DCC
1243 * with DRI2 is multiple slow clears within a frame.
1244 */
1245 rtex->ps_draw_ratio = 0;
1246
1247 if (rtex->is_depth) {
1248 if (base->flags & (R600_RESOURCE_FLAG_TRANSFER |
1249 R600_RESOURCE_FLAG_FLUSHED_DEPTH) ||
1250 rscreen->chip_class >= EVERGREEN) {
1251 if (rscreen->chip_class >= GFX9) {
1252 rtex->can_sample_z = true;
1253 rtex->can_sample_s = true;
1254 } else {
1255 rtex->can_sample_z = !rtex->surface.u.legacy.depth_adjusted;
1256 rtex->can_sample_s = !rtex->surface.u.legacy.stencil_adjusted;
1257 }
1258 } else {
1259 if (rtex->resource.b.b.nr_samples <= 1 &&
1260 (rtex->resource.b.b.format == PIPE_FORMAT_Z16_UNORM ||
1261 rtex->resource.b.b.format == PIPE_FORMAT_Z32_FLOAT))
1262 rtex->can_sample_z = true;
1263 }
1264
1265 if (!(base->flags & (R600_RESOURCE_FLAG_TRANSFER |
1266 R600_RESOURCE_FLAG_FLUSHED_DEPTH))) {
1267 rtex->db_compatible = true;
1268
1269 if (!(rscreen->debug_flags & DBG_NO_HYPERZ))
1270 r600_texture_allocate_htile(rscreen, rtex);
1271 }
1272 } else {
1273 if (base->nr_samples > 1) {
1274 if (!buf) {
1275 r600_texture_allocate_fmask(rscreen, rtex);
1276 r600_texture_allocate_cmask(rscreen, rtex);
1277 rtex->cmask_buffer = &rtex->resource;
1278 }
1279 if (!rtex->fmask.size || !rtex->cmask.size) {
1280 FREE(rtex);
1281 return NULL;
1282 }
1283 }
1284
1285 /* Shared textures must always set up DCC here.
1286 * If it's not present, it will be disabled by
1287 * apply_opaque_metadata later.
1288 */
1289 if (rtex->surface.dcc_size &&
1290 (buf || !(rscreen->debug_flags & DBG_NO_DCC)) &&
1291 !(rtex->surface.flags & RADEON_SURF_SCANOUT)) {
1292 /* Reserve space for the DCC buffer. */
1293 rtex->dcc_offset = align64(rtex->size, rtex->surface.dcc_alignment);
1294 rtex->size = rtex->dcc_offset + rtex->surface.dcc_size;
1295 }
1296 }
1297
1298 /* Now create the backing buffer. */
1299 if (!buf) {
1300 r600_init_resource_fields(rscreen, resource, rtex->size,
1301 rtex->surface.surf_alignment);
1302
1303 /* Displayable surfaces are not suballocated. */
1304 if (resource->b.b.bind & PIPE_BIND_SCANOUT)
1305 resource->flags |= RADEON_FLAG_NO_SUBALLOC;
1306
1307 if (!r600_alloc_resource(rscreen, resource)) {
1308 FREE(rtex);
1309 return NULL;
1310 }
1311 } else {
1312 resource->buf = buf;
1313 resource->gpu_address = rscreen->ws->buffer_get_virtual_address(resource->buf);
1314 resource->bo_size = buf->size;
1315 resource->bo_alignment = buf->alignment;
1316 resource->domains = rscreen->ws->buffer_get_initial_domain(resource->buf);
1317 if (resource->domains & RADEON_DOMAIN_VRAM)
1318 resource->vram_usage = buf->size;
1319 else if (resource->domains & RADEON_DOMAIN_GTT)
1320 resource->gart_usage = buf->size;
1321 }
1322
1323 if (rtex->cmask.size) {
1324 /* Initialize the cmask to 0xCC (= compressed state). */
1325 r600_screen_clear_buffer(rscreen, &rtex->cmask_buffer->b.b,
1326 rtex->cmask.offset, rtex->cmask.size,
1327 0xCCCCCCCC);
1328 }
1329 if (rtex->htile_offset) {
1330 uint32_t clear_value = 0;
1331
1332 if (rscreen->chip_class >= GFX9 || rtex->tc_compatible_htile)
1333 clear_value = 0x0000030F;
1334
1335 r600_screen_clear_buffer(rscreen, &rtex->resource.b.b,
1336 rtex->htile_offset,
1337 rtex->surface.htile_size,
1338 clear_value);
1339 }
1340
1341 /* Initialize DCC only if the texture is not being imported. */
1342 if (!buf && rtex->dcc_offset) {
1343 r600_screen_clear_buffer(rscreen, &rtex->resource.b.b,
1344 rtex->dcc_offset,
1345 rtex->surface.dcc_size,
1346 0xFFFFFFFF);
1347 }
1348
1349 /* Initialize the CMASK base register value. */
1350 rtex->cmask.base_address_reg =
1351 (rtex->resource.gpu_address + rtex->cmask.offset) >> 8;
1352
1353 if (rscreen->debug_flags & DBG_VM) {
1354 fprintf(stderr, "VM start=0x%"PRIX64" end=0x%"PRIX64" | Texture %ix%ix%i, %i levels, %i samples, %s\n",
1355 rtex->resource.gpu_address,
1356 rtex->resource.gpu_address + rtex->resource.buf->size,
1357 base->width0, base->height0, util_max_layer(base, 0)+1, base->last_level+1,
1358 base->nr_samples ? base->nr_samples : 1, util_format_short_name(base->format));
1359 }
1360
1361 if (rscreen->debug_flags & DBG_TEX) {
1362 puts("Texture:");
1363 r600_print_texture_info(rscreen, rtex, stdout);
1364 fflush(stdout);
1365 }
1366
1367 return rtex;
1368 }
1369
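/* Pick a linear, 1D- or 2D-tiled mode for a new resource based on its
 * template flags, format, usage and dimensions.
 */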
1370 static enum radeon_surf_mode
1371 r600_choose_tiling(struct r600_common_screen *rscreen,
1372 const struct pipe_resource *templ)
1373 {
1374 const struct util_format_description *desc = util_format_description(templ->format);
1375 bool force_tiling = templ->flags & R600_RESOURCE_FLAG_FORCE_TILING;
1376 bool is_depth_stencil = util_format_is_depth_or_stencil(templ->format) &&
1377 !(templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH);
1378
1379 /* MSAA resources must be 2D tiled. */
1380 if (templ->nr_samples > 1)
1381 return RADEON_SURF_MODE_2D;
1382
1383 /* Transfer resources should be linear. */
1384 if (templ->flags & R600_RESOURCE_FLAG_TRANSFER)
1385 return RADEON_SURF_MODE_LINEAR_ALIGNED;
1386
1387 /* Avoid Z/S decompress blits by forcing TC-compatible HTILE on VI,
1388 * which requires 2D tiling.
1389 */
1390 if (rscreen->chip_class == VI &&
1391 is_depth_stencil &&
1392 (templ->flags & PIPE_RESOURCE_FLAG_TEXTURING_MORE_LIKELY))
1393 return RADEON_SURF_MODE_2D;
1394
1395 /* r600g: force tiling on TEXTURE_2D and TEXTURE_3D compute resources. */
1396 if (rscreen->chip_class >= R600 && rscreen->chip_class <= CAYMAN &&
1397 (templ->bind & PIPE_BIND_COMPUTE_RESOURCE) &&
1398 (templ->target == PIPE_TEXTURE_2D ||
1399 templ->target == PIPE_TEXTURE_3D))
1400 force_tiling = true;
1401
1402 /* Handle common candidates for the linear mode.
1403 * Compressed textures and DB surfaces must always be tiled.
1404 */
1405 if (!force_tiling &&
1406 !is_depth_stencil &&
1407 !util_format_is_compressed(templ->format)) {
1408 if (rscreen->debug_flags & DBG_NO_TILING)
1409 return RADEON_SURF_MODE_LINEAR_ALIGNED;
1410
1411 /* Tiling doesn't work with the 422 (SUBSAMPLED) formats on R600+. */
1412 if (desc->layout == UTIL_FORMAT_LAYOUT_SUBSAMPLED)
1413 return RADEON_SURF_MODE_LINEAR_ALIGNED;
1414
1415 /* Cursors are linear on SI.
1416 * (XXX double-check, maybe also use RADEON_SURF_SCANOUT) */
1417 if (rscreen->chip_class >= SI &&
1418 (templ->bind & PIPE_BIND_CURSOR))
1419 return RADEON_SURF_MODE_LINEAR_ALIGNED;
1420
1421 if (templ->bind & PIPE_BIND_LINEAR)
1422 return RADEON_SURF_MODE_LINEAR_ALIGNED;
1423
1424 /* Textures with a very small height are recommended to be linear. */
1425 if (templ->target == PIPE_TEXTURE_1D ||
1426 templ->target == PIPE_TEXTURE_1D_ARRAY ||
1427 /* Only very thin and long 2D textures should benefit from
1428 * linear_aligned. */
1429 (templ->width0 > 8 && templ->height0 <= 2))
1430 return RADEON_SURF_MODE_LINEAR_ALIGNED;
1431
1432 /* Textures likely to be mapped often. */
1433 if (templ->usage == PIPE_USAGE_STAGING ||
1434 templ->usage == PIPE_USAGE_STREAM)
1435 return RADEON_SURF_MODE_LINEAR_ALIGNED;
1436 }
1437
1438 /* Make small textures 1D tiled. */
1439 if (templ->width0 <= 16 || templ->height0 <= 16 ||
1440 (rscreen->debug_flags & DBG_NO_2D_TILING))
1441 return RADEON_SURF_MODE_1D;
1442
1443 /* The allocator will switch to 1D if needed. */
1444 return RADEON_SURF_MODE_2D;
1445 }
1446
1447 struct pipe_resource *r600_texture_create(struct pipe_screen *screen,
1448 const struct pipe_resource *templ)
1449 {
1450 struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
1451 struct radeon_surf surface = {0};
1452 bool is_flushed_depth = templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH;
1453 bool tc_compatible_htile =
1454 rscreen->chip_class >= VI &&
1455 (templ->flags & PIPE_RESOURCE_FLAG_TEXTURING_MORE_LIKELY) &&
1456 !(rscreen->debug_flags & DBG_NO_HYPERZ) &&
1457 !is_flushed_depth &&
1458 templ->nr_samples <= 1 && /* TC-compat HTILE is less efficient with MSAA */
1459 util_format_is_depth_or_stencil(templ->format);
1460
1461 int r;
1462
1463 r = r600_init_surface(rscreen, &surface, templ,
1464 r600_choose_tiling(rscreen, templ), 0, 0,
1465 false, false, is_flushed_depth,
1466 tc_compatible_htile);
1467 if (r) {
1468 return NULL;
1469 }
1470
1471 return (struct pipe_resource *)
1472 r600_texture_create_object(screen, templ, NULL, &surface);
1473 }
1474
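/* Import a 2D texture from a winsys handle (DRI/DMA-buf sharing): read the
 * BO metadata, reconstruct the surface layout and wrap the buffer in a new
 * r600_texture marked as shared.
 */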
1475 static struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen,
1476 const struct pipe_resource *templ,
1477 struct winsys_handle *whandle,
1478 unsigned usage)
1479 {
1480 struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
1481 struct pb_buffer *buf = NULL;
1482 unsigned stride = 0, offset = 0;
1483 enum radeon_surf_mode array_mode;
1484 struct radeon_surf surface = {};
1485 int r;
1486 struct radeon_bo_metadata metadata = {};
1487 struct r600_texture *rtex;
1488 bool is_scanout;
1489
1490 /* Support only 2D textures without mipmaps */
1491 if ((templ->target != PIPE_TEXTURE_2D && templ->target != PIPE_TEXTURE_RECT) ||
1492 templ->depth0 != 1 || templ->last_level != 0)
1493 return NULL;
1494
1495 buf = rscreen->ws->buffer_from_handle(rscreen->ws, whandle, &stride, &offset);
1496 if (!buf)
1497 return NULL;
1498
1499 rscreen->ws->buffer_get_metadata(buf, &metadata);
1500 r600_surface_import_metadata(rscreen, &surface, &metadata,
1501 &array_mode, &is_scanout);
1502
1503 r = r600_init_surface(rscreen, &surface, templ, array_mode, stride,
1504 offset, true, is_scanout, false, false);
1505 if (r) {
1506 return NULL;
1507 }
1508
1509 rtex = r600_texture_create_object(screen, templ, buf, &surface);
1510 if (!rtex)
1511 return NULL;
1512
1513 rtex->resource.b.is_shared = true;
1514 rtex->resource.external_usage = usage;
1515
1516 if (rscreen->apply_opaque_metadata)
1517 rscreen->apply_opaque_metadata(rscreen, rtex, &metadata);
1518
1519 /* Validate that addrlib arrived at the same surface parameters. */
1520 if (rscreen->chip_class >= GFX9) {
1521 assert(metadata.u.gfx9.swizzle_mode == surface.u.gfx9.surf.swizzle_mode);
1522 }
1523
1524 assert(rtex->surface.tile_swizzle == 0);
1525 return &rtex->resource.b.b;
1526 }
1527
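/* Create (or reuse) a texture holding a "flushed" copy of a depth-stencil
 * resource so it can be sampled; the format may be reduced to just the Z or
 * S plane when only one of them needs to be copied.
 */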
1528 bool r600_init_flushed_depth_texture(struct pipe_context *ctx,
1529 struct pipe_resource *texture,
1530 struct r600_texture **staging)
1531 {
1532 struct r600_texture *rtex = (struct r600_texture*)texture;
1533 struct pipe_resource resource;
1534 struct r600_texture **flushed_depth_texture = staging ?
1535 staging : &rtex->flushed_depth_texture;
1536 enum pipe_format pipe_format = texture->format;
1537
1538 if (!staging) {
1539 if (rtex->flushed_depth_texture)
1540 return true; /* it's ready */
1541
1542 if (!rtex->can_sample_z && rtex->can_sample_s) {
1543 switch (pipe_format) {
1544 case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
1545 /* Save memory by not allocating the S plane. */
1546 pipe_format = PIPE_FORMAT_Z32_FLOAT;
1547 break;
1548 case PIPE_FORMAT_Z24_UNORM_S8_UINT:
1549 case PIPE_FORMAT_S8_UINT_Z24_UNORM:
1550 /* Save memory bandwidth by not copying the
1551 * stencil part during flush.
1552 *
1553 * This potentially increases memory bandwidth
1554 * if an application uses both Z and S texturing
1555 * simultaneously (a flushed Z24S8 texture
1556 * would be stored compactly), but how often
1557 * does that really happen?
1558 */
1559 pipe_format = PIPE_FORMAT_Z24X8_UNORM;
1560 break;
1561 default:;
1562 }
1563 } else if (!rtex->can_sample_s && rtex->can_sample_z) {
1564 assert(util_format_has_stencil(util_format_description(pipe_format)));
1565
1566 /* DB->CB copies to an 8bpp surface don't work. */
1567 pipe_format = PIPE_FORMAT_X24S8_UINT;
1568 }
1569 }
1570
1571 memset(&resource, 0, sizeof(resource));
1572 resource.target = texture->target;
1573 resource.format = pipe_format;
1574 resource.width0 = texture->width0;
1575 resource.height0 = texture->height0;
1576 resource.depth0 = texture->depth0;
1577 resource.array_size = texture->array_size;
1578 resource.last_level = texture->last_level;
1579 resource.nr_samples = texture->nr_samples;
1580 resource.usage = staging ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
1581 resource.bind = texture->bind & ~PIPE_BIND_DEPTH_STENCIL;
1582 resource.flags = texture->flags | R600_RESOURCE_FLAG_FLUSHED_DEPTH;
1583
1584 if (staging)
1585 resource.flags |= R600_RESOURCE_FLAG_TRANSFER;
1586
1587 *flushed_depth_texture = (struct r600_texture *)ctx->screen->resource_create(ctx->screen, &resource);
1588 if (*flushed_depth_texture == NULL) {
1589 R600_ERR("failed to create temporary texture to hold flushed depth\n");
1590 return false;
1591 }
1592
1593 (*flushed_depth_texture)->non_disp_tiling = false;
1594 return true;
1595 }
1596
1597 /**
1598 * Initialize the pipe_resource descriptor to be of the same size as the box,
1599 * which is supposed to hold a subregion of the texture "orig" at the given
1600 * mipmap level.
1601 */
1602 static void r600_init_temp_resource_from_box(struct pipe_resource *res,
1603 struct pipe_resource *orig,
1604 const struct pipe_box *box,
1605 unsigned level, unsigned flags)
1606 {
1607 memset(res, 0, sizeof(*res));
1608 res->format = orig->format;
1609 res->width0 = box->width;
1610 res->height0 = box->height;
1611 res->depth0 = 1;
1612 res->array_size = 1;
1613 res->usage = flags & R600_RESOURCE_FLAG_TRANSFER ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
1614 res->flags = flags;
1615
1616 /* We must set the correct texture target and dimensions for a 3D box. */
1617 if (box->depth > 1 && util_max_layer(orig, level) > 0) {
1618 res->target = PIPE_TEXTURE_2D_ARRAY;
1619 res->array_size = box->depth;
1620 } else {
1621 res->target = PIPE_TEXTURE_2D;
1622 }
1623 }
1624
1625 static bool r600_can_invalidate_texture(struct r600_common_screen *rscreen,
1626 struct r600_texture *rtex,
1627 unsigned transfer_usage,
1628 const struct pipe_box *box)
1629 {
1630 /* r600g doesn't react to dirty_tex_descriptor_counter */
1631 return rscreen->chip_class >= SI &&
1632 !rtex->resource.b.is_shared &&
1633 !(transfer_usage & PIPE_TRANSFER_READ) &&
1634 rtex->resource.b.b.last_level == 0 &&
1635 util_texrange_covers_whole_level(&rtex->resource.b.b, 0,
1636 box->x, box->y, box->z,
1637 box->width, box->height,
1638 box->depth);
1639 }
1640
1641 static void r600_texture_invalidate_storage(struct r600_common_context *rctx,
1642 struct r600_texture *rtex)
1643 {
1644 struct r600_common_screen *rscreen = rctx->screen;
1645
1646 /* There is no point in discarding depth and tiled buffers. */
1647 assert(!rtex->is_depth);
1648 assert(rtex->surface.is_linear);
1649
1650 /* Reallocate the buffer in the same pipe_resource. */
1651 r600_alloc_resource(rscreen, &rtex->resource);
1652
1653 /* Initialize the CMASK base address (needed even without CMASK). */
1654 rtex->cmask.base_address_reg =
1655 (rtex->resource.gpu_address + rtex->cmask.offset) >> 8;
1656
1657 p_atomic_inc(&rscreen->dirty_tex_counter);
1658
1659 rctx->num_alloc_tex_transfer_bytes += rtex->size;
1660 }
1661
1662 static void *r600_texture_transfer_map(struct pipe_context *ctx,
1663 struct pipe_resource *texture,
1664 unsigned level,
1665 unsigned usage,
1666 const struct pipe_box *box,
1667 struct pipe_transfer **ptransfer)
1668 {
1669 struct r600_common_context *rctx = (struct r600_common_context*)ctx;
1670 struct r600_texture *rtex = (struct r600_texture*)texture;
1671 struct r600_transfer *trans;
1672 struct r600_resource *buf;
1673 unsigned offset = 0;
1674 char *map;
1675 bool use_staging_texture = false;
1676
1677 assert(!(texture->flags & R600_RESOURCE_FLAG_TRANSFER));
1678 assert(box->width && box->height && box->depth);
1679
1680 /* Depth textures use staging unconditionally. */
1681 if (!rtex->is_depth) {
1682 /* Degrade the tile mode if we get too many transfers on APUs.
1683 * On dGPUs, the staging texture is always faster.
1684 * Only count uploads that are at least 4x4 pixels large.
1685 */
1686 if (!rctx->screen->info.has_dedicated_vram &&
1687 level == 0 &&
1688 box->width >= 4 && box->height >= 4 &&
1689 p_atomic_inc_return(&rtex->num_level0_transfers) == 10) {
1690 bool can_invalidate =
1691 r600_can_invalidate_texture(rctx->screen, rtex,
1692 usage, box);
1693
1694 r600_reallocate_texture_inplace(rctx, rtex,
1695 PIPE_BIND_LINEAR,
1696 can_invalidate);
1697 }
1698
1699 /* Tiled textures need to be converted to a linear layout for CPU
1700 * access. The staging texture is always linear and is placed in GART.
1701 *
1702 * Reading from VRAM or GTT WC is slow; always use the staging
1703 * texture in this case.
1704 *
1705 * Use the staging texture for uploads if the underlying BO
1706 * is busy.
1707 */
1708 if (!rtex->surface.is_linear)
1709 use_staging_texture = true;
1710 else if (usage & PIPE_TRANSFER_READ)
1711 use_staging_texture =
1712 rtex->resource.domains & RADEON_DOMAIN_VRAM ||
1713 rtex->resource.flags & RADEON_FLAG_GTT_WC;
1714 /* Write & linear only: */
1715 else if (r600_rings_is_buffer_referenced(rctx, rtex->resource.buf,
1716 RADEON_USAGE_READWRITE) ||
1717 !rctx->ws->buffer_wait(rtex->resource.buf, 0,
1718 RADEON_USAGE_READWRITE)) {
1719 /* It's busy. */
1720 if (r600_can_invalidate_texture(rctx->screen, rtex,
1721 usage, box))
1722 r600_texture_invalidate_storage(rctx, rtex);
1723 else
1724 use_staging_texture = true;
1725 }
1726 }
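
/* To summarize the path selection above: tiled textures always go through
 * a staging copy; linear textures are staged for reads when they live in
 * VRAM or write-combined GTT; linear writes to a busy BO either reallocate
 * the storage in place (when the whole level-0 texture is overwritten and
 * can be invalidated) or fall back to staging; everything else is mapped
 * directly.
 */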
1727
1728 trans = CALLOC_STRUCT(r600_transfer);
1729 if (!trans)
1730 return NULL;
1731 pipe_resource_reference(&trans->b.b.resource, texture);
1732 trans->b.b.level = level;
1733 trans->b.b.usage = usage;
1734 trans->b.b.box = *box;
1735
1736 if (rtex->is_depth) {
1737 struct r600_texture *staging_depth;
1738
1739 if (rtex->resource.b.b.nr_samples > 1) {
1740 /* MSAA depth buffers need to be converted to single sample buffers.
1741 *
1742 * Mapping MSAA depth buffers can occur if ReadPixels is called
1743 * with a multisample GLX visual.
1744 *
1745 * First downsample the depth buffer to a temporary texture,
1746 * then decompress the temporary one to staging.
1747 *
1748 * Only the region being mapped is transferred.
1749 */
1750 struct pipe_resource resource;
1751
1752 r600_init_temp_resource_from_box(&resource, texture, box, level, 0);
1753
1754 if (!r600_init_flushed_depth_texture(ctx, &resource, &staging_depth)) {
1755 R600_ERR("failed to create temporary texture to hold untiled copy\n");
1756 FREE(trans);
1757 return NULL;
1758 }
1759
1760 if (usage & PIPE_TRANSFER_READ) {
1761 struct pipe_resource *temp = ctx->screen->resource_create(ctx->screen, &resource);
1762 if (!temp) {
1763 R600_ERR("failed to create a temporary depth texture\n");
1764 FREE(trans);
1765 return NULL;
1766 }
1767
1768 r600_copy_region_with_blit(ctx, temp, 0, 0, 0, 0, texture, level, box);
1769 rctx->blit_decompress_depth(ctx, (struct r600_texture*)temp, staging_depth,
1770 0, 0, 0, box->depth, 0, 0);
1771 pipe_resource_reference(&temp, NULL);
1772 }
1773
1774 /* Just get the strides. */
1775 r600_texture_get_offset(rctx->screen, staging_depth, level, NULL,
1776 &trans->b.b.stride,
1777 &trans->b.b.layer_stride);
1778 } else {
1779 /* XXX: only readback the rectangle which is being mapped? */
1780 /* XXX: when discard is true, no need to read back from depth texture */
1781 if (!r600_init_flushed_depth_texture(ctx, texture, &staging_depth)) {
1782 R600_ERR("failed to create temporary texture to hold untiled copy\n");
1783 FREE(trans);
1784 return NULL;
1785 }
1786
1787 rctx->blit_decompress_depth(ctx, rtex, staging_depth,
1788 level, level,
1789 box->z, box->z + box->depth - 1,
1790 0, 0);
1791
1792 offset = r600_texture_get_offset(rctx->screen, staging_depth,
1793 level, box,
1794 &trans->b.b.stride,
1795 &trans->b.b.layer_stride);
1796 }
1797
1798 trans->staging = (struct r600_resource*)staging_depth;
1799 buf = trans->staging;
1800 } else if (use_staging_texture) {
1801 struct pipe_resource resource;
1802 struct r600_texture *staging;
1803
1804 r600_init_temp_resource_from_box(&resource, texture, box, level,
1805 R600_RESOURCE_FLAG_TRANSFER);
1806 resource.usage = (usage & PIPE_TRANSFER_READ) ?
1807 PIPE_USAGE_STAGING : PIPE_USAGE_STREAM;
1808
1809 /* Create the temporary texture. */
1810 staging = (struct r600_texture*)ctx->screen->resource_create(ctx->screen, &resource);
1811 if (!staging) {
1812 R600_ERR("failed to create temporary texture to hold untiled copy\n");
1813 FREE(trans);
1814 return NULL;
1815 }
1816 trans->staging = &staging->resource;
1817
1818 /* Just get the strides. */
1819 r600_texture_get_offset(rctx->screen, staging, 0, NULL,
1820 &trans->b.b.stride,
1821 &trans->b.b.layer_stride);
1822
1823 if (usage & PIPE_TRANSFER_READ)
1824 r600_copy_to_staging_texture(ctx, trans);
1825 else
1826 usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
1827
1828 buf = trans->staging;
1829 } else {
1830 /* the resource is mapped directly */
1831 offset = r600_texture_get_offset(rctx->screen, rtex, level, box,
1832 &trans->b.b.stride,
1833 &trans->b.b.layer_stride);
1834 buf = &rtex->resource;
1835 }
1836
1837 if (!(map = r600_buffer_map_sync_with_rings(rctx, buf, usage))) {
1838 r600_resource_reference(&trans->staging, NULL);
1839 FREE(trans);
1840 return NULL;
1841 }
1842
1843 *ptransfer = &trans->b.b;
1844 return map + offset;
1845 }
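
/* Illustrative sketch (assuming the standard Gallium transfer interface of
 * this Mesa version) of how a state tracker typically reaches
 * transfer_map/transfer_unmap for a small 2D upload:
 *
 *    struct pipe_transfer *t;
 *    struct pipe_box box;
 *    u_box_2d(x, y, width, height, &box);
 *    uint8_t *map = pipe->transfer_map(pipe, tex, 0, PIPE_TRANSFER_WRITE,
 *                                      &box, &t);
 *    for (unsigned row = 0; row < height; row++)
 *       memcpy(map + row * t->stride, src + row * src_stride,
 *              width * bytes_per_pixel);
 *    pipe->transfer_unmap(pipe, t);
 */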
1846
1847 static void r600_texture_transfer_unmap(struct pipe_context *ctx,
1848 struct pipe_transfer* transfer)
1849 {
1850 struct r600_common_context *rctx = (struct r600_common_context*)ctx;
1851 struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
1852 struct pipe_resource *texture = transfer->resource;
1853 struct r600_texture *rtex = (struct r600_texture*)texture;
1854
1855 if ((transfer->usage & PIPE_TRANSFER_WRITE) && rtransfer->staging) {
1856 if (rtex->is_depth && rtex->resource.b.b.nr_samples <= 1) {
1857 ctx->resource_copy_region(ctx, texture, transfer->level,
1858 transfer->box.x, transfer->box.y, transfer->box.z,
1859 &rtransfer->staging->b.b, transfer->level,
1860 &transfer->box);
1861 } else {
1862 r600_copy_from_staging_texture(ctx, rtransfer);
1863 }
1864 }
1865
1866 if (rtransfer->staging) {
1867 rctx->num_alloc_tex_transfer_bytes += rtransfer->staging->buf->size;
1868 r600_resource_reference(&rtransfer->staging, NULL);
1869 }
1870
1871 /* Heuristic for {upload, draw, upload, draw, ..}:
1872 *
1873 * Flush the gfx IB if we've allocated too much texture storage.
1874 *
1875 * The idea is that we don't want to build IBs that use too much
1876 * memory and put pressure on the kernel memory manager and we also
1877 * want to make temporary and invalidated buffers go idle ASAP to
1878 * decrease the total memory usage or make them reusable. The memory
1879 * usage will be slightly higher than given here because of the buffer
1880 * cache in the winsys.
1881 *
1882 * The result is that the kernel memory manager is never a bottleneck.
1883 */
1884 if (rctx->num_alloc_tex_transfer_bytes > rctx->screen->info.gart_size / 4) {
1885 rctx->gfx.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
1886 rctx->num_alloc_tex_transfer_bytes = 0;
1887 }
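
/* Worked example of the heuristic above: with a 4 GiB GART, the threshold
 * is 1 GiB of staging/invalidated allocations; once the running total
 * crosses it, the gfx IB is flushed asynchronously and the counter is
 * reset, letting those temporary buffers go idle and be reused sooner.
 */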
1888
1889 pipe_resource_reference(&transfer->resource, NULL);
1890 FREE(transfer);
1891 }
1892
1893 static const struct u_resource_vtbl r600_texture_vtbl =
1894 {
1895 NULL, /* get_handle */
1896 r600_texture_destroy, /* resource_destroy */
1897 r600_texture_transfer_map, /* transfer_map */
1898 u_default_transfer_flush_region, /* transfer_flush_region */
1899 r600_texture_transfer_unmap, /* transfer_unmap */
1900 };
1901
1902 /* DCC channel type categories within which formats can be reinterpreted
1903 * while keeping the same DCC encoding. The swizzle must also match. */
1904 enum dcc_channel_type {
1905 dcc_channel_float32,
1906 dcc_channel_uint32,
1907 dcc_channel_sint32,
1908 dcc_channel_float16,
1909 dcc_channel_uint16,
1910 dcc_channel_sint16,
1911 dcc_channel_uint_10_10_10_2,
1912 dcc_channel_uint8,
1913 dcc_channel_sint8,
1914 dcc_channel_incompatible,
1915 };
1916
1917 /* Return the type of DCC encoding. */
1918 static enum dcc_channel_type
1919 vi_get_dcc_channel_type(const struct util_format_description *desc)
1920 {
1921 int i;
1922
1923 /* Find the first non-void channel. */
1924 for (i = 0; i < desc->nr_channels; i++)
1925 if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID)
1926 break;
1927 if (i == desc->nr_channels)
1928 return dcc_channel_incompatible;
1929
1930 switch (desc->channel[i].size) {
1931 case 32:
1932 if (desc->channel[i].type == UTIL_FORMAT_TYPE_FLOAT)
1933 return dcc_channel_float32;
1934 if (desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED)
1935 return dcc_channel_uint32;
1936 return dcc_channel_sint32;
1937 case 16:
1938 if (desc->channel[i].type == UTIL_FORMAT_TYPE_FLOAT)
1939 return dcc_channel_float16;
1940 if (desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED)
1941 return dcc_channel_uint16;
1942 return dcc_channel_sint16;
1943 case 10:
1944 return dcc_channel_uint_10_10_10_2;
1945 case 8:
1946 if (desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED)
1947 return dcc_channel_uint8;
1948 return dcc_channel_sint8;
1949 default:
1950 return dcc_channel_incompatible;
1951 }
1952 }
1953
1954 /* Return whether it's allowed to reinterpret one format as another with DCC enabled. */
1955 bool vi_dcc_formats_compatible(enum pipe_format format1,
1956 enum pipe_format format2)
1957 {
1958 const struct util_format_description *desc1, *desc2;
1959 enum dcc_channel_type type1, type2;
1960 int i;
1961
1962 if (format1 == format2)
1963 return true;
1964
1965 desc1 = util_format_description(format1);
1966 desc2 = util_format_description(format2);
1967
1968 if (desc1->nr_channels != desc2->nr_channels)
1969 return false;
1970
1971 /* Swizzles must be the same. */
1972 for (i = 0; i < desc1->nr_channels; i++)
1973 if (desc1->swizzle[i] <= PIPE_SWIZZLE_W &&
1974 desc2->swizzle[i] <= PIPE_SWIZZLE_W &&
1975 desc1->swizzle[i] != desc2->swizzle[i])
1976 return false;
1977
1978 type1 = vi_get_dcc_channel_type(desc1);
1979 type2 = vi_get_dcc_channel_type(desc2);
1980
1981 return type1 != dcc_channel_incompatible &&
1982 type2 != dcc_channel_incompatible &&
1983 type1 == type2;
1984 }
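
/* Examples: PIPE_FORMAT_R8G8B8A8_UNORM and PIPE_FORMAT_R8G8B8A8_SRGB are
 * compatible (4 channels, identical XYZW swizzle, both dcc_channel_uint8),
 * so such a view keeps DCC. PIPE_FORMAT_R8G8B8A8_UNORM and
 * PIPE_FORMAT_B8G8R8A8_UNORM are not (the swizzles differ), and
 * PIPE_FORMAT_R32_FLOAT vs. PIPE_FORMAT_R32_UINT are not either (float32
 * vs. uint32 channel type).
 */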
1985
1986 bool vi_dcc_formats_are_incompatible(struct pipe_resource *tex,
1987 unsigned level,
1988 enum pipe_format view_format)
1989 {
1990 struct r600_texture *rtex = (struct r600_texture *)tex;
1991
1992 return vi_dcc_enabled(rtex, level) &&
1993 !vi_dcc_formats_compatible(tex->format, view_format);
1994 }
1995
1996 /* This can't be merged with the above function, because
1997 * vi_dcc_formats_compatible should be called only when DCC is enabled. */
1998 void vi_disable_dcc_if_incompatible_format(struct r600_common_context *rctx,
1999 struct pipe_resource *tex,
2000 unsigned level,
2001 enum pipe_format view_format)
2002 {
2003 struct r600_texture *rtex = (struct r600_texture *)tex;
2004
2005 if (vi_dcc_enabled(rtex, level) &&
2006 !vi_dcc_formats_compatible(tex->format, view_format))
2007 if (!r600_texture_disable_dcc(rctx, (struct r600_texture*)tex))
2008 rctx->decompress_dcc(&rctx->b, rtex);
2009 }
2010
2011 struct pipe_surface *r600_create_surface_custom(struct pipe_context *pipe,
2012 struct pipe_resource *texture,
2013 const struct pipe_surface *templ,
2014 unsigned width0, unsigned height0,
2015 unsigned width, unsigned height)
2016 {
2017 struct r600_surface *surface = CALLOC_STRUCT(r600_surface);
2018
2019 if (!surface)
2020 return NULL;
2021
2022 assert(templ->u.tex.first_layer <= util_max_layer(texture, templ->u.tex.level));
2023 assert(templ->u.tex.last_layer <= util_max_layer(texture, templ->u.tex.level));
2024
2025 pipe_reference_init(&surface->base.reference, 1);
2026 pipe_resource_reference(&surface->base.texture, texture);
2027 surface->base.context = pipe;
2028 surface->base.format = templ->format;
2029 surface->base.width = width;
2030 surface->base.height = height;
2031 surface->base.u = templ->u;
2032
2033 surface->width0 = width0;
2034 surface->height0 = height0;
2035
2036 surface->dcc_incompatible =
2037 texture->target != PIPE_BUFFER &&
2038 vi_dcc_formats_are_incompatible(texture, templ->u.tex.level,
2039 templ->format);
2040 return &surface->base;
2041 }
2042
2043 static struct pipe_surface *r600_create_surface(struct pipe_context *pipe,
2044 struct pipe_resource *tex,
2045 const struct pipe_surface *templ)
2046 {
2047 unsigned level = templ->u.tex.level;
2048 unsigned width = u_minify(tex->width0, level);
2049 unsigned height = u_minify(tex->height0, level);
2050 unsigned width0 = tex->width0;
2051 unsigned height0 = tex->height0;
2052
2053 if (tex->target != PIPE_BUFFER && templ->format != tex->format) {
2054 const struct util_format_description *tex_desc
2055 = util_format_description(tex->format);
2056 const struct util_format_description *templ_desc
2057 = util_format_description(templ->format);
2058
2059 assert(tex_desc->block.bits == templ_desc->block.bits);
2060
2061 /* Adjust size of surface if and only if the block width or
2062 * height is changed. */
2063 if (tex_desc->block.width != templ_desc->block.width ||
2064 tex_desc->block.height != templ_desc->block.height) {
2065 unsigned nblks_x = util_format_get_nblocksx(tex->format, width);
2066 unsigned nblks_y = util_format_get_nblocksy(tex->format, height);
2067
2068 width = nblks_x * templ_desc->block.width;
2069 height = nblks_y * templ_desc->block.height;
2070
2071 width0 = util_format_get_nblocksx(tex->format, width0);
2072 height0 = util_format_get_nblocksy(tex->format, height0);
2073 }
2074 }
2075
2076 return r600_create_surface_custom(pipe, tex, templ,
2077 width0, height0,
2078 width, height);
2079 }
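
/* Hypothetical example of the block-size adjustment above: creating an
 * R32G32B32A32_UINT view of a 256x256 DXT5 texture (4x4 blocks, 128 bits
 * per block, so block.bits match) turns the surface into 64x64, i.e. one
 * "pixel" of the view per compressed block.
 */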
2080
2081 static void r600_surface_destroy(struct pipe_context *pipe,
2082 struct pipe_surface *surface)
2083 {
2084 struct r600_surface *surf = (struct r600_surface*)surface;
2085 r600_resource_reference(&surf->cb_buffer_fmask, NULL);
2086 r600_resource_reference(&surf->cb_buffer_cmask, NULL);
2087 pipe_resource_reference(&surface->texture, NULL);
2088 FREE(surface);
2089 }
2090
2091 static void r600_clear_texture(struct pipe_context *pipe,
2092 struct pipe_resource *tex,
2093 unsigned level,
2094 const struct pipe_box *box,
2095 const void *data)
2096 {
2097 struct pipe_screen *screen = pipe->screen;
2098 struct r600_texture *rtex = (struct r600_texture*)tex;
2099 struct pipe_surface tmpl = {{0}};
2100 struct pipe_surface *sf;
2101 const struct util_format_description *desc =
2102 util_format_description(tex->format);
2103
2104 tmpl.format = tex->format;
2105 tmpl.u.tex.first_layer = box->z;
2106 tmpl.u.tex.last_layer = box->z + box->depth - 1;
2107 tmpl.u.tex.level = level;
2108 sf = pipe->create_surface(pipe, tex, &tmpl);
2109 if (!sf)
2110 return;
2111
2112 if (rtex->is_depth) {
2113 unsigned clear;
2114 float depth;
2115 uint8_t stencil = 0;
2116
2117 /* Depth is always present. */
2118 clear = PIPE_CLEAR_DEPTH;
2119 desc->unpack_z_float(&depth, 0, data, 0, 1, 1);
2120
2121 if (rtex->surface.flags & RADEON_SURF_SBUFFER) {
2122 clear |= PIPE_CLEAR_STENCIL;
2123 desc->unpack_s_8uint(&stencil, 0, data, 0, 1, 1);
2124 }
2125
2126 pipe->clear_depth_stencil(pipe, sf, clear, depth, stencil,
2127 box->x, box->y,
2128 box->width, box->height, false);
2129 } else {
2130 union pipe_color_union color;
2131
2132 /* pipe_color_union requires the full vec4 representation. */
2133 if (util_format_is_pure_uint(tex->format))
2134 desc->unpack_rgba_uint(color.ui, 0, data, 0, 1, 1);
2135 else if (util_format_is_pure_sint(tex->format))
2136 desc->unpack_rgba_sint(color.i, 0, data, 0, 1, 1);
2137 else
2138 desc->unpack_rgba_float(color.f, 0, data, 0, 1, 1);
2139
2140 if (screen->is_format_supported(screen, tex->format,
2141 tex->target, 0,
2142 PIPE_BIND_RENDER_TARGET)) {
2143 pipe->clear_render_target(pipe, sf, &color,
2144 box->x, box->y,
2145 box->width, box->height, false);
2146 } else {
2147 /* Software fallback - just for R9G9B9E5_FLOAT */
2148 util_clear_render_target(pipe, sf, &color,
2149 box->x, box->y,
2150 box->width, box->height);
2151 }
2152 }
2153 pipe_surface_reference(&sf, NULL);
2154 }
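
/* Usage sketch (assuming a PIPE_FORMAT_R8G8B8A8_UNORM color texture; the
 * clear value is packed in the resource's format):
 *
 *    uint8_t red[4] = { 0xff, 0x00, 0x00, 0xff };
 *    struct pipe_box box;
 *    u_box_3d(0, 0, 0, tex->width0, tex->height0, 1, &box);
 *    pipe->clear_texture(pipe, tex, 0, &box, red);
 */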
2155
2156 unsigned r600_translate_colorswap(enum pipe_format format, bool do_endian_swap)
2157 {
2158 const struct util_format_description *desc = util_format_description(format);
2159
2160 #define HAS_SWIZZLE(chan,swz) (desc->swizzle[chan] == PIPE_SWIZZLE_##swz)
2161
2162 if (format == PIPE_FORMAT_R11G11B10_FLOAT) /* isn't plain */
2163 return V_0280A0_SWAP_STD;
2164
2165 if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN)
2166 return ~0U;
2167
2168 switch (desc->nr_channels) {
2169 case 1:
2170 if (HAS_SWIZZLE(0,X))
2171 return V_0280A0_SWAP_STD; /* X___ */
2172 else if (HAS_SWIZZLE(3,X))
2173 return V_0280A0_SWAP_ALT_REV; /* ___X */
2174 break;
2175 case 2:
2176 if ((HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,Y)) ||
2177 (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,NONE)) ||
2178 (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,Y)))
2179 return V_0280A0_SWAP_STD; /* XY__ */
2180 else if ((HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,X)) ||
2181 (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,NONE)) ||
2182 (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,X)))
2183 /* YX__ */
2184 return (do_endian_swap ? V_0280A0_SWAP_STD : V_0280A0_SWAP_STD_REV);
2185 else if (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(3,Y))
2186 return V_0280A0_SWAP_ALT; /* X__Y */
2187 else if (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(3,X))
2188 return V_0280A0_SWAP_ALT_REV; /* Y__X */
2189 break;
2190 case 3:
2191 if (HAS_SWIZZLE(0,X))
2192 return (do_endian_swap ? V_0280A0_SWAP_STD_REV : V_0280A0_SWAP_STD);
2193 else if (HAS_SWIZZLE(0,Z))
2194 return V_0280A0_SWAP_STD_REV; /* ZYX */
2195 break;
2196 case 4:
2197 /* check the middle channels; the 1st and 4th channels can be NONE */
2198 if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,Z)) {
2199 return V_0280A0_SWAP_STD; /* XYZW */
2200 } else if (HAS_SWIZZLE(1,Z) && HAS_SWIZZLE(2,Y)) {
2201 return V_0280A0_SWAP_STD_REV; /* WZYX */
2202 } else if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,X)) {
2203 return V_0280A0_SWAP_ALT; /* ZYXW */
2204 } else if (HAS_SWIZZLE(1,Z) && HAS_SWIZZLE(2,W)) {
2205 /* YZWX */
2206 if (desc->is_array)
2207 return V_0280A0_SWAP_ALT_REV;
2208 else
2209 return (do_endian_swap ? V_0280A0_SWAP_ALT : V_0280A0_SWAP_ALT_REV);
2210 }
2211 break;
2212 }
2213 return ~0U;
2214 }
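
/* Examples: PIPE_FORMAT_R8G8B8A8_UNORM (swizzle XYZW) matches
 * HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,Z) and returns V_0280A0_SWAP_STD, while
 * PIPE_FORMAT_B8G8R8A8_UNORM (swizzle ZYXW) matches
 * HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,X) and returns V_0280A0_SWAP_ALT.
 */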
2215
2216 /* PIPELINE_STAT-BASED DCC ENABLEMENT FOR DISPLAYABLE SURFACES */
2217
2218 static void vi_dcc_clean_up_context_slot(struct r600_common_context *rctx,
2219 int slot)
2220 {
2221 int i;
2222
2223 if (rctx->dcc_stats[slot].query_active)
2224 vi_separate_dcc_stop_query(&rctx->b,
2225 rctx->dcc_stats[slot].tex);
2226
2227 for (i = 0; i < ARRAY_SIZE(rctx->dcc_stats[slot].ps_stats); i++)
2228 if (rctx->dcc_stats[slot].ps_stats[i]) {
2229 rctx->b.destroy_query(&rctx->b,
2230 rctx->dcc_stats[slot].ps_stats[i]);
2231 rctx->dcc_stats[slot].ps_stats[i] = NULL;
2232 }
2233
2234 r600_texture_reference(&rctx->dcc_stats[slot].tex, NULL);
2235 }
2236
2237 /**
2238 * Return the per-context slot where DCC statistics queries for the texture live.
2239 */
2240 static unsigned vi_get_context_dcc_stats_index(struct r600_common_context *rctx,
2241 struct r600_texture *tex)
2242 {
2243 int i, empty_slot = -1;
2244
2245 /* Remove zombie textures (textures kept alive by this array only). */
2246 for (i = 0; i < ARRAY_SIZE(rctx->dcc_stats); i++)
2247 if (rctx->dcc_stats[i].tex &&
2248 rctx->dcc_stats[i].tex->resource.b.b.reference.count == 1)
2249 vi_dcc_clean_up_context_slot(rctx, i);
2250
2251 /* Find the texture. */
2252 for (i = 0; i < ARRAY_SIZE(rctx->dcc_stats); i++) {
2253 /* Return if found. */
2254 if (rctx->dcc_stats[i].tex == tex) {
2255 rctx->dcc_stats[i].last_use_timestamp = os_time_get();
2256 return i;
2257 }
2258
2259 /* Record the first seen empty slot. */
2260 if (empty_slot == -1 && !rctx->dcc_stats[i].tex)
2261 empty_slot = i;
2262 }
2263
2264 /* Not found. Remove the oldest member to make space in the array. */
2265 if (empty_slot == -1) {
2266 int oldest_slot = 0;
2267
2268 /* Find the oldest slot. */
2269 for (i = 1; i < ARRAY_SIZE(rctx->dcc_stats); i++)
2270 if (rctx->dcc_stats[oldest_slot].last_use_timestamp >
2271 rctx->dcc_stats[i].last_use_timestamp)
2272 oldest_slot = i;
2273
2274 /* Clean up the oldest slot. */
2275 vi_dcc_clean_up_context_slot(rctx, oldest_slot);
2276 empty_slot = oldest_slot;
2277 }
2278
2279 /* Add the texture to the new slot. */
2280 r600_texture_reference(&rctx->dcc_stats[empty_slot].tex, tex);
2281 rctx->dcc_stats[empty_slot].last_use_timestamp = os_time_get();
2282 return empty_slot;
2283 }
2284
2285 static struct pipe_query *
2286 vi_create_resuming_pipestats_query(struct pipe_context *ctx)
2287 {
2288 struct r600_query_hw *query = (struct r600_query_hw*)
2289 ctx->create_query(ctx, PIPE_QUERY_PIPELINE_STATISTICS, 0);
2290
2291 query->flags |= R600_QUERY_HW_FLAG_BEGIN_RESUMES;
2292 return (struct pipe_query*)query;
2293 }
2294
2295 /**
2296 * Called when binding a color buffer.
2297 */
2298 void vi_separate_dcc_start_query(struct pipe_context *ctx,
2299 struct r600_texture *tex)
2300 {
2301 struct r600_common_context *rctx = (struct r600_common_context*)ctx;
2302 unsigned i = vi_get_context_dcc_stats_index(rctx, tex);
2303
2304 assert(!rctx->dcc_stats[i].query_active);
2305
2306 if (!rctx->dcc_stats[i].ps_stats[0])
2307 rctx->dcc_stats[i].ps_stats[0] = vi_create_resuming_pipestats_query(ctx);
2308
2309 /* begin or resume the query */
2310 ctx->begin_query(ctx, rctx->dcc_stats[i].ps_stats[0]);
2311 rctx->dcc_stats[i].query_active = true;
2312 }
2313
2314 /**
2315 * Called when unbinding a color buffer.
2316 */
2317 void vi_separate_dcc_stop_query(struct pipe_context *ctx,
2318 struct r600_texture *tex)
2319 {
2320 struct r600_common_context *rctx = (struct r600_common_context*)ctx;
2321 unsigned i = vi_get_context_dcc_stats_index(rctx, tex);
2322
2323 assert(rctx->dcc_stats[i].query_active);
2324 assert(rctx->dcc_stats[i].ps_stats[0]);
2325
2326 /* pause or end the query */
2327 ctx->end_query(ctx, rctx->dcc_stats[i].ps_stats[0]);
2328 rctx->dcc_stats[i].query_active = false;
2329 }
2330
2331 static bool vi_should_enable_separate_dcc(struct r600_texture *tex)
2332 {
2333 /* The minimum number of fullscreen draws per frame that is required
2334 * to enable DCC. */
2335 return tex->ps_draw_ratio + tex->num_slow_clears >= 5;
2336 }
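
/* Worked example: ps_draw_ratio is computed below as PS invocations divided
 * by width0 * height0, so for a 1920x1080 buffer (2,073,600 pixels) and no
 * slow clears, separate DCC is enabled once a measurement window reports at
 * least 5 * 2,073,600 = 10,368,000 PS invocations, i.e. roughly five
 * fullscreen draws.
 */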
2337
2338 /* Called by fast clear. */
2339 static void vi_separate_dcc_try_enable(struct r600_common_context *rctx,
2340 struct r600_texture *tex)
2341 {
2342 /* The intent is to use this with shared displayable back buffers,
2343 * but it's not strictly limited to them.
2344 */
2345 if (!tex->resource.b.is_shared ||
2346 !(tex->resource.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) ||
2347 tex->resource.b.b.target != PIPE_TEXTURE_2D ||
2348 tex->resource.b.b.last_level > 0 ||
2349 !tex->surface.dcc_size)
2350 return;
2351
2352 if (tex->dcc_offset)
2353 return; /* already enabled */
2354
2355 /* Enable the DCC stat gathering. */
2356 if (!tex->dcc_gather_statistics) {
2357 tex->dcc_gather_statistics = true;
2358 vi_separate_dcc_start_query(&rctx->b, tex);
2359 }
2360
2361 if (!vi_should_enable_separate_dcc(tex))
2362 return; /* stats show that DCC decompression is too expensive */
2363
2364 assert(tex->surface.num_dcc_levels);
2365 assert(!tex->dcc_separate_buffer);
2366
2367 r600_texture_discard_cmask(rctx->screen, tex);
2368
2369 /* Get a DCC buffer. */
2370 if (tex->last_dcc_separate_buffer) {
2371 assert(tex->dcc_gather_statistics);
2372 assert(!tex->dcc_separate_buffer);
2373 tex->dcc_separate_buffer = tex->last_dcc_separate_buffer;
2374 tex->last_dcc_separate_buffer = NULL;
2375 } else {
2376 tex->dcc_separate_buffer = (struct r600_resource*)
2377 r600_aligned_buffer_create(rctx->b.screen,
2378 R600_RESOURCE_FLAG_UNMAPPABLE,
2379 PIPE_USAGE_DEFAULT,
2380 tex->surface.dcc_size,
2381 tex->surface.dcc_alignment);
2382 if (!tex->dcc_separate_buffer)
2383 return;
2384 }
2385
2386 /* dcc_offset is the absolute GPUVM address. */
2387 tex->dcc_offset = tex->dcc_separate_buffer->gpu_address;
2388
2389 /* no need to flag anything since this is called by fast clear that
2390 * flags framebuffer state
2391 */
2392 }
2393
2394 /**
2395 * Called by pipe_context::flush_resource, the place where DCC decompression
2396 * takes place.
2397 */
2398 void vi_separate_dcc_process_and_reset_stats(struct pipe_context *ctx,
2399 struct r600_texture *tex)
2400 {
2401 struct r600_common_context *rctx = (struct r600_common_context*)ctx;
2402 struct pipe_query *tmp;
2403 unsigned i = vi_get_context_dcc_stats_index(rctx, tex);
2404 bool query_active = rctx->dcc_stats[i].query_active;
2405 bool disable = false;
2406
2407 if (rctx->dcc_stats[i].ps_stats[2]) {
2408 union pipe_query_result result;
2409
2410 /* Read the results. */
2411 ctx->get_query_result(ctx, rctx->dcc_stats[i].ps_stats[2],
2412 true, &result);
2413 r600_query_hw_reset_buffers(rctx,
2414 (struct r600_query_hw*)
2415 rctx->dcc_stats[i].ps_stats[2]);
2416
2417 /* Compute the approximate number of fullscreen draws. */
2418 tex->ps_draw_ratio =
2419 result.pipeline_statistics.ps_invocations /
2420 (tex->resource.b.b.width0 * tex->resource.b.b.height0);
2421 rctx->last_tex_ps_draw_ratio = tex->ps_draw_ratio;
2422
2423 disable = tex->dcc_separate_buffer &&
2424 !vi_should_enable_separate_dcc(tex);
2425 }
2426
2427 tex->num_slow_clears = 0;
2428
2429 /* stop the statistics query for ps_stats[0] */
2430 if (query_active)
2431 vi_separate_dcc_stop_query(ctx, tex);
2432
2433 /* Move the queries in the queue by one. */
2434 tmp = rctx->dcc_stats[i].ps_stats[2];
2435 rctx->dcc_stats[i].ps_stats[2] = rctx->dcc_stats[i].ps_stats[1];
2436 rctx->dcc_stats[i].ps_stats[1] = rctx->dcc_stats[i].ps_stats[0];
2437 rctx->dcc_stats[i].ps_stats[0] = tmp;
2438
2439 /* create and start a new query as ps_stats[0] */
2440 if (query_active)
2441 vi_separate_dcc_start_query(ctx, tex);
2442
2443 if (disable) {
2444 assert(!tex->last_dcc_separate_buffer);
2445 tex->last_dcc_separate_buffer = tex->dcc_separate_buffer;
2446 tex->dcc_separate_buffer = NULL;
2447 tex->dcc_offset = 0;
2448 /* no need to flag anything since this is called after
2449 * decompression that re-sets framebuffer state
2450 */
2451 }
2452 }
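
/* The three ps_stats entries form a small pipeline: [0] is the query
 * currently accumulating, [1] was started one flush_resource ago, and [2]
 * is two flushes old, so by the time it is read (with wait = true) the
 * result is usually already available. After reading [2], the entries are
 * rotated and the old query object is reused as the new [0].
 */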
2453
2454 /* FAST COLOR CLEAR */
2455
2456 static void evergreen_set_clear_color(struct r600_texture *rtex,
2457 enum pipe_format surface_format,
2458 const union pipe_color_union *color)
2459 {
2460 union util_color uc;
2461
2462 memset(&uc, 0, sizeof(uc));
2463
2464 if (rtex->surface.bpe == 16) {
2465 /* DCC fast clear only:
2466 * CLEAR_WORD0 = R = G = B
2467 * CLEAR_WORD1 = A
2468 */
2469 assert(color->ui[0] == color->ui[1] &&
2470 color->ui[0] == color->ui[2]);
2471 uc.ui[0] = color->ui[0];
2472 uc.ui[1] = color->ui[3];
2473 } else if (util_format_is_pure_uint(surface_format)) {
2474 util_format_write_4ui(surface_format, color->ui, 0, &uc, 0, 0, 0, 1, 1);
2475 } else if (util_format_is_pure_sint(surface_format)) {
2476 util_format_write_4i(surface_format, color->i, 0, &uc, 0, 0, 0, 1, 1);
2477 } else {
2478 util_pack_color(color->f, surface_format, &uc);
2479 }
2480
2481 memcpy(rtex->color_clear_value, &uc, 2 * sizeof(uint32_t));
2482 }
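
/* Worked example for the 128-bit path: clearing an R32G32B32A32_FLOAT
 * surface to (0, 0, 0, 1) passes the assertion (ui[0..2] are all
 * 0x00000000) and stores CLEAR_WORD0 = 0x00000000 and
 * CLEAR_WORD1 = 0x3f800000 (the bit pattern of 1.0f).
 */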
2483
2484 static bool vi_get_fast_clear_parameters(enum pipe_format surface_format,
2485 const union pipe_color_union *color,
2486 uint32_t* reset_value,
2487 bool* clear_words_needed)
2488 {
2489 bool values[4] = {};
2490 int i;
2491 bool main_value = false;
2492 bool extra_value = false;
2493 int extra_channel;
2494
2495 /* This is needed to get the correct DCC clear value for luminance formats.
2496 * 1) Get the linear format (because the next step can't handle L8_SRGB).
2497 * 2) Convert luminance to red (the real hw format for luminance).
2498 */
2499 surface_format = util_format_linear(surface_format);
2500 surface_format = util_format_luminance_to_red(surface_format);
2501
2502 const struct util_format_description *desc = util_format_description(surface_format);
2503
2504 if (desc->block.bits == 128 &&
2505 (color->ui[0] != color->ui[1] ||
2506 color->ui[0] != color->ui[2]))
2507 return false;
2508
2509 *clear_words_needed = true;
2510 *reset_value = 0x20202020U;
2511
2512 /* If we want to clear without needing a fast clear eliminate step, we
2513 * can set each channel to 0 or 1 (or 0/max for integer formats). We
2514 * have two sets of flags, one for the last or first channel (extra) and
2515 * one for the other channels (main).
2516 */
2517
2518 if (surface_format == PIPE_FORMAT_R11G11B10_FLOAT ||
2519 surface_format == PIPE_FORMAT_B5G6R5_UNORM ||
2520 surface_format == PIPE_FORMAT_B5G6R5_SRGB ||
2521 util_format_is_alpha(surface_format)) {
2522 extra_channel = -1;
2523 } else if (desc->layout == UTIL_FORMAT_LAYOUT_PLAIN) {
2524 if (r600_translate_colorswap(surface_format, false) <= 1)
2525 extra_channel = desc->nr_channels - 1;
2526 else
2527 extra_channel = 0;
2528 } else
2529 return true;
2530
2531 for (i = 0; i < 4; ++i) {
2532 int index = desc->swizzle[i] - PIPE_SWIZZLE_X;
2533
2534 if (desc->swizzle[i] < PIPE_SWIZZLE_X ||
2535 desc->swizzle[i] > PIPE_SWIZZLE_W)
2536 continue;
2537
2538 if (desc->channel[i].pure_integer &&
2539 desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
2540 /* Use the maximum value for clamping the clear color. */
2541 int max = u_bit_consecutive(0, desc->channel[i].size - 1);
2542
2543 values[i] = color->i[i] != 0;
2544 if (color->i[i] != 0 && MIN2(color->i[i], max) != max)
2545 return true;
2546 } else if (desc->channel[i].pure_integer &&
2547 desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED) {
2548 /* Use the maximum value for clamping the clear color. */
2549 unsigned max = u_bit_consecutive(0, desc->channel[i].size);
2550
2551 values[i] = color->ui[i] != 0U;
2552 if (color->ui[i] != 0U && MIN2(color->ui[i], max) != max)
2553 return true;
2554 } else {
2555 values[i] = color->f[i] != 0.0F;
2556 if (color->f[i] != 0.0F && color->f[i] != 1.0F)
2557 return true;
2558 }
2559
2560 if (index == extra_channel)
2561 extra_value = values[i];
2562 else
2563 main_value = values[i];
2564 }
2565
2566 for (int i = 0; i < 4; ++i)
2567 if (values[i] != main_value &&
2568 desc->swizzle[i] - PIPE_SWIZZLE_X != extra_channel &&
2569 desc->swizzle[i] >= PIPE_SWIZZLE_X &&
2570 desc->swizzle[i] <= PIPE_SWIZZLE_W)
2571 return true;
2572
2573 *clear_words_needed = false;
2574 if (main_value)
2575 *reset_value |= 0x80808080U;
2576
2577 if (extra_value)
2578 *reset_value |= 0x40404040U;
2579 return true;
2580 }
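
/* Worked examples: clearing R8G8B8A8_UNORM to (0, 0, 0, 1) sets
 * main_value = false and extra_value = true (alpha is the extra channel for
 * a STD swap), so *reset_value becomes 0x20202020 | 0x40404040 = 0x60606060
 * and no clear words are needed. Clearing to (1, 1, 1, 1) yields
 * 0xE0E0E0E0. A color such as (0.5, 0, 0, 1) returns early with the
 * defaults (*reset_value = 0x20202020, *clear_words_needed = true).
 */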
2581
2582 void vi_dcc_clear_level(struct r600_common_context *rctx,
2583 struct r600_texture *rtex,
2584 unsigned level, unsigned clear_value)
2585 {
2586 struct pipe_resource *dcc_buffer;
2587 uint64_t dcc_offset, clear_size;
2588
2589 assert(vi_dcc_enabled(rtex, level));
2590
2591 if (rtex->dcc_separate_buffer) {
2592 dcc_buffer = &rtex->dcc_separate_buffer->b.b;
2593 dcc_offset = 0;
2594 } else {
2595 dcc_buffer = &rtex->resource.b.b;
2596 dcc_offset = rtex->dcc_offset;
2597 }
2598
2599 if (rctx->chip_class >= GFX9) {
2600 /* Mipmap level clears aren't implemented. */
2601 assert(rtex->resource.b.b.last_level == 0);
2602 /* MSAA needs a different clear size. */
2603 assert(rtex->resource.b.b.nr_samples <= 1);
2604 clear_size = rtex->surface.dcc_size;
2605 } else {
2606 dcc_offset += rtex->surface.u.legacy.level[level].dcc_offset;
2607 clear_size = rtex->surface.u.legacy.level[level].dcc_fast_clear_size;
2608 }
2609
2610 rctx->clear_buffer(&rctx->b, dcc_buffer, dcc_offset, clear_size,
2611 clear_value, R600_COHERENCY_CB_META);
2612 }
2613
2614 /* Set the same micro tile mode as the destination of the last MSAA resolve.
2615 * This allows hitting the MSAA resolve fast path, which requires that both
2616 * src and dst micro tile modes match.
2617 */
2618 static void si_set_optimal_micro_tile_mode(struct r600_common_screen *rscreen,
2619 struct r600_texture *rtex)
2620 {
2621 if (rtex->resource.b.is_shared ||
2622 rtex->resource.b.b.nr_samples <= 1 ||
2623 rtex->surface.micro_tile_mode == rtex->last_msaa_resolve_target_micro_mode)
2624 return;
2625
2626 assert(rscreen->chip_class >= GFX9 ||
2627 rtex->surface.u.legacy.level[0].mode == RADEON_SURF_MODE_2D);
2628 assert(rtex->resource.b.b.last_level == 0);
2629
2630 if (rscreen->chip_class >= GFX9) {
2631 /* 4K or larger tiles only. 0 is linear. 1-3 are 256B tiles. */
2632 assert(rtex->surface.u.gfx9.surf.swizzle_mode >= 4);
2633
2634 /* If you do swizzle_mode % 4, you'll get:
2635 * 0 = Depth
2636 * 1 = Standard,
2637 * 2 = Displayable
2638 * 3 = Rotated
2639 *
2640 * Depth-sample order isn't allowed:
2641 */
2642 assert(rtex->surface.u.gfx9.surf.swizzle_mode % 4 != 0);
2643
2644 switch (rtex->last_msaa_resolve_target_micro_mode) {
2645 case RADEON_MICRO_MODE_DISPLAY:
2646 rtex->surface.u.gfx9.surf.swizzle_mode &= ~0x3;
2647 rtex->surface.u.gfx9.surf.swizzle_mode += 2; /* D */
2648 break;
2649 case RADEON_MICRO_MODE_THIN:
2650 rtex->surface.u.gfx9.surf.swizzle_mode &= ~0x3;
2651 rtex->surface.u.gfx9.surf.swizzle_mode += 1; /* S */
2652 break;
2653 case RADEON_MICRO_MODE_ROTATED:
2654 rtex->surface.u.gfx9.surf.swizzle_mode &= ~0x3;
2655 rtex->surface.u.gfx9.surf.swizzle_mode += 3; /* R */
2656 break;
2657 default: /* depth */
2658 assert(!"unexpected micro mode");
2659 return;
2660 }
2661 } else if (rscreen->chip_class >= CIK) {
2662 /* These magic numbers were copied from addrlib. It doesn't use
2663 * any definitions for them either. They are all 2D_TILED_THIN1
2664 * modes with different bpp and micro tile mode.
2665 */
2666 switch (rtex->last_msaa_resolve_target_micro_mode) {
2667 case RADEON_MICRO_MODE_DISPLAY:
2668 rtex->surface.u.legacy.tiling_index[0] = 10;
2669 break;
2670 case RADEON_MICRO_MODE_THIN:
2671 rtex->surface.u.legacy.tiling_index[0] = 14;
2672 break;
2673 case RADEON_MICRO_MODE_ROTATED:
2674 rtex->surface.u.legacy.tiling_index[0] = 28;
2675 break;
2676 default: /* depth, thick */
2677 assert(!"unexpected micro mode");
2678 return;
2679 }
2680 } else { /* SI */
2681 switch (rtex->last_msaa_resolve_target_micro_mode) {
2682 case RADEON_MICRO_MODE_DISPLAY:
2683 switch (rtex->surface.bpe) {
2684 case 1:
2685 rtex->surface.u.legacy.tiling_index[0] = 10;
2686 break;
2687 case 2:
2688 rtex->surface.u.legacy.tiling_index[0] = 11;
2689 break;
2690 default: /* 4, 8 */
2691 rtex->surface.u.legacy.tiling_index[0] = 12;
2692 break;
2693 }
2694 break;
2695 case RADEON_MICRO_MODE_THIN:
2696 switch (rtex->surface.bpe) {
2697 case 1:
2698 rtex->surface.u.legacy.tiling_index[0] = 14;
2699 break;
2700 case 2:
2701 rtex->surface.u.legacy.tiling_index[0] = 15;
2702 break;
2703 case 4:
2704 rtex->surface.u.legacy.tiling_index[0] = 16;
2705 break;
2706 default: /* 8, 16 */
2707 rtex->surface.u.legacy.tiling_index[0] = 17;
2708 break;
2709 }
2710 break;
2711 default: /* depth, thick */
2712 assert(!"unexpected micro mode");
2713 return;
2714 }
2715 }
2716
2717 rtex->surface.micro_tile_mode = rtex->last_msaa_resolve_target_micro_mode;
2718
2719 p_atomic_inc(&rscreen->dirty_tex_counter);
2720 }
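
/* Arithmetic example for the GFX9 path above (using an arbitrary mode
 * number purely for illustration; the real values come from addrlib): a
 * swizzle_mode of 9 (9 % 4 == 1, i.e. standard/thin) retargeted at a
 * displayable resolve destination becomes (9 & ~3) + 2 = 10.
 */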
2721
2722 void evergreen_do_fast_color_clear(struct r600_common_context *rctx,
2723 struct pipe_framebuffer_state *fb,
2724 struct r600_atom *fb_state,
2725 unsigned *buffers, ubyte *dirty_cbufs,
2726 const union pipe_color_union *color)
2727 {
2728 int i;
2729
2730 /* This function is broken on big-endian, so just disable this path for now */
2731 #ifdef PIPE_ARCH_BIG_ENDIAN
2732 return;
2733 #endif
2734
2735 if (rctx->render_cond)
2736 return;
2737
2738 for (i = 0; i < fb->nr_cbufs; i++) {
2739 struct r600_texture *tex;
2740 unsigned clear_bit = PIPE_CLEAR_COLOR0 << i;
2741
2742 if (!fb->cbufs[i])
2743 continue;
2744
2745 /* if this colorbuffer is not being cleared */
2746 if (!(*buffers & clear_bit))
2747 continue;
2748
2749 tex = (struct r600_texture *)fb->cbufs[i]->texture;
2750
2751 /* the clear is allowed if all layers are bound */
2752 if (fb->cbufs[i]->u.tex.first_layer != 0 ||
2753 fb->cbufs[i]->u.tex.last_layer != util_max_layer(&tex->resource.b.b, 0)) {
2754 continue;
2755 }
2756
2757 /* cannot clear mipmapped textures */
2758 if (fb->cbufs[i]->texture->last_level != 0) {
2759 continue;
2760 }
2761
2762 /* only supported on tiled surfaces */
2763 if (tex->surface.is_linear) {
2764 continue;
2765 }
2766
2767 /* shared textures can't use fast clear without an explicit flush,
2768 * because there is no way to communicate the clear color among
2769 * all clients
2770 */
2771 if (tex->resource.b.is_shared &&
2772 !(tex->resource.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
2773 continue;
2774
2775 /* fast color clear with 1D tiling doesn't work on old kernels and CIK */
2776 if (rctx->chip_class == CIK &&
2777 tex->surface.u.legacy.level[0].mode == RADEON_SURF_MODE_1D &&
2778 rctx->screen->info.drm_major == 2 &&
2779 rctx->screen->info.drm_minor < 38) {
2780 continue;
2781 }
2782
2783 /* Fast clear is the most appropriate place to enable DCC for
2784 * displayable surfaces.
2785 */
2786 if (rctx->chip_class >= VI &&
2787 !(rctx->screen->debug_flags & DBG_NO_DCC_FB)) {
2788 vi_separate_dcc_try_enable(rctx, tex);
2789
2790 /* On Stoney, RB+ doesn't work with a CMASK-only fast clear,
2791 * so every clear is treated as a potentially slow clear,
2792 * which is weighed when determining whether to
2793 * enable separate DCC.
2794 */
2795 if (tex->dcc_gather_statistics &&
2796 rctx->family == CHIP_STONEY)
2797 tex->num_slow_clears++;
2798 }
2799
2800 /* Try to clear DCC first, otherwise try CMASK. */
2801 if (vi_dcc_enabled(tex, 0)) {
2802 uint32_t reset_value;
2803 bool clear_words_needed;
2804
2805 if (rctx->screen->debug_flags & DBG_NO_DCC_CLEAR)
2806 continue;
2807
2808 if (!vi_get_fast_clear_parameters(fb->cbufs[i]->format,
2809 color, &reset_value,
2810 &clear_words_needed))
2811 continue;
2812
2813 vi_dcc_clear_level(rctx, tex, 0, reset_value);
2814
2815 unsigned level_bit = 1 << fb->cbufs[i]->u.tex.level;
2816 if (clear_words_needed) {
2817 bool need_compressed_update = !tex->dirty_level_mask;
2818
2819 tex->dirty_level_mask |= level_bit;
2820
2821 if (need_compressed_update)
2822 p_atomic_inc(&rctx->screen->compressed_colortex_counter);
2823 }
2824 tex->separate_dcc_dirty = true;
2825 } else {
2826 /* 128-bit formats are unsupported */
2827 if (tex->surface.bpe > 8) {
2828 continue;
2829 }
2830
2831 /* RB+ doesn't work with CMASK fast clear on Stoney. */
2832 if (rctx->family == CHIP_STONEY)
2833 continue;
2834
2835 /* ensure CMASK is enabled */
2836 r600_texture_alloc_cmask_separate(rctx->screen, tex);
2837 if (tex->cmask.size == 0) {
2838 continue;
2839 }
2840
2841 /* Do the fast clear. */
2842 rctx->clear_buffer(&rctx->b, &tex->cmask_buffer->b.b,
2843 tex->cmask.offset, tex->cmask.size, 0,
2844 R600_COHERENCY_CB_META);
2845
2846 bool need_compressed_update = !tex->dirty_level_mask;
2847
2848 tex->dirty_level_mask |= 1 << fb->cbufs[i]->u.tex.level;
2849
2850 if (need_compressed_update)
2851 p_atomic_inc(&rctx->screen->compressed_colortex_counter);
2852 }
2853
2854 /* We can change the micro tile mode before a full clear. */
2855 if (rctx->screen->chip_class >= SI)
2856 si_set_optimal_micro_tile_mode(rctx->screen, tex);
2857
2858 evergreen_set_clear_color(tex, fb->cbufs[i]->format, color);
2859
2860 if (dirty_cbufs)
2861 *dirty_cbufs |= 1 << i;
2862 rctx->set_atom_dirty(rctx, fb_state, true);
2863 *buffers &= ~clear_bit;
2864 }
2865 }
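
/* Note that a successfully fast-cleared buffer has its bit removed from
 * *buffers, so the caller skips the regular (slow) clear for it, while
 * buffers that fail any of the checks above (mipmapped, linear, partially
 * bound layers, shared without explicit flush, ...) keep their bit and are
 * cleared normally.
 */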
2866
2867 void r600_init_screen_texture_functions(struct r600_common_screen *rscreen)
2868 {
2869 rscreen->b.resource_from_handle = r600_texture_from_handle;
2870 rscreen->b.resource_get_handle = r600_texture_get_handle;
2871 }
2872
2873 void r600_init_context_texture_functions(struct r600_common_context *rctx)
2874 {
2875 rctx->b.create_surface = r600_create_surface;
2876 rctx->b.surface_destroy = r600_surface_destroy;
2877 rctx->b.clear_texture = r600_clear_texture;
2878 }