/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Jerome Glisse
 *      Corbin Simpson
 */
#include "r600_pipe_common.h"
#include "r600_cs.h"
#include "r600_query.h"
#include "util/u_format.h"
#include "util/u_log.h"
#include "util/u_memory.h"
#include "util/u_pack_color.h"
#include "util/u_surface.h"
#include "util/os_time.h"
#include <errno.h>
#include <inttypes.h>

static void r600_texture_discard_cmask(struct r600_common_screen *rscreen,
                                       struct r600_texture *rtex);
static enum radeon_surf_mode
r600_choose_tiling(struct r600_common_screen *rscreen,
                   const struct pipe_resource *templ);


bool r600_prepare_for_dma_blit(struct r600_common_context *rctx,
                               struct r600_texture *rdst,
                               unsigned dst_level, unsigned dstx,
                               unsigned dsty, unsigned dstz,
                               struct r600_texture *rsrc,
                               unsigned src_level,
                               const struct pipe_box *src_box)
{
        if (!rctx->dma.cs)
                return false;

        if (rdst->surface.bpe != rsrc->surface.bpe)
                return false;

        /* MSAA: Blits don't exist in the real world. */
        if (rsrc->resource.b.b.nr_samples > 1 ||
            rdst->resource.b.b.nr_samples > 1)
                return false;

        /* Depth-stencil surfaces:
         *   When dst is linear, the DB->CB copy preserves HTILE.
         *   When dst is tiled, the 3D path must be used to update HTILE.
         */
        if (rsrc->is_depth || rdst->is_depth)
                return false;

        /* CMASK as:
         *   src: Both texture and SDMA paths need decompression. Use SDMA.
         *   dst: If overwriting the whole texture, discard CMASK and use
         *        SDMA. Otherwise, use the 3D path.
         */
        if (rdst->cmask.size && rdst->dirty_level_mask & (1 << dst_level)) {
                /* The CMASK clear is only enabled for the first level. */
                assert(dst_level == 0);
                if (!util_texrange_covers_whole_level(&rdst->resource.b.b, dst_level,
                                                      dstx, dsty, dstz, src_box->width,
                                                      src_box->height, src_box->depth))
                        return false;

                r600_texture_discard_cmask(rctx->screen, rdst);
        }

        /* All requirements are met. Prepare textures for SDMA. */
        if (rsrc->cmask.size && rsrc->dirty_level_mask & (1 << src_level))
                rctx->b.flush_resource(&rctx->b, &rsrc->resource.b.b);

        assert(!(rsrc->dirty_level_mask & (1 << src_level)));
        assert(!(rdst->dirty_level_mask & (1 << dst_level)));

        return true;
}

/* Same as resource_copy_region, except that both upsampling and downsampling are allowed. */
static void r600_copy_region_with_blit(struct pipe_context *pipe,
                                       struct pipe_resource *dst,
                                       unsigned dst_level,
                                       unsigned dstx, unsigned dsty, unsigned dstz,
                                       struct pipe_resource *src,
                                       unsigned src_level,
                                       const struct pipe_box *src_box)
{
        struct pipe_blit_info blit;

        memset(&blit, 0, sizeof(blit));
        blit.src.resource = src;
        blit.src.format = src->format;
        blit.src.level = src_level;
        blit.src.box = *src_box;
        blit.dst.resource = dst;
        blit.dst.format = dst->format;
        blit.dst.level = dst_level;
        blit.dst.box.x = dstx;
        blit.dst.box.y = dsty;
        blit.dst.box.z = dstz;
        blit.dst.box.width = src_box->width;
        blit.dst.box.height = src_box->height;
        blit.dst.box.depth = src_box->depth;
        blit.mask = util_format_get_mask(src->format) &
                    util_format_get_mask(dst->format);
        blit.filter = PIPE_TEX_FILTER_NEAREST;

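        /* A zero mask means the two formats have no components in common,
         * so there is nothing to blit. */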
        if (blit.mask) {
                pipe->blit(pipe, &blit);
        }
}

/* Copy from a full GPU texture to a transfer's staging one. */
static void r600_copy_to_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
        struct r600_common_context *rctx = (struct r600_common_context*)ctx;
        struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
        struct pipe_resource *dst = &rtransfer->staging->b.b;
        struct pipe_resource *src = transfer->resource;

        if (src->nr_samples > 1) {
                r600_copy_region_with_blit(ctx, dst, 0, 0, 0, 0,
                                           src, transfer->level, &transfer->box);
                return;
        }

        rctx->dma_copy(ctx, dst, 0, 0, 0, 0, src, transfer->level,
                       &transfer->box);
}

/* Copy from a transfer's staging texture to a full GPU one. */
static void r600_copy_from_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
        struct r600_common_context *rctx = (struct r600_common_context*)ctx;
        struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
        struct pipe_resource *dst = transfer->resource;
        struct pipe_resource *src = &rtransfer->staging->b.b;
        struct pipe_box sbox;

        u_box_3d(0, 0, 0, transfer->box.width, transfer->box.height, transfer->box.depth, &sbox);

        if (dst->nr_samples > 1) {
                r600_copy_region_with_blit(ctx, dst, transfer->level,
                                           transfer->box.x, transfer->box.y, transfer->box.z,
                                           src, 0, &sbox);
                return;
        }

        rctx->dma_copy(ctx, dst, transfer->level,
                       transfer->box.x, transfer->box.y, transfer->box.z,
                       src, 0, &sbox);
}

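/* Return the byte offset of the given box within the given mipmap level and
 * report the row and layer strides in bytes. Box coordinates are in pixels
 * and are converted to block coordinates for block-compressed formats. */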
static unsigned r600_texture_get_offset(struct r600_common_screen *rscreen,
                                        struct r600_texture *rtex, unsigned level,
                                        const struct pipe_box *box,
                                        unsigned *stride,
                                        unsigned *layer_stride)
{
        *stride = rtex->surface.u.legacy.level[level].nblk_x *
                  rtex->surface.bpe;
        assert((uint64_t)rtex->surface.u.legacy.level[level].slice_size_dw * 4 <= UINT_MAX);
        *layer_stride = (uint64_t)rtex->surface.u.legacy.level[level].slice_size_dw * 4;

        if (!box)
                return rtex->surface.u.legacy.level[level].offset;

        /* Each texture is an array of mipmap levels. Each level is
         * an array of slices. */
        return rtex->surface.u.legacy.level[level].offset +
               box->z * (uint64_t)rtex->surface.u.legacy.level[level].slice_size_dw * 4 +
               (box->y / rtex->surface.blk_h *
                rtex->surface.u.legacy.level[level].nblk_x +
                box->x / rtex->surface.blk_w) * rtex->surface.bpe;
}

static int r600_init_surface(struct r600_common_screen *rscreen,
                             struct radeon_surf *surface,
                             const struct pipe_resource *ptex,
                             enum radeon_surf_mode array_mode,
                             unsigned pitch_in_bytes_override,
                             unsigned offset,
                             bool is_imported,
                             bool is_scanout,
                             bool is_flushed_depth)
{
        const struct util_format_description *desc =
                util_format_description(ptex->format);
        bool is_depth, is_stencil;
        int r;
        unsigned i, bpe, flags = 0;

        is_depth = util_format_has_depth(desc);
        is_stencil = util_format_has_stencil(desc);

        if (rscreen->chip_class >= EVERGREEN && !is_flushed_depth &&
            ptex->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
                bpe = 4; /* stencil is allocated separately on evergreen */
        } else {
                bpe = util_format_get_blocksize(ptex->format);
                assert(util_is_power_of_two(bpe));
        }

        if (!is_flushed_depth && is_depth) {
                flags |= RADEON_SURF_ZBUFFER;

                if (is_stencil)
                        flags |= RADEON_SURF_SBUFFER;
        }

        if (ptex->bind & PIPE_BIND_SCANOUT || is_scanout) {
                /* This should catch bugs in gallium users setting incorrect flags. */
                assert(ptex->nr_samples <= 1 &&
                       ptex->array_size == 1 &&
                       ptex->depth0 == 1 &&
                       ptex->last_level == 0 &&
                       !(flags & RADEON_SURF_Z_OR_SBUFFER));

                flags |= RADEON_SURF_SCANOUT;
        }

        if (ptex->bind & PIPE_BIND_SHARED)
                flags |= RADEON_SURF_SHAREABLE;
        if (is_imported)
                flags |= RADEON_SURF_IMPORTED | RADEON_SURF_SHAREABLE;
        if (!(ptex->flags & R600_RESOURCE_FLAG_FORCE_TILING))
                flags |= RADEON_SURF_OPTIMIZE_FOR_SPACE;

        r = rscreen->ws->surface_init(rscreen->ws, ptex, flags, bpe,
                                      array_mode, surface);
        if (r) {
                return r;
        }

        if (pitch_in_bytes_override &&
            pitch_in_bytes_override != surface->u.legacy.level[0].nblk_x * bpe) {
                /* The old DDX driver on Evergreen overestimates the alignment
                 * for 1D-tiled surfaces, which only have one level.
                 */
                surface->u.legacy.level[0].nblk_x = pitch_in_bytes_override / bpe;
                surface->u.legacy.level[0].slice_size_dw =
                        ((uint64_t)pitch_in_bytes_override * surface->u.legacy.level[0].nblk_y) / 4;
        }

        if (offset) {
                for (i = 0; i < ARRAY_SIZE(surface->u.legacy.level); ++i)
                        surface->u.legacy.level[i].offset += offset;
        }

        return 0;
}

static void r600_texture_init_metadata(struct r600_common_screen *rscreen,
                                       struct r600_texture *rtex,
                                       struct radeon_bo_metadata *metadata)
{
        struct radeon_surf *surface = &rtex->surface;

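        /* These tiling parameters travel with the buffer object as winsys BO
         * metadata, so that other processes importing the BO can reconstruct
         * the surface layout (see r600_surface_import_metadata). */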
        memset(metadata, 0, sizeof(*metadata));

        metadata->u.legacy.microtile = surface->u.legacy.level[0].mode >= RADEON_SURF_MODE_1D ?
                                       RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
        metadata->u.legacy.macrotile = surface->u.legacy.level[0].mode >= RADEON_SURF_MODE_2D ?
                                       RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
        metadata->u.legacy.pipe_config = surface->u.legacy.pipe_config;
        metadata->u.legacy.bankw = surface->u.legacy.bankw;
        metadata->u.legacy.bankh = surface->u.legacy.bankh;
        metadata->u.legacy.tile_split = surface->u.legacy.tile_split;
        metadata->u.legacy.mtilea = surface->u.legacy.mtilea;
        metadata->u.legacy.num_banks = surface->u.legacy.num_banks;
        metadata->u.legacy.stride = surface->u.legacy.level[0].nblk_x * surface->bpe;
        metadata->u.legacy.scanout = (surface->flags & RADEON_SURF_SCANOUT) != 0;
}

static void r600_surface_import_metadata(struct r600_common_screen *rscreen,
                                         struct radeon_surf *surf,
                                         struct radeon_bo_metadata *metadata,
                                         enum radeon_surf_mode *array_mode,
                                         bool *is_scanout)
{
        surf->u.legacy.pipe_config = metadata->u.legacy.pipe_config;
        surf->u.legacy.bankw = metadata->u.legacy.bankw;
        surf->u.legacy.bankh = metadata->u.legacy.bankh;
        surf->u.legacy.tile_split = metadata->u.legacy.tile_split;
        surf->u.legacy.mtilea = metadata->u.legacy.mtilea;
        surf->u.legacy.num_banks = metadata->u.legacy.num_banks;

        if (metadata->u.legacy.macrotile == RADEON_LAYOUT_TILED)
                *array_mode = RADEON_SURF_MODE_2D;
        else if (metadata->u.legacy.microtile == RADEON_LAYOUT_TILED)
                *array_mode = RADEON_SURF_MODE_1D;
        else
                *array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;

        *is_scanout = metadata->u.legacy.scanout;
}

static void r600_eliminate_fast_color_clear(struct r600_common_context *rctx,
                                            struct r600_texture *rtex)
{
        struct r600_common_screen *rscreen = rctx->screen;
        struct pipe_context *ctx = &rctx->b;

        if (ctx == rscreen->aux_context)
                mtx_lock(&rscreen->aux_context_lock);

        ctx->flush_resource(ctx, &rtex->resource.b.b);
        ctx->flush(ctx, NULL, 0);

        if (ctx == rscreen->aux_context)
                mtx_unlock(&rscreen->aux_context_lock);
}

static void r600_texture_discard_cmask(struct r600_common_screen *rscreen,
                                       struct r600_texture *rtex)
{
        if (!rtex->cmask.size)
                return;

        assert(rtex->resource.b.b.nr_samples <= 1);

        /* Disable CMASK. */
        memset(&rtex->cmask, 0, sizeof(rtex->cmask));
        rtex->cmask.base_address_reg = rtex->resource.gpu_address >> 8;
        rtex->dirty_level_mask = 0;

        rtex->cb_color_info &= ~EG_S_028C70_FAST_CLEAR(1);

        if (rtex->cmask_buffer != &rtex->resource)
                r600_resource_reference(&rtex->cmask_buffer, NULL);

        /* Notify all contexts about the change. */
        p_atomic_inc(&rscreen->dirty_tex_counter);
        p_atomic_inc(&rscreen->compressed_colortex_counter);
}

static void r600_reallocate_texture_inplace(struct r600_common_context *rctx,
                                            struct r600_texture *rtex,
                                            unsigned new_bind_flag,
                                            bool invalidate_storage)
{
        struct pipe_screen *screen = rctx->b.screen;
        struct r600_texture *new_tex;
        struct pipe_resource templ = rtex->resource.b.b;
        unsigned i;

        templ.bind |= new_bind_flag;

        /* r600g doesn't react to dirty_tex_descriptor_counter */
        if (rctx->chip_class < SI)
                return;

        if (rtex->resource.b.is_shared)
                return;

        if (new_bind_flag == PIPE_BIND_LINEAR) {
                if (rtex->surface.is_linear)
                        return;

                /* This fails with MSAA, depth, and compressed textures. */
                if (r600_choose_tiling(rctx->screen, &templ) !=
                    RADEON_SURF_MODE_LINEAR_ALIGNED)
                        return;
        }

        new_tex = (struct r600_texture*)screen->resource_create(screen, &templ);
        if (!new_tex)
                return;

        /* Copy the pixels to the new texture. */
        if (!invalidate_storage) {
                for (i = 0; i <= templ.last_level; i++) {
                        struct pipe_box box;

                        u_box_3d(0, 0, 0,
                                 u_minify(templ.width0, i), u_minify(templ.height0, i),
                                 util_num_layers(&templ, i), &box);

                        rctx->dma_copy(&rctx->b, &new_tex->resource.b.b, i, 0, 0, 0,
                                       &rtex->resource.b.b, i, &box);
                }
        }

        if (new_bind_flag == PIPE_BIND_LINEAR) {
                r600_texture_discard_cmask(rctx->screen, rtex);
        }

        /* Replace the structure fields of rtex. */
        rtex->resource.b.b.bind = templ.bind;
        pb_reference(&rtex->resource.buf, new_tex->resource.buf);
        rtex->resource.gpu_address = new_tex->resource.gpu_address;
        rtex->resource.vram_usage = new_tex->resource.vram_usage;
        rtex->resource.gart_usage = new_tex->resource.gart_usage;
        rtex->resource.bo_size = new_tex->resource.bo_size;
        rtex->resource.bo_alignment = new_tex->resource.bo_alignment;
        rtex->resource.domains = new_tex->resource.domains;
        rtex->resource.flags = new_tex->resource.flags;
        rtex->size = new_tex->size;
        rtex->db_render_format = new_tex->db_render_format;
        rtex->db_compatible = new_tex->db_compatible;
        rtex->can_sample_z = new_tex->can_sample_z;
        rtex->can_sample_s = new_tex->can_sample_s;
        rtex->surface = new_tex->surface;
        rtex->fmask = new_tex->fmask;
        rtex->cmask = new_tex->cmask;
        rtex->cb_color_info = new_tex->cb_color_info;
        rtex->last_msaa_resolve_target_micro_mode = new_tex->last_msaa_resolve_target_micro_mode;
        rtex->htile_offset = new_tex->htile_offset;
        rtex->depth_cleared = new_tex->depth_cleared;
        rtex->stencil_cleared = new_tex->stencil_cleared;
        rtex->non_disp_tiling = new_tex->non_disp_tiling;
        rtex->framebuffers_bound = new_tex->framebuffers_bound;

        if (new_bind_flag == PIPE_BIND_LINEAR) {
                assert(!rtex->htile_offset);
                assert(!rtex->cmask.size);
                assert(!rtex->fmask.size);
                assert(!rtex->is_depth);
        }

        r600_texture_reference(&new_tex, NULL);

        p_atomic_inc(&rctx->screen->dirty_tex_counter);
}

static boolean r600_texture_get_handle(struct pipe_screen* screen,
                                       struct pipe_context *ctx,
                                       struct pipe_resource *resource,
                                       struct winsys_handle *whandle,
                                       unsigned usage)
{
        struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
        struct r600_common_context *rctx;
        struct r600_resource *res = (struct r600_resource*)resource;
        struct r600_texture *rtex = (struct r600_texture*)resource;
        struct radeon_bo_metadata metadata;
        bool update_metadata = false;
        unsigned stride, offset, slice_size;

        ctx = threaded_context_unwrap_sync(ctx);
        rctx = (struct r600_common_context*)(ctx ? ctx : rscreen->aux_context);

        if (resource->target != PIPE_BUFFER) {
                /* This is not supported now, but it might be required for OpenCL
                 * interop in the future.
                 */
                if (resource->nr_samples > 1 || rtex->is_depth)
                        return false;

                /* Move a suballocated texture into a non-suballocated allocation. */
                if (rscreen->ws->buffer_is_suballocated(res->buf) ||
                    rtex->surface.tile_swizzle) {
                        assert(!res->b.is_shared);
                        r600_reallocate_texture_inplace(rctx, rtex,
                                                        PIPE_BIND_SHARED, false);
                        rctx->b.flush(&rctx->b, NULL, 0);
                        assert(res->b.b.bind & PIPE_BIND_SHARED);
                        assert(res->flags & RADEON_FLAG_NO_SUBALLOC);
                        assert(rtex->surface.tile_swizzle == 0);
                }

                if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) &&
                    rtex->cmask.size) {
                        /* Eliminate fast clear (CMASK) */
                        r600_eliminate_fast_color_clear(rctx, rtex);

                        /* Disable CMASK if flush_resource isn't going
                         * to be called.
                         */
                        if (rtex->cmask.size)
                                r600_texture_discard_cmask(rscreen, rtex);
                }

                /* Set metadata. */
                if (!res->b.is_shared || update_metadata) {
                        r600_texture_init_metadata(rscreen, rtex, &metadata);
                        if (rscreen->query_opaque_metadata)
                                rscreen->query_opaque_metadata(rscreen, rtex,
                                                               &metadata);

                        rscreen->ws->buffer_set_metadata(res->buf, &metadata);
                }

                offset = rtex->surface.u.legacy.level[0].offset;
                stride = rtex->surface.u.legacy.level[0].nblk_x *
                         rtex->surface.bpe;
                slice_size = (uint64_t)rtex->surface.u.legacy.level[0].slice_size_dw * 4;
        } else {
                /* Move a suballocated buffer into a non-suballocated allocation. */
                if (rscreen->ws->buffer_is_suballocated(res->buf)) {
                        assert(!res->b.is_shared);

                        /* Allocate a new buffer with PIPE_BIND_SHARED. */
                        struct pipe_resource templ = res->b.b;
                        templ.bind |= PIPE_BIND_SHARED;

                        struct pipe_resource *newb =
                                screen->resource_create(screen, &templ);
                        if (!newb)
                                return false;

                        /* Copy the old buffer contents to the new one. */
                        struct pipe_box box;
                        u_box_1d(0, newb->width0, &box);
                        rctx->b.resource_copy_region(&rctx->b, newb, 0, 0, 0, 0,
                                                     &res->b.b, 0, &box);
                        /* Move the new buffer storage to the old pipe_resource. */
                        r600_replace_buffer_storage(&rctx->b, &res->b.b, newb);
                        pipe_resource_reference(&newb, NULL);

                        assert(res->b.b.bind & PIPE_BIND_SHARED);
                        assert(res->flags & RADEON_FLAG_NO_SUBALLOC);
                }

                /* Buffers */
                offset = 0;
                stride = 0;
                slice_size = 0;
        }

        if (res->b.is_shared) {
                /* USAGE_EXPLICIT_FLUSH must be cleared if at least one user
                 * doesn't set it.
                 */
                res->external_usage |= usage & ~PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
                if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
                        res->external_usage &= ~PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
        } else {
                res->b.is_shared = true;
                res->external_usage = usage;
        }

        return rscreen->ws->buffer_get_handle(res->buf, stride, offset,
                                              slice_size, whandle);
}

static void r600_texture_destroy(struct pipe_screen *screen,
                                 struct pipe_resource *ptex)
{
        struct r600_texture *rtex = (struct r600_texture*)ptex;
        struct r600_resource *resource = &rtex->resource;

        r600_texture_reference(&rtex->flushed_depth_texture, NULL);
        pipe_resource_reference((struct pipe_resource**)&resource->immed_buffer, NULL);

        if (rtex->cmask_buffer != &rtex->resource) {
                r600_resource_reference(&rtex->cmask_buffer, NULL);
        }
        pb_reference(&resource->buf, NULL);
        FREE(rtex);
}

static const struct u_resource_vtbl r600_texture_vtbl;

/* The number of samples can be specified independently of the texture. */
void r600_texture_get_fmask_info(struct r600_common_screen *rscreen,
                                 struct r600_texture *rtex,
                                 unsigned nr_samples,
                                 struct r600_fmask_info *out)
{
        /* FMASK is allocated like an ordinary texture. */
        struct pipe_resource templ = rtex->resource.b.b;
        struct radeon_surf fmask = {};
        unsigned flags, bpe;

        memset(out, 0, sizeof(*out));

        templ.nr_samples = 1;
        flags = rtex->surface.flags | RADEON_SURF_FMASK;

        /* Use the same parameters and tile mode. */
        fmask.u.legacy.bankw = rtex->surface.u.legacy.bankw;
        fmask.u.legacy.bankh = rtex->surface.u.legacy.bankh;
        fmask.u.legacy.mtilea = rtex->surface.u.legacy.mtilea;
        fmask.u.legacy.tile_split = rtex->surface.u.legacy.tile_split;

        if (nr_samples <= 4)
                fmask.u.legacy.bankh = 4;

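        /* FMASK stores one element per pixel; the element size set below
         * grows with the sample count (1 byte for 2x/4x, 4 bytes for 8x). */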
        switch (nr_samples) {
        case 2:
        case 4:
                bpe = 1;
                break;
        case 8:
                bpe = 4;
                break;
        default:
                R600_ERR("Invalid sample count for FMASK allocation.\n");
                return;
        }

        /* Overallocate FMASK on R600-R700 to work around colorbuffer
         * corruption. The overallocation could be avoided by writing a
         * separate FMASK allocator specifically for R600-R700 ASICs. */
        if (rscreen->chip_class <= R700) {
                bpe *= 2;
        }

        if (rscreen->ws->surface_init(rscreen->ws, &templ, flags, bpe,
                                      RADEON_SURF_MODE_2D, &fmask)) {
                R600_ERR("Got error in surface_init while allocating FMASK.\n");
                return;
        }

        assert(fmask.u.legacy.level[0].mode == RADEON_SURF_MODE_2D);

        out->slice_tile_max = (fmask.u.legacy.level[0].nblk_x * fmask.u.legacy.level[0].nblk_y) / 64;
        if (out->slice_tile_max)
                out->slice_tile_max -= 1;

        out->tile_mode_index = fmask.u.legacy.tiling_index[0];
        out->pitch_in_pixels = fmask.u.legacy.level[0].nblk_x;
        out->bank_height = fmask.u.legacy.bankh;
        out->tile_swizzle = fmask.tile_swizzle;
        out->alignment = MAX2(256, fmask.surf_alignment);
        out->size = fmask.surf_size;
}

static void r600_texture_allocate_fmask(struct r600_common_screen *rscreen,
                                        struct r600_texture *rtex)
{
        r600_texture_get_fmask_info(rscreen, rtex,
                                    rtex->resource.b.b.nr_samples, &rtex->fmask);

        rtex->fmask.offset = align64(rtex->size, rtex->fmask.alignment);
        rtex->size = rtex->fmask.offset + rtex->fmask.size;
}

void r600_texture_get_cmask_info(struct r600_common_screen *rscreen,
                                 struct r600_texture *rtex,
                                 struct r600_cmask_info *out)
{
        unsigned cmask_tile_width = 8;
        unsigned cmask_tile_height = 8;
        unsigned cmask_tile_elements = cmask_tile_width * cmask_tile_height;
        unsigned element_bits = 4;
        unsigned cmask_cache_bits = 1024;
        unsigned num_pipes = rscreen->info.num_tile_pipes;
        unsigned pipe_interleave_bytes = rscreen->info.pipe_interleave_bytes;

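        /* One 4-bit CMASK element covers an 8x8-pixel tile. The macro tile is
         * sized so that the CMASK cache (1024 bits per pipe) covers a roughly
         * square pixel region, rounded to a power-of-two width. */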
        unsigned elements_per_macro_tile = (cmask_cache_bits / element_bits) * num_pipes;
        unsigned pixels_per_macro_tile = elements_per_macro_tile * cmask_tile_elements;
        unsigned sqrt_pixels_per_macro_tile = sqrt(pixels_per_macro_tile);
        unsigned macro_tile_width = util_next_power_of_two(sqrt_pixels_per_macro_tile);
        unsigned macro_tile_height = pixels_per_macro_tile / macro_tile_width;

        unsigned pitch_elements = align(rtex->resource.b.b.width0, macro_tile_width);
        unsigned height = align(rtex->resource.b.b.height0, macro_tile_height);

        unsigned base_align = num_pipes * pipe_interleave_bytes;
        unsigned slice_bytes =
                ((pitch_elements * height * element_bits + 7) / 8) / cmask_tile_elements;

        assert(macro_tile_width % 128 == 0);
        assert(macro_tile_height % 128 == 0);

        out->slice_tile_max = ((pitch_elements * height) / (128*128)) - 1;
        out->alignment = MAX2(256, base_align);
        out->size = util_num_layers(&rtex->resource.b.b, 0) *
                    align(slice_bytes, base_align);
}

static void r600_texture_allocate_cmask(struct r600_common_screen *rscreen,
                                        struct r600_texture *rtex)
{
        r600_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);

        rtex->cmask.offset = align64(rtex->size, rtex->cmask.alignment);
        rtex->size = rtex->cmask.offset + rtex->cmask.size;

        rtex->cb_color_info |= EG_S_028C70_FAST_CLEAR(1);
}

static void r600_texture_alloc_cmask_separate(struct r600_common_screen *rscreen,
                                              struct r600_texture *rtex)
{
        if (rtex->cmask_buffer)
                return;

        assert(rtex->cmask.size == 0);

        r600_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);

        rtex->cmask_buffer = (struct r600_resource *)
                r600_aligned_buffer_create(&rscreen->b,
                                           R600_RESOURCE_FLAG_UNMAPPABLE,
                                           PIPE_USAGE_DEFAULT,
                                           rtex->cmask.size,
                                           rtex->cmask.alignment);
        if (rtex->cmask_buffer == NULL) {
                rtex->cmask.size = 0;
                return;
        }

        /* update colorbuffer state bits */
        rtex->cmask.base_address_reg = rtex->cmask_buffer->gpu_address >> 8;

        rtex->cb_color_info |= EG_S_028C70_FAST_CLEAR(1);

        p_atomic_inc(&rscreen->compressed_colortex_counter);
}

void eg_resource_alloc_immed(struct r600_common_screen *rscreen,
                             struct r600_resource *res,
                             unsigned immed_size)
{
        res->immed_buffer = (struct r600_resource *)
                pipe_buffer_create(&rscreen->b, PIPE_BIND_CUSTOM,
                                   PIPE_USAGE_DEFAULT, immed_size);
}

static void r600_texture_get_htile_size(struct r600_common_screen *rscreen,
                                        struct r600_texture *rtex)
{
        unsigned cl_width, cl_height, width, height;
        unsigned slice_elements, slice_bytes, pipe_interleave_bytes, base_align;
        unsigned num_pipes = rscreen->info.num_tile_pipes;

        rtex->surface.htile_size = 0;

        if (rscreen->chip_class <= EVERGREEN &&
            rscreen->info.drm_major == 2 && rscreen->info.drm_minor < 26)
                return;

        /* HW bug on R6xx. */
        if (rscreen->chip_class == R600 &&
            (rtex->resource.b.b.width0 > 7680 ||
             rtex->resource.b.b.height0 > 7680))
                return;

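        /* The HTILE cache-line footprint (in 8x8-pixel HTILE elements) depends
         * on the pipe count. Each element is 4 bytes, and the surface is
         * padded to whole cache lines below before sizing each slice. */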
        switch (num_pipes) {
        case 1:
                cl_width = 32;
                cl_height = 16;
                break;
        case 2:
                cl_width = 32;
                cl_height = 32;
                break;
        case 4:
                cl_width = 64;
                cl_height = 32;
                break;
        case 8:
                cl_width = 64;
                cl_height = 64;
                break;
        case 16:
                cl_width = 128;
                cl_height = 64;
                break;
        default:
                assert(0);
                return;
        }

        width = align(rtex->resource.b.b.width0, cl_width * 8);
        height = align(rtex->resource.b.b.height0, cl_height * 8);

        slice_elements = (width * height) / (8 * 8);
        slice_bytes = slice_elements * 4;

        pipe_interleave_bytes = rscreen->info.pipe_interleave_bytes;
        base_align = num_pipes * pipe_interleave_bytes;

        rtex->surface.htile_alignment = base_align;
        rtex->surface.htile_size =
                util_num_layers(&rtex->resource.b.b, 0) *
                align(slice_bytes, base_align);
}

static void r600_texture_allocate_htile(struct r600_common_screen *rscreen,
                                        struct r600_texture *rtex)
{
        r600_texture_get_htile_size(rscreen, rtex);

        if (!rtex->surface.htile_size)
                return;

        rtex->htile_offset = align(rtex->size, rtex->surface.htile_alignment);
        rtex->size = rtex->htile_offset + rtex->surface.htile_size;
}

void r600_print_texture_info(struct r600_common_screen *rscreen,
                             struct r600_texture *rtex, struct u_log_context *log)
{
        int i;

        /* Common parameters. */
        u_log_printf(log, "  Info: npix_x=%u, npix_y=%u, npix_z=%u, blk_w=%u, "
                "blk_h=%u, array_size=%u, last_level=%u, "
                "bpe=%u, nsamples=%u, flags=0x%x, %s\n",
                rtex->resource.b.b.width0, rtex->resource.b.b.height0,
                rtex->resource.b.b.depth0, rtex->surface.blk_w,
                rtex->surface.blk_h,
                rtex->resource.b.b.array_size, rtex->resource.b.b.last_level,
                rtex->surface.bpe, rtex->resource.b.b.nr_samples,
                rtex->surface.flags, util_format_short_name(rtex->resource.b.b.format));

        u_log_printf(log, "  Layout: size=%"PRIu64", alignment=%u, bankw=%u, "
                "bankh=%u, nbanks=%u, mtilea=%u, tilesplit=%u, pipeconfig=%u, scanout=%u\n",
                rtex->surface.surf_size, rtex->surface.surf_alignment, rtex->surface.u.legacy.bankw,
                rtex->surface.u.legacy.bankh, rtex->surface.u.legacy.num_banks, rtex->surface.u.legacy.mtilea,
                rtex->surface.u.legacy.tile_split, rtex->surface.u.legacy.pipe_config,
                (rtex->surface.flags & RADEON_SURF_SCANOUT) != 0);

        if (rtex->fmask.size)
                u_log_printf(log, "  FMask: offset=%"PRIu64", size=%"PRIu64", alignment=%u, pitch_in_pixels=%u, "
                        "bankh=%u, slice_tile_max=%u, tile_mode_index=%u\n",
                        rtex->fmask.offset, rtex->fmask.size, rtex->fmask.alignment,
                        rtex->fmask.pitch_in_pixels, rtex->fmask.bank_height,
                        rtex->fmask.slice_tile_max, rtex->fmask.tile_mode_index);

        if (rtex->cmask.size)
                u_log_printf(log, "  CMask: offset=%"PRIu64", size=%"PRIu64", alignment=%u, "
                        "slice_tile_max=%u\n",
                        rtex->cmask.offset, rtex->cmask.size, rtex->cmask.alignment,
                        rtex->cmask.slice_tile_max);

        if (rtex->htile_offset)
                u_log_printf(log, "  HTile: offset=%"PRIu64", size=%u "
                        "alignment=%u\n",
                        rtex->htile_offset, rtex->surface.htile_size,
                        rtex->surface.htile_alignment);

        for (i = 0; i <= rtex->resource.b.b.last_level; i++)
                u_log_printf(log, "  Level[%i]: offset=%"PRIu64", slice_size=%"PRIu64", "
                        "npix_x=%u, npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
                        "mode=%u, tiling_index = %u\n",
                        i, rtex->surface.u.legacy.level[i].offset,
                        (uint64_t)rtex->surface.u.legacy.level[i].slice_size_dw * 4,
                        u_minify(rtex->resource.b.b.width0, i),
                        u_minify(rtex->resource.b.b.height0, i),
                        u_minify(rtex->resource.b.b.depth0, i),
                        rtex->surface.u.legacy.level[i].nblk_x,
                        rtex->surface.u.legacy.level[i].nblk_y,
                        rtex->surface.u.legacy.level[i].mode,
                        rtex->surface.u.legacy.tiling_index[i]);

        if (rtex->surface.has_stencil) {
                u_log_printf(log, "  StencilLayout: tilesplit=%u\n",
                        rtex->surface.u.legacy.stencil_tile_split);
                for (i = 0; i <= rtex->resource.b.b.last_level; i++) {
                        u_log_printf(log, "  StencilLevel[%i]: offset=%"PRIu64", "
                                "slice_size=%"PRIu64", npix_x=%u, "
                                "npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
                                "mode=%u, tiling_index = %u\n",
                                i, rtex->surface.u.legacy.stencil_level[i].offset,
                                (uint64_t)rtex->surface.u.legacy.stencil_level[i].slice_size_dw * 4,
                                u_minify(rtex->resource.b.b.width0, i),
                                u_minify(rtex->resource.b.b.height0, i),
                                u_minify(rtex->resource.b.b.depth0, i),
                                rtex->surface.u.legacy.stencil_level[i].nblk_x,
                                rtex->surface.u.legacy.stencil_level[i].nblk_y,
                                rtex->surface.u.legacy.stencil_level[i].mode,
                                rtex->surface.u.legacy.stencil_tiling_index[i]);
                }
        }
}

/* Common processing for r600_texture_create and r600_texture_from_handle */
static struct r600_texture *
r600_texture_create_object(struct pipe_screen *screen,
                           const struct pipe_resource *base,
                           struct pb_buffer *buf,
                           struct radeon_surf *surface)
{
        struct r600_texture *rtex;
        struct r600_resource *resource;
        struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;

        rtex = CALLOC_STRUCT(r600_texture);
        if (!rtex)
                return NULL;

        resource = &rtex->resource;
        resource->b.b = *base;
        resource->b.b.next = NULL;
        resource->b.vtbl = &r600_texture_vtbl;
        pipe_reference_init(&resource->b.b.reference, 1);
        resource->b.b.screen = screen;

        /* don't include stencil-only formats which we don't support for rendering */
        rtex->is_depth = util_format_has_depth(util_format_description(rtex->resource.b.b.format));

        rtex->surface = *surface;
        rtex->size = rtex->surface.surf_size;
        rtex->db_render_format = base->format;

        /* Tiled depth textures utilize the non-displayable tile order.
         * This must be done after r600_setup_surface.
         * Applies to R600-Cayman. */
        rtex->non_disp_tiling = rtex->is_depth && rtex->surface.u.legacy.level[0].mode >= RADEON_SURF_MODE_1D;
        /* Applies to GCN. */
        rtex->last_msaa_resolve_target_micro_mode = rtex->surface.micro_tile_mode;

        if (rtex->is_depth) {
                if (base->flags & (R600_RESOURCE_FLAG_TRANSFER |
                                   R600_RESOURCE_FLAG_FLUSHED_DEPTH) ||
                    rscreen->chip_class >= EVERGREEN) {
                        rtex->can_sample_z = !rtex->surface.u.legacy.depth_adjusted;
                        rtex->can_sample_s = !rtex->surface.u.legacy.stencil_adjusted;
                } else {
                        if (rtex->resource.b.b.nr_samples <= 1 &&
                            (rtex->resource.b.b.format == PIPE_FORMAT_Z16_UNORM ||
                             rtex->resource.b.b.format == PIPE_FORMAT_Z32_FLOAT))
                                rtex->can_sample_z = true;
                }

                if (!(base->flags & (R600_RESOURCE_FLAG_TRANSFER |
                                     R600_RESOURCE_FLAG_FLUSHED_DEPTH))) {
                        rtex->db_compatible = true;

                        if (!(rscreen->debug_flags & DBG_NO_HYPERZ))
                                r600_texture_allocate_htile(rscreen, rtex);
                }
        } else {
                if (base->nr_samples > 1) {
                        if (!buf) {
                                r600_texture_allocate_fmask(rscreen, rtex);
                                r600_texture_allocate_cmask(rscreen, rtex);
                                rtex->cmask_buffer = &rtex->resource;
                        }
                        if (!rtex->fmask.size || !rtex->cmask.size) {
                                FREE(rtex);
                                return NULL;
                        }
                }
        }

        /* Now create the backing buffer. */
        if (!buf) {
                r600_init_resource_fields(rscreen, resource, rtex->size,
                                          rtex->surface.surf_alignment);

                /* Displayable surfaces are not suballocated. */
                if (resource->b.b.bind & PIPE_BIND_SCANOUT)
                        resource->flags |= RADEON_FLAG_NO_SUBALLOC;

                if (!r600_alloc_resource(rscreen, resource)) {
                        FREE(rtex);
                        return NULL;
                }
        } else {
                resource->buf = buf;
                resource->gpu_address = rscreen->ws->buffer_get_virtual_address(resource->buf);
                resource->bo_size = buf->size;
                resource->bo_alignment = buf->alignment;
                resource->domains = rscreen->ws->buffer_get_initial_domain(resource->buf);
                if (resource->domains & RADEON_DOMAIN_VRAM)
                        resource->vram_usage = buf->size;
                else if (resource->domains & RADEON_DOMAIN_GTT)
                        resource->gart_usage = buf->size;
        }

        if (rtex->cmask.size) {
                /* Initialize the cmask to 0xCC (= compressed state). */
                r600_screen_clear_buffer(rscreen, &rtex->cmask_buffer->b.b,
                                         rtex->cmask.offset, rtex->cmask.size,
                                         0xCCCCCCCC);
        }
        if (rtex->htile_offset) {
                uint32_t clear_value = 0;

                r600_screen_clear_buffer(rscreen, &rtex->resource.b.b,
                                         rtex->htile_offset,
                                         rtex->surface.htile_size,
                                         clear_value);
        }

        /* Initialize the CMASK base register value. */
        rtex->cmask.base_address_reg =
                (rtex->resource.gpu_address + rtex->cmask.offset) >> 8;

        if (rscreen->debug_flags & DBG_VM) {
                fprintf(stderr, "VM start=0x%"PRIX64"  end=0x%"PRIX64" | Texture %ix%ix%i, %i levels, %i samples, %s\n",
                        rtex->resource.gpu_address,
                        rtex->resource.gpu_address + rtex->resource.buf->size,
                        base->width0, base->height0, util_num_layers(base, 0), base->last_level+1,
                        base->nr_samples ? base->nr_samples : 1, util_format_short_name(base->format));
        }

        if (rscreen->debug_flags & DBG_TEX) {
                puts("Texture:");
                struct u_log_context log;
                u_log_context_init(&log);
                r600_print_texture_info(rscreen, rtex, &log);
                u_log_new_page_print(&log, stdout);
                fflush(stdout);
                u_log_context_destroy(&log);
        }

        return rtex;
}

static enum radeon_surf_mode
r600_choose_tiling(struct r600_common_screen *rscreen,
                   const struct pipe_resource *templ)
{
        const struct util_format_description *desc = util_format_description(templ->format);
        bool force_tiling = templ->flags & R600_RESOURCE_FLAG_FORCE_TILING;
        bool is_depth_stencil = util_format_is_depth_or_stencil(templ->format) &&
                                !(templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH);

        /* MSAA resources must be 2D tiled. */
        if (templ->nr_samples > 1)
                return RADEON_SURF_MODE_2D;

        /* Transfer resources should be linear. */
        if (templ->flags & R600_RESOURCE_FLAG_TRANSFER)
                return RADEON_SURF_MODE_LINEAR_ALIGNED;

        /* r600g: force tiling on TEXTURE_2D and TEXTURE_3D compute resources. */
        if (rscreen->chip_class >= R600 && rscreen->chip_class <= CAYMAN &&
            (templ->bind & PIPE_BIND_COMPUTE_RESOURCE) &&
            (templ->target == PIPE_TEXTURE_2D ||
             templ->target == PIPE_TEXTURE_3D))
                force_tiling = true;

        /* Handle common candidates for the linear mode.
         * Compressed textures and DB surfaces must always be tiled.
         */
        if (!force_tiling &&
            !is_depth_stencil &&
            !util_format_is_compressed(templ->format)) {
                if (rscreen->debug_flags & DBG_NO_TILING)
                        return RADEON_SURF_MODE_LINEAR_ALIGNED;

                /* Tiling doesn't work with the 422 (SUBSAMPLED) formats on R600+. */
                if (desc->layout == UTIL_FORMAT_LAYOUT_SUBSAMPLED)
                        return RADEON_SURF_MODE_LINEAR_ALIGNED;

                if (templ->bind & PIPE_BIND_LINEAR)
                        return RADEON_SURF_MODE_LINEAR_ALIGNED;

                /* Textures likely to be mapped often. */
                if (templ->usage == PIPE_USAGE_STAGING ||
                    templ->usage == PIPE_USAGE_STREAM)
                        return RADEON_SURF_MODE_LINEAR_ALIGNED;
        }

        /* Make small textures 1D tiled. */
        if (templ->width0 <= 16 || templ->height0 <= 16 ||
            (rscreen->debug_flags & DBG_NO_2D_TILING))
                return RADEON_SURF_MODE_1D;

        /* The allocator will switch to 1D if needed. */
        return RADEON_SURF_MODE_2D;
}

struct pipe_resource *r600_texture_create(struct pipe_screen *screen,
                                          const struct pipe_resource *templ)
{
        struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
        struct radeon_surf surface = {0};
        bool is_flushed_depth = templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH;
        int r;

        r = r600_init_surface(rscreen, &surface, templ,
                              r600_choose_tiling(rscreen, templ), 0, 0,
                              false, false, is_flushed_depth);
        if (r) {
                return NULL;
        }

        return (struct pipe_resource *)
               r600_texture_create_object(screen, templ, NULL, &surface);
}

static struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen,
                                                      const struct pipe_resource *templ,
                                                      struct winsys_handle *whandle,
                                                      unsigned usage)
{
        struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
        struct pb_buffer *buf = NULL;
        unsigned stride = 0, offset = 0;
        enum radeon_surf_mode array_mode;
        struct radeon_surf surface = {};
        int r;
        struct radeon_bo_metadata metadata = {};
        struct r600_texture *rtex;
        bool is_scanout;

        /* Support only 2D textures without mipmaps */
        if ((templ->target != PIPE_TEXTURE_2D && templ->target != PIPE_TEXTURE_RECT) ||
            templ->depth0 != 1 || templ->last_level != 0)
                return NULL;

        buf = rscreen->ws->buffer_from_handle(rscreen->ws, whandle, &stride, &offset);
        if (!buf)
                return NULL;

        rscreen->ws->buffer_get_metadata(buf, &metadata);
        r600_surface_import_metadata(rscreen, &surface, &metadata,
                                     &array_mode, &is_scanout);

        r = r600_init_surface(rscreen, &surface, templ, array_mode, stride,
                              offset, true, is_scanout, false);
        if (r) {
                return NULL;
        }

        rtex = r600_texture_create_object(screen, templ, buf, &surface);
        if (!rtex)
                return NULL;

        rtex->resource.b.is_shared = true;
        rtex->resource.external_usage = usage;

        if (rscreen->apply_opaque_metadata)
                rscreen->apply_opaque_metadata(rscreen, rtex, &metadata);

        assert(rtex->surface.tile_swizzle == 0);
        return &rtex->resource.b.b;
}

bool r600_init_flushed_depth_texture(struct pipe_context *ctx,
                                     struct pipe_resource *texture,
                                     struct r600_texture **staging)
{
        struct r600_texture *rtex = (struct r600_texture*)texture;
        struct pipe_resource resource;
        struct r600_texture **flushed_depth_texture = staging ?
                staging : &rtex->flushed_depth_texture;
        enum pipe_format pipe_format = texture->format;

        if (!staging) {
                if (rtex->flushed_depth_texture)
                        return true; /* it's ready */

                if (!rtex->can_sample_z && rtex->can_sample_s) {
                        switch (pipe_format) {
                        case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
                                /* Save memory by not allocating the S plane. */
                                pipe_format = PIPE_FORMAT_Z32_FLOAT;
                                break;
                        case PIPE_FORMAT_Z24_UNORM_S8_UINT:
                        case PIPE_FORMAT_S8_UINT_Z24_UNORM:
                                /* Save memory bandwidth by not copying the
                                 * stencil part during flush.
                                 *
                                 * This potentially increases memory bandwidth
                                 * if an application uses both Z and S texturing
                                 * simultaneously (a flushed Z24S8 texture
                                 * would be stored compactly), but how often
                                 * does that really happen?
                                 */
                                pipe_format = PIPE_FORMAT_Z24X8_UNORM;
                                break;
                        default:;
                        }
                } else if (!rtex->can_sample_s && rtex->can_sample_z) {
                        assert(util_format_has_stencil(util_format_description(pipe_format)));

                        /* DB->CB copies to an 8bpp surface don't work. */
                        pipe_format = PIPE_FORMAT_X24S8_UINT;
                }
        }

        memset(&resource, 0, sizeof(resource));
        resource.target = texture->target;
        resource.format = pipe_format;
        resource.width0 = texture->width0;
        resource.height0 = texture->height0;
        resource.depth0 = texture->depth0;
        resource.array_size = texture->array_size;
        resource.last_level = texture->last_level;
        resource.nr_samples = texture->nr_samples;
        resource.usage = staging ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
        resource.bind = texture->bind & ~PIPE_BIND_DEPTH_STENCIL;
        resource.flags = texture->flags | R600_RESOURCE_FLAG_FLUSHED_DEPTH;

        if (staging)
                resource.flags |= R600_RESOURCE_FLAG_TRANSFER;

        *flushed_depth_texture = (struct r600_texture *)ctx->screen->resource_create(ctx->screen, &resource);
        if (*flushed_depth_texture == NULL) {
                R600_ERR("failed to create temporary texture to hold flushed depth\n");
                return false;
        }

        (*flushed_depth_texture)->non_disp_tiling = false;
        return true;
}

/**
 * Initialize the pipe_resource descriptor to be of the same size as the box,
 * which is supposed to hold a subregion of the texture "orig" at the given
 * mipmap level.
 */
static void r600_init_temp_resource_from_box(struct pipe_resource *res,
                                             struct pipe_resource *orig,
                                             const struct pipe_box *box,
                                             unsigned level, unsigned flags)
{
        memset(res, 0, sizeof(*res));
        res->format = orig->format;
        res->width0 = box->width;
        res->height0 = box->height;
        res->depth0 = 1;
        res->array_size = 1;
        res->usage = flags & R600_RESOURCE_FLAG_TRANSFER ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
        res->flags = flags;

        /* We must set the correct texture target and dimensions for a 3D box. */
        if (box->depth > 1 && util_max_layer(orig, level) > 0) {
                res->target = PIPE_TEXTURE_2D_ARRAY;
                res->array_size = box->depth;
        } else {
                res->target = PIPE_TEXTURE_2D;
        }
}

static bool r600_can_invalidate_texture(struct r600_common_screen *rscreen,
                                        struct r600_texture *rtex,
                                        unsigned transfer_usage,
                                        const struct pipe_box *box)
{
        /* r600g doesn't react to dirty_tex_descriptor_counter */
        return rscreen->chip_class >= SI &&
               !rtex->resource.b.is_shared &&
               !(transfer_usage & PIPE_TRANSFER_READ) &&
               rtex->resource.b.b.last_level == 0 &&
               util_texrange_covers_whole_level(&rtex->resource.b.b, 0,
                                                box->x, box->y, box->z,
                                                box->width, box->height,
                                                box->depth);
}

static void r600_texture_invalidate_storage(struct r600_common_context *rctx,
                                            struct r600_texture *rtex)
{
        struct r600_common_screen *rscreen = rctx->screen;

        /* There is no point in discarding depth and tiled buffers. */
        assert(!rtex->is_depth);
        assert(rtex->surface.is_linear);

        /* Reallocate the buffer in the same pipe_resource. */
        r600_alloc_resource(rscreen, &rtex->resource);

        /* Initialize the CMASK base address (needed even without CMASK). */
        rtex->cmask.base_address_reg =
                (rtex->resource.gpu_address + rtex->cmask.offset) >> 8;

        p_atomic_inc(&rscreen->dirty_tex_counter);

        rctx->num_alloc_tex_transfer_bytes += rtex->size;
}

static void *r600_texture_transfer_map(struct pipe_context *ctx,
                                       struct pipe_resource *texture,
                                       unsigned level,
                                       unsigned usage,
                                       const struct pipe_box *box,
                                       struct pipe_transfer **ptransfer)
{
        struct r600_common_context *rctx = (struct r600_common_context*)ctx;
        struct r600_texture *rtex = (struct r600_texture*)texture;
        struct r600_transfer *trans;
        struct r600_resource *buf;
        unsigned offset = 0;
        char *map;
        bool use_staging_texture = false;

        assert(!(texture->flags & R600_RESOURCE_FLAG_TRANSFER));
        assert(box->width && box->height && box->depth);

        /* Depth textures use staging unconditionally. */
        if (!rtex->is_depth) {
                /* Degrade the tile mode if we get too many transfers on APUs.
                 * On dGPUs, the staging texture is always faster.
                 * Only count uploads that are at least 4x4 pixels large.
                 */
                if (!rctx->screen->info.has_dedicated_vram &&
                    level == 0 &&
                    box->width >= 4 && box->height >= 4 &&
                    p_atomic_inc_return(&rtex->num_level0_transfers) == 10) {
                        bool can_invalidate =
                                r600_can_invalidate_texture(rctx->screen, rtex,
                                                            usage, box);

                        r600_reallocate_texture_inplace(rctx, rtex,
                                                        PIPE_BIND_LINEAR,
                                                        can_invalidate);
                }

                /* Tiled textures need to be converted into a linear texture for CPU
                 * access. The staging texture is always linear and is placed in GART.
                 *
                 * Reading from VRAM or GTT WC is slow, always use the staging
                 * texture in this case.
                 *
                 * Use the staging texture for uploads if the underlying BO
                 * is busy.
                 */
                if (!rtex->surface.is_linear)
                        use_staging_texture = true;
                else if (usage & PIPE_TRANSFER_READ)
                        use_staging_texture =
                                rtex->resource.domains & RADEON_DOMAIN_VRAM ||
                                rtex->resource.flags & RADEON_FLAG_GTT_WC;
                /* Write & linear only: */
                else if (r600_rings_is_buffer_referenced(rctx, rtex->resource.buf,
                                                         RADEON_USAGE_READWRITE) ||
                         !rctx->ws->buffer_wait(rtex->resource.buf, 0,
                                                RADEON_USAGE_READWRITE)) {
                        /* It's busy. */
                        if (r600_can_invalidate_texture(rctx->screen, rtex,
                                                        usage, box))
                                r600_texture_invalidate_storage(rctx, rtex);
                        else
                                use_staging_texture = true;
                }
        }

        trans = CALLOC_STRUCT(r600_transfer);
        if (!trans)
                return NULL;
        pipe_resource_reference(&trans->b.b.resource, texture);
        trans->b.b.level = level;
        trans->b.b.usage = usage;
        trans->b.b.box = *box;

        if (rtex->is_depth) {
                struct r600_texture *staging_depth;

                if (rtex->resource.b.b.nr_samples > 1) {
                        /* MSAA depth buffers need to be converted to single sample buffers.
                         *
                         * Mapping MSAA depth buffers can occur if ReadPixels is called
                         * with a multisample GLX visual.
                         *
                         * First downsample the depth buffer to a temporary texture,
                         * then decompress the temporary one to staging.
                         *
                         * Only the region being mapped is transferred.
1359 */
1360 struct pipe_resource resource;
1361
1362 r600_init_temp_resource_from_box(&resource, texture, box, level, 0);
1363
1364 if (!r600_init_flushed_depth_texture(ctx, &resource, &staging_depth)) {
1365 R600_ERR("failed to create temporary texture to hold untiled copy\n");
1366 FREE(trans);
1367 return NULL;
1368 }
1369
1370 if (usage & PIPE_TRANSFER_READ) {
1371 struct pipe_resource *temp = ctx->screen->resource_create(ctx->screen, &resource);
1372 if (!temp) {
1373 R600_ERR("failed to create a temporary depth texture\n");
1374 FREE(trans);
1375 return NULL;
1376 }
1377
1378 r600_copy_region_with_blit(ctx, temp, 0, 0, 0, 0, texture, level, box);
1379 rctx->blit_decompress_depth(ctx, (struct r600_texture*)temp, staging_depth,
1380 0, 0, 0, box->depth, 0, 0);
1381 pipe_resource_reference(&temp, NULL);
1382 }
1383
1384 /* Just get the strides. */
1385 r600_texture_get_offset(rctx->screen, staging_depth, level, NULL,
1386 &trans->b.b.stride,
1387 &trans->b.b.layer_stride);
1388 } else {
1389 /* XXX: only readback the rectangle which is being mapped? */
1390 /* XXX: when discard is true, no need to read back from depth texture */
1391 if (!r600_init_flushed_depth_texture(ctx, texture, &staging_depth)) {
1392 R600_ERR("failed to create temporary texture to hold untiled copy\n");
1393 FREE(trans);
1394 return NULL;
1395 }
1396
1397 rctx->blit_decompress_depth(ctx, rtex, staging_depth,
1398 level, level,
1399 box->z, box->z + box->depth - 1,
1400 0, 0);
1401
1402 offset = r600_texture_get_offset(rctx->screen, staging_depth,
1403 level, box,
1404 &trans->b.b.stride,
1405 &trans->b.b.layer_stride);
1406 }
1407
1408 trans->staging = (struct r600_resource*)staging_depth;
1409 buf = trans->staging;
1410 } else if (use_staging_texture) {
1411 struct pipe_resource resource;
1412 struct r600_texture *staging;
1413
1414 r600_init_temp_resource_from_box(&resource, texture, box, level,
1415 R600_RESOURCE_FLAG_TRANSFER);
1416 resource.usage = (usage & PIPE_TRANSFER_READ) ?
1417 PIPE_USAGE_STAGING : PIPE_USAGE_STREAM;
1418
1419 /* Create the temporary texture. */
1420 staging = (struct r600_texture*)ctx->screen->resource_create(ctx->screen, &resource);
1421 if (!staging) {
1422 R600_ERR("failed to create temporary texture to hold untiled copy\n");
1423 FREE(trans);
1424 return NULL;
1425 }
1426 trans->staging = &staging->resource;
1427
1428 /* Just get the strides. */
1429 r600_texture_get_offset(rctx->screen, staging, 0, NULL,
1430 &trans->b.b.stride,
1431 &trans->b.b.layer_stride);
1432
1433 if (usage & PIPE_TRANSFER_READ)
1434 r600_copy_to_staging_texture(ctx, trans);
1435 else
1436 usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
1437
1438 buf = trans->staging;
1439 } else {
1440 /* the resource is mapped directly */
1441 offset = r600_texture_get_offset(rctx->screen, rtex, level, box,
1442 &trans->b.b.stride,
1443 &trans->b.b.layer_stride);
1444 buf = &rtex->resource;
1445 }
1446
1447 if (!(map = r600_buffer_map_sync_with_rings(rctx, buf, usage))) {
1448 r600_resource_reference(&trans->staging, NULL);
1449 FREE(trans);
1450 return NULL;
1451 }
1452
1453 *ptransfer = &trans->b.b;
1454 return map + offset;
1455 }
1456
1457 static void r600_texture_transfer_unmap(struct pipe_context *ctx,
1458 struct pipe_transfer* transfer)
1459 {
1460 struct r600_common_context *rctx = (struct r600_common_context*)ctx;
1461 struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
1462 struct pipe_resource *texture = transfer->resource;
1463 struct r600_texture *rtex = (struct r600_texture*)texture;
1464
1465 if ((transfer->usage & PIPE_TRANSFER_WRITE) && rtransfer->staging) {
1466 if (rtex->is_depth && rtex->resource.b.b.nr_samples <= 1) {
1467 ctx->resource_copy_region(ctx, texture, transfer->level,
1468 transfer->box.x, transfer->box.y, transfer->box.z,
1469 &rtransfer->staging->b.b, transfer->level,
1470 &transfer->box);
1471 } else {
1472 r600_copy_from_staging_texture(ctx, rtransfer);
1473 }
1474 }
1475
1476 if (rtransfer->staging) {
1477 rctx->num_alloc_tex_transfer_bytes += rtransfer->staging->buf->size;
1478 r600_resource_reference(&rtransfer->staging, NULL);
1479 }
1480
1481 /* Heuristic for {upload, draw, upload, draw, ..}:
1482 *
1483 * Flush the gfx IB if we've allocated too much texture storage.
1484 *
1485 * The idea is that we don't want to build IBs that use too much
1486 * memory and put pressure on the kernel memory manager and we also
1487 * want to make temporary and invalidated buffers go idle ASAP to
1488 * decrease the total memory usage or make them reusable. The memory
1489 * usage will be slightly higher than given here because of the buffer
1490 * cache in the winsys.
1491 *
1492 * The result is that the kernel memory manager is never a bottleneck.
1493 */
1494 if (rctx->num_alloc_tex_transfer_bytes > rctx->screen->info.gart_size / 4) {
1495 rctx->gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
1496 rctx->num_alloc_tex_transfer_bytes = 0;
1497 }
1498
1499 pipe_resource_reference(&transfer->resource, NULL);
1500 FREE(transfer);
1501 }
1502
1503 static const struct u_resource_vtbl r600_texture_vtbl =
1504 {
1505 NULL, /* get_handle */
1506 r600_texture_destroy, /* resource_destroy */
1507 r600_texture_transfer_map, /* transfer_map */
1508 u_default_transfer_flush_region, /* transfer_flush_region */
1509 r600_texture_transfer_unmap, /* transfer_unmap */
1510 };
1511
1512 struct pipe_surface *r600_create_surface_custom(struct pipe_context *pipe,
1513 struct pipe_resource *texture,
1514 const struct pipe_surface *templ,
1515 unsigned width0, unsigned height0,
1516 unsigned width, unsigned height)
1517 {
1518 struct r600_surface *surface = CALLOC_STRUCT(r600_surface);
1519
1520 if (!surface)
1521 return NULL;
1522
1523 assert(templ->u.tex.first_layer <= util_max_layer(texture, templ->u.tex.level));
1524 assert(templ->u.tex.last_layer <= util_max_layer(texture, templ->u.tex.level));
1525
1526 pipe_reference_init(&surface->base.reference, 1);
1527 pipe_resource_reference(&surface->base.texture, texture);
1528 surface->base.context = pipe;
1529 surface->base.format = templ->format;
1530 surface->base.width = width;
1531 surface->base.height = height;
1532 surface->base.u = templ->u;
1533
1534 surface->width0 = width0;
1535 surface->height0 = height0;
1536
1537 return &surface->base;
1538 }
1539
1540 static struct pipe_surface *r600_create_surface(struct pipe_context *pipe,
1541 struct pipe_resource *tex,
1542 const struct pipe_surface *templ)
1543 {
1544 unsigned level = templ->u.tex.level;
1545 unsigned width = u_minify(tex->width0, level);
1546 unsigned height = u_minify(tex->height0, level);
1547 unsigned width0 = tex->width0;
1548 unsigned height0 = tex->height0;
1549
1550 if (tex->target != PIPE_BUFFER && templ->format != tex->format) {
1551 const struct util_format_description *tex_desc
1552 = util_format_description(tex->format);
1553 const struct util_format_description *templ_desc
1554 = util_format_description(templ->format);
1555
1556 assert(tex_desc->block.bits == templ_desc->block.bits);
1557
1558 /* Adjust size of surface if and only if the block width or
1559 * height is changed. */
1560 if (tex_desc->block.width != templ_desc->block.width ||
1561 tex_desc->block.height != templ_desc->block.height) {
1562 unsigned nblks_x = util_format_get_nblocksx(tex->format, width);
1563 unsigned nblks_y = util_format_get_nblocksy(tex->format, height);
1564
1565 width = nblks_x * templ_desc->block.width;
1566 height = nblks_y * templ_desc->block.height;
1567
1568 width0 = util_format_get_nblocksx(tex->format, width0);
1569 height0 = util_format_get_nblocksy(tex->format, height0);
1570 }
1571 }
1572
1573 return r600_create_surface_custom(pipe, tex, templ,
1574 width0, height0,
1575 width, height);
1576 }
1577
1578 static void r600_surface_destroy(struct pipe_context *pipe,
1579 struct pipe_surface *surface)
1580 {
1581 struct r600_surface *surf = (struct r600_surface*)surface;
1582 r600_resource_reference(&surf->cb_buffer_fmask, NULL);
1583 r600_resource_reference(&surf->cb_buffer_cmask, NULL);
1584 pipe_resource_reference(&surface->texture, NULL);
1585 FREE(surface);
1586 }
1587
static void r600_clear_texture(struct pipe_context *pipe,
			       struct pipe_resource *tex,
			       unsigned level,
			       const struct pipe_box *box,
			       const void *data)
{
	struct pipe_screen *screen = pipe->screen;
	struct r600_texture *rtex = (struct r600_texture*)tex;
	struct pipe_surface tmpl = {{0}};
	struct pipe_surface *sf;
	const struct util_format_description *desc =
		util_format_description(tex->format);

	tmpl.format = tex->format;
	tmpl.u.tex.first_layer = box->z;
	tmpl.u.tex.last_layer = box->z + box->depth - 1;
	tmpl.u.tex.level = level;
	sf = pipe->create_surface(pipe, tex, &tmpl);
	if (!sf)
		return;

	if (rtex->is_depth) {
		unsigned clear;
		float depth;
		uint8_t stencil = 0;

		/* Depth is always present. */
		clear = PIPE_CLEAR_DEPTH;
		desc->unpack_z_float(&depth, 0, data, 0, 1, 1);

		if (rtex->surface.has_stencil) {
			clear |= PIPE_CLEAR_STENCIL;
			desc->unpack_s_8uint(&stencil, 0, data, 0, 1, 1);
		}

		pipe->clear_depth_stencil(pipe, sf, clear, depth, stencil,
					  box->x, box->y,
					  box->width, box->height, false);
	} else {
		union pipe_color_union color;

		/* pipe_color_union requires the full vec4 representation. */
		if (util_format_is_pure_uint(tex->format))
			desc->unpack_rgba_uint(color.ui, 0, data, 0, 1, 1);
		else if (util_format_is_pure_sint(tex->format))
			desc->unpack_rgba_sint(color.i, 0, data, 0, 1, 1);
		else
			desc->unpack_rgba_float(color.f, 0, data, 0, 1, 1);

		if (screen->is_format_supported(screen, tex->format,
						tex->target, 0,
						PIPE_BIND_RENDER_TARGET)) {
			pipe->clear_render_target(pipe, sf, &color,
						  box->x, box->y,
						  box->width, box->height, false);
		} else {
			/* Software fallback - just for R9G9B9E5_FLOAT */
			util_clear_render_target(pipe, sf, &color,
						 box->x, box->y,
						 box->width, box->height);
		}
	}
	pipe_surface_reference(&sf, NULL);
}

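/* Translate a format's channel swizzle into one of the CB swap modes.
 * Two illustrative cases (derived from the switch below):
 *   PIPE_FORMAT_R8G8B8A8_UNORM (XYZW) -> V_0280A0_SWAP_STD
 *   PIPE_FORMAT_B8G8R8A8_UNORM (ZYXW) -> V_0280A0_SWAP_ALT
 * Returns ~0U for formats with no matching swap.
 */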
unsigned r600_translate_colorswap(enum pipe_format format, bool do_endian_swap)
{
	const struct util_format_description *desc = util_format_description(format);

#define HAS_SWIZZLE(chan,swz) (desc->swizzle[chan] == PIPE_SWIZZLE_##swz)

	if (format == PIPE_FORMAT_R11G11B10_FLOAT) /* isn't plain */
		return V_0280A0_SWAP_STD;

	if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN)
		return ~0U;

	switch (desc->nr_channels) {
	case 1:
		if (HAS_SWIZZLE(0,X))
			return V_0280A0_SWAP_STD; /* X___ */
		else if (HAS_SWIZZLE(3,X))
			return V_0280A0_SWAP_ALT_REV; /* ___X */
		break;
	case 2:
		if ((HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,Y)) ||
		    (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,NONE)) ||
		    (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,Y)))
			return V_0280A0_SWAP_STD; /* XY__ */
		else if ((HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,X)) ||
			 (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,NONE)) ||
			 (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,X)))
			/* YX__ */
			return (do_endian_swap ? V_0280A0_SWAP_STD : V_0280A0_SWAP_STD_REV);
		else if (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(3,Y))
			return V_0280A0_SWAP_ALT; /* X__Y */
		else if (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(3,X))
			return V_0280A0_SWAP_ALT_REV; /* Y__X */
		break;
	case 3:
		if (HAS_SWIZZLE(0,X))
			return (do_endian_swap ? V_0280A0_SWAP_STD_REV : V_0280A0_SWAP_STD);
		else if (HAS_SWIZZLE(0,Z))
			return V_0280A0_SWAP_STD_REV; /* ZYX */
		break;
	case 4:
		/* check the middle channels, the 1st and 4th channel can be NONE */
		if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,Z)) {
			return V_0280A0_SWAP_STD; /* XYZW */
		} else if (HAS_SWIZZLE(1,Z) && HAS_SWIZZLE(2,Y)) {
			return V_0280A0_SWAP_STD_REV; /* WZYX */
		} else if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,X)) {
			return V_0280A0_SWAP_ALT; /* ZYXW */
		} else if (HAS_SWIZZLE(1,Z) && HAS_SWIZZLE(2,W)) {
			/* YZWX */
			if (desc->is_array)
				return V_0280A0_SWAP_ALT_REV;
			else
				return (do_endian_swap ? V_0280A0_SWAP_ALT : V_0280A0_SWAP_ALT_REV);
		}
		break;
	}
	return ~0U;
}

/* FAST COLOR CLEAR */

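/* Pack the API clear color into the two 32-bit clear words kept in
 * rtex->color_clear_value. As a sketch: on little endian, clearing an
 * R8G8B8A8_UNORM surface to (1, 0, 0, 1) makes util_pack_color() produce
 * uc.ui[0] = 0xff0000ff (bytes R,G,B,A in memory order).
 */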
static void evergreen_set_clear_color(struct r600_texture *rtex,
				      enum pipe_format surface_format,
				      const union pipe_color_union *color)
{
	union util_color uc;

	memset(&uc, 0, sizeof(uc));

	if (rtex->surface.bpe == 16) {
		/* DCC fast clear only:
		 *   CLEAR_WORD0 = R = G = B
		 *   CLEAR_WORD1 = A
		 */
		assert(color->ui[0] == color->ui[1] &&
		       color->ui[0] == color->ui[2]);
		uc.ui[0] = color->ui[0];
		uc.ui[1] = color->ui[3];
	} else if (util_format_is_pure_uint(surface_format)) {
		util_format_write_4ui(surface_format, color->ui, 0, &uc, 0, 0, 0, 1, 1);
	} else if (util_format_is_pure_sint(surface_format)) {
		util_format_write_4i(surface_format, color->i, 0, &uc, 0, 0, 0, 1, 1);
	} else {
		util_pack_color(color->f, surface_format, &uc);
	}

	memcpy(rtex->color_clear_value, &uc, 2 * sizeof(uint32_t));
}

void evergreen_do_fast_color_clear(struct r600_common_context *rctx,
				   struct pipe_framebuffer_state *fb,
				   struct r600_atom *fb_state,
				   unsigned *buffers, ubyte *dirty_cbufs,
				   const union pipe_color_union *color)
{
	int i;

	/* This function is broken on big endian, so just disable this path
	 * for now. */
#ifdef PIPE_ARCH_BIG_ENDIAN
	return;
#endif

	if (rctx->render_cond)
		return;

	for (i = 0; i < fb->nr_cbufs; i++) {
		struct r600_texture *tex;
		unsigned clear_bit = PIPE_CLEAR_COLOR0 << i;

		if (!fb->cbufs[i])
			continue;

		/* if this colorbuffer is not being cleared */
		if (!(*buffers & clear_bit))
			continue;

		tex = (struct r600_texture *)fb->cbufs[i]->texture;

		/* the clear is allowed if all layers are bound */
		if (fb->cbufs[i]->u.tex.first_layer != 0 ||
		    fb->cbufs[i]->u.tex.last_layer != util_max_layer(&tex->resource.b.b, 0)) {
			continue;
		}

		/* cannot clear mipmapped textures */
		if (fb->cbufs[i]->texture->last_level != 0) {
			continue;
		}

		/* only supported on tiled surfaces */
		if (tex->surface.is_linear) {
			continue;
		}

		/* shared textures can't use fast clear without an explicit flush,
		 * because there is no way to communicate the clear color among
		 * all clients
		 */
		if (tex->resource.b.is_shared &&
		    !(tex->resource.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
			continue;

		/* 128-bit formats are unsupported */
		if (tex->surface.bpe > 8) {
			continue;
		}

		/* ensure CMASK is enabled */
		r600_texture_alloc_cmask_separate(rctx->screen, tex);
		if (tex->cmask.size == 0) {
			continue;
		}

		/* Do the fast clear: a zeroed CMASK marks the tiles as
		 * fast-cleared, so the CB can source the clear color set by
		 * evergreen_set_clear_color() below. */
		rctx->clear_buffer(&rctx->b, &tex->cmask_buffer->b.b,
				   tex->cmask.offset, tex->cmask.size, 0,
				   R600_COHERENCY_CB_META);

		bool need_compressed_update = !tex->dirty_level_mask;

		tex->dirty_level_mask |= 1 << fb->cbufs[i]->u.tex.level;

		if (need_compressed_update)
			p_atomic_inc(&rctx->screen->compressed_colortex_counter);

		evergreen_set_clear_color(tex, fb->cbufs[i]->format, color);

		if (dirty_cbufs)
			*dirty_cbufs |= 1 << i;
		rctx->set_atom_dirty(rctx, fb_state, true);
		*buffers &= ~clear_bit;
	}
}

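/* Memory object import (GL_EXT_memory_object and friends): wrap a winsys
 * handle in a pipe_memory_object so that textures can later be created
 * on top of it at an offset via resource_from_memobj.
 */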
static struct pipe_memory_object *
r600_memobj_from_handle(struct pipe_screen *screen,
			struct winsys_handle *whandle,
			bool dedicated)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct r600_memory_object *memobj = CALLOC_STRUCT(r600_memory_object);
	struct pb_buffer *buf = NULL;
	uint32_t stride, offset;

	if (!memobj)
		return NULL;

	buf = rscreen->ws->buffer_from_handle(rscreen->ws, whandle,
					      &stride, &offset);
	if (!buf) {
		FREE(memobj);
		return NULL;
	}

	memobj->b.dedicated = dedicated;
	memobj->buf = buf;
	memobj->stride = stride;
	memobj->offset = offset;

	return (struct pipe_memory_object *)memobj;
}

static void
r600_memobj_destroy(struct pipe_screen *screen,
		    struct pipe_memory_object *_memobj)
{
	struct r600_memory_object *memobj = (struct r600_memory_object *)_memobj;

	pb_reference(&memobj->buf, NULL);
	FREE(memobj);
}

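/* Create a texture backed by an imported memory object. For dedicated
 * allocations the tiling layout comes from the BO metadata; otherwise
 * the surface falls back to linear (see the comment in the else branch).
 */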
static struct pipe_resource *
r600_texture_from_memobj(struct pipe_screen *screen,
			 const struct pipe_resource *templ,
			 struct pipe_memory_object *_memobj,
			 uint64_t offset)
{
	int r;
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct r600_memory_object *memobj = (struct r600_memory_object *)_memobj;
	struct r600_texture *rtex;
	struct radeon_surf surface = {};
	struct radeon_bo_metadata metadata = {};
	enum radeon_surf_mode array_mode;
	bool is_scanout;
	struct pb_buffer *buf = NULL;

	if (memobj->b.dedicated) {
		rscreen->ws->buffer_get_metadata(memobj->buf, &metadata);
		r600_surface_import_metadata(rscreen, &surface, &metadata,
					     &array_mode, &is_scanout);
	} else {
		/* The BO metadata is unset for non-dedicated images, so fall
		 * back to linear. See the answer to question 5 of the
		 * VK_KHX_external_memory spec for some details.
		 *
		 * It is possible that this case won't work if the surface
		 * pitch isn't correctly aligned by default.
		 *
		 * Supporting it correctly would require multi-image metadata
		 * to be synchronized between radv and radeonsi. The semantics
		 * of associating multiple image metadata with a memory object
		 * on the Vulkan export side are not concretely defined
		 * either.
		 *
		 * All the use cases we are aware of at the moment for memory
		 * objects use dedicated allocations, so let's keep the
		 * initial implementation simple.
		 *
		 * A possible alternative is to attempt to reconstruct the
		 * tiling information when the TexParameter TEXTURE_TILING_EXT
		 * is set.
		 */
		array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
		is_scanout = false;
	}

	r = r600_init_surface(rscreen, &surface, templ,
			      array_mode, memobj->stride,
			      offset, true, is_scanout,
			      false);
	if (r)
		return NULL;

	rtex = r600_texture_create_object(screen, templ, memobj->buf, &surface);
	if (!rtex)
		return NULL;

	/* r600_texture_create_object doesn't increment the refcount of
	 * memobj->buf, so increment it here.
	 */
	pb_reference(&buf, memobj->buf);

	rtex->resource.b.is_shared = true;
	rtex->resource.external_usage = PIPE_HANDLE_USAGE_READ_WRITE;

	if (rscreen->apply_opaque_metadata)
		rscreen->apply_opaque_metadata(rscreen, rtex, &metadata);

	return &rtex->resource.b.b;
}

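/* Hook the implementations above into the screen and context vtables.
 * A minimal usage sketch, assuming the common r600 setup code calls
 * these during creation:
 *
 *   r600_init_screen_texture_functions(rscreen);  // once per screen
 *   r600_init_context_texture_functions(rctx);    // once per context
 */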
void r600_init_screen_texture_functions(struct r600_common_screen *rscreen)
{
	rscreen->b.resource_from_handle = r600_texture_from_handle;
	rscreen->b.resource_get_handle = r600_texture_get_handle;
	rscreen->b.resource_from_memobj = r600_texture_from_memobj;
	rscreen->b.memobj_create_from_handle = r600_memobj_from_handle;
	rscreen->b.memobj_destroy = r600_memobj_destroy;
}

void r600_init_context_texture_functions(struct r600_common_context *rctx)
{
	rctx->b.create_surface = r600_create_surface;
	rctx->b.surface_destroy = r600_surface_destroy;
	rctx->b.clear_texture = r600_clear_texture;
}