src/gallium/drivers/radeon/r600_texture.c
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Jerome Glisse
25 * Corbin Simpson
26 */
27 #include "r600_pipe_common.h"
28 #include "r600_cs.h"
29 #include "util/u_format.h"
30 #include "util/u_memory.h"
31 #include "util/u_pack_color.h"
32 #include <errno.h>
33 #include <inttypes.h>
34
35 /* Same as resource_copy_region, except that both upsampling and downsampling are allowed. */
36 static void r600_copy_region_with_blit(struct pipe_context *pipe,
37 struct pipe_resource *dst,
38 unsigned dst_level,
39 unsigned dstx, unsigned dsty, unsigned dstz,
40 struct pipe_resource *src,
41 unsigned src_level,
42 const struct pipe_box *src_box)
43 {
44 struct pipe_blit_info blit;
45
46 memset(&blit, 0, sizeof(blit));
47 blit.src.resource = src;
48 blit.src.format = src->format;
49 blit.src.level = src_level;
50 blit.src.box = *src_box;
51 blit.dst.resource = dst;
52 blit.dst.format = dst->format;
53 blit.dst.level = dst_level;
54 blit.dst.box.x = dstx;
55 blit.dst.box.y = dsty;
56 blit.dst.box.z = dstz;
57 blit.dst.box.width = src_box->width;
58 blit.dst.box.height = src_box->height;
59 blit.dst.box.depth = src_box->depth;
60 blit.mask = util_format_get_mask(src->format) &
61 util_format_get_mask(dst->format);
62 blit.filter = PIPE_TEX_FILTER_NEAREST;
63
64 if (blit.mask) {
65 pipe->blit(pipe, &blit);
66 }
67 }
68
69 /* Copy from a full GPU texture to a transfer's staging one. */
70 static void r600_copy_to_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
71 {
72 struct r600_common_context *rctx = (struct r600_common_context*)ctx;
73 struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
74 struct pipe_resource *dst = &rtransfer->staging->b.b;
75 struct pipe_resource *src = transfer->resource;
76
77 if (src->nr_samples > 1) {
78 r600_copy_region_with_blit(ctx, dst, 0, 0, 0, 0,
79 src, transfer->level, &transfer->box);
80 return;
81 }
82
83 rctx->dma_copy(ctx, dst, 0, 0, 0, 0, src, transfer->level,
84 &transfer->box);
85 }
86
87 /* Copy from a transfer's staging texture to a full GPU one. */
88 static void r600_copy_from_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
89 {
90 struct r600_common_context *rctx = (struct r600_common_context*)ctx;
91 struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
92 struct pipe_resource *dst = transfer->resource;
93 struct pipe_resource *src = &rtransfer->staging->b.b;
94 struct pipe_box sbox;
95
96 u_box_3d(0, 0, 0, transfer->box.width, transfer->box.height, transfer->box.depth, &sbox);
97
98 if (dst->nr_samples > 1) {
99 r600_copy_region_with_blit(ctx, dst, transfer->level,
100 transfer->box.x, transfer->box.y, transfer->box.z,
101 src, 0, &sbox);
102 return;
103 }
104
105 rctx->dma_copy(ctx, dst, transfer->level,
106 transfer->box.x, transfer->box.y, transfer->box.z,
107 src, 0, &sbox);
108 }
109
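/* Return the byte offset of the given box within a mip level, using the
 * format's block dimensions to convert pixel coordinates to block units. */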
110 static unsigned r600_texture_get_offset(struct r600_texture *rtex, unsigned level,
111 const struct pipe_box *box)
112 {
113 enum pipe_format format = rtex->resource.b.b.format;
114
115 return rtex->surface.level[level].offset +
116 box->z * rtex->surface.level[level].slice_size +
117 box->y / util_format_get_blockheight(format) * rtex->surface.level[level].pitch_bytes +
118 box->x / util_format_get_blockwidth(format) * util_format_get_blocksize(format);
119 }
120
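/* Fill in the radeon_surf description (dimensions, block size, type and
 * flags) from the resource template. Returns -EINVAL for unsupported
 * targets such as PIPE_BUFFER. */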
121 static int r600_init_surface(struct r600_common_screen *rscreen,
122 struct radeon_surf *surface,
123 const struct pipe_resource *ptex,
124 unsigned array_mode,
125 bool is_flushed_depth)
126 {
127 const struct util_format_description *desc =
128 util_format_description(ptex->format);
129 bool is_depth, is_stencil;
130
131 is_depth = util_format_has_depth(desc);
132 is_stencil = util_format_has_stencil(desc);
133
134 surface->npix_x = ptex->width0;
135 surface->npix_y = ptex->height0;
136 surface->npix_z = ptex->depth0;
137 surface->blk_w = util_format_get_blockwidth(ptex->format);
138 surface->blk_h = util_format_get_blockheight(ptex->format);
139 surface->blk_d = 1;
140 surface->array_size = 1;
141 surface->last_level = ptex->last_level;
142
143 if (rscreen->chip_class >= EVERGREEN && !is_flushed_depth &&
144 ptex->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
145 surface->bpe = 4; /* stencil is allocated separately on evergreen */
146 } else {
147 surface->bpe = util_format_get_blocksize(ptex->format);
148 /* align bytes per element to a dword */
149 if (surface->bpe == 3) {
150 surface->bpe = 4;
151 }
152 }
153
154 surface->nsamples = ptex->nr_samples ? ptex->nr_samples : 1;
155 surface->flags = RADEON_SURF_SET(array_mode, MODE);
156
157 switch (ptex->target) {
158 case PIPE_TEXTURE_1D:
159 surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_1D, TYPE);
160 break;
161 case PIPE_TEXTURE_RECT:
162 case PIPE_TEXTURE_2D:
163 surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D, TYPE);
164 break;
165 case PIPE_TEXTURE_3D:
166 surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_3D, TYPE);
167 break;
168 case PIPE_TEXTURE_1D_ARRAY:
169 surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_1D_ARRAY, TYPE);
170 surface->array_size = ptex->array_size;
171 break;
172 case PIPE_TEXTURE_2D_ARRAY:
173 case PIPE_TEXTURE_CUBE_ARRAY: /* cube array layout like 2d array */
174 surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D_ARRAY, TYPE);
175 surface->array_size = ptex->array_size;
176 break;
177 case PIPE_TEXTURE_CUBE:
178 surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_CUBEMAP, TYPE);
179 break;
180 case PIPE_BUFFER:
181 default:
182 return -EINVAL;
183 }
184 if (ptex->bind & PIPE_BIND_SCANOUT) {
185 surface->flags |= RADEON_SURF_SCANOUT;
186 }
187
188 if (!is_flushed_depth && is_depth) {
189 surface->flags |= RADEON_SURF_ZBUFFER;
190
191 if (is_stencil) {
192 surface->flags |= RADEON_SURF_SBUFFER |
193 RADEON_SURF_HAS_SBUFFER_MIPTREE;
194 }
195 }
196 if (rscreen->chip_class >= SI) {
197 surface->flags |= RADEON_SURF_HAS_TILE_MODE_INDEX;
198 }
199 return 0;
200 }
201
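/* Ask the winsys to compute the surface layout, then apply an optional
 * pitch override (used when importing buffers with a fixed stride). */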
202 static int r600_setup_surface(struct pipe_screen *screen,
203 struct r600_texture *rtex,
204 unsigned pitch_in_bytes_override)
205 {
206 struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
207 int r;
208
209 r = rscreen->ws->surface_init(rscreen->ws, &rtex->surface);
210 if (r) {
211 return r;
212 }
213
214 rtex->size = rtex->surface.bo_size;
215
216 if (pitch_in_bytes_override && pitch_in_bytes_override != rtex->surface.level[0].pitch_bytes) {
217 /* the old DDX driver on Evergreen overestimates the alignment for 1D-tiled
218 * surfaces, which only have one level
219 */
220 rtex->surface.level[0].nblk_x = pitch_in_bytes_override / rtex->surface.bpe;
221 rtex->surface.level[0].pitch_bytes = pitch_in_bytes_override;
222 rtex->surface.level[0].slice_size = pitch_in_bytes_override * rtex->surface.level[0].nblk_y;
223 if (rtex->surface.flags & RADEON_SURF_SBUFFER) {
224 rtex->surface.stencil_offset =
225 rtex->surface.stencil_level[0].offset = rtex->surface.level[0].slice_size;
226 }
227 }
228 return 0;
229 }
230
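/* Export a texture: push the tiling parameters to the winsys and return
 * a handle to the backing buffer. */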
231 static boolean r600_texture_get_handle(struct pipe_screen* screen,
232 struct pipe_resource *ptex,
233 struct winsys_handle *whandle)
234 {
235 struct r600_texture *rtex = (struct r600_texture*)ptex;
236 struct r600_resource *resource = &rtex->resource;
237 struct radeon_surf *surface = &rtex->surface;
238 struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
239
240 rscreen->ws->buffer_set_tiling(resource->buf,
241 NULL,
242 surface->level[0].mode >= RADEON_SURF_MODE_1D ?
243 RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR,
244 surface->level[0].mode >= RADEON_SURF_MODE_2D ?
245 RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR,
246 surface->pipe_config,
247 surface->bankw, surface->bankh,
248 surface->tile_split,
249 surface->stencil_tile_split,
250 surface->mtilea, surface->num_banks,
251 surface->level[0].pitch_bytes,
252 (surface->flags & RADEON_SURF_SCANOUT) != 0);
253
254 return rscreen->ws->buffer_get_handle(resource->buf,
255 surface->level[0].pitch_bytes, whandle);
256 }
257
258 static void r600_texture_destroy(struct pipe_screen *screen,
259 struct pipe_resource *ptex)
260 {
261 struct r600_texture *rtex = (struct r600_texture*)ptex;
262 struct r600_resource *resource = &rtex->resource;
263
264 if (rtex->flushed_depth_texture)
265 pipe_resource_reference((struct pipe_resource **)&rtex->flushed_depth_texture, NULL);
266
267 pipe_resource_reference((struct pipe_resource**)&rtex->htile_buffer, NULL);
268 if (rtex->cmask_buffer != &rtex->resource) {
269 pipe_resource_reference((struct pipe_resource**)&rtex->cmask_buffer, NULL);
270 }
271 pipe_resource_reference((struct pipe_resource**)&rtex->dcc_buffer, NULL);
272 pb_reference(&resource->buf, NULL);
273 FREE(rtex);
274 }
275
276 static const struct u_resource_vtbl r600_texture_vtbl;
277
278 /* The number of samples can be specified independently of the texture. */
279 void r600_texture_get_fmask_info(struct r600_common_screen *rscreen,
280 struct r600_texture *rtex,
281 unsigned nr_samples,
282 struct r600_fmask_info *out)
283 {
284 /* FMASK is allocated like an ordinary texture. */
285 struct radeon_surf fmask = rtex->surface;
286
287 memset(out, 0, sizeof(*out));
288
289 fmask.bo_alignment = 0;
290 fmask.bo_size = 0;
291 fmask.nsamples = 1;
292 fmask.flags |= RADEON_SURF_FMASK;
293
294 /* Force 2D tiling if it wasn't set. This may occur when creating
295 * FMASK for MSAA resolve on R6xx. On R6xx, the single-sample
296 * destination buffer must have an FMASK too. */
297 fmask.flags = RADEON_SURF_CLR(fmask.flags, MODE);
298 fmask.flags |= RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE);
299
300 if (rscreen->chip_class >= SI) {
301 fmask.flags |= RADEON_SURF_HAS_TILE_MODE_INDEX;
302 }
303
304 switch (nr_samples) {
305 case 2:
306 case 4:
307 fmask.bpe = 1;
308 if (rscreen->chip_class <= CAYMAN) {
309 fmask.bankh = 4;
310 }
311 break;
312 case 8:
313 fmask.bpe = 4;
314 break;
315 default:
316 R600_ERR("Invalid sample count for FMASK allocation.\n");
317 return;
318 }
319
320 /* Overallocate FMASK on R600-R700 to work around colorbuffer corruption.
321 * A proper fix would be a separate FMASK allocator written specifically
322 * for R600-R700 ASICs. */
323 if (rscreen->chip_class <= R700) {
324 fmask.bpe *= 2;
325 }
326
327 if (rscreen->ws->surface_init(rscreen->ws, &fmask)) {
328 R600_ERR("Got error in surface_init while allocating FMASK.\n");
329 return;
330 }
331
332 assert(fmask.level[0].mode == RADEON_SURF_MODE_2D);
333
334 out->slice_tile_max = (fmask.level[0].nblk_x * fmask.level[0].nblk_y) / 64;
335 if (out->slice_tile_max)
336 out->slice_tile_max -= 1;
337
338 out->tile_mode_index = fmask.tiling_index[0];
339 out->pitch = fmask.level[0].nblk_x;
340 out->bank_height = fmask.bankh;
341 out->alignment = MAX2(256, fmask.bo_alignment);
342 out->size = fmask.bo_size;
343 }
344
345 static void r600_texture_allocate_fmask(struct r600_common_screen *rscreen,
346 struct r600_texture *rtex)
347 {
348 r600_texture_get_fmask_info(rscreen, rtex,
349 rtex->resource.b.b.nr_samples, &rtex->fmask);
350
351 rtex->fmask.offset = align(rtex->size, rtex->fmask.alignment);
352 rtex->size = rtex->fmask.offset + rtex->fmask.size;
353 }
354
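/* Compute CMASK size and alignment for R600-Cayman. The macrotile
 * dimensions are derived from the CMASK cache size and the number of pipes. */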
355 void r600_texture_get_cmask_info(struct r600_common_screen *rscreen,
356 struct r600_texture *rtex,
357 struct r600_cmask_info *out)
358 {
359 unsigned cmask_tile_width = 8;
360 unsigned cmask_tile_height = 8;
361 unsigned cmask_tile_elements = cmask_tile_width * cmask_tile_height;
362 unsigned element_bits = 4;
363 unsigned cmask_cache_bits = 1024;
364 unsigned num_pipes = rscreen->tiling_info.num_channels;
365 unsigned pipe_interleave_bytes = rscreen->tiling_info.group_bytes;
366
367 unsigned elements_per_macro_tile = (cmask_cache_bits / element_bits) * num_pipes;
368 unsigned pixels_per_macro_tile = elements_per_macro_tile * cmask_tile_elements;
369 unsigned sqrt_pixels_per_macro_tile = sqrt(pixels_per_macro_tile);
370 unsigned macro_tile_width = util_next_power_of_two(sqrt_pixels_per_macro_tile);
371 unsigned macro_tile_height = pixels_per_macro_tile / macro_tile_width;
372
373 unsigned pitch_elements = align(rtex->surface.npix_x, macro_tile_width);
374 unsigned height = align(rtex->surface.npix_y, macro_tile_height);
375
376 unsigned base_align = num_pipes * pipe_interleave_bytes;
377 unsigned slice_bytes =
378 ((pitch_elements * height * element_bits + 7) / 8) / cmask_tile_elements;
379
380 assert(macro_tile_width % 128 == 0);
381 assert(macro_tile_height % 128 == 0);
382
383 out->slice_tile_max = ((pitch_elements * height) / (128*128)) - 1;
384 out->alignment = MAX2(256, base_align);
385 out->size = (util_max_layer(&rtex->resource.b.b, 0) + 1) *
386 align(slice_bytes, base_align);
387 }
388
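/* SI+ variant: the CMASK cache-line dimensions depend only on the number
 * of pipes; each 8x8 pixel tile takes one nibble. */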
389 static void si_texture_get_cmask_info(struct r600_common_screen *rscreen,
390 struct r600_texture *rtex,
391 struct r600_cmask_info *out)
392 {
393 unsigned pipe_interleave_bytes = rscreen->tiling_info.group_bytes;
394 unsigned num_pipes = rscreen->tiling_info.num_channels;
395 unsigned cl_width, cl_height;
396
397 switch (num_pipes) {
398 case 2:
399 cl_width = 32;
400 cl_height = 16;
401 break;
402 case 4:
403 cl_width = 32;
404 cl_height = 32;
405 break;
406 case 8:
407 cl_width = 64;
408 cl_height = 32;
409 break;
410 case 16: /* Hawaii */
411 cl_width = 64;
412 cl_height = 64;
413 break;
414 default:
415 assert(0);
416 return;
417 }
418
419 unsigned base_align = num_pipes * pipe_interleave_bytes;
420
421 unsigned width = align(rtex->surface.npix_x, cl_width*8);
422 unsigned height = align(rtex->surface.npix_y, cl_height*8);
423 unsigned slice_elements = (width * height) / (8*8);
424
425 /* Each element of CMASK is a nibble. */
426 unsigned slice_bytes = slice_elements / 2;
427
428 out->slice_tile_max = (width * height) / (128*128);
429 if (out->slice_tile_max)
430 out->slice_tile_max -= 1;
431
432 out->alignment = MAX2(256, base_align);
433 out->size = (util_max_layer(&rtex->resource.b.b, 0) + 1) *
434 align(slice_bytes, base_align);
435 }
436
437 static void r600_texture_allocate_cmask(struct r600_common_screen *rscreen,
438 struct r600_texture *rtex)
439 {
440 if (rscreen->chip_class >= SI) {
441 si_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
442 } else {
443 r600_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
444 }
445
446 rtex->cmask.offset = align(rtex->size, rtex->cmask.alignment);
447 rtex->size = rtex->cmask.offset + rtex->cmask.size;
448
449 if (rscreen->chip_class >= SI)
450 rtex->cb_color_info |= SI_S_028C70_FAST_CLEAR(1);
451 else
452 rtex->cb_color_info |= EG_S_028C70_FAST_CLEAR(1);
453 }
454
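/* Allocate CMASK as a separate buffer; used when fast clear is enabled on
 * a texture that was created without CMASK. */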
455 static void r600_texture_alloc_cmask_separate(struct r600_common_screen *rscreen,
456 struct r600_texture *rtex)
457 {
458 if (rtex->cmask_buffer)
459 return;
460
461 assert(rtex->cmask.size == 0);
462
463 if (rscreen->chip_class >= SI) {
464 si_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
465 } else {
466 r600_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
467 }
468
469 rtex->cmask_buffer = (struct r600_resource *)
470 pipe_buffer_create(&rscreen->b, PIPE_BIND_CUSTOM,
471 PIPE_USAGE_DEFAULT, rtex->cmask.size);
472 if (rtex->cmask_buffer == NULL) {
473 rtex->cmask.size = 0;
474 return;
475 }
476
477 /* update colorbuffer state bits */
478 rtex->cmask.base_address_reg = rtex->cmask_buffer->gpu_address >> 8;
479
480 if (rscreen->chip_class >= SI)
481 rtex->cb_color_info |= SI_S_028C70_FAST_CLEAR(1);
482 else
483 rtex->cb_color_info |= EG_S_028C70_FAST_CLEAR(1);
484 }
485
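/* Allocate the DCC metadata buffer for VI+ as a separate resource and
 * clear its contents to 0xFFFFFFFF. */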
486 static void vi_texture_alloc_dcc_separate(struct r600_common_screen *rscreen,
487 struct r600_texture *rtex)
488 {
489 if (rscreen->debug_flags & DBG_NO_DCC)
490 return;
491
492 rtex->dcc_buffer = (struct r600_resource *)
493 r600_aligned_buffer_create(&rscreen->b, PIPE_BIND_CUSTOM,
494 PIPE_USAGE_DEFAULT, rtex->surface.dcc_size, rtex->surface.dcc_alignment);
495 if (rtex->dcc_buffer == NULL) {
496 return;
497 }
498
499 r600_screen_clear_buffer(rscreen, &rtex->dcc_buffer->b.b, 0, rtex->surface.dcc_size,
500 0xFFFFFFFF, true);
501
502 rtex->cb_color_info |= VI_S_028C70_DCC_ENABLE(1);
503 }
504
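/* Compute the HTILE buffer size, or return 0 when HTILE cannot be used
 * (old kernels, oversized R6xx surfaces, 1D tiling on CIK with old DRM). */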
505 static unsigned r600_texture_get_htile_size(struct r600_common_screen *rscreen,
506 struct r600_texture *rtex)
507 {
508 unsigned cl_width, cl_height, width, height;
509 unsigned slice_elements, slice_bytes, pipe_interleave_bytes, base_align;
510 unsigned num_pipes = rscreen->tiling_info.num_channels;
511
512 if (rscreen->chip_class <= EVERGREEN &&
513 rscreen->info.drm_major == 2 && rscreen->info.drm_minor < 26)
514 return 0;
515
516 /* HW bug on R6xx. */
517 if (rscreen->chip_class == R600 &&
518 (rtex->surface.level[0].npix_x > 7680 ||
519 rtex->surface.level[0].npix_y > 7680))
520 return 0;
521
522 /* HTILE is broken with 1D tiling on old kernels and CIK. */
523 if (rscreen->chip_class >= CIK &&
524 rtex->surface.level[0].mode == RADEON_SURF_MODE_1D &&
525 rscreen->info.drm_major == 2 && rscreen->info.drm_minor < 38)
526 return 0;
527
528 switch (num_pipes) {
529 case 1:
530 cl_width = 32;
531 cl_height = 16;
532 break;
533 case 2:
534 cl_width = 32;
535 cl_height = 32;
536 break;
537 case 4:
538 cl_width = 64;
539 cl_height = 32;
540 break;
541 case 8:
542 cl_width = 64;
543 cl_height = 64;
544 break;
545 case 16:
546 cl_width = 128;
547 cl_height = 64;
548 break;
549 default:
550 assert(0);
551 return 0;
552 }
553
554 width = align(rtex->surface.npix_x, cl_width * 8);
555 height = align(rtex->surface.npix_y, cl_height * 8);
556
557 slice_elements = (width * height) / (8 * 8);
558 slice_bytes = slice_elements * 4;
559
560 pipe_interleave_bytes = rscreen->tiling_info.group_bytes;
561 base_align = num_pipes * pipe_interleave_bytes;
562
563 return (util_max_layer(&rtex->resource.b.b, 0) + 1) *
564 align(slice_bytes, base_align);
565 }
566
567 static void r600_texture_allocate_htile(struct r600_common_screen *rscreen,
568 struct r600_texture *rtex)
569 {
570 unsigned htile_size = r600_texture_get_htile_size(rscreen, rtex);
571
572 if (!htile_size)
573 return;
574
575 rtex->htile_buffer = (struct r600_resource*)
576 pipe_buffer_create(&rscreen->b, PIPE_BIND_CUSTOM,
577 PIPE_USAGE_DEFAULT, htile_size);
578 if (rtex->htile_buffer == NULL) {
579 /* this is not a fatal error; we can keep rendering
580 * without the HTILE buffer */
581 R600_ERR("Failed to create buffer object for htile buffer.\n");
582 } else {
583 r600_screen_clear_buffer(rscreen, &rtex->htile_buffer->b.b, 0,
584 htile_size, 0, true);
585 }
586 }
587
588 /* Common processing for r600_texture_create and r600_texture_from_handle */
589 static struct r600_texture *
590 r600_texture_create_object(struct pipe_screen *screen,
591 const struct pipe_resource *base,
592 unsigned pitch_in_bytes_override,
593 struct pb_buffer *buf,
594 struct radeon_surf *surface)
595 {
596 struct r600_texture *rtex;
597 struct r600_resource *resource;
598 struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
599
600 rtex = CALLOC_STRUCT(r600_texture);
601 if (rtex == NULL)
602 return NULL;
603
604 resource = &rtex->resource;
605 resource->b.b = *base;
606 resource->b.vtbl = &r600_texture_vtbl;
607 pipe_reference_init(&resource->b.b.reference, 1);
608 resource->b.b.screen = screen;
609 rtex->pitch_override = pitch_in_bytes_override;
610
611 /* don't include stencil-only formats which we don't support for rendering */
612 rtex->is_depth = util_format_has_depth(util_format_description(rtex->resource.b.b.format));
613
614 rtex->surface = *surface;
615 if (r600_setup_surface(screen, rtex, pitch_in_bytes_override)) {
616 FREE(rtex);
617 return NULL;
618 }
619
620 /* Tiled depth textures utilize the non-displayable tile order.
621 * This must be done after r600_setup_surface.
622 * Applies to R600-Cayman. */
623 rtex->non_disp_tiling = rtex->is_depth && rtex->surface.level[0].mode >= RADEON_SURF_MODE_1D;
624
625 if (rtex->is_depth) {
626 if (!(base->flags & (R600_RESOURCE_FLAG_TRANSFER |
627 R600_RESOURCE_FLAG_FLUSHED_DEPTH)) &&
628 !(rscreen->debug_flags & DBG_NO_HYPERZ)) {
629
630 r600_texture_allocate_htile(rscreen, rtex);
631 }
632 } else {
633 if (base->nr_samples > 1) {
634 if (!buf) {
635 r600_texture_allocate_fmask(rscreen, rtex);
636 r600_texture_allocate_cmask(rscreen, rtex);
637 rtex->cmask_buffer = &rtex->resource;
638 }
639 if (!rtex->fmask.size || !rtex->cmask.size) {
640 FREE(rtex);
641 return NULL;
642 }
643 }
644 if (rtex->surface.dcc_size)
645 vi_texture_alloc_dcc_separate(rscreen, rtex);
646 }
647
648 /* Now create the backing buffer. */
649 if (!buf) {
650 if (!r600_init_resource(rscreen, resource, rtex->size,
651 rtex->surface.bo_alignment, TRUE)) {
652 FREE(rtex);
653 return NULL;
654 }
655 } else {
656 resource->buf = buf;
657 resource->cs_buf = rscreen->ws->buffer_get_cs_handle(buf);
658 resource->gpu_address = rscreen->ws->buffer_get_virtual_address(resource->cs_buf);
659 resource->domains = rscreen->ws->buffer_get_initial_domain(resource->cs_buf);
660 }
661
662 if (rtex->cmask.size) {
663 /* Initialize the cmask to 0xCC (= compressed state). */
664 r600_screen_clear_buffer(rscreen, &rtex->cmask_buffer->b.b,
665 rtex->cmask.offset, rtex->cmask.size,
666 0xCCCCCCCC, true);
667 }
668
669 /* Initialize the CMASK base register value. */
670 rtex->cmask.base_address_reg =
671 (rtex->resource.gpu_address + rtex->cmask.offset) >> 8;
672
673 if (rscreen->debug_flags & DBG_VM) {
674 fprintf(stderr, "VM start=0x%"PRIX64" end=0x%"PRIX64" | Texture %ix%ix%i, %i levels, %i samples, %s\n",
675 rtex->resource.gpu_address,
676 rtex->resource.gpu_address + rtex->resource.buf->size,
677 base->width0, base->height0, util_max_layer(base, 0)+1, base->last_level+1,
678 base->nr_samples ? base->nr_samples : 1, util_format_short_name(base->format));
679 }
680
681 if (rscreen->debug_flags & DBG_TEX ||
682 (rtex->resource.b.b.last_level > 0 && rscreen->debug_flags & DBG_TEXMIP)) {
683 printf("Texture: npix_x=%u, npix_y=%u, npix_z=%u, blk_w=%u, "
684 "blk_h=%u, blk_d=%u, array_size=%u, last_level=%u, "
685 "bpe=%u, nsamples=%u, flags=0x%x, %s\n",
686 rtex->surface.npix_x, rtex->surface.npix_y,
687 rtex->surface.npix_z, rtex->surface.blk_w,
688 rtex->surface.blk_h, rtex->surface.blk_d,
689 rtex->surface.array_size, rtex->surface.last_level,
690 rtex->surface.bpe, rtex->surface.nsamples,
691 rtex->surface.flags, util_format_short_name(base->format));
692 for (int i = 0; i <= rtex->surface.last_level; i++) {
693 printf(" L %i: offset=%"PRIu64", slice_size=%"PRIu64", npix_x=%u, "
694 "npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
695 "nblk_z=%u, pitch_bytes=%u, mode=%u\n",
696 i, rtex->surface.level[i].offset,
697 rtex->surface.level[i].slice_size,
698 u_minify(rtex->resource.b.b.width0, i),
699 u_minify(rtex->resource.b.b.height0, i),
700 u_minify(rtex->resource.b.b.depth0, i),
701 rtex->surface.level[i].nblk_x,
702 rtex->surface.level[i].nblk_y,
703 rtex->surface.level[i].nblk_z,
704 rtex->surface.level[i].pitch_bytes,
705 rtex->surface.level[i].mode);
706 }
707 if (rtex->surface.flags & RADEON_SURF_SBUFFER) {
708 for (int i = 0; i <= rtex->surface.last_level; i++) {
709 printf(" S %i: offset=%"PRIu64", slice_size=%"PRIu64", npix_x=%u, "
710 "npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
711 "nblk_z=%u, pitch_bytes=%u, mode=%u\n",
712 i, rtex->surface.stencil_level[i].offset,
713 rtex->surface.stencil_level[i].slice_size,
714 u_minify(rtex->resource.b.b.width0, i),
715 u_minify(rtex->resource.b.b.height0, i),
716 u_minify(rtex->resource.b.b.depth0, i),
717 rtex->surface.stencil_level[i].nblk_x,
718 rtex->surface.stencil_level[i].nblk_y,
719 rtex->surface.stencil_level[i].nblk_z,
720 rtex->surface.stencil_level[i].pitch_bytes,
721 rtex->surface.stencil_level[i].mode);
722 }
723 }
724 }
725 return rtex;
726 }
727
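/* Choose the array mode (linear, 1D or 2D tiled) for a new texture based
 * on its template, format and debug flags. */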
728 static unsigned r600_choose_tiling(struct r600_common_screen *rscreen,
729 const struct pipe_resource *templ)
730 {
731 const struct util_format_description *desc = util_format_description(templ->format);
732 bool force_tiling = templ->flags & R600_RESOURCE_FLAG_FORCE_TILING;
733
734 /* MSAA resources must be 2D tiled. */
735 if (templ->nr_samples > 1)
736 return RADEON_SURF_MODE_2D;
737
738 /* Transfer resources should be linear. */
739 if (templ->flags & R600_RESOURCE_FLAG_TRANSFER)
740 return RADEON_SURF_MODE_LINEAR_ALIGNED;
741
742 /* r600g: force tiling on TEXTURE_2D and TEXTURE_3D compute resources. */
743 if (rscreen->chip_class >= R600 && rscreen->chip_class <= CAYMAN &&
744 (templ->bind & PIPE_BIND_COMPUTE_RESOURCE) &&
745 (templ->target == PIPE_TEXTURE_2D ||
746 templ->target == PIPE_TEXTURE_3D))
747 force_tiling = true;
748
749 /* Handle common candidates for the linear mode.
750 * Compressed textures must always be tiled. */
751 if (!force_tiling && !util_format_is_compressed(templ->format)) {
752 /* Not everything can be linear, so we cannot enforce it
753 * for all textures. */
754 if ((rscreen->debug_flags & DBG_NO_TILING) &&
755 (!util_format_is_depth_or_stencil(templ->format) ||
756 !(templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH)))
757 return RADEON_SURF_MODE_LINEAR_ALIGNED;
758
759 /* Tiling doesn't work with the 422 (SUBSAMPLED) formats on R600+. */
760 if (desc->layout == UTIL_FORMAT_LAYOUT_SUBSAMPLED)
761 return RADEON_SURF_MODE_LINEAR_ALIGNED;
762
763 /* Cursors are linear on SI.
764 * (XXX double-check, maybe also use RADEON_SURF_SCANOUT) */
765 if (rscreen->chip_class >= SI &&
766 (templ->bind & PIPE_BIND_CURSOR))
767 return RADEON_SURF_MODE_LINEAR_ALIGNED;
768
769 if (templ->bind & PIPE_BIND_LINEAR)
770 return RADEON_SURF_MODE_LINEAR_ALIGNED;
771
772 /* Textures with a very small height are best kept linear. */
773 if (templ->target == PIPE_TEXTURE_1D ||
774 templ->target == PIPE_TEXTURE_1D_ARRAY ||
775 templ->height0 <= 4)
776 return RADEON_SURF_MODE_LINEAR_ALIGNED;
777
778 /* Textures likely to be mapped often. */
779 if (templ->usage == PIPE_USAGE_STAGING ||
780 templ->usage == PIPE_USAGE_STREAM)
781 return RADEON_SURF_MODE_LINEAR_ALIGNED;
782 }
783
784 /* Make small textures 1D tiled. */
785 if (templ->width0 <= 16 || templ->height0 <= 16 ||
786 (rscreen->debug_flags & DBG_NO_2D_TILING))
787 return RADEON_SURF_MODE_1D;
788
789 /* The allocator will switch to 1D if needed. */
790 return RADEON_SURF_MODE_2D;
791 }
792
793 struct pipe_resource *r600_texture_create(struct pipe_screen *screen,
794 const struct pipe_resource *templ)
795 {
796 struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
797 struct radeon_surf surface = {0};
798 int r;
799
800 r = r600_init_surface(rscreen, &surface, templ,
801 r600_choose_tiling(rscreen, templ),
802 templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH);
803 if (r) {
804 return NULL;
805 }
806 r = rscreen->ws->surface_best(rscreen->ws, &surface);
807 if (r) {
808 return NULL;
809 }
810 return (struct pipe_resource *)r600_texture_create_object(screen, templ,
811 0, NULL, &surface);
812 }
813
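/* Import a 2D, non-mipmapped texture from a winsys handle, taking the
 * tiling parameters from the imported buffer. */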
814 static struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen,
815 const struct pipe_resource *templ,
816 struct winsys_handle *whandle)
817 {
818 struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
819 struct pb_buffer *buf = NULL;
820 unsigned stride = 0;
821 unsigned array_mode;
822 enum radeon_bo_layout micro, macro;
823 struct radeon_surf surface;
824 bool scanout;
825 int r;
826
827 /* Support only 2D textures without mipmaps */
828 if ((templ->target != PIPE_TEXTURE_2D && templ->target != PIPE_TEXTURE_RECT) ||
829 templ->depth0 != 1 || templ->last_level != 0)
830 return NULL;
831
832 buf = rscreen->ws->buffer_from_handle(rscreen->ws, whandle, &stride);
833 if (!buf)
834 return NULL;
835
836 rscreen->ws->buffer_get_tiling(buf, &micro, &macro,
837 &surface.bankw, &surface.bankh,
838 &surface.tile_split,
839 &surface.stencil_tile_split,
840 &surface.mtilea, &scanout);
841
842 if (macro == RADEON_LAYOUT_TILED)
843 array_mode = RADEON_SURF_MODE_2D;
844 else if (micro == RADEON_LAYOUT_TILED)
845 array_mode = RADEON_SURF_MODE_1D;
846 else
847 array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
848
849 r = r600_init_surface(rscreen, &surface, templ, array_mode, false);
850 if (r) {
851 return NULL;
852 }
853
854 if (scanout)
855 surface.flags |= RADEON_SURF_SCANOUT;
856
857 return (struct pipe_resource *)r600_texture_create_object(screen, templ,
858 stride, buf, &surface);
859 }
860
861 bool r600_init_flushed_depth_texture(struct pipe_context *ctx,
862 struct pipe_resource *texture,
863 struct r600_texture **staging)
864 {
865 struct r600_texture *rtex = (struct r600_texture*)texture;
866 struct pipe_resource resource;
867 struct r600_texture **flushed_depth_texture = staging ?
868 staging : &rtex->flushed_depth_texture;
869
870 if (!staging && rtex->flushed_depth_texture)
871 return true; /* it's ready */
872
873 resource.target = texture->target;
874 resource.format = texture->format;
875 resource.width0 = texture->width0;
876 resource.height0 = texture->height0;
877 resource.depth0 = texture->depth0;
878 resource.array_size = texture->array_size;
879 resource.last_level = texture->last_level;
880 resource.nr_samples = texture->nr_samples;
881 resource.usage = staging ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
882 resource.bind = texture->bind & ~PIPE_BIND_DEPTH_STENCIL;
883 resource.flags = texture->flags | R600_RESOURCE_FLAG_FLUSHED_DEPTH;
884
885 if (staging)
886 resource.flags |= R600_RESOURCE_FLAG_TRANSFER;
887
888 *flushed_depth_texture = (struct r600_texture *)ctx->screen->resource_create(ctx->screen, &resource);
889 if (*flushed_depth_texture == NULL) {
890 R600_ERR("failed to create temporary texture to hold flushed depth\n");
891 return false;
892 }
893
894 (*flushed_depth_texture)->is_flushing_texture = TRUE;
895 (*flushed_depth_texture)->non_disp_tiling = false;
896 return true;
897 }
898
899 /**
900 * Initialize the pipe_resource descriptor to be of the same size as the box,
901 * which is supposed to hold a subregion of the texture "orig" at the given
902 * mipmap level.
903 */
904 static void r600_init_temp_resource_from_box(struct pipe_resource *res,
905 struct pipe_resource *orig,
906 const struct pipe_box *box,
907 unsigned level, unsigned flags)
908 {
909 memset(res, 0, sizeof(*res));
910 res->format = orig->format;
911 res->width0 = box->width;
912 res->height0 = box->height;
913 res->depth0 = 1;
914 res->array_size = 1;
915 res->usage = flags & R600_RESOURCE_FLAG_TRANSFER ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
916 res->flags = flags;
917
918 /* We must set the correct texture target and dimensions for a 3D box. */
919 if (box->depth > 1 && util_max_layer(orig, level) > 0)
920 res->target = orig->target;
921 else
922 res->target = PIPE_TEXTURE_2D;
923
924 switch (res->target) {
925 case PIPE_TEXTURE_1D_ARRAY:
926 case PIPE_TEXTURE_2D_ARRAY:
927 case PIPE_TEXTURE_CUBE_ARRAY:
928 res->array_size = box->depth;
929 break;
930 case PIPE_TEXTURE_3D:
931 res->depth0 = box->depth;
932 break;
933 default:;
934 }
935 }
936
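/* Map a texture for CPU access. Tiled or busy resources (and depth
 * buffers that need decompression) go through a staging texture. */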
937 static void *r600_texture_transfer_map(struct pipe_context *ctx,
938 struct pipe_resource *texture,
939 unsigned level,
940 unsigned usage,
941 const struct pipe_box *box,
942 struct pipe_transfer **ptransfer)
943 {
944 struct r600_common_context *rctx = (struct r600_common_context*)ctx;
945 struct r600_texture *rtex = (struct r600_texture*)texture;
946 struct r600_transfer *trans;
947 boolean use_staging_texture = FALSE;
948 struct r600_resource *buf;
949 unsigned offset = 0;
950 char *map;
951
952 /* We cannot map a tiled texture directly because the data is
953 * in a different order, therefore we do detiling using a blit.
954 *
955 * Also, use a temporary in GTT memory for read transfers, as
956 * the CPU is much happier reading out of cached system memory
957 * than uncached VRAM.
958 */
959 if (rtex->surface.level[0].mode >= RADEON_SURF_MODE_1D) {
960 use_staging_texture = TRUE;
961 } else if ((usage & PIPE_TRANSFER_READ) && !(usage & PIPE_TRANSFER_MAP_DIRECTLY) &&
962 (rtex->resource.domains == RADEON_DOMAIN_VRAM)) {
963 /* Untiled buffers in VRAM, which are slow for CPU reads */
964 use_staging_texture = TRUE;
965 } else if (!(usage & PIPE_TRANSFER_READ) &&
966 (r600_rings_is_buffer_referenced(rctx, rtex->resource.cs_buf, RADEON_USAGE_READWRITE) ||
967 !rctx->ws->buffer_wait(rtex->resource.buf, 0, RADEON_USAGE_READWRITE))) {
968 /* Use a staging texture for uploads if the underlying BO is busy. */
969 use_staging_texture = TRUE;
970 }
971
972 if (texture->flags & R600_RESOURCE_FLAG_TRANSFER) {
973 use_staging_texture = FALSE;
974 }
975
976 if (use_staging_texture && (usage & PIPE_TRANSFER_MAP_DIRECTLY)) {
977 return NULL;
978 }
979
980 trans = CALLOC_STRUCT(r600_transfer);
981 if (trans == NULL)
982 return NULL;
983 trans->transfer.resource = texture;
984 trans->transfer.level = level;
985 trans->transfer.usage = usage;
986 trans->transfer.box = *box;
987
988 if (rtex->is_depth) {
989 struct r600_texture *staging_depth;
990
991 if (rtex->resource.b.b.nr_samples > 1) {
992 /* MSAA depth buffers need to be converted to single sample buffers.
993 *
994 * Mapping MSAA depth buffers can occur if ReadPixels is called
995 * with a multisample GLX visual.
996 *
997 * First downsample the depth buffer to a temporary texture,
998 * then decompress the temporary one to staging.
999 *
1000 * Only the region being mapped is transferred.
1001 */
1002 struct pipe_resource resource;
1003
1004 r600_init_temp_resource_from_box(&resource, texture, box, level, 0);
1005
1006 if (!r600_init_flushed_depth_texture(ctx, &resource, &staging_depth)) {
1007 R600_ERR("failed to create temporary texture to hold untiled copy\n");
1008 FREE(trans);
1009 return NULL;
1010 }
1011
1012 if (usage & PIPE_TRANSFER_READ) {
1013 struct pipe_resource *temp = ctx->screen->resource_create(ctx->screen, &resource);
1014 if (!temp) {
1015 R600_ERR("failed to create a temporary depth texture\n");
1016 FREE(trans);
1017 return NULL;
1018 }
1019
1020 r600_copy_region_with_blit(ctx, temp, 0, 0, 0, 0, texture, level, box);
1021 rctx->blit_decompress_depth(ctx, (struct r600_texture*)temp, staging_depth,
1022 0, 0, 0, box->depth, 0, 0);
1023 pipe_resource_reference((struct pipe_resource**)&temp, NULL);
1024 }
1025 }
1026 else {
1027 /* XXX: only readback the rectangle which is being mapped? */
1028 /* XXX: when discard is true, no need to read back from depth texture */
1029 if (!r600_init_flushed_depth_texture(ctx, texture, &staging_depth)) {
1030 R600_ERR("failed to create temporary texture to hold untiled copy\n");
1031 FREE(trans);
1032 return NULL;
1033 }
1034
1035 rctx->blit_decompress_depth(ctx, rtex, staging_depth,
1036 level, level,
1037 box->z, box->z + box->depth - 1,
1038 0, 0);
1039
1040 offset = r600_texture_get_offset(staging_depth, level, box);
1041 }
1042
1043 trans->transfer.stride = staging_depth->surface.level[level].pitch_bytes;
1044 trans->transfer.layer_stride = staging_depth->surface.level[level].slice_size;
1045 trans->staging = (struct r600_resource*)staging_depth;
1046 } else if (use_staging_texture) {
1047 struct pipe_resource resource;
1048 struct r600_texture *staging;
1049
1050 r600_init_temp_resource_from_box(&resource, texture, box, level,
1051 R600_RESOURCE_FLAG_TRANSFER);
1052 resource.usage = (usage & PIPE_TRANSFER_READ) ?
1053 PIPE_USAGE_STAGING : PIPE_USAGE_STREAM;
1054
1055 /* Create the temporary texture. */
1056 staging = (struct r600_texture*)ctx->screen->resource_create(ctx->screen, &resource);
1057 if (staging == NULL) {
1058 R600_ERR("failed to create temporary texture to hold untiled copy\n");
1059 FREE(trans);
1060 return NULL;
1061 }
1062 trans->staging = &staging->resource;
1063 trans->transfer.stride = staging->surface.level[0].pitch_bytes;
1064 trans->transfer.layer_stride = staging->surface.level[0].slice_size;
1065 if (usage & PIPE_TRANSFER_READ) {
1066 r600_copy_to_staging_texture(ctx, trans);
1067 }
1068 } else {
1069 /* the resource is mapped directly */
1070 trans->transfer.stride = rtex->surface.level[level].pitch_bytes;
1071 trans->transfer.layer_stride = rtex->surface.level[level].slice_size;
1072 offset = r600_texture_get_offset(rtex, level, box);
1073 }
1074
1075 if (trans->staging) {
1076 buf = trans->staging;
1077 if (!rtex->is_depth && !(usage & PIPE_TRANSFER_READ))
1078 usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
1079 } else {
1080 buf = &rtex->resource;
1081 }
1082
1083 if (!(map = r600_buffer_map_sync_with_rings(rctx, buf, usage))) {
1084 pipe_resource_reference((struct pipe_resource**)&trans->staging, NULL);
1085 FREE(trans);
1086 return NULL;
1087 }
1088
1089 *ptransfer = &trans->transfer;
1090 return map + offset;
1091 }
1092
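/* Flush a write transfer back from its staging texture to the real
 * resource and free the transfer. */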
1093 static void r600_texture_transfer_unmap(struct pipe_context *ctx,
1094 struct pipe_transfer* transfer)
1095 {
1096 struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
1097 struct pipe_resource *texture = transfer->resource;
1098 struct r600_texture *rtex = (struct r600_texture*)texture;
1099
1100 if ((transfer->usage & PIPE_TRANSFER_WRITE) && rtransfer->staging) {
1101 if (rtex->is_depth && rtex->resource.b.b.nr_samples <= 1) {
1102 ctx->resource_copy_region(ctx, texture, transfer->level,
1103 transfer->box.x, transfer->box.y, transfer->box.z,
1104 &rtransfer->staging->b.b, transfer->level,
1105 &transfer->box);
1106 } else {
1107 r600_copy_from_staging_texture(ctx, rtransfer);
1108 }
1109 }
1110
1111 if (rtransfer->staging)
1112 pipe_resource_reference((struct pipe_resource**)&rtransfer->staging, NULL);
1113
1114 FREE(transfer);
1115 }
1116
1117 static const struct u_resource_vtbl r600_texture_vtbl =
1118 {
1119 NULL, /* get_handle */
1120 r600_texture_destroy, /* resource_destroy */
1121 r600_texture_transfer_map, /* transfer_map */
1122 u_default_transfer_flush_region, /* transfer_flush_region */
1123 r600_texture_transfer_unmap, /* transfer_unmap */
1124 NULL /* transfer_inline_write */
1125 };
1126
1127 struct pipe_surface *r600_create_surface_custom(struct pipe_context *pipe,
1128 struct pipe_resource *texture,
1129 const struct pipe_surface *templ,
1130 unsigned width, unsigned height)
1131 {
1132 struct r600_surface *surface = CALLOC_STRUCT(r600_surface);
1133
1134 if (surface == NULL)
1135 return NULL;
1136
1137 assert(templ->u.tex.first_layer <= util_max_layer(texture, templ->u.tex.level));
1138 assert(templ->u.tex.last_layer <= util_max_layer(texture, templ->u.tex.level));
1139
1140 pipe_reference_init(&surface->base.reference, 1);
1141 pipe_resource_reference(&surface->base.texture, texture);
1142 surface->base.context = pipe;
1143 surface->base.format = templ->format;
1144 surface->base.width = width;
1145 surface->base.height = height;
1146 surface->base.u = templ->u;
1147 return &surface->base;
1148 }
1149
1150 static struct pipe_surface *r600_create_surface(struct pipe_context *pipe,
1151 struct pipe_resource *tex,
1152 const struct pipe_surface *templ)
1153 {
1154 unsigned level = templ->u.tex.level;
1155
1156 return r600_create_surface_custom(pipe, tex, templ,
1157 u_minify(tex->width0, level),
1158 u_minify(tex->height0, level));
1159 }
1160
1161 static void r600_surface_destroy(struct pipe_context *pipe,
1162 struct pipe_surface *surface)
1163 {
1164 struct r600_surface *surf = (struct r600_surface*)surface;
1165 pipe_resource_reference((struct pipe_resource**)&surf->cb_buffer_fmask, NULL);
1166 pipe_resource_reference((struct pipe_resource**)&surf->cb_buffer_cmask, NULL);
1167 pipe_resource_reference(&surface->texture, NULL);
1168 FREE(surface);
1169 }
1170
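/* Map a plain format's swizzle to one of the four CB channel-swap modes;
 * returns ~0U if there is no match. */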
1171 unsigned r600_translate_colorswap(enum pipe_format format)
1172 {
1173 const struct util_format_description *desc = util_format_description(format);
1174
1175 #define HAS_SWIZZLE(chan,swz) (desc->swizzle[chan] == UTIL_FORMAT_SWIZZLE_##swz)
1176
1177 if (format == PIPE_FORMAT_R11G11B10_FLOAT) /* isn't plain */
1178 return V_0280A0_SWAP_STD;
1179
1180 if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN)
1181 return ~0U;
1182
1183 switch (desc->nr_channels) {
1184 case 1:
1185 if (HAS_SWIZZLE(0,X))
1186 return V_0280A0_SWAP_STD; /* X___ */
1187 else if (HAS_SWIZZLE(3,X))
1188 return V_0280A0_SWAP_ALT_REV; /* ___X */
1189 break;
1190 case 2:
1191 if ((HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,Y)) ||
1192 (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,NONE)) ||
1193 (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,Y)))
1194 return V_0280A0_SWAP_STD; /* XY__ */
1195 else if ((HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,X)) ||
1196 (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,NONE)) ||
1197 (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,X)))
1198 return V_0280A0_SWAP_STD_REV; /* YX__ */
1199 else if (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(3,Y))
1200 return V_0280A0_SWAP_ALT; /* X__Y */
1201 else if (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(3,X))
1202 return V_0280A0_SWAP_ALT_REV; /* Y__X */
1203 break;
1204 case 3:
1205 if (HAS_SWIZZLE(0,X))
1206 return V_0280A0_SWAP_STD; /* XYZ */
1207 else if (HAS_SWIZZLE(0,Z))
1208 return V_0280A0_SWAP_STD_REV; /* ZYX */
1209 break;
1210 case 4:
1211 /* check the middle channels, the 1st and 4th channel can be NONE */
1212 if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,Z))
1213 return V_0280A0_SWAP_STD; /* XYZW */
1214 else if (HAS_SWIZZLE(1,Z) && HAS_SWIZZLE(2,Y))
1215 return V_0280A0_SWAP_STD_REV; /* WZYX */
1216 else if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,X))
1217 return V_0280A0_SWAP_ALT; /* ZYXW */
1218 else if (HAS_SWIZZLE(1,X) && HAS_SWIZZLE(2,Y))
1219 return V_0280A0_SWAP_ALT_REV; /* WXYZ */
1220 break;
1221 }
1222 return ~0U;
1223 }
1224
1225 static void evergreen_set_clear_color(struct r600_texture *rtex,
1226 enum pipe_format surface_format,
1227 const union pipe_color_union *color)
1228 {
1229 union util_color uc;
1230
1231 memset(&uc, 0, sizeof(uc));
1232
1233 if (util_format_is_pure_uint(surface_format)) {
1234 util_format_write_4ui(surface_format, color->ui, 0, &uc, 0, 0, 0, 1, 1);
1235 } else if (util_format_is_pure_sint(surface_format)) {
1236 util_format_write_4i(surface_format, color->i, 0, &uc, 0, 0, 0, 1, 1);
1237 } else {
1238 util_pack_color(color->f, surface_format, &uc);
1239 }
1240
1241 memcpy(rtex->color_clear_value, &uc, 2 * sizeof(uint32_t));
1242 }
1243
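/* Compute the DCC reset value for a fast clear and report whether the
 * cleared levels must still be flagged dirty so the clear color gets
 * eliminated later. */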
1244 static void vi_get_fast_clear_parameters(enum pipe_format surface_format,
1245 const union pipe_color_union *color,
1246 uint32_t* reset_value,
1247 bool* clear_words_needed)
1248 {
1249 bool values[4] = {};
1250 int i;
1251 bool main_value = false;
1252 bool extra_value = false;
1253 int extra_channel;
1254 const struct util_format_description *desc = util_format_description(surface_format);
1255
1256 *clear_words_needed = true;
1257 *reset_value = 0x20202020U;
1258
1259 /* If we want to clear without needing a fast clear eliminate step, we
1260 * can set each channel to 0 or 1 (or 0/max for integer formats). We
1261 * have two sets of flags, one for the last or first channel (extra) and
1262 * one for the other channels (main).
1263 */
1264
1265 if (surface_format == PIPE_FORMAT_R11G11B10_FLOAT ||
1266 surface_format == PIPE_FORMAT_B5G6R5_UNORM ||
1267 surface_format == PIPE_FORMAT_B5G6R5_SRGB) {
1268 extra_channel = -1;
1269 } else if (desc->layout == UTIL_FORMAT_LAYOUT_PLAIN) {
1270 if (r600_translate_colorswap(surface_format) <= 1)
1271 extra_channel = desc->nr_channels - 1;
1272 else
1273 extra_channel = 0;
1274 } else
1275 return;
1276
1277 for (i = 0; i < 4; ++i) {
1278 int index = desc->swizzle[i] - UTIL_FORMAT_SWIZZLE_X;
1279
1280 if (desc->swizzle[i] < UTIL_FORMAT_SWIZZLE_X ||
1281 desc->swizzle[i] > UTIL_FORMAT_SWIZZLE_W)
1282 continue;
1283
1284 if (util_format_is_pure_sint(surface_format)) {
1285 values[i] = color->i[i] != 0;
1286 if (color->i[i] != 0 && color->i[i] != INT32_MAX)
1287 return;
1288 } else if (util_format_is_pure_uint(surface_format)) {
1289 values[i] = color->ui[i] != 0U;
1290 if (color->ui[i] != 0U && color->ui[i] != UINT32_MAX)
1291 return;
1292 } else {
1293 values[i] = color->f[i] != 0.0F;
1294 if (color->f[i] != 0.0F && color->f[i] != 1.0F)
1295 return;
1296 }
1297
1298 if (index == extra_channel)
1299 extra_value = values[i];
1300 else
1301 main_value = values[i];
1302 }
1303
1304 for (int i = 0; i < 4; ++i)
1305 if (values[i] != main_value &&
1306 desc->swizzle[i] - UTIL_FORMAT_SWIZZLE_X != extra_channel &&
1307 desc->swizzle[i] >= UTIL_FORMAT_SWIZZLE_X &&
1308 desc->swizzle[i] <= UTIL_FORMAT_SWIZZLE_W)
1309 return;
1310
1311 *clear_words_needed = false;
1312 if (main_value)
1313 *reset_value |= 0x80808080U;
1314
1315 if (extra_value)
1316 *reset_value |= 0x40404040U;
1317 }
1318
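/* Try to fast-clear each bound colorbuffer using CMASK or DCC; cleared
 * buffers are removed from *buffers so the caller skips them. */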
1319 void evergreen_do_fast_color_clear(struct r600_common_context *rctx,
1320 struct pipe_framebuffer_state *fb,
1321 struct r600_atom *fb_state,
1322 unsigned *buffers, unsigned *dirty_cbufs,
1323 const union pipe_color_union *color)
1324 {
1325 int i;
1326
1327 if (rctx->render_cond)
1328 return;
1329
1330 for (i = 0; i < fb->nr_cbufs; i++) {
1331 struct r600_texture *tex;
1332 unsigned clear_bit = PIPE_CLEAR_COLOR0 << i;
1333
1334 if (!fb->cbufs[i])
1335 continue;
1336
1337 /* if this colorbuffer is not being cleared */
1338 if (!(*buffers & clear_bit))
1339 continue;
1340
1341 tex = (struct r600_texture *)fb->cbufs[i]->texture;
1342
1343 /* 128-bit formats are unsupported */
1344 if (util_format_get_blocksizebits(fb->cbufs[i]->format) > 64) {
1345 continue;
1346 }
1347
1348 /* the clear is allowed if all layers are bound */
1349 if (fb->cbufs[i]->u.tex.first_layer != 0 ||
1350 fb->cbufs[i]->u.tex.last_layer != util_max_layer(&tex->resource.b.b, 0)) {
1351 continue;
1352 }
1353
1354 /* cannot clear mipmapped textures */
1355 if (fb->cbufs[i]->texture->last_level != 0) {
1356 continue;
1357 }
1358
1359 /* only supported on tiled surfaces */
1360 if (tex->surface.level[0].mode < RADEON_SURF_MODE_1D) {
1361 continue;
1362 }
1363
1364 /* fast color clear with 1D tiling doesn't work on old kernels and CIK */
1365 if (tex->surface.level[0].mode == RADEON_SURF_MODE_1D &&
1366 rctx->chip_class >= CIK &&
1367 rctx->screen->info.drm_major == 2 &&
1368 rctx->screen->info.drm_minor < 38) {
1369 continue;
1370 }
1371
1372 if (tex->dcc_buffer) {
1373 uint32_t reset_value;
1374 bool clear_words_needed;
1375
1376 if (rctx->screen->debug_flags & DBG_NO_DCC_CLEAR)
1377 continue;
1378
1379 vi_get_fast_clear_parameters(fb->cbufs[i]->format, color, &reset_value, &clear_words_needed);
1380
1381 rctx->clear_buffer(&rctx->b, &tex->dcc_buffer->b.b,
1382 0, tex->surface.dcc_size, reset_value, true);
1383
1384 if (clear_words_needed)
1385 tex->dirty_level_mask |= 1 << fb->cbufs[i]->u.tex.level;
1386 } else {
1387 /* ensure CMASK is enabled */
1388 r600_texture_alloc_cmask_separate(rctx->screen, tex);
1389 if (tex->cmask.size == 0) {
1390 continue;
1391 }
1392
1393 /* Do the fast clear. */
1394 rctx->clear_buffer(&rctx->b, &tex->cmask_buffer->b.b,
1395 tex->cmask.offset, tex->cmask.size, 0, true);
1396
1397 tex->dirty_level_mask |= 1 << fb->cbufs[i]->u.tex.level;
1398 }
1399
1400 evergreen_set_clear_color(tex, fb->cbufs[i]->format, color);
1401
1402 if (dirty_cbufs)
1403 *dirty_cbufs |= 1 << i;
1404 rctx->set_atom_dirty(rctx, fb_state, true);
1405 *buffers &= ~clear_bit;
1406 }
1407 }
1408
1409 void r600_init_screen_texture_functions(struct r600_common_screen *rscreen)
1410 {
1411 rscreen->b.resource_from_handle = r600_texture_from_handle;
1412 rscreen->b.resource_get_handle = r600_texture_get_handle;
1413 }
1414
1415 void r600_init_context_texture_functions(struct r600_common_context *rctx)
1416 {
1417 rctx->b.create_surface = r600_create_surface;
1418 rctx->b.surface_destroy = r600_surface_destroy;
1419 }