gallium/radeon: print more info about CMASK
src/gallium/drivers/radeon/r600_texture.c
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Jerome Glisse
 *      Corbin Simpson
 */
#include "r600_pipe_common.h"
#include "r600_cs.h"
#include "util/u_format.h"
#include "util/u_memory.h"
#include "util/u_pack_color.h"
#include <errno.h>
#include <inttypes.h>

/* Same as resource_copy_region, except that both upsampling and downsampling are allowed. */
static void r600_copy_region_with_blit(struct pipe_context *pipe,
                                       struct pipe_resource *dst,
                                       unsigned dst_level,
                                       unsigned dstx, unsigned dsty, unsigned dstz,
                                       struct pipe_resource *src,
                                       unsigned src_level,
                                       const struct pipe_box *src_box)
{
        struct pipe_blit_info blit;

        memset(&blit, 0, sizeof(blit));
        blit.src.resource = src;
        blit.src.format = src->format;
        blit.src.level = src_level;
        blit.src.box = *src_box;
        blit.dst.resource = dst;
        blit.dst.format = dst->format;
        blit.dst.level = dst_level;
        blit.dst.box.x = dstx;
        blit.dst.box.y = dsty;
        blit.dst.box.z = dstz;
        blit.dst.box.width = src_box->width;
        blit.dst.box.height = src_box->height;
        blit.dst.box.depth = src_box->depth;
        blit.mask = util_format_get_mask(src->format) &
                    util_format_get_mask(dst->format);
        blit.filter = PIPE_TEX_FILTER_NEAREST;

        if (blit.mask) {
                pipe->blit(pipe, &blit);
        }
}

/* Copy from a full GPU texture to a transfer's staging one. */
static void r600_copy_to_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
        struct r600_common_context *rctx = (struct r600_common_context*)ctx;
        struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
        struct pipe_resource *dst = &rtransfer->staging->b.b;
        struct pipe_resource *src = transfer->resource;

        if (src->nr_samples > 1) {
                r600_copy_region_with_blit(ctx, dst, 0, 0, 0, 0,
                                           src, transfer->level, &transfer->box);
                return;
        }

        rctx->dma_copy(ctx, dst, 0, 0, 0, 0, src, transfer->level,
                       &transfer->box);
}

/* Copy from a transfer's staging texture to a full GPU one. */
static void r600_copy_from_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
        struct r600_common_context *rctx = (struct r600_common_context*)ctx;
        struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
        struct pipe_resource *dst = transfer->resource;
        struct pipe_resource *src = &rtransfer->staging->b.b;
        struct pipe_box sbox;

        u_box_3d(0, 0, 0, transfer->box.width, transfer->box.height, transfer->box.depth, &sbox);

        if (dst->nr_samples > 1) {
                r600_copy_region_with_blit(ctx, dst, transfer->level,
                                           transfer->box.x, transfer->box.y, transfer->box.z,
                                           src, 0, &sbox);
                return;
        }

        rctx->dma_copy(ctx, dst, transfer->level,
                       transfer->box.x, transfer->box.y, transfer->box.z,
                       src, 0, &sbox);
}

static unsigned r600_texture_get_offset(struct r600_texture *rtex, unsigned level,
                                        const struct pipe_box *box)
{
        enum pipe_format format = rtex->resource.b.b.format;

        return rtex->surface.level[level].offset +
               box->z * rtex->surface.level[level].slice_size +
               box->y / util_format_get_blockheight(format) * rtex->surface.level[level].pitch_bytes +
               box->x / util_format_get_blockwidth(format) * util_format_get_blocksize(format);
}
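
/* Worked example for r600_texture_get_offset (hypothetical numbers, for
 * illustration only): PIPE_FORMAT_R8G8B8A8_UNORM has a 1x1 block of 4 bytes,
 * so with level offset 0, slice_size = 65536, pitch_bytes = 1024 and a box at
 * (x=16, y=8, z=2), the mapped offset is
 *    0 + 2*65536 + 8*1024 + 16*4 = 139328 bytes from the start of the BO.
 */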

static int r600_init_surface(struct r600_common_screen *rscreen,
                             struct radeon_surf *surface,
                             const struct pipe_resource *ptex,
                             unsigned array_mode,
                             bool is_flushed_depth)
{
        const struct util_format_description *desc =
                util_format_description(ptex->format);
        bool is_depth, is_stencil;

        is_depth = util_format_has_depth(desc);
        is_stencil = util_format_has_stencil(desc);

        surface->npix_x = ptex->width0;
        surface->npix_y = ptex->height0;
        surface->npix_z = ptex->depth0;
        surface->blk_w = util_format_get_blockwidth(ptex->format);
        surface->blk_h = util_format_get_blockheight(ptex->format);
        surface->blk_d = 1;
        surface->array_size = 1;
        surface->last_level = ptex->last_level;

        if (rscreen->chip_class >= EVERGREEN && !is_flushed_depth &&
            ptex->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
                surface->bpe = 4; /* stencil is allocated separately on evergreen */
        } else {
                surface->bpe = util_format_get_blocksize(ptex->format);
                /* align byte per element on dword */
                if (surface->bpe == 3) {
                        surface->bpe = 4;
                }
        }

        surface->nsamples = ptex->nr_samples ? ptex->nr_samples : 1;
        surface->flags = RADEON_SURF_SET(array_mode, MODE);

        switch (ptex->target) {
        case PIPE_TEXTURE_1D:
                surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_1D, TYPE);
                break;
        case PIPE_TEXTURE_RECT:
        case PIPE_TEXTURE_2D:
                surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D, TYPE);
                break;
        case PIPE_TEXTURE_3D:
                surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_3D, TYPE);
                break;
        case PIPE_TEXTURE_1D_ARRAY:
                surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_1D_ARRAY, TYPE);
                surface->array_size = ptex->array_size;
                break;
        case PIPE_TEXTURE_2D_ARRAY:
        case PIPE_TEXTURE_CUBE_ARRAY: /* cube array layout like 2d array */
                surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D_ARRAY, TYPE);
                surface->array_size = ptex->array_size;
                break;
        case PIPE_TEXTURE_CUBE:
                surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_CUBEMAP, TYPE);
                break;
        case PIPE_BUFFER:
        default:
                return -EINVAL;
        }
        if (ptex->bind & PIPE_BIND_SCANOUT) {
                surface->flags |= RADEON_SURF_SCANOUT;
        }

        if (!is_flushed_depth && is_depth) {
                surface->flags |= RADEON_SURF_ZBUFFER;

                if (is_stencil) {
                        surface->flags |= RADEON_SURF_SBUFFER |
                                          RADEON_SURF_HAS_SBUFFER_MIPTREE;
                }
        }
        if (rscreen->chip_class >= SI) {
                surface->flags |= RADEON_SURF_HAS_TILE_MODE_INDEX;
        }
        return 0;
}

static int r600_setup_surface(struct pipe_screen *screen,
                              struct r600_texture *rtex,
                              unsigned pitch_in_bytes_override)
{
        struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
        int r;

        r = rscreen->ws->surface_init(rscreen->ws, &rtex->surface);
        if (r) {
                return r;
        }

        rtex->size = rtex->surface.bo_size;

        if (pitch_in_bytes_override && pitch_in_bytes_override != rtex->surface.level[0].pitch_bytes) {
                /* The old DDX on Evergreen overestimates the alignment of
                 * 1D-tiled surfaces; such surfaces have only one level.
                 */
                rtex->surface.level[0].nblk_x = pitch_in_bytes_override / rtex->surface.bpe;
                rtex->surface.level[0].pitch_bytes = pitch_in_bytes_override;
                rtex->surface.level[0].slice_size = pitch_in_bytes_override * rtex->surface.level[0].nblk_y;
                if (rtex->surface.flags & RADEON_SURF_SBUFFER) {
                        rtex->surface.stencil_offset =
                        rtex->surface.stencil_level[0].offset = rtex->surface.level[0].slice_size;
                }
        }
        return 0;
}

static boolean r600_texture_get_handle(struct pipe_screen* screen,
                                       struct pipe_resource *ptex,
                                       struct winsys_handle *whandle)
{
        struct r600_texture *rtex = (struct r600_texture*)ptex;
        struct r600_resource *resource = &rtex->resource;
        struct radeon_surf *surface = &rtex->surface;
        struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;

        rscreen->ws->buffer_set_tiling(resource->buf,
                                       NULL,
                                       surface->level[0].mode >= RADEON_SURF_MODE_1D ?
                                               RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR,
                                       surface->level[0].mode >= RADEON_SURF_MODE_2D ?
                                               RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR,
                                       surface->pipe_config,
                                       surface->bankw, surface->bankh,
                                       surface->tile_split,
                                       surface->stencil_tile_split,
                                       surface->mtilea, surface->num_banks,
                                       surface->level[0].pitch_bytes,
                                       (surface->flags & RADEON_SURF_SCANOUT) != 0);

        return rscreen->ws->buffer_get_handle(resource->buf,
                                              surface->level[0].pitch_bytes, whandle);
}

static void r600_texture_destroy(struct pipe_screen *screen,
                                 struct pipe_resource *ptex)
{
        struct r600_texture *rtex = (struct r600_texture*)ptex;
        struct r600_resource *resource = &rtex->resource;

        if (rtex->flushed_depth_texture)
                pipe_resource_reference((struct pipe_resource **)&rtex->flushed_depth_texture, NULL);

        pipe_resource_reference((struct pipe_resource**)&rtex->htile_buffer, NULL);
        if (rtex->cmask_buffer != &rtex->resource) {
                pipe_resource_reference((struct pipe_resource**)&rtex->cmask_buffer, NULL);
        }
        pipe_resource_reference((struct pipe_resource**)&rtex->dcc_buffer, NULL);
        pb_reference(&resource->buf, NULL);
        FREE(rtex);
}

static const struct u_resource_vtbl r600_texture_vtbl;

/* The number of samples can be specified independently of the texture. */
void r600_texture_get_fmask_info(struct r600_common_screen *rscreen,
                                 struct r600_texture *rtex,
                                 unsigned nr_samples,
                                 struct r600_fmask_info *out)
{
        /* FMASK is allocated like an ordinary texture. */
        struct radeon_surf fmask = rtex->surface;

        memset(out, 0, sizeof(*out));

        fmask.bo_alignment = 0;
        fmask.bo_size = 0;
        fmask.nsamples = 1;
        fmask.flags |= RADEON_SURF_FMASK;

        /* Force 2D tiling if it wasn't set. This may occur when creating
         * FMASK for MSAA resolve on R6xx. On R6xx, the single-sample
         * destination buffer must have an FMASK too. */
        fmask.flags = RADEON_SURF_CLR(fmask.flags, MODE);
        fmask.flags |= RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE);

        if (rscreen->chip_class >= SI) {
                fmask.flags |= RADEON_SURF_HAS_TILE_MODE_INDEX;
        }

        switch (nr_samples) {
        case 2:
        case 4:
                fmask.bpe = 1;
                if (rscreen->chip_class <= CAYMAN) {
                        fmask.bankh = 4;
                }
                break;
        case 8:
                fmask.bpe = 4;
                break;
        default:
                R600_ERR("Invalid sample count for FMASK allocation.\n");
                return;
        }

        /* Overallocate FMASK on R600-R700 to fix colorbuffer corruption.
         * This can be fixed by writing a separate FMASK allocator specifically
         * for R600-R700 ASICs. */
        if (rscreen->chip_class <= R700) {
                fmask.bpe *= 2;
        }

        if (rscreen->ws->surface_init(rscreen->ws, &fmask)) {
                R600_ERR("Got error in surface_init while allocating FMASK.\n");
                return;
        }

        assert(fmask.level[0].mode == RADEON_SURF_MODE_2D);

        out->slice_tile_max = (fmask.level[0].nblk_x * fmask.level[0].nblk_y) / 64;
        if (out->slice_tile_max)
                out->slice_tile_max -= 1;

        out->tile_mode_index = fmask.tiling_index[0];
        out->pitch_in_pixels = fmask.level[0].nblk_x;
        out->bank_height = fmask.bankh;
        out->alignment = MAX2(256, fmask.bo_alignment);
        out->size = fmask.bo_size;
}
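
/* Example (assumed numbers, for illustration only): with nr_samples = 4,
 * fmask.bpe is 1 (doubled to 2 by the R600-R700 overallocation above), and a
 * level padded to nblk_x = 2048, nblk_y = 1280 yields
 *    slice_tile_max = 2048*1280/64 - 1 = 40959.
 */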

static void r600_texture_allocate_fmask(struct r600_common_screen *rscreen,
                                        struct r600_texture *rtex)
{
        r600_texture_get_fmask_info(rscreen, rtex,
                                    rtex->resource.b.b.nr_samples, &rtex->fmask);

        rtex->fmask.offset = align(rtex->size, rtex->fmask.alignment);
        rtex->size = rtex->fmask.offset + rtex->fmask.size;
}

void r600_texture_get_cmask_info(struct r600_common_screen *rscreen,
                                 struct r600_texture *rtex,
                                 struct r600_cmask_info *out)
{
        unsigned cmask_tile_width = 8;
        unsigned cmask_tile_height = 8;
        unsigned cmask_tile_elements = cmask_tile_width * cmask_tile_height;
        unsigned element_bits = 4;
        unsigned cmask_cache_bits = 1024;
        unsigned num_pipes = rscreen->tiling_info.num_channels;
        unsigned pipe_interleave_bytes = rscreen->tiling_info.group_bytes;

        unsigned elements_per_macro_tile = (cmask_cache_bits / element_bits) * num_pipes;
        unsigned pixels_per_macro_tile = elements_per_macro_tile * cmask_tile_elements;
        unsigned sqrt_pixels_per_macro_tile = sqrt(pixels_per_macro_tile);
        unsigned macro_tile_width = util_next_power_of_two(sqrt_pixels_per_macro_tile);
        unsigned macro_tile_height = pixels_per_macro_tile / macro_tile_width;

        unsigned pitch_elements = align(rtex->surface.npix_x, macro_tile_width);
        unsigned height = align(rtex->surface.npix_y, macro_tile_height);

        unsigned base_align = num_pipes * pipe_interleave_bytes;
        unsigned slice_bytes =
                ((pitch_elements * height * element_bits + 7) / 8) / cmask_tile_elements;

        assert(macro_tile_width % 128 == 0);
        assert(macro_tile_height % 128 == 0);

        out->pitch = pitch_elements;
        out->height = height;
        out->xalign = macro_tile_width;
        out->yalign = macro_tile_height;
        out->slice_tile_max = ((pitch_elements * height) / (128*128)) - 1;
        out->alignment = MAX2(256, base_align);
        out->size = (util_max_layer(&rtex->resource.b.b, 0) + 1) *
                    align(slice_bytes, base_align);
}
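
/* Worked example for the computation above, assuming num_pipes = 4 and
 * group_bytes = 256 (illustrative values only):
 *    elements_per_macro_tile = (1024/4)*4 = 1024
 *    pixels_per_macro_tile = 1024*64 = 65536, i.e. 256x256-pixel macro tiles
 * For a 1920x1080 surface: pitch = align(1920, 256) = 2048 and
 * height = align(1080, 256) = 1280, and with one 4-bit element per 8x8 tile:
 *    slice_bytes = (2048*1280*4/8)/64 = 20480
 *    slice_tile_max = 2048*1280/(128*128) - 1 = 159
 *    alignment = MAX2(256, 4*256) = 1024
 */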

static void si_texture_get_cmask_info(struct r600_common_screen *rscreen,
                                      struct r600_texture *rtex,
                                      struct r600_cmask_info *out)
{
        unsigned pipe_interleave_bytes = rscreen->tiling_info.group_bytes;
        unsigned num_pipes = rscreen->tiling_info.num_channels;
        unsigned cl_width, cl_height;

        switch (num_pipes) {
        case 2:
                cl_width = 32;
                cl_height = 16;
                break;
        case 4:
                cl_width = 32;
                cl_height = 32;
                break;
        case 8:
                cl_width = 64;
                cl_height = 32;
                break;
        case 16: /* Hawaii */
                cl_width = 64;
                cl_height = 64;
                break;
        default:
                assert(0);
                return;
        }

        unsigned base_align = num_pipes * pipe_interleave_bytes;

        unsigned width = align(rtex->surface.npix_x, cl_width*8);
        unsigned height = align(rtex->surface.npix_y, cl_height*8);
        unsigned slice_elements = (width * height) / (8*8);

        /* Each element of CMASK is a nibble. */
        unsigned slice_bytes = slice_elements / 2;

        out->pitch = width;
        out->height = height;
        out->xalign = cl_width * 8;
        out->yalign = cl_height * 8;
        out->slice_tile_max = (width * height) / (128*128);
        if (out->slice_tile_max)
                out->slice_tile_max -= 1;

        out->alignment = MAX2(256, base_align);
        out->size = (util_max_layer(&rtex->resource.b.b, 0) + 1) *
                    align(slice_bytes, base_align);
}
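
/* Worked example for the computation above, assuming num_pipes = 8 and
 * group_bytes = 256 (illustrative values only): cl_width = 64 and
 * cl_height = 32, so a 1920x1080 surface is padded to
 * width = align(1920, 512) = 2048 and height = align(1080, 256) = 1280.
 * One nibble per 8x8 tile then gives
 *    slice_bytes = (2048*1280/64)/2 = 20480
 *    slice_tile_max = 2048*1280/(128*128) - 1 = 159
 *    alignment = MAX2(256, 8*256) = 2048
 */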

static void r600_texture_allocate_cmask(struct r600_common_screen *rscreen,
                                        struct r600_texture *rtex)
{
        if (rscreen->chip_class >= SI) {
                si_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
        } else {
                r600_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
        }

        rtex->cmask.offset = align(rtex->size, rtex->cmask.alignment);
        rtex->size = rtex->cmask.offset + rtex->cmask.size;

        if (rscreen->chip_class >= SI)
                rtex->cb_color_info |= SI_S_028C70_FAST_CLEAR(1);
        else
                rtex->cb_color_info |= EG_S_028C70_FAST_CLEAR(1);
}

static void r600_texture_alloc_cmask_separate(struct r600_common_screen *rscreen,
                                              struct r600_texture *rtex)
{
        if (rtex->cmask_buffer)
                return;

        assert(rtex->cmask.size == 0);

        if (rscreen->chip_class >= SI) {
                si_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
        } else {
                r600_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
        }

        rtex->cmask_buffer = (struct r600_resource *)
                pipe_buffer_create(&rscreen->b, PIPE_BIND_CUSTOM,
                                   PIPE_USAGE_DEFAULT, rtex->cmask.size);
        if (rtex->cmask_buffer == NULL) {
                rtex->cmask.size = 0;
                return;
        }

        /* update colorbuffer state bits */
        rtex->cmask.base_address_reg = rtex->cmask_buffer->gpu_address >> 8;

        if (rscreen->chip_class >= SI)
                rtex->cb_color_info |= SI_S_028C70_FAST_CLEAR(1);
        else
                rtex->cb_color_info |= EG_S_028C70_FAST_CLEAR(1);
}

static void vi_texture_alloc_dcc_separate(struct r600_common_screen *rscreen,
                                          struct r600_texture *rtex)
{
        if (rscreen->debug_flags & DBG_NO_DCC)
                return;

        rtex->dcc_buffer = (struct r600_resource *)
                r600_aligned_buffer_create(&rscreen->b, PIPE_BIND_CUSTOM,
                                           PIPE_USAGE_DEFAULT, rtex->surface.dcc_size,
                                           rtex->surface.dcc_alignment);
        if (rtex->dcc_buffer == NULL) {
                return;
        }

        r600_screen_clear_buffer(rscreen, &rtex->dcc_buffer->b.b, 0, rtex->surface.dcc_size,
                                 0xFFFFFFFF, true);

        rtex->cb_color_info |= VI_S_028C70_DCC_ENABLE(1);
}

static unsigned r600_texture_get_htile_size(struct r600_common_screen *rscreen,
                                            struct r600_texture *rtex)
{
        unsigned cl_width, cl_height, width, height;
        unsigned slice_elements, slice_bytes, pipe_interleave_bytes, base_align;
        unsigned num_pipes = rscreen->tiling_info.num_channels;

        if (rscreen->chip_class <= EVERGREEN &&
            rscreen->info.drm_major == 2 && rscreen->info.drm_minor < 26)
                return 0;

        /* HW bug on R6xx. */
        if (rscreen->chip_class == R600 &&
            (rtex->surface.level[0].npix_x > 7680 ||
             rtex->surface.level[0].npix_y > 7680))
                return 0;

        /* HTILE is broken with 1D tiling on old kernels and CIK. */
        if (rscreen->chip_class >= CIK &&
            rtex->surface.level[0].mode == RADEON_SURF_MODE_1D &&
            rscreen->info.drm_major == 2 && rscreen->info.drm_minor < 38)
                return 0;

        switch (num_pipes) {
        case 1:
                cl_width = 32;
                cl_height = 16;
                break;
        case 2:
                cl_width = 32;
                cl_height = 32;
                break;
        case 4:
                cl_width = 64;
                cl_height = 32;
                break;
        case 8:
                cl_width = 64;
                cl_height = 64;
                break;
        case 16:
                cl_width = 128;
                cl_height = 64;
                break;
        default:
                assert(0);
                return 0;
        }

        width = align(rtex->surface.npix_x, cl_width * 8);
        height = align(rtex->surface.npix_y, cl_height * 8);

        slice_elements = (width * height) / (8 * 8);
        slice_bytes = slice_elements * 4;

        pipe_interleave_bytes = rscreen->tiling_info.group_bytes;
        base_align = num_pipes * pipe_interleave_bytes;

        return (util_max_layer(&rtex->resource.b.b, 0) + 1) *
               align(slice_bytes, base_align);
}
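
/* Worked example for the computation above, assuming num_pipes = 8 and
 * group_bytes = 256 (illustrative values only): cl_width = 64 and
 * cl_height = 64, so a 1920x1080 depth buffer is padded to 2048x1536.
 * With one 4-byte HTILE element per 8x8 pixels:
 *    slice_bytes = (2048*1536/64)*4 = 196608 bytes (192 KiB),
 * which is already a multiple of base_align = 8*256 = 2048.
 */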

static void r600_texture_allocate_htile(struct r600_common_screen *rscreen,
                                        struct r600_texture *rtex)
{
        unsigned htile_size = r600_texture_get_htile_size(rscreen, rtex);

        if (!htile_size)
                return;

        rtex->htile_buffer = (struct r600_resource*)
                pipe_buffer_create(&rscreen->b, PIPE_BIND_CUSTOM,
                                   PIPE_USAGE_DEFAULT, htile_size);
        if (rtex->htile_buffer == NULL) {
                /* This is not a fatal error, because we can still keep
                 * rendering without the HTILE buffer. */
                R600_ERR("Failed to create buffer object for htile buffer.\n");
        } else {
                r600_screen_clear_buffer(rscreen, &rtex->htile_buffer->b.b, 0,
                                         htile_size, 0, true);
        }
}

static void
r600_print_texture_info(struct r600_texture *rtex, FILE *f)
{
        int i;

        fprintf(f, "  Info: npix_x=%u, npix_y=%u, npix_z=%u, blk_w=%u, "
                "blk_h=%u, blk_d=%u, array_size=%u, last_level=%u, "
                "bpe=%u, nsamples=%u, flags=0x%x, %s\n",
                rtex->surface.npix_x, rtex->surface.npix_y,
                rtex->surface.npix_z, rtex->surface.blk_w,
                rtex->surface.blk_h, rtex->surface.blk_d,
                rtex->surface.array_size, rtex->surface.last_level,
                rtex->surface.bpe, rtex->surface.nsamples,
                rtex->surface.flags, util_format_short_name(rtex->resource.b.b.format));

        fprintf(f, "  Layout: size=%"PRIu64", alignment=%"PRIu64", bankw=%u, "
                "bankh=%u, nbanks=%u, mtilea=%u, tilesplit=%u, pipeconfig=%u, scanout=%u\n",
                rtex->surface.bo_size, rtex->surface.bo_alignment, rtex->surface.bankw,
                rtex->surface.bankh, rtex->surface.num_banks, rtex->surface.mtilea,
                rtex->surface.tile_split, rtex->surface.pipe_config,
                (rtex->surface.flags & RADEON_SURF_SCANOUT) != 0);

        if (rtex->fmask.size)
                fprintf(f, "  FMask: offset=%u, size=%u, alignment=%u, pitch_in_pixels=%u, "
                        "bankh=%u, slice_tile_max=%u, tile_mode_index=%u\n",
                        rtex->fmask.offset, rtex->fmask.size, rtex->fmask.alignment,
                        rtex->fmask.pitch_in_pixels, rtex->fmask.bank_height,
                        rtex->fmask.slice_tile_max, rtex->fmask.tile_mode_index);

        if (rtex->cmask.size)
                fprintf(f, "  CMask: offset=%u, size=%u, alignment=%u, pitch=%u, "
                        "height=%u, xalign=%u, yalign=%u, slice_tile_max=%u\n",
                        rtex->cmask.offset, rtex->cmask.size, rtex->cmask.alignment,
                        rtex->cmask.pitch, rtex->cmask.height, rtex->cmask.xalign,
                        rtex->cmask.yalign, rtex->cmask.slice_tile_max);

        if (rtex->htile_buffer)
                fprintf(f, "  HTile: size=%u, alignment=%u\n",
                        rtex->htile_buffer->b.b.width0,
                        rtex->htile_buffer->buf->alignment);

        if (rtex->dcc_buffer) {
                fprintf(f, "  DCC: size=%u, alignment=%u\n",
                        rtex->dcc_buffer->b.b.width0,
                        rtex->dcc_buffer->buf->alignment);
                for (i = 0; i <= rtex->surface.last_level; i++)
                        fprintf(f, "  DCCLevel[%i]: offset=%"PRIu64"\n",
                                i, rtex->surface.level[i].dcc_offset);
        }

        for (i = 0; i <= rtex->surface.last_level; i++)
                fprintf(f, "  Level[%i]: offset=%"PRIu64", slice_size=%"PRIu64", "
                        "npix_x=%u, npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
                        "nblk_z=%u, pitch_bytes=%u, mode=%u\n",
                        i, rtex->surface.level[i].offset,
                        rtex->surface.level[i].slice_size,
                        u_minify(rtex->resource.b.b.width0, i),
                        u_minify(rtex->resource.b.b.height0, i),
                        u_minify(rtex->resource.b.b.depth0, i),
                        rtex->surface.level[i].nblk_x,
                        rtex->surface.level[i].nblk_y,
                        rtex->surface.level[i].nblk_z,
                        rtex->surface.level[i].pitch_bytes,
                        rtex->surface.level[i].mode);

        if (rtex->surface.flags & RADEON_SURF_SBUFFER) {
                for (i = 0; i <= rtex->surface.last_level; i++) {
                        fprintf(f, "  StencilLayout: tilesplit=%u\n",
                                rtex->surface.stencil_tile_split);
                        fprintf(f, "  StencilLevel[%i]: offset=%"PRIu64", "
                                "slice_size=%"PRIu64", npix_x=%u, "
                                "npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
                                "nblk_z=%u, pitch_bytes=%u, mode=%u\n",
                                i, rtex->surface.stencil_level[i].offset,
                                rtex->surface.stencil_level[i].slice_size,
                                u_minify(rtex->resource.b.b.width0, i),
                                u_minify(rtex->resource.b.b.height0, i),
                                u_minify(rtex->resource.b.b.depth0, i),
                                rtex->surface.stencil_level[i].nblk_x,
                                rtex->surface.stencil_level[i].nblk_y,
                                rtex->surface.stencil_level[i].nblk_z,
                                rtex->surface.stencil_level[i].pitch_bytes,
                                rtex->surface.stencil_level[i].mode);
                }
        }
}
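
/* Illustrative DBG_TEX output for the CMASK line (hypothetical values
 * matching the worked CMASK example above, not captured from a real run;
 * printed as a single line):
 *
 *    CMask: offset=4194304, size=20480, alignment=1024, pitch=2048,
 *           height=1280, xalign=256, yalign=256, slice_tile_max=159
 */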

/* Common processing for r600_texture_create and r600_texture_from_handle */
static struct r600_texture *
r600_texture_create_object(struct pipe_screen *screen,
                           const struct pipe_resource *base,
                           unsigned pitch_in_bytes_override,
                           struct pb_buffer *buf,
                           struct radeon_surf *surface)
{
        struct r600_texture *rtex;
        struct r600_resource *resource;
        struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;

        rtex = CALLOC_STRUCT(r600_texture);
        if (rtex == NULL)
                return NULL;

        resource = &rtex->resource;
        resource->b.b = *base;
        resource->b.vtbl = &r600_texture_vtbl;
        pipe_reference_init(&resource->b.b.reference, 1);
        resource->b.b.screen = screen;

        /* don't include stencil-only formats which we don't support for rendering */
        rtex->is_depth = util_format_has_depth(util_format_description(rtex->resource.b.b.format));

        rtex->surface = *surface;
        if (r600_setup_surface(screen, rtex, pitch_in_bytes_override)) {
                FREE(rtex);
                return NULL;
        }

        /* Tiled depth textures utilize the non-displayable tile order.
         * This must be done after r600_setup_surface.
         * Applies to R600-Cayman. */
        rtex->non_disp_tiling = rtex->is_depth && rtex->surface.level[0].mode >= RADEON_SURF_MODE_1D;

        if (rtex->is_depth) {
                if (!(base->flags & (R600_RESOURCE_FLAG_TRANSFER |
                                     R600_RESOURCE_FLAG_FLUSHED_DEPTH)) &&
                    !(rscreen->debug_flags & DBG_NO_HYPERZ)) {

                        r600_texture_allocate_htile(rscreen, rtex);
                }
        } else {
                if (base->nr_samples > 1) {
                        if (!buf) {
                                r600_texture_allocate_fmask(rscreen, rtex);
                                r600_texture_allocate_cmask(rscreen, rtex);
                                rtex->cmask_buffer = &rtex->resource;
                        }
                        if (!rtex->fmask.size || !rtex->cmask.size) {
                                FREE(rtex);
                                return NULL;
                        }
                }
                if (rtex->surface.dcc_size)
                        vi_texture_alloc_dcc_separate(rscreen, rtex);
        }

        /* Now create the backing buffer. */
        if (!buf) {
                if (!r600_init_resource(rscreen, resource, rtex->size,
                                        rtex->surface.bo_alignment, TRUE)) {
                        FREE(rtex);
                        return NULL;
                }
        } else {
                resource->buf = buf;
                resource->cs_buf = rscreen->ws->buffer_get_cs_handle(buf);
                resource->gpu_address = rscreen->ws->buffer_get_virtual_address(resource->cs_buf);
                resource->domains = rscreen->ws->buffer_get_initial_domain(resource->cs_buf);
        }

        if (rtex->cmask.size) {
                /* Initialize the cmask to 0xCC (= compressed state). */
                r600_screen_clear_buffer(rscreen, &rtex->cmask_buffer->b.b,
                                         rtex->cmask.offset, rtex->cmask.size,
                                         0xCCCCCCCC, true);
        }

        /* Initialize the CMASK base register value. */
        rtex->cmask.base_address_reg =
                (rtex->resource.gpu_address + rtex->cmask.offset) >> 8;

        if (rscreen->debug_flags & DBG_VM) {
                fprintf(stderr, "VM start=0x%"PRIX64" end=0x%"PRIX64" | Texture %ix%ix%i, %i levels, %i samples, %s\n",
                        rtex->resource.gpu_address,
                        rtex->resource.gpu_address + rtex->resource.buf->size,
                        base->width0, base->height0, util_max_layer(base, 0)+1, base->last_level+1,
                        base->nr_samples ? base->nr_samples : 1, util_format_short_name(base->format));
        }

        if (rscreen->debug_flags & DBG_TEX) {
                puts("Texture:");
                r600_print_texture_info(rtex, stdout);
        }

        return rtex;
}

static unsigned r600_choose_tiling(struct r600_common_screen *rscreen,
                                   const struct pipe_resource *templ)
{
        const struct util_format_description *desc = util_format_description(templ->format);
        bool force_tiling = templ->flags & R600_RESOURCE_FLAG_FORCE_TILING;

        /* MSAA resources must be 2D tiled. */
        if (templ->nr_samples > 1)
                return RADEON_SURF_MODE_2D;

        /* Transfer resources should be linear. */
        if (templ->flags & R600_RESOURCE_FLAG_TRANSFER)
                return RADEON_SURF_MODE_LINEAR_ALIGNED;

        /* r600g: force tiling on TEXTURE_2D and TEXTURE_3D compute resources. */
        if (rscreen->chip_class >= R600 && rscreen->chip_class <= CAYMAN &&
            (templ->bind & PIPE_BIND_COMPUTE_RESOURCE) &&
            (templ->target == PIPE_TEXTURE_2D ||
             templ->target == PIPE_TEXTURE_3D))
                force_tiling = true;

        /* Handle common candidates for the linear mode.
         * Compressed textures must always be tiled. */
        if (!force_tiling && !util_format_is_compressed(templ->format)) {
                /* Not everything can be linear, so we cannot enforce it
                 * for all textures. */
                if ((rscreen->debug_flags & DBG_NO_TILING) &&
                    (!util_format_is_depth_or_stencil(templ->format) ||
                     !(templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH)))
                        return RADEON_SURF_MODE_LINEAR_ALIGNED;

                /* Tiling doesn't work with the 422 (SUBSAMPLED) formats on R600+. */
                if (desc->layout == UTIL_FORMAT_LAYOUT_SUBSAMPLED)
                        return RADEON_SURF_MODE_LINEAR_ALIGNED;

                /* Cursors are linear on SI.
                 * (XXX double-check, maybe also use RADEON_SURF_SCANOUT) */
                if (rscreen->chip_class >= SI &&
                    (templ->bind & PIPE_BIND_CURSOR))
                        return RADEON_SURF_MODE_LINEAR_ALIGNED;

                if (templ->bind & PIPE_BIND_LINEAR)
                        return RADEON_SURF_MODE_LINEAR_ALIGNED;

                /* Textures with a very small height are recommended to be linear. */
                if (templ->target == PIPE_TEXTURE_1D ||
                    templ->target == PIPE_TEXTURE_1D_ARRAY ||
                    templ->height0 <= 4)
                        return RADEON_SURF_MODE_LINEAR_ALIGNED;

                /* Textures likely to be mapped often. */
                if (templ->usage == PIPE_USAGE_STAGING ||
                    templ->usage == PIPE_USAGE_STREAM)
                        return RADEON_SURF_MODE_LINEAR_ALIGNED;
        }

        /* Make small textures 1D tiled. */
        if (templ->width0 <= 16 || templ->height0 <= 16 ||
            (rscreen->debug_flags & DBG_NO_2D_TILING))
                return RADEON_SURF_MODE_1D;

        /* The allocator will switch to 1D if needed. */
        return RADEON_SURF_MODE_2D;
}
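
/* Example decisions (illustrative): a 1024x1024 RGBA8 sampler texture with
 * default usage falls through to RADEON_SURF_MODE_2D; the same texture with
 * PIPE_USAGE_STAGING, a 422-subsampled format, or height0 <= 4 returns
 * RADEON_SURF_MODE_LINEAR_ALIGNED; and a 16x16 texture returns
 * RADEON_SURF_MODE_1D.
 */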

struct pipe_resource *r600_texture_create(struct pipe_screen *screen,
                                          const struct pipe_resource *templ)
{
        struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
        struct radeon_surf surface = {0};
        int r;

        r = r600_init_surface(rscreen, &surface, templ,
                              r600_choose_tiling(rscreen, templ),
                              templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH);
        if (r) {
                return NULL;
        }
        r = rscreen->ws->surface_best(rscreen->ws, &surface);
        if (r) {
                return NULL;
        }
        return (struct pipe_resource *)r600_texture_create_object(screen, templ,
                                                                  0, NULL, &surface);
}

static struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen,
                                                      const struct pipe_resource *templ,
                                                      struct winsys_handle *whandle)
{
        struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
        struct pb_buffer *buf = NULL;
        unsigned stride = 0;
        unsigned array_mode;
        enum radeon_bo_layout micro, macro;
        struct radeon_surf surface;
        bool scanout;
        int r;

        /* Support only 2D textures without mipmaps */
        if ((templ->target != PIPE_TEXTURE_2D && templ->target != PIPE_TEXTURE_RECT) ||
            templ->depth0 != 1 || templ->last_level != 0)
                return NULL;

        buf = rscreen->ws->buffer_from_handle(rscreen->ws, whandle, &stride);
        if (!buf)
                return NULL;

        rscreen->ws->buffer_get_tiling(buf, &micro, &macro,
                                       &surface.bankw, &surface.bankh,
                                       &surface.tile_split,
                                       &surface.stencil_tile_split,
                                       &surface.mtilea, &scanout);

        if (macro == RADEON_LAYOUT_TILED)
                array_mode = RADEON_SURF_MODE_2D;
        else if (micro == RADEON_LAYOUT_TILED)
                array_mode = RADEON_SURF_MODE_1D;
        else
                array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;

        r = r600_init_surface(rscreen, &surface, templ, array_mode, false);
        if (r) {
                return NULL;
        }

        if (scanout)
                surface.flags |= RADEON_SURF_SCANOUT;

        return (struct pipe_resource *)r600_texture_create_object(screen, templ,
                                                                  stride, buf, &surface);
}

bool r600_init_flushed_depth_texture(struct pipe_context *ctx,
                                     struct pipe_resource *texture,
                                     struct r600_texture **staging)
{
        struct r600_texture *rtex = (struct r600_texture*)texture;
        struct pipe_resource resource;
        struct r600_texture **flushed_depth_texture = staging ?
                staging : &rtex->flushed_depth_texture;

        if (!staging && rtex->flushed_depth_texture)
                return true; /* it's ready */

        resource.target = texture->target;
        resource.format = texture->format;
        resource.width0 = texture->width0;
        resource.height0 = texture->height0;
        resource.depth0 = texture->depth0;
        resource.array_size = texture->array_size;
        resource.last_level = texture->last_level;
        resource.nr_samples = texture->nr_samples;
        resource.usage = staging ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
        resource.bind = texture->bind & ~PIPE_BIND_DEPTH_STENCIL;
        resource.flags = texture->flags | R600_RESOURCE_FLAG_FLUSHED_DEPTH;

        if (staging)
                resource.flags |= R600_RESOURCE_FLAG_TRANSFER;

        *flushed_depth_texture = (struct r600_texture *)ctx->screen->resource_create(ctx->screen, &resource);
        if (*flushed_depth_texture == NULL) {
                R600_ERR("failed to create temporary texture to hold flushed depth\n");
                return false;
        }

        (*flushed_depth_texture)->is_flushing_texture = TRUE;
        (*flushed_depth_texture)->non_disp_tiling = false;
        return true;
}

/**
 * Initialize the pipe_resource descriptor to be of the same size as the box,
 * which is supposed to hold a subregion of the texture "orig" at the given
 * mipmap level.
 */
static void r600_init_temp_resource_from_box(struct pipe_resource *res,
                                             struct pipe_resource *orig,
                                             const struct pipe_box *box,
                                             unsigned level, unsigned flags)
{
        memset(res, 0, sizeof(*res));
        res->format = orig->format;
        res->width0 = box->width;
        res->height0 = box->height;
        res->depth0 = 1;
        res->array_size = 1;
        res->usage = flags & R600_RESOURCE_FLAG_TRANSFER ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
        res->flags = flags;

        /* We must set the correct texture target and dimensions for a 3D box. */
        if (box->depth > 1 && util_max_layer(orig, level) > 0)
                res->target = orig->target;
        else
                res->target = PIPE_TEXTURE_2D;

        switch (res->target) {
        case PIPE_TEXTURE_1D_ARRAY:
        case PIPE_TEXTURE_2D_ARRAY:
        case PIPE_TEXTURE_CUBE_ARRAY:
                res->array_size = box->depth;
                break;
        case PIPE_TEXTURE_3D:
                res->depth0 = box->depth;
                break;
        default:;
        }
}

static void *r600_texture_transfer_map(struct pipe_context *ctx,
                                       struct pipe_resource *texture,
                                       unsigned level,
                                       unsigned usage,
                                       const struct pipe_box *box,
                                       struct pipe_transfer **ptransfer)
{
        struct r600_common_context *rctx = (struct r600_common_context*)ctx;
        struct r600_texture *rtex = (struct r600_texture*)texture;
        struct r600_transfer *trans;
        boolean use_staging_texture = FALSE;
        struct r600_resource *buf;
        unsigned offset = 0;
        char *map;

        /* We cannot map a tiled texture directly because the data is
         * in a different order, therefore we do detiling using a blit.
         *
         * Also, use a temporary in GTT memory for read transfers, as
         * the CPU is much happier reading out of cached system memory
         * than uncached VRAM.
         */
        if (rtex->surface.level[0].mode >= RADEON_SURF_MODE_1D) {
                use_staging_texture = TRUE;
        } else if ((usage & PIPE_TRANSFER_READ) && !(usage & PIPE_TRANSFER_MAP_DIRECTLY) &&
                   (rtex->resource.domains == RADEON_DOMAIN_VRAM)) {
                /* Untiled buffers in VRAM, which is slow for CPU reads */
                use_staging_texture = TRUE;
        } else if (!(usage & PIPE_TRANSFER_READ) &&
                   (r600_rings_is_buffer_referenced(rctx, rtex->resource.cs_buf, RADEON_USAGE_READWRITE) ||
                    !rctx->ws->buffer_wait(rtex->resource.buf, 0, RADEON_USAGE_READWRITE))) {
                /* Use a staging texture for uploads if the underlying BO is busy. */
                use_staging_texture = TRUE;
        }

        if (texture->flags & R600_RESOURCE_FLAG_TRANSFER) {
                use_staging_texture = FALSE;
        }

        if (use_staging_texture && (usage & PIPE_TRANSFER_MAP_DIRECTLY)) {
                return NULL;
        }

        trans = CALLOC_STRUCT(r600_transfer);
        if (trans == NULL)
                return NULL;
        trans->transfer.resource = texture;
        trans->transfer.level = level;
        trans->transfer.usage = usage;
        trans->transfer.box = *box;

        if (rtex->is_depth) {
                struct r600_texture *staging_depth;

                if (rtex->resource.b.b.nr_samples > 1) {
                        /* MSAA depth buffers need to be converted to single sample buffers.
                         *
                         * Mapping MSAA depth buffers can occur if ReadPixels is called
                         * with a multisample GLX visual.
                         *
                         * First downsample the depth buffer to a temporary texture,
                         * then decompress the temporary one to staging.
                         *
                         * Only the region being mapped is transferred.
                         */
                        struct pipe_resource resource;

                        r600_init_temp_resource_from_box(&resource, texture, box, level, 0);

                        if (!r600_init_flushed_depth_texture(ctx, &resource, &staging_depth)) {
                                R600_ERR("failed to create temporary texture to hold untiled copy\n");
                                FREE(trans);
                                return NULL;
                        }

                        if (usage & PIPE_TRANSFER_READ) {
                                struct pipe_resource *temp = ctx->screen->resource_create(ctx->screen, &resource);
                                if (!temp) {
                                        R600_ERR("failed to create a temporary depth texture\n");
                                        FREE(trans);
                                        return NULL;
                                }

                                r600_copy_region_with_blit(ctx, temp, 0, 0, 0, 0, texture, level, box);
                                rctx->blit_decompress_depth(ctx, (struct r600_texture*)temp, staging_depth,
                                                            0, 0, 0, box->depth, 0, 0);
                                pipe_resource_reference((struct pipe_resource**)&temp, NULL);
                        }
                } else {
                        /* XXX: only readback the rectangle which is being mapped? */
                        /* XXX: when discard is true, no need to read back from depth texture */
                        if (!r600_init_flushed_depth_texture(ctx, texture, &staging_depth)) {
                                R600_ERR("failed to create temporary texture to hold untiled copy\n");
                                FREE(trans);
                                return NULL;
                        }

                        rctx->blit_decompress_depth(ctx, rtex, staging_depth,
                                                    level, level,
                                                    box->z, box->z + box->depth - 1,
                                                    0, 0);

                        offset = r600_texture_get_offset(staging_depth, level, box);
                }

                trans->transfer.stride = staging_depth->surface.level[level].pitch_bytes;
                trans->transfer.layer_stride = staging_depth->surface.level[level].slice_size;
                trans->staging = (struct r600_resource*)staging_depth;
        } else if (use_staging_texture) {
                struct pipe_resource resource;
                struct r600_texture *staging;

                r600_init_temp_resource_from_box(&resource, texture, box, level,
                                                 R600_RESOURCE_FLAG_TRANSFER);
                resource.usage = (usage & PIPE_TRANSFER_READ) ?
                        PIPE_USAGE_STAGING : PIPE_USAGE_STREAM;

                /* Create the temporary texture. */
                staging = (struct r600_texture*)ctx->screen->resource_create(ctx->screen, &resource);
                if (staging == NULL) {
                        R600_ERR("failed to create temporary texture to hold untiled copy\n");
                        FREE(trans);
                        return NULL;
                }
                trans->staging = &staging->resource;
                trans->transfer.stride = staging->surface.level[0].pitch_bytes;
                trans->transfer.layer_stride = staging->surface.level[0].slice_size;
                if (usage & PIPE_TRANSFER_READ) {
                        r600_copy_to_staging_texture(ctx, trans);
                }
        } else {
                /* the resource is mapped directly */
                trans->transfer.stride = rtex->surface.level[level].pitch_bytes;
                trans->transfer.layer_stride = rtex->surface.level[level].slice_size;
                offset = r600_texture_get_offset(rtex, level, box);
        }

        if (trans->staging) {
                buf = trans->staging;
                if (!rtex->is_depth && !(usage & PIPE_TRANSFER_READ))
                        usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
        } else {
                buf = &rtex->resource;
        }

        if (!(map = r600_buffer_map_sync_with_rings(rctx, buf, usage))) {
                pipe_resource_reference((struct pipe_resource**)&trans->staging, NULL);
                FREE(trans);
                return NULL;
        }

        *ptransfer = &trans->transfer;
        return map + offset;
}

static void r600_texture_transfer_unmap(struct pipe_context *ctx,
                                        struct pipe_transfer* transfer)
{
        struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
        struct pipe_resource *texture = transfer->resource;
        struct r600_texture *rtex = (struct r600_texture*)texture;

        if ((transfer->usage & PIPE_TRANSFER_WRITE) && rtransfer->staging) {
                if (rtex->is_depth && rtex->resource.b.b.nr_samples <= 1) {
                        ctx->resource_copy_region(ctx, texture, transfer->level,
                                                  transfer->box.x, transfer->box.y, transfer->box.z,
                                                  &rtransfer->staging->b.b, transfer->level,
                                                  &transfer->box);
                } else {
                        r600_copy_from_staging_texture(ctx, rtransfer);
                }
        }

        if (rtransfer->staging)
                pipe_resource_reference((struct pipe_resource**)&rtransfer->staging, NULL);

        FREE(transfer);
}

static const struct u_resource_vtbl r600_texture_vtbl =
{
        NULL,                            /* get_handle */
        r600_texture_destroy,            /* resource_destroy */
        r600_texture_transfer_map,       /* transfer_map */
        u_default_transfer_flush_region, /* transfer_flush_region */
        r600_texture_transfer_unmap,     /* transfer_unmap */
        NULL                             /* transfer_inline_write */
};

struct pipe_surface *r600_create_surface_custom(struct pipe_context *pipe,
                                                struct pipe_resource *texture,
                                                const struct pipe_surface *templ,
                                                unsigned width, unsigned height)
{
        struct r600_surface *surface = CALLOC_STRUCT(r600_surface);

        if (surface == NULL)
                return NULL;

        assert(templ->u.tex.first_layer <= util_max_layer(texture, templ->u.tex.level));
        assert(templ->u.tex.last_layer <= util_max_layer(texture, templ->u.tex.level));

        pipe_reference_init(&surface->base.reference, 1);
        pipe_resource_reference(&surface->base.texture, texture);
        surface->base.context = pipe;
        surface->base.format = templ->format;
        surface->base.width = width;
        surface->base.height = height;
        surface->base.u = templ->u;
        return &surface->base;
}

static struct pipe_surface *r600_create_surface(struct pipe_context *pipe,
                                                struct pipe_resource *tex,
                                                const struct pipe_surface *templ)
{
        unsigned level = templ->u.tex.level;

        return r600_create_surface_custom(pipe, tex, templ,
                                          u_minify(tex->width0, level),
                                          u_minify(tex->height0, level));
}

static void r600_surface_destroy(struct pipe_context *pipe,
                                 struct pipe_surface *surface)
{
        struct r600_surface *surf = (struct r600_surface*)surface;
        pipe_resource_reference((struct pipe_resource**)&surf->cb_buffer_fmask, NULL);
        pipe_resource_reference((struct pipe_resource**)&surf->cb_buffer_cmask, NULL);
        pipe_resource_reference(&surface->texture, NULL);
        FREE(surface);
}

unsigned r600_translate_colorswap(enum pipe_format format)
{
        const struct util_format_description *desc = util_format_description(format);

#define HAS_SWIZZLE(chan,swz) (desc->swizzle[chan] == UTIL_FORMAT_SWIZZLE_##swz)

        if (format == PIPE_FORMAT_R11G11B10_FLOAT) /* isn't plain */
                return V_0280A0_SWAP_STD;

        if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN)
                return ~0U;

        switch (desc->nr_channels) {
        case 1:
                if (HAS_SWIZZLE(0,X))
                        return V_0280A0_SWAP_STD; /* X___ */
                else if (HAS_SWIZZLE(3,X))
                        return V_0280A0_SWAP_ALT_REV; /* ___X */
                break;
        case 2:
                if ((HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,Y)) ||
                    (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,NONE)) ||
                    (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,Y)))
                        return V_0280A0_SWAP_STD; /* XY__ */
                else if ((HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,X)) ||
                         (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,NONE)) ||
                         (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,X)))
                        return V_0280A0_SWAP_STD_REV; /* YX__ */
                else if (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(3,Y))
                        return V_0280A0_SWAP_ALT; /* X__Y */
                else if (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(3,X))
                        return V_0280A0_SWAP_ALT_REV; /* Y__X */
                break;
        case 3:
                if (HAS_SWIZZLE(0,X))
                        return V_0280A0_SWAP_STD; /* XYZ */
                else if (HAS_SWIZZLE(0,Z))
                        return V_0280A0_SWAP_STD_REV; /* ZYX */
                break;
        case 4:
                /* check the middle channels, the 1st and 4th channel can be NONE */
                if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,Z))
                        return V_0280A0_SWAP_STD; /* XYZW */
                else if (HAS_SWIZZLE(1,Z) && HAS_SWIZZLE(2,Y))
                        return V_0280A0_SWAP_STD_REV; /* WZYX */
                else if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,X))
                        return V_0280A0_SWAP_ALT; /* ZYXW */
                else if (HAS_SWIZZLE(1,X) && HAS_SWIZZLE(2,Y))
                        return V_0280A0_SWAP_ALT_REV; /* WXYZ */
                break;
        }
        return ~0U;
}
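
/* Examples, derived from the util_format swizzles (for illustration):
 *    PIPE_FORMAT_R8G8B8A8_UNORM -> swizzle XYZW -> V_0280A0_SWAP_STD
 *    PIPE_FORMAT_B8G8R8A8_UNORM -> swizzle ZYXW -> V_0280A0_SWAP_ALT
 *    PIPE_FORMAT_A8B8G8R8_UNORM -> swizzle WZYX -> V_0280A0_SWAP_STD_REV
 */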

static void evergreen_set_clear_color(struct r600_texture *rtex,
                                      enum pipe_format surface_format,
                                      const union pipe_color_union *color)
{
        union util_color uc;

        memset(&uc, 0, sizeof(uc));

        if (util_format_is_pure_uint(surface_format)) {
                util_format_write_4ui(surface_format, color->ui, 0, &uc, 0, 0, 0, 1, 1);
        } else if (util_format_is_pure_sint(surface_format)) {
                util_format_write_4i(surface_format, color->i, 0, &uc, 0, 0, 0, 1, 1);
        } else {
                util_pack_color(color->f, surface_format, &uc);
        }

        memcpy(rtex->color_clear_value, &uc, 2 * sizeof(uint32_t));
}
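
/* Example (illustrative): clearing a PIPE_FORMAT_R8G8B8A8_UNORM surface to
 * opaque red (1, 0, 0, 1) packs the bytes R,G,B,A = ff,00,00,ff, so on a
 * little-endian CPU color_clear_value[0] becomes 0xFF0000FF. Only the first
 * two dwords of the packed color are kept, which covers formats up to
 * 64 bits per pixel.
 */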

static void vi_get_fast_clear_parameters(enum pipe_format surface_format,
                                         const union pipe_color_union *color,
                                         uint32_t* reset_value,
                                         bool* clear_words_needed)
{
        bool values[4] = {0};
        int i;
        bool main_value = false;
        bool extra_value = false;
        int extra_channel;
        const struct util_format_description *desc = util_format_description(surface_format);

        *clear_words_needed = true;
        *reset_value = 0x20202020U;

        /* If we want to clear without needing a fast clear eliminate step, we
         * can set each channel to 0 or 1 (or 0/max for integer formats). We
         * have two sets of flags, one for the last or first channel (extra)
         * and one for the other channels (main).
         */

        if (surface_format == PIPE_FORMAT_R11G11B10_FLOAT ||
            surface_format == PIPE_FORMAT_B5G6R5_UNORM ||
            surface_format == PIPE_FORMAT_B5G6R5_SRGB) {
                extra_channel = -1;
        } else if (desc->layout == UTIL_FORMAT_LAYOUT_PLAIN) {
                if (r600_translate_colorswap(surface_format) <= 1)
                        extra_channel = desc->nr_channels - 1;
                else
                        extra_channel = 0;
        } else
                return;

        for (i = 0; i < 4; ++i) {
                int index = desc->swizzle[i] - UTIL_FORMAT_SWIZZLE_X;

                if (desc->swizzle[i] < UTIL_FORMAT_SWIZZLE_X ||
                    desc->swizzle[i] > UTIL_FORMAT_SWIZZLE_W)
                        continue;

                if (util_format_is_pure_sint(surface_format)) {
                        values[i] = color->i[i] != 0;
                        if (color->i[i] != 0 && color->i[i] != INT32_MAX)
                                return;
                } else if (util_format_is_pure_uint(surface_format)) {
                        values[i] = color->ui[i] != 0U;
                        if (color->ui[i] != 0U && color->ui[i] != UINT32_MAX)
                                return;
                } else {
                        values[i] = color->f[i] != 0.0F;
                        if (color->f[i] != 0.0F && color->f[i] != 1.0F)
                                return;
                }

                if (index == extra_channel)
                        extra_value = values[i];
                else
                        main_value = values[i];
        }

        for (int i = 0; i < 4; ++i)
                if (values[i] != main_value &&
                    desc->swizzle[i] - UTIL_FORMAT_SWIZZLE_X != extra_channel &&
                    desc->swizzle[i] >= UTIL_FORMAT_SWIZZLE_X &&
                    desc->swizzle[i] <= UTIL_FORMAT_SWIZZLE_W)
                        return;

        *clear_words_needed = false;
        if (main_value)
                *reset_value |= 0x80808080U;

        if (extra_value)
                *reset_value |= 0x40404040U;
}
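
/* Worked example (illustrative): clearing PIPE_FORMAT_R8G8B8A8_UNORM to
 * opaque black (0, 0, 0, 1): RGB are the "main" channels (all 0) and alpha
 * is the "extra" channel (1), so no fast clear eliminate is needed and
 *    *reset_value = 0x20202020 | 0x40404040 = 0x60606060.
 * Clearing to opaque white (1, 1, 1, 1) instead gives
 *    0x20202020 | 0x80808080 | 0x40404040 = 0xE0E0E0E0.
 */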

void evergreen_do_fast_color_clear(struct r600_common_context *rctx,
                                   struct pipe_framebuffer_state *fb,
                                   struct r600_atom *fb_state,
                                   unsigned *buffers, unsigned *dirty_cbufs,
                                   const union pipe_color_union *color)
{
        int i;

        if (rctx->render_cond)
                return;

        for (i = 0; i < fb->nr_cbufs; i++) {
                struct r600_texture *tex;
                unsigned clear_bit = PIPE_CLEAR_COLOR0 << i;

                if (!fb->cbufs[i])
                        continue;

                /* if this colorbuffer is not being cleared */
                if (!(*buffers & clear_bit))
                        continue;

                tex = (struct r600_texture *)fb->cbufs[i]->texture;

                /* 128-bit formats are unsupported */
                if (util_format_get_blocksizebits(fb->cbufs[i]->format) > 64) {
                        continue;
                }

                /* the clear is allowed if all layers are bound */
                if (fb->cbufs[i]->u.tex.first_layer != 0 ||
                    fb->cbufs[i]->u.tex.last_layer != util_max_layer(&tex->resource.b.b, 0)) {
                        continue;
                }

                /* cannot clear mipmapped textures */
                if (fb->cbufs[i]->texture->last_level != 0) {
                        continue;
                }

                /* only supported on tiled surfaces */
                if (tex->surface.level[0].mode < RADEON_SURF_MODE_1D) {
                        continue;
                }

                /* fast color clear with 1D tiling doesn't work on old kernels and CIK */
                if (tex->surface.level[0].mode == RADEON_SURF_MODE_1D &&
                    rctx->chip_class >= CIK &&
                    rctx->screen->info.drm_major == 2 &&
                    rctx->screen->info.drm_minor < 38) {
                        continue;
                }

                if (tex->dcc_buffer) {
                        uint32_t reset_value;
                        bool clear_words_needed;

                        if (rctx->screen->debug_flags & DBG_NO_DCC_CLEAR)
                                continue;

                        vi_get_fast_clear_parameters(fb->cbufs[i]->format, color, &reset_value, &clear_words_needed);

                        rctx->clear_buffer(&rctx->b, &tex->dcc_buffer->b.b,
                                           0, tex->surface.dcc_size, reset_value, true);

                        if (clear_words_needed)
                                tex->dirty_level_mask |= 1 << fb->cbufs[i]->u.tex.level;
                } else {
                        /* ensure CMASK is enabled */
                        r600_texture_alloc_cmask_separate(rctx->screen, tex);
                        if (tex->cmask.size == 0) {
                                continue;
                        }

                        /* Do the fast clear. */
                        rctx->clear_buffer(&rctx->b, &tex->cmask_buffer->b.b,
                                           tex->cmask.offset, tex->cmask.size, 0, true);

                        tex->dirty_level_mask |= 1 << fb->cbufs[i]->u.tex.level;
                }

                evergreen_set_clear_color(tex, fb->cbufs[i]->format, color);

                if (dirty_cbufs)
                        *dirty_cbufs |= 1 << i;
                rctx->set_atom_dirty(rctx, fb_state, true);
                *buffers &= ~clear_bit;
        }
}

void r600_init_screen_texture_functions(struct r600_common_screen *rscreen)
{
        rscreen->b.resource_from_handle = r600_texture_from_handle;
        rscreen->b.resource_get_handle = r600_texture_get_handle;
}

void r600_init_context_texture_functions(struct r600_common_context *rctx)
{
        rctx->b.create_surface = r600_create_surface;
        rctx->b.surface_destroy = r600_surface_destroy;
}