r600g,radeonsi: switch all occurrences of array_size to util_max_layer
[mesa.git] src/gallium/drivers/radeon/r600_texture.c
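The pattern of the change, as it now reads in the CMASK and HTILE sizing code
below: per-slice metadata sizes are multiplied by a layer count taken from the
pipe_resource via util_max_layer() instead of by radeon_surface::array_size.
A sketch of the substitution (the "before" line is inferred from the commit
title, not copied from the diff):

    /* before (inferred): */
    out->size = rtex->surface.array_size * align(slice_bytes, base_align);
    /* after, as in si_texture_get_cmask_info() below: */
    out->size = (util_max_layer(&rtex->resource.b.b, 0) + 1) *
                align(slice_bytes, base_align);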
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Jerome Glisse
 *      Corbin Simpson
 */
#include "r600_pipe_common.h"
#include "r600_cs.h"
#include "util/u_format.h"
#include "util/u_memory.h"
#include "util/u_pack_color.h"
#include <errno.h>
#include <inttypes.h>

/* Same as resource_copy_region, except that both upsampling and downsampling are allowed. */
static void r600_copy_region_with_blit(struct pipe_context *pipe,
				       struct pipe_resource *dst,
				       unsigned dst_level,
				       unsigned dstx, unsigned dsty, unsigned dstz,
				       struct pipe_resource *src,
				       unsigned src_level,
				       const struct pipe_box *src_box)
{
	struct pipe_blit_info blit;

	memset(&blit, 0, sizeof(blit));
	blit.src.resource = src;
	blit.src.format = src->format;
	blit.src.level = src_level;
	blit.src.box = *src_box;
	blit.dst.resource = dst;
	blit.dst.format = dst->format;
	blit.dst.level = dst_level;
	blit.dst.box.x = dstx;
	blit.dst.box.y = dsty;
	blit.dst.box.z = dstz;
	blit.dst.box.width = src_box->width;
	blit.dst.box.height = src_box->height;
	blit.dst.box.depth = src_box->depth;
	blit.mask = util_format_get_mask(src->format) &
		    util_format_get_mask(dst->format);
	blit.filter = PIPE_TEX_FILTER_NEAREST;

	if (blit.mask) {
		pipe->blit(pipe, &blit);
	}
}

/* Copy from a full GPU texture to a transfer's staging one. */
static void r600_copy_to_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
	struct pipe_resource *dst = &rtransfer->staging->b.b;
	struct pipe_resource *src = transfer->resource;

	if (src->nr_samples > 1) {
		r600_copy_region_with_blit(ctx, dst, 0, 0, 0, 0,
					   src, transfer->level, &transfer->box);
		return;
	}

	rctx->dma_copy(ctx, dst, 0, 0, 0, 0, src, transfer->level,
		       &transfer->box);
}

/* Copy from a transfer's staging texture to a full GPU one. */
static void r600_copy_from_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
	struct pipe_resource *dst = transfer->resource;
	struct pipe_resource *src = &rtransfer->staging->b.b;
	struct pipe_box sbox;

	u_box_3d(0, 0, 0, transfer->box.width, transfer->box.height, transfer->box.depth, &sbox);

	if (dst->nr_samples > 1) {
		r600_copy_region_with_blit(ctx, dst, transfer->level,
					   transfer->box.x, transfer->box.y, transfer->box.z,
					   src, 0, &sbox);
		return;
	}

	rctx->dma_copy(ctx, dst, transfer->level,
		       transfer->box.x, transfer->box.y, transfer->box.z,
		       src, 0, &sbox);
}

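/* Return the byte offset of the given box within a mip level, scaling
 * x/y by the format's block dimensions. */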
static unsigned r600_texture_get_offset(struct r600_texture *rtex, unsigned level,
					const struct pipe_box *box)
{
	enum pipe_format format = rtex->resource.b.b.format;

	return rtex->surface.level[level].offset +
	       box->z * rtex->surface.level[level].slice_size +
	       box->y / util_format_get_blockheight(format) * rtex->surface.level[level].pitch_bytes +
	       box->x / util_format_get_blockwidth(format) * util_format_get_blocksize(format);
}

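/* Fill out a radeon_surface description from the resource template so the
 * winsys can compute the memory layout; returns -EINVAL for unsupported
 * targets (e.g. buffers). */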
static int r600_init_surface(struct r600_common_screen *rscreen,
			     struct radeon_surface *surface,
			     const struct pipe_resource *ptex,
			     unsigned array_mode,
			     bool is_flushed_depth)
{
	const struct util_format_description *desc =
		util_format_description(ptex->format);
	bool is_depth, is_stencil;

	is_depth = util_format_has_depth(desc);
	is_stencil = util_format_has_stencil(desc);

	surface->npix_x = ptex->width0;
	surface->npix_y = ptex->height0;
	surface->npix_z = ptex->depth0;
	surface->blk_w = util_format_get_blockwidth(ptex->format);
	surface->blk_h = util_format_get_blockheight(ptex->format);
	surface->blk_d = 1;
	surface->array_size = 1;
	surface->last_level = ptex->last_level;

	if (rscreen->chip_class >= EVERGREEN && !is_flushed_depth &&
	    ptex->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
		surface->bpe = 4; /* stencil is allocated separately on evergreen */
	} else {
		surface->bpe = util_format_get_blocksize(ptex->format);
		/* align bytes per element to a dword */
		if (surface->bpe == 3) {
			surface->bpe = 4;
		}
	}

	surface->nsamples = ptex->nr_samples ? ptex->nr_samples : 1;
	surface->flags = RADEON_SURF_SET(array_mode, MODE);

	switch (ptex->target) {
	case PIPE_TEXTURE_1D:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_1D, TYPE);
		break;
	case PIPE_TEXTURE_RECT:
	case PIPE_TEXTURE_2D:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D, TYPE);
		break;
	case PIPE_TEXTURE_3D:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_3D, TYPE);
		break;
	case PIPE_TEXTURE_1D_ARRAY:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_1D_ARRAY, TYPE);
		surface->array_size = ptex->array_size;
		break;
	case PIPE_TEXTURE_2D_ARRAY:
	case PIPE_TEXTURE_CUBE_ARRAY: /* cube array layout like 2d array */
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D_ARRAY, TYPE);
		surface->array_size = ptex->array_size;
		break;
	case PIPE_TEXTURE_CUBE:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_CUBEMAP, TYPE);
		break;
	case PIPE_BUFFER:
	default:
		return -EINVAL;
	}
	if (ptex->bind & PIPE_BIND_SCANOUT) {
		surface->flags |= RADEON_SURF_SCANOUT;
	}

	if (!is_flushed_depth && is_depth) {
		surface->flags |= RADEON_SURF_ZBUFFER;

		if (is_stencil) {
			surface->flags |= RADEON_SURF_SBUFFER |
					  RADEON_SURF_HAS_SBUFFER_MIPTREE;
		}
	}
	if (rscreen->chip_class >= SI) {
		surface->flags |= RADEON_SURF_HAS_TILE_MODE_INDEX;
	}
	return 0;
}

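/* Let the winsys compute the final surface layout, then honor a pitch
 * override supplied by an imported handle, if any. */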
static int r600_setup_surface(struct pipe_screen *screen,
			      struct r600_texture *rtex,
			      unsigned pitch_in_bytes_override)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	int r;

	r = rscreen->ws->surface_init(rscreen->ws, &rtex->surface);
	if (r) {
		return r;
	}

	rtex->size = rtex->surface.bo_size;

	if (pitch_in_bytes_override && pitch_in_bytes_override != rtex->surface.level[0].pitch_bytes) {
		/* The old DDX on Evergreen overestimates the alignment for
		 * 1D-tiled surfaces; those have only one level. */
		rtex->surface.level[0].nblk_x = pitch_in_bytes_override / rtex->surface.bpe;
		rtex->surface.level[0].pitch_bytes = pitch_in_bytes_override;
		rtex->surface.level[0].slice_size = pitch_in_bytes_override * rtex->surface.level[0].nblk_y;
		if (rtex->surface.flags & RADEON_SURF_SBUFFER) {
			rtex->surface.stencil_offset =
			rtex->surface.stencil_level[0].offset = rtex->surface.level[0].slice_size;
		}
	}
	return 0;
}

static boolean r600_texture_get_handle(struct pipe_screen* screen,
				       struct pipe_resource *ptex,
				       struct winsys_handle *whandle)
{
	struct r600_texture *rtex = (struct r600_texture*)ptex;
	struct r600_resource *resource = &rtex->resource;
	struct radeon_surface *surface = &rtex->surface;
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;

	rscreen->ws->buffer_set_tiling(resource->buf,
				       NULL,
				       surface->level[0].mode >= RADEON_SURF_MODE_1D ?
				       RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR,
				       surface->level[0].mode >= RADEON_SURF_MODE_2D ?
				       RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR,
				       surface->bankw, surface->bankh,
				       surface->tile_split,
				       surface->stencil_tile_split,
				       surface->mtilea,
				       surface->level[0].pitch_bytes,
				       (surface->flags & RADEON_SURF_SCANOUT) != 0);

	return rscreen->ws->buffer_get_handle(resource->buf,
					      surface->level[0].pitch_bytes, whandle);
}

static void r600_texture_destroy(struct pipe_screen *screen,
				 struct pipe_resource *ptex)
{
	struct r600_texture *rtex = (struct r600_texture*)ptex;
	struct r600_resource *resource = &rtex->resource;

	if (rtex->flushed_depth_texture)
		pipe_resource_reference((struct pipe_resource **)&rtex->flushed_depth_texture, NULL);

	pipe_resource_reference((struct pipe_resource**)&rtex->htile_buffer, NULL);
	if (rtex->cmask_buffer != &rtex->resource) {
		pipe_resource_reference((struct pipe_resource**)&rtex->cmask_buffer, NULL);
	}
	pb_reference(&resource->buf, NULL);
	FREE(rtex);
}

static const struct u_resource_vtbl r600_texture_vtbl;

/* The number of samples can be specified independently of the texture. */
void r600_texture_get_fmask_info(struct r600_common_screen *rscreen,
				 struct r600_texture *rtex,
				 unsigned nr_samples,
				 struct r600_fmask_info *out)
{
	/* FMASK is allocated like an ordinary texture. */
	struct radeon_surface fmask = rtex->surface;

	memset(out, 0, sizeof(*out));

	fmask.bo_alignment = 0;
	fmask.bo_size = 0;
	fmask.nsamples = 1;
	fmask.flags |= RADEON_SURF_FMASK;

	/* Force 2D tiling if it wasn't set. This may occur when creating
	 * FMASK for MSAA resolve on R6xx. On R6xx, the single-sample
	 * destination buffer must have an FMASK too. */
	fmask.flags = RADEON_SURF_CLR(fmask.flags, MODE);
	fmask.flags |= RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE);

	if (rscreen->chip_class >= SI) {
		fmask.flags |= RADEON_SURF_HAS_TILE_MODE_INDEX;
	}

	switch (nr_samples) {
	case 2:
	case 4:
		fmask.bpe = 1;
		if (rscreen->chip_class <= CAYMAN) {
			fmask.bankh = 4;
		}
		break;
	case 8:
		fmask.bpe = 4;
		break;
	default:
		R600_ERR("Invalid sample count for FMASK allocation.\n");
		return;
	}

	/* Overallocate FMASK on R600-R700 to fix colorbuffer corruption.
	 * This can be fixed by writing a separate FMASK allocator specifically
	 * for R600-R700 asics. */
	if (rscreen->chip_class <= R700) {
		fmask.bpe *= 2;
	}

	if (rscreen->ws->surface_init(rscreen->ws, &fmask)) {
		R600_ERR("Got error in surface_init while allocating FMASK.\n");
		return;
	}

	assert(fmask.level[0].mode == RADEON_SURF_MODE_2D);

	out->slice_tile_max = (fmask.level[0].nblk_x * fmask.level[0].nblk_y) / 64;
	if (out->slice_tile_max)
		out->slice_tile_max -= 1;

	out->tile_mode_index = fmask.tiling_index[0];
	out->pitch = fmask.level[0].nblk_x;
	out->bank_height = fmask.bankh;
	out->alignment = MAX2(256, fmask.bo_alignment);
	out->size = fmask.bo_size;
}

static void r600_texture_allocate_fmask(struct r600_common_screen *rscreen,
					struct r600_texture *rtex)
{
	r600_texture_get_fmask_info(rscreen, rtex,
				    rtex->resource.b.b.nr_samples, &rtex->fmask);

	rtex->fmask.offset = align(rtex->size, rtex->fmask.alignment);
	rtex->size = rtex->fmask.offset + rtex->fmask.size;
}

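/* CMASK is the metadata that makes fast color clears possible: one 4-bit
 * element per 8x8 pixel tile of the colorbuffer. */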
void r600_texture_get_cmask_info(struct r600_common_screen *rscreen,
				 struct r600_texture *rtex,
				 struct r600_cmask_info *out)
{
	unsigned cmask_tile_width = 8;
	unsigned cmask_tile_height = 8;
	unsigned cmask_tile_elements = cmask_tile_width * cmask_tile_height;
	unsigned element_bits = 4;
	unsigned cmask_cache_bits = 1024;
	unsigned num_pipes = rscreen->tiling_info.num_channels;
	unsigned pipe_interleave_bytes = rscreen->tiling_info.group_bytes;

	unsigned elements_per_macro_tile = (cmask_cache_bits / element_bits) * num_pipes;
	unsigned pixels_per_macro_tile = elements_per_macro_tile * cmask_tile_elements;
	unsigned sqrt_pixels_per_macro_tile = sqrt(pixels_per_macro_tile);
	unsigned macro_tile_width = util_next_power_of_two(sqrt_pixels_per_macro_tile);
	unsigned macro_tile_height = pixels_per_macro_tile / macro_tile_width;

	unsigned pitch_elements = align(rtex->surface.npix_x, macro_tile_width);
	unsigned height = align(rtex->surface.npix_y, macro_tile_height);

	unsigned base_align = num_pipes * pipe_interleave_bytes;
	unsigned slice_bytes =
		((pitch_elements * height * element_bits + 7) / 8) / cmask_tile_elements;

	assert(macro_tile_width % 128 == 0);
	assert(macro_tile_height % 128 == 0);

	out->slice_tile_max = ((pitch_elements * height) / (128*128)) - 1;
	out->alignment = MAX2(256, base_align);
	out->size = (util_max_layer(&rtex->resource.b.b, 0) + 1) *
		    align(slice_bytes, base_align);
}

static void si_texture_get_cmask_info(struct r600_common_screen *rscreen,
				      struct r600_texture *rtex,
				      struct r600_cmask_info *out)
{
	unsigned pipe_interleave_bytes = rscreen->tiling_info.group_bytes;
	unsigned num_pipes = rscreen->info.r600_num_tile_pipes;
	unsigned cl_width, cl_height;

	switch (num_pipes) {
	case 2:
		cl_width = 32;
		cl_height = 16;
		break;
	case 4:
		cl_width = 32;
		cl_height = 32;
		break;
	case 8:
		cl_width = 64;
		cl_height = 32;
		break;
	case 16: /* Hawaii */
		cl_width = 64;
		cl_height = 64;
		break;
	default:
		assert(0);
		return;
	}

	unsigned base_align = num_pipes * pipe_interleave_bytes;

	unsigned width = align(rtex->surface.npix_x, cl_width*8);
	unsigned height = align(rtex->surface.npix_y, cl_height*8);
	unsigned slice_elements = (width * height) / (8*8);

	/* Each element of CMASK is a nibble. */
	unsigned slice_bytes = slice_elements / 2;

	out->slice_tile_max = (width * height) / (128*128);
	if (out->slice_tile_max)
		out->slice_tile_max -= 1;

	out->alignment = MAX2(256, base_align);
	out->size = (util_max_layer(&rtex->resource.b.b, 0) + 1) *
		    align(slice_bytes, base_align);
}

static void r600_texture_allocate_cmask(struct r600_common_screen *rscreen,
					struct r600_texture *rtex)
{
	if (rscreen->chip_class >= SI) {
		si_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
	} else {
		r600_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
	}

	rtex->cmask.offset = align(rtex->size, rtex->cmask.alignment);
	rtex->size = rtex->cmask.offset + rtex->cmask.size;

	if (rscreen->chip_class >= SI)
		rtex->cb_color_info |= SI_S_028C70_FAST_CLEAR(1);
	else
		rtex->cb_color_info |= EG_S_028C70_FAST_CLEAR(1);
}

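/* Allocate CMASK as a standalone buffer for a texture created without one,
 * so that fast clear can be enabled after the fact. */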
static void r600_texture_alloc_cmask_separate(struct r600_common_screen *rscreen,
					      struct r600_texture *rtex)
{
	if (rtex->cmask_buffer)
		return;

	assert(rtex->cmask.size == 0);

	if (rscreen->chip_class >= SI) {
		si_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
	} else {
		r600_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
	}

	rtex->cmask_buffer = (struct r600_resource *)
		pipe_buffer_create(&rscreen->b, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_DEFAULT, rtex->cmask.size);
	if (rtex->cmask_buffer == NULL) {
		rtex->cmask.size = 0;
		return;
	}

	/* update colorbuffer state bits */
	rtex->cmask.base_address_reg =
		r600_resource_va(&rscreen->b, &rtex->cmask_buffer->b.b) >> 8;

	if (rscreen->chip_class >= SI)
		rtex->cb_color_info |= SI_S_028C70_FAST_CLEAR(1);
	else
		rtex->cb_color_info |= EG_S_028C70_FAST_CLEAR(1);
}

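/* HTILE is the depth/stencil metadata used by Hyper-Z: one 32-bit word per
 * 8x8 pixel tile, with the total size padded to the pipe configuration. */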
static unsigned si_texture_htile_alloc_size(struct r600_common_screen *rscreen,
					    struct r600_texture *rtex)
{
	unsigned cl_width, cl_height, width, height;
	unsigned slice_elements, slice_bytes, pipe_interleave_bytes, base_align;
	unsigned num_pipes = rscreen->info.r600_num_tile_pipes;

	/* HTILE is broken with 1D tiling on old kernels and CIK. */
	if (rtex->surface.level[0].mode == RADEON_SURF_MODE_1D &&
	    rscreen->chip_class >= CIK && rscreen->info.drm_minor < 38)
		return 0;

	switch (num_pipes) {
	case 2:
		cl_width = 32;
		cl_height = 32;
		break;
	case 4:
		cl_width = 64;
		cl_height = 32;
		break;
	case 8:
		cl_width = 64;
		cl_height = 64;
		break;
	case 16:
		cl_width = 128;
		cl_height = 64;
		break;
	default:
		assert(0);
		return 0;
	}

	width = align(rtex->surface.npix_x, cl_width * 8);
	height = align(rtex->surface.npix_y, cl_height * 8);

	slice_elements = (width * height) / (8 * 8);
	slice_bytes = slice_elements * 4;

	pipe_interleave_bytes = rscreen->tiling_info.group_bytes;
	base_align = num_pipes * pipe_interleave_bytes;

	return (util_max_layer(&rtex->resource.b.b, 0) + 1) *
		align(slice_bytes, base_align);
}

static unsigned r600_texture_htile_alloc_size(struct r600_common_screen *rscreen,
					      struct r600_texture *rtex)
{
	unsigned sw = rtex->surface.level[0].nblk_x * rtex->surface.blk_w;
	unsigned sh = rtex->surface.level[0].nblk_y * rtex->surface.blk_h;
	unsigned npipes = rscreen->info.r600_num_tile_pipes;
	unsigned htile_size;

	/* XXX also use it for other texture targets */
	if (rscreen->info.drm_minor < 26 ||
	    rtex->resource.b.b.target != PIPE_TEXTURE_2D ||
	    rtex->surface.level[0].nblk_x < 32 ||
	    rtex->surface.level[0].nblk_y < 32) {
		return 0;
	}

	/* HW bug on R6xx. */
	if (rscreen->chip_class == R600 &&
	    (rtex->surface.level[0].npix_x > 7680 ||
	     rtex->surface.level[0].npix_y > 7680))
		return 0;

	/* this alignment and htile size only apply to linear htile buffer */
	sw = align(sw, 16 << 3);
	sh = align(sh, npipes << 3);
	htile_size = (sw >> 3) * (sh >> 3) * 4;
	/* must be aligned with 2K * npipes */
	htile_size = align(htile_size, (2 << 10) * npipes);
	return htile_size;
}

static void r600_texture_allocate_htile(struct r600_common_screen *rscreen,
					struct r600_texture *rtex)
{
	unsigned htile_size;
	if (rscreen->chip_class >= SI) {
		htile_size = si_texture_htile_alloc_size(rscreen, rtex);
	} else {
		htile_size = r600_texture_htile_alloc_size(rscreen, rtex);
	}

	if (!htile_size)
		return;

	/* XXX don't allocate it separately */
	rtex->htile_buffer = (struct r600_resource*)
		pipe_buffer_create(&rscreen->b, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_DEFAULT, htile_size);
	if (rtex->htile_buffer == NULL) {
		/* this is not a fatal error as we can still keep rendering
		 * without htile buffer */
		R600_ERR("Failed to create buffer object for htile buffer.\n");
	} else {
		r600_screen_clear_buffer(rscreen, &rtex->htile_buffer->b.b, 0, htile_size, 0);
	}
}

/* Common processing for r600_texture_create and r600_texture_from_handle */
static struct r600_texture *
r600_texture_create_object(struct pipe_screen *screen,
			   const struct pipe_resource *base,
			   unsigned pitch_in_bytes_override,
			   struct pb_buffer *buf,
			   struct radeon_surface *surface)
{
	struct r600_texture *rtex;
	struct r600_resource *resource;
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	uint64_t va;

	rtex = CALLOC_STRUCT(r600_texture);
	if (rtex == NULL)
		return NULL;

	resource = &rtex->resource;
	resource->b.b = *base;
	resource->b.vtbl = &r600_texture_vtbl;
	pipe_reference_init(&resource->b.b.reference, 1);
	resource->b.b.screen = screen;
	rtex->pitch_override = pitch_in_bytes_override;

	/* don't include stencil-only formats which we don't support for rendering */
	rtex->is_depth = util_format_has_depth(util_format_description(rtex->resource.b.b.format));

	rtex->surface = *surface;
	if (r600_setup_surface(screen, rtex, pitch_in_bytes_override)) {
		FREE(rtex);
		return NULL;
	}

	/* Tiled depth textures utilize the non-displayable tile order.
	 * This must be done after r600_setup_surface.
	 * Applies to R600-Cayman. */
	rtex->non_disp_tiling = rtex->is_depth && rtex->surface.level[0].mode >= RADEON_SURF_MODE_1D;

	if (rtex->is_depth) {
		if (!(base->flags & (R600_RESOURCE_FLAG_TRANSFER |
				     R600_RESOURCE_FLAG_FLUSHED_DEPTH)) &&
		    (rscreen->debug_flags & DBG_HYPERZ)) {

			r600_texture_allocate_htile(rscreen, rtex);
		}
	} else {
		if (base->nr_samples > 1) {
			if (!buf) {
				r600_texture_allocate_fmask(rscreen, rtex);
				r600_texture_allocate_cmask(rscreen, rtex);
				rtex->cmask_buffer = &rtex->resource;
			}
			if (!rtex->fmask.size || !rtex->cmask.size) {
				FREE(rtex);
				return NULL;
			}
		}
	}

	/* Now create the backing buffer. */
	if (!buf) {
		if (!r600_init_resource(rscreen, resource, rtex->size,
					rtex->surface.bo_alignment, TRUE)) {
			FREE(rtex);
			return NULL;
		}
	} else {
		resource->buf = buf;
		resource->cs_buf = rscreen->ws->buffer_get_cs_handle(buf);
		resource->domains = rscreen->ws->buffer_get_initial_domain(resource->cs_buf);
	}

	if (rtex->cmask.size) {
		/* Initialize the cmask to 0xCC (= compressed state). */
		r600_screen_clear_buffer(rscreen, &rtex->cmask_buffer->b.b,
					 rtex->cmask.offset, rtex->cmask.size, 0xCCCCCCCC);
	}

	/* Initialize the CMASK base register value. */
	va = r600_resource_va(&rscreen->b, &rtex->resource.b.b);
	rtex->cmask.base_address_reg = (va + rtex->cmask.offset) >> 8;

	if (rscreen->debug_flags & DBG_VM) {
		fprintf(stderr, "VM start=0x%"PRIX64" end=0x%"PRIX64" | Texture %ix%ix%i, %i levels, %i samples, %s\n",
			r600_resource_va(screen, &rtex->resource.b.b),
			r600_resource_va(screen, &rtex->resource.b.b) + rtex->resource.buf->size,
			base->width0, base->height0, util_max_layer(base, 0)+1, base->last_level+1,
			base->nr_samples ? base->nr_samples : 1, util_format_short_name(base->format));
	}

	if (rscreen->debug_flags & DBG_TEX ||
	    (rtex->resource.b.b.last_level > 0 && rscreen->debug_flags & DBG_TEXMIP)) {
		printf("Texture: npix_x=%u, npix_y=%u, npix_z=%u, blk_w=%u, "
		       "blk_h=%u, blk_d=%u, array_size=%u, last_level=%u, "
		       "bpe=%u, nsamples=%u, flags=0x%x, %s\n",
		       rtex->surface.npix_x, rtex->surface.npix_y,
		       rtex->surface.npix_z, rtex->surface.blk_w,
		       rtex->surface.blk_h, rtex->surface.blk_d,
		       rtex->surface.array_size, rtex->surface.last_level,
		       rtex->surface.bpe, rtex->surface.nsamples,
		       rtex->surface.flags, util_format_short_name(base->format));
		for (int i = 0; i <= rtex->surface.last_level; i++) {
			printf("  L %i: offset=%"PRIu64", slice_size=%"PRIu64", npix_x=%u, "
			       "npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
			       "nblk_z=%u, pitch_bytes=%u, mode=%u\n",
			       i, rtex->surface.level[i].offset,
			       rtex->surface.level[i].slice_size,
			       u_minify(rtex->resource.b.b.width0, i),
			       u_minify(rtex->resource.b.b.height0, i),
			       u_minify(rtex->resource.b.b.depth0, i),
			       rtex->surface.level[i].nblk_x,
			       rtex->surface.level[i].nblk_y,
			       rtex->surface.level[i].nblk_z,
			       rtex->surface.level[i].pitch_bytes,
			       rtex->surface.level[i].mode);
		}
		if (rtex->surface.flags & RADEON_SURF_SBUFFER) {
			for (int i = 0; i <= rtex->surface.last_level; i++) {
				printf("  S %i: offset=%"PRIu64", slice_size=%"PRIu64", npix_x=%u, "
				       "npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
				       "nblk_z=%u, pitch_bytes=%u, mode=%u\n",
				       i, rtex->surface.stencil_level[i].offset,
				       rtex->surface.stencil_level[i].slice_size,
				       u_minify(rtex->resource.b.b.width0, i),
				       u_minify(rtex->resource.b.b.height0, i),
				       u_minify(rtex->resource.b.b.depth0, i),
				       rtex->surface.stencil_level[i].nblk_x,
				       rtex->surface.stencil_level[i].nblk_y,
				       rtex->surface.stencil_level[i].nblk_z,
				       rtex->surface.stencil_level[i].pitch_bytes,
				       rtex->surface.stencil_level[i].mode);
			}
		}
	}
	return rtex;
}

static unsigned r600_choose_tiling(struct r600_common_screen *rscreen,
				   const struct pipe_resource *templ)
{
	const struct util_format_description *desc = util_format_description(templ->format);

	/* MSAA resources must be 2D tiled. */
	if (templ->nr_samples > 1)
		return RADEON_SURF_MODE_2D;

	/* Transfer resources should be linear. */
	if (templ->flags & R600_RESOURCE_FLAG_TRANSFER)
		return RADEON_SURF_MODE_LINEAR_ALIGNED;

	/* Handle common candidates for the linear mode.
	 * Compressed textures must always be tiled. */
	if (!(templ->flags & R600_RESOURCE_FLAG_FORCE_TILING) &&
	    !util_format_is_compressed(templ->format)) {
		/* Not everything can be linear, so we cannot enforce it
		 * for all textures. */
		if ((rscreen->debug_flags & DBG_NO_TILING) &&
		    (!util_format_is_depth_or_stencil(templ->format) ||
		     !(templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH)))
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Tiling doesn't work with the 422 (SUBSAMPLED) formats on R600+. */
		if (desc->layout == UTIL_FORMAT_LAYOUT_SUBSAMPLED)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Cursors are linear on SI.
		 * (XXX double-check, maybe also use RADEON_SURF_SCANOUT) */
		if (rscreen->chip_class >= SI &&
		    (templ->bind & PIPE_BIND_CURSOR))
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		if (templ->bind & PIPE_BIND_LINEAR)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Textures with a very small height are recommended to be linear. */
		if (templ->target == PIPE_TEXTURE_1D ||
		    templ->target == PIPE_TEXTURE_1D_ARRAY ||
		    templ->height0 <= 4)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Textures likely to be mapped often. */
		if (templ->usage == PIPE_USAGE_STAGING ||
		    templ->usage == PIPE_USAGE_STREAM)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;
	}

	/* Make small textures 1D tiled. */
	if (templ->width0 <= 16 || templ->height0 <= 16 ||
	    (rscreen->debug_flags & DBG_NO_2D_TILING))
		return RADEON_SURF_MODE_1D;

	/* The allocator will switch to 1D if needed. */
	return RADEON_SURF_MODE_2D;
}

struct pipe_resource *r600_texture_create(struct pipe_screen *screen,
					  const struct pipe_resource *templ)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct radeon_surface surface = {0};
	int r;

	r = r600_init_surface(rscreen, &surface, templ,
			      r600_choose_tiling(rscreen, templ),
			      templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH);
	if (r) {
		return NULL;
	}
	r = rscreen->ws->surface_best(rscreen->ws, &surface);
	if (r) {
		return NULL;
	}
	return (struct pipe_resource *)r600_texture_create_object(screen, templ,
								  0, NULL, &surface);
}

static struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen,
						      const struct pipe_resource *templ,
						      struct winsys_handle *whandle)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct pb_buffer *buf = NULL;
	unsigned stride = 0;
	unsigned array_mode;
	enum radeon_bo_layout micro, macro;
	struct radeon_surface surface;
	bool scanout;
	int r;

	/* Support only 2D textures without mipmaps */
	if ((templ->target != PIPE_TEXTURE_2D && templ->target != PIPE_TEXTURE_RECT) ||
	    templ->depth0 != 1 || templ->last_level != 0)
		return NULL;

	buf = rscreen->ws->buffer_from_handle(rscreen->ws, whandle, &stride);
	if (!buf)
		return NULL;

	rscreen->ws->buffer_get_tiling(buf, &micro, &macro,
				       &surface.bankw, &surface.bankh,
				       &surface.tile_split,
				       &surface.stencil_tile_split,
				       &surface.mtilea, &scanout);

	if (macro == RADEON_LAYOUT_TILED)
		array_mode = RADEON_SURF_MODE_2D;
	else if (micro == RADEON_LAYOUT_TILED)
		array_mode = RADEON_SURF_MODE_1D;
	else
		array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;

	r = r600_init_surface(rscreen, &surface, templ, array_mode, false);
	if (r) {
		return NULL;
	}

	if (scanout)
		surface.flags |= RADEON_SURF_SCANOUT;

	return (struct pipe_resource *)r600_texture_create_object(screen, templ,
								  stride, buf, &surface);
}

bool r600_init_flushed_depth_texture(struct pipe_context *ctx,
				     struct pipe_resource *texture,
				     struct r600_texture **staging)
{
	struct r600_texture *rtex = (struct r600_texture*)texture;
	struct pipe_resource resource;
	struct r600_texture **flushed_depth_texture = staging ?
			staging : &rtex->flushed_depth_texture;

	if (!staging && rtex->flushed_depth_texture)
		return true; /* it's ready */

	resource.target = texture->target;
	resource.format = texture->format;
	resource.width0 = texture->width0;
	resource.height0 = texture->height0;
	resource.depth0 = texture->depth0;
	resource.array_size = texture->array_size;
	resource.last_level = texture->last_level;
	resource.nr_samples = texture->nr_samples;
	resource.usage = staging ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
	resource.bind = texture->bind & ~PIPE_BIND_DEPTH_STENCIL;
	resource.flags = texture->flags | R600_RESOURCE_FLAG_FLUSHED_DEPTH;

	if (staging)
		resource.flags |= R600_RESOURCE_FLAG_TRANSFER;

	*flushed_depth_texture = (struct r600_texture *)ctx->screen->resource_create(ctx->screen, &resource);
	if (*flushed_depth_texture == NULL) {
		R600_ERR("failed to create temporary texture to hold flushed depth\n");
		return false;
	}

	(*flushed_depth_texture)->is_flushing_texture = TRUE;
	(*flushed_depth_texture)->non_disp_tiling = false;
	return true;
}

/**
 * Initialize the pipe_resource descriptor to be of the same size as the box,
 * which is supposed to hold a subregion of the texture "orig" at the given
 * mipmap level.
 */
static void r600_init_temp_resource_from_box(struct pipe_resource *res,
					     struct pipe_resource *orig,
					     const struct pipe_box *box,
					     unsigned level, unsigned flags)
{
	memset(res, 0, sizeof(*res));
	res->format = orig->format;
	res->width0 = box->width;
	res->height0 = box->height;
	res->depth0 = 1;
	res->array_size = 1;
	res->usage = flags & R600_RESOURCE_FLAG_TRANSFER ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
	res->flags = flags;

	/* We must set the correct texture target and dimensions for a 3D box. */
	if (box->depth > 1 && util_max_layer(orig, level) > 0)
		res->target = orig->target;
	else
		res->target = PIPE_TEXTURE_2D;

	switch (res->target) {
	case PIPE_TEXTURE_1D_ARRAY:
	case PIPE_TEXTURE_2D_ARRAY:
	case PIPE_TEXTURE_CUBE_ARRAY:
		res->array_size = box->depth;
		break;
	case PIPE_TEXTURE_3D:
		res->depth0 = box->depth;
		break;
	default:;
	}
}

static void *r600_texture_transfer_map(struct pipe_context *ctx,
				       struct pipe_resource *texture,
				       unsigned level,
				       unsigned usage,
				       const struct pipe_box *box,
				       struct pipe_transfer **ptransfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_texture *rtex = (struct r600_texture*)texture;
	struct r600_transfer *trans;
	boolean use_staging_texture = FALSE;
	struct r600_resource *buf;
	unsigned offset = 0;
	char *map;

	/* We cannot map a tiled texture directly because the data is
	 * in a different order, therefore we do detiling using a blit.
	 *
	 * Also, use a temporary in GTT memory for read transfers, as
	 * the CPU is much happier reading out of cached system memory
	 * than uncached VRAM.
	 */
	if (rtex->surface.level[level].mode >= RADEON_SURF_MODE_1D)
		use_staging_texture = TRUE;

	/* Untiled buffers in VRAM, which is slow for CPU reads */
	if ((usage & PIPE_TRANSFER_READ) && !(usage & PIPE_TRANSFER_MAP_DIRECTLY) &&
	    (rtex->resource.domains == RADEON_DOMAIN_VRAM)) {
		use_staging_texture = TRUE;
	}

	/* Use a staging texture for uploads if the underlying BO is busy. */
	if (!(usage & PIPE_TRANSFER_READ) &&
	    (r600_rings_is_buffer_referenced(rctx, rtex->resource.cs_buf, RADEON_USAGE_READWRITE) ||
	     rctx->ws->buffer_is_busy(rtex->resource.buf, RADEON_USAGE_READWRITE))) {
		use_staging_texture = TRUE;
	}

	if (texture->flags & R600_RESOURCE_FLAG_TRANSFER) {
		use_staging_texture = FALSE;
	}

	if (use_staging_texture && (usage & PIPE_TRANSFER_MAP_DIRECTLY)) {
		return NULL;
	}

	trans = CALLOC_STRUCT(r600_transfer);
	if (trans == NULL)
		return NULL;
	trans->transfer.resource = texture;
	trans->transfer.level = level;
	trans->transfer.usage = usage;
	trans->transfer.box = *box;

	if (rtex->is_depth) {
		struct r600_texture *staging_depth;

		if (rtex->resource.b.b.nr_samples > 1) {
			/* MSAA depth buffers need to be converted to single sample buffers.
			 *
			 * Mapping MSAA depth buffers can occur if ReadPixels is called
			 * with a multisample GLX visual.
			 *
			 * First downsample the depth buffer to a temporary texture,
			 * then decompress the temporary one to staging.
			 *
			 * Only the region being mapped is transferred.
			 */
			struct pipe_resource resource;

			r600_init_temp_resource_from_box(&resource, texture, box, level, 0);

			if (!r600_init_flushed_depth_texture(ctx, &resource, &staging_depth)) {
				R600_ERR("failed to create temporary texture to hold untiled copy\n");
				FREE(trans);
				return NULL;
			}

			if (usage & PIPE_TRANSFER_READ) {
				struct pipe_resource *temp = ctx->screen->resource_create(ctx->screen, &resource);

				r600_copy_region_with_blit(ctx, temp, 0, 0, 0, 0, texture, level, box);
				rctx->blit_decompress_depth(ctx, (struct r600_texture*)temp, staging_depth,
							    0, 0, 0, box->depth, 0, 0);
				pipe_resource_reference((struct pipe_resource**)&temp, NULL);
			}
		}
		else {
			/* XXX: only readback the rectangle which is being mapped? */
			/* XXX: when discard is true, no need to read back from depth texture */
			if (!r600_init_flushed_depth_texture(ctx, texture, &staging_depth)) {
				R600_ERR("failed to create temporary texture to hold untiled copy\n");
				FREE(trans);
				return NULL;
			}

			rctx->blit_decompress_depth(ctx, rtex, staging_depth,
						    level, level,
						    box->z, box->z + box->depth - 1,
						    0, 0);

			offset = r600_texture_get_offset(staging_depth, level, box);
		}

		trans->transfer.stride = staging_depth->surface.level[level].pitch_bytes;
		trans->transfer.layer_stride = staging_depth->surface.level[level].slice_size;
		trans->staging = (struct r600_resource*)staging_depth;
	} else if (use_staging_texture) {
		struct pipe_resource resource;
		struct r600_texture *staging;

		r600_init_temp_resource_from_box(&resource, texture, box, level,
						 R600_RESOURCE_FLAG_TRANSFER);
		resource.usage = (usage & PIPE_TRANSFER_READ) ?
			PIPE_USAGE_STAGING : PIPE_USAGE_STREAM;

		/* Create the temporary texture. */
		staging = (struct r600_texture*)ctx->screen->resource_create(ctx->screen, &resource);
		if (staging == NULL) {
			R600_ERR("failed to create temporary texture to hold untiled copy\n");
			FREE(trans);
			return NULL;
		}
		trans->staging = &staging->resource;
		trans->transfer.stride = staging->surface.level[0].pitch_bytes;
		trans->transfer.layer_stride = staging->surface.level[0].slice_size;
		if (usage & PIPE_TRANSFER_READ) {
			r600_copy_to_staging_texture(ctx, trans);
		}
	} else {
		/* the resource is mapped directly */
		trans->transfer.stride = rtex->surface.level[level].pitch_bytes;
		trans->transfer.layer_stride = rtex->surface.level[level].slice_size;
		offset = r600_texture_get_offset(rtex, level, box);
	}

	if (trans->staging) {
		buf = trans->staging;
		if (!rtex->is_depth && !(usage & PIPE_TRANSFER_READ))
			usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
	} else {
		buf = &rtex->resource;
	}

	if (!(map = r600_buffer_map_sync_with_rings(rctx, buf, usage))) {
		pipe_resource_reference((struct pipe_resource**)&trans->staging, NULL);
		FREE(trans);
		return NULL;
	}

	*ptransfer = &trans->transfer;
	return map + offset;
}

static void r600_texture_transfer_unmap(struct pipe_context *ctx,
					struct pipe_transfer* transfer)
{
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct radeon_winsys_cs_handle *buf;
	struct pipe_resource *texture = transfer->resource;
	struct r600_texture *rtex = (struct r600_texture*)texture;

	if (rtransfer->staging) {
		buf = rtransfer->staging->cs_buf;
	} else {
		buf = r600_resource(transfer->resource)->cs_buf;
	}
	rctx->ws->buffer_unmap(buf);

	if ((transfer->usage & PIPE_TRANSFER_WRITE) && rtransfer->staging) {
		if (rtex->is_depth && rtex->resource.b.b.nr_samples <= 1) {
			ctx->resource_copy_region(ctx, texture, transfer->level,
						  transfer->box.x, transfer->box.y, transfer->box.z,
						  &rtransfer->staging->b.b, transfer->level,
						  &transfer->box);
		} else {
			r600_copy_from_staging_texture(ctx, rtransfer);
		}
	}

	if (rtransfer->staging)
		pipe_resource_reference((struct pipe_resource**)&rtransfer->staging, NULL);

	FREE(transfer);
}

static const struct u_resource_vtbl r600_texture_vtbl =
{
	NULL,				/* get_handle */
	r600_texture_destroy,		/* resource_destroy */
	r600_texture_transfer_map,	/* transfer_map */
	NULL,				/* transfer_flush_region */
	r600_texture_transfer_unmap,	/* transfer_unmap */
	NULL				/* transfer_inline_write */
};

struct pipe_surface *r600_create_surface_custom(struct pipe_context *pipe,
						struct pipe_resource *texture,
						const struct pipe_surface *templ,
						unsigned width, unsigned height)
{
	struct r600_surface *surface = CALLOC_STRUCT(r600_surface);

	if (surface == NULL)
		return NULL;

	assert(templ->u.tex.first_layer <= util_max_layer(texture, templ->u.tex.level));
	assert(templ->u.tex.last_layer <= util_max_layer(texture, templ->u.tex.level));

	pipe_reference_init(&surface->base.reference, 1);
	pipe_resource_reference(&surface->base.texture, texture);
	surface->base.context = pipe;
	surface->base.format = templ->format;
	surface->base.width = width;
	surface->base.height = height;
	surface->base.u = templ->u;
	return &surface->base;
}

static struct pipe_surface *r600_create_surface(struct pipe_context *pipe,
						struct pipe_resource *tex,
						const struct pipe_surface *templ)
{
	unsigned level = templ->u.tex.level;

	return r600_create_surface_custom(pipe, tex, templ,
					  u_minify(tex->width0, level),
					  u_minify(tex->height0, level));
}

static void r600_surface_destroy(struct pipe_context *pipe,
				 struct pipe_surface *surface)
{
	struct r600_surface *surf = (struct r600_surface*)surface;
	pipe_resource_reference((struct pipe_resource**)&surf->cb_buffer_fmask, NULL);
	pipe_resource_reference((struct pipe_resource**)&surf->cb_buffer_cmask, NULL);
	pipe_resource_reference(&surface->texture, NULL);
	FREE(surface);
}

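/* Translate a format's channel swizzle into the hardware COLOR_SWAP value;
 * returns ~0U if there is no matching swap. */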
unsigned r600_translate_colorswap(enum pipe_format format)
{
	const struct util_format_description *desc = util_format_description(format);

#define HAS_SWIZZLE(chan,swz) (desc->swizzle[chan] == UTIL_FORMAT_SWIZZLE_##swz)

	if (format == PIPE_FORMAT_R11G11B10_FLOAT) /* isn't plain */
		return V_0280A0_SWAP_STD;

	if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN)
		return ~0U;

	switch (desc->nr_channels) {
	case 1:
		if (HAS_SWIZZLE(0,X))
			return V_0280A0_SWAP_STD; /* X___ */
		else if (HAS_SWIZZLE(3,X))
			return V_0280A0_SWAP_ALT_REV; /* ___X */
		break;
	case 2:
		if ((HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,Y)) ||
		    (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,NONE)) ||
		    (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,Y)))
			return V_0280A0_SWAP_STD; /* XY__ */
		else if ((HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,X)) ||
			 (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,NONE)) ||
			 (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,X)))
			return V_0280A0_SWAP_STD_REV; /* YX__ */
		else if (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(3,Y))
			return V_0280A0_SWAP_ALT; /* X__Y */
		else if (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(3,X))
			return V_0280A0_SWAP_ALT_REV; /* Y__X */
		break;
	case 3:
		if (HAS_SWIZZLE(0,X))
			return V_0280A0_SWAP_STD; /* XYZ */
		else if (HAS_SWIZZLE(0,Z))
			return V_0280A0_SWAP_STD_REV; /* ZYX */
		break;
	case 4:
		/* check the middle channels, the 1st and 4th channel can be NONE */
		if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,Z))
			return V_0280A0_SWAP_STD; /* XYZW */
		else if (HAS_SWIZZLE(1,Z) && HAS_SWIZZLE(2,Y))
			return V_0280A0_SWAP_STD_REV; /* WZYX */
		else if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,X))
			return V_0280A0_SWAP_ALT; /* ZYXW */
		else if (HAS_SWIZZLE(1,X) && HAS_SWIZZLE(2,Y))
			return V_0280A0_SWAP_ALT_REV; /* WXYZ */
		break;
	}
	return ~0U;
}

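/* Pack the clear color in the given surface format into the two dwords
 * consumed by the fast clear color registers. */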
static void evergreen_set_clear_color(struct r600_texture *rtex,
				      enum pipe_format surface_format,
				      const union pipe_color_union *color)
{
	union util_color uc;

	memset(&uc, 0, sizeof(uc));

	if (util_format_is_pure_uint(surface_format)) {
		util_format_write_4ui(surface_format, color->ui, 0, &uc, 0, 0, 0, 1, 1);
	} else if (util_format_is_pure_sint(surface_format)) {
		util_format_write_4i(surface_format, color->i, 0, &uc, 0, 0, 0, 1, 1);
	} else {
		util_pack_color(color->f, surface_format, &uc);
	}

	memcpy(rtex->color_clear_value, &uc, 2 * sizeof(uint32_t));
}

void evergreen_do_fast_color_clear(struct r600_common_context *rctx,
				   struct pipe_framebuffer_state *fb,
				   struct r600_atom *fb_state,
				   unsigned *buffers,
				   const union pipe_color_union *color)
{
	int i;

	if (rctx->current_render_cond)
		return;

	for (i = 0; i < fb->nr_cbufs; i++) {
		struct r600_texture *tex;
		unsigned clear_bit = PIPE_CLEAR_COLOR0 << i;

		if (!fb->cbufs[i])
			continue;

		/* if this colorbuffer is not being cleared */
		if (!(*buffers & clear_bit))
			continue;

		tex = (struct r600_texture *)fb->cbufs[i]->texture;

		/* 128-bit formats are unsupported */
		if (util_format_get_blocksizebits(fb->cbufs[i]->format) > 64) {
			continue;
		}

		/* the clear is allowed if all layers are bound */
		if (fb->cbufs[i]->u.tex.first_layer != 0 ||
		    fb->cbufs[i]->u.tex.last_layer != util_max_layer(&tex->resource.b.b, 0)) {
			continue;
		}

		/* cannot clear mipmapped textures */
		if (fb->cbufs[i]->texture->last_level != 0) {
			continue;
		}

		/* only supported on tiled surfaces */
		if (tex->surface.level[0].mode < RADEON_SURF_MODE_1D) {
			continue;
		}

		/* fast color clear with 1D tiling doesn't work on old kernels and CIK */
		if (tex->surface.level[0].mode == RADEON_SURF_MODE_1D &&
		    rctx->chip_class >= CIK && rctx->screen->info.drm_minor < 38) {
			continue;
		}

		/* ensure CMASK is enabled */
		r600_texture_alloc_cmask_separate(rctx->screen, tex);
		if (tex->cmask.size == 0) {
			continue;
		}

		/* Do the fast clear. */
		evergreen_set_clear_color(tex, fb->cbufs[i]->format, color);
		rctx->clear_buffer(&rctx->b, &tex->cmask_buffer->b.b,
				   tex->cmask.offset, tex->cmask.size, 0);

		tex->dirty_level_mask |= 1 << fb->cbufs[i]->u.tex.level;
		fb_state->dirty = true;
		*buffers &= ~clear_bit;
	}
}

void r600_init_screen_texture_functions(struct r600_common_screen *rscreen)
{
	rscreen->b.resource_from_handle = r600_texture_from_handle;
	rscreen->b.resource_get_handle = r600_texture_get_handle;
}

void r600_init_context_texture_functions(struct r600_common_context *rctx)
{
	rctx->b.create_surface = r600_create_surface;
	rctx->b.surface_destroy = r600_surface_destroy;
}