[mesa.git] / src / gallium / drivers / r600 / r600_texture.c
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Jerome Glisse
25 * Corbin Simpson
26 */
27 #include "r600_formats.h"
28 #include "r600d.h"
29
30 #include <errno.h>
31 #include "util/u_format_s3tc.h"
32 #include "util/u_memory.h"
33
34 /* Copy from a full GPU texture to a transfer's staging one. */
35 static void r600_copy_to_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
36 {
37 struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
38 struct pipe_resource *texture = transfer->resource;
39
40 ctx->resource_copy_region(ctx, &rtransfer->staging->b.b,
41 0, 0, 0, 0, texture, transfer->level,
42 &transfer->box);
43 }
44
45
46 /* Copy from a transfer's staging texture to a full GPU one. */
47 static void r600_copy_from_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
48 {
49 struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
50 struct pipe_resource *texture = transfer->resource;
51 struct pipe_box sbox;
52
53 sbox.x = sbox.y = sbox.z = 0;
54 sbox.width = transfer->box.width;
55 sbox.height = transfer->box.height;
56 /* XXX that might be wrong */
57 sbox.depth = 1;
58 ctx->resource_copy_region(ctx, texture, transfer->level,
59 transfer->box.x, transfer->box.y, transfer->box.z,
60 &rtransfer->staging->b.b,
61 0, &sbox);
62 }
63
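/* Return the byte offset of the given mip level / layer within the texture's backing buffer. */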
64 unsigned r600_texture_get_offset(struct r600_resource_texture *rtex,
65 unsigned level, unsigned layer)
66 {
67 unsigned offset = rtex->offset[level];
68
69 switch (rtex->resource.b.b.target) {
70 case PIPE_TEXTURE_3D:
71 case PIPE_TEXTURE_CUBE:
72 default:
73 return offset + layer * rtex->layer_size[level];
74 }
75 }
76
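/* Pitch (width) alignment, in blocks, required by the given array mode. */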
77 static unsigned r600_get_block_alignment(struct pipe_screen *screen,
78 enum pipe_format format,
79 unsigned array_mode)
80 {
81 struct r600_screen* rscreen = (struct r600_screen *)screen;
82 unsigned pixsize = util_format_get_blocksize(format);
83 int p_align;
84
85 switch(array_mode) {
86 case V_038000_ARRAY_1D_TILED_THIN1:
87 p_align = MAX2(8,
88 ((rscreen->tiling_info.group_bytes / 8 / pixsize)));
89 break;
90 case V_038000_ARRAY_2D_TILED_THIN1:
91 p_align = MAX2(rscreen->tiling_info.num_banks,
92 (((rscreen->tiling_info.group_bytes / 8 / pixsize)) *
93 rscreen->tiling_info.num_banks)) * 8;
94 break;
95 case V_038000_ARRAY_LINEAR_ALIGNED:
96 p_align = MAX2(64, rscreen->tiling_info.group_bytes / pixsize);
97 break;
98 case V_038000_ARRAY_LINEAR_GENERAL:
99 default:
100 p_align = rscreen->tiling_info.group_bytes / pixsize;
101 break;
102 }
103 return p_align;
104 }
105
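/* Height alignment, in block rows, required by the given array mode. */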
106 static unsigned r600_get_height_alignment(struct pipe_screen *screen,
107 unsigned array_mode)
108 {
109 struct r600_screen* rscreen = (struct r600_screen *)screen;
110 int h_align;
111
112 switch (array_mode) {
113 case V_038000_ARRAY_2D_TILED_THIN1:
114 h_align = rscreen->tiling_info.num_channels * 8;
115 break;
116 case V_038000_ARRAY_1D_TILED_THIN1:
117 case V_038000_ARRAY_LINEAR_ALIGNED:
118 h_align = 8;
119 break;
120 case V_038000_ARRAY_LINEAR_GENERAL:
121 default:
122 h_align = 1;
123 break;
124 }
125 return h_align;
126 }
127
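/* Base offset alignment, in bytes, for a level with the given format and array mode. */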
128 static unsigned r600_get_base_alignment(struct pipe_screen *screen,
129 enum pipe_format format,
130 unsigned array_mode)
131 {
132 struct r600_screen* rscreen = (struct r600_screen *)screen;
133 unsigned pixsize = util_format_get_blocksize(format);
134 int p_align = r600_get_block_alignment(screen, format, array_mode);
135 int h_align = r600_get_height_alignment(screen, array_mode);
136 int b_align;
137
138 switch (array_mode) {
139 case V_038000_ARRAY_2D_TILED_THIN1:
140 b_align = MAX2(rscreen->tiling_info.num_banks * rscreen->tiling_info.num_channels * 8 * 8 * pixsize,
141 p_align * pixsize * h_align);
142 break;
143 case V_038000_ARRAY_1D_TILED_THIN1:
144 case V_038000_ARRAY_LINEAR_ALIGNED:
145 case V_038000_ARRAY_LINEAR_GENERAL:
146 default:
147 b_align = rscreen->tiling_info.group_bytes;
148 break;
149 }
150 return b_align;
151 }
152
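/* Minify a dimension for the given mip level; non-base levels are rounded up to the next power of two. */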
153 static unsigned mip_minify(unsigned size, unsigned level)
154 {
155 unsigned val;
156 val = u_minify(size, level);
157 if (level > 0)
158 val = util_next_power_of_two(val);
159 return val;
160 }
161
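/* Width of a level in blocks, aligned to the array mode's pitch alignment (or derived from the pitch override if one is set). */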
162 static unsigned r600_texture_get_nblocksx(struct pipe_screen *screen,
163 struct r600_resource_texture *rtex,
164 unsigned level)
165 {
166 struct pipe_resource *ptex = &rtex->resource.b.b;
167 unsigned nblocksx, block_align, width;
168 unsigned blocksize = util_format_get_blocksize(rtex->real_format);
169
170 if (rtex->pitch_override)
171 return rtex->pitch_override / blocksize;
172
173 width = mip_minify(ptex->width0, level);
174 nblocksx = util_format_get_nblocksx(rtex->real_format, width);
175
176 block_align = r600_get_block_alignment(screen, rtex->real_format,
177 rtex->array_mode[level]);
178 nblocksx = align(nblocksx, block_align);
179 return nblocksx;
180 }
181
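/* Height of a level in block rows, aligned to the array mode's height alignment. */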
182 static unsigned r600_texture_get_nblocksy(struct pipe_screen *screen,
183 struct r600_resource_texture *rtex,
184 unsigned level)
185 {
186 struct pipe_resource *ptex = &rtex->resource.b.b;
187 unsigned height, tile_height;
188
189 height = mip_minify(ptex->height0, level);
190 height = util_format_get_nblocksy(rtex->real_format, height);
191 tile_height = r600_get_height_alignment(screen,
192 rtex->array_mode[level]);
193
194 /* XXX Hack around an alignment issue. Fewer tests fail with this.
195 *
196 * The thing is depth-stencil buffers should be tiled, i.e.
197 * the alignment should be >=8. If I make them tiled, stencil starts
198 * working because it no longer overlaps with the depth buffer
199 * in memory, but texturing like drawpix-stencil breaks. */
200 if (util_format_is_depth_or_stencil(rtex->real_format) && tile_height < 8)
201 tile_height = 8;
202
203 height = align(height, tile_height);
204 return height;
205 }
206
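/* Record the array mode for a level, demoting 2D tiling to 1D when the level is too small to fill a 2D tile. */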
207 static void r600_texture_set_array_mode(struct pipe_screen *screen,
208 struct r600_resource_texture *rtex,
209 unsigned level, unsigned array_mode)
210 {
211 struct pipe_resource *ptex = &rtex->resource.b.b;
212
213 switch (array_mode) {
214 case V_0280A0_ARRAY_LINEAR_GENERAL:
215 case V_0280A0_ARRAY_LINEAR_ALIGNED:
216 case V_0280A0_ARRAY_1D_TILED_THIN1:
217 default:
218 rtex->array_mode[level] = array_mode;
219 break;
220 case V_0280A0_ARRAY_2D_TILED_THIN1:
221 {
222 unsigned w, h, tile_height, tile_width;
223
224 tile_height = r600_get_height_alignment(screen, array_mode);
225 tile_width = r600_get_block_alignment(screen, rtex->real_format, array_mode);
226
227 w = mip_minify(ptex->width0, level);
228 h = mip_minify(ptex->height0, level);
229 if (w <= tile_width || h <= tile_height)
230 rtex->array_mode[level] = V_0280A0_ARRAY_1D_TILED_THIN1;
231 else
232 rtex->array_mode[level] = array_mode;
233 }
234 break;
235 }
236 }
237
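/* Fill in a radeon_surface description from the pipe_resource template; returns -EINVAL for unsupported targets. */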
238 static int r600_init_surface(struct radeon_surface *surface,
239 const struct pipe_resource *ptex,
240 unsigned array_mode, bool is_transfer)
241 {
242 surface->npix_x = ptex->width0;
243 surface->npix_y = ptex->height0;
244 surface->npix_z = ptex->depth0;
245 surface->blk_w = util_format_get_blockwidth(ptex->format);
246 surface->blk_h = util_format_get_blockheight(ptex->format);
247 surface->blk_d = 1;
248 surface->array_size = 1;
249 surface->last_level = ptex->last_level;
250 surface->bpe = util_format_get_blocksize(ptex->format);
251 /* align bytes per element on a dword */
252 if (surface->bpe == 3) {
253 surface->bpe = 4;
254 }
255 surface->nsamples = 1;
256 surface->flags = 0;
257 switch (array_mode) {
258 case V_038000_ARRAY_1D_TILED_THIN1:
259 surface->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_1D, MODE);
260 break;
261 case V_038000_ARRAY_2D_TILED_THIN1:
262 surface->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE);
263 break;
264 case V_038000_ARRAY_LINEAR_ALIGNED:
265 surface->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_LINEAR_ALIGNED, MODE);
266 break;
267 case V_038000_ARRAY_LINEAR_GENERAL:
268 default:
269 surface->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_LINEAR, MODE);
270 break;
271 }
272 switch (ptex->target) {
273 case PIPE_TEXTURE_1D:
274 surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_1D, TYPE);
275 break;
276 case PIPE_TEXTURE_RECT:
277 case PIPE_TEXTURE_2D:
278 surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D, TYPE);
279 break;
280 case PIPE_TEXTURE_3D:
281 surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_3D, TYPE);
282 break;
283 case PIPE_TEXTURE_1D_ARRAY:
284 surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_1D_ARRAY, TYPE);
285 surface->array_size = ptex->array_size;
286 break;
287 case PIPE_TEXTURE_2D_ARRAY:
288 surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D_ARRAY, TYPE);
289 surface->array_size = ptex->array_size;
290 break;
291 case PIPE_TEXTURE_CUBE:
292 surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_CUBEMAP, TYPE);
293 break;
294 case PIPE_BUFFER:
295 default:
296 return -EINVAL;
297 }
298 if (ptex->bind & PIPE_BIND_SCANOUT) {
299 surface->flags |= RADEON_SURF_SCANOUT;
300 }
301 if (util_format_is_depth_and_stencil(ptex->format) && !is_transfer) {
302 surface->flags |= RADEON_SURF_ZBUFFER;
303 surface->flags |= RADEON_SURF_SBUFFER;
304 }
305
306 return 0;
307 }
308
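/* Have the winsys compute the surface layout and copy the per-level results (offsets, pitches, array modes) back into the texture. */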
309 static int r600_setup_surface(struct pipe_screen *screen,
310 struct r600_resource_texture *rtex,
311 unsigned array_mode,
312 unsigned pitch_in_bytes_override)
313 {
314 struct pipe_resource *ptex = &rtex->resource.b.b;
315 struct r600_screen *rscreen = (struct r600_screen*)screen;
316 unsigned i;
317 int r;
318
319 r = rscreen->ws->surface_init(rscreen->ws, &rtex->surface);
320 if (r) {
321 return r;
322 }
323 rtex->size = rtex->surface.bo_size;
324 if (pitch_in_bytes_override && pitch_in_bytes_override != rtex->surface.level[0].pitch_bytes) {
325 /* The old DDX on Evergreen overestimates the alignment for 1D tiling;
326 * surfaces coming from it only have one level.
327 */
328 rtex->surface.level[0].nblk_x = pitch_in_bytes_override / rtex->surface.bpe;
329 rtex->surface.level[0].pitch_bytes = pitch_in_bytes_override;
330 rtex->surface.level[0].slice_size = pitch_in_bytes_override * rtex->surface.level[0].nblk_y;
331 if (rtex->surface.flags & RADEON_SURF_SBUFFER) {
332 rtex->surface.stencil_offset = rtex->surface.level[0].slice_size;
333 }
334 }
335 for (i = 0; i <= ptex->last_level; i++) {
336 rtex->offset[i] = rtex->surface.level[i].offset;
337 rtex->layer_size[i] = rtex->surface.level[i].slice_size;
338 rtex->pitch_in_bytes[i] = rtex->surface.level[i].pitch_bytes;
339 switch (rtex->surface.level[i].mode) {
340 case RADEON_SURF_MODE_LINEAR_ALIGNED:
341 rtex->array_mode[i] = V_038000_ARRAY_LINEAR_ALIGNED;
342 break;
343 case RADEON_SURF_MODE_1D:
344 rtex->array_mode[i] = V_038000_ARRAY_1D_TILED_THIN1;
345 break;
346 case RADEON_SURF_MODE_2D:
347 rtex->array_mode[i] = V_038000_ARRAY_2D_TILED_THIN1;
348 break;
349 default:
350 case RADEON_SURF_MODE_LINEAR:
351 rtex->array_mode[i] = 0;
352 break;
353 }
354 }
355 return 0;
356 }
357
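/* Compute the per-level miptree layout (offsets, layer sizes, pitches) in the driver. */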
358 static void r600_setup_miptree(struct pipe_screen *screen,
359 struct r600_resource_texture *rtex,
360 unsigned array_mode)
361 {
362 struct pipe_resource *ptex = &rtex->resource.b.b;
363 enum chip_class chipc = ((struct r600_screen*)screen)->chip_class;
364 unsigned size, layer_size, i, offset;
365 unsigned nblocksx, nblocksy;
366
367 for (i = 0, offset = 0; i <= ptex->last_level; i++) {
368 unsigned blocksize = util_format_get_blocksize(rtex->real_format);
369 unsigned base_align = r600_get_base_alignment(screen, rtex->real_format, array_mode);
370
371 r600_texture_set_array_mode(screen, rtex, i, array_mode);
372
373 nblocksx = r600_texture_get_nblocksx(screen, rtex, i);
374 nblocksy = r600_texture_get_nblocksy(screen, rtex, i);
375
376 if (chipc >= EVERGREEN && array_mode == V_038000_ARRAY_LINEAR_GENERAL)
377 layer_size = align(nblocksx, 64) * nblocksy * blocksize;
378 else
379 layer_size = nblocksx * nblocksy * blocksize;
380
381 if (ptex->target == PIPE_TEXTURE_CUBE) {
382 if (chipc >= R700)
383 size = layer_size * 8;
384 else
385 size = layer_size * 6;
386 }
387 else if (ptex->target == PIPE_TEXTURE_3D)
388 size = layer_size * u_minify(ptex->depth0, i);
389 else
390 size = layer_size * ptex->array_size;
391
392 /* align base image and start of miptree */
393 if ((i == 0) || (i == 1))
394 offset = align(offset, base_align);
395 rtex->offset[i] = offset;
396 rtex->layer_size[i] = layer_size;
397 rtex->pitch_in_blocks[i] = nblocksx; /* CB talks in elements */
398 rtex->pitch_in_bytes[i] = nblocksx * blocksize;
399
400 offset += size;
401 }
402 rtex->size = offset;
403 }
404
405 /* Figure out whether u_blitter will fall back to a transfer operation.
406 * If so, don't use a staging resource.
407 */
408 static boolean permit_hardware_blit(struct pipe_screen *screen,
409 const struct pipe_resource *res)
410 {
411 unsigned bind;
412
413 if (util_format_is_depth_or_stencil(res->format))
414 bind = PIPE_BIND_DEPTH_STENCIL;
415 else
416 bind = PIPE_BIND_RENDER_TARGET;
417
418 /* hackaround for S3TC */
419 if (util_format_is_compressed(res->format))
420 return TRUE;
421
422 if (!screen->is_format_supported(screen,
423 res->format,
424 res->target,
425 res->nr_samples,
426 bind))
427 return FALSE;
428
429 if (!screen->is_format_supported(screen,
430 res->format,
431 res->target,
432 res->nr_samples,
433 PIPE_BIND_SAMPLER_VIEW))
434 return FALSE;
435
436 return TRUE;
437 }
438
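/* Apply the texture's tiling parameters to the buffer and export it as a winsys handle. */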
439 static boolean r600_texture_get_handle(struct pipe_screen* screen,
440 struct pipe_resource *ptex,
441 struct winsys_handle *whandle)
442 {
443 struct r600_resource_texture *rtex = (struct r600_resource_texture*)ptex;
444 struct r600_resource *resource = &rtex->resource;
445 struct radeon_surface *surface = &rtex->surface;
446 struct r600_screen *rscreen = (struct r600_screen*)screen;
447
448 rscreen->ws->buffer_set_tiling(resource->buf,
449 NULL,
450 surface->level[0].mode >= RADEON_SURF_MODE_1D ?
451 RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR,
452 surface->level[0].mode >= RADEON_SURF_MODE_2D ?
453 RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR,
454 surface->bankw, surface->bankh,
455 surface->tile_split,
456 surface->stencil_tile_split,
457 surface->mtilea,
458 rtex->pitch_in_bytes[0]);
459
460 return rscreen->ws->buffer_get_handle(resource->buf,
461 rtex->pitch_in_bytes[0], whandle);
462 }
463
464 static void r600_texture_destroy(struct pipe_screen *screen,
465 struct pipe_resource *ptex)
466 {
467 struct r600_resource_texture *rtex = (struct r600_resource_texture*)ptex;
468 struct r600_resource *resource = &rtex->resource;
469
470 if (rtex->flushed_depth_texture)
471 pipe_resource_reference((struct pipe_resource **)&rtex->flushed_depth_texture, NULL);
472
473 if (rtex->stencil)
474 pipe_resource_reference((struct pipe_resource **)&rtex->stencil, NULL);
475
476 pb_reference(&resource->buf, NULL);
477 FREE(rtex);
478 }
479
480 static const struct u_resource_vtbl r600_texture_vtbl =
481 {
482 r600_texture_get_handle, /* get_handle */
483 r600_texture_destroy, /* resource_destroy */
484 r600_texture_get_transfer, /* get_transfer */
485 r600_texture_transfer_destroy, /* transfer_destroy */
486 r600_texture_transfer_map, /* transfer_map */
487 NULL, /* transfer_flush_region */
488 r600_texture_transfer_unmap, /* transfer_unmap */
489 NULL /* transfer_inline_write */
490 };
491
492 static struct r600_resource_texture *
493 r600_texture_create_object(struct pipe_screen *screen,
494 const struct pipe_resource *base,
495 unsigned array_mode,
496 unsigned pitch_in_bytes_override,
497 unsigned max_buffer_size,
498 struct pb_buffer *buf,
499 boolean alloc_bo,
500 struct radeon_surface *surface)
501 {
502 struct r600_resource_texture *rtex;
503 struct r600_resource *resource;
504 struct r600_screen *rscreen = (struct r600_screen*)screen;
505 int r;
506
507 rtex = CALLOC_STRUCT(r600_resource_texture);
508 if (rtex == NULL)
509 return NULL;
510
511 resource = &rtex->resource;
512 resource->b.b = *base;
513 resource->b.vtbl = &r600_texture_vtbl;
514 pipe_reference_init(&resource->b.b.reference, 1);
515 resource->b.b.screen = screen;
516 rtex->pitch_override = pitch_in_bytes_override;
517 rtex->real_format = base->format;
518
519 /* We must split depth and stencil into two separate buffers on Evergreen. */
520 if (!(base->flags & R600_RESOURCE_FLAG_TRANSFER) &&
521 ((struct r600_screen*)screen)->chip_class >= EVERGREEN &&
522 util_format_is_depth_and_stencil(base->format) &&
523 !rscreen->use_surface_alloc) {
524 struct pipe_resource stencil;
525 unsigned stencil_pitch_override = 0;
526
527 switch (base->format) {
528 case PIPE_FORMAT_Z24_UNORM_S8_UINT:
529 rtex->real_format = PIPE_FORMAT_Z24X8_UNORM;
530 break;
531 case PIPE_FORMAT_S8_UINT_Z24_UNORM:
532 rtex->real_format = PIPE_FORMAT_X8Z24_UNORM;
533 break;
534 case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
535 rtex->real_format = PIPE_FORMAT_Z32_FLOAT;
536 break;
537 default:
538 assert(0);
539 FREE(rtex);
540 return NULL;
541 }
542
543 /* Divide the pitch in bytes by 4 for stencil, because it has a smaller pixel size. */
544 if (pitch_in_bytes_override) {
545 assert(base->format == PIPE_FORMAT_Z24_UNORM_S8_UINT ||
546 base->format == PIPE_FORMAT_S8_UINT_Z24_UNORM);
547 stencil_pitch_override = pitch_in_bytes_override / 4;
548 }
549
550 /* Allocate the stencil buffer. */
551 stencil = *base;
552 stencil.format = PIPE_FORMAT_S8_UINT;
553 rtex->stencil = r600_texture_create_object(screen, &stencil, array_mode,
554 stencil_pitch_override,
555 max_buffer_size, NULL, FALSE, surface);
556 if (!rtex->stencil) {
557 FREE(rtex);
558 return NULL;
559 }
560 /* Proceed with creating the depth buffer. */
561 }
562
563 /* only mark depth textures the HW can hit as depth textures */
564 if (util_format_is_depth_or_stencil(rtex->real_format) && permit_hardware_blit(screen, base))
565 rtex->is_depth = true;
566
567 r600_setup_miptree(screen, rtex, array_mode);
568 if (rscreen->use_surface_alloc) {
569 rtex->surface = *surface;
570 r = r600_setup_surface(screen, rtex, array_mode,
571 pitch_in_bytes_override);
572 if (r) {
573 FREE(rtex);
574 return NULL;
575 }
576 }
577
578 /* If we initialized separate stencil for Evergreen, place it after depth. */
579 if (rtex->stencil) {
580 unsigned stencil_align, stencil_offset;
581
582 stencil_align = r600_get_base_alignment(screen, rtex->stencil->real_format, array_mode);
583 stencil_offset = align(rtex->size, stencil_align);
584
585 for (unsigned i = 0; i <= rtex->stencil->resource.b.b.last_level; i++)
586 rtex->stencil->offset[i] += stencil_offset;
587
588 rtex->size = stencil_offset + rtex->stencil->size;
589 }
590
591 /* Now create the backing buffer. */
592 if (!buf && alloc_bo) {
593 struct pipe_resource *ptex = &rtex->resource.b.b;
594 unsigned base_align = r600_get_base_alignment(screen, ptex->format, array_mode);
595
596 if (rscreen->use_surface_alloc) {
597 base_align = rtex->surface.bo_alignment;
598 } else if (util_format_is_depth_or_stencil(rtex->real_format)) {
599 /* Ugly workaround: depth buffers need stencil room at the end of the BO. */
600 rtex->size += ptex->width0 * ptex->height0;
601 }
602 if (!r600_init_resource(rscreen, resource, rtex->size, base_align, base->bind, base->usage)) {
603 pipe_resource_reference((struct pipe_resource**)&rtex->stencil, NULL);
604 FREE(rtex);
605 return NULL;
606 }
607 } else if (buf) {
608 resource->buf = buf;
609 resource->cs_buf = rscreen->ws->buffer_get_cs_handle(buf);
610 resource->domains = RADEON_DOMAIN_GTT | RADEON_DOMAIN_VRAM;
611 }
612
613 if (rtex->stencil) {
614 pb_reference(&rtex->stencil->resource.buf, rtex->resource.buf);
615 rtex->stencil->resource.cs_buf = rtex->resource.cs_buf;
616 rtex->stencil->resource.domains = rtex->resource.domains;
617 }
618 return rtex;
619 }
620
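/* Pick an array mode for the template, compute the surface layout and create the texture object. */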
621 struct pipe_resource *r600_texture_create(struct pipe_screen *screen,
622 const struct pipe_resource *templ)
623 {
624 struct r600_screen *rscreen = (struct r600_screen*)screen;
625 struct radeon_surface surface;
626 unsigned array_mode = 0;
627 int r;
628
629 if (!(templ->flags & R600_RESOURCE_FLAG_TRANSFER)) {
630 if (rscreen->use_surface_alloc &&
631 !(templ->bind & PIPE_BIND_SCANOUT) &&
632 templ->usage != PIPE_USAGE_STAGING &&
633 templ->usage != PIPE_USAGE_STREAM &&
634 permit_hardware_blit(screen, templ)) {
635 array_mode = V_038000_ARRAY_2D_TILED_THIN1;
636 } else if (util_format_is_compressed(templ->format)) {
637 array_mode = V_038000_ARRAY_1D_TILED_THIN1;
638 }
639 }
640
641 r = r600_init_surface(&surface, templ, array_mode,
642 templ->flags & R600_RESOURCE_FLAG_TRANSFER);
643 if (r) {
644 return NULL;
645 }
646 r = rscreen->ws->surface_best(rscreen->ws, &surface);
647 if (r) {
648 return NULL;
649 }
650 return (struct pipe_resource *)r600_texture_create_object(screen, templ, array_mode,
651 0, 0, NULL, TRUE, &surface);
652 }
653
654 static struct pipe_surface *r600_create_surface(struct pipe_context *pipe,
655 struct pipe_resource *texture,
656 const struct pipe_surface *surf_tmpl)
657 {
658 struct r600_resource_texture *rtex = (struct r600_resource_texture*)texture;
659 struct r600_surface *surface = CALLOC_STRUCT(r600_surface);
660 unsigned level = surf_tmpl->u.tex.level;
661
662 assert(surf_tmpl->u.tex.first_layer == surf_tmpl->u.tex.last_layer);
663 if (surface == NULL)
664 return NULL;
665 pipe_reference_init(&surface->base.reference, 1);
666 pipe_resource_reference(&surface->base.texture, texture);
667 surface->base.context = pipe;
668 surface->base.format = surf_tmpl->format;
669 surface->base.width = mip_minify(texture->width0, level);
670 surface->base.height = mip_minify(texture->height0, level);
671 surface->base.usage = surf_tmpl->usage;
672 surface->base.texture = texture;
673 surface->base.u.tex.first_layer = surf_tmpl->u.tex.first_layer;
674 surface->base.u.tex.last_layer = surf_tmpl->u.tex.last_layer;
675 surface->base.u.tex.level = level;
676
677 surface->aligned_height = r600_texture_get_nblocksy(pipe->screen,
678 rtex, level);
679 return &surface->base;
680 }
681
682 static void r600_surface_destroy(struct pipe_context *pipe,
683 struct pipe_surface *surface)
684 {
685 pipe_resource_reference(&surface->texture, NULL);
686 FREE(surface);
687 }
688
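/* Create a texture around a buffer imported through a winsys handle, deriving the array mode from the buffer's tiling flags. */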
689 struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen,
690 const struct pipe_resource *templ,
691 struct winsys_handle *whandle)
692 {
693 struct r600_screen *rscreen = (struct r600_screen*)screen;
694 struct pb_buffer *buf = NULL;
695 unsigned stride = 0;
696 unsigned array_mode = 0;
697 enum radeon_bo_layout micro, macro;
698 struct radeon_surface surface;
699 int r;
700
701 /* Support only 2D textures without mipmaps */
702 if ((templ->target != PIPE_TEXTURE_2D && templ->target != PIPE_TEXTURE_RECT) ||
703 templ->depth0 != 1 || templ->last_level != 0)
704 return NULL;
705
706 buf = rscreen->ws->buffer_from_handle(rscreen->ws, whandle, &stride);
707 if (!buf)
708 return NULL;
709
710 rscreen->ws->buffer_get_tiling(buf, &micro, &macro,
711 &surface.bankw, &surface.bankh,
712 &surface.tile_split,
713 &surface.stencil_tile_split,
714 &surface.mtilea);
715
716 if (macro == RADEON_LAYOUT_TILED)
717 array_mode = V_0280A0_ARRAY_2D_TILED_THIN1;
718 else if (micro == RADEON_LAYOUT_TILED)
719 array_mode = V_0280A0_ARRAY_1D_TILED_THIN1;
720 else
721 array_mode = 0;
722
723 r = r600_init_surface(&surface, templ, array_mode, 0);
724 if (r) {
725 return NULL;
726 }
727 return (struct pipe_resource *)r600_texture_create_object(screen, templ, array_mode,
728 stride, 0, buf, FALSE, &surface);
729 }
730
731 void r600_init_flushed_depth_texture(struct pipe_context *ctx,
732 struct pipe_resource *texture)
733 {
734 struct r600_resource_texture *rtex = (struct r600_resource_texture*)texture;
735 struct pipe_resource resource;
736
737 if (rtex->flushed_depth_texture)
738 return; /* it's ready */
739
740 resource.target = texture->target;
741 resource.format = texture->format;
742 resource.width0 = texture->width0;
743 resource.height0 = texture->height0;
744 resource.depth0 = texture->depth0;
745 resource.array_size = texture->array_size;
746 resource.last_level = texture->last_level;
747 resource.nr_samples = texture->nr_samples;
748 resource.usage = PIPE_USAGE_DYNAMIC;
749 resource.bind = texture->bind | PIPE_BIND_DEPTH_STENCIL;
750 resource.flags = R600_RESOURCE_FLAG_TRANSFER | texture->flags;
751
752 rtex->flushed_depth_texture = (struct r600_resource_texture *)ctx->screen->resource_create(ctx->screen, &resource);
753 if (rtex->flushed_depth_texture == NULL) {
754 R600_ERR("failed to create temporary texture to hold untiled copy\n");
755 return;
756 }
757
758 ((struct r600_resource_texture *)rtex->flushed_depth_texture)->is_flushing_texture = TRUE;
759 }
760
761 void r600_texture_depth_flush(struct pipe_context *ctx,
762 struct pipe_resource *texture)
763 {
764 struct r600_resource_texture *rtex = (struct r600_resource_texture*)texture;
765
766 r600_init_flushed_depth_texture(ctx, texture);
767
768 if (!rtex->flushed_depth_texture)
769 return; /* error */
770
771 /* XXX: only do this if the depth texture has actually changed:
772 */
773 r600_blit_uncompress_depth(ctx, rtex);
774 }
775
776 /* Needs adjustment for pixel format:
777 */
778 static INLINE unsigned u_box_volume( const struct pipe_box *box )
779 {
780 return box->width * box->depth * box->height;
781 }
782
783 struct pipe_transfer* r600_texture_get_transfer(struct pipe_context *ctx,
784 struct pipe_resource *texture,
785 unsigned level,
786 unsigned usage,
787 const struct pipe_box *box)
788 {
789 struct r600_context *rctx = (struct r600_context*)ctx;
790 struct r600_resource_texture *rtex = (struct r600_resource_texture*)texture;
791 struct pipe_resource resource;
792 struct r600_transfer *trans;
793 boolean use_staging_texture = FALSE;
794
795 /* We cannot map a tiled texture directly because the data is
796 * in a different order, therefore we do detiling using a blit.
797 *
798 * Also, use a temporary in GTT memory for read transfers, as
799 * the CPU is much happier reading out of cached system memory
800 * than uncached VRAM.
801 */
802 if (R600_TEX_IS_TILED(rtex, level)) {
803 use_staging_texture = TRUE;
804 }
805
806 if ((usage & PIPE_TRANSFER_READ) && u_box_volume(box) > 1024)
807 use_staging_texture = TRUE;
808
809 /* Use a staging texture for uploads if the underlying BO is busy. */
810 if (!(usage & PIPE_TRANSFER_READ) &&
811 (rctx->ws->cs_is_buffer_referenced(rctx->cs, rtex->resource.cs_buf, RADEON_USAGE_READWRITE) ||
812 rctx->ws->buffer_is_busy(rtex->resource.buf, RADEON_USAGE_READWRITE))) {
813 use_staging_texture = TRUE;
814 }
815
816 if (!permit_hardware_blit(ctx->screen, texture) ||
817 (texture->flags & R600_RESOURCE_FLAG_TRANSFER)) {
818 use_staging_texture = FALSE;
819 }
820
821 if (use_staging_texture && (usage & PIPE_TRANSFER_MAP_DIRECTLY)) {
822 return NULL;
823 }
824
825 trans = CALLOC_STRUCT(r600_transfer);
826 if (trans == NULL)
827 return NULL;
828 pipe_resource_reference(&trans->transfer.resource, texture);
829 trans->transfer.level = level;
830 trans->transfer.usage = usage;
831 trans->transfer.box = *box;
832 if (rtex->is_depth) {
833 /* XXX: only readback the rectangle which is being mapped?
834 */
835 /* XXX: when discard is true, no need to read back from depth texture
836 */
837 r600_texture_depth_flush(ctx, texture);
838 if (!rtex->flushed_depth_texture) {
839 R600_ERR("failed to create temporary texture to hold untiled copy\n");
840 pipe_resource_reference(&trans->transfer.resource, NULL);
841 FREE(trans);
842 return NULL;
843 }
844 trans->transfer.stride = rtex->flushed_depth_texture->pitch_in_bytes[level];
845 trans->offset = r600_texture_get_offset(rtex->flushed_depth_texture, level, box->z);
846 return &trans->transfer;
847 } else if (use_staging_texture) {
848 resource.target = PIPE_TEXTURE_2D;
849 resource.format = texture->format;
850 resource.width0 = box->width;
851 resource.height0 = box->height;
852 resource.depth0 = 1;
853 resource.array_size = 1;
854 resource.last_level = 0;
855 resource.nr_samples = 0;
856 resource.usage = PIPE_USAGE_STAGING;
857 resource.bind = 0;
858 resource.flags = R600_RESOURCE_FLAG_TRANSFER;
859 /* For texture reading, the temporary (detiled) texture is used as
860 * a render target when blitting from a tiled texture. */
861 if (usage & PIPE_TRANSFER_READ) {
862 resource.bind |= PIPE_BIND_RENDER_TARGET;
863 }
864 /* For texture writing, the temporary texture is used as a sampler
865 * when blitting into a tiled texture. */
866 if (usage & PIPE_TRANSFER_WRITE) {
867 resource.bind |= PIPE_BIND_SAMPLER_VIEW;
868 }
869 /* Create the temporary texture. */
870 trans->staging = (struct r600_resource*)ctx->screen->resource_create(ctx->screen, &resource);
871 if (trans->staging == NULL) {
872 R600_ERR("failed to create temporary texture to hold untiled copy\n");
873 pipe_resource_reference(&trans->transfer.resource, NULL);
874 FREE(trans);
875 return NULL;
876 }
877
878 trans->transfer.stride =
879 ((struct r600_resource_texture *)trans->staging)->pitch_in_bytes[0];
880 if (usage & PIPE_TRANSFER_READ) {
881 r600_copy_to_staging_texture(ctx, trans);
882 /* Always referenced in the blit. */
883 r600_flush(ctx, NULL, 0);
884 }
885 return &trans->transfer;
886 }
887 trans->transfer.stride = rtex->pitch_in_bytes[level];
888 trans->transfer.layer_stride = rtex->layer_size[level];
889 trans->offset = r600_texture_get_offset(rtex, level, box->z);
890 return &trans->transfer;
891 }
892
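/* Write back a staging texture or flushed depth copy if the transfer was a write, then release the transfer. */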
893 void r600_texture_transfer_destroy(struct pipe_context *ctx,
894 struct pipe_transfer *transfer)
895 {
896 struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
897 struct pipe_resource *texture = transfer->resource;
898 struct r600_resource_texture *rtex = (struct r600_resource_texture*)texture;
899
900 if (rtransfer->staging) {
901 if (transfer->usage & PIPE_TRANSFER_WRITE) {
902 r600_copy_from_staging_texture(ctx, rtransfer);
903 }
904 pipe_resource_reference((struct pipe_resource**)&rtransfer->staging, NULL);
905 }
906
907 if (rtex->is_depth && !rtex->is_flushing_texture) {
908 if ((transfer->usage & PIPE_TRANSFER_WRITE) && rtex->flushed_depth_texture) {
909 r600_blit_push_depth(ctx, rtex);
910 }
911 }
912
913 pipe_resource_reference(&transfer->resource, NULL);
914 FREE(transfer);
915 }
916
917 void* r600_texture_transfer_map(struct pipe_context *ctx,
918 struct pipe_transfer* transfer)
919 {
920 struct r600_context *rctx = (struct r600_context *)ctx;
921 struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
922 struct radeon_winsys_cs_handle *buf;
923 enum pipe_format format = transfer->resource->format;
924 unsigned offset = 0;
925 char *map;
926
927 if ((transfer->resource->bind & PIPE_BIND_GLOBAL) && transfer->resource->target == PIPE_BUFFER) {
928 return r600_compute_global_transfer_map(ctx, transfer);
929 }
930
931 if (rtransfer->staging) {
932 buf = ((struct r600_resource *)rtransfer->staging)->cs_buf;
933 } else {
934 struct r600_resource_texture *rtex = (struct r600_resource_texture*)transfer->resource;
935
936 if (rtex->flushed_depth_texture)
937 buf = ((struct r600_resource *)rtex->flushed_depth_texture)->cs_buf;
938 else
939 buf = ((struct r600_resource *)transfer->resource)->cs_buf;
940
941 offset = rtransfer->offset +
942 transfer->box.y / util_format_get_blockheight(format) * transfer->stride +
943 transfer->box.x / util_format_get_blockwidth(format) * util_format_get_blocksize(format);
944 }
945
946 if (!(map = rctx->ws->buffer_map(buf, rctx->cs, transfer->usage))) {
947 return NULL;
948 }
949
950 return map + offset;
951 }
952
953 void r600_texture_transfer_unmap(struct pipe_context *ctx,
954 struct pipe_transfer* transfer)
955 {
956 struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
957 struct r600_context *rctx = (struct r600_context*)ctx;
958 struct radeon_winsys_cs_handle *buf;
959
960 if ((transfer->resource->bind & PIPE_BIND_GLOBAL) && transfer->resource->target == PIPE_BUFFER) {
961 return r600_compute_global_transfer_unmap(ctx, transfer);
962 }
963
964 if (rtransfer->staging) {
965 buf = ((struct r600_resource *)rtransfer->staging)->cs_buf;
966 } else {
967 struct r600_resource_texture *rtex = (struct r600_resource_texture*)transfer->resource;
968
969 if (rtex->flushed_depth_texture) {
970 buf = ((struct r600_resource *)rtex->flushed_depth_texture)->cs_buf;
971 } else {
972 buf = ((struct r600_resource *)transfer->resource)->cs_buf;
973 }
974 }
975 rctx->ws->buffer_unmap(buf);
976 }
977
978 void r600_init_surface_functions(struct r600_context *r600)
979 {
980 r600->context.create_surface = r600_create_surface;
981 r600->context.surface_destroy = r600_surface_destroy;
982 }
983
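/* Compose the format and view swizzles and pack the resulting SQ_SEL_* selectors into the DST_SEL fields of the texture resource word. */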
984 static unsigned r600_get_swizzle_combined(const unsigned char *swizzle_format,
985 const unsigned char *swizzle_view)
986 {
987 unsigned i;
988 unsigned char swizzle[4];
989 unsigned result = 0;
990 const uint32_t swizzle_shift[4] = {
991 16, 19, 22, 25,
992 };
993 const uint32_t swizzle_bit[4] = {
994 0, 1, 2, 3,
995 };
996
997 if (swizzle_view) {
998 util_format_compose_swizzles(swizzle_format, swizzle_view, swizzle);
999 } else {
1000 memcpy(swizzle, swizzle_format, 4);
1001 }
1002
1003 /* Get swizzle. */
1004 for (i = 0; i < 4; i++) {
1005 switch (swizzle[i]) {
1006 case UTIL_FORMAT_SWIZZLE_Y:
1007 result |= swizzle_bit[1] << swizzle_shift[i];
1008 break;
1009 case UTIL_FORMAT_SWIZZLE_Z:
1010 result |= swizzle_bit[2] << swizzle_shift[i];
1011 break;
1012 case UTIL_FORMAT_SWIZZLE_W:
1013 result |= swizzle_bit[3] << swizzle_shift[i];
1014 break;
1015 case UTIL_FORMAT_SWIZZLE_0:
1016 result |= V_038010_SQ_SEL_0 << swizzle_shift[i];
1017 break;
1018 case UTIL_FORMAT_SWIZZLE_1:
1019 result |= V_038010_SQ_SEL_1 << swizzle_shift[i];
1020 break;
1021 default: /* UTIL_FORMAT_SWIZZLE_X */
1022 result |= swizzle_bit[0] << swizzle_shift[i];
1023 }
1024 }
1025 return result;
1026 }
1027
1028 /* texture format translate */
1029 uint32_t r600_translate_texformat(struct pipe_screen *screen,
1030 enum pipe_format format,
1031 const unsigned char *swizzle_view,
1032 uint32_t *word4_p, uint32_t *yuv_format_p)
1033 {
1034 uint32_t result = 0, word4 = 0, yuv_format = 0;
1035 const struct util_format_description *desc;
1036 boolean uniform = TRUE;
1037 static int r600_enable_s3tc = -1;
1038 bool is_srgb_valid = FALSE;
1039
1040 int i;
1041 const uint32_t sign_bit[4] = {
1042 S_038010_FORMAT_COMP_X(V_038010_SQ_FORMAT_COMP_SIGNED),
1043 S_038010_FORMAT_COMP_Y(V_038010_SQ_FORMAT_COMP_SIGNED),
1044 S_038010_FORMAT_COMP_Z(V_038010_SQ_FORMAT_COMP_SIGNED),
1045 S_038010_FORMAT_COMP_W(V_038010_SQ_FORMAT_COMP_SIGNED)
1046 };
1047 desc = util_format_description(format);
1048
1049 word4 |= r600_get_swizzle_combined(desc->swizzle, swizzle_view);
1050
1051 /* Colorspace (return non-RGB formats directly). */
1052 switch (desc->colorspace) {
1053 /* Depth stencil formats */
1054 case UTIL_FORMAT_COLORSPACE_ZS:
1055 switch (format) {
1056 case PIPE_FORMAT_Z16_UNORM:
1057 result = FMT_16;
1058 goto out_word4;
1059 case PIPE_FORMAT_X24S8_UINT:
1060 word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
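/* Fall through. */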
1061 case PIPE_FORMAT_Z24X8_UNORM:
1062 case PIPE_FORMAT_Z24_UNORM_S8_UINT:
1063 result = FMT_8_24;
1064 goto out_word4;
1065 case PIPE_FORMAT_S8X24_UINT:
1066 word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
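/* Fall through. */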
1067 case PIPE_FORMAT_X8Z24_UNORM:
1068 case PIPE_FORMAT_S8_UINT_Z24_UNORM:
1069 result = FMT_24_8;
1070 goto out_word4;
1071 case PIPE_FORMAT_S8_UINT:
1072 result = FMT_8;
1073 word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
1074 goto out_word4;
1075 case PIPE_FORMAT_Z32_FLOAT:
1076 result = FMT_32_FLOAT;
1077 goto out_word4;
1078 case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
1079 result = FMT_X24_8_32_FLOAT;
1080 goto out_word4;
1081 default:
1082 goto out_unknown;
1083 }
1084
1085 case UTIL_FORMAT_COLORSPACE_YUV:
1086 yuv_format |= (1 << 30);
1087 switch (format) {
1088 case PIPE_FORMAT_UYVY:
1089 case PIPE_FORMAT_YUYV:
1090 default:
1091 break;
1092 }
1093 goto out_unknown; /* XXX */
1094
1095 case UTIL_FORMAT_COLORSPACE_SRGB:
1096 word4 |= S_038010_FORCE_DEGAMMA(1);
1097 break;
1098
1099 default:
1100 break;
1101 }
1102
1103 if (r600_enable_s3tc == -1) {
1104 struct r600_screen *rscreen = (struct r600_screen *)screen;
1105 if (rscreen->info.drm_minor >= 9)
1106 r600_enable_s3tc = 1;
1107 else
1108 r600_enable_s3tc = debug_get_bool_option("R600_ENABLE_S3TC", FALSE);
1109 }
1110
1111 if (desc->layout == UTIL_FORMAT_LAYOUT_RGTC) {
1112 if (!r600_enable_s3tc)
1113 goto out_unknown;
1114
1115 switch (format) {
1116 case PIPE_FORMAT_RGTC1_SNORM:
1117 case PIPE_FORMAT_LATC1_SNORM:
1118 word4 |= sign_bit[0];
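/* Fall through. */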
1119 case PIPE_FORMAT_RGTC1_UNORM:
1120 case PIPE_FORMAT_LATC1_UNORM:
1121 result = FMT_BC4;
1122 goto out_word4;
1123 case PIPE_FORMAT_RGTC2_SNORM:
1124 case PIPE_FORMAT_LATC2_SNORM:
1125 word4 |= sign_bit[0] | sign_bit[1];
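/* Fall through. */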
1126 case PIPE_FORMAT_RGTC2_UNORM:
1127 case PIPE_FORMAT_LATC2_UNORM:
1128 result = FMT_BC5;
1129 goto out_word4;
1130 default:
1131 goto out_unknown;
1132 }
1133 }
1134
1135 if (desc->layout == UTIL_FORMAT_LAYOUT_S3TC) {
1136
1137 if (!r600_enable_s3tc)
1138 goto out_unknown;
1139
1140 if (!util_format_s3tc_enabled) {
1141 goto out_unknown;
1142 }
1143
1144 switch (format) {
1145 case PIPE_FORMAT_DXT1_RGB:
1146 case PIPE_FORMAT_DXT1_RGBA:
1147 case PIPE_FORMAT_DXT1_SRGB:
1148 case PIPE_FORMAT_DXT1_SRGBA:
1149 result = FMT_BC1;
1150 is_srgb_valid = TRUE;
1151 goto out_word4;
1152 case PIPE_FORMAT_DXT3_RGBA:
1153 case PIPE_FORMAT_DXT3_SRGBA:
1154 result = FMT_BC2;
1155 is_srgb_valid = TRUE;
1156 goto out_word4;
1157 case PIPE_FORMAT_DXT5_RGBA:
1158 case PIPE_FORMAT_DXT5_SRGBA:
1159 result = FMT_BC3;
1160 is_srgb_valid = TRUE;
1161 goto out_word4;
1162 default:
1163 goto out_unknown;
1164 }
1165 }
1166
1167 if (desc->layout == UTIL_FORMAT_LAYOUT_SUBSAMPLED) {
1168 switch (format) {
1169 case PIPE_FORMAT_R8G8_B8G8_UNORM:
1170 case PIPE_FORMAT_G8R8_B8R8_UNORM:
1171 result = FMT_GB_GR;
1172 goto out_word4;
1173 case PIPE_FORMAT_G8R8_G8B8_UNORM:
1174 case PIPE_FORMAT_R8G8_R8B8_UNORM:
1175 result = FMT_BG_RG;
1176 goto out_word4;
1177 default:
1178 goto out_unknown;
1179 }
1180 }
1181
1182 if (format == PIPE_FORMAT_R9G9B9E5_FLOAT) {
1183 result = FMT_5_9_9_9_SHAREDEXP;
1184 goto out_word4;
1185 } else if (format == PIPE_FORMAT_R11G11B10_FLOAT) {
1186 result = FMT_10_11_11_FLOAT;
1187 goto out_word4;
1188 }
1189
1190
1191 for (i = 0; i < desc->nr_channels; i++) {
1192 if (desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
1193 word4 |= sign_bit[i];
1194 }
1195 }
1196
1197 /* R8G8Bx_SNORM - XXX CxV8U8 */
1198
1199 /* See whether the components are of the same size. */
1200 for (i = 1; i < desc->nr_channels; i++) {
1201 uniform = uniform && desc->channel[0].size == desc->channel[i].size;
1202 }
1203
1204 /* Non-uniform formats. */
1205 if (!uniform) {
1206 if (desc->colorspace != UTIL_FORMAT_COLORSPACE_SRGB &&
1207 desc->channel[0].pure_integer)
1208 word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
1209 switch(desc->nr_channels) {
1210 case 3:
1211 if (desc->channel[0].size == 5 &&
1212 desc->channel[1].size == 6 &&
1213 desc->channel[2].size == 5) {
1214 result = FMT_5_6_5;
1215 goto out_word4;
1216 }
1217 goto out_unknown;
1218 case 4:
1219 if (desc->channel[0].size == 5 &&
1220 desc->channel[1].size == 5 &&
1221 desc->channel[2].size == 5 &&
1222 desc->channel[3].size == 1) {
1223 result = FMT_1_5_5_5;
1224 goto out_word4;
1225 }
1226 if (desc->channel[0].size == 10 &&
1227 desc->channel[1].size == 10 &&
1228 desc->channel[2].size == 10 &&
1229 desc->channel[3].size == 2) {
1230 result = FMT_2_10_10_10;
1231 goto out_word4;
1232 }
1233 goto out_unknown;
1234 }
1235 goto out_unknown;
1236 }
1237
1238 /* Find the first non-VOID channel. */
1239 for (i = 0; i < 4; i++) {
1240 if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
1241 break;
1242 }
1243 }
1244
1245 if (i == 4)
1246 goto out_unknown;
1247
1248 /* uniform formats */
1249 switch (desc->channel[i].type) {
1250 case UTIL_FORMAT_TYPE_UNSIGNED:
1251 case UTIL_FORMAT_TYPE_SIGNED:
1252 #if 0
1253 if (!desc->channel[i].normalized &&
1254 desc->colorspace != UTIL_FORMAT_COLORSPACE_SRGB) {
1255 goto out_unknown;
1256 }
1257 #endif
1258 if (desc->colorspace != UTIL_FORMAT_COLORSPACE_SRGB &&
1259 desc->channel[i].pure_integer)
1260 word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
1261
1262 switch (desc->channel[i].size) {
1263 case 4:
1264 switch (desc->nr_channels) {
1265 case 2:
1266 result = FMT_4_4;
1267 goto out_word4;
1268 case 4:
1269 result = FMT_4_4_4_4;
1270 goto out_word4;
1271 }
1272 goto out_unknown;
1273 case 8:
1274 switch (desc->nr_channels) {
1275 case 1:
1276 result = FMT_8;
1277 goto out_word4;
1278 case 2:
1279 result = FMT_8_8;
1280 goto out_word4;
1281 case 4:
1282 result = FMT_8_8_8_8;
1283 is_srgb_valid = TRUE;
1284 goto out_word4;
1285 }
1286 goto out_unknown;
1287 case 16:
1288 switch (desc->nr_channels) {
1289 case 1:
1290 result = FMT_16;
1291 goto out_word4;
1292 case 2:
1293 result = FMT_16_16;
1294 goto out_word4;
1295 case 4:
1296 result = FMT_16_16_16_16;
1297 goto out_word4;
1298 }
1299 goto out_unknown;
1300 case 32:
1301 switch (desc->nr_channels) {
1302 case 1:
1303 result = FMT_32;
1304 goto out_word4;
1305 case 2:
1306 result = FMT_32_32;
1307 goto out_word4;
1308 case 4:
1309 result = FMT_32_32_32_32;
1310 goto out_word4;
1311 }
1312 }
1313 goto out_unknown;
1314
1315 case UTIL_FORMAT_TYPE_FLOAT:
1316 switch (desc->channel[i].size) {
1317 case 16:
1318 switch (desc->nr_channels) {
1319 case 1:
1320 result = FMT_16_FLOAT;
1321 goto out_word4;
1322 case 2:
1323 result = FMT_16_16_FLOAT;
1324 goto out_word4;
1325 case 4:
1326 result = FMT_16_16_16_16_FLOAT;
1327 goto out_word4;
1328 }
1329 goto out_unknown;
1330 case 32:
1331 switch (desc->nr_channels) {
1332 case 1:
1333 result = FMT_32_FLOAT;
1334 goto out_word4;
1335 case 2:
1336 result = FMT_32_32_FLOAT;
1337 goto out_word4;
1338 case 4:
1339 result = FMT_32_32_32_32_FLOAT;
1340 goto out_word4;
1341 }
1342 }
1343 goto out_unknown;
1344 }
1345
1346 out_word4:
1347
1348 if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB && !is_srgb_valid)
1349 return ~0;
1350 if (word4_p)
1351 *word4_p = word4;
1352 if (yuv_format_p)
1353 *yuv_format_p = yuv_format;
1354 return result;
1355 out_unknown:
1356 /* R600_ERR("Unable to handle texformat %d %s\n", format, util_format_name(format)); */
1357 return ~0;
1358 }