r600g: remove unused flink, domain fields from r600_resource
[mesa.git] / src / gallium / drivers / r600 / r600_texture.c
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Jerome Glisse
 *      Corbin Simpson
 */
#include <errno.h>
#include <pipe/p_screen.h>
#include <util/u_format.h>
#include <util/u_math.h>
#include <util/u_inlines.h>
#include <util/u_memory.h>
#include "state_tracker/drm_driver.h"
#include "pipebuffer/pb_buffer.h"
#include "r600_pipe.h"
#include "r600_resource.h"
#include "r600_state_inlines.h"
#include "r600d.h"
#include "r600_formats.h"

extern struct u_resource_vtbl r600_texture_vtbl;

/* Copy from a full GPU texture to a transfer's staging one. */
static void r600_copy_to_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
	struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
	struct pipe_resource *texture = transfer->resource;
	struct pipe_subresource subdst;

	subdst.face = 0;
	subdst.level = 0;
	ctx->resource_copy_region(ctx, rtransfer->staging_texture,
				  subdst, 0, 0, 0, texture, transfer->sr,
				  transfer->box.x, transfer->box.y, transfer->box.z,
				  transfer->box.width, transfer->box.height);
}


/* Copy from a transfer's staging texture to a full GPU one. */
static void r600_copy_from_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
	struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
	struct pipe_resource *texture = transfer->resource;
	struct pipe_subresource subsrc;

	subsrc.face = 0;
	subsrc.level = 0;
	ctx->resource_copy_region(ctx, texture, transfer->sr,
				  transfer->box.x, transfer->box.y, transfer->box.z,
				  rtransfer->staging_texture, subsrc,
				  0, 0, 0,
				  transfer->box.width, transfer->box.height);

	ctx->flush(ctx, 0, NULL);
}

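/* Byte offset of a given mip level / cube face / Z slice within the
 * texture's backing buffer, based on the per-level offsets and layer
 * sizes computed in r600_setup_miptree(). */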
static unsigned r600_texture_get_offset(struct r600_resource_texture *rtex,
					unsigned level, unsigned zslice,
					unsigned face)
{
	unsigned offset = rtex->offset[level];

	switch (rtex->resource.base.b.target) {
	case PIPE_TEXTURE_3D:
		assert(face == 0);
		return offset + zslice * rtex->layer_size[level];
	case PIPE_TEXTURE_CUBE:
		assert(zslice == 0);
		return offset + face * rtex->layer_size[level];
	default:
		assert(zslice == 0 && face == 0);
		return offset;
	}
}

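/* Horizontal (pitch) alignment in pixels required by the given array
 * (tiling) mode, derived from the winsys tiling info. Linear surfaces
 * fall back to a 64-pixel alignment. */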
static unsigned r600_get_pixel_alignment(struct pipe_screen *screen,
					 enum pipe_format format,
					 unsigned array_mode)
{
	struct r600_screen* rscreen = (struct r600_screen *)screen;
	unsigned pixsize = util_format_get_blocksize(format);
	int p_align;

	switch(array_mode) {
	case V_038000_ARRAY_1D_TILED_THIN1:
		p_align = MAX2(8,
			       ((rscreen->tiling_info->group_bytes / 8 / pixsize)));
		break;
	case V_038000_ARRAY_2D_TILED_THIN1:
		p_align = MAX2(rscreen->tiling_info->num_banks,
			       (((rscreen->tiling_info->group_bytes / 8 / pixsize)) *
				rscreen->tiling_info->num_banks));
		break;
	case 0:
	default:
		p_align = 64;
		break;
	}
	return p_align;
}

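/* Vertical alignment in rows required by the given array (tiling) mode:
 * 8 rows for 1D tiles, num_channels * 8 rows for 2D tiles, 1 for linear. */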
static unsigned r600_get_height_alignment(struct pipe_screen *screen,
					  unsigned array_mode)
{
	struct r600_screen* rscreen = (struct r600_screen *)screen;
	int h_align;

	switch (array_mode) {
	case V_038000_ARRAY_2D_TILED_THIN1:
		h_align = rscreen->tiling_info->num_channels * 8;
		break;
	case V_038000_ARRAY_1D_TILED_THIN1:
		h_align = 8;
		break;
	default:
		h_align = 1;
		break;
	}
	return h_align;
}

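/* Minify a dimension for the given mip level; levels above the base
 * level are rounded up to the next power of two. */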
static unsigned mip_minify(unsigned size, unsigned level)
{
	unsigned val;
	val = u_minify(size, level);
	if (level > 0)
		val = util_next_power_of_two(val);
	return val;
}

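/* Stride in bytes of one row of the given mip level, honouring any pitch
 * override and the pixel alignment of the level's array mode. On Evergreen
 * the stride is additionally aligned to 512 bytes. */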
static unsigned r600_texture_get_stride(struct pipe_screen *screen,
					struct r600_resource_texture *rtex,
					unsigned level)
{
	struct pipe_resource *ptex = &rtex->resource.base.b;
	struct radeon *radeon = (struct radeon *)screen->winsys;
	enum chip_class chipc = r600_get_family_class(radeon);
	unsigned width, stride, tile_width;

	if (rtex->pitch_override)
		return rtex->pitch_override;

	width = mip_minify(ptex->width0, level);
	if (util_format_is_plain(ptex->format)) {
		tile_width = r600_get_pixel_alignment(screen, ptex->format,
						      rtex->array_mode[level]);
		width = align(width, tile_width);
	}
	stride = util_format_get_stride(ptex->format, width);
	if (chipc == EVERGREEN)
		stride = align(stride, 512);
	return stride;
}

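/* Number of block rows in the given mip level, with the height padded
 * to the array mode's row alignment for plain formats. */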
static unsigned r600_texture_get_nblocksy(struct pipe_screen *screen,
					  struct r600_resource_texture *rtex,
					  unsigned level)
{
	struct pipe_resource *ptex = &rtex->resource.base.b;
	unsigned height, tile_height;

	height = mip_minify(ptex->height0, level);
	if (util_format_is_plain(ptex->format)) {
		tile_height = r600_get_height_alignment(screen,
							rtex->array_mode[level]);
		height = align(height, tile_height);
	}
	return util_format_get_nblocksy(ptex->format, height);
}

/* Get a width in pixels from a stride in bytes. */
static unsigned pitch_to_width(enum pipe_format format,
			       unsigned pitch_in_bytes)
{
	return (pitch_in_bytes / util_format_get_blocksize(format)) *
		util_format_get_blockwidth(format);
}

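/* Record the array mode for a mip level. 2D tiling is demoted to 1D
 * tiling when the level is smaller than a single 2D tile. */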
static void r600_texture_set_array_mode(struct pipe_screen *screen,
					struct r600_resource_texture *rtex,
					unsigned level, unsigned array_mode)
{
	struct pipe_resource *ptex = &rtex->resource.base.b;

	switch (array_mode) {
	case V_0280A0_ARRAY_LINEAR_GENERAL:
	case V_0280A0_ARRAY_LINEAR_ALIGNED:
	case V_0280A0_ARRAY_1D_TILED_THIN1:
	default:
		rtex->array_mode[level] = array_mode;
		break;
	case V_0280A0_ARRAY_2D_TILED_THIN1:
	{
		unsigned w, h, tile_height, tile_width;

		tile_height = r600_get_height_alignment(screen, array_mode);
		tile_width = r600_get_pixel_alignment(screen, ptex->format, array_mode);

		w = mip_minify(ptex->width0, level);
		h = mip_minify(ptex->height0, level);
		if (w < tile_width || h < tile_height)
			rtex->array_mode[level] = V_0280A0_ARRAY_1D_TILED_THIN1;
		else
			rtex->array_mode[level] = array_mode;
	}
		break;
	}
}

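/* Compute the layout of the whole mip tree: per-level array mode, pitch,
 * layer size and byte offset, plus the total buffer size. Cube maps
 * allocate 8 layers per level on R700+ and 6 on R600. */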
static void r600_setup_miptree(struct pipe_screen *screen,
			       struct r600_resource_texture *rtex,
			       unsigned array_mode)
{
	struct pipe_resource *ptex = &rtex->resource.base.b;
	struct radeon *radeon = (struct radeon *)screen->winsys;
	enum chip_class chipc = r600_get_family_class(radeon);
	unsigned pitch, size, layer_size, i, offset;
	unsigned nblocksy;

	for (i = 0, offset = 0; i <= ptex->last_level; i++) {
		r600_texture_set_array_mode(screen, rtex, i, array_mode);

		pitch = r600_texture_get_stride(screen, rtex, i);
		nblocksy = r600_texture_get_nblocksy(screen, rtex, i);

		layer_size = pitch * nblocksy;

		if (ptex->target == PIPE_TEXTURE_CUBE) {
			if (chipc >= R700)
				size = layer_size * 8;
			else
				size = layer_size * 6;
		}
		else
			size = layer_size * u_minify(ptex->depth0, i);
		rtex->offset[i] = offset;
		rtex->layer_size[i] = layer_size;
		rtex->pitch_in_bytes[i] = pitch;
		rtex->pitch_in_pixels[i] = pitch_to_width(ptex->format, pitch);
		offset += size;
	}
	rtex->size = offset;
}

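/* Common texture object creation path, shared by r600_texture_create()
 * and r600_texture_from_handle(). Allocates a new BO sized to the mip
 * tree unless an existing one is passed in. */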
static struct r600_resource_texture *
r600_texture_create_object(struct pipe_screen *screen,
			   const struct pipe_resource *base,
			   unsigned array_mode,
			   unsigned pitch_in_bytes_override,
			   unsigned max_buffer_size,
			   struct r600_bo *bo)
{
	struct r600_resource_texture *rtex;
	struct r600_resource *resource;
	struct radeon *radeon = (struct radeon *)screen->winsys;

	rtex = CALLOC_STRUCT(r600_resource_texture);
	if (rtex == NULL)
		return NULL;

	resource = &rtex->resource;
	resource->base.b = *base;
	resource->base.vtbl = &r600_texture_vtbl;
	pipe_reference_init(&resource->base.b.reference, 1);
	resource->base.b.screen = screen;
	resource->bo = bo;
	rtex->pitch_override = pitch_in_bytes_override;

	if (array_mode)
		rtex->tiled = 1;
	r600_setup_miptree(screen, rtex, array_mode);

	resource->size = rtex->size;

	if (!resource->bo) {
		resource->bo = r600_bo(radeon, rtex->size, 4096, base->bind, base->usage);
		if (!resource->bo) {
			FREE(rtex);
			return NULL;
		}
	}
	return rtex;
}

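/* pipe_screen::resource_create entry point for textures. Tiling is only
 * selected when the R600_FORCE_TILING debug option is set, and never for
 * transfer temporaries or scanout surfaces. */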
struct pipe_resource *r600_texture_create(struct pipe_screen *screen,
					  const struct pipe_resource *templ)
{
	unsigned array_mode = 0;

	if (debug_get_bool_option("R600_FORCE_TILING", FALSE)) {
		if (!(templ->flags & R600_RESOURCE_FLAG_TRANSFER) &&
		    !(templ->bind & PIPE_BIND_SCANOUT)) {
			array_mode = V_038000_ARRAY_2D_TILED_THIN1;
		}
	}

	return (struct pipe_resource *)r600_texture_create_object(screen, templ, array_mode,
								   0, 0, NULL);
}

static void r600_texture_destroy(struct pipe_screen *screen,
				 struct pipe_resource *ptex)
{
	struct r600_resource_texture *rtex = (struct r600_resource_texture*)ptex;
	struct r600_resource *resource = &rtex->resource;
	struct radeon *radeon = (struct radeon *)screen->winsys;

	if (rtex->flushed_depth_texture)
		pipe_resource_reference((struct pipe_resource **)&rtex->flushed_depth_texture, NULL);

	if (resource->bo) {
		r600_bo_reference(radeon, &resource->bo, NULL);
	}
	FREE(rtex);
}

static struct pipe_surface *r600_get_tex_surface(struct pipe_screen *screen,
						 struct pipe_resource *texture,
						 unsigned face, unsigned level,
						 unsigned zslice, unsigned flags)
{
	struct r600_resource_texture *rtex = (struct r600_resource_texture*)texture;
	struct r600_surface *surface = CALLOC_STRUCT(r600_surface);
	unsigned offset, tile_height;

	if (surface == NULL)
		return NULL;
	offset = r600_texture_get_offset(rtex, level, zslice, face);
	pipe_reference_init(&surface->base.reference, 1);
	pipe_resource_reference(&surface->base.texture, texture);
	surface->base.format = texture->format;
	surface->base.width = mip_minify(texture->width0, level);
	surface->base.height = mip_minify(texture->height0, level);
	surface->base.offset = offset;
	surface->base.usage = flags;
	surface->base.zslice = zslice;
	surface->base.texture = texture;
	surface->base.face = face;
	surface->base.level = level;

	tile_height = r600_get_height_alignment(screen, rtex->array_mode[level]);
	surface->aligned_height = align(surface->base.height, tile_height);
	return &surface->base;
}

static void r600_tex_surface_destroy(struct pipe_surface *surface)
{
	pipe_resource_reference(&surface->texture, NULL);
	FREE(surface);
}


struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen,
					       const struct pipe_resource *templ,
					       struct winsys_handle *whandle)
{
	struct radeon *rw = (struct radeon*)screen->winsys;
	struct r600_bo *bo = NULL;
	unsigned array_mode = 0;

	/* Support only 2D textures without mipmaps */
	if ((templ->target != PIPE_TEXTURE_2D && templ->target != PIPE_TEXTURE_RECT) ||
	    templ->depth0 != 1 || templ->last_level != 0)
		return NULL;

	bo = r600_bo_handle(rw, whandle->handle, &array_mode);
	if (bo == NULL) {
		return NULL;
	}

	return (struct pipe_resource *)r600_texture_create_object(screen, templ, array_mode,
								   whandle->stride,
								   0,
								   bo);
}

static unsigned int r600_texture_is_referenced(struct pipe_context *context,
					       struct pipe_resource *texture,
					       unsigned face, unsigned level)
{
	/* FIXME */
	return PIPE_REFERENCED_FOR_READ | PIPE_REFERENCED_FOR_WRITE;
}

int (*r600_blit_uncompress_depth_ptr)(struct pipe_context *ctx, struct r600_resource_texture *texture);

int r600_texture_depth_flush(struct pipe_context *ctx,
			     struct pipe_resource *texture)
{
	struct r600_resource_texture *rtex = (struct r600_resource_texture*)texture;
	struct pipe_resource resource;

	if (rtex->flushed_depth_texture)
		goto out;

	resource.target = PIPE_TEXTURE_2D;
	resource.format = texture->format;
	resource.width0 = texture->width0;
	resource.height0 = texture->height0;
	resource.depth0 = 1;
	resource.last_level = 0;
	resource.nr_samples = 0;
	resource.usage = PIPE_USAGE_DYNAMIC;
	resource.bind = 0;
	resource.flags = R600_RESOURCE_FLAG_TRANSFER;

	resource.bind |= PIPE_BIND_DEPTH_STENCIL;

	rtex->flushed_depth_texture = (struct r600_resource_texture *)ctx->screen->resource_create(ctx->screen, &resource);
	if (rtex->flushed_depth_texture == NULL) {
		R600_ERR("failed to create temporary texture to hold untiled copy\n");
		return -ENOMEM;
	}

out:
	/* XXX: only do this if the depth texture has actually changed:
	 */
	r600_blit_uncompress_depth_ptr(ctx, rtex);
	return 0;
}

/* Needs adjustment for pixelformat:
 */
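/* XXX: a format-aware variant would presumably convert the box to blocks
 * (util_format_get_nblocksx/y) and scale by the block size rather than
 * counting raw pixels; left as a sketch here, since callers only use this
 * value as a rough size heuristic. */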
static INLINE unsigned u_box_volume( const struct pipe_box *box )
{
	return box->width * box->depth * box->height;
}


struct pipe_transfer* r600_texture_get_transfer(struct pipe_context *ctx,
						struct pipe_resource *texture,
						struct pipe_subresource sr,
						unsigned usage,
						const struct pipe_box *box)
{
	struct r600_resource_texture *rtex = (struct r600_resource_texture*)texture;
	struct pipe_resource resource;
	struct r600_transfer *trans;
	int r;
	boolean use_staging_texture = FALSE;
	boolean discard = FALSE;

	if (!(usage & PIPE_TRANSFER_READ) && (usage & PIPE_TRANSFER_DISCARD))
		discard = TRUE;

	/* We cannot map a tiled texture directly because the data is
	 * in a different order, therefore we do detiling using a blit.
	 *
	 * Also, use a temporary in GTT memory for read transfers, as
	 * the CPU is much happier reading out of cached system memory
	 * than uncached VRAM.
	 */
	if (rtex->tiled)
		use_staging_texture = TRUE;

	if (usage & PIPE_TRANSFER_READ &&
	    u_box_volume(box) > 1024)
		use_staging_texture = TRUE;

	/* XXX: Use a staging texture for uploads if the underlying BO
	 * is busy. There is no interface for checking that currently,
	 * so do it eagerly whenever the transfer doesn't require a
	 * readback and might block.
	 */
	if ((usage & PIPE_TRANSFER_WRITE) &&
	    discard &&
	    !(usage & (PIPE_TRANSFER_DONTBLOCK | PIPE_TRANSFER_UNSYNCHRONIZED)))
		use_staging_texture = TRUE;

	trans = CALLOC_STRUCT(r600_transfer);
	if (trans == NULL)
		return NULL;
	pipe_resource_reference(&trans->transfer.resource, texture);
	trans->transfer.sr = sr;
	trans->transfer.usage = usage;
	trans->transfer.box = *box;
	if (rtex->depth) {
		/* XXX: only readback the rectangle which is being mapped?
		 */
		/* XXX: when discard is true, no need to read back from depth texture
		 */
		r = r600_texture_depth_flush(ctx, texture);
		if (r < 0) {
			R600_ERR("failed to create temporary texture to hold untiled copy\n");
			pipe_resource_reference(&trans->transfer.resource, NULL);
			FREE(trans);
			return NULL;
		}
	} else if (use_staging_texture) {
		resource.target = PIPE_TEXTURE_2D;
		resource.format = texture->format;
		resource.width0 = box->width;
		resource.height0 = box->height;
		resource.depth0 = 1;
		resource.last_level = 0;
		resource.nr_samples = 0;
		resource.usage = PIPE_USAGE_STAGING;
		resource.bind = 0;
		resource.flags = R600_RESOURCE_FLAG_TRANSFER;
		/* For texture reading, the temporary (detiled) texture is used as
		 * a render target when blitting from a tiled texture. */
		if (usage & PIPE_TRANSFER_READ) {
			resource.bind |= PIPE_BIND_RENDER_TARGET;
		}
		/* For texture writing, the temporary texture is used as a sampler
		 * when blitting into a tiled texture. */
		if (usage & PIPE_TRANSFER_WRITE) {
			resource.bind |= PIPE_BIND_SAMPLER_VIEW;
		}
		/* Create the temporary texture. */
		trans->staging_texture = ctx->screen->resource_create(ctx->screen, &resource);
		if (trans->staging_texture == NULL) {
			R600_ERR("failed to create temporary texture to hold untiled copy\n");
			pipe_resource_reference(&trans->transfer.resource, NULL);
			FREE(trans);
			return NULL;
		}

		trans->transfer.stride =
			((struct r600_resource_texture *)trans->staging_texture)->pitch_in_bytes[0];
		if (!discard) {
			r600_copy_to_staging_texture(ctx, trans);
			/* Always referenced in the blit. */
			ctx->flush(ctx, 0, NULL);
		}
		return &trans->transfer;
	}
	trans->transfer.stride = rtex->pitch_in_bytes[sr.level];
	trans->offset = r600_texture_get_offset(rtex, sr.level, box->z, sr.face);
	return &trans->transfer;
}

void r600_texture_transfer_destroy(struct pipe_context *ctx,
				   struct pipe_transfer *transfer)
{
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct r600_resource_texture *rtex = (struct r600_resource_texture*)transfer->resource;

	if (rtransfer->staging_texture) {
		if (transfer->usage & PIPE_TRANSFER_WRITE) {
			r600_copy_from_staging_texture(ctx, rtransfer);
		}
		pipe_resource_reference(&rtransfer->staging_texture, NULL);
	}
	if (rtex->flushed_depth_texture) {
		pipe_resource_reference((struct pipe_resource **)&rtex->flushed_depth_texture, NULL);
	}
	pipe_resource_reference(&transfer->resource, NULL);
	FREE(transfer);
}

void* r600_texture_transfer_map(struct pipe_context *ctx,
				struct pipe_transfer* transfer)
{
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct r600_bo *bo;
	enum pipe_format format = transfer->resource->format;
	struct radeon *radeon = (struct radeon *)ctx->screen->winsys;
	unsigned offset = 0;
	unsigned usage = 0;
	char *map;

	if (rtransfer->staging_texture) {
		bo = ((struct r600_resource *)rtransfer->staging_texture)->bo;
	} else {
		struct r600_resource_texture *rtex = (struct r600_resource_texture*)transfer->resource;

		if (rtex->flushed_depth_texture)
			bo = ((struct r600_resource *)rtex->flushed_depth_texture)->bo;
		else
			bo = ((struct r600_resource *)transfer->resource)->bo;

		offset = rtransfer->offset +
			transfer->box.y / util_format_get_blockheight(format) * transfer->stride +
			transfer->box.x / util_format_get_blockwidth(format) * util_format_get_blocksize(format);
	}

	if (transfer->usage & PIPE_TRANSFER_WRITE) {
		usage |= PB_USAGE_CPU_WRITE;

		if (transfer->usage & PIPE_TRANSFER_DISCARD) {
			/* No special handling for DISCARD yet. */
		}

		if (transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT) {
			/* No special handling for FLUSH_EXPLICIT yet. */
		}
	}

	if (transfer->usage & PIPE_TRANSFER_READ) {
		usage |= PB_USAGE_CPU_READ;
	}

	if (transfer->usage & PIPE_TRANSFER_DONTBLOCK) {
		usage |= PB_USAGE_DONTBLOCK;
	}

	if (transfer->usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
		usage |= PB_USAGE_UNSYNCHRONIZED;
	}

	map = r600_bo_map(radeon, bo, usage, ctx);
	if (!map) {
		return NULL;
	}

	return map + offset;
}

void r600_texture_transfer_unmap(struct pipe_context *ctx,
				 struct pipe_transfer* transfer)
{
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct radeon *radeon = (struct radeon *)ctx->screen->winsys;
	struct r600_bo *bo;

	if (rtransfer->staging_texture) {
		bo = ((struct r600_resource *)rtransfer->staging_texture)->bo;
	} else {
		struct r600_resource_texture *rtex = (struct r600_resource_texture*)transfer->resource;

		if (rtex->flushed_depth_texture) {
			bo = ((struct r600_resource *)rtex->flushed_depth_texture)->bo;
		} else {
			bo = ((struct r600_resource *)transfer->resource)->bo;
		}
	}
	r600_bo_unmap(radeon, bo);
}

struct u_resource_vtbl r600_texture_vtbl =
{
	u_default_resource_get_handle,	/* get_handle */
	r600_texture_destroy,		/* resource_destroy */
	r600_texture_is_referenced,	/* is_resource_referenced */
	r600_texture_get_transfer,	/* get_transfer */
	r600_texture_transfer_destroy,	/* transfer_destroy */
	r600_texture_transfer_map,	/* transfer_map */
	u_default_transfer_flush_region,/* transfer_flush_region */
	r600_texture_transfer_unmap,	/* transfer_unmap */
	u_default_transfer_inline_write	/* transfer_inline_write */
};

void r600_init_screen_texture_functions(struct pipe_screen *screen)
{
	screen->get_tex_surface = r600_get_tex_surface;
	screen->tex_surface_destroy = r600_tex_surface_destroy;
}

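/* Compose the format's channel swizzle with an optional view swizzle and
 * encode the result as the DST_SEL_X/Y/Z/W fields of SQ_TEX_RESOURCE_WORD4. */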
static unsigned r600_get_swizzle_combined(const unsigned char *swizzle_format,
					  const unsigned char *swizzle_view)
{
	unsigned i;
	unsigned char swizzle[4];
	unsigned result = 0;
	const uint32_t swizzle_shift[4] = {
		16, 19, 22, 25,
	};
	const uint32_t swizzle_bit[4] = {
		0, 1, 2, 3,
	};

	if (swizzle_view) {
		/* Combine two sets of swizzles. */
		for (i = 0; i < 4; i++) {
			swizzle[i] = swizzle_view[i] <= UTIL_FORMAT_SWIZZLE_W ?
				     swizzle_format[swizzle_view[i]] : swizzle_view[i];
		}
	} else {
		memcpy(swizzle, swizzle_format, 4);
	}

	/* Get swizzle. */
	for (i = 0; i < 4; i++) {
		switch (swizzle[i]) {
		case UTIL_FORMAT_SWIZZLE_Y:
			result |= swizzle_bit[1] << swizzle_shift[i];
			break;
		case UTIL_FORMAT_SWIZZLE_Z:
			result |= swizzle_bit[2] << swizzle_shift[i];
			break;
		case UTIL_FORMAT_SWIZZLE_W:
			result |= swizzle_bit[3] << swizzle_shift[i];
			break;
		case UTIL_FORMAT_SWIZZLE_0:
			result |= V_038010_SQ_SEL_0 << swizzle_shift[i];
			break;
		case UTIL_FORMAT_SWIZZLE_1:
			result |= V_038010_SQ_SEL_1 << swizzle_shift[i];
			break;
		default: /* UTIL_FORMAT_SWIZZLE_X */
			result |= swizzle_bit[0] << swizzle_shift[i];
		}
	}
	return result;
}

/* texture format translate */
uint32_t r600_translate_texformat(enum pipe_format format,
				  const unsigned char *swizzle_view,
				  uint32_t *word4_p, uint32_t *yuv_format_p)
{
	uint32_t result = 0, word4 = 0, yuv_format = 0;
	const struct util_format_description *desc;
	boolean uniform = TRUE;
	int i;
	const uint32_t sign_bit[4] = {
		S_038010_FORMAT_COMP_X(V_038010_SQ_FORMAT_COMP_SIGNED),
		S_038010_FORMAT_COMP_Y(V_038010_SQ_FORMAT_COMP_SIGNED),
		S_038010_FORMAT_COMP_Z(V_038010_SQ_FORMAT_COMP_SIGNED),
		S_038010_FORMAT_COMP_W(V_038010_SQ_FORMAT_COMP_SIGNED)
	};
	desc = util_format_description(format);

	word4 |= r600_get_swizzle_combined(desc->swizzle, swizzle_view);

	/* Colorspace (return non-RGB formats directly). */
	switch (desc->colorspace) {
	/* Depth stencil formats */
	case UTIL_FORMAT_COLORSPACE_ZS:
		switch (format) {
		case PIPE_FORMAT_Z16_UNORM:
			result = FMT_16;
			goto out_word4;
		case PIPE_FORMAT_X24S8_USCALED:
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
			/* fall through */
		case PIPE_FORMAT_Z24X8_UNORM:
		case PIPE_FORMAT_Z24_UNORM_S8_USCALED:
			result = FMT_8_24;
			goto out_word4;
		case PIPE_FORMAT_S8X24_USCALED:
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
			/* fall through */
		case PIPE_FORMAT_X8Z24_UNORM:
		case PIPE_FORMAT_S8_USCALED_Z24_UNORM:
			result = FMT_24_8;
			goto out_word4;
		case PIPE_FORMAT_S8_USCALED:
			result = V_0280A0_COLOR_8;
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
			goto out_word4;
		default:
			goto out_unknown;
		}

	case UTIL_FORMAT_COLORSPACE_YUV:
		yuv_format |= (1 << 30);
		switch (format) {
		case PIPE_FORMAT_UYVY:
		case PIPE_FORMAT_YUYV:
		default:
			break;
		}
		goto out_unknown; /* TODO */

	case UTIL_FORMAT_COLORSPACE_SRGB:
		word4 |= S_038010_FORCE_DEGAMMA(1);
		if (format == PIPE_FORMAT_L8A8_SRGB || format == PIPE_FORMAT_L8_SRGB)
			goto out_unknown; /* fails for some reason - TODO */
		break;

	default:
		break;
	}

	/* S3TC formats. TODO */
	if (desc->layout == UTIL_FORMAT_LAYOUT_S3TC) {
		goto out_unknown;
	}


	for (i = 0; i < desc->nr_channels; i++) {
		if (desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
			word4 |= sign_bit[i];
		}
	}

	/* R8G8Bx_SNORM - TODO CxV8U8 */

	/* RGTC - TODO */

	/* See whether the components are of the same size. */
	for (i = 1; i < desc->nr_channels; i++) {
		uniform = uniform && desc->channel[0].size == desc->channel[i].size;
	}

	/* Non-uniform formats. */
	if (!uniform) {
		switch(desc->nr_channels) {
		case 3:
			if (desc->channel[0].size == 5 &&
			    desc->channel[1].size == 6 &&
			    desc->channel[2].size == 5) {
				result = FMT_5_6_5;
				goto out_word4;
			}
			goto out_unknown;
		case 4:
			if (desc->channel[0].size == 5 &&
			    desc->channel[1].size == 5 &&
			    desc->channel[2].size == 5 &&
			    desc->channel[3].size == 1) {
				result = FMT_1_5_5_5;
				goto out_word4;
			}
			if (desc->channel[0].size == 10 &&
			    desc->channel[1].size == 10 &&
			    desc->channel[2].size == 10 &&
			    desc->channel[3].size == 2) {
				result = FMT_10_10_10_2;
				goto out_word4;
			}
			goto out_unknown;
		}
		goto out_unknown;
	}

	/* Find the first non-VOID channel. */
	for (i = 0; i < 4; i++) {
		if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
			break;
		}
	}

	if (i == 4)
		goto out_unknown;

	/* uniform formats */
	switch (desc->channel[i].type) {
	case UTIL_FORMAT_TYPE_UNSIGNED:
	case UTIL_FORMAT_TYPE_SIGNED:
		if (!desc->channel[i].normalized &&
		    desc->colorspace != UTIL_FORMAT_COLORSPACE_SRGB) {
			goto out_unknown;
		}

		switch (desc->channel[i].size) {
		case 4:
			switch (desc->nr_channels) {
			case 2:
				result = FMT_4_4;
				goto out_word4;
			case 4:
				result = FMT_4_4_4_4;
				goto out_word4;
			}
			goto out_unknown;
		case 8:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_8;
				goto out_word4;
			case 2:
				result = FMT_8_8;
				goto out_word4;
			case 4:
				result = FMT_8_8_8_8;
				goto out_word4;
			}
			goto out_unknown;
		case 16:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_16;
				goto out_word4;
			case 2:
				result = FMT_16_16;
				goto out_word4;
			case 4:
				result = FMT_16_16_16_16;
				goto out_word4;
			}
		}
		goto out_unknown;

	case UTIL_FORMAT_TYPE_FLOAT:
		switch (desc->channel[i].size) {
		case 16:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_16_FLOAT;
				goto out_word4;
			case 2:
				result = FMT_16_16_FLOAT;
				goto out_word4;
			case 4:
				result = FMT_16_16_16_16_FLOAT;
				goto out_word4;
			}
			goto out_unknown;
		case 32:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_32_FLOAT;
				goto out_word4;
			case 2:
				result = FMT_32_32_FLOAT;
				goto out_word4;
			case 4:
				result = FMT_32_32_32_32_FLOAT;
				goto out_word4;
			}
		}

	}
out_word4:
	if (word4_p)
		*word4_p = word4;
	if (yuv_format_p)
		*yuv_format_p = yuv_format;
	return result;
out_unknown:
//	R600_ERR("Unable to handle texformat %d %s\n", format, util_format_name(format));
	return ~0;
}