vdpau: add vdpau-r600 target
[mesa.git] / src / gallium / drivers / r600 / r600_texture.c
/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Jerome Glisse
 *      Corbin Simpson
 */
#include <errno.h>
#include <pipe/p_screen.h>
#include <util/u_format.h>
#include <util/u_format_s3tc.h>
#include <util/u_math.h>
#include <util/u_inlines.h>
#include <util/u_memory.h>
#include "state_tracker/drm_driver.h"
#include "pipebuffer/pb_buffer.h"
#include "r600_pipe.h"
#include "r600_resource.h"
#include "r600_state_inlines.h"
#include "r600d.h"
#include "r600_formats.h"

/* Copy from a full GPU texture to a transfer's staging one. */
static void r600_copy_to_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
	struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
	struct pipe_resource *texture = transfer->resource;

	ctx->resource_copy_region(ctx, rtransfer->staging_texture,
				  0, 0, 0, 0, texture, transfer->level,
				  &transfer->box);
}


/* Copy from a transfer's staging texture to a full GPU one. */
static void r600_copy_from_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
	struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
	struct pipe_resource *texture = transfer->resource;
	struct pipe_box sbox;

	sbox.x = sbox.y = sbox.z = 0;
	sbox.width = transfer->box.width;
	sbox.height = transfer->box.height;
	/* XXX that might be wrong */
	sbox.depth = 1;
	ctx->resource_copy_region(ctx, texture, transfer->level,
				  transfer->box.x, transfer->box.y, transfer->box.z,
				  rtransfer->staging_texture,
				  0, &sbox);

	ctx->flush(ctx, 0, NULL);
}
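
/* Note: both helpers go through resource_copy_region(); the write-back path
 * additionally flushes the context, presumably so the blit is submitted to the
 * hardware before the staging texture is unreferenced in
 * r600_texture_transfer_destroy(). */
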
unsigned r600_texture_get_offset(struct r600_resource_texture *rtex,
				 unsigned level, unsigned layer)
{
	unsigned offset = rtex->offset[level];

	switch (rtex->resource.b.b.b.target) {
	case PIPE_TEXTURE_3D:
	case PIPE_TEXTURE_CUBE:
	default:
		return offset + layer * rtex->layer_size[level];
	}
}

static unsigned r600_get_block_alignment(struct pipe_screen *screen,
					 enum pipe_format format,
					 unsigned array_mode)
{
	struct r600_screen* rscreen = (struct r600_screen *)screen;
	unsigned pixsize = util_format_get_blocksize(format);
	int p_align;

	switch(array_mode) {
	case V_038000_ARRAY_1D_TILED_THIN1:
		p_align = MAX2(8,
			       ((rscreen->tiling_info->group_bytes / 8 / pixsize)));
		break;
	case V_038000_ARRAY_2D_TILED_THIN1:
		p_align = MAX2(rscreen->tiling_info->num_banks,
			       (((rscreen->tiling_info->group_bytes / 8 / pixsize)) *
				rscreen->tiling_info->num_banks)) * 8;
		break;
	case V_038000_ARRAY_LINEAR_ALIGNED:
		p_align = MAX2(64, rscreen->tiling_info->group_bytes / pixsize);
		break;
	case V_038000_ARRAY_LINEAR_GENERAL:
	default:
		p_align = rscreen->tiling_info->group_bytes / pixsize;
		break;
	}
	return p_align;
}

static unsigned r600_get_height_alignment(struct pipe_screen *screen,
					  unsigned array_mode)
{
	struct r600_screen* rscreen = (struct r600_screen *)screen;
	int h_align;

	switch (array_mode) {
	case V_038000_ARRAY_2D_TILED_THIN1:
		h_align = rscreen->tiling_info->num_channels * 8;
		break;
	case V_038000_ARRAY_1D_TILED_THIN1:
	case V_038000_ARRAY_LINEAR_ALIGNED:
		h_align = 8;
		break;
	case V_038000_ARRAY_LINEAR_GENERAL:
	default:
		h_align = 1;
		break;
	}
	return h_align;
}

static unsigned r600_get_base_alignment(struct pipe_screen *screen,
					enum pipe_format format,
					unsigned array_mode)
{
	struct r600_screen* rscreen = (struct r600_screen *)screen;
	unsigned pixsize = util_format_get_blocksize(format);
	int p_align = r600_get_block_alignment(screen, format, array_mode);
	int h_align = r600_get_height_alignment(screen, array_mode);
	int b_align;

	switch (array_mode) {
	case V_038000_ARRAY_2D_TILED_THIN1:
		b_align = MAX2(rscreen->tiling_info->num_banks * rscreen->tiling_info->num_channels * 8 * 8 * pixsize,
			       p_align * pixsize * h_align);
		break;
	case V_038000_ARRAY_1D_TILED_THIN1:
	case V_038000_ARRAY_LINEAR_ALIGNED:
	case V_038000_ARRAY_LINEAR_GENERAL:
	default:
		b_align = rscreen->tiling_info->group_bytes;
		break;
	}
	return b_align;
}
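
/* The three helpers above derive per-level alignment constraints from the
 * tiling parameters stored in r600_screen (tile group size in bytes, number
 * of banks, number of channels): r600_get_block_alignment() gives the pitch
 * alignment in blocks, r600_get_height_alignment() the height alignment in
 * block rows, and r600_get_base_alignment() the byte alignment of each
 * level's start address. */
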
static unsigned mip_minify(unsigned size, unsigned level)
{
	unsigned val;
	val = u_minify(size, level);
	if (level > 0)
		val = util_next_power_of_two(val);
	return val;
}

static unsigned r600_texture_get_nblocksx(struct pipe_screen *screen,
					  struct r600_resource_texture *rtex,
					  unsigned level)
{
	struct pipe_resource *ptex = &rtex->resource.b.b.b;
	unsigned nblocksx, block_align, width;
	unsigned blocksize = util_format_get_blocksize(ptex->format);

	if (rtex->pitch_override)
		return rtex->pitch_override / blocksize;

	width = mip_minify(ptex->width0, level);
	nblocksx = util_format_get_nblocksx(ptex->format, width);

	block_align = r600_get_block_alignment(screen, ptex->format,
					       rtex->array_mode[level]);
	nblocksx = align(nblocksx, block_align);
	return nblocksx;
}

static unsigned r600_texture_get_nblocksy(struct pipe_screen *screen,
					  struct r600_resource_texture *rtex,
					  unsigned level)
{
	struct pipe_resource *ptex = &rtex->resource.b.b.b;
	unsigned height, tile_height;

	height = mip_minify(ptex->height0, level);
	height = util_format_get_nblocksy(ptex->format, height);
	tile_height = r600_get_height_alignment(screen,
						rtex->array_mode[level]);
	height = align(height, tile_height);
	return height;
}

static void r600_texture_set_array_mode(struct pipe_screen *screen,
					struct r600_resource_texture *rtex,
					unsigned level, unsigned array_mode)
{
	struct pipe_resource *ptex = &rtex->resource.b.b.b;

	switch (array_mode) {
	case V_0280A0_ARRAY_LINEAR_GENERAL:
	case V_0280A0_ARRAY_LINEAR_ALIGNED:
	case V_0280A0_ARRAY_1D_TILED_THIN1:
	default:
		rtex->array_mode[level] = array_mode;
		break;
	case V_0280A0_ARRAY_2D_TILED_THIN1:
	{
		unsigned w, h, tile_height, tile_width;

		tile_height = r600_get_height_alignment(screen, array_mode);
		tile_width = r600_get_block_alignment(screen, ptex->format, array_mode);

		w = mip_minify(ptex->width0, level);
		h = mip_minify(ptex->height0, level);
		if (w <= tile_width || h <= tile_height)
			rtex->array_mode[level] = V_0280A0_ARRAY_1D_TILED_THIN1;
		else
			rtex->array_mode[level] = array_mode;
	}
	break;
	}
}

static void r600_setup_miptree(struct pipe_screen *screen,
			       struct r600_resource_texture *rtex,
			       unsigned array_mode)
{
	struct pipe_resource *ptex = &rtex->resource.b.b.b;
	struct radeon *radeon = (struct radeon *)screen->winsys;
	enum chip_class chipc = r600_get_family_class(radeon);
	unsigned size, layer_size, i, offset;
	unsigned nblocksx, nblocksy;

	for (i = 0, offset = 0; i <= ptex->last_level; i++) {
		unsigned blocksize = util_format_get_blocksize(ptex->format);

		r600_texture_set_array_mode(screen, rtex, i, array_mode);

		nblocksx = r600_texture_get_nblocksx(screen, rtex, i);
		nblocksy = r600_texture_get_nblocksy(screen, rtex, i);

		layer_size = nblocksx * nblocksy * blocksize;
		if (ptex->target == PIPE_TEXTURE_CUBE) {
			if (chipc >= R700)
				size = layer_size * 8;
			else
				size = layer_size * 6;
		}
		else if (ptex->target == PIPE_TEXTURE_3D)
			size = layer_size * u_minify(ptex->depth0, i);
		else
			size = layer_size * ptex->array_size;

		/* align base image and start of miptree */
		if ((i == 0) || (i == 1))
			offset = align(offset, r600_get_base_alignment(screen, ptex->format, array_mode));
		rtex->offset[i] = offset;
		rtex->layer_size[i] = layer_size;
		rtex->pitch_in_blocks[i] = nblocksx; /* CB talks in elements */
		rtex->pitch_in_bytes[i] = nblocksx * blocksize;

		offset += size;
	}
	rtex->size = offset;
}
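
/* Illustrative example of the layout r600_setup_miptree() computes, assuming
 * a linear-aligned 256x256 PIPE_FORMAT_B8G8R8A8_UNORM texture with mipmaps
 * and a 256-byte tile group (the exact numbers depend on the tiling info, so
 * treat this as a sketch only):
 *
 *   level 0: pitch 256 blocks (1024 bytes), 256 rows -> layer_size 262144
 *   level 1: 128x128 minified, pitch aligned to 64 blocks -> 128 blocks
 *            (512 bytes), 128 rows -> layer_size 65536, offset 262144
 *   level 2: 64x64 -> pitch 64 blocks (256 bytes), 64 rows -> layer_size 16384
 *
 * Later levels keep accumulating into rtex->size; only levels 0 and 1 are
 * realigned to the base alignment. */
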
/* Figure out whether u_blitter will fall back to a transfer operation.
 * If so, don't use a staging resource.
 */
static boolean permit_hardware_blit(struct pipe_screen *screen,
				    const struct pipe_resource *res)
{
	unsigned bind;

	if (util_format_is_depth_or_stencil(res->format))
		bind = PIPE_BIND_DEPTH_STENCIL;
	else
		bind = PIPE_BIND_RENDER_TARGET;

	/* hackaround for S3TC */
	if (util_format_is_compressed(res->format))
		return TRUE;

	if (!screen->is_format_supported(screen,
					 res->format,
					 res->target,
					 res->nr_samples,
					 bind, 0))
		return FALSE;

	if (!screen->is_format_supported(screen,
					 res->format,
					 res->target,
					 res->nr_samples,
					 PIPE_BIND_SAMPLER_VIEW, 0))
		return FALSE;

	switch (res->usage) {
	case PIPE_USAGE_STREAM:
	case PIPE_USAGE_STAGING:
	case PIPE_USAGE_STATIC:
	case PIPE_USAGE_IMMUTABLE:
		return FALSE;

	default:
		return TRUE;
	}
}
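
/* Summary: a resource qualifies for the GPU blit path only if the screen can
 * both render to (or depth-test against) its format and sample from it;
 * compressed formats are allowed unconditionally as the S3TC workaround above,
 * and resources declared with STREAM/STAGING/STATIC/IMMUTABLE usage are
 * rejected. */
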
static boolean r600_texture_get_handle(struct pipe_screen* screen,
				       struct pipe_resource *ptex,
				       struct winsys_handle *whandle)
{
	struct r600_resource_texture *rtex = (struct r600_resource_texture*)ptex;
	struct r600_resource *resource = &rtex->resource;
	struct radeon *radeon = (struct radeon *)screen->winsys;

	return r600_bo_get_winsys_handle(radeon, resource->bo,
					 rtex->pitch_in_bytes[0], whandle);
}

static void r600_texture_destroy(struct pipe_screen *screen,
				 struct pipe_resource *ptex)
{
	struct r600_resource_texture *rtex = (struct r600_resource_texture*)ptex;
	struct r600_resource *resource = &rtex->resource;
	struct radeon *radeon = (struct radeon *)screen->winsys;

	if (rtex->flushed_depth_texture)
		pipe_resource_reference((struct pipe_resource **)&rtex->flushed_depth_texture, NULL);

	if (resource->bo) {
		r600_bo_reference(radeon, &resource->bo, NULL);
	}
	FREE(rtex);
}

static unsigned int r600_texture_is_referenced(struct pipe_context *context,
					       struct pipe_resource *texture,
					       unsigned level, int layer)
{
	/* FIXME */
	return PIPE_REFERENCED_FOR_READ | PIPE_REFERENCED_FOR_WRITE;
}

static const struct u_resource_vtbl r600_texture_vtbl =
{
	r600_texture_get_handle,	/* get_handle */
	r600_texture_destroy,		/* resource_destroy */
	r600_texture_is_referenced,	/* is_resource_referenced */
	r600_texture_get_transfer,	/* get_transfer */
	r600_texture_transfer_destroy,	/* transfer_destroy */
	r600_texture_transfer_map,	/* transfer_map */
	u_default_transfer_flush_region,/* transfer_flush_region */
	r600_texture_transfer_unmap,	/* transfer_unmap */
	u_default_transfer_inline_write	/* transfer_inline_write */
};
static struct r600_resource_texture *
r600_texture_create_object(struct pipe_screen *screen,
			   const struct pipe_resource *base,
			   unsigned array_mode,
			   unsigned pitch_in_bytes_override,
			   unsigned max_buffer_size,
			   struct r600_bo *bo)
{
	struct r600_resource_texture *rtex;
	struct r600_resource *resource;
	struct radeon *radeon = (struct radeon *)screen->winsys;

	rtex = CALLOC_STRUCT(r600_resource_texture);
	if (rtex == NULL)
		return NULL;

	resource = &rtex->resource;
	resource->b.b.b = *base;
	resource->b.b.vtbl = &r600_texture_vtbl;
	pipe_reference_init(&resource->b.b.b.reference, 1);
	resource->b.b.b.screen = screen;
	resource->bo = bo;
	rtex->pitch_override = pitch_in_bytes_override;
	/* only mark depth textures the HW can hit as depth textures */
	if (util_format_is_depth_or_stencil(base->format) && permit_hardware_blit(screen, base))
		rtex->depth = 1;

	r600_setup_miptree(screen, rtex, array_mode);

	resource->size = rtex->size;

	if (!resource->bo) {
		struct pipe_resource *ptex = &rtex->resource.b.b.b;
		int base_align = r600_get_base_alignment(screen, ptex->format, array_mode);

		resource->bo = r600_bo(radeon, rtex->size, base_align, base->bind, base->usage);
		if (!resource->bo) {
			FREE(rtex);
			return NULL;
		}
	}
	return rtex;
}

struct pipe_resource *r600_texture_create(struct pipe_screen *screen,
					  const struct pipe_resource *templ)
{
	unsigned array_mode = 0;
	static int force_tiling = -1;

	/* Would like some magic "get_bool_option_once" routine.
	 */
	if (force_tiling == -1) {
		struct r600_screen *rscreen = (struct r600_screen *)screen;
		/* reenable when 2D tiling is fixed better */
		/*if (r600_get_minor_version(rscreen->radeon) >= 9)
			force_tiling = debug_get_bool_option("R600_TILING", TRUE);*/
		force_tiling = debug_get_bool_option("R600_TILING", FALSE);
	}

	if (force_tiling && permit_hardware_blit(screen, templ)) {
		if (!(templ->flags & R600_RESOURCE_FLAG_TRANSFER) &&
		    !(templ->bind & PIPE_BIND_SCANOUT)) {
			array_mode = V_038000_ARRAY_2D_TILED_THIN1;
		}
	}

	if (!(templ->flags & R600_RESOURCE_FLAG_TRANSFER) &&
	    util_format_is_compressed(templ->format))
		array_mode = V_038000_ARRAY_1D_TILED_THIN1;

	return (struct pipe_resource *)r600_texture_create_object(screen, templ, array_mode,
								   0, 0, NULL);
}
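
/* 2D tiling is currently opt-in: it is only selected when the R600_TILING
 * environment variable is set to a true value, the format passes
 * permit_hardware_blit(), and the resource is neither a transfer temporary
 * nor a scanout buffer. Compressed textures always get the 1D-tiled layout. */
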
static struct pipe_surface *r600_create_surface(struct pipe_context *pipe,
						struct pipe_resource *texture,
						const struct pipe_surface *surf_tmpl)
{
	struct r600_resource_texture *rtex = (struct r600_resource_texture*)texture;
	struct r600_surface *surface = CALLOC_STRUCT(r600_surface);
	unsigned level = surf_tmpl->u.tex.level;

	assert(surf_tmpl->u.tex.first_layer == surf_tmpl->u.tex.last_layer);
	if (surface == NULL)
		return NULL;
	/* XXX no offset */
/*	offset = r600_texture_get_offset(rtex, level, surf_tmpl->u.tex.first_layer);*/
	pipe_reference_init(&surface->base.reference, 1);
	pipe_resource_reference(&surface->base.texture, texture);
	surface->base.context = pipe;
	surface->base.format = surf_tmpl->format;
	surface->base.width = mip_minify(texture->width0, level);
	surface->base.height = mip_minify(texture->height0, level);
	surface->base.usage = surf_tmpl->usage;
	surface->base.texture = texture;
	surface->base.u.tex.first_layer = surf_tmpl->u.tex.first_layer;
	surface->base.u.tex.last_layer = surf_tmpl->u.tex.last_layer;
	surface->base.u.tex.level = level;

	surface->aligned_height = r600_texture_get_nblocksy(pipe->screen,
							    rtex, level);
	return &surface->base;
}

static void r600_surface_destroy(struct pipe_context *pipe,
				 struct pipe_surface *surface)
{
	pipe_resource_reference(&surface->texture, NULL);
	FREE(surface);
}


struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen,
					       const struct pipe_resource *templ,
					       struct winsys_handle *whandle)
{
	struct radeon *rw = (struct radeon*)screen->winsys;
	struct r600_bo *bo = NULL;
	unsigned array_mode = 0;

	/* Support only 2D textures without mipmaps */
	if ((templ->target != PIPE_TEXTURE_2D && templ->target != PIPE_TEXTURE_RECT) ||
	    templ->depth0 != 1 || templ->last_level != 0)
		return NULL;

	bo = r600_bo_handle(rw, whandle->handle, &array_mode);
	if (bo == NULL) {
		return NULL;
	}

	return (struct pipe_resource *)r600_texture_create_object(screen, templ, array_mode,
								   whandle->stride,
								   0,
								   bo);
}
int r600_texture_depth_flush(struct pipe_context *ctx,
			     struct pipe_resource *texture, boolean just_create)
{
	struct r600_resource_texture *rtex = (struct r600_resource_texture*)texture;
	struct pipe_resource resource;

	if (rtex->flushed_depth_texture)
		goto out;

	resource.target = PIPE_TEXTURE_2D;
	resource.format = texture->format;
	resource.width0 = texture->width0;
	resource.height0 = texture->height0;
	resource.depth0 = 1;
	resource.array_size = 1;
	resource.last_level = texture->last_level;
	resource.nr_samples = 0;
	resource.usage = PIPE_USAGE_DYNAMIC;
	resource.bind = 0;
	resource.flags = R600_RESOURCE_FLAG_TRANSFER;

	resource.bind |= PIPE_BIND_DEPTH_STENCIL;

	rtex->flushed_depth_texture = (struct r600_resource_texture *)ctx->screen->resource_create(ctx->screen, &resource);
	if (rtex->flushed_depth_texture == NULL) {
		R600_ERR("failed to create temporary texture to hold untiled copy\n");
		return -ENOMEM;
	}

	((struct r600_resource_texture *)rtex->flushed_depth_texture)->is_flushing_texture = TRUE;
out:
	if (just_create)
		return 0;

	/* XXX: only do this if the depth texture has actually changed:
	 */
	r600_blit_uncompress_depth(ctx, rtex);
	return 0;
}
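
/* The "flushed depth texture" is a plain, transfer-friendly copy of a depth
 * buffer: r600_blit_uncompress_depth() resolves the (possibly compressed)
 * depth data into it so transfers can read it, and writes are pushed back
 * with r600_blit_push_depth() when the transfer is destroyed. With
 * just_create set, only the copy is allocated and no resolve is done. */
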
/* Needs adjustment for the pixel format:
 */
static INLINE unsigned u_box_volume( const struct pipe_box *box )
{
	return box->width * box->depth * box->height;
}
struct pipe_transfer* r600_texture_get_transfer(struct pipe_context *ctx,
						struct pipe_resource *texture,
						unsigned level,
						unsigned usage,
						const struct pipe_box *box)
{
	struct r600_resource_texture *rtex = (struct r600_resource_texture*)texture;
	struct pipe_resource resource;
	struct r600_transfer *trans;
	int r;
	boolean use_staging_texture = FALSE;

	/* We cannot map a tiled texture directly because the data is
	 * in a different order, therefore we do detiling using a blit.
	 *
	 * Also, use a temporary in GTT memory for read transfers, as
	 * the CPU is much happier reading out of cached system memory
	 * than uncached VRAM.
	 */
	if (R600_TEX_IS_TILED(rtex, level))
		use_staging_texture = TRUE;

	if ((usage & PIPE_TRANSFER_READ) && u_box_volume(box) > 1024)
		use_staging_texture = TRUE;

	/* XXX: Use a staging texture for uploads if the underlying BO
	 * is busy. We currently have no interface for checking that, so
	 * do it eagerly whenever the transfer doesn't require a readback
	 * and might block.
	 */
	if ((usage & PIPE_TRANSFER_WRITE) &&
	    !(usage & (PIPE_TRANSFER_READ |
		       PIPE_TRANSFER_DONTBLOCK |
		       PIPE_TRANSFER_UNSYNCHRONIZED)))
		use_staging_texture = TRUE;

	if (!permit_hardware_blit(ctx->screen, texture) ||
	    (texture->flags & R600_RESOURCE_FLAG_TRANSFER))
		use_staging_texture = FALSE;

	trans = CALLOC_STRUCT(r600_transfer);
	if (trans == NULL)
		return NULL;
	pipe_resource_reference(&trans->transfer.resource, texture);
	trans->transfer.level = level;
	trans->transfer.usage = usage;
	trans->transfer.box = *box;
	if (rtex->depth) {
		/* XXX: only readback the rectangle which is being mapped?
		 */
		/* XXX: when discard is true, no need to read back from depth texture
		 */
		r = r600_texture_depth_flush(ctx, texture, FALSE);
		if (r < 0) {
			R600_ERR("failed to create temporary texture to hold untiled copy\n");
			pipe_resource_reference(&trans->transfer.resource, NULL);
			FREE(trans);
			return NULL;
		}
		trans->transfer.stride = rtex->flushed_depth_texture->pitch_in_bytes[level];
		trans->offset = r600_texture_get_offset(rtex->flushed_depth_texture, level, box->z);
		return &trans->transfer;
	} else if (use_staging_texture) {
		resource.target = PIPE_TEXTURE_2D;
		resource.format = texture->format;
		resource.width0 = box->width;
		resource.height0 = box->height;
		resource.depth0 = 1;
		resource.array_size = 1;
		resource.last_level = 0;
		resource.nr_samples = 0;
		resource.usage = PIPE_USAGE_STAGING;
		resource.bind = 0;
		resource.flags = R600_RESOURCE_FLAG_TRANSFER;
		/* For texture reading, the temporary (detiled) texture is used as
		 * a render target when blitting from a tiled texture. */
		if (usage & PIPE_TRANSFER_READ) {
			resource.bind |= PIPE_BIND_RENDER_TARGET;
		}
		/* For texture writing, the temporary texture is used as a sampler
		 * when blitting into a tiled texture. */
		if (usage & PIPE_TRANSFER_WRITE) {
			resource.bind |= PIPE_BIND_SAMPLER_VIEW;
		}
		/* Create the temporary texture. */
		trans->staging_texture = ctx->screen->resource_create(ctx->screen, &resource);
		if (trans->staging_texture == NULL) {
			R600_ERR("failed to create temporary texture to hold untiled copy\n");
			pipe_resource_reference(&trans->transfer.resource, NULL);
			FREE(trans);
			return NULL;
		}

		trans->transfer.stride =
			((struct r600_resource_texture *)trans->staging_texture)->pitch_in_bytes[0];
		if (usage & PIPE_TRANSFER_READ) {
			r600_copy_to_staging_texture(ctx, trans);
			/* Always referenced in the blit. */
			ctx->flush(ctx, 0, NULL);
		}
		return &trans->transfer;
	}
	trans->transfer.stride = rtex->pitch_in_bytes[level];
	trans->transfer.layer_stride = rtex->layer_size[level];
	trans->offset = r600_texture_get_offset(rtex, level, box->z);
	return &trans->transfer;
}
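
/* So a transfer follows one of three paths: depth textures go through the
 * flushed depth copy, tiled or slow-to-map textures go through a temporary
 * staging texture and a blit, and everything else maps the backing buffer
 * directly at the level/layer offset computed above. */
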
void r600_texture_transfer_destroy(struct pipe_context *ctx,
				   struct pipe_transfer *transfer)
{
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct pipe_resource *texture = transfer->resource;
	struct r600_resource_texture *rtex = (struct r600_resource_texture*)texture;

	if (rtransfer->staging_texture) {
		if (transfer->usage & PIPE_TRANSFER_WRITE) {
			r600_copy_from_staging_texture(ctx, rtransfer);
		}
		pipe_resource_reference(&rtransfer->staging_texture, NULL);
	}

	if (rtex->depth && !rtex->is_flushing_texture) {
		if ((transfer->usage & PIPE_TRANSFER_WRITE) && rtex->flushed_depth_texture)
			r600_blit_push_depth(ctx, rtex);
	}

	pipe_resource_reference(&transfer->resource, NULL);
	FREE(transfer);
}

void* r600_texture_transfer_map(struct pipe_context *ctx,
				struct pipe_transfer* transfer)
{
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct r600_bo *bo;
	enum pipe_format format = transfer->resource->format;
	struct radeon *radeon = (struct radeon *)ctx->screen->winsys;
	unsigned offset = 0;
	unsigned usage = 0;
	char *map;

	if (rtransfer->staging_texture) {
		bo = ((struct r600_resource *)rtransfer->staging_texture)->bo;
	} else {
		struct r600_resource_texture *rtex = (struct r600_resource_texture*)transfer->resource;

		if (rtex->flushed_depth_texture)
			bo = ((struct r600_resource *)rtex->flushed_depth_texture)->bo;
		else
			bo = ((struct r600_resource *)transfer->resource)->bo;

		offset = rtransfer->offset +
			transfer->box.y / util_format_get_blockheight(format) * transfer->stride +
			transfer->box.x / util_format_get_blockwidth(format) * util_format_get_blocksize(format);
	}

	if (transfer->usage & PIPE_TRANSFER_WRITE) {
		usage |= PB_USAGE_CPU_WRITE;

		if (transfer->usage & PIPE_TRANSFER_DISCARD) {
		}

		if (transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT) {
		}
	}

	if (transfer->usage & PIPE_TRANSFER_READ) {
		usage |= PB_USAGE_CPU_READ;
	}

	if (transfer->usage & PIPE_TRANSFER_DONTBLOCK) {
		usage |= PB_USAGE_DONTBLOCK;
	}

	if (transfer->usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
		usage |= PB_USAGE_UNSYNCHRONIZED;
	}

	map = r600_bo_map(radeon, bo, usage, ctx);
	if (!map) {
		return NULL;
	}

	return map + offset;
}

void r600_texture_transfer_unmap(struct pipe_context *ctx,
				 struct pipe_transfer* transfer)
{
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct radeon *radeon = (struct radeon *)ctx->screen->winsys;
	struct r600_bo *bo;

	if (rtransfer->staging_texture) {
		bo = ((struct r600_resource *)rtransfer->staging_texture)->bo;
	} else {
		struct r600_resource_texture *rtex = (struct r600_resource_texture*)transfer->resource;

		if (rtex->flushed_depth_texture) {
			bo = ((struct r600_resource *)rtex->flushed_depth_texture)->bo;
		} else {
			bo = ((struct r600_resource *)transfer->resource)->bo;
		}
	}
	r600_bo_unmap(radeon, bo);
}
void r600_init_surface_functions(struct r600_pipe_context *r600)
{
	r600->context.create_surface = r600_create_surface;
	r600->context.surface_destroy = r600_surface_destroy;
}

static unsigned r600_get_swizzle_combined(const unsigned char *swizzle_format,
					  const unsigned char *swizzle_view)
{
	unsigned i;
	unsigned char swizzle[4];
	unsigned result = 0;
	const uint32_t swizzle_shift[4] = {
		16, 19, 22, 25,
	};
	const uint32_t swizzle_bit[4] = {
		0, 1, 2, 3,
	};

	if (swizzle_view) {
		/* Combine two sets of swizzles. */
		for (i = 0; i < 4; i++) {
			swizzle[i] = swizzle_view[i] <= UTIL_FORMAT_SWIZZLE_W ?
				swizzle_format[swizzle_view[i]] : swizzle_view[i];
		}
	} else {
		memcpy(swizzle, swizzle_format, 4);
	}

	/* Get swizzle. */
	for (i = 0; i < 4; i++) {
		switch (swizzle[i]) {
		case UTIL_FORMAT_SWIZZLE_Y:
			result |= swizzle_bit[1] << swizzle_shift[i];
			break;
		case UTIL_FORMAT_SWIZZLE_Z:
			result |= swizzle_bit[2] << swizzle_shift[i];
			break;
		case UTIL_FORMAT_SWIZZLE_W:
			result |= swizzle_bit[3] << swizzle_shift[i];
			break;
		case UTIL_FORMAT_SWIZZLE_0:
			result |= V_038010_SQ_SEL_0 << swizzle_shift[i];
			break;
		case UTIL_FORMAT_SWIZZLE_1:
			result |= V_038010_SQ_SEL_1 << swizzle_shift[i];
			break;
		default: /* UTIL_FORMAT_SWIZZLE_X */
			result |= swizzle_bit[0] << swizzle_shift[i];
		}
	}
	return result;
}
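
/* The returned value is the packed DST_SEL_X/Y/Z/W selection for the texture
 * resource word the caller ORs it into (the word addressed by the S_038010_*
 * macros, i.e. SQ_TEX_RESOURCE_WORD4): three bits per destination component
 * starting at bit 16. For example, an identity XYZW swizzle packs to
 * (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25). */
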
/* texture format translate */
uint32_t r600_translate_texformat(struct pipe_screen *screen,
				  enum pipe_format format,
				  const unsigned char *swizzle_view,
				  uint32_t *word4_p, uint32_t *yuv_format_p)
{
	uint32_t result = 0, word4 = 0, yuv_format = 0;
	const struct util_format_description *desc;
	boolean uniform = TRUE;
	static int r600_enable_s3tc = -1;

	int i;
	const uint32_t sign_bit[4] = {
		S_038010_FORMAT_COMP_X(V_038010_SQ_FORMAT_COMP_SIGNED),
		S_038010_FORMAT_COMP_Y(V_038010_SQ_FORMAT_COMP_SIGNED),
		S_038010_FORMAT_COMP_Z(V_038010_SQ_FORMAT_COMP_SIGNED),
		S_038010_FORMAT_COMP_W(V_038010_SQ_FORMAT_COMP_SIGNED)
	};
	desc = util_format_description(format);

	word4 |= r600_get_swizzle_combined(desc->swizzle, swizzle_view);

	/* Colorspace (return non-RGB formats directly). */
	switch (desc->colorspace) {
	/* Depth stencil formats */
	case UTIL_FORMAT_COLORSPACE_ZS:
		switch (format) {
		case PIPE_FORMAT_Z16_UNORM:
			result = FMT_16;
			goto out_word4;
		case PIPE_FORMAT_X24S8_USCALED:
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
			/* fall through */
		case PIPE_FORMAT_Z24X8_UNORM:
		case PIPE_FORMAT_Z24_UNORM_S8_USCALED:
			result = FMT_8_24;
			goto out_word4;
		case PIPE_FORMAT_S8X24_USCALED:
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
			/* fall through */
		case PIPE_FORMAT_X8Z24_UNORM:
		case PIPE_FORMAT_S8_USCALED_Z24_UNORM:
			result = FMT_24_8;
			goto out_word4;
		case PIPE_FORMAT_S8_USCALED:
			result = FMT_8;
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
			goto out_word4;
		default:
			goto out_unknown;
		}

	case UTIL_FORMAT_COLORSPACE_YUV:
		yuv_format |= (1 << 30);
		switch (format) {
		case PIPE_FORMAT_UYVY:
		case PIPE_FORMAT_YUYV:
		default:
			break;
		}
		goto out_unknown; /* TODO */

	case UTIL_FORMAT_COLORSPACE_SRGB:
		word4 |= S_038010_FORCE_DEGAMMA(1);
		break;

	default:
		break;
	}

	if (r600_enable_s3tc == -1) {
		struct r600_screen *rscreen = (struct r600_screen *)screen;
		if (r600_get_minor_version(rscreen->radeon) >= 9)
			r600_enable_s3tc = 1;
		else
			r600_enable_s3tc = debug_get_bool_option("R600_ENABLE_S3TC", FALSE);
	}

	if (desc->layout == UTIL_FORMAT_LAYOUT_RGTC) {
		if (!r600_enable_s3tc)
			goto out_unknown;

		switch (format) {
		case PIPE_FORMAT_RGTC1_SNORM:
			word4 |= sign_bit[0];
			/* fall through */
		case PIPE_FORMAT_RGTC1_UNORM:
			result = FMT_BC4;
			goto out_word4;
		case PIPE_FORMAT_RGTC2_SNORM:
			word4 |= sign_bit[0] | sign_bit[1];
			/* fall through */
		case PIPE_FORMAT_RGTC2_UNORM:
			result = FMT_BC5;
			goto out_word4;
		default:
			goto out_unknown;
		}
	}

	if (desc->layout == UTIL_FORMAT_LAYOUT_S3TC) {

		if (!r600_enable_s3tc)
			goto out_unknown;

		if (!util_format_s3tc_enabled) {
			goto out_unknown;
		}

		switch (format) {
		case PIPE_FORMAT_DXT1_RGB:
		case PIPE_FORMAT_DXT1_RGBA:
		case PIPE_FORMAT_DXT1_SRGB:
		case PIPE_FORMAT_DXT1_SRGBA:
			result = FMT_BC1;
			goto out_word4;
		case PIPE_FORMAT_DXT3_RGBA:
		case PIPE_FORMAT_DXT3_SRGBA:
			result = FMT_BC2;
			goto out_word4;
		case PIPE_FORMAT_DXT5_RGBA:
		case PIPE_FORMAT_DXT5_SRGBA:
			result = FMT_BC3;
			goto out_word4;
		default:
			goto out_unknown;
		}
	}

	for (i = 0; i < desc->nr_channels; i++) {
		if (desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
			word4 |= sign_bit[i];
		}
	}

	/* R8G8Bx_SNORM - TODO CxV8U8 */

	/* See whether the components are of the same size. */
	for (i = 1; i < desc->nr_channels; i++) {
		uniform = uniform && desc->channel[0].size == desc->channel[i].size;
	}

	/* Non-uniform formats. */
	if (!uniform) {
		switch(desc->nr_channels) {
		case 3:
			if (desc->channel[0].size == 5 &&
			    desc->channel[1].size == 6 &&
			    desc->channel[2].size == 5) {
				result = FMT_5_6_5;
				goto out_word4;
			}
			goto out_unknown;
		case 4:
			if (desc->channel[0].size == 5 &&
			    desc->channel[1].size == 5 &&
			    desc->channel[2].size == 5 &&
			    desc->channel[3].size == 1) {
				result = FMT_1_5_5_5;
				goto out_word4;
			}
			if (desc->channel[0].size == 10 &&
			    desc->channel[1].size == 10 &&
			    desc->channel[2].size == 10 &&
			    desc->channel[3].size == 2) {
				result = FMT_2_10_10_10;
				goto out_word4;
			}
			goto out_unknown;
		}
		goto out_unknown;
	}

	/* Find the first non-VOID channel. */
	for (i = 0; i < 4; i++) {
		if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
			break;
		}
	}

	if (i == 4)
		goto out_unknown;

	/* uniform formats */
	switch (desc->channel[i].type) {
	case UTIL_FORMAT_TYPE_UNSIGNED:
	case UTIL_FORMAT_TYPE_SIGNED:
		if (!desc->channel[i].normalized &&
		    desc->colorspace != UTIL_FORMAT_COLORSPACE_SRGB) {
			goto out_unknown;
		}

		switch (desc->channel[i].size) {
		case 4:
			switch (desc->nr_channels) {
			case 2:
				result = FMT_4_4;
				goto out_word4;
			case 4:
				result = FMT_4_4_4_4;
				goto out_word4;
			}
			goto out_unknown;
		case 8:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_8;
				goto out_word4;
			case 2:
				result = FMT_8_8;
				goto out_word4;
			case 4:
				result = FMT_8_8_8_8;
				goto out_word4;
			}
			goto out_unknown;
		case 16:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_16;
				goto out_word4;
			case 2:
				result = FMT_16_16;
				goto out_word4;
			case 4:
				result = FMT_16_16_16_16;
				goto out_word4;
			}
			goto out_unknown;
		case 32:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_32;
				goto out_word4;
			case 2:
				result = FMT_32_32;
				goto out_word4;
			case 4:
				result = FMT_32_32_32_32;
				goto out_word4;
			}
		}
		goto out_unknown;

	case UTIL_FORMAT_TYPE_FLOAT:
		switch (desc->channel[i].size) {
		case 16:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_16_FLOAT;
				goto out_word4;
			case 2:
				result = FMT_16_16_FLOAT;
				goto out_word4;
			case 4:
				result = FMT_16_16_16_16_FLOAT;
				goto out_word4;
			}
			goto out_unknown;
		case 32:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_32_FLOAT;
				goto out_word4;
			case 2:
				result = FMT_32_32_FLOAT;
				goto out_word4;
			case 4:
				result = FMT_32_32_32_32_FLOAT;
				goto out_word4;
			}
		}

	}
out_word4:
	if (word4_p)
		*word4_p = word4;
	if (yuv_format_p)
		*yuv_format_p = yuv_format;
	return result;
out_unknown:
//	R600_ERR("Unable to handle texformat %d %s\n", format, util_format_name(format));
	return ~0;
}