/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Jerome Glisse
 *      Corbin Simpson
 */
#include "r600_pipe_common.h"
#include "r600_cs.h"
#include "util/u_format.h"
#include "util/u_memory.h"
#include "util/u_pack_color.h"
#include <errno.h>
#include <inttypes.h>

static void r600_texture_discard_dcc(struct r600_common_screen *rscreen,
				     struct r600_texture *rtex);
static void r600_texture_discard_cmask(struct r600_common_screen *rscreen,
				       struct r600_texture *rtex);

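/* Decide whether a copy can run on the SDMA ring instead of the 3D engine.
 * This returns true only when source and destination have the same block
 * size, neither is multisampled or a depth-stencil surface, and any
 * DCC/CMASK metadata on the destination can be discarded (i.e. the whole
 * level is overwritten and the texture was never exported). As a side
 * effect, it may discard the destination's DCC/CMASK and flush a
 * CMASK-compressed source.
 */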
bool r600_prepare_for_dma_blit(struct r600_common_context *rctx,
			       struct r600_texture *rdst,
			       unsigned dst_level, unsigned dstx,
			       unsigned dsty, unsigned dstz,
			       struct r600_texture *rsrc,
			       unsigned src_level,
			       const struct pipe_box *src_box)
{
	if (!rctx->dma.cs)
		return false;

	if (util_format_get_blocksizebits(rdst->resource.b.b.format) !=
	    util_format_get_blocksizebits(rsrc->resource.b.b.format))
		return false;

	/* MSAA: the DMA engine cannot copy multisampled surfaces. */
	if (rsrc->resource.b.b.nr_samples > 1 ||
	    rdst->resource.b.b.nr_samples > 1)
		return false;

	/* Depth-stencil surfaces:
	 *   When dst is linear, the DB->CB copy preserves HTILE.
	 *   When dst is tiled, the 3D path must be used to update HTILE.
	 */
	if (rsrc->is_depth || rdst->is_depth)
		return false;

	/* DCC as:
	 *   src: Use the 3D path. DCC decompression is expensive.
	 *   dst: If overwriting the whole texture, discard DCC and use SDMA.
	 *        Otherwise, use the 3D path.
	 */
	if (rsrc->dcc_offset)
		return false;

	if (rdst->dcc_offset) {
		/* We can't discard DCC if the texture has been exported. */
		if (rdst->resource.is_shared ||
		    !util_texrange_covers_whole_level(&rdst->resource.b.b, dst_level,
						      dstx, dsty, dstz, src_box->width,
						      src_box->height, src_box->depth))
			return false;

		r600_texture_discard_dcc(rctx->screen, rdst);
	}

	/* CMASK as:
	 *   src: Both the texture and SDMA paths need decompression. Use SDMA.
	 *   dst: If overwriting the whole texture, discard CMASK and use
	 *        SDMA. Otherwise, use the 3D path.
	 */
	if (rdst->cmask.size && rdst->dirty_level_mask & (1 << dst_level)) {
		if (!util_texrange_covers_whole_level(&rdst->resource.b.b, dst_level,
						      dstx, dsty, dstz, src_box->width,
						      src_box->height, src_box->depth))
			return false;

		r600_texture_discard_cmask(rctx->screen, rdst);
	}

	/* All requirements are met. Prepare textures for SDMA. */
	if (rsrc->cmask.size && rsrc->dirty_level_mask & (1 << src_level))
		rctx->b.flush_resource(&rctx->b, &rsrc->resource.b.b);

	assert(!(rsrc->dirty_level_mask & (1 << src_level)));
	assert(!(rdst->dirty_level_mask & (1 << dst_level)));

	return true;
}

/* Same as resource_copy_region, except that both upsampling and downsampling are allowed. */
static void r600_copy_region_with_blit(struct pipe_context *pipe,
				       struct pipe_resource *dst,
				       unsigned dst_level,
				       unsigned dstx, unsigned dsty, unsigned dstz,
				       struct pipe_resource *src,
				       unsigned src_level,
				       const struct pipe_box *src_box)
{
	struct pipe_blit_info blit;

	memset(&blit, 0, sizeof(blit));
	blit.src.resource = src;
	blit.src.format = src->format;
	blit.src.level = src_level;
	blit.src.box = *src_box;
	blit.dst.resource = dst;
	blit.dst.format = dst->format;
	blit.dst.level = dst_level;
	blit.dst.box.x = dstx;
	blit.dst.box.y = dsty;
	blit.dst.box.z = dstz;
	blit.dst.box.width = src_box->width;
	blit.dst.box.height = src_box->height;
	blit.dst.box.depth = src_box->depth;
	blit.mask = util_format_get_mask(src->format) &
		    util_format_get_mask(dst->format);
	blit.filter = PIPE_TEX_FILTER_NEAREST;

	if (blit.mask) {
		pipe->blit(pipe, &blit);
	}
}

/* Copy from a full GPU texture to a transfer's staging one. */
static void r600_copy_to_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
	struct pipe_resource *dst = &rtransfer->staging->b.b;
	struct pipe_resource *src = transfer->resource;

	if (src->nr_samples > 1) {
		r600_copy_region_with_blit(ctx, dst, 0, 0, 0, 0,
					   src, transfer->level, &transfer->box);
		return;
	}

	rctx->dma_copy(ctx, dst, 0, 0, 0, 0, src, transfer->level,
		       &transfer->box);
}

/* Copy from a transfer's staging texture to a full GPU one. */
static void r600_copy_from_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
	struct pipe_resource *dst = transfer->resource;
	struct pipe_resource *src = &rtransfer->staging->b.b;
	struct pipe_box sbox;

	u_box_3d(0, 0, 0, transfer->box.width, transfer->box.height, transfer->box.depth, &sbox);

	if (dst->nr_samples > 1) {
		r600_copy_region_with_blit(ctx, dst, transfer->level,
					   transfer->box.x, transfer->box.y, transfer->box.z,
					   src, 0, &sbox);
		return;
	}

	rctx->dma_copy(ctx, dst, transfer->level,
		       transfer->box.x, transfer->box.y, transfer->box.z,
		       src, 0, &sbox);
}

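/* Compute the byte offset of a box within a mip level:
 * level offset + whole slices (z) + rows of blocks (y) + columns of
 * blocks (x).
 *
 * For example (hypothetical values): on a linear R8G8B8A8 level with
 * pitch_bytes = 4096, a box at (x=16, y=8, z=2) maps to
 *   offset + 2*slice_size + 8*4096 + 16*4
 * since the block size is 4 bytes and the block dimensions are 1x1.
 */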
static unsigned r600_texture_get_offset(struct r600_texture *rtex, unsigned level,
					const struct pipe_box *box)
{
	enum pipe_format format = rtex->resource.b.b.format;

	return rtex->surface.level[level].offset +
	       box->z * rtex->surface.level[level].slice_size +
	       box->y / util_format_get_blockheight(format) * rtex->surface.level[level].pitch_bytes +
	       box->x / util_format_get_blockwidth(format) * util_format_get_blocksize(format);
}

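/* Fill out a radeon_surf template from a pipe_resource: dimensions, block
 * size, array size, and tiling/type flags. Returns -EINVAL for unsupported
 * targets such as PIPE_BUFFER.
 */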
static int r600_init_surface(struct r600_common_screen *rscreen,
			     struct radeon_surf *surface,
			     const struct pipe_resource *ptex,
			     unsigned array_mode,
			     bool is_flushed_depth)
{
	const struct util_format_description *desc =
		util_format_description(ptex->format);
	bool is_depth, is_stencil;

	is_depth = util_format_has_depth(desc);
	is_stencil = util_format_has_stencil(desc);

	surface->npix_x = ptex->width0;
	surface->npix_y = ptex->height0;
	surface->npix_z = ptex->depth0;
	surface->blk_w = util_format_get_blockwidth(ptex->format);
	surface->blk_h = util_format_get_blockheight(ptex->format);
	surface->blk_d = 1;
	surface->array_size = 1;
	surface->last_level = ptex->last_level;

	if (rscreen->chip_class >= EVERGREEN && !is_flushed_depth &&
	    ptex->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
		surface->bpe = 4; /* stencil is allocated separately on evergreen */
	} else {
		surface->bpe = util_format_get_blocksize(ptex->format);
		/* align bytes per element to a dword */
		if (surface->bpe == 3) {
			surface->bpe = 4;
		}
	}

	surface->nsamples = ptex->nr_samples ? ptex->nr_samples : 1;
	surface->flags = RADEON_SURF_SET(array_mode, MODE);

	switch (ptex->target) {
	case PIPE_TEXTURE_1D:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_1D, TYPE);
		break;
	case PIPE_TEXTURE_RECT:
	case PIPE_TEXTURE_2D:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D, TYPE);
		break;
	case PIPE_TEXTURE_3D:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_3D, TYPE);
		break;
	case PIPE_TEXTURE_1D_ARRAY:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_1D_ARRAY, TYPE);
		surface->array_size = ptex->array_size;
		break;
	case PIPE_TEXTURE_CUBE_ARRAY: /* cube arrays are laid out like 2D arrays */
		assert(ptex->array_size % 6 == 0);
		/* fall through */
	case PIPE_TEXTURE_2D_ARRAY:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D_ARRAY, TYPE);
		surface->array_size = ptex->array_size;
		break;
	case PIPE_TEXTURE_CUBE:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_CUBEMAP, TYPE);
		break;
	case PIPE_BUFFER:
	default:
		return -EINVAL;
	}
	if (ptex->bind & PIPE_BIND_SCANOUT) {
		surface->flags |= RADEON_SURF_SCANOUT;
	}

	if (!is_flushed_depth && is_depth) {
		surface->flags |= RADEON_SURF_ZBUFFER;

		if (is_stencil) {
			surface->flags |= RADEON_SURF_SBUFFER |
					  RADEON_SURF_HAS_SBUFFER_MIPTREE;
		}
	}
	if (rscreen->chip_class >= SI) {
		surface->flags |= RADEON_SURF_HAS_TILE_MODE_INDEX;
	}
	return 0;
}

static int r600_setup_surface(struct pipe_screen *screen,
			      struct r600_texture *rtex,
			      unsigned pitch_in_bytes_override,
			      unsigned offset)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	unsigned i;
	int r;

	r = rscreen->ws->surface_init(rscreen->ws, &rtex->surface);
	if (r) {
		return r;
	}

	rtex->size = rtex->surface.bo_size;

	if (pitch_in_bytes_override && pitch_in_bytes_override != rtex->surface.level[0].pitch_bytes) {
		/* The old DDX driver on Evergreen overestimates the alignment
		 * for 1D-tiled surfaces; those have only one level.
		 */
		rtex->surface.level[0].nblk_x = pitch_in_bytes_override / rtex->surface.bpe;
		rtex->surface.level[0].pitch_bytes = pitch_in_bytes_override;
		rtex->surface.level[0].slice_size = pitch_in_bytes_override * rtex->surface.level[0].nblk_y;
	}

	if (offset) {
		for (i = 0; i < ARRAY_SIZE(rtex->surface.level); ++i)
			rtex->surface.level[i].offset += offset;
	}
	return 0;
}

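/* Describe the tiling layout in the form the winsys stores alongside
 * shared buffers, so that another process importing the BO (see
 * r600_texture_from_handle) can reconstruct the same surface parameters.
 */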
static void r600_texture_init_metadata(struct r600_texture *rtex,
				       struct radeon_bo_metadata *metadata)
{
	struct radeon_surf *surface = &rtex->surface;

	memset(metadata, 0, sizeof(*metadata));
	metadata->microtile = surface->level[0].mode >= RADEON_SURF_MODE_1D ?
			      RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
	metadata->macrotile = surface->level[0].mode >= RADEON_SURF_MODE_2D ?
			      RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR;
	metadata->pipe_config = surface->pipe_config;
	metadata->bankw = surface->bankw;
	metadata->bankh = surface->bankh;
	metadata->tile_split = surface->tile_split;
	metadata->mtilea = surface->mtilea;
	metadata->num_banks = surface->num_banks;
	metadata->stride = surface->level[0].pitch_bytes;
	metadata->scanout = (surface->flags & RADEON_SURF_SCANOUT) != 0;
}

static void r600_dirty_all_framebuffer_states(struct r600_common_screen *rscreen)
{
	p_atomic_inc(&rscreen->dirty_fb_counter);
}

static void r600_eliminate_fast_color_clear(struct r600_common_screen *rscreen,
					    struct r600_texture *rtex)
{
	struct pipe_context *ctx = rscreen->aux_context;

	pipe_mutex_lock(rscreen->aux_context_lock);
	ctx->flush_resource(ctx, &rtex->resource.b.b);
	ctx->flush(ctx, NULL, 0);
	pipe_mutex_unlock(rscreen->aux_context_lock);
}

static void r600_texture_discard_cmask(struct r600_common_screen *rscreen,
				       struct r600_texture *rtex)
{
	if (!rtex->cmask.size)
		return;

	assert(rtex->resource.b.b.nr_samples <= 1);

	/* Disable CMASK. */
	memset(&rtex->cmask, 0, sizeof(rtex->cmask));
	rtex->cmask.base_address_reg = rtex->resource.gpu_address >> 8;

	if (rscreen->chip_class >= SI)
		rtex->cb_color_info &= ~SI_S_028C70_FAST_CLEAR(1);
	else
		rtex->cb_color_info &= ~EG_S_028C70_FAST_CLEAR(1);

	if (rtex->cmask_buffer != &rtex->resource)
		pipe_resource_reference((struct pipe_resource**)&rtex->cmask_buffer, NULL);

	/* Notify all contexts about the change. */
	r600_dirty_all_framebuffer_states(rscreen);
	p_atomic_inc(&rscreen->compressed_colortex_counter);
}

static void r600_texture_discard_dcc(struct r600_common_screen *rscreen,
				     struct r600_texture *rtex)
{
	/* Disable DCC. */
	rtex->dcc_offset = 0;
	rtex->cb_color_info &= ~VI_S_028C70_DCC_ENABLE(1);

	/* Notify all contexts about the change. */
	r600_dirty_all_framebuffer_states(rscreen);
}

void r600_texture_disable_dcc(struct r600_common_screen *rscreen,
			      struct r600_texture *rtex)
{
	struct r600_common_context *rctx =
		(struct r600_common_context *)rscreen->aux_context;

	if (!rtex->dcc_offset)
		return;

	/* Decompress DCC. */
	pipe_mutex_lock(rscreen->aux_context_lock);
	rctx->decompress_dcc(&rctx->b, rtex);
	rctx->b.flush(&rctx->b, NULL, 0);
	pipe_mutex_unlock(rscreen->aux_context_lock);

	r600_texture_discard_dcc(rscreen, rtex);
}

static boolean r600_texture_get_handle(struct pipe_screen* screen,
				       struct pipe_resource *resource,
				       struct winsys_handle *whandle,
				       unsigned usage)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct r600_resource *res = (struct r600_resource*)resource;
	struct r600_texture *rtex = (struct r600_texture*)resource;
	struct radeon_bo_metadata metadata;
	bool update_metadata = false;

	/* This is not supported now, but it might be required for OpenCL
	 * interop in the future.
	 */
	if (resource->target != PIPE_BUFFER &&
	    (resource->nr_samples > 1 || rtex->is_depth))
		return false;

	if (resource->target != PIPE_BUFFER) {
		/* Since shader image stores don't support DCC on VI,
		 * disable it for external clients that want write
		 * access.
		 */
		if (usage & PIPE_HANDLE_USAGE_WRITE && rtex->dcc_offset) {
			r600_texture_disable_dcc(rscreen, rtex);
			update_metadata = true;
		}

		if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) &&
		    rtex->cmask.size) {
			/* Eliminate fast clear (both CMASK and DCC) */
			r600_eliminate_fast_color_clear(rscreen, rtex);

			/* Disable CMASK if flush_resource isn't going
			 * to be called.
			 */
			r600_texture_discard_cmask(rscreen, rtex);
			update_metadata = true;
		}

		/* Set metadata. */
		if (!res->is_shared || update_metadata) {
			r600_texture_init_metadata(rtex, &metadata);
			if (rscreen->query_opaque_metadata)
				rscreen->query_opaque_metadata(rscreen, rtex,
							       &metadata);

			rscreen->ws->buffer_set_metadata(res->buf, &metadata);
		}
	}

	if (res->is_shared) {
		/* USAGE_EXPLICIT_FLUSH must be cleared if at least one user
		 * doesn't set it.
		 */
		res->external_usage |= usage & ~PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
		if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
			res->external_usage &= ~PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
	} else {
		res->is_shared = true;
		res->external_usage = usage;
	}

	return rscreen->ws->buffer_get_handle(res->buf,
					      rtex->surface.level[0].pitch_bytes,
					      rtex->surface.level[0].offset,
					      rtex->surface.level[0].slice_size,
					      whandle);
}

static void r600_texture_destroy(struct pipe_screen *screen,
				 struct pipe_resource *ptex)
{
	struct r600_texture *rtex = (struct r600_texture*)ptex;
	struct r600_resource *resource = &rtex->resource;

	if (rtex->flushed_depth_texture)
		pipe_resource_reference((struct pipe_resource **)&rtex->flushed_depth_texture, NULL);

	pipe_resource_reference((struct pipe_resource**)&rtex->htile_buffer, NULL);
	if (rtex->cmask_buffer != &rtex->resource) {
		pipe_resource_reference((struct pipe_resource**)&rtex->cmask_buffer, NULL);
	}
	pb_reference(&resource->buf, NULL);
	FREE(rtex);
}

static const struct u_resource_vtbl r600_texture_vtbl;

/* The number of samples can be specified independently of the texture. */
void r600_texture_get_fmask_info(struct r600_common_screen *rscreen,
				 struct r600_texture *rtex,
				 unsigned nr_samples,
				 struct r600_fmask_info *out)
{
	/* FMASK is allocated like an ordinary texture. */
	struct radeon_surf fmask = rtex->surface;

	memset(out, 0, sizeof(*out));

	fmask.bo_alignment = 0;
	fmask.bo_size = 0;
	fmask.nsamples = 1;
	fmask.flags |= RADEON_SURF_FMASK;

	/* Force 2D tiling if it wasn't set. This may occur when creating
	 * FMASK for MSAA resolve on R6xx. On R6xx, the single-sample
	 * destination buffer must have an FMASK too. */
	fmask.flags = RADEON_SURF_CLR(fmask.flags, MODE);
	fmask.flags |= RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE);

	if (rscreen->chip_class >= SI) {
		fmask.flags |= RADEON_SURF_HAS_TILE_MODE_INDEX;
	}

	switch (nr_samples) {
	case 2:
	case 4:
		fmask.bpe = 1;
		if (rscreen->chip_class <= CAYMAN) {
			fmask.bankh = 4;
		}
		break;
	case 8:
		fmask.bpe = 4;
		break;
	default:
		R600_ERR("Invalid sample count for FMASK allocation.\n");
		return;
	}

	/* Overallocate FMASK on R600-R700 to fix colorbuffer corruption.
	 * This can be fixed by writing a separate FMASK allocator specifically
	 * for R600-R700 ASICs. */
	if (rscreen->chip_class <= R700) {
		fmask.bpe *= 2;
	}

	if (rscreen->ws->surface_init(rscreen->ws, &fmask)) {
		R600_ERR("Got error in surface_init while allocating FMASK.\n");
		return;
	}

	assert(fmask.level[0].mode == RADEON_SURF_MODE_2D);

	out->slice_tile_max = (fmask.level[0].nblk_x * fmask.level[0].nblk_y) / 64;
	if (out->slice_tile_max)
		out->slice_tile_max -= 1;

	out->tile_mode_index = fmask.tiling_index[0];
	out->pitch_in_pixels = fmask.level[0].nblk_x;
	out->bank_height = fmask.bankh;
	out->alignment = MAX2(256, fmask.bo_alignment);
	out->size = fmask.bo_size;
}

static void r600_texture_allocate_fmask(struct r600_common_screen *rscreen,
					struct r600_texture *rtex)
{
	r600_texture_get_fmask_info(rscreen, rtex,
				    rtex->resource.b.b.nr_samples, &rtex->fmask);

	rtex->fmask.offset = align64(rtex->size, rtex->fmask.alignment);
	rtex->size = rtex->fmask.offset + rtex->fmask.size;
}

void r600_texture_get_cmask_info(struct r600_common_screen *rscreen,
				 struct r600_texture *rtex,
				 struct r600_cmask_info *out)
{
	unsigned cmask_tile_width = 8;
	unsigned cmask_tile_height = 8;
	unsigned cmask_tile_elements = cmask_tile_width * cmask_tile_height;
	unsigned element_bits = 4;
	unsigned cmask_cache_bits = 1024;
	unsigned num_pipes = rscreen->info.num_tile_pipes;
	unsigned pipe_interleave_bytes = rscreen->info.pipe_interleave_bytes;

	unsigned elements_per_macro_tile = (cmask_cache_bits / element_bits) * num_pipes;
	unsigned pixels_per_macro_tile = elements_per_macro_tile * cmask_tile_elements;
	unsigned sqrt_pixels_per_macro_tile = sqrt(pixels_per_macro_tile);
	unsigned macro_tile_width = util_next_power_of_two(sqrt_pixels_per_macro_tile);
	unsigned macro_tile_height = pixels_per_macro_tile / macro_tile_width;

	unsigned pitch_elements = align(rtex->surface.npix_x, macro_tile_width);
	unsigned height = align(rtex->surface.npix_y, macro_tile_height);

	unsigned base_align = num_pipes * pipe_interleave_bytes;
	unsigned slice_bytes =
		((pitch_elements * height * element_bits + 7) / 8) / cmask_tile_elements;

	assert(macro_tile_width % 128 == 0);
	assert(macro_tile_height % 128 == 0);

	out->pitch = pitch_elements;
	out->height = height;
	out->xalign = macro_tile_width;
	out->yalign = macro_tile_height;
	out->slice_tile_max = ((pitch_elements * height) / (128*128)) - 1;
	out->alignment = MAX2(256, base_align);
	out->size = (util_max_layer(&rtex->resource.b.b, 0) + 1) *
		    align(slice_bytes, base_align);
}

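/* CMASK sizing on SI and later: each 8x8-pixel tile is tracked by one
 * 4-bit CMASK element, so a slice needs (width * height) / 64 nibbles,
 * i.e. (width * height) / 128 bytes, after aligning the dimensions to the
 * pipe-dependent cache-line shape selected below.
 */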
static void si_texture_get_cmask_info(struct r600_common_screen *rscreen,
				      struct r600_texture *rtex,
				      struct r600_cmask_info *out)
{
	unsigned pipe_interleave_bytes = rscreen->info.pipe_interleave_bytes;
	unsigned num_pipes = rscreen->info.num_tile_pipes;
	unsigned cl_width, cl_height;

	switch (num_pipes) {
	case 2:
		cl_width = 32;
		cl_height = 16;
		break;
	case 4:
		cl_width = 32;
		cl_height = 32;
		break;
	case 8:
		cl_width = 64;
		cl_height = 32;
		break;
	case 16: /* Hawaii */
		cl_width = 64;
		cl_height = 64;
		break;
	default:
		assert(0);
		return;
	}

	unsigned base_align = num_pipes * pipe_interleave_bytes;

	unsigned width = align(rtex->surface.npix_x, cl_width*8);
	unsigned height = align(rtex->surface.npix_y, cl_height*8);
	unsigned slice_elements = (width * height) / (8*8);

	/* Each element of CMASK is a nibble. */
	unsigned slice_bytes = slice_elements / 2;

	out->pitch = width;
	out->height = height;
	out->xalign = cl_width * 8;
	out->yalign = cl_height * 8;
	out->slice_tile_max = (width * height) / (128*128);
	if (out->slice_tile_max)
		out->slice_tile_max -= 1;

	out->alignment = MAX2(256, base_align);
	out->size = (util_max_layer(&rtex->resource.b.b, 0) + 1) *
		    align(slice_bytes, base_align);
}

static void r600_texture_allocate_cmask(struct r600_common_screen *rscreen,
					struct r600_texture *rtex)
{
	if (rscreen->chip_class >= SI) {
		si_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
	} else {
		r600_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
	}

	rtex->cmask.offset = align64(rtex->size, rtex->cmask.alignment);
	rtex->size = rtex->cmask.offset + rtex->cmask.size;

	if (rscreen->chip_class >= SI)
		rtex->cb_color_info |= SI_S_028C70_FAST_CLEAR(1);
	else
		rtex->cb_color_info |= EG_S_028C70_FAST_CLEAR(1);
}

static void r600_texture_alloc_cmask_separate(struct r600_common_screen *rscreen,
					      struct r600_texture *rtex)
{
	if (rtex->cmask_buffer)
		return;

	assert(rtex->cmask.size == 0);

	if (rscreen->chip_class >= SI) {
		si_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
	} else {
		r600_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
	}

	rtex->cmask_buffer = (struct r600_resource *)
		pipe_buffer_create(&rscreen->b, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_DEFAULT, rtex->cmask.size);
	if (rtex->cmask_buffer == NULL) {
		rtex->cmask.size = 0;
		return;
	}

	/* update colorbuffer state bits */
	rtex->cmask.base_address_reg = rtex->cmask_buffer->gpu_address >> 8;

	if (rscreen->chip_class >= SI)
		rtex->cb_color_info |= SI_S_028C70_FAST_CLEAR(1);
	else
		rtex->cb_color_info |= EG_S_028C70_FAST_CLEAR(1);

	p_atomic_inc(&rscreen->compressed_colortex_counter);
}

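/* HTILE sizing: one 32-bit HTILE word covers an 8x8-pixel tile, so a slice
 * needs (width * height) / 64 * 4 bytes after aligning the dimensions to
 * the pipe-dependent cache-line shape. Returns 0 when HTILE can't be used
 * (old kernels, known HW bugs).
 */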
static unsigned r600_texture_get_htile_size(struct r600_common_screen *rscreen,
					    struct r600_texture *rtex)
{
	unsigned cl_width, cl_height, width, height;
	unsigned slice_elements, slice_bytes, pipe_interleave_bytes, base_align;
	unsigned num_pipes = rscreen->info.num_tile_pipes;

	if (rscreen->chip_class <= EVERGREEN &&
	    rscreen->info.drm_major == 2 && rscreen->info.drm_minor < 26)
		return 0;

	/* HW bug on R6xx. */
	if (rscreen->chip_class == R600 &&
	    (rtex->surface.level[0].npix_x > 7680 ||
	     rtex->surface.level[0].npix_y > 7680))
		return 0;

	/* HTILE is broken with 1D tiling on old kernels and CIK. */
	if (rscreen->chip_class >= CIK &&
	    rtex->surface.level[0].mode == RADEON_SURF_MODE_1D &&
	    rscreen->info.drm_major == 2 && rscreen->info.drm_minor < 38)
		return 0;

	/* Overalign HTILE on P2 configs to work around GPU hangs in
	 * piglit/depthstencil-render-miplevels 585.
	 *
	 * This has been confirmed to help Kabini & Stoney, where the hangs
	 * are always reproducible. I think I have seen the test hang
	 * on Carrizo too, though it was very rare there.
	 */
	if (rscreen->chip_class >= CIK && num_pipes < 4)
		num_pipes = 4;

	switch (num_pipes) {
	case 1:
		cl_width = 32;
		cl_height = 16;
		break;
	case 2:
		cl_width = 32;
		cl_height = 32;
		break;
	case 4:
		cl_width = 64;
		cl_height = 32;
		break;
	case 8:
		cl_width = 64;
		cl_height = 64;
		break;
	case 16:
		cl_width = 128;
		cl_height = 64;
		break;
	default:
		assert(0);
		return 0;
	}

	width = align(rtex->surface.npix_x, cl_width * 8);
	height = align(rtex->surface.npix_y, cl_height * 8);

	slice_elements = (width * height) / (8 * 8);
	slice_bytes = slice_elements * 4;

	pipe_interleave_bytes = rscreen->info.pipe_interleave_bytes;
	base_align = num_pipes * pipe_interleave_bytes;

	rtex->htile.pitch = width;
	rtex->htile.height = height;
	rtex->htile.xalign = cl_width * 8;
	rtex->htile.yalign = cl_height * 8;

	return (util_max_layer(&rtex->resource.b.b, 0) + 1) *
		align(slice_bytes, base_align);
}

static void r600_texture_allocate_htile(struct r600_common_screen *rscreen,
					struct r600_texture *rtex)
{
	unsigned htile_size = r600_texture_get_htile_size(rscreen, rtex);

	if (!htile_size)
		return;

	rtex->htile_buffer = (struct r600_resource*)
		pipe_buffer_create(&rscreen->b, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_DEFAULT, htile_size);
	if (rtex->htile_buffer == NULL) {
		/* This is not a fatal error: we can still keep rendering
		 * without the HTILE buffer. */
		R600_ERR("Failed to create buffer object for htile buffer.\n");
	} else {
		r600_screen_clear_buffer(rscreen, &rtex->htile_buffer->b.b, 0,
					 htile_size, 0, R600_COHERENCY_NONE);
	}
}

void r600_print_texture_info(struct r600_texture *rtex, FILE *f)
{
	int i;

	fprintf(f, "  Info: npix_x=%u, npix_y=%u, npix_z=%u, blk_w=%u, "
		"blk_h=%u, blk_d=%u, array_size=%u, last_level=%u, "
		"bpe=%u, nsamples=%u, flags=0x%x, %s\n",
		rtex->surface.npix_x, rtex->surface.npix_y,
		rtex->surface.npix_z, rtex->surface.blk_w,
		rtex->surface.blk_h, rtex->surface.blk_d,
		rtex->surface.array_size, rtex->surface.last_level,
		rtex->surface.bpe, rtex->surface.nsamples,
		rtex->surface.flags, util_format_short_name(rtex->resource.b.b.format));

	fprintf(f, "  Layout: size=%"PRIu64", alignment=%"PRIu64", bankw=%u, "
		"bankh=%u, nbanks=%u, mtilea=%u, tilesplit=%u, pipeconfig=%u, scanout=%u\n",
		rtex->surface.bo_size, rtex->surface.bo_alignment, rtex->surface.bankw,
		rtex->surface.bankh, rtex->surface.num_banks, rtex->surface.mtilea,
		rtex->surface.tile_split, rtex->surface.pipe_config,
		(rtex->surface.flags & RADEON_SURF_SCANOUT) != 0);

	if (rtex->fmask.size)
		fprintf(f, "  FMask: offset=%"PRIu64", size=%"PRIu64", alignment=%u, pitch_in_pixels=%u, "
			"bankh=%u, slice_tile_max=%u, tile_mode_index=%u\n",
			rtex->fmask.offset, rtex->fmask.size, rtex->fmask.alignment,
			rtex->fmask.pitch_in_pixels, rtex->fmask.bank_height,
			rtex->fmask.slice_tile_max, rtex->fmask.tile_mode_index);

	if (rtex->cmask.size)
		fprintf(f, "  CMask: offset=%"PRIu64", size=%"PRIu64", alignment=%u, pitch=%u, "
			"height=%u, xalign=%u, yalign=%u, slice_tile_max=%u\n",
			rtex->cmask.offset, rtex->cmask.size, rtex->cmask.alignment,
			rtex->cmask.pitch, rtex->cmask.height, rtex->cmask.xalign,
			rtex->cmask.yalign, rtex->cmask.slice_tile_max);

	if (rtex->htile_buffer)
		fprintf(f, "  HTile: size=%u, alignment=%u, pitch=%u, height=%u, "
			"xalign=%u, yalign=%u\n",
			rtex->htile_buffer->b.b.width0,
			rtex->htile_buffer->buf->alignment, rtex->htile.pitch,
			rtex->htile.height, rtex->htile.xalign, rtex->htile.yalign);

	if (rtex->dcc_offset) {
		fprintf(f, "  DCC: offset=%"PRIu64", size=%"PRIu64", alignment=%"PRIu64"\n",
			rtex->dcc_offset, rtex->surface.dcc_size,
			rtex->surface.dcc_alignment);
		for (i = 0; i <= rtex->surface.last_level; i++)
			fprintf(f, "  DCCLevel[%i]: offset=%"PRIu64"\n",
				i, rtex->surface.level[i].dcc_offset);
	}

	for (i = 0; i <= rtex->surface.last_level; i++)
		fprintf(f, "  Level[%i]: offset=%"PRIu64", slice_size=%"PRIu64", "
			"npix_x=%u, npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
			"nblk_z=%u, pitch_bytes=%u, mode=%u\n",
			i, rtex->surface.level[i].offset,
			rtex->surface.level[i].slice_size,
			u_minify(rtex->resource.b.b.width0, i),
			u_minify(rtex->resource.b.b.height0, i),
			u_minify(rtex->resource.b.b.depth0, i),
			rtex->surface.level[i].nblk_x,
			rtex->surface.level[i].nblk_y,
			rtex->surface.level[i].nblk_z,
			rtex->surface.level[i].pitch_bytes,
			rtex->surface.level[i].mode);

	if (rtex->surface.flags & RADEON_SURF_SBUFFER) {
		for (i = 0; i <= rtex->surface.last_level; i++) {
			fprintf(f, "  StencilLayout: tilesplit=%u\n",
				rtex->surface.stencil_tile_split);
			fprintf(f, "  StencilLevel[%i]: offset=%"PRIu64", "
				"slice_size=%"PRIu64", npix_x=%u, "
				"npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, "
				"nblk_z=%u, pitch_bytes=%u, mode=%u\n",
				i, rtex->surface.stencil_level[i].offset,
				rtex->surface.stencil_level[i].slice_size,
				u_minify(rtex->resource.b.b.width0, i),
				u_minify(rtex->resource.b.b.height0, i),
				u_minify(rtex->resource.b.b.depth0, i),
				rtex->surface.stencil_level[i].nblk_x,
				rtex->surface.stencil_level[i].nblk_y,
				rtex->surface.stencil_level[i].nblk_z,
				rtex->surface.stencil_level[i].pitch_bytes,
				rtex->surface.stencil_level[i].mode);
		}
	}
}

/* Common processing for r600_texture_create and r600_texture_from_handle */
static struct r600_texture *
r600_texture_create_object(struct pipe_screen *screen,
			   const struct pipe_resource *base,
			   unsigned pitch_in_bytes_override,
			   unsigned offset,
			   struct pb_buffer *buf,
			   struct radeon_surf *surface)
{
	struct r600_texture *rtex;
	struct r600_resource *resource;
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;

	rtex = CALLOC_STRUCT(r600_texture);
	if (!rtex)
		return NULL;

	resource = &rtex->resource;
	resource->b.b = *base;
	resource->b.vtbl = &r600_texture_vtbl;
	pipe_reference_init(&resource->b.b.reference, 1);
	resource->b.b.screen = screen;

	/* don't include stencil-only formats which we don't support for rendering */
	rtex->is_depth = util_format_has_depth(util_format_description(rtex->resource.b.b.format));

	rtex->surface = *surface;
	if (r600_setup_surface(screen, rtex, pitch_in_bytes_override, offset)) {
		FREE(rtex);
		return NULL;
	}

	/* Tiled depth textures utilize the non-displayable tile order.
	 * This must be done after r600_setup_surface.
	 * Applies to R600-Cayman. */
	rtex->non_disp_tiling = rtex->is_depth && rtex->surface.level[0].mode >= RADEON_SURF_MODE_1D;

	if (rtex->is_depth) {
		if (!(base->flags & (R600_RESOURCE_FLAG_TRANSFER |
				     R600_RESOURCE_FLAG_FLUSHED_DEPTH)) &&
		    !(rscreen->debug_flags & DBG_NO_HYPERZ)) {

			r600_texture_allocate_htile(rscreen, rtex);
		}
	} else {
		if (base->nr_samples > 1) {
			if (!buf) {
				r600_texture_allocate_fmask(rscreen, rtex);
				r600_texture_allocate_cmask(rscreen, rtex);
				rtex->cmask_buffer = &rtex->resource;
			}
			if (!rtex->fmask.size || !rtex->cmask.size) {
				FREE(rtex);
				return NULL;
			}
		}

		if (!buf && rtex->surface.dcc_size &&
		    !(rscreen->debug_flags & DBG_NO_DCC)) {
			/* Reserve space for the DCC buffer. */
			rtex->dcc_offset = align64(rtex->size, rtex->surface.dcc_alignment);
			rtex->size = rtex->dcc_offset + rtex->surface.dcc_size;
			rtex->cb_color_info |= VI_S_028C70_DCC_ENABLE(1);
		}
	}

	/* Now create the backing buffer. */
	if (!buf) {
		if (!r600_init_resource(rscreen, resource, rtex->size,
					rtex->surface.bo_alignment)) {
			FREE(rtex);
			return NULL;
		}
	} else {
		resource->buf = buf;
		resource->gpu_address = rscreen->ws->buffer_get_virtual_address(resource->buf);
		resource->domains = rscreen->ws->buffer_get_initial_domain(resource->buf);
	}

	if (rtex->cmask.size) {
		/* Initialize the cmask to 0xCC (= compressed state). */
		r600_screen_clear_buffer(rscreen, &rtex->cmask_buffer->b.b,
					 rtex->cmask.offset, rtex->cmask.size,
					 0xCCCCCCCC, R600_COHERENCY_NONE);
	}
	if (rtex->dcc_offset) {
		r600_screen_clear_buffer(rscreen, &rtex->resource.b.b,
					 rtex->dcc_offset,
					 rtex->surface.dcc_size,
					 0xFFFFFFFF, R600_COHERENCY_NONE);
	}

	/* Initialize the CMASK base register value. */
	rtex->cmask.base_address_reg =
		(rtex->resource.gpu_address + rtex->cmask.offset) >> 8;

	if (rscreen->debug_flags & DBG_VM) {
		fprintf(stderr, "VM start=0x%"PRIX64" end=0x%"PRIX64" | Texture %ix%ix%i, %i levels, %i samples, %s\n",
			rtex->resource.gpu_address,
			rtex->resource.gpu_address + rtex->resource.buf->size,
			base->width0, base->height0, util_max_layer(base, 0)+1, base->last_level+1,
			base->nr_samples ? base->nr_samples : 1, util_format_short_name(base->format));
	}

	if (rscreen->debug_flags & DBG_TEX) {
		puts("Texture:");
		r600_print_texture_info(rtex, stdout);
	}

	return rtex;
}

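/* Pick the array mode for a new resource: MSAA forces 2D tiling, transfer
 * and other CPU-mapped or very small surfaces go linear, small textures
 * get 1D tiling, and everything else defaults to 2D (the allocator may
 * still fall back to 1D).
 */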
static unsigned r600_choose_tiling(struct r600_common_screen *rscreen,
				   const struct pipe_resource *templ)
{
	const struct util_format_description *desc = util_format_description(templ->format);
	bool force_tiling = templ->flags & R600_RESOURCE_FLAG_FORCE_TILING;

	/* MSAA resources must be 2D tiled. */
	if (templ->nr_samples > 1)
		return RADEON_SURF_MODE_2D;

	/* Transfer resources should be linear. */
	if (templ->flags & R600_RESOURCE_FLAG_TRANSFER)
		return RADEON_SURF_MODE_LINEAR_ALIGNED;

	/* r600g: force tiling on TEXTURE_2D and TEXTURE_3D compute resources. */
	if (rscreen->chip_class >= R600 && rscreen->chip_class <= CAYMAN &&
	    (templ->bind & PIPE_BIND_COMPUTE_RESOURCE) &&
	    (templ->target == PIPE_TEXTURE_2D ||
	     templ->target == PIPE_TEXTURE_3D))
		force_tiling = true;

	/* Handle common candidates for the linear mode.
	 * Compressed textures and DB surfaces must always be tiled.
	 */
	if (!force_tiling && !util_format_is_compressed(templ->format) &&
	    (!util_format_is_depth_or_stencil(templ->format) ||
	     templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH)) {
		if (rscreen->debug_flags & DBG_NO_TILING)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Tiling doesn't work with the 422 (SUBSAMPLED) formats on R600+. */
		if (desc->layout == UTIL_FORMAT_LAYOUT_SUBSAMPLED)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Cursors are linear on SI.
		 * (XXX double-check, maybe also use RADEON_SURF_SCANOUT) */
		if (rscreen->chip_class >= SI &&
		    (templ->bind & PIPE_BIND_CURSOR))
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		if (templ->bind & PIPE_BIND_LINEAR)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Textures with a very small height are recommended to be linear. */
		if (templ->target == PIPE_TEXTURE_1D ||
		    templ->target == PIPE_TEXTURE_1D_ARRAY ||
		    templ->height0 <= 4)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;

		/* Textures likely to be mapped often. */
		if (templ->usage == PIPE_USAGE_STAGING ||
		    templ->usage == PIPE_USAGE_STREAM)
			return RADEON_SURF_MODE_LINEAR_ALIGNED;
	}

	/* Make small textures 1D tiled. */
	if (templ->width0 <= 16 || templ->height0 <= 16 ||
	    (rscreen->debug_flags & DBG_NO_2D_TILING))
		return RADEON_SURF_MODE_1D;

	/* The allocator will switch to 1D if needed. */
	return RADEON_SURF_MODE_2D;
}

struct pipe_resource *r600_texture_create(struct pipe_screen *screen,
					  const struct pipe_resource *templ)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct radeon_surf surface = {0};
	int r;

	r = r600_init_surface(rscreen, &surface, templ,
			      r600_choose_tiling(rscreen, templ),
			      templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH);
	if (r) {
		return NULL;
	}
	r = rscreen->ws->surface_best(rscreen->ws, &surface);
	if (r) {
		return NULL;
	}
	return (struct pipe_resource *)r600_texture_create_object(screen, templ, 0,
								  0, NULL, &surface);
}

static struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen,
						      const struct pipe_resource *templ,
						      struct winsys_handle *whandle,
						      unsigned usage)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct pb_buffer *buf = NULL;
	unsigned stride = 0, offset = 0;
	unsigned array_mode;
	struct radeon_surf surface;
	int r;
	struct radeon_bo_metadata metadata = {};
	struct r600_texture *rtex;

	/* Support only 2D textures without mipmaps */
	if ((templ->target != PIPE_TEXTURE_2D && templ->target != PIPE_TEXTURE_RECT) ||
	    templ->depth0 != 1 || templ->last_level != 0)
		return NULL;

	buf = rscreen->ws->buffer_from_handle(rscreen->ws, whandle, &stride, &offset);
	if (!buf)
		return NULL;

	rscreen->ws->buffer_get_metadata(buf, &metadata);

	surface.pipe_config = metadata.pipe_config;
	surface.bankw = metadata.bankw;
	surface.bankh = metadata.bankh;
	surface.tile_split = metadata.tile_split;
	surface.mtilea = metadata.mtilea;
	surface.num_banks = metadata.num_banks;

	if (metadata.macrotile == RADEON_LAYOUT_TILED)
		array_mode = RADEON_SURF_MODE_2D;
	else if (metadata.microtile == RADEON_LAYOUT_TILED)
		array_mode = RADEON_SURF_MODE_1D;
	else
		array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED;

	r = r600_init_surface(rscreen, &surface, templ, array_mode, false);
	if (r) {
		return NULL;
	}

	if (metadata.scanout)
		surface.flags |= RADEON_SURF_SCANOUT;

	rtex = r600_texture_create_object(screen, templ, stride,
					  offset, buf, &surface);
	if (!rtex)
		return NULL;

	rtex->resource.is_shared = true;
	rtex->resource.external_usage = usage;
	return &rtex->resource.b.b;
}

bool r600_init_flushed_depth_texture(struct pipe_context *ctx,
				     struct pipe_resource *texture,
				     struct r600_texture **staging)
{
	struct r600_texture *rtex = (struct r600_texture*)texture;
	struct pipe_resource resource;
	struct r600_texture **flushed_depth_texture = staging ?
			staging : &rtex->flushed_depth_texture;

	if (!staging && rtex->flushed_depth_texture)
		return true; /* it's ready */

	resource.target = texture->target;
	resource.format = texture->format;
	resource.width0 = texture->width0;
	resource.height0 = texture->height0;
	resource.depth0 = texture->depth0;
	resource.array_size = texture->array_size;
	resource.last_level = texture->last_level;
	resource.nr_samples = texture->nr_samples;
	resource.usage = staging ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
	resource.bind = texture->bind & ~PIPE_BIND_DEPTH_STENCIL;
	resource.flags = texture->flags | R600_RESOURCE_FLAG_FLUSHED_DEPTH;

	if (staging)
		resource.flags |= R600_RESOURCE_FLAG_TRANSFER;

	*flushed_depth_texture = (struct r600_texture *)ctx->screen->resource_create(ctx->screen, &resource);
	if (*flushed_depth_texture == NULL) {
		R600_ERR("failed to create temporary texture to hold flushed depth\n");
		return false;
	}

	(*flushed_depth_texture)->is_flushing_texture = TRUE;
	(*flushed_depth_texture)->non_disp_tiling = false;
	return true;
}

/**
 * Initialize the pipe_resource descriptor to be of the same size as the box,
 * which is supposed to hold a subregion of the texture "orig" at the given
 * mipmap level.
 */
static void r600_init_temp_resource_from_box(struct pipe_resource *res,
					     struct pipe_resource *orig,
					     const struct pipe_box *box,
					     unsigned level, unsigned flags)
{
	memset(res, 0, sizeof(*res));
	res->format = orig->format;
	res->width0 = box->width;
	res->height0 = box->height;
	res->depth0 = 1;
	res->array_size = 1;
	res->usage = flags & R600_RESOURCE_FLAG_TRANSFER ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT;
	res->flags = flags;

	/* We must set the correct texture target and dimensions for a 3D box. */
	if (box->depth > 1 && util_max_layer(orig, level) > 0) {
		res->target = PIPE_TEXTURE_2D_ARRAY;
		res->array_size = box->depth;
	} else {
		res->target = PIPE_TEXTURE_2D;
	}
}

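/* Map a texture for CPU access. Three paths are possible:
 * - depth textures are first decompressed (and downsampled if MSAA)
 *   into a flushed-depth staging texture,
 * - tiled or busy color textures are copied to a linear staging texture
 *   which is mapped instead,
 * - otherwise the resource is mapped directly at the box offset.
 */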
static void *r600_texture_transfer_map(struct pipe_context *ctx,
				       struct pipe_resource *texture,
				       unsigned level,
				       unsigned usage,
				       const struct pipe_box *box,
				       struct pipe_transfer **ptransfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_texture *rtex = (struct r600_texture*)texture;
	struct r600_transfer *trans;
	boolean use_staging_texture = FALSE;
	struct r600_resource *buf;
	unsigned offset = 0;
	char *map;

	assert(!(texture->flags & R600_RESOURCE_FLAG_TRANSFER));

	/* We cannot map a tiled texture directly because the data is
	 * in a different order; we therefore detile using a blit.
	 *
	 * Also, use a temporary in GTT memory for read transfers, as
	 * the CPU is much happier reading out of cached system memory
	 * than uncached VRAM.
	 */
	if (rtex->surface.level[0].mode >= RADEON_SURF_MODE_1D) {
		use_staging_texture = TRUE;
	} else if ((usage & PIPE_TRANSFER_READ) &&
		   rtex->resource.domains & RADEON_DOMAIN_VRAM) {
		/* Untiled buffers in VRAM are slow for CPU reads. */
		use_staging_texture = TRUE;
	} else if (!(usage & PIPE_TRANSFER_READ) &&
		   (r600_rings_is_buffer_referenced(rctx, rtex->resource.buf, RADEON_USAGE_READWRITE) ||
		    !rctx->ws->buffer_wait(rtex->resource.buf, 0, RADEON_USAGE_READWRITE))) {
		/* Use a staging texture for uploads if the underlying BO is busy. */
		use_staging_texture = TRUE;
	}

	trans = CALLOC_STRUCT(r600_transfer);
	if (!trans)
		return NULL;
	trans->transfer.resource = texture;
	trans->transfer.level = level;
	trans->transfer.usage = usage;
	trans->transfer.box = *box;

	if (rtex->is_depth) {
		struct r600_texture *staging_depth;

		if (rtex->resource.b.b.nr_samples > 1) {
			/* MSAA depth buffers need to be converted to single sample buffers.
			 *
			 * Mapping MSAA depth buffers can occur if ReadPixels is called
			 * with a multisample GLX visual.
			 *
			 * First downsample the depth buffer to a temporary texture,
			 * then decompress the temporary one to staging.
			 *
			 * Only the region being mapped is transferred.
			 */
			struct pipe_resource resource;

			r600_init_temp_resource_from_box(&resource, texture, box, level, 0);

			if (!r600_init_flushed_depth_texture(ctx, &resource, &staging_depth)) {
				R600_ERR("failed to create temporary texture to hold untiled copy\n");
				FREE(trans);
				return NULL;
			}

			if (usage & PIPE_TRANSFER_READ) {
				struct pipe_resource *temp = ctx->screen->resource_create(ctx->screen, &resource);
				if (!temp) {
					R600_ERR("failed to create a temporary depth texture\n");
					FREE(trans);
					return NULL;
				}

				r600_copy_region_with_blit(ctx, temp, 0, 0, 0, 0, texture, level, box);
				rctx->blit_decompress_depth(ctx, (struct r600_texture*)temp, staging_depth,
							    0, 0, 0, box->depth, 0, 0);
				pipe_resource_reference(&temp, NULL);
			}
		}
		else {
			/* XXX: only readback the rectangle which is being mapped? */
			/* XXX: when discard is true, no need to read back from depth texture */
			if (!r600_init_flushed_depth_texture(ctx, texture, &staging_depth)) {
				R600_ERR("failed to create temporary texture to hold untiled copy\n");
				FREE(trans);
				return NULL;
			}

			rctx->blit_decompress_depth(ctx, rtex, staging_depth,
						    level, level,
						    box->z, box->z + box->depth - 1,
						    0, 0);

			offset = r600_texture_get_offset(staging_depth, level, box);
		}

		trans->transfer.stride = staging_depth->surface.level[level].pitch_bytes;
		trans->transfer.layer_stride = staging_depth->surface.level[level].slice_size;
		trans->staging = (struct r600_resource*)staging_depth;
		buf = trans->staging;
	} else if (use_staging_texture) {
		struct pipe_resource resource;
		struct r600_texture *staging;

		r600_init_temp_resource_from_box(&resource, texture, box, level,
						 R600_RESOURCE_FLAG_TRANSFER);
		resource.usage = (usage & PIPE_TRANSFER_READ) ?
			PIPE_USAGE_STAGING : PIPE_USAGE_STREAM;

		/* Create the temporary texture. */
		staging = (struct r600_texture*)ctx->screen->resource_create(ctx->screen, &resource);
		if (!staging) {
			R600_ERR("failed to create temporary texture to hold untiled copy\n");
			FREE(trans);
			return NULL;
		}
		trans->staging = &staging->resource;
		trans->transfer.stride = staging->surface.level[0].pitch_bytes;
		trans->transfer.layer_stride = staging->surface.level[0].slice_size;

		if (usage & PIPE_TRANSFER_READ)
			r600_copy_to_staging_texture(ctx, trans);
		else
			usage |= PIPE_TRANSFER_UNSYNCHRONIZED;

		buf = trans->staging;
	} else {
		/* the resource is mapped directly */
		trans->transfer.stride = rtex->surface.level[level].pitch_bytes;
		trans->transfer.layer_stride = rtex->surface.level[level].slice_size;
		offset = r600_texture_get_offset(rtex, level, box);
		buf = &rtex->resource;
	}

	if (!(map = r600_buffer_map_sync_with_rings(rctx, buf, usage))) {
		pipe_resource_reference((struct pipe_resource**)&trans->staging, NULL);
		FREE(trans);
		return NULL;
	}

	*ptransfer = &trans->transfer;
	return map + offset;
}

static void r600_texture_transfer_unmap(struct pipe_context *ctx,
					struct pipe_transfer* transfer)
{
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct pipe_resource *texture = transfer->resource;
	struct r600_texture *rtex = (struct r600_texture*)texture;

	if ((transfer->usage & PIPE_TRANSFER_WRITE) && rtransfer->staging) {
		if (rtex->is_depth && rtex->resource.b.b.nr_samples <= 1) {
			ctx->resource_copy_region(ctx, texture, transfer->level,
						  transfer->box.x, transfer->box.y, transfer->box.z,
						  &rtransfer->staging->b.b, transfer->level,
						  &transfer->box);
		} else {
			r600_copy_from_staging_texture(ctx, rtransfer);
		}
	}

	if (rtransfer->staging)
		pipe_resource_reference((struct pipe_resource**)&rtransfer->staging, NULL);

	FREE(transfer);
}

static const struct u_resource_vtbl r600_texture_vtbl =
{
	NULL,				/* get_handle */
	r600_texture_destroy,		/* resource_destroy */
	r600_texture_transfer_map,	/* transfer_map */
	u_default_transfer_flush_region, /* transfer_flush_region */
	r600_texture_transfer_unmap,	/* transfer_unmap */
	NULL				/* transfer_inline_write */
};

struct pipe_surface *r600_create_surface_custom(struct pipe_context *pipe,
						struct pipe_resource *texture,
						const struct pipe_surface *templ,
						unsigned width, unsigned height)
{
	struct r600_surface *surface = CALLOC_STRUCT(r600_surface);

	if (!surface)
		return NULL;

	assert(templ->u.tex.first_layer <= util_max_layer(texture, templ->u.tex.level));
	assert(templ->u.tex.last_layer <= util_max_layer(texture, templ->u.tex.level));

	pipe_reference_init(&surface->base.reference, 1);
	pipe_resource_reference(&surface->base.texture, texture);
	surface->base.context = pipe;
	surface->base.format = templ->format;
	surface->base.width = width;
	surface->base.height = height;
	surface->base.u = templ->u;
	return &surface->base;
}

static struct pipe_surface *r600_create_surface(struct pipe_context *pipe,
						struct pipe_resource *tex,
						const struct pipe_surface *templ)
{
	unsigned level = templ->u.tex.level;
	unsigned width = u_minify(tex->width0, level);
	unsigned height = u_minify(tex->height0, level);

	if (tex->target != PIPE_BUFFER && templ->format != tex->format) {
		const struct util_format_description *tex_desc
			= util_format_description(tex->format);
		const struct util_format_description *templ_desc
			= util_format_description(templ->format);

		assert(tex_desc->block.bits == templ_desc->block.bits);

		/* Adjust size of surface if and only if the block width or
		 * height is changed. */
		if (tex_desc->block.width != templ_desc->block.width ||
		    tex_desc->block.height != templ_desc->block.height) {
			unsigned nblks_x = util_format_get_nblocksx(tex->format, width);
			unsigned nblks_y = util_format_get_nblocksy(tex->format, height);

			width = nblks_x * templ_desc->block.width;
			height = nblks_y * templ_desc->block.height;
		}
	}

	return r600_create_surface_custom(pipe, tex, templ, width, height);
}

static void r600_surface_destroy(struct pipe_context *pipe,
				 struct pipe_surface *surface)
{
	struct r600_surface *surf = (struct r600_surface*)surface;
	pipe_resource_reference((struct pipe_resource**)&surf->cb_buffer_fmask, NULL);
	pipe_resource_reference((struct pipe_resource**)&surf->cb_buffer_cmask, NULL);
	pipe_resource_reference(&surface->texture, NULL);
	FREE(surface);
}

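/* Derive the CB color swap from the format's channel swizzles. For
 * instance, PIPE_FORMAT_R8G8B8A8_* stores channels as XYZW and maps to
 * SWAP_STD, while PIPE_FORMAT_B8G8R8A8_* stores them as ZYXW and maps to
 * SWAP_ALT. Returns ~0U if no swap matches.
 */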
unsigned r600_translate_colorswap(enum pipe_format format, bool do_endian_swap)
{
	const struct util_format_description *desc = util_format_description(format);

#define HAS_SWIZZLE(chan,swz) (desc->swizzle[chan] == PIPE_SWIZZLE_##swz)

	if (format == PIPE_FORMAT_R11G11B10_FLOAT) /* isn't plain */
		return V_0280A0_SWAP_STD;

	if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN)
		return ~0U;

	switch (desc->nr_channels) {
	case 1:
		if (HAS_SWIZZLE(0,X))
			return V_0280A0_SWAP_STD; /* X___ */
		else if (HAS_SWIZZLE(3,X))
			return V_0280A0_SWAP_ALT_REV; /* ___X */
		break;
	case 2:
		if ((HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,Y)) ||
		    (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,NONE)) ||
		    (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,Y)))
			return V_0280A0_SWAP_STD; /* XY__ */
		else if ((HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,X)) ||
			 (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,NONE)) ||
			 (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,X)))
			/* YX__ */
			return (do_endian_swap ? V_0280A0_SWAP_STD : V_0280A0_SWAP_STD_REV);
		else if (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(3,Y))
			return V_0280A0_SWAP_ALT; /* X__Y */
		else if (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(3,X))
			return V_0280A0_SWAP_ALT_REV; /* Y__X */
		break;
	case 3:
		if (HAS_SWIZZLE(0,X))
			return (do_endian_swap ? V_0280A0_SWAP_STD_REV : V_0280A0_SWAP_STD);
		else if (HAS_SWIZZLE(0,Z))
			return V_0280A0_SWAP_STD_REV; /* ZYX */
		break;
	case 4:
		/* check the middle channels; the 1st and 4th channels can be NONE */
		if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,Z)) {
			return V_0280A0_SWAP_STD; /* XYZW */
		} else if (HAS_SWIZZLE(1,Z) && HAS_SWIZZLE(2,Y)) {
			return V_0280A0_SWAP_STD_REV; /* WZYX */
		} else if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,X)) {
			return V_0280A0_SWAP_ALT; /* ZYXW */
		} else if (HAS_SWIZZLE(1,Z) && HAS_SWIZZLE(2,W)) {
			/* YZWX */
			if (desc->is_array)
				return V_0280A0_SWAP_ALT_REV;
			else
				return (do_endian_swap ? V_0280A0_SWAP_ALT : V_0280A0_SWAP_ALT_REV);
		}
		break;
	}
	return ~0U;
}

static void evergreen_set_clear_color(struct r600_texture *rtex,
				      enum pipe_format surface_format,
				      const union pipe_color_union *color)
{
	union util_color uc;

	memset(&uc, 0, sizeof(uc));

	if (util_format_is_pure_uint(surface_format)) {
		util_format_write_4ui(surface_format, color->ui, 0, &uc, 0, 0, 0, 1, 1);
	} else if (util_format_is_pure_sint(surface_format)) {
		util_format_write_4i(surface_format, color->i, 0, &uc, 0, 0, 0, 1, 1);
	} else {
		util_pack_color(color->f, surface_format, &uc);
	}

	memcpy(rtex->color_clear_value, &uc, 2 * sizeof(uint32_t));
}

static void vi_get_fast_clear_parameters(enum pipe_format surface_format,
					 const union pipe_color_union *color,
					 uint32_t* reset_value,
					 bool* clear_words_needed)
{
	bool values[4] = {};
	int i;
	bool main_value = false;
	bool extra_value = false;
	int extra_channel;
	const struct util_format_description *desc = util_format_description(surface_format);

	*clear_words_needed = true;
	*reset_value = 0x20202020U;

	/* If we want to clear without needing a fast clear eliminate step, we
	 * can set each channel to 0 or 1 (or 0/max for integer formats). We
	 * have two sets of flags, one for the last or first channel (extra)
	 * and one for the other channels (main).
	 */
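	/* The DCC clear word is built from the per-byte code 0x20, OR'ed
	 * with 0x80808080 when the main channels clear to 1/max and with
	 * 0x40404040 when the extra channel does. For example, clearing an
	 * RGBA8 surface to (0, 0, 0, 1) gives main = 0 and extra = 1, so
	 * *reset_value becomes 0x60606060 and no eliminate pass is needed.
	 */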

	if (surface_format == PIPE_FORMAT_R11G11B10_FLOAT ||
	    surface_format == PIPE_FORMAT_B5G6R5_UNORM ||
	    surface_format == PIPE_FORMAT_B5G6R5_SRGB) {
		extra_channel = -1;
	} else if (desc->layout == UTIL_FORMAT_LAYOUT_PLAIN) {
		if (r600_translate_colorswap(surface_format, FALSE) <= 1)
			extra_channel = desc->nr_channels - 1;
		else
			extra_channel = 0;
	} else
		return;

	for (i = 0; i < 4; ++i) {
		int index = desc->swizzle[i] - PIPE_SWIZZLE_X;

		if (desc->swizzle[i] < PIPE_SWIZZLE_X ||
		    desc->swizzle[i] > PIPE_SWIZZLE_W)
			continue;

		if (util_format_is_pure_sint(surface_format)) {
			values[i] = color->i[i] != 0;
			if (color->i[i] != 0 && color->i[i] != INT32_MAX)
				return;
		} else if (util_format_is_pure_uint(surface_format)) {
			values[i] = color->ui[i] != 0U;
			if (color->ui[i] != 0U && color->ui[i] != UINT32_MAX)
				return;
		} else {
			values[i] = color->f[i] != 0.0F;
			if (color->f[i] != 0.0F && color->f[i] != 1.0F)
				return;
		}

		if (index == extra_channel)
			extra_value = values[i];
		else
			main_value = values[i];
	}

	for (int i = 0; i < 4; ++i)
		if (values[i] != main_value &&
		    desc->swizzle[i] - PIPE_SWIZZLE_X != extra_channel &&
		    desc->swizzle[i] >= PIPE_SWIZZLE_X &&
		    desc->swizzle[i] <= PIPE_SWIZZLE_W)
			return;

	*clear_words_needed = false;
	if (main_value)
		*reset_value |= 0x80808080U;

	if (extra_value)
		*reset_value |= 0x40404040U;
}

void evergreen_do_fast_color_clear(struct r600_common_context *rctx,
				   struct pipe_framebuffer_state *fb,
				   struct r600_atom *fb_state,
				   unsigned *buffers, unsigned *dirty_cbufs,
				   const union pipe_color_union *color)
{
	int i;

	/* This function is broken on big-endian, so just disable this path for now */
#ifdef PIPE_ARCH_BIG_ENDIAN
	return;
#endif

	if (rctx->render_cond)
		return;

	for (i = 0; i < fb->nr_cbufs; i++) {
		struct r600_texture *tex;
		unsigned clear_bit = PIPE_CLEAR_COLOR0 << i;

		if (!fb->cbufs[i])
			continue;

		/* if this colorbuffer is not being cleared */
		if (!(*buffers & clear_bit))
			continue;

		tex = (struct r600_texture *)fb->cbufs[i]->texture;

		/* 128-bit formats are unsupported */
		if (util_format_get_blocksizebits(fb->cbufs[i]->format) > 64) {
			continue;
		}

		/* the clear is allowed if all layers are bound */
		if (fb->cbufs[i]->u.tex.first_layer != 0 ||
		    fb->cbufs[i]->u.tex.last_layer != util_max_layer(&tex->resource.b.b, 0)) {
			continue;
		}

		/* cannot clear mipmapped textures */
		if (fb->cbufs[i]->texture->last_level != 0) {
			continue;
		}

		/* only supported on tiled surfaces */
		if (tex->surface.level[0].mode < RADEON_SURF_MODE_1D) {
			continue;
		}

		/* shared textures can't use fast clear without an explicit flush,
		 * because there is no way to communicate the clear color among
		 * all clients
		 */
		if (tex->resource.is_shared &&
		    !(tex->resource.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
			continue;

		/* fast color clear with 1D tiling doesn't work on old kernels and CIK */
		if (tex->surface.level[0].mode == RADEON_SURF_MODE_1D &&
		    rctx->chip_class >= CIK &&
		    rctx->screen->info.drm_major == 2 &&
		    rctx->screen->info.drm_minor < 38) {
			continue;
		}

		if (tex->dcc_offset) {
			uint32_t reset_value;
			bool clear_words_needed;

			if (rctx->screen->debug_flags & DBG_NO_DCC_CLEAR)
				continue;

			vi_get_fast_clear_parameters(fb->cbufs[i]->format, color, &reset_value, &clear_words_needed);

			rctx->clear_buffer(&rctx->b, &tex->resource.b.b,
					   tex->dcc_offset, tex->surface.dcc_size,
					   reset_value, R600_COHERENCY_CB_META);

			if (clear_words_needed)
				tex->dirty_level_mask |= 1 << fb->cbufs[i]->u.tex.level;
		} else {
			/* Stoney/RB+ doesn't work with CMASK fast clear. */
			if (rctx->family == CHIP_STONEY)
				continue;

			/* ensure CMASK is enabled */
			r600_texture_alloc_cmask_separate(rctx->screen, tex);
			if (tex->cmask.size == 0) {
				continue;
			}

			/* Do the fast clear. */
			rctx->clear_buffer(&rctx->b, &tex->cmask_buffer->b.b,
					   tex->cmask.offset, tex->cmask.size, 0,
					   R600_COHERENCY_CB_META);

			tex->dirty_level_mask |= 1 << fb->cbufs[i]->u.tex.level;
		}

		evergreen_set_clear_color(tex, fb->cbufs[i]->format, color);

		if (dirty_cbufs)
			*dirty_cbufs |= 1 << i;
		rctx->set_atom_dirty(rctx, fb_state, true);
		*buffers &= ~clear_bit;
	}
}

void r600_init_screen_texture_functions(struct r600_common_screen *rscreen)
{
	rscreen->b.resource_from_handle = r600_texture_from_handle;
	rscreen->b.resource_get_handle = r600_texture_get_handle;
}

void r600_init_context_texture_functions(struct r600_common_context *rctx)
{
	rctx->b.create_surface = r600_create_surface;
	rctx->b.surface_destroy = r600_surface_destroy;
}