gallium: split transfer_inline_write into buffer and texture callbacks
[mesa.git] / src / gallium / drivers / svga / svga_resource_texture.c
1 /**********************************************************
2 * Copyright 2008-2009 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26 #include "svga3d_reg.h"
27 #include "svga3d_surfacedefs.h"
28
29 #include "pipe/p_state.h"
30 #include "pipe/p_defines.h"
31 #include "os/os_thread.h"
32 #include "os/os_time.h"
33 #include "util/u_format.h"
34 #include "util/u_inlines.h"
35 #include "util/u_math.h"
36 #include "util/u_memory.h"
37 #include "util/u_resource.h"
38
39 #include "svga_cmd.h"
40 #include "svga_format.h"
41 #include "svga_screen.h"
42 #include "svga_context.h"
43 #include "svga_resource_texture.h"
44 #include "svga_resource_buffer.h"
45 #include "svga_sampler_view.h"
46 #include "svga_winsys.h"
47 #include "svga_debug.h"
48
49
50 static void
51 svga_transfer_dma_band(struct svga_context *svga,
52 struct svga_transfer *st,
53 SVGA3dTransferType transfer,
54 unsigned x, unsigned y, unsigned z,
55 unsigned w, unsigned h, unsigned d,
56 unsigned srcx, unsigned srcy, unsigned srcz,
57 SVGA3dSurfaceDMAFlags flags)
58 {
59 struct svga_texture *texture = svga_texture(st->base.resource);
60 SVGA3dCopyBox box;
61 enum pipe_error ret;
62
63 assert(!st->use_direct_map);
64
65 box.x = x;
66 box.y = y;
67 box.z = z;
68 box.w = w;
69 box.h = h;
70 box.d = d;
71 box.srcx = srcx;
72 box.srcy = srcy;
73 box.srcz = srcz;
74
75 SVGA_DBG(DEBUG_DMA, "dma %s sid %p, face %u, (%u, %u, %u) - "
76 "(%u, %u, %u), %ubpp\n",
77 transfer == SVGA3D_WRITE_HOST_VRAM ? "to" : "from",
78 texture->handle,
79 st->slice,
80 x,
81 y,
82 z,
83 x + w,
84 y + h,
85 z + 1,
86 util_format_get_blocksize(texture->b.b.format) * 8 /
87 (util_format_get_blockwidth(texture->b.b.format)
88 * util_format_get_blockheight(texture->b.b.format)));
89
90 ret = SVGA3D_SurfaceDMA(svga->swc, st, transfer, &box, 1, flags);
91 if (ret != PIPE_OK) {
92 svga_context_flush(svga, NULL);
93 ret = SVGA3D_SurfaceDMA(svga->swc, st, transfer, &box, 1, flags);
94 assert(ret == PIPE_OK);
95 }
96 }
97
98
/**
 * Execute a whole texture transfer via SurfaceDMA commands.
 *
 * If the transfer fits in the hardware staging buffer (no st->swbuf),
 * the DMA is issued in one band; reads are then flushed and fenced so the
 * data has landed before the caller maps the buffer.
 *
 * Otherwise the transfer is split into horizontal bands of
 * st->hw_nblocksy pixel-block rows, each staged through st->hwbuf and
 * copied to/from the malloc'd shadow st->swbuf.
 */
static void
svga_transfer_dma(struct svga_context *svga,
                  struct svga_transfer *st,
                  SVGA3dTransferType transfer,
                  SVGA3dSurfaceDMAFlags flags)
{
   struct svga_texture *texture = svga_texture(st->base.resource);
   struct svga_screen *screen = svga_screen(texture->b.b.screen);
   struct svga_winsys_screen *sws = screen->sws;
   struct pipe_fence_handle *fence = NULL;

   assert(!st->use_direct_map);

   if (transfer == SVGA3D_READ_HOST_VRAM) {
      SVGA_DBG(DEBUG_PERF, "%s: readback transfer\n", __FUNCTION__);
   }

   /* Ensure any pending operations on host surfaces are queued on the command
    * buffer first.
    */
   svga_surfaces_flush( svga );

   if (!st->swbuf) {
      /* Do the DMA transfer in a single go */
      svga_transfer_dma_band(svga, st, transfer,
                             st->base.box.x, st->base.box.y, st->base.box.z,
                             st->base.box.width, st->base.box.height, st->base.box.depth,
                             0, 0, 0,
                             flags);

      if (transfer == SVGA3D_READ_HOST_VRAM) {
         /* Wait for the readback DMA to complete before the caller reads
          * the staging buffer.
          */
         svga_context_flush(svga, &fence);
         sws->fence_finish(sws, fence, 0);
         sws->fence_reference(sws, &fence, NULL);
      }
   }
   else {
      /* Banded path: st->hwbuf only holds st->hw_nblocksy block rows. */
      int y, h, srcy;
      unsigned blockheight =
         util_format_get_blockheight(st->base.resource->format);

      h = st->hw_nblocksy * blockheight;
      srcy = 0;

      for (y = 0; y < st->base.box.height; y += h) {
         unsigned offset, length;
         void *hw, *sw;

         /* Clamp the last band to the remaining height. */
         if (y + h > st->base.box.height)
            h = st->base.box.height - y;

         /* Transfer band must be aligned to pixel block boundaries */
         assert(y % blockheight == 0);
         assert(h % blockheight == 0);

         /* Byte offset/length of this band within the shadow buffer;
          * stride is per block row, hence the division by blockheight.
          */
         offset = y * st->base.stride / blockheight;
         length = h * st->base.stride / blockheight;

         sw = (uint8_t *) st->swbuf + offset;

         if (transfer == SVGA3D_WRITE_HOST_VRAM) {
            unsigned usage = PIPE_TRANSFER_WRITE;

            /* Wait for the previous DMAs to complete */
            /* TODO: keep one DMA (at half the size) in the background */
            if (y) {
               svga_context_flush(svga, NULL);
               usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
            }

            /* Copy this band from the shadow into the staging buffer. */
            hw = sws->buffer_map(sws, st->hwbuf, usage);
            assert(hw);
            if (hw) {
               memcpy(hw, sw, length);
               sws->buffer_unmap(sws, st->hwbuf);
            }
         }

         svga_transfer_dma_band(svga, st, transfer,
                                st->base.box.x, y, st->base.box.z,
                                st->base.box.width, h, st->base.box.depth,
                                0, srcy, 0, flags);

         /*
          * Prevent the texture contents to be discarded on the next band
          * upload.
          */
         flags.discard = FALSE;

         if (transfer == SVGA3D_READ_HOST_VRAM) {
            /* Fence, then copy the band from the staging buffer back into
             * the shadow buffer.
             */
            svga_context_flush(svga, &fence);
            sws->fence_finish(sws, fence, 0);

            hw = sws->buffer_map(sws, st->hwbuf, PIPE_TRANSFER_READ);
            assert(hw);
            if (hw) {
               memcpy(sw, hw, length);
               sws->buffer_unmap(sws, st->hwbuf);
            }
         }
      }
   }
}
202
203
204 static boolean
205 svga_texture_get_handle(struct pipe_screen *screen,
206 struct pipe_resource *texture,
207 struct winsys_handle *whandle)
208 {
209 struct svga_winsys_screen *sws = svga_winsys_screen(texture->screen);
210 unsigned stride;
211
212 assert(svga_texture(texture)->key.cachable == 0);
213 svga_texture(texture)->key.cachable = 0;
214
215 stride = util_format_get_nblocksx(texture->format, texture->width0) *
216 util_format_get_blocksize(texture->format);
217
218 return sws->surface_get_handle(sws, svga_texture(texture)->handle,
219 stride, whandle);
220 }
221
222
223 static void
224 svga_texture_destroy(struct pipe_screen *screen,
225 struct pipe_resource *pt)
226 {
227 struct svga_screen *ss = svga_screen(screen);
228 struct svga_texture *tex = svga_texture(pt);
229
230 ss->texture_timestamp++;
231
232 svga_sampler_view_reference(&tex->cached_view, NULL);
233
234 /*
235 DBG("%s deleting %p\n", __FUNCTION__, (void *) tex);
236 */
237 SVGA_DBG(DEBUG_DMA, "unref sid %p (texture)\n", tex->handle);
238 svga_screen_surface_destroy(ss, &tex->key, &tex->handle);
239
240 ss->hud.total_resource_bytes -= tex->size;
241
242 FREE(tex->defined);
243 FREE(tex->rendered_to);
244 FREE(tex->dirty);
245 FREE(tex);
246
247 assert(ss->hud.num_resources > 0);
248 if (ss->hud.num_resources > 0)
249 ss->hud.num_resources--;
250 }
251
252
253 /**
254 * Determine if we need to read back a texture image before mapping it.
255 */
256 static boolean
257 need_tex_readback(struct pipe_transfer *transfer)
258 {
259 struct svga_texture *t = svga_texture(transfer->resource);
260
261 if (transfer->usage & PIPE_TRANSFER_READ)
262 return TRUE;
263
264 if ((transfer->usage & PIPE_TRANSFER_WRITE) &&
265 ((transfer->usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) == 0)) {
266 unsigned face;
267
268 if (transfer->resource->target == PIPE_TEXTURE_CUBE) {
269 assert(transfer->box.depth == 1);
270 face = transfer->box.z;
271 }
272 else {
273 face = 0;
274 }
275 if (svga_was_texture_rendered_to(t, face, transfer->level)) {
276 return TRUE;
277 }
278 }
279
280 return FALSE;
281 }
282
283
284 static enum pipe_error
285 readback_image_vgpu9(struct svga_context *svga,
286 struct svga_winsys_surface *surf,
287 unsigned slice,
288 unsigned level)
289 {
290 enum pipe_error ret;
291
292 ret = SVGA3D_ReadbackGBImage(svga->swc, surf, slice, level);
293 if (ret != PIPE_OK) {
294 svga_context_flush(svga, NULL);
295 ret = SVGA3D_ReadbackGBImage(svga->swc, surf, slice, level);
296 }
297 return ret;
298 }
299
300
301 static enum pipe_error
302 readback_image_vgpu10(struct svga_context *svga,
303 struct svga_winsys_surface *surf,
304 unsigned slice,
305 unsigned level,
306 unsigned numMipLevels)
307 {
308 enum pipe_error ret;
309 unsigned subResource;
310
311 subResource = slice * numMipLevels + level;
312 ret = SVGA3D_vgpu10_ReadbackSubResource(svga->swc, surf, subResource);
313 if (ret != PIPE_OK) {
314 svga_context_flush(svga, NULL);
315 ret = SVGA3D_vgpu10_ReadbackSubResource(svga->swc, surf, subResource);
316 }
317 return ret;
318 }
319
320
/**
 * Map a texture image for CPU access.
 *
 * Two paths:
 *  - DMA path (!use_direct_map): data is staged in a tightly packed
 *    hardware buffer (st->hwbuf), possibly shadowed by malloc'd memory
 *    (st->swbuf) when the hwbuf allocation had to be shrunk, and moved
 *    with SurfaceDMA commands.
 *  - Direct-map path (guest-backed objects without GB DMA): the
 *    guest-backed surface storage itself is mapped and an offset to the
 *    requested slice/level/box is computed.
 *
 * Returns the mapped pointer, or NULL on failure.
 */
static void *
svga_texture_transfer_map(struct pipe_context *pipe,
                          struct pipe_resource *texture,
                          unsigned level,
                          unsigned usage,
                          const struct pipe_box *box,
                          struct pipe_transfer **ptransfer)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_screen *ss = svga_screen(pipe->screen);
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_texture *tex = svga_texture(texture);
   struct svga_transfer *st;
   unsigned nblocksx, nblocksy;
   boolean use_direct_map = svga_have_gb_objects(svga) &&
      !svga_have_gb_dma(svga);
   unsigned d;
   void *returnVal;
   int64_t begin = os_time_get();   /* for HUD map-time accounting */

   /* We can't map texture storage directly unless we have GB objects */
   if (usage & PIPE_TRANSFER_MAP_DIRECTLY) {
      if (svga_have_gb_objects(svga))
         use_direct_map = TRUE;
      else
         return NULL;
   }

   st = CALLOC_STRUCT(svga_transfer);
   if (!st)
      return NULL;

   st->base.level = level;
   st->base.usage = usage;
   st->base.box = *box;

   /* For layered targets the layer index is carried in st->slice instead
    * of box.z, so box.z is cleared to avoid applying the offset twice.
    */
   switch (tex->b.b.target) {
   case PIPE_TEXTURE_CUBE:
      st->slice = st->base.box.z;
      st->base.box.z = 0;   /* so we don't apply double offsets below */
      break;
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_1D_ARRAY:
      st->slice = st->base.box.z;
      st->base.box.z = 0;   /* so we don't apply double offsets below */

      /* Force direct map for transfering multiple slices */
      if (st->base.box.depth > 1)
         use_direct_map = svga_have_gb_objects(svga);

      break;
   default:
      st->slice = 0;
      break;
   }

   {
      unsigned w, h;
      if (use_direct_map) {
         /* we'll directly access the guest-backed surface */
         w = u_minify(texture->width0, level);
         h = u_minify(texture->height0, level);
         d = u_minify(texture->depth0, level);
      }
      else {
         /* we'll put the data into a tightly packed buffer */
         w = box->width;
         h = box->height;
         d = box->depth;
      }
      nblocksx = util_format_get_nblocksx(texture->format, w);
      nblocksy = util_format_get_nblocksy(texture->format, h);
   }

   pipe_resource_reference(&st->base.resource, texture);

   st->base.stride = nblocksx*util_format_get_blocksize(texture->format);
   st->base.layer_stride = st->base.stride * nblocksy;

   if (usage & PIPE_TRANSFER_WRITE) {
      /* record texture upload for HUD */
      svga->hud.num_bytes_uploaded +=
         nblocksx * nblocksy * d * util_format_get_blocksize(texture->format);
   }

   if (!use_direct_map) {
      /* Use a DMA buffer */
      st->hw_nblocksy = nblocksy;

      /* Try the full-size staging buffer first; if allocation fails keep
       * halving the band height until it succeeds (or reaches zero).
       */
      st->hwbuf = svga_winsys_buffer_create(svga, 1, 0,
                                   st->hw_nblocksy * st->base.stride * d);
      while(!st->hwbuf && (st->hw_nblocksy /= 2)) {
         st->hwbuf = svga_winsys_buffer_create(svga, 1, 0,
                                   st->hw_nblocksy * st->base.stride * d);
      }

      if (!st->hwbuf) {
         FREE(st);
         return NULL;
      }

      if (st->hw_nblocksy < nblocksy) {
         /* We couldn't allocate a hardware buffer big enough for the transfer,
          * so allocate regular malloc memory instead */
         if (0) {
            debug_printf("%s: failed to allocate %u KB of DMA, "
                         "splitting into %u x %u KB DMA transfers\n",
                         __FUNCTION__,
                         (nblocksy*st->base.stride + 1023)/1024,
                         (nblocksy + st->hw_nblocksy - 1)/st->hw_nblocksy,
                         (st->hw_nblocksy*st->base.stride + 1023)/1024);
         }

         st->swbuf = MALLOC(nblocksy * st->base.stride * d);
         if (!st->swbuf) {
            sws->buffer_destroy(sws, st->hwbuf);
            FREE(st);
            return NULL;
         }
      }

      if (usage & PIPE_TRANSFER_READ) {
         /* Pull the current texture contents into the staging buffer. */
         SVGA3dSurfaceDMAFlags flags;
         memset(&flags, 0, sizeof flags);
         svga_transfer_dma(svga, st, SVGA3D_READ_HOST_VRAM, flags);
      }
   } else {
      struct pipe_transfer *transfer = &st->base;
      struct svga_winsys_surface *surf = tex->handle;

      if (!surf) {
         FREE(st);
         return NULL;
      }

      /* If this is the first time mapping to the surface in this
       * command buffer, clear the dirty masks of this surface.
       */
      if (sws->surface_is_flushed(sws, surf)) {
         svga_clear_texture_dirty(tex);
      }

      if (need_tex_readback(transfer)) {
         enum pipe_error ret;

         svga_surfaces_flush(svga);

         if (svga_have_vgpu10(svga)) {
            ret = readback_image_vgpu10(svga, surf, st->slice, transfer->level,
                                        tex->b.b.last_level + 1);
         } else {
            ret = readback_image_vgpu9(svga, surf, st->slice, transfer->level);
         }

         svga->hud.num_readbacks++;

         assert(ret == PIPE_OK);
         (void) ret;

         /* Submit the readback before mapping the surface storage. */
         svga_context_flush(svga, NULL);

         /*
          * Note: if PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE were specified
          * we could potentially clear the flag for all faces/layers/mips.
          */
         svga_clear_texture_rendered_to(tex, st->slice, transfer->level);
      }
      else {
         assert(transfer->usage & PIPE_TRANSFER_WRITE);
         if ((transfer->usage & PIPE_TRANSFER_UNSYNCHRONIZED) == 0) {
            if (svga_is_texture_dirty(tex, st->slice, transfer->level)) {
               /*
                * do a surface flush if the subresource has been modified
                * in this command buffer.
                */
               svga_surfaces_flush(svga);
               if (!sws->surface_is_flushed(sws, surf)) {
                  svga->hud.surface_write_flushes++;
                  svga_context_flush(svga, NULL);
               }
            }
         }
      }
      if (transfer->usage & PIPE_TRANSFER_WRITE) {
         /* mark this texture level as dirty */
         svga_set_texture_dirty(tex, st->slice, transfer->level);
      }
   }

   st->use_direct_map = use_direct_map;

   *ptransfer = &st->base;

   /*
    * Begin mapping code
    */
   if (st->swbuf) {
      returnVal = st->swbuf;
   }
   else if (!st->use_direct_map) {
      returnVal = sws->buffer_map(sws, st->hwbuf, usage);
   }
   else {
      SVGA3dSize baseLevelSize;
      struct svga_texture *tex = svga_texture(texture);
      struct svga_winsys_surface *surf = tex->handle;
      uint8_t *map;
      boolean retry;
      unsigned offset, mip_width, mip_height;
      unsigned xoffset = st->base.box.x;
      unsigned yoffset = st->base.box.y;
      unsigned zoffset = st->base.box.z;

      map = svga->swc->surface_map(svga->swc, surf, usage, &retry);
      if (map == NULL && retry) {
         /*
          * At this point, the svga_surfaces_flush() should already have
          * called in svga_texture_get_transfer().
          */
         svga_context_flush(svga, NULL);
         map = svga->swc->surface_map(svga->swc, surf, usage, &retry);
      }

      /*
       * Make sure we return NULL if the map fails
       */
      if (!map) {
         FREE(st);
         return map;
      }

      /**
       * Compute the offset to the specific texture slice in the buffer.
       */
      baseLevelSize.width = tex->b.b.width0;
      baseLevelSize.height = tex->b.b.height0;
      baseLevelSize.depth = tex->b.b.depth0;

      if ((tex->b.b.target == PIPE_TEXTURE_1D_ARRAY) ||
          (tex->b.b.target == PIPE_TEXTURE_2D_ARRAY)) {
         /* For arrays, the layer stride is the size of one layer's full
          * mip chain (image offset of slice 1, level 0).
          */
         st->base.layer_stride =
            svga3dsurface_get_image_offset(tex->key.format, baseLevelSize,
                                           tex->b.b.last_level + 1, 1, 0);
      }

      offset = svga3dsurface_get_image_offset(tex->key.format, baseLevelSize,
                                              tex->b.b.last_level + 1, /* numMips */
                                              st->slice, level);
      if (level > 0) {
         assert(offset > 0);
      }

      mip_width = u_minify(tex->b.b.width0, level);
      mip_height = u_minify(tex->b.b.height0, level);

      offset += svga3dsurface_get_pixel_offset(tex->key.format,
                                               mip_width, mip_height,
                                               xoffset, yoffset, zoffset);
      returnVal = (void *) (map + offset);
   }

   svga->hud.map_buffer_time += (os_time_get() - begin);
   svga->hud.num_resources_mapped++;

   return returnVal;
}
587
588
589 /**
590 * Unmap a GB texture surface.
591 */
592 static void
593 svga_texture_surface_unmap(struct svga_context *svga,
594 struct pipe_transfer *transfer)
595 {
596 struct svga_winsys_surface *surf = svga_texture(transfer->resource)->handle;
597 struct svga_winsys_context *swc = svga->swc;
598 boolean rebind;
599
600 assert(surf);
601
602 swc->surface_unmap(swc, surf, &rebind);
603 if (rebind) {
604 enum pipe_error ret;
605 ret = SVGA3D_BindGBSurface(swc, surf);
606 if (ret != PIPE_OK) {
607 /* flush and retry */
608 svga_context_flush(svga, NULL);
609 ret = SVGA3D_BindGBSurface(swc, surf);
610 assert(ret == PIPE_OK);
611 }
612 }
613 }
614
615
616 static enum pipe_error
617 update_image_vgpu9(struct svga_context *svga,
618 struct svga_winsys_surface *surf,
619 const SVGA3dBox *box,
620 unsigned slice,
621 unsigned level)
622 {
623 enum pipe_error ret;
624
625 ret = SVGA3D_UpdateGBImage(svga->swc, surf, box, slice, level);
626 if (ret != PIPE_OK) {
627 svga_context_flush(svga, NULL);
628 ret = SVGA3D_UpdateGBImage(svga->swc, surf, box, slice, level);
629 }
630 return ret;
631 }
632
633
634 static enum pipe_error
635 update_image_vgpu10(struct svga_context *svga,
636 struct svga_winsys_surface *surf,
637 const SVGA3dBox *box,
638 unsigned slice,
639 unsigned level,
640 unsigned numMipLevels)
641 {
642 enum pipe_error ret;
643 unsigned subResource;
644
645 subResource = slice * numMipLevels + level;
646 ret = SVGA3D_vgpu10_UpdateSubResource(svga->swc, surf, box, subResource);
647 if (ret != PIPE_OK) {
648 svga_context_flush(svga, NULL);
649 ret = SVGA3D_vgpu10_UpdateSubResource(svga->swc, surf, box, subResource);
650 }
651 return ret;
652 }
653
654
/**
 * Unmap a texture transfer and push any written data to the host.
 *
 * DMA path: issues a SurfaceDMA write (honoring discard/unsynchronized
 * usage flags).  Direct-map path: issues UpdateGBImage /
 * UpdateSubResource for the dirtied box.  Finally updates view aging,
 * level-defined tracking, and frees the transfer.
 */
static void
svga_texture_transfer_unmap(struct pipe_context *pipe,
                            struct pipe_transfer *transfer)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_screen *ss = svga_screen(pipe->screen);
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_transfer *st = svga_transfer(transfer);
   struct svga_texture *tex = svga_texture(transfer->resource);

   /* A swbuf shadow was never mapped through the winsys, so only unmap
    * when the caller was given a hwbuf or direct surface mapping.
    */
   if (!st->swbuf) {
      if (st->use_direct_map) {
         svga_texture_surface_unmap(svga, transfer);
      }
      else {
         sws->buffer_unmap(sws, st->hwbuf);
      }
   }

   if (!st->use_direct_map && (st->base.usage & PIPE_TRANSFER_WRITE)) {
      /* Use DMA to transfer texture data */
      SVGA3dSurfaceDMAFlags flags;

      memset(&flags, 0, sizeof flags);
      if (transfer->usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
         flags.discard = TRUE;
      }
      if (transfer->usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
         flags.unsynchronized = TRUE;
      }

      svga_transfer_dma(svga, st, SVGA3D_WRITE_HOST_VRAM, flags);
   } else if (transfer->usage & PIPE_TRANSFER_WRITE) {
      /* Direct-map write: tell the host which region was modified. */
      struct svga_winsys_surface *surf =
         svga_texture(transfer->resource)->handle;
      SVGA3dBox box;
      enum pipe_error ret;
      unsigned nlayers = 1;

      assert(svga_have_gb_objects(svga));

      /* update the effected region */
      box.x = transfer->box.x;
      box.y = transfer->box.y;
      box.w = transfer->box.width;
      box.h = transfer->box.height;
      box.d = transfer->box.depth;

      /* The layer index travels as st->slice (plus a loop below for
       * arrays), so z/d are normalized per target here.
       */
      switch (tex->b.b.target) {
      case PIPE_TEXTURE_CUBE:
         box.z = 0;
         break;
      case PIPE_TEXTURE_2D_ARRAY:
         nlayers = box.d;
         box.z = 0;
         box.d = 1;
         break;
      case PIPE_TEXTURE_1D_ARRAY:
         nlayers = box.d;
         box.y = box.z = 0;
         box.d = 1;
         break;
      default:
         box.z = transfer->box.z;
         break;
      }

      if (0)
         debug_printf("%s %d, %d, %d %d x %d x %d\n",
                      __FUNCTION__,
                      box.x, box.y, box.z,
                      box.w, box.h, box.d);

      if (svga_have_vgpu10(svga)) {
         /* One update command per array layer. */
         unsigned i;
         for (i = 0; i < nlayers; i++) {
            ret = update_image_vgpu10(svga, surf, &box,
                                      st->slice + i, transfer->level,
                                      tex->b.b.last_level + 1);
            assert(ret == PIPE_OK);
         }
      } else {
         assert(nlayers == 1);
         ret = update_image_vgpu9(svga, surf, &box, st->slice, transfer->level);
         assert(ret == PIPE_OK);
      }

      svga->hud.num_resource_updates++;

      (void) ret;
   }

   /* Invalidate cached sampler views and mark the level as defined. */
   ss->texture_timestamp++;
   svga_age_texture_view(tex, transfer->level);
   if (transfer->resource->target == PIPE_TEXTURE_CUBE)
      svga_define_texture_level(tex, st->slice, transfer->level);
   else
      svga_define_texture_level(tex, 0, transfer->level);

   pipe_resource_reference(&st->base.resource, NULL);

   FREE(st->swbuf);
   if (!st->use_direct_map) {
      sws->buffer_destroy(sws, st->hwbuf);
   }
   FREE(st);
}
762
763
764 /**
765 * Does format store depth values?
766 */
767 static inline boolean
768 format_has_depth(enum pipe_format format)
769 {
770 const struct util_format_description *desc = util_format_description(format);
771 return util_format_has_depth(desc);
772 }
773
774
/* u_resource dispatch table for texture resources; routes generic
 * pipe_resource operations to the texture implementations above.
 */
struct u_resource_vtbl svga_texture_vtbl =
{
   svga_texture_get_handle,	      /* get_handle */
   svga_texture_destroy,	      /* resource_destroy */
   svga_texture_transfer_map,	      /* transfer_map */
   u_default_transfer_flush_region,   /* transfer_flush_region */
   svga_texture_transfer_unmap,	      /* transfer_unmap */
};
783
784
785 struct pipe_resource *
786 svga_texture_create(struct pipe_screen *screen,
787 const struct pipe_resource *template)
788 {
789 struct svga_screen *svgascreen = svga_screen(screen);
790 struct svga_texture *tex;
791 unsigned bindings = template->bind;
792
793 assert(template->last_level < SVGA_MAX_TEXTURE_LEVELS);
794 if (template->last_level >= SVGA_MAX_TEXTURE_LEVELS) {
795 return NULL;
796 }
797
798 tex = CALLOC_STRUCT(svga_texture);
799 if (!tex) {
800 return NULL;
801 }
802
803 tex->defined = CALLOC(template->depth0 * template->array_size,
804 sizeof(tex->defined[0]));
805 if (!tex->defined) {
806 FREE(tex);
807 return NULL;
808 }
809
810 tex->rendered_to = CALLOC(template->depth0 * template->array_size,
811 sizeof(tex->rendered_to[0]));
812 if (!tex->rendered_to) {
813 goto fail;
814 }
815
816 tex->dirty = CALLOC(template->depth0 * template->array_size,
817 sizeof(tex->dirty[0]));
818 if (!tex->dirty) {
819 goto fail;
820 }
821
822 tex->b.b = *template;
823 tex->b.vtbl = &svga_texture_vtbl;
824 pipe_reference_init(&tex->b.b.reference, 1);
825 tex->b.b.screen = screen;
826
827 tex->key.flags = 0;
828 tex->key.size.width = template->width0;
829 tex->key.size.height = template->height0;
830 tex->key.size.depth = template->depth0;
831 tex->key.arraySize = 1;
832 tex->key.numFaces = 1;
833 tex->key.sampleCount = template->nr_samples;
834
835 if (template->nr_samples > 1) {
836 tex->key.flags |= SVGA3D_SURFACE_MASKABLE_ANTIALIAS;
837 }
838
839 if (svgascreen->sws->have_vgpu10) {
840 switch (template->target) {
841 case PIPE_TEXTURE_1D:
842 tex->key.flags |= SVGA3D_SURFACE_1D;
843 break;
844 case PIPE_TEXTURE_1D_ARRAY:
845 tex->key.flags |= SVGA3D_SURFACE_1D;
846 /* fall-through */
847 case PIPE_TEXTURE_2D_ARRAY:
848 tex->key.flags |= SVGA3D_SURFACE_ARRAY;
849 tex->key.arraySize = template->array_size;
850 break;
851 case PIPE_TEXTURE_3D:
852 tex->key.flags |= SVGA3D_SURFACE_VOLUME;
853 break;
854 case PIPE_TEXTURE_CUBE:
855 tex->key.flags |= (SVGA3D_SURFACE_CUBEMAP | SVGA3D_SURFACE_ARRAY);
856 tex->key.numFaces = 6;
857 break;
858 default:
859 break;
860 }
861 }
862 else {
863 switch (template->target) {
864 case PIPE_TEXTURE_3D:
865 tex->key.flags |= SVGA3D_SURFACE_VOLUME;
866 break;
867 case PIPE_TEXTURE_CUBE:
868 tex->key.flags |= SVGA3D_SURFACE_CUBEMAP;
869 tex->key.numFaces = 6;
870 break;
871 default:
872 break;
873 }
874 }
875
876 tex->key.cachable = 1;
877
878 if ((bindings & (PIPE_BIND_RENDER_TARGET | PIPE_BIND_DEPTH_STENCIL)) &&
879 !(bindings & PIPE_BIND_SAMPLER_VIEW)) {
880 /* Also check if the format can be sampled from */
881 if (screen->is_format_supported(screen, template->format,
882 template->target,
883 template->nr_samples,
884 PIPE_BIND_SAMPLER_VIEW)) {
885 bindings |= PIPE_BIND_SAMPLER_VIEW;
886 }
887 }
888
889 if (bindings & PIPE_BIND_SAMPLER_VIEW) {
890 tex->key.flags |= SVGA3D_SURFACE_HINT_TEXTURE;
891 tex->key.flags |= SVGA3D_SURFACE_BIND_SHADER_RESOURCE;
892
893 if (!(bindings & PIPE_BIND_RENDER_TARGET)) {
894 /* Also check if the format is renderable */
895 if (screen->is_format_supported(screen, template->format,
896 template->target,
897 template->nr_samples,
898 PIPE_BIND_RENDER_TARGET)) {
899 bindings |= PIPE_BIND_RENDER_TARGET;
900 }
901 }
902 }
903
904 if (bindings & PIPE_BIND_DISPLAY_TARGET) {
905 tex->key.cachable = 0;
906 }
907
908 if (bindings & PIPE_BIND_SHARED) {
909 tex->key.cachable = 0;
910 }
911
912 if (bindings & (PIPE_BIND_SCANOUT | PIPE_BIND_CURSOR)) {
913 tex->key.scanout = 1;
914 tex->key.cachable = 0;
915 }
916
917 /*
918 * Note: Previously we never passed the
919 * SVGA3D_SURFACE_HINT_RENDERTARGET hint. Mesa cannot
920 * know beforehand whether a texture will be used as a rendertarget or not
921 * and it always requests PIPE_BIND_RENDER_TARGET, therefore
922 * passing the SVGA3D_SURFACE_HINT_RENDERTARGET here defeats its purpose.
923 *
924 * However, this was changed since other state trackers
925 * (XA for example) uses it accurately and certain device versions
926 * relies on it in certain situations to render correctly.
927 */
928 if ((bindings & PIPE_BIND_RENDER_TARGET) &&
929 !util_format_is_s3tc(template->format)) {
930 tex->key.flags |= SVGA3D_SURFACE_HINT_RENDERTARGET;
931 tex->key.flags |= SVGA3D_SURFACE_BIND_RENDER_TARGET;
932 }
933
934 if (bindings & PIPE_BIND_DEPTH_STENCIL) {
935 tex->key.flags |= SVGA3D_SURFACE_HINT_DEPTHSTENCIL;
936 tex->key.flags |= SVGA3D_SURFACE_BIND_DEPTH_STENCIL;
937 }
938
939 tex->key.numMipLevels = template->last_level + 1;
940
941 tex->key.format = svga_translate_format(svgascreen, template->format,
942 bindings);
943 if (tex->key.format == SVGA3D_FORMAT_INVALID) {
944 goto fail;
945 }
946
947 /* The actual allocation is done with a typeless format. Typeless
948 * formats can be reinterpreted as other formats. For example,
949 * SVGA3D_R8G8B8A8_UNORM_TYPELESS can be interpreted as
950 * SVGA3D_R8G8B8A8_UNORM_SRGB or SVGA3D_R8G8B8A8_UNORM.
951 * Do not use typeless formats for SHARED, DISPLAY_TARGET or SCANOUT
952 * buffers.
953 */
954 if (svgascreen->sws->have_vgpu10
955 && ((bindings & (PIPE_BIND_SHARED |
956 PIPE_BIND_DISPLAY_TARGET |
957 PIPE_BIND_SCANOUT)) == 0)) {
958 SVGA3dSurfaceFormat typeless = svga_typeless_format(tex->key.format);
959 if (0) {
960 debug_printf("Convert resource type %s -> %s (bind 0x%x)\n",
961 svga_format_name(tex->key.format),
962 svga_format_name(typeless),
963 bindings);
964 }
965
966 if (svga_format_is_uncompressed_snorm(tex->key.format)) {
967 /* We can't normally render to snorm surfaces, but once we
968 * substitute a typeless format, we can if the rendertarget view
969 * is unorm. This can happen with GL_ARB_copy_image.
970 */
971 tex->key.flags |= SVGA3D_SURFACE_HINT_RENDERTARGET;
972 tex->key.flags |= SVGA3D_SURFACE_BIND_RENDER_TARGET;
973 }
974
975 tex->key.format = typeless;
976 }
977
978 SVGA_DBG(DEBUG_DMA, "surface_create for texture\n", tex->handle);
979 tex->handle = svga_screen_surface_create(svgascreen, bindings,
980 tex->b.b.usage, &tex->key);
981 if (!tex->handle) {
982 goto fail;
983 }
984
985 SVGA_DBG(DEBUG_DMA, " --> got sid %p (texture)\n", tex->handle);
986
987 debug_reference(&tex->b.b.reference,
988 (debug_reference_descriptor)debug_describe_resource, 0);
989
990 tex->size = util_resource_size(template);
991 svgascreen->hud.total_resource_bytes += tex->size;
992 svgascreen->hud.num_resources++;
993
994 return &tex->b.b;
995
996 fail:
997 if (tex->dirty)
998 FREE(tex->dirty);
999 if (tex->rendered_to)
1000 FREE(tex->rendered_to);
1001 if (tex->defined)
1002 FREE(tex->defined);
1003 FREE(tex);
1004 return NULL;
1005 }
1006
1007
/**
 * Wrap an externally shared surface (from a winsys handle) in a
 * pipe_resource.  Only single-level, non-array 2D/RECT textures are
 * supported.  Returns NULL if the handle's surface format is
 * incompatible with the requested template format.
 */
struct pipe_resource *
svga_texture_from_handle(struct pipe_screen *screen,
                         const struct pipe_resource *template,
                         struct winsys_handle *whandle)
{
   struct svga_winsys_screen *sws = svga_winsys_screen(screen);
   struct svga_screen *ss = svga_screen(screen);
   struct svga_winsys_surface *srf;
   struct svga_texture *tex;
   enum SVGA3dSurfaceFormat format = 0;
   assert(screen);

   /* Only supports one type */
   if ((template->target != PIPE_TEXTURE_2D &&
       template->target != PIPE_TEXTURE_RECT) ||
       template->last_level != 0 ||
       template->depth0 != 1) {
      return NULL;
   }

   srf = sws->surface_from_handle(sws, whandle, &format);

   if (!srf)
      return NULL;

   /* Verify the handle's actual format is compatible with the format
    * implied by the template.
    */
   if (svga_translate_format(svga_screen(screen), template->format,
                             template->bind) != format) {
      unsigned f1 = svga_translate_format(svga_screen(screen),
                                          template->format, template->bind);
      unsigned f2 = format;

      /* It's okay for XRGB and ARGB or depth with/out stencil to get mixed up.
       */
      if (f1 == SVGA3D_B8G8R8A8_UNORM)
         f1 = SVGA3D_A8R8G8B8;
      if (f1 == SVGA3D_B8G8R8X8_UNORM)
         f1 = SVGA3D_X8R8G8B8;

      if ( !( (f1 == f2) ||
              (f1 == SVGA3D_X8R8G8B8 && f2 == SVGA3D_A8R8G8B8) ||
              (f1 == SVGA3D_X8R8G8B8 && f2 == SVGA3D_B8G8R8X8_UNORM) ||
              (f1 == SVGA3D_A8R8G8B8 && f2 == SVGA3D_X8R8G8B8) ||
              (f1 == SVGA3D_A8R8G8B8 && f2 == SVGA3D_B8G8R8A8_UNORM) ||
              (f1 == SVGA3D_Z_D24X8 && f2 == SVGA3D_Z_D24S8) ||
              (f1 == SVGA3D_Z_DF24 && f2 == SVGA3D_Z_D24S8_INT) ) ) {
         debug_printf("%s wrong format %s != %s\n", __FUNCTION__,
                      svga_format_name(f1), svga_format_name(f2));
         return NULL;
      }
   }

   tex = CALLOC_STRUCT(svga_texture);
   if (!tex)
      return NULL;

   tex->defined = CALLOC(template->depth0 * template->array_size,
                         sizeof(tex->defined[0]));
   if (!tex->defined) {
      FREE(tex);
      return NULL;
   }

   tex->b.b = *template;
   tex->b.vtbl = &svga_texture_vtbl;
   pipe_reference_init(&tex->b.b.reference, 1);
   tex->b.b.screen = screen;

   SVGA_DBG(DEBUG_DMA, "wrap surface sid %p\n", srf);

   /* Imported surfaces must never go back into the surface cache. */
   tex->key.cachable = 0;
   tex->key.format = format;
   tex->handle = srf;

   /* Single slice, single level: one tracking entry each. */
   tex->rendered_to = CALLOC(1, sizeof(tex->rendered_to[0]));
   if (!tex->rendered_to)
      goto fail;

   tex->dirty = CALLOC(1, sizeof(tex->dirty[0]));
   if (!tex->dirty)
      goto fail;

   tex->imported = TRUE;

   ss->hud.num_resources++;

   return &tex->b.b;

fail:
   if (tex->defined)
      FREE(tex->defined);
   if (tex->rendered_to)
      FREE(tex->rendered_to);
   if (tex->dirty)
      FREE(tex->dirty);
   FREE(tex);
   return NULL;
}
1105
1106 boolean
1107 svga_texture_generate_mipmap(struct pipe_context *pipe,
1108 struct pipe_resource *pt,
1109 enum pipe_format format,
1110 unsigned base_level,
1111 unsigned last_level,
1112 unsigned first_layer,
1113 unsigned last_layer)
1114 {
1115 struct pipe_sampler_view templ, *psv;
1116 struct svga_pipe_sampler_view *sv;
1117 struct svga_context *svga = svga_context(pipe);
1118 struct svga_texture *tex = svga_texture(pt);
1119 enum pipe_error ret;
1120
1121 assert(svga_have_vgpu10(svga));
1122
1123 /* Only support 2D texture for now */
1124 if (pt->target != PIPE_TEXTURE_2D)
1125 return FALSE;
1126
1127 /* Fallback to the mipmap generation utility for those formats that
1128 * do not support hw generate mipmap
1129 */
1130 if (!svga_format_support_gen_mips(format))
1131 return FALSE;
1132
1133 /* Make sure the texture surface was created with
1134 * SVGA3D_SURFACE_BIND_RENDER_TARGET
1135 */
1136 if (!tex->handle || !(tex->key.flags & SVGA3D_SURFACE_BIND_RENDER_TARGET))
1137 return FALSE;
1138
1139 templ.format = format;
1140 templ.u.tex.first_layer = first_layer;
1141 templ.u.tex.last_layer = last_layer;
1142 templ.u.tex.first_level = base_level;
1143 templ.u.tex.last_level = last_level;
1144
1145 psv = pipe->create_sampler_view(pipe, pt, &templ);
1146 if (psv == NULL)
1147 return FALSE;
1148
1149 sv = svga_pipe_sampler_view(psv);
1150 ret = svga_validate_pipe_sampler_view(svga, sv);
1151 if (ret != PIPE_OK) {
1152 svga_context_flush(svga, NULL);
1153 ret = svga_validate_pipe_sampler_view(svga, sv);
1154 assert(ret == PIPE_OK);
1155 }
1156
1157 ret = SVGA3D_vgpu10_GenMips(svga->swc, sv->id, tex->handle);
1158 if (ret != PIPE_OK) {
1159 svga_context_flush(svga, NULL);
1160 ret = SVGA3D_vgpu10_GenMips(svga->swc, sv->id, tex->handle);
1161 }
1162 pipe_sampler_view_reference(&psv, NULL);
1163
1164 svga->hud.num_generate_mipmap++;
1165
1166 return TRUE;
1167 }