svga: silence some MSVC signed/unsigned comparison warnings
[mesa.git] / src / gallium / drivers / svga / svga_resource_texture.c
1 /**********************************************************
2 * Copyright 2008-2009 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26 #include "svga_cmd.h"
27
28 #include "pipe/p_state.h"
29 #include "pipe/p_defines.h"
30 #include "util/u_inlines.h"
31 #include "os/os_thread.h"
32 #include "util/u_format.h"
33 #include "util/u_math.h"
34 #include "util/u_memory.h"
35
36 #include "svga_format.h"
37 #include "svga_screen.h"
38 #include "svga_context.h"
39 #include "svga_resource_texture.h"
40 #include "svga_resource_buffer.h"
41 #include "svga_sampler_view.h"
42 #include "svga_winsys.h"
43 #include "svga_debug.h"
44
45
46 /* XXX: This isn't a real hardware flag, but just a hack for kernel to
47 * know about primary surfaces. Find a better way to accomplish this.
48 */
49 #define SVGA3D_SURFACE_HINT_SCANOUT (1 << 9)
50
51
52 static INLINE void
53 svga_transfer_dma_band(struct svga_context *svga,
54 struct svga_transfer *st,
55 SVGA3dTransferType transfer,
56 unsigned y, unsigned h, unsigned srcy,
57 SVGA3dSurfaceDMAFlags flags)
58 {
59 struct svga_texture *texture = svga_texture(st->base.resource);
60 SVGA3dCopyBox box;
61 enum pipe_error ret;
62
63 box.x = st->base.box.x;
64 box.y = y;
65 box.z = st->base.box.z;
66 box.w = st->base.box.width;
67 box.h = h;
68 box.d = 1;
69 box.srcx = 0;
70 box.srcy = srcy;
71 box.srcz = 0;
72
73 if (st->base.resource->target == PIPE_TEXTURE_CUBE) {
74 st->face = st->base.box.z;
75 box.z = 0;
76 }
77 else
78 st->face = 0;
79
80 SVGA_DBG(DEBUG_DMA, "dma %s sid %p, face %u, (%u, %u, %u) - (%u, %u, %u), %ubpp\n",
81 transfer == SVGA3D_WRITE_HOST_VRAM ? "to" : "from",
82 texture->handle,
83 st->face,
84 st->base.box.x,
85 y,
86 box.z,
87 st->base.box.x + st->base.box.width,
88 y + h,
89 box.z + 1,
90 util_format_get_blocksize(texture->b.b.format) * 8 /
91 (util_format_get_blockwidth(texture->b.b.format)*util_format_get_blockheight(texture->b.b.format)));
92
93 ret = SVGA3D_SurfaceDMA(svga->swc, st, transfer, &box, 1, flags);
94 if(ret != PIPE_OK) {
95 svga_context_flush(svga, NULL);
96 ret = SVGA3D_SurfaceDMA(svga->swc, st, transfer, &box, 1, flags);
97 assert(ret == PIPE_OK);
98 }
99 }
100
101
/**
 * Perform a DMA transfer between the host surface and the transfer's
 * guest-side storage.
 *
 * When st->swbuf is NULL the hardware buffer holds the whole image and a
 * single DMA is issued.  Otherwise the image lives in malloc'ed memory
 * (st->swbuf) and is staged through the smaller hardware buffer in
 * horizontal bands of st->hw_nblocksy block rows.
 */
static INLINE void
svga_transfer_dma(struct svga_context *svga,
                  struct svga_transfer *st,
                  SVGA3dTransferType transfer,
                  SVGA3dSurfaceDMAFlags flags)
{
   struct svga_texture *texture = svga_texture(st->base.resource);
   struct svga_screen *screen = svga_screen(texture->b.b.screen);
   struct svga_winsys_screen *sws = screen->sws;
   struct pipe_fence_handle *fence = NULL;

   if (transfer == SVGA3D_READ_HOST_VRAM) {
      SVGA_DBG(DEBUG_PERF, "%s: readback transfer\n", __FUNCTION__);
   }

   /* Ensure any pending operations on host surfaces are queued on the command
    * buffer first.
    */
   svga_surfaces_flush( svga );

   if(!st->swbuf) {
      /* Do the DMA transfer in a single go */

      svga_transfer_dma_band(svga, st, transfer,
                             st->base.box.y, st->base.box.height, 0,
                             flags);

      if(transfer == SVGA3D_READ_HOST_VRAM) {
         /* Readbacks must land before the caller reads the mapping:
          * flush the command buffer and wait on the fence.
          */
         svga_context_flush(svga, &fence);
         sws->fence_finish(sws, fence, 0);
         sws->fence_reference(sws, &fence, NULL);
      }
   }
   else {
      /* Staged path: copy band by band through st->hwbuf. */
      int y, h, srcy;
      unsigned blockheight = util_format_get_blockheight(st->base.resource->format);
      h = st->hw_nblocksy * blockheight;
      srcy = 0;
      for(y = 0; y < st->base.box.height; y += h) {
         unsigned offset, length;
         void *hw, *sw;

         /* Clamp the final band to the remaining height. */
         if (y + h > st->base.box.height)
            h = st->base.box.height - y;

         /* Transfer band must be aligned to pixel block boundaries */
         assert(y % blockheight == 0);
         assert(h % blockheight == 0);

         /* Byte offset/length of this band within the malloc'ed buffer. */
         offset = y * st->base.stride / blockheight;
         length = h * st->base.stride / blockheight;

         sw = (uint8_t *)st->swbuf + offset;

         if (transfer == SVGA3D_WRITE_HOST_VRAM) {
            unsigned usage = PIPE_TRANSFER_WRITE;

            /* Wait for the previous DMAs to complete */
            /* TODO: keep one DMA (at half the size) in the background */
            if (y) {
               svga_context_flush(svga, NULL);
               usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
            }

            /* Copy the band from malloc memory into the DMA buffer. */
            hw = sws->buffer_map(sws, st->hwbuf, usage);
            assert(hw);
            if (hw) {
               memcpy(hw, sw, length);
               sws->buffer_unmap(sws, st->hwbuf);
            }
         }

         svga_transfer_dma_band(svga, st, transfer, y, h, srcy, flags);

         /*
          * Prevent the texture contents to be discarded on the next band
          * upload.
          */

         flags.discard = FALSE;

         if(transfer == SVGA3D_READ_HOST_VRAM) {
            /* Wait for this band's readback to reach the DMA buffer, then
             * copy it out into malloc memory.
             *
             * NOTE(review): unlike the single-go path above, the fence
             * reference is never released here — confirm whether
             * fence_finish/context_flush drop it, else this leaks.
             */
            svga_context_flush(svga, &fence);
            sws->fence_finish(sws, fence, 0);

            hw = sws->buffer_map(sws, st->hwbuf, PIPE_TRANSFER_READ);
            assert(hw);
            if(hw) {
               memcpy(sw, hw, length);
               sws->buffer_unmap(sws, st->hwbuf);
            }
         }
      }
   }
}
197
198
199 static boolean
200 svga_texture_get_handle(struct pipe_screen *screen,
201 struct pipe_resource *texture,
202 struct winsys_handle *whandle)
203 {
204 struct svga_winsys_screen *sws = svga_winsys_screen(texture->screen);
205 unsigned stride;
206
207 assert(svga_texture(texture)->key.cachable == 0);
208 svga_texture(texture)->key.cachable = 0;
209 stride = util_format_get_nblocksx(texture->format, texture->width0) *
210 util_format_get_blocksize(texture->format);
211 return sws->surface_get_handle(sws, svga_texture(texture)->handle, stride, whandle);
212 }
213
214
215 static void
216 svga_texture_destroy(struct pipe_screen *screen,
217 struct pipe_resource *pt)
218 {
219 struct svga_screen *ss = svga_screen(screen);
220 struct svga_texture *tex = (struct svga_texture *)pt;
221
222 ss->texture_timestamp++;
223
224 svga_sampler_view_reference(&tex->cached_view, NULL);
225
226 /*
227 DBG("%s deleting %p\n", __FUNCTION__, (void *) tex);
228 */
229 SVGA_DBG(DEBUG_DMA, "unref sid %p (texture)\n", tex->handle);
230 svga_screen_surface_destroy(ss, &tex->key, &tex->handle);
231
232 FREE(tex);
233 }
234
235
236 /* XXX: Still implementing this as if it was a screen function, but
237 * can now modify it to queue transfers on the context.
238 */
/**
 * Map a region of the texture for CPU access.
 *
 * SVGA texture storage can't be mapped directly; instead a guest DMA
 * buffer (st->hwbuf) is allocated to back the mapping.  If a buffer
 * covering the whole region can't be allocated, malloc memory
 * (st->swbuf) backs the mapping and the (smaller) DMA buffer is used
 * for banded staging.  For PIPE_TRANSFER_READ the current surface
 * contents are read back before returning.
 *
 * Returns the mapped pointer, or NULL on failure.
 */
static void *
svga_texture_transfer_map(struct pipe_context *pipe,
                          struct pipe_resource *texture,
                          unsigned level,
                          unsigned usage,
                          const struct pipe_box *box,
                          struct pipe_transfer **ptransfer)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_screen *ss = svga_screen(pipe->screen);
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_transfer *st;
   unsigned nblocksx = util_format_get_nblocksx(texture->format, box->width);
   unsigned nblocksy = util_format_get_nblocksy(texture->format, box->height);

   /* We can't map texture storage directly */
   if (usage & PIPE_TRANSFER_MAP_DIRECTLY)
      return NULL;

   /* Only single-slice (2D) transfers are handled here. */
   assert(box->depth == 1);
   st = CALLOC_STRUCT(svga_transfer);
   if (!st)
      return NULL;

   st->base.resource = texture;
   st->base.level = level;
   st->base.usage = usage;
   st->base.box = *box;
   st->base.stride = nblocksx*util_format_get_blocksize(texture->format);
   st->base.layer_stride = 0;

   st->hw_nblocksy = nblocksy;

   /* Try to allocate a DMA buffer covering the whole region; if that
    * fails, keep halving the height in block rows until one fits.
    */
   st->hwbuf = svga_winsys_buffer_create(svga,
                                         1,
                                         0,
                                         st->hw_nblocksy*st->base.stride);
   while(!st->hwbuf && (st->hw_nblocksy /= 2)) {
      st->hwbuf = svga_winsys_buffer_create(svga,
                                            1,
                                            0,
                                            st->hw_nblocksy*st->base.stride);
   }

   if(!st->hwbuf)
      goto no_hwbuf;

   if(st->hw_nblocksy < nblocksy) {
      /* We couldn't allocate a hardware buffer big enough for the transfer,
       * so allocate regular malloc memory instead */
      if (0) {
         debug_printf("%s: failed to allocate %u KB of DMA, "
                      "splitting into %u x %u KB DMA transfers\n",
                      __FUNCTION__,
                      (nblocksy*st->base.stride + 1023)/1024,
                      (nblocksy + st->hw_nblocksy - 1)/st->hw_nblocksy,
                      (st->hw_nblocksy*st->base.stride + 1023)/1024);
      }

      st->swbuf = MALLOC(nblocksy*st->base.stride);
      if(!st->swbuf)
         goto no_swbuf;
   }

   if (usage & PIPE_TRANSFER_READ) {
      /* Pull the current surface contents into the mapping so the
       * caller sees valid data; svga_transfer_dma waits for completion.
       */
      SVGA3dSurfaceDMAFlags flags;
      memset(&flags, 0, sizeof flags);
      svga_transfer_dma(svga, st, SVGA3D_READ_HOST_VRAM, flags);
   }

   if (st->swbuf) {
      *ptransfer = &st->base;
      return st->swbuf;
   } else {
      /* The wait for read transfers already happened when svga_transfer_dma
       * was called. */
      void *map = sws->buffer_map(sws, st->hwbuf, usage);
      if (!map)
         goto fail;

      *ptransfer = &st->base;
      return map;
   }

   /* Error cleanup: unwind in reverse allocation order. */
fail:
   FREE(st->swbuf);
no_swbuf:
   sws->buffer_destroy(sws, st->hwbuf);
no_hwbuf:
   FREE(st);
   return NULL;
}
331
332
333 /* XXX: Still implementing this as if it was a screen function, but
334 * can now modify it to queue transfers on the context.
335 */
/**
 * End a transfer: unmap the DMA buffer, and for write transfers DMA the
 * data up to the host surface and invalidate any cached views.
 */
static void
svga_texture_transfer_unmap(struct pipe_context *pipe,
                            struct pipe_transfer *transfer)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_screen *ss = svga_screen(pipe->screen);
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_transfer *st = svga_transfer(transfer);
   struct svga_texture *tex = svga_texture(transfer->resource);

   /* Hardware-buffer mappings must be unmapped before the DMA below;
    * swbuf transfers were never mapped through the winsys.
    */
   if(!st->swbuf)
      sws->buffer_unmap(sws, st->hwbuf);

   if (st->base.usage & PIPE_TRANSFER_WRITE) {
      SVGA3dSurfaceDMAFlags flags;

      memset(&flags, 0, sizeof flags);
      if (transfer->usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
         flags.discard = TRUE;
      }
      if (transfer->usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
         flags.unsynchronized = TRUE;
      }

      /* Upload the written data to the host surface. */
      svga_transfer_dma(svga, st, SVGA3D_WRITE_HOST_VRAM, flags);

      /* Age bookkeeping so dependent sampler views get refreshed. */
      ss->texture_timestamp++;
      tex->view_age[transfer->level] = ++(tex->age);
      if (transfer->resource->target == PIPE_TEXTURE_CUBE)
         tex->defined[transfer->box.z][transfer->level] = TRUE;
      else
         tex->defined[0][transfer->level] = TRUE;
   }

   FREE(st->swbuf);
   sws->buffer_destroy(sws, st->hwbuf);
   FREE(st);
}
373
374
/**
 * Dispatch table hooking the svga texture implementation into the
 * gallium u_resource framework.
 */
struct u_resource_vtbl svga_texture_vtbl =
{
   svga_texture_get_handle,	      /* get_handle */
   svga_texture_destroy,	      /* resource_destroy */
   svga_texture_transfer_map,	      /* transfer_map */
   u_default_transfer_flush_region,   /* transfer_flush_region */
   svga_texture_transfer_unmap,	      /* transfer_unmap */
   u_default_transfer_inline_write    /* transfer_inline_write */
};
384
385
386 struct pipe_resource *
387 svga_texture_create(struct pipe_screen *screen,
388 const struct pipe_resource *template)
389 {
390 struct svga_screen *svgascreen = svga_screen(screen);
391 struct svga_texture *tex = CALLOC_STRUCT(svga_texture);
392
393 if (!tex)
394 goto error1;
395
396 tex->b.b = *template;
397 tex->b.vtbl = &svga_texture_vtbl;
398 pipe_reference_init(&tex->b.b.reference, 1);
399 tex->b.b.screen = screen;
400
401 assert(template->last_level < SVGA_MAX_TEXTURE_LEVELS);
402 if(template->last_level >= SVGA_MAX_TEXTURE_LEVELS)
403 goto error2;
404
405 tex->key.flags = 0;
406 tex->key.size.width = template->width0;
407 tex->key.size.height = template->height0;
408 tex->key.size.depth = template->depth0;
409
410 if(template->target == PIPE_TEXTURE_CUBE) {
411 tex->key.flags |= SVGA3D_SURFACE_CUBEMAP;
412 tex->key.numFaces = 6;
413 }
414 else {
415 tex->key.numFaces = 1;
416 }
417
418 tex->key.cachable = 1;
419
420 if (template->bind & PIPE_BIND_SAMPLER_VIEW)
421 tex->key.flags |= SVGA3D_SURFACE_HINT_TEXTURE;
422
423 if (template->bind & PIPE_BIND_DISPLAY_TARGET) {
424 tex->key.cachable = 0;
425 }
426
427 if (template->bind & PIPE_BIND_SHARED) {
428 tex->key.cachable = 0;
429 }
430
431 if (template->bind & (PIPE_BIND_SCANOUT |
432 PIPE_BIND_CURSOR)) {
433 tex->key.flags |= SVGA3D_SURFACE_HINT_SCANOUT;
434 tex->key.cachable = 0;
435 }
436
437 /*
438 * Note: Previously we never passed the
439 * SVGA3D_SURFACE_HINT_RENDERTARGET hint. Mesa cannot
440 * know beforehand whether a texture will be used as a rendertarget or not
441 * and it always requests PIPE_BIND_RENDER_TARGET, therefore
442 * passing the SVGA3D_SURFACE_HINT_RENDERTARGET here defeats its purpose.
443 *
444 * However, this was changed since other state trackers
445 * (XA for example) uses it accurately and certain device versions
446 * relies on it in certain situations to render correctly.
447 */
448 if((template->bind & PIPE_BIND_RENDER_TARGET) &&
449 !util_format_is_s3tc(template->format))
450 tex->key.flags |= SVGA3D_SURFACE_HINT_RENDERTARGET;
451
452 if(template->bind & PIPE_BIND_DEPTH_STENCIL)
453 tex->key.flags |= SVGA3D_SURFACE_HINT_DEPTHSTENCIL;
454
455 tex->key.numMipLevels = template->last_level + 1;
456
457 tex->key.format = svga_translate_format(svgascreen, template->format, template->bind);
458 if(tex->key.format == SVGA3D_FORMAT_INVALID)
459 goto error2;
460
461 SVGA_DBG(DEBUG_DMA, "surface_create for texture\n", tex->handle);
462 tex->handle = svga_screen_surface_create(svgascreen, &tex->key);
463 if (tex->handle)
464 SVGA_DBG(DEBUG_DMA, " --> got sid %p (texture)\n", tex->handle);
465
466 debug_reference(&tex->b.b.reference,
467 (debug_reference_descriptor)debug_describe_resource, 0);
468
469 return &tex->b.b;
470
471 error2:
472 FREE(tex);
473 error1:
474 return NULL;
475 }
476
477
478 struct pipe_resource *
479 svga_texture_from_handle(struct pipe_screen *screen,
480 const struct pipe_resource *template,
481 struct winsys_handle *whandle)
482 {
483 struct svga_winsys_screen *sws = svga_winsys_screen(screen);
484 struct svga_winsys_surface *srf;
485 struct svga_texture *tex;
486 enum SVGA3dSurfaceFormat format = 0;
487 assert(screen);
488
489 /* Only supports one type */
490 if ((template->target != PIPE_TEXTURE_2D &&
491 template->target != PIPE_TEXTURE_RECT) ||
492 template->last_level != 0 ||
493 template->depth0 != 1) {
494 return NULL;
495 }
496
497 srf = sws->surface_from_handle(sws, whandle, &format);
498
499 if (!srf)
500 return NULL;
501
502 if (svga_translate_format(svga_screen(screen), template->format, template->bind) != format) {
503 unsigned f1 = svga_translate_format(svga_screen(screen), template->format, template->bind);
504 unsigned f2 = format;
505
506 /* It's okay for XRGB and ARGB or depth with/out stencil to get mixed up */
507 if ( !( (f1 == SVGA3D_X8R8G8B8 && f2 == SVGA3D_A8R8G8B8) ||
508 (f1 == SVGA3D_A8R8G8B8 && f2 == SVGA3D_X8R8G8B8) ||
509 (f1 == SVGA3D_Z_D24X8 && f2 == SVGA3D_Z_D24S8) ||
510 (f1 == SVGA3D_Z_DF24 && f2 == SVGA3D_Z_D24S8_INT) ) ) {
511 debug_printf("%s wrong format %u != %u\n", __FUNCTION__, f1, f2);
512 return NULL;
513 }
514 }
515
516 tex = CALLOC_STRUCT(svga_texture);
517 if (!tex)
518 return NULL;
519
520 tex->b.b = *template;
521 tex->b.vtbl = &svga_texture_vtbl;
522 pipe_reference_init(&tex->b.b.reference, 1);
523 tex->b.b.screen = screen;
524
525 SVGA_DBG(DEBUG_DMA, "wrap surface sid %p\n", srf);
526
527 tex->key.cachable = 0;
528 tex->handle = srf;
529
530 return &tex->b.b;
531 }