gallium: split transfer_inline_write into buffer and texture callbacks
[mesa.git] / src / gallium / auxiliary / util / u_inlines.h
/**************************************************************************
 *
 * Copyright 2007 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef U_INLINES_H
#define U_INLINES_H

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_shader_tokens.h"
#include "pipe/p_state.h"
#include "pipe/p_screen.h"
#include "util/u_debug.h"
#include "util/u_debug_describe.h"
#include "util/u_debug_refcnt.h"
#include "util/u_atomic.h"
#include "util/u_box.h"
#include "util/u_math.h"


#ifdef __cplusplus
extern "C" {
#endif


/*
 * Reference counting helper functions.
 */

static inline void
pipe_reference_init(struct pipe_reference *reference, unsigned count)
{
   p_atomic_set(&reference->count, count);
}

static inline boolean
pipe_is_referenced(struct pipe_reference *reference)
{
   return p_atomic_read(&reference->count) != 0;
}

/**
 * Update reference counting.
 * The old thing pointed to, if any, will be unreferenced.
 * Both 'ptr' and 'reference' may be NULL.
 * \return TRUE if the object's refcount hits zero and should be destroyed.
 */
static inline boolean
pipe_reference_described(struct pipe_reference *ptr,
                         struct pipe_reference *reference,
                         debug_reference_descriptor get_desc)
{
   boolean destroy = FALSE;

   if (ptr != reference) {
      /* bump the reference.count first */
      if (reference) {
         assert(pipe_is_referenced(reference));
         p_atomic_inc(&reference->count);
         debug_reference(reference, get_desc, 1);
      }

      if (ptr) {
         assert(pipe_is_referenced(ptr));
         if (p_atomic_dec_zero(&ptr->count)) {
            destroy = TRUE;
         }
         debug_reference(ptr, get_desc, -1);
      }
   }

   return destroy;
}

static inline boolean
pipe_reference(struct pipe_reference *ptr, struct pipe_reference *reference)
{
   return pipe_reference_described(ptr, reference,
                                   (debug_reference_descriptor)debug_describe_reference);
}
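
/*
 * Usage sketch (illustrative, not part of the original header): a
 * driver-private object can reuse pipe_reference() for its own
 * refcounting, mirroring the typed helpers below. 'my_object' and
 * 'my_object_destroy' are hypothetical names. Like the helpers below,
 * this relies on 'reference' being the first member, so '&old->reference'
 * is still NULL when 'old' is NULL.
 *
 *    struct my_object {
 *       struct pipe_reference reference;
 *    };
 *
 *    static void
 *    my_object_reference(struct my_object **ptr, struct my_object *obj)
 *    {
 *       struct my_object *old = *ptr;
 *
 *       if (pipe_reference(&old->reference, &obj->reference))
 *          my_object_destroy(old);
 *       *ptr = obj;
 *    }
 */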

static inline void
pipe_surface_reference(struct pipe_surface **ptr, struct pipe_surface *surf)
{
   struct pipe_surface *old_surf = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &surf->reference,
                                (debug_reference_descriptor)debug_describe_surface))
      old_surf->context->surface_destroy(old_surf->context, old_surf);
   *ptr = surf;
}

/**
 * Similar to pipe_surface_reference() but always sets the pointer to NULL
 * and takes an explicit context. The explicit context avoids the problem
 * of using a deleted context's surface_destroy() method when freeing a
 * surface that's shared by multiple contexts.
 */
static inline void
pipe_surface_release(struct pipe_context *pipe, struct pipe_surface **ptr)
{
   if (pipe_reference_described(&(*ptr)->reference, NULL,
                                (debug_reference_descriptor)debug_describe_surface))
      pipe->surface_destroy(pipe, *ptr);
   *ptr = NULL;
}


static inline void
pipe_resource_reference(struct pipe_resource **ptr, struct pipe_resource *tex)
{
   struct pipe_resource *old_tex = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &tex->reference,
                                (debug_reference_descriptor)debug_describe_resource))
      old_tex->screen->resource_destroy(old_tex->screen, old_tex);
   *ptr = tex;
}
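
/*
 * Usage sketch (illustrative): holding and releasing a resource via
 * pipe_resource_reference(). 'state' and 'new_tex' are hypothetical;
 * the pointer must start out NULL-initialized.
 *
 *    pipe_resource_reference(&state->tex, new_tex);   // take a reference
 *    ...
 *    pipe_resource_reference(&state->tex, NULL);      // drop it again
 */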

static inline void
pipe_sampler_view_reference(struct pipe_sampler_view **ptr,
                            struct pipe_sampler_view *view)
{
   struct pipe_sampler_view *old_view = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &view->reference,
                                (debug_reference_descriptor)debug_describe_sampler_view))
      old_view->context->sampler_view_destroy(old_view->context, old_view);
   *ptr = view;
}

/**
 * Similar to pipe_sampler_view_reference() but always sets the pointer to
 * NULL and takes an explicit context. Passing an explicit context is a
 * work-around for a dangling context pointer problem when textures are
 * shared by multiple contexts. XXX fix this someday.
 */
static inline void
pipe_sampler_view_release(struct pipe_context *ctx,
                          struct pipe_sampler_view **ptr)
{
   struct pipe_sampler_view *old_view = *ptr;
   if (*ptr && (*ptr)->context != ctx) {
      debug_printf_once(("context mis-match in pipe_sampler_view_release()\n"));
   }
   if (pipe_reference_described(&(*ptr)->reference, NULL,
                                (debug_reference_descriptor)debug_describe_sampler_view)) {
      ctx->sampler_view_destroy(ctx, old_view);
   }
   *ptr = NULL;
}

static inline void
pipe_so_target_reference(struct pipe_stream_output_target **ptr,
                         struct pipe_stream_output_target *target)
{
   struct pipe_stream_output_target *old = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &target->reference,
                                (debug_reference_descriptor)debug_describe_so_target))
      old->context->stream_output_target_destroy(old->context, old);
   *ptr = target;
}

static inline void
pipe_surface_reset(struct pipe_context *ctx, struct pipe_surface* ps,
                   struct pipe_resource *pt, unsigned level, unsigned layer)
{
   pipe_resource_reference(&ps->texture, pt);
   ps->format = pt->format;
   ps->width = u_minify(pt->width0, level);
   ps->height = u_minify(pt->height0, level);
   ps->u.tex.level = level;
   ps->u.tex.first_layer = ps->u.tex.last_layer = layer;
   ps->context = ctx;
}

static inline void
pipe_surface_init(struct pipe_context *ctx, struct pipe_surface* ps,
                  struct pipe_resource *pt, unsigned level, unsigned layer)
{
   ps->texture = NULL;
   pipe_reference_init(&ps->reference, 1);
   pipe_surface_reset(ctx, ps, pt, level, layer);
}

/* Return true if the surfaces are equal. */
static inline boolean
pipe_surface_equal(struct pipe_surface *s1, struct pipe_surface *s2)
{
   return s1->texture == s2->texture &&
          s1->format == s2->format &&
          (s1->texture->target != PIPE_BUFFER ||
           (s1->u.buf.first_element == s2->u.buf.first_element &&
            s1->u.buf.last_element == s2->u.buf.last_element)) &&
          (s1->texture->target == PIPE_BUFFER ||
           (s1->u.tex.level == s2->u.tex.level &&
            s1->u.tex.first_layer == s2->u.tex.first_layer &&
            s1->u.tex.last_layer == s2->u.tex.last_layer));
}

/*
 * Convenience wrappers for screen buffer functions.
 */


/**
 * Create a new resource.
 * \param bind  bitmask of PIPE_BIND_x flags
 * \param usage  a PIPE_USAGE_x value
 */
static inline struct pipe_resource *
pipe_buffer_create(struct pipe_screen *screen,
                   unsigned bind,
                   enum pipe_resource_usage usage,
                   unsigned size)
{
   struct pipe_resource buffer;
   memset(&buffer, 0, sizeof buffer);
   buffer.target = PIPE_BUFFER;
   buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
   buffer.bind = bind;
   buffer.usage = usage;
   buffer.flags = 0;
   buffer.width0 = size;
   buffer.height0 = 1;
   buffer.depth0 = 1;
   buffer.array_size = 1;
   return screen->resource_create(screen, &buffer);
}
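
/*
 * Example (illustrative): creating a 64 KB stream-usage vertex buffer.
 * The bind/usage values are just one plausible combination.
 *
 *    struct pipe_resource *vbuf =
 *       pipe_buffer_create(screen, PIPE_BIND_VERTEX_BUFFER,
 *                          PIPE_USAGE_STREAM, 64 * 1024);
 */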


/**
 * Map a range of a resource.
 * \param offset  start of region, in bytes
 * \param length  size of region, in bytes
 * \param access  bitmask of PIPE_TRANSFER_x flags
 * \param transfer  returns a transfer object
 */
static inline void *
pipe_buffer_map_range(struct pipe_context *pipe,
                      struct pipe_resource *buffer,
                      unsigned offset,
                      unsigned length,
                      unsigned access,
                      struct pipe_transfer **transfer)
{
   struct pipe_box box;

   assert(offset < buffer->width0);
   assert(offset + length <= buffer->width0);
   assert(length);

   u_box_1d(offset, length, &box);

   return pipe->transfer_map(pipe, buffer, 0, access, &box, transfer);
}
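
/*
 * Usage sketch (illustrative): write 'size' bytes at 'offset' through a
 * mapped range, then unmap. 'pipe', 'buf', 'data', 'offset' and 'size'
 * are assumed to exist in the caller.
 *
 *    struct pipe_transfer *xfer;
 *    void *map = pipe_buffer_map_range(pipe, buf, offset, size,
 *                                      PIPE_TRANSFER_WRITE, &xfer);
 *    if (map) {
 *       memcpy(map, data, size);
 *       pipe_buffer_unmap(pipe, xfer);
 *    }
 */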


/**
 * Map whole resource.
 * \param access  bitmask of PIPE_TRANSFER_x flags
 * \param transfer  returns a transfer object
 */
static inline void *
pipe_buffer_map(struct pipe_context *pipe,
                struct pipe_resource *buffer,
                unsigned access,
                struct pipe_transfer **transfer)
{
   return pipe_buffer_map_range(pipe, buffer, 0, buffer->width0,
                                access, transfer);
}


static inline void
pipe_buffer_unmap(struct pipe_context *pipe,
                  struct pipe_transfer *transfer)
{
   pipe->transfer_unmap(pipe, transfer);
}

static inline void
pipe_buffer_flush_mapped_range(struct pipe_context *pipe,
                               struct pipe_transfer *transfer,
                               unsigned offset,
                               unsigned length)
{
   struct pipe_box box;
   int transfer_offset;

   assert(length);
   assert(transfer->box.x <= (int) offset);
   assert((int) (offset + length) <= transfer->box.x + transfer->box.width);

   /* Match old screen->buffer_flush_mapped_range() behaviour, where the
    * offset parameter is relative to the start of the buffer, not the
    * mapped range.
    */
   transfer_offset = offset - transfer->box.x;

   u_box_1d(transfer_offset, length, &box);

   pipe->transfer_flush_region(pipe, transfer, &box);
}
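
/*
 * Usage sketch (illustrative): with PIPE_TRANSFER_FLUSH_EXPLICIT, only
 * explicitly flushed ranges are guaranteed to reach the buffer. Note the
 * offset passed here is relative to the start of the buffer, not to the
 * start of the mapped range.
 *
 *    struct pipe_transfer *xfer;
 *    void *map = pipe_buffer_map(pipe, buf,
 *                                PIPE_TRANSFER_WRITE |
 *                                PIPE_TRANSFER_FLUSH_EXPLICIT, &xfer);
 *    if (map) {
 *       ... write bytes [0, 256) ...
 *       pipe_buffer_flush_mapped_range(pipe, xfer, 0, 256);
 *       pipe_buffer_unmap(pipe, xfer);
 *    }
 */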

static inline void
pipe_buffer_write(struct pipe_context *pipe,
                  struct pipe_resource *buf,
                  unsigned offset,
                  unsigned size,
                  const void *data)
{
   unsigned access = PIPE_TRANSFER_WRITE;

   if (offset == 0 && size == buf->width0) {
      access |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
   } else {
      access |= PIPE_TRANSFER_DISCARD_RANGE;
   }

   pipe->buffer_subdata(pipe, buf, access, offset, size, data);
}

/**
 * Special case for writing non-overlapping ranges.
 *
 * We can avoid GPU/CPU synchronization when writing a range that has never
 * been written before.
 */
static inline void
pipe_buffer_write_nooverlap(struct pipe_context *pipe,
                            struct pipe_resource *buf,
                            unsigned offset, unsigned size,
                            const void *data)
{
   pipe->buffer_subdata(pipe, buf,
                        (PIPE_TRANSFER_WRITE |
                         PIPE_TRANSFER_UNSYNCHRONIZED),
                        offset, size, data);
}


/**
 * Create a new resource and immediately put data into it.
 * \param bind  bitmask of PIPE_BIND_x flags
 * \param usage  a PIPE_USAGE_x value
 */
static inline struct pipe_resource *
pipe_buffer_create_with_data(struct pipe_context *pipe,
                             unsigned bind,
                             enum pipe_resource_usage usage,
                             unsigned size,
                             const void *ptr)
{
   struct pipe_resource *res = pipe_buffer_create(pipe->screen,
                                                  bind, usage, size);
   pipe_buffer_write_nooverlap(pipe, res, 0, size, ptr);
   return res;
}
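
/*
 * Example (illustrative): uploading an immutable index buffer in one
 * call. 'indices' and 'num_bytes' are assumed; the bind/usage values are
 * one plausible choice.
 *
 *    struct pipe_resource *ibuf =
 *       pipe_buffer_create_with_data(pipe, PIPE_BIND_INDEX_BUFFER,
 *                                    PIPE_USAGE_IMMUTABLE, num_bytes,
 *                                    indices);
 */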

static inline void
pipe_buffer_read(struct pipe_context *pipe,
                 struct pipe_resource *buf,
                 unsigned offset,
                 unsigned size,
                 void *data)
{
   struct pipe_transfer *src_transfer;
   ubyte *map;

   map = (ubyte *) pipe_buffer_map_range(pipe,
                                         buf,
                                         offset, size,
                                         PIPE_TRANSFER_READ,
                                         &src_transfer);
   if (!map)
      return;

   memcpy(data, map, size);
   pipe_buffer_unmap(pipe, src_transfer);
}


/**
 * Map a resource for reading/writing.
 * \param access  bitmask of PIPE_TRANSFER_x flags
 */
static inline void *
pipe_transfer_map(struct pipe_context *context,
                  struct pipe_resource *resource,
                  unsigned level, unsigned layer,
                  unsigned access,
                  unsigned x, unsigned y,
                  unsigned w, unsigned h,
                  struct pipe_transfer **transfer)
{
   struct pipe_box box;
   u_box_2d_zslice(x, y, layer, w, h, &box);
   return context->transfer_map(context,
                                resource,
                                level,
                                access,
                                &box, transfer);
}
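
/*
 * Usage sketch (illustrative): read back level 0, layer 0 of a 2D
 * texture. 'tex', 'width' and 'height' are assumed; rows in the map are
 * 'xfer->stride' bytes apart.
 *
 *    struct pipe_transfer *xfer;
 *    ubyte *map = (ubyte *) pipe_transfer_map(context, tex, 0, 0,
 *                                             PIPE_TRANSFER_READ,
 *                                             0, 0, width, height, &xfer);
 *    if (map) {
 *       ... read row i at map + i * xfer->stride ...
 *       pipe_transfer_unmap(context, xfer);
 *    }
 */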


/**
 * Map a 3D (texture) resource for reading/writing.
 * \param access  bitmask of PIPE_TRANSFER_x flags
 */
static inline void *
pipe_transfer_map_3d(struct pipe_context *context,
                     struct pipe_resource *resource,
                     unsigned level,
                     unsigned access,
                     unsigned x, unsigned y, unsigned z,
                     unsigned w, unsigned h, unsigned d,
                     struct pipe_transfer **transfer)
{
   struct pipe_box box;
   u_box_3d(x, y, z, w, h, d, &box);
   return context->transfer_map(context,
                                resource,
                                level,
                                access,
                                &box, transfer);
}

static inline void
pipe_transfer_unmap(struct pipe_context *context,
                    struct pipe_transfer *transfer)
{
   context->transfer_unmap(context, transfer);
}

static inline void
pipe_set_constant_buffer(struct pipe_context *pipe, uint shader, uint index,
                         struct pipe_resource *buf)
{
   if (buf) {
      struct pipe_constant_buffer cb;
      cb.buffer = buf;
      cb.buffer_offset = 0;
      cb.buffer_size = buf->width0;
      cb.user_buffer = NULL;
      pipe->set_constant_buffer(pipe, shader, index, &cb);
   } else {
      pipe->set_constant_buffer(pipe, shader, index, NULL);
   }
}
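
/*
 * Usage sketch (illustrative): bind 'constbuf' to fragment-shader slot 0,
 * then unbind it again. 'pipe' and 'constbuf' are assumed.
 *
 *    pipe_set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, constbuf);
 *    ...
 *    pipe_set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, NULL);
 */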


/**
 * Get the polygon offset enable/disable flag for the given polygon fill mode.
 * \param fill_mode  one of PIPE_POLYGON_MODE_POINT/LINE/FILL
 */
static inline boolean
util_get_offset(const struct pipe_rasterizer_state *templ,
                unsigned fill_mode)
{
   switch (fill_mode) {
   case PIPE_POLYGON_MODE_POINT:
      return templ->offset_point;
   case PIPE_POLYGON_MODE_LINE:
      return templ->offset_line;
   case PIPE_POLYGON_MODE_FILL:
      return templ->offset_tri;
   default:
      assert(0);
      return FALSE;
   }
}

static inline float
util_get_min_point_size(const struct pipe_rasterizer_state *state)
{
   /* The point size should be clamped to this value at the rasterizer
    * stage.
    */
   return !state->point_quad_rasterization &&
          !state->point_smooth &&
          !state->multisample ? 1.0f : 0.0f;
}

static inline void
util_query_clear_result(union pipe_query_result *result, unsigned type)
{
   switch (type) {
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
   case PIPE_QUERY_GPU_FINISHED:
      result->b = FALSE;
      break;
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_TIMESTAMP:
   case PIPE_QUERY_TIME_ELAPSED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      result->u64 = 0;
      break;
   case PIPE_QUERY_SO_STATISTICS:
      memset(&result->so_statistics, 0, sizeof(result->so_statistics));
      break;
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      memset(&result->timestamp_disjoint, 0,
             sizeof(result->timestamp_disjoint));
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      memset(&result->pipeline_statistics, 0,
             sizeof(result->pipeline_statistics));
      break;
   default:
      memset(result, 0, sizeof(*result));
   }
}

/** Convert PIPE_TEXTURE_x to TGSI_TEXTURE_x */
static inline unsigned
util_pipe_tex_to_tgsi_tex(enum pipe_texture_target pipe_tex_target,
                          unsigned nr_samples)
{
   switch (pipe_tex_target) {
   case PIPE_BUFFER:
      return TGSI_TEXTURE_BUFFER;

   case PIPE_TEXTURE_1D:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_1D;

   case PIPE_TEXTURE_2D:
      return nr_samples > 1 ? TGSI_TEXTURE_2D_MSAA : TGSI_TEXTURE_2D;

   case PIPE_TEXTURE_RECT:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_RECT;

   case PIPE_TEXTURE_3D:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_3D;

   case PIPE_TEXTURE_CUBE:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_CUBE;

   case PIPE_TEXTURE_1D_ARRAY:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_1D_ARRAY;

   case PIPE_TEXTURE_2D_ARRAY:
      return nr_samples > 1 ? TGSI_TEXTURE_2D_ARRAY_MSAA :
                              TGSI_TEXTURE_2D_ARRAY;

   case PIPE_TEXTURE_CUBE_ARRAY:
      return TGSI_TEXTURE_CUBE_ARRAY;

   default:
      assert(0 && "unexpected texture target");
      return TGSI_TEXTURE_UNKNOWN;
   }
}


static inline void
util_copy_constant_buffer(struct pipe_constant_buffer *dst,
                          const struct pipe_constant_buffer *src)
{
   if (src) {
      pipe_resource_reference(&dst->buffer, src->buffer);
      dst->buffer_offset = src->buffer_offset;
      dst->buffer_size = src->buffer_size;
      dst->user_buffer = src->user_buffer;
   }
   else {
      pipe_resource_reference(&dst->buffer, NULL);
      dst->buffer_offset = 0;
      dst->buffer_size = 0;
      dst->user_buffer = NULL;
   }
}

static inline void
util_copy_image_view(struct pipe_image_view *dst,
                     const struct pipe_image_view *src)
{
   if (src) {
      pipe_resource_reference(&dst->resource, src->resource);
      dst->format = src->format;
      dst->access = src->access;
      dst->u = src->u;
   } else {
      pipe_resource_reference(&dst->resource, NULL);
      dst->format = PIPE_FORMAT_NONE;
      dst->access = 0;
      memset(&dst->u, 0, sizeof(dst->u));
   }
}

static inline unsigned
util_max_layer(const struct pipe_resource *r, unsigned level)
{
   switch (r->target) {
   case PIPE_TEXTURE_3D:
      return u_minify(r->depth0, level) - 1;
   case PIPE_TEXTURE_CUBE:
      assert(r->array_size == 6);
      /* fall-through */
   case PIPE_TEXTURE_1D_ARRAY:
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_CUBE_ARRAY:
      return r->array_size - 1;
   default:
      return 0;
   }
}

static inline bool
util_texrange_covers_whole_level(const struct pipe_resource *tex,
                                 unsigned level, unsigned x, unsigned y,
                                 unsigned z, unsigned width,
                                 unsigned height, unsigned depth)
{
   return x == 0 && y == 0 && z == 0 &&
          width == u_minify(tex->width0, level) &&
          height == u_minify(tex->height0, level) &&
          depth == util_max_layer(tex, level) + 1;
}
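
/*
 * Usage sketch (illustrative): a driver might use this to promote a write
 * transfer to a whole-resource discard. 'tex', 'level', 'box' and 'usage'
 * are assumed.
 *
 *    if (util_texrange_covers_whole_level(tex, level,
 *                                         box->x, box->y, box->z,
 *                                         box->width, box->height,
 *                                         box->depth))
 *       usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
 */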

#ifdef __cplusplus
}
#endif

#endif /* U_INLINES_H */