/**************************************************************************
 *
 * Copyright 2007 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef U_INLINES_H
#define U_INLINES_H

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_shader_tokens.h"
#include "pipe/p_state.h"
#include "pipe/p_screen.h"
#include "util/u_debug.h"
#include "util/u_debug_describe.h"
#include "util/u_debug_refcnt.h"
#include "util/u_atomic.h"
#include "util/u_box.h"
#include "util/u_math.h"


#ifdef __cplusplus
extern "C" {
#endif


/*
 * Reference counting helper functions.
 */


static inline void
pipe_reference_init(struct pipe_reference *dst, unsigned count)
{
   p_atomic_set(&dst->count, count);
}

static inline boolean
pipe_is_referenced(struct pipe_reference *src)
{
   return p_atomic_read(&src->count) != 0;
}

/**
 * Update reference counting.
 * The object previously pointed to, if any, will be unreferenced.
 * Both 'dst' and 'src' may be NULL.
 * \return TRUE if the object's refcount hits zero and it should be destroyed.
 */
static inline boolean
pipe_reference_described(struct pipe_reference *dst,
                         struct pipe_reference *src,
                         debug_reference_descriptor get_desc)
{
   if (dst != src) {
      /* bump the src.count first */
      if (src) {
         MAYBE_UNUSED int count = p_atomic_inc_return(&src->count);
         assert(count != 1); /* src had to be referenced */
         debug_reference(src, get_desc, 1);
      }

      if (dst) {
         int count = p_atomic_dec_return(&dst->count);
         assert(count != -1); /* dst had to be referenced */
         debug_reference(dst, get_desc, -1);
         if (!count)
            return true;
      }
   }

   return false;
}

static inline boolean
pipe_reference(struct pipe_reference *dst, struct pipe_reference *src)
{
   return pipe_reference_described(dst, src,
                                   (debug_reference_descriptor)
                                   debug_describe_reference);
}

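/*
 * Typical caller-side usage of the *_reference() wrappers below (an
 * illustrative sketch, not part of this header's API; "new_tex" and
 * "other_tex" are assumed resources).  Each wrapper retains the new
 * object and releases the old one, so callers never invoke the destroy
 * callbacks directly:
 *
 *    struct pipe_resource *tex = NULL;
 *    pipe_resource_reference(&tex, new_tex);   // tex = new_tex, +1 ref
 *    pipe_resource_reference(&tex, other_tex); // drop new_tex, ref other_tex
 *    pipe_resource_reference(&tex, NULL);      // drop other_tex, tex = NULL
 */
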
static inline void
pipe_surface_reference(struct pipe_surface **dst, struct pipe_surface *src)
{
   struct pipe_surface *old_dst = *dst;

   if (pipe_reference_described(&old_dst->reference, &src->reference,
                                (debug_reference_descriptor)
                                debug_describe_surface))
      old_dst->context->surface_destroy(old_dst->context, old_dst);
   *dst = src;
}

/**
 * Similar to pipe_surface_reference(), but always sets the pointer to NULL
 * and takes an explicit context.  The explicit context avoids the problem
 * of calling a deleted context's surface_destroy() method when freeing a
 * surface that's shared by multiple contexts.
 */
static inline void
pipe_surface_release(struct pipe_context *pipe, struct pipe_surface **ptr)
{
   struct pipe_surface *old = *ptr;

   if (pipe_reference_described(&old->reference, NULL,
                                (debug_reference_descriptor)
                                debug_describe_surface))
      pipe->surface_destroy(pipe, old);
   *ptr = NULL;
}

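/*
 * Example (an illustrative sketch; "cache" is an assumed structure holding
 * a surface that may be shared across contexts): release the surface
 * through whichever context is current, rather than the one that created
 * it.
 *
 *    pipe_surface_release(current_pipe, &cache->surf);
 */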

static inline void
pipe_resource_reference(struct pipe_resource **dst, struct pipe_resource *src)
{
   struct pipe_resource *old_dst = *dst;

   if (pipe_reference_described(&old_dst->reference, &src->reference,
                                (debug_reference_descriptor)
                                debug_describe_resource)) {
      /* Avoid recursion, which would prevent inlining this function */
      do {
         struct pipe_resource *next = old_dst->next;

         old_dst->screen->resource_destroy(old_dst->screen, old_dst);
         old_dst = next;
      } while (pipe_reference_described(&old_dst->reference, NULL,
                                        (debug_reference_descriptor)
                                        debug_describe_resource));
   }
   *dst = src;
}

/**
 * Same as pipe_surface_release(), but used when the pipe_context no longer
 * exists.
 */
static inline void
pipe_surface_release_no_context(struct pipe_surface **ptr)
{
   struct pipe_surface *surf = *ptr;

   if (pipe_reference_described(&surf->reference, NULL,
                                (debug_reference_descriptor)
                                debug_describe_surface)) {
      /* trivially destroy pipe_surface */
      pipe_resource_reference(&surf->texture, NULL);
      free(surf);
   }
   *ptr = NULL;
}

/**
 * Set *dst to \p src with proper reference counting.
 *
 * The caller must guarantee that \p src and *dst were created in the same
 * context (if they exist), and that this context is the current one.
 */
static inline void
pipe_sampler_view_reference(struct pipe_sampler_view **dst,
                            struct pipe_sampler_view *src)
{
   struct pipe_sampler_view *old_dst = *dst;

   if (pipe_reference_described(&old_dst->reference, &src->reference,
                                (debug_reference_descriptor)
                                debug_describe_sampler_view))
      old_dst->context->sampler_view_destroy(old_dst->context, old_dst);
   *dst = src;
}

static inline void
pipe_so_target_reference(struct pipe_stream_output_target **dst,
                         struct pipe_stream_output_target *src)
{
   struct pipe_stream_output_target *old_dst = *dst;

   if (pipe_reference_described(&old_dst->reference, &src->reference,
                                (debug_reference_descriptor)debug_describe_so_target))
      old_dst->context->stream_output_target_destroy(old_dst->context, old_dst);
   *dst = src;
}

static inline void
pipe_vertex_buffer_unreference(struct pipe_vertex_buffer *dst)
{
   if (dst->is_user_buffer)
      dst->buffer.user = NULL;
   else
      pipe_resource_reference(&dst->buffer.resource, NULL);
}

static inline void
pipe_vertex_buffer_reference(struct pipe_vertex_buffer *dst,
                             const struct pipe_vertex_buffer *src)
{
   pipe_vertex_buffer_unreference(dst);
   if (!src->is_user_buffer)
      pipe_resource_reference(&dst->buffer.resource, src->buffer.resource);
   memcpy(dst, src, sizeof(*src));
}

static inline void
pipe_surface_reset(struct pipe_context *ctx, struct pipe_surface* ps,
                   struct pipe_resource *pt, unsigned level, unsigned layer)
{
   pipe_resource_reference(&ps->texture, pt);
   ps->format = pt->format;
   ps->width = u_minify(pt->width0, level);
   ps->height = u_minify(pt->height0, level);
   ps->u.tex.level = level;
   ps->u.tex.first_layer = ps->u.tex.last_layer = layer;
   ps->context = ctx;
}

static inline void
pipe_surface_init(struct pipe_context *ctx, struct pipe_surface* ps,
                  struct pipe_resource *pt, unsigned level, unsigned layer)
{
   ps->texture = NULL;
   pipe_reference_init(&ps->reference, 1);
   pipe_surface_reset(ctx, ps, pt, level, layer);
}

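/*
 * Example (an illustrative sketch, assuming u_memory.h's CALLOC_STRUCT and
 * an existing texture "tex"): wrap level 1, layer 0 of the texture in a
 * freshly allocated surface.
 *
 *    struct pipe_surface *surf = CALLOC_STRUCT(pipe_surface);
 *    pipe_surface_init(ctx, surf, tex, 1, 0);
 */
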
/* Return true if the surfaces are equal. */
static inline boolean
pipe_surface_equal(struct pipe_surface *s1, struct pipe_surface *s2)
{
   return s1->texture == s2->texture &&
          s1->format == s2->format &&
          (s1->texture->target != PIPE_BUFFER ||
           (s1->u.buf.first_element == s2->u.buf.first_element &&
            s1->u.buf.last_element == s2->u.buf.last_element)) &&
          (s1->texture->target == PIPE_BUFFER ||
           (s1->u.tex.level == s2->u.tex.level &&
            s1->u.tex.first_layer == s2->u.tex.first_layer &&
            s1->u.tex.last_layer == s2->u.tex.last_layer));
}

/*
 * Convenience wrappers for screen buffer functions.
 */


/**
 * Create a new resource.
 * \param bind bitmask of PIPE_BIND_x flags
 * \param usage a PIPE_USAGE_x value
 */
static inline struct pipe_resource *
pipe_buffer_create(struct pipe_screen *screen,
                   unsigned bind,
                   enum pipe_resource_usage usage,
                   unsigned size)
{
   struct pipe_resource buffer;
   memset(&buffer, 0, sizeof buffer);
   buffer.target = PIPE_BUFFER;
   buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
   buffer.bind = bind;
   buffer.usage = usage;
   buffer.flags = 0;
   buffer.width0 = size;
   buffer.height0 = 1;
   buffer.depth0 = 1;
   buffer.array_size = 1;
   return screen->resource_create(screen, &buffer);
}

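/*
 * Example (an illustrative sketch; "screen" and the size are assumed):
 * create a small vertex buffer.
 *
 *    struct pipe_resource *vbuf =
 *       pipe_buffer_create(screen, PIPE_BIND_VERTEX_BUFFER,
 *                          PIPE_USAGE_DEFAULT, 3 * 4 * sizeof(float));
 */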

static inline struct pipe_resource *
pipe_buffer_create_const0(struct pipe_screen *screen,
                          unsigned bind,
                          enum pipe_resource_usage usage,
                          unsigned size)
{
   struct pipe_resource buffer;
   memset(&buffer, 0, sizeof buffer);
   buffer.target = PIPE_BUFFER;
   buffer.format = PIPE_FORMAT_R8_UNORM;
   buffer.bind = bind;
   buffer.usage = usage;
   buffer.flags = screen->get_param(screen, PIPE_CAP_CONSTBUF0_FLAGS);
   buffer.width0 = size;
   buffer.height0 = 1;
   buffer.depth0 = 1;
   buffer.array_size = 1;
   return screen->resource_create(screen, &buffer);
}


/**
 * Map a range of a resource.
 * \param offset start of region, in bytes
 * \param length size of region, in bytes
 * \param access bitmask of PIPE_TRANSFER_x flags
 * \param transfer returns a transfer object
 */
static inline void *
pipe_buffer_map_range(struct pipe_context *pipe,
                      struct pipe_resource *buffer,
                      unsigned offset,
                      unsigned length,
                      unsigned access,
                      struct pipe_transfer **transfer)
{
   struct pipe_box box;

   assert(offset < buffer->width0);
   assert(offset + length <= buffer->width0);
   assert(length);

   u_box_1d(offset, length, &box);

   return pipe->transfer_map(pipe, buffer, 0, access, &box, transfer);
}


/**
 * Map whole resource.
 * \param access bitmask of PIPE_TRANSFER_x flags
 * \param transfer returns a transfer object
 */
static inline void *
pipe_buffer_map(struct pipe_context *pipe,
                struct pipe_resource *buffer,
                unsigned access,
                struct pipe_transfer **transfer)
{
   return pipe_buffer_map_range(pipe, buffer, 0, buffer->width0,
                                access, transfer);
}


static inline void
pipe_buffer_unmap(struct pipe_context *pipe,
                  struct pipe_transfer *transfer)
{
   pipe->transfer_unmap(pipe, transfer);
}

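/*
 * Example (an illustrative sketch; "pipe", "buf" and the range are
 * assumed): a typical map/modify/unmap cycle over the first 64 bytes.
 *
 *    struct pipe_transfer *t;
 *    ubyte *p = (ubyte *) pipe_buffer_map_range(pipe, buf, 0, 64,
 *                                               PIPE_TRANSFER_WRITE, &t);
 *    if (p) {
 *       memset(p, 0, 64);
 *       pipe_buffer_unmap(pipe, t);
 *    }
 */
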
static inline void
pipe_buffer_flush_mapped_range(struct pipe_context *pipe,
                               struct pipe_transfer *transfer,
                               unsigned offset,
                               unsigned length)
{
   struct pipe_box box;
   int transfer_offset;

   assert(length);
   assert(transfer->box.x <= (int) offset);
   assert((int) (offset + length) <= transfer->box.x + transfer->box.width);

   /* Match the old screen->buffer_flush_mapped_range() behaviour, where
    * the offset parameter is relative to the start of the buffer, not to
    * the mapped range.
    */
   transfer_offset = offset - transfer->box.x;

   u_box_1d(transfer_offset, length, &box);

   pipe->transfer_flush_region(pipe, transfer, &box);
}

static inline void
pipe_buffer_write(struct pipe_context *pipe,
                  struct pipe_resource *buf,
                  unsigned offset,
                  unsigned size,
                  const void *data)
{
   /* Don't set any other usage bits. Drivers should derive them. */
   pipe->buffer_subdata(pipe, buf, PIPE_TRANSFER_WRITE, offset, size, data);
}

/**
 * Special case for writing non-overlapping ranges.
 *
 * We can avoid GPU/CPU synchronization when writing a range that has never
 * been written before.
 */
static inline void
pipe_buffer_write_nooverlap(struct pipe_context *pipe,
                            struct pipe_resource *buf,
                            unsigned offset, unsigned size,
                            const void *data)
{
   pipe->buffer_subdata(pipe, buf,
                        (PIPE_TRANSFER_WRITE |
                         PIPE_TRANSFER_UNSYNCHRONIZED),
                        offset, size, data);
}

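/*
 * Example (an illustrative sketch; the streaming state is assumed):
 * appending to a buffer where every write targets a fresh, never-written
 * range, so the driver can skip GPU/CPU synchronization.
 *
 *    pipe_buffer_write_nooverlap(pipe, buf, stream_offset, size, data);
 *    stream_offset += size;
 */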

/**
 * Create a new resource and immediately put data into it.
 * \param bind bitmask of PIPE_BIND_x flags
 * \param usage a PIPE_USAGE_x value
 */
static inline struct pipe_resource *
pipe_buffer_create_with_data(struct pipe_context *pipe,
                             unsigned bind,
                             enum pipe_resource_usage usage,
                             unsigned size,
                             const void *ptr)
{
   struct pipe_resource *res = pipe_buffer_create(pipe->screen,
                                                  bind, usage, size);
   pipe_buffer_write_nooverlap(pipe, res, 0, size, ptr);
   return res;
}

static inline void
pipe_buffer_read(struct pipe_context *pipe,
                 struct pipe_resource *buf,
                 unsigned offset,
                 unsigned size,
                 void *data)
{
   struct pipe_transfer *src_transfer;
   ubyte *map;

   map = (ubyte *) pipe_buffer_map_range(pipe,
                                         buf,
                                         offset, size,
                                         PIPE_TRANSFER_READ,
                                         &src_transfer);
   if (!map)
      return;

   memcpy(data, map, size);
   pipe_buffer_unmap(pipe, src_transfer);
}


/**
 * Map a resource for reading/writing.
 * \param access bitmask of PIPE_TRANSFER_x flags
 */
static inline void *
pipe_transfer_map(struct pipe_context *context,
                  struct pipe_resource *resource,
                  unsigned level, unsigned layer,
                  unsigned access,
                  unsigned x, unsigned y,
                  unsigned w, unsigned h,
                  struct pipe_transfer **transfer)
{
   struct pipe_box box;
   u_box_2d_zslice(x, y, layer, w, h, &box);
   return context->transfer_map(context,
                                resource,
                                level,
                                access,
                                &box, transfer);
}


/**
 * Map a 3D (texture) resource for reading/writing.
 * \param access bitmask of PIPE_TRANSFER_x flags
 */
static inline void *
pipe_transfer_map_3d(struct pipe_context *context,
                     struct pipe_resource *resource,
                     unsigned level,
                     unsigned access,
                     unsigned x, unsigned y, unsigned z,
                     unsigned w, unsigned h, unsigned d,
                     struct pipe_transfer **transfer)
{
   struct pipe_box box;
   u_box_3d(x, y, z, w, h, d, &box);
   return context->transfer_map(context,
                                resource,
                                level,
                                access,
                                &box, transfer);
}

static inline void
pipe_transfer_unmap(struct pipe_context *context,
                    struct pipe_transfer *transfer)
{
   context->transfer_unmap(context, transfer);
}

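/*
 * Example (an illustrative sketch; "ctx", "tex" and the region are
 * assumed): read back a 2D region of mipmap level 0, layer 0.
 *
 *    struct pipe_transfer *t;
 *    void *p = pipe_transfer_map(ctx, tex, 0, 0, PIPE_TRANSFER_READ,
 *                                x, y, w, h, &t);
 *    if (p) {
 *       ... rows are t->stride bytes apart ...
 *       pipe_transfer_unmap(ctx, t);
 *    }
 */
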
static inline void
pipe_set_constant_buffer(struct pipe_context *pipe,
                         enum pipe_shader_type shader, uint index,
                         struct pipe_resource *buf)
{
   if (buf) {
      struct pipe_constant_buffer cb;
      cb.buffer = buf;
      cb.buffer_offset = 0;
      cb.buffer_size = buf->width0;
      cb.user_buffer = NULL;
      pipe->set_constant_buffer(pipe, shader, index, &cb);
   } else {
      pipe->set_constant_buffer(pipe, shader, index, NULL);
   }
}

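/*
 * Example (an illustrative sketch; "ubo" is an assumed buffer resource):
 * bind the whole buffer as fragment-shader constant buffer slot 0, and
 * later unbind it.
 *
 *    pipe_set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, ubo);
 *    ...
 *    pipe_set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, NULL);
 */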

/**
 * Get the polygon offset enable/disable flag for the given polygon fill mode.
 * \param fill_mode one of PIPE_POLYGON_MODE_POINT/LINE/FILL
 */
static inline boolean
util_get_offset(const struct pipe_rasterizer_state *templ,
                unsigned fill_mode)
{
   switch (fill_mode) {
   case PIPE_POLYGON_MODE_POINT:
      return templ->offset_point;
   case PIPE_POLYGON_MODE_LINE:
      return templ->offset_line;
   case PIPE_POLYGON_MODE_FILL:
      return templ->offset_tri;
   default:
      assert(0);
      return FALSE;
   }
}

static inline float
util_get_min_point_size(const struct pipe_rasterizer_state *state)
{
   /* The point size should be clamped to this value at the rasterizer stage.
    */
   return !state->point_quad_rasterization &&
          !state->point_smooth &&
          !state->multisample ? 1.0f : 0.0f;
}

static inline void
util_query_clear_result(union pipe_query_result *result, unsigned type)
{
   switch (type) {
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
   case PIPE_QUERY_GPU_FINISHED:
      result->b = FALSE;
      break;
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_TIMESTAMP:
   case PIPE_QUERY_TIME_ELAPSED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      result->u64 = 0;
      break;
   case PIPE_QUERY_SO_STATISTICS:
      memset(&result->so_statistics, 0, sizeof(result->so_statistics));
      break;
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      memset(&result->timestamp_disjoint, 0, sizeof(result->timestamp_disjoint));
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      memset(&result->pipeline_statistics, 0, sizeof(result->pipeline_statistics));
      break;
   default:
      memset(result, 0, sizeof(*result));
   }
}

/** Convert PIPE_TEXTURE_x to TGSI_TEXTURE_x */
static inline enum tgsi_texture_type
util_pipe_tex_to_tgsi_tex(enum pipe_texture_target pipe_tex_target,
                          unsigned nr_samples)
{
   switch (pipe_tex_target) {
   case PIPE_BUFFER:
      return TGSI_TEXTURE_BUFFER;

   case PIPE_TEXTURE_1D:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_1D;

   case PIPE_TEXTURE_2D:
      return nr_samples > 1 ? TGSI_TEXTURE_2D_MSAA : TGSI_TEXTURE_2D;

   case PIPE_TEXTURE_RECT:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_RECT;

   case PIPE_TEXTURE_3D:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_3D;

   case PIPE_TEXTURE_CUBE:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_CUBE;

   case PIPE_TEXTURE_1D_ARRAY:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_1D_ARRAY;

   case PIPE_TEXTURE_2D_ARRAY:
      return nr_samples > 1 ? TGSI_TEXTURE_2D_ARRAY_MSAA :
                              TGSI_TEXTURE_2D_ARRAY;

   case PIPE_TEXTURE_CUBE_ARRAY:
      return TGSI_TEXTURE_CUBE_ARRAY;

   default:
      assert(0 && "unexpected texture target");
      return TGSI_TEXTURE_UNKNOWN;
   }
}


static inline void
util_copy_constant_buffer(struct pipe_constant_buffer *dst,
                          const struct pipe_constant_buffer *src)
{
   if (src) {
      pipe_resource_reference(&dst->buffer, src->buffer);
      dst->buffer_offset = src->buffer_offset;
      dst->buffer_size = src->buffer_size;
      dst->user_buffer = src->user_buffer;
   }
   else {
      pipe_resource_reference(&dst->buffer, NULL);
      dst->buffer_offset = 0;
      dst->buffer_size = 0;
      dst->user_buffer = NULL;
   }
}

static inline void
util_copy_shader_buffer(struct pipe_shader_buffer *dst,
                        const struct pipe_shader_buffer *src)
{
   if (src) {
      pipe_resource_reference(&dst->buffer, src->buffer);
      dst->buffer_offset = src->buffer_offset;
      dst->buffer_size = src->buffer_size;
   }
   else {
      pipe_resource_reference(&dst->buffer, NULL);
      dst->buffer_offset = 0;
      dst->buffer_size = 0;
   }
}

static inline void
util_copy_image_view(struct pipe_image_view *dst,
                     const struct pipe_image_view *src)
{
   if (src) {
      pipe_resource_reference(&dst->resource, src->resource);
      dst->format = src->format;
      dst->access = src->access;
      dst->shader_access = src->shader_access;
      dst->u = src->u;
   } else {
      pipe_resource_reference(&dst->resource, NULL);
      dst->format = PIPE_FORMAT_NONE;
      dst->access = 0;
      dst->shader_access = 0;
      memset(&dst->u, 0, sizeof(dst->u));
   }
}

static inline unsigned
util_max_layer(const struct pipe_resource *r, unsigned level)
{
   switch (r->target) {
   case PIPE_TEXTURE_3D:
      return u_minify(r->depth0, level) - 1;
   case PIPE_TEXTURE_CUBE:
      assert(r->array_size == 6);
      /* fall-through */
   case PIPE_TEXTURE_1D_ARRAY:
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_CUBE_ARRAY:
      return r->array_size - 1;
   default:
      return 0;
   }
}

static inline unsigned
util_num_layers(const struct pipe_resource *r, unsigned level)
{
   return util_max_layer(r, level) + 1;
}

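/*
 * Worked example: for a 256x256x64 PIPE_TEXTURE_3D resource,
 * util_max_layer() returns 63 at level 0 and 15 at level 2 (the depth is
 * minified to 16); for a PIPE_TEXTURE_2D_ARRAY with array_size == 6 it
 * returns 5 at every level, since the array size does not minify.
 */
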
static inline bool
util_texrange_covers_whole_level(const struct pipe_resource *tex,
                                 unsigned level, unsigned x, unsigned y,
                                 unsigned z, unsigned width,
                                 unsigned height, unsigned depth)
{
   return x == 0 && y == 0 && z == 0 &&
          width == u_minify(tex->width0, level) &&
          height == u_minify(tex->height0, level) &&
          depth == util_num_layers(tex, level);
}

static inline struct pipe_context *
pipe_create_multimedia_context(struct pipe_screen *screen)
{
   unsigned flags = 0;

   if (!screen->get_param(screen, PIPE_CAP_GRAPHICS))
      flags |= PIPE_CONTEXT_COMPUTE_ONLY;

   return screen->context_create(screen, NULL, flags);
}

#ifdef __cplusplus
}
#endif

#endif /* U_INLINES_H */