/**************************************************************************
 *
 * Copyright 2007 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef U_INLINES_H
#define U_INLINES_H

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_shader_tokens.h"
#include "pipe/p_state.h"
#include "pipe/p_screen.h"
#include "util/u_debug.h"
#include "util/u_debug_describe.h"
#include "util/u_debug_refcnt.h"
#include "util/u_atomic.h"
#include "util/u_box.h"
#include "util/u_math.h"

/**
 * Reference counting helper functions.
 */
static inline void
pipe_reference_init(struct pipe_reference *reference, unsigned count)
{
   p_atomic_set(&reference->count, count);
}

static inline boolean
pipe_is_referenced(struct pipe_reference *reference)
{
   return p_atomic_read(&reference->count) != 0;
}

/**
 * Update reference counting.
 * The old thing pointed to, if any, will be unreferenced.
 * Both 'ptr' and 'reference' may be NULL.
 * \return TRUE if the object's refcount hits zero and should be destroyed.
 */
static inline boolean
pipe_reference_described(struct pipe_reference *ptr,
                         struct pipe_reference *reference,
                         debug_reference_descriptor get_desc)
{
   boolean destroy = FALSE;

   if (ptr != reference) {
      /* bump the reference.count first */
      if (reference) {
         assert(pipe_is_referenced(reference));
         p_atomic_inc(&reference->count);
         debug_reference(reference, get_desc, 1);
      }

      if (ptr) {
         assert(pipe_is_referenced(ptr));
         if (p_atomic_dec_zero(&ptr->count)) {
            destroy = TRUE;
         }
         debug_reference(ptr, get_desc, -1);
      }
   }

   return destroy;
}

static inline boolean
pipe_reference(struct pipe_reference *ptr, struct pipe_reference *reference)
{
   return pipe_reference_described(ptr, reference,
                                   (debug_reference_descriptor)
                                   debug_describe_reference);
}

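/* Illustrative usage (not part of the original header; `struct foo', the
 * variable names, and the CALLOC_STRUCT/FREE helpers from util/u_memory.h
 * are assumptions). A refcounted object embeds a pipe_reference and updates
 * it through pipe_reference():
 *
 *    struct foo { struct pipe_reference reference; };
 *
 *    struct foo *f = CALLOC_STRUCT(foo);
 *    pipe_reference_init(&f->reference, 1);
 *    ...
 *    if (pipe_reference(&f->reference, NULL))
 *       FREE(f);
 *
 * The TRUE return means the count hit zero and the caller must destroy
 * the object.
 */
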
static inline void
pipe_surface_reference(struct pipe_surface **ptr, struct pipe_surface *surf)
{
   struct pipe_surface *old_surf = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &surf->reference,
                                (debug_reference_descriptor)
                                debug_describe_surface))
      old_surf->context->surface_destroy(old_surf->context, old_surf);
   *ptr = surf;
}

/**
 * Similar to pipe_surface_reference() but always set the pointer to NULL
 * and pass in an explicit context.  The explicit context avoids the problem
 * of using a deleted context's surface_destroy() method when freeing a surface
 * that's shared by multiple contexts.
 */
static inline void
pipe_surface_release(struct pipe_context *pipe, struct pipe_surface **ptr)
{
   if (pipe_reference_described(&(*ptr)->reference, NULL,
                                (debug_reference_descriptor)
                                debug_describe_surface))
      pipe->surface_destroy(pipe, *ptr);
   *ptr = NULL;
}

static inline void
pipe_resource_reference(struct pipe_resource **ptr, struct pipe_resource *tex)
{
   struct pipe_resource *old_tex = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &tex->reference,
                                (debug_reference_descriptor)
                                debug_describe_resource)) {
      /* Avoid recursion, which would prevent inlining this function */
      do {
         struct pipe_resource *next = old_tex->next;

         old_tex->screen->resource_destroy(old_tex->screen, old_tex);
         old_tex = next;
      } while (pipe_reference_described(&old_tex->reference, NULL,
                                        (debug_reference_descriptor)
                                        debug_describe_resource));
   }
   *ptr = tex;
}

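/* Illustrative usage (added commentary; `tex' is a hypothetical
 * screen-created resource). The canonical pattern for holding and dropping
 * a resource reference:
 *
 *    struct pipe_resource *held = NULL;
 *    pipe_resource_reference(&held, tex);   take a reference
 *    ...
 *    pipe_resource_reference(&held, NULL);  drop it; may destroy the resource
 */
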
static inline void
pipe_sampler_view_reference(struct pipe_sampler_view **ptr,
                            struct pipe_sampler_view *view)
{
   struct pipe_sampler_view *old_view = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &view->reference,
                                (debug_reference_descriptor)
                                debug_describe_sampler_view))
      old_view->context->sampler_view_destroy(old_view->context, old_view);
   *ptr = view;
}

/**
 * Similar to pipe_sampler_view_reference() but always set the pointer to
 * NULL and pass in an explicit context.  Passing an explicit context is a
 * work-around for fixing a dangling context pointer problem when textures
 * are shared by multiple contexts.  XXX fix this someday.
 */
static inline void
pipe_sampler_view_release(struct pipe_context *ctx,
                          struct pipe_sampler_view **ptr)
{
   struct pipe_sampler_view *old_view = *ptr;
   if (*ptr && (*ptr)->context != ctx) {
      debug_printf_once(("context mis-match in pipe_sampler_view_release()\n"));
   }
   if (pipe_reference_described(&(*ptr)->reference, NULL,
                                (debug_reference_descriptor)
                                debug_describe_sampler_view)) {
      ctx->sampler_view_destroy(ctx, old_view);
   }
   *ptr = NULL;
}

static inline void
pipe_so_target_reference(struct pipe_stream_output_target **ptr,
                         struct pipe_stream_output_target *target)
{
   struct pipe_stream_output_target *old = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &target->reference,
                                (debug_reference_descriptor)
                                debug_describe_so_target))
      old->context->stream_output_target_destroy(old->context, old);
   *ptr = target;
}

static inline void
pipe_vertex_buffer_unreference(struct pipe_vertex_buffer *dst)
{
   if (dst->is_user_buffer)
      dst->buffer.user = NULL;
   else
      pipe_resource_reference(&dst->buffer.resource, NULL);
}

static inline void
pipe_vertex_buffer_reference(struct pipe_vertex_buffer *dst,
                             const struct pipe_vertex_buffer *src)
{
   pipe_vertex_buffer_unreference(dst);
   if (!src->is_user_buffer)
      pipe_resource_reference(&dst->buffer.resource, src->buffer.resource);
   memcpy(dst, src, sizeof(*src));
}

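/* Note (added commentary): the new reference is taken through
 * `dst->buffer.resource' before the memcpy(), so after the struct copy
 * `dst' owns its own reference to `src->buffer.resource'. Illustrative
 * usage with a hypothetical `src_vb':
 *
 *    struct pipe_vertex_buffer vb = {0};
 *    pipe_vertex_buffer_reference(&vb, &src_vb);
 *    ...
 *    pipe_vertex_buffer_unreference(&vb);
 */
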
static inline void
pipe_surface_reset(struct pipe_context *ctx, struct pipe_surface* ps,
                   struct pipe_resource *pt, unsigned level, unsigned layer)
{
   pipe_resource_reference(&ps->texture, pt);
   ps->format = pt->format;
   ps->width = u_minify(pt->width0, level);
   ps->height = u_minify(pt->height0, level);
   ps->u.tex.level = level;
   ps->u.tex.first_layer = ps->u.tex.last_layer = layer;
   ps->context = ctx;
}

static inline void
pipe_surface_init(struct pipe_context *ctx, struct pipe_surface* ps,
                  struct pipe_resource *pt, unsigned level, unsigned layer)
{
   ps->texture = NULL;
   pipe_reference_init(&ps->reference, 1);
   pipe_surface_reset(ctx, ps, pt, level, layer);
}

/* Return true if the surfaces are equal. */
static inline boolean
pipe_surface_equal(struct pipe_surface *s1, struct pipe_surface *s2)
{
   return s1->texture == s2->texture &&
          s1->format == s2->format &&
          (s1->texture->target != PIPE_BUFFER ||
           (s1->u.buf.first_element == s2->u.buf.first_element &&
            s1->u.buf.last_element == s2->u.buf.last_element)) &&
          (s1->texture->target == PIPE_BUFFER ||
           (s1->u.tex.level == s2->u.tex.level &&
            s1->u.tex.first_layer == s2->u.tex.first_layer &&
            s1->u.tex.last_layer == s2->u.tex.last_layer));
}

/*
 * Convenience wrappers for screen buffer functions.
 */


/**
 * Create a new resource.
 * \param bind  bitmask of PIPE_BIND_x flags
 * \param usage  a PIPE_USAGE_x value
 */
static inline struct pipe_resource *
pipe_buffer_create(struct pipe_screen *screen,
                   unsigned bind,
                   enum pipe_resource_usage usage,
                   unsigned size)
{
   struct pipe_resource buffer;
   memset(&buffer, 0, sizeof buffer);
   buffer.target = PIPE_BUFFER;
   buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
   buffer.bind = bind;
   buffer.usage = usage;
   buffer.flags = 0;
   buffer.width0 = size;
   buffer.height0 = 1;
   buffer.depth0 = 1;
   buffer.array_size = 1;
   return screen->resource_create(screen, &buffer);
}

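/* Illustrative usage (added commentary; `screen' is a hypothetical driver
 * screen). Creating a 4 KiB vertex buffer:
 *
 *    struct pipe_resource *vbuf =
 *       pipe_buffer_create(screen, PIPE_BIND_VERTEX_BUFFER,
 *                          PIPE_USAGE_DEFAULT, 4096);
 */
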
/**
 * Map a range of a resource.
 * \param offset  start of region, in bytes
 * \param length  size of region, in bytes
 * \param access  bitmask of PIPE_TRANSFER_x flags
 * \param transfer  returns a transfer object
 */
static inline void *
pipe_buffer_map_range(struct pipe_context *pipe,
                      struct pipe_resource *buffer,
                      unsigned offset,
                      unsigned length,
                      unsigned access,
                      struct pipe_transfer **transfer)
{
   struct pipe_box box;
   void *map;

   assert(offset < buffer->width0);
   assert(offset + length <= buffer->width0);
   assert(length);

   u_box_1d(offset, length, &box);

   map = pipe->transfer_map(pipe, buffer, 0, access, &box, transfer);
   if (!map) {
      return NULL;
   }

   return map;
}

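/* Illustrative usage (added commentary; `pipe' and `buf' are hypothetical):
 * map bytes [64, 192) for writing, discarding only that range to avoid
 * stalling on the GPU:
 *
 *    struct pipe_transfer *xfer;
 *    void *map = pipe_buffer_map_range(pipe, buf, 64, 128,
 *                                      PIPE_TRANSFER_WRITE |
 *                                      PIPE_TRANSFER_DISCARD_RANGE, &xfer);
 *    if (map) {
 *       memset(map, 0, 128);
 *       pipe_buffer_unmap(pipe, xfer);
 *    }
 */
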
/**
 * Map whole resource.
 * \param access  bitmask of PIPE_TRANSFER_x flags
 * \param transfer  returns a transfer object
 */
static inline void *
pipe_buffer_map(struct pipe_context *pipe,
                struct pipe_resource *buffer,
                unsigned access,
                struct pipe_transfer **transfer)
{
   return pipe_buffer_map_range(pipe, buffer, 0, buffer->width0,
                                access, transfer);
}

static inline void
pipe_buffer_unmap(struct pipe_context *pipe,
                  struct pipe_transfer *transfer)
{
   pipe->transfer_unmap(pipe, transfer);
}

static inline void
pipe_buffer_flush_mapped_range(struct pipe_context *pipe,
                               struct pipe_transfer *transfer,
                               unsigned offset,
                               unsigned length)
{
   struct pipe_box box;
   int transfer_offset;

   assert(length);
   assert(transfer->box.x <= (int) offset);
   assert((int) (offset + length) <= transfer->box.x + transfer->box.width);

   /* Match old screen->buffer_flush_mapped_range() behaviour, where
    * offset parameter is relative to the start of the buffer, not the
    * mapped range.
    */
   transfer_offset = offset - transfer->box.x;

   u_box_1d(transfer_offset, length, &box);

   pipe->transfer_flush_region(pipe, transfer, &box);
}

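/* Illustrative usage (added commentary; `pipe' and `buf' are hypothetical):
 * when a buffer is mapped with PIPE_TRANSFER_FLUSH_EXPLICIT, only the
 * explicitly flushed ranges must reach the device. Note that the offset
 * below is relative to the buffer start, per the comment above:
 *
 *    struct pipe_transfer *xfer;
 *    void *map = pipe_buffer_map_range(pipe, buf, 0, 256,
 *                                      PIPE_TRANSFER_WRITE |
 *                                      PIPE_TRANSFER_FLUSH_EXPLICIT, &xfer);
 *    ... write bytes [0, 64) through `map' ...
 *    pipe_buffer_flush_mapped_range(pipe, xfer, 0, 64);
 *    pipe_buffer_unmap(pipe, xfer);
 */
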
static inline void
pipe_buffer_write(struct pipe_context *pipe,
                  struct pipe_resource *buf,
                  unsigned offset,
                  unsigned size,
                  const void *data)
{
   /* Don't set any other usage bits. Drivers should derive them. */
   pipe->buffer_subdata(pipe, buf, PIPE_TRANSFER_WRITE, offset, size, data);
}

/**
 * Special case for writing non-overlapping ranges.
 *
 * We can avoid GPU/CPU synchronization when writing range that has never
 * been written before.
 */
static inline void
pipe_buffer_write_nooverlap(struct pipe_context *pipe,
                            struct pipe_resource *buf,
                            unsigned offset, unsigned size,
                            const void *data)
{
   pipe->buffer_subdata(pipe, buf,
                        (PIPE_TRANSFER_WRITE |
                         PIPE_TRANSFER_UNSYNCHRONIZED),
                        offset, size, data);
}

/**
 * Create a new resource and immediately put data into it
 * \param bind  bitmask of PIPE_BIND_x flags
 * \param usage  a PIPE_USAGE_x value
 */
static inline struct pipe_resource *
pipe_buffer_create_with_data(struct pipe_context *pipe,
                             unsigned bind,
                             enum pipe_resource_usage usage,
                             unsigned size,
                             const void *ptr)
{
   struct pipe_resource *res = pipe_buffer_create(pipe->screen,
                                                  bind, usage, size);
   pipe_buffer_write_nooverlap(pipe, res, 0, size, ptr);
   return res;
}

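/* Illustrative usage (added commentary; `pipe' and the vertex data are
 * hypothetical): create and fill a small immutable buffer in one call:
 *
 *    static const float verts[8] = { 0, 0,  1, 0,  1, 1,  0, 1 };
 *    struct pipe_resource *vbuf =
 *       pipe_buffer_create_with_data(pipe, PIPE_BIND_VERTEX_BUFFER,
 *                                    PIPE_USAGE_IMMUTABLE,
 *                                    sizeof(verts), verts);
 */
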
static inline void
pipe_buffer_read(struct pipe_context *pipe,
                 struct pipe_resource *buf,
                 unsigned offset,
                 unsigned size,
                 void *data)
{
   struct pipe_transfer *src_transfer;
   ubyte *map;

   map = (ubyte *) pipe_buffer_map_range(pipe,
                                         buf,
                                         offset, size,
                                         PIPE_TRANSFER_READ,
                                         &src_transfer);
   if (!map)
      return;

   memcpy(data, map, size);
   pipe_buffer_unmap(pipe, src_transfer);
}

/**
 * Map a resource for reading/writing.
 * \param access  bitmask of PIPE_TRANSFER_x flags
 */
static inline void *
pipe_transfer_map(struct pipe_context *context,
                  struct pipe_resource *resource,
                  unsigned level, unsigned layer,
                  unsigned access,
                  unsigned x, unsigned y,
                  unsigned w, unsigned h,
                  struct pipe_transfer **transfer)
{
   struct pipe_box box;
   u_box_2d_zslice(x, y, layer, w, h, &box);
   return context->transfer_map(context,
                                resource,
                                level,
                                access,
                                &box, transfer);
}

/**
 * Map a 3D (texture) resource for reading/writing.
 * \param access  bitmask of PIPE_TRANSFER_x flags
 */
static inline void *
pipe_transfer_map_3d(struct pipe_context *context,
                     struct pipe_resource *resource,
                     unsigned level,
                     unsigned access,
                     unsigned x, unsigned y, unsigned z,
                     unsigned w, unsigned h, unsigned d,
                     struct pipe_transfer **transfer)
{
   struct pipe_box box;
   u_box_3d(x, y, z, w, h, d, &box);
   return context->transfer_map(context,
                                resource,
                                level,
                                access,
                                &box, transfer);
}

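/* Illustrative usage (added commentary; `ctx' and the 2D texture `tex' are
 * hypothetical): read back a 16x16 block of mip level 0, layer 0:
 *
 *    struct pipe_transfer *xfer;
 *    void *map = pipe_transfer_map(ctx, tex, 0, 0, PIPE_TRANSFER_READ,
 *                                  0, 0, 16, 16, &xfer);
 *    if (map) {
 *       ... consecutive rows are xfer->stride bytes apart ...
 *       pipe_transfer_unmap(ctx, xfer);
 *    }
 */
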
static inline void
pipe_transfer_unmap(struct pipe_context *context,
                    struct pipe_transfer *transfer)
{
   context->transfer_unmap(context, transfer);
}

static inline void
pipe_set_constant_buffer(struct pipe_context *pipe,
                         enum pipe_shader_type shader, uint index,
                         struct pipe_resource *buf)
{
   if (buf) {
      struct pipe_constant_buffer cb;
      cb.buffer = buf;
      cb.buffer_offset = 0;
      cb.buffer_size = buf->width0;
      cb.user_buffer = NULL;
      pipe->set_constant_buffer(pipe, shader, index, &cb);
   } else {
      pipe->set_constant_buffer(pipe, shader, index, NULL);
   }
}

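/* Illustrative usage (added commentary; `pipe' and `ubo' are hypothetical):
 * bind `ubo' as fragment-shader constant buffer 0, and later unbind it:
 *
 *    pipe_set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, ubo);
 *    ...
 *    pipe_set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, NULL);
 */
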
/**
 * Get the polygon offset enable/disable flag for the given polygon fill mode.
 * \param fill_mode  one of PIPE_POLYGON_MODE_POINT/LINE/FILL
 */
static inline boolean
util_get_offset(const struct pipe_rasterizer_state *templ,
                unsigned fill_mode)
{
   switch (fill_mode) {
   case PIPE_POLYGON_MODE_POINT:
      return templ->offset_point;
   case PIPE_POLYGON_MODE_LINE:
      return templ->offset_line;
   case PIPE_POLYGON_MODE_FILL:
      return templ->offset_tri;
   default:
      assert(0);
      return FALSE;
   }
}

static inline float
util_get_min_point_size(const struct pipe_rasterizer_state *state)
{
   /* The point size should be clamped to this value at the rasterizer stage.
    */
   return !state->point_quad_rasterization &&
          !state->point_smooth &&
          !state->multisample ? 1.0f : 0.0f;
}

static inline void
util_query_clear_result(union pipe_query_result *result, unsigned type)
{
   switch (type) {
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
   case PIPE_QUERY_GPU_FINISHED:
      result->b = FALSE;
      break;
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_TIMESTAMP:
   case PIPE_QUERY_TIME_ELAPSED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      result->u64 = 0;
      break;
   case PIPE_QUERY_SO_STATISTICS:
      memset(&result->so_statistics, 0, sizeof(result->so_statistics));
      break;
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      memset(&result->timestamp_disjoint, 0, sizeof(result->timestamp_disjoint));
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      memset(&result->pipeline_statistics, 0, sizeof(result->pipeline_statistics));
      break;
   default:
      memset(result, 0, sizeof(*result));
   }
}

/** Convert PIPE_TEXTURE_x to TGSI_TEXTURE_x */
static inline unsigned
util_pipe_tex_to_tgsi_tex(enum pipe_texture_target pipe_tex_target,
                          unsigned nr_samples)
{
   switch (pipe_tex_target) {
   case PIPE_BUFFER:
      return TGSI_TEXTURE_BUFFER;

   case PIPE_TEXTURE_1D:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_1D;

   case PIPE_TEXTURE_2D:
      return nr_samples > 1 ? TGSI_TEXTURE_2D_MSAA : TGSI_TEXTURE_2D;

   case PIPE_TEXTURE_RECT:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_RECT;

   case PIPE_TEXTURE_3D:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_3D;

   case PIPE_TEXTURE_CUBE:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_CUBE;

   case PIPE_TEXTURE_1D_ARRAY:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_1D_ARRAY;

   case PIPE_TEXTURE_2D_ARRAY:
      return nr_samples > 1 ? TGSI_TEXTURE_2D_ARRAY_MSAA :
                              TGSI_TEXTURE_2D_ARRAY;

   case PIPE_TEXTURE_CUBE_ARRAY:
      return TGSI_TEXTURE_CUBE_ARRAY;

   default:
      assert(0 && "unexpected texture target");
      return TGSI_TEXTURE_UNKNOWN;
   }
}

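/* For example (added commentary): util_pipe_tex_to_tgsi_tex(PIPE_TEXTURE_2D, 4)
 * yields TGSI_TEXTURE_2D_MSAA, while a sample count of 0 or 1 yields
 * TGSI_TEXTURE_2D.
 */
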
static inline void
util_copy_constant_buffer(struct pipe_constant_buffer *dst,
                          const struct pipe_constant_buffer *src)
{
   if (src) {
      pipe_resource_reference(&dst->buffer, src->buffer);
      dst->buffer_offset = src->buffer_offset;
      dst->buffer_size = src->buffer_size;
      dst->user_buffer = src->user_buffer;
   }
   else {
      pipe_resource_reference(&dst->buffer, NULL);
      dst->buffer_offset = 0;
      dst->buffer_size = 0;
      dst->user_buffer = NULL;
   }
}

static inline void
util_copy_image_view(struct pipe_image_view *dst,
                     const struct pipe_image_view *src)
{
   if (src) {
      pipe_resource_reference(&dst->resource, src->resource);
      dst->format = src->format;
      dst->access = src->access;
      dst->u = src->u;
   } else {
      pipe_resource_reference(&dst->resource, NULL);
      dst->format = PIPE_FORMAT_NONE;
      dst->access = 0;
      memset(&dst->u, 0, sizeof(dst->u));
   }
}

static inline unsigned
util_max_layer(const struct pipe_resource *r, unsigned level)
{
   switch (r->target) {
   case PIPE_TEXTURE_3D:
      return u_minify(r->depth0, level) - 1;
   case PIPE_TEXTURE_CUBE:
      assert(r->array_size == 6);
      /* fall-through */
   case PIPE_TEXTURE_1D_ARRAY:
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_CUBE_ARRAY:
      return r->array_size - 1;
   default:
      return 0;
   }
}

static inline boolean
util_texrange_covers_whole_level(const struct pipe_resource *tex,
                                 unsigned level, unsigned x, unsigned y,
                                 unsigned z, unsigned width,
                                 unsigned height, unsigned depth)
{
   return x == 0 && y == 0 && z == 0 &&
          width == u_minify(tex->width0, level) &&
          height == u_minify(tex->height0, level) &&
          depth == util_max_layer(tex, level) + 1;
}

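/* For example (added commentary): for a 64x64 2D texture `tex',
 * util_texrange_covers_whole_level(tex, 1, 0, 0, 0, 32, 32, 1) returns TRUE:
 * mip level 1 is 32x32, and a non-array 2D texture has a single layer
 * (util_max_layer() == 0).
 */
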
#endif /* U_INLINES_H */