1 /**************************************************************************
3 * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
31 #include "pipe/p_context.h"
32 #include "pipe/p_defines.h"
33 #include "pipe/p_state.h"
34 #include "pipe/p_screen.h"
35 #include "util/u_debug.h"
36 #include "util/u_atomic.h"
45 * Reference counting helper functions.
50 pipe_reference_init(struct pipe_reference
*reference
, unsigned count
)
52 p_atomic_set(&reference
->count
, count
);
56 pipe_is_referenced(struct pipe_reference
*reference
)
58 return p_atomic_read(&reference
->count
) != 0;
62 * Update reference counting.
63 * The old thing pointed to, if any, will be unreferenced.
64 * Both 'ptr' and 'reference' may be NULL.
65 * \return TRUE if the object's refcount hits zero and should be destroyed.
68 pipe_reference(struct pipe_reference
*ptr
, struct pipe_reference
*reference
)
70 boolean destroy
= FALSE
;
72 if(ptr
!= reference
) {
73 /* bump the reference.count first */
75 assert(pipe_is_referenced(reference
));
76 p_atomic_inc(&reference
->count
);
80 assert(pipe_is_referenced(ptr
));
81 if (p_atomic_dec_zero(&ptr
->count
)) {
91 pipe_buffer_reference(struct pipe_buffer
**ptr
, struct pipe_buffer
*buf
)
93 struct pipe_buffer
*old_buf
;
98 if (pipe_reference(&(*ptr
)->reference
, &buf
->reference
))
99 old_buf
->screen
->buffer_destroy(old_buf
);
104 pipe_surface_reference(struct pipe_surface
**ptr
, struct pipe_surface
*surf
)
106 struct pipe_surface
*old_surf
= *ptr
;
108 if (pipe_reference(&(*ptr
)->reference
, &surf
->reference
))
109 old_surf
->texture
->screen
->tex_surface_destroy(old_surf
);
114 pipe_texture_reference(struct pipe_texture
**ptr
, struct pipe_texture
*tex
)
116 struct pipe_texture
*old_tex
= *ptr
;
118 if (pipe_reference(&(*ptr
)->reference
, &tex
->reference
))
119 old_tex
->screen
->texture_destroy(old_tex
);
124 pipe_sampler_view_reference(struct pipe_sampler_view
**ptr
, struct pipe_sampler_view
*view
)
126 struct pipe_sampler_view
*old_view
= *ptr
;
128 if (pipe_reference(&(*ptr
)->reference
, &view
->reference
))
129 old_view
->context
->sampler_view_destroy(old_view
->context
, old_view
);
135 * Convenience wrappers for screen buffer functions.
138 static INLINE
struct pipe_buffer
*
139 pipe_buffer_create( struct pipe_screen
*screen
,
140 unsigned alignment
, unsigned usage
, unsigned size
)
142 return screen
->buffer_create(screen
, alignment
, usage
, size
);
145 static INLINE
struct pipe_buffer
*
146 pipe_user_buffer_create( struct pipe_screen
*screen
, void *ptr
, unsigned size
)
148 return screen
->user_buffer_create(screen
, ptr
, size
);
152 pipe_buffer_map(struct pipe_screen
*screen
,
153 struct pipe_buffer
*buf
,
156 if(screen
->buffer_map_range
) {
158 unsigned length
= buf
->size
;
159 return screen
->buffer_map_range(screen
, buf
, offset
, length
, usage
);
162 return screen
->buffer_map(screen
, buf
, usage
);
166 pipe_buffer_unmap(struct pipe_screen
*screen
,
167 struct pipe_buffer
*buf
)
169 screen
->buffer_unmap(screen
, buf
);
173 pipe_buffer_map_range(struct pipe_screen
*screen
,
174 struct pipe_buffer
*buf
,
179 assert(offset
< buf
->size
);
180 assert(offset
+ length
<= buf
->size
);
182 if(screen
->buffer_map_range
)
183 return screen
->buffer_map_range(screen
, buf
, offset
, length
, usage
);
185 return screen
->buffer_map(screen
, buf
, usage
);
189 pipe_buffer_flush_mapped_range(struct pipe_screen
*screen
,
190 struct pipe_buffer
*buf
,
194 assert(offset
< buf
->size
);
195 assert(offset
+ length
<= buf
->size
);
197 if(screen
->buffer_flush_mapped_range
)
198 screen
->buffer_flush_mapped_range(screen
, buf
, offset
, length
);
202 pipe_buffer_write(struct pipe_screen
*screen
,
203 struct pipe_buffer
*buf
,
204 unsigned offset
, unsigned size
,
209 assert(offset
< buf
->size
);
210 assert(offset
+ size
<= buf
->size
);
213 map
= pipe_buffer_map_range(screen
, buf
, offset
, size
,
214 PIPE_BUFFER_USAGE_CPU_WRITE
|
215 PIPE_BUFFER_USAGE_FLUSH_EXPLICIT
|
216 PIPE_BUFFER_USAGE_DISCARD
);
219 memcpy((uint8_t *)map
+ offset
, data
, size
);
220 pipe_buffer_flush_mapped_range(screen
, buf
, offset
, size
);
221 pipe_buffer_unmap(screen
, buf
);
226 * Special case for writing non-overlapping ranges.
228 * We can avoid GPU/CPU synchronization when writing range that has never
229 * been written before.
232 pipe_buffer_write_nooverlap(struct pipe_screen
*screen
,
233 struct pipe_buffer
*buf
,
234 unsigned offset
, unsigned size
,
239 assert(offset
< buf
->size
);
240 assert(offset
+ size
<= buf
->size
);
243 map
= pipe_buffer_map_range(screen
, buf
, offset
, size
,
244 PIPE_BUFFER_USAGE_CPU_WRITE
|
245 PIPE_BUFFER_USAGE_FLUSH_EXPLICIT
|
246 PIPE_BUFFER_USAGE_DISCARD
|
247 PIPE_BUFFER_USAGE_UNSYNCHRONIZED
);
250 memcpy((uint8_t *)map
+ offset
, data
, size
);
251 pipe_buffer_flush_mapped_range(screen
, buf
, offset
, size
);
252 pipe_buffer_unmap(screen
, buf
);
257 pipe_buffer_read(struct pipe_screen
*screen
,
258 struct pipe_buffer
*buf
,
259 unsigned offset
, unsigned size
,
264 assert(offset
< buf
->size
);
265 assert(offset
+ size
<= buf
->size
);
268 map
= pipe_buffer_map_range(screen
, buf
, offset
, size
, PIPE_BUFFER_USAGE_CPU_READ
);
271 memcpy(data
, (const uint8_t *)map
+ offset
, size
);
272 pipe_buffer_unmap(screen
, buf
);
277 pipe_transfer_map( struct pipe_context
*context
,
278 struct pipe_transfer
*transf
)
280 return context
->transfer_map(context
, transf
);
284 pipe_transfer_unmap( struct pipe_context
*context
,
285 struct pipe_transfer
*transf
)
287 context
->transfer_unmap(context
, transf
);
291 pipe_transfer_destroy( struct pipe_context
*context
,
292 struct pipe_transfer
*transfer
)
294 context
->tex_transfer_destroy(context
, transfer
);
297 static INLINE
unsigned
298 pipe_transfer_buffer_flags( struct pipe_transfer
*transf
)
300 switch (transf
->usage
& PIPE_TRANSFER_READ_WRITE
) {
301 case PIPE_TRANSFER_READ_WRITE
:
302 return PIPE_BUFFER_USAGE_CPU_READ
| PIPE_BUFFER_USAGE_CPU_WRITE
;
303 case PIPE_TRANSFER_READ
:
304 return PIPE_BUFFER_USAGE_CPU_READ
;
305 case PIPE_TRANSFER_WRITE
:
306 return PIPE_BUFFER_USAGE_CPU_WRITE
;
317 #endif /* U_INLINES_H */