1 /**************************************************************************
3 * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
31 #include "pipe/p_context.h"
32 #include "pipe/p_defines.h"
33 #include "pipe/p_state.h"
34 #include "pipe/p_screen.h"
35 #include "util/u_debug.h"
36 #include "util/u_atomic.h"
45 * Reference counting helper functions.
50 pipe_reference_init(struct pipe_reference
*reference
, unsigned count
)
52 p_atomic_set(&reference
->count
, count
);
56 pipe_is_referenced(struct pipe_reference
*reference
)
58 return p_atomic_read(&reference
->count
) != 0;
62 * Update reference counting.
63 * The old thing pointed to, if any, will be unreferenced.
64 * Both 'ptr' and 'reference' may be NULL.
65 * \return TRUE if the object's refcount hits zero and should be destroyed.
68 pipe_reference(struct pipe_reference
*ptr
, struct pipe_reference
*reference
)
70 boolean destroy
= FALSE
;
72 if(ptr
!= reference
) {
73 /* bump the reference.count first */
75 assert(pipe_is_referenced(reference
));
76 p_atomic_inc(&reference
->count
);
80 assert(pipe_is_referenced(ptr
));
81 if (p_atomic_dec_zero(&ptr
->count
)) {
91 pipe_buffer_reference(struct pipe_buffer
**ptr
, struct pipe_buffer
*buf
)
93 struct pipe_buffer
*old_buf
= *ptr
;
95 if (pipe_reference(&(*ptr
)->reference
, &buf
->reference
))
96 old_buf
->screen
->buffer_destroy(old_buf
);
101 pipe_surface_reference(struct pipe_surface
**ptr
, struct pipe_surface
*surf
)
103 struct pipe_surface
*old_surf
= *ptr
;
105 if (pipe_reference(&(*ptr
)->reference
, &surf
->reference
))
106 old_surf
->texture
->screen
->tex_surface_destroy(old_surf
);
111 pipe_texture_reference(struct pipe_texture
**ptr
, struct pipe_texture
*tex
)
113 struct pipe_texture
*old_tex
= *ptr
;
115 if (pipe_reference(&(*ptr
)->reference
, &tex
->reference
))
116 old_tex
->screen
->texture_destroy(old_tex
);
122 * Convenience wrappers for screen buffer functions.
125 static INLINE
struct pipe_buffer
*
126 pipe_buffer_create( struct pipe_screen
*screen
,
127 unsigned alignment
, unsigned usage
, unsigned size
)
129 return screen
->buffer_create(screen
, alignment
, usage
, size
);
132 static INLINE
struct pipe_buffer
*
133 pipe_user_buffer_create( struct pipe_screen
*screen
, void *ptr
, unsigned size
)
135 return screen
->user_buffer_create(screen
, ptr
, size
);
139 pipe_buffer_map(struct pipe_screen
*screen
,
140 struct pipe_buffer
*buf
,
143 if(screen
->buffer_map_range
) {
145 unsigned length
= buf
->size
;
146 return screen
->buffer_map_range(screen
, buf
, offset
, length
, usage
);
149 return screen
->buffer_map(screen
, buf
, usage
);
153 pipe_buffer_unmap(struct pipe_screen
*screen
,
154 struct pipe_buffer
*buf
)
156 screen
->buffer_unmap(screen
, buf
);
160 pipe_buffer_map_range(struct pipe_screen
*screen
,
161 struct pipe_buffer
*buf
,
166 assert(offset
< buf
->size
);
167 assert(offset
+ length
<= buf
->size
);
169 if(screen
->buffer_map_range
)
170 return screen
->buffer_map_range(screen
, buf
, offset
, length
, usage
);
172 return screen
->buffer_map(screen
, buf
, usage
);
176 pipe_buffer_flush_mapped_range(struct pipe_screen
*screen
,
177 struct pipe_buffer
*buf
,
181 assert(offset
< buf
->size
);
182 assert(offset
+ length
<= buf
->size
);
184 if(screen
->buffer_flush_mapped_range
)
185 screen
->buffer_flush_mapped_range(screen
, buf
, offset
, length
);
189 pipe_buffer_write(struct pipe_screen
*screen
,
190 struct pipe_buffer
*buf
,
191 unsigned offset
, unsigned size
,
196 assert(offset
< buf
->size
);
197 assert(offset
+ size
<= buf
->size
);
200 map
= pipe_buffer_map_range(screen
, buf
, offset
, size
,
201 PIPE_BUFFER_USAGE_CPU_WRITE
|
202 PIPE_BUFFER_USAGE_FLUSH_EXPLICIT
|
203 PIPE_BUFFER_USAGE_DISCARD
);
206 memcpy((uint8_t *)map
+ offset
, data
, size
);
207 pipe_buffer_flush_mapped_range(screen
, buf
, offset
, size
);
208 pipe_buffer_unmap(screen
, buf
);
213 * Special case for writing non-overlapping ranges.
215 * We can avoid GPU/CPU synchronization when writing range that has never
216 * been written before.
219 pipe_buffer_write_nooverlap(struct pipe_screen
*screen
,
220 struct pipe_buffer
*buf
,
221 unsigned offset
, unsigned size
,
226 assert(offset
< buf
->size
);
227 assert(offset
+ size
<= buf
->size
);
230 map
= pipe_buffer_map_range(screen
, buf
, offset
, size
,
231 PIPE_BUFFER_USAGE_CPU_WRITE
|
232 PIPE_BUFFER_USAGE_FLUSH_EXPLICIT
|
233 PIPE_BUFFER_USAGE_DISCARD
|
234 PIPE_BUFFER_USAGE_UNSYNCHRONIZED
);
237 memcpy((uint8_t *)map
+ offset
, data
, size
);
238 pipe_buffer_flush_mapped_range(screen
, buf
, offset
, size
);
239 pipe_buffer_unmap(screen
, buf
);
244 pipe_buffer_read(struct pipe_screen
*screen
,
245 struct pipe_buffer
*buf
,
246 unsigned offset
, unsigned size
,
251 assert(offset
< buf
->size
);
252 assert(offset
+ size
<= buf
->size
);
255 map
= pipe_buffer_map_range(screen
, buf
, offset
, size
, PIPE_BUFFER_USAGE_CPU_READ
);
258 memcpy(data
, (const uint8_t *)map
+ offset
, size
);
259 pipe_buffer_unmap(screen
, buf
);
264 pipe_transfer_map( struct pipe_transfer
*transf
)
266 struct pipe_screen
*screen
= transf
->texture
->screen
;
267 return screen
->transfer_map(screen
, transf
);
271 pipe_transfer_unmap( struct pipe_transfer
*transf
)
273 struct pipe_screen
*screen
= transf
->texture
->screen
;
274 screen
->transfer_unmap(screen
, transf
);
278 pipe_transfer_destroy( struct pipe_transfer
*transf
)
280 struct pipe_screen
*screen
= transf
->texture
->screen
;
281 screen
->tex_transfer_destroy(transf
);
284 static INLINE
unsigned
285 pipe_transfer_buffer_flags( struct pipe_transfer
*transf
)
287 switch (transf
->usage
& PIPE_TRANSFER_READ_WRITE
) {
288 case PIPE_TRANSFER_READ_WRITE
:
289 return PIPE_BUFFER_USAGE_CPU_READ
| PIPE_BUFFER_USAGE_CPU_WRITE
;
290 case PIPE_TRANSFER_READ
:
291 return PIPE_BUFFER_USAGE_CPU_READ
;
292 case PIPE_TRANSFER_WRITE
:
293 return PIPE_BUFFER_USAGE_CPU_WRITE
;
304 #endif /* U_INLINES_H */