src/gallium/auxiliary/util/u_inlines.h
/**************************************************************************
 *
 * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef U_INLINES_H
#define U_INLINES_H

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_screen.h"
#include "util/u_debug.h"
#include "util/u_atomic.h"
#include "util/u_box.h"
#include "util/u_math.h"


#ifdef __cplusplus
extern "C" {
#endif


/*
 * Reference counting helper functions.
 */


static INLINE void
pipe_reference_init(struct pipe_reference *reference, unsigned count)
{
   p_atomic_set(&reference->count, count);
}

static INLINE boolean
pipe_is_referenced(struct pipe_reference *reference)
{
   return p_atomic_read(&reference->count) != 0;
}

/**
 * Update reference counting.
 * The refcount of 'reference' (the object being acquired) is incremented
 * and the refcount of 'ptr' (the object being released) is decremented.
 * Both 'ptr' and 'reference' may be NULL.
 * \return TRUE if the released object's refcount hits zero and the caller
 *         should destroy it.
 */
static INLINE boolean
pipe_reference(struct pipe_reference *ptr, struct pipe_reference *reference)
{
   boolean destroy = FALSE;

   if (ptr != reference) {
      /* bump the reference.count first */
      if (reference) {
         assert(pipe_is_referenced(reference));
         p_atomic_inc(&reference->count);
      }

      if (ptr) {
         assert(pipe_is_referenced(ptr));
         if (p_atomic_dec_zero(&ptr->count)) {
            destroy = TRUE;
         }
      }
   }

   return destroy;
}

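/*
 * Example (illustrative sketch): drivers typically start a newly allocated
 * object's refcount at one before handing it out:
 *
 *    pipe_reference_init(&res->reference, 1);   (res: freshly created resource)
 *
 * The raw pipe_reference() helper is rarely called directly; the typed
 * wrappers below use it and also destroy the old object once its refcount
 * reaches zero.
 */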

/**
 * Set *ptr to point to surf, referencing surf and unreferencing whatever
 * *ptr previously pointed to.  The old surface is destroyed once its
 * refcount drops to zero.  Either pointer may be NULL; the NULL cases work
 * because 'reference' is the first member of these structures.  The
 * resource and sampler-view variants below follow the same pattern.
 */
static INLINE void
pipe_surface_reference(struct pipe_surface **ptr, struct pipe_surface *surf)
{
   struct pipe_surface *old_surf = *ptr;

   if (pipe_reference(&(*ptr)->reference, &surf->reference))
      old_surf->texture->screen->tex_surface_destroy(old_surf);
   *ptr = surf;
}


static INLINE void
pipe_resource_reference(struct pipe_resource **ptr, struct pipe_resource *tex)
{
   struct pipe_resource *old_tex = *ptr;

   if (pipe_reference(&(*ptr)->reference, &tex->reference))
      old_tex->screen->resource_destroy(old_tex->screen, old_tex);
   *ptr = tex;
}


static INLINE void
pipe_sampler_view_reference(struct pipe_sampler_view **ptr, struct pipe_sampler_view *view)
{
   struct pipe_sampler_view *old_view = *ptr;

   if (pipe_reference(&(*ptr)->reference, &view->reference))
      old_view->context->sampler_view_destroy(old_view->context, old_view);
   *ptr = view;
}

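/*
 * Example (illustrative sketch): holding and releasing a reference, where
 * 'st' is a hypothetical state struct with a 'struct pipe_resource *cbuf'
 * member (initialized to NULL):
 *
 *    pipe_resource_reference(&st->cbuf, new_buf);    (take a reference)
 *    ...
 *    pipe_resource_reference(&st->cbuf, NULL);       (release it)
 */
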
static INLINE void
pipe_surface_reset(struct pipe_surface* ps, struct pipe_resource *pt,
                   unsigned face, unsigned level, unsigned zslice, unsigned flags)
{
   pipe_resource_reference(&ps->texture, pt);
   ps->format = pt->format;
   ps->width = u_minify(pt->width0, level);
   ps->height = u_minify(pt->height0, level);
   ps->usage = flags;
   ps->face = face;
   ps->level = level;
   ps->zslice = zslice;
}

static INLINE void
pipe_surface_init(struct pipe_surface* ps, struct pipe_resource *pt,
                  unsigned face, unsigned level, unsigned zslice, unsigned flags)
{
   /* Clear the texture pointer before pipe_surface_reset() takes a new
    * reference, so no stale pointer gets unreferenced, and start the
    * surface's own refcount at one.
    */
   ps->texture = NULL;
   pipe_reference_init(&ps->reference, 1);
   pipe_surface_reset(ps, pt, face, level, zslice, flags);
}

/*
 * Convenience wrappers for screen buffer functions.
 */

static INLINE struct pipe_resource *
pipe_buffer_create( struct pipe_screen *screen,
                    unsigned bind,
                    unsigned size )
{
   struct pipe_resource buffer;
   memset(&buffer, 0, sizeof buffer);
   buffer.target = PIPE_BUFFER;
   buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
   buffer.bind = bind;
   buffer.usage = PIPE_USAGE_DEFAULT;
   buffer.flags = 0;
   buffer.width0 = size;
   buffer.height0 = 1;
   buffer.depth0 = 1;
   return screen->resource_create(screen, &buffer);
}

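/*
 * Example (illustrative sketch): creating a 64 KB vertex buffer on a valid
 * 'screen' and later dropping the reference:
 *
 *    struct pipe_resource *vbuf =
 *       pipe_buffer_create(screen, PIPE_BIND_VERTEX_BUFFER, 64 * 1024);
 *    ...
 *    pipe_resource_reference(&vbuf, NULL);
 */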

static INLINE struct pipe_resource *
pipe_user_buffer_create( struct pipe_screen *screen, void *ptr, unsigned size,
                         unsigned usage )
{
   return screen->user_buffer_create(screen, ptr, size, usage);
}

static INLINE void *
pipe_buffer_map_range(struct pipe_context *pipe,
                      struct pipe_resource *buffer,
                      unsigned offset,
                      unsigned length,
                      unsigned usage,
                      struct pipe_transfer **transfer)
{
   struct pipe_box box;
   void *map;

   assert(offset < buffer->width0);
   assert(offset + length <= buffer->width0);
   assert(length);

   u_box_1d(offset, length, &box);

   *transfer = pipe->get_transfer( pipe,
                                   buffer,
                                   u_subresource(0, 0),
                                   usage,
                                   &box);

   if (*transfer == NULL)
      return NULL;

   map = pipe->transfer_map( pipe, *transfer );
   if (map == NULL) {
      pipe->transfer_destroy( pipe, *transfer );
      *transfer = NULL;   /* don't hand a dangling transfer back to the caller */
      return NULL;
   }

   /* Match old screen->buffer_map_range() behaviour: return a pointer
    * to where the beginning of the buffer would be:
    */
   return (void *)((char *)map - offset);
}


static INLINE void *
pipe_buffer_map(struct pipe_context *pipe,
                struct pipe_resource *buffer,
                unsigned usage,
                struct pipe_transfer **transfer)
{
   return pipe_buffer_map_range(pipe, buffer, 0, buffer->width0, usage, transfer);
}


static INLINE void
pipe_buffer_unmap(struct pipe_context *pipe,
                  struct pipe_resource *buf,
                  struct pipe_transfer *transfer)
{
   if (transfer) {
      pipe->transfer_unmap(pipe, transfer);
      pipe->transfer_destroy(pipe, transfer);
   }
}

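/*
 * Example (illustrative sketch): writing 'len' caller-supplied bytes at
 * 'offset' into an existing buffer with an explicit map/unmap.  Note that
 * the returned pointer corresponds to the start of the buffer, so the
 * write still uses 'offset':
 *
 *    struct pipe_transfer *xfer;
 *    char *map = (char *) pipe_buffer_map_range(pipe, buf, offset, len,
 *                                               PIPE_TRANSFER_WRITE, &xfer);
 *    if (map) {
 *       memcpy(map + offset, data, len);
 *       pipe_buffer_unmap(pipe, buf, xfer);
 *    }
 *
 * For one-shot uploads, pipe_buffer_write() below does the same in a
 * single call.
 */
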
static INLINE void
pipe_buffer_flush_mapped_range(struct pipe_context *pipe,
                               struct pipe_transfer *transfer,
                               unsigned offset,
                               unsigned length)
{
   struct pipe_box box;
   int transfer_offset;

   assert(length);
   assert(transfer->box.x <= offset);
   assert(offset + length <= transfer->box.x + transfer->box.width);

   /* Match old screen->buffer_flush_mapped_range() behaviour, where the
    * offset parameter is relative to the start of the buffer, not the
    * mapped range.
    */
   transfer_offset = offset - transfer->box.x;

   u_box_1d(transfer_offset, length, &box);

   pipe->transfer_flush_region(pipe, transfer, &box);
}

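/*
 * Example (illustrative sketch): incremental upload with explicit flushes.
 * Mapping with PIPE_TRANSFER_FLUSH_EXPLICIT tells the driver that only the
 * ranges flushed afterwards were actually written:
 *
 *    struct pipe_transfer *xfer;
 *    char *map = (char *) pipe_buffer_map(pipe, buf,
 *                                         PIPE_TRANSFER_WRITE |
 *                                         PIPE_TRANSFER_FLUSH_EXPLICIT,
 *                                         &xfer);
 *    if (map) {
 *       memcpy(map + off, chunk, chunk_size);
 *       pipe_buffer_flush_mapped_range(pipe, xfer, off, chunk_size);
 *       pipe_buffer_unmap(pipe, buf, xfer);
 *    }
 *
 * 'buf', 'off', 'chunk' and 'chunk_size' are assumed to come from the caller.
 */
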
static INLINE void
pipe_buffer_write(struct pipe_context *pipe,
                  struct pipe_resource *buf,
                  unsigned offset,
                  unsigned size,
                  const void *data)
{
   struct pipe_box box;

   u_box_1d(offset, size, &box);

   pipe->transfer_inline_write( pipe,
                                buf,
                                u_subresource(0,0),
                                PIPE_TRANSFER_WRITE,
                                &box,
                                data,
                                size,
                                0);
}

/**
 * Special case for writing non-overlapping ranges.
 *
 * We can avoid GPU/CPU synchronization when writing a range that has never
 * been written before.
 */
static INLINE void
pipe_buffer_write_nooverlap(struct pipe_context *pipe,
                            struct pipe_resource *buf,
                            unsigned offset, unsigned size,
                            const void *data)
{
   struct pipe_box box;

   u_box_1d(offset, size, &box);

   pipe->transfer_inline_write(pipe,
                               buf,
                               u_subresource(0,0),
                               (PIPE_TRANSFER_WRITE |
                                PIPE_TRANSFER_NOOVERWRITE),
                               &box,
                               data,
                               0, 0);
}

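/*
 * Example (illustrative sketch): appending vertices to a streaming buffer.
 * As long as each call targets a range that has not been written before,
 * the driver can skip synchronization.  'vbuf', 'verts', 'vert_bytes' and
 * the running 'tail' offset are assumed to be managed by the caller:
 *
 *    pipe_buffer_write_nooverlap(pipe, vbuf, tail, vert_bytes, verts);
 *    tail += vert_bytes;
 */
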
static INLINE void
pipe_buffer_read(struct pipe_context *pipe,
                 struct pipe_resource *buf,
                 unsigned offset,
                 unsigned size,
                 void *data)
{
   struct pipe_transfer *src_transfer;
   ubyte *map;

   map = (ubyte *) pipe_buffer_map_range(pipe,
                                         buf,
                                         offset, size,
                                         PIPE_TRANSFER_READ,
                                         &src_transfer);

   if (map)
      memcpy(data, map + offset, size);

   pipe_buffer_unmap(pipe, buf, src_transfer);
}

static INLINE struct pipe_transfer *
pipe_get_transfer( struct pipe_context *context,
                   struct pipe_resource *resource,
                   unsigned face, unsigned level,
                   unsigned zslice,
                   enum pipe_transfer_usage usage,
                   unsigned x, unsigned y,
                   unsigned w, unsigned h)
{
   struct pipe_box box;
   u_box_2d_zslice( x, y, zslice, w, h, &box );
   return context->get_transfer( context,
                                 resource,
                                 u_subresource(face, level),
                                 usage,
                                 &box );
}

static INLINE void *
pipe_transfer_map( struct pipe_context *context,
                   struct pipe_transfer *transfer )
{
   return context->transfer_map( context, transfer );
}

static INLINE void
pipe_transfer_unmap( struct pipe_context *context,
                     struct pipe_transfer *transfer )
{
   context->transfer_unmap( context, transfer );
}


static INLINE void
pipe_transfer_destroy( struct pipe_context *context,
                       struct pipe_transfer *transfer )
{
   context->transfer_destroy(context, transfer);
}

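/*
 * Example (illustrative sketch): reading back a 2D region of a texture.
 * 'tex', the region (x, y, w, h) and the destination are assumed to be
 * provided by the caller; rows in the mapping are xfer->stride bytes apart:
 *
 *    struct pipe_transfer *xfer =
 *       pipe_get_transfer(pipe, tex, 0, 0, 0, PIPE_TRANSFER_READ,
 *                         x, y, w, h);
 *    if (xfer) {
 *       void *map = pipe_transfer_map(pipe, xfer);
 *       if (map) {
 *          (copy w x h texels out of 'map', one row per xfer->stride bytes)
 *          pipe_transfer_unmap(pipe, xfer);
 *       }
 *       pipe_transfer_destroy(pipe, xfer);
 *    }
 */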

#ifdef __cplusplus
}
#endif

#endif /* U_INLINES_H */