u_inlines: add function to initialize pipe_surface
[mesa.git] / src / gallium / auxiliary / util / u_inlines.h
1 /**************************************************************************
2 *
3 * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #ifndef U_INLINES_H
29 #define U_INLINES_H
30
31 #include "pipe/p_context.h"
32 #include "pipe/p_defines.h"
33 #include "pipe/p_state.h"
34 #include "pipe/p_screen.h"
35 #include "util/u_debug.h"
36 #include "util/u_atomic.h"
37 #include "util/u_box.h"
38 #include "util/u_math.h"
39
40
41 #ifdef __cplusplus
42 extern "C" {
43 #endif
44
45
46 /*
47 * Reference counting helper functions.
48 */
49
50
/**
 * Initialize a reference counter to the given value.
 * Typically called with count=1 on a freshly allocated object.
 */
static INLINE void
pipe_reference_init(struct pipe_reference *reference, unsigned count)
{
   p_atomic_set(&reference->count, count);
}
56
/** Return TRUE if the object's reference count is non-zero. */
static INLINE boolean
pipe_is_referenced(struct pipe_reference *reference)
{
   return p_atomic_read(&reference->count) != 0;
}
62
/**
 * Update reference counting.
 * The old thing pointed to, if any, will be unreferenced.
 * Both 'ptr' and 'reference' may be NULL.
 * A no-op when both arguments point at the same object (or are both NULL).
 * \return TRUE if the object's refcount hits zero and should be destroyed.
 */
static INLINE boolean
pipe_reference(struct pipe_reference *ptr, struct pipe_reference *reference)
{
   boolean destroy = FALSE;

   if(ptr != reference) {
      /* bump the reference.count first */
      if (reference) {
         assert(pipe_is_referenced(reference));
         p_atomic_inc(&reference->count);
      }

      if (ptr) {
         assert(pipe_is_referenced(ptr));
         /* atomically decrement; only the caller that took it to zero
          * gets told to destroy the object
          */
         if (p_atomic_dec_zero(&ptr->count)) {
            destroy = TRUE;
         }
      }
   }

   return destroy;
}
91
92
93 static INLINE void
94 pipe_surface_reference(struct pipe_surface **ptr, struct pipe_surface *surf)
95 {
96 struct pipe_surface *old_surf = *ptr;
97
98 if (pipe_reference(&(*ptr)->reference, &surf->reference))
99 old_surf->texture->screen->tex_surface_destroy(old_surf);
100 *ptr = surf;
101 }
102
103
104 static INLINE void
105 pipe_resource_reference(struct pipe_resource **ptr, struct pipe_resource *tex)
106 {
107 struct pipe_resource *old_tex = *ptr;
108
109 if (pipe_reference(&(*ptr)->reference, &tex->reference))
110 old_tex->screen->resource_destroy(old_tex->screen, old_tex);
111 *ptr = tex;
112 }
113
114
115 static INLINE void
116 pipe_sampler_view_reference(struct pipe_sampler_view **ptr, struct pipe_sampler_view *view)
117 {
118 struct pipe_sampler_view *old_view = *ptr;
119
120 if (pipe_reference(&(*ptr)->reference, &view->reference))
121 old_view->context->sampler_view_destroy(old_view->context, old_view);
122 *ptr = view;
123 }
124
125 /* you have to call pipe_reference_init(&ps->reference, 1) yourself if it is just allocated */
126 static INLINE void
127 pipe_surface_init(struct pipe_surface* ps, struct pipe_resource *pt,
128 unsigned face, unsigned level, unsigned zslice, unsigned flags)
129 {
130 pipe_resource_reference(&ps->texture, pt);
131 ps->format = pt->format;
132 ps->width = u_minify(pt->width0, level);
133 ps->height = u_minify(pt->height0, level);
134 ps->usage = flags;
135 ps->face = face;
136 ps->level = level;
137 ps->zslice = zslice;
138 }
139
140 /*
141 * Convenience wrappers for screen buffer functions.
142 */
143
144 static INLINE struct pipe_resource *
145 pipe_buffer_create( struct pipe_screen *screen,
146 unsigned bind,
147 unsigned size )
148 {
149 struct pipe_resource buffer;
150 memset(&buffer, 0, sizeof buffer);
151 buffer.target = PIPE_BUFFER;
152 buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
153 buffer.bind = bind;
154 buffer._usage = PIPE_USAGE_DEFAULT;
155 buffer.flags = 0;
156 buffer.width0 = size;
157 buffer.height0 = 1;
158 buffer.depth0 = 1;
159 return screen->resource_create(screen, &buffer);
160 }
161
162
/**
 * Wrap an existing user memory region (ptr, size) as a buffer resource.
 * Thin wrapper over screen->user_buffer_create().
 */
static INLINE struct pipe_resource *
pipe_user_buffer_create( struct pipe_screen *screen, void *ptr, unsigned size,
			 unsigned usage )
{
   return screen->user_buffer_create(screen, ptr, size, usage);
}
169
170 static INLINE void *
171 pipe_buffer_map_range(struct pipe_context *pipe,
172 struct pipe_resource *buffer,
173 unsigned offset,
174 unsigned length,
175 unsigned usage,
176 struct pipe_transfer **transfer)
177 {
178 struct pipe_box box;
179 void *map;
180
181 assert(offset < buffer->width0);
182 assert(offset + length <= buffer->width0);
183 assert(length);
184
185 u_box_1d(offset, length, &box);
186
187 *transfer = pipe->get_transfer( pipe,
188 buffer,
189 u_subresource(0, 0),
190 usage,
191 &box);
192
193 if (*transfer == NULL)
194 return NULL;
195
196 map = pipe->transfer_map( pipe, *transfer );
197 if (map == NULL) {
198 pipe->transfer_destroy( pipe, *transfer );
199 return NULL;
200 }
201
202 /* Match old screen->buffer_map_range() behaviour, return pointer
203 * to where the beginning of the buffer would be:
204 */
205 return (void *)((char *)map - offset);
206 }
207
208
/**
 * Map an entire buffer (bytes 0 .. width0) for CPU access.
 * See pipe_buffer_map_range() for the pointer/transfer semantics.
 */
static INLINE void *
pipe_buffer_map(struct pipe_context *pipe,
                struct pipe_resource *buffer,
                unsigned usage,
                struct pipe_transfer **transfer)
{
   return pipe_buffer_map_range(pipe, buffer, 0, buffer->width0, usage, transfer);
}
217
218
219 static INLINE void
220 pipe_buffer_unmap(struct pipe_context *pipe,
221 struct pipe_resource *buf,
222 struct pipe_transfer *transfer)
223 {
224 if (transfer) {
225 pipe->transfer_unmap(pipe, transfer);
226 pipe->transfer_destroy(pipe, transfer);
227 }
228 }
229
230 static INLINE void
231 pipe_buffer_flush_mapped_range(struct pipe_context *pipe,
232 struct pipe_transfer *transfer,
233 unsigned offset,
234 unsigned length)
235 {
236 struct pipe_box box;
237 int transfer_offset;
238
239 assert(length);
240 assert(transfer->box.x <= offset);
241 assert(offset + length <= transfer->box.x + transfer->box.width);
242
243 /* Match old screen->buffer_flush_mapped_range() behaviour, where
244 * offset parameter is relative to the start of the buffer, not the
245 * mapped range.
246 */
247 transfer_offset = offset - transfer->box.x;
248
249 u_box_1d(transfer_offset, length, &box);
250
251 pipe->transfer_flush_region(pipe, transfer, &box);
252 }
253
254 static INLINE void
255 pipe_buffer_write(struct pipe_context *pipe,
256 struct pipe_resource *buf,
257 unsigned offset,
258 unsigned size,
259 const void *data)
260 {
261 struct pipe_box box;
262
263 u_box_1d(offset, size, &box);
264
265 pipe->transfer_inline_write( pipe,
266 buf,
267 u_subresource(0,0),
268 PIPE_TRANSFER_WRITE,
269 &box,
270 data,
271 size,
272 0);
273 }
274
275 /**
276 * Special case for writing non-overlapping ranges.
277 *
278 * We can avoid GPU/CPU synchronization when writing range that has never
279 * been written before.
280 */
281 static INLINE void
282 pipe_buffer_write_nooverlap(struct pipe_context *pipe,
283 struct pipe_resource *buf,
284 unsigned offset, unsigned size,
285 const void *data)
286 {
287 struct pipe_box box;
288
289 u_box_1d(offset, size, &box);
290
291 pipe->transfer_inline_write(pipe,
292 buf,
293 u_subresource(0,0),
294 (PIPE_TRANSFER_WRITE |
295 PIPE_TRANSFER_NOOVERWRITE),
296 &box,
297 data,
298 0, 0);
299 }
300
301 static INLINE void
302 pipe_buffer_read(struct pipe_context *pipe,
303 struct pipe_resource *buf,
304 unsigned offset,
305 unsigned size,
306 void *data)
307 {
308 struct pipe_transfer *src_transfer;
309 ubyte *map;
310
311 map = (ubyte *) pipe_buffer_map_range(pipe,
312 buf,
313 offset, size,
314 PIPE_TRANSFER_READ,
315 &src_transfer);
316
317 if (map)
318 memcpy(data, map + offset, size);
319
320 pipe_buffer_unmap(pipe, buf, src_transfer);
321 }
322
323 static INLINE struct pipe_transfer *
324 pipe_get_transfer( struct pipe_context *context,
325 struct pipe_resource *resource,
326 unsigned face, unsigned level,
327 unsigned zslice,
328 enum pipe_transfer_usage usage,
329 unsigned x, unsigned y,
330 unsigned w, unsigned h)
331 {
332 struct pipe_box box;
333 u_box_2d_zslice( x, y, zslice, w, h, &box );
334 return context->get_transfer( context,
335 resource,
336 u_subresource(face, level),
337 usage,
338 &box );
339 }
340
/** Thin wrapper: map the given transfer through its context. */
static INLINE void *
pipe_transfer_map( struct pipe_context *context,
                   struct pipe_transfer *transfer )
{
   return context->transfer_map( context, transfer );
}
347
/** Thin wrapper: unmap the given transfer through its context. */
static INLINE void
pipe_transfer_unmap( struct pipe_context *context,
                     struct pipe_transfer *transfer )
{
   context->transfer_unmap( context, transfer );
}
354
355
/** Thin wrapper: destroy the given transfer through its context. */
static INLINE void
pipe_transfer_destroy( struct pipe_context *context,
                       struct pipe_transfer *transfer )
{
   context->transfer_destroy(context, transfer);
}
362
363
364 #ifdef __cplusplus
365 }
366 #endif
367
368 #endif /* U_INLINES_H */