[mesa.git] / src/mesa/drivers/dri/intel/intel_regions.c
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/* Provide additional functionality on top of bufmgr buffers:
 *   - 2d semantics and blit operations
 *   - refcounting of buffers for multiple images in a buffer.
 *   - refcounting of buffer mappings.
 *   - some logic for moving the buffers to the best memory pools for
 *     given operations.
 *
 * Most of this is to make it easier to implement the fixed-layout
 * mipmap tree required by intel hardware in the face of GL's
 * programming interface where each image can be specified in random
 * order and it isn't clear what layout the tree should have until the
 * last moment.
 */
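
/* A minimal usage sketch (not part of the original file) of the lifecycle
 * these helpers support, assuming an already initialized intel_screen and
 * intel_context named "screen" and "intel":
 *
 *    struct intel_region *region =
 *       intel_region_alloc(screen, I915_TILING_NONE, 4, 64, 64, GL_FALSE);
 *    if (region) {
 *       GLubyte *ptr = intel_region_map(intel, region);
 *       memset(ptr, 0, region->height * region->pitch * region->cpp);
 *       intel_region_unmap(intel, region);
 *       intel_region_release(&region);
 *    }
 */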

#include <sys/ioctl.h>
#include <errno.h>

#include "main/hash.h"
#include "intel_context.h"
#include "intel_regions.h"
#include "intel_blit.h"
#include "intel_buffer_objects.h"
#include "intel_bufmgr.h"
#include "intel_batchbuffer.h"

#define FILE_DEBUG_FLAG DEBUG_REGION

/* This should be set to the maximum backtrace size desired.
 * Set it to 0 to disable backtrace debugging.
 */
#define DEBUG_BACKTRACE_SIZE 0

#if DEBUG_BACKTRACE_SIZE == 0
/* Use the standard debug output */
#define _DBG(...) DBG(__VA_ARGS__)
#else
/* Use backtracing debug output */
#define _DBG(...) {debug_backtrace(); DBG(__VA_ARGS__);}

/* Backtracing debug support */
#include <execinfo.h>

static void
debug_backtrace(void)
{
   void *trace[DEBUG_BACKTRACE_SIZE];
   char **strings = NULL;
   int traceSize;
   register int i;

   traceSize = backtrace(trace, DEBUG_BACKTRACE_SIZE);
   strings = backtrace_symbols(trace, traceSize);
   if (strings == NULL) {
      DBG("no backtrace:");
      return;
   }

   /* Spit out all the strings with a colon separator.  Ignore
    * the first, since we don't really care about the call
    * to debug_backtrace() itself.  Skip until the final "/" in
    * the trace to avoid really long lines.
    */
   for (i = 1; i < traceSize; i++) {
      char *p = strings[i], *slash = strings[i];
      while (*p) {
         if (*p++ == '/') {
            slash = p;
         }
      }

      DBG("%s:", slash);
   }

   /* Free up the memory, and we're done */
   free(strings);
}

#endif


/* XXX: Thread safety?
 */
GLubyte *
intel_region_map(struct intel_context *intel, struct intel_region *region)
{
   /* We have the region->map_refcount controlling mapping of the BO because
    * in software fallbacks we may end up mapping the same buffer multiple
    * times on Mesa's behalf, so we refcount our mappings to make sure that
    * the pointer stays valid until the end of the unmap chain.  However, we
    * must not emit any batchbuffers between the start of mapping and the end
    * of unmapping, or further use of the map will be incoherent with the GPU
    * rendering done by that batchbuffer.  Hence we assert in
    * intel_batchbuffer_flush() that that doesn't happen, which means that the
    * flush is only needed on first map of the buffer.
    */

   _DBG("%s %p\n", __FUNCTION__, region);
   if (!region->map_refcount++) {
      intel_flush(&intel->ctx);

      if (region->tiling != I915_TILING_NONE)
         drm_intel_gem_bo_map_gtt(region->bo);
      else
         drm_intel_bo_map(region->bo, GL_TRUE);

      region->map = region->bo->virtual;
      ++intel->num_mapped_regions;
   }

   return region->map;
}

void
intel_region_unmap(struct intel_context *intel, struct intel_region *region)
{
   _DBG("%s %p\n", __FUNCTION__, region);
   if (!--region->map_refcount) {
      if (region->tiling != I915_TILING_NONE)
         drm_intel_gem_bo_unmap_gtt(region->bo);
      else
         drm_intel_bo_unmap(region->bo);

      region->map = NULL;
      --intel->num_mapped_regions;
      assert(intel->num_mapped_regions >= 0);
   }
}
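
/* A small sketch (not in the original code) of how the map refcount above
 * lets a software fallback nest mappings of the same region; the inner map
 * returns the same CPU pointer, and the buffer stays mapped until the
 * outermost unmap:
 *
 *    GLubyte *outer = intel_region_map(intel, region);    map_refcount == 1
 *    GLubyte *inner = intel_region_map(intel, region);    map_refcount == 2
 *    assert(inner == outer);
 *    intel_region_unmap(intel, region);                   still mapped
 *    intel_region_unmap(intel, region);                   actually unmapped
 */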

static struct intel_region *
intel_region_alloc_internal(struct intel_screen *screen,
                            GLuint cpp,
                            GLuint width, GLuint height, GLuint pitch,
                            uint32_t tiling, drm_intel_bo *buffer)
{
   struct intel_region *region;

   region = calloc(sizeof(*region), 1);
   if (region == NULL)
      return region;

   region->cpp = cpp;
   region->width = width;
   region->height = height;
   region->pitch = pitch;
   region->refcount = 1;
   region->bo = buffer;
   region->tiling = tiling;
   region->screen = screen;

   _DBG("%s <-- %p\n", __FUNCTION__, region);
   return region;
}

struct intel_region *
intel_region_alloc(struct intel_screen *screen,
                   uint32_t tiling,
                   GLuint cpp, GLuint width, GLuint height,
                   GLboolean expect_accelerated_upload)
{
   drm_intel_bo *buffer;
   unsigned long flags = 0;
   unsigned long aligned_pitch;
   struct intel_region *region;

   if (expect_accelerated_upload)
      flags |= BO_ALLOC_FOR_RENDER;

   buffer = drm_intel_bo_alloc_tiled(screen->bufmgr, "region",
                                     width, height, cpp,
                                     &tiling, &aligned_pitch, flags);
   if (buffer == NULL)
      return NULL;

   region = intel_region_alloc_internal(screen, cpp, width, height,
                                        aligned_pitch / cpp, tiling, buffer);
   if (region == NULL) {
      drm_intel_bo_unreference(buffer);
      return NULL;
   }

   return region;
}

GLboolean
intel_region_flink(struct intel_region *region, uint32_t *name)
{
   if (region->name == 0) {
      if (drm_intel_bo_flink(region->bo, &region->name))
         return GL_FALSE;

      _mesa_HashInsert(region->screen->named_regions,
                       region->name, region);
   }

   *name = region->name;

   return GL_TRUE;
}

struct intel_region *
intel_region_alloc_for_handle(struct intel_screen *screen,
                              GLuint cpp,
                              GLuint width, GLuint height, GLuint pitch,
                              GLuint handle, const char *name)
{
   struct intel_region *region, *dummy;
   drm_intel_bo *buffer;
   int ret;
   uint32_t bit_6_swizzle, tiling;

   region = _mesa_HashLookup(screen->named_regions, handle);
   if (region != NULL) {
      dummy = NULL;
      if (region->width != width || region->height != height ||
          region->cpp != cpp || region->pitch != pitch) {
         fprintf(stderr,
                 "Region for name %d already exists but is not compatible\n",
                 handle);
         return NULL;
      }
      intel_region_reference(&dummy, region);
      return dummy;
   }

   buffer = intel_bo_gem_create_from_name(screen->bufmgr, name, handle);
   if (buffer == NULL)
      return NULL;
   ret = drm_intel_bo_get_tiling(buffer, &tiling, &bit_6_swizzle);
   if (ret != 0) {
      fprintf(stderr, "Couldn't get tiling of buffer %d (%s): %s\n",
              handle, name, strerror(-ret));
      drm_intel_bo_unreference(buffer);
      return NULL;
   }

   region = intel_region_alloc_internal(screen, cpp,
                                        width, height, pitch, tiling, buffer);
   if (region == NULL) {
      drm_intel_bo_unreference(buffer);
      return NULL;
   }

   region->name = handle;
   _mesa_HashInsert(screen->named_regions, handle, region);

   return region;
}
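
/* Illustrative only: how the two functions above are expected to cooperate.
 * intel_region_flink() exports a global (flink) name for a region, and
 * intel_region_alloc_for_handle() opens a region from such a name; within a
 * single process the named_regions hash returns the existing region instead
 * of creating a duplicate.  "shared" is just a debug label for the BO:
 *
 *    uint32_t flink_name;
 *    if (intel_region_flink(region, &flink_name)) {
 *       struct intel_region *shared =
 *          intel_region_alloc_for_handle(screen, region->cpp,
 *                                        region->width, region->height,
 *                                        region->pitch, flink_name, "shared");
 *       ...
 *       intel_region_release(&shared);
 *    }
 */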

void
intel_region_reference(struct intel_region **dst, struct intel_region *src)
{
   _DBG("%s: %p(%d) -> %p(%d)\n", __FUNCTION__,
        *dst, *dst ? (*dst)->refcount : 0, src, src ? src->refcount : 0);

   if (src != *dst) {
      if (*dst)
         intel_region_release(dst);

      if (src)
         src->refcount++;
      *dst = src;
   }
}

void
intel_region_release(struct intel_region **region_handle)
{
   struct intel_region *region = *region_handle;

   if (region == NULL) {
      _DBG("%s NULL\n", __FUNCTION__);
      return;
   }

   _DBG("%s %p %d\n", __FUNCTION__, region, region->refcount - 1);

   ASSERT(region->refcount > 0);
   region->refcount--;

   if (region->refcount == 0) {
      assert(region->map_refcount == 0);

      drm_intel_bo_unreference(region->bo);

      if (region->name > 0)
         _mesa_HashRemove(region->screen->named_regions, region->name);

      free(region);
   }
   *region_handle = NULL;
}
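
/* A brief sketch (not part of the original code) of the pointer-slot idiom
 * implemented by intel_region_reference()/intel_region_release(): assigning
 * a new region into a slot implicitly drops whatever the slot held before,
 * and releasing NULLs the slot:
 *
 *    struct intel_region *slot = NULL;
 *    intel_region_reference(&slot, region_a);    region_a->refcount++
 *    intel_region_reference(&slot, region_b);    releases region_a, refs region_b
 *    intel_region_release(&slot);                slot == NULL afterwards
 */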

/*
 * XXX Move this into core Mesa?
 */
void
_mesa_copy_rect(GLubyte * dst,
                GLuint cpp,
                GLuint dst_pitch,
                GLuint dst_x,
                GLuint dst_y,
                GLuint width,
                GLuint height,
                const GLubyte * src,
                GLuint src_pitch, GLuint src_x, GLuint src_y)
{
   GLuint i;

   dst_pitch *= cpp;
   src_pitch *= cpp;
   dst += dst_x * cpp;
   src += src_x * cpp;
   dst += dst_y * dst_pitch;
   src += src_y * src_pitch;
   width *= cpp;

   if (width == dst_pitch && width == src_pitch)
      memcpy(dst, src, height * width);
   else {
      for (i = 0; i < height; i++) {
         memcpy(dst, src, width);
         dst += dst_pitch;
         src += src_pitch;
      }
   }
}
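
/* A hypothetical example (not in the original file) of calling
 * _mesa_copy_rect() to copy a 16x16 RGBA sub-rectangle into a larger
 * destination image; both pitches are given in pixels, since the function
 * multiplies them by cpp itself:
 *
 *    GLubyte *src_img = malloc(64 * 64 * 4);      64x64, cpp = 4
 *    GLubyte *dst_img = malloc(128 * 128 * 4);    128x128, cpp = 4
 *
 *    _mesa_copy_rect(dst_img, 4, 128, 8, 8, 16, 16,
 *                    src_img, 64, 0, 0);
 */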

/* Copy rectangular sub-regions.  Need better logic about when to
 * push buffers into AGP - will currently do so whenever possible.
 */
GLboolean
intel_region_copy(struct intel_context *intel,
                  struct intel_region *dst,
                  GLuint dst_offset,
                  GLuint dstx, GLuint dsty,
                  struct intel_region *src,
                  GLuint src_offset,
                  GLuint srcx, GLuint srcy, GLuint width, GLuint height,
                  GLboolean flip,
                  GLenum logicop)
{
   uint32_t src_pitch = src->pitch;

   _DBG("%s\n", __FUNCTION__);

   if (intel == NULL)
      return GL_FALSE;

   assert(src->cpp == dst->cpp);

   if (flip)
      src_pitch = -src_pitch;

   return intelEmitCopyBlit(intel,
                            dst->cpp,
                            src_pitch, src->bo, src_offset, src->tiling,
                            dst->pitch, dst->bo, dst_offset, dst->tiling,
                            srcx, srcy, dstx, dsty, width, height,
                            logicop);
}
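
/* Illustrative only: a full-region GPU copy using intel_region_copy() might
 * look roughly like this, assuming src and dst have the same cpp and GL_COPY
 * as the logic op:
 *
 *    intel_region_copy(intel, dst, 0, 0, 0,
 *                      src, 0, 0, 0, src->width, src->height,
 *                      GL_FALSE, GL_COPY);
 */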