Merge branch 'mesa_7_7_branch'
[mesa.git] / src / mesa / drivers / dri / intel / intel_regions.c
1 /**************************************************************************
2 *
3 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 /* Provide additional functionality on top of bufmgr buffers:
29 * - 2d semantics and blit operations
30 * - refcounting of buffers for multiple images in a buffer.
31 * - refcounting of buffer mappings.
32 * - some logic for moving the buffers to the best memory pools for
33 * given operations.
34 *
35 * Most of this is to make it easier to implement the fixed-layout
36 * mipmap tree required by intel hardware in the face of GL's
37 * programming interface where each image can be specifed in random
38 * order and it isn't clear what layout the tree should have until the
39 * last moment.
40 */
41
42 #include <sys/ioctl.h>
43 #include <errno.h>
44
45 #include "intel_context.h"
46 #include "intel_regions.h"
47 #include "intel_blit.h"
48 #include "intel_buffer_objects.h"
49 #include "intel_bufmgr.h"
50 #include "intel_batchbuffer.h"
51
52 #define FILE_DEBUG_FLAG DEBUG_REGION
53
54 /* This should be set to the maximum backtrace size desired.
55 * Set it to 0 to disable backtrace debugging.
56 */
57 #define DEBUG_BACKTRACE_SIZE 0
58
59 #if DEBUG_BACKTRACE_SIZE == 0
60 /* Use the standard debug output */
61 #define _DBG(...) DBG(__VA_ARGS__)
62 #else
63 /* Use backtracing debug output */
64 #define _DBG(...) {debug_backtrace(); DBG(__VA_ARGS__);}
65
66 /* Backtracing debug support */
67 #include <execinfo.h>
68
69 static void
70 debug_backtrace(void)
71 {
72 void *trace[DEBUG_BACKTRACE_SIZE];
73 char **strings = NULL;
74 int traceSize;
75 register int i;
76
77 traceSize = backtrace(trace, DEBUG_BACKTRACE_SIZE);
78 strings = backtrace_symbols(trace, traceSize);
79 if (strings == NULL) {
80 DBG("no backtrace:");
81 return;
82 }
83
84 /* Spit out all the strings with a colon separator. Ignore
85 * the first, since we don't really care about the call
86 * to debug_backtrace() itself. Skip until the final "/" in
87 * the trace to avoid really long lines.
88 */
89 for (i = 1; i < traceSize; i++) {
90 char *p = strings[i], *slash = strings[i];
91 while (*p) {
92 if (*p++ == '/') {
93 slash = p;
94 }
95 }
96
97 DBG("%s:", slash);
98 }
99
100 /* Free up the memory, and we're done */
101 free(strings);
102 }
103
104 #endif
105
106
107
108 /* XXX: Thread safety?
109 */
110 GLubyte *
111 intel_region_map(struct intel_context *intel, struct intel_region *region)
112 {
113 intelFlush(&intel->ctx);
114
115 _DBG("%s %p\n", __FUNCTION__, region);
116 if (!region->map_refcount++) {
117 if (region->pbo)
118 intel_region_cow(intel, region);
119
120 if (region->tiling != I915_TILING_NONE &&
121 intel->intelScreen->kernel_exec_fencing)
122 drm_intel_gem_bo_map_gtt(region->buffer);
123 else
124 dri_bo_map(region->buffer, GL_TRUE);
125 region->map = region->buffer->virtual;
126 }
127
128 return region->map;
129 }
130
131 void
132 intel_region_unmap(struct intel_context *intel, struct intel_region *region)
133 {
134 _DBG("%s %p\n", __FUNCTION__, region);
135 if (!--region->map_refcount) {
136 if (region->tiling != I915_TILING_NONE &&
137 intel->intelScreen->kernel_exec_fencing)
138 drm_intel_gem_bo_unmap_gtt(region->buffer);
139 else
140 dri_bo_unmap(region->buffer);
141 region->map = NULL;
142 }
143 }
144
145 static struct intel_region *
146 intel_region_alloc_internal(struct intel_context *intel,
147 GLuint cpp,
148 GLuint width, GLuint height, GLuint pitch,
149 dri_bo *buffer)
150 {
151 struct intel_region *region;
152
153 if (buffer == NULL) {
154 _DBG("%s <-- NULL\n", __FUNCTION__);
155 return NULL;
156 }
157
158 region = calloc(sizeof(*region), 1);
159 region->cpp = cpp;
160 region->width = width;
161 region->height = height;
162 region->pitch = pitch;
163 region->refcount = 1;
164 region->buffer = buffer;
165
166 /* Default to no tiling */
167 region->tiling = I915_TILING_NONE;
168 region->bit_6_swizzle = I915_BIT_6_SWIZZLE_NONE;
169
170 _DBG("%s <-- %p\n", __FUNCTION__, region);
171 return region;
172 }
173
174 struct intel_region *
175 intel_region_alloc(struct intel_context *intel,
176 uint32_t tiling,
177 GLuint cpp, GLuint width, GLuint height, GLuint pitch,
178 GLboolean expect_accelerated_upload)
179 {
180 dri_bo *buffer;
181 struct intel_region *region;
182
183 /* If we're tiled, our allocations are in 8 or 32-row blocks, so
184 * failure to align our height means that we won't allocate enough pages.
185 *
186 * If we're untiled, we still have to align to 2 rows high because the
187 * data port accesses 2x2 blocks even if the bottom row isn't to be
188 * rendered, so failure to align means we could walk off the end of the
189 * GTT and fault.
190 */
191 if (tiling == I915_TILING_X)
192 height = ALIGN(height, 8);
193 else if (tiling == I915_TILING_Y)
194 height = ALIGN(height, 32);
195 else
196 height = ALIGN(height, 2);
197
198 /* If we're untiled, we have to align to 2 rows high because the
199 * data port accesses 2x2 blocks even if the bottom row isn't to be
200 * rendered, so failure to align means we could walk off the end of the
201 * GTT and fault.
202 */
203 height = ALIGN(height, 2);
204
205 if (expect_accelerated_upload) {
206 buffer = drm_intel_bo_alloc_for_render(intel->bufmgr, "region",
207 pitch * cpp * height, 64);
208 } else {
209 buffer = drm_intel_bo_alloc(intel->bufmgr, "region",
210 pitch * cpp * height, 64);
211 }
212
213 region = intel_region_alloc_internal(intel, cpp, width, height,
214 pitch, buffer);
215
216 if (tiling != I915_TILING_NONE) {
217 assert(((pitch * cpp) & 127) == 0);
218 drm_intel_bo_set_tiling(buffer, &tiling, pitch * cpp);
219 drm_intel_bo_get_tiling(buffer, &region->tiling, &region->bit_6_swizzle);
220 }
221
222 return region;
223 }
224
225 struct intel_region *
226 intel_region_alloc_for_handle(struct intel_context *intel,
227 GLuint cpp,
228 GLuint width, GLuint height, GLuint pitch,
229 GLuint handle, const char *name)
230 {
231 struct intel_region *region;
232 dri_bo *buffer;
233 int ret;
234
235 buffer = intel_bo_gem_create_from_name(intel->bufmgr, name, handle);
236
237 region = intel_region_alloc_internal(intel, cpp,
238 width, height, pitch, buffer);
239 if (region == NULL)
240 return region;
241
242 ret = dri_bo_get_tiling(region->buffer, &region->tiling,
243 &region->bit_6_swizzle);
244 if (ret != 0) {
245 fprintf(stderr, "Couldn't get tiling of buffer %d (%s): %s\n",
246 handle, name, strerror(-ret));
247 intel_region_release(&region);
248 return NULL;
249 }
250
251 return region;
252 }
253
254 void
255 intel_region_reference(struct intel_region **dst, struct intel_region *src)
256 {
257 if (src)
258 _DBG("%s %p %d\n", __FUNCTION__, src, src->refcount);
259
260 assert(*dst == NULL);
261 if (src) {
262 src->refcount++;
263 *dst = src;
264 }
265 }
266
/**
 * Drop a reference to a region, freeing it (and its buffer) when the last
 * reference goes away.  *region_handle is always NULLed on return;
 * passing a handle containing NULL is a no-op.
 */
void
intel_region_release(struct intel_region **region_handle)
{
   struct intel_region *region = *region_handle;

   if (region == NULL) {
      _DBG("%s NULL\n", __FUNCTION__);
      return;
   }

   _DBG("%s %p %d\n", __FUNCTION__, region, region->refcount - 1);

   ASSERT(region->refcount > 0);
   region->refcount--;

   if (region->refcount == 0) {
      /* Freeing a region that is still CPU-mapped would leave callers
       * with dangling pointers.
       */
      assert(region->map_refcount == 0);

      /* Break any copy-on-write tie: the PBO keeps the data. */
      if (region->pbo)
         region->pbo->region = NULL;
      region->pbo = NULL;
      dri_bo_unreference(region->buffer);

      /* Legacy (non-GEM) mappings were established with drmMap and need
       * an explicit drmUnmap of the same size.
       */
      if (region->classic_map != NULL) {
         drmUnmap(region->classic_map,
                  region->pitch * region->cpp * region->height);
      }

      free(region);
   }
   *region_handle = NULL;
}
299
300 /*
301 * XXX Move this into core Mesa?
302 */
303 void
304 _mesa_copy_rect(GLubyte * dst,
305 GLuint cpp,
306 GLuint dst_pitch,
307 GLuint dst_x,
308 GLuint dst_y,
309 GLuint width,
310 GLuint height,
311 const GLubyte * src,
312 GLuint src_pitch, GLuint src_x, GLuint src_y)
313 {
314 GLuint i;
315
316 dst_pitch *= cpp;
317 src_pitch *= cpp;
318 dst += dst_x * cpp;
319 src += src_x * cpp;
320 dst += dst_y * dst_pitch;
321 src += src_y * dst_pitch;
322 width *= cpp;
323
324 if (width == dst_pitch && width == src_pitch)
325 memcpy(dst, src, height * width);
326 else {
327 for (i = 0; i < height; i++) {
328 memcpy(dst, src, width);
329 dst += dst_pitch;
330 src += src_pitch;
331 }
332 }
333 }
334
335
336 /* Upload data to a rectangular sub-region. Lots of choices how to do this:
337 *
338 * - memcpy by span to current destination
339 * - upload data as new buffer and blit
340 *
341 * Currently always memcpy.
342 */
343 void
344 intel_region_data(struct intel_context *intel,
345 struct intel_region *dst,
346 GLuint dst_offset,
347 GLuint dstx, GLuint dsty,
348 const void *src, GLuint src_pitch,
349 GLuint srcx, GLuint srcy, GLuint width, GLuint height)
350 {
351 _DBG("%s\n", __FUNCTION__);
352
353 if (intel == NULL)
354 return;
355
356 if (dst->pbo) {
357 if (dstx == 0 &&
358 dsty == 0 && width == dst->pitch && height == dst->height)
359 intel_region_release_pbo(intel, dst);
360 else
361 intel_region_cow(intel, dst);
362 }
363
364 _mesa_copy_rect(intel_region_map(intel, dst) + dst_offset,
365 dst->cpp,
366 dst->pitch,
367 dstx, dsty, width, height, src, src_pitch, srcx, srcy);
368
369 intel_region_unmap(intel, dst);
370 }
371
372 /* Copy rectangular sub-regions. Need better logic about when to
373 * push buffers into AGP - will currently do so whenever possible.
374 */
375 GLboolean
376 intel_region_copy(struct intel_context *intel,
377 struct intel_region *dst,
378 GLuint dst_offset,
379 GLuint dstx, GLuint dsty,
380 struct intel_region *src,
381 GLuint src_offset,
382 GLuint srcx, GLuint srcy, GLuint width, GLuint height,
383 GLenum logicop)
384 {
385 _DBG("%s\n", __FUNCTION__);
386
387 if (intel == NULL)
388 return GL_FALSE;
389
390 if (dst->pbo) {
391 if (dstx == 0 &&
392 dsty == 0 && width == dst->pitch && height == dst->height)
393 intel_region_release_pbo(intel, dst);
394 else
395 intel_region_cow(intel, dst);
396 }
397
398 assert(src->cpp == dst->cpp);
399
400 return intelEmitCopyBlit(intel,
401 dst->cpp,
402 src->pitch, src->buffer, src_offset, src->tiling,
403 dst->pitch, dst->buffer, dst_offset, dst->tiling,
404 srcx, srcy, dstx, dsty, width, height,
405 logicop);
406 }
407
408 /* Attach to a pbo, discarding our data. Effectively zero-copy upload
409 * the pbo's data.
410 */
411 void
412 intel_region_attach_pbo(struct intel_context *intel,
413 struct intel_region *region,
414 struct intel_buffer_object *pbo)
415 {
416 dri_bo *buffer;
417
418 if (region->pbo == pbo)
419 return;
420
421 _DBG("%s %p %p\n", __FUNCTION__, region, pbo);
422
423 /* If there is already a pbo attached, break the cow tie now.
424 * Don't call intel_region_release_pbo() as that would
425 * unnecessarily allocate a new buffer we would have to immediately
426 * discard.
427 */
428 if (region->pbo) {
429 region->pbo->region = NULL;
430 region->pbo = NULL;
431 }
432
433 if (region->buffer) {
434 dri_bo_unreference(region->buffer);
435 region->buffer = NULL;
436 }
437
438 /* make sure pbo has a buffer of its own */
439 buffer = intel_bufferobj_buffer(intel, pbo, INTEL_WRITE_FULL);
440
441 region->pbo = pbo;
442 region->pbo->region = region;
443 dri_bo_reference(buffer);
444 region->buffer = buffer;
445 }
446
447
448 /* Break the COW tie to the pbo and allocate a new buffer.
449 * The pbo gets to keep the data.
450 */
451 void
452 intel_region_release_pbo(struct intel_context *intel,
453 struct intel_region *region)
454 {
455 _DBG("%s %p\n", __FUNCTION__, region);
456 assert(region->buffer == region->pbo->buffer);
457 region->pbo->region = NULL;
458 region->pbo = NULL;
459 dri_bo_unreference(region->buffer);
460 region->buffer = NULL;
461
462 region->buffer = dri_bo_alloc(intel->bufmgr, "region",
463 region->pitch * region->cpp * region->height,
464 64);
465 }
466
467 /* Break the COW tie to the pbo. Both the pbo and the region end up
468 * with a copy of the data.
469 */
470 void
471 intel_region_cow(struct intel_context *intel, struct intel_region *region)
472 {
473 struct intel_buffer_object *pbo = region->pbo;
474 GLboolean ok;
475
476 intel_region_release_pbo(intel, region);
477
478 assert(region->cpp * region->pitch * region->height == pbo->Base.Size);
479
480 _DBG("%s %p (%d bytes)\n", __FUNCTION__, region, pbo->Base.Size);
481
482 /* Now blit from the texture buffer to the new buffer:
483 */
484
485 ok = intelEmitCopyBlit(intel,
486 region->cpp,
487 region->pitch, pbo->buffer, 0, region->tiling,
488 region->pitch, region->buffer, 0, region->tiling,
489 0, 0, 0, 0,
490 region->pitch, region->height,
491 GL_COPY);
492 assert(ok);
493 }
494
495 dri_bo *
496 intel_region_buffer(struct intel_context *intel,
497 struct intel_region *region, GLuint flag)
498 {
499 if (region->pbo) {
500 if (flag == INTEL_WRITE_PART)
501 intel_region_cow(intel, region);
502 else if (flag == INTEL_WRITE_FULL)
503 intel_region_release_pbo(intel, region);
504 }
505
506 return region->buffer;
507 }