Merge branch 'mesa_7_5_branch'
[mesa.git] / src / mesa / drivers / dri / intel / intel_regions.c
1 /**************************************************************************
2 *
3 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 /* Provide additional functionality on top of bufmgr buffers:
29 * - 2d semantics and blit operations
30 * - refcounting of buffers for multiple images in a buffer.
31 * - refcounting of buffer mappings.
32 * - some logic for moving the buffers to the best memory pools for
33 * given operations.
34 *
35 * Most of this is to make it easier to implement the fixed-layout
36 * mipmap tree required by intel hardware in the face of GL's
37 * programming interface where each image can be specifed in random
38 * order and it isn't clear what layout the tree should have until the
39 * last moment.
40 */
41
42 #include <sys/ioctl.h>
43 #include <errno.h>
44
45 #include "intel_context.h"
46 #include "intel_regions.h"
47 #include "intel_blit.h"
48 #include "intel_buffer_objects.h"
49 #include "intel_bufmgr.h"
50 #include "intel_batchbuffer.h"
51 #include "intel_chipset.h"
52
53 #define FILE_DEBUG_FLAG DEBUG_REGION
54
55 /* This should be set to the maximum backtrace size desired.
56 * Set it to 0 to disable backtrace debugging.
57 */
58 #define DEBUG_BACKTRACE_SIZE 0
59
60 #if DEBUG_BACKTRACE_SIZE == 0
61 /* Use the standard debug output */
62 #define _DBG(...) DBG(__VA_ARGS__)
63 #else
64 /* Use backtracing debug output */
65 #define _DBG(...) {debug_backtrace(); DBG(__VA_ARGS__);}
66
67 /* Backtracing debug support */
68 #include <execinfo.h>
69
70 static void
71 debug_backtrace(void)
72 {
73 void *trace[DEBUG_BACKTRACE_SIZE];
74 char **strings = NULL;
75 int traceSize;
76 register int i;
77
78 traceSize = backtrace(trace, DEBUG_BACKTRACE_SIZE);
79 strings = backtrace_symbols(trace, traceSize);
80 if (strings == NULL) {
81 DBG("no backtrace:");
82 return;
83 }
84
85 /* Spit out all the strings with a colon separator. Ignore
86 * the first, since we don't really care about the call
87 * to debug_backtrace() itself. Skip until the final "/" in
88 * the trace to avoid really long lines.
89 */
90 for (i = 1; i < traceSize; i++) {
91 char *p = strings[i], *slash = strings[i];
92 while (*p) {
93 if (*p++ == '/') {
94 slash = p;
95 }
96 }
97
98 DBG("%s:", slash);
99 }
100
101 /* Free up the memory, and we're done */
102 free(strings);
103 }
104
105 #endif
106
107
108
/* Map the region's buffer object for CPU access and return a pointer to
 * its first byte.  Mappings are refcounted: only the outermost
 * map/unmap pair actually maps or unmaps the buffer; nested calls just
 * adjust the count.  Queued rendering is flushed first.
 *
 * XXX: Thread safety?  (map_refcount is updated without any locking.)
 */
GLubyte *
intel_region_map(struct intel_context *intel, struct intel_region *region)
{
   intelFlush(&intel->ctx);

   _DBG("%s %p\n", __FUNCTION__, region);
   if (!region->map_refcount++) {
      /* The CPU may write through this map, so break any copy-on-write
       * tie to a pbo before handing out the pointer.
       */
      if (region->pbo)
         intel_region_cow(intel, region);

      /* Tiled buffers are mapped through the GTT when the kernel handles
       * execbuffer fencing; otherwise fall back to a direct CPU map.
       */
      if (region->tiling != I915_TILING_NONE &&
          intel->intelScreen->kernel_exec_fencing)
         drm_intel_gem_bo_map_gtt(region->buffer);
      else
         dri_bo_map(region->buffer, GL_TRUE);
      region->map = region->buffer->virtual;
   }

   return region->map;
}
131
/* Drop one reference to a mapping made by intel_region_map().  The
 * buffer is actually unmapped (and region->map cleared) only when the
 * last reference goes away.  The unmap path must mirror the map path:
 * GTT unmap for tiled buffers under kernel execbuffer fencing, plain
 * unmap otherwise.
 */
void
intel_region_unmap(struct intel_context *intel, struct intel_region *region)
{
   _DBG("%s %p\n", __FUNCTION__, region);
   if (!--region->map_refcount) {
      if (region->tiling != I915_TILING_NONE &&
          intel->intelScreen->kernel_exec_fencing)
         drm_intel_gem_bo_unmap_gtt(region->buffer);
      else
         dri_bo_unmap(region->buffer);
      region->map = NULL;
   }
}
145
146 static struct intel_region *
147 intel_region_alloc_internal(struct intel_context *intel,
148 GLuint cpp,
149 GLuint width, GLuint height, GLuint pitch,
150 dri_bo *buffer)
151 {
152 struct intel_region *region;
153
154 if (buffer == NULL) {
155 _DBG("%s <-- NULL\n", __FUNCTION__);
156 return NULL;
157 }
158
159 region = calloc(sizeof(*region), 1);
160 region->cpp = cpp;
161 region->width = width;
162 region->height = height;
163 region->pitch = pitch;
164 region->refcount = 1;
165 region->buffer = buffer;
166
167 /* Default to no tiling */
168 region->tiling = I915_TILING_NONE;
169 region->bit_6_swizzle = I915_BIT_6_SWIZZLE_NONE;
170
171 _DBG("%s <-- %p\n", __FUNCTION__, region);
172 return region;
173 }
174
175 struct intel_region *
176 intel_region_alloc(struct intel_context *intel,
177 uint32_t tiling,
178 GLuint cpp, GLuint width, GLuint height, GLuint pitch,
179 GLboolean expect_accelerated_upload)
180 {
181 dri_bo *buffer;
182 struct intel_region *region;
183
184 /* If we're tiled, our allocations are in 8 or 32-row blocks, so
185 * failure to align our height means that we won't allocate enough pages.
186 *
187 * If we're untiled, we still have to align to 2 rows high because the
188 * data port accesses 2x2 blocks even if the bottom row isn't to be
189 * rendered, so failure to align means we could walk off the end of the
190 * GTT and fault.
191 */
192 if (tiling == I915_TILING_X)
193 height = ALIGN(height, 8);
194 else if (tiling == I915_TILING_Y)
195 height = ALIGN(height, 32);
196 else
197 height = ALIGN(height, 2);
198
199 if (expect_accelerated_upload) {
200 buffer = drm_intel_bo_alloc_for_render(intel->bufmgr, "region",
201 pitch * cpp * height, 64);
202 } else {
203 buffer = drm_intel_bo_alloc(intel->bufmgr, "region",
204 pitch * cpp * height, 64);
205 }
206
207 region = intel_region_alloc_internal(intel, cpp, width, height,
208 pitch, buffer);
209
210 if (tiling != I915_TILING_NONE) {
211 assert(((pitch * cpp) & 127) == 0);
212 drm_intel_bo_set_tiling(buffer, &tiling, pitch * cpp);
213 drm_intel_bo_get_tiling(buffer, &region->tiling, &region->bit_6_swizzle);
214 }
215
216 return region;
217 }
218
219 struct intel_region *
220 intel_region_alloc_for_handle(struct intel_context *intel,
221 GLuint cpp,
222 GLuint width, GLuint height, GLuint pitch,
223 GLuint handle, const char *name)
224 {
225 struct intel_region *region;
226 dri_bo *buffer;
227 int ret;
228
229 buffer = intel_bo_gem_create_from_name(intel->bufmgr, name, handle);
230
231 region = intel_region_alloc_internal(intel, cpp,
232 width, height, pitch, buffer);
233 if (region == NULL)
234 return region;
235
236 ret = dri_bo_get_tiling(region->buffer, &region->tiling,
237 &region->bit_6_swizzle);
238 if (ret != 0) {
239 fprintf(stderr, "Couldn't get tiling of buffer %d (%s): %s\n",
240 handle, name, strerror(-ret));
241 intel_region_release(&region);
242 return NULL;
243 }
244
245 return region;
246 }
247
248 void
249 intel_region_reference(struct intel_region **dst, struct intel_region *src)
250 {
251 if (src)
252 _DBG("%s %p %d\n", __FUNCTION__, src, src->refcount);
253
254 assert(*dst == NULL);
255 if (src) {
256 src->refcount++;
257 *dst = src;
258 }
259 }
260
/* Drop one reference to a region and write NULL through the caller's
 * handle.  When the last reference goes away the region is destroyed:
 * the pbo back-pointer (if any) is severed, the buffer object is
 * unreferenced, and any legacy static mapping (classic_map, set up by
 * intel_recreate_static) is drmUnmap()ed.  A handle containing NULL is
 * a no-op.
 */
void
intel_region_release(struct intel_region **region_handle)
{
   struct intel_region *region = *region_handle;

   if (region == NULL) {
      _DBG("%s NULL\n", __FUNCTION__);
      return;
   }

   _DBG("%s %p %d\n", __FUNCTION__, region, region->refcount - 1);

   ASSERT(region->refcount > 0);
   region->refcount--;

   if (region->refcount == 0) {
      /* Nobody should still hold a CPU mapping of a dying region. */
      assert(region->map_refcount == 0);

      /* Break the COW tie; the pbo keeps its own buffer reference. */
      if (region->pbo)
         region->pbo->region = NULL;
      region->pbo = NULL;
      dri_bo_unreference(region->buffer);

      if (region->classic_map != NULL) {
         drmUnmap(region->classic_map,
                  region->pitch * region->cpp * region->height);
      }

      free(region);
   }
   *region_handle = NULL;
}
293
294 /*
295 * XXX Move this into core Mesa?
296 */
297 void
298 _mesa_copy_rect(GLubyte * dst,
299 GLuint cpp,
300 GLuint dst_pitch,
301 GLuint dst_x,
302 GLuint dst_y,
303 GLuint width,
304 GLuint height,
305 const GLubyte * src,
306 GLuint src_pitch, GLuint src_x, GLuint src_y)
307 {
308 GLuint i;
309
310 dst_pitch *= cpp;
311 src_pitch *= cpp;
312 dst += dst_x * cpp;
313 src += src_x * cpp;
314 dst += dst_y * dst_pitch;
315 src += src_y * dst_pitch;
316 width *= cpp;
317
318 if (width == dst_pitch && width == src_pitch)
319 memcpy(dst, src, height * width);
320 else {
321 for (i = 0; i < height; i++) {
322 memcpy(dst, src, width);
323 dst += dst_pitch;
324 src += src_pitch;
325 }
326 }
327 }
328
329
/* Upload data to a rectangular sub-region. Lots of choices how to do this:
 *
 * - memcpy by span to current destination
 * - upload data as new buffer and blit
 *
 * Currently always memcpy.
 */
void
intel_region_data(struct intel_context *intel,
                  struct intel_region *dst,
                  GLuint dst_offset,
                  GLuint dstx, GLuint dsty,
                  const void *src, GLuint src_pitch,
                  GLuint srcx, GLuint srcy, GLuint width, GLuint height)
{
   _DBG("%s\n", __FUNCTION__);

   if (intel == NULL)
      return;

   /* Break any COW tie before writing.  A full-surface overwrite can
    * simply detach from the pbo (the old contents don't matter); a
    * partial write must copy the pbo's data into a private buffer first.
    */
   if (dst->pbo) {
      if (dstx == 0 &&
          dsty == 0 && width == dst->pitch && height == dst->height)
         intel_region_release_pbo(intel, dst);
      else
         intel_region_cow(intel, dst);
   }

   /* Map, memcpy the rectangle in by spans, unmap — all under the
    * hardware lock.
    */
   LOCK_HARDWARE(intel);
   _mesa_copy_rect(intel_region_map(intel, dst) + dst_offset,
                   dst->cpp,
                   dst->pitch,
                   dstx, dsty, width, height, src, src_pitch, srcx, srcy);

   intel_region_unmap(intel, dst);
   UNLOCK_HARDWARE(intel);
}
367
/* Copy rectangular sub-regions. Need better logic about when to
 * push buffers into AGP - will currently do so whenever possible.
 *
 * Returns GL_FALSE when the blit could not be emitted (or intel is
 * NULL), in which case the caller is responsible for a fallback path.
 */
GLboolean
intel_region_copy(struct intel_context *intel,
                  struct intel_region *dst,
                  GLuint dst_offset,
                  GLuint dstx, GLuint dsty,
                  struct intel_region *src,
                  GLuint src_offset,
                  GLuint srcx, GLuint srcy, GLuint width, GLuint height,
                  GLenum logicop)
{
   _DBG("%s\n", __FUNCTION__);

   if (intel == NULL)
      return GL_FALSE;

   /* Same COW logic as intel_region_data(): detach from the pbo on a
    * full-surface overwrite, copy its data out on a partial one.
    */
   if (dst->pbo) {
      if (dstx == 0 &&
          dsty == 0 && width == dst->pitch && height == dst->height)
         intel_region_release_pbo(intel, dst);
      else
         intel_region_cow(intel, dst);
   }

   /* The blitter can't convert between pixel sizes. */
   assert(src->cpp == dst->cpp);

   return intelEmitCopyBlit(intel,
                            dst->cpp,
                            src->pitch, src->buffer, src_offset, src->tiling,
                            dst->pitch, dst->buffer, dst_offset, dst->tiling,
                            srcx, srcy, dstx, dsty, width, height,
                            logicop);
}
403
/* Attach to a pbo, discarding our data. Effectively zero-copy upload
 * the pbo's data.  On return the region and the pbo share the pbo's
 * buffer object (each side holds its own reference), and the region's
 * previous buffer has been unreferenced.
 */
void
intel_region_attach_pbo(struct intel_context *intel,
                        struct intel_region *region,
                        struct intel_buffer_object *pbo)
{
   dri_bo *buffer;

   /* Already tied to this pbo — nothing to do. */
   if (region->pbo == pbo)
      return;

   _DBG("%s %p %p\n", __FUNCTION__, region, pbo);

   /* If there is already a pbo attached, break the cow tie now.
    * Don't call intel_region_release_pbo() as that would
    * unnecessarily allocate a new buffer we would have to immediately
    * discard.
    */
   if (region->pbo) {
      region->pbo->region = NULL;
      region->pbo = NULL;
   }

   /* Drop the region's old backing store; the pbo's buffer replaces it. */
   if (region->buffer) {
      dri_bo_unreference(region->buffer);
      region->buffer = NULL;
   }

   /* make sure pbo has a buffer of its own */
   buffer = intel_bufferobj_buffer(intel, pbo, INTEL_WRITE_FULL);

   /* Establish the two-way tie and take our own reference on the
    * shared buffer.
    */
   region->pbo = pbo;
   region->pbo->region = region;
   dri_bo_reference(buffer);
   region->buffer = buffer;
}
442
443
/* Break the COW tie to the pbo and allocate a new buffer.
 * The pbo gets to keep the data.  The region's replacement buffer is
 * the same size but uninitialized; callers that need the old contents
 * must use intel_region_cow() instead.
 */
void
intel_region_release_pbo(struct intel_context *intel,
                         struct intel_region *region)
{
   _DBG("%s %p\n", __FUNCTION__, region);
   /* Only valid while the region is actually sharing the pbo's buffer. */
   assert(region->buffer == region->pbo->buffer);
   region->pbo->region = NULL;
   region->pbo = NULL;
   dri_bo_unreference(region->buffer);
   region->buffer = NULL;

   /* NOTE(review): the allocation result is not checked here — TODO
    * confirm callers tolerate region->buffer == NULL on OOM.
    */
   region->buffer = dri_bo_alloc(intel->bufmgr, "region",
                                 region->pitch * region->cpp * region->height,
                                 64);
}
462
/* Break the COW tie to the pbo. Both the pbo and the region end up
 * with a copy of the data:  the region gets a fresh buffer and the old
 * (pbo-owned) contents are blitted into it.
 */
void
intel_region_cow(struct intel_context *intel, struct intel_region *region)
{
   struct intel_buffer_object *pbo = region->pbo;
   GLboolean ok;

   /* Detach and give the region a new, uninitialized buffer of the
    * same size; pbo still points at the old data.
    */
   intel_region_release_pbo(intel, region);

   assert(region->cpp * region->pitch * region->height == pbo->Base.Size);

   _DBG("%s %p (%d bytes)\n", __FUNCTION__, region, pbo->Base.Size);

   /* Now blit from the texture buffer to the new buffer:
    */

   LOCK_HARDWARE(intel);
   ok = intelEmitCopyBlit(intel,
                          region->cpp,
                          region->pitch, pbo->buffer, 0, region->tiling,
                          region->pitch, region->buffer, 0, region->tiling,
                          0, 0, 0, 0,
                          region->pitch, region->height,
                          GL_COPY);
   /* NOTE(review): assert() compiles away in release builds, leaving a
    * failed blit silently unhandled — confirm this is acceptable.
    */
   assert(ok);
   UNLOCK_HARDWARE(intel);
}
492
493 dri_bo *
494 intel_region_buffer(struct intel_context *intel,
495 struct intel_region *region, GLuint flag)
496 {
497 if (region->pbo) {
498 if (flag == INTEL_WRITE_PART)
499 intel_region_cow(intel, region);
500 else if (flag == INTEL_WRITE_FULL)
501 intel_region_release_pbo(intel, region);
502 }
503
504 return region->buffer;
505 }
506
/* (Re)build an intel_region describing one of the X-server-allocated
 * static buffers (front/back/depth) named by region_desc.  Reuses the
 * existing region struct when given one (resize/recreate), otherwise
 * allocates a new one.  Returns NULL on failure.
 */
static struct intel_region *
intel_recreate_static(struct intel_context *intel,
                      const char *name,
                      struct intel_region *region,
                      intelRegion *region_desc)
{
   intelScreenPrivate *intelScreen = intel->intelScreen;
   int ret;

   if (region == NULL) {
      /* NOTE(review): calloc() result is used unchecked — confirm that
       * treating OOM as fatal here is acceptable.
       */
      region = calloc(sizeof(*region), 1);
      region->refcount = 1;
      _DBG("%s creating new region %p\n", __FUNCTION__, region);
   }
   else {
      _DBG("%s %p\n", __FUNCTION__, region);
   }

   /* 24-bit visuals are stored as 4 bytes per pixel. */
   if (intel->ctx.Visual.rgbBits == 24)
      region->cpp = 4;
   else
      region->cpp = intel->ctx.Visual.rgbBits / 8;
   region->pitch = intelScreen->pitch;
   region->width = intelScreen->width;
   region->height = intelScreen->height;

   /* Drop the buffer from any previous (re)creation before attaching
    * the current one.
    */
   if (region->buffer != NULL) {
      dri_bo_unreference(region->buffer);
      region->buffer = NULL;
   }

   if (intel->ttm) {
      /* Kernel memory manager (GEM) path: open the server's buffer
       * object by its global handle and query its tiling state.
       */
      assert(region_desc->bo_handle != -1);
      region->buffer = intel_bo_gem_create_from_name(intel->bufmgr,
                                                     name,
                                                     region_desc->bo_handle);

      ret = dri_bo_get_tiling(region->buffer, &region->tiling,
                              &region->bit_6_swizzle);
      if (ret != 0) {
         fprintf(stderr, "Couldn't get tiling of buffer %d (%s): %s\n",
                 region_desc->bo_handle, name, strerror(-ret));
         intel_region_release(&region);
         return NULL;
      }
   } else {
      /* Classic (non-GEM) path: drmMap the fixed range ourselves and
       * wrap it in a fake static buffer object.  Replace any previous
       * mapping first.
       */
      if (region->classic_map != NULL) {
         drmUnmap(region->classic_map,
                  region->pitch * region->cpp * region->height);
         region->classic_map = NULL;
      }
      ret = drmMap(intel->driFd, region_desc->handle,
                   region->pitch * region->cpp * region->height,
                   &region->classic_map);
      if (ret != 0) {
         fprintf(stderr, "Failed to drmMap %s buffer\n", name);
         free(region);
         return NULL;
      }

      region->buffer = intel_bo_fake_alloc_static(intel->bufmgr,
                                                  name,
                                                  region_desc->offset,
                                                  region->pitch * region->cpp *
                                                  region->height,
                                                  region->classic_map);

      /* The sarea just gives us a boolean for whether it's tiled or not,
       * instead of which tiling mode it is. Guess.
       */
      if (region_desc->tiled) {
         if (IS_965(intel->intelScreen->deviceID) &&
             region_desc == &intelScreen->depth)
            region->tiling = I915_TILING_Y;
         else
            region->tiling = I915_TILING_X;
      } else {
         region->tiling = I915_TILING_NONE;
      }

      region->bit_6_swizzle = I915_BIT_6_SWIZZLE_NONE;
   }

   assert(region->buffer != NULL);

   return region;
}
594
/**
 * Create intel_region structs to describe the static front, back, and depth
 * buffers created by the xserver.
 *
 * Although FBO's mean we now no longer use these as render targets in
 * all circumstances, they won't go away until the back and depth
 * buffers become private, and the front buffer will remain even then.
 *
 * Note that these don't allocate video memory, just describe
 * allocations alread made by the X server.
 *
 * Safe to call repeatedly: intel_recreate_static() reuses an existing
 * region struct when one is passed in.
 */
void
intel_recreate_static_regions(struct intel_context *intel)
{
   intelScreenPrivate *intelScreen = intel->intelScreen;

   intel->front_region =
      intel_recreate_static(intel, "front",
                            intel->front_region,
                            &intelScreen->front);

   intel->back_region =
      intel_recreate_static(intel, "back",
                            intel->back_region,
                            &intelScreen->back);

   /* Still assumes front.cpp == depth.cpp. We can kill this when we move to
    * private buffers.
    */
   intel->depth_region =
      intel_recreate_static(intel, "depth",
                            intel->depth_region,
                            &intelScreen->depth);
}