/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/* Provide additional functionality on top of bufmgr buffers:
 *   - 2d semantics and blit operations
 *   - refcounting of buffers for multiple images in a buffer.
 *   - refcounting of buffer mappings.
 *   - some logic for moving the buffers to the best memory pools for
 *     given operations.
 *
 * Most of this is to make it easier to implement the fixed-layout
 * mipmap tree required by intel hardware in the face of GL's
 * programming interface where each image can be specified in random
 * order and it isn't clear what layout the tree should have until the
 * last moment.
 */

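/* Typical use by a caller looks roughly like the sketch below (an
 * illustrative example of the refcounting rules in this file, not copied
 * from an actual caller):
 *
 *    struct intel_region *region =
 *       intel_region_alloc(intel, I915_TILING_NONE, 4, w, h, pitch, GL_FALSE);
 *    GLubyte *ptr = intel_region_map(intel, region);
 *    ... write pixels through ptr ...
 *    intel_region_unmap(intel, region);
 *    intel_region_release(&region);
 */
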
#include <sys/ioctl.h>
#include <errno.h>

#include "intel_context.h"
#include "intel_regions.h"
#include "intel_blit.h"
#include "intel_buffer_objects.h"
#include "intel_bufmgr.h"
#include "intel_batchbuffer.h"
#include "intel_chipset.h"

#define FILE_DEBUG_FLAG DEBUG_REGION

/* This should be set to the maximum backtrace size desired.
 * Set it to 0 to disable backtrace debugging.
 */
#define DEBUG_BACKTRACE_SIZE 0

#if DEBUG_BACKTRACE_SIZE == 0
/* Use the standard debug output */
#define _DBG(...) DBG(__VA_ARGS__)
#else
/* Use backtracing debug output */
#define _DBG(...) {debug_backtrace(); DBG(__VA_ARGS__);}

/* Backtracing debug support */
#include <execinfo.h>

static void
debug_backtrace(void)
{
   void *trace[DEBUG_BACKTRACE_SIZE];
   char **strings = NULL;
   int traceSize;
   register int i;

   traceSize = backtrace(trace, DEBUG_BACKTRACE_SIZE);
   strings = backtrace_symbols(trace, traceSize);
   if (strings == NULL) {
      DBG("no backtrace:");
      return;
   }

   /* Spit out all the strings with a colon separator.  Ignore
    * the first, since we don't really care about the call
    * to debug_backtrace() itself.  Skip until the final "/" in
    * the trace to avoid really long lines.
    */
   for (i = 1; i < traceSize; i++) {
      char *p = strings[i], *slash = strings[i];
      while (*p) {
         if (*p++ == '/') {
            slash = p;
         }
      }

      DBG("%s:", slash);
   }

   /* Free up the memory, and we're done */
   free(strings);
}

#endif


/* Map a region's buffer for CPU access.  Mappings are refcounted, so nested
 * map/unmap pairs are cheap; tiled buffers are mapped through the GTT when
 * the kernel supports exec fencing.
 *
 * XXX: Thread safety?
 */
GLubyte *
intel_region_map(struct intel_context *intel, struct intel_region *region)
{
   _DBG("%s %p\n", __FUNCTION__, region);
   if (!region->map_refcount++) {
      if (region->pbo)
         intel_region_cow(intel, region);

      if (region->tiling != I915_TILING_NONE &&
          intel->intelScreen->kernel_exec_fencing)
         drm_intel_gem_bo_map_gtt(region->buffer);
      else
         dri_bo_map(region->buffer, GL_TRUE);
      region->map = region->buffer->virtual;
   }

   return region->map;
}

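/* Drop one reference to a mapping made by intel_region_map(); the buffer is
 * actually unmapped only when the last mapping reference goes away.
 */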
void
intel_region_unmap(struct intel_context *intel, struct intel_region *region)
{
   _DBG("%s %p\n", __FUNCTION__, region);
   if (!--region->map_refcount) {
      if (region->tiling != I915_TILING_NONE &&
          intel->intelScreen->kernel_exec_fencing)
         drm_intel_gem_bo_unmap_gtt(region->buffer);
      else
         dri_bo_unmap(region->buffer);
      region->map = NULL;
   }
}

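/* Wrap an already-allocated buffer object in a new intel_region with a
 * refcount of 1 and no tiling.  Returns NULL if no buffer was supplied.
 */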
static struct intel_region *
intel_region_alloc_internal(struct intel_context *intel,
                            GLuint cpp,
                            GLuint width, GLuint height, GLuint pitch,
                            dri_bo *buffer)
{
   struct intel_region *region;

   if (buffer == NULL) {
      _DBG("%s <-- NULL\n", __FUNCTION__);
      return NULL;
   }

   region = calloc(sizeof(*region), 1);
   region->cpp = cpp;
   region->width = width;
   region->height = height;
   region->pitch = pitch;
   region->refcount = 1;
   region->buffer = buffer;

   /* Default to no tiling */
   region->tiling = I915_TILING_NONE;
   region->bit_6_swizzle = I915_BIT_6_SWIZZLE_NONE;

   _DBG("%s <-- %p\n", __FUNCTION__, region);
   return region;
}

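/* Allocate a new buffer object of pitch * cpp * height bytes and wrap it in
 * a region.  Buffers expected to be filled by accelerated uploads are
 * allocated with drm_intel_bo_alloc_for_render(); tiling, if requested, is
 * applied after allocation.
 */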
struct intel_region *
intel_region_alloc(struct intel_context *intel,
                   uint32_t tiling,
                   GLuint cpp, GLuint width, GLuint height, GLuint pitch,
                   GLboolean expect_accelerated_upload)
{
   dri_bo *buffer;
   struct intel_region *region;

   if (expect_accelerated_upload) {
      buffer = drm_intel_bo_alloc_for_render(intel->bufmgr, "region",
                                             pitch * cpp * height, 64);
   } else {
      buffer = drm_intel_bo_alloc(intel->bufmgr, "region",
                                  pitch * cpp * height, 64);
   }

   region = intel_region_alloc_internal(intel, cpp, width, height,
                                        pitch, buffer);

   if (tiling != I915_TILING_NONE) {
      assert(((pitch * cpp) & 127) == 0);
      drm_intel_bo_set_tiling(buffer, &tiling, pitch * cpp);
      drm_intel_bo_get_tiling(buffer, &region->tiling, &region->bit_6_swizzle);
   }

   return region;
}

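/* Create a region wrapping a buffer object that already exists elsewhere
 * (e.g. one created by the X server), looked up through
 * intel_bo_gem_create_from_name().  The buffer's tiling and bit-6 swizzle
 * mode are queried from the kernel and recorded in the region.
 */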
struct intel_region *
intel_region_alloc_for_handle(struct intel_context *intel,
                              GLuint cpp,
                              GLuint width, GLuint height, GLuint pitch,
                              GLuint handle, const char *name)
{
   struct intel_region *region;
   dri_bo *buffer;
   int ret;

   buffer = intel_bo_gem_create_from_name(intel->bufmgr, name, handle);

   region = intel_region_alloc_internal(intel, cpp,
                                        width, height, pitch, buffer);
   if (region == NULL)
      return region;

   ret = dri_bo_get_tiling(region->buffer, &region->tiling,
                           &region->bit_6_swizzle);
   if (ret != 0) {
      fprintf(stderr, "Couldn't get tiling of buffer %d (%s): %s\n",
              handle, name, strerror(-ret));
      intel_region_release(&region);
      return NULL;
   }

   return region;
}

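/* Point *dst at src, taking a reference.  *dst must be NULL on entry so
 * that a previously held region isn't silently leaked.
 */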
void
intel_region_reference(struct intel_region **dst, struct intel_region *src)
{
   if (src)
      _DBG("%s %p %d\n", __FUNCTION__, src, src->refcount);

   assert(*dst == NULL);
   if (src) {
      src->refcount++;
      *dst = src;
   }
}

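/* Drop a reference to a region, freeing it (and tearing down any legacy
 * static mapping) when the refcount reaches zero.  *region_handle is set to
 * NULL on return.
 */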
void
intel_region_release(struct intel_region **region_handle)
{
   struct intel_region *region = *region_handle;

   if (region == NULL) {
      _DBG("%s NULL\n", __FUNCTION__);
      return;
   }

   _DBG("%s %p %d\n", __FUNCTION__, region, region->refcount - 1);

   ASSERT(region->refcount > 0);
   region->refcount--;

   if (region->refcount == 0) {
      assert(region->map_refcount == 0);

      if (region->pbo)
         region->pbo->region = NULL;
      region->pbo = NULL;
      dri_bo_unreference(region->buffer);

      if (region->classic_map != NULL) {
         drmUnmap(region->classic_map,
                  region->pitch * region->cpp * region->height);
      }

      free(region);
   }
   *region_handle = NULL;
}

/* Copy a rectangle of pixels between two CPU-addressable images, row by row.
 *
 * XXX Move this into core Mesa?
 */
void
_mesa_copy_rect(GLubyte * dst,
                GLuint cpp,
                GLuint dst_pitch,
                GLuint dst_x,
                GLuint dst_y,
                GLuint width,
                GLuint height,
                const GLubyte * src,
                GLuint src_pitch, GLuint src_x, GLuint src_y)
{
   GLuint i;

   dst_pitch *= cpp;
   src_pitch *= cpp;
   dst += dst_x * cpp;
   src += src_x * cpp;
   dst += dst_y * dst_pitch;
   src += src_y * src_pitch;
   width *= cpp;

   if (width == dst_pitch && width == src_pitch)
      memcpy(dst, src, height * width);
   else {
      for (i = 0; i < height; i++) {
         memcpy(dst, src, width);
         dst += dst_pitch;
         src += src_pitch;
      }
   }
}


/* Upload data to a rectangular sub-region.  Lots of choices how to do this:
 *
 * - memcpy by span to current destination
 * - upload data as new buffer and blit
 *
 * Currently always memcpy.
 */
void
intel_region_data(struct intel_context *intel,
                  struct intel_region *dst,
                  GLuint dst_offset,
                  GLuint dstx, GLuint dsty,
                  const void *src, GLuint src_pitch,
                  GLuint srcx, GLuint srcy, GLuint width, GLuint height)
{
   GLboolean locked = GL_FALSE;

   _DBG("%s\n", __FUNCTION__);

   if (intel == NULL)
      return;

   if (dst->pbo) {
      if (dstx == 0 &&
          dsty == 0 && width == dst->pitch && height == dst->height)
         intel_region_release_pbo(intel, dst);
      else
         intel_region_cow(intel, dst);
   }

   if (!intel->locked) {
      LOCK_HARDWARE(intel);
      locked = GL_TRUE;
   }

   _mesa_copy_rect(intel_region_map(intel, dst) + dst_offset,
                   dst->cpp,
                   dst->pitch,
                   dstx, dsty, width, height, src, src_pitch, srcx, srcy);

   intel_region_unmap(intel, dst);

   if (locked)
      UNLOCK_HARDWARE(intel);
}

/* Copy rectangular sub-regions.  Need better logic about when to
 * push buffers into AGP - will currently do so whenever possible.
 */
GLboolean
intel_region_copy(struct intel_context *intel,
                  struct intel_region *dst,
                  GLuint dst_offset,
                  GLuint dstx, GLuint dsty,
                  struct intel_region *src,
                  GLuint src_offset,
                  GLuint srcx, GLuint srcy, GLuint width, GLuint height,
                  GLenum logicop)
{
   _DBG("%s\n", __FUNCTION__);

   if (intel == NULL)
      return GL_FALSE;

   if (dst->pbo) {
      if (dstx == 0 &&
          dsty == 0 && width == dst->pitch && height == dst->height)
         intel_region_release_pbo(intel, dst);
      else
         intel_region_cow(intel, dst);
   }

   assert(src->cpp == dst->cpp);

   return intelEmitCopyBlit(intel,
                            dst->cpp,
                            src->pitch, src->buffer, src_offset, src->tiling,
                            dst->pitch, dst->buffer, dst_offset, dst->tiling,
                            srcx, srcy, dstx, dsty, width, height,
                            logicop);
}

/* Attach to a pbo, discarding our data.  Effectively a zero-copy upload
 * of the pbo's data.
 */
void
intel_region_attach_pbo(struct intel_context *intel,
                        struct intel_region *region,
                        struct intel_buffer_object *pbo)
{
   dri_bo *buffer;

   if (region->pbo == pbo)
      return;

   _DBG("%s %p %p\n", __FUNCTION__, region, pbo);

   /* If there is already a pbo attached, break the cow tie now.
    * Don't call intel_region_release_pbo() as that would
    * unnecessarily allocate a new buffer we would have to immediately
    * discard.
    */
   if (region->pbo) {
      region->pbo->region = NULL;
      region->pbo = NULL;
   }

   if (region->buffer) {
      dri_bo_unreference(region->buffer);
      region->buffer = NULL;
   }

   /* make sure pbo has a buffer of its own */
   buffer = intel_bufferobj_buffer(intel, pbo, INTEL_WRITE_FULL);

   region->pbo = pbo;
   region->pbo->region = region;
   dri_bo_reference(buffer);
   region->buffer = buffer;
}


/* Break the COW tie to the pbo and allocate a new buffer.
 * The pbo gets to keep the data.
 */
void
intel_region_release_pbo(struct intel_context *intel,
                         struct intel_region *region)
{
   _DBG("%s %p\n", __FUNCTION__, region);
   assert(region->buffer == region->pbo->buffer);
   region->pbo->region = NULL;
   region->pbo = NULL;
   dri_bo_unreference(region->buffer);
   region->buffer = NULL;

   region->buffer = dri_bo_alloc(intel->bufmgr, "region",
                                 region->pitch * region->cpp * region->height,
                                 64);
}

/* Break the COW tie to the pbo.  Both the pbo and the region end up
 * with a copy of the data.
 */
void
intel_region_cow(struct intel_context *intel, struct intel_region *region)
{
   struct intel_buffer_object *pbo = region->pbo;
   GLboolean was_locked;

   if (intel == NULL)
      return;

   was_locked = intel->locked;

   intel_region_release_pbo(intel, region);

   assert(region->cpp * region->pitch * region->height == pbo->Base.Size);

   _DBG("%s %p (%d bytes)\n", __FUNCTION__, region, pbo->Base.Size);

   /* Now blit from the texture buffer to the new buffer:
    */
   if (!was_locked)
      LOCK_HARDWARE(intel);

   assert(intelEmitCopyBlit(intel,
                            region->cpp,
                            region->pitch, pbo->buffer, 0, region->tiling,
                            region->pitch, region->buffer, 0, region->tiling,
                            0, 0, 0, 0,
                            region->pitch, region->height,
                            GL_COPY));

   if (!was_locked)
      UNLOCK_HARDWARE(intel);
}

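/* Return the buffer object backing a region, first resolving any pbo
 * attachment according to how the caller intends to write it: copy on a
 * partial write, drop the pbo data on a full overwrite.
 */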
dri_bo *
intel_region_buffer(struct intel_context *intel,
                    struct intel_region *region, GLuint flag)
{
   if (region->pbo) {
      if (flag == INTEL_WRITE_PART)
         intel_region_cow(intel, region);
      else if (flag == INTEL_WRITE_FULL)
         intel_region_release_pbo(intel, region);
   }

   return region->buffer;
}

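/* (Re)build a region describing one of the static buffers (front, back,
 * depth) set up by the X server.  When GEM is available (intel->ttm) the
 * buffer is looked up by the global name in region_desc->bo_handle;
 * otherwise the legacy path drmMap()s the sarea-described range and wraps
 * it with intel_bo_fake_alloc_static().
 */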
static struct intel_region *
intel_recreate_static(struct intel_context *intel,
                      const char *name,
                      struct intel_region *region,
                      intelRegion *region_desc)
{
   intelScreenPrivate *intelScreen = intel->intelScreen;
   int ret;

   if (region == NULL) {
      region = calloc(sizeof(*region), 1);
      region->refcount = 1;
      _DBG("%s creating new region %p\n", __FUNCTION__, region);
   }
   else {
      _DBG("%s %p\n", __FUNCTION__, region);
   }

   if (intel->ctx.Visual.rgbBits == 24)
      region->cpp = 4;
   else
      region->cpp = intel->ctx.Visual.rgbBits / 8;
   region->pitch = intelScreen->pitch;
   region->width = intelScreen->width;
   region->height = intelScreen->height;

   if (region->buffer != NULL) {
      dri_bo_unreference(region->buffer);
      region->buffer = NULL;
   }

   if (intel->ttm) {
      assert(region_desc->bo_handle != -1);
      region->buffer = intel_bo_gem_create_from_name(intel->bufmgr,
                                                     name,
                                                     region_desc->bo_handle);

      ret = dri_bo_get_tiling(region->buffer, &region->tiling,
                              &region->bit_6_swizzle);
      if (ret != 0) {
         fprintf(stderr, "Couldn't get tiling of buffer %d (%s): %s\n",
                 region_desc->bo_handle, name, strerror(-ret));
         intel_region_release(&region);
         return NULL;
      }
   } else {
      if (region->classic_map != NULL) {
         drmUnmap(region->classic_map,
                  region->pitch * region->cpp * region->height);
         region->classic_map = NULL;
      }
      ret = drmMap(intel->driFd, region_desc->handle,
                   region->pitch * region->cpp * region->height,
                   &region->classic_map);
      if (ret != 0) {
         fprintf(stderr, "Failed to drmMap %s buffer\n", name);
         free(region);
         return NULL;
      }

      region->buffer = intel_bo_fake_alloc_static(intel->bufmgr,
                                                  name,
                                                  region_desc->offset,
                                                  region->pitch * region->cpp *
                                                  region->height,
                                                  region->classic_map);

      /* The sarea just gives us a boolean for whether it's tiled or not,
       * instead of which tiling mode it is.  Guess.
       */
      if (region_desc->tiled) {
         if (IS_965(intel->intelScreen->deviceID) &&
             region_desc == &intelScreen->depth)
            region->tiling = I915_TILING_Y;
         else
            region->tiling = I915_TILING_X;
      } else {
         region->tiling = I915_TILING_NONE;
      }

      region->bit_6_swizzle = I915_BIT_6_SWIZZLE_NONE;
   }

   assert(region->buffer != NULL);

   return region;
}

/**
 * Create intel_region structs to describe the static front, back, and depth
 * buffers created by the xserver.
 *
 * Although FBOs mean we now no longer use these as render targets in
 * all circumstances, they won't go away until the back and depth
 * buffers become private, and the front buffer will remain even then.
 *
 * Note that these don't allocate video memory, just describe
 * allocations already made by the X server.
 */
void
intel_recreate_static_regions(struct intel_context *intel)
{
   intelScreenPrivate *intelScreen = intel->intelScreen;

   intel->front_region =
      intel_recreate_static(intel, "front",
                            intel->front_region,
                            &intelScreen->front);

   intel->back_region =
      intel_recreate_static(intel, "back",
                            intel->back_region,
                            &intelScreen->back);

   /* Still assumes front.cpp == depth.cpp.  We can kill this when we move to
    * private buffers.
    */
   intel->depth_region =
      intel_recreate_static(intel, "depth",
                            intel->depth_region,
                            &intelScreen->depth);
}