intel: Also get the DRI2 front buffer when doing front buffer reading.
[mesa.git] / src / mesa / drivers / dri / intel / intel_regions.c
1 /**************************************************************************
2 *
3 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 /* Provide additional functionality on top of bufmgr buffers:
29 * - 2d semantics and blit operations
30 * - refcounting of buffers for multiple images in a buffer.
31 * - refcounting of buffer mappings.
32 * - some logic for moving the buffers to the best memory pools for
33 * given operations.
34 *
35 * Most of this is to make it easier to implement the fixed-layout
36 * mipmap tree required by intel hardware in the face of GL's
 * programming interface where each image can be specified in random
38 * order and it isn't clear what layout the tree should have until the
39 * last moment.
40 */
41
42 #include <sys/ioctl.h>
43 #include <errno.h>
44
45 #include "intel_context.h"
46 #include "intel_regions.h"
47 #include "intel_blit.h"
48 #include "intel_buffer_objects.h"
49 #include "intel_bufmgr.h"
50 #include "intel_batchbuffer.h"
51 #include "intel_chipset.h"
52
53 #define FILE_DEBUG_FLAG DEBUG_REGION
54
55 /* XXX: Thread safety?
56 */
57 GLubyte *
58 intel_region_map(struct intel_context *intel, struct intel_region *region)
59 {
60 DBG("%s\n", __FUNCTION__);
61 if (!region->map_refcount++) {
62 if (region->pbo)
63 intel_region_cow(intel, region);
64
65 dri_bo_map(region->buffer, GL_TRUE);
66 region->map = region->buffer->virtual;
67 }
68
69 return region->map;
70 }
71
72 void
73 intel_region_unmap(struct intel_context *intel, struct intel_region *region)
74 {
75 DBG("%s\n", __FUNCTION__);
76 if (!--region->map_refcount) {
77 dri_bo_unmap(region->buffer);
78 region->map = NULL;
79 }
80 }
81
82 static struct intel_region *
83 intel_region_alloc_internal(struct intel_context *intel,
84 GLuint cpp,
85 GLuint width, GLuint height, GLuint pitch,
86 dri_bo *buffer)
87 {
88 struct intel_region *region;
89
90 DBG("%s\n", __FUNCTION__);
91
92 if (buffer == NULL)
93 return NULL;
94
95 region = calloc(sizeof(*region), 1);
96 region->cpp = cpp;
97 region->width = width;
98 region->height = height;
99 region->pitch = pitch;
100 region->refcount = 1;
101 region->buffer = buffer;
102
103 /* Default to no tiling */
104 region->tiling = I915_TILING_NONE;
105 region->bit_6_swizzle = I915_BIT_6_SWIZZLE_NONE;
106
107 return region;
108 }
109
110 struct intel_region *
111 intel_region_alloc(struct intel_context *intel,
112 GLuint cpp, GLuint width, GLuint height, GLuint pitch,
113 GLboolean expect_accelerated_upload)
114 {
115 dri_bo *buffer;
116
117 /* If we're untiled, we have to align to 2 rows high because the
118 * data port accesses 2x2 blocks even if the bottom row isn't to be
119 * rendered, so failure to align means we could walk off the end of the
120 * GTT and fault.
121 */
122 height = ALIGN(height, 2);
123
124 if (expect_accelerated_upload) {
125 buffer = drm_intel_bo_alloc_for_render(intel->bufmgr, "region",
126 pitch * cpp * height, 64);
127 } else {
128 buffer = drm_intel_bo_alloc(intel->bufmgr, "region",
129 pitch * cpp * height, 64);
130 }
131
132 return intel_region_alloc_internal(intel, cpp, width, height, pitch, buffer);
133 }
134
135 struct intel_region *
136 intel_region_alloc_for_handle(struct intel_context *intel,
137 GLuint cpp,
138 GLuint width, GLuint height, GLuint pitch,
139 GLuint handle, const char *name)
140 {
141 struct intel_region *region;
142 dri_bo *buffer;
143 int ret;
144
145 buffer = intel_bo_gem_create_from_name(intel->bufmgr, name, handle);
146
147 region = intel_region_alloc_internal(intel, cpp,
148 width, height, pitch, buffer);
149 if (region == NULL)
150 return region;
151
152 ret = dri_bo_get_tiling(region->buffer, &region->tiling,
153 &region->bit_6_swizzle);
154 if (ret != 0) {
155 fprintf(stderr, "Couldn't get tiling of buffer %d (%s): %s\n",
156 handle, name, strerror(-ret));
157 intel_region_release(&region);
158 return NULL;
159 }
160
161 return region;
162 }
163
164 void
165 intel_region_reference(struct intel_region **dst, struct intel_region *src)
166 {
167 if (src)
168 DBG("%s %p %d\n", __FUNCTION__, src, src->refcount);
169
170 assert(*dst == NULL);
171 if (src) {
172 src->refcount++;
173 *dst = src;
174 }
175 }
176
177 void
178 intel_region_release(struct intel_region **region_handle)
179 {
180 struct intel_region *region = *region_handle;
181
182 if (region == NULL)
183 return;
184
185 DBG("%s %p %d\n", __FUNCTION__, region, region->refcount - 1);
186
187 ASSERT(region->refcount > 0);
188 region->refcount--;
189
190 if (region->refcount == 0) {
191 assert(region->map_refcount == 0);
192
193 if (region->pbo)
194 region->pbo->region = NULL;
195 region->pbo = NULL;
196 dri_bo_unreference(region->buffer);
197
198 if (region->classic_map != NULL) {
199 drmUnmap(region->classic_map,
200 region->pitch * region->cpp * region->height);
201 }
202
203 free(region);
204 }
205 *region_handle = NULL;
206 }
207
/*
 * Copy a rectangle of pixels between two images.
 *
 * XXX Move this into core Mesa?
 *
 * Pitches and x offsets are in pixels (scaled internally by cpp, the
 * bytes per pixel).  The source and destination must not overlap.
 */
void
_mesa_copy_rect(GLubyte * dst,
                GLuint cpp,
                GLuint dst_pitch,
                GLuint dst_x,
                GLuint dst_y,
                GLuint width,
                GLuint height,
                const GLubyte * src,
                GLuint src_pitch, GLuint src_x, GLuint src_y)
{
   GLuint i;

   dst_pitch *= cpp;
   src_pitch *= cpp;
   dst += dst_x * cpp;
   src += src_x * cpp;
   dst += dst_y * dst_pitch;
   /* Bug fix: the source row offset must be scaled by the SOURCE pitch,
    * not the destination pitch, or reads start at the wrong row whenever
    * the two pitches differ.
    */
   src += src_y * src_pitch;
   width *= cpp;

   /* When both images are tightly packed, copy everything in one shot. */
   if (width == dst_pitch && width == src_pitch)
      memcpy(dst, src, height * width);
   else {
      for (i = 0; i < height; i++) {
         memcpy(dst, src, width);
         dst += dst_pitch;
         src += src_pitch;
      }
   }
}
242
243
244 /* Upload data to a rectangular sub-region. Lots of choices how to do this:
245 *
246 * - memcpy by span to current destination
247 * - upload data as new buffer and blit
248 *
249 * Currently always memcpy.
250 */
251 void
252 intel_region_data(struct intel_context *intel,
253 struct intel_region *dst,
254 GLuint dst_offset,
255 GLuint dstx, GLuint dsty,
256 const void *src, GLuint src_pitch,
257 GLuint srcx, GLuint srcy, GLuint width, GLuint height)
258 {
259 GLboolean locked = GL_FALSE;
260
261 DBG("%s\n", __FUNCTION__);
262
263 if (intel == NULL)
264 return;
265
266 if (dst->pbo) {
267 if (dstx == 0 &&
268 dsty == 0 && width == dst->pitch && height == dst->height)
269 intel_region_release_pbo(intel, dst);
270 else
271 intel_region_cow(intel, dst);
272 }
273
274 if (!intel->locked) {
275 LOCK_HARDWARE(intel);
276 locked = GL_TRUE;
277 }
278
279 _mesa_copy_rect(intel_region_map(intel, dst) + dst_offset,
280 dst->cpp,
281 dst->pitch,
282 dstx, dsty, width, height, src, src_pitch, srcx, srcy);
283
284 intel_region_unmap(intel, dst);
285
286 if (locked)
287 UNLOCK_HARDWARE(intel);
288
289 }
290
291 /* Copy rectangular sub-regions. Need better logic about when to
292 * push buffers into AGP - will currently do so whenever possible.
293 */
294 void
295 intel_region_copy(struct intel_context *intel,
296 struct intel_region *dst,
297 GLuint dst_offset,
298 GLuint dstx, GLuint dsty,
299 struct intel_region *src,
300 GLuint src_offset,
301 GLuint srcx, GLuint srcy, GLuint width, GLuint height)
302 {
303 DBG("%s\n", __FUNCTION__);
304
305 if (intel == NULL)
306 return;
307
308 if (dst->pbo) {
309 if (dstx == 0 &&
310 dsty == 0 && width == dst->pitch && height == dst->height)
311 intel_region_release_pbo(intel, dst);
312 else
313 intel_region_cow(intel, dst);
314 }
315
316 assert(src->cpp == dst->cpp);
317
318 intelEmitCopyBlit(intel,
319 dst->cpp,
320 src->pitch, src->buffer, src_offset, src->tiling,
321 dst->pitch, dst->buffer, dst_offset, dst->tiling,
322 srcx, srcy, dstx, dsty, width, height,
323 GL_COPY);
324 }
325
326 /* Fill a rectangular sub-region. Need better logic about when to
327 * push buffers into AGP - will currently do so whenever possible.
328 */
329 void
330 intel_region_fill(struct intel_context *intel,
331 struct intel_region *dst,
332 GLuint dst_offset,
333 GLuint dstx, GLuint dsty,
334 GLuint width, GLuint height, GLuint color)
335 {
336 DBG("%s\n", __FUNCTION__);
337
338 if (intel == NULL)
339 return;
340
341 if (dst->pbo) {
342 if (dstx == 0 &&
343 dsty == 0 && width == dst->pitch && height == dst->height)
344 intel_region_release_pbo(intel, dst);
345 else
346 intel_region_cow(intel, dst);
347 }
348
349 intelEmitFillBlit(intel,
350 dst->cpp,
351 dst->pitch, dst->buffer, dst_offset, dst->tiling,
352 dstx, dsty, width, height, color);
353 }
354
355 /* Attach to a pbo, discarding our data. Effectively zero-copy upload
356 * the pbo's data.
357 */
358 void
359 intel_region_attach_pbo(struct intel_context *intel,
360 struct intel_region *region,
361 struct intel_buffer_object *pbo)
362 {
363 if (region->pbo == pbo)
364 return;
365
366 /* If there is already a pbo attached, break the cow tie now.
367 * Don't call intel_region_release_pbo() as that would
368 * unnecessarily allocate a new buffer we would have to immediately
369 * discard.
370 */
371 if (region->pbo) {
372 region->pbo->region = NULL;
373 region->pbo = NULL;
374 }
375
376 if (region->buffer) {
377 dri_bo_unreference(region->buffer);
378 region->buffer = NULL;
379 }
380
381 region->pbo = pbo;
382 region->pbo->region = region;
383 dri_bo_reference(pbo->buffer);
384 region->buffer = pbo->buffer;
385 }
386
387
388 /* Break the COW tie to the pbo and allocate a new buffer.
389 * The pbo gets to keep the data.
390 */
391 void
392 intel_region_release_pbo(struct intel_context *intel,
393 struct intel_region *region)
394 {
395 assert(region->buffer == region->pbo->buffer);
396 region->pbo->region = NULL;
397 region->pbo = NULL;
398 dri_bo_unreference(region->buffer);
399 region->buffer = NULL;
400
401 region->buffer = dri_bo_alloc(intel->bufmgr, "region",
402 region->pitch * region->cpp * region->height,
403 64);
404 }
405
406 /* Break the COW tie to the pbo. Both the pbo and the region end up
407 * with a copy of the data.
408 */
409 void
410 intel_region_cow(struct intel_context *intel, struct intel_region *region)
411 {
412 struct intel_buffer_object *pbo = region->pbo;
413 GLboolean was_locked = intel->locked;
414
415 if (intel == NULL)
416 return;
417
418 intel_region_release_pbo(intel, region);
419
420 assert(region->cpp * region->pitch * region->height == pbo->Base.Size);
421
422 DBG("%s (%d bytes)\n", __FUNCTION__, pbo->Base.Size);
423
424 /* Now blit from the texture buffer to the new buffer:
425 */
426
427 was_locked = intel->locked;
428 if (!was_locked)
429 LOCK_HARDWARE(intel);
430
431 intelEmitCopyBlit(intel,
432 region->cpp,
433 region->pitch, region->buffer, 0, region->tiling,
434 region->pitch, pbo->buffer, 0, region->tiling,
435 0, 0, 0, 0,
436 region->pitch, region->height,
437 GL_COPY);
438
439 if (!was_locked)
440 UNLOCK_HARDWARE(intel);
441 }
442
443 dri_bo *
444 intel_region_buffer(struct intel_context *intel,
445 struct intel_region *region, GLuint flag)
446 {
447 if (region->pbo) {
448 if (flag == INTEL_WRITE_PART)
449 intel_region_cow(intel, region);
450 else if (flag == INTEL_WRITE_FULL)
451 intel_region_release_pbo(intel, region);
452 }
453
454 return region->buffer;
455 }
456
/**
 * (Re)initialize a region describing one of the static buffers (front,
 * back, depth) allocated by the X server.
 *
 * \param region       existing region to refresh, or NULL to allocate one.
 * \param region_desc  screen-private description of the server's buffer
 *                     (GEM handle or classic map handle/offset, tiling flag).
 * \return the region, or NULL on failure.
 */
static struct intel_region *
intel_recreate_static(struct intel_context *intel,
		      const char *name,
		      struct intel_region *region,
		      intelRegion *region_desc)
{
   intelScreenPrivate *intelScreen = intel->intelScreen;
   int ret;

   if (region == NULL) {
      region = calloc(sizeof(*region), 1);
      region->refcount = 1;
   }

   /* rgbBits == 24 still occupies 32 bits per pixel in memory (xRGB). */
   if (intel->ctx.Visual.rgbBits == 24)
      region->cpp = 4;
   else
      region->cpp = intel->ctx.Visual.rgbBits / 8;
   region->pitch = intelScreen->pitch;
   region->width = intelScreen->width;
   region->height = intelScreen->height;

   /* Drop the buffer from any previous (re)creation before rebuilding. */
   if (region->buffer != NULL) {
      dri_bo_unreference(region->buffer);
      region->buffer = NULL;
   }

   if (intel->ttm) {
      /* GEM path: share the server's buffer object by global name and
       * pick up the tiling mode the kernel reports for it.
       */
      assert(region_desc->bo_handle != -1);
      region->buffer = intel_bo_gem_create_from_name(intel->bufmgr,
						     name,
						     region_desc->bo_handle);

      ret = dri_bo_get_tiling(region->buffer, &region->tiling,
			      &region->bit_6_swizzle);
      if (ret != 0) {
	 fprintf(stderr, "Couldn't get tiling of buffer %d (%s): %s\n",
		 region_desc->bo_handle, name, strerror(-ret));
	 intel_region_release(&region);
	 return NULL;
      }
   } else {
      /* Classic path: map the buffer's range directly and wrap the
       * mapping in a fake static buffer object.  Unmap any stale
       * mapping from a previous call first.
       */
      if (region->classic_map != NULL) {
	 drmUnmap(region->classic_map,
		  region->pitch * region->cpp * region->height);
	 region->classic_map = NULL;
      }
      ret = drmMap(intel->driFd, region_desc->handle,
		   region->pitch * region->cpp * region->height,
		   &region->classic_map);
      if (ret != 0) {
	 fprintf(stderr, "Failed to drmMap %s buffer\n", name);
	 free(region);
	 return NULL;
      }

      region->buffer = intel_bo_fake_alloc_static(intel->bufmgr,
						  name,
						  region_desc->offset,
						  region->pitch * region->cpp *
						  region->height,
						  region->classic_map);

      /* The sarea just gives us a boolean for whether it's tiled or not,
       * instead of which tiling mode it is.  Guess.
       */
      if (region_desc->tiled) {
	 /* 965-class hardware uses Y tiling for its depth buffer;
	  * everything else here is X tiled.
	  */
	 if (IS_965(intel->intelScreen->deviceID) &&
	     region_desc == &intelScreen->depth)
	    region->tiling = I915_TILING_Y;
	 else
	    region->tiling = I915_TILING_X;
      } else {
	 region->tiling = I915_TILING_NONE;
      }

      region->bit_6_swizzle = I915_BIT_6_SWIZZLE_NONE;
   }

   assert(region->buffer != NULL);

   return region;
}
540
541 /**
542 * Create intel_region structs to describe the static front, back, and depth
543 * buffers created by the xserver.
544 *
545 * Although FBO's mean we now no longer use these as render targets in
546 * all circumstances, they won't go away until the back and depth
547 * buffers become private, and the front buffer will remain even then.
548 *
549 * Note that these don't allocate video memory, just describe
550 * allocations alread made by the X server.
551 */
552 void
553 intel_recreate_static_regions(struct intel_context *intel)
554 {
555 intelScreenPrivate *intelScreen = intel->intelScreen;
556
557 intel->front_region =
558 intel_recreate_static(intel, "front",
559 intel->front_region,
560 &intelScreen->front);
561
562 intel->back_region =
563 intel_recreate_static(intel, "back",
564 intel->back_region,
565 &intelScreen->back);
566
567 /* Still assumes front.cpp == depth.cpp. We can kill this when we move to
568 * private buffers.
569 */
570 intel->depth_region =
571 intel_recreate_static(intel, "depth",
572 intel->depth_region,
573 &intelScreen->depth);
574 }