src/mesa/drivers/dri/intel/intel_regions.c
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/* Provide additional functionality on top of bufmgr buffers:
 *   - 2d semantics and blit operations
 *   - refcounting of buffers for multiple images in a buffer.
 *   - refcounting of buffer mappings.
 *   - some logic for moving the buffers to the best memory pools for
 *     given operations.
 *
 * Most of this is to make it easier to implement the fixed-layout
 * mipmap tree required by intel hardware in the face of GL's
 * programming interface where each image can be specified in random
 * order and it isn't clear what layout the tree should have until the
 * last moment.
 */
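
/* Example (illustrative sketch only, not driver code): the typical
 * lifecycle of a region as implemented below.  The 512x512 RGBA sizes
 * are made up for illustration, and any hardware-lock requirements of
 * the caller are ignored here.
 *
 *    struct intel_region *region =
 *       intel_region_alloc(intel, 4, 512, 512);       // cpp, pitch (pixels), height
 *    GLubyte *map = intel_region_map(intel, region);  // maps the underlying dri_bo
 *    memset(map, 0, 512 * 4);                         // write the first row
 *    intel_region_unmap(intel, region);
 *    intel_region_release(&region);                   // freed when refcount hits 0
 *
 * Both the mapping and the region itself are refcounted, so nested
 * map/unmap pairs and multiple owners (e.g. several mipmap images
 * sharing one buffer) are safe as long as the calls stay balanced.
 */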

#include <sys/ioctl.h>
#include <errno.h>

#include "intel_context.h"
#include "intel_regions.h"
#include "intel_blit.h"
#include "intel_buffer_objects.h"
#include "dri_bufmgr.h"
#include "intel_bufmgr.h"
#include "intel_batchbuffer.h"
#include "intel_chipset.h"

#define FILE_DEBUG_FLAG DEBUG_REGION

/* XXX: Thread safety?
 */
GLubyte *
intel_region_map(struct intel_context *intel, struct intel_region *region)
{
   DBG("%s\n", __FUNCTION__);
   if (!region->map_refcount++) {
      if (region->pbo)
         intel_region_cow(intel, region);

      dri_bo_map(region->buffer, GL_TRUE);
      region->map = region->buffer->virtual;
   }

   return region->map;
}

void
intel_region_unmap(struct intel_context *intel, struct intel_region *region)
{
   DBG("%s\n", __FUNCTION__);
   if (!--region->map_refcount) {
      dri_bo_unmap(region->buffer);
      region->map = NULL;
   }
}

static int
intel_set_region_tiling_gem(struct intel_context *intel,
                            struct intel_region *region,
                            uint32_t bo_handle)
{
   struct drm_i915_gem_get_tiling get_tiling;
   int ret;

   memset(&get_tiling, 0, sizeof(get_tiling));

   get_tiling.handle = bo_handle;
   ret = ioctl(intel->driFd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
   if (ret != 0) {
      fprintf(stderr, "Failed to get tiling state for region: %s\n",
              strerror(errno));
      return ret;
   }

   region->tiling = get_tiling.tiling_mode;
   region->bit_6_swizzle = get_tiling.swizzle_mode;

   return 0;
}

static struct intel_region *
intel_region_alloc_internal(struct intel_context *intel,
                            GLuint cpp, GLuint pitch, GLuint height,
                            dri_bo *buffer)
{
   struct intel_region *region;

   DBG("%s\n", __FUNCTION__);

   if (buffer == NULL)
      return NULL;

   region = calloc(sizeof(*region), 1);
   region->cpp = cpp;
   region->pitch = pitch;
   region->height = height;     /* needed? */
   region->refcount = 1;
   region->buffer = buffer;

   /* Default to no tiling */
   region->tiling = I915_TILING_NONE;
   region->bit_6_swizzle = I915_BIT_6_SWIZZLE_NONE;

   return region;
}

struct intel_region *
intel_region_alloc(struct intel_context *intel,
                   GLuint cpp, GLuint pitch, GLuint height)
{
   dri_bo *buffer;

   buffer = dri_bo_alloc(intel->bufmgr, "region",
                         pitch * cpp * height, 64);

   return intel_region_alloc_internal(intel, cpp, pitch, height, buffer);
}

struct intel_region *
intel_region_alloc_for_handle(struct intel_context *intel,
                              GLuint cpp, GLuint pitch, GLuint height,
                              GLuint handle)
{
   struct intel_region *region;
   dri_bo *buffer;

   buffer = intel_bo_gem_create_from_name(intel->bufmgr, "dri2 region", handle);

   region = intel_region_alloc_internal(intel, cpp, pitch, height, buffer);
   if (region == NULL)
      return region;

   intel_set_region_tiling_gem(intel, region, handle);

   return region;
}
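
/* Example (sketch, not driver code): wrapping a buffer that the X
 * server exported by GEM name, e.g. a DRI2-provided renderbuffer.  The
 * cpp/pitch/height values would come from the DRI2 buffer information;
 * the numbers and the dri2_buffer_name variable are placeholders.
 *
 *    struct intel_region *shared =
 *       intel_region_alloc_for_handle(intel, 4, 1024, 768, dri2_buffer_name);
 *    if (shared != NULL) {
 *       // shared->tiling and shared->bit_6_swizzle now reflect the
 *       // kernel's GET_TILING state for the shared object.
 *    }
 */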

void
intel_region_reference(struct intel_region **dst, struct intel_region *src)
{
   assert(*dst == NULL);
   if (src) {
      src->refcount++;
      *dst = src;
   }
}

void
intel_region_release(struct intel_region **region_handle)
{
   struct intel_region *region = *region_handle;

   if (region == NULL)
      return;

   DBG("%s %d\n", __FUNCTION__, region->refcount - 1);

   ASSERT(region->refcount > 0);
   region->refcount--;

   if (region->refcount == 0) {
      assert(region->map_refcount == 0);

      if (region->pbo)
         region->pbo->region = NULL;
      region->pbo = NULL;
      dri_bo_unreference(region->buffer);

      if (region->classic_map != NULL) {
         drmUnmap(region->classic_map,
                  region->pitch * region->cpp * region->height);
      }

      free(region);
   }
   *region_handle = NULL;
}

/*
 * XXX Move this into core Mesa?
 */
void
_mesa_copy_rect(GLubyte * dst,
                GLuint cpp,
                GLuint dst_pitch,
                GLuint dst_x,
                GLuint dst_y,
                GLuint width,
                GLuint height,
                const GLubyte * src,
                GLuint src_pitch, GLuint src_x, GLuint src_y)
{
   GLuint i;

   dst_pitch *= cpp;
   src_pitch *= cpp;
   dst += dst_x * cpp;
   src += src_x * cpp;
   dst += dst_y * dst_pitch;
   src += src_y * src_pitch;
   width *= cpp;

   if (width == dst_pitch && width == src_pitch)
      memcpy(dst, src, height * width);
   else {
      for (i = 0; i < height; i++) {
         memcpy(dst, src, width);
         dst += dst_pitch;
         src += src_pitch;
      }
   }
}
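
/* Worked example (illustration only): copying a 16x16 block at (8, 4)
 * of a 64-pixel-wide RGBA source into the top-left corner of a
 * 128-pixel-wide RGBA destination.  With cpp = 4 the code above computes
 *
 *    dst_pitch = 128 * 4 = 512 bytes     src_pitch = 64 * 4 = 256 bytes
 *    src      += 8 * 4 + 4 * 256         (column offset, then row offset)
 *    width     = 16 * 4 = 64 bytes per row
 *
 * and, since 64 != 512, it falls through to the per-row loop, copying
 * 16 rows of 64 bytes each.
 */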


/* Upload data to a rectangular sub-region.  There are several choices
 * for how to do this:
 *
 * - memcpy by span to current destination
 * - upload data as new buffer and blit
 *
 * Currently always memcpy.
 */
void
intel_region_data(struct intel_context *intel,
                  struct intel_region *dst,
                  GLuint dst_offset,
                  GLuint dstx, GLuint dsty,
                  const void *src, GLuint src_pitch,
                  GLuint srcx, GLuint srcy, GLuint width, GLuint height)
{
   GLboolean locked = GL_FALSE;

   DBG("%s\n", __FUNCTION__);

   if (intel == NULL)
      return;

   if (dst->pbo) {
      if (dstx == 0 &&
          dsty == 0 && width == dst->pitch && height == dst->height)
         intel_region_release_pbo(intel, dst);
      else
         intel_region_cow(intel, dst);
   }

   if (!intel->locked) {
      LOCK_HARDWARE(intel);
      locked = GL_TRUE;
   }

   _mesa_copy_rect(intel_region_map(intel, dst) + dst_offset,
                   dst->cpp,
                   dst->pitch,
                   dstx, dsty, width, height, src, src_pitch, srcx, srcy);

   intel_region_unmap(intel, dst);

   if (locked)
      UNLOCK_HARDWARE(intel);
}
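
/* Example (sketch, not driver code): a texture-upload path could push
 * a user-supplied subimage into a region like this.  The mt->region,
 * image_offset and src_row_pixels names are placeholders; real callers
 * derive them from the miptree layout and the unpack state.
 *
 *    intel_region_data(intel, mt->region,
 *                      image_offset,         // byte offset of the image
 *                      xoffset, yoffset,     // destination x/y in pixels
 *                      pixels, src_row_pixels,
 *                      0, 0, width, height);
 *
 * If the region is still borrowing a PBO's storage, the call first
 * breaks that tie (full overwrite -> release the pbo, partial
 * overwrite -> copy-on-write) before mapping and copying.
 */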

/* Copy rectangular sub-regions.  Need better logic about when to
 * push buffers into AGP - will currently do so whenever possible.
 */
void
intel_region_copy(struct intel_context *intel,
                  struct intel_region *dst,
                  GLuint dst_offset,
                  GLuint dstx, GLuint dsty,
                  struct intel_region *src,
                  GLuint src_offset,
                  GLuint srcx, GLuint srcy, GLuint width, GLuint height)
{
   DBG("%s\n", __FUNCTION__);

   if (intel == NULL)
      return;

   if (dst->pbo) {
      if (dstx == 0 &&
          dsty == 0 && width == dst->pitch && height == dst->height)
         intel_region_release_pbo(intel, dst);
      else
         intel_region_cow(intel, dst);
   }

   assert(src->cpp == dst->cpp);

   intelEmitCopyBlit(intel,
                     dst->cpp,
                     src->pitch, src->buffer, src_offset, src->tiling,
                     dst->pitch, dst->buffer, dst_offset, dst->tiling,
                     srcx, srcy, dstx, dsty, width, height,
                     GL_COPY);
}

/* Fill a rectangular sub-region.  Need better logic about when to
 * push buffers into AGP - will currently do so whenever possible.
 */
void
intel_region_fill(struct intel_context *intel,
                  struct intel_region *dst,
                  GLuint dst_offset,
                  GLuint dstx, GLuint dsty,
                  GLuint width, GLuint height, GLuint color)
{
   DBG("%s\n", __FUNCTION__);

   if (intel == NULL)
      return;

   if (dst->pbo) {
      if (dstx == 0 &&
          dsty == 0 && width == dst->pitch && height == dst->height)
         intel_region_release_pbo(intel, dst);
      else
         intel_region_cow(intel, dst);
   }

   intelEmitFillBlit(intel,
                     dst->cpp,
                     dst->pitch, dst->buffer, dst_offset, dst->tiling,
                     dstx, dsty, width, height, color);
}
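
/* Example (sketch, not driver code): clearing a whole region.  The
 * color argument is handed straight to the fill blit, so the caller is
 * assumed to have packed it to the region's pixel format already
 * (e.g. 0x00000000 for transparent black in a 32-bit ARGB region).
 *
 *    intel_region_fill(intel, region,
 *                      0,                     // dst_offset
 *                      0, 0,                  // dstx, dsty
 *                      region->pitch, region->height,
 *                      0x00000000);
 */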

/* Attach to a pbo, discarding our data.  Effectively zero-copy upload
 * the pbo's data.
 */
void
intel_region_attach_pbo(struct intel_context *intel,
                        struct intel_region *region,
                        struct intel_buffer_object *pbo)
{
   if (region->pbo == pbo)
      return;

   /* If there is already a pbo attached, break the cow tie now.
    * Don't call intel_region_release_pbo() as that would
    * unnecessarily allocate a new buffer we would have to immediately
    * discard.
    */
   if (region->pbo) {
      region->pbo->region = NULL;
      region->pbo = NULL;
   }

   if (region->buffer) {
      dri_bo_unreference(region->buffer);
      region->buffer = NULL;
   }

   region->pbo = pbo;
   region->pbo->region = region;
   dri_bo_reference(pbo->buffer);
   region->buffer = pbo->buffer;
}


/* Break the COW tie to the pbo and allocate a new buffer.
 * The pbo gets to keep the data.
 */
void
intel_region_release_pbo(struct intel_context *intel,
                         struct intel_region *region)
{
   assert(region->buffer == region->pbo->buffer);
   region->pbo->region = NULL;
   region->pbo = NULL;
   dri_bo_unreference(region->buffer);
   region->buffer = NULL;

   region->buffer = dri_bo_alloc(intel->bufmgr, "region",
                                 region->pitch * region->cpp * region->height,
                                 64);
}

/* Break the COW tie to the pbo.  Both the pbo and the region end up
 * with a copy of the data.
 */
void
intel_region_cow(struct intel_context *intel, struct intel_region *region)
{
   struct intel_buffer_object *pbo = region->pbo;
   GLboolean was_locked;

   if (intel == NULL)
      return;

   intel_region_release_pbo(intel, region);

   assert(region->cpp * region->pitch * region->height == pbo->Base.Size);

   DBG("%s (%d bytes)\n", __FUNCTION__, (int) pbo->Base.Size);

   /* Now blit from the pbo's buffer (which keeps the data) into the
    * region's freshly allocated buffer:
    */
   was_locked = intel->locked;
   if (!was_locked)
      LOCK_HARDWARE(intel);

   intelEmitCopyBlit(intel,
                     region->cpp,
                     region->pitch, pbo->buffer, 0, region->tiling,
                     region->pitch, region->buffer, 0, region->tiling,
                     0, 0, 0, 0,
                     region->pitch, region->height,
                     GL_COPY);

   if (!was_locked)
      UNLOCK_HARDWARE(intel);
}
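
/* Example (sketch, not driver code) of how the three pbo hooks above
 * fit together for a zero-copy glTexImage-from-PBO style path.  The
 * function calls are real; the surrounding scenario is illustrative.
 *
 *    intel_region_attach_pbo(intel, region, pbo);
 *       // region->buffer now aliases pbo->buffer: no copy is made.
 *
 *    // Later, the app overwrites only part of the texture:
 *    intel_region_cow(intel, region);
 *       // region gets a fresh buffer, the old contents are blitted
 *       // into it, and the pbo keeps its own copy of the data.
 *
 *    // ...or the app overwrites all of it:
 *    intel_region_release_pbo(intel, region);
 *       // region just gets a fresh, uninitialized buffer.
 */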

dri_bo *
intel_region_buffer(struct intel_context *intel,
                    struct intel_region *region, GLuint flag)
{
   if (region->pbo) {
      if (flag == INTEL_WRITE_PART)
         intel_region_cow(intel, region);
      else if (flag == INTEL_WRITE_FULL)
         intel_region_release_pbo(intel, region);
   }

   return region->buffer;
}
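
/* Example (sketch, not driver code): code that is about to emit state
 * pointing at a region asks for the underlying dri_bo with its intended
 * access as the flag, and any outstanding pbo tie is resolved to match.
 *
 *    dri_bo *bo = intel_region_buffer(intel, region, INTEL_WRITE_PART);
 *       // partial write: the pbo data is copied into the region first
 *
 *    dri_bo *bo2 = intel_region_buffer(intel, region, INTEL_WRITE_FULL);
 *       // full overwrite: just detach and take a fresh buffer
 */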

static struct intel_region *
intel_recreate_static(struct intel_context *intel,
                      const char *name,
                      struct intel_region *region,
                      intelRegion *region_desc)
{
   intelScreenPrivate *intelScreen = intel->intelScreen;
   int ret;

   if (region == NULL) {
      region = calloc(sizeof(*region), 1);
      region->refcount = 1;
   }

   if (intel->ctx.Visual.rgbBits == 24)
      region->cpp = 4;
   else
      region->cpp = intel->ctx.Visual.rgbBits / 8;
   region->pitch = intelScreen->pitch;
   region->height = intelScreen->height;     /* needed? */

   if (intel->ttm) {
      assert(region_desc->bo_handle != -1);
      region->buffer = intel_bo_gem_create_from_name(intel->bufmgr,
                                                     name,
                                                     region_desc->bo_handle);

      intel_set_region_tiling_gem(intel, region, region_desc->bo_handle);
   } else {
      ret = drmMap(intel->driFd, region_desc->handle,
                   region->pitch * region->cpp * region->height,
                   &region->classic_map);
      if (ret != 0) {
         fprintf(stderr, "Failed to drmMap %s buffer\n", name);
         free(region);
         return NULL;
      }

      region->buffer = intel_bo_fake_alloc_static(intel->bufmgr,
                                                  name,
                                                  region_desc->offset,
                                                  region->pitch * region->cpp *
                                                  region->height,
                                                  region->classic_map);

      /* The sarea just gives us a boolean for whether it's tiled or not,
       * instead of which tiling mode it is.  Guess.
       */
      if (region_desc->tiled) {
         if (IS_965(intel->intelScreen->deviceID) &&
             region_desc == &intelScreen->depth)
            region->tiling = I915_TILING_Y;
         else
            region->tiling = I915_TILING_X;
      } else {
         region->tiling = I915_TILING_NONE;
      }

      region->bit_6_swizzle = I915_BIT_6_SWIZZLE_NONE;
   }

   assert(region->buffer != NULL);

   return region;
}

/**
 * Create intel_region structs to describe the static front, back, and depth
 * buffers created by the xserver.
 *
 * Although FBOs mean we now no longer use these as render targets in
 * all circumstances, they won't go away until the back and depth
 * buffers become private, and the front buffer will remain even then.
 *
 * Note that these don't allocate video memory, just describe
 * allocations already made by the X server.
 */
void
intel_recreate_static_regions(struct intel_context *intel)
{
   intelScreenPrivate *intelScreen = intel->intelScreen;

   intel->front_region =
      intel_recreate_static(intel, "front",
                            intel->front_region,
                            &intelScreen->front);

   intel->back_region =
      intel_recreate_static(intel, "back",
                            intel->back_region,
                            &intelScreen->back);

#ifdef I915
   if (intelScreen->third.handle) {
      intel->third_region =
         intel_recreate_static(intel, "third",
                               intel->third_region,
                               &intelScreen->third);
   }
#endif /* I915 */

   /* Still assumes front.cpp == depth.cpp.  We can kill this when we move to
    * private buffers.
    */
   intel->depth_region =
      intel_recreate_static(intel, "depth",
                            intel->depth_region,
                            &intelScreen->depth);
}