1 /**************************************************************************
3 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 /* Provide additional functionality on top of bufmgr buffers:
29 * - 2d semantics and blit operations
30 * - refcounting of buffers for multiple images in a buffer.
31 * - refcounting of buffer mappings.
32 * - some logic for moving the buffers to the best memory pools for
35 * Most of this is to make it easier to implement the fixed-layout
36 * mipmap tree required by intel hardware in the face of GL's
37 * programming interface where each image can be specifed in random
38 * order and it isn't clear what layout the tree should have until the
42 #include "intel_context.h"
43 #include "intel_regions.h"
44 #include "intel_blit.h"
45 #include "intel_buffer_objects.h"
46 #include "dri_bufmgr.h"
47 #include "intel_bufmgr_ttm.h"
48 #include "intel_batchbuffer.h"
50 #define FILE_DEBUG_FLAG DEBUG_REGION
52 /* XXX: Thread safety?
/* Map the region's buffer object for CPU access, refcounting nested
 * mappings; the mapped pointer is published in region->map.
 * NOTE(review): the opening brace, closing brace and return statement of
 * this function are not visible — lines appear lost in extraction; verify
 * against the original file.
 */
55 intel_region_map(struct intel_context
*intel
, struct intel_region
*region
)
57 DBG("%s\n", __FUNCTION__
);
/* Only the first mapper (map_refcount 0 -> 1) performs the real map. */
58 if (!region
->map_refcount
++) {
/* Break any copy-on-write tie to a PBO before handing out a CPU
 * pointer, so writes cannot reach the shared PBO storage. */
60 intel_region_cow(intel
, region
);
/* GL_TRUE presumably requests write access — confirm against the
 * dri_bo_map() prototype. */
62 dri_bo_map(region
->buffer
, GL_TRUE
);
63 region
->map
= region
->buffer
->virtual;
/* Drop one CPU-mapping reference; the last unmapper (refcount hits 0)
 * actually unmaps the buffer object.
 * NOTE(review): the closing brace(s) and any region->map reset are not
 * visible here — verify against the original file.
 */
70 intel_region_unmap(struct intel_context
*intel
, struct intel_region
*region
)
72 DBG("%s\n", __FUNCTION__
);
/* Pre-decrement: only when the count reaches zero do we unmap. */
73 if (!--region
->map_refcount
) {
74 dri_bo_unmap(region
->buffer
);
/* Common constructor: wrap an existing dri_bo in a freshly calloc'd
 * intel_region and record its 2D layout (pitch/height/tiling).
 * Takes ownership of `buffer`.
 * NOTE(review): the calloc NULL-check, the cpp/refcount initialization and
 * the return statement are not visible — lines appear lost in extraction.
 */
79 static struct intel_region
*
80 intel_region_alloc_internal(struct intel_context
*intel
,
81 GLuint cpp
, GLuint pitch
, GLuint height
,
82 GLuint tiled
, dri_bo
*buffer
)
84 struct intel_region
*region
;
86 DBG("%s\n", __FUNCTION__
);
/* calloc zero-fills, so refcounts/pointers start out cleared. */
91 region
= calloc(sizeof(*region
), 1);
93 region
->pitch
= pitch
;
94 region
->height
= height
; /* needed? */
96 region
->tiled
= tiled
;
97 region
->buffer
= buffer
;
/* Allocate a new region backed by a freshly allocated, 64-byte-aligned,
 * cached-mappable local-memory buffer object of pitch*cpp*height bytes.
 * Always untiled (tiled == 0).
 * NOTE(review): the opening brace and the `dri_bo *buffer;` declaration are
 * not visible — lines appear lost in extraction.
 */
102 struct intel_region
*
103 intel_region_alloc(struct intel_context
*intel
,
104 GLuint cpp
, GLuint pitch
, GLuint height
)
108 buffer
= dri_bo_alloc(intel
->bufmgr
, "region",
109 pitch
* cpp
* height
, 64,
110 DRM_BO_FLAG_MEM_LOCAL
|
112 DRM_BO_FLAG_CACHED_MAPPED
);
/* Ownership of `buffer` transfers to the new region. */
114 return intel_region_alloc_internal(intel
, cpp
, pitch
, height
, 0, buffer
);
/* Allocate a region wrapping an existing kernel buffer object identified
 * by a TTM handle (e.g. a buffer shared from the X server), rather than
 * allocating new storage.
 * NOTE(review): the opening brace and the `dri_bo *buffer;` declaration are
 * not visible — lines appear lost in extraction.
 */
117 struct intel_region
*
118 intel_region_alloc_for_handle(struct intel_context
*intel
,
119 GLuint cpp
, GLuint pitch
, GLuint height
,
120 GLuint tiled
, GLuint handle
)
124 buffer
= intel_ttm_bo_create_from_handle(intel
->bufmgr
, "region", handle
);
126 return intel_region_alloc_internal(intel
,
127 cpp
, pitch
, height
, tiled
, buffer
);
131 intel_region_reference(struct intel_region
**dst
, struct intel_region
*src
)
133 assert(*dst
== NULL
);
/* Drop one reference on *region; on the last release, detach any attached
 * PBO and unreference the underlying buffer object.
 * NOTE(review): a `if ((*region)->pbo)` guard appears to have been lost in
 * extraction before the pbo-clearing lines below (the original line numbers
 * skip) — as shown, this would dereference a NULL pbo.  Also the trailing
 * free/NULL-out of *region is not visible.  Verify against the original.
 */
141 intel_region_release(struct intel_region
**region
)
146 DBG("%s %d\n", __FUNCTION__
, (*region
)->refcount
- 1);
148 ASSERT((*region
)->refcount
> 0);
149 (*region
)->refcount
--;
/* Last reference gone: tear the region down. */
151 if ((*region
)->refcount
== 0) {
/* Nobody may still hold a CPU mapping at destruction time. */
152 assert((*region
)->map_refcount
== 0);
/* Break the back-pointer from the PBO before dropping ours. */
155 (*region
)->pbo
->region
= NULL
;
156 (*region
)->pbo
= NULL
;
157 dri_bo_unreference((*region
)->buffer
);
164 * XXX Move this into core Mesa?
167 _mesa_copy_rect(GLubyte
* dst
,
175 GLuint src_pitch
, GLuint src_x
, GLuint src_y
)
183 dst
+= dst_y
* dst_pitch
;
184 src
+= src_y
* dst_pitch
;
187 if (width
== dst_pitch
&& width
== src_pitch
)
188 memcpy(dst
, src
, height
* width
);
190 for (i
= 0; i
< height
; i
++) {
191 memcpy(dst
, src
, width
);
199 /* Upload data to a rectangular sub-region. Lots of choices how to do this:
201 * - memcpy by span to current destination
202 * - upload data as new buffer and blit
204 * Currently always memcpy.
/* NOTE(review): the leading condition of the release-pbo test (which
 * presumably checks `dst->pbo` and `dstx == 0` — the original line numbers
 * skip here) and the else-branch/closing braces are not visible; verify
 * against the original file.
 */
207 intel_region_data(struct intel_context
*intel
,
208 struct intel_region
*dst
,
210 GLuint dstx
, GLuint dsty
,
211 const void *src
, GLuint src_pitch
,
212 GLuint srcx
, GLuint srcy
, GLuint width
, GLuint height
)
214 GLboolean locked
= GL_FALSE
;
216 DBG("%s\n", __FUNCTION__
);
/* Full overwrite of the region: drop the PBO tie entirely rather than
 * copying data we are about to replace. */
223 dsty
== 0 && width
== dst
->pitch
&& height
== dst
->height
)
224 intel_region_release_pbo(intel
, dst
);
/* Partial overwrite: break the COW tie, keeping current contents. */
226 intel_region_cow(intel
, dst
);
/* Take the hardware lock ourselves only if the caller doesn't hold it. */
229 if (!intel
->locked
) {
230 LOCK_HARDWARE(intel
);
/* memcpy the rectangle through a CPU mapping of the destination. */
234 _mesa_copy_rect(intel_region_map(intel
, dst
) + dst_offset
,
237 dstx
, dsty
, width
, height
, src
, src_pitch
, srcx
, srcy
);
239 intel_region_unmap(intel
, dst
);
242 UNLOCK_HARDWARE(intel
);
246 /* Copy rectangular sub-regions. Need better logic about when to
247 * push buffers into AGP - will currently do so whenever possible.
/* Blit `width` x `height` pixels from (srcx,srcy) in `src` to (dstx,dsty)
 * in `dst` using the 2D engine.
 * NOTE(review): the leading part of the release-pbo condition, the
 * src_offset/dst_offset computations, and the tail of the
 * intelEmitCopyBlit() argument list (raster-op etc.) are not visible —
 * lines appear lost in extraction; verify against the original file.
 */
250 intel_region_copy(struct intel_context
*intel
,
251 struct intel_region
*dst
,
253 GLuint dstx
, GLuint dsty
,
254 struct intel_region
*src
,
256 GLuint srcx
, GLuint srcy
, GLuint width
, GLuint height
)
258 DBG("%s\n", __FUNCTION__
);
/* Full overwrite of dst: cheaper to drop the PBO tie than to copy. */
265 dsty
== 0 && width
== dst
->pitch
&& height
== dst
->height
)
266 intel_region_release_pbo(intel
, dst
);
268 intel_region_cow(intel
, dst
);
/* The blitter copies pixel-for-pixel; both regions must agree on cpp. */
271 assert(src
->cpp
== dst
->cpp
);
273 intelEmitCopyBlit(intel
,
275 src
->pitch
, src
->buffer
, src_offset
, src
->tiled
,
276 dst
->pitch
, dst
->buffer
, dst_offset
, dst
->tiled
,
277 srcx
, srcy
, dstx
, dsty
, width
, height
,
281 /* Fill a rectangular sub-region. Need better logic about when to
282 * push buffers into AGP - will currently do so whenever possible.
/* Fill `width` x `height` pixels at (dstx,dsty) in `dst` with a solid
 * `color` using the 2D fill blitter.
 * NOTE(review): the leading part of the release-pbo condition and the
 * dst_offset computation are not visible — lines appear lost in
 * extraction; verify against the original file.
 */
285 intel_region_fill(struct intel_context
*intel
,
286 struct intel_region
*dst
,
288 GLuint dstx
, GLuint dsty
,
289 GLuint width
, GLuint height
, GLuint color
)
291 DBG("%s\n", __FUNCTION__
);
/* Full overwrite of dst: drop the PBO tie instead of copying. */
298 dsty
== 0 && width
== dst
->pitch
&& height
== dst
->height
)
299 intel_region_release_pbo(intel
, dst
);
301 intel_region_cow(intel
, dst
);
304 intelEmitFillBlit(intel
,
306 dst
->pitch
, dst
->buffer
, dst_offset
, dst
->tiled
,
307 dstx
, dsty
, width
, height
, color
);
310 /* Attach to a pbo, discarding our data. Effectively zero-copy upload
/* Point this region at a buffer-object-backed PBO: drop the region's own
 * storage and share the PBO's buffer (copy-on-write tie).
 * NOTE(review): the early return for the already-attached case, the guard
 * around the old-pbo detach, and what appears to be a `region->pbo = pbo;`
 * assignment before the back-pointer is set are not visible — the original
 * line numbers skip; verify against the original file.
 */
314 intel_region_attach_pbo(struct intel_context
*intel
,
315 struct intel_region
*region
,
316 struct intel_buffer_object
*pbo
)
/* Already attached to this very PBO: nothing to do. */
318 if (region
->pbo
== pbo
)
321 /* If there is already a pbo attached, break the cow tie now.
322 * Don't call intel_region_release_pbo() as that would
323 * unnecessarily allocate a new buffer we would have to immediately
327 region
->pbo
->region
= NULL
;
/* Discard the region's private storage — the PBO's buffer replaces it. */
331 if (region
->buffer
) {
332 dri_bo_unreference(region
->buffer
);
333 region
->buffer
= NULL
;
/* Establish the two-way tie and share the PBO's buffer object. */
337 region
->pbo
->region
= region
;
338 dri_bo_reference(pbo
->buffer
);
339 region
->buffer
= pbo
->buffer
;
343 /* Break the COW tie to the pbo and allocate a new buffer.
344 * The pbo gets to keep the data.
/* NOTE(review): the `region->pbo = NULL;` assignment and the closing brace
 * are not visible here — the original line numbers skip; verify against
 * the original file.
 */
347 intel_region_release_pbo(struct intel_context
*intel
,
348 struct intel_region
*region
)
/* While tied, the region must be sharing the PBO's buffer object. */
350 assert(region
->buffer
== region
->pbo
->buffer
);
/* Sever the back-pointer from the PBO side. */
351 region
->pbo
->region
= NULL
;
/* Drop our share of the PBO's buffer... */
353 dri_bo_unreference(region
->buffer
);
354 region
->buffer
= NULL
;
/* ...and give the region fresh private storage of the same size. */
356 region
->buffer
= dri_bo_alloc(intel
->bufmgr
, "region",
357 region
->pitch
* region
->cpp
* region
->height
,
359 DRM_BO_FLAG_MEM_LOCAL
|
361 DRM_BO_FLAG_CACHED_MAPPED
);
364 /* Break the COW tie to the pbo. Both the pbo and the region end up
365 * with a copy of the data.
/* NOTE(review): the early-out when no PBO is attached, the middle of the
 * intelEmitCopyBlit() argument list, and the conditional lock/unlock logic
 * around `was_locked` are not fully visible — the original line numbers
 * skip; verify against the original file before drawing conclusions about
 * the blit direction or locking protocol.
 */
368 intel_region_cow(struct intel_context
*intel
, struct intel_region
*region
)
370 struct intel_buffer_object
*pbo
= region
->pbo
;
371 GLboolean was_locked
= intel
->locked
;
/* Give the region its own buffer; `pbo` keeps the shared data. */
376 intel_region_release_pbo(intel
, region
);
/* The PBO and the region must describe the same number of bytes. */
378 assert(region
->cpp
* region
->pitch
* region
->height
== pbo
->Base
.Size
);
380 DBG("%s (%d bytes)\n", __FUNCTION__
, pbo
->Base
.Size
);
382 /* Now blit from the texture buffer to the new buffer:
385 intel_batchbuffer_flush(intel
->batch
);
387 was_locked
= intel
->locked
;
389 LOCK_HARDWARE(intel
);
391 intelEmitCopyBlit(intel
,
393 region
->pitch
, region
->buffer
, 0, region
->tiled
,
394 region
->pitch
, pbo
->buffer
, 0, region
->tiled
,
396 region
->pitch
, region
->height
,
/* Flush so the copy completes before anyone touches either buffer. */
399 intel_batchbuffer_flush(intel
->batch
);
402 UNLOCK_HARDWARE(intel
);
/* Return the region's underlying dri_bo, first resolving any PBO COW tie
 * according to the caller's declared intent: a partial write needs the
 * current contents preserved (cow), a full overwrite can just drop the
 * tie (release_pbo).
 * NOTE(review): the return type, opening brace and any guard around the
 * flag handling (e.g. `if (region->pbo)`) are not visible — lines appear
 * lost in extraction.
 */
406 intel_region_buffer(struct intel_context
*intel
,
407 struct intel_region
*region
, GLuint flag
)
410 if (flag
== INTEL_WRITE_PART
)
411 intel_region_cow(intel
, region
);
412 else if (flag
== INTEL_WRITE_FULL
)
413 intel_region_release_pbo(intel
, region
);
416 return region
->buffer
;
/* (Re)build an intel_region describing a static buffer created by the X
 * server (front/back/depth), from the screen-private description in
 * `region_desc`.  Reuses the passed-in region if non-NULL, otherwise
 * allocates one with refcount 1.
 * NOTE(review): several pieces are not visible — a parameter between
 * `region` and `region_desc` (likely a name string, given the callers
 * below), the body of the rgbBits == 24 special case, the branch condition
 * selecting TTM-handle vs. static allocation, the dri_bo_alloc_static()
 * argument list, and the return statement.  The original line numbers
 * skip; verify against the original file.
 */
419 static struct intel_region
*
420 intel_recreate_static(struct intel_context
*intel
,
422 struct intel_region
*region
,
423 intelRegion
*region_desc
,
426 intelScreenPrivate
*intelScreen
= intel
->intelScreen
;
428 if (region
== NULL
) {
429 region
= calloc(sizeof(*region
), 1);
430 region
->refcount
= 1;
/* Special-case 24-bit visuals (body not visible here). */
433 if (intel
->ctx
.Visual
.rgbBits
== 24)
436 region
->cpp
= intel
->ctx
.Visual
.rgbBits
/ 8;
437 region
->pitch
= intelScreen
->pitch
;
438 region
->height
= intelScreen
->height
; /* needed? */
439 region
->tiled
= region_desc
->tiled
;
/* TTM path: wrap the X server's buffer via its kernel handle. */
442 assert(region_desc
->bo_handle
!= -1);
443 region
->buffer
= intel_ttm_bo_create_from_handle(intel
->bufmgr
,
445 region_desc
->bo_handle
);
/* Non-TTM path: describe the fixed allocation directly. */
447 region
->buffer
= dri_bo_alloc_static(intel
->bufmgr
,
456 assert(region
->buffer
!= NULL
);
462 * Create intel_region structs to describe the static front, back, and depth
463 * buffers created by the xserver.
465 * Although FBO's mean we now no longer use these as render targets in
466 * all circumstances, they won't go away until the back and depth
467 * buffers become private, and the front buffer will remain even then.
469 * Note that these don't allocate video memory, just describe
470 * allocations alread made by the X server.
473 intel_recreate_static_regions(struct intel_context
*intel
)
475 intelScreenPrivate
*intelScreen
= intel
->intelScreen
;
477 intel
->front_region
=
478 intel_recreate_static(intel
, "front",
484 intel_recreate_static(intel
, "back",
490 if (intelScreen
->third
.handle
) {
491 intel
->third_region
=
492 intel_recreate_static(intel
, "third",
499 /* Still assumes front.cpp == depth.cpp. We can kill this when we move to
502 intel
->depth_region
=
503 intel_recreate_static(intel
, "depth",