/*
 * Copyright 2010 Dave Airlie
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <stdint.h>
#include <stdlib.h>
#include <pipe/p_compiler.h>
#include <pipe/p_screen.h>
#include <pipebuffer/pb_bufmgr.h>
#include "state_tracker/drm_driver.h"
#include "r600_priv.h"
#include "radeon_drm.h"
35 struct r600_bo
*r600_bo(struct radeon
*radeon
,
36 unsigned size
, unsigned alignment
,
37 unsigned binding
, unsigned usage
)
40 struct radeon_bo
*rbo
;
42 if (binding
& (PIPE_BIND_CONSTANT_BUFFER
| PIPE_BIND_VERTEX_BUFFER
| PIPE_BIND_INDEX_BUFFER
)) {
43 bo
= r600_bomgr_bo_create(radeon
->bomgr
, size
, alignment
, *radeon
->cfence
);
49 rbo
= radeon_bo(radeon
, 0, size
, alignment
);
54 bo
= calloc(1, sizeof(struct r600_bo
));
56 bo
->alignment
= alignment
;
58 if (binding
& (PIPE_BIND_CONSTANT_BUFFER
| PIPE_BIND_VERTEX_BUFFER
| PIPE_BIND_INDEX_BUFFER
)) {
59 r600_bomgr_bo_init(radeon
->bomgr
, bo
);
62 /* Staging resources particpate in transfers and blits only
63 * and are used for uploads and downloads from regular
64 * resources. We generate them internally for some transfers.
67 case PIPE_USAGE_DEFAULT
:
68 bo
->domains
= RADEON_GEM_DOMAIN_CPU
|
69 RADEON_GEM_DOMAIN_GTT
|
70 RADEON_GEM_DOMAIN_VRAM
;
73 case PIPE_USAGE_DYNAMIC
:
74 case PIPE_USAGE_STREAM
:
75 case PIPE_USAGE_STAGING
:
76 bo
->domains
= RADEON_GEM_DOMAIN_CPU
|
77 RADEON_GEM_DOMAIN_GTT
;
80 case PIPE_USAGE_STATIC
:
81 case PIPE_USAGE_IMMUTABLE
:
82 bo
->domains
= RADEON_GEM_DOMAIN_VRAM
;
86 pipe_reference_init(&bo
->reference
, 1);
90 struct r600_bo
*r600_bo_handle(struct radeon
*radeon
,
91 unsigned handle
, unsigned *array_mode
)
93 struct r600_bo
*bo
= calloc(1, sizeof(struct r600_bo
));
94 struct radeon_bo
*rbo
;
96 rbo
= bo
->bo
= radeon_bo(radeon
, handle
, 0, 0);
101 bo
->size
= rbo
->size
;
102 bo
->domains
= (RADEON_GEM_DOMAIN_CPU
|
103 RADEON_GEM_DOMAIN_GTT
|
104 RADEON_GEM_DOMAIN_VRAM
);
106 pipe_reference_init(&bo
->reference
, 1);
108 radeon_bo_get_tiling_flags(radeon
, rbo
, &bo
->tiling_flags
, &bo
->kernel_pitch
);
110 if (bo
->tiling_flags
) {
111 if (bo
->tiling_flags
& RADEON_TILING_MACRO
)
112 *array_mode
= V_0280A0_ARRAY_2D_TILED_THIN1
;
113 else if (bo
->tiling_flags
& RADEON_TILING_MICRO
)
114 *array_mode
= V_0280A0_ARRAY_1D_TILED_THIN1
;
122 void *r600_bo_map(struct radeon
*radeon
, struct r600_bo
*bo
, unsigned usage
, void *ctx
)
124 struct pipe_context
*pctx
= ctx
;
126 if (usage
& PB_USAGE_UNSYNCHRONIZED
) {
127 radeon_bo_map(radeon
, bo
->bo
);
128 return (uint8_t *) bo
->bo
->data
+ bo
->offset
;
131 if (p_atomic_read(&bo
->bo
->reference
.count
) > 1) {
132 if (usage
& PB_USAGE_DONTBLOCK
) {
136 pctx
->flush(pctx
, 0, NULL
);
140 if (usage
& PB_USAGE_DONTBLOCK
) {
143 if (radeon_bo_busy(radeon
, bo
->bo
, &domain
))
145 if (radeon_bo_map(radeon
, bo
->bo
)) {
151 radeon_bo_map(radeon
, bo
->bo
);
152 if (radeon_bo_wait(radeon
, bo
->bo
)) {
153 radeon_bo_unmap(radeon
, bo
->bo
);
158 return (uint8_t *) bo
->bo
->data
+ bo
->offset
;
161 void r600_bo_unmap(struct radeon
*radeon
, struct r600_bo
*bo
)
163 radeon_bo_unmap(radeon
, bo
->bo
);
166 void r600_bo_destroy(struct radeon
*radeon
, struct r600_bo
*bo
)
168 if (bo
->manager_id
) {
169 if (!r600_bomgr_bo_destroy(radeon
->bomgr
, bo
)) {
170 /* destroy is delayed by buffer manager */
174 radeon_bo_reference(radeon
, &bo
->bo
, NULL
);
178 void r600_bo_reference(struct radeon
*radeon
, struct r600_bo
**dst
, struct r600_bo
*src
)
180 struct r600_bo
*old
= *dst
;
182 if (pipe_reference(&(*dst
)->reference
, &src
->reference
)) {
183 r600_bo_destroy(radeon
, old
);
188 boolean
r600_bo_get_winsys_handle(struct radeon
*radeon
, struct r600_bo
*bo
,
189 unsigned stride
, struct winsys_handle
*whandle
)
191 whandle
->stride
= stride
;
192 switch(whandle
->type
) {
193 case DRM_API_HANDLE_TYPE_KMS
:
194 whandle
->handle
= r600_bo_get_handle(bo
);
196 case DRM_API_HANDLE_TYPE_SHARED
:
197 if (radeon_bo_get_name(radeon
, bo
->bo
, &whandle
->handle
))