/*
 * Copyright 2010 Dave Airlie
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <stdint.h>
#include <stdlib.h>

#include "r600_priv.h"
#include "state_tracker/drm_driver.h"
#include "radeon_drm.h"
31 struct r600_bo
*r600_bo(struct radeon
*radeon
,
32 unsigned size
, unsigned alignment
,
33 unsigned binding
, unsigned usage
)
36 struct radeon_bo
*rbo
;
37 uint32_t initial_domain
, domains
;
39 /* Staging resources particpate in transfers and blits only
40 * and are used for uploads and downloads from regular
41 * resources. We generate them internally for some transfers.
43 if (usage
== PIPE_USAGE_STAGING
)
44 domains
= RADEON_GEM_DOMAIN_CPU
| RADEON_GEM_DOMAIN_GTT
;
46 domains
= (RADEON_GEM_DOMAIN_CPU
|
47 RADEON_GEM_DOMAIN_GTT
|
48 RADEON_GEM_DOMAIN_VRAM
);
50 if (binding
& (PIPE_BIND_CONSTANT_BUFFER
| PIPE_BIND_VERTEX_BUFFER
| PIPE_BIND_INDEX_BUFFER
)) {
51 bo
= r600_bomgr_bo_create(radeon
->bomgr
, size
, alignment
, *radeon
->cfence
);
53 bo
->domains
= domains
;
59 case PIPE_USAGE_DYNAMIC
:
60 case PIPE_USAGE_STREAM
:
61 case PIPE_USAGE_STAGING
:
62 initial_domain
= RADEON_GEM_DOMAIN_GTT
;
64 case PIPE_USAGE_DEFAULT
:
65 case PIPE_USAGE_STATIC
:
66 case PIPE_USAGE_IMMUTABLE
:
68 initial_domain
= RADEON_GEM_DOMAIN_VRAM
;
71 rbo
= radeon_bo(radeon
, 0, size
, alignment
, initial_domain
);
76 bo
= calloc(1, sizeof(struct r600_bo
));
78 bo
->alignment
= alignment
;
79 bo
->domains
= domains
;
81 if (binding
& (PIPE_BIND_CONSTANT_BUFFER
| PIPE_BIND_VERTEX_BUFFER
| PIPE_BIND_INDEX_BUFFER
)) {
82 r600_bomgr_bo_init(radeon
->bomgr
, bo
);
85 pipe_reference_init(&bo
->reference
, 1);
89 struct r600_bo
*r600_bo_handle(struct radeon
*radeon
, struct winsys_handle
*whandle
,
90 unsigned *stride
, unsigned *array_mode
)
92 struct r600_bo
*bo
= calloc(1, sizeof(struct r600_bo
));
93 struct radeon_bo
*rbo
;
95 rbo
= bo
->bo
= radeon_bo(radeon
, whandle
->handle
, 0, 0, 0);
101 pipe_reference_init(&bo
->reference
, 1);
102 bo
->size
= rbo
->size
;
103 bo
->domains
= (RADEON_GEM_DOMAIN_CPU
|
104 RADEON_GEM_DOMAIN_GTT
|
105 RADEON_GEM_DOMAIN_VRAM
);
108 *stride
= whandle
->stride
;
110 radeon_bo_get_tiling_flags(radeon
, rbo
, &bo
->tiling_flags
);
112 if (bo
->tiling_flags
) {
113 if (bo
->tiling_flags
& RADEON_TILING_MACRO
)
114 *array_mode
= V_0280A0_ARRAY_2D_TILED_THIN1
;
115 else if (bo
->tiling_flags
& RADEON_TILING_MICRO
)
116 *array_mode
= V_0280A0_ARRAY_1D_TILED_THIN1
;
124 void *r600_bo_map(struct radeon
*radeon
, struct r600_bo
*bo
, unsigned usage
, void *ctx
)
126 struct pipe_context
*pctx
= ctx
;
128 if (usage
& PIPE_TRANSFER_UNSYNCHRONIZED
) {
129 radeon_bo_map(radeon
, bo
->bo
);
130 return (uint8_t *) bo
->bo
->data
+ bo
->offset
;
133 if (p_atomic_read(&bo
->bo
->reference
.count
) > 1) {
134 if (usage
& PIPE_TRANSFER_DONTBLOCK
) {
138 pctx
->flush(pctx
, NULL
);
142 if (usage
& PIPE_TRANSFER_DONTBLOCK
) {
145 if (radeon_bo_busy(radeon
, bo
->bo
, &domain
))
147 if (radeon_bo_map(radeon
, bo
->bo
)) {
153 radeon_bo_map(radeon
, bo
->bo
);
154 if (radeon_bo_wait(radeon
, bo
->bo
)) {
155 radeon_bo_unmap(radeon
, bo
->bo
);
160 return (uint8_t *) bo
->bo
->data
+ bo
->offset
;
163 void r600_bo_unmap(struct radeon
*radeon
, struct r600_bo
*bo
)
165 radeon_bo_unmap(radeon
, bo
->bo
);
168 void r600_bo_destroy(struct radeon
*radeon
, struct r600_bo
*bo
)
170 if (bo
->manager_id
) {
171 if (!r600_bomgr_bo_destroy(radeon
->bomgr
, bo
)) {
172 /* destroy is delayed by buffer manager */
176 radeon_bo_reference(radeon
, &bo
->bo
, NULL
);
180 boolean
r600_bo_get_winsys_handle(struct radeon
*radeon
, struct r600_bo
*bo
,
181 unsigned stride
, struct winsys_handle
*whandle
)
183 whandle
->stride
= stride
;
184 switch(whandle
->type
) {
185 case DRM_API_HANDLE_TYPE_KMS
:
186 whandle
->handle
= bo
->bo
->handle
;
188 case DRM_API_HANDLE_TYPE_SHARED
:
189 if (radeon_bo_get_name(radeon
, bo
->bo
, &whandle
->handle
))