r600g: cleanup includes in winsys
[mesa.git] / src/gallium/winsys/r600/drm/r600_bo.c
/*
 * Copyright 2010 Dave Airlie
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie
 */
#include "r600_priv.h"
#include "r600d.h"
#include "state_tracker/drm_driver.h"
#include "radeon_drm.h"

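/* Allocate a new r600_bo of the given size and alignment.
 *
 * Constant/vertex/index buffer bindings are first offered to the winsys
 * buffer manager (r600_bomgr), which may hand back one of its own buffers;
 * everything else gets a fresh kernel BO, placed in GTT or VRAM depending
 * on the pipe usage hint.
 */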
struct r600_bo *r600_bo(struct radeon *radeon,
			unsigned size, unsigned alignment,
			unsigned binding, unsigned usage)
{
	struct r600_bo *bo;
	struct radeon_bo *rbo;
	uint32_t initial_domain, domains;

	/* Staging resources participate in transfers and blits only
	 * and are used for uploads and downloads from regular
	 * resources. We generate them internally for some transfers.
	 */
	if (usage == PIPE_USAGE_STAGING)
		domains = RADEON_GEM_DOMAIN_CPU | RADEON_GEM_DOMAIN_GTT;
	else
		domains = (RADEON_GEM_DOMAIN_CPU |
			   RADEON_GEM_DOMAIN_GTT |
			   RADEON_GEM_DOMAIN_VRAM);

	if (binding & (PIPE_BIND_CONSTANT_BUFFER | PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER)) {
		bo = r600_bomgr_bo_create(radeon->bomgr, size, alignment, *radeon->cfence);
		if (bo) {
			bo->domains = domains;
			return bo;
		}
	}

	switch(usage) {
	case PIPE_USAGE_DYNAMIC:
	case PIPE_USAGE_STREAM:
	case PIPE_USAGE_STAGING:
		initial_domain = RADEON_GEM_DOMAIN_GTT;
		break;
	case PIPE_USAGE_DEFAULT:
	case PIPE_USAGE_STATIC:
	case PIPE_USAGE_IMMUTABLE:
	default:
		initial_domain = RADEON_GEM_DOMAIN_VRAM;
		break;
	}
	rbo = radeon_bo(radeon, 0, size, alignment, initial_domain);
	if (rbo == NULL) {
		return NULL;
	}

	bo = calloc(1, sizeof(struct r600_bo));
	bo->size = size;
	bo->alignment = alignment;
	bo->domains = domains;
	bo->bo = rbo;
	if (binding & (PIPE_BIND_CONSTANT_BUFFER | PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER)) {
		r600_bomgr_bo_init(radeon->bomgr, bo);
	}

	pipe_reference_init(&bo->reference, 1);
	return bo;
}

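/* Wrap a buffer shared from elsewhere (identified by the given handle) in an
 * r600_bo, and report its array mode (linear, 1D or 2D tiled) based on the
 * tiling flags queried from the kernel. */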
struct r600_bo *r600_bo_handle(struct radeon *radeon,
			       unsigned handle, unsigned *array_mode)
{
	struct r600_bo *bo = calloc(1, sizeof(struct r600_bo));
	struct radeon_bo *rbo;

	rbo = bo->bo = radeon_bo(radeon, handle, 0, 0, 0);
	if (rbo == NULL) {
		free(bo);
		return NULL;
	}
	bo->size = rbo->size;
	bo->domains = (RADEON_GEM_DOMAIN_CPU |
		       RADEON_GEM_DOMAIN_GTT |
		       RADEON_GEM_DOMAIN_VRAM);

	pipe_reference_init(&bo->reference, 1);

	radeon_bo_get_tiling_flags(radeon, rbo, &bo->tiling_flags, &bo->kernel_pitch);
	if (array_mode) {
		if (bo->tiling_flags) {
			if (bo->tiling_flags & RADEON_TILING_MACRO)
				*array_mode = V_0280A0_ARRAY_2D_TILED_THIN1;
			else if (bo->tiling_flags & RADEON_TILING_MICRO)
				*array_mode = V_0280A0_ARRAY_1D_TILED_THIN1;
		} else {
			*array_mode = 0;
		}
	}
	return bo;
}

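/* Map a buffer for CPU access.
 *
 * PIPE_TRANSFER_UNSYNCHRONIZED maps immediately without waiting for the GPU.
 * Otherwise, if the underlying radeon_bo is still referenced elsewhere
 * (e.g. by a queued command stream), the context is flushed first; with
 * PIPE_TRANSFER_DONTBLOCK the map fails instead of stalling on a busy buffer.
 */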
void *r600_bo_map(struct radeon *radeon, struct r600_bo *bo, unsigned usage, void *ctx)
{
	struct pipe_context *pctx = ctx;

	if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
		radeon_bo_map(radeon, bo->bo);
		return (uint8_t *) bo->bo->data + bo->offset;
	}

	if (p_atomic_read(&bo->bo->reference.count) > 1) {
		if (usage & PIPE_TRANSFER_DONTBLOCK) {
			return NULL;
		}
		if (ctx) {
			pctx->flush(pctx, NULL);
		}
	}

	if (usage & PIPE_TRANSFER_DONTBLOCK) {
		uint32_t domain;

		if (radeon_bo_busy(radeon, bo->bo, &domain))
			return NULL;
		if (radeon_bo_map(radeon, bo->bo)) {
			return NULL;
		}
		goto out;
	}

	radeon_bo_map(radeon, bo->bo);
	if (radeon_bo_wait(radeon, bo->bo)) {
		radeon_bo_unmap(radeon, bo->bo);
		return NULL;
	}

out:
	return (uint8_t *) bo->bo->data + bo->offset;
}

void r600_bo_unmap(struct radeon *radeon, struct r600_bo *bo)
{
	radeon_bo_unmap(radeon, bo->bo);
}

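/* Release a buffer. Buffers owned by the buffer manager may be handed back
 * to it and destroyed later; everything else drops its radeon_bo reference
 * and is freed immediately. */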
void r600_bo_destroy(struct radeon *radeon, struct r600_bo *bo)
{
	if (bo->manager_id) {
		if (!r600_bomgr_bo_destroy(radeon->bomgr, bo)) {
			/* destroy is delayed by the buffer manager */
			return;
		}
	}
	radeon_bo_reference(radeon, &bo->bo, NULL);
	free(bo);
}

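/* Fill in a winsys_handle for sharing: the raw GEM handle for the KMS type,
 * or a global buffer name queried from the kernel for the shared type.
 * Other handle types are not supported. */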
boolean r600_bo_get_winsys_handle(struct radeon *radeon, struct r600_bo *bo,
				  unsigned stride, struct winsys_handle *whandle)
{
	whandle->stride = stride;
	switch(whandle->type) {
	case DRM_API_HANDLE_TYPE_KMS:
		whandle->handle = bo->bo->handle;
		break;
	case DRM_API_HANDLE_TYPE_SHARED:
		if (radeon_bo_get_name(radeon, bo->bo, &whandle->handle))
			return FALSE;
		break;
	default:
		return FALSE;
	}

	return TRUE;
}