src/gallium/winsys/r600/drm/r600_bo.c
/*
 * Copyright 2010 Dave Airlie
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie
 */
#include <pipe/p_compiler.h>
#include <pipe/p_screen.h>
#include <pipebuffer/pb_bufmgr.h>
#include "state_tracker/drm_driver.h"
#include "r600_priv.h"
#include "r600d.h"
#include "drm.h"
#include "radeon_drm.h"

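/* Allocate a new buffer object.  Constant, vertex and index buffers are
 * first tried against the sub-allocating buffer manager; otherwise (or on a
 * manager miss) a fresh kernel BO is created and its allowed GEM domains
 * are picked from the pipe usage flags.
 */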
struct r600_bo *r600_bo(struct radeon *radeon,
                        unsigned size, unsigned alignment,
                        unsigned binding, unsigned usage)
{
        struct r600_bo *bo;
        struct radeon_bo *rbo;

        if (binding & (PIPE_BIND_CONSTANT_BUFFER | PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER)) {
                bo = r600_bomgr_bo_create(radeon->bomgr, size, alignment, *radeon->cfence);
                if (bo) {
                        return bo;
                }
        }

        rbo = radeon_bo(radeon, 0, size, alignment);
        if (rbo == NULL) {
                return NULL;
        }

        bo = calloc(1, sizeof(struct r600_bo));
        if (bo == NULL) {
                radeon_bo_reference(radeon, &rbo, NULL);
                return NULL;
        }
        bo->size = size;
        bo->alignment = alignment;
        bo->bo = rbo;
        if (binding & (PIPE_BIND_CONSTANT_BUFFER | PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER)) {
                r600_bomgr_bo_init(radeon->bomgr, bo);
        }

        /* Staging resources participate in transfers and blits only
         * and are used for uploads and downloads from regular
         * resources.  We generate them internally for some transfers.
         */
        switch (usage) {
        case PIPE_USAGE_DEFAULT:
                bo->domains = RADEON_GEM_DOMAIN_CPU |
                              RADEON_GEM_DOMAIN_GTT |
                              RADEON_GEM_DOMAIN_VRAM;
                break;

        case PIPE_USAGE_DYNAMIC:
        case PIPE_USAGE_STREAM:
        case PIPE_USAGE_STAGING:
                bo->domains = RADEON_GEM_DOMAIN_CPU |
                              RADEON_GEM_DOMAIN_GTT;
                break;

        case PIPE_USAGE_STATIC:
        case PIPE_USAGE_IMMUTABLE:
                bo->domains = RADEON_GEM_DOMAIN_VRAM;
                break;
        }

        pipe_reference_init(&bo->reference, 1);
        return bo;
}

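/* Wrap an existing kernel BO, referenced by its GEM handle, in an r600_bo.
 * The kernel tiling flags are queried so the caller can be told the
 * matching array mode (linear, 1D or 2D tiled).
 */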
struct r600_bo *r600_bo_handle(struct radeon *radeon,
                               unsigned handle, unsigned *array_mode)
{
        struct r600_bo *bo = calloc(1, sizeof(struct r600_bo));
        struct radeon_bo *rbo;

        if (bo == NULL) {
                return NULL;
        }
        rbo = bo->bo = radeon_bo(radeon, handle, 0, 0);
        if (rbo == NULL) {
                free(bo);
                return NULL;
        }
        bo->size = rbo->size;
        bo->domains = (RADEON_GEM_DOMAIN_CPU |
                       RADEON_GEM_DOMAIN_GTT |
                       RADEON_GEM_DOMAIN_VRAM);

        pipe_reference_init(&bo->reference, 1);

        radeon_bo_get_tiling_flags(radeon, rbo, &bo->tiling_flags, &bo->kernel_pitch);
        if (array_mode) {
                if (bo->tiling_flags) {
                        if (bo->tiling_flags & RADEON_TILING_MACRO)
                                *array_mode = V_0280A0_ARRAY_2D_TILED_THIN1;
                        else if (bo->tiling_flags & RADEON_TILING_MICRO)
                                *array_mode = V_0280A0_ARRAY_1D_TILED_THIN1;
                } else {
                        *array_mode = 0;
                }
        }
        return bo;
}

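/* Map a BO and return a CPU pointer to its data.  Unsynchronized maps
 * return immediately; otherwise the context is flushed if it still holds a
 * reference to the BO, and with PB_USAGE_DONTBLOCK the call returns NULL
 * rather than waiting for a busy BO.
 */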
void *r600_bo_map(struct radeon *radeon, struct r600_bo *bo, unsigned usage, void *ctx)
{
        struct pipe_context *pctx = ctx;

        if (usage & PB_USAGE_UNSYNCHRONIZED) {
                radeon_bo_map(radeon, bo->bo);
                return (uint8_t *) bo->bo->data + bo->offset;
        }

        if (p_atomic_read(&bo->bo->reference.count) > 1) {
                if (usage & PB_USAGE_DONTBLOCK) {
                        return NULL;
                }
                if (ctx) {
                        pctx->flush(pctx, 0, NULL);
                }
        }

        if (usage & PB_USAGE_DONTBLOCK) {
                uint32_t domain;

                if (radeon_bo_busy(radeon, bo->bo, &domain))
                        return NULL;
                if (radeon_bo_map(radeon, bo->bo)) {
                        return NULL;
                }
                goto out;
        }

        radeon_bo_map(radeon, bo->bo);
        if (radeon_bo_wait(radeon, bo->bo)) {
                radeon_bo_unmap(radeon, bo->bo);
                return NULL;
        }

out:
        return (uint8_t *) bo->bo->data + bo->offset;
}

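/* Drop the CPU mapping set up by r600_bo_map(). */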
void r600_bo_unmap(struct radeon *radeon, struct r600_bo *bo)
{
        radeon_bo_unmap(radeon, bo->bo);
}

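/* Release the kernel BO and free the wrapper.  BOs owned by the buffer
 * manager may have their destruction delayed (cached) by the manager.
 */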
void r600_bo_destroy(struct radeon *radeon, struct r600_bo *bo)
{
        if (bo->manager_id) {
                if (!r600_bomgr_bo_destroy(radeon->bomgr, bo)) {
                        /* destroy is delayed by buffer manager */
                        return;
                }
        }
        radeon_bo_reference(radeon, &bo->bo, NULL);
        free(bo);
}

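/* Make *dst point at src, destroying the previously referenced BO once its
 * reference count reaches zero.
 */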
void r600_bo_reference(struct radeon *radeon, struct r600_bo **dst, struct r600_bo *src)
{
        struct r600_bo *old = *dst;

        if (pipe_reference(&(*dst)->reference, &src->reference)) {
                r600_bo_destroy(radeon, old);
        }
        *dst = src;
}

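/* Export the BO through a winsys handle: the GEM handle for KMS use, or a
 * flink name for sharing between processes, depending on the requested
 * handle type.
 */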
boolean r600_bo_get_winsys_handle(struct radeon *radeon, struct r600_bo *bo,
                                  unsigned stride, struct winsys_handle *whandle)
{
        whandle->stride = stride;
        switch (whandle->type) {
        case DRM_API_HANDLE_TYPE_KMS:
                whandle->handle = r600_bo_get_handle(bo);
                break;
        case DRM_API_HANDLE_TYPE_SHARED:
                if (radeon_bo_get_name(radeon, bo->bo, &whandle->handle))
                        return FALSE;
                break;
        default:
                return FALSE;
        }

        return TRUE;
}