r300g: expose radeon_bo_wait to the driver
[mesa.git] src/gallium/winsys/radeon/drm/radeon_drm_buffer.c
#include <sys/ioctl.h>
#include "radeon_drm.h"
#include "radeon_bo_gem.h"
#include "radeon_cs_gem.h"
#include "radeon_buffer.h"

#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_simple_list.h"
#include "pipebuffer/pb_buffer.h"
#include "pipebuffer/pb_bufmgr.h"

#include "radeon_winsys.h"

struct radeon_drm_bufmgr;

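/* pb_buffer wrapper around a libdrm radeon_bo.  Buffers that are currently
 * mapped are linked into the manager's buffer_map_list so they can all be
 * unmapped in one pass when the CS is flushed. */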
struct radeon_drm_buffer {
    struct pb_buffer base;
    struct radeon_drm_bufmgr *mgr;

    struct radeon_bo *bo;

    boolean flinked;
    uint32_t flink;
    uint32_t tileflags;
    uint32_t pitch;

    struct radeon_drm_buffer *next, *prev;
};

extern const struct pb_vtbl radeon_drm_buffer_vtbl;


static INLINE struct radeon_drm_buffer *
radeon_drm_buffer(struct pb_buffer *buf)
{
    assert(buf);
    assert(buf->vtbl == &radeon_drm_buffer_vtbl);
    return (struct radeon_drm_buffer *)buf;
}

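/* pb_manager implementation on top of the radeon GEM bo manager.
 * buffer_map_list is the sentinel head of the list of mapped buffers. */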
struct radeon_drm_bufmgr {
    struct pb_manager base;
    struct radeon_libdrm_winsys *rws;
    struct radeon_drm_buffer buffer_map_list;
};

static INLINE struct radeon_drm_bufmgr *
radeon_drm_bufmgr(struct pb_manager *mgr)
{
    assert(mgr);
    return (struct radeon_drm_bufmgr *)mgr;
}

static void
radeon_drm_buffer_destroy(struct pb_buffer *_buf)
{
    struct radeon_drm_buffer *buf = radeon_drm_buffer(_buf);

    if (buf->bo->ptr != NULL) {
        remove_from_list(buf);
        radeon_bo_unmap(buf->bo);
        buf->bo->ptr = NULL;
    }
    radeon_bo_unref(buf->bo);

    FREE(buf);
}

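/* Map the buffer for CPU access.  With PIPE_TRANSFER_DONTBLOCK the map
 * fails instead of stalling: first if a vertex/index buffer is still
 * referenced by the current CS, then if the kernel reports the bo as busy.
 * Otherwise, a CS that still references the buffer is flushed before the
 * buffer is mapped and added to the mapped-buffer list. */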
static void *
radeon_drm_buffer_map(struct pb_buffer *_buf,
                      unsigned flags)
{
    struct radeon_drm_buffer *buf = radeon_drm_buffer(_buf);
    int write = 0;

    if (flags & PIPE_TRANSFER_DONTBLOCK) {
        if ((_buf->base.usage & PIPE_BIND_VERTEX_BUFFER) ||
            (_buf->base.usage & PIPE_BIND_INDEX_BUFFER))
            if (radeon_bo_is_referenced_by_cs(buf->bo, buf->mgr->rws->cs))
                return NULL;
    }

    if (buf->bo->ptr != NULL)
        return buf->bo->ptr;

    if (flags & PIPE_TRANSFER_DONTBLOCK) {
        uint32_t domain;
        if (radeon_bo_is_busy(buf->bo, &domain))
            return NULL;
    }

    if (radeon_bo_is_referenced_by_cs(buf->bo, buf->mgr->rws->cs)) {
        buf->mgr->rws->flush_cb(buf->mgr->rws->flush_data);
    }

    if (flags & PIPE_TRANSFER_WRITE) {
        write = 1;
    }

    if (radeon_bo_map(buf->bo, write)) {
        return NULL;
    }
    insert_at_tail(&buf->mgr->buffer_map_list, buf);
    return buf->bo->ptr;
}

static void
radeon_drm_buffer_unmap(struct pb_buffer *_buf)
{
    (void)_buf;
}

static void
radeon_drm_buffer_get_base_buffer(struct pb_buffer *buf,
                                  struct pb_buffer **base_buf,
                                  unsigned *offset)
{
    *base_buf = buf;
    *offset = 0;
}


static enum pipe_error
radeon_drm_buffer_validate(struct pb_buffer *_buf,
                           struct pb_validate *vl,
                           unsigned flags)
{
    /* Always pinned */
    return PIPE_OK;
}

static void
radeon_drm_buffer_fence(struct pb_buffer *buf,
                        struct pipe_fence_handle *fence)
{
}

const struct pb_vtbl radeon_drm_buffer_vtbl = {
    radeon_drm_buffer_destroy,
    radeon_drm_buffer_map,
    radeon_drm_buffer_unmap,
    radeon_drm_buffer_validate,
    radeon_drm_buffer_fence,
    radeon_drm_buffer_get_base_buffer,
};

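/* Translate pipe bind flags into radeon GEM memory domains: render targets,
 * depth/stencil buffers and textures go to VRAM, vertex and index buffers
 * go to GTT. */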
static uint32_t radeon_domain_from_usage(unsigned usage)
{
    uint32_t domain = 0;

    if (usage & PIPE_BIND_RENDER_TARGET) {
        domain |= RADEON_GEM_DOMAIN_VRAM;
    }
    if (usage & PIPE_BIND_DEPTH_STENCIL) {
        domain |= RADEON_GEM_DOMAIN_VRAM;
    }
    if (usage & PIPE_BIND_SAMPLER_VIEW) {
        domain |= RADEON_GEM_DOMAIN_VRAM;
    }
    /* also need BIND_BLIT_SOURCE/DESTINATION ? */
    if (usage & PIPE_BIND_VERTEX_BUFFER) {
        domain |= RADEON_GEM_DOMAIN_GTT;
    }
    if (usage & PIPE_BIND_INDEX_BUFFER) {
        domain |= RADEON_GEM_DOMAIN_GTT;
    }

    return domain;
}

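/* Wrap an existing GEM handle (e.g. a buffer shared by another process)
 * in a pb_buffer.  The size and usage of the underlying bo are not known
 * here, so the buffer is tagged as a sampler view with size 0. */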
struct pb_buffer *radeon_drm_bufmgr_create_buffer_from_handle(struct pb_manager *_mgr,
                                                              uint32_t handle)
{
    struct radeon_drm_bufmgr *mgr = radeon_drm_bufmgr(_mgr);
    struct radeon_libdrm_winsys *rws = mgr->rws;
    struct radeon_drm_buffer *buf;
    struct radeon_bo *bo;

    bo = radeon_bo_open(rws->bom, handle, 0,
                        0, 0, 0);
    if (bo == NULL)
        return NULL;

    buf = CALLOC_STRUCT(radeon_drm_buffer);
    if (!buf) {
        radeon_bo_unref(bo);
        return NULL;
    }

    make_empty_list(buf);

    pipe_reference_init(&buf->base.base.reference, 1);
    buf->base.base.alignment = 0;
    buf->base.base.usage = PIPE_BIND_SAMPLER_VIEW;
    buf->base.base.size = 0;
    buf->base.vtbl = &radeon_drm_buffer_vtbl;
    buf->mgr = mgr;

    buf->bo = bo;

    return &buf->base;
}

static struct pb_buffer *
radeon_drm_bufmgr_create_buffer(struct pb_manager *_mgr,
                                pb_size size,
                                const struct pb_desc *desc)
{
    struct radeon_drm_bufmgr *mgr = radeon_drm_bufmgr(_mgr);
    struct radeon_libdrm_winsys *rws = mgr->rws;
    struct radeon_drm_buffer *buf;
    uint32_t domain;

    buf = CALLOC_STRUCT(radeon_drm_buffer);
    if (!buf)
        goto error1;

    pipe_reference_init(&buf->base.base.reference, 1);
    buf->base.base.alignment = desc->alignment;
    buf->base.base.usage = desc->usage;
    buf->base.base.size = size;
    buf->base.vtbl = &radeon_drm_buffer_vtbl;
    buf->mgr = mgr;

    make_empty_list(buf);
    domain = radeon_domain_from_usage(desc->usage);
    buf->bo = radeon_bo_open(rws->bom, 0, size,
                             desc->alignment, domain, 0);
    if (buf->bo == NULL)
        goto error2;

    return &buf->base;

error2:
    FREE(buf);
error1:
    return NULL;
}

static void
radeon_drm_bufmgr_flush(struct pb_manager *mgr)
{
    /* NOP */
}

static void
radeon_drm_bufmgr_destroy(struct pb_manager *_mgr)
{
    struct radeon_drm_bufmgr *mgr = radeon_drm_bufmgr(_mgr);
    FREE(mgr);
}

struct pb_manager *
radeon_drm_bufmgr_create(struct radeon_libdrm_winsys *rws)
{
    struct radeon_drm_bufmgr *mgr;

    mgr = CALLOC_STRUCT(radeon_drm_bufmgr);
    if (!mgr)
        return NULL;

    mgr->base.destroy = radeon_drm_bufmgr_destroy;
    mgr->base.create_buffer = radeon_drm_bufmgr_create_buffer;
    mgr->base.flush = radeon_drm_bufmgr_flush;

    mgr->rws = rws;
    make_empty_list(&mgr->buffer_map_list);
    return &mgr->base;
}

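/* Resolve a pb_buffer to the underlying radeon_drm_buffer.  Buffers coming
 * from a sub-allocating manager are resolved through pb_get_base_buffer. */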
static struct radeon_drm_buffer *get_drm_buffer(struct pb_buffer *_buf)
{
    struct radeon_drm_buffer *buf;
    if (_buf->vtbl == &radeon_drm_buffer_vtbl) {
        buf = radeon_drm_buffer(_buf);
    } else {
        struct pb_buffer *base_buf;
        pb_size offset;
        pb_get_base_buffer(_buf, &base_buf, &offset);

        buf = radeon_drm_buffer(base_buf);
    }
    return buf;
}

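/* Fill in a winsys_handle for sharing: a GEM flink name for SHARED handles
 * (flinked lazily and cached), or the raw GEM handle for KMS. */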
boolean radeon_drm_bufmgr_get_handle(struct pb_buffer *_buf,
                                     struct winsys_handle *whandle)
{
    int retval, fd;
    struct drm_gem_flink flink;
    struct radeon_drm_buffer *buf = get_drm_buffer(_buf);

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        if (!buf->flinked) {
            fd = buf->mgr->rws->fd;
            flink.handle = buf->bo->handle;

            retval = ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
            if (retval) {
                return FALSE;
            }

            buf->flinked = TRUE;
            buf->flink = flink.name;
        }
        whandle->handle = buf->flink;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
        whandle->handle = buf->bo->handle;
    }
    return TRUE;
}

void radeon_drm_bufmgr_get_tiling(struct pb_buffer *_buf,
                                  enum r300_buffer_tiling *microtiled,
                                  enum r300_buffer_tiling *macrotiled)
{
    struct radeon_drm_buffer *buf = get_drm_buffer(_buf);
    uint32_t flags = 0, pitch;

    radeon_bo_get_tiling(buf->bo, &flags, &pitch);

    buf->tileflags = flags;
    buf->pitch = pitch;

    *microtiled = R300_BUFFER_LINEAR;
    *macrotiled = R300_BUFFER_LINEAR;
    if (flags & RADEON_BO_FLAGS_MICRO_TILE)
        *microtiled = R300_BUFFER_TILED;

    if (flags & RADEON_BO_FLAGS_MACRO_TILE)
        *macrotiled = R300_BUFFER_TILED;
}

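/* Set the micro/macro tiling flags and pitch on the underlying bo,
 * flushing any CS that still references the buffer first. */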
void radeon_drm_bufmgr_set_tiling(struct pb_buffer *_buf,
                                  enum r300_buffer_tiling microtiled,
                                  enum r300_buffer_tiling macrotiled,
                                  uint32_t pitch)
{
    struct radeon_drm_buffer *buf = get_drm_buffer(_buf);
    uint32_t flags = 0;
    if (microtiled == R300_BUFFER_TILED)
        flags |= RADEON_BO_FLAGS_MICRO_TILE;
/* XXX Remove this ifdef when libdrm version 2.4.19 becomes mandatory. */
#ifdef RADEON_BO_FLAGS_MICRO_TILE_SQUARE
    else if (microtiled == R300_BUFFER_SQUARETILED)
        flags |= RADEON_BO_FLAGS_MICRO_TILE_SQUARE;
#endif
    if (macrotiled == R300_BUFFER_TILED)
        flags |= RADEON_BO_FLAGS_MACRO_TILE;

    if (flags != buf->tileflags || pitch != buf->pitch) {
        /* Tiling determines how DRM treats the buffer data.
         * We must flush CS when changing it if the buffer is referenced. */
        if (radeon_bo_is_referenced_by_cs(buf->bo, buf->mgr->rws->cs)) {
            buf->mgr->rws->flush_cb(buf->mgr->rws->flush_data);
        }

        radeon_bo_set_tiling(buf->bo, flags, pitch);
    }
}

boolean radeon_drm_bufmgr_add_buffer(struct pb_buffer *_buf,
                                     uint32_t rd, uint32_t wd)
{
    struct radeon_drm_buffer *buf = get_drm_buffer(_buf);
    radeon_cs_space_add_persistent_bo(buf->mgr->rws->cs, buf->bo,
                                      rd, wd);
    return TRUE;
}

void radeon_drm_bufmgr_write_reloc(struct pb_buffer *_buf,
                                   uint32_t rd, uint32_t wd,
                                   uint32_t flags)
{
    struct radeon_drm_buffer *buf = get_drm_buffer(_buf);
    int retval;

    retval = radeon_cs_write_reloc(buf->mgr->rws->cs,
                                   buf->bo, rd, wd, flags);
    if (retval) {
        debug_printf("radeon: Relocation of %p (%d, %d, %d) failed!\n",
                     buf, rd, wd, flags);
    }
}

boolean radeon_drm_bufmgr_is_buffer_referenced(struct pb_buffer *_buf,
                                               enum r300_reference_domain domain)
{
    struct radeon_drm_buffer *buf = get_drm_buffer(_buf);
    uint32_t tmp;

    if (domain & R300_REF_CS) {
        if (radeon_bo_is_referenced_by_cs(buf->bo, buf->mgr->rws->cs)) {
            return TRUE;
        }
    }

    if (domain & R300_REF_HW) {
        if (radeon_bo_is_busy(buf->bo, &tmp)) {
            return TRUE;
        }
    }

    return FALSE;
}

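/* Unmap every buffer on the mapped-buffer list and empty the list. */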
void radeon_drm_bufmgr_flush_maps(struct pb_manager *_mgr)
{
    struct radeon_drm_bufmgr *mgr = radeon_drm_bufmgr(_mgr);
    struct radeon_drm_buffer *rpb, *t_rpb;

    foreach_s(rpb, t_rpb, &mgr->buffer_map_list) {
        radeon_bo_unmap(rpb->bo);
        rpb->bo->ptr = NULL;
        remove_from_list(rpb);
    }

    make_empty_list(&mgr->buffer_map_list);
}

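/* Block until the hardware is done with the buffer; this is the
 * radeon_bo_wait wrapper exposed to the r300g driver. */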
void radeon_drm_bufmgr_wait(struct pb_buffer *_buf)
{
    struct radeon_drm_buffer *buf = get_drm_buffer(_buf);

    radeon_bo_wait(buf->bo);
}