r300g: rework command submission and resource space checking
[mesa.git] / src / gallium / winsys / radeon / drm / radeon_drm_buffer.c
1 #include "radeon_drm_buffer.h"
2 #include "radeon_drm_cs.h"
3
4 #include "util/u_hash_table.h"
5 #include "util/u_memory.h"
6 #include "util/u_simple_list.h"
7 #include "pipebuffer/pb_bufmgr.h"
8 #include "os/os_thread.h"
9
10 #include "state_tracker/drm_driver.h"
11
12 #include <radeon_drm.h>
13 #include <radeon_bo_gem.h>
14 #include <sys/ioctl.h>
15
struct radeon_drm_bufmgr;

/* A pb_buffer implementation backed by a libdrm radeon_bo (GEM object). */
struct radeon_drm_buffer {
    struct pb_buffer base;
    /* The manager this buffer was created by. */
    struct radeon_drm_bufmgr *mgr;

    /* The underlying libdrm buffer object. */
    struct radeon_bo *bo;

    /* TRUE once a GEM flink name has been obtained; 'flink' is then valid. */
    boolean flinked;
    uint32_t flink;

    /* Links for mgr->buffer_map_list (buffers that are mapped but idle). */
    struct radeon_drm_buffer *next, *prev;
};
29
30 extern const struct pb_vtbl radeon_drm_buffer_vtbl;
31
32
33 static INLINE struct radeon_drm_buffer *
34 radeon_drm_buffer(struct pb_buffer *buf)
35 {
36 assert(buf);
37 assert(buf->vtbl == &radeon_drm_buffer_vtbl);
38 return (struct radeon_drm_buffer *)buf;
39 }
40
/* The pb_manager that hands out radeon_drm_buffer objects. */
struct radeon_drm_bufmgr {
    /* Base class. */
    struct pb_manager base;

    /* Winsys. */
    struct radeon_drm_winsys *rws;

    /* List of mapped buffers and its mutex.
     * Buffers on this list are mapped but not currently in use; their
     * mappings are torn down in bulk by radeon_drm_bufmgr_flush_maps. */
    struct radeon_drm_buffer buffer_map_list;
    pipe_mutex buffer_map_list_mutex;

    /* GEM name -> radeon_drm_buffer lookup table and its mutex, used to
     * return the same buffer for repeated opens of one shared handle. */
    struct util_hash_table *buffer_handles;
    pipe_mutex buffer_handles_mutex;
};
56
57 static INLINE struct radeon_drm_bufmgr *
58 radeon_drm_bufmgr(struct pb_manager *mgr)
59 {
60 assert(mgr);
61 return (struct radeon_drm_bufmgr *)mgr;
62 }
63
/* pb_vtbl destroy hook: tear down the mapping, drop the buffer from the
 * shared-handle table if it was named, and release the BO. */
static void
radeon_drm_buffer_destroy(struct pb_buffer *_buf)
{
    struct radeon_drm_buffer *buf = radeon_drm_buffer(_buf);
    int name;

    /* Unmap if still mapped.  The unlocked ptr check is a fast path;
     * it is repeated under the mutex to close the race. */
    if (buf->bo->ptr != NULL) {
        pipe_mutex_lock(buf->mgr->buffer_map_list_mutex);
        /* Now test it again inside the mutex. */
        if (buf->bo->ptr != NULL) {
            remove_from_list(buf);
            radeon_bo_unmap(buf->bo);
            buf->bo->ptr = NULL;
        }
        pipe_mutex_unlock(buf->mgr->buffer_map_list_mutex);
    }
    /* If the BO has a GEM flink name, it was registered in the
     * buffer_handles table at open time — remove that entry. */
    name = radeon_gem_name_bo(buf->bo);
    if (name) {
        pipe_mutex_lock(buf->mgr->buffer_handles_mutex);
        util_hash_table_remove(buf->mgr->buffer_handles,
                               (void*)(uintptr_t)name);
        pipe_mutex_unlock(buf->mgr->buffer_handles_mutex);
    }
    radeon_bo_unref(buf->bo);

    FREE(buf);
}
91
92 static unsigned get_pb_usage_from_transfer_flags(enum pipe_transfer_usage usage)
93 {
94 unsigned res = 0;
95
96 if (usage & PIPE_TRANSFER_READ)
97 res |= PB_USAGE_CPU_READ;
98
99 if (usage & PIPE_TRANSFER_WRITE)
100 res |= PB_USAGE_CPU_WRITE;
101
102 if (usage & PIPE_TRANSFER_DONTBLOCK)
103 res |= PB_USAGE_DONTBLOCK;
104
105 if (usage & PIPE_TRANSFER_UNSYNCHRONIZED)
106 res |= PB_USAGE_UNSYNCHRONIZED;
107
108 return res;
109 }
110
/* pb_vtbl map hook.  Returns a CPU pointer to the BO contents, or NULL on
 * failure / when DONTBLOCK is set and the buffer is busy.  flush_ctx is the
 * CS (possibly NULL) that may need flushing before a synchronized map. */
static void *
radeon_drm_buffer_map_internal(struct pb_buffer *_buf,
                               unsigned flags, void *flush_ctx)
{
    struct radeon_drm_buffer *buf = radeon_drm_buffer(_buf);
    struct radeon_drm_cs *cs = flush_ctx;
    int write = 0;

    /* Note how we use radeon_bo_is_referenced_by_cs here. There are
     * basically two places this map function can be called from:
     * - pb_map
     * - create_buffer (in the buffer reuse case)
     *
     * Since pb managers are per-winsys managers, not per-context managers,
     * and we shouldn't reuse buffers if they are in-use in any context,
     * we simply ask: is this buffer referenced by *any* CS?
     *
     * The problem with buffer_create is that it comes from pipe_screen,
     * so we have no CS to look at, though luckily the following code
     * is sufficient to tell whether the buffer is in use. */
    if (flags & PB_USAGE_DONTBLOCK) {
        if (_buf->base.usage & RADEON_PB_USAGE_VERTEX)
            if (radeon_bo_is_referenced_by_cs(buf->bo, NULL))
                return NULL;
    }

    /* Fast path: the BO is already mapped — just take it off the
     * idle-mapped list and hand the existing pointer back. */
    if (buf->bo->ptr != NULL) {
        pipe_mutex_lock(buf->mgr->buffer_map_list_mutex);
        /* Now test ptr again inside the mutex. We might have gotten a race
         * during the first test. */
        if (buf->bo->ptr != NULL) {
            remove_from_list(buf);
        }
        pipe_mutex_unlock(buf->mgr->buffer_map_list_mutex);
        return buf->bo->ptr;
    }

    /* Non-blocking map must fail if the hardware is still using the BO. */
    if (flags & PB_USAGE_DONTBLOCK) {
        uint32_t domain;
        if (radeon_bo_is_busy(buf->bo, &domain))
            return NULL;
    }

    /* If we don't have any CS and the buffer is referenced,
     * we cannot flush. */
    assert(cs || !radeon_bo_is_referenced_by_cs(buf->bo, NULL));

    /* Submit pending commands referencing this BO before the kernel
     * blocks us in radeon_bo_map. */
    if (cs && radeon_bo_is_referenced_by_cs(buf->bo, NULL)) {
        cs->flush_cs(cs->flush_data);
    }

    if (flags & PB_USAGE_CPU_WRITE) {
        write = 1;
    }

    if (radeon_bo_map(buf->bo, write)) {
        return NULL;
    }

    /* A freshly mapped, in-use buffer must not sit on the idle-mapped list. */
    pipe_mutex_lock(buf->mgr->buffer_map_list_mutex);
    remove_from_list(buf);
    pipe_mutex_unlock(buf->mgr->buffer_map_list_mutex);
    return buf->bo->ptr;
}
175
/* pb_vtbl unmap hook.  The actual radeon_bo_unmap is deferred: the buffer
 * is parked on the mgr's idle-mapped list so a subsequent map can reuse
 * the mapping cheaply; radeon_drm_bufmgr_flush_maps unmaps in bulk. */
static void
radeon_drm_buffer_unmap_internal(struct pb_buffer *_buf)
{
    struct radeon_drm_buffer *buf = radeon_drm_buffer(_buf);
    pipe_mutex_lock(buf->mgr->buffer_map_list_mutex);
    if (is_empty_list(buf)) { /* = is not inserted... */
        insert_at_tail(&buf->mgr->buffer_map_list, buf);
    }
    pipe_mutex_unlock(buf->mgr->buffer_map_list_mutex);
}
186
/* pb_vtbl get_base_buffer hook: these buffers are never sub-allocated,
 * so each buffer is its own base at offset zero. */
static void
radeon_drm_buffer_get_base_buffer(struct pb_buffer *buf,
                                  struct pb_buffer **base_buf,
                                  unsigned *offset)
{
    *offset = 0;
    *base_buf = buf;
}
195
196
197 static enum pipe_error
198 radeon_drm_buffer_validate(struct pb_buffer *_buf,
199 struct pb_validate *vl,
200 unsigned flags)
201 {
202 /* Always pinned */
203 return PIPE_OK;
204 }
205
/* pb_vtbl fence hook: intentionally a no-op — synchronization is handled
 * by the kernel via the BO busy/wait interfaces, not pb fences. */
static void
radeon_drm_buffer_fence(struct pb_buffer *buf,
                        struct pipe_fence_handle *fence)
{
}
211
/* Virtual table wiring this buffer implementation into pipebuffer.
 * NOTE: the initializer is positional; the order must match struct pb_vtbl. */
const struct pb_vtbl radeon_drm_buffer_vtbl = {
    radeon_drm_buffer_destroy,
    radeon_drm_buffer_map_internal,
    radeon_drm_buffer_unmap_internal,
    radeon_drm_buffer_validate,
    radeon_drm_buffer_fence,
    radeon_drm_buffer_get_base_buffer,
};
220
/* Open (or look up) a buffer from a GEM flink name.  "Unsafe" because it
 * touches buffer_handles without locking — callers must hold
 * buffer_handles_mutex (see radeon_drm_bufmgr_create_buffer_from_handle).
 * Returns a referenced pb_buffer, or NULL on failure. */
static struct pb_buffer *
radeon_drm_bufmgr_create_buffer_from_handle_unsafe(struct pb_manager *_mgr,
                                                   uint32_t handle)
{
    struct radeon_drm_bufmgr *mgr = radeon_drm_bufmgr(_mgr);
    struct radeon_drm_winsys *rws = mgr->rws;
    struct radeon_drm_buffer *buf;
    struct radeon_bo *bo;

    /* Reuse an already-open buffer for this name so two opens of the same
     * shared surface alias the same radeon_bo. */
    buf = util_hash_table_get(mgr->buffer_handles, (void*)(uintptr_t)handle);

    if (buf) {
        struct pb_buffer *b = NULL;
        pb_reference(&b, &buf->base);
        return b;
    }

    bo = radeon_bo_open(rws->bom, handle, 0,
                        0, 0, 0);
    if (bo == NULL)
        return NULL;

    buf = CALLOC_STRUCT(radeon_drm_buffer);
    if (!buf) {
        radeon_bo_unref(bo);
        return NULL;
    }

    /* Self-link so list membership tests (is_empty_list) work. */
    make_empty_list(buf);

    pipe_reference_init(&buf->base.base.reference, 1);
    buf->base.base.alignment = 0;
    buf->base.base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
    buf->base.base.size = bo->size;
    buf->base.vtbl = &radeon_drm_buffer_vtbl;
    buf->mgr = mgr;

    buf->bo = bo;

    /* Remember the name -> buffer association; removed again in
     * radeon_drm_buffer_destroy. */
    util_hash_table_set(mgr->buffer_handles, (void*)(uintptr_t)handle, buf);

    return &buf->base;
}
264
265 struct pb_buffer *
266 radeon_drm_bufmgr_create_buffer_from_handle(struct pb_manager *_mgr,
267 uint32_t handle)
268 {
269 struct radeon_drm_bufmgr *mgr = radeon_drm_bufmgr(_mgr);
270 struct pb_buffer *pb;
271
272 pipe_mutex_lock(mgr->buffer_handles_mutex);
273 pb = radeon_drm_bufmgr_create_buffer_from_handle_unsafe(_mgr, handle);
274 pipe_mutex_unlock(mgr->buffer_handles_mutex);
275
276 return pb;
277 }
278
/* pb_manager create_buffer hook: allocate a fresh BO of 'size' bytes in
 * the GTT/VRAM domain(s) requested via desc->usage.  Returns NULL on
 * allocation failure. */
static struct pb_buffer *
radeon_drm_bufmgr_create_buffer(struct pb_manager *_mgr,
                                pb_size size,
                                const struct pb_desc *desc)
{
    struct radeon_drm_bufmgr *mgr = radeon_drm_bufmgr(_mgr);
    struct radeon_drm_winsys *rws = mgr->rws;
    struct radeon_drm_buffer *buf;
    uint32_t domain;

    buf = CALLOC_STRUCT(radeon_drm_buffer);
    if (!buf)
        goto error1;

    pipe_reference_init(&buf->base.base.reference, 1);
    buf->base.base.alignment = desc->alignment;
    buf->base.base.usage = desc->usage;
    buf->base.base.size = size;
    buf->base.vtbl = &radeon_drm_buffer_vtbl;
    buf->mgr = mgr;

    /* Self-link so list membership tests (is_empty_list) work. */
    make_empty_list(buf);

    /* Map the r300 usage flags onto kernel memory domains. */
    domain =
        (desc->usage & RADEON_PB_USAGE_DOMAIN_GTT ? RADEON_GEM_DOMAIN_GTT : 0) |
        (desc->usage & RADEON_PB_USAGE_DOMAIN_VRAM ? RADEON_GEM_DOMAIN_VRAM : 0);

    buf->bo = radeon_bo_open(rws->bom, 0, size,
                             desc->alignment, domain, 0);
    if (buf->bo == NULL)
        goto error2;

    return &buf->base;

error2:
    FREE(buf);
error1:
    return NULL;
}
318
/* pb_manager flush hook: nothing is cached at this level, so there is
 * nothing to flush. */
static void
radeon_drm_bufmgr_flush(struct pb_manager *mgr)
{
    /* NOP */
}
324
/* pb_manager destroy hook: free the handle table, the mutexes, and the
 * manager itself.
 * NOTE(review): buffer_map_list is not drained here — presumably all
 * buffers have been destroyed by the time the manager goes away; confirm
 * against the winsys teardown order. */
static void
radeon_drm_bufmgr_destroy(struct pb_manager *_mgr)
{
    struct radeon_drm_bufmgr *mgr = radeon_drm_bufmgr(_mgr);
    util_hash_table_destroy(mgr->buffer_handles);
    pipe_mutex_destroy(mgr->buffer_map_list_mutex);
    pipe_mutex_destroy(mgr->buffer_handles_mutex);
    FREE(mgr);
}
334
/* Hash callback for buffer_handles.  Keys are 32-bit GEM names smuggled
 * through the void* key, so the value itself is the hash.  Convert through
 * uintptr_t: a direct pointer-to-unsigned cast is a truncating conversion
 * that compilers warn about (and is implementation-defined) on LP64. */
static unsigned handle_hash(void *key)
{
    return (unsigned)(uintptr_t)key;
}
339
/* Key-compare callback for buffer_handles: returns 0 when the two keys
 * (32-bit GEM names stored in the pointer values) are equal, nonzero
 * otherwise.  Compare via uintptr_t instead of casting the pointers to
 * int, which truncates and draws warnings on LP64 platforms. */
static int handle_compare(void *key1, void *key2)
{
    return (uintptr_t)key1 != (uintptr_t)key2;
}
344
345 struct pb_manager *
346 radeon_drm_bufmgr_create(struct radeon_drm_winsys *rws)
347 {
348 struct radeon_drm_bufmgr *mgr;
349
350 mgr = CALLOC_STRUCT(radeon_drm_bufmgr);
351 if (!mgr)
352 return NULL;
353
354 mgr->base.destroy = radeon_drm_bufmgr_destroy;
355 mgr->base.create_buffer = radeon_drm_bufmgr_create_buffer;
356 mgr->base.flush = radeon_drm_bufmgr_flush;
357
358 mgr->rws = rws;
359 make_empty_list(&mgr->buffer_map_list);
360 mgr->buffer_handles = util_hash_table_create(handle_hash, handle_compare);
361 pipe_mutex_init(mgr->buffer_map_list_mutex);
362 pipe_mutex_init(mgr->buffer_handles_mutex);
363 return &mgr->base;
364 }
365
366 static struct radeon_drm_buffer *get_drm_buffer(struct pb_buffer *_buf)
367 {
368 struct radeon_drm_buffer *buf = NULL;
369
370 if (_buf->vtbl == &radeon_drm_buffer_vtbl) {
371 buf = radeon_drm_buffer(_buf);
372 } else {
373 struct pb_buffer *base_buf;
374 pb_size offset;
375 pb_get_base_buffer(_buf, &base_buf, &offset);
376
377 if (base_buf->vtbl == &radeon_drm_buffer_vtbl)
378 buf = radeon_drm_buffer(base_buf);
379 }
380
381 return buf;
382 }
383
384 static void *radeon_drm_buffer_map(struct r300_winsys_screen *ws,
385 struct r300_winsys_buffer *buf,
386 struct r300_winsys_cs *cs,
387 enum pipe_transfer_usage usage)
388 {
389 struct pb_buffer *_buf = radeon_pb_buffer(buf);
390
391 return pb_map(_buf, get_pb_usage_from_transfer_flags(usage), radeon_drm_cs(cs));
392 }
393
/* r300 winsys entry point: release a CPU mapping (deferred — see
 * radeon_drm_buffer_unmap_internal). */
static void radeon_drm_buffer_unmap(struct r300_winsys_screen *ws,
                                    struct r300_winsys_buffer *buf)
{
    pb_unmap(radeon_pb_buffer(buf));
}
401
/* Export a buffer as a winsys handle: either a global GEM flink name
 * (SHARED) or the process-local GEM handle (KMS).  Returns FALSE only if
 * the flink ioctl fails.
 * NOTE(review): the flinked/flink fields are read and written without a
 * lock — presumably callers serialize handle export; confirm. */
boolean radeon_drm_bufmgr_get_handle(struct pb_buffer *_buf,
                                     struct winsys_handle *whandle)
{
    /* Only .handle is set; the kernel fills in .name on success. */
    struct drm_gem_flink flink;
    struct radeon_drm_buffer *buf = get_drm_buffer(_buf);

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        /* Flink once and cache the global name for later exports. */
        if (!buf->flinked) {
            flink.handle = buf->bo->handle;

            if (ioctl(buf->mgr->rws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
                return FALSE;
            }

            buf->flinked = TRUE;
            buf->flink = flink.name;
        }
        whandle->handle = buf->flink;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
        whandle->handle = buf->bo->handle;
    }
    return TRUE;
}
425
426 static void radeon_drm_buffer_get_tiling(struct r300_winsys_screen *ws,
427 struct r300_winsys_buffer *_buf,
428 enum r300_buffer_tiling *microtiled,
429 enum r300_buffer_tiling *macrotiled)
430 {
431 struct radeon_drm_buffer *buf = get_drm_buffer(radeon_pb_buffer(_buf));
432 uint32_t flags = 0, pitch;
433
434 radeon_bo_get_tiling(buf->bo, &flags, &pitch);
435
436 *microtiled = R300_BUFFER_LINEAR;
437 *macrotiled = R300_BUFFER_LINEAR;
438 if (flags & RADEON_BO_FLAGS_MICRO_TILE)
439 *microtiled = R300_BUFFER_TILED;
440
441 if (flags & RADEON_BO_FLAGS_MACRO_TILE)
442 *macrotiled = R300_BUFFER_TILED;
443 }
444
/* r300 winsys entry point: translate r300 tiling enums into libdrm BO
 * tiling flags and push them to the kernel along with the pitch. */
static void radeon_drm_buffer_set_tiling(struct r300_winsys_screen *ws,
                                         struct r300_winsys_buffer *_buf,
                                         enum r300_buffer_tiling microtiled,
                                         enum r300_buffer_tiling macrotiled,
                                         uint32_t pitch)
{
    struct radeon_drm_buffer *buf = get_drm_buffer(radeon_pb_buffer(_buf));
    uint32_t flags = 0;
    if (microtiled == R300_BUFFER_TILED)
        flags |= RADEON_BO_FLAGS_MICRO_TILE;
/* XXX Remove this ifdef when libdrm version 2.4.19 becomes mandatory. */
#ifdef RADEON_BO_FLAGS_MICRO_TILE_SQUARE
    else if (microtiled == R300_BUFFER_SQUARETILED)
        flags |= RADEON_BO_FLAGS_MICRO_TILE_SQUARE;
#endif
    if (macrotiled == R300_BUFFER_TILED)
        flags |= RADEON_BO_FLAGS_MACRO_TILE;

    radeon_bo_set_tiling(buf->bo, flags, pitch);
}
465
466 static struct r300_winsys_cs_buffer *radeon_drm_get_cs_handle(
467 struct r300_winsys_screen *rws,
468 struct r300_winsys_buffer *_buf)
469 {
470 /* return pure radeon_bo. */
471 return (struct r300_winsys_cs_buffer*)
472 get_drm_buffer(radeon_pb_buffer(_buf))->bo;
473 }
474
475 static boolean radeon_drm_is_buffer_referenced(struct r300_winsys_cs *rcs,
476 struct r300_winsys_cs_buffer *_buf,
477 enum r300_reference_domain domain)
478 {
479 struct radeon_bo *bo = (struct radeon_bo*)_buf;
480 uint32_t tmp;
481
482 if (domain & R300_REF_CS) {
483 if (radeon_bo_is_referenced_by_cs(bo, NULL)) {
484 return TRUE;
485 }
486 }
487
488 if (domain & R300_REF_HW) {
489 if (radeon_bo_is_busy(bo, &tmp)) {
490 return TRUE;
491 }
492 }
493
494 return FALSE;
495 }
496
/* Tear down the cached CPU mappings of every idle buffer parked on the
 * manager's buffer_map_list (see radeon_drm_buffer_unmap_internal, which
 * defers the real unmap until this call). */
void radeon_drm_bufmgr_flush_maps(struct pb_manager *_mgr)
{
    struct radeon_drm_bufmgr *mgr = radeon_drm_bufmgr(_mgr);
    struct radeon_drm_buffer *rpb, *t_rpb;

    pipe_mutex_lock(mgr->buffer_map_list_mutex);

    /* _s variant: safe against removal of the current node. */
    foreach_s(rpb, t_rpb, &mgr->buffer_map_list) {
        radeon_bo_unmap(rpb->bo);
        rpb->bo->ptr = NULL;
        remove_from_list(rpb);
    }

    /* Reset the head; the loop above already emptied the list, so this
     * is belt-and-braces. */
    make_empty_list(&mgr->buffer_map_list);

    pipe_mutex_unlock(mgr->buffer_map_list_mutex);
}
514
515 static void radeon_drm_buffer_wait(struct r300_winsys_screen *ws,
516 struct r300_winsys_buffer *_buf)
517 {
518 struct radeon_drm_buffer *buf = get_drm_buffer(radeon_pb_buffer(_buf));
519
520 radeon_bo_wait(buf->bo);
521 }
522
523 void radeon_drm_bufmgr_init_functions(struct radeon_drm_winsys *ws)
524 {
525 ws->base.buffer_get_cs_handle = radeon_drm_get_cs_handle;
526 ws->base.buffer_set_tiling = radeon_drm_buffer_set_tiling;
527 ws->base.buffer_get_tiling = radeon_drm_buffer_get_tiling;
528 ws->base.buffer_map = radeon_drm_buffer_map;
529 ws->base.buffer_unmap = radeon_drm_buffer_unmap;
530 ws->base.buffer_wait = radeon_drm_buffer_wait;
531 ws->base.cs_is_buffer_referenced = radeon_drm_is_buffer_referenced;
532 }