/* src/gallium/winsys/radeon/drm/radeon_drm_buffer.c */

#include <sys/ioctl.h>
#include "radeon_drm.h"
#include "radeon_bo_gem.h"
#include "radeon_cs_gem.h"
#include "radeon_buffer.h"

#include "util/u_hash_table.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_simple_list.h"
#include "pipebuffer/pb_buffer.h"
#include "pipebuffer/pb_bufmgr.h"

#include "radeon_winsys.h"

struct radeon_drm_bufmgr;

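/* A pb_buffer that wraps a libdrm radeon_bo.  The next/prev pointers link
 * the buffer into the manager's buffer_map_list while its BO is mapped, and
 * flinked/flink cache the GEM flink name once the buffer has been shared. */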
struct radeon_drm_buffer {
    struct pb_buffer base;
    struct radeon_drm_bufmgr *mgr;

    struct radeon_bo *bo;

    boolean flinked;
    uint32_t flink;

    struct radeon_drm_buffer *next, *prev;
};

extern const struct pb_vtbl radeon_drm_buffer_vtbl;


static INLINE struct radeon_drm_buffer *
radeon_drm_buffer(struct pb_buffer *buf)
{
    assert(buf);
    assert(buf->vtbl == &radeon_drm_buffer_vtbl);
    return (struct radeon_drm_buffer *)buf;
}

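/* The pb_manager that allocates radeon_drm_buffers.  buffer_map_list is the
 * sentinel of a doubly linked list of currently mapped buffers, and
 * buffer_handles maps GEM flink names to already-imported buffers so that
 * opening the same handle twice yields the same pb_buffer. */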
struct radeon_drm_bufmgr {
    struct pb_manager base;
    struct radeon_libdrm_winsys *rws;
    struct radeon_drm_buffer buffer_map_list;
    struct util_hash_table *buffer_handles;
};

static INLINE struct radeon_drm_bufmgr *
radeon_drm_bufmgr(struct pb_manager *mgr)
{
    assert(mgr);
    return (struct radeon_drm_bufmgr *)mgr;
}

static void
radeon_drm_buffer_destroy(struct pb_buffer *_buf)
{
    struct radeon_drm_buffer *buf = radeon_drm_buffer(_buf);
    int name;

    if (buf->bo->ptr != NULL) {
        remove_from_list(buf);
        radeon_bo_unmap(buf->bo);
        buf->bo->ptr = NULL;
    }
    name = radeon_gem_name_bo(buf->bo);
    if (name) {
        util_hash_table_remove(buf->mgr->buffer_handles,
                               (void*)(uintptr_t)name);
    }
    radeon_bo_unref(buf->bo);

    FREE(buf);
}

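/* Translate gallium PIPE_TRANSFER_* flags into pipebuffer PB_USAGE_* flags,
 * e.g. (PIPE_TRANSFER_READ | PIPE_TRANSFER_DONTBLOCK) becomes
 * (PB_USAGE_CPU_READ | PB_USAGE_DONTBLOCK). */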
static unsigned get_pb_usage_from_transfer_flags(enum pipe_transfer_usage usage)
{
    unsigned res = 0;

    if (usage & PIPE_TRANSFER_READ)
        res |= PB_USAGE_CPU_READ;

    if (usage & PIPE_TRANSFER_WRITE)
        res |= PB_USAGE_CPU_WRITE;

    if (usage & PIPE_TRANSFER_DONTBLOCK)
        res |= PB_USAGE_DONTBLOCK;

    if (usage & PIPE_TRANSFER_UNSYNCHRONIZED)
        res |= PB_USAGE_UNSYNCHRONIZED;

    return res;
}

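/* pb_vtbl map hook.  flush_ctx is the radeon_libdrm_cs handed down through
 * pb_map() by radeon_drm_buffer_map() below; it may be NULL when the map
 * request comes from the buffer-reuse path in create_buffer. */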
static void *
radeon_drm_buffer_map_internal(struct pb_buffer *_buf,
                               unsigned flags, void *flush_ctx)
{
    struct radeon_drm_buffer *buf = radeon_drm_buffer(_buf);
    struct radeon_libdrm_cs *cs = flush_ctx;
    int write = 0;

    /* Note how we use radeon_bo_is_referenced_by_cs here. There are
     * basically two places this map function can be called from:
     * - pb_map
     * - create_buffer (in the buffer reuse case)
     *
     * Since pb managers are per-winsys managers, not per-context managers,
     * and we shouldn't reuse buffers if they are in-use in any context,
     * we simply ask: is this buffer referenced by *any* CS?
     *
     * The problem with buffer_create is that it comes from pipe_screen,
     * so we have no CS to look at, though luckily the following code
     * is sufficient to tell whether the buffer is in use. */
    if (flags & PB_USAGE_DONTBLOCK) {
        if (_buf->base.usage & RADEON_PB_USAGE_VERTEX)
            if (radeon_bo_is_referenced_by_cs(buf->bo, NULL))
                return NULL;
    }

    if (buf->bo->ptr != NULL)
        return buf->bo->ptr;

    if (flags & PB_USAGE_DONTBLOCK) {
        uint32_t domain;
        if (radeon_bo_is_busy(buf->bo, &domain))
            return NULL;
    }

    /* If we don't have any CS and the buffer is referenced,
     * we cannot flush. */
    assert(cs || !radeon_bo_is_referenced_by_cs(buf->bo, NULL));

    if (cs && radeon_bo_is_referenced_by_cs(buf->bo, cs->cs)) {
        cs->flush_cs(cs->flush_data);
    }

    if (flags & PB_USAGE_CPU_WRITE) {
        write = 1;
    }

    if (radeon_bo_map(buf->bo, write)) {
        return NULL;
    }
    insert_at_tail(&buf->mgr->buffer_map_list, buf);
    return buf->bo->ptr;
}

static void
radeon_drm_buffer_unmap_internal(struct pb_buffer *_buf)
{
    (void)_buf;
}

static void
radeon_drm_buffer_get_base_buffer(struct pb_buffer *buf,
                                  struct pb_buffer **base_buf,
                                  unsigned *offset)
{
    *base_buf = buf;
    *offset = 0;
}


static enum pipe_error
radeon_drm_buffer_validate(struct pb_buffer *_buf,
                           struct pb_validate *vl,
                           unsigned flags)
{
    /* Always pinned */
    return PIPE_OK;
}

static void
radeon_drm_buffer_fence(struct pb_buffer *buf,
                        struct pipe_fence_handle *fence)
{
}

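/* Positional initializers: the order must match the member order of
 * struct pb_vtbl (destroy, map, unmap, validate, fence, get_base_buffer). */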
const struct pb_vtbl radeon_drm_buffer_vtbl = {
    radeon_drm_buffer_destroy,
    radeon_drm_buffer_map_internal,
    radeon_drm_buffer_unmap_internal,
    radeon_drm_buffer_validate,
    radeon_drm_buffer_fence,
    radeon_drm_buffer_get_base_buffer,
};

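/* Import a buffer by its GEM name (flink handle), as used for shared
 * surfaces.  Already-imported names are looked up in buffer_handles so the
 * same name always resolves to the same pb_buffer.  A sketch of the expected
 * caller (names are illustrative, not part of this file):
 *
 *   if (whandle->type == DRM_API_HANDLE_TYPE_SHARED)
 *       buf = radeon_drm_bufmgr_create_buffer_from_handle(mgr,
 *                                                         whandle->handle);
 */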
struct pb_buffer *radeon_drm_bufmgr_create_buffer_from_handle(struct pb_manager *_mgr,
                                                              uint32_t handle)
{
    struct radeon_drm_bufmgr *mgr = radeon_drm_bufmgr(_mgr);
    struct radeon_libdrm_winsys *rws = mgr->rws;
    struct radeon_drm_buffer *buf;
    struct radeon_bo *bo;

    buf = util_hash_table_get(mgr->buffer_handles, (void*)(uintptr_t)handle);
    if (buf) {
        struct pb_buffer *b = NULL;
        pb_reference(&b, &buf->base);
        return b;
    }

    bo = radeon_bo_open(rws->bom, handle, 0,
                        0, 0, 0);
    if (bo == NULL)
        return NULL;

    buf = CALLOC_STRUCT(radeon_drm_buffer);
    if (!buf) {
        radeon_bo_unref(bo);
        return NULL;
    }

    make_empty_list(buf);

    pipe_reference_init(&buf->base.base.reference, 1);
    buf->base.base.alignment = 0;
    buf->base.base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
    buf->base.base.size = bo->size;
    buf->base.vtbl = &radeon_drm_buffer_vtbl;
    buf->mgr = mgr;

    buf->bo = bo;

    util_hash_table_set(mgr->buffer_handles, (void*)(uintptr_t)handle, buf);

    return &buf->base;
}

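/* pb_manager::create_buffer hook.  The GEM domain (GTT and/or VRAM) is
 * derived from the RADEON_PB_USAGE_DOMAIN_* bits of desc->usage. */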
static struct pb_buffer *
radeon_drm_bufmgr_create_buffer(struct pb_manager *_mgr,
                                pb_size size,
                                const struct pb_desc *desc)
{
    struct radeon_drm_bufmgr *mgr = radeon_drm_bufmgr(_mgr);
    struct radeon_libdrm_winsys *rws = mgr->rws;
    struct radeon_drm_buffer *buf;
    uint32_t domain;

    buf = CALLOC_STRUCT(radeon_drm_buffer);
    if (!buf)
        goto error1;

    pipe_reference_init(&buf->base.base.reference, 1);
    buf->base.base.alignment = desc->alignment;
    buf->base.base.usage = desc->usage;
    buf->base.base.size = size;
    buf->base.vtbl = &radeon_drm_buffer_vtbl;
    buf->mgr = mgr;

    make_empty_list(buf);

    domain =
        (desc->usage & RADEON_PB_USAGE_DOMAIN_GTT ? RADEON_GEM_DOMAIN_GTT : 0) |
        (desc->usage & RADEON_PB_USAGE_DOMAIN_VRAM ? RADEON_GEM_DOMAIN_VRAM : 0);

    buf->bo = radeon_bo_open(rws->bom, 0, size,
                             desc->alignment, domain, 0);
    if (buf->bo == NULL)
        goto error2;

    return &buf->base;

error2:
    FREE(buf);
error1:
    return NULL;
}

static void
radeon_drm_bufmgr_flush(struct pb_manager *mgr)
{
    /* NOP */
}

static void
radeon_drm_bufmgr_destroy(struct pb_manager *_mgr)
{
    struct radeon_drm_bufmgr *mgr = radeon_drm_bufmgr(_mgr);
    util_hash_table_destroy(mgr->buffer_handles);
    FREE(mgr);
}

static unsigned handle_hash(void *key)
{
    /* Keys are small GEM names stored as (void*)(uintptr_t), so go through
     * uintptr_t to avoid pointer-truncation warnings on 64-bit builds. */
    return (unsigned)(uintptr_t)key;
}

static int handle_compare(void *key1, void *key2)
{
    return (uintptr_t)key1 != (uintptr_t)key2;
}

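/* Create the kernel (GEM) buffer manager for a winsys.  The winsys would
 * normally not allocate from this manager directly but wrap it in a caching
 * manager; a sketch, assuming gallium's pb_cache_manager helper:
 *
 *   struct pb_manager *kman = radeon_drm_bufmgr_create(rws);
 *   struct pb_manager *cman = pb_cache_manager_create(kman, 1000000);
 */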
struct pb_manager *
radeon_drm_bufmgr_create(struct radeon_libdrm_winsys *rws)
{
    struct radeon_drm_bufmgr *mgr;

    mgr = CALLOC_STRUCT(radeon_drm_bufmgr);
    if (!mgr)
        return NULL;

    mgr->base.destroy = radeon_drm_bufmgr_destroy;
    mgr->base.create_buffer = radeon_drm_bufmgr_create_buffer;
    mgr->base.flush = radeon_drm_bufmgr_flush;

    mgr->rws = rws;
    make_empty_list(&mgr->buffer_map_list);
    mgr->buffer_handles = util_hash_table_create(handle_hash, handle_compare);
    return &mgr->base;
}

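/* Recover the radeon_drm_buffer behind an arbitrary pb_buffer.  Buffers that
 * come out of a wrapping manager (e.g. a caching manager) are unwrapped via
 * pb_get_base_buffer(); returns NULL if the underlying buffer is not ours. */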
static struct radeon_drm_buffer *get_drm_buffer(struct pb_buffer *_buf)
{
    struct radeon_drm_buffer *buf = NULL;

    if (_buf->vtbl == &radeon_drm_buffer_vtbl) {
        buf = radeon_drm_buffer(_buf);
    } else {
        struct pb_buffer *base_buf;
        pb_size offset;
        pb_get_base_buffer(_buf, &base_buf, &offset);

        if (base_buf->vtbl == &radeon_drm_buffer_vtbl)
            buf = radeon_drm_buffer(base_buf);
    }

    return buf;
}

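/* r300 winsys entry points for CPU access.  The CS is forwarded as the flush
 * context so radeon_drm_buffer_map_internal() can flush it when the buffer
 * is still queued in the current command stream. */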
void *radeon_drm_buffer_map(struct r300_winsys_screen *ws,
                            struct r300_winsys_buffer *buf,
                            struct r300_winsys_cs *cs,
                            enum pipe_transfer_usage usage)
{
    struct pb_buffer *_buf = radeon_pb_buffer(buf);

    return pb_map(_buf, get_pb_usage_from_transfer_flags(usage), radeon_libdrm_cs(cs));
}

void radeon_drm_buffer_unmap(struct r300_winsys_screen *ws,
                             struct r300_winsys_buffer *buf)
{
    struct pb_buffer *_buf = radeon_pb_buffer(buf);

    pb_unmap(_buf);
}

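/* Export a buffer through a winsys_handle: SHARED handles get a GEM flink
 * name (created lazily and cached in buf->flink), while KMS handles return
 * the local GEM handle directly. */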
boolean radeon_drm_bufmgr_get_handle(struct pb_buffer *_buf,
                                     struct winsys_handle *whandle)
{
    struct drm_gem_flink flink;
    struct radeon_drm_buffer *buf = get_drm_buffer(_buf);

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        if (!buf->flinked) {
            flink.handle = buf->bo->handle;

            if (ioctl(buf->mgr->rws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
                return FALSE;
            }

            buf->flinked = TRUE;
            buf->flink = flink.name;
        }
        whandle->handle = buf->flink;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
        whandle->handle = buf->bo->handle;
    }
    return TRUE;
}

void radeon_drm_bufmgr_get_tiling(struct r300_winsys_screen *ws,
                                  struct r300_winsys_buffer *_buf,
                                  enum r300_buffer_tiling *microtiled,
                                  enum r300_buffer_tiling *macrotiled)
{
    struct radeon_drm_buffer *buf = get_drm_buffer(radeon_pb_buffer(_buf));
    uint32_t flags = 0, pitch;

    radeon_bo_get_tiling(buf->bo, &flags, &pitch);

    *microtiled = R300_BUFFER_LINEAR;
    *macrotiled = R300_BUFFER_LINEAR;
    if (flags & RADEON_BO_FLAGS_MICRO_TILE)
        *microtiled = R300_BUFFER_TILED;

    if (flags & RADEON_BO_FLAGS_MACRO_TILE)
        *macrotiled = R300_BUFFER_TILED;
}

void radeon_drm_bufmgr_set_tiling(struct r300_winsys_screen *ws,
                                  struct r300_winsys_buffer *_buf,
                                  enum r300_buffer_tiling microtiled,
                                  enum r300_buffer_tiling macrotiled,
                                  uint32_t pitch)
{
    struct radeon_drm_buffer *buf = get_drm_buffer(radeon_pb_buffer(_buf));
    uint32_t flags = 0;

    if (microtiled == R300_BUFFER_TILED)
        flags |= RADEON_BO_FLAGS_MICRO_TILE;
    /* XXX Remove this ifdef when libdrm version 2.4.19 becomes mandatory. */
#ifdef RADEON_BO_FLAGS_MICRO_TILE_SQUARE
    else if (microtiled == R300_BUFFER_SQUARETILED)
        flags |= RADEON_BO_FLAGS_MICRO_TILE_SQUARE;
#endif
    if (macrotiled == R300_BUFFER_TILED)
        flags |= RADEON_BO_FLAGS_MACRO_TILE;

    radeon_bo_set_tiling(buf->bo, flags, pitch);
}

static uint32_t get_gem_domain(enum r300_buffer_domain domain)
{
    uint32_t res = 0;

    if (domain & R300_DOMAIN_GTT)
        res |= RADEON_GEM_DOMAIN_GTT;
    if (domain & R300_DOMAIN_VRAM)
        res |= RADEON_GEM_DOMAIN_VRAM;
    return res;
}

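/* Register a BO with libdrm's CS space accounting under the requested read
 * and write domains, so VRAM/GTT limits can be checked before the command
 * stream that references it is submitted. */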
void radeon_drm_bufmgr_add_buffer(struct r300_winsys_cs *rcs,
                                  struct r300_winsys_buffer *_buf,
                                  enum r300_buffer_domain rd,
                                  enum r300_buffer_domain wd)
{
    struct radeon_libdrm_cs *cs = radeon_libdrm_cs(rcs);
    struct radeon_drm_buffer *buf = get_drm_buffer(radeon_pb_buffer(_buf));
    uint32_t gem_rd = get_gem_domain(rd);
    uint32_t gem_wd = get_gem_domain(wd);

    radeon_cs_space_add_persistent_bo(cs->cs, buf->bo, gem_rd, gem_wd);
}

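/* Emit a relocation for a BO into the command stream.  radeon_cs_write_reloc()
 * appends to the libdrm CS, so the winsys's shadow dword counter (base.cdw)
 * is synchronized with libdrm's counter (cs->cdw) around the call. */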
void radeon_drm_bufmgr_write_reloc(struct r300_winsys_cs *rcs,
                                   struct r300_winsys_buffer *_buf,
                                   enum r300_buffer_domain rd,
                                   enum r300_buffer_domain wd)
{
    struct radeon_libdrm_cs *cs = radeon_libdrm_cs(rcs);
    struct radeon_drm_buffer *buf = get_drm_buffer(radeon_pb_buffer(_buf));
    int retval;
    uint32_t gem_rd = get_gem_domain(rd);
    uint32_t gem_wd = get_gem_domain(wd);

    cs->cs->cdw = cs->base.cdw;
    retval = radeon_cs_write_reloc(cs->cs, buf->bo, gem_rd, gem_wd, 0);
    cs->base.cdw = cs->cs->cdw;
    if (retval) {
        fprintf(stderr, "radeon: Relocation of %p (%d, %d, %d) failed!\n",
                buf, gem_rd, gem_wd, 0);
    }
}

boolean radeon_drm_bufmgr_is_buffer_referenced(struct r300_winsys_cs *rcs,
                                               struct r300_winsys_buffer *_buf,
                                               enum r300_reference_domain domain)
{
    struct radeon_libdrm_cs *cs = radeon_libdrm_cs(rcs);
    struct radeon_drm_buffer *buf = get_drm_buffer(radeon_pb_buffer(_buf));
    uint32_t tmp;

    if (domain & R300_REF_CS) {
        if (radeon_bo_is_referenced_by_cs(buf->bo, cs->cs)) {
            return TRUE;
        }
    }

    if (domain & R300_REF_HW) {
        if (radeon_bo_is_busy(buf->bo, &tmp)) {
            return TRUE;
        }
    }

    return FALSE;
}

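/* Unmap every buffer on the deferred map list.  Intended to be called around
 * a CS flush so that stale CPU mappings do not outlive the command stream
 * that referenced them. */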
void radeon_drm_bufmgr_flush_maps(struct pb_manager *_mgr)
{
    struct radeon_drm_bufmgr *mgr = radeon_drm_bufmgr(_mgr);
    struct radeon_drm_buffer *rpb, *t_rpb;

    foreach_s(rpb, t_rpb, &mgr->buffer_map_list) {
        radeon_bo_unmap(rpb->bo);
        rpb->bo->ptr = NULL;
        remove_from_list(rpb);
    }

    make_empty_list(&mgr->buffer_map_list);
}

void radeon_drm_bufmgr_wait(struct r300_winsys_screen *ws,
                            struct r300_winsys_buffer *_buf)
{
    struct radeon_drm_buffer *buf = get_drm_buffer(radeon_pb_buffer(_buf));

    radeon_bo_wait(buf->bo);
}