r300g: reject resources from handles which are not large enough
[mesa.git] src/gallium/winsys/radeon/drm/radeon_drm_buffer.c

#include <sys/ioctl.h>
#include "radeon_drm.h"
#include "radeon_bo_gem.h"
#include "radeon_cs_gem.h"
#include "radeon_buffer.h"

#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_simple_list.h"
#include "pipebuffer/pb_buffer.h"
#include "pipebuffer/pb_bufmgr.h"

#include "radeon_winsys.h"

struct radeon_drm_bufmgr;

struct radeon_drm_buffer {
    struct pb_buffer base;
    struct radeon_drm_bufmgr *mgr;

    struct radeon_bo *bo;

    /* The CS associated with the last buffer_map. */
    struct radeon_libdrm_cs *cs;

    boolean flinked;
    uint32_t flink;

    struct radeon_drm_buffer *next, *prev;
};

extern const struct pb_vtbl radeon_drm_buffer_vtbl;


static INLINE struct radeon_drm_buffer *
radeon_drm_buffer(struct pb_buffer *buf)
{
    assert(buf);
    assert(buf->vtbl == &radeon_drm_buffer_vtbl);
    return (struct radeon_drm_buffer *)buf;
}

struct radeon_drm_bufmgr {
    struct pb_manager base;
    struct radeon_libdrm_winsys *rws;
    struct radeon_drm_buffer buffer_map_list;
};

static INLINE struct radeon_drm_bufmgr *
radeon_drm_bufmgr(struct pb_manager *mgr)
{
    assert(mgr);
    return (struct radeon_drm_bufmgr *)mgr;
}

static void
radeon_drm_buffer_destroy(struct pb_buffer *_buf)
{
    struct radeon_drm_buffer *buf = radeon_drm_buffer(_buf);

    /* If the buffer is still mapped, drop it from the mapped-buffer list
     * and unmap it before releasing the BO. */
    if (buf->bo->ptr != NULL) {
        remove_from_list(buf);
        radeon_bo_unmap(buf->bo);
        buf->bo->ptr = NULL;
    }
    radeon_bo_unref(buf->bo);

    FREE(buf);
}

static unsigned get_pb_usage_from_transfer_flags(enum pipe_transfer_usage usage)
{
    unsigned res = 0;

    if (usage & PIPE_TRANSFER_READ)
        res |= PB_USAGE_CPU_READ;

    if (usage & PIPE_TRANSFER_WRITE)
        res |= PB_USAGE_CPU_WRITE;

    if (usage & PIPE_TRANSFER_DONTBLOCK)
        res |= PB_USAGE_DONTBLOCK;

    if (usage & PIPE_TRANSFER_UNSYNCHRONIZED)
        res |= PB_USAGE_UNSYNCHRONIZED;

    return res;
}

static void *
radeon_drm_buffer_map_internal(struct pb_buffer *_buf,
                               unsigned flags)
{
    struct radeon_drm_buffer *buf = radeon_drm_buffer(_buf);
    struct radeon_libdrm_cs *cs = buf->cs;
    int write = 0;

    /* For a non-blocking map of a vertex buffer, fail immediately if the
     * buffer is still referenced by the CS being built. */
    if (flags & PB_USAGE_DONTBLOCK) {
        if (_buf->base.usage & RADEON_PB_USAGE_VERTEX)
            if (cs && radeon_bo_is_referenced_by_cs(buf->bo, cs->cs))
                return NULL;
    }

    if (buf->bo->ptr != NULL)
        return buf->bo->ptr;

    if (flags & PB_USAGE_DONTBLOCK) {
        uint32_t domain;
        if (radeon_bo_is_busy(buf->bo, &domain))
            return NULL;
    }

    /* If the CS being built still references this buffer, flush it first so
     * the pending commands are submitted before the CPU accesses the BO. */
    if (cs && radeon_bo_is_referenced_by_cs(buf->bo, cs->cs)) {
        cs->flush_cs(cs->flush_data);
    }

    if (flags & PB_USAGE_CPU_WRITE) {
        write = 1;
    }

    if (radeon_bo_map(buf->bo, write)) {
        return NULL;
    }
    insert_at_tail(&buf->mgr->buffer_map_list, buf);
    return buf->bo->ptr;
}

static void
radeon_drm_buffer_unmap_internal(struct pb_buffer *_buf)
{
    (void)_buf;
}

static void
radeon_drm_buffer_get_base_buffer(struct pb_buffer *buf,
                                  struct pb_buffer **base_buf,
                                  unsigned *offset)
{
    *base_buf = buf;
    *offset = 0;
}


static enum pipe_error
radeon_drm_buffer_validate(struct pb_buffer *_buf,
                           struct pb_validate *vl,
                           unsigned flags)
{
    /* Always pinned */
    return PIPE_OK;
}

static void
radeon_drm_buffer_fence(struct pb_buffer *buf,
                        struct pipe_fence_handle *fence)
{
}

const struct pb_vtbl radeon_drm_buffer_vtbl = {
    radeon_drm_buffer_destroy,
    radeon_drm_buffer_map_internal,
    radeon_drm_buffer_unmap_internal,
    radeon_drm_buffer_validate,
    radeon_drm_buffer_fence,
    radeon_drm_buffer_get_base_buffer,
};

struct pb_buffer *radeon_drm_bufmgr_create_buffer_from_handle(struct pb_manager *_mgr,
                                                              uint32_t handle)
{
    struct radeon_drm_bufmgr *mgr = radeon_drm_bufmgr(_mgr);
    struct radeon_libdrm_winsys *rws = mgr->rws;
    struct radeon_drm_buffer *buf;
    struct radeon_bo *bo;

    bo = radeon_bo_open(rws->bom, handle, 0, 0, 0, 0);
    if (bo == NULL)
        return NULL;

    buf = CALLOC_STRUCT(radeon_drm_buffer);
    if (!buf) {
        radeon_bo_unref(bo);
        return NULL;
    }

    make_empty_list(buf);

    pipe_reference_init(&buf->base.base.reference, 1);
    buf->base.base.alignment = 0;
    buf->base.base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
    buf->base.base.size = bo->size;
    buf->base.vtbl = &radeon_drm_buffer_vtbl;
    buf->mgr = mgr;

    buf->bo = bo;

    return &buf->base;
}
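
/*
 * Hedged sketch, not part of the function as shown above: the commit summary
 * ("reject resources from handles which are not large enough") implies
 * validating the opened BO's size before wrapping it.  Assuming the function
 * were extended with a hypothetical "size" parameter carrying the minimum
 * size the resource requires, the check could look like this, placed right
 * after the radeon_bo_open() call:
 *
 *     bo = radeon_bo_open(rws->bom, handle, 0, 0, 0, 0);
 *     if (bo == NULL)
 *         return NULL;
 *
 *     if (bo->size < size) {
 *         /- The shared handle refers to a BO too small for the resource;
 *            reject it instead of creating an undersized buffer. -/
 *         radeon_bo_unref(bo);
 *         return NULL;
 *     }
 */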

static struct pb_buffer *
radeon_drm_bufmgr_create_buffer(struct pb_manager *_mgr,
                                pb_size size,
                                const struct pb_desc *desc)
{
    struct radeon_drm_bufmgr *mgr = radeon_drm_bufmgr(_mgr);
    struct radeon_libdrm_winsys *rws = mgr->rws;
    struct radeon_drm_buffer *buf;
    uint32_t domain;

    buf = CALLOC_STRUCT(radeon_drm_buffer);
    if (!buf)
        goto error1;

    pipe_reference_init(&buf->base.base.reference, 1);
    buf->base.base.alignment = desc->alignment;
    buf->base.base.usage = desc->usage;
    buf->base.base.size = size;
    buf->base.vtbl = &radeon_drm_buffer_vtbl;
    buf->mgr = mgr;

    make_empty_list(buf);

    domain =
        (desc->usage & RADEON_PB_USAGE_DOMAIN_GTT ? RADEON_GEM_DOMAIN_GTT : 0) |
        (desc->usage & RADEON_PB_USAGE_DOMAIN_VRAM ? RADEON_GEM_DOMAIN_VRAM : 0);

    buf->bo = radeon_bo_open(rws->bom, 0, size,
                             desc->alignment, domain, 0);
    if (buf->bo == NULL)
        goto error2;

    return &buf->base;

error2:
    FREE(buf);
error1:
    return NULL;
}

static void
radeon_drm_bufmgr_flush(struct pb_manager *mgr)
{
    /* NOP */
}

static void
radeon_drm_bufmgr_destroy(struct pb_manager *_mgr)
{
    struct radeon_drm_bufmgr *mgr = radeon_drm_bufmgr(_mgr);
    FREE(mgr);
}

struct pb_manager *
radeon_drm_bufmgr_create(struct radeon_libdrm_winsys *rws)
{
    struct radeon_drm_bufmgr *mgr;

    mgr = CALLOC_STRUCT(radeon_drm_bufmgr);
    if (!mgr)
        return NULL;

    mgr->base.destroy = radeon_drm_bufmgr_destroy;
    mgr->base.create_buffer = radeon_drm_bufmgr_create_buffer;
    mgr->base.flush = radeon_drm_bufmgr_flush;

    mgr->rws = rws;
    make_empty_list(&mgr->buffer_map_list);
    return &mgr->base;
}

/* Resolve a pb_buffer (possibly one sub-allocated from another manager)
 * to the underlying radeon_drm_buffer, or NULL if there is none. */
static struct radeon_drm_buffer *get_drm_buffer(struct pb_buffer *_buf)
{
    struct radeon_drm_buffer *buf = NULL;

    if (_buf->vtbl == &radeon_drm_buffer_vtbl) {
        buf = radeon_drm_buffer(_buf);
    } else {
        struct pb_buffer *base_buf;
        pb_size offset;
        pb_get_base_buffer(_buf, &base_buf, &offset);

        if (base_buf->vtbl == &radeon_drm_buffer_vtbl)
            buf = radeon_drm_buffer(base_buf);
    }

    return buf;
}

void *radeon_drm_buffer_map(struct r300_winsys_screen *ws,
                            struct r300_winsys_buffer *buf,
                            struct r300_winsys_cs *cs,
                            enum pipe_transfer_usage usage)
{
    struct pb_buffer *_buf = radeon_pb_buffer(buf);
    struct radeon_drm_buffer *rbuf = get_drm_buffer(_buf);

    if (rbuf)
        rbuf->cs = radeon_libdrm_cs(cs);

    return pb_map(_buf, get_pb_usage_from_transfer_flags(usage));
}

void radeon_drm_buffer_unmap(struct r300_winsys_screen *ws,
                             struct r300_winsys_buffer *buf)
{
    struct pb_buffer *_buf = radeon_pb_buffer(buf);

    pb_unmap(_buf);
}

boolean radeon_drm_bufmgr_get_handle(struct pb_buffer *_buf,
                                     struct winsys_handle *whandle)
{
    struct drm_gem_flink flink;
    struct radeon_drm_buffer *buf = get_drm_buffer(_buf);

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        if (!buf->flinked) {
            flink.handle = buf->bo->handle;

            if (ioctl(buf->mgr->rws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
                return FALSE;
            }

            buf->flinked = TRUE;
            buf->flink = flink.name;
        }
        whandle->handle = buf->flink;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
        whandle->handle = buf->bo->handle;
    }
    return TRUE;
}

void radeon_drm_bufmgr_get_tiling(struct r300_winsys_screen *ws,
                                  struct r300_winsys_buffer *_buf,
                                  enum r300_buffer_tiling *microtiled,
                                  enum r300_buffer_tiling *macrotiled)
{
    struct radeon_drm_buffer *buf = get_drm_buffer(radeon_pb_buffer(_buf));
    uint32_t flags = 0, pitch;

    radeon_bo_get_tiling(buf->bo, &flags, &pitch);

    *microtiled = R300_BUFFER_LINEAR;
    *macrotiled = R300_BUFFER_LINEAR;
    if (flags & RADEON_BO_FLAGS_MICRO_TILE)
        *microtiled = R300_BUFFER_TILED;

    if (flags & RADEON_BO_FLAGS_MACRO_TILE)
        *macrotiled = R300_BUFFER_TILED;
}

void radeon_drm_bufmgr_set_tiling(struct r300_winsys_screen *ws,
                                  struct r300_winsys_buffer *_buf,
                                  enum r300_buffer_tiling microtiled,
                                  enum r300_buffer_tiling macrotiled,
                                  uint32_t pitch)
{
    struct radeon_drm_buffer *buf = get_drm_buffer(radeon_pb_buffer(_buf));
    uint32_t flags = 0;
    if (microtiled == R300_BUFFER_TILED)
        flags |= RADEON_BO_FLAGS_MICRO_TILE;
    /* XXX Remove this ifdef when libdrm version 2.4.19 becomes mandatory. */
#ifdef RADEON_BO_FLAGS_MICRO_TILE_SQUARE
    else if (microtiled == R300_BUFFER_SQUARETILED)
        flags |= RADEON_BO_FLAGS_MICRO_TILE_SQUARE;
#endif
    if (macrotiled == R300_BUFFER_TILED)
        flags |= RADEON_BO_FLAGS_MACRO_TILE;

    radeon_bo_set_tiling(buf->bo, flags, pitch);
}

static uint32_t get_gem_domain(enum r300_buffer_domain domain)
{
    uint32_t res = 0;

    if (domain & R300_DOMAIN_GTT)
        res |= RADEON_GEM_DOMAIN_GTT;
    if (domain & R300_DOMAIN_VRAM)
        res |= RADEON_GEM_DOMAIN_VRAM;
    return res;
}

void radeon_drm_bufmgr_add_buffer(struct r300_winsys_cs *rcs,
                                  struct r300_winsys_buffer *_buf,
                                  enum r300_buffer_domain rd,
                                  enum r300_buffer_domain wd)
{
    struct radeon_libdrm_cs *cs = radeon_libdrm_cs(rcs);
    struct radeon_drm_buffer *buf = get_drm_buffer(radeon_pb_buffer(_buf));
    uint32_t gem_rd = get_gem_domain(rd);
    uint32_t gem_wd = get_gem_domain(wd);

    radeon_cs_space_add_persistent_bo(cs->cs, buf->bo, gem_rd, gem_wd);
}

void radeon_drm_bufmgr_write_reloc(struct r300_winsys_cs *rcs,
                                   struct r300_winsys_buffer *_buf,
                                   enum r300_buffer_domain rd,
                                   enum r300_buffer_domain wd)
{
    struct radeon_libdrm_cs *cs = radeon_libdrm_cs(rcs);
    struct radeon_drm_buffer *buf = get_drm_buffer(radeon_pb_buffer(_buf));
    int retval;
    uint32_t gem_rd = get_gem_domain(rd);
    uint32_t gem_wd = get_gem_domain(wd);

    /* Keep the libdrm CS dword count in sync with the winsys CS across the
     * relocation write. */
    cs->cs->cdw = cs->base.cdw;
    retval = radeon_cs_write_reloc(cs->cs, buf->bo, gem_rd, gem_wd, 0);
    cs->base.cdw = cs->cs->cdw;
    if (retval) {
        fprintf(stderr, "radeon: Relocation of %p (%d, %d, %d) failed!\n",
                buf, gem_rd, gem_wd, 0);
    }
}

boolean radeon_drm_bufmgr_is_buffer_referenced(struct r300_winsys_cs *rcs,
                                               struct r300_winsys_buffer *_buf,
                                               enum r300_reference_domain domain)
{
    struct radeon_libdrm_cs *cs = radeon_libdrm_cs(rcs);
    struct radeon_drm_buffer *buf = get_drm_buffer(radeon_pb_buffer(_buf));
    uint32_t tmp;

    if (domain & R300_REF_CS) {
        if (radeon_bo_is_referenced_by_cs(buf->bo, cs->cs)) {
            return TRUE;
        }
    }

    if (domain & R300_REF_HW) {
        if (radeon_bo_is_busy(buf->bo, &tmp)) {
            return TRUE;
        }
    }

    return FALSE;
}

/* Unmap every buffer on the mapped-buffer list and empty the list. */
void radeon_drm_bufmgr_flush_maps(struct pb_manager *_mgr)
{
    struct radeon_drm_bufmgr *mgr = radeon_drm_bufmgr(_mgr);
    struct radeon_drm_buffer *rpb, *t_rpb;

    foreach_s(rpb, t_rpb, &mgr->buffer_map_list) {
        radeon_bo_unmap(rpb->bo);
        rpb->bo->ptr = NULL;
        remove_from_list(rpb);
    }

    make_empty_list(&mgr->buffer_map_list);
}

void radeon_drm_bufmgr_wait(struct r300_winsys_screen *ws,
                            struct r300_winsys_buffer *_buf)
{
    struct radeon_drm_buffer *buf = get_drm_buffer(radeon_pb_buffer(_buf));

    radeon_bo_wait(buf->bo);
}