/*
 * Copyright © 2008 Jérôme Glisse
 * Copyright © 2010 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *      Marek Olšák <maraeo@gmail.com>
 *
 * Based on work from libdrm_radeon by:
 *      Aapo Tahkola <aet@rasterburn.org>
 *      Nicolai Haehnle <prefect_@gmx.net>
 *      Jérôme Glisse <glisse@freedesktop.org>
 */

/*
    This file replaces libdrm's radeon_cs_gem with our own implementation.
    It's optimized specifically for Radeon DRM.
    Reloc writes and space checking are faster and simpler than their
    counterparts in libdrm (the time complexity of all the functions
    is O(1) in nearly all scenarios, thanks to hashing).

    It works like this:

    cs_add_reloc(cs, buf, read_domain, write_domain) adds a new relocation and
    also adds the size of 'buf' to the used_gart and used_vram winsys variables
    based on the domains, which are simply or'd for the accounting purposes.
    The addition is skipped if the reloc is already present in the list, but
    any newly-referenced domains are still accounted for.

    cs_validate is then called, which just checks:
        used_vram/gart < vram/gart_size * 0.8
    The 0.8 factor allows for some memory fragmentation. If the validation
    fails, the pipe driver flushes the CS and tries the validation again,
    i.e. it validates only that one operation. If it fails again, it drops
    the operation on the floor and prints an error message to stderr.
    (done in the pipe driver)

    cs_write_reloc(cs, buf) just writes a reloc that has been added using
    cs_add_reloc. The read_domain and write_domain parameters have been removed,
    because we already specify them in cs_add_reloc.
*/
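
/* Illustrative usage sketch: an assumption about how a pipe driver might
 * drive the entry points below; 'ws', 'cs' and 'handle' are hypothetical
 * driver-side variables, not names from this file.
 *
 *    ws->cs_add_reloc(cs, handle, RADEON_USAGE_READWRITE,
 *                     RADEON_DOMAIN_VRAM, RADEON_PRIO_MIN);
 *    if (!ws->cs_validate(cs)) {
 *        ... the CS gets flushed; re-add the relocs for this one operation,
 *        validate once more, and drop the operation if that fails too ...
 *    }
 *    ... emit packets, writing each reloc with cs_write_reloc() as described
 *    in the comment above ...
 */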

#include "radeon_drm_cs.h"

#include "util/u_memory.h"
#include "os/os_time.h"

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <xf86drm.h>


#define RELOC_DWORDS (sizeof(struct drm_radeon_cs_reloc) / sizeof(uint32_t))

static struct pipe_fence_handle *
radeon_cs_create_fence(struct radeon_winsys_cs *rcs);
static void radeon_fence_reference(struct pipe_fence_handle **dst,
                                   struct pipe_fence_handle *src);

static struct radeon_winsys_ctx *radeon_drm_ctx_create(struct radeon_winsys *ws)
{
    /* No context support here. Just return the winsys pointer
     * as the "context". */
    return (struct radeon_winsys_ctx*)ws;
}

static void radeon_drm_ctx_destroy(struct radeon_winsys_ctx *ctx)
{
    /* No context support here. */
}

static boolean radeon_init_cs_context(struct radeon_cs_context *csc,
                                      struct radeon_drm_winsys *ws)
{
    int i;

    csc->fd = ws->fd;
    csc->nrelocs = 512;
    csc->relocs_bo = (struct radeon_bo**)
                     CALLOC(1, csc->nrelocs * sizeof(struct radeon_bo*));
    if (!csc->relocs_bo) {
        return FALSE;
    }

    csc->relocs = (struct drm_radeon_cs_reloc*)
                  CALLOC(1, csc->nrelocs * sizeof(struct drm_radeon_cs_reloc));
    if (!csc->relocs) {
        FREE(csc->relocs_bo);
        return FALSE;
    }

    csc->chunks[0].chunk_id = RADEON_CHUNK_ID_IB;
    csc->chunks[0].length_dw = 0;
    csc->chunks[0].chunk_data = (uint64_t)(uintptr_t)csc->buf;
    csc->chunks[1].chunk_id = RADEON_CHUNK_ID_RELOCS;
    csc->chunks[1].length_dw = 0;
    csc->chunks[1].chunk_data = (uint64_t)(uintptr_t)csc->relocs;
    csc->chunks[2].chunk_id = RADEON_CHUNK_ID_FLAGS;
    csc->chunks[2].length_dw = 2;
    csc->chunks[2].chunk_data = (uint64_t)(uintptr_t)&csc->flags;

    csc->chunk_array[0] = (uint64_t)(uintptr_t)&csc->chunks[0];
    csc->chunk_array[1] = (uint64_t)(uintptr_t)&csc->chunks[1];
    csc->chunk_array[2] = (uint64_t)(uintptr_t)&csc->chunks[2];

    csc->cs.chunks = (uint64_t)(uintptr_t)csc->chunk_array;

    for (i = 0; i < Elements(csc->reloc_indices_hashlist); i++) {
        csc->reloc_indices_hashlist[i] = -1;
    }
    return TRUE;
}
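
/* For reference: the kernel sees up to three chunks per submission.
 * Chunk 0 is the IB (the command dwords in csc->buf), chunk 1 is the
 * relocation list, and chunk 2 carries the two flag dwords in csc->flags.
 * How many of them are actually passed to the DRM_RADEON_CS ioctl is
 * decided by csc->cs.num_chunks in radeon_drm_cs_flush(). */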

static void radeon_cs_context_cleanup(struct radeon_cs_context *csc)
{
    unsigned i;

    for (i = 0; i < csc->crelocs; i++) {
        p_atomic_dec(&csc->relocs_bo[i]->num_cs_references);
        radeon_bo_reference(&csc->relocs_bo[i], NULL);
    }

    csc->crelocs = 0;
    csc->validated_crelocs = 0;
    csc->chunks[0].length_dw = 0;
    csc->chunks[1].length_dw = 0;
    csc->used_gart = 0;
    csc->used_vram = 0;

    for (i = 0; i < Elements(csc->reloc_indices_hashlist); i++) {
        csc->reloc_indices_hashlist[i] = -1;
    }
}

static void radeon_destroy_cs_context(struct radeon_cs_context *csc)
{
    radeon_cs_context_cleanup(csc);
    FREE(csc->relocs_bo);
    FREE(csc->relocs);
}


static struct radeon_winsys_cs *
radeon_drm_cs_create(struct radeon_winsys_ctx *ctx,
                     enum ring_type ring_type,
                     void (*flush)(void *ctx, unsigned flags,
                                   struct pipe_fence_handle **fence),
                     void *flush_ctx,
                     struct radeon_winsys_cs_handle *trace_buf)
{
    struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)ctx;
    struct radeon_drm_cs *cs;

    cs = CALLOC_STRUCT(radeon_drm_cs);
    if (!cs) {
        return NULL;
    }
    pipe_semaphore_init(&cs->flush_completed, 1);

    cs->ws = ws;
    cs->flush_cs = flush;
    cs->flush_data = flush_ctx;
    cs->trace_buf = (struct radeon_bo*)trace_buf;

    if (!radeon_init_cs_context(&cs->csc1, cs->ws)) {
        FREE(cs);
        return NULL;
    }
    if (!radeon_init_cs_context(&cs->csc2, cs->ws)) {
        radeon_destroy_cs_context(&cs->csc1);
        FREE(cs);
        return NULL;
    }

    /* Set the first command buffer as current. */
    cs->csc = &cs->csc1;
    cs->cst = &cs->csc2;
    cs->base.buf = cs->csc->buf;
    cs->base.ring_type = ring_type;
    cs->base.max_dw = ARRAY_SIZE(cs->csc->buf);

    p_atomic_inc(&ws->num_cs);
    return &cs->base;
}

#define OUT_CS(cs, value) (cs)->buf[(cs)->cdw++] = (value)

static inline void update_reloc(struct drm_radeon_cs_reloc *reloc,
                                enum radeon_bo_domain rd,
                                enum radeon_bo_domain wd,
                                unsigned priority,
                                enum radeon_bo_domain *added_domains)
{
    *added_domains = (rd | wd) & ~(reloc->read_domains | reloc->write_domain);

    reloc->read_domains |= rd;
    reloc->write_domain |= wd;
    reloc->flags = MAX2(reloc->flags, priority);
}
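
/* Worked example of the domain accounting above (illustrative values only):
 * an existing reloc with read_domains = RADEON_DOMAIN_GTT and
 * write_domain = 0 that is re-added with rd = wd = RADEON_DOMAIN_VRAM yields
 *
 *    *added_domains = (VRAM | VRAM) & ~(GTT | 0) = VRAM
 *
 * so the caller only charges the buffer size against used_vram; the GTT
 * share was already accounted for when the reloc was first added. */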

int radeon_get_reloc(struct radeon_cs_context *csc, struct radeon_bo *bo)
{
    unsigned hash = bo->handle & (Elements(csc->reloc_indices_hashlist)-1);
    int i = csc->reloc_indices_hashlist[hash];

    /* Not found (no reloc hashes to this slot) or found at the cached index. */
    if (i == -1 || csc->relocs_bo[i] == bo)
        return i;

    /* Hash collision, look for the BO in the list of relocs linearly. */
    for (i = csc->crelocs - 1; i >= 0; i--) {
        if (csc->relocs_bo[i] == bo) {
            /* Put this reloc in the hash list.
             * This will prevent additional hash collisions if there are
             * several consecutive get_reloc calls for the same buffer.
             *
             * Example: Assuming buffers A,B,C collide in the hash list,
             * the following sequence of relocs:
             *         AAAAAAAAAAABBBBBBBBBBBBBBCCCCCCCC
             * will collide here: ^ and here:   ^,
             * meaning that we should get very few collisions in the end. */
            csc->reloc_indices_hashlist[hash] = i;
            return i;
        }
    }
    return -1;
}

static unsigned radeon_add_reloc(struct radeon_drm_cs *cs,
                                 struct radeon_bo *bo,
                                 enum radeon_bo_usage usage,
                                 enum radeon_bo_domain domains,
                                 unsigned priority,
                                 enum radeon_bo_domain *added_domains)
{
    struct radeon_cs_context *csc = cs->csc;
    struct drm_radeon_cs_reloc *reloc;
    unsigned hash = bo->handle & (Elements(csc->reloc_indices_hashlist)-1);
    enum radeon_bo_domain rd = usage & RADEON_USAGE_READ ? domains : 0;
    enum radeon_bo_domain wd = usage & RADEON_USAGE_WRITE ? domains : 0;
    int i = -1;

    priority = MIN2(priority, 15);
    *added_domains = 0;

    i = radeon_get_reloc(csc, bo);

    if (i >= 0) {
        reloc = &csc->relocs[i];
        update_reloc(reloc, rd, wd, priority, added_domains);

        /* For async DMA, every add_reloc call must add a buffer to the list
         * no matter how many duplicates there are. This is due to the fact
         * that the DMA CS checker doesn't use NOP packets for offset patching,
         * but always uses the i-th buffer from the list to patch the i-th
         * offset. If there are N offsets in a DMA CS, there must also be N
         * buffers in the relocation list.
         *
         * This doesn't have to be done if virtual memory is enabled,
         * because there is no offset patching with virtual memory.
         */
        if (cs->base.ring_type != RING_DMA || cs->ws->info.r600_virtual_address) {
            return i;
        }
    }

    /* New relocation, check if the backing array is large enough. */
    if (csc->crelocs >= csc->nrelocs) {
        uint32_t size;
        csc->nrelocs += 10;

        size = csc->nrelocs * sizeof(struct radeon_bo*);
        csc->relocs_bo = realloc(csc->relocs_bo, size);

        size = csc->nrelocs * sizeof(struct drm_radeon_cs_reloc);
        csc->relocs = realloc(csc->relocs, size);

        csc->chunks[1].chunk_data = (uint64_t)(uintptr_t)csc->relocs;
    }

    /* Initialize the new relocation. */
    csc->relocs_bo[csc->crelocs] = NULL;
    radeon_bo_reference(&csc->relocs_bo[csc->crelocs], bo);
    p_atomic_inc(&bo->num_cs_references);
    reloc = &csc->relocs[csc->crelocs];
    reloc->handle = bo->handle;
    reloc->read_domains = rd;
    reloc->write_domain = wd;
    reloc->flags = priority;

    csc->reloc_indices_hashlist[hash] = csc->crelocs;

    csc->chunks[1].length_dw += RELOC_DWORDS;

    *added_domains = rd | wd;
    return csc->crelocs++;
}

static unsigned radeon_drm_cs_add_reloc(struct radeon_winsys_cs *rcs,
                                        struct radeon_winsys_cs_handle *buf,
                                        enum radeon_bo_usage usage,
                                        enum radeon_bo_domain domains,
                                        enum radeon_bo_priority priority)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    struct radeon_bo *bo = (struct radeon_bo*)buf;
    enum radeon_bo_domain added_domains;
    unsigned index = radeon_add_reloc(cs, bo, usage, domains, priority, &added_domains);

    if (added_domains & RADEON_DOMAIN_GTT)
        cs->csc->used_gart += bo->base.size;
    if (added_domains & RADEON_DOMAIN_VRAM)
        cs->csc->used_vram += bo->base.size;

    return index;
}

static int radeon_drm_cs_get_reloc(struct radeon_winsys_cs *rcs,
                                   struct radeon_winsys_cs_handle *buf)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);

    return radeon_get_reloc(cs->csc, (struct radeon_bo*)buf);
}

static boolean radeon_drm_cs_validate(struct radeon_winsys_cs *rcs)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    boolean status =
        cs->csc->used_gart < cs->ws->info.gart_size * 0.8 &&
        cs->csc->used_vram < cs->ws->info.vram_size * 0.8;

    if (status) {
        cs->csc->validated_crelocs = cs->csc->crelocs;
    } else {
        /* Remove the recently-added relocations. The validation failed with
         * them and the CS is about to be flushed because of that. Keep only
         * the already-validated relocations. */
        unsigned i;

        for (i = cs->csc->validated_crelocs; i < cs->csc->crelocs; i++) {
            p_atomic_dec(&cs->csc->relocs_bo[i]->num_cs_references);
            radeon_bo_reference(&cs->csc->relocs_bo[i], NULL);
        }
        cs->csc->crelocs = cs->csc->validated_crelocs;

        /* Flush if there are any relocs. Clean up otherwise. */
        if (cs->csc->crelocs) {
            cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
        } else {
            radeon_cs_context_cleanup(cs->csc);

            assert(cs->base.cdw == 0);
            if (cs->base.cdw != 0) {
                fprintf(stderr, "radeon: Unexpected error in %s.\n", __func__);
            }
        }
    }
    return status;
}

static boolean radeon_drm_cs_memory_below_limit(struct radeon_winsys_cs *rcs, uint64_t vram, uint64_t gtt)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);

    vram += cs->csc->used_vram;
    gtt += cs->csc->used_gart;

    /* Anything that goes above the VRAM size should go to GTT. */
    if (vram > cs->ws->info.vram_size)
        gtt += vram - cs->ws->info.vram_size;

    /* Now we just need to check if we have enough GTT. */
    return gtt < cs->ws->info.gart_size * 0.7;
}
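
/* Worked example for the check above (illustrative sizes, not real limits):
 * with vram_size = 1024 MB and gart_size = 512 MB, a total demand of
 * vram = 1200 MB and gtt = 100 MB spills 1200 - 1024 = 176 MB of the VRAM
 * demand to GTT, so the effective GTT demand is 276 MB, which passes because
 * 276 MB < 0.7 * 512 MB ≈ 358 MB. */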

void radeon_drm_cs_emit_ioctl_oneshot(struct radeon_drm_cs *cs, struct radeon_cs_context *csc)
{
    unsigned i;
    int r;

    r = drmCommandWriteRead(csc->fd, DRM_RADEON_CS,
                            &csc->cs, sizeof(struct drm_radeon_cs));
    if (r) {
        if (r == -ENOMEM)
            fprintf(stderr, "radeon: Not enough memory for command submission.\n");
        else if (debug_get_bool_option("RADEON_DUMP_CS", FALSE)) {
            unsigned i;

            fprintf(stderr, "radeon: The kernel rejected CS, dumping...\n");
            for (i = 0; i < csc->chunks[0].length_dw; i++) {
                fprintf(stderr, "0x%08X\n", csc->buf[i]);
            }
        } else {
            fprintf(stderr, "radeon: The kernel rejected CS, "
                            "see dmesg for more information.\n");
        }
    }

    if (cs->trace_buf) {
        radeon_dump_cs_on_lockup(cs, csc);
    }

    for (i = 0; i < csc->crelocs; i++)
        p_atomic_dec(&csc->relocs_bo[i]->num_active_ioctls);

    radeon_cs_context_cleanup(csc);
}

/*
 * Make sure any previous submission of this CS has completed.
 */
void radeon_drm_cs_sync_flush(struct radeon_winsys_cs *rcs)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);

    /* Wait for any pending ioctl of this CS to complete by acquiring and
     * immediately releasing the flush semaphore. */
    if (cs->ws->thread) {
        pipe_semaphore_wait(&cs->flush_completed);
        pipe_semaphore_signal(&cs->flush_completed);
    }
}

DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", FALSE)

static void radeon_drm_cs_flush(struct radeon_winsys_cs *rcs,
                                unsigned flags,
                                struct pipe_fence_handle **fence,
                                uint32_t cs_trace_id)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    struct radeon_cs_context *tmp;

    switch (cs->base.ring_type) {
    case RING_DMA:
        /* pad DMA ring to 8 DWs */
        if (cs->ws->info.chip_class <= SI) {
            while (rcs->cdw & 7)
                OUT_CS(&cs->base, 0xf0000000); /* NOP packet */
        } else {
            while (rcs->cdw & 7)
                OUT_CS(&cs->base, 0x00000000); /* NOP packet */
        }
        break;
    case RING_GFX:
        /* pad GFX ring to 8 DWs to meet CP fetch alignment requirements;
         * r6xx requires at least 4 DW alignment to avoid a hw bug.
         * hawaii with old firmware needs the type2 nop packet;
         * accel_working2 with value 3 indicates the new firmware.
         */
        if (cs->ws->info.chip_class <= SI ||
            (cs->ws->info.family == CHIP_HAWAII &&
             cs->ws->accel_working2 < 3)) {
            while (rcs->cdw & 7)
                OUT_CS(&cs->base, 0x80000000); /* type2 nop packet */
        } else {
            while (rcs->cdw & 7)
                OUT_CS(&cs->base, 0xffff1000); /* type3 nop packet */
        }
        break;
    case RING_UVD:
        while (rcs->cdw & 15)
            OUT_CS(&cs->base, 0x80000000); /* type2 nop packet */
        break;
    default:
        break;
    }
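
    /* Worked example of the padding above (illustrative): a GFX IB with
     * cdw == 13 gets 3 NOP dwords appended so that cdw reaches 16, the next
     * multiple of 8; UVD pads to a multiple of 16 instead. */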

    if (rcs->cdw > rcs->max_dw) {
        fprintf(stderr, "radeon: command stream overflowed\n");
    }

    if (fence) {
        radeon_fence_reference(fence, NULL);
        *fence = radeon_cs_create_fence(rcs);
    }

    radeon_drm_cs_sync_flush(rcs);

    /* Swap command streams. */
    tmp = cs->csc;
    cs->csc = cs->cst;
    cs->cst = tmp;

    cs->cst->cs_trace_id = cs_trace_id;

    /* If the CS is not empty and did not overflow, emit it in a separate thread. */
    if (cs->base.cdw && cs->base.cdw <= cs->base.max_dw && !debug_get_option_noop()) {
        unsigned i, crelocs;

        crelocs = cs->cst->crelocs;

        cs->cst->chunks[0].length_dw = cs->base.cdw;

        for (i = 0; i < crelocs; i++) {
            /* Update the number of active asynchronous CS ioctls for the buffer. */
            p_atomic_inc(&cs->cst->relocs_bo[i]->num_active_ioctls);
        }

        switch (cs->base.ring_type) {
        case RING_DMA:
            cs->cst->flags[0] = 0;
            cs->cst->flags[1] = RADEON_CS_RING_DMA;
            cs->cst->cs.num_chunks = 3;
            if (cs->ws->info.r600_virtual_address) {
                cs->cst->flags[0] |= RADEON_CS_USE_VM;
            }
            break;

        case RING_UVD:
            cs->cst->flags[0] = 0;
            cs->cst->flags[1] = RADEON_CS_RING_UVD;
            cs->cst->cs.num_chunks = 3;
            break;

        case RING_VCE:
            cs->cst->flags[0] = 0;
            cs->cst->flags[1] = RADEON_CS_RING_VCE;
            cs->cst->cs.num_chunks = 3;
            break;

        default:
        case RING_GFX:
        case RING_COMPUTE:
            cs->cst->flags[0] = 0;
            cs->cst->flags[1] = RADEON_CS_RING_GFX;
            cs->cst->cs.num_chunks = 2;
            if (flags & RADEON_FLUSH_KEEP_TILING_FLAGS) {
                cs->cst->flags[0] |= RADEON_CS_KEEP_TILING_FLAGS;
                cs->cst->cs.num_chunks = 3;
            }
            if (cs->ws->info.r600_virtual_address) {
                cs->cst->flags[0] |= RADEON_CS_USE_VM;
                cs->cst->cs.num_chunks = 3;
            }
            if (flags & RADEON_FLUSH_END_OF_FRAME) {
                cs->cst->flags[0] |= RADEON_CS_END_OF_FRAME;
                cs->cst->cs.num_chunks = 3;
            }
            if (cs->base.ring_type == RING_COMPUTE) {
                cs->cst->flags[1] = RADEON_CS_RING_COMPUTE;
                cs->cst->cs.num_chunks = 3;
            }
            break;
        }

        if (cs->ws->thread) {
            pipe_semaphore_wait(&cs->flush_completed);
            radeon_drm_ws_queue_cs(cs->ws, cs);
            if (!(flags & RADEON_FLUSH_ASYNC))
                radeon_drm_cs_sync_flush(rcs);
        } else {
            radeon_drm_cs_emit_ioctl_oneshot(cs, cs->cst);
        }
    } else {
        radeon_cs_context_cleanup(cs->cst);
    }

    /* Prepare a new CS. */
    cs->base.buf = cs->csc->buf;
    cs->base.cdw = 0;

    cs->ws->num_cs_flushes++;
}

static void radeon_drm_cs_destroy(struct radeon_winsys_cs *rcs)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);

    radeon_drm_cs_sync_flush(rcs);
    pipe_semaphore_destroy(&cs->flush_completed);
    radeon_cs_context_cleanup(&cs->csc1);
    radeon_cs_context_cleanup(&cs->csc2);
    p_atomic_dec(&cs->ws->num_cs);
    radeon_destroy_cs_context(&cs->csc1);
    radeon_destroy_cs_context(&cs->csc2);
    FREE(cs);
}

static boolean radeon_bo_is_referenced(struct radeon_winsys_cs *rcs,
                                       struct radeon_winsys_cs_handle *_buf,
                                       enum radeon_bo_usage usage)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    struct radeon_bo *bo = (struct radeon_bo*)_buf;
    int index;

    if (!bo->num_cs_references)
        return FALSE;

    index = radeon_get_reloc(cs->csc, bo);
    if (index == -1)
        return FALSE;

    if ((usage & RADEON_USAGE_WRITE) && cs->csc->relocs[index].write_domain)
        return TRUE;
    if ((usage & RADEON_USAGE_READ) && cs->csc->relocs[index].read_domains)
        return TRUE;

    return FALSE;
}

/* FENCES */

static struct pipe_fence_handle *
radeon_cs_create_fence(struct radeon_winsys_cs *rcs)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    struct pb_buffer *fence;

    /* Create a fence, which is a dummy BO. */
    fence = cs->ws->base.buffer_create(&cs->ws->base, 1, 1, TRUE,
                                       RADEON_DOMAIN_GTT, 0);
    /* Add the fence as a dummy relocation. */
    cs->ws->base.cs_add_reloc(rcs, cs->ws->base.buffer_get_cs_handle(fence),
                              RADEON_USAGE_READWRITE, RADEON_DOMAIN_GTT,
                              RADEON_PRIO_MIN);
    return (struct pipe_fence_handle*)fence;
}
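
/* The fence is just a 1-byte GTT buffer added as a relocation, so waiting on
 * it means waiting until the kernel is done with the CS that references it.
 * A minimal sketch of how a driver might consume the fence returned by
 * cs_flush() ('ws' and 'cs' are hypothetical driver-side variables):
 *
 *    struct pipe_fence_handle *fence = NULL;
 *    ws->cs_flush(cs, 0, &fence, 0);
 *    if (!ws->fence_wait(ws, fence, PIPE_TIMEOUT_INFINITE))
 *        fprintf(stderr, "fence wait timed out\n");
 *    ws->fence_reference(&fence, NULL);
 */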

static bool radeon_fence_wait(struct radeon_winsys *ws,
                              struct pipe_fence_handle *fence,
                              uint64_t timeout)
{
    struct pb_buffer *rfence = (struct pb_buffer*)fence;

    if (timeout == 0)
        return ws->buffer_wait(rfence, 0, RADEON_USAGE_READWRITE);

    if (timeout != PIPE_TIMEOUT_INFINITE) {
        int64_t start_time = os_time_get();

        /* Convert to microseconds. */
        timeout /= 1000;

        /* Wait in a loop. */
        while (!ws->buffer_wait(rfence, 0, RADEON_USAGE_READWRITE)) {
            if (os_time_get() - start_time >= timeout) {
                return FALSE;
            }
            os_time_sleep(10);
        }
        return TRUE;
    }

    ws->buffer_wait(rfence, PIPE_TIMEOUT_INFINITE, RADEON_USAGE_READWRITE);
    return TRUE;
}

static void radeon_fence_reference(struct pipe_fence_handle **dst,
                                   struct pipe_fence_handle *src)
{
    pb_reference((struct pb_buffer**)dst, (struct pb_buffer*)src);
}

void radeon_drm_cs_init_functions(struct radeon_drm_winsys *ws)
{
    ws->base.ctx_create = radeon_drm_ctx_create;
    ws->base.ctx_destroy = radeon_drm_ctx_destroy;
    ws->base.cs_create = radeon_drm_cs_create;
    ws->base.cs_destroy = radeon_drm_cs_destroy;
    ws->base.cs_add_reloc = radeon_drm_cs_add_reloc;
    ws->base.cs_get_reloc = radeon_drm_cs_get_reloc;
    ws->base.cs_validate = radeon_drm_cs_validate;
    ws->base.cs_memory_below_limit = radeon_drm_cs_memory_below_limit;
    ws->base.cs_flush = radeon_drm_cs_flush;
    ws->base.cs_is_buffer_referenced = radeon_bo_is_referenced;
    ws->base.cs_sync_flush = radeon_drm_cs_sync_flush;
    ws->base.fence_wait = radeon_fence_wait;
    ws->base.fence_reference = radeon_fence_reference;
}