gallium/util: replace pipe_mutex_lock() with mtx_lock()
src/gallium/winsys/radeon/drm/radeon_drm_cs.c (mesa.git)
/*
 * Copyright © 2008 Jérôme Glisse
 * Copyright © 2010 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *      Marek Olšák <maraeo@gmail.com>
 *
 * Based on work from libdrm_radeon by:
 *      Aapo Tahkola <aet@rasterburn.org>
 *      Nicolai Haehnle <prefect_@gmx.net>
 *      Jérôme Glisse <glisse@freedesktop.org>
 */

/*
    This file replaces libdrm's radeon_cs_gem with our own implementation.
    It's optimized specifically for Radeon DRM.
    Adding buffers and space checking are faster and simpler than their
    counterparts in libdrm (the time complexity of all the functions
    is O(1) in nearly all scenarios, thanks to hashing).

    It works like this:

    cs_add_buffer(cs, buf, read_domain, write_domain) adds a new relocation and
    also adds the size of 'buf' to the used_gart and used_vram winsys variables
    based on the domains, which are simply OR'd for accounting purposes.
    The adding is skipped if the reloc is already present in the list, but it
    still accounts for any newly-referenced domains.

    cs_validate is then called, which just checks:
        used_vram/gart < vram/gart_size * 0.8
    The 0.8 number allows for some memory fragmentation. If the validation
    fails, the pipe driver flushes the CS and tries to do the validation again,
    i.e. it validates only that one operation. If it fails again, it drops
    the operation on the floor and prints some nasty message to stderr.
    (done in the pipe driver)

    cs_write_reloc(cs, buf) just writes a reloc that has been added using
    cs_add_buffer. The read_domain and write_domain parameters have been removed,
    because we already specify them in cs_add_buffer.
*/
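
/*
    Illustrative sketch only (not taken from the driver code): how a pipe
    driver typically uses this interface. 'buf', 'priority' and the flush
    flags are placeholders; 'priority' is one of the radeon_bo_priority
    values.

       unsigned reloc = ws->cs_add_buffer(cs, buf, RADEON_USAGE_READWRITE,
                                          RADEON_DOMAIN_VRAM, priority);
       if (!ws->cs_validate(cs)) {
          ...the CS is flushed, the buffers for this one operation are
             added again, and validation is retried as described above...
       }
       ...emit packets that use the relocation index 'reloc'...
       ws->cs_flush(cs, 0, &fence);
*/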

#include "radeon_drm_cs.h"

#include "util/u_memory.h"
#include "os/os_time.h"

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <xf86drm.h>


#define RELOC_DWORDS (sizeof(struct drm_radeon_cs_reloc) / sizeof(uint32_t))

static struct pipe_fence_handle *
radeon_cs_create_fence(struct radeon_winsys_cs *rcs);
static void radeon_fence_reference(struct pipe_fence_handle **dst,
                                   struct pipe_fence_handle *src);

static struct radeon_winsys_ctx *radeon_drm_ctx_create(struct radeon_winsys *ws)
{
    /* No context support here. Just return the winsys pointer
     * as the "context". */
    return (struct radeon_winsys_ctx*)ws;
}

static void radeon_drm_ctx_destroy(struct radeon_winsys_ctx *ctx)
{
    /* No context support here. */
}

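/* Set up the three CS chunks (IB, relocations, flags) that are passed to the
 * CS ioctl, and clear the relocation hash table. */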
static bool radeon_init_cs_context(struct radeon_cs_context *csc,
                                   struct radeon_drm_winsys *ws)
{
    int i;

    csc->fd = ws->fd;

    csc->chunks[0].chunk_id = RADEON_CHUNK_ID_IB;
    csc->chunks[0].length_dw = 0;
    csc->chunks[0].chunk_data = (uint64_t)(uintptr_t)csc->buf;
    csc->chunks[1].chunk_id = RADEON_CHUNK_ID_RELOCS;
    csc->chunks[1].length_dw = 0;
    csc->chunks[1].chunk_data = (uint64_t)(uintptr_t)csc->relocs;
    csc->chunks[2].chunk_id = RADEON_CHUNK_ID_FLAGS;
    csc->chunks[2].length_dw = 2;
    csc->chunks[2].chunk_data = (uint64_t)(uintptr_t)&csc->flags;

    csc->chunk_array[0] = (uint64_t)(uintptr_t)&csc->chunks[0];
    csc->chunk_array[1] = (uint64_t)(uintptr_t)&csc->chunks[1];
    csc->chunk_array[2] = (uint64_t)(uintptr_t)&csc->chunks[2];

    csc->cs.chunks = (uint64_t)(uintptr_t)csc->chunk_array;

    for (i = 0; i < ARRAY_SIZE(csc->reloc_indices_hashlist); i++) {
        csc->reloc_indices_hashlist[i] = -1;
    }
    return true;
}

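/* Drop all buffer references held by the context and reset it, so it can be
 * reused for the next IB. */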
static void radeon_cs_context_cleanup(struct radeon_cs_context *csc)
{
    unsigned i;

    for (i = 0; i < csc->num_relocs; i++) {
        p_atomic_dec(&csc->relocs_bo[i].bo->num_cs_references);
        radeon_bo_reference(&csc->relocs_bo[i].bo, NULL);
    }
    for (i = 0; i < csc->num_slab_buffers; ++i) {
        p_atomic_dec(&csc->slab_buffers[i].bo->num_cs_references);
        radeon_bo_reference(&csc->slab_buffers[i].bo, NULL);
    }

    csc->num_relocs = 0;
    csc->num_validated_relocs = 0;
    csc->num_slab_buffers = 0;
    csc->chunks[0].length_dw = 0;
    csc->chunks[1].length_dw = 0;

    for (i = 0; i < ARRAY_SIZE(csc->reloc_indices_hashlist); i++) {
        csc->reloc_indices_hashlist[i] = -1;
    }
}

static void radeon_destroy_cs_context(struct radeon_cs_context *csc)
{
    radeon_cs_context_cleanup(csc);
    FREE(csc->slab_buffers);
    FREE(csc->relocs_bo);
    FREE(csc->relocs);
}


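/* Create a command stream. Two CS contexts are allocated, so one can be
 * filled by the driver while the other one is being submitted from the
 * flush thread. */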
static struct radeon_winsys_cs *
radeon_drm_cs_create(struct radeon_winsys_ctx *ctx,
                     enum ring_type ring_type,
                     void (*flush)(void *ctx, unsigned flags,
                                   struct pipe_fence_handle **fence),
                     void *flush_ctx)
{
    struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)ctx;
    struct radeon_drm_cs *cs;

    cs = CALLOC_STRUCT(radeon_drm_cs);
    if (!cs) {
        return NULL;
    }
    util_queue_fence_init(&cs->flush_completed);

    cs->ws = ws;
    cs->flush_cs = flush;
    cs->flush_data = flush_ctx;

    if (!radeon_init_cs_context(&cs->csc1, cs->ws)) {
        FREE(cs);
        return NULL;
    }
    if (!radeon_init_cs_context(&cs->csc2, cs->ws)) {
        radeon_destroy_cs_context(&cs->csc1);
        FREE(cs);
        return NULL;
    }

    /* Set the first command buffer as current. */
    cs->csc = &cs->csc1;
    cs->cst = &cs->csc2;
    cs->base.current.buf = cs->csc->buf;
    cs->base.current.max_dw = ARRAY_SIZE(cs->csc->buf);
    cs->ring_type = ring_type;

    p_atomic_inc(&ws->num_cs);
    return &cs->base;
}

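/* Return the index of the buffer in the relocation list, or -1 if it isn't
 * there. Real buffers and slab (sub-allocated) buffers are tracked in
 * separate lists. */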
int radeon_lookup_buffer(struct radeon_cs_context *csc, struct radeon_bo *bo)
{
    unsigned hash = bo->hash & (ARRAY_SIZE(csc->reloc_indices_hashlist)-1);
    struct radeon_bo_item *buffers;
    unsigned num_buffers;
    int i = csc->reloc_indices_hashlist[hash];

    if (bo->handle) {
        buffers = csc->relocs_bo;
        num_buffers = csc->num_relocs;
    } else {
        buffers = csc->slab_buffers;
        num_buffers = csc->num_slab_buffers;
    }

    /* Either the hash entry is empty (not found) or it points at this BO (found). */
    if (i == -1 || (i < num_buffers && buffers[i].bo == bo))
        return i;

    /* Hash collision, look for the BO in the list of relocs linearly. */
    for (i = num_buffers - 1; i >= 0; i--) {
        if (buffers[i].bo == bo) {
            /* Put this reloc in the hash list.
             * This will prevent additional hash collisions if there are
             * several consecutive lookup_buffer calls for the same buffer.
             *
             * Example: Assuming buffers A,B,C collide in the hash list,
             * the following sequence of relocs:
             *         AAAAAAAAAAABBBBBBBBBBBBBBCCCCCCCC
             * will collide here: ^ and here:   ^,
             * meaning that we should get very few collisions in the end. */
            csc->reloc_indices_hashlist[hash] = i;
            return i;
        }
    }
    return -1;
}

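/* Add a real (kernel-visible) buffer to the relocation list, growing the
 * backing arrays if needed, and return its index. */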
static unsigned radeon_lookup_or_add_real_buffer(struct radeon_drm_cs *cs,
                                                 struct radeon_bo *bo)
{
    struct radeon_cs_context *csc = cs->csc;
    struct drm_radeon_cs_reloc *reloc;
    unsigned hash = bo->hash & (ARRAY_SIZE(csc->reloc_indices_hashlist)-1);
    int i = -1;

    i = radeon_lookup_buffer(csc, bo);

    if (i >= 0) {
        /* For async DMA, every add_buffer call must add a buffer to the list
         * no matter how many duplicates there are. This is due to the fact
         * that the DMA CS checker doesn't use NOP packets for offset patching,
         * but always uses the i-th buffer from the list to patch the i-th
         * offset. If there are N offsets in a DMA CS, there must also be N
         * buffers in the relocation list.
         *
         * This doesn't have to be done if virtual memory is enabled,
         * because there is no offset patching with virtual memory.
         */
        if (cs->ring_type != RING_DMA || cs->ws->info.has_virtual_memory) {
            return i;
        }
    }

    /* New relocation, check if the backing array is large enough. */
    if (csc->num_relocs >= csc->max_relocs) {
        uint32_t size;
        csc->max_relocs = MAX2(csc->max_relocs + 16, (unsigned)(csc->max_relocs * 1.3));

        size = csc->max_relocs * sizeof(csc->relocs_bo[0]);
        csc->relocs_bo = realloc(csc->relocs_bo, size);

        size = csc->max_relocs * sizeof(struct drm_radeon_cs_reloc);
        csc->relocs = realloc(csc->relocs, size);

        csc->chunks[1].chunk_data = (uint64_t)(uintptr_t)csc->relocs;
    }

    /* Initialize the new relocation. */
    csc->relocs_bo[csc->num_relocs].bo = NULL;
    csc->relocs_bo[csc->num_relocs].u.real.priority_usage = 0;
    radeon_bo_reference(&csc->relocs_bo[csc->num_relocs].bo, bo);
    p_atomic_inc(&bo->num_cs_references);
    reloc = &csc->relocs[csc->num_relocs];
    reloc->handle = bo->handle;
    reloc->read_domains = 0;
    reloc->write_domain = 0;
    reloc->flags = 0;

    csc->reloc_indices_hashlist[hash] = csc->num_relocs;

    csc->chunks[1].length_dw += RELOC_DWORDS;

    return csc->num_relocs++;
}

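/* Add a slab (sub-allocated) buffer. The backing real buffer is added as
 * well; its index in the relocation list is stored in u.slab.real_idx. */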
static int radeon_lookup_or_add_slab_buffer(struct radeon_drm_cs *cs,
                                            struct radeon_bo *bo)
{
    struct radeon_cs_context *csc = cs->csc;
    unsigned hash;
    struct radeon_bo_item *item;
    int idx;
    int real_idx;

    idx = radeon_lookup_buffer(csc, bo);
    if (idx >= 0)
        return idx;

    real_idx = radeon_lookup_or_add_real_buffer(cs, bo->u.slab.real);

    /* Check if the backing array is large enough. */
    if (csc->num_slab_buffers >= csc->max_slab_buffers) {
        unsigned new_max = MAX2(csc->max_slab_buffers + 16,
                                (unsigned)(csc->max_slab_buffers * 1.3));
        struct radeon_bo_item *new_buffers =
            REALLOC(csc->slab_buffers,
                    csc->max_slab_buffers * sizeof(*new_buffers),
                    new_max * sizeof(*new_buffers));
        if (!new_buffers) {
            fprintf(stderr, "radeon_lookup_or_add_slab_buffer: allocation failure\n");
            return -1;
        }

        csc->max_slab_buffers = new_max;
        csc->slab_buffers = new_buffers;
    }

    /* Initialize the new relocation. */
    idx = csc->num_slab_buffers++;
    item = &csc->slab_buffers[idx];

    item->bo = NULL;
    item->u.slab.real_idx = real_idx;
    radeon_bo_reference(&item->bo, bo);
    p_atomic_inc(&bo->num_cs_references);

    hash = bo->hash & (ARRAY_SIZE(csc->reloc_indices_hashlist)-1);
    csc->reloc_indices_hashlist[hash] = idx;

    return idx;
}

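/* Add a buffer to the CS and account its size to used_vram/used_gart for any
 * domains that haven't been seen for this relocation yet. */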
static unsigned radeon_drm_cs_add_buffer(struct radeon_winsys_cs *rcs,
                                         struct pb_buffer *buf,
                                         enum radeon_bo_usage usage,
                                         enum radeon_bo_domain domains,
                                         enum radeon_bo_priority priority)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    struct radeon_bo *bo = (struct radeon_bo*)buf;
    enum radeon_bo_domain added_domains;
    enum radeon_bo_domain rd = usage & RADEON_USAGE_READ ? domains : 0;
    enum radeon_bo_domain wd = usage & RADEON_USAGE_WRITE ? domains : 0;
    struct drm_radeon_cs_reloc *reloc;
    int index;

    if (!bo->handle) {
        index = radeon_lookup_or_add_slab_buffer(cs, bo);
        if (index < 0)
            return 0;

        index = cs->csc->slab_buffers[index].u.slab.real_idx;
    } else {
        index = radeon_lookup_or_add_real_buffer(cs, bo);
    }

    reloc = &cs->csc->relocs[index];
    added_domains = (rd | wd) & ~(reloc->read_domains | reloc->write_domain);
    reloc->read_domains |= rd;
    reloc->write_domain |= wd;
    reloc->flags = MAX2(reloc->flags, priority);
    cs->csc->relocs_bo[index].u.real.priority_usage |= 1llu << priority;

    if (added_domains & RADEON_DOMAIN_VRAM)
        cs->base.used_vram += bo->base.size;
    else if (added_domains & RADEON_DOMAIN_GTT)
        cs->base.used_gart += bo->base.size;

    return index;
}

static int radeon_drm_cs_lookup_buffer(struct radeon_winsys_cs *rcs,
                                       struct pb_buffer *buf)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);

    return radeon_lookup_buffer(cs->csc, (struct radeon_bo*)buf);
}

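/* Apply the 80% heuristic described at the top of the file. On failure, drop
 * the buffers added since the last successful validation and flush if any
 * validated relocations remain. */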
static bool radeon_drm_cs_validate(struct radeon_winsys_cs *rcs)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    bool status =
        cs->base.used_gart < cs->ws->info.gart_size * 0.8 &&
        cs->base.used_vram < cs->ws->info.vram_size * 0.8;

    if (status) {
        cs->csc->num_validated_relocs = cs->csc->num_relocs;
    } else {
        /* Remove recently-added buffers. The validation failed with them
         * and the CS is about to be flushed because of that. Keep only
         * the already-validated buffers. */
        unsigned i;

        for (i = cs->csc->num_validated_relocs; i < cs->csc->num_relocs; i++) {
            p_atomic_dec(&cs->csc->relocs_bo[i].bo->num_cs_references);
            radeon_bo_reference(&cs->csc->relocs_bo[i].bo, NULL);
        }
        cs->csc->num_relocs = cs->csc->num_validated_relocs;

        /* Flush if there are any relocs. Clean up otherwise. */
        if (cs->csc->num_relocs) {
            cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
        } else {
            radeon_cs_context_cleanup(cs->csc);
            cs->base.used_vram = 0;
            cs->base.used_gart = 0;

            assert(cs->base.current.cdw == 0);
            if (cs->base.current.cdw != 0) {
                fprintf(stderr, "radeon: Unexpected error in %s.\n", __func__);
            }
        }
    }
    return status;
}

static bool radeon_drm_cs_check_space(struct radeon_winsys_cs *rcs, unsigned dw)
{
    assert(rcs->current.cdw <= rcs->current.max_dw);
    return rcs->current.max_dw - rcs->current.cdw >= dw;
}

static unsigned radeon_drm_cs_get_buffer_list(struct radeon_winsys_cs *rcs,
                                              struct radeon_bo_list_item *list)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    int i;

    if (list) {
        for (i = 0; i < cs->csc->num_relocs; i++) {
            list[i].bo_size = cs->csc->relocs_bo[i].bo->base.size;
            list[i].vm_address = cs->csc->relocs_bo[i].bo->va;
            list[i].priority_usage = cs->csc->relocs_bo[i].u.real.priority_usage;
        }
    }
    return cs->csc->num_relocs;
}

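/* Submit cs->cst to the kernel. Runs either directly or on the winsys CS
 * queue thread; afterwards the buffers' num_active_ioctls counters are
 * decremented and the context is cleaned up. */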
void radeon_drm_cs_emit_ioctl_oneshot(void *job, int thread_index)
{
    struct radeon_cs_context *csc = ((struct radeon_drm_cs*)job)->cst;
    unsigned i;
    int r;

    r = drmCommandWriteRead(csc->fd, DRM_RADEON_CS,
                            &csc->cs, sizeof(struct drm_radeon_cs));
    if (r) {
        if (r == -ENOMEM)
            fprintf(stderr, "radeon: Not enough memory for command submission.\n");
        else if (debug_get_bool_option("RADEON_DUMP_CS", false)) {
            unsigned i;

            fprintf(stderr, "radeon: The kernel rejected CS, dumping...\n");
            for (i = 0; i < csc->chunks[0].length_dw; i++) {
                fprintf(stderr, "0x%08X\n", csc->buf[i]);
            }
        } else {
            fprintf(stderr, "radeon: The kernel rejected CS, "
                    "see dmesg for more information (%i).\n", r);
        }
    }

    for (i = 0; i < csc->num_relocs; i++)
        p_atomic_dec(&csc->relocs_bo[i].bo->num_active_ioctls);
    for (i = 0; i < csc->num_slab_buffers; i++)
        p_atomic_dec(&csc->slab_buffers[i].bo->num_active_ioctls);

    radeon_cs_context_cleanup(csc);
}

/*
 * Make sure previous submissions of this CS have completed.
 */
void radeon_drm_cs_sync_flush(struct radeon_winsys_cs *rcs)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);

    /* Wait for any pending ioctl of this CS to complete. */
    if (util_queue_is_initialized(&cs->ws->cs_queue))
        util_queue_fence_wait(&cs->flush_completed);
}

/* Add the given fence to a slab buffer fence list.
 *
 * There is a potential race condition when a buffer participates in
 * submissions on two or more threads simultaneously. Since we do not know
 * which of the submissions will be sent to the GPU first, we have to keep
 * the fences of all submissions.
 *
 * However, fences that belong to submissions that have already returned from
 * their respective ioctl do not have to be kept, because we know that they
 * will signal earlier.
 */
static void radeon_bo_slab_fence(struct radeon_bo *bo, struct radeon_bo *fence)
{
    unsigned dst;

    assert(fence->num_cs_references);

    /* Cleanup older fences */
    dst = 0;
    for (unsigned src = 0; src < bo->u.slab.num_fences; ++src) {
        if (bo->u.slab.fences[src]->num_cs_references) {
            bo->u.slab.fences[dst] = bo->u.slab.fences[src];
            dst++;
        } else {
            radeon_bo_reference(&bo->u.slab.fences[src], NULL);
        }
    }
    bo->u.slab.num_fences = dst;

    /* Check available space for the new fence */
    if (bo->u.slab.num_fences >= bo->u.slab.max_fences) {
        unsigned new_max_fences = bo->u.slab.max_fences + 1;
        struct radeon_bo **new_fences = REALLOC(bo->u.slab.fences,
                                                bo->u.slab.max_fences * sizeof(*new_fences),
                                                new_max_fences * sizeof(*new_fences));
        if (!new_fences) {
            fprintf(stderr, "radeon_bo_slab_fence: allocation failure, dropping fence\n");
            return;
        }

        bo->u.slab.fences = new_fences;
        bo->u.slab.max_fences = new_max_fences;
    }

    /* Add the new fence */
    bo->u.slab.fences[bo->u.slab.num_fences] = NULL;
    radeon_bo_reference(&bo->u.slab.fences[bo->u.slab.num_fences], fence);
    bo->u.slab.num_fences++;
}

DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", false)

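/* Flush the current CS: pad the IB to the ring's required alignment, take
 * care of the fence, swap the csc/cst contexts and submit the ioctl,
 * asynchronously when a CS queue thread is available. */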
static int radeon_drm_cs_flush(struct radeon_winsys_cs *rcs,
                               unsigned flags,
                               struct pipe_fence_handle **pfence)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    struct radeon_cs_context *tmp;

    switch (cs->ring_type) {
    case RING_DMA:
        /* pad DMA ring to 8 DWs */
        if (cs->ws->info.chip_class <= SI) {
            while (rcs->current.cdw & 7)
                radeon_emit(&cs->base, 0xf0000000); /* NOP packet */
        } else {
            while (rcs->current.cdw & 7)
                radeon_emit(&cs->base, 0x00000000); /* NOP packet */
        }
        break;
    case RING_GFX:
        /* pad GFX ring to 8 DWs to meet CP fetch alignment requirements;
         * r6xx requires at least 4 DW alignment to avoid a hw bug.
         */
        if (cs->ws->info.gfx_ib_pad_with_type2) {
            while (rcs->current.cdw & 7)
                radeon_emit(&cs->base, 0x80000000); /* type2 nop packet */
        } else {
            while (rcs->current.cdw & 7)
                radeon_emit(&cs->base, 0xffff1000); /* type3 nop packet */
        }
        break;
    case RING_UVD:
        while (rcs->current.cdw & 15)
            radeon_emit(&cs->base, 0x80000000); /* type2 nop packet */
        break;
    default:
        break;
    }

    if (rcs->current.cdw > rcs->current.max_dw) {
        fprintf(stderr, "radeon: command stream overflowed\n");
    }

    if (pfence || cs->csc->num_slab_buffers) {
        struct pipe_fence_handle *fence;

        if (cs->next_fence) {
            fence = cs->next_fence;
            cs->next_fence = NULL;
        } else {
            fence = radeon_cs_create_fence(rcs);
        }

        if (pfence)
            radeon_fence_reference(pfence, fence);

        mtx_lock(&cs->ws->bo_fence_lock);
        for (unsigned i = 0; i < cs->csc->num_slab_buffers; ++i) {
            struct radeon_bo *bo = cs->csc->slab_buffers[i].bo;
            p_atomic_inc(&bo->num_active_ioctls);
            radeon_bo_slab_fence(bo, (struct radeon_bo *)fence);
        }
        pipe_mutex_unlock(cs->ws->bo_fence_lock);

        radeon_fence_reference(&fence, NULL);
    } else {
        radeon_fence_reference(&cs->next_fence, NULL);
    }

    radeon_drm_cs_sync_flush(rcs);

    /* Swap command streams. */
    tmp = cs->csc;
    cs->csc = cs->cst;
    cs->cst = tmp;

    /* If the CS is not empty or overflowed, emit it in a separate thread. */
    if (cs->base.current.cdw && cs->base.current.cdw <= cs->base.current.max_dw && !debug_get_option_noop()) {
        unsigned i, num_relocs;

        num_relocs = cs->cst->num_relocs;

        cs->cst->chunks[0].length_dw = cs->base.current.cdw;

        for (i = 0; i < num_relocs; i++) {
            /* Update the number of active asynchronous CS ioctls for the buffer. */
            p_atomic_inc(&cs->cst->relocs_bo[i].bo->num_active_ioctls);
        }

        switch (cs->ring_type) {
        case RING_DMA:
            cs->cst->flags[0] = 0;
            cs->cst->flags[1] = RADEON_CS_RING_DMA;
            cs->cst->cs.num_chunks = 3;
            if (cs->ws->info.has_virtual_memory) {
                cs->cst->flags[0] |= RADEON_CS_USE_VM;
            }
            break;

        case RING_UVD:
            cs->cst->flags[0] = 0;
            cs->cst->flags[1] = RADEON_CS_RING_UVD;
            cs->cst->cs.num_chunks = 3;
            break;

        case RING_VCE:
            cs->cst->flags[0] = 0;
            cs->cst->flags[1] = RADEON_CS_RING_VCE;
            cs->cst->cs.num_chunks = 3;
            break;

        default:
        case RING_GFX:
        case RING_COMPUTE:
            cs->cst->flags[0] = RADEON_CS_KEEP_TILING_FLAGS;
            cs->cst->flags[1] = RADEON_CS_RING_GFX;
            cs->cst->cs.num_chunks = 3;

            if (cs->ws->info.has_virtual_memory) {
                cs->cst->flags[0] |= RADEON_CS_USE_VM;
                cs->cst->cs.num_chunks = 3;
            }
            if (flags & RADEON_FLUSH_END_OF_FRAME) {
                cs->cst->flags[0] |= RADEON_CS_END_OF_FRAME;
                cs->cst->cs.num_chunks = 3;
            }
            if (cs->ring_type == RING_COMPUTE) {
                cs->cst->flags[1] = RADEON_CS_RING_COMPUTE;
                cs->cst->cs.num_chunks = 3;
            }
            break;
        }

        if (util_queue_is_initialized(&cs->ws->cs_queue)) {
            util_queue_add_job(&cs->ws->cs_queue, cs, &cs->flush_completed,
                               radeon_drm_cs_emit_ioctl_oneshot, NULL);
            if (!(flags & RADEON_FLUSH_ASYNC))
                radeon_drm_cs_sync_flush(rcs);
        } else {
            radeon_drm_cs_emit_ioctl_oneshot(cs, 0);
        }
    } else {
        radeon_cs_context_cleanup(cs->cst);
    }

    /* Prepare a new CS. */
    cs->base.current.buf = cs->csc->buf;
    cs->base.current.cdw = 0;
    cs->base.used_vram = 0;
    cs->base.used_gart = 0;

    if (cs->ring_type == RING_GFX)
        cs->ws->num_gfx_IBs++;
    else if (cs->ring_type == RING_DMA)
        cs->ws->num_sdma_IBs++;
    return 0;
}

static void radeon_drm_cs_destroy(struct radeon_winsys_cs *rcs)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);

    radeon_drm_cs_sync_flush(rcs);
    util_queue_fence_destroy(&cs->flush_completed);
    radeon_cs_context_cleanup(&cs->csc1);
    radeon_cs_context_cleanup(&cs->csc2);
    p_atomic_dec(&cs->ws->num_cs);
    radeon_destroy_cs_context(&cs->csc1);
    radeon_destroy_cs_context(&cs->csc2);
    radeon_fence_reference(&cs->next_fence, NULL);
    FREE(cs);
}

static bool radeon_bo_is_referenced(struct radeon_winsys_cs *rcs,
                                    struct pb_buffer *_buf,
                                    enum radeon_bo_usage usage)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    struct radeon_bo *bo = (struct radeon_bo*)_buf;
    int index;

    if (!bo->num_cs_references)
        return false;

    index = radeon_lookup_buffer(cs->csc, bo);
    if (index == -1)
        return false;

    if (!bo->handle)
        index = cs->csc->slab_buffers[index].u.slab.real_idx;

    if ((usage & RADEON_USAGE_WRITE) && cs->csc->relocs[index].write_domain)
        return true;
    if ((usage & RADEON_USAGE_READ) && cs->csc->relocs[index].read_domains)
        return true;

    return false;
}

/* FENCES */

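/* A fence is a dummy 1-byte GTT buffer added to the CS as a relocation;
 * waiting on the fence is implemented as waiting for that buffer to become
 * idle. */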
static struct pipe_fence_handle *
radeon_cs_create_fence(struct radeon_winsys_cs *rcs)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    struct pb_buffer *fence;

    /* Create a fence, which is a dummy BO. */
    fence = cs->ws->base.buffer_create(&cs->ws->base, 1, 1,
                                       RADEON_DOMAIN_GTT, RADEON_FLAG_HANDLE);
    /* Add the fence as a dummy relocation. */
    cs->ws->base.cs_add_buffer(rcs, fence,
                               RADEON_USAGE_READWRITE, RADEON_DOMAIN_GTT,
                               RADEON_PRIO_FENCE);
    return (struct pipe_fence_handle*)fence;
}

static bool radeon_fence_wait(struct radeon_winsys *ws,
                              struct pipe_fence_handle *fence,
                              uint64_t timeout)
{
    return ws->buffer_wait((struct pb_buffer*)fence, timeout,
                           RADEON_USAGE_READWRITE);
}

static void radeon_fence_reference(struct pipe_fence_handle **dst,
                                   struct pipe_fence_handle *src)
{
    pb_reference((struct pb_buffer**)dst, (struct pb_buffer*)src);
}

static struct pipe_fence_handle *
radeon_drm_cs_get_next_fence(struct radeon_winsys_cs *rcs)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    struct pipe_fence_handle *fence = NULL;

    if (cs->next_fence) {
        radeon_fence_reference(&fence, cs->next_fence);
        return fence;
    }

    fence = radeon_cs_create_fence(rcs);
    if (!fence)
        return NULL;

    radeon_fence_reference(&cs->next_fence, fence);
    return fence;
}

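/* Plug the CS and fence functions into the winsys vtable. */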
void radeon_drm_cs_init_functions(struct radeon_drm_winsys *ws)
{
    ws->base.ctx_create = radeon_drm_ctx_create;
    ws->base.ctx_destroy = radeon_drm_ctx_destroy;
    ws->base.cs_create = radeon_drm_cs_create;
    ws->base.cs_destroy = radeon_drm_cs_destroy;
    ws->base.cs_add_buffer = radeon_drm_cs_add_buffer;
    ws->base.cs_lookup_buffer = radeon_drm_cs_lookup_buffer;
    ws->base.cs_validate = radeon_drm_cs_validate;
    ws->base.cs_check_space = radeon_drm_cs_check_space;
    ws->base.cs_get_buffer_list = radeon_drm_cs_get_buffer_list;
    ws->base.cs_flush = radeon_drm_cs_flush;
    ws->base.cs_get_next_fence = radeon_drm_cs_get_next_fence;
    ws->base.cs_is_buffer_referenced = radeon_bo_is_referenced;
    ws->base.cs_sync_flush = radeon_drm_cs_sync_flush;
    ws->base.fence_wait = radeon_fence_wait;
    ws->base.fence_reference = radeon_fence_reference;
}