/*
 * Copyright © 2008 Jérôme Glisse
 * Copyright © 2010 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *      Marek Olšák <maraeo@gmail.com>
 *
 * Based on work from libdrm_radeon by:
 *      Aapo Tahkola <aet@rasterburn.org>
 *      Nicolai Haehnle <prefect_@gmx.net>
 *      Jérôme Glisse <glisse@freedesktop.org>
 */
/*
    This file replaces libdrm's radeon_cs_gem with our own implementation.
    It's optimized specifically for Radeon DRM.
    Adding buffers and space checking are faster and simpler than their
    counterparts in libdrm (the time complexity of all the functions
    is O(1) in nearly all scenarios, thanks to hashing).

    It works like this:

    cs_add_buffer(cs, buf, read_domain, write_domain) adds a new relocation and
    also adds the size of 'buf' to the used_gart and used_vram winsys variables
    based on the domains, which are simply or'd for the accounting purposes.
    The adding is skipped if the reloc is already present in the list, but it
    still accounts any newly-referenced domains.

    cs_validate is then called, which just checks:
        used_vram/gart < vram/gart_size * 0.8
    The 0.8 factor allows for some memory fragmentation. If the validation
    fails, the pipe driver flushes the CS and tries the validation again,
    i.e. it validates only that one operation. If it fails again, it drops
    the operation on the floor and prints a nasty message to stderr.
    (done in the pipe driver)

    cs_write_reloc(cs, buf) just writes a reloc that has been added using
    cs_add_buffer. The read_domain and write_domain parameters have been removed,
    because we already specify them in cs_add_buffer.
*/
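/*
    A minimal sketch of that sequence as a pipe driver might issue it
    ('ws' is the struct radeon_winsys and 'buf' an already-created buffer;
    illustrative only, error handling omitted):

        ws->cs_add_buffer(cs, buf, RADEON_USAGE_READWRITE,
                          RADEON_DOMAIN_VRAM, 0);
        if (!ws->cs_validate(cs)) {
            ws->cs_flush(cs, RADEON_FLUSH_ASYNC, NULL);
            ws->cs_add_buffer(cs, buf, RADEON_USAGE_READWRITE,
                              RADEON_DOMAIN_VRAM, 0);
            if (!ws->cs_validate(cs))
                fprintf(stderr, "radeon: dropping the operation\n");
        }
*/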
#include "radeon_drm_cs.h"

#include "util/u_memory.h"
#include "os/os_time.h"

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <xf86drm.h>

#define RELOC_DWORDS (sizeof(struct drm_radeon_cs_reloc) / sizeof(uint32_t))
static struct pipe_fence_handle *
radeon_cs_create_fence(struct radeon_winsys_cs *rcs);
static void radeon_fence_reference(struct pipe_fence_handle **dst,
                                   struct pipe_fence_handle *src);
static struct radeon_winsys_ctx *radeon_drm_ctx_create(struct radeon_winsys *ws)
{
    /* No context support here. Just return the winsys pointer
     * as the "context". */
    return (struct radeon_winsys_ctx*)ws;
}

static void radeon_drm_ctx_destroy(struct radeon_winsys_ctx *ctx)
{
    /* No context support here. */
}
static boolean radeon_init_cs_context(struct radeon_cs_context *csc,
                                      struct radeon_drm_winsys *ws)
{
    unsigned i;

    csc->fd = ws->fd;
    csc->nrelocs = 512;
    csc->relocs_bo = (struct radeon_bo_item*)
                     CALLOC(1, csc->nrelocs * sizeof(csc->relocs_bo[0]));
    if (!csc->relocs_bo) {
        return FALSE;
    }

    csc->relocs = (struct drm_radeon_cs_reloc*)
                  CALLOC(1, csc->nrelocs * sizeof(struct drm_radeon_cs_reloc));
    if (!csc->relocs) {
        FREE(csc->relocs_bo);
        return FALSE;
    }

    csc->chunks[0].chunk_id = RADEON_CHUNK_ID_IB;
    csc->chunks[0].length_dw = 0;
    csc->chunks[0].chunk_data = (uint64_t)(uintptr_t)csc->buf;
    csc->chunks[1].chunk_id = RADEON_CHUNK_ID_RELOCS;
    csc->chunks[1].length_dw = 0;
    csc->chunks[1].chunk_data = (uint64_t)(uintptr_t)csc->relocs;
    csc->chunks[2].chunk_id = RADEON_CHUNK_ID_FLAGS;
    csc->chunks[2].length_dw = 2;
    csc->chunks[2].chunk_data = (uint64_t)(uintptr_t)&csc->flags;

    csc->chunk_array[0] = (uint64_t)(uintptr_t)&csc->chunks[0];
    csc->chunk_array[1] = (uint64_t)(uintptr_t)&csc->chunks[1];
    csc->chunk_array[2] = (uint64_t)(uintptr_t)&csc->chunks[2];

    csc->cs.chunks = (uint64_t)(uintptr_t)csc->chunk_array;

    for (i = 0; i < ARRAY_SIZE(csc->reloc_indices_hashlist); i++) {
        csc->reloc_indices_hashlist[i] = -1;
    }
    return TRUE;
}
static void radeon_cs_context_cleanup(struct radeon_cs_context *csc)
{
    unsigned i;

    for (i = 0; i < csc->crelocs; i++) {
        p_atomic_dec(&csc->relocs_bo[i].bo->num_cs_references);
        radeon_bo_reference(&csc->relocs_bo[i].bo, NULL);
    }

    csc->crelocs = 0;
    csc->validated_crelocs = 0;
    csc->chunks[0].length_dw = 0;
    csc->chunks[1].length_dw = 0;
    csc->used_gart = 0;
    csc->used_vram = 0;

    for (i = 0; i < ARRAY_SIZE(csc->reloc_indices_hashlist); i++) {
        csc->reloc_indices_hashlist[i] = -1;
    }
}
static void radeon_destroy_cs_context(struct radeon_cs_context *csc)
{
    radeon_cs_context_cleanup(csc);
    FREE(csc->relocs_bo);
    FREE(csc->relocs);
}
static struct radeon_winsys_cs *
radeon_drm_cs_create(struct radeon_winsys_ctx *ctx,
                     enum ring_type ring_type,
                     void (*flush)(void *ctx, unsigned flags,
                                   struct pipe_fence_handle **fence),
                     void *flush_ctx)
{
    struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)ctx;
    struct radeon_drm_cs *cs;

    cs = CALLOC_STRUCT(radeon_drm_cs);
    if (!cs) {
        return NULL;
    }
    util_queue_fence_init(&cs->flush_completed);

    cs->ws = ws;
    cs->flush_cs = flush;
    cs->flush_data = flush_ctx;

    if (!radeon_init_cs_context(&cs->csc1, cs->ws)) {
        FREE(cs);
        return NULL;
    }
    if (!radeon_init_cs_context(&cs->csc2, cs->ws)) {
        radeon_destroy_cs_context(&cs->csc1);
        FREE(cs);
        return NULL;
    }

    /* Set the first command buffer as current. */
    cs->csc = &cs->csc1;
    cs->cst = &cs->csc2;
    cs->base.current.buf = cs->csc->buf;
    cs->base.current.max_dw = ARRAY_SIZE(cs->csc->buf);
    cs->ring_type = ring_type;

    p_atomic_inc(&ws->num_cs);
    return &cs->base;
}
#define OUT_CS(cs, value) (cs)->current.buf[(cs)->current.cdw++] = (value)
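/* For example, OUT_CS(&cs->base, 0x80000000) appends one dword and bumps
 * current.cdw; there is no bounds check here, so callers are expected to
 * reserve space with cs_check_space() first. */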
static inline void update_reloc(struct drm_radeon_cs_reloc *reloc,
                                enum radeon_bo_domain rd,
                                enum radeon_bo_domain wd,
                                unsigned priority,
                                enum radeon_bo_domain *added_domains)
{
    *added_domains = (rd | wd) & ~(reloc->read_domains | reloc->write_domain);

    reloc->read_domains |= rd;
    reloc->write_domain |= wd;
    reloc->flags = MAX2(reloc->flags, priority);
}
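/* Example: a reloc that was previously only read from GTT and is now also
 * written in VRAM yields *added_domains == RADEON_DOMAIN_VRAM, so the
 * caller accounts the buffer size against VRAM exactly once. */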
int radeon_lookup_buffer(struct radeon_cs_context *csc, struct radeon_bo *bo)
{
    unsigned hash = bo->handle & (ARRAY_SIZE(csc->reloc_indices_hashlist)-1);
    int i = csc->reloc_indices_hashlist[hash];

    /* not found or found */
    if (i == -1 || csc->relocs_bo[i].bo == bo)
        return i;

    /* Hash collision, look for the BO in the list of relocs linearly. */
    for (i = csc->crelocs - 1; i >= 0; i--) {
        if (csc->relocs_bo[i].bo == bo) {
            /* Put this reloc in the hash list.
             * This will prevent additional hash collisions if there are
             * several consecutive lookup_buffer calls for the same buffer.
             *
             * Example: Assuming buffers A,B,C collide in the hash list,
             * the following sequence of relocs:
             *         AAAAAAAAAAABBBBBBBBBBBBBBCCCCCCCC
             * will collide here: ^ and here:   ^,
             * meaning that we should get very few collisions in the end. */
            csc->reloc_indices_hashlist[hash] = i;
            return i;
        }
    }
    return -1;
}
static unsigned radeon_add_buffer(struct radeon_drm_cs *cs,
                                  struct radeon_bo *bo,
                                  enum radeon_bo_usage usage,
                                  enum radeon_bo_domain domains,
                                  unsigned priority,
                                  enum radeon_bo_domain *added_domains)
{
    struct radeon_cs_context *csc = cs->csc;
    struct drm_radeon_cs_reloc *reloc;
    unsigned hash = bo->handle & (ARRAY_SIZE(csc->reloc_indices_hashlist)-1);
    enum radeon_bo_domain rd = usage & RADEON_USAGE_READ ? domains : 0;
    enum radeon_bo_domain wd = usage & RADEON_USAGE_WRITE ? domains : 0;
    int i;

    assert(priority < 64);
    *added_domains = 0;

    i = radeon_lookup_buffer(csc, bo);

    if (i >= 0) {
        reloc = &csc->relocs[i];
        update_reloc(reloc, rd, wd, priority / 4, added_domains);
        csc->relocs_bo[i].priority_usage |= 1llu << priority;

        /* For async DMA, every add_buffer call must add a buffer to the list
         * no matter how many duplicates there are. This is due to the fact
         * the DMA CS checker doesn't use NOP packets for offset patching,
         * but always uses the i-th buffer from the list to patch the i-th
         * offset. If there are N offsets in a DMA CS, there must also be N
         * buffers in the relocation list.
         *
         * This doesn't have to be done if virtual memory is enabled,
         * because there is no offset patching with virtual memory.
         */
        if (cs->ring_type != RING_DMA || cs->ws->info.has_virtual_memory) {
            return i;
        }
    }

    /* New relocation, check if the backing array is large enough. */
    if (csc->crelocs >= csc->nrelocs) {
        uint32_t size;
        csc->nrelocs += 10;

        size = csc->nrelocs * sizeof(csc->relocs_bo[0]);
        csc->relocs_bo = realloc(csc->relocs_bo, size);

        size = csc->nrelocs * sizeof(struct drm_radeon_cs_reloc);
        csc->relocs = realloc(csc->relocs, size);

        csc->chunks[1].chunk_data = (uint64_t)(uintptr_t)csc->relocs;
    }

    /* Initialize the new relocation. */
    csc->relocs_bo[csc->crelocs].bo = NULL;
    csc->relocs_bo[csc->crelocs].priority_usage = 1llu << priority;
    radeon_bo_reference(&csc->relocs_bo[csc->crelocs].bo, bo);
    p_atomic_inc(&bo->num_cs_references);
    reloc = &csc->relocs[csc->crelocs];
    reloc->handle = bo->handle;
    reloc->read_domains = rd;
    reloc->write_domain = wd;
    reloc->flags = priority / 4;

    csc->reloc_indices_hashlist[hash] = csc->crelocs;

    csc->chunks[1].length_dw += RELOC_DWORDS;

    *added_domains = rd | wd;
    return csc->crelocs++;
}
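/* A concrete example of the two priority encodings above: adding a buffer
 * with priority 15 sets bit 15 in relocs_bo[i].priority_usage, the 64-bit
 * mask of all priorities the buffer was ever added with (reported by
 * cs_get_buffer_list), while the kernel-visible reloc->flags receives the
 * coarser value 15 / 4 = 3. */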
static unsigned radeon_drm_cs_add_buffer(struct radeon_winsys_cs *rcs,
                                         struct pb_buffer *buf,
                                         enum radeon_bo_usage usage,
                                         enum radeon_bo_domain domains,
                                         enum radeon_bo_priority priority)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    struct radeon_bo *bo = (struct radeon_bo*)buf;
    enum radeon_bo_domain added_domains;
    unsigned index = radeon_add_buffer(cs, bo, usage, domains, priority,
                                       &added_domains);

    if (added_domains & RADEON_DOMAIN_VRAM)
        cs->csc->used_vram += bo->base.size;
    else if (added_domains & RADEON_DOMAIN_GTT)
        cs->csc->used_gart += bo->base.size;

    return index;
}
static int radeon_drm_cs_lookup_buffer(struct radeon_winsys_cs *rcs,
                                       struct pb_buffer *buf)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);

    return radeon_lookup_buffer(cs->csc, (struct radeon_bo*)buf);
}
static boolean radeon_drm_cs_validate(struct radeon_winsys_cs *rcs)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    boolean status =
        cs->csc->used_gart < cs->ws->info.gart_size * 0.8 &&
        cs->csc->used_vram < cs->ws->info.vram_size * 0.8;

    if (status) {
        cs->csc->validated_crelocs = cs->csc->crelocs;
    } else {
        /* Remove lately-added buffers. The validation failed with them
         * and the CS is about to be flushed because of that. Keep only
         * the already-validated buffers. */
        unsigned i;

        for (i = cs->csc->validated_crelocs; i < cs->csc->crelocs; i++) {
            p_atomic_dec(&cs->csc->relocs_bo[i].bo->num_cs_references);
            radeon_bo_reference(&cs->csc->relocs_bo[i].bo, NULL);
        }
        cs->csc->crelocs = cs->csc->validated_crelocs;

        /* Flush if there are any relocs. Clean up otherwise. */
        if (cs->csc->crelocs) {
            cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
        } else {
            radeon_cs_context_cleanup(cs->csc);

            assert(cs->base.current.cdw == 0);
            if (cs->base.current.cdw != 0) {
                fprintf(stderr, "radeon: Unexpected error in %s.\n", __func__);
            }
        }
    }
    return status;
}
static bool radeon_drm_cs_check_space(struct radeon_winsys_cs *rcs, unsigned dw)
{
    assert(rcs->current.cdw <= rcs->current.max_dw);
    return rcs->current.max_dw - rcs->current.cdw >= dw;
}
static boolean radeon_drm_cs_memory_below_limit(struct radeon_winsys_cs *rcs,
                                                uint64_t vram, uint64_t gtt)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);

    vram += cs->csc->used_vram;
    gtt += cs->csc->used_gart;

    /* Anything that goes above the VRAM size should go to GTT. */
    if (vram > cs->ws->info.vram_size)
        gtt += vram - cs->ws->info.vram_size;

    /* Now we just need to check if we have enough GTT. */
    return gtt < cs->ws->info.gart_size * 0.7;
}
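/* Worked example with illustrative sizes: vram_size = 2 GiB, gart_size =
 * 2 GiB, used_vram = 0.8 GiB, used_gart = 0. A query with vram = 1.5 GiB
 * and gtt = 0.2 GiB totals 2.3 GiB of VRAM, so the 0.3 GiB overflow is
 * re-counted as GTT: gtt becomes 0.5 GiB, which is below 0.7 * 2 GiB,
 * and the function returns TRUE. */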
static uint64_t radeon_drm_cs_query_memory_usage(struct radeon_winsys_cs *rcs)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);

    return cs->csc->used_vram + cs->csc->used_gart;
}
static unsigned radeon_drm_cs_get_buffer_list(struct radeon_winsys_cs *rcs,
                                              struct radeon_bo_list_item *list)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    unsigned i;

    if (list) {
        for (i = 0; i < cs->csc->crelocs; i++) {
            pb_reference(&list[i].buf, &cs->csc->relocs_bo[i].bo->base);
            list[i].vm_address = cs->csc->relocs_bo[i].bo->va;
            list[i].priority_usage = cs->csc->relocs_bo[i].priority_usage;
        }
    }
    return cs->csc->crelocs;
}
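/* Since 'list' may be NULL, callers can size the array first (a sketch;
 * the array must be zeroed so pb_reference() doesn't unreference stale
 * pointers):
 *
 *     unsigned count = ws->cs_get_buffer_list(rcs, NULL);
 *     struct radeon_bo_list_item *list = CALLOC(count, sizeof(*list));
 *     ws->cs_get_buffer_list(rcs, list);
 */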
void radeon_drm_cs_emit_ioctl_oneshot(void *job)
{
    struct radeon_cs_context *csc = ((struct radeon_drm_cs*)job)->cst;
    unsigned i;
    int r;

    r = drmCommandWriteRead(csc->fd, DRM_RADEON_CS,
                            &csc->cs, sizeof(struct drm_radeon_cs));
    if (r) {
        if (r == -ENOMEM)
            fprintf(stderr, "radeon: Not enough memory for command submission.\n");
        else if (debug_get_bool_option("RADEON_DUMP_CS", FALSE)) {
            fprintf(stderr, "radeon: The kernel rejected CS, dumping...\n");
            for (i = 0; i < csc->chunks[0].length_dw; i++) {
                fprintf(stderr, "0x%08X\n", csc->buf[i]);
            }
        } else {
            fprintf(stderr, "radeon: The kernel rejected CS, "
                    "see dmesg for more information.\n");
        }
    }

    for (i = 0; i < csc->crelocs; i++)
        p_atomic_dec(&csc->relocs_bo[i].bo->num_active_ioctls);

    radeon_cs_context_cleanup(csc);
}
/*
 * Make sure previous submissions of this CS have completed.
 */
void radeon_drm_cs_sync_flush(struct radeon_winsys_cs *rcs)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);

    /* Wait for any pending ioctl of this CS to complete. */
    if (util_queue_is_initialized(&cs->ws->cs_queue))
        util_queue_job_wait(&cs->flush_completed);
}
DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", FALSE)

static void radeon_drm_cs_flush(struct radeon_winsys_cs *rcs,
                                unsigned flags,
                                struct pipe_fence_handle **fence)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    struct radeon_cs_context *tmp;

    switch (cs->ring_type) {
    case RING_DMA:
        /* pad DMA ring to 8 DWs */
        if (cs->ws->info.chip_class <= SI) {
            while (rcs->current.cdw & 7)
                OUT_CS(&cs->base, 0xf0000000); /* NOP packet */
        } else {
            while (rcs->current.cdw & 7)
                OUT_CS(&cs->base, 0x00000000); /* NOP packet */
        }
        break;
    case RING_GFX:
        /* pad GFX ring to 8 DWs to meet CP fetch alignment requirements;
         * r6xx requires at least 4 dw alignment to avoid a hw bug.
         */
        if (cs->ws->info.gfx_ib_pad_with_type2) {
            while (rcs->current.cdw & 7)
                OUT_CS(&cs->base, 0x80000000); /* type2 nop packet */
        } else {
            while (rcs->current.cdw & 7)
                OUT_CS(&cs->base, 0xffff1000); /* type3 nop packet */
        }
        break;
    case RING_UVD:
        while (rcs->current.cdw & 15)
            OUT_CS(&cs->base, 0x80000000); /* type2 nop packet */
        break;
    default:
        break;
    }

    if (rcs->current.cdw > rcs->current.max_dw) {
        fprintf(stderr, "radeon: command stream overflowed\n");
    }

    if (fence) {
        radeon_fence_reference(fence, NULL);
        *fence = radeon_cs_create_fence(rcs);
    }

    radeon_drm_cs_sync_flush(rcs);

    /* Swap command streams. */
    tmp = cs->csc;
    cs->csc = cs->cst;
    cs->cst = tmp;

    /* If the CS is not empty or overflowed, emit it in a separate thread. */
    if (cs->base.current.cdw && cs->base.current.cdw <= cs->base.current.max_dw &&
        !debug_get_option_noop()) {
        unsigned i, crelocs;

        crelocs = cs->cst->crelocs;

        cs->cst->chunks[0].length_dw = cs->base.current.cdw;

        for (i = 0; i < crelocs; i++) {
            /* Update the number of active asynchronous CS ioctls for the buffer. */
            p_atomic_inc(&cs->cst->relocs_bo[i].bo->num_active_ioctls);
        }

        switch (cs->ring_type) {
        case RING_DMA:
            cs->cst->flags[0] = 0;
            cs->cst->flags[1] = RADEON_CS_RING_DMA;
            cs->cst->cs.num_chunks = 3;
            if (cs->ws->info.has_virtual_memory) {
                cs->cst->flags[0] |= RADEON_CS_USE_VM;
            }
            break;

        case RING_UVD:
            cs->cst->flags[0] = 0;
            cs->cst->flags[1] = RADEON_CS_RING_UVD;
            cs->cst->cs.num_chunks = 3;
            break;

        case RING_VCE:
            cs->cst->flags[0] = 0;
            cs->cst->flags[1] = RADEON_CS_RING_VCE;
            cs->cst->cs.num_chunks = 3;
            break;

        default:
        case RING_GFX:
        case RING_COMPUTE:
            cs->cst->flags[0] = 0;
            cs->cst->flags[1] = RADEON_CS_RING_GFX;
            cs->cst->cs.num_chunks = 2;
            if (flags & RADEON_FLUSH_KEEP_TILING_FLAGS) {
                cs->cst->flags[0] |= RADEON_CS_KEEP_TILING_FLAGS;
                cs->cst->cs.num_chunks = 3;
            }
            if (cs->ws->info.has_virtual_memory) {
                cs->cst->flags[0] |= RADEON_CS_USE_VM;
                cs->cst->cs.num_chunks = 3;
            }
            if (flags & RADEON_FLUSH_END_OF_FRAME) {
                cs->cst->flags[0] |= RADEON_CS_END_OF_FRAME;
                cs->cst->cs.num_chunks = 3;
            }
            if (cs->ring_type == RING_COMPUTE) {
                cs->cst->flags[1] = RADEON_CS_RING_COMPUTE;
                cs->cst->cs.num_chunks = 3;
            }
            break;
        }

        if (util_queue_is_initialized(&cs->ws->cs_queue)) {
            util_queue_add_job(&cs->ws->cs_queue, cs, &cs->flush_completed);
            if (!(flags & RADEON_FLUSH_ASYNC))
                radeon_drm_cs_sync_flush(rcs);
        } else {
            radeon_drm_cs_emit_ioctl_oneshot(cs);
        }
    } else {
        radeon_cs_context_cleanup(cs->cst);
    }

    /* Prepare a new CS. */
    cs->base.current.buf = cs->csc->buf;
    cs->base.current.cdw = 0;

    cs->ws->num_cs_flushes++;
}
static void radeon_drm_cs_destroy(struct radeon_winsys_cs *rcs)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);

    radeon_drm_cs_sync_flush(rcs);
    util_queue_fence_destroy(&cs->flush_completed);
    radeon_cs_context_cleanup(&cs->csc1);
    radeon_cs_context_cleanup(&cs->csc2);
    p_atomic_dec(&cs->ws->num_cs);
    radeon_destroy_cs_context(&cs->csc1);
    radeon_destroy_cs_context(&cs->csc2);
    FREE(cs);
}
static boolean radeon_bo_is_referenced(struct radeon_winsys_cs *rcs,
                                       struct pb_buffer *_buf,
                                       enum radeon_bo_usage usage)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    struct radeon_bo *bo = (struct radeon_bo*)_buf;
    int index;

    if (!bo->num_cs_references)
        return FALSE;

    index = radeon_lookup_buffer(cs->csc, bo);
    if (index == -1)
        return FALSE;

    if ((usage & RADEON_USAGE_WRITE) && cs->csc->relocs[index].write_domain)
        return TRUE;
    if ((usage & RADEON_USAGE_READ) && cs->csc->relocs[index].read_domains)
        return TRUE;

    return FALSE;
}
static struct pipe_fence_handle *
radeon_cs_create_fence(struct radeon_winsys_cs *rcs)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    struct pb_buffer *fence;

    /* Create a fence, which is a dummy BO. */
    fence = cs->ws->base.buffer_create(&cs->ws->base, 1, 1,
                                       RADEON_DOMAIN_GTT, 0);
    /* Add the fence as a dummy relocation. */
    cs->ws->base.cs_add_buffer(rcs, fence,
                               RADEON_USAGE_READWRITE, RADEON_DOMAIN_GTT,
                               RADEON_PRIO_FENCE);
    return (struct pipe_fence_handle*)fence;
}
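/* Because the dummy BO was just added as a relocation of the CS being
 * flushed, the kernel considers it busy until that CS finishes executing,
 * so waiting on the fence (radeon_fence_wait below) reduces to waiting
 * for the BO to become idle. */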
static bool radeon_fence_wait(struct radeon_winsys *ws,
                              struct pipe_fence_handle *fence,
                              uint64_t timeout)
{
    return ws->buffer_wait((struct pb_buffer*)fence, timeout,
                           RADEON_USAGE_READWRITE);
}
static void radeon_fence_reference(struct pipe_fence_handle **dst,
                                   struct pipe_fence_handle *src)
{
    pb_reference((struct pb_buffer**)dst, (struct pb_buffer*)src);
}
void radeon_drm_cs_init_functions(struct radeon_drm_winsys *ws)
{
    ws->base.ctx_create = radeon_drm_ctx_create;
    ws->base.ctx_destroy = radeon_drm_ctx_destroy;
    ws->base.cs_create = radeon_drm_cs_create;
    ws->base.cs_destroy = radeon_drm_cs_destroy;
    ws->base.cs_add_buffer = radeon_drm_cs_add_buffer;
    ws->base.cs_lookup_buffer = radeon_drm_cs_lookup_buffer;
    ws->base.cs_validate = radeon_drm_cs_validate;
    ws->base.cs_check_space = radeon_drm_cs_check_space;
    ws->base.cs_memory_below_limit = radeon_drm_cs_memory_below_limit;
    ws->base.cs_query_memory_usage = radeon_drm_cs_query_memory_usage;
    ws->base.cs_get_buffer_list = radeon_drm_cs_get_buffer_list;
    ws->base.cs_flush = radeon_drm_cs_flush;
    ws->base.cs_is_buffer_referenced = radeon_bo_is_referenced;
    ws->base.cs_sync_flush = radeon_drm_cs_sync_flush;
    ws->base.fence_wait = radeon_fence_wait;
    ws->base.fence_reference = radeon_fence_reference;
}