2 * Copyright © 2008 Jérôme Glisse
3 * Copyright © 2010 Marek Olšák <maraeo@gmail.com>
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
16 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
18 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 * The above copyright notice and this permission notice (including the
24 * next paragraph) shall be included in all copies or substantial portions
29 * Marek Olšák <maraeo@gmail.com>
31 * Based on work from libdrm_radeon by:
32 * Aapo Tahkola <aet@rasterburn.org>
33 * Nicolai Haehnle <prefect_@gmx.net>
34 * Jérôme Glisse <glisse@freedesktop.org>
38 This file replaces libdrm's radeon_cs_gem with our own implemention.
39 It's optimized specifically for Radeon DRM.
40 Reloc writes and space checking are faster and simpler than their
41 counterparts in libdrm (the time complexity of all the functions
42 is O(1) in nearly all scenarios, thanks to hashing).
46 cs_add_reloc(cs, buf, read_domain, write_domain) adds a new relocation and
47 also adds the size of 'buf' to the used_gart and used_vram winsys variables
48 based on the domains, which are simply or'd for the accounting purposes.
49 The adding is skipped if the reloc is already present in the list, but it
50 accounts any newly-referenced domains.
52 cs_validate is then called, which just checks:
53 used_vram/gart < vram/gart_size * 0.8
54 The 0.8 number allows for some memory fragmentation. If the validation
55 fails, the pipe driver flushes CS and tries do the validation again,
56 i.e. it validates only that one operation. If it fails again, it drops
57 the operation on the floor and prints some nasty message to stderr.
58 (done in the pipe driver)
60 cs_write_reloc(cs, buf) just writes a reloc that has been added using
61 cs_add_reloc. The read_domain and write_domain parameters have been removed,
62 because we already specify them in cs_add_reloc.
65 #include "radeon_drm_cs.h"
67 #include "util/u_memory.h"
/* Size of one relocation record in dwords, as consumed by the kernel CS ioctl. */
#define RELOC_DWORDS (sizeof(struct drm_radeon_cs_reloc) / sizeof(uint32_t))
76 static boolean
radeon_init_cs_context(struct radeon_cs_context
*csc
, int fd
)
80 csc
->relocs_bo
= (struct radeon_bo
**)
81 CALLOC(1, csc
->nrelocs
* sizeof(struct radeon_bo
*));
82 if (!csc
->relocs_bo
) {
86 csc
->relocs
= (struct drm_radeon_cs_reloc
*)
87 CALLOC(1, csc
->nrelocs
* sizeof(struct drm_radeon_cs_reloc
));
93 csc
->chunks
[0].chunk_id
= RADEON_CHUNK_ID_IB
;
94 csc
->chunks
[0].length_dw
= 0;
95 csc
->chunks
[0].chunk_data
= (uint64_t)(uintptr_t)csc
->buf
;
96 csc
->chunks
[1].chunk_id
= RADEON_CHUNK_ID_RELOCS
;
97 csc
->chunks
[1].length_dw
= 0;
98 csc
->chunks
[1].chunk_data
= (uint64_t)(uintptr_t)csc
->relocs
;
100 csc
->chunk_array
[0] = (uint64_t)(uintptr_t)&csc
->chunks
[0];
101 csc
->chunk_array
[1] = (uint64_t)(uintptr_t)&csc
->chunks
[1];
103 csc
->cs
.num_chunks
= 2;
104 csc
->cs
.chunks
= (uint64_t)(uintptr_t)csc
->chunk_array
;
108 static void radeon_cs_context_cleanup(struct radeon_cs_context
*csc
)
112 for (i
= 0; i
< csc
->crelocs
; i
++) {
113 p_atomic_dec(&csc
->relocs_bo
[i
]->num_cs_references
);
114 radeon_bo_reference(&csc
->relocs_bo
[i
], NULL
);
118 csc
->validated_crelocs
= 0;
119 csc
->chunks
[0].length_dw
= 0;
120 csc
->chunks
[1].length_dw
= 0;
123 memset(csc
->is_handle_added
, 0, sizeof(csc
->is_handle_added
));
126 static void radeon_destroy_cs_context(struct radeon_cs_context
*csc
)
128 radeon_cs_context_cleanup(csc
);
129 FREE(csc
->relocs_bo
);
133 static struct radeon_winsys_cs
*radeon_drm_cs_create(struct radeon_winsys
*rws
)
135 struct radeon_drm_winsys
*ws
= radeon_drm_winsys(rws
);
136 struct radeon_drm_cs
*cs
;
138 cs
= CALLOC_STRUCT(radeon_drm_cs
);
145 if (!radeon_init_cs_context(&cs
->csc1
, cs
->ws
->fd
)) {
149 if (!radeon_init_cs_context(&cs
->csc2
, cs
->ws
->fd
)) {
150 radeon_destroy_cs_context(&cs
->csc1
);
155 /* Set the first command buffer as current. */
158 cs
->base
.buf
= cs
->csc
->buf
;
160 p_atomic_inc(&ws
->num_cs
);
/* Append one dword to the command stream. No bounds checking — the caller
 * is responsible for not overflowing the IB. */
#define OUT_CS(cs, value) (cs)->buf[(cs)->cdw++] = (value)
166 static INLINE
void update_domains(struct drm_radeon_cs_reloc
*reloc
,
167 enum radeon_bo_domain rd
,
168 enum radeon_bo_domain wd
,
169 enum radeon_bo_domain
*added_domains
)
171 *added_domains
= (rd
| wd
) & ~(reloc
->read_domains
| reloc
->write_domain
);
173 if (reloc
->read_domains
& wd
) {
174 reloc
->read_domains
= rd
;
175 reloc
->write_domain
= wd
;
176 } else if (rd
& reloc
->write_domain
) {
177 reloc
->read_domains
= rd
;
178 reloc
->write_domain
|= wd
;
180 reloc
->read_domains
|= rd
;
181 reloc
->write_domain
|= wd
;
185 int radeon_get_reloc(struct radeon_cs_context
*csc
, struct radeon_bo
*bo
)
187 struct drm_radeon_cs_reloc
*reloc
;
189 unsigned hash
= bo
->handle
& (sizeof(csc
->is_handle_added
)-1);
191 if (csc
->is_handle_added
[hash
]) {
192 reloc
= csc
->relocs_hashlist
[hash
];
193 if (reloc
->handle
== bo
->handle
) {
194 return csc
->reloc_indices_hashlist
[hash
];
197 /* Hash collision, look for the BO in the list of relocs linearly. */
198 for (i
= csc
->crelocs
; i
!= 0;) {
200 reloc
= &csc
->relocs
[i
];
201 if (reloc
->handle
== bo
->handle
) {
202 /* Put this reloc in the hash list.
203 * This will prevent additional hash collisions if there are
204 * several subsequent get_reloc calls of the same buffer.
206 * Example: Assuming buffers A,B,C collide in the hash list,
207 * the following sequence of relocs:
208 * AAAAAAAAAAABBBBBBBBBBBBBBCCCCCCCC
209 * will collide here: ^ and here: ^,
210 * meaning that we should get very few collisions in the end. */
211 csc
->relocs_hashlist
[hash
] = reloc
;
212 csc
->reloc_indices_hashlist
[hash
] = i
;
213 /*printf("write_reloc collision, hash: %i, handle: %i\n", hash, bo->handle);*/
222 static unsigned radeon_add_reloc(struct radeon_cs_context
*csc
,
223 struct radeon_bo
*bo
,
224 enum radeon_bo_domain rd
,
225 enum radeon_bo_domain wd
,
226 enum radeon_bo_domain
*added_domains
)
228 struct drm_radeon_cs_reloc
*reloc
;
230 unsigned hash
= bo
->handle
& (sizeof(csc
->is_handle_added
)-1);
232 if (csc
->is_handle_added
[hash
]) {
233 reloc
= csc
->relocs_hashlist
[hash
];
234 if (reloc
->handle
== bo
->handle
) {
235 update_domains(reloc
, rd
, wd
, added_domains
);
236 return csc
->reloc_indices_hashlist
[hash
];
239 /* Hash collision, look for the BO in the list of relocs linearly. */
240 for (i
= csc
->crelocs
; i
!= 0;) {
242 reloc
= &csc
->relocs
[i
];
243 if (reloc
->handle
== bo
->handle
) {
244 update_domains(reloc
, rd
, wd
, added_domains
);
246 csc
->relocs_hashlist
[hash
] = reloc
;
247 csc
->reloc_indices_hashlist
[hash
] = i
;
248 /*printf("write_reloc collision, hash: %i, handle: %i\n", hash, bo->handle);*/
254 /* New relocation, check if the backing array is large enough. */
255 if (csc
->crelocs
>= csc
->nrelocs
) {
259 size
= csc
->nrelocs
* sizeof(struct radeon_bo
*);
260 csc
->relocs_bo
= (struct radeon_bo
**)realloc(csc
->relocs_bo
, size
);
262 size
= csc
->nrelocs
* sizeof(struct drm_radeon_cs_reloc
);
263 csc
->relocs
= (struct drm_radeon_cs_reloc
*)realloc(csc
->relocs
, size
);
265 csc
->chunks
[1].chunk_data
= (uint64_t)(uintptr_t)csc
->relocs
;
268 /* Initialize the new relocation. */
269 csc
->relocs_bo
[csc
->crelocs
] = NULL
;
270 radeon_bo_reference(&csc
->relocs_bo
[csc
->crelocs
], bo
);
271 p_atomic_inc(&bo
->num_cs_references
);
272 reloc
= &csc
->relocs
[csc
->crelocs
];
273 reloc
->handle
= bo
->handle
;
274 reloc
->read_domains
= rd
;
275 reloc
->write_domain
= wd
;
278 csc
->is_handle_added
[hash
] = TRUE
;
279 csc
->relocs_hashlist
[hash
] = reloc
;
280 csc
->reloc_indices_hashlist
[hash
] = csc
->crelocs
;
282 csc
->chunks
[1].length_dw
+= RELOC_DWORDS
;
284 *added_domains
= rd
| wd
;
285 return csc
->crelocs
++;
288 static unsigned radeon_drm_cs_add_reloc(struct radeon_winsys_cs
*rcs
,
289 struct radeon_winsys_cs_handle
*buf
,
290 enum radeon_bo_domain rd
,
291 enum radeon_bo_domain wd
)
293 struct radeon_drm_cs
*cs
= radeon_drm_cs(rcs
);
294 struct radeon_bo
*bo
= (struct radeon_bo
*)buf
;
295 enum radeon_bo_domain added_domains
;
297 unsigned index
= radeon_add_reloc(cs
->csc
, bo
, rd
, wd
, &added_domains
);
299 if (added_domains
& RADEON_DOMAIN_GTT
)
300 cs
->csc
->used_gart
+= bo
->size
;
301 if (added_domains
& RADEON_DOMAIN_VRAM
)
302 cs
->csc
->used_vram
+= bo
->size
;
307 static boolean
radeon_drm_cs_validate(struct radeon_winsys_cs
*rcs
)
309 struct radeon_drm_cs
*cs
= radeon_drm_cs(rcs
);
311 cs
->csc
->used_gart
< cs
->ws
->info
.gart_size
* 0.8 &&
312 cs
->csc
->used_vram
< cs
->ws
->info
.vram_size
* 0.8;
315 cs
->csc
->validated_crelocs
= cs
->csc
->crelocs
;
317 /* Remove lately-added relocations. The validation failed with them
318 * and the CS is about to be flushed because of that. Keep only
319 * the already-validated relocations. */
322 for (i
= cs
->csc
->validated_crelocs
; i
< cs
->csc
->crelocs
; i
++) {
323 p_atomic_dec(&cs
->csc
->relocs_bo
[i
]->num_cs_references
);
324 radeon_bo_reference(&cs
->csc
->relocs_bo
[i
], NULL
);
326 cs
->csc
->crelocs
= cs
->csc
->validated_crelocs
;
328 /* Flush if there are any relocs. Clean up otherwise. */
329 if (cs
->csc
->crelocs
) {
330 cs
->flush_cs(cs
->flush_data
, RADEON_FLUSH_ASYNC
);
332 radeon_cs_context_cleanup(cs
->csc
);
334 assert(cs
->base
.cdw
== 0);
335 if (cs
->base
.cdw
!= 0) {
336 fprintf(stderr
, "radeon: Unexpected error in %s.\n", __func__
);
343 static void radeon_drm_cs_write_reloc(struct radeon_winsys_cs
*rcs
,
344 struct radeon_winsys_cs_handle
*buf
)
346 struct radeon_drm_cs
*cs
= radeon_drm_cs(rcs
);
347 struct radeon_bo
*bo
= (struct radeon_bo
*)buf
;
349 unsigned index
= radeon_get_reloc(cs
->csc
, bo
);
352 fprintf(stderr
, "radeon: Cannot get a relocation in %s.\n", __func__
);
356 OUT_CS(&cs
->base
, 0xc0001000);
357 OUT_CS(&cs
->base
, index
* RELOC_DWORDS
);
360 static PIPE_THREAD_ROUTINE(radeon_drm_cs_emit_ioctl
, param
)
362 struct radeon_cs_context
*csc
= (struct radeon_cs_context
*)param
;
365 if (drmCommandWriteRead(csc
->fd
, DRM_RADEON_CS
,
366 &csc
->cs
, sizeof(struct drm_radeon_cs
))) {
367 if (debug_get_bool_option("RADEON_DUMP_CS", FALSE
)) {
370 fprintf(stderr
, "radeon: The kernel rejected CS, dumping...\n");
371 for (i
= 0; i
< csc
->chunks
[0].length_dw
; i
++) {
372 fprintf(stderr
, "0x%08X\n", csc
->buf
[i
]);
375 fprintf(stderr
, "radeon: The kernel rejected CS, "
376 "see dmesg for more information.\n");
380 for (i
= 0; i
< csc
->crelocs
; i
++)
381 p_atomic_dec(&csc
->relocs_bo
[i
]->num_active_ioctls
);
383 radeon_cs_context_cleanup(csc
);
387 void radeon_drm_cs_sync_flush(struct radeon_drm_cs
*cs
)
389 /* Wait for any pending ioctl to complete. */
391 pipe_thread_wait(cs
->thread
);
/* Generates debug_get_option_thread(): reads the RADEON_THREAD env var
 * once (default TRUE) to allow disabling the async submit thread. */
DEBUG_GET_ONCE_BOOL_OPTION(thread, "RADEON_THREAD", TRUE)
398 static void radeon_drm_cs_flush(struct radeon_winsys_cs
*rcs
, unsigned flags
)
400 struct radeon_drm_cs
*cs
= radeon_drm_cs(rcs
);
401 struct radeon_cs_context
*tmp
;
403 radeon_drm_cs_sync_flush(cs
);
405 /* If the CS is not empty, emit it in a newly-spawned thread. */
407 unsigned i
, crelocs
= cs
->csc
->crelocs
;
409 cs
->csc
->chunks
[0].length_dw
= cs
->base
.cdw
;
411 for (i
= 0; i
< crelocs
; i
++) {
412 /* Update the number of active asynchronous CS ioctls for the buffer. */
413 p_atomic_inc(&cs
->csc
->relocs_bo
[i
]->num_active_ioctls
);
415 /* Update whether the buffer is busy for write. */
416 if (cs
->csc
->relocs
[i
].write_domain
) {
417 cs
->csc
->relocs_bo
[i
]->busy_for_write
= TRUE
;
421 if (cs
->ws
->num_cpus
> 1 && debug_get_option_thread() &&
422 (flags
& RADEON_FLUSH_ASYNC
)) {
423 cs
->thread
= pipe_thread_create(radeon_drm_cs_emit_ioctl
, cs
->csc
);
426 radeon_drm_cs_emit_ioctl(cs
->csc
);
429 radeon_cs_context_cleanup(cs
->csc
);
432 /* Flip command streams. */
437 /* Prepare a new CS. */
438 cs
->base
.buf
= cs
->csc
->buf
;
442 static void radeon_drm_cs_destroy(struct radeon_winsys_cs
*rcs
)
444 struct radeon_drm_cs
*cs
= radeon_drm_cs(rcs
);
445 radeon_drm_cs_sync_flush(cs
);
446 radeon_cs_context_cleanup(&cs
->csc1
);
447 radeon_cs_context_cleanup(&cs
->csc2
);
448 p_atomic_dec(&cs
->ws
->num_cs
);
449 radeon_destroy_cs_context(&cs
->csc1
);
450 radeon_destroy_cs_context(&cs
->csc2
);
454 static void radeon_drm_cs_set_flush(struct radeon_winsys_cs
*rcs
,
455 void (*flush
)(void *ctx
, unsigned flags
),
458 struct radeon_drm_cs
*cs
= radeon_drm_cs(rcs
);
459 cs
->flush_cs
= flush
;
460 cs
->flush_data
= user
;
463 static boolean
radeon_bo_is_referenced(struct radeon_winsys_cs
*rcs
,
464 struct radeon_winsys_cs_handle
*_buf
)
466 struct radeon_drm_cs
*cs
= radeon_drm_cs(rcs
);
467 struct radeon_bo
*bo
= (struct radeon_bo
*)_buf
;
469 return radeon_bo_is_referenced_by_cs(cs
, bo
);
472 void radeon_drm_cs_init_functions(struct radeon_drm_winsys
*ws
)
474 ws
->base
.cs_create
= radeon_drm_cs_create
;
475 ws
->base
.cs_destroy
= radeon_drm_cs_destroy
;
476 ws
->base
.cs_add_reloc
= radeon_drm_cs_add_reloc
;
477 ws
->base
.cs_validate
= radeon_drm_cs_validate
;
478 ws
->base
.cs_write_reloc
= radeon_drm_cs_write_reloc
;
479 ws
->base
.cs_flush
= radeon_drm_cs_flush
;
480 ws
->base
.cs_set_flush_callback
= radeon_drm_cs_set_flush
;
481 ws
->base
.cs_is_buffer_referenced
= radeon_bo_is_referenced
;