etnaviv: split dump_shader
[mesa.git] / src / gallium / drivers / radeon / radeon_winsys.h
1 /*
2 * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
3 * Copyright 2010 Marek Olšák <maraeo@gmail.com>
4 * Copyright 2018 Advanced Micro Devices, Inc.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * on the rights to use, copy, modify, merge, publish, distribute, sub
11 * license, and/or sell copies of the Software, and to permit persons to whom
12 * the Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE. */
25
26 #ifndef RADEON_WINSYS_H
27 #define RADEON_WINSYS_H
28
29 /* The public winsys interface header for the radeon driver. */
30
/* Whether the next IB can start immediately and not wait for draws and
 * dispatches from the current IB to finish. */
#define RADEON_FLUSH_START_NEXT_GFX_IB_NOW (1u << 31)

/* Convenience combination: flush asynchronously AND let the next gfx IB
 * begin without waiting for this one to finish. */
#define RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW \
   (PIPE_FLUSH_ASYNC | RADEON_FLUSH_START_NEXT_GFX_IB_NOW)
37
38 #include "pipebuffer/pb_buffer.h"
39
40 #include "amd/common/ac_gpu_info.h"
41 #include "amd/common/ac_surface.h"
42
/* Tiling flags: how a buffer's texels are laid out in memory
 * (used in radeon_bo_metadata for display code and DRI sharing). */
enum radeon_bo_layout {
   RADEON_LAYOUT_LINEAR = 0, /* not tiled */
   RADEON_LAYOUT_TILED,
   RADEON_LAYOUT_SQUARETILED,

   RADEON_LAYOUT_UNKNOWN     /* layout could not be determined */
};
51
/* Memory domains a buffer object may be placed in. */
enum radeon_bo_domain { /* bitfield */
   RADEON_DOMAIN_GTT  = 2,  /* GPU-accessible system memory */
   RADEON_DOMAIN_VRAM = 4,  /* dedicated GPU memory */
   RADEON_DOMAIN_VRAM_GTT = RADEON_DOMAIN_VRAM | RADEON_DOMAIN_GTT,
   RADEON_DOMAIN_GDS = 8,   /* global data share */
   RADEON_DOMAIN_OA = 16,   /* ordered append */
};
59
/* Buffer-allocation flags; combined with a domain at buffer_create time. */
enum radeon_bo_flag { /* bitfield */
   RADEON_FLAG_GTT_WC =        (1 << 0), /* CPU mapping is write-combined */
   RADEON_FLAG_NO_CPU_ACCESS = (1 << 1), /* buffer is never CPU-mapped */
   RADEON_FLAG_NO_SUBALLOC =   (1 << 2), /* don't use the slab suballocator */
   RADEON_FLAG_SPARSE =        (1 << 3), /* partially-resident; see buffer_commit */
   RADEON_FLAG_NO_INTERPROCESS_SHARING = (1 << 4), /* buffer can never be exported */
   RADEON_FLAG_READ_ONLY =     (1 << 5), /* GPU reads only */
   RADEON_FLAG_32BIT =         (1 << 6), /* allocate within a 32-bit VA range */
};
69
/* Flags for cs_add_fence_dependency. */
enum radeon_dependency_flag {
   /* Add the dependency to the parallel compute IB only. */
   RADEON_DEPENDENCY_PARALLEL_COMPUTE_ONLY = 1 << 0,

   /* Instead of waiting for a job to finish execution, the dependency will
    * be signaled when the job starts execution.
    */
   RADEON_DEPENDENCY_START_FENCE = 1 << 1,
};
79
/* How a buffer is used by a command stream (see cs_add_buffer). */
enum radeon_bo_usage { /* bitfield */
   RADEON_USAGE_READ = 2,
   RADEON_USAGE_WRITE = 4,
   RADEON_USAGE_READWRITE = RADEON_USAGE_READ | RADEON_USAGE_WRITE,

   /* The winsys ensures that the CS submission will be scheduled after
    * previously flushed CSs referencing this BO in a conflicting way.
    */
   RADEON_USAGE_SYNCHRONIZED = 8
};
90
/* Extra winsys-private flags OR'd into the PIPE_TRANSFER_* usage bits
 * passed to buffer_map. */
enum radeon_transfer_flags {
   /* Indicates that the caller will unmap the buffer.
    *
    * Not unmapping buffers is an important performance optimization for
    * OpenGL (avoids kernel overhead for frequently mapped buffers).
    */
   RADEON_TRANSFER_TEMPORARY = (PIPE_TRANSFER_DRV_PRV << 0),
};
99
/* Granularity of sparse-buffer commitment (see buffer_commit). */
#define RADEON_SPARSE_PAGE_SIZE (64 * 1024)

/* Hardware ring (engine) a command stream is submitted to. */
enum ring_type {
   RING_GFX = 0,      /* graphics */
   RING_COMPUTE,      /* async compute */
   RING_DMA,          /* SDMA copy engine */
   RING_UVD,          /* UVD video decode */
   RING_VCE,          /* VCE video encode */
   RING_UVD_ENC,      /* UVD video encode */
   RING_VCN_DEC,      /* VCN video decode */
   RING_VCN_ENC,      /* VCN video encode */
   RING_VCN_JPEG,     /* VCN JPEG decode */
   RING_LAST,
};
114
/* Statistics and counters queryable through radeon_winsys::query_value. */
enum radeon_value_id {
   RADEON_REQUESTED_VRAM_MEMORY,
   RADEON_REQUESTED_GTT_MEMORY,
   RADEON_MAPPED_VRAM,
   RADEON_MAPPED_GTT,
   RADEON_BUFFER_WAIT_TIME_NS,
   RADEON_NUM_MAPPED_BUFFERS,
   RADEON_TIMESTAMP,
   RADEON_NUM_GFX_IBS,
   RADEON_NUM_SDMA_IBS,
   RADEON_GFX_BO_LIST_COUNTER, /* number of BOs submitted in gfx IBs */
   RADEON_GFX_IB_SIZE_COUNTER,
   RADEON_NUM_BYTES_MOVED,
   RADEON_NUM_EVICTIONS,
   RADEON_NUM_VRAM_CPU_PAGE_FAULTS,
   RADEON_VRAM_USAGE,
   RADEON_VRAM_VIS_USAGE,
   RADEON_GTT_USAGE,
   RADEON_GPU_TEMPERATURE, /* DRM 2.42.0 */
   RADEON_CURRENT_SCLK,
   RADEON_CURRENT_MCLK,
   RADEON_CS_THREAD_TIME,
};
138
/* Residency priority of a buffer within a CS (see cs_add_buffer).
 * A higher number means a greater chance of being placed in the
 * requested domain; 31 is the maximum value. */
enum radeon_bo_priority {
   /* Each group of two has the same priority. */
   RADEON_PRIO_FENCE = 0,
   RADEON_PRIO_TRACE,

   RADEON_PRIO_SO_FILLED_SIZE = 2,
   RADEON_PRIO_QUERY,

   RADEON_PRIO_IB1 = 4, /* main IB submitted to the kernel */
   RADEON_PRIO_IB2, /* IB executed with INDIRECT_BUFFER */

   RADEON_PRIO_DRAW_INDIRECT = 6,
   RADEON_PRIO_INDEX_BUFFER,

   RADEON_PRIO_CP_DMA = 8,
   RADEON_PRIO_BORDER_COLORS,

   RADEON_PRIO_CONST_BUFFER = 10,
   RADEON_PRIO_DESCRIPTORS,

   RADEON_PRIO_SAMPLER_BUFFER = 12,
   RADEON_PRIO_VERTEX_BUFFER,

   RADEON_PRIO_SHADER_RW_BUFFER = 14,
   RADEON_PRIO_COMPUTE_GLOBAL,

   RADEON_PRIO_SAMPLER_TEXTURE = 16,
   RADEON_PRIO_SHADER_RW_IMAGE,

   RADEON_PRIO_SAMPLER_TEXTURE_MSAA = 18,
   RADEON_PRIO_COLOR_BUFFER,

   RADEON_PRIO_DEPTH_BUFFER = 20,

   RADEON_PRIO_COLOR_BUFFER_MSAA = 22,

   RADEON_PRIO_DEPTH_BUFFER_MSAA = 24,

   RADEON_PRIO_SEPARATE_META = 26,
   RADEON_PRIO_SHADER_BINARY, /* the hw can't hide instruction cache misses */

   RADEON_PRIO_SHADER_RINGS = 28,

   RADEON_PRIO_SCRATCH_BUFFER = 30,
   /* 31 is the maximum value */
};
185
186 struct winsys_handle;
187 struct radeon_winsys_ctx;
188
/* One contiguous chunk of command-stream memory. A radeon_cmdbuf grows by
 * chaining chunks; see radeon_cmdbuf below. */
struct radeon_cmdbuf_chunk {
   unsigned cdw;    /* Number of used dwords. */
   unsigned max_dw; /* Maximum number of dwords. */
   uint32_t *buf;   /* The base pointer of the chunk. */
};
194
/* A command stream (IB). Packets are written into "current"; when a chunk
 * fills up and chaining is supported, it is moved into "prev" and a new
 * current chunk is started. */
struct radeon_cmdbuf {
   struct radeon_cmdbuf_chunk current;
   struct radeon_cmdbuf_chunk *prev;
   unsigned num_prev; /* Number of previous chunks. */
   unsigned max_prev; /* Space in array pointed to by prev. */
   unsigned prev_dw;  /* Total number of dwords in previous chunks. */

   /* Memory usage of the buffer list. These are always 0 for preamble IBs. */
   uint64_t used_vram;
   uint64_t used_gart;
   uint64_t gpu_address;
};
207
/* Tiling info for display code, DRI sharing, and other data. */
struct radeon_bo_metadata {
   /* Tiling flags describing the texture layout for display code
    * and DRI sharing.
    */
   union {
      /* Pre-gfx9 tiling description. */
      struct {
         enum radeon_bo_layout microtile;
         enum radeon_bo_layout macrotile;
         unsigned pipe_config;
         unsigned bankw;
         unsigned bankh;
         unsigned tile_split;
         unsigned mtilea;
         unsigned num_banks;
         unsigned stride;
         bool scanout;
      } legacy;

      /* Gfx9+ tiling description. */
      struct {
         /* surface flags */
         unsigned swizzle_mode:5;

         /* DCC flags */
         /* [31:8]: max offset = 4GB - 256; 0 = DCC disabled */
         unsigned dcc_offset_256B:24;
         unsigned dcc_pitch_max:14; /* (mip chain pitch - 1) for DCN */
         unsigned dcc_independent_64B:1;
      } gfx9;
   } u;

   /* Additional metadata associated with the buffer, in bytes.
    * The maximum size is 64 * 4. This is opaque for the winsys & kernel.
    * Supported by amdgpu only.
    */
   uint32_t size_metadata;
   uint32_t metadata[64];
};
246
/* Features requested through radeon_winsys::cs_request_feature. */
enum radeon_feature_id {
   RADEON_FID_R300_HYPERZ_ACCESS, /* ZMask + HiZ */
   RADEON_FID_R300_CMASK_ACCESS,
};
251
/* One entry of the buffer list returned by cs_get_buffer_list. */
struct radeon_bo_list_item {
   uint64_t bo_size;
   uint64_t vm_address;
   uint32_t priority_usage; /* mask of (1 << RADEON_PRIO_*) */
};
257
/* The winsys interface: a vtable of callbacks implemented by either the
 * radeon or the amdgpu winsys and consumed by the gallium driver. */
struct radeon_winsys {
   /**
    * The screen object this winsys was created for
    */
   struct pipe_screen *screen;

   /**
    * Decrement the winsys reference count.
    *
    * \param ws  The winsys this function is called for.
    * \return    True if the winsys and screen should be destroyed.
    */
   bool (*unref)(struct radeon_winsys *ws);

   /**
    * Destroy this winsys.
    *
    * \param ws        The winsys this function is called from.
    */
   void (*destroy)(struct radeon_winsys *ws);

   /**
    * Query an info structure from winsys.
    *
    * \param ws        The winsys this function is called from.
    * \param info      Return structure
    */
   void (*query_info)(struct radeon_winsys *ws,
                      struct radeon_info *info);

   /**
    * A hint for the winsys that it should pin its execution threads to
    * a group of cores sharing a specific L3 cache if the CPU has multiple
    * L3 caches. This is needed for good multithreading performance on
    * AMD Zen CPUs.
    */
   void (*pin_threads_to_L3_cache)(struct radeon_winsys *ws, unsigned cache);

   /**************************************************************************
    * Buffer management. Buffer attributes are mostly fixed over its lifetime.
    *
    * Remember that gallium gets to choose the interface it needs, and the
    * window systems must then implement that interface (rather than the
    * other way around...).
    *************************************************************************/

   /**
    * Create a buffer object.
    *
    * \param ws        The winsys this function is called from.
    * \param size      The size to allocate.
    * \param alignment An alignment of the buffer in memory.
    * \param use_reusable_pool Whether the cache buffer manager should be used.
    * \param domain    A bitmask of the RADEON_DOMAIN_* flags.
    * \return          The created buffer object.
    */
   struct pb_buffer *(*buffer_create)(struct radeon_winsys *ws,
                                      uint64_t size,
                                      unsigned alignment,
                                      enum radeon_bo_domain domain,
                                      enum radeon_bo_flag flags);

   /**
    * Map the entire data store of a buffer object into the client's address
    * space.
    *
    * Callers are expected to unmap buffers again if and only if the
    * RADEON_TRANSFER_TEMPORARY flag is set in \p usage.
    *
    * \param buf       A winsys buffer object to map.
    * \param cs        A command stream to flush if the buffer is referenced by it.
    * \param usage     A bitmask of the PIPE_TRANSFER_* and RADEON_TRANSFER_* flags.
    * \return          The pointer at the beginning of the buffer.
    */
   void *(*buffer_map)(struct pb_buffer *buf,
                       struct radeon_cmdbuf *cs,
                       enum pipe_transfer_usage usage);

   /**
    * Unmap a buffer object from the client's address space.
    *
    * \param buf       A winsys buffer object to unmap.
    */
   void (*buffer_unmap)(struct pb_buffer *buf);

   /**
    * Wait for the buffer and return true if the buffer is not used
    * by the device.
    *
    * The timeout of 0 will only return the status.
    * The timeout of PIPE_TIMEOUT_INFINITE will always wait until the buffer
    * is idle.
    */
   bool (*buffer_wait)(struct pb_buffer *buf, uint64_t timeout,
                       enum radeon_bo_usage usage);

   /**
    * Return buffer metadata.
    * (tiling info for display code, DRI sharing, and other data)
    *
    * \param buf       A winsys buffer object to get the flags from.
    * \param md        Metadata
    */
   void (*buffer_get_metadata)(struct pb_buffer *buf,
                               struct radeon_bo_metadata *md);

   /**
    * Set buffer metadata.
    * (tiling info for display code, DRI sharing, and other data)
    *
    * \param buf       A winsys buffer object to set the flags for.
    * \param md        Metadata
    */
   void (*buffer_set_metadata)(struct pb_buffer *buf,
                               struct radeon_bo_metadata *md);

   /**
    * Get a winsys buffer from a winsys handle. The internal structure
    * of the handle is platform-specific and only a winsys should access it.
    *
    * \param ws        The winsys this function is called from.
    * \param whandle   A winsys handle pointer as was received from a state
    *                  tracker.
    * \param stride    The returned buffer stride in bytes.
    */
   struct pb_buffer *(*buffer_from_handle)(struct radeon_winsys *ws,
                                           struct winsys_handle *whandle,
                                           unsigned vm_alignment,
                                           unsigned *stride, unsigned *offset);

   /**
    * Get a winsys buffer from a user pointer. The resulting buffer can't
    * be exported. Both pointer and size must be page aligned.
    *
    * \param ws        The winsys this function is called from.
    * \param pointer   User pointer to turn into a buffer object.
    * \param size      Size in bytes for the new buffer.
    */
   struct pb_buffer *(*buffer_from_ptr)(struct radeon_winsys *ws,
                                        void *pointer, uint64_t size);

   /**
    * Whether the buffer was created from a user pointer.
    *
    * \param buf       A winsys buffer object
    * \return          whether \p buf was created via buffer_from_ptr
    */
   bool (*buffer_is_user_ptr)(struct pb_buffer *buf);

   /** Whether the buffer was suballocated. */
   bool (*buffer_is_suballocated)(struct pb_buffer *buf);

   /**
    * Get a winsys handle from a winsys buffer. The internal structure
    * of the handle is platform-specific and only a winsys should access it.
    *
    * \param ws        The winsys instance for which the handle is to be valid
    * \param buf       A winsys buffer object to get the handle from.
    * \param whandle   A winsys handle pointer.
    * \param stride    A stride of the buffer in bytes, for texturing.
    * \return          true on success.
    */
   bool (*buffer_get_handle)(struct radeon_winsys *ws,
                             struct pb_buffer *buf,
                             unsigned stride, unsigned offset,
                             unsigned slice_size,
                             struct winsys_handle *whandle);

   /**
    * Change the commitment of a (64KB-page aligned) region of the given
    * sparse buffer.
    *
    * \warning There is no automatic synchronization with command submission.
    *
    * \note Only implemented by the amdgpu winsys.
    *
    * \return false on out of memory or other failure, true on success.
    */
   bool (*buffer_commit)(struct pb_buffer *buf,
                         uint64_t offset, uint64_t size,
                         bool commit);

   /**
    * Return the virtual address of a buffer.
    *
    * When virtual memory is not in use, this is the offset relative to the
    * relocation base (non-zero for sub-allocated buffers).
    *
    * \param buf       A winsys buffer object
    * \return          virtual address
    */
   uint64_t (*buffer_get_virtual_address)(struct pb_buffer *buf);

   /**
    * Return the offset of this buffer relative to the relocation base.
    * This is only non-zero for sub-allocated buffers.
    *
    * This is only supported in the radeon winsys, since amdgpu uses virtual
    * addresses in submissions even for the video engines.
    *
    * \param buf      A winsys buffer object
    * \return         the offset for relocations
    */
   unsigned (*buffer_get_reloc_offset)(struct pb_buffer *buf);

   /**
    * Query the initial placement of the buffer from the kernel driver.
    */
   enum radeon_bo_domain (*buffer_get_initial_domain)(struct pb_buffer *buf);

   /**************************************************************************
    * Command submission.
    *
    * Each pipe context should create its own command stream and submit
    * commands independently of other contexts.
    *************************************************************************/

   /**
    * Create a command submission context.
    * Various command streams can be submitted to the same context.
    */
   struct radeon_winsys_ctx *(*ctx_create)(struct radeon_winsys *ws);

   /**
    * Destroy a context.
    */
   void (*ctx_destroy)(struct radeon_winsys_ctx *ctx);

   /**
    * Query a GPU reset status.
    */
   enum pipe_reset_status (*ctx_query_reset_status)(struct radeon_winsys_ctx *ctx);

   /**
    * Create a command stream.
    *
    * \param ctx       The submission context
    * \param ring_type The ring type (GFX, DMA, UVD)
    * \param flush     Flush callback function associated with the command stream.
    * \param user      User pointer that will be passed to the flush callback.
    */
   struct radeon_cmdbuf *(*cs_create)(struct radeon_winsys_ctx *ctx,
                                      enum ring_type ring_type,
                                      void (*flush)(void *ctx, unsigned flags,
                                                    struct pipe_fence_handle **fence),
                                      void *flush_ctx,
                                      bool stop_exec_on_failure);

   /**
    * Add a parallel compute IB to a gfx IB. It will share the buffer list
    * and fence dependencies with the gfx IB. The gfx flush call will submit
    * both IBs at the same time.
    *
    * The compute IB doesn't have an output fence, so the primary IB has
    * to use a wait packet for synchronization.
    *
    * The returned IB is only a stream for writing packets to the new
    * IB. Calling other winsys functions with it is not allowed, not even
    * "cs_destroy". Use the gfx IB instead.
    *
    * \param cs              Gfx IB
    */
   struct radeon_cmdbuf *(*cs_add_parallel_compute_ib)(struct radeon_cmdbuf *cs,
                                                       bool uses_gds_ordered_append);

   /**
    * Destroy a command stream.
    *
    * \param cs        A command stream to destroy.
    */
   void (*cs_destroy)(struct radeon_cmdbuf *cs);

   /**
    * Add a buffer. Each buffer used by a CS must be added using this function.
    *
    * \param cs      Command stream
    * \param buf     Buffer
    * \param usage   Whether the buffer is used for read and/or write.
    * \param domain  Bitmask of the RADEON_DOMAIN_* flags.
    * \param priority  A higher number means a greater chance of being
    *                  placed in the requested domain. 15 is the maximum.
    * \return Buffer index.
    */
   unsigned (*cs_add_buffer)(struct radeon_cmdbuf *cs,
                             struct pb_buffer *buf,
                             enum radeon_bo_usage usage,
                             enum radeon_bo_domain domain,
                             enum radeon_bo_priority priority);

   /**
    * Return the index of an already-added buffer.
    *
    * Not supported on amdgpu. Drivers with GPUVM should not care about
    * buffer indices.
    *
    * \param cs        Command stream
    * \param buf       Buffer
    * \return          The buffer index, or -1 if the buffer has not been added.
    */
   int (*cs_lookup_buffer)(struct radeon_cmdbuf *cs,
                           struct pb_buffer *buf);

   /**
    * Return true if there is enough memory in VRAM and GTT for the buffers
    * added so far. If the validation fails, all buffers which have
    * been added since the last call of cs_validate will be removed and
    * the CS will be flushed (provided there are still any buffers).
    *
    * \param cs        A command stream to validate.
    */
   bool (*cs_validate)(struct radeon_cmdbuf *cs);

   /**
    * Check whether the given number of dwords is available in the IB.
    * Optionally chain a new chunk of the IB if necessary and supported.
    *
    * \param cs        A command stream.
    * \param dw        Number of CS dwords requested by the caller.
    * \param force_chaining  Chain the IB into a new buffer now to discard
    *                        the CP prefetch cache (to emulate PKT3_REWIND)
    * \return true if there is enough space
    */
   bool (*cs_check_space)(struct radeon_cmdbuf *cs, unsigned dw,
                          bool force_chaining);

   /**
    * Return the buffer list.
    *
    * This is the buffer list as passed to the kernel, i.e. it only contains
    * the parent buffers of sub-allocated buffers.
    *
    * \param cs    Command stream
    * \param list  Returned buffer list. Set to NULL to query the count only.
    * \return      The buffer count.
    */
   unsigned (*cs_get_buffer_list)(struct radeon_cmdbuf *cs,
                                  struct radeon_bo_list_item *list);

   /**
    * Flush a command stream.
    *
    * \param cs          A command stream to flush.
    * \param flags       PIPE_FLUSH_* flags.
    * \param fence       Pointer to a fence. If non-NULL, a fence is inserted
    *                    after the CS and is returned through this parameter.
    * \return Negative POSIX error code or 0 for success.
    *         Asynchronous submissions never return an error.
    */
   int (*cs_flush)(struct radeon_cmdbuf *cs,
                   unsigned flags,
                   struct pipe_fence_handle **fence);

   /**
    * Create a fence before the CS is flushed.
    * The user must flush manually to complete the initialization of the fence.
    *
    * The fence must not be used for anything except \ref cs_add_fence_dependency
    * before the flush.
    */
   struct pipe_fence_handle *(*cs_get_next_fence)(struct radeon_cmdbuf *cs);

   /**
    * Return true if a buffer is referenced by a command stream.
    *
    * \param cs        A command stream.
    * \param buf       A winsys buffer.
    */
   bool (*cs_is_buffer_referenced)(struct radeon_cmdbuf *cs,
                                   struct pb_buffer *buf,
                                   enum radeon_bo_usage usage);

   /**
    * Request access to a feature for a command stream.
    *
    * \param cs        A command stream.
    * \param fid       Feature ID, one of RADEON_FID_*
    * \param enable    Whether to enable or disable the feature.
    */
   bool (*cs_request_feature)(struct radeon_cmdbuf *cs,
                              enum radeon_feature_id fid,
                              bool enable);
   /**
    * Make sure all asynchronous flush of the cs have completed
    *
    * \param cs        A command stream.
    */
   void (*cs_sync_flush)(struct radeon_cmdbuf *cs);

   /**
    * Add a fence dependency to the CS, so that the CS will wait for
    * the fence before execution.
    *
    * \param dependency_flags  Bitmask of RADEON_DEPENDENCY_*
    */
   void (*cs_add_fence_dependency)(struct radeon_cmdbuf *cs,
                                   struct pipe_fence_handle *fence,
                                   unsigned dependency_flags);

   /**
    * Signal a syncobj when the CS finishes execution.
    */
   void (*cs_add_syncobj_signal)(struct radeon_cmdbuf *cs,
                                 struct pipe_fence_handle *fence);

   /**
    * Wait for the fence and return true if the fence has been signalled.
    * The timeout of 0 will only return the status.
    * The timeout of PIPE_TIMEOUT_INFINITE will always wait until the fence
    * is signalled.
    */
   bool (*fence_wait)(struct radeon_winsys *ws,
                      struct pipe_fence_handle *fence,
                      uint64_t timeout);

   /**
    * Reference counting for fences.
    */
   void (*fence_reference)(struct pipe_fence_handle **dst,
                           struct pipe_fence_handle *src);

   /**
    * Create a new fence object corresponding to the given syncobj fd.
    */
   struct pipe_fence_handle *(*fence_import_syncobj)(struct radeon_winsys *ws,
                                                     int fd);

   /**
    * Create a new fence object corresponding to the given sync_file.
    */
   struct pipe_fence_handle *(*fence_import_sync_file)(struct radeon_winsys *ws,
                                                       int fd);

   /**
    * Return a sync_file FD corresponding to the given fence object.
    */
   int (*fence_export_sync_file)(struct radeon_winsys *ws,
                                 struct pipe_fence_handle *fence);

   /**
    * Return a sync file FD that is already signalled.
    */
   int (*export_signalled_sync_file)(struct radeon_winsys *ws);

   /**
    * Initialize surface
    *
    * \param ws        The winsys this function is called from.
    * \param tex       Input texture description
    * \param flags     Bitmask of RADEON_SURF_* flags
    * \param bpe       Bytes per pixel, it can be different for Z buffers.
    * \param mode      Preferred tile mode. (linear, 1D, or 2D)
    * \param surf      Output structure
    */
   int (*surface_init)(struct radeon_winsys *ws,
                       const struct pipe_resource *tex,
                       unsigned flags, unsigned bpe,
                       enum radeon_surf_mode mode,
                       struct radeon_surf *surf);

   /**
    * Query one of the RADEON_* counters/statistics (see radeon_value_id).
    */
   uint64_t (*query_value)(struct radeon_winsys *ws,
                           enum radeon_value_id value);

   /**
    * Read a range of GPU registers into \p out.
    *
    * \return true on success (assumed from the bool return type —
    *         TODO confirm against the winsys implementations).
    */
   bool (*read_registers)(struct radeon_winsys *ws, unsigned reg_offset,
                          unsigned num_registers, uint32_t *out);
};
723
724 static inline bool radeon_emitted(struct radeon_cmdbuf *cs, unsigned num_dw)
725 {
726 return cs && (cs->prev_dw + cs->current.cdw > num_dw);
727 }
728
729 static inline void radeon_emit(struct radeon_cmdbuf *cs, uint32_t value)
730 {
731 cs->current.buf[cs->current.cdw++] = value;
732 }
733
734 static inline void radeon_emit_array(struct radeon_cmdbuf *cs,
735 const uint32_t *values, unsigned count)
736 {
737 memcpy(cs->current.buf + cs->current.cdw, values, count * 4);
738 cs->current.cdw += count;
739 }
740
/* Heaps used by the winsys slab/cache allocators. Each heap is a fixed
 * (domain, flags) combination; see radeon_domain_from_heap and
 * radeon_flags_from_heap below for the mapping. */
enum radeon_heap {
   RADEON_HEAP_VRAM_NO_CPU_ACCESS,
   RADEON_HEAP_VRAM_READ_ONLY,
   RADEON_HEAP_VRAM_READ_ONLY_32BIT,
   RADEON_HEAP_VRAM_32BIT,
   RADEON_HEAP_VRAM,
   RADEON_HEAP_GTT_WC,
   RADEON_HEAP_GTT_WC_READ_ONLY,
   RADEON_HEAP_GTT_WC_READ_ONLY_32BIT,
   RADEON_HEAP_GTT_WC_32BIT,
   RADEON_HEAP_GTT,
   RADEON_MAX_SLAB_HEAPS,
   RADEON_MAX_CACHED_HEAPS = RADEON_MAX_SLAB_HEAPS,
};
755
756 static inline enum radeon_bo_domain radeon_domain_from_heap(enum radeon_heap heap)
757 {
758 switch (heap) {
759 case RADEON_HEAP_VRAM_NO_CPU_ACCESS:
760 case RADEON_HEAP_VRAM_READ_ONLY:
761 case RADEON_HEAP_VRAM_READ_ONLY_32BIT:
762 case RADEON_HEAP_VRAM_32BIT:
763 case RADEON_HEAP_VRAM:
764 return RADEON_DOMAIN_VRAM;
765 case RADEON_HEAP_GTT_WC:
766 case RADEON_HEAP_GTT_WC_READ_ONLY:
767 case RADEON_HEAP_GTT_WC_READ_ONLY_32BIT:
768 case RADEON_HEAP_GTT_WC_32BIT:
769 case RADEON_HEAP_GTT:
770 return RADEON_DOMAIN_GTT;
771 default:
772 assert(0);
773 return (enum radeon_bo_domain)0;
774 }
775 }
776
777 static inline unsigned radeon_flags_from_heap(enum radeon_heap heap)
778 {
779 unsigned flags = RADEON_FLAG_NO_INTERPROCESS_SHARING |
780 (heap != RADEON_HEAP_GTT ? RADEON_FLAG_GTT_WC : 0);
781
782 switch (heap) {
783 case RADEON_HEAP_VRAM_NO_CPU_ACCESS:
784 return flags |
785 RADEON_FLAG_NO_CPU_ACCESS;
786
787 case RADEON_HEAP_VRAM_READ_ONLY:
788 case RADEON_HEAP_GTT_WC_READ_ONLY:
789 return flags |
790 RADEON_FLAG_READ_ONLY;
791
792 case RADEON_HEAP_VRAM_READ_ONLY_32BIT:
793 case RADEON_HEAP_GTT_WC_READ_ONLY_32BIT:
794 return flags |
795 RADEON_FLAG_READ_ONLY |
796 RADEON_FLAG_32BIT;
797
798 case RADEON_HEAP_VRAM_32BIT:
799 case RADEON_HEAP_GTT_WC_32BIT:
800 return flags |
801 RADEON_FLAG_32BIT;
802
803 case RADEON_HEAP_VRAM:
804 case RADEON_HEAP_GTT_WC:
805 case RADEON_HEAP_GTT:
806 default:
807 return flags;
808 }
809 }
810
/* Return the heap index for winsys allocators, or -1 on failure.
 *
 * Inverse of radeon_domain_from_heap / radeon_flags_from_heap: maps a
 * (domain, flags) pair back to the single heap that produces it. Returns
 * -1 for any combination the allocators don't support; invalid flag
 * combinations additionally assert in debug builds. */
static inline int radeon_get_heap_index(enum radeon_bo_domain domain,
                                        enum radeon_bo_flag flags)
{
   /* VRAM implies WC (write combining) */
   assert(!(domain & RADEON_DOMAIN_VRAM) || flags & RADEON_FLAG_GTT_WC);
   /* NO_CPU_ACCESS implies VRAM only. */
   assert(!(flags & RADEON_FLAG_NO_CPU_ACCESS) || domain == RADEON_DOMAIN_VRAM);

   /* Resources with interprocess sharing don't use any winsys allocators. */
   if (!(flags & RADEON_FLAG_NO_INTERPROCESS_SHARING))
      return -1;

   /* Unsupported flags: NO_SUBALLOC, SPARSE. */
   if (flags & ~(RADEON_FLAG_GTT_WC |
                 RADEON_FLAG_NO_CPU_ACCESS |
                 RADEON_FLAG_NO_INTERPROCESS_SHARING |
                 RADEON_FLAG_READ_ONLY |
                 RADEON_FLAG_32BIT))
      return -1;

   switch (domain) {
   case RADEON_DOMAIN_VRAM:
      /* Dispatch on the exact combination of the heap-relevant flags. */
      switch (flags & (RADEON_FLAG_NO_CPU_ACCESS |
                       RADEON_FLAG_READ_ONLY |
                       RADEON_FLAG_32BIT)) {
      case RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT:
      case RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_READ_ONLY:
         assert(!"NO_CPU_ACCESS | READ_ONLY doesn't make sense");
         return -1;
      case RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_32BIT:
         assert(!"NO_CPU_ACCESS with 32BIT is disallowed");
         return -1;
      case RADEON_FLAG_NO_CPU_ACCESS:
         return RADEON_HEAP_VRAM_NO_CPU_ACCESS;
      case RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT:
         return RADEON_HEAP_VRAM_READ_ONLY_32BIT;
      case RADEON_FLAG_READ_ONLY:
         return RADEON_HEAP_VRAM_READ_ONLY;
      case RADEON_FLAG_32BIT:
         return RADEON_HEAP_VRAM_32BIT;
      case 0:
         return RADEON_HEAP_VRAM;
      }
      break;
   case RADEON_DOMAIN_GTT:
      /* GTT heaps never use NO_CPU_ACCESS; dispatch on WC/RO/32BIT. */
      switch (flags & (RADEON_FLAG_GTT_WC |
                       RADEON_FLAG_READ_ONLY |
                       RADEON_FLAG_32BIT)) {
      case RADEON_FLAG_GTT_WC | RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT:
         return RADEON_HEAP_GTT_WC_READ_ONLY_32BIT;
      case RADEON_FLAG_GTT_WC | RADEON_FLAG_READ_ONLY:
         return RADEON_HEAP_GTT_WC_READ_ONLY;
      case RADEON_FLAG_GTT_WC | RADEON_FLAG_32BIT:
         return RADEON_HEAP_GTT_WC_32BIT;
      case RADEON_FLAG_GTT_WC:
         return RADEON_HEAP_GTT_WC;
      case RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT:
      case RADEON_FLAG_READ_ONLY:
         assert(!"READ_ONLY without WC is disallowed");
         return -1;
      case RADEON_FLAG_32BIT:
         assert(!"32BIT without WC is disallowed");
         return -1;
      case 0:
         return RADEON_HEAP_GTT;
      }
      break;
   default:
      /* VRAM_GTT, GDS, OA, etc. have no allocator heap. */
      break;
   }
   return -1;
}
884
885 #endif