9aba2a9d95fb8890211676cc91de110bdaa9cade
[mesa.git] / src / gallium / drivers / radeon / radeon_winsys.h
1 /*
2 * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
3 * Copyright 2010 Marek Olšák <maraeo@gmail.com>
4 * Copyright 2018 Advanced Micro Devices, Inc.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * on the rights to use, copy, modify, merge, publish, distribute, sub
11 * license, and/or sell copies of the Software, and to permit persons to whom
12 * the Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE. */
25
26 #ifndef RADEON_WINSYS_H
27 #define RADEON_WINSYS_H
28
29 /* The public winsys interface header for the radeon driver. */
30
/* Whether the next IB can start immediately and not wait for draws and
 * dispatches from the current IB to finish. */
#define RADEON_FLUSH_START_NEXT_GFX_IB_NOW (1u << 31)

/* Combination flag: flush asynchronously AND let the next gfx IB start now. */
#define RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW \
   (PIPE_FLUSH_ASYNC | RADEON_FLUSH_START_NEXT_GFX_IB_NOW)
37
38 #include "amd/common/ac_gpu_info.h"
39 #include "amd/common/ac_surface.h"
40 #include "pipebuffer/pb_buffer.h"
41
42 /* Tiling flags. */
/* Buffer tiling layout, used for display code and DRI sharing metadata. */
enum radeon_bo_layout
{
   RADEON_LAYOUT_LINEAR = 0, /* no tiling */
   RADEON_LAYOUT_TILED,
   RADEON_LAYOUT_SQUARETILED,

   RADEON_LAYOUT_UNKNOWN /* layout could not be determined */
};
51
/* Memory domains a buffer can be placed in. */
enum radeon_bo_domain
{ /* bitfield */
   RADEON_DOMAIN_GTT = 2,  /* system memory accessible through the GPU's GART */
   RADEON_DOMAIN_VRAM = 4, /* dedicated video memory */
   RADEON_DOMAIN_VRAM_GTT = RADEON_DOMAIN_VRAM | RADEON_DOMAIN_GTT,
   RADEON_DOMAIN_GDS = 8,  /* on-chip global data share */
   RADEON_DOMAIN_OA = 16,  /* ordered append memory */
};
60
/* Buffer-creation flags; see radeon_winsys::buffer_create. */
enum radeon_bo_flag
{ /* bitfield */
   RADEON_FLAG_GTT_WC = (1 << 0),          /* write-combined CPU mapping */
   RADEON_FLAG_NO_CPU_ACCESS = (1 << 1),   /* the CPU will never map the buffer */
   RADEON_FLAG_NO_SUBALLOC = (1 << 2),     /* don't place in a suballocated slab */
   RADEON_FLAG_SPARSE = (1 << 3),          /* partially-resident; see buffer_commit */
   RADEON_FLAG_NO_INTERPROCESS_SHARING = (1 << 4), /* never exported to another process */
   RADEON_FLAG_READ_ONLY = (1 << 5),
   RADEON_FLAG_32BIT = (1 << 6),           /* allocate in the 32-bit VA range */
   RADEON_FLAG_ENCRYPTED = (1 << 7),
   RADEON_FLAG_UNCACHED = (1 << 8), /* only gfx9 and newer */
};
73
/* Flags for radeon_winsys::cs_add_fence_dependency. */
enum radeon_dependency_flag
{
   /* Add the dependency to the parallel compute IB only. */
   RADEON_DEPENDENCY_PARALLEL_COMPUTE_ONLY = 1 << 0,

   /* Instead of waiting for a job to finish execution, the dependency will
    * be signaled when the job starts execution.
    */
   RADEON_DEPENDENCY_START_FENCE = 1 << 1,
};
84
/* How a buffer is used by a CS; see radeon_winsys::cs_add_buffer. */
enum radeon_bo_usage
{ /* bitfield */
   RADEON_USAGE_READ = 2,
   RADEON_USAGE_WRITE = 4,
   RADEON_USAGE_READWRITE = RADEON_USAGE_READ | RADEON_USAGE_WRITE,

   /* The winsys ensures that the CS submission will be scheduled after
    * previously flushed CSs referencing this BO in a conflicting way.
    */
   RADEON_USAGE_SYNCHRONIZED = 8
};
96
/* Extra winsys-private flags passed to buffer_map alongside PIPE_TRANSFER_*. */
enum radeon_transfer_flags
{
   /* Indicates that the caller will unmap the buffer.
    *
    * Not unmapping buffers is an important performance optimization for
    * OpenGL (avoids kernel overhead for frequently mapped buffers).
    */
   RADEON_TRANSFER_TEMPORARY = (PIPE_TRANSFER_DRV_PRV << 0),
};

/* Granularity of sparse-buffer commitment; see buffer_commit. */
#define RADEON_SPARSE_PAGE_SIZE (64 * 1024)
108
/* Values queryable through radeon_winsys::query_value. */
enum radeon_value_id
{
   RADEON_REQUESTED_VRAM_MEMORY,
   RADEON_REQUESTED_GTT_MEMORY,
   RADEON_MAPPED_VRAM,
   RADEON_MAPPED_GTT,
   RADEON_BUFFER_WAIT_TIME_NS,
   RADEON_NUM_MAPPED_BUFFERS,
   RADEON_TIMESTAMP,
   RADEON_NUM_GFX_IBS,
   RADEON_NUM_SDMA_IBS,
   RADEON_GFX_BO_LIST_COUNTER, /* number of BOs submitted in gfx IBs */
   RADEON_GFX_IB_SIZE_COUNTER,
   RADEON_NUM_BYTES_MOVED,
   RADEON_NUM_EVICTIONS,
   RADEON_NUM_VRAM_CPU_PAGE_FAULTS,
   RADEON_VRAM_USAGE,
   RADEON_VRAM_VIS_USAGE, /* CPU-visible VRAM usage */
   RADEON_GTT_USAGE,
   RADEON_GPU_TEMPERATURE, /* DRM 2.42.0 */
   RADEON_CURRENT_SCLK,
   RADEON_CURRENT_MCLK,
   RADEON_CS_THREAD_TIME,
};
133
/* Residency priority of a buffer within a CS; passed to cs_add_buffer.
 * Values must fit in 5 bits (31 max) so that (1 << priority) fits in the
 * 32-bit priority_usage mask of radeon_bo_list_item.
 */
enum radeon_bo_priority
{
   /* Each group of two has the same priority. */
   RADEON_PRIO_FENCE = 0,
   RADEON_PRIO_TRACE,

   RADEON_PRIO_SO_FILLED_SIZE = 2,
   RADEON_PRIO_QUERY,

   RADEON_PRIO_IB1 = 4, /* main IB submitted to the kernel */
   RADEON_PRIO_IB2,     /* IB executed with INDIRECT_BUFFER */

   RADEON_PRIO_DRAW_INDIRECT = 6,
   RADEON_PRIO_INDEX_BUFFER,

   RADEON_PRIO_CP_DMA = 8,
   RADEON_PRIO_BORDER_COLORS,

   RADEON_PRIO_CONST_BUFFER = 10,
   RADEON_PRIO_DESCRIPTORS,

   RADEON_PRIO_SAMPLER_BUFFER = 12,
   RADEON_PRIO_VERTEX_BUFFER,

   RADEON_PRIO_SHADER_RW_BUFFER = 14,
   RADEON_PRIO_COMPUTE_GLOBAL,

   RADEON_PRIO_SAMPLER_TEXTURE = 16,
   RADEON_PRIO_SHADER_RW_IMAGE,

   RADEON_PRIO_SAMPLER_TEXTURE_MSAA = 18,
   RADEON_PRIO_COLOR_BUFFER,

   RADEON_PRIO_DEPTH_BUFFER = 20,

   RADEON_PRIO_COLOR_BUFFER_MSAA = 22,

   RADEON_PRIO_DEPTH_BUFFER_MSAA = 24,

   RADEON_PRIO_SEPARATE_META = 26,
   RADEON_PRIO_SHADER_BINARY, /* the hw can't hide instruction cache misses */

   RADEON_PRIO_SHADER_RINGS = 28,

   RADEON_PRIO_SCRATCH_BUFFER = 30,
   /* 31 is the maximum value */
};
181
struct winsys_handle;
struct radeon_winsys_ctx;

/* One contiguous chunk of an IB (command buffer). */
struct radeon_cmdbuf_chunk {
   unsigned cdw;    /* Number of used dwords. */
   unsigned max_dw; /* Maximum number of dwords. */
   uint32_t *buf;   /* The base pointer of the chunk. */
};
190
/* A command stream: the chunk currently being filled plus previously filled
 * (chained) chunks. Packets are written with radeon_emit/radeon_emit_array.
 */
struct radeon_cmdbuf {
   struct radeon_cmdbuf_chunk current; /* the chunk being written right now */
   struct radeon_cmdbuf_chunk *prev;   /* array of completed chunks */
   unsigned num_prev; /* Number of previous chunks. */
   unsigned max_prev; /* Space in array pointed to by prev. */
   unsigned prev_dw;  /* Total number of dwords in previous chunks. */

   /* Memory usage of the buffer list. These are always 0 for preamble IBs. */
   uint64_t used_vram;
   uint64_t used_gart;
   uint64_t gpu_address;
};
203
/* Tiling info for display code, DRI sharing, and other data. */
struct radeon_bo_metadata {
   /* Tiling flags describing the texture layout for display code
    * and DRI sharing.
    */
   union {
      struct {
         enum radeon_bo_layout microtile;
         enum radeon_bo_layout macrotile;
         unsigned pipe_config;
         unsigned bankw;
         unsigned bankh;
         unsigned tile_split;
         unsigned mtilea;
         unsigned num_banks;
         unsigned stride;
         bool scanout;
      } legacy;
   } u;

   enum radeon_surf_mode mode; /* Output from buffer_get_metadata */

   /* Additional metadata associated with the buffer, in bytes.
    * The maximum size is 64 * 4. This is opaque for the winsys & kernel.
    * Supported by amdgpu only.
    */
   uint32_t size_metadata;
   uint32_t metadata[64];
};
233
/* Features requestable through radeon_winsys::cs_request_feature. */
enum radeon_feature_id
{
   RADEON_FID_R300_HYPERZ_ACCESS, /* ZMask + HiZ */
   RADEON_FID_R300_CMASK_ACCESS,
};
239
/* One entry of the buffer list returned by cs_get_buffer_list. */
struct radeon_bo_list_item {
   uint64_t bo_size;
   uint64_t vm_address;
   uint32_t priority_usage; /* mask of (1 << RADEON_PRIO_*) */
};
245
struct radeon_winsys {
   /**
    * The screen object this winsys was created for
    */
   struct pipe_screen *screen;

   /**
    * Decrement the winsys reference count.
    *
    * \param ws The winsys this function is called for.
    * \return True if the winsys and screen should be destroyed.
    */
   bool (*unref)(struct radeon_winsys *ws);

   /**
    * Destroy this winsys.
    *
    * \param ws The winsys this function is called from.
    */
   void (*destroy)(struct radeon_winsys *ws);

   /**
    * Query an info structure from winsys.
    *
    * \param ws The winsys this function is called from.
    * \param info Return structure
    */
   void (*query_info)(struct radeon_winsys *ws, struct radeon_info *info);

   /**
    * A hint for the winsys that it should pin its execution threads to
    * a group of cores sharing a specific L3 cache if the CPU has multiple
    * L3 caches. This is needed for good multithreading performance on
    * AMD Zen CPUs.
    */
   void (*pin_threads_to_L3_cache)(struct radeon_winsys *ws, unsigned cache);

   /**************************************************************************
    * Buffer management. Buffer attributes are mostly fixed over its lifetime.
    *
    * Remember that gallium gets to choose the interface it needs, and the
    * window systems must then implement that interface (rather than the
    * other way around...).
    *************************************************************************/

   /**
    * Create a buffer object.
    *
    * \param ws The winsys this function is called from.
    * \param size The size to allocate.
    * \param alignment An alignment of the buffer in memory.
    * \param domain A bitmask of the RADEON_DOMAIN_* flags.
    * \param flags A bitmask of the RADEON_FLAG_* flags.
    * \return The created buffer object.
    */
   struct pb_buffer *(*buffer_create)(struct radeon_winsys *ws, uint64_t size, unsigned alignment,
                                      enum radeon_bo_domain domain, enum radeon_bo_flag flags);

   /**
    * Map the entire data store of a buffer object into the client's address
    * space.
    *
    * Callers are expected to unmap buffers again if and only if the
    * RADEON_TRANSFER_TEMPORARY flag is set in \p usage.
    *
    * \param buf A winsys buffer object to map.
    * \param cs A command stream to flush if the buffer is referenced by it.
    * \param usage A bitmask of the PIPE_TRANSFER_* and RADEON_TRANSFER_* flags.
    * \return The pointer at the beginning of the buffer.
    */
   void *(*buffer_map)(struct pb_buffer *buf, struct radeon_cmdbuf *cs,
                       enum pipe_transfer_usage usage);

   /**
    * Unmap a buffer object from the client's address space.
    *
    * \param buf A winsys buffer object to unmap.
    */
   void (*buffer_unmap)(struct pb_buffer *buf);

   /**
    * Wait for the buffer and return true if the buffer is not used
    * by the device.
    *
    * The timeout of 0 will only return the status.
    * The timeout of PIPE_TIMEOUT_INFINITE will always wait until the buffer
    * is idle.
    */
   bool (*buffer_wait)(struct pb_buffer *buf, uint64_t timeout, enum radeon_bo_usage usage);

   /**
    * Return buffer metadata.
    * (tiling info for display code, DRI sharing, and other data)
    *
    * \param buf A winsys buffer object to get the flags from.
    * \param md Metadata
    */
   void (*buffer_get_metadata)(struct pb_buffer *buf, struct radeon_bo_metadata *md,
                               struct radeon_surf *surf);

   /**
    * Set buffer metadata.
    * (tiling info for display code, DRI sharing, and other data)
    *
    * \param buf A winsys buffer object to set the flags for.
    * \param md Metadata
    */
   void (*buffer_set_metadata)(struct pb_buffer *buf, struct radeon_bo_metadata *md,
                               struct radeon_surf *surf);

   /**
    * Get a winsys buffer from a winsys handle. The internal structure
    * of the handle is platform-specific and only a winsys should access it.
    *
    * \param ws The winsys this function is called from.
    * \param whandle A winsys handle pointer as was received from a state
    *                tracker.
    */
   struct pb_buffer *(*buffer_from_handle)(struct radeon_winsys *ws, struct winsys_handle *whandle,
                                           unsigned vm_alignment);

   /**
    * Get a winsys buffer from a user pointer. The resulting buffer can't
    * be exported. Both pointer and size must be page aligned.
    *
    * \param ws The winsys this function is called from.
    * \param pointer User pointer to turn into a buffer object.
    * \param size Size in bytes for the new buffer.
    */
   struct pb_buffer *(*buffer_from_ptr)(struct radeon_winsys *ws, void *pointer, uint64_t size);

   /**
    * Whether the buffer was created from a user pointer.
    *
    * \param buf A winsys buffer object
    * \return whether \p buf was created via buffer_from_ptr
    */
   bool (*buffer_is_user_ptr)(struct pb_buffer *buf);

   /** Whether the buffer was suballocated. */
   bool (*buffer_is_suballocated)(struct pb_buffer *buf);

   /**
    * Get a winsys handle from a winsys buffer. The internal structure
    * of the handle is platform-specific and only a winsys should access it.
    *
    * \param ws The winsys instance for which the handle is to be valid
    * \param buf A winsys buffer object to get the handle from.
    * \param whandle A winsys handle pointer.
    * \return true on success.
    */
   bool (*buffer_get_handle)(struct radeon_winsys *ws, struct pb_buffer *buf,
                             struct winsys_handle *whandle);

   /**
    * Change the commitment of a (64KB-page aligned) region of the given
    * sparse buffer.
    *
    * \warning There is no automatic synchronization with command submission.
    *
    * \note Only implemented by the amdgpu winsys.
    *
    * \return false on out of memory or other failure, true on success.
    */
   bool (*buffer_commit)(struct pb_buffer *buf, uint64_t offset, uint64_t size, bool commit);

   /**
    * Return the virtual address of a buffer.
    *
    * When virtual memory is not in use, this is the offset relative to the
    * relocation base (non-zero for sub-allocated buffers).
    *
    * \param buf A winsys buffer object
    * \return virtual address
    */
   uint64_t (*buffer_get_virtual_address)(struct pb_buffer *buf);

   /**
    * Return the offset of this buffer relative to the relocation base.
    * This is only non-zero for sub-allocated buffers.
    *
    * This is only supported in the radeon winsys, since amdgpu uses virtual
    * addresses in submissions even for the video engines.
    *
    * \param buf A winsys buffer object
    * \return the offset for relocations
    */
   unsigned (*buffer_get_reloc_offset)(struct pb_buffer *buf);

   /**
    * Query the initial placement of the buffer from the kernel driver.
    */
   enum radeon_bo_domain (*buffer_get_initial_domain)(struct pb_buffer *buf);

   /**
    * Query the flags used for creation of this buffer.
    *
    * Note that for imported buffer this may be lossy since not all flags
    * are passed 1:1.
    */
   enum radeon_bo_flag (*buffer_get_flags)(struct pb_buffer *buf);

   /**************************************************************************
    * Command submission.
    *
    * Each pipe context should create its own command stream and submit
    * commands independently of other contexts.
    *************************************************************************/

   /**
    * Create a command submission context.
    * Various command streams can be submitted to the same context.
    */
   struct radeon_winsys_ctx *(*ctx_create)(struct radeon_winsys *ws);

   /**
    * Destroy a context.
    */
   void (*ctx_destroy)(struct radeon_winsys_ctx *ctx);

   /**
    * Query a GPU reset status.
    */
   enum pipe_reset_status (*ctx_query_reset_status)(struct radeon_winsys_ctx *ctx);

   /**
    * Create a command stream.
    *
    * \param ctx The submission context
    * \param ring_type The ring type (GFX, DMA, UVD)
    * \param flush Flush callback function associated with the command stream.
    * \param user User pointer that will be passed to the flush callback.
    */
   struct radeon_cmdbuf *(*cs_create)(struct radeon_winsys_ctx *ctx, enum ring_type ring_type,
                                      void (*flush)(void *ctx, unsigned flags,
                                                    struct pipe_fence_handle **fence),
                                      void *flush_ctx, bool stop_exec_on_failure);

   /**
    * Add a parallel compute IB to a gfx IB. It will share the buffer list
    * and fence dependencies with the gfx IB. The gfx flush call will submit
    * both IBs at the same time.
    *
    * The compute IB doesn't have an output fence, so the primary IB has
    * to use a wait packet for synchronization.
    *
    * The returned IB is only a stream for writing packets to the new
    * IB. Calling other winsys functions with it is not allowed, not even
    * "cs_destroy". Use the gfx IB instead.
    *
    * \param cs Gfx IB
    */
   struct radeon_cmdbuf *(*cs_add_parallel_compute_ib)(struct radeon_cmdbuf *cs,
                                                       bool uses_gds_ordered_append);

   /**
    * Destroy a command stream.
    *
    * \param cs A command stream to destroy.
    */
   void (*cs_destroy)(struct radeon_cmdbuf *cs);

   /**
    * Add a buffer. Each buffer used by a CS must be added using this function.
    *
    * \param cs Command stream
    * \param buf Buffer
    * \param usage Whether the buffer is used for read and/or write.
    * \param domain Bitmask of the RADEON_DOMAIN_* flags.
    * \param priority A higher number means a greater chance of being
    *                 placed in the requested domain. 15 is the maximum.
    * \return Buffer index.
    */
   unsigned (*cs_add_buffer)(struct radeon_cmdbuf *cs, struct pb_buffer *buf,
                             enum radeon_bo_usage usage, enum radeon_bo_domain domain,
                             enum radeon_bo_priority priority);

   /**
    * Return the index of an already-added buffer.
    *
    * Not supported on amdgpu. Drivers with GPUVM should not care about
    * buffer indices.
    *
    * \param cs Command stream
    * \param buf Buffer
    * \return The buffer index, or -1 if the buffer has not been added.
    */
   int (*cs_lookup_buffer)(struct radeon_cmdbuf *cs, struct pb_buffer *buf);

   /**
    * Return true if there is enough memory in VRAM and GTT for the buffers
    * added so far. If the validation fails, all buffers which have
    * been added since the last call of cs_validate will be removed and
    * the CS will be flushed (provided there are still any buffers).
    *
    * \param cs A command stream to validate.
    */
   bool (*cs_validate)(struct radeon_cmdbuf *cs);

   /**
    * Check whether the given number of dwords is available in the IB.
    * Optionally chain a new chunk of the IB if necessary and supported.
    *
    * \param cs A command stream.
    * \param dw Number of CS dwords requested by the caller.
    * \param force_chaining Chain the IB into a new buffer now to discard
    *                       the CP prefetch cache (to emulate PKT3_REWIND)
    * \return true if there is enough space
    */
   bool (*cs_check_space)(struct radeon_cmdbuf *cs, unsigned dw, bool force_chaining);

   /**
    * Return the buffer list.
    *
    * This is the buffer list as passed to the kernel, i.e. it only contains
    * the parent buffers of sub-allocated buffers.
    *
    * \param cs Command stream
    * \param list Returned buffer list. Set to NULL to query the count only.
    * \return The buffer count.
    */
   unsigned (*cs_get_buffer_list)(struct radeon_cmdbuf *cs, struct radeon_bo_list_item *list);

   /**
    * Flush a command stream.
    *
    * \param cs A command stream to flush.
    * \param flags PIPE_FLUSH_* flags.
    * \param fence Pointer to a fence. If non-NULL, a fence is inserted
    *              after the CS and is returned through this parameter.
    * \return Negative POSIX error code or 0 for success.
    *         Asynchronous submissions never return an error.
    */
   int (*cs_flush)(struct radeon_cmdbuf *cs, unsigned flags, struct pipe_fence_handle **fence);

   /**
    * Create a fence before the CS is flushed.
    * The user must flush manually to complete the initialization of the fence.
    *
    * The fence must not be used for anything except \ref cs_add_fence_dependency
    * before the flush.
    */
   struct pipe_fence_handle *(*cs_get_next_fence)(struct radeon_cmdbuf *cs);

   /**
    * Return true if a buffer is referenced by a command stream.
    *
    * \param cs A command stream.
    * \param buf A winsys buffer.
    */
   bool (*cs_is_buffer_referenced)(struct radeon_cmdbuf *cs, struct pb_buffer *buf,
                                   enum radeon_bo_usage usage);

   /**
    * Request access to a feature for a command stream.
    *
    * \param cs A command stream.
    * \param fid Feature ID, one of RADEON_FID_*
    * \param enable Whether to enable or disable the feature.
    */
   bool (*cs_request_feature)(struct radeon_cmdbuf *cs, enum radeon_feature_id fid, bool enable);

   /**
    * Make sure all asynchronous flush of the cs have completed
    *
    * \param cs A command stream.
    */
   void (*cs_sync_flush)(struct radeon_cmdbuf *cs);

   /**
    * Add a fence dependency to the CS, so that the CS will wait for
    * the fence before execution.
    *
    * \param dependency_flags Bitmask of RADEON_DEPENDENCY_*
    */
   void (*cs_add_fence_dependency)(struct radeon_cmdbuf *cs, struct pipe_fence_handle *fence,
                                   unsigned dependency_flags);

   /**
    * Signal a syncobj when the CS finishes execution.
    */
   void (*cs_add_syncobj_signal)(struct radeon_cmdbuf *cs, struct pipe_fence_handle *fence);

   /**
    * Wait for the fence and return true if the fence has been signalled.
    * The timeout of 0 will only return the status.
    * The timeout of PIPE_TIMEOUT_INFINITE will always wait until the fence
    * is signalled.
    */
   bool (*fence_wait)(struct radeon_winsys *ws, struct pipe_fence_handle *fence, uint64_t timeout);

   /**
    * Reference counting for fences.
    */
   void (*fence_reference)(struct pipe_fence_handle **dst, struct pipe_fence_handle *src);

   /**
    * Create a new fence object corresponding to the given syncobj fd.
    */
   struct pipe_fence_handle *(*fence_import_syncobj)(struct radeon_winsys *ws, int fd);

   /**
    * Create a new fence object corresponding to the given sync_file.
    */
   struct pipe_fence_handle *(*fence_import_sync_file)(struct radeon_winsys *ws, int fd);

   /**
    * Return a sync_file FD corresponding to the given fence object.
    */
   int (*fence_export_sync_file)(struct radeon_winsys *ws, struct pipe_fence_handle *fence);

   /**
    * Return a sync file FD that is already signalled.
    */
   int (*export_signalled_sync_file)(struct radeon_winsys *ws);

   /**
    * Initialize surface
    *
    * \param ws The winsys this function is called from.
    * \param tex Input texture description
    * \param flags Bitmask of RADEON_SURF_* flags
    * \param bpe Bytes per pixel, it can be different for Z buffers.
    * \param mode Preferred tile mode. (linear, 1D, or 2D)
    * \param surf Output structure
    */
   int (*surface_init)(struct radeon_winsys *ws, const struct pipe_resource *tex, unsigned flags,
                       unsigned bpe, enum radeon_surf_mode mode, struct radeon_surf *surf);

   /**
    * Query a driver/kernel counter or value; see enum radeon_value_id.
    */
   uint64_t (*query_value)(struct radeon_winsys *ws, enum radeon_value_id value);

   /**
    * Read \p num_registers consecutive GPU registers starting at
    * \p reg_offset into \p out. Returns true on success.
    */
   bool (*read_registers)(struct radeon_winsys *ws, unsigned reg_offset, unsigned num_registers,
                          uint32_t *out);

   /**
    * Secure context
    */
   bool (*ws_is_secure)(struct radeon_winsys *ws);
   bool (*cs_is_secure)(struct radeon_cmdbuf *cs);
   void (*cs_set_secure)(struct radeon_cmdbuf *cs, bool secure);
};
686
687 static inline bool radeon_emitted(struct radeon_cmdbuf *cs, unsigned num_dw)
688 {
689 return cs && (cs->prev_dw + cs->current.cdw > num_dw);
690 }
691
692 static inline void radeon_emit(struct radeon_cmdbuf *cs, uint32_t value)
693 {
694 cs->current.buf[cs->current.cdw++] = value;
695 }
696
697 static inline void radeon_emit_array(struct radeon_cmdbuf *cs, const uint32_t *values,
698 unsigned count)
699 {
700 memcpy(cs->current.buf + cs->current.cdw, values, count * 4);
701 cs->current.cdw += count;
702 }
703
/* Heaps used by the winsys slab/cache allocators. Each heap is a fixed
 * combination of a memory domain and RADEON_FLAG_* flags; see
 * radeon_domain_from_heap, radeon_flags_from_heap, radeon_get_heap_index.
 */
enum radeon_heap
{
   /* VRAM heaps come first... */
   RADEON_HEAP_VRAM_NO_CPU_ACCESS,
   RADEON_HEAP_VRAM_READ_ONLY,
   RADEON_HEAP_VRAM_READ_ONLY_32BIT,
   RADEON_HEAP_VRAM_32BIT,
   RADEON_HEAP_VRAM,
   /* ...followed by the GTT heaps. */
   RADEON_HEAP_GTT_WC,
   RADEON_HEAP_GTT_WC_READ_ONLY,
   RADEON_HEAP_GTT_WC_READ_ONLY_32BIT,
   RADEON_HEAP_GTT_WC_32BIT,
   RADEON_HEAP_GTT,
   RADEON_HEAP_GTT_UNCACHED_WC,
   RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY,
   RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY_32BIT,
   RADEON_HEAP_GTT_UNCACHED_WC_32BIT,
   RADEON_HEAP_GTT_UNCACHED,
   RADEON_MAX_SLAB_HEAPS,
   RADEON_MAX_CACHED_HEAPS = RADEON_MAX_SLAB_HEAPS,
};
724
725 static inline enum radeon_bo_domain radeon_domain_from_heap(enum radeon_heap heap)
726 {
727 switch (heap) {
728 case RADEON_HEAP_VRAM_NO_CPU_ACCESS:
729 case RADEON_HEAP_VRAM_READ_ONLY:
730 case RADEON_HEAP_VRAM_READ_ONLY_32BIT:
731 case RADEON_HEAP_VRAM_32BIT:
732 case RADEON_HEAP_VRAM:
733 return RADEON_DOMAIN_VRAM;
734 case RADEON_HEAP_GTT_WC:
735 case RADEON_HEAP_GTT_WC_READ_ONLY:
736 case RADEON_HEAP_GTT_WC_READ_ONLY_32BIT:
737 case RADEON_HEAP_GTT_WC_32BIT:
738 case RADEON_HEAP_GTT:
739 case RADEON_HEAP_GTT_UNCACHED_WC:
740 case RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY:
741 case RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY_32BIT:
742 case RADEON_HEAP_GTT_UNCACHED_WC_32BIT:
743 case RADEON_HEAP_GTT_UNCACHED:
744 return RADEON_DOMAIN_GTT;
745 default:
746 assert(0);
747 return (enum radeon_bo_domain)0;
748 }
749 }
750
751 static inline unsigned radeon_flags_from_heap(enum radeon_heap heap)
752 {
753 unsigned flags = RADEON_FLAG_NO_INTERPROCESS_SHARING;
754
755 switch (heap) {
756 case RADEON_HEAP_GTT:
757 case RADEON_HEAP_GTT_UNCACHED:
758 break;
759 default:
760 flags |= RADEON_FLAG_GTT_WC;
761 }
762
763 switch (heap) {
764 case RADEON_HEAP_GTT_UNCACHED_WC:
765 case RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY:
766 case RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY_32BIT:
767 case RADEON_HEAP_GTT_UNCACHED_WC_32BIT:
768 case RADEON_HEAP_GTT_UNCACHED:
769 flags |= RADEON_FLAG_UNCACHED;
770 break;
771 default:
772 break;
773 }
774
775 switch (heap) {
776 case RADEON_HEAP_VRAM_READ_ONLY:
777 case RADEON_HEAP_VRAM_READ_ONLY_32BIT:
778 case RADEON_HEAP_GTT_WC_READ_ONLY:
779 case RADEON_HEAP_GTT_WC_READ_ONLY_32BIT:
780 case RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY:
781 case RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY_32BIT:
782 flags |= RADEON_FLAG_READ_ONLY;
783 break;
784 default:
785 break;
786 }
787
788 switch (heap) {
789 case RADEON_HEAP_VRAM_READ_ONLY_32BIT:
790 case RADEON_HEAP_VRAM_32BIT:
791 case RADEON_HEAP_GTT_WC_READ_ONLY_32BIT:
792 case RADEON_HEAP_GTT_WC_32BIT:
793 case RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY_32BIT:
794 case RADEON_HEAP_GTT_UNCACHED_WC_32BIT:
795 flags |= RADEON_FLAG_32BIT;
796 default:
797 break;
798 }
799
800 switch (heap) {
801 case RADEON_HEAP_VRAM_NO_CPU_ACCESS:
802 flags |= RADEON_FLAG_NO_CPU_ACCESS;
803 break;
804 default:
805 break;
806 }
807
808 return flags;
809 }
810
811 /* Return the heap index for winsys allocators, or -1 on failure. */
static inline int radeon_get_heap_index(enum radeon_bo_domain domain, enum radeon_bo_flag flags)
{
   bool uncached;

   /* VRAM implies WC (write combining) */
   assert(!(domain & RADEON_DOMAIN_VRAM) || flags & RADEON_FLAG_GTT_WC);
   /* NO_CPU_ACCESS implies VRAM only. */
   assert(!(flags & RADEON_FLAG_NO_CPU_ACCESS) || domain == RADEON_DOMAIN_VRAM);

   /* Resources with interprocess sharing don't use any winsys allocators. */
   if (!(flags & RADEON_FLAG_NO_INTERPROCESS_SHARING))
      return -1;

   /* Unsupported flags: NO_SUBALLOC, SPARSE. */
   if (flags & ~(RADEON_FLAG_GTT_WC | RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_UNCACHED |
                 RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT))
      return -1;

   switch (domain) {
   case RADEON_DOMAIN_VRAM:
      /* Map each valid combination of NO_CPU_ACCESS/READ_ONLY/32BIT to its
       * dedicated VRAM heap; invalid combinations assert and return -1. */
      switch (flags & (RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT)) {
      case RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT:
      case RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_READ_ONLY:
         assert(!"NO_CPU_ACCESS | READ_ONLY doesn't make sense");
         return -1;
      case RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_32BIT:
         assert(!"NO_CPU_ACCESS with 32BIT is disallowed");
         return -1;
      case RADEON_FLAG_NO_CPU_ACCESS:
         return RADEON_HEAP_VRAM_NO_CPU_ACCESS;
      case RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT:
         return RADEON_HEAP_VRAM_READ_ONLY_32BIT;
      case RADEON_FLAG_READ_ONLY:
         return RADEON_HEAP_VRAM_READ_ONLY;
      case RADEON_FLAG_32BIT:
         return RADEON_HEAP_VRAM_32BIT;
      case 0:
         return RADEON_HEAP_VRAM;
      }
      break;
   case RADEON_DOMAIN_GTT:
      /* UNCACHED only selects between the two variants of each WC heap,
       * so it is handled separately from the switch below. */
      uncached = flags & RADEON_FLAG_UNCACHED;

      switch (flags & (RADEON_FLAG_GTT_WC | RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT)) {
      case RADEON_FLAG_GTT_WC | RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT:
         return uncached ? RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY_32BIT
                         : RADEON_HEAP_GTT_WC_READ_ONLY_32BIT;
      case RADEON_FLAG_GTT_WC | RADEON_FLAG_READ_ONLY:
         return uncached ? RADEON_HEAP_GTT_UNCACHED_WC_READ_ONLY
                         : RADEON_HEAP_GTT_WC_READ_ONLY;
      case RADEON_FLAG_GTT_WC | RADEON_FLAG_32BIT:
         return uncached ? RADEON_HEAP_GTT_UNCACHED_WC_32BIT
                         : RADEON_HEAP_GTT_WC_32BIT;
      case RADEON_FLAG_GTT_WC:
         return uncached ? RADEON_HEAP_GTT_UNCACHED_WC : RADEON_HEAP_GTT_WC;
      case RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT:
      case RADEON_FLAG_READ_ONLY:
         assert(!"READ_ONLY without WC is disallowed");
         return -1;
      case RADEON_FLAG_32BIT:
         assert(!"32BIT without WC is disallowed");
         return -1;
      case 0:
         return uncached ? RADEON_HEAP_GTT_UNCACHED : RADEON_HEAP_GTT;
      }
      break;
   default:
      break;
   }
   return -1;
}
883
884 #endif