radeonsi: stop command submission with PIPE_CONTEXT_LOSE_CONTEXT_ON_RESET only
[mesa.git] / src / gallium / drivers / radeon / radeon_winsys.h
1 /*
2 * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
3 * Copyright 2010 Marek Olšák <maraeo@gmail.com>
4 * Copyright 2018 Advanced Micro Devices, Inc.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * on the rights to use, copy, modify, merge, publish, distribute, sub
11 * license, and/or sell copies of the Software, and to permit persons to whom
12 * the Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE. */
25
26 #ifndef RADEON_WINSYS_H
27 #define RADEON_WINSYS_H
28
/* The public winsys interface header for the radeon driver. */

/* Whether the next IB can start immediately and not wait for draws and
 * dispatches from the current IB to finish. */
#define RADEON_FLUSH_START_NEXT_GFX_IB_NOW (1u << 31)

/* Convenience combination: flush asynchronously AND allow the next gfx IB
 * to start right away. */
#define RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW \
   (PIPE_FLUSH_ASYNC | RADEON_FLUSH_START_NEXT_GFX_IB_NOW)
37
38 #include "pipebuffer/pb_buffer.h"
39
40 #include "amd/common/ac_gpu_info.h"
41 #include "amd/common/ac_surface.h"
42
/* Tiling flags describing how a buffer's texels are laid out in memory. */
enum radeon_bo_layout {
   RADEON_LAYOUT_LINEAR = 0,
   RADEON_LAYOUT_TILED = 1,
   RADEON_LAYOUT_SQUARETILED = 2,

   RADEON_LAYOUT_UNKNOWN = 3
};
51
/* Memory domains a buffer can be placed in; combinable as a bitfield. */
enum radeon_bo_domain {
   RADEON_DOMAIN_GTT = 2,
   RADEON_DOMAIN_VRAM = 4,
   RADEON_DOMAIN_VRAM_GTT = RADEON_DOMAIN_VRAM | RADEON_DOMAIN_GTT
};
57
/* Buffer allocation flags; combinable as a bitfield. */
enum radeon_bo_flag {
   RADEON_FLAG_GTT_WC = (1 << 0),                  /* write-combined GTT */
   RADEON_FLAG_NO_CPU_ACCESS = (1 << 1),           /* never CPU-mapped */
   RADEON_FLAG_NO_SUBALLOC = (1 << 2),             /* bypass suballocators */
   RADEON_FLAG_SPARSE = (1 << 3),                  /* partially-resident */
   RADEON_FLAG_NO_INTERPROCESS_SHARING = (1 << 4), /* never exported */
   RADEON_FLAG_READ_ONLY = (1 << 5),
   RADEON_FLAG_32BIT = (1 << 6),
};
67
/* How a buffer is used by a command stream; combinable as a bitfield. */
enum radeon_bo_usage {
   RADEON_USAGE_READ = 2,
   RADEON_USAGE_WRITE = 4,
   RADEON_USAGE_READWRITE = RADEON_USAGE_READ | RADEON_USAGE_WRITE,

   /* The winsys ensures that the CS submission will be scheduled after
    * previously flushed CSs referencing this BO in a conflicting way.
    */
   RADEON_USAGE_SYNCHRONIZED = 8
};

/* Granularity of commitment for sparse buffers. */
#define RADEON_SPARSE_PAGE_SIZE (64 * 1024)
80
/* Hardware rings a command stream can target. */
enum ring_type {
   RING_GFX = 0,      /* graphics */
   RING_COMPUTE = 1,
   RING_DMA = 2,      /* SDMA */
   RING_UVD = 3,
   RING_VCE = 4,
   RING_UVD_ENC = 5,
   RING_VCN_DEC = 6,
   RING_VCN_ENC = 7,
   RING_VCN_JPEG = 8,
   RING_LAST = 9,
};
93
/* Counters/values that can be queried through query_value(). */
enum radeon_value_id {
   RADEON_REQUESTED_VRAM_MEMORY = 0,
   RADEON_REQUESTED_GTT_MEMORY = 1,
   RADEON_MAPPED_VRAM = 2,
   RADEON_MAPPED_GTT = 3,
   RADEON_BUFFER_WAIT_TIME_NS = 4,
   RADEON_NUM_MAPPED_BUFFERS = 5,
   RADEON_TIMESTAMP = 6,
   RADEON_NUM_GFX_IBS = 7,
   RADEON_NUM_SDMA_IBS = 8,
   RADEON_GFX_BO_LIST_COUNTER = 9, /* number of BOs submitted in gfx IBs */
   RADEON_GFX_IB_SIZE_COUNTER = 10,
   RADEON_NUM_BYTES_MOVED = 11,
   RADEON_NUM_EVICTIONS = 12,
   RADEON_NUM_VRAM_CPU_PAGE_FAULTS = 13,
   RADEON_VRAM_USAGE = 14,
   RADEON_VRAM_VIS_USAGE = 15,
   RADEON_GTT_USAGE = 16,
   RADEON_GPU_TEMPERATURE = 17, /* DRM 2.42.0 */
   RADEON_CURRENT_SCLK = 18,
   RADEON_CURRENT_MCLK = 19,
   RADEON_GPU_RESET_COUNTER = 20, /* DRM 2.43.0 */
   RADEON_CS_THREAD_TIME = 21,
};
118
/* Buffer priorities passed to cs_add_buffer().
 * Each group of two has the same priority; 31 is the maximum value. */
enum radeon_bo_priority {
   RADEON_PRIO_FENCE = 0,
   RADEON_PRIO_TRACE = 1,

   RADEON_PRIO_SO_FILLED_SIZE = 2,
   RADEON_PRIO_QUERY = 3,

   RADEON_PRIO_IB1 = 4, /* main IB submitted to the kernel */
   RADEON_PRIO_IB2 = 5, /* IB executed with INDIRECT_BUFFER */

   RADEON_PRIO_DRAW_INDIRECT = 6,
   RADEON_PRIO_INDEX_BUFFER = 7,

   RADEON_PRIO_CP_DMA = 8,
   RADEON_PRIO_BORDER_COLORS = 9,

   RADEON_PRIO_CONST_BUFFER = 10,
   RADEON_PRIO_DESCRIPTORS = 11,

   RADEON_PRIO_SAMPLER_BUFFER = 12,
   RADEON_PRIO_VERTEX_BUFFER = 13,

   RADEON_PRIO_SHADER_RW_BUFFER = 14,
   RADEON_PRIO_COMPUTE_GLOBAL = 15,

   RADEON_PRIO_SAMPLER_TEXTURE = 16,
   RADEON_PRIO_SHADER_RW_IMAGE = 17,

   RADEON_PRIO_SAMPLER_TEXTURE_MSAA = 18,
   RADEON_PRIO_COLOR_BUFFER = 19,

   RADEON_PRIO_DEPTH_BUFFER = 20,

   RADEON_PRIO_COLOR_BUFFER_MSAA = 22,

   RADEON_PRIO_DEPTH_BUFFER_MSAA = 24,

   RADEON_PRIO_SEPARATE_META = 26,
   RADEON_PRIO_SHADER_BINARY = 27, /* the hw can't hide instruction cache misses */

   RADEON_PRIO_SHADER_RINGS = 28,

   RADEON_PRIO_SCRATCH_BUFFER = 30
   /* 31 is the maximum value */
};
165
/* Opaque winsys-owned types. */
struct winsys_handle;
struct radeon_winsys_ctx;

/* One contiguous chunk of an IB. */
struct radeon_cmdbuf_chunk {
   unsigned cdw;    /* Number of used dwords. */
   unsigned max_dw; /* Maximum number of dwords. */
   uint32_t *buf;   /* The base pointer of the chunk. */
};

/* A command stream: the chunk being filled plus all completed chunks. */
struct radeon_cmdbuf {
   struct radeon_cmdbuf_chunk current; /* chunk currently being emitted into */
   struct radeon_cmdbuf_chunk *prev;   /* array of completed chunks */
   unsigned num_prev; /* Number of previous chunks. */
   unsigned max_prev; /* Space in array pointed to by prev. */
   unsigned prev_dw;  /* Total number of dwords in previous chunks. */

   /* Memory usage of the buffer list. These are always 0 for preamble IBs. */
   uint64_t used_vram;
   uint64_t used_gart;
};
186
187 /* Tiling info for display code, DRI sharing, and other data. */
188 struct radeon_bo_metadata {
189 /* Tiling flags describing the texture layout for display code
190 * and DRI sharing.
191 */
192 union {
193 struct {
194 enum radeon_bo_layout microtile;
195 enum radeon_bo_layout macrotile;
196 unsigned pipe_config;
197 unsigned bankw;
198 unsigned bankh;
199 unsigned tile_split;
200 unsigned mtilea;
201 unsigned num_banks;
202 unsigned stride;
203 bool scanout;
204 } legacy;
205
206 struct {
207 /* surface flags */
208 unsigned swizzle_mode:5;
209 } gfx9;
210 } u;
211
212 /* Additional metadata associated with the buffer, in bytes.
213 * The maximum size is 64 * 4. This is opaque for the winsys & kernel.
214 * Supported by amdgpu only.
215 */
216 uint32_t size_metadata;
217 uint32_t metadata[64];
218 };
219
/* Features that can be requested via cs_request_feature(). */
enum radeon_feature_id {
   RADEON_FID_R300_HYPERZ_ACCESS = 0, /* ZMask + HiZ */
   RADEON_FID_R300_CMASK_ACCESS = 1,
};
224
/* One entry of the buffer list returned by cs_get_buffer_list(). */
struct radeon_bo_list_item {
   uint64_t bo_size;
   uint64_t vm_address;
   uint32_t priority_usage; /* mask of (1 << RADEON_PRIO_*) */
};
230
/* The winsys vtable: everything the gallium driver needs from the kernel/OS
 * layer. Implemented by the radeon and amdgpu winsyses. */
struct radeon_winsys {
   /* The screen object this winsys was created for. */
   struct pipe_screen *screen;

   /* Decrement the winsys reference count.
    * Returns true if the winsys and screen should be destroyed. */
   bool (*unref)(struct radeon_winsys *ws);

   /* Destroy this winsys. */
   void (*destroy)(struct radeon_winsys *ws);

   /* Fill \p info with GPU/driver information. */
   void (*query_info)(struct radeon_winsys *ws,
                      struct radeon_info *info);

   /* A hint for the winsys that it should pin its execution threads to
    * a group of cores sharing a specific L3 cache if the CPU has multiple
    * L3 caches. This is needed for good multithreading performance on
    * AMD Zen CPUs. */
   void (*pin_threads_to_L3_cache)(struct radeon_winsys *ws, unsigned cache);

   /**************************************************************************
    * Buffer management. Buffer attributes are mostly fixed over its lifetime.
    *
    * Remember that gallium gets to choose the interface it needs, and the
    * window systems must then implement that interface (rather than the
    * other way around...).
    *************************************************************************/

   /* Create a buffer object of \p size bytes, aligned to \p alignment,
    * placed in \p domain with the given RADEON_FLAG_* flags. */
   struct pb_buffer *(*buffer_create)(struct radeon_winsys *ws,
                                      uint64_t size,
                                      unsigned alignment,
                                      enum radeon_bo_domain domain,
                                      enum radeon_bo_flag flags);

   /* Map the entire data store of a buffer into the client's address space.
    * \p cs is a command stream to flush if the buffer is referenced by it;
    * \p usage is a bitmask of PIPE_TRANSFER_* flags.
    * Returns a pointer to the beginning of the buffer. */
   void *(*buffer_map)(struct pb_buffer *buf,
                       struct radeon_cmdbuf *cs,
                       enum pipe_transfer_usage usage);

   /* Unmap a buffer object from the client's address space. */
   void (*buffer_unmap)(struct pb_buffer *buf);

   /* Wait for the buffer; returns true once the buffer is not used by the
    * device. A timeout of 0 only polls the status; PIPE_TIMEOUT_INFINITE
    * waits until the buffer is idle. */
   bool (*buffer_wait)(struct pb_buffer *buf, uint64_t timeout,
                       enum radeon_bo_usage usage);

   /* Return buffer metadata (tiling info for display code, DRI sharing,
    * and other data). */
   void (*buffer_get_metadata)(struct pb_buffer *buf,
                               struct radeon_bo_metadata *md);

   /* Set buffer metadata (tiling info for display code, DRI sharing,
    * and other data). */
   void (*buffer_set_metadata)(struct pb_buffer *buf,
                               struct radeon_bo_metadata *md);

   /* Get a winsys buffer from a winsys handle. The internal structure of
    * the handle is platform-specific and only a winsys should access it.
    * \p stride returns the buffer stride in bytes. */
   struct pb_buffer *(*buffer_from_handle)(struct radeon_winsys *ws,
                                           struct winsys_handle *whandle,
                                           unsigned *stride, unsigned *offset);

   /* Get a winsys buffer from a user pointer. The resulting buffer can't
    * be exported. Both pointer and size must be page aligned. */
   struct pb_buffer *(*buffer_from_ptr)(struct radeon_winsys *ws,
                                        void *pointer, uint64_t size);

   /* Whether \p buf was created via buffer_from_ptr. */
   bool (*buffer_is_user_ptr)(struct pb_buffer *buf);

   /* Whether the buffer was suballocated. */
   bool (*buffer_is_suballocated)(struct pb_buffer *buf);

   /* Get a winsys handle from a winsys buffer. The internal structure of
    * the handle is platform-specific and only a winsys should access it.
    * \p stride is the buffer stride in bytes, for texturing.
    * Returns true on success. */
   bool (*buffer_get_handle)(struct pb_buffer *buf,
                             unsigned stride, unsigned offset,
                             unsigned slice_size,
                             struct winsys_handle *whandle);

   /* Change the commitment of a (64KB-page aligned) region of the given
    * sparse buffer.
    *
    * \warning There is no automatic synchronization with command submission.
    * \note Only implemented by the amdgpu winsys.
    * \return false on out of memory or other failure, true on success. */
   bool (*buffer_commit)(struct pb_buffer *buf,
                         uint64_t offset, uint64_t size,
                         bool commit);

   /* Return the virtual address of a buffer. When virtual memory is not in
    * use, this is the offset relative to the relocation base (non-zero for
    * sub-allocated buffers). */
   uint64_t (*buffer_get_virtual_address)(struct pb_buffer *buf);

   /* Return the offset of this buffer relative to the relocation base;
    * only non-zero for sub-allocated buffers. Only supported by the radeon
    * winsys, since amdgpu uses virtual addresses in submissions even for
    * the video engines. */
   unsigned (*buffer_get_reloc_offset)(struct pb_buffer *buf);

   /* Query the initial placement of the buffer from the kernel driver. */
   enum radeon_bo_domain (*buffer_get_initial_domain)(struct pb_buffer *buf);

   /**************************************************************************
    * Command submission.
    *
    * Each pipe context should create its own command stream and submit
    * commands independently of other contexts.
    *************************************************************************/

   /* Create a command submission context. Various command streams can be
    * submitted to the same context. */
   struct radeon_winsys_ctx *(*ctx_create)(struct radeon_winsys *ws);

   /* Destroy a context. */
   void (*ctx_destroy)(struct radeon_winsys_ctx *ctx);

   /* Query a GPU reset status. */
   enum pipe_reset_status (*ctx_query_reset_status)(struct radeon_winsys_ctx *ctx);

   /* Create a command stream on \p ring_type (GFX, DMA, UVD, ...).
    * \p flush is the flush callback associated with the command stream and
    * \p flush_ctx the user pointer passed to it. */
   struct radeon_cmdbuf *(*cs_create)(struct radeon_winsys_ctx *ctx,
                                      enum ring_type ring_type,
                                      void (*flush)(void *ctx, unsigned flags,
                                                    struct pipe_fence_handle **fence),
                                      void *flush_ctx,
                                      bool stop_exec_on_failure);

   /* Destroy a command stream. */
   void (*cs_destroy)(struct radeon_cmdbuf *cs);

   /* Add a buffer. Each buffer used by a CS must be added using this
    * function. \p usage says whether the buffer is read and/or written,
    * \p domain is a bitmask of RADEON_DOMAIN_* flags, and a higher
    * \p priority means a greater chance of being placed in the requested
    * domain (15 is the maximum). Returns the buffer index. */
   unsigned (*cs_add_buffer)(struct radeon_cmdbuf *cs,
                             struct pb_buffer *buf,
                             enum radeon_bo_usage usage,
                             enum radeon_bo_domain domain,
                             enum radeon_bo_priority priority);

   /* Return the index of an already-added buffer, or -1 if it has not been
    * added. Not supported on amdgpu; drivers with GPUVM should not care
    * about buffer indices. */
   int (*cs_lookup_buffer)(struct radeon_cmdbuf *cs,
                           struct pb_buffer *buf);

   /* Return true if there is enough memory in VRAM and GTT for the buffers
    * added so far. If the validation fails, all buffers which have been
    * added since the last call of cs_validate will be removed and the CS
    * will be flushed (provided there are still any buffers). */
   bool (*cs_validate)(struct radeon_cmdbuf *cs);

   /* Check whether \p dw dwords are available in the IB; optionally chain
    * a new chunk of the IB if necessary and supported. */
   bool (*cs_check_space)(struct radeon_cmdbuf *cs, unsigned dw);

   /* Return the buffer list as passed to the kernel, i.e. it only contains
    * the parent buffers of sub-allocated buffers. Set \p list to NULL to
    * query the count only. Returns the buffer count. */
   unsigned (*cs_get_buffer_list)(struct radeon_cmdbuf *cs,
                                  struct radeon_bo_list_item *list);

   /* Flush a command stream with PIPE_FLUSH_* \p flags. If \p fence is
    * non-NULL, a fence is inserted after the CS and returned through it.
    * Returns a negative POSIX error code or 0 for success; asynchronous
    * submissions never return an error. */
   int (*cs_flush)(struct radeon_cmdbuf *cs,
                   unsigned flags,
                   struct pipe_fence_handle **fence);

   /* Create a fence before the CS is flushed. The user must flush manually
    * to complete the initialization of the fence. The fence must not be
    * used for anything except cs_add_fence_dependency before the flush. */
   struct pipe_fence_handle *(*cs_get_next_fence)(struct radeon_cmdbuf *cs);

   /* Return true if a buffer is referenced by a command stream. */
   bool (*cs_is_buffer_referenced)(struct radeon_cmdbuf *cs,
                                   struct pb_buffer *buf,
                                   enum radeon_bo_usage usage);

   /* Request access to a feature (one of RADEON_FID_*) for a command
    * stream; \p enable selects enable or disable. */
   bool (*cs_request_feature)(struct radeon_cmdbuf *cs,
                              enum radeon_feature_id fid,
                              bool enable);

   /* Make sure all asynchronous flushes of the cs have completed. */
   void (*cs_sync_flush)(struct radeon_cmdbuf *cs);

   /* Add a fence dependency to the CS, so that the CS will wait for the
    * fence before execution. */
   void (*cs_add_fence_dependency)(struct radeon_cmdbuf *cs,
                                   struct pipe_fence_handle *fence);

   /* Signal a syncobj when the CS finishes execution. */
   void (*cs_add_syncobj_signal)(struct radeon_cmdbuf *cs,
                                 struct pipe_fence_handle *fence);

   /* Wait for the fence; returns true once it has been signalled.
    * A timeout of 0 only polls the status; PIPE_TIMEOUT_INFINITE waits
    * until the fence is signalled. */
   bool (*fence_wait)(struct radeon_winsys *ws,
                      struct pipe_fence_handle *fence,
                      uint64_t timeout);

   /* Reference counting for fences. */
   void (*fence_reference)(struct pipe_fence_handle **dst,
                           struct pipe_fence_handle *src);

   /* Create a new fence object corresponding to the given syncobj fd. */
   struct pipe_fence_handle *(*fence_import_syncobj)(struct radeon_winsys *ws,
                                                     int fd);

   /* Create a new fence object corresponding to the given sync_file. */
   struct pipe_fence_handle *(*fence_import_sync_file)(struct radeon_winsys *ws,
                                                       int fd);

   /* Return a sync_file FD corresponding to the given fence object. */
   int (*fence_export_sync_file)(struct radeon_winsys *ws,
                                 struct pipe_fence_handle *fence);

   /* Return a sync file FD that is already signalled. */
   int (*export_signalled_sync_file)(struct radeon_winsys *ws);

   /* Initialize surface \p surf from texture description \p tex with
    * RADEON_SURF_* \p flags. \p bpe is bytes per pixel (can differ for Z
    * buffers) and \p mode the preferred tile mode (linear, 1D, or 2D). */
   int (*surface_init)(struct radeon_winsys *ws,
                       const struct pipe_resource *tex,
                       unsigned flags, unsigned bpe,
                       enum radeon_surf_mode mode,
                       struct radeon_surf *surf);

   /* Query one of the RADEON_* counters/values. */
   uint64_t (*query_value)(struct radeon_winsys *ws,
                           enum radeon_value_id value);

   /* Read \p num_registers MMIO registers starting at \p reg_offset into
    * \p out; returns true on success. */
   bool (*read_registers)(struct radeon_winsys *ws, unsigned reg_offset,
                          unsigned num_registers, uint32_t *out);

   /* Return a human-readable name of the chip. */
   const char* (*get_chip_name)(struct radeon_winsys *ws);
};
668
669 static inline bool radeon_emitted(struct radeon_cmdbuf *cs, unsigned num_dw)
670 {
671 return cs && (cs->prev_dw + cs->current.cdw > num_dw);
672 }
673
674 static inline void radeon_emit(struct radeon_cmdbuf *cs, uint32_t value)
675 {
676 cs->current.buf[cs->current.cdw++] = value;
677 }
678
679 static inline void radeon_emit_array(struct radeon_cmdbuf *cs,
680 const uint32_t *values, unsigned count)
681 {
682 memcpy(cs->current.buf + cs->current.cdw, values, count * 4);
683 cs->current.cdw += count;
684 }
685
/* Heaps used by the winsys slab/cache allocators: one per supported
 * (domain, flags) combination. */
enum radeon_heap {
   RADEON_HEAP_VRAM_NO_CPU_ACCESS = 0,
   RADEON_HEAP_VRAM_READ_ONLY = 1,
   RADEON_HEAP_VRAM_READ_ONLY_32BIT = 2,
   RADEON_HEAP_VRAM_32BIT = 3,
   RADEON_HEAP_VRAM = 4,
   RADEON_HEAP_GTT_WC = 5,
   RADEON_HEAP_GTT_WC_READ_ONLY = 6,
   RADEON_HEAP_GTT_WC_READ_ONLY_32BIT = 7,
   RADEON_HEAP_GTT_WC_32BIT = 8,
   RADEON_HEAP_GTT = 9,
   RADEON_MAX_SLAB_HEAPS = 10,
   RADEON_MAX_CACHED_HEAPS = RADEON_MAX_SLAB_HEAPS,
};
700
701 static inline enum radeon_bo_domain radeon_domain_from_heap(enum radeon_heap heap)
702 {
703 switch (heap) {
704 case RADEON_HEAP_VRAM_NO_CPU_ACCESS:
705 case RADEON_HEAP_VRAM_READ_ONLY:
706 case RADEON_HEAP_VRAM_READ_ONLY_32BIT:
707 case RADEON_HEAP_VRAM_32BIT:
708 case RADEON_HEAP_VRAM:
709 return RADEON_DOMAIN_VRAM;
710 case RADEON_HEAP_GTT_WC:
711 case RADEON_HEAP_GTT_WC_READ_ONLY:
712 case RADEON_HEAP_GTT_WC_READ_ONLY_32BIT:
713 case RADEON_HEAP_GTT_WC_32BIT:
714 case RADEON_HEAP_GTT:
715 return RADEON_DOMAIN_GTT;
716 default:
717 assert(0);
718 return (enum radeon_bo_domain)0;
719 }
720 }
721
722 static inline unsigned radeon_flags_from_heap(enum radeon_heap heap)
723 {
724 unsigned flags = RADEON_FLAG_NO_INTERPROCESS_SHARING |
725 (heap != RADEON_HEAP_GTT ? RADEON_FLAG_GTT_WC : 0);
726
727 switch (heap) {
728 case RADEON_HEAP_VRAM_NO_CPU_ACCESS:
729 return flags |
730 RADEON_FLAG_NO_CPU_ACCESS;
731
732 case RADEON_HEAP_VRAM_READ_ONLY:
733 case RADEON_HEAP_GTT_WC_READ_ONLY:
734 return flags |
735 RADEON_FLAG_READ_ONLY;
736
737 case RADEON_HEAP_VRAM_READ_ONLY_32BIT:
738 case RADEON_HEAP_GTT_WC_READ_ONLY_32BIT:
739 return flags |
740 RADEON_FLAG_READ_ONLY |
741 RADEON_FLAG_32BIT;
742
743 case RADEON_HEAP_VRAM_32BIT:
744 case RADEON_HEAP_GTT_WC_32BIT:
745 return flags |
746 RADEON_FLAG_32BIT;
747
748 case RADEON_HEAP_VRAM:
749 case RADEON_HEAP_GTT_WC:
750 case RADEON_HEAP_GTT:
751 default:
752 return flags;
753 }
754 }
755
756 /* Return the heap index for winsys allocators, or -1 on failure. */
757 static inline int radeon_get_heap_index(enum radeon_bo_domain domain,
758 enum radeon_bo_flag flags)
759 {
760 /* VRAM implies WC (write combining) */
761 assert(!(domain & RADEON_DOMAIN_VRAM) || flags & RADEON_FLAG_GTT_WC);
762 /* NO_CPU_ACCESS implies VRAM only. */
763 assert(!(flags & RADEON_FLAG_NO_CPU_ACCESS) || domain == RADEON_DOMAIN_VRAM);
764
765 /* Resources with interprocess sharing don't use any winsys allocators. */
766 if (!(flags & RADEON_FLAG_NO_INTERPROCESS_SHARING))
767 return -1;
768
769 /* Unsupported flags: NO_SUBALLOC, SPARSE. */
770 if (flags & ~(RADEON_FLAG_GTT_WC |
771 RADEON_FLAG_NO_CPU_ACCESS |
772 RADEON_FLAG_NO_INTERPROCESS_SHARING |
773 RADEON_FLAG_READ_ONLY |
774 RADEON_FLAG_32BIT))
775 return -1;
776
777 switch (domain) {
778 case RADEON_DOMAIN_VRAM:
779 switch (flags & (RADEON_FLAG_NO_CPU_ACCESS |
780 RADEON_FLAG_READ_ONLY |
781 RADEON_FLAG_32BIT)) {
782 case RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT:
783 case RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_READ_ONLY:
784 assert(!"NO_CPU_ACCESS | READ_ONLY doesn't make sense");
785 return -1;
786 case RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_32BIT:
787 assert(!"NO_CPU_ACCESS with 32BIT is disallowed");
788 return -1;
789 case RADEON_FLAG_NO_CPU_ACCESS:
790 return RADEON_HEAP_VRAM_NO_CPU_ACCESS;
791 case RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT:
792 return RADEON_HEAP_VRAM_READ_ONLY_32BIT;
793 case RADEON_FLAG_READ_ONLY:
794 return RADEON_HEAP_VRAM_READ_ONLY;
795 case RADEON_FLAG_32BIT:
796 return RADEON_HEAP_VRAM_32BIT;
797 case 0:
798 return RADEON_HEAP_VRAM;
799 }
800 break;
801 case RADEON_DOMAIN_GTT:
802 switch (flags & (RADEON_FLAG_GTT_WC |
803 RADEON_FLAG_READ_ONLY |
804 RADEON_FLAG_32BIT)) {
805 case RADEON_FLAG_GTT_WC | RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT:
806 return RADEON_HEAP_GTT_WC_READ_ONLY_32BIT;
807 case RADEON_FLAG_GTT_WC | RADEON_FLAG_READ_ONLY:
808 return RADEON_HEAP_GTT_WC_READ_ONLY;
809 case RADEON_FLAG_GTT_WC | RADEON_FLAG_32BIT:
810 return RADEON_HEAP_GTT_WC_32BIT;
811 case RADEON_FLAG_GTT_WC:
812 return RADEON_HEAP_GTT_WC;
813 case RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT:
814 case RADEON_FLAG_READ_ONLY:
815 assert(!"READ_ONLY without WC is disallowed");
816 return -1;
817 case RADEON_FLAG_32BIT:
818 assert(!"32BIT without WC is disallowed");
819 return -1;
820 case 0:
821 return RADEON_HEAP_GTT;
822 }
823 break;
824 default:
825 break;
826 }
827 return -1;
828 }
829
830 #endif