winsys/amdgpu,radeon: pass vm_alignment to buffer_from_handle
[mesa.git] src/gallium/drivers/radeon/radeon_winsys.h
/*
 * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
 * Copyright 2010 Marek Olšák <maraeo@gmail.com>
 * Copyright 2018 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE. */

#ifndef RADEON_WINSYS_H
#define RADEON_WINSYS_H

/* The public winsys interface header for the radeon driver. */

/* Whether the next IB can start immediately and not wait for draws and
 * dispatches from the current IB to finish. */
#define RADEON_FLUSH_START_NEXT_GFX_IB_NOW (1u << 31)

#define RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW \
    (PIPE_FLUSH_ASYNC | RADEON_FLUSH_START_NEXT_GFX_IB_NOW)

#include "pipebuffer/pb_buffer.h"

#include "amd/common/ac_gpu_info.h"
#include "amd/common/ac_surface.h"

/* Tiling flags. */
enum radeon_bo_layout {
    RADEON_LAYOUT_LINEAR = 0,
    RADEON_LAYOUT_TILED,
    RADEON_LAYOUT_SQUARETILED,

    RADEON_LAYOUT_UNKNOWN
};

enum radeon_bo_domain { /* bitfield */
    RADEON_DOMAIN_GTT = 2,
    RADEON_DOMAIN_VRAM = 4,
    RADEON_DOMAIN_VRAM_GTT = RADEON_DOMAIN_VRAM | RADEON_DOMAIN_GTT
};

enum radeon_bo_flag { /* bitfield */
    RADEON_FLAG_GTT_WC = (1 << 0),
    RADEON_FLAG_NO_CPU_ACCESS = (1 << 1),
    RADEON_FLAG_NO_SUBALLOC = (1 << 2),
    RADEON_FLAG_SPARSE = (1 << 3),
    RADEON_FLAG_NO_INTERPROCESS_SHARING = (1 << 4),
    RADEON_FLAG_READ_ONLY = (1 << 5),
    RADEON_FLAG_32BIT = (1 << 6),
};

enum radeon_bo_usage { /* bitfield */
    RADEON_USAGE_READ = 2,
    RADEON_USAGE_WRITE = 4,
    RADEON_USAGE_READWRITE = RADEON_USAGE_READ | RADEON_USAGE_WRITE,

    /* The winsys ensures that the CS submission will be scheduled after
     * previously flushed CSs referencing this BO in a conflicting way.
     */
    RADEON_USAGE_SYNCHRONIZED = 8
};

enum radeon_transfer_flags {
    /* Indicates that the caller will unmap the buffer.
     *
     * Not unmapping buffers is an important performance optimization for
     * OpenGL (avoids kernel overhead for frequently mapped buffers).
     */
    RADEON_TRANSFER_TEMPORARY = (PIPE_TRANSFER_DRV_PRV << 0),
};

#define RADEON_SPARSE_PAGE_SIZE (64 * 1024)

enum ring_type {
    RING_GFX = 0,
    RING_COMPUTE,
    RING_DMA,
    RING_UVD,
    RING_VCE,
    RING_UVD_ENC,
    RING_VCN_DEC,
    RING_VCN_ENC,
    RING_VCN_JPEG,
    RING_LAST,
};

enum radeon_value_id {
    RADEON_REQUESTED_VRAM_MEMORY,
    RADEON_REQUESTED_GTT_MEMORY,
    RADEON_MAPPED_VRAM,
    RADEON_MAPPED_GTT,
    RADEON_BUFFER_WAIT_TIME_NS,
    RADEON_NUM_MAPPED_BUFFERS,
    RADEON_TIMESTAMP,
    RADEON_NUM_GFX_IBS,
    RADEON_NUM_SDMA_IBS,
    RADEON_GFX_BO_LIST_COUNTER, /* number of BOs submitted in gfx IBs */
    RADEON_GFX_IB_SIZE_COUNTER,
    RADEON_NUM_BYTES_MOVED,
    RADEON_NUM_EVICTIONS,
    RADEON_NUM_VRAM_CPU_PAGE_FAULTS,
    RADEON_VRAM_USAGE,
    RADEON_VRAM_VIS_USAGE,
    RADEON_GTT_USAGE,
    RADEON_GPU_TEMPERATURE, /* DRM 2.42.0 */
    RADEON_CURRENT_SCLK,
    RADEON_CURRENT_MCLK,
    RADEON_GPU_RESET_COUNTER, /* DRM 2.43.0 */
    RADEON_CS_THREAD_TIME,
};

enum radeon_bo_priority {
    /* Each group of two has the same priority. */
    RADEON_PRIO_FENCE = 0,
    RADEON_PRIO_TRACE,

    RADEON_PRIO_SO_FILLED_SIZE = 2,
    RADEON_PRIO_QUERY,

    RADEON_PRIO_IB1 = 4, /* main IB submitted to the kernel */
    RADEON_PRIO_IB2, /* IB executed with INDIRECT_BUFFER */

    RADEON_PRIO_DRAW_INDIRECT = 6,
    RADEON_PRIO_INDEX_BUFFER,

    RADEON_PRIO_CP_DMA = 8,
    RADEON_PRIO_BORDER_COLORS,

    RADEON_PRIO_CONST_BUFFER = 10,
    RADEON_PRIO_DESCRIPTORS,

    RADEON_PRIO_SAMPLER_BUFFER = 12,
    RADEON_PRIO_VERTEX_BUFFER,

    RADEON_PRIO_SHADER_RW_BUFFER = 14,
    RADEON_PRIO_COMPUTE_GLOBAL,

    RADEON_PRIO_SAMPLER_TEXTURE = 16,
    RADEON_PRIO_SHADER_RW_IMAGE,

    RADEON_PRIO_SAMPLER_TEXTURE_MSAA = 18,
    RADEON_PRIO_COLOR_BUFFER,

    RADEON_PRIO_DEPTH_BUFFER = 20,

    RADEON_PRIO_COLOR_BUFFER_MSAA = 22,

    RADEON_PRIO_DEPTH_BUFFER_MSAA = 24,

    RADEON_PRIO_SEPARATE_META = 26,
    RADEON_PRIO_SHADER_BINARY, /* the hw can't hide instruction cache misses */

    RADEON_PRIO_SHADER_RINGS = 28,

    RADEON_PRIO_SCRATCH_BUFFER = 30,
    /* 31 is the maximum value */
};

struct winsys_handle;
struct radeon_winsys_ctx;

struct radeon_cmdbuf_chunk {
    unsigned cdw;    /* Number of used dwords. */
    unsigned max_dw; /* Maximum number of dwords. */
    uint32_t *buf;   /* The base pointer of the chunk. */
};

struct radeon_cmdbuf {
    struct radeon_cmdbuf_chunk current;
    struct radeon_cmdbuf_chunk *prev;
    unsigned num_prev; /* Number of previous chunks. */
    unsigned max_prev; /* Space in array pointed to by prev. */
    unsigned prev_dw;  /* Total number of dwords in previous chunks. */

    /* Memory usage of the buffer list. These are always 0 for preamble IBs. */
    uint64_t used_vram;
    uint64_t used_gart;
};

/* Tiling info for display code, DRI sharing, and other data. */
struct radeon_bo_metadata {
    /* Tiling flags describing the texture layout for display code
     * and DRI sharing.
     */
    union {
        struct {
            enum radeon_bo_layout microtile;
            enum radeon_bo_layout macrotile;
            unsigned pipe_config;
            unsigned bankw;
            unsigned bankh;
            unsigned tile_split;
            unsigned mtilea;
            unsigned num_banks;
            unsigned stride;
            bool scanout;
        } legacy;

        struct {
            /* surface flags */
            unsigned swizzle_mode:5;
        } gfx9;
    } u;

    /* Additional metadata associated with the buffer, in bytes.
     * The maximum size is 64 * 4. This is opaque for the winsys & kernel.
     * Supported by amdgpu only.
     */
    uint32_t size_metadata;
    uint32_t metadata[64];
};

enum radeon_feature_id {
    RADEON_FID_R300_HYPERZ_ACCESS, /* ZMask + HiZ */
    RADEON_FID_R300_CMASK_ACCESS,
};

struct radeon_bo_list_item {
    uint64_t bo_size;
    uint64_t vm_address;
    uint32_t priority_usage; /* mask of (1 << RADEON_PRIO_*) */
};

struct radeon_winsys {
    /**
     * The screen object this winsys was created for
     */
    struct pipe_screen *screen;

    /**
     * Decrement the winsys reference count.
     *
     * \param ws The winsys this function is called for.
     * \return True if the winsys and screen should be destroyed.
     */
    bool (*unref)(struct radeon_winsys *ws);

    /**
     * Destroy this winsys.
     *
     * \param ws The winsys this function is called from.
     */
    void (*destroy)(struct radeon_winsys *ws);

    /**
     * Query an info structure from winsys.
     *
     * \param ws The winsys this function is called from.
     * \param info Return structure
     */
    void (*query_info)(struct radeon_winsys *ws,
                       struct radeon_info *info);

    /**
     * A hint for the winsys that it should pin its execution threads to
     * a group of cores sharing a specific L3 cache if the CPU has multiple
     * L3 caches. This is needed for good multithreading performance on
     * AMD Zen CPUs.
     */
    void (*pin_threads_to_L3_cache)(struct radeon_winsys *ws, unsigned cache);

    /**************************************************************************
     * Buffer management. Buffer attributes are mostly fixed over a buffer's
     * lifetime.
     *
     * Remember that gallium gets to choose the interface it needs, and the
     * window systems must then implement that interface (rather than the
     * other way around...).
     *************************************************************************/

    /**
     * Create a buffer object.
     *
     * \param ws The winsys this function is called from.
     * \param size The size to allocate.
     * \param alignment The alignment of the buffer in memory.
     * \param domain A bitmask of the RADEON_DOMAIN_* flags.
     * \param flags A bitmask of the RADEON_FLAG_* flags.
     * \return The created buffer object.
     */
    struct pb_buffer *(*buffer_create)(struct radeon_winsys *ws,
                                       uint64_t size,
                                       unsigned alignment,
                                       enum radeon_bo_domain domain,
                                       enum radeon_bo_flag flags);
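
    /*
     * Usage sketch (illustrative, not part of the interface): allocating a
     * small write-combined GTT staging buffer. The "ws" pointer and the
     * chosen sizes are assumptions of the example.
     *
     *    struct pb_buffer *staging =
     *       ws->buffer_create(ws, 64 * 1024, 4096, RADEON_DOMAIN_GTT,
     *                         RADEON_FLAG_GTT_WC |
     *                         RADEON_FLAG_NO_INTERPROCESS_SHARING);
     *    if (!staging)
     *       return NULL; // allocation can fail under memory pressure
     */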

    /**
     * Map the entire data store of a buffer object into the client's address
     * space.
     *
     * Callers are expected to unmap buffers again if and only if the
     * RADEON_TRANSFER_TEMPORARY flag is set in \p usage.
     *
     * \param buf A winsys buffer object to map.
     * \param cs A command stream to flush if the buffer is referenced by it.
     * \param usage A bitmask of the PIPE_TRANSFER_* and RADEON_TRANSFER_* flags.
     * \return The pointer at the beginning of the buffer.
     */
    void *(*buffer_map)(struct pb_buffer *buf,
                        struct radeon_cmdbuf *cs,
                        enum pipe_transfer_usage usage);

    /**
     * Unmap a buffer object from the client's address space.
     *
     * \param buf A winsys buffer object to unmap.
     */
    void (*buffer_unmap)(struct pb_buffer *buf);
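
    /*
     * Usage sketch (illustrative): a one-shot CPU upload through a temporary
     * mapping. Because RADEON_TRANSFER_TEMPORARY is set, the caller must
     * unmap again; without it the winsys may keep the mapping cached.
     * "data" and "size" are assumptions of the example.
     *
     *    void *ptr = ws->buffer_map(buf, NULL, PIPE_TRANSFER_WRITE |
     *                                          RADEON_TRANSFER_TEMPORARY);
     *    if (ptr) {
     *       memcpy(ptr, data, size);
     *       ws->buffer_unmap(buf);
     *    }
     */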

    /**
     * Wait for the buffer and return true if the buffer is not used
     * by the device.
     *
     * The timeout of 0 will only return the status.
     * The timeout of PIPE_TIMEOUT_INFINITE will always wait until the buffer
     * is idle.
     */
    bool (*buffer_wait)(struct pb_buffer *buf, uint64_t timeout,
                        enum radeon_bo_usage usage);
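
    /*
     * Usage sketch (illustrative): poll first, then block. A timeout of 0
     * turns the call into a non-blocking busy check.
     *
     *    if (!ws->buffer_wait(buf, 0, RADEON_USAGE_READWRITE)) {
     *       // still busy on the GPU; wait until it becomes idle
     *       ws->buffer_wait(buf, PIPE_TIMEOUT_INFINITE, RADEON_USAGE_READWRITE);
     *    }
     */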

    /**
     * Return buffer metadata.
     * (tiling info for display code, DRI sharing, and other data)
     *
     * \param buf A winsys buffer object to get the flags from.
     * \param md Metadata
     */
    void (*buffer_get_metadata)(struct pb_buffer *buf,
                                struct radeon_bo_metadata *md);

    /**
     * Set buffer metadata.
     * (tiling info for display code, DRI sharing, and other data)
     *
     * \param buf A winsys buffer object to set the flags for.
     * \param md Metadata
     */
    void (*buffer_set_metadata)(struct pb_buffer *buf,
                                struct radeon_bo_metadata *md);

    /**
     * Get a winsys buffer from a winsys handle. The internal structure
     * of the handle is platform-specific and only a winsys should access it.
     *
     * \param ws The winsys this function is called from.
     * \param whandle A winsys handle pointer as was received from a state
     *                tracker.
     * \param vm_alignment The alignment to use for the buffer's GPU virtual
     *                     address mapping.
     * \param stride The returned buffer stride in bytes.
     * \param offset The returned offset of the buffer within the allocation,
     *               in bytes.
     */
    struct pb_buffer *(*buffer_from_handle)(struct radeon_winsys *ws,
                                            struct winsys_handle *whandle,
                                            unsigned vm_alignment,
                                            unsigned *stride, unsigned *offset);
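
    /*
     * Usage sketch (illustrative): importing a shared buffer from a
     * state-tracker handle. Passing the surface alignment as vm_alignment is
     * an assumption of the example; callers pass whatever alignment their
     * surface layout requires for the GPU virtual address.
     *
     *    unsigned stride, offset;
     *    struct pb_buffer *buf =
     *       ws->buffer_from_handle(ws, whandle, surf_alignment,
     *                              &stride, &offset);
     *    if (!buf)
     *       return false; // stale or otherwise invalid handle
     */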

    /**
     * Get a winsys buffer from a user pointer. The resulting buffer can't
     * be exported. Both pointer and size must be page aligned.
     *
     * \param ws The winsys this function is called from.
     * \param pointer User pointer to turn into a buffer object.
     * \param size Size in bytes for the new buffer.
     */
    struct pb_buffer *(*buffer_from_ptr)(struct radeon_winsys *ws,
                                         void *pointer, uint64_t size);

    /**
     * Whether the buffer was created from a user pointer.
     *
     * \param buf A winsys buffer object
     * \return whether \p buf was created via buffer_from_ptr
     */
    bool (*buffer_is_user_ptr)(struct pb_buffer *buf);

    /** Whether the buffer was suballocated. */
    bool (*buffer_is_suballocated)(struct pb_buffer *buf);

    /**
     * Get a winsys handle from a winsys buffer. The internal structure
     * of the handle is platform-specific and only a winsys should access it.
     *
     * \param buf A winsys buffer object to get the handle from.
     * \param whandle A winsys handle pointer.
     * \param stride A stride of the buffer in bytes, for texturing.
     * \return true on success.
     */
    bool (*buffer_get_handle)(struct pb_buffer *buf,
                              unsigned stride, unsigned offset,
                              unsigned slice_size,
                              struct winsys_handle *whandle);

    /**
     * Change the commitment of a (64KB-page aligned) region of the given
     * sparse buffer.
     *
     * \warning There is no automatic synchronization with command submission.
     *
     * \note Only implemented by the amdgpu winsys.
     *
     * \return false on out of memory or other failure, true on success.
     */
    bool (*buffer_commit)(struct pb_buffer *buf,
                          uint64_t offset, uint64_t size,
                          bool commit);
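
    /*
     * Usage sketch (illustrative): backing the first 64KB page of a sparse
     * buffer before use and releasing it afterwards. Offsets and sizes must
     * be RADEON_SPARSE_PAGE_SIZE aligned.
     *
     *    if (!ws->buffer_commit(buf, 0, RADEON_SPARSE_PAGE_SIZE, true))
     *       return false; // out of memory
     *    // ... use the committed range ...
     *    ws->buffer_commit(buf, 0, RADEON_SPARSE_PAGE_SIZE, false);
     */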

    /**
     * Return the virtual address of a buffer.
     *
     * When virtual memory is not in use, this is the offset relative to the
     * relocation base (non-zero for sub-allocated buffers).
     *
     * \param buf A winsys buffer object
     * \return virtual address
     */
    uint64_t (*buffer_get_virtual_address)(struct pb_buffer *buf);

    /**
     * Return the offset of this buffer relative to the relocation base.
     * This is only non-zero for sub-allocated buffers.
     *
     * This is only supported in the radeon winsys, since amdgpu uses virtual
     * addresses in submissions even for the video engines.
     *
     * \param buf A winsys buffer object
     * \return the offset for relocations
     */
    unsigned (*buffer_get_reloc_offset)(struct pb_buffer *buf);

    /**
     * Query the initial placement of the buffer from the kernel driver.
     */
    enum radeon_bo_domain (*buffer_get_initial_domain)(struct pb_buffer *buf);

    /**************************************************************************
     * Command submission.
     *
     * Each pipe context should create its own command stream and submit
     * commands independently of other contexts.
     *************************************************************************/

    /**
     * Create a command submission context.
     * Various command streams can be submitted to the same context.
     */
    struct radeon_winsys_ctx *(*ctx_create)(struct radeon_winsys *ws);

    /**
     * Destroy a context.
     */
    void (*ctx_destroy)(struct radeon_winsys_ctx *ctx);

    /**
     * Query a GPU reset status.
     */
    enum pipe_reset_status (*ctx_query_reset_status)(struct radeon_winsys_ctx *ctx);

    /**
     * Create a command stream.
     *
     * \param ctx The submission context
     * \param ring_type The ring type (GFX, DMA, UVD)
     * \param flush Flush callback function associated with the command stream.
     * \param flush_ctx User pointer that will be passed to the flush callback.
     */
    struct radeon_cmdbuf *(*cs_create)(struct radeon_winsys_ctx *ctx,
                                       enum ring_type ring_type,
                                       void (*flush)(void *ctx, unsigned flags,
                                                     struct pipe_fence_handle **fence),
                                       void *flush_ctx,
                                       bool stop_exec_on_failure);

    /**
     * Destroy a command stream.
     *
     * \param cs A command stream to destroy.
     */
    void (*cs_destroy)(struct radeon_cmdbuf *cs);

    /**
     * Add a buffer. Each buffer used by a CS must be added using this function.
     *
     * \param cs Command stream
     * \param buf Buffer
     * \param usage Whether the buffer is used for read and/or write.
     * \param domain Bitmask of the RADEON_DOMAIN_* flags.
     * \param priority A higher number means a greater chance of being
     *                 placed in the requested domain. 15 is the maximum.
     * \return Buffer index.
     */
    unsigned (*cs_add_buffer)(struct radeon_cmdbuf *cs,
                              struct pb_buffer *buf,
                              enum radeon_bo_usage usage,
                              enum radeon_bo_domain domain,
                              enum radeon_bo_priority priority);
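
    /*
     * Usage sketch (illustrative): creating a GFX command stream and
     * registering a buffer before emitting packets that reference it. The
     * flush callback "my_flush_cb" and its cookie are assumptions of the
     * example.
     *
     *    struct radeon_cmdbuf *cs =
     *       ws->cs_create(ctx, RING_GFX, my_flush_cb, my_flush_cookie, false);
     *    ws->cs_add_buffer(cs, buf,
     *                      RADEON_USAGE_READ | RADEON_USAGE_SYNCHRONIZED,
     *                      RADEON_DOMAIN_VRAM, RADEON_PRIO_SAMPLER_TEXTURE);
     */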

    /**
     * Return the index of an already-added buffer.
     *
     * Not supported on amdgpu. Drivers with GPUVM should not care about
     * buffer indices.
     *
     * \param cs Command stream
     * \param buf Buffer
     * \return The buffer index, or -1 if the buffer has not been added.
     */
    int (*cs_lookup_buffer)(struct radeon_cmdbuf *cs,
                            struct pb_buffer *buf);

    /**
     * Return true if there is enough memory in VRAM and GTT for the buffers
     * added so far. If the validation fails, all buffers which have
     * been added since the last call of cs_validate will be removed and
     * the CS will be flushed (provided there are still any buffers).
     *
     * \param cs A command stream to validate.
     */
    bool (*cs_validate)(struct radeon_cmdbuf *cs);

    /**
     * Check whether the given number of dwords is available in the IB.
     * Optionally chain a new chunk of the IB if necessary and supported.
     *
     * \param cs A command stream.
     * \param dw Number of CS dwords requested by the caller.
     */
    bool (*cs_check_space)(struct radeon_cmdbuf *cs, unsigned dw);
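
    /*
     * Usage sketch (illustrative): the usual pattern is to validate memory
     * usage after adding buffers and to reserve dword space right before
     * emitting a packet.
     *
     *    if (!ws->cs_validate(cs))
     *       return; // buffers were dropped and the CS was flushed
     *    if (!ws->cs_check_space(cs, 4))
     *       return; // no space left and chaining is not supported
     */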

    /**
     * Return the buffer list.
     *
     * This is the buffer list as passed to the kernel, i.e. it only contains
     * the parent buffers of sub-allocated buffers.
     *
     * \param cs Command stream
     * \param list Returned buffer list. Set to NULL to query the count only.
     * \return The buffer count.
     */
    unsigned (*cs_get_buffer_list)(struct radeon_cmdbuf *cs,
                                   struct radeon_bo_list_item *list);

    /**
     * Flush a command stream.
     *
     * \param cs A command stream to flush.
     * \param flags PIPE_FLUSH_* flags.
     * \param fence Pointer to a fence. If non-NULL, a fence is inserted
     *              after the CS and is returned through this parameter.
     * \return Negative POSIX error code or 0 for success.
     *         Asynchronous submissions never return an error.
     */
    int (*cs_flush)(struct radeon_cmdbuf *cs,
                    unsigned flags,
                    struct pipe_fence_handle **fence);
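
    /*
     * Usage sketch (illustrative): submitting the IB asynchronously and
     * waiting on the returned fence later.
     *
     *    struct pipe_fence_handle *fence = NULL;
     *    ws->cs_flush(cs, PIPE_FLUSH_ASYNC, &fence);
     *    // ... do CPU work ...
     *    ws->fence_wait(ws, fence, PIPE_TIMEOUT_INFINITE);
     *    ws->fence_reference(&fence, NULL);
     */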

    /**
     * Create a fence before the CS is flushed.
     * The user must flush manually to complete the initialization of the fence.
     *
     * The fence must not be used for anything except \ref cs_add_fence_dependency
     * before the flush.
     */
    struct pipe_fence_handle *(*cs_get_next_fence)(struct radeon_cmdbuf *cs);
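
    /*
     * Usage sketch (illustrative): making cs_b wait for work recorded in
     * cs_a that has not been flushed yet. The fence only becomes fully
     * initialized once cs_a is flushed manually.
     *
     *    struct pipe_fence_handle *next = ws->cs_get_next_fence(cs_a);
     *    ws->cs_add_fence_dependency(cs_b, next);
     *    ws->cs_flush(cs_a, PIPE_FLUSH_ASYNC, NULL);
     */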

    /**
     * Return true if a buffer is referenced by a command stream.
     *
     * \param cs A command stream.
     * \param buf A winsys buffer.
     */
    bool (*cs_is_buffer_referenced)(struct radeon_cmdbuf *cs,
                                    struct pb_buffer *buf,
                                    enum radeon_bo_usage usage);

    /**
     * Request access to a feature for a command stream.
     *
     * \param cs A command stream.
     * \param fid Feature ID, one of RADEON_FID_*
     * \param enable Whether to enable or disable the feature.
     */
    bool (*cs_request_feature)(struct radeon_cmdbuf *cs,
                               enum radeon_feature_id fid,
                               bool enable);
    /**
     * Make sure all asynchronous flushes of the CS have completed.
     *
     * \param cs A command stream.
     */
    void (*cs_sync_flush)(struct radeon_cmdbuf *cs);

    /**
     * Add a fence dependency to the CS, so that the CS will wait for
     * the fence before execution.
     */
    void (*cs_add_fence_dependency)(struct radeon_cmdbuf *cs,
                                    struct pipe_fence_handle *fence);

    /**
     * Signal a syncobj when the CS finishes execution.
     */
    void (*cs_add_syncobj_signal)(struct radeon_cmdbuf *cs,
                                  struct pipe_fence_handle *fence);

    /**
     * Wait for the fence and return true if the fence has been signalled.
     * The timeout of 0 will only return the status.
     * The timeout of PIPE_TIMEOUT_INFINITE will always wait until the fence
     * is signalled.
     */
    bool (*fence_wait)(struct radeon_winsys *ws,
                       struct pipe_fence_handle *fence,
                       uint64_t timeout);

    /**
     * Reference counting for fences.
     */
    void (*fence_reference)(struct pipe_fence_handle **dst,
                            struct pipe_fence_handle *src);

    /**
     * Create a new fence object corresponding to the given syncobj fd.
     */
    struct pipe_fence_handle *(*fence_import_syncobj)(struct radeon_winsys *ws,
                                                      int fd);

    /**
     * Create a new fence object corresponding to the given sync_file.
     */
    struct pipe_fence_handle *(*fence_import_sync_file)(struct radeon_winsys *ws,
                                                        int fd);

    /**
     * Return a sync_file FD corresponding to the given fence object.
     */
    int (*fence_export_sync_file)(struct radeon_winsys *ws,
                                  struct pipe_fence_handle *fence);

    /**
     * Return a sync file FD that is already signalled.
     */
    int (*export_signalled_sync_file)(struct radeon_winsys *ws);

    /**
     * Initialize a surface.
     *
     * \param ws The winsys this function is called from.
     * \param tex Input texture description
     * \param flags Bitmask of RADEON_SURF_* flags
     * \param bpe Bytes per pixel; it can be different for Z buffers.
     * \param mode Preferred tile mode. (linear, 1D, or 2D)
     * \param surf Output structure
     */
    int (*surface_init)(struct radeon_winsys *ws,
                        const struct pipe_resource *tex,
                        unsigned flags, unsigned bpe,
                        enum radeon_surf_mode mode,
                        struct radeon_surf *surf);

    uint64_t (*query_value)(struct radeon_winsys *ws,
                            enum radeon_value_id value);

    bool (*read_registers)(struct radeon_winsys *ws, unsigned reg_offset,
                           unsigned num_registers, uint32_t *out);

    const char* (*get_chip_name)(struct radeon_winsys *ws);
};

/* Return true if the CS contains more than "num_dw" dwords in total,
 * i.e. whether anything beyond the given baseline has been emitted. */
static inline bool radeon_emitted(struct radeon_cmdbuf *cs, unsigned num_dw)
{
    return cs && (cs->prev_dw + cs->current.cdw > num_dw);
}

/* Write one dword into the current IB chunk. The caller is responsible for
 * reserving enough space beforehand (see cs_check_space). */
static inline void radeon_emit(struct radeon_cmdbuf *cs, uint32_t value)
{
    cs->current.buf[cs->current.cdw++] = value;
}

/* Copy "count" dwords into the current IB chunk. */
static inline void radeon_emit_array(struct radeon_cmdbuf *cs,
                                     const uint32_t *values, unsigned count)
{
    memcpy(cs->current.buf + cs->current.cdw, values, count * 4);
    cs->current.cdw += count;
}
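
/*
 * Usage sketch (illustrative): emitting a packet with the helpers above,
 * after reserving space through cs_check_space. PKT3, PKT3_NOP and the
 * "payload" array are assumptions of the example, not defined here.
 *
 *    radeon_emit(cs, PKT3(PKT3_NOP, count - 1, 0));
 *    radeon_emit_array(cs, payload, count);
 */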

enum radeon_heap {
    RADEON_HEAP_VRAM_NO_CPU_ACCESS,
    RADEON_HEAP_VRAM_READ_ONLY,
    RADEON_HEAP_VRAM_READ_ONLY_32BIT,
    RADEON_HEAP_VRAM_32BIT,
    RADEON_HEAP_VRAM,
    RADEON_HEAP_GTT_WC,
    RADEON_HEAP_GTT_WC_READ_ONLY,
    RADEON_HEAP_GTT_WC_READ_ONLY_32BIT,
    RADEON_HEAP_GTT_WC_32BIT,
    RADEON_HEAP_GTT,
    RADEON_MAX_SLAB_HEAPS,
    RADEON_MAX_CACHED_HEAPS = RADEON_MAX_SLAB_HEAPS,
};

static inline enum radeon_bo_domain radeon_domain_from_heap(enum radeon_heap heap)
{
    switch (heap) {
    case RADEON_HEAP_VRAM_NO_CPU_ACCESS:
    case RADEON_HEAP_VRAM_READ_ONLY:
    case RADEON_HEAP_VRAM_READ_ONLY_32BIT:
    case RADEON_HEAP_VRAM_32BIT:
    case RADEON_HEAP_VRAM:
        return RADEON_DOMAIN_VRAM;
    case RADEON_HEAP_GTT_WC:
    case RADEON_HEAP_GTT_WC_READ_ONLY:
    case RADEON_HEAP_GTT_WC_READ_ONLY_32BIT:
    case RADEON_HEAP_GTT_WC_32BIT:
    case RADEON_HEAP_GTT:
        return RADEON_DOMAIN_GTT;
    default:
        assert(0);
        return (enum radeon_bo_domain)0;
    }
}

static inline unsigned radeon_flags_from_heap(enum radeon_heap heap)
{
    unsigned flags = RADEON_FLAG_NO_INTERPROCESS_SHARING |
                     (heap != RADEON_HEAP_GTT ? RADEON_FLAG_GTT_WC : 0);

    switch (heap) {
    case RADEON_HEAP_VRAM_NO_CPU_ACCESS:
        return flags |
               RADEON_FLAG_NO_CPU_ACCESS;

    case RADEON_HEAP_VRAM_READ_ONLY:
    case RADEON_HEAP_GTT_WC_READ_ONLY:
        return flags |
               RADEON_FLAG_READ_ONLY;

    case RADEON_HEAP_VRAM_READ_ONLY_32BIT:
    case RADEON_HEAP_GTT_WC_READ_ONLY_32BIT:
        return flags |
               RADEON_FLAG_READ_ONLY |
               RADEON_FLAG_32BIT;

    case RADEON_HEAP_VRAM_32BIT:
    case RADEON_HEAP_GTT_WC_32BIT:
        return flags |
               RADEON_FLAG_32BIT;

    case RADEON_HEAP_VRAM:
    case RADEON_HEAP_GTT_WC:
    case RADEON_HEAP_GTT:
    default:
        return flags;
    }
}

/* Return the heap index for winsys allocators, or -1 on failure. */
static inline int radeon_get_heap_index(enum radeon_bo_domain domain,
                                        enum radeon_bo_flag flags)
{
    /* VRAM implies WC (write combining) */
    assert(!(domain & RADEON_DOMAIN_VRAM) || flags & RADEON_FLAG_GTT_WC);
    /* NO_CPU_ACCESS implies VRAM only. */
    assert(!(flags & RADEON_FLAG_NO_CPU_ACCESS) || domain == RADEON_DOMAIN_VRAM);

    /* Resources with interprocess sharing don't use any winsys allocators. */
    if (!(flags & RADEON_FLAG_NO_INTERPROCESS_SHARING))
        return -1;

    /* Unsupported flags: NO_SUBALLOC, SPARSE. */
    if (flags & ~(RADEON_FLAG_GTT_WC |
                  RADEON_FLAG_NO_CPU_ACCESS |
                  RADEON_FLAG_NO_INTERPROCESS_SHARING |
                  RADEON_FLAG_READ_ONLY |
                  RADEON_FLAG_32BIT))
        return -1;

    switch (domain) {
    case RADEON_DOMAIN_VRAM:
        switch (flags & (RADEON_FLAG_NO_CPU_ACCESS |
                         RADEON_FLAG_READ_ONLY |
                         RADEON_FLAG_32BIT)) {
        case RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT:
        case RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_READ_ONLY:
            assert(!"NO_CPU_ACCESS | READ_ONLY doesn't make sense");
            return -1;
        case RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_32BIT:
            assert(!"NO_CPU_ACCESS with 32BIT is disallowed");
            return -1;
        case RADEON_FLAG_NO_CPU_ACCESS:
            return RADEON_HEAP_VRAM_NO_CPU_ACCESS;
        case RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT:
            return RADEON_HEAP_VRAM_READ_ONLY_32BIT;
        case RADEON_FLAG_READ_ONLY:
            return RADEON_HEAP_VRAM_READ_ONLY;
        case RADEON_FLAG_32BIT:
            return RADEON_HEAP_VRAM_32BIT;
        case 0:
            return RADEON_HEAP_VRAM;
        }
        break;
    case RADEON_DOMAIN_GTT:
        switch (flags & (RADEON_FLAG_GTT_WC |
                         RADEON_FLAG_READ_ONLY |
                         RADEON_FLAG_32BIT)) {
        case RADEON_FLAG_GTT_WC | RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT:
            return RADEON_HEAP_GTT_WC_READ_ONLY_32BIT;
        case RADEON_FLAG_GTT_WC | RADEON_FLAG_READ_ONLY:
            return RADEON_HEAP_GTT_WC_READ_ONLY;
        case RADEON_FLAG_GTT_WC | RADEON_FLAG_32BIT:
            return RADEON_HEAP_GTT_WC_32BIT;
        case RADEON_FLAG_GTT_WC:
            return RADEON_HEAP_GTT_WC;
        case RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT:
        case RADEON_FLAG_READ_ONLY:
            assert(!"READ_ONLY without WC is disallowed");
            return -1;
        case RADEON_FLAG_32BIT:
            assert(!"32BIT without WC is disallowed");
            return -1;
        case 0:
            return RADEON_HEAP_GTT;
        }
        break;
    default:
        break;
    }
    return -1;
}
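
/*
 * Usage sketch (illustrative): translating an allocation request into a
 * slab/cache heap index and back into the domain and flags used for the
 * actual allocation.
 *
 *    int heap = radeon_get_heap_index(RADEON_DOMAIN_VRAM,
 *                                     RADEON_FLAG_GTT_WC |
 *                                     RADEON_FLAG_NO_INTERPROCESS_SHARING);
 *    assert(heap == RADEON_HEAP_VRAM);
 *    enum radeon_bo_domain domain = radeon_domain_from_heap(heap);
 *    unsigned flags = radeon_flags_from_heap(heap);
 */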

#endif