/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors: Marek Olšák <maraeo@gmail.com>
 */
#include "r600_pipe_common.h"
#include "r600_cs.h"
#include "tgsi/tgsi_parse.h"
#include "util/list.h"
#include "util/u_draw_quad.h"
#include "util/u_memory.h"
#include "util/u_format_s3tc.h"
#include "util/u_upload_mgr.h"
#include "os/os_time.h"
#include "vl/vl_decoder.h"
#include "vl/vl_video_buffer.h"
#include "radeon_video.h"
#include <sys/utsname.h>

#include <llvm-c/TargetMachine.h>

#ifndef MESA_LLVM_VERSION_PATCH
#define MESA_LLVM_VERSION_PATCH 0
#endif
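
/* A fence wrapping one fence per ring (GFX and SDMA), plus bookkeeping for
 * deferred flushes, so callers see a single pipe_fence_handle. */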
struct r600_multi_fence {
	struct pipe_reference reference;
	struct pipe_fence_handle *gfx;
	struct pipe_fence_handle *sdma;

	/* If the context wasn't flushed at fence creation, this is non-NULL. */
	struct {
		struct r600_common_context *ctx;
		unsigned ib_index;
	} gfx_unflushed;
};
/*
 * shader binary helpers.
 */
void radeon_shader_binary_init(struct ac_shader_binary *b)
{
	memset(b, 0, sizeof(*b));
}

void radeon_shader_binary_clean(struct ac_shader_binary *b)
{
	if (!b)
		return;
	FREE(b->code);
	FREE(b->config);
	FREE(b->rodata);
	FREE(b->global_symbol_offsets);
	FREE(b->relocs);
	FREE(b->disasm_string);
	FREE(b->llvm_ir_string);
}
/**
 * Write an EOP event.
 *
 * \param event		EVENT_TYPE_*
 * \param event_flags	Optional cache flush flags (TC)
 * \param data_sel	1 = fence, 3 = timestamp
 * \param buf		Buffer
 * \param va		GPU address
 * \param old_value	Previous fence value (for a bug workaround)
 * \param new_value	Fence value to write for this event.
 */
void r600_gfx_write_event_eop(struct r600_common_context *ctx,
			      unsigned event, unsigned event_flags,
			      unsigned data_sel,
			      struct r600_resource *buf, uint64_t va,
			      uint32_t new_fence, unsigned query_type)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	unsigned op = EVENT_TYPE(event) |
		      EVENT_INDEX(5) |
		      event_flags;
	unsigned sel = EOP_DATA_SEL(data_sel);

	/* Wait for write confirmation before writing data, but don't send
	 * an interrupt. */
	if (ctx->chip_class >= SI && data_sel != EOP_DATA_SEL_DISCARD)
		sel |= EOP_INT_SEL(EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM);

	if (ctx->chip_class >= GFX9) {
		/* A ZPASS_DONE or PIXEL_STAT_DUMP_EVENT (of the DB occlusion
		 * counters) must immediately precede every timestamp event to
		 * prevent a GPU hang on GFX9.
		 *
		 * Occlusion queries don't need to do it here, because they
		 * always do ZPASS_DONE before the timestamp.
		 */
		if (ctx->chip_class == GFX9 &&
		    query_type != PIPE_QUERY_OCCLUSION_COUNTER &&
		    query_type != PIPE_QUERY_OCCLUSION_PREDICATE) {
			struct r600_resource *scratch = ctx->eop_bug_scratch;

			assert(16 * ctx->screen->info.num_render_backends <=
			       scratch->b.b.width0);
			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
			radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
			radeon_emit(cs, scratch->gpu_address);
			radeon_emit(cs, scratch->gpu_address >> 32);

			radeon_add_to_buffer_list(ctx, &ctx->gfx, scratch,
						  RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
		}

		radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, 6, 0));
		radeon_emit(cs, op);
		radeon_emit(cs, sel);
		radeon_emit(cs, va);		/* address lo */
		radeon_emit(cs, va >> 32);	/* address hi */
		radeon_emit(cs, new_fence);	/* immediate data lo */
		radeon_emit(cs, 0);		/* immediate data hi */
		radeon_emit(cs, 0);		/* unused */
	} else {
		if (ctx->chip_class == CIK ||
		    ctx->chip_class == VI) {
			struct r600_resource *scratch = ctx->eop_bug_scratch;
			uint64_t va = scratch->gpu_address;

			/* Two EOP events are required to make all engines go idle
			 * (and optional cache flushes executed) before the timestamp
			 * is written.
			 */
			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
			radeon_emit(cs, op);
			radeon_emit(cs, va);
			radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
			radeon_emit(cs, 0); /* immediate data */
			radeon_emit(cs, 0); /* unused */

			radeon_add_to_buffer_list(ctx, &ctx->gfx, scratch,
						  RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
		}

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, op);
		radeon_emit(cs, va);
		radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
		radeon_emit(cs, new_fence); /* immediate data */
		radeon_emit(cs, 0); /* unused */
	}

	if (buf)
		r600_emit_reloc(ctx, &ctx->gfx, buf, RADEON_USAGE_WRITE,
				RADEON_PRIO_QUERY);
}
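
/* Dword budget for one r600_gfx_write_event_eop call. The base cost is
 * doubled on CIK/VI to match the two EOP packets emitted above; the two
 * extra dwords without virtual memory are presumably for the manual reloc. */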
unsigned r600_gfx_write_fence_dwords(struct r600_common_screen *screen)
{
	unsigned dwords = 6;

	if (screen->chip_class == CIK ||
	    screen->chip_class == VI)
		dwords *= 2;

	if (!screen->info.has_virtual_memory)
		dwords += 2;

	return dwords;
}
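
/* Emit a WAIT_REG_MEM packet that makes the CP poll the 32-bit word at
 * \p va until (word & mask) == ref, re-checking every 4 clocks. */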
void r600_gfx_wait_fence(struct r600_common_context *ctx,
			 uint64_t va, uint32_t ref, uint32_t mask)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;

	radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
	radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
	radeon_emit(cs, va);
	radeon_emit(cs, va >> 32);
	radeon_emit(cs, ref); /* reference value */
	radeon_emit(cs, mask); /* mask */
	radeon_emit(cs, 4); /* poll interval */
}
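
/* u_blitter's draw_rectangle callback: draws one (optionally instanced)
 * rectangle as a RECTLIST primitive, uploading 3 vertices of 8 floats each
 * (position + one attribute) through the stream uploader. */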
void r600_draw_rectangle(struct blitter_context *blitter,
			 void *vertex_elements_cso,
			 int x1, int y1, int x2, int y2,
			 float depth, unsigned num_instances,
			 enum blitter_attrib_type type,
			 const union blitter_attrib *attrib)
{
	struct r600_common_context *rctx =
		(struct r600_common_context*)util_blitter_get_pipe(blitter);
	struct pipe_viewport_state viewport;
	struct pipe_resource *buf = NULL;
	unsigned offset = 0;
	float *vb;

	rctx->b.bind_vertex_elements_state(&rctx->b, vertex_elements_cso);

	/* Some operations (like color resolve on r6xx) don't work
	 * with the conventional primitive types.
	 * One that works is PT_RECTLIST, which we use here. */

	/* setup viewport */
	viewport.scale[0] = 1.0f;
	viewport.scale[1] = 1.0f;
	viewport.scale[2] = 1.0f;
	viewport.translate[0] = 0.0f;
	viewport.translate[1] = 0.0f;
	viewport.translate[2] = 0.0f;
	rctx->b.set_viewport_states(&rctx->b, 0, 1, &viewport);

	/* Upload vertices. The hw rectangle has only 3 vertices,
	 * The 4th one is derived from the first 3.
	 * The vertex specification should match u_blitter's vertex element state. */
	u_upload_alloc(rctx->b.stream_uploader, 0, sizeof(float) * 24,
		       rctx->screen->info.tcc_cache_line_size,
		       &offset, &buf, (void**)&vb);
	if (!buf)
		return;

	vb[0] = x1;
	vb[1] = y1;
	vb[2] = depth;
	vb[3] = 1;

	vb[8] = x1;
	vb[9] = y2;
	vb[10] = depth;
	vb[11] = 1;

	vb[16] = x2;
	vb[17] = y1;
	vb[18] = depth;
	vb[19] = 1;

	switch (type) {
	case UTIL_BLITTER_ATTRIB_COLOR:
		memcpy(vb+4, attrib->color, sizeof(float)*4);
		memcpy(vb+12, attrib->color, sizeof(float)*4);
		memcpy(vb+20, attrib->color, sizeof(float)*4);
		break;
	case UTIL_BLITTER_ATTRIB_TEXCOORD_XYZW:
	case UTIL_BLITTER_ATTRIB_TEXCOORD_XY:
		vb[6] = vb[14] = vb[22] = attrib->texcoord.z;
		vb[7] = vb[15] = vb[23] = attrib->texcoord.w;

		vb[4] = attrib->texcoord.x1;
		vb[5] = attrib->texcoord.y1;
		vb[12] = attrib->texcoord.x1;
		vb[13] = attrib->texcoord.y2;
		vb[20] = attrib->texcoord.x2;
		vb[21] = attrib->texcoord.y1;
		break;
	default:; /* Nothing to do. */
	}

	/* draw */
	struct pipe_vertex_buffer vbuffer = {};
	vbuffer.buffer.resource = buf;
	vbuffer.stride = 2 * 4 * sizeof(float); /* vertex size */
	vbuffer.buffer_offset = offset;

	rctx->b.set_vertex_buffers(&rctx->b, blitter->vb_slot, 1, &vbuffer);
	util_draw_arrays_instanced(&rctx->b, R600_PRIM_RECTANGLE_LIST, 0, 3,
				   0, num_instances);
	pipe_resource_reference(&buf, NULL);
}
static void r600_dma_emit_wait_idle(struct r600_common_context *rctx)
{
	struct radeon_winsys_cs *cs = rctx->dma.cs;

	/* NOP waits for idle on Evergreen and later. */
	if (rctx->chip_class >= CIK)
		radeon_emit(cs, 0x00000000); /* NOP */
	else if (rctx->chip_class >= EVERGREEN)
		radeon_emit(cs, 0xf0000000); /* NOP */
	else {
		/* TODO: R600-R700 should use the FENCE packet.
		 * CS checker support is required. */
	}
}
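
/* Reserve space in the SDMA IB for \p num_dw dwords of packets that read
 * \p src and write \p dst (either may be NULL), flushing the GFX IB and/or
 * waiting for SDMA idle first when dependencies require it. */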
void r600_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
			 struct r600_resource *dst, struct r600_resource *src)
{
	uint64_t vram = ctx->dma.cs->used_vram;
	uint64_t gtt = ctx->dma.cs->used_gart;

	if (dst) {
		vram += dst->vram_usage;
		gtt += dst->gart_usage;
	}
	if (src) {
		vram += src->vram_usage;
		gtt += src->gart_usage;
	}

	/* Flush the GFX IB if DMA depends on it. */
	if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
	    ((dst &&
	      ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, dst->buf,
					       RADEON_USAGE_READWRITE)) ||
	     (src &&
	      ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, src->buf,
					       RADEON_USAGE_WRITE))))
		ctx->gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);

	/* Flush if there's not enough space, or if the memory usage per IB
	 * is too large.
	 *
	 * IBs using too little memory are limited by the IB submission overhead.
	 * IBs using too much memory are limited by the kernel/TTM overhead.
	 * Too long IBs create CPU-GPU pipeline bubbles and add latency.
	 *
	 * This heuristic makes sure that DMA requests are executed
	 * very soon after the call is made and lowers memory usage.
	 * It improves texture upload performance by keeping the DMA
	 * engine busy while uploads are being submitted.
	 */
	num_dw++; /* for emit_wait_idle below */
	if (!ctx->ws->cs_check_space(ctx->dma.cs, num_dw) ||
	    ctx->dma.cs->used_vram + ctx->dma.cs->used_gart > 64 * 1024 * 1024 ||
	    !radeon_cs_memory_below_limit(ctx->screen, ctx->dma.cs, vram, gtt)) {
		ctx->dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
		assert((num_dw + ctx->dma.cs->current.cdw) <= ctx->dma.cs->current.max_dw);
	}

	/* Wait for idle if either buffer has been used in the IB before to
	 * prevent read-after-write hazards.
	 */
	if ((dst &&
	     ctx->ws->cs_is_buffer_referenced(ctx->dma.cs, dst->buf,
					      RADEON_USAGE_READWRITE)) ||
	    (src &&
	     ctx->ws->cs_is_buffer_referenced(ctx->dma.cs, src->buf,
					      RADEON_USAGE_WRITE)))
		r600_dma_emit_wait_idle(ctx);

	/* If GPUVM is not supported, the CS checker needs 2 entries
	 * in the buffer list per packet, which has to be done manually.
	 */
	if (ctx->screen->info.has_virtual_memory) {
		if (dst)
			radeon_add_to_buffer_list(ctx, &ctx->dma, dst,
						  RADEON_USAGE_WRITE,
						  RADEON_PRIO_SDMA_BUFFER);
		if (src)
			radeon_add_to_buffer_list(ctx, &ctx->dma, src,
						  RADEON_USAGE_READ,
						  RADEON_PRIO_SDMA_BUFFER);
	}

	/* this function is called before all DMA calls, so increment this. */
	ctx->num_dma_calls++;
}
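
/* No-op implementation of pipe_context::memory_barrier. */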
static void r600_memory_barrier(struct pipe_context *ctx, unsigned flags)
{
}
void r600_preflush_suspend_features(struct r600_common_context *ctx)
{
	/* suspend queries */
	if (!LIST_IS_EMPTY(&ctx->active_queries))
		r600_suspend_queries(ctx);

	ctx->streamout.suspended = false;
	if (ctx->streamout.begin_emitted) {
		r600_emit_streamout_end(ctx);
		ctx->streamout.suspended = true;
	}
}
void r600_postflush_resume_features(struct r600_common_context *ctx)
{
	if (ctx->streamout.suspended) {
		ctx->streamout.append_bitmask = ctx->streamout.enabled_mask;
		r600_streamout_buffers_dirty(ctx);
	}

	/* resume queries */
	if (!LIST_IS_EMPTY(&ctx->active_queries))
		r600_resume_queries(ctx);
}
static void r600_add_fence_dependency(struct r600_common_context *rctx,
				      struct pipe_fence_handle *fence)
{
	struct radeon_winsys *ws = rctx->ws;

	if (rctx->dma.cs)
		ws->cs_add_fence_dependency(rctx->dma.cs, fence);
	ws->cs_add_fence_dependency(rctx->gfx.cs, fence);
}
static void r600_fence_server_sync(struct pipe_context *ctx,
				   struct pipe_fence_handle *fence)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_multi_fence *rfence = (struct r600_multi_fence *)fence;

	/* Only amdgpu needs to handle fence dependencies (for fence imports).
	 * radeon synchronizes all rings by default and will not implement
	 * fence imports.
	 */
	if (rctx->screen->info.drm_major == 2)
		return;

	/* Only imported fences need to be handled by fence_server_sync,
	 * because the winsys handles synchronizations automatically for BOs
	 * within the process.
	 *
	 * Simply skip unflushed fences here, and the winsys will drop no-op
	 * dependencies (i.e. dependencies within the same ring).
	 */
	if (rfence->gfx_unflushed.ctx)
		return;

	/* All unflushed commands will not start execution before
	 * this fence dependency is signalled.
	 *
	 * Should we flush the context to allow more GPU parallelism?
	 */
	if (rfence->sdma)
		r600_add_fence_dependency(rctx, rfence->sdma);
	if (rfence->gfx)
		r600_add_fence_dependency(rctx, rfence->gfx);
}
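
/* pipe_context::flush entry point. The SDMA IB is flushed first because it
 * is a preamble to the gfx IB; both resulting fences are then wrapped in a
 * single r600_multi_fence, which may record an unflushed (deferred) gfx
 * submission instead of a real gfx fence. */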
static void r600_flush_from_st(struct pipe_context *ctx,
			       struct pipe_fence_handle **fence,
			       unsigned flags)
{
	struct pipe_screen *screen = ctx->screen;
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct radeon_winsys *ws = rctx->ws;
	struct pipe_fence_handle *gfx_fence = NULL;
	struct pipe_fence_handle *sdma_fence = NULL;
	bool deferred_fence = false;
	unsigned rflags = RADEON_FLUSH_ASYNC;

	if (flags & PIPE_FLUSH_END_OF_FRAME)
		rflags |= RADEON_FLUSH_END_OF_FRAME;

	/* DMA IBs are preambles to gfx IBs, therefore must be flushed first. */
	if (rctx->dma.cs)
		rctx->dma.flush(rctx, rflags, fence ? &sdma_fence : NULL);

	if (!radeon_emitted(rctx->gfx.cs, rctx->initial_gfx_cs_size)) {
		if (fence)
			ws->fence_reference(&gfx_fence, rctx->last_gfx_fence);
		if (!(flags & PIPE_FLUSH_DEFERRED))
			ws->cs_sync_flush(rctx->gfx.cs);
	} else {
		/* Instead of flushing, create a deferred fence. Constraints:
		 * - The state tracker must allow a deferred flush.
		 * - The state tracker must request a fence.
		 * Thread safety in fence_finish must be ensured by the state tracker.
		 */
		if (flags & PIPE_FLUSH_DEFERRED && fence) {
			gfx_fence = rctx->ws->cs_get_next_fence(rctx->gfx.cs);
			deferred_fence = true;
		} else {
			rctx->gfx.flush(rctx, rflags, fence ? &gfx_fence : NULL);
		}
	}

	/* Both engines can signal out of order, so we need to keep both fences. */
	if (fence) {
		struct r600_multi_fence *multi_fence =
			CALLOC_STRUCT(r600_multi_fence);
		if (!multi_fence) {
			ws->fence_reference(&sdma_fence, NULL);
			ws->fence_reference(&gfx_fence, NULL);
			goto finish;
		}

		multi_fence->reference.count = 1;
		/* If both fences are NULL, fence_finish will always return true. */
		multi_fence->gfx = gfx_fence;
		multi_fence->sdma = sdma_fence;

		if (deferred_fence) {
			multi_fence->gfx_unflushed.ctx = rctx;
			multi_fence->gfx_unflushed.ib_index = rctx->num_gfx_cs_flushes;
		}

		screen->fence_reference(screen, fence, NULL);
		*fence = (struct pipe_fence_handle *)multi_fence;
	}
finish:
	if (!(flags & PIPE_FLUSH_DEFERRED)) {
		if (rctx->dma.cs)
			ws->cs_sync_flush(rctx->dma.cs);
		ws->cs_sync_flush(rctx->gfx.cs);
	}
}
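
/* Flush callback for the SDMA ring. With R600_DEBUG=check_vm, the IB is
 * saved before submission, and after the bounded wait below it is handed
 * to the driver's check_vm_faults hook for inspection. */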
static void r600_flush_dma_ring(void *ctx, unsigned flags,
				struct pipe_fence_handle **fence)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct radeon_winsys_cs *cs = rctx->dma.cs;
	struct radeon_saved_cs saved;
	bool check_vm =
		(rctx->screen->debug_flags & DBG_CHECK_VM) &&
		rctx->check_vm_faults;

	if (!radeon_emitted(cs, 0)) {
		if (fence)
			rctx->ws->fence_reference(fence, rctx->last_sdma_fence);
		return;
	}

	if (check_vm)
		radeon_save_cs(rctx->ws, cs, &saved, true);

	rctx->ws->cs_flush(cs, flags, &rctx->last_sdma_fence);
	if (fence)
		rctx->ws->fence_reference(fence, rctx->last_sdma_fence);

	if (check_vm) {
		/* Use conservative timeout 800ms, after which we won't wait any
		 * longer and assume the GPU is hung.
		 */
		rctx->ws->fence_wait(rctx->ws, rctx->last_sdma_fence, 800*1000*1000);

		rctx->check_vm_faults(rctx, &saved, RING_DMA);
		radeon_clear_saved_cs(&saved);
	}
}
/**
 * Store a linearized copy of all chunks of \p cs together with the buffer
 * list in \p saved.
 */
void radeon_save_cs(struct radeon_winsys *ws, struct radeon_winsys_cs *cs,
		    struct radeon_saved_cs *saved, bool get_buffer_list)
{
	uint32_t *buf;
	unsigned i;

	/* Save the IB chunks. */
	saved->num_dw = cs->prev_dw + cs->current.cdw;
	saved->ib = MALLOC(4 * saved->num_dw);
	if (!saved->ib)
		goto oom;

	buf = saved->ib;
	for (i = 0; i < cs->num_prev; ++i) {
		memcpy(buf, cs->prev[i].buf, cs->prev[i].cdw * 4);
		buf += cs->prev[i].cdw;
	}
	memcpy(buf, cs->current.buf, cs->current.cdw * 4);

	if (!get_buffer_list)
		return;

	/* Save the buffer list. */
	saved->bo_count = ws->cs_get_buffer_list(cs, NULL);
	saved->bo_list = CALLOC(saved->bo_count,
				sizeof(saved->bo_list[0]));
	if (!saved->bo_list) {
		FREE(saved->ib);
		goto oom;
	}
	ws->cs_get_buffer_list(cs, saved->bo_list);

	return;

oom:
	fprintf(stderr, "%s: out of memory\n", __func__);
	memset(saved, 0, sizeof(*saved));
}
void radeon_clear_saved_cs(struct radeon_saved_cs *saved)
{
	FREE(saved->ib);
	FREE(saved->bo_list);

	memset(saved, 0, sizeof(*saved));
}
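
/* Compare the kernel's GPU reset counter against the value cached at
 * context creation; any change means some context (not necessarily this
 * one) caused a reset. */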
static enum pipe_reset_status r600_get_reset_status(struct pipe_context *ctx)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	unsigned latest = rctx->ws->query_value(rctx->ws,
						RADEON_GPU_RESET_COUNTER);

	if (rctx->gpu_reset_counter == latest)
		return PIPE_NO_RESET;

	rctx->gpu_reset_counter = latest;
	return PIPE_UNKNOWN_CONTEXT_RESET;
}
static void r600_set_debug_callback(struct pipe_context *ctx,
				    const struct pipe_debug_callback *cb)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;

	if (cb)
		rctx->debug = *cb;
	else
		memset(&rctx->debug, 0, sizeof(rctx->debug));
}
static void r600_set_device_reset_callback(struct pipe_context *ctx,
					   const struct pipe_device_reset_callback *cb)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;

	if (cb)
		rctx->device_reset_callback = *cb;
	else
		memset(&rctx->device_reset_callback, 0,
		       sizeof(rctx->device_reset_callback));
}
bool r600_check_device_reset(struct r600_common_context *rctx)
{
	enum pipe_reset_status status;

	if (!rctx->device_reset_callback.reset)
		return false;

	if (!rctx->b.get_device_reset_status)
		return false;

	status = rctx->b.get_device_reset_status(&rctx->b);
	if (status == PIPE_NO_RESET)
		return false;

	rctx->device_reset_callback.reset(rctx->device_reset_callback.data, status);
	return true;
}
static void r600_dma_clear_buffer_fallback(struct pipe_context *ctx,
					   struct pipe_resource *dst,
					   uint64_t offset, uint64_t size,
					   unsigned value)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;

	rctx->clear_buffer(ctx, dst, offset, size, value, R600_COHERENCY_NONE);
}
static bool r600_resource_commit(struct pipe_context *pctx,
				 struct pipe_resource *resource,
				 unsigned level, struct pipe_box *box,
				 bool commit)
{
	struct r600_common_context *ctx = (struct r600_common_context *)pctx;
	struct r600_resource *res = r600_resource(resource);

	/*
	 * Since buffer commitment changes cannot be pipelined, we need to
	 * (a) flush any pending commands that refer to the buffer we're about
	 *     to change, and
	 * (b) wait for threaded submit to finish, including those that were
	 *     triggered by some other, earlier operation.
	 */
	if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
	    ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
					     res->buf, RADEON_USAGE_READWRITE)) {
		ctx->gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
	}
	if (radeon_emitted(ctx->dma.cs, 0) &&
	    ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
					     res->buf, RADEON_USAGE_READWRITE)) {
		ctx->dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
	}

	ctx->ws->cs_sync_flush(ctx->dma.cs);
	ctx->ws->cs_sync_flush(ctx->gfx.cs);

	assert(resource->target == PIPE_BUFFER);

	return ctx->ws->buffer_commit(res->buf, box->x, box->width, commit);
}
bool r600_common_context_init(struct r600_common_context *rctx,
			      struct r600_common_screen *rscreen,
			      unsigned context_flags)
{
	slab_create_child(&rctx->pool_transfers, &rscreen->pool_transfers);
	slab_create_child(&rctx->pool_transfers_unsync, &rscreen->pool_transfers);

	rctx->screen = rscreen;
	rctx->ws = rscreen->ws;
	rctx->family = rscreen->family;
	rctx->chip_class = rscreen->chip_class;

	rctx->b.invalidate_resource = r600_invalidate_resource;
	rctx->b.resource_commit = r600_resource_commit;
	rctx->b.transfer_map = u_transfer_map_vtbl;
	rctx->b.transfer_flush_region = u_transfer_flush_region_vtbl;
	rctx->b.transfer_unmap = u_transfer_unmap_vtbl;
	rctx->b.texture_subdata = u_default_texture_subdata;
	rctx->b.memory_barrier = r600_memory_barrier;
	rctx->b.flush = r600_flush_from_st;
	rctx->b.set_debug_callback = r600_set_debug_callback;
	rctx->b.fence_server_sync = r600_fence_server_sync;
	rctx->dma_clear_buffer = r600_dma_clear_buffer_fallback;

	/* evergreen_compute.c has a special codepath for global buffers.
	 * Everything else can use the direct path.
	 */
	if ((rscreen->chip_class == EVERGREEN || rscreen->chip_class == CAYMAN) &&
	    (context_flags & PIPE_CONTEXT_COMPUTE_ONLY))
		rctx->b.buffer_subdata = u_default_buffer_subdata;
	else
		rctx->b.buffer_subdata = r600_buffer_subdata;

	if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 43) {
		rctx->b.get_device_reset_status = r600_get_reset_status;
		rctx->gpu_reset_counter =
			rctx->ws->query_value(rctx->ws,
					      RADEON_GPU_RESET_COUNTER);
	}

	rctx->b.set_device_reset_callback = r600_set_device_reset_callback;

	r600_init_context_texture_functions(rctx);
	r600_init_viewport_functions(rctx);
	r600_streamout_init(rctx);
	r600_query_init(rctx);
	cayman_init_msaa(&rctx->b);

	if (rctx->chip_class == CIK ||
	    rctx->chip_class == VI ||
	    rctx->chip_class == GFX9) {
		rctx->eop_bug_scratch = (struct r600_resource*)
			pipe_buffer_create(&rscreen->b, 0, PIPE_USAGE_DEFAULT,
					   16 * rscreen->info.num_render_backends);
		if (!rctx->eop_bug_scratch)
			return false;
	}

	rctx->allocator_zeroed_memory =
		u_suballocator_create(&rctx->b, rscreen->info.gart_page_size,
				      0, PIPE_USAGE_DEFAULT, 0, true);
	if (!rctx->allocator_zeroed_memory)
		return false;

	rctx->b.stream_uploader = u_upload_create(&rctx->b, 1024 * 1024,
						  0, PIPE_USAGE_STREAM);
	if (!rctx->b.stream_uploader)
		return false;

	rctx->b.const_uploader = u_upload_create(&rctx->b, 128 * 1024,
						 0, PIPE_USAGE_DEFAULT);
	if (!rctx->b.const_uploader)
		return false;

	rctx->ctx = rctx->ws->ctx_create(rctx->ws);
	if (!rctx->ctx)
		return false;

	if (rscreen->info.num_sdma_rings && !(rscreen->debug_flags & DBG_NO_ASYNC_DMA)) {
		rctx->dma.cs = rctx->ws->cs_create(rctx->ctx, RING_DMA,
						   r600_flush_dma_ring,
						   rctx);
		rctx->dma.flush = r600_flush_dma_ring;
	}

	return true;
}
void r600_common_context_cleanup(struct r600_common_context *rctx)
{
	unsigned i, j;

	/* Release DCC stats. */
	for (i = 0; i < ARRAY_SIZE(rctx->dcc_stats); i++) {
		assert(!rctx->dcc_stats[i].query_active);

		for (j = 0; j < ARRAY_SIZE(rctx->dcc_stats[i].ps_stats); j++)
			if (rctx->dcc_stats[i].ps_stats[j])
				rctx->b.destroy_query(&rctx->b,
						      rctx->dcc_stats[i].ps_stats[j]);

		r600_texture_reference(&rctx->dcc_stats[i].tex, NULL);
	}

	if (rctx->query_result_shader)
		rctx->b.delete_compute_state(&rctx->b, rctx->query_result_shader);

	if (rctx->gfx.cs)
		rctx->ws->cs_destroy(rctx->gfx.cs);
	if (rctx->dma.cs)
		rctx->ws->cs_destroy(rctx->dma.cs);
	if (rctx->ctx)
		rctx->ws->ctx_destroy(rctx->ctx);

	if (rctx->b.stream_uploader)
		u_upload_destroy(rctx->b.stream_uploader);
	if (rctx->b.const_uploader)
		u_upload_destroy(rctx->b.const_uploader);

	slab_destroy_child(&rctx->pool_transfers);
	slab_destroy_child(&rctx->pool_transfers_unsync);

	if (rctx->allocator_zeroed_memory) {
		u_suballocator_destroy(rctx->allocator_zeroed_memory);
	}
	rctx->ws->fence_reference(&rctx->last_gfx_fence, NULL);
	rctx->ws->fence_reference(&rctx->last_sdma_fence, NULL);
	r600_resource_reference(&rctx->eop_bug_scratch, NULL);
}
static const struct debug_named_value common_debug_options[] = {
	/* logging */
	{ "tex", DBG_TEX, "Print texture info" },
	{ "nir", DBG_NIR, "Enable experimental NIR shaders" },
	{ "compute", DBG_COMPUTE, "Print compute info" },
	{ "vm", DBG_VM, "Print virtual addresses when creating resources" },
	{ "info", DBG_INFO, "Print driver information" },

	/* shaders */
	{ "fs", DBG_FS, "Print fetch shaders" },
	{ "vs", DBG_VS, "Print vertex shaders" },
	{ "gs", DBG_GS, "Print geometry shaders" },
	{ "ps", DBG_PS, "Print pixel shaders" },
	{ "cs", DBG_CS, "Print compute shaders" },
	{ "tcs", DBG_TCS, "Print tessellation control shaders" },
	{ "tes", DBG_TES, "Print tessellation evaluation shaders" },
	{ "noir", DBG_NO_IR, "Don't print the LLVM IR"},
	{ "notgsi", DBG_NO_TGSI, "Don't print the TGSI"},
	{ "noasm", DBG_NO_ASM, "Don't print disassembled shaders"},
	{ "preoptir", DBG_PREOPT_IR, "Print the LLVM IR before initial optimizations" },
	{ "checkir", DBG_CHECK_IR, "Enable additional sanity checks on shader IR" },
	{ "nooptvariant", DBG_NO_OPT_VARIANT, "Disable compiling optimized shader variants." },

	{ "testdma", DBG_TEST_DMA, "Invoke SDMA tests and exit." },
	{ "testvmfaultcp", DBG_TEST_VMFAULT_CP, "Invoke a CP VM fault test and exit." },
	{ "testvmfaultsdma", DBG_TEST_VMFAULT_SDMA, "Invoke a SDMA VM fault test and exit." },
	{ "testvmfaultshader", DBG_TEST_VMFAULT_SHADER, "Invoke a shader VM fault test and exit." },

	/* features */
	{ "nodma", DBG_NO_ASYNC_DMA, "Disable asynchronous DMA" },
	{ "nohyperz", DBG_NO_HYPERZ, "Disable Hyper-Z" },
	/* GL uses the word INVALIDATE, gallium uses the word DISCARD */
	{ "noinvalrange", DBG_NO_DISCARD_RANGE, "Disable handling of INVALIDATE_RANGE map flags" },
	{ "no2d", DBG_NO_2D_TILING, "Disable 2D tiling" },
	{ "notiling", DBG_NO_TILING, "Disable tiling" },
	{ "switch_on_eop", DBG_SWITCH_ON_EOP, "Program WD/IA to switch on end-of-packet." },
	{ "forcedma", DBG_FORCE_DMA, "Use asynchronous DMA for all operations when possible." },
	{ "precompile", DBG_PRECOMPILE, "Compile one shader variant at shader creation." },
	{ "nowc", DBG_NO_WC, "Disable GTT write combining" },
	{ "check_vm", DBG_CHECK_VM, "Check VM faults and dump debug info." },
	{ "nodcc", DBG_NO_DCC, "Disable DCC." },
	{ "nodccclear", DBG_NO_DCC_CLEAR, "Disable DCC fast clear." },
	{ "norbplus", DBG_NO_RB_PLUS, "Disable RB+." },
	{ "sisched", DBG_SI_SCHED, "Enable LLVM SI Machine Instruction Scheduler." },
	{ "mono", DBG_MONOLITHIC_SHADERS, "Use old-style monolithic shaders compiled on demand" },
	{ "unsafemath", DBG_UNSAFE_MATH, "Enable unsafe math shader optimizations" },
	{ "nodccfb", DBG_NO_DCC_FB, "Disable separate DCC on the main framebuffer" },
	{ "nodpbb", DBG_NO_DPBB, "Disable DPBB." },
	{ "nodfsm", DBG_NO_DFSM, "Disable DFSM." },

	DEBUG_NAMED_VALUE_END /* must be last */
};
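
/* These flags come from the comma-separated R600_DEBUG environment
 * variable, parsed by debug_get_flags_option() in r600_common_screen_init
 * below, e.g.: R600_DEBUG=info,check_vm,nodcc <application> */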
static const char* r600_get_vendor(struct pipe_screen* pscreen)
{
	return "X.Org";
}
static const char* r600_get_device_vendor(struct pipe_screen* pscreen)
{
	return "AMD";
}
static const char *r600_get_marketing_name(struct radeon_winsys *ws)
{
	if (!ws->get_chip_name)
		return NULL;
	return ws->get_chip_name(ws);
}
static const char *r600_get_family_name(const struct r600_common_screen *rscreen)
{
	switch (rscreen->info.family) {
	case CHIP_R600: return "AMD R600";
	case CHIP_RV610: return "AMD RV610";
	case CHIP_RV630: return "AMD RV630";
	case CHIP_RV670: return "AMD RV670";
	case CHIP_RV620: return "AMD RV620";
	case CHIP_RV635: return "AMD RV635";
	case CHIP_RS780: return "AMD RS780";
	case CHIP_RS880: return "AMD RS880";
	case CHIP_RV770: return "AMD RV770";
	case CHIP_RV730: return "AMD RV730";
	case CHIP_RV710: return "AMD RV710";
	case CHIP_RV740: return "AMD RV740";
	case CHIP_CEDAR: return "AMD CEDAR";
	case CHIP_REDWOOD: return "AMD REDWOOD";
	case CHIP_JUNIPER: return "AMD JUNIPER";
	case CHIP_CYPRESS: return "AMD CYPRESS";
	case CHIP_HEMLOCK: return "AMD HEMLOCK";
	case CHIP_PALM: return "AMD PALM";
	case CHIP_SUMO: return "AMD SUMO";
	case CHIP_SUMO2: return "AMD SUMO2";
	case CHIP_BARTS: return "AMD BARTS";
	case CHIP_TURKS: return "AMD TURKS";
	case CHIP_CAICOS: return "AMD CAICOS";
	case CHIP_CAYMAN: return "AMD CAYMAN";
	case CHIP_ARUBA: return "AMD ARUBA";
	case CHIP_TAHITI: return "AMD TAHITI";
	case CHIP_PITCAIRN: return "AMD PITCAIRN";
	case CHIP_VERDE: return "AMD CAPE VERDE";
	case CHIP_OLAND: return "AMD OLAND";
	case CHIP_HAINAN: return "AMD HAINAN";
	case CHIP_BONAIRE: return "AMD BONAIRE";
	case CHIP_KAVERI: return "AMD KAVERI";
	case CHIP_KABINI: return "AMD KABINI";
	case CHIP_HAWAII: return "AMD HAWAII";
	case CHIP_MULLINS: return "AMD MULLINS";
	case CHIP_TONGA: return "AMD TONGA";
	case CHIP_ICELAND: return "AMD ICELAND";
	case CHIP_CARRIZO: return "AMD CARRIZO";
	case CHIP_FIJI: return "AMD FIJI";
	case CHIP_POLARIS10: return "AMD POLARIS10";
	case CHIP_POLARIS11: return "AMD POLARIS11";
	case CHIP_POLARIS12: return "AMD POLARIS12";
	case CHIP_STONEY: return "AMD STONEY";
	case CHIP_VEGA10: return "AMD VEGA10";
	case CHIP_RAVEN: return "AMD RAVEN";
	default: return "AMD unknown";
	}
}
static void r600_disk_cache_create(struct r600_common_screen *rscreen)
{
	/* Don't use the cache if shader dumping is enabled. */
	if (rscreen->debug_flags & DBG_ALL_SHADERS)
		return;

	uint32_t mesa_timestamp;
	if (disk_cache_get_function_timestamp(r600_disk_cache_create,
					      &mesa_timestamp)) {
		char *timestamp_str;
		int res = -1;

		if (rscreen->chip_class < SI) {
			res = asprintf(&timestamp_str, "%u", mesa_timestamp);
		} else {
			uint32_t llvm_timestamp;
			if (disk_cache_get_function_timestamp(LLVMInitializeAMDGPUTargetInfo,
							      &llvm_timestamp)) {
				res = asprintf(&timestamp_str, "%u_%u",
					       mesa_timestamp, llvm_timestamp);
			}
		}

		if (res != -1) {
			/* These flags affect shader compilation. */
			uint64_t shader_debug_flags =
				rscreen->debug_flags &
				(DBG_FS_CORRECT_DERIVS_AFTER_KILL |
				 DBG_SI_SCHED |
				 DBG_UNSAFE_MATH);

			rscreen->disk_shader_cache =
				disk_cache_create(r600_get_family_name(rscreen),
						  timestamp_str,
						  shader_debug_flags);
			free(timestamp_str);
		}
	}
}
static struct disk_cache *r600_get_disk_shader_cache(struct pipe_screen *pscreen)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen *)pscreen;
	return rscreen->disk_shader_cache;
}
static const char* r600_get_name(struct pipe_screen* pscreen)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)pscreen;

	return rscreen->renderer_string;
}
static float r600_get_paramf(struct pipe_screen* pscreen,
			     enum pipe_capf param)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen *)pscreen;

	switch (param) {
	case PIPE_CAPF_MAX_LINE_WIDTH:
	case PIPE_CAPF_MAX_LINE_WIDTH_AA:
	case PIPE_CAPF_MAX_POINT_WIDTH:
	case PIPE_CAPF_MAX_POINT_WIDTH_AA:
		if (rscreen->family >= CHIP_CEDAR)
			return 16384.0f;
		else
			return 8192.0f;
	case PIPE_CAPF_MAX_TEXTURE_ANISOTROPY:
		return 16.0f;
	case PIPE_CAPF_MAX_TEXTURE_LOD_BIAS:
		return 16.0f;
	case PIPE_CAPF_GUARD_BAND_LEFT:
	case PIPE_CAPF_GUARD_BAND_TOP:
	case PIPE_CAPF_GUARD_BAND_RIGHT:
	case PIPE_CAPF_GUARD_BAND_BOTTOM:
		return 0.0f;
	}
	return 0.0f;
}
static int r600_get_video_param(struct pipe_screen *screen,
				enum pipe_video_profile profile,
				enum pipe_video_entrypoint entrypoint,
				enum pipe_video_cap param)
{
	switch (param) {
	case PIPE_VIDEO_CAP_SUPPORTED:
		return vl_profile_supported(screen, profile, entrypoint);
	case PIPE_VIDEO_CAP_NPOT_TEXTURES:
		return 1;
	case PIPE_VIDEO_CAP_MAX_WIDTH:
	case PIPE_VIDEO_CAP_MAX_HEIGHT:
		return vl_video_buffer_max_size(screen);
	case PIPE_VIDEO_CAP_PREFERED_FORMAT:
		return PIPE_FORMAT_NV12;
	case PIPE_VIDEO_CAP_PREFERS_INTERLACED:
		return false;
	case PIPE_VIDEO_CAP_SUPPORTS_INTERLACED:
		return false;
	case PIPE_VIDEO_CAP_SUPPORTS_PROGRESSIVE:
		return true;
	case PIPE_VIDEO_CAP_MAX_LEVEL:
		return vl_level_supported(screen, profile);
	default:
		return 0;
	}
}
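
/* Map a radeon_family to the processor name understood by LLVM's AMDGPU
 * backend (the -mcpu= value used for compute shader compilation). */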
const char *r600_get_llvm_processor_name(enum radeon_family family)
{
	switch (family) {
	case CHIP_TAHITI: return "tahiti";
	case CHIP_PITCAIRN: return "pitcairn";
	case CHIP_VERDE: return "verde";
	case CHIP_OLAND: return "oland";
	case CHIP_HAINAN: return "hainan";
	case CHIP_BONAIRE: return "bonaire";
	case CHIP_KABINI: return "kabini";
	case CHIP_KAVERI: return "kaveri";
	case CHIP_HAWAII: return "hawaii";
	case CHIP_TONGA: return "tonga";
	case CHIP_ICELAND: return "iceland";
	case CHIP_CARRIZO: return "carrizo";
	case CHIP_POLARIS10:
		return "polaris10";
	case CHIP_POLARIS11:
	case CHIP_POLARIS12: /* same as polaris11 */
		return "polaris11";
	default:
		return "";
	}
}
static unsigned get_max_threads_per_block(struct r600_common_screen *screen,
					  enum pipe_shader_ir ir_type)
{
	if (ir_type != PIPE_SHADER_IR_TGSI)
		return 256;

	/* Only 16 waves per thread-group on gfx9. */
	if (screen->chip_class >= GFX9)
		return 1024;

	/* Up to 40 waves per thread-group on GCN < gfx9. Expose a nice
	 * round number.
	 */
	if (screen->chip_class >= SI)
		return 2048;
	return 256;
}
static int r600_get_compute_param(struct pipe_screen *screen,
				  enum pipe_shader_ir ir_type,
				  enum pipe_compute_cap param,
				  void *ret)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;

	//TODO: select these params by asic
	switch (param) {
	case PIPE_COMPUTE_CAP_IR_TARGET: {
		const char *gpu;
		const char *triple;

		if (rscreen->family <= CHIP_ARUBA) {
			triple = "r600--";
		} else {
			if (HAVE_LLVM < 0x0400) {
				triple = "amdgcn--";
			} else {
				triple = "amdgcn-mesa-mesa3d";
			}
		}
		switch(rscreen->family) {
		/* Clang < 3.6 is missing Hainan in its list of
		 * GPUs, so we need to use the name of a similar GPU.
		 */
		default:
			gpu = r600_get_llvm_processor_name(rscreen->family);
			break;
		}
		if (ret) {
			sprintf(ret, "%s-%s", gpu, triple);
		}
		/* +2 for dash and terminating NIL byte */
		return (strlen(triple) + strlen(gpu) + 2) * sizeof(char);
	}
	case PIPE_COMPUTE_CAP_GRID_DIMENSION:
		if (ret) {
			uint64_t *grid_dimension = ret;
			grid_dimension[0] = 3;
		}
		return 1 * sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_GRID_SIZE:
		if (ret) {
			uint64_t *grid_size = ret;
			grid_size[0] = 65535;
			grid_size[1] = 65535;
			grid_size[2] = 65535;
		}
		return 3 * sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_BLOCK_SIZE:
		if (ret) {
			uint64_t *block_size = ret;
			unsigned threads_per_block = get_max_threads_per_block(rscreen, ir_type);
			block_size[0] = threads_per_block;
			block_size[1] = threads_per_block;
			block_size[2] = threads_per_block;
		}
		return 3 * sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_THREADS_PER_BLOCK:
		if (ret) {
			uint64_t *max_threads_per_block = ret;
			*max_threads_per_block = get_max_threads_per_block(rscreen, ir_type);
		}
		return sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_ADDRESS_BITS:
		if (ret) {
			uint32_t *address_bits = ret;
			address_bits[0] = 32;
			if (rscreen->chip_class >= SI)
				address_bits[0] = 64;
		}
		return 1 * sizeof(uint32_t);

	case PIPE_COMPUTE_CAP_MAX_GLOBAL_SIZE:
		if (ret) {
			uint64_t *max_global_size = ret;
			uint64_t max_mem_alloc_size;

			r600_get_compute_param(screen, ir_type,
					       PIPE_COMPUTE_CAP_MAX_MEM_ALLOC_SIZE,
					       &max_mem_alloc_size);

			/* In OpenCL, the MAX_MEM_ALLOC_SIZE must be at least
			 * 1/4 of the MAX_GLOBAL_SIZE. Since the
			 * MAX_MEM_ALLOC_SIZE is fixed for older kernels,
			 * make sure we never report more than
			 * 4 * MAX_MEM_ALLOC_SIZE.
			 */
			*max_global_size = MIN2(4 * max_mem_alloc_size,
						MAX2(rscreen->info.gart_size,
						     rscreen->info.vram_size));
		}
		return sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_LOCAL_SIZE:
		if (ret) {
			uint64_t *max_local_size = ret;
			/* Value reported by the closed source driver. */
			*max_local_size = 32768;
		}
		return sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_INPUT_SIZE:
		if (ret) {
			uint64_t *max_input_size = ret;
			/* Value reported by the closed source driver. */
			*max_input_size = 1024;
		}
		return sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_MEM_ALLOC_SIZE:
		if (ret) {
			uint64_t *max_mem_alloc_size = ret;

			*max_mem_alloc_size = rscreen->info.max_alloc_size;
		}
		return sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_CLOCK_FREQUENCY:
		if (ret) {
			uint32_t *max_clock_frequency = ret;
			*max_clock_frequency = rscreen->info.max_shader_clock;
		}
		return sizeof(uint32_t);

	case PIPE_COMPUTE_CAP_MAX_COMPUTE_UNITS:
		if (ret) {
			uint32_t *max_compute_units = ret;
			*max_compute_units = rscreen->info.num_good_compute_units;
		}
		return sizeof(uint32_t);

	case PIPE_COMPUTE_CAP_IMAGES_SUPPORTED:
		if (ret) {
			uint32_t *images_supported = ret;
			*images_supported = 0;
		}
		return sizeof(uint32_t);

	case PIPE_COMPUTE_CAP_MAX_PRIVATE_SIZE:
		break; /* unused */

	case PIPE_COMPUTE_CAP_SUBGROUP_SIZE:
		if (ret) {
			uint32_t *subgroup_size = ret;
			*subgroup_size = r600_wavefront_size(rscreen->family);
		}
		return sizeof(uint32_t);

	case PIPE_COMPUTE_CAP_MAX_VARIABLE_THREADS_PER_BLOCK:
		if (ret) {
			uint64_t *max_variable_threads_per_block = ret;
			if (rscreen->chip_class >= SI &&
			    ir_type == PIPE_SHADER_IR_TGSI)
				*max_variable_threads_per_block = SI_MAX_VARIABLE_THREADS_PER_BLOCK;
			else
				*max_variable_threads_per_block = 0;
		}
		return sizeof(uint64_t);
	}

	fprintf(stderr, "unknown PIPE_COMPUTE_CAP %d\n", param);
	return 0;
}
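
/* Sample the GPU's free-running counter and convert it to nanoseconds
 * using the crystal clock frequency reported by the kernel. */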
static uint64_t r600_get_timestamp(struct pipe_screen *screen)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;

	return 1000000 * rscreen->ws->query_value(rscreen->ws, RADEON_TIMESTAMP) /
			rscreen->info.clock_crystal_freq;
}
static void r600_fence_reference(struct pipe_screen *screen,
				 struct pipe_fence_handle **dst,
				 struct pipe_fence_handle *src)
{
	struct radeon_winsys *ws = ((struct r600_common_screen *)screen)->ws;
	struct r600_multi_fence **rdst = (struct r600_multi_fence **)dst;
	struct r600_multi_fence *rsrc = (struct r600_multi_fence *)src;

	if (pipe_reference(&(*rdst)->reference, &rsrc->reference)) {
		ws->fence_reference(&(*rdst)->gfx, NULL);
		ws->fence_reference(&(*rdst)->sdma, NULL);
		FREE(*rdst);
	}
	*rdst = rsrc;
}
static boolean r600_fence_finish(struct pipe_screen *screen,
				 struct pipe_context *ctx,
				 struct pipe_fence_handle *fence,
				 uint64_t timeout)
{
	struct radeon_winsys *rws = ((struct r600_common_screen *)screen)->ws;
	struct r600_multi_fence *rfence = (struct r600_multi_fence *)fence;
	struct r600_common_context *rctx;
	int64_t abs_timeout = os_time_get_absolute_timeout(timeout);

	ctx = threaded_context_unwrap_sync(ctx);
	rctx = ctx ? (struct r600_common_context *)ctx : NULL;

	if (rfence->sdma) {
		if (!rws->fence_wait(rws, rfence->sdma, timeout))
			return false;

		/* Recompute the timeout after waiting. */
		if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
			int64_t time = os_time_get_nano();
			timeout = abs_timeout > time ? abs_timeout - time : 0;
		}
	}

	if (!rfence->gfx)
		return true;

	/* Flush the gfx IB if it hasn't been flushed yet. */
	if (rctx &&
	    rfence->gfx_unflushed.ctx == rctx &&
	    rfence->gfx_unflushed.ib_index == rctx->num_gfx_cs_flushes) {
		rctx->gfx.flush(rctx, timeout ? 0 : RADEON_FLUSH_ASYNC, NULL);
		rfence->gfx_unflushed.ctx = NULL;

		if (!timeout)
			return false;

		/* Recompute the timeout after all that. */
		if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
			int64_t time = os_time_get_nano();
			timeout = abs_timeout > time ? abs_timeout - time : 0;
		}
	}

	return rws->fence_wait(rws, rfence->gfx, timeout);
}
static void r600_query_memory_info(struct pipe_screen *screen,
				   struct pipe_memory_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
	struct radeon_winsys *ws = rscreen->ws;
	unsigned vram_usage, gtt_usage;

	info->total_device_memory = rscreen->info.vram_size / 1024;
	info->total_staging_memory = rscreen->info.gart_size / 1024;

	/* The real TTM memory usage is somewhat random, because:
	 *
	 * 1) TTM delays freeing memory, because it can only free it after
	 *    fences expire.
	 *
	 * 2) The memory usage can be really low if big VRAM evictions are
	 *    taking place, but the real usage is well above the size of VRAM.
	 *
	 * Instead, return statistics of this process.
	 */
	vram_usage = ws->query_value(ws, RADEON_REQUESTED_VRAM_MEMORY) / 1024;
	gtt_usage = ws->query_value(ws, RADEON_REQUESTED_GTT_MEMORY) / 1024;

	info->avail_device_memory =
		vram_usage <= info->total_device_memory ?
			info->total_device_memory - vram_usage : 0;
	info->avail_staging_memory =
		gtt_usage <= info->total_staging_memory ?
			info->total_staging_memory - gtt_usage : 0;

	info->device_memory_evicted =
		ws->query_value(ws, RADEON_NUM_BYTES_MOVED) / 1024;

	if (rscreen->info.drm_major == 3 && rscreen->info.drm_minor >= 4)
		info->nr_device_memory_evictions =
			ws->query_value(ws, RADEON_NUM_EVICTIONS);
	else
		/* Just return the number of evicted 64KB pages. */
		info->nr_device_memory_evictions = info->device_memory_evicted / 64;
}
struct pipe_resource *r600_resource_create_common(struct pipe_screen *screen,
						  const struct pipe_resource *templ)
{
	if (templ->target == PIPE_BUFFER) {
		return r600_buffer_create(screen, templ, 256);
	} else {
		return r600_texture_create(screen, templ);
	}
}
bool r600_common_screen_init(struct r600_common_screen *rscreen,
			     struct radeon_winsys *ws)
{
	char family_name[32] = {}, llvm_string[32] = {}, kernel_version[128] = {};
	struct utsname uname_data;
	const char *chip_name;

	ws->query_info(ws, &rscreen->info);
	rscreen->ws = ws;

	if ((chip_name = r600_get_marketing_name(ws)))
		snprintf(family_name, sizeof(family_name), "%s / ",
			 r600_get_family_name(rscreen) + 4);
	else
		chip_name = r600_get_family_name(rscreen);

	if (uname(&uname_data) == 0)
		snprintf(kernel_version, sizeof(kernel_version),
			 " / %s", uname_data.release);

	if (HAVE_LLVM > 0) {
		snprintf(llvm_string, sizeof(llvm_string),
			 ", LLVM %i.%i.%i", (HAVE_LLVM >> 8) & 0xff,
			 HAVE_LLVM & 0xff, MESA_LLVM_VERSION_PATCH);
	}

	snprintf(rscreen->renderer_string, sizeof(rscreen->renderer_string),
		 "%s (%sDRM %i.%i.%i%s%s)",
		 chip_name, family_name, rscreen->info.drm_major,
		 rscreen->info.drm_minor, rscreen->info.drm_patchlevel,
		 kernel_version, llvm_string);

	rscreen->b.get_name = r600_get_name;
	rscreen->b.get_vendor = r600_get_vendor;
	rscreen->b.get_device_vendor = r600_get_device_vendor;
	rscreen->b.get_disk_shader_cache = r600_get_disk_shader_cache;
	rscreen->b.get_compute_param = r600_get_compute_param;
	rscreen->b.get_paramf = r600_get_paramf;
	rscreen->b.get_timestamp = r600_get_timestamp;
	rscreen->b.fence_finish = r600_fence_finish;
	rscreen->b.fence_reference = r600_fence_reference;
	rscreen->b.resource_destroy = u_resource_destroy_vtbl;
	rscreen->b.resource_from_user_memory = r600_buffer_from_user_memory;
	rscreen->b.query_memory_info = r600_query_memory_info;

	if (rscreen->info.has_hw_decode) {
		rscreen->b.get_video_param = rvid_get_video_param;
		rscreen->b.is_video_format_supported = rvid_is_format_supported;
	} else {
		rscreen->b.get_video_param = r600_get_video_param;
		rscreen->b.is_video_format_supported = vl_video_buffer_is_format_supported;
	}

	r600_init_screen_texture_functions(rscreen);
	r600_init_screen_query_functions(rscreen);

	rscreen->family = rscreen->info.family;
	rscreen->chip_class = rscreen->info.chip_class;
	rscreen->debug_flags |= debug_get_flags_option("R600_DEBUG", common_debug_options, 0);
	rscreen->has_rbplus = false;
	rscreen->rbplus_allowed = false;

	r600_disk_cache_create(rscreen);

	slab_create_parent(&rscreen->pool_transfers, sizeof(struct r600_transfer), 64);

	rscreen->force_aniso = MIN2(16, debug_get_num_option("R600_TEX_ANISO", -1));
	if (rscreen->force_aniso >= 0) {
		printf("radeon: Forcing anisotropy filter to %ix\n",
		       /* round down to a power of two */
		       1 << util_logbase2(rscreen->force_aniso));
	}

	(void) mtx_init(&rscreen->aux_context_lock, mtx_plain);
	(void) mtx_init(&rscreen->gpu_load_mutex, mtx_plain);

	if (rscreen->debug_flags & DBG_INFO) {
		printf("pci (domain:bus:dev.func): %04x:%02x:%02x.%x\n",
		       rscreen->info.pci_domain, rscreen->info.pci_bus,
		       rscreen->info.pci_dev, rscreen->info.pci_func);
		printf("pci_id = 0x%x\n", rscreen->info.pci_id);
		printf("family = %i (%s)\n", rscreen->info.family,
		       r600_get_family_name(rscreen));
		printf("chip_class = %i\n", rscreen->info.chip_class);
		printf("pte_fragment_size = %u\n", rscreen->info.pte_fragment_size);
		printf("gart_page_size = %u\n", rscreen->info.gart_page_size);
		printf("gart_size = %i MB\n", (int)DIV_ROUND_UP(rscreen->info.gart_size, 1024*1024));
		printf("vram_size = %i MB\n", (int)DIV_ROUND_UP(rscreen->info.vram_size, 1024*1024));
		printf("vram_vis_size = %i MB\n", (int)DIV_ROUND_UP(rscreen->info.vram_vis_size, 1024*1024));
		printf("max_alloc_size = %i MB\n",
		       (int)DIV_ROUND_UP(rscreen->info.max_alloc_size, 1024*1024));
		printf("min_alloc_size = %u\n", rscreen->info.min_alloc_size);
		printf("has_dedicated_vram = %u\n", rscreen->info.has_dedicated_vram);
		printf("has_virtual_memory = %i\n", rscreen->info.has_virtual_memory);
		printf("gfx_ib_pad_with_type2 = %i\n", rscreen->info.gfx_ib_pad_with_type2);
		printf("has_hw_decode = %u\n", rscreen->info.has_hw_decode);
		printf("num_sdma_rings = %i\n", rscreen->info.num_sdma_rings);
		printf("num_compute_rings = %u\n", rscreen->info.num_compute_rings);
		printf("uvd_fw_version = %u\n", rscreen->info.uvd_fw_version);
		printf("vce_fw_version = %u\n", rscreen->info.vce_fw_version);
		printf("me_fw_version = %i\n", rscreen->info.me_fw_version);
		printf("pfp_fw_version = %i\n", rscreen->info.pfp_fw_version);
		printf("ce_fw_version = %i\n", rscreen->info.ce_fw_version);
		printf("vce_harvest_config = %i\n", rscreen->info.vce_harvest_config);
		printf("clock_crystal_freq = %i\n", rscreen->info.clock_crystal_freq);
		printf("tcc_cache_line_size = %u\n", rscreen->info.tcc_cache_line_size);
		printf("drm = %i.%i.%i\n", rscreen->info.drm_major,
		       rscreen->info.drm_minor, rscreen->info.drm_patchlevel);
		printf("has_userptr = %i\n", rscreen->info.has_userptr);
		printf("has_syncobj = %u\n", rscreen->info.has_syncobj);

		printf("r600_max_quad_pipes = %i\n", rscreen->info.r600_max_quad_pipes);
		printf("max_shader_clock = %i\n", rscreen->info.max_shader_clock);
		printf("num_good_compute_units = %i\n", rscreen->info.num_good_compute_units);
		printf("max_se = %i\n", rscreen->info.max_se);
		printf("max_sh_per_se = %i\n", rscreen->info.max_sh_per_se);

		printf("r600_gb_backend_map = %i\n", rscreen->info.r600_gb_backend_map);
		printf("r600_gb_backend_map_valid = %i\n", rscreen->info.r600_gb_backend_map_valid);
		printf("r600_num_banks = %i\n", rscreen->info.r600_num_banks);
		printf("num_render_backends = %i\n", rscreen->info.num_render_backends);
		printf("num_tile_pipes = %i\n", rscreen->info.num_tile_pipes);
		printf("pipe_interleave_bytes = %i\n", rscreen->info.pipe_interleave_bytes);
		printf("enabled_rb_mask = 0x%x\n", rscreen->info.enabled_rb_mask);
		printf("max_alignment = %u\n", (unsigned)rscreen->info.max_alignment);
	}
	return true;
}
void r600_destroy_common_screen(struct r600_common_screen *rscreen)
{
	r600_perfcounters_destroy(rscreen);
	r600_gpu_load_kill_thread(rscreen);

	mtx_destroy(&rscreen->gpu_load_mutex);
	mtx_destroy(&rscreen->aux_context_lock);
	rscreen->aux_context->destroy(rscreen->aux_context);

	slab_destroy_parent(&rscreen->pool_transfers);

	disk_cache_destroy(rscreen->disk_shader_cache);
	rscreen->ws->destroy(rscreen->ws);
	FREE(rscreen);
}
bool r600_can_dump_shader(struct r600_common_screen *rscreen,
			  unsigned processor)
{
	return rscreen->debug_flags & (1 << processor);
}
bool r600_extra_shader_checks(struct r600_common_screen *rscreen, unsigned processor)
{
	return (rscreen->debug_flags & DBG_CHECK_IR) ||
	       r600_can_dump_shader(rscreen, processor);
}
void r600_screen_clear_buffer(struct r600_common_screen *rscreen, struct pipe_resource *dst,
			      uint64_t offset, uint64_t size, unsigned value)
{
	struct r600_common_context *rctx = (struct r600_common_context *)rscreen->aux_context;

	mtx_lock(&rscreen->aux_context_lock);
	rctx->dma_clear_buffer(&rctx->b, dst, offset, size, value);
	rscreen->aux_context->flush(rscreen->aux_context, NULL, 0);
	mtx_unlock(&rscreen->aux_context_lock);
}