/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors: Marek Olšák <maraeo@gmail.com>
 */
#include "r600_pipe_common.h"

#include "tgsi/tgsi_parse.h"
#include "util/list.h"
#include "util/u_draw_quad.h"
#include "util/u_memory.h"
#include "util/u_format_s3tc.h"
#include "util/u_upload_mgr.h"
#include "os/os_time.h"
#include "vl/vl_decoder.h"
#include "vl/vl_video_buffer.h"
#include "radeon/radeon_video.h"

#include <sys/utsname.h>
struct r600_multi_fence {
	struct pipe_reference reference;
	struct pipe_fence_handle *gfx;
	struct pipe_fence_handle *sdma;

	/* If the context wasn't flushed at fence creation, this is non-NULL. */
	struct {
		struct r600_common_context *ctx;
		unsigned ib_index;
	} gfx_unflushed;
};
/*
 * shader binary helpers.
 */
void radeon_shader_binary_init(struct radeon_shader_binary *b)
{
	memset(b, 0, sizeof(*b));
}

void radeon_shader_binary_clean(struct radeon_shader_binary *b)
{
	FREE(b->global_symbol_offsets);
	FREE(b->disasm_string);
	FREE(b->llvm_ir_string);
}
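
/* Illustrative only (not part of the original file): a typical lifecycle for
 * the helpers above, assuming a caller that owns a radeon_shader_binary:
 *
 *	struct radeon_shader_binary binary;
 *
 *	radeon_shader_binary_init(&binary);	// zero all fields
 *	// ... compile and fill binary.* ...
 *	radeon_shader_binary_clean(&binary);	// FREE()s the owned arrays/strings
 */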
/**
 * Write an EOP event.
 *
 * \param event		EVENT_TYPE_*
 * \param event_flags	Optional cache flush flags (TC)
 * \param data_sel	1 = fence, 3 = timestamp
 * \param va		GPU address
 * \param old_value	Previous fence value (for a bug workaround)
 * \param new_value	Fence value to write for this event.
 */
void r600_gfx_write_event_eop(struct r600_common_context *ctx,
			      unsigned event, unsigned event_flags,
			      unsigned data_sel,
			      struct r600_resource *buf, uint64_t va,
			      uint32_t old_fence, uint32_t new_fence)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	unsigned op = EVENT_TYPE(event) |
		      EVENT_INDEX(5) |
		      event_flags;

	if (ctx->chip_class == CIK) {
		/* Two EOP events are required to make all engines go idle
		 * (and optional cache flushes executed) before the timestamp
		 * is written. */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, op);
		radeon_emit(cs, va);		/* address lo */
		radeon_emit(cs, ((va >> 32) & 0xffff) | EOP_DATA_SEL(data_sel));
		radeon_emit(cs, old_fence);	/* immediate data */
		radeon_emit(cs, 0);		/* unused */
	}

	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
	radeon_emit(cs, op);
	radeon_emit(cs, va);		/* address lo */
	radeon_emit(cs, ((va >> 32) & 0xffff) | EOP_DATA_SEL(data_sel));
	radeon_emit(cs, new_fence);	/* immediate data */
	radeon_emit(cs, 0);		/* unused */

	r600_emit_reloc(ctx, &ctx->gfx, buf, RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
}
unsigned r600_gfx_write_fence_dwords(struct r600_common_screen *screen)
{
	unsigned dwords = 6; /* one EVENT_WRITE_EOP packet */

	if (screen->chip_class == CIK)
		dwords *= 2; /* two EOP events are emitted (see above) */

	if (!screen->info.has_virtual_memory)
		dwords += 2; /* relocation */

	return dwords;
}
void r600_gfx_wait_fence(struct r600_common_context *ctx,
			 uint64_t va, uint32_t ref, uint32_t mask)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;

	radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
	radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
	radeon_emit(cs, va);		/* address lo */
	radeon_emit(cs, va >> 32);	/* address hi */
	radeon_emit(cs, ref);		/* reference value */
	radeon_emit(cs, mask);		/* mask */
	radeon_emit(cs, 4);		/* poll interval */
}
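
/* Illustrative only (not part of the original file): a hypothetical caller
 * could pair the two helpers above to stall the GFX ring until an EOP fence
 * value lands in memory. Assumes a fence buffer "buf" with GPU address "va":
 *
 *	r600_gfx_write_event_eop(ctx, EVENT_TYPE_BOTTOM_OF_PIPE_TS, 0,
 *				 1, buf, va, 0, 0x1);	// data_sel = 1: fence
 *	r600_gfx_wait_fence(ctx, va, 0x1, 0xffffffff);	// wait for value 0x1
 */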
void r600_draw_rectangle(struct blitter_context *blitter,
			 int x1, int y1, int x2, int y2, float depth,
			 enum blitter_attrib_type type,
			 const union pipe_color_union *attrib)
{
	struct r600_common_context *rctx =
		(struct r600_common_context*)util_blitter_get_pipe(blitter);
	struct pipe_viewport_state viewport;
	struct pipe_resource *buf = NULL;
	unsigned offset = 0;
	float *vb;

	if (type == UTIL_BLITTER_ATTRIB_TEXCOORD) {
		util_blitter_draw_rectangle(blitter, x1, y1, x2, y2, depth, type, attrib);
		return;
	}

	/* Some operations (like color resolve on r6xx) don't work
	 * with the conventional primitive types.
	 * One that works is PT_RECTLIST, which we use here. */

	/* setup viewport */
	viewport.scale[0] = 1.0f;
	viewport.scale[1] = 1.0f;
	viewport.scale[2] = 1.0f;
	viewport.translate[0] = 0.0f;
	viewport.translate[1] = 0.0f;
	viewport.translate[2] = 0.0f;
	rctx->b.set_viewport_states(&rctx->b, 0, 1, &viewport);

	/* Upload vertices. The hw rectangle has only 3 vertices,
	 * I guess the 4th one is derived from the first 3.
	 * The vertex specification should match u_blitter's vertex element state. */
	u_upload_alloc(rctx->uploader, 0, sizeof(float) * 24, 256,
		       &offset, &buf, (void**)&vb);
	if (!buf)
		return;

	/* 3 vertices, 8 floats each: position (x, y, depth, 1) + attribute */
	vb[0] = x1;
	vb[1] = y1;
	vb[2] = depth;
	vb[3] = 1;

	vb[8] = x1;
	vb[9] = y2;
	vb[10] = depth;
	vb[11] = 1;

	vb[16] = x2;
	vb[17] = y1;
	vb[18] = depth;
	vb[19] = 1;

	if (attrib) {
		memcpy(vb+4, attrib->f, sizeof(float)*4);
		memcpy(vb+12, attrib->f, sizeof(float)*4);
		memcpy(vb+20, attrib->f, sizeof(float)*4);
	}

	/* draw */
	util_draw_vertex_buffer(&rctx->b, NULL, buf, blitter->vb_slot, offset,
				R600_PRIM_RECTANGLE_LIST, 3, 2);
	pipe_resource_reference(&buf, NULL);
}
void r600_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
			 struct r600_resource *dst, struct r600_resource *src)
{
	uint64_t vram = 0, gtt = 0;

	if (dst) {
		vram += dst->vram_usage;
		gtt += dst->gart_usage;
	}
	if (src) {
		vram += src->vram_usage;
		gtt += src->gart_usage;
	}

	/* Flush the GFX IB if DMA depends on it. */
	if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
	    ((dst &&
	      ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, dst->buf,
					       RADEON_USAGE_READWRITE)) ||
	     (src &&
	      ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, src->buf,
					       RADEON_USAGE_WRITE))))
		ctx->gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);

	/* Flush if there's not enough space, or if the memory usage per IB
	 * is too large. */
	if (!ctx->ws->cs_check_space(ctx->dma.cs, num_dw) ||
	    !radeon_cs_memory_below_limit(ctx->screen, ctx->dma.cs, vram, gtt)) {
		ctx->dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
		assert((num_dw + ctx->dma.cs->current.cdw) <= ctx->dma.cs->current.max_dw);
	}

	/* If GPUVM is not supported, the CS checker needs 2 entries
	 * in the buffer list per packet, which has to be done manually.
	 */
	if (ctx->screen->info.has_virtual_memory) {
		if (dst)
			radeon_add_to_buffer_list(ctx, &ctx->dma, dst,
						  RADEON_USAGE_WRITE,
						  RADEON_PRIO_SDMA_BUFFER);
		if (src)
			radeon_add_to_buffer_list(ctx, &ctx->dma, src,
						  RADEON_USAGE_READ,
						  RADEON_PRIO_SDMA_BUFFER);
	}
}
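
/* Illustrative only (not part of the original file): a hypothetical SDMA
 * copy would reserve space and register its buffers first, e.g. for a
 * packet that is "ndw" dwords long with resources "rdst"/"rsrc":
 *
 *	r600_need_dma_space(ctx, ndw, rdst, rsrc);
 *	// ... radeon_emit() the copy packet into ctx->dma.cs ...
 */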
/* This is required to prevent read-after-write hazards. */
void r600_dma_emit_wait_idle(struct r600_common_context *rctx)
{
	struct radeon_winsys_cs *cs = rctx->dma.cs;

	/* done at the end of DMA calls, so increment this. */
	rctx->num_dma_calls++;

	/* IBs using too little memory are limited by the IB submission overhead.
	 * IBs using too much memory are limited by the kernel/TTM overhead.
	 * Too long IBs create CPU-GPU pipeline bubbles and add latency.
	 *
	 * This heuristic makes sure that DMA requests are executed
	 * very soon after the call is made and lowers memory usage.
	 * It improves texture upload performance by keeping the DMA
	 * engine busy while uploads are being submitted.
	 */
	if (cs->used_vram + cs->used_gart > 64 * 1024 * 1024) {
		rctx->dma.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
		return;
	}

	r600_need_dma_space(rctx, 1, NULL, NULL);

	if (!radeon_emitted(cs, 0)) /* empty queue */
		return;

	/* NOP waits for idle on Evergreen and later. */
	if (rctx->chip_class >= CIK)
		radeon_emit(cs, 0x00000000); /* NOP */
	else if (rctx->chip_class >= EVERGREEN)
		radeon_emit(cs, 0xf0000000); /* NOP */

	/* TODO: R600-R700 should use the FENCE packet.
	 * CS checker support is required. */
}
static void r600_memory_barrier(struct pipe_context *ctx, unsigned flags)
{
}
void r600_preflush_suspend_features(struct r600_common_context *ctx)
{
	/* suspend queries */
	if (!LIST_IS_EMPTY(&ctx->active_queries))
		r600_suspend_queries(ctx);

	ctx->streamout.suspended = false;
	if (ctx->streamout.begin_emitted) {
		r600_emit_streamout_end(ctx);
		ctx->streamout.suspended = true;
	}
}
void r600_postflush_resume_features(struct r600_common_context *ctx)
{
	if (ctx->streamout.suspended) {
		ctx->streamout.append_bitmask = ctx->streamout.enabled_mask;
		r600_streamout_buffers_dirty(ctx);
	}

	/* resume queries */
	if (!LIST_IS_EMPTY(&ctx->active_queries))
		r600_resume_queries(ctx);
}
static void r600_flush_from_st(struct pipe_context *ctx,
			       struct pipe_fence_handle **fence,
			       unsigned flags)
{
	struct pipe_screen *screen = ctx->screen;
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct radeon_winsys *ws = rctx->ws;
	unsigned rflags = 0;
	struct pipe_fence_handle *gfx_fence = NULL;
	struct pipe_fence_handle *sdma_fence = NULL;
	bool deferred_fence = false;

	if (flags & PIPE_FLUSH_END_OF_FRAME)
		rflags |= RADEON_FLUSH_END_OF_FRAME;
	if (flags & PIPE_FLUSH_DEFERRED)
		rflags |= RADEON_FLUSH_ASYNC;

	if (rctx->dma.cs)
		rctx->dma.flush(rctx, rflags, fence ? &sdma_fence : NULL);

	if (!radeon_emitted(rctx->gfx.cs, rctx->initial_gfx_cs_size)) {
		if (fence)
			ws->fence_reference(&gfx_fence, rctx->last_gfx_fence);
		if (!(rflags & RADEON_FLUSH_ASYNC))
			ws->cs_sync_flush(rctx->gfx.cs);
	} else {
		/* Instead of flushing, create a deferred fence. Constraints:
		 * - The state tracker must allow a deferred flush.
		 * - The state tracker must request a fence.
		 * Thread safety in fence_finish must be ensured by the state tracker.
		 */
		if (flags & PIPE_FLUSH_DEFERRED && fence) {
			gfx_fence = rctx->ws->cs_get_next_fence(rctx->gfx.cs);
			deferred_fence = true;
		} else {
			rctx->gfx.flush(rctx, rflags, fence ? &gfx_fence : NULL);
		}
	}

	/* Both engines can signal out of order, so we need to keep both fences. */
	if (fence) {
		struct r600_multi_fence *multi_fence =
			CALLOC_STRUCT(r600_multi_fence);
		if (!multi_fence)
			return;

		multi_fence->reference.count = 1;
		/* If both fences are NULL, fence_finish will always return true. */
		multi_fence->gfx = gfx_fence;
		multi_fence->sdma = sdma_fence;

		if (deferred_fence) {
			multi_fence->gfx_unflushed.ctx = rctx;
			multi_fence->gfx_unflushed.ib_index = rctx->num_gfx_cs_flushes;
		}

		screen->fence_reference(screen, fence, NULL);
		*fence = (struct pipe_fence_handle *)multi_fence;
	}
}
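
/* Illustrative only (not part of the original file): from the state tracker's
 * point of view, the deferred path above is reached through the pipe API,
 * e.g. with a hypothetical context pointer "pctx":
 *
 *	struct pipe_fence_handle *f = NULL;
 *	pctx->flush(pctx, &f, PIPE_FLUSH_DEFERRED);	// no GFX submission yet
 *	// later: fence_finish() flushes the gfx IB if it is still unflushed
 */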
static void r600_flush_dma_ring(void *ctx, unsigned flags,
				struct pipe_fence_handle **fence)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct radeon_winsys_cs *cs = rctx->dma.cs;
	struct radeon_saved_cs saved;
	bool check_vm =
		(rctx->screen->debug_flags & DBG_CHECK_VM) &&
		rctx->check_vm_faults;

	if (!radeon_emitted(cs, 0)) {
		if (fence)
			rctx->ws->fence_reference(fence, rctx->last_sdma_fence);
		return;
	}

	if (check_vm)
		radeon_save_cs(rctx->ws, cs, &saved);

	rctx->ws->cs_flush(cs, flags, &rctx->last_sdma_fence);
	if (fence)
		rctx->ws->fence_reference(fence, rctx->last_sdma_fence);

	if (check_vm) {
		/* Use conservative timeout 800ms, after which we won't wait any
		 * longer and assume the GPU is hung. */
		rctx->ws->fence_wait(rctx->ws, rctx->last_sdma_fence, 800*1000*1000);

		rctx->check_vm_faults(rctx, &saved, RING_DMA);
		radeon_clear_saved_cs(&saved);
	}
}
/**
 * Store a linearized copy of all chunks of \p cs together with the buffer
 * list as fallback.
 */
void radeon_save_cs(struct radeon_winsys *ws, struct radeon_winsys_cs *cs,
		    struct radeon_saved_cs *saved)
{
	uint32_t *buf;
	unsigned i;

	/* Save the IB chunks. */
	saved->num_dw = cs->prev_dw + cs->current.cdw;
	saved->ib = MALLOC(4 * saved->num_dw);
	if (!saved->ib)
		goto oom;

	buf = saved->ib;
	for (i = 0; i < cs->num_prev; ++i) {
		memcpy(buf, cs->prev[i].buf, cs->prev[i].cdw * 4);
		buf += cs->prev[i].cdw;
	}
	memcpy(buf, cs->current.buf, cs->current.cdw * 4);

	/* Save the buffer list. */
	saved->bo_count = ws->cs_get_buffer_list(cs, NULL);
	saved->bo_list = CALLOC(saved->bo_count,
				sizeof(saved->bo_list[0]));
	if (!saved->bo_list) {
		FREE(saved->ib);
		goto oom;
	}
	ws->cs_get_buffer_list(cs, saved->bo_list);

	return;

oom:
	fprintf(stderr, "%s: out of memory\n", __func__);
	memset(saved, 0, sizeof(*saved));
}

void radeon_clear_saved_cs(struct radeon_saved_cs *saved)
{
	FREE(saved->ib);
	FREE(saved->bo_list);

	memset(saved, 0, sizeof(*saved));
}
static enum pipe_reset_status r600_get_reset_status(struct pipe_context *ctx)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	unsigned latest = rctx->ws->query_value(rctx->ws,
						RADEON_GPU_RESET_COUNTER);

	if (rctx->gpu_reset_counter == latest)
		return PIPE_NO_RESET;

	rctx->gpu_reset_counter = latest;
	return PIPE_UNKNOWN_CONTEXT_RESET;
}
static void r600_set_debug_callback(struct pipe_context *ctx,
				    const struct pipe_debug_callback *cb)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;

	if (cb)
		rctx->debug = *cb;
	else
		memset(&rctx->debug, 0, sizeof(rctx->debug));
}
static void r600_set_device_reset_callback(struct pipe_context *ctx,
					   const struct pipe_device_reset_callback *cb)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;

	if (cb)
		rctx->device_reset_callback = *cb;
	else
		memset(&rctx->device_reset_callback, 0,
		       sizeof(rctx->device_reset_callback));
}
bool r600_check_device_reset(struct r600_common_context *rctx)
{
	enum pipe_reset_status status;

	if (!rctx->device_reset_callback.reset)
		return false;

	if (!rctx->b.get_device_reset_status)
		return false;

	status = rctx->b.get_device_reset_status(&rctx->b);
	if (status == PIPE_NO_RESET)
		return false;

	rctx->device_reset_callback.reset(rctx->device_reset_callback.data, status);
	return true;
}
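
/* Illustrative only (not part of the original file): a state tracker would
 * typically arm this path by installing a callback through the pipe API,
 * with a hypothetical handler and user pointer:
 *
 *	struct pipe_device_reset_callback cb = {
 *		.reset = app_reset_handler,	// hypothetical handler
 *		.data = app_data,		// hypothetical user data
 *	};
 *	pctx->set_device_reset_callback(pctx, &cb);
 */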
bool r600_common_context_init(struct r600_common_context *rctx,
			      struct r600_common_screen *rscreen,
			      unsigned context_flags)
{
	slab_create_child(&rctx->pool_transfers, &rscreen->pool_transfers);

	rctx->screen = rscreen;
	rctx->ws = rscreen->ws;
	rctx->family = rscreen->family;
	rctx->chip_class = rscreen->chip_class;

	if (rscreen->chip_class >= CIK)
		rctx->max_db = MAX2(8, rscreen->info.num_render_backends);
	else if (rscreen->chip_class >= EVERGREEN)
		rctx->max_db = 8;
	else
		rctx->max_db = 4;

	rctx->b.invalidate_resource = r600_invalidate_resource;
	rctx->b.transfer_map = u_transfer_map_vtbl;
	rctx->b.transfer_flush_region = u_transfer_flush_region_vtbl;
	rctx->b.transfer_unmap = u_transfer_unmap_vtbl;
	rctx->b.texture_subdata = u_default_texture_subdata;
	rctx->b.memory_barrier = r600_memory_barrier;
	rctx->b.flush = r600_flush_from_st;
	rctx->b.set_debug_callback = r600_set_debug_callback;

	/* evergreen_compute.c has a special codepath for global buffers.
	 * Everything else can use the direct path.
	 */
	if ((rscreen->chip_class == EVERGREEN || rscreen->chip_class == CAYMAN) &&
	    (context_flags & PIPE_CONTEXT_COMPUTE_ONLY))
		rctx->b.buffer_subdata = u_default_buffer_subdata;
	else
		rctx->b.buffer_subdata = r600_buffer_subdata;

	if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 43) {
		rctx->b.get_device_reset_status = r600_get_reset_status;
		rctx->gpu_reset_counter =
			rctx->ws->query_value(rctx->ws,
					      RADEON_GPU_RESET_COUNTER);
	}

	rctx->b.set_device_reset_callback = r600_set_device_reset_callback;

	r600_init_context_texture_functions(rctx);
	r600_init_viewport_functions(rctx);
	r600_streamout_init(rctx);
	r600_query_init(rctx);
	cayman_init_msaa(&rctx->b);

	rctx->allocator_zeroed_memory =
		u_suballocator_create(&rctx->b, rscreen->info.gart_page_size,
				      0, PIPE_USAGE_DEFAULT, true);
	if (!rctx->allocator_zeroed_memory)
		return false;

	rctx->uploader = u_upload_create(&rctx->b, 1024 * 1024,
					 PIPE_BIND_INDEX_BUFFER |
					 PIPE_BIND_CONSTANT_BUFFER, PIPE_USAGE_STREAM);
	if (!rctx->uploader)
		return false;

	rctx->ctx = rctx->ws->ctx_create(rctx->ws);
	if (!rctx->ctx)
		return false;

	if (rscreen->info.has_sdma && !(rscreen->debug_flags & DBG_NO_ASYNC_DMA)) {
		rctx->dma.cs = rctx->ws->cs_create(rctx->ctx, RING_DMA,
						   r600_flush_dma_ring, rctx);
		rctx->dma.flush = r600_flush_dma_ring;
	}

	return true;
}
void r600_common_context_cleanup(struct r600_common_context *rctx)
{
	unsigned i, j;

	/* Release DCC stats. */
	for (i = 0; i < ARRAY_SIZE(rctx->dcc_stats); i++) {
		assert(!rctx->dcc_stats[i].query_active);

		for (j = 0; j < ARRAY_SIZE(rctx->dcc_stats[i].ps_stats); j++)
			if (rctx->dcc_stats[i].ps_stats[j])
				rctx->b.destroy_query(&rctx->b,
						      rctx->dcc_stats[i].ps_stats[j]);

		r600_texture_reference(&rctx->dcc_stats[i].tex, NULL);
	}

	if (rctx->query_result_shader)
		rctx->b.delete_compute_state(&rctx->b, rctx->query_result_shader);

	if (rctx->gfx.cs)
		rctx->ws->cs_destroy(rctx->gfx.cs);
	if (rctx->dma.cs)
		rctx->ws->cs_destroy(rctx->dma.cs);
	if (rctx->ctx)
		rctx->ws->ctx_destroy(rctx->ctx);

	if (rctx->uploader) {
		u_upload_destroy(rctx->uploader);
	}

	slab_destroy_child(&rctx->pool_transfers);

	if (rctx->allocator_zeroed_memory) {
		u_suballocator_destroy(rctx->allocator_zeroed_memory);
	}
	rctx->ws->fence_reference(&rctx->last_gfx_fence, NULL);
	rctx->ws->fence_reference(&rctx->last_sdma_fence, NULL);
}
static const struct debug_named_value common_debug_options[] = {
	{ "tex", DBG_TEX, "Print texture info" },
	{ "compute", DBG_COMPUTE, "Print compute info" },
	{ "vm", DBG_VM, "Print virtual addresses when creating resources" },
	{ "info", DBG_INFO, "Print driver information" },

	{ "fs", DBG_FS, "Print fetch shaders" },
	{ "vs", DBG_VS, "Print vertex shaders" },
	{ "gs", DBG_GS, "Print geometry shaders" },
	{ "ps", DBG_PS, "Print pixel shaders" },
	{ "cs", DBG_CS, "Print compute shaders" },
	{ "tcs", DBG_TCS, "Print tessellation control shaders" },
	{ "tes", DBG_TES, "Print tessellation evaluation shaders" },
	{ "noir", DBG_NO_IR, "Don't print the LLVM IR"},
	{ "notgsi", DBG_NO_TGSI, "Don't print the TGSI"},
	{ "noasm", DBG_NO_ASM, "Don't print disassembled shaders"},
	{ "preoptir", DBG_PREOPT_IR, "Print the LLVM IR before initial optimizations" },
	{ "checkir", DBG_CHECK_IR, "Enable additional sanity checks on shader IR" },

	{ "testdma", DBG_TEST_DMA, "Invoke SDMA tests and exit." },

	{ "nodma", DBG_NO_ASYNC_DMA, "Disable asynchronous DMA" },
	{ "nohyperz", DBG_NO_HYPERZ, "Disable Hyper-Z" },
	/* GL uses the word INVALIDATE, gallium uses the word DISCARD */
	{ "noinvalrange", DBG_NO_DISCARD_RANGE, "Disable handling of INVALIDATE_RANGE map flags" },
	{ "no2d", DBG_NO_2D_TILING, "Disable 2D tiling" },
	{ "notiling", DBG_NO_TILING, "Disable tiling" },
	{ "switch_on_eop", DBG_SWITCH_ON_EOP, "Program WD/IA to switch on end-of-packet." },
	{ "forcedma", DBG_FORCE_DMA, "Use asynchronous DMA for all operations when possible." },
	{ "precompile", DBG_PRECOMPILE, "Compile one shader variant at shader creation." },
	{ "nowc", DBG_NO_WC, "Disable GTT write combining" },
	{ "check_vm", DBG_CHECK_VM, "Check VM faults and dump debug info." },
	{ "nodcc", DBG_NO_DCC, "Disable DCC." },
	{ "nodccclear", DBG_NO_DCC_CLEAR, "Disable DCC fast clear." },
	{ "norbplus", DBG_NO_RB_PLUS, "Disable RB+ on Stoney." },
	{ "sisched", DBG_SI_SCHED, "Enable LLVM SI Machine Instruction Scheduler." },
	{ "mono", DBG_MONOLITHIC_SHADERS, "Use old-style monolithic shaders compiled on demand" },
	{ "noce", DBG_NO_CE, "Disable the constant engine"},
	{ "unsafemath", DBG_UNSAFE_MATH, "Enable unsafe math shader optimizations" },
	{ "nodccfb", DBG_NO_DCC_FB, "Disable separate DCC on the main framebuffer" },

	DEBUG_NAMED_VALUE_END /* must be last */
};
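
/* Illustrative only (not part of the original file): these flags are parsed
 * from the R600_DEBUG environment variable as a comma-separated list (see
 * debug_get_flags_option() below), e.g.:
 *
 *	R600_DEBUG=info,ps,nodcc ./app
 */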
static const char* r600_get_vendor(struct pipe_screen* pscreen)
{
	return "X.Org";
}

static const char* r600_get_device_vendor(struct pipe_screen* pscreen)
{
	return "AMD";
}
static const char* r600_get_chip_name(struct r600_common_screen *rscreen)
{
	switch (rscreen->info.family) {
	case CHIP_R600: return "AMD R600";
	case CHIP_RV610: return "AMD RV610";
	case CHIP_RV630: return "AMD RV630";
	case CHIP_RV670: return "AMD RV670";
	case CHIP_RV620: return "AMD RV620";
	case CHIP_RV635: return "AMD RV635";
	case CHIP_RS780: return "AMD RS780";
	case CHIP_RS880: return "AMD RS880";
	case CHIP_RV770: return "AMD RV770";
	case CHIP_RV730: return "AMD RV730";
	case CHIP_RV710: return "AMD RV710";
	case CHIP_RV740: return "AMD RV740";
	case CHIP_CEDAR: return "AMD CEDAR";
	case CHIP_REDWOOD: return "AMD REDWOOD";
	case CHIP_JUNIPER: return "AMD JUNIPER";
	case CHIP_CYPRESS: return "AMD CYPRESS";
	case CHIP_HEMLOCK: return "AMD HEMLOCK";
	case CHIP_PALM: return "AMD PALM";
	case CHIP_SUMO: return "AMD SUMO";
	case CHIP_SUMO2: return "AMD SUMO2";
	case CHIP_BARTS: return "AMD BARTS";
	case CHIP_TURKS: return "AMD TURKS";
	case CHIP_CAICOS: return "AMD CAICOS";
	case CHIP_CAYMAN: return "AMD CAYMAN";
	case CHIP_ARUBA: return "AMD ARUBA";
	case CHIP_TAHITI: return "AMD TAHITI";
	case CHIP_PITCAIRN: return "AMD PITCAIRN";
	case CHIP_VERDE: return "AMD CAPE VERDE";
	case CHIP_OLAND: return "AMD OLAND";
	case CHIP_HAINAN: return "AMD HAINAN";
	case CHIP_BONAIRE: return "AMD BONAIRE";
	case CHIP_KAVERI: return "AMD KAVERI";
	case CHIP_KABINI: return "AMD KABINI";
	case CHIP_HAWAII: return "AMD HAWAII";
	case CHIP_MULLINS: return "AMD MULLINS";
	case CHIP_TONGA: return "AMD TONGA";
	case CHIP_ICELAND: return "AMD ICELAND";
	case CHIP_CARRIZO: return "AMD CARRIZO";
	case CHIP_FIJI: return "AMD FIJI";
	case CHIP_POLARIS10: return "AMD POLARIS10";
	case CHIP_POLARIS11: return "AMD POLARIS11";
	case CHIP_STONEY: return "AMD STONEY";
	default: return "AMD unknown";
	}
}
static const char* r600_get_name(struct pipe_screen* pscreen)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)pscreen;

	return rscreen->renderer_string;
}
static float r600_get_paramf(struct pipe_screen* pscreen,
			     enum pipe_capf param)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen *)pscreen;

	switch (param) {
	case PIPE_CAPF_MAX_LINE_WIDTH:
	case PIPE_CAPF_MAX_LINE_WIDTH_AA:
	case PIPE_CAPF_MAX_POINT_WIDTH:
	case PIPE_CAPF_MAX_POINT_WIDTH_AA:
		if (rscreen->family >= CHIP_CEDAR)
			return 16384.0f;
		else
			return 8192.0f;
	case PIPE_CAPF_MAX_TEXTURE_ANISOTROPY:
		return 16.0f;
	case PIPE_CAPF_MAX_TEXTURE_LOD_BIAS:
		return 16.0f;
	case PIPE_CAPF_GUARD_BAND_LEFT:
	case PIPE_CAPF_GUARD_BAND_TOP:
	case PIPE_CAPF_GUARD_BAND_RIGHT:
	case PIPE_CAPF_GUARD_BAND_BOTTOM:
		return 0.0f;
	}
	return 0.0f;
}
static int r600_get_video_param(struct pipe_screen *screen,
				enum pipe_video_profile profile,
				enum pipe_video_entrypoint entrypoint,
				enum pipe_video_cap param)
{
	switch (param) {
	case PIPE_VIDEO_CAP_SUPPORTED:
		return vl_profile_supported(screen, profile, entrypoint);
	case PIPE_VIDEO_CAP_NPOT_TEXTURES:
		return 1;
	case PIPE_VIDEO_CAP_MAX_WIDTH:
	case PIPE_VIDEO_CAP_MAX_HEIGHT:
		return vl_video_buffer_max_size(screen);
	case PIPE_VIDEO_CAP_PREFERED_FORMAT:
		return PIPE_FORMAT_NV12;
	case PIPE_VIDEO_CAP_PREFERS_INTERLACED:
		return false;
	case PIPE_VIDEO_CAP_SUPPORTS_INTERLACED:
		return false;
	case PIPE_VIDEO_CAP_SUPPORTS_PROGRESSIVE:
		return true;
	case PIPE_VIDEO_CAP_MAX_LEVEL:
		return vl_level_supported(screen, profile);
	default:
		return 0;
	}
}
const char *r600_get_llvm_processor_name(enum radeon_family family)
{
	switch (family) {
	case CHIP_TAHITI: return "tahiti";
	case CHIP_PITCAIRN: return "pitcairn";
	case CHIP_VERDE: return "verde";
	case CHIP_OLAND: return "oland";
	case CHIP_HAINAN: return "hainan";
	case CHIP_BONAIRE: return "bonaire";
	case CHIP_KABINI: return "kabini";
	case CHIP_KAVERI: return "kaveri";
	case CHIP_HAWAII: return "hawaii";
	case CHIP_MULLINS:
		return "mullins";
	case CHIP_TONGA: return "tonga";
	case CHIP_ICELAND: return "iceland";
	case CHIP_CARRIZO: return "carrizo";
#if HAVE_LLVM <= 0x0307
	case CHIP_FIJI: return "tonga";
	case CHIP_STONEY: return "carrizo";
#else
	case CHIP_FIJI: return "fiji";
	case CHIP_STONEY: return "stoney";
#endif
#if HAVE_LLVM <= 0x0308
	case CHIP_POLARIS10: return "tonga";
	case CHIP_POLARIS11: return "tonga";
#else
	case CHIP_POLARIS10: return "polaris10";
	case CHIP_POLARIS11: return "polaris11";
#endif
	default:
		return "";
	}
}
static int r600_get_compute_param(struct pipe_screen *screen,
				  enum pipe_shader_ir ir_type,
				  enum pipe_compute_cap param,
				  void *ret)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;

	//TODO: select these params by asic
	switch (param) {
	case PIPE_COMPUTE_CAP_IR_TARGET: {
		const char *gpu;
		const char *triple;

		if (rscreen->family <= CHIP_ARUBA) {
			triple = "r600--";
		} else {
			if (HAVE_LLVM < 0x0400) {
				triple = "amdgcn--";
			} else {
				triple = "amdgcn-mesa-mesa3d";
			}
		}
		switch(rscreen->family) {
		/* Clang < 3.6 is missing Hainan in its list of
		 * GPUs, so we need to use the name of a similar GPU.
		 */
		case CHIP_HAINAN:
			gpu = "oland";
			break;
		default:
			gpu = r600_get_llvm_processor_name(rscreen->family);
			break;
		}
		if (ret) {
			sprintf(ret, "%s-%s", gpu, triple);
		}
		/* +2 for dash and terminating NIL byte */
		return (strlen(triple) + strlen(gpu) + 2) * sizeof(char);
	}
	case PIPE_COMPUTE_CAP_GRID_DIMENSION:
		if (ret) {
			uint64_t *grid_dimension = ret;
			grid_dimension[0] = 3;
		}
		return 1 * sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_GRID_SIZE:
		if (ret) {
			uint64_t *grid_size = ret;
			grid_size[0] = 65535;
			grid_size[1] = 65535;
			grid_size[2] = 65535;
		}
		return 3 * sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_BLOCK_SIZE:
		if (ret) {
			uint64_t *block_size = ret;
			if (rscreen->chip_class >= SI && HAVE_LLVM >= 0x309 &&
			    ir_type == PIPE_SHADER_IR_TGSI) {
				block_size[0] = 2048;
				block_size[1] = 2048;
				block_size[2] = 2048;
			} else {
				block_size[0] = 256;
				block_size[1] = 256;
				block_size[2] = 256;
			}
		}
		return 3 * sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_THREADS_PER_BLOCK:
		if (ret) {
			uint64_t *max_threads_per_block = ret;
			if (rscreen->chip_class >= SI && HAVE_LLVM >= 0x309 &&
			    ir_type == PIPE_SHADER_IR_TGSI)
				*max_threads_per_block = 2048;
			else
				*max_threads_per_block = 256;
		}
		return sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_ADDRESS_BITS:
		if (ret) {
			uint32_t *address_bits = ret;
			address_bits[0] = 32;
			if (rscreen->chip_class >= SI)
				address_bits[0] = 64;
		}
		return 1 * sizeof(uint32_t);

	case PIPE_COMPUTE_CAP_MAX_GLOBAL_SIZE:
		if (ret) {
			uint64_t *max_global_size = ret;
			uint64_t max_mem_alloc_size;

			r600_get_compute_param(screen, ir_type,
					       PIPE_COMPUTE_CAP_MAX_MEM_ALLOC_SIZE,
					       &max_mem_alloc_size);

			/* In OpenCL, the MAX_MEM_ALLOC_SIZE must be at least
			 * 1/4 of the MAX_GLOBAL_SIZE. Since the
			 * MAX_MEM_ALLOC_SIZE is fixed for older kernels,
			 * make sure we never report more than
			 * 4 * MAX_MEM_ALLOC_SIZE.
			 */
			*max_global_size = MIN2(4 * max_mem_alloc_size,
						MAX2(rscreen->info.gart_size,
						     rscreen->info.vram_size));
		}
		return sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_LOCAL_SIZE:
		if (ret) {
			uint64_t *max_local_size = ret;
			/* Value reported by the closed source driver. */
			*max_local_size = 32768;
		}
		return sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_INPUT_SIZE:
		if (ret) {
			uint64_t *max_input_size = ret;
			/* Value reported by the closed source driver. */
			*max_input_size = 1024;
		}
		return sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_MEM_ALLOC_SIZE:
		if (ret) {
			uint64_t *max_mem_alloc_size = ret;

			*max_mem_alloc_size = rscreen->info.max_alloc_size;
		}
		return sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_CLOCK_FREQUENCY:
		if (ret) {
			uint32_t *max_clock_frequency = ret;
			*max_clock_frequency = rscreen->info.max_shader_clock;
		}
		return sizeof(uint32_t);

	case PIPE_COMPUTE_CAP_MAX_COMPUTE_UNITS:
		if (ret) {
			uint32_t *max_compute_units = ret;
			*max_compute_units = rscreen->info.num_good_compute_units;
		}
		return sizeof(uint32_t);

	case PIPE_COMPUTE_CAP_IMAGES_SUPPORTED:
		if (ret) {
			uint32_t *images_supported = ret;
			*images_supported = 0;
		}
		return sizeof(uint32_t);

	case PIPE_COMPUTE_CAP_MAX_PRIVATE_SIZE:
		break; /* unused */

	case PIPE_COMPUTE_CAP_SUBGROUP_SIZE:
		if (ret) {
			uint32_t *subgroup_size = ret;
			*subgroup_size = r600_wavefront_size(rscreen->family);
		}
		return sizeof(uint32_t);

	case PIPE_COMPUTE_CAP_MAX_VARIABLE_THREADS_PER_BLOCK:
		if (ret) {
			uint64_t *max_variable_threads_per_block = ret;
			if (rscreen->chip_class >= SI && HAVE_LLVM >= 0x309 &&
			    ir_type == PIPE_SHADER_IR_TGSI)
				*max_variable_threads_per_block = SI_MAX_VARIABLE_THREADS_PER_BLOCK;
			else
				*max_variable_threads_per_block = 0;
		}
		return sizeof(uint64_t);
	}

	fprintf(stderr, "unknown PIPE_COMPUTE_CAP %d\n", param);
	return 0;
}
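
/* Illustrative only (not part of the original file): following the gallium
 * convention used above, a caller queries the size first (ret == NULL), then
 * passes a buffer of that size, e.g. for the IR target string:
 *
 *	int size = screen->get_compute_param(screen, PIPE_SHADER_IR_TGSI,
 *					     PIPE_COMPUTE_CAP_IR_TARGET, NULL);
 *	char *target = MALLOC(size);
 *	screen->get_compute_param(screen, PIPE_SHADER_IR_TGSI,
 *				  PIPE_COMPUTE_CAP_IR_TARGET, target);
 */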
static uint64_t r600_get_timestamp(struct pipe_screen *screen)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;

	return 1000000 * rscreen->ws->query_value(rscreen->ws, RADEON_TIMESTAMP) /
			rscreen->info.clock_crystal_freq;
}
static void r600_fence_reference(struct pipe_screen *screen,
				 struct pipe_fence_handle **dst,
				 struct pipe_fence_handle *src)
{
	struct radeon_winsys *ws = ((struct r600_common_screen*)screen)->ws;
	struct r600_multi_fence **rdst = (struct r600_multi_fence **)dst;
	struct r600_multi_fence *rsrc = (struct r600_multi_fence *)src;

	if (pipe_reference(&(*rdst)->reference, &rsrc->reference)) {
		ws->fence_reference(&(*rdst)->gfx, NULL);
		ws->fence_reference(&(*rdst)->sdma, NULL);
		FREE(*rdst);
	}
	*rdst = rsrc;
}
static boolean r600_fence_finish(struct pipe_screen *screen,
				 struct pipe_context *ctx,
				 struct pipe_fence_handle *fence,
				 uint64_t timeout)
{
	struct radeon_winsys *rws = ((struct r600_common_screen*)screen)->ws;
	struct r600_multi_fence *rfence = (struct r600_multi_fence *)fence;
	struct r600_common_context *rctx =
		ctx ? (struct r600_common_context*)ctx : NULL;
	int64_t abs_timeout = os_time_get_absolute_timeout(timeout);

	if (rfence->sdma) {
		if (!rws->fence_wait(rws, rfence->sdma, timeout))
			return false;

		/* Recompute the timeout after waiting. */
		if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
			int64_t time = os_time_get_nano();
			timeout = abs_timeout > time ? abs_timeout - time : 0;
		}
	}

	if (!rfence->gfx)
		return true;

	/* Flush the gfx IB if it hasn't been flushed yet. */
	if (rctx &&
	    rfence->gfx_unflushed.ctx == rctx &&
	    rfence->gfx_unflushed.ib_index == rctx->num_gfx_cs_flushes) {
		rctx->gfx.flush(rctx, timeout ? 0 : RADEON_FLUSH_ASYNC, NULL);
		rfence->gfx_unflushed.ctx = NULL;

		if (!timeout)
			return false;

		/* Recompute the timeout after all that. */
		if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
			int64_t time = os_time_get_nano();
			timeout = abs_timeout > time ? abs_timeout - time : 0;
		}
	}

	return rws->fence_wait(rws, rfence->gfx, timeout);
}
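
/* Illustrative only (not part of the original file): a state tracker waits on
 * the multi-fence through the screen vtable, e.g. with a 1-second timeout and
 * a hypothetical fence "f" obtained from a flush:
 *
 *	if (screen->fence_finish(screen, pctx, f, 1000000000ull)) {
 *		// both the SDMA and GFX parts have signaled
 *	}
 */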
static void r600_query_memory_info(struct pipe_screen *screen,
				   struct pipe_memory_info *info)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct radeon_winsys *ws = rscreen->ws;
	unsigned vram_usage, gtt_usage;

	info->total_device_memory = rscreen->info.vram_size / 1024;
	info->total_staging_memory = rscreen->info.gart_size / 1024;

	/* The real TTM memory usage is somewhat random, because:
	 *
	 * 1) TTM delays freeing memory, because it can only free it after
	 *    fences expire.
	 *
	 * 2) The memory usage can be really low if big VRAM evictions are
	 *    taking place, but the real usage is well above the size of VRAM.
	 *
	 * Instead, return statistics of this process.
	 */
	vram_usage = ws->query_value(ws, RADEON_REQUESTED_VRAM_MEMORY) / 1024;
	gtt_usage = ws->query_value(ws, RADEON_REQUESTED_GTT_MEMORY) / 1024;

	info->avail_device_memory =
		vram_usage <= info->total_device_memory ?
				info->total_device_memory - vram_usage : 0;
	info->avail_staging_memory =
		gtt_usage <= info->total_staging_memory ?
				info->total_staging_memory - gtt_usage : 0;

	info->device_memory_evicted =
		ws->query_value(ws, RADEON_NUM_BYTES_MOVED) / 1024;

	if (rscreen->info.drm_major == 3 && rscreen->info.drm_minor >= 4)
		info->nr_device_memory_evictions =
			ws->query_value(ws, RADEON_NUM_EVICTIONS);
	else
		/* Just return the number of evicted 64KB pages. */
		info->nr_device_memory_evictions = info->device_memory_evicted / 64;
}
struct pipe_resource *r600_resource_create_common(struct pipe_screen *screen,
						  const struct pipe_resource *templ)
{
	if (templ->target == PIPE_BUFFER) {
		return r600_buffer_create(screen, templ, 256);
	} else {
		return r600_texture_create(screen, templ);
	}
}
bool r600_common_screen_init(struct r600_common_screen *rscreen,
			     struct radeon_winsys *ws)
{
	char llvm_string[32] = {}, kernel_version[128] = {};
	struct utsname uname_data;

	ws->query_info(ws, &rscreen->info);

	if (uname(&uname_data) == 0)
		snprintf(kernel_version, sizeof(kernel_version),
			 " / %s", uname_data.release);

	snprintf(llvm_string, sizeof(llvm_string),
		 ", LLVM %i.%i.%i", (HAVE_LLVM >> 8) & 0xff,
		 HAVE_LLVM & 0xff, MESA_LLVM_VERSION_PATCH);

	snprintf(rscreen->renderer_string, sizeof(rscreen->renderer_string),
		 "%s (DRM %i.%i.%i%s%s)",
		 r600_get_chip_name(rscreen), rscreen->info.drm_major,
		 rscreen->info.drm_minor, rscreen->info.drm_patchlevel,
		 kernel_version, llvm_string);

	rscreen->b.get_name = r600_get_name;
	rscreen->b.get_vendor = r600_get_vendor;
	rscreen->b.get_device_vendor = r600_get_device_vendor;
	rscreen->b.get_compute_param = r600_get_compute_param;
	rscreen->b.get_paramf = r600_get_paramf;
	rscreen->b.get_timestamp = r600_get_timestamp;
	rscreen->b.fence_finish = r600_fence_finish;
	rscreen->b.fence_reference = r600_fence_reference;
	rscreen->b.resource_destroy = u_resource_destroy_vtbl;
	rscreen->b.resource_from_user_memory = r600_buffer_from_user_memory;
	rscreen->b.query_memory_info = r600_query_memory_info;

	if (rscreen->info.has_uvd) {
		rscreen->b.get_video_param = rvid_get_video_param;
		rscreen->b.is_video_format_supported = rvid_is_format_supported;
	} else {
		rscreen->b.get_video_param = r600_get_video_param;
		rscreen->b.is_video_format_supported = vl_video_buffer_is_format_supported;
	}

	r600_init_screen_texture_functions(rscreen);
	r600_init_screen_query_functions(rscreen);

	rscreen->family = rscreen->info.family;
	rscreen->chip_class = rscreen->info.chip_class;
	rscreen->debug_flags = debug_get_flags_option("R600_DEBUG", common_debug_options, 0);

	slab_create_parent(&rscreen->pool_transfers, sizeof(struct r600_transfer), 64);

	rscreen->force_aniso = MIN2(16, debug_get_num_option("R600_TEX_ANISO", -1));
	if (rscreen->force_aniso >= 0) {
		printf("radeon: Forcing anisotropy filter to %ix\n",
		       /* round down to a power of two */
		       1 << util_logbase2(rscreen->force_aniso));
	}

	util_format_s3tc_init();
	pipe_mutex_init(rscreen->aux_context_lock);
	pipe_mutex_init(rscreen->gpu_load_mutex);

	if (rscreen->debug_flags & DBG_INFO) {
		printf("pci_id = 0x%x\n", rscreen->info.pci_id);
		printf("family = %i (%s)\n", rscreen->info.family,
		       r600_get_chip_name(rscreen));
		printf("chip_class = %i\n", rscreen->info.chip_class);
		printf("gart_size = %i MB\n", (int)DIV_ROUND_UP(rscreen->info.gart_size, 1024*1024));
		printf("vram_size = %i MB\n", (int)DIV_ROUND_UP(rscreen->info.vram_size, 1024*1024));
		printf("max_alloc_size = %i MB\n",
		       (int)DIV_ROUND_UP(rscreen->info.max_alloc_size, 1024*1024));
		printf("has_virtual_memory = %i\n", rscreen->info.has_virtual_memory);
		printf("gfx_ib_pad_with_type2 = %i\n", rscreen->info.gfx_ib_pad_with_type2);
		printf("has_sdma = %i\n", rscreen->info.has_sdma);
		printf("has_uvd = %i\n", rscreen->info.has_uvd);
		printf("me_fw_version = %i\n", rscreen->info.me_fw_version);
		printf("pfp_fw_version = %i\n", rscreen->info.pfp_fw_version);
		printf("ce_fw_version = %i\n", rscreen->info.ce_fw_version);
		printf("vce_fw_version = %i\n", rscreen->info.vce_fw_version);
		printf("vce_harvest_config = %i\n", rscreen->info.vce_harvest_config);
		printf("clock_crystal_freq = %i\n", rscreen->info.clock_crystal_freq);
		printf("drm = %i.%i.%i\n", rscreen->info.drm_major,
		       rscreen->info.drm_minor, rscreen->info.drm_patchlevel);
		printf("has_userptr = %i\n", rscreen->info.has_userptr);

		printf("r600_max_quad_pipes = %i\n", rscreen->info.r600_max_quad_pipes);
		printf("max_shader_clock = %i\n", rscreen->info.max_shader_clock);
		printf("num_good_compute_units = %i\n", rscreen->info.num_good_compute_units);
		printf("max_se = %i\n", rscreen->info.max_se);
		printf("max_sh_per_se = %i\n", rscreen->info.max_sh_per_se);

		printf("r600_gb_backend_map = %i\n", rscreen->info.r600_gb_backend_map);
		printf("r600_gb_backend_map_valid = %i\n", rscreen->info.r600_gb_backend_map_valid);
		printf("r600_num_banks = %i\n", rscreen->info.r600_num_banks);
		printf("num_render_backends = %i\n", rscreen->info.num_render_backends);
		printf("num_tile_pipes = %i\n", rscreen->info.num_tile_pipes);
		printf("pipe_interleave_bytes = %i\n", rscreen->info.pipe_interleave_bytes);
	}
	return true;
}
void r600_destroy_common_screen(struct r600_common_screen *rscreen)
{
	r600_perfcounters_destroy(rscreen);
	r600_gpu_load_kill_thread(rscreen);

	pipe_mutex_destroy(rscreen->gpu_load_mutex);
	pipe_mutex_destroy(rscreen->aux_context_lock);
	rscreen->aux_context->destroy(rscreen->aux_context);

	slab_destroy_parent(&rscreen->pool_transfers);

	rscreen->ws->destroy(rscreen->ws);
	FREE(rscreen);
}
bool r600_can_dump_shader(struct r600_common_screen *rscreen,
			  unsigned processor)
{
	switch (processor) {
	case PIPE_SHADER_VERTEX:
		return (rscreen->debug_flags & DBG_VS) != 0;
	case PIPE_SHADER_TESS_CTRL:
		return (rscreen->debug_flags & DBG_TCS) != 0;
	case PIPE_SHADER_TESS_EVAL:
		return (rscreen->debug_flags & DBG_TES) != 0;
	case PIPE_SHADER_GEOMETRY:
		return (rscreen->debug_flags & DBG_GS) != 0;
	case PIPE_SHADER_FRAGMENT:
		return (rscreen->debug_flags & DBG_PS) != 0;
	case PIPE_SHADER_COMPUTE:
		return (rscreen->debug_flags & DBG_CS) != 0;
	default:
		return false;
	}
}
bool r600_extra_shader_checks(struct r600_common_screen *rscreen, unsigned processor)
{
	return (rscreen->debug_flags & DBG_CHECK_IR) ||
	       r600_can_dump_shader(rscreen, processor);
}
void r600_screen_clear_buffer(struct r600_common_screen *rscreen, struct pipe_resource *dst,
			      uint64_t offset, uint64_t size, unsigned value,
			      enum r600_coherency coher)
{
	struct r600_common_context *rctx = (struct r600_common_context*)rscreen->aux_context;

	pipe_mutex_lock(rscreen->aux_context_lock);
	rctx->clear_buffer(&rctx->b, dst, offset, size, value, coher);
	rscreen->aux_context->flush(rscreen->aux_context, NULL, 0);
	pipe_mutex_unlock(rscreen->aux_context_lock);
}