/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "r600_pipe.h"
#include "r600_public.h"

#include "evergreen_compute.h"

#include "sb/sb_public.h"

#include "pipe/p_shader_tokens.h"
#include "util/u_blitter.h"
#include "util/u_debug.h"
#include "util/u_memory.h"
#include "util/u_simple_shaders.h"
#include "util/u_upload_mgr.h"
#include "util/u_math.h"
#include "vl/vl_decoder.h"
#include "vl/vl_video_buffer.h"
#include "radeon/radeon_uvd.h"
#include "os/os_time.h"

static const struct debug_named_value r600_debug_options[] = {
	{ "nohyperz", DBG_NO_HYPERZ, "Disable Hyper-Z" },
#if defined(R600_USE_LLVM)
	{ "nollvm", DBG_NO_LLVM, "Disable the LLVM shader compiler" },
#endif
	{ "nocpdma", DBG_NO_CP_DMA, "Disable CP DMA" },
	{ "nodma", DBG_NO_ASYNC_DMA, "Disable asynchronous DMA" },
	/* GL uses the word INVALIDATE, gallium uses the word DISCARD */
	{ "noinvalrange", DBG_NO_DISCARD_RANGE, "Disable handling of INVALIDATE_RANGE map flags" },

	{ "nosb", DBG_NO_SB, "Disable sb backend for graphics shaders" },
	{ "sbcl", DBG_SB_CS, "Enable sb backend for compute shaders" },
	{ "sbdry", DBG_SB_DRY_RUN, "Don't use optimized bytecode (just print the dumps)" },
	{ "sbstat", DBG_SB_STAT, "Print optimization statistics for shaders" },
	{ "sbdump", DBG_SB_DUMP, "Print IR dumps after some optimization passes" },
	{ "sbnofallback", DBG_SB_NO_FALLBACK, "Abort on errors instead of fallback" },
	{ "sbdisasm", DBG_SB_DISASM, "Use sb disassembler for shader dumps" },
	{ "sbsafemath", DBG_SB_SAFEMATH, "Disable unsafe math optimizations" },

	DEBUG_NAMED_VALUE_END /* must be last */
};
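
/* These options are consumed by r600_screen_create() below, which ORs
 * debug_get_flags_option("R600_DEBUG", r600_debug_options, 0) into
 * rscreen->b.debug_flags.  Illustrative use (any GL client works; glxgears
 * here is just an example):
 *
 *   R600_DEBUG=nohyperz,nodma glxgears
 *
 * which disables Hyper-Z and the asynchronous DMA ring for that process.
 */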

static struct r600_fence *r600_create_fence(struct r600_context *rctx)
{
	struct r600_screen *rscreen = rctx->screen;
	struct r600_fence *fence = NULL;

	pipe_mutex_lock(rscreen->fences.mutex);

	if (!rscreen->fences.bo) {
		/* Create the shared buffer object */
		rscreen->fences.bo = (struct r600_resource*)
			pipe_buffer_create(&rscreen->b.b, PIPE_BIND_CUSTOM,
					   PIPE_USAGE_STAGING, 4096);
		if (!rscreen->fences.bo) {
			R600_ERR("r600: failed to create bo for fence objects\n");
			goto out;
		}
		rscreen->fences.data = r600_buffer_map_sync_with_rings(&rctx->b, rscreen->fences.bo, PIPE_TRANSFER_READ_WRITE);
	}

	if (!LIST_IS_EMPTY(&rscreen->fences.pool)) {
		struct r600_fence *entry;

		/* Try to find a freed fence that has been signalled */
		LIST_FOR_EACH_ENTRY(entry, &rscreen->fences.pool, head) {
			if (rscreen->fences.data[entry->index] != 0) {
				LIST_DELINIT(&entry->head);
				fence = entry;
				break;
			}
		}
	}

	if (!fence) {
		/* Allocate a new fence */
		struct r600_fence_block *block;
		unsigned index;

		if ((rscreen->fences.next_index + 1) >= 1024) {
			R600_ERR("r600: too many concurrent fences\n");
			goto out;
		}

		index = rscreen->fences.next_index++;

		if (!(index % FENCE_BLOCK_SIZE)) {
			/* Allocate a new block */
			block = CALLOC_STRUCT(r600_fence_block);
			if (!block)
				goto out;

			LIST_ADD(&block->head, &rscreen->fences.blocks);
		} else {
			block = LIST_ENTRY(struct r600_fence_block, rscreen->fences.blocks.next, head);
		}

		fence = &block->fences[index % FENCE_BLOCK_SIZE];
		fence->index = index;
	}

	pipe_reference_init(&fence->reference, 1);

	rscreen->fences.data[fence->index] = 0;
	r600_context_emit_fence(rctx, rscreen->fences.bo, fence->index, 1);

	/* Create a dummy BO so that fence_finish without a timeout can sleep waiting for completion */
	fence->sleep_bo = (struct r600_resource*)
		pipe_buffer_create(&rctx->screen->b.b, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_STAGING, 1);
	/* Add the fence as a dummy relocation. */
	r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, fence->sleep_bo, RADEON_USAGE_READWRITE);

out:
	pipe_mutex_unlock(rscreen->fences.mutex);
	return fence;
}
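
/* Fence bookkeeping in a nutshell (this only restates the code above): all
 * fences share one 4096-byte buffer object and each fence owns a 32-bit slot
 * in it.  r600_create_fence() clears the slot and has the GPU write a
 * non-zero value to it via r600_context_emit_fence(), so "slot != 0" means
 * signalled.  Released fences sit on fences.pool and are recycled once their
 * slot reads non-zero, and the tiny per-fence sleep_bo exists only so that an
 * infinite-timeout wait can block in the winsys instead of busy-polling. */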

static void r600_flush(struct pipe_context *ctx, unsigned flags)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct pipe_query *render_cond = NULL;
	unsigned render_cond_mode = 0;
	boolean render_cond_cond = FALSE;

	if (rctx->b.rings.gfx.cs->cdw == rctx->initial_gfx_cs_size)
		return;

	rctx->b.rings.gfx.flushing = true;
	/* Disable render condition. */
	if (rctx->current_render_cond) {
		render_cond = rctx->current_render_cond;
		render_cond_cond = rctx->current_render_cond_cond;
		render_cond_mode = rctx->current_render_cond_mode;
		ctx->render_condition(ctx, NULL, FALSE, 0);
	}

	r600_context_flush(rctx, flags);
	rctx->b.rings.gfx.flushing = false;
	r600_begin_new_cs(rctx);

	/* Re-enable render condition. */
	if (render_cond) {
		ctx->render_condition(ctx, render_cond, render_cond_cond, render_cond_mode);
	}

	rctx->initial_gfx_cs_size = rctx->b.rings.gfx.cs->cdw;
}

static void r600_flush_from_st(struct pipe_context *ctx,
			       struct pipe_fence_handle **fence,
			       unsigned flags)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_fence **rfence = (struct r600_fence**)fence;
	unsigned fflags;

	fflags = flags & PIPE_FLUSH_END_OF_FRAME ? RADEON_FLUSH_END_OF_FRAME : 0;
	if (rfence) {
		*rfence = r600_create_fence(rctx);
	}
	/* flush gfx & dma ring, order does not matter as only one can be live */
	if (rctx->b.rings.dma.cs) {
		rctx->b.rings.dma.flush(rctx, fflags);
	}
	rctx->b.rings.gfx.flush(rctx, fflags);
}
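
/* Note on ordering above: the fence is created first, which makes
 * r600_create_fence() emit the fence packet into the current gfx command
 * stream, so the ring flushes that follow are what actually submit that
 * fence to the hardware. */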

static void r600_flush_gfx_ring(void *ctx, unsigned flags)
{
	r600_flush((struct pipe_context*)ctx, flags);
}

static void r600_flush_dma_ring(void *ctx, unsigned flags)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct radeon_winsys_cs *cs = rctx->b.rings.dma.cs;

	rctx->b.rings.dma.flushing = true;
	rctx->b.ws->cs_flush(cs, flags, 0);
	rctx->b.rings.dma.flushing = false;
}

static void r600_flush_from_winsys(void *ctx, unsigned flags)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->b.rings.gfx.flush(rctx, flags);
}

static void r600_flush_dma_from_winsys(void *ctx, unsigned flags)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->b.rings.dma.flush(rctx, flags);
}

static void r600_destroy_context(struct pipe_context *context)
{
	struct r600_context *rctx = (struct r600_context *)context;

	r600_isa_destroy(rctx->isa);

	r600_sb_context_destroy(rctx->sb_context);

	pipe_resource_reference((struct pipe_resource**)&rctx->dummy_cmask, NULL);
	pipe_resource_reference((struct pipe_resource**)&rctx->dummy_fmask, NULL);

	if (rctx->dummy_pixel_shader) {
		rctx->b.b.delete_fs_state(&rctx->b.b, rctx->dummy_pixel_shader);
	}
	if (rctx->custom_dsa_flush) {
		rctx->b.b.delete_depth_stencil_alpha_state(&rctx->b.b, rctx->custom_dsa_flush);
	}
	if (rctx->custom_blend_resolve) {
		rctx->b.b.delete_blend_state(&rctx->b.b, rctx->custom_blend_resolve);
	}
	if (rctx->custom_blend_decompress) {
		rctx->b.b.delete_blend_state(&rctx->b.b, rctx->custom_blend_decompress);
	}
	if (rctx->custom_blend_fastclear) {
		rctx->b.b.delete_blend_state(&rctx->b.b, rctx->custom_blend_fastclear);
	}
	util_unreference_framebuffer_state(&rctx->framebuffer.state);

	util_blitter_destroy(rctx->blitter);

	if (rctx->uploader) {
		u_upload_destroy(rctx->uploader);
	}
	if (rctx->allocator_fetch_shader) {
		u_suballocator_destroy(rctx->allocator_fetch_shader);
	}
	util_slab_destroy(&rctx->pool_transfers);

	r600_release_command_buffer(&rctx->start_cs_cmd);

	if (rctx->b.rings.gfx.cs) {
		rctx->b.ws->cs_destroy(rctx->b.rings.gfx.cs);
	}
	if (rctx->b.rings.dma.cs) {
		rctx->b.ws->cs_destroy(rctx->b.rings.dma.cs);
	}

	r600_common_context_cleanup(&rctx->b);
}

static struct pipe_context *r600_create_context(struct pipe_screen *screen, void *priv)
{
	struct r600_context *rctx = CALLOC_STRUCT(r600_context);
	struct r600_screen* rscreen = (struct r600_screen *)screen;

	if (rctx == NULL)
		return NULL;

	util_slab_create(&rctx->pool_transfers,
			 sizeof(struct r600_transfer), 64,
			 UTIL_SLAB_SINGLETHREADED);

	rctx->b.b.screen = screen;
	rctx->b.b.priv = priv;
	rctx->b.b.destroy = r600_destroy_context;
	rctx->b.b.flush = r600_flush_from_st;

	if (!r600_common_context_init(&rctx->b, &rscreen->b))
		goto fail;

	rctx->screen = rscreen;
	rctx->keep_tiling_flags = rscreen->b.info.drm_minor >= 12;

	LIST_INITHEAD(&rctx->active_nontimer_queries);

	r600_init_blit_functions(rctx);
	r600_init_query_functions(rctx);
	r600_init_context_resource_functions(rctx);

	if (rscreen->b.info.has_uvd) {
		rctx->b.b.create_video_codec = r600_uvd_create_decoder;
		rctx->b.b.create_video_buffer = r600_video_buffer_create;
	} else {
		rctx->b.b.create_video_codec = vl_create_decoder;
		rctx->b.b.create_video_buffer = vl_video_buffer_create;
	}

	r600_init_common_state_functions(rctx);

	switch (rctx->b.chip_class) {
	case R600:
	case R700:
		r600_init_state_functions(rctx);
		r600_init_atom_start_cs(rctx);
		rctx->custom_dsa_flush = r600_create_db_flush_dsa(rctx);
		rctx->custom_blend_resolve = rctx->b.chip_class == R700 ? r700_create_resolve_blend(rctx)
									 : r600_create_resolve_blend(rctx);
		rctx->custom_blend_decompress = r600_create_decompress_blend(rctx);
		rctx->has_vertex_cache = !(rctx->b.family == CHIP_RV610 ||
					   rctx->b.family == CHIP_RV620 ||
					   rctx->b.family == CHIP_RS780 ||
					   rctx->b.family == CHIP_RS880 ||
					   rctx->b.family == CHIP_RV710);
		break;
	case EVERGREEN:
	case CAYMAN:
		evergreen_init_state_functions(rctx);
		evergreen_init_atom_start_cs(rctx);
		evergreen_init_atom_start_compute_cs(rctx);
		rctx->custom_dsa_flush = evergreen_create_db_flush_dsa(rctx);
		rctx->custom_blend_resolve = evergreen_create_resolve_blend(rctx);
		rctx->custom_blend_decompress = evergreen_create_decompress_blend(rctx);
		rctx->custom_blend_fastclear = evergreen_create_fastclear_blend(rctx);
		rctx->has_vertex_cache = !(rctx->b.family == CHIP_CEDAR ||
					   rctx->b.family == CHIP_PALM ||
					   rctx->b.family == CHIP_SUMO ||
					   rctx->b.family == CHIP_SUMO2 ||
					   rctx->b.family == CHIP_CAICOS ||
					   rctx->b.family == CHIP_CAYMAN ||
					   rctx->b.family == CHIP_ARUBA);
		break;
	default:
		R600_ERR("Unsupported chip class %d.\n", rctx->b.chip_class);
		goto fail;
	}

	if (rscreen->trace_bo) {
		rctx->b.rings.gfx.cs = rctx->b.ws->cs_create(rctx->b.ws, RING_GFX, rscreen->trace_bo->cs_buf);
	} else {
		rctx->b.rings.gfx.cs = rctx->b.ws->cs_create(rctx->b.ws, RING_GFX, NULL);
	}
	rctx->b.rings.gfx.flush = r600_flush_gfx_ring;
	rctx->b.ws->cs_set_flush_callback(rctx->b.rings.gfx.cs, r600_flush_from_winsys, rctx);
	rctx->b.rings.gfx.flushing = false;

	rctx->b.rings.dma.cs = NULL;
	if (rscreen->b.info.r600_has_dma && !(rscreen->b.debug_flags & DBG_NO_ASYNC_DMA)) {
		rctx->b.rings.dma.cs = rctx->b.ws->cs_create(rctx->b.ws, RING_DMA, NULL);
		rctx->b.rings.dma.flush = r600_flush_dma_ring;
		rctx->b.ws->cs_set_flush_callback(rctx->b.rings.dma.cs, r600_flush_dma_from_winsys, rctx);
		rctx->b.rings.dma.flushing = false;
	}

	rctx->uploader = u_upload_create(&rctx->b.b, 1024 * 1024, 256,
					 PIPE_BIND_INDEX_BUFFER |
					 PIPE_BIND_CONSTANT_BUFFER);

	rctx->allocator_fetch_shader = u_suballocator_create(&rctx->b.b, 64 * 1024, 256,
							     0, PIPE_USAGE_STATIC, FALSE);
	if (!rctx->allocator_fetch_shader)
		goto fail;

	rctx->isa = calloc(1, sizeof(struct r600_isa));
	if (!rctx->isa || r600_isa_init(rctx, rctx->isa))
		goto fail;

	rctx->blitter = util_blitter_create(&rctx->b.b);
	if (rctx->blitter == NULL)
		goto fail;
	util_blitter_set_texture_multisample(rctx->blitter, rscreen->has_msaa);
	rctx->blitter->draw_rectangle = r600_draw_rectangle;

	r600_begin_new_cs(rctx);
	r600_get_backend_mask(rctx); /* this emits commands and must be last */

	rctx->dummy_pixel_shader =
		util_make_fragment_cloneinput_shader(&rctx->b.b, 0,
						     TGSI_SEMANTIC_GENERIC,
						     TGSI_INTERPOLATE_CONSTANT);
	rctx->b.b.bind_fs_state(&rctx->b.b, rctx->dummy_pixel_shader);

	return &rctx->b.b;

fail:
	r600_destroy_context(&rctx->b.b);
	return NULL;
}

static const char* r600_get_vendor(struct pipe_screen* pscreen)
{
	return "X.Org";
}

static const char *r600_get_family_name(enum radeon_family family)
{
	switch (family) {
	case CHIP_R600: return "AMD R600";
	case CHIP_RV610: return "AMD RV610";
	case CHIP_RV630: return "AMD RV630";
	case CHIP_RV670: return "AMD RV670";
	case CHIP_RV620: return "AMD RV620";
	case CHIP_RV635: return "AMD RV635";
	case CHIP_RS780: return "AMD RS780";
	case CHIP_RS880: return "AMD RS880";
	case CHIP_RV770: return "AMD RV770";
	case CHIP_RV730: return "AMD RV730";
	case CHIP_RV710: return "AMD RV710";
	case CHIP_RV740: return "AMD RV740";
	case CHIP_CEDAR: return "AMD CEDAR";
	case CHIP_REDWOOD: return "AMD REDWOOD";
	case CHIP_JUNIPER: return "AMD JUNIPER";
	case CHIP_CYPRESS: return "AMD CYPRESS";
	case CHIP_HEMLOCK: return "AMD HEMLOCK";
	case CHIP_PALM: return "AMD PALM";
	case CHIP_SUMO: return "AMD SUMO";
	case CHIP_SUMO2: return "AMD SUMO2";
	case CHIP_BARTS: return "AMD BARTS";
	case CHIP_TURKS: return "AMD TURKS";
	case CHIP_CAICOS: return "AMD CAICOS";
	case CHIP_CAYMAN: return "AMD CAYMAN";
	case CHIP_ARUBA: return "AMD ARUBA";
	default: return "AMD unknown";
	}
}

static const char* r600_get_name(struct pipe_screen* pscreen)
{
	struct r600_screen *rscreen = (struct r600_screen *)pscreen;

	return r600_get_family_name(rscreen->b.family);
}

static int r600_get_param(struct pipe_screen* pscreen, enum pipe_cap param)
{
	struct r600_screen *rscreen = (struct r600_screen *)pscreen;
	enum radeon_family family = rscreen->b.family;

	switch (param) {
	/* Supported features (boolean caps). */
	case PIPE_CAP_NPOT_TEXTURES:
	case PIPE_CAP_TWO_SIDED_STENCIL:
	case PIPE_CAP_ANISOTROPIC_FILTER:
	case PIPE_CAP_POINT_SPRITE:
	case PIPE_CAP_OCCLUSION_QUERY:
	case PIPE_CAP_TEXTURE_SHADOW_MAP:
	case PIPE_CAP_TEXTURE_MIRROR_CLAMP:
	case PIPE_CAP_BLEND_EQUATION_SEPARATE:
	case PIPE_CAP_TEXTURE_SWIZZLE:
	case PIPE_CAP_DEPTH_CLIP_DISABLE:
	case PIPE_CAP_SHADER_STENCIL_EXPORT:
	case PIPE_CAP_VERTEX_ELEMENT_INSTANCE_DIVISOR:
	case PIPE_CAP_MIXED_COLORBUFFER_FORMATS:
	case PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT:
	case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER:
	case PIPE_CAP_SEAMLESS_CUBE_MAP:
	case PIPE_CAP_PRIMITIVE_RESTART:
	case PIPE_CAP_CONDITIONAL_RENDER:
	case PIPE_CAP_TEXTURE_BARRIER:
	case PIPE_CAP_VERTEX_COLOR_UNCLAMPED:
	case PIPE_CAP_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION:
	case PIPE_CAP_TGSI_INSTANCEID:
	case PIPE_CAP_VERTEX_BUFFER_OFFSET_4BYTE_ALIGNED_ONLY:
	case PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY:
	case PIPE_CAP_VERTEX_ELEMENT_SRC_OFFSET_4BYTE_ALIGNED_ONLY:
	case PIPE_CAP_USER_INDEX_BUFFERS:
	case PIPE_CAP_USER_CONSTANT_BUFFERS:
	case PIPE_CAP_COMPUTE:
	case PIPE_CAP_START_INSTANCE:
	case PIPE_CAP_MAX_DUAL_SOURCE_RENDER_TARGETS:
	case PIPE_CAP_TEXTURE_BUFFER_OBJECTS:
	case PIPE_CAP_PREFER_BLIT_BASED_TEXTURE_TRANSFER:
	case PIPE_CAP_QUERY_PIPELINE_STATISTICS:
	case PIPE_CAP_TEXTURE_MULTISAMPLE:
		return 1;

	case PIPE_CAP_TGSI_TEXCOORD:
		return 0;

	case PIPE_CAP_MAX_TEXTURE_BUFFER_SIZE:
		return MIN2(rscreen->b.info.vram_size, 0xFFFFFFFF);

	case PIPE_CAP_MIN_MAP_BUFFER_ALIGNMENT:
		return R600_MAP_BUFFER_ALIGNMENT;

	case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
		return 256;

	case PIPE_CAP_TEXTURE_BUFFER_OFFSET_ALIGNMENT:
		return 1;

	case PIPE_CAP_GLSL_FEATURE_LEVEL:
		return 140;

	/* Supported except the original R600. */
	case PIPE_CAP_INDEP_BLEND_ENABLE:
	case PIPE_CAP_INDEP_BLEND_FUNC:
		/* R600 doesn't support per-MRT blends */
		return family == CHIP_R600 ? 0 : 1;

	/* Supported on Evergreen. */
	case PIPE_CAP_SEAMLESS_CUBE_MAP_PER_TEXTURE:
	case PIPE_CAP_CUBE_MAP_ARRAY:
		return family >= CHIP_CEDAR ? 1 : 0;

	/* Unsupported features. */
	case PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT:
	case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER:
	case PIPE_CAP_SCALED_RESOLVE:
	case PIPE_CAP_TGSI_CAN_COMPACT_CONSTANTS:
	case PIPE_CAP_FRAGMENT_COLOR_CLAMPED:
	case PIPE_CAP_VERTEX_COLOR_CLAMPED:
	case PIPE_CAP_USER_VERTEX_BUFFERS:
		return 0;

	/* Stream output. */
	case PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS:
		return rscreen->has_streamout ? 4 : 0;
	case PIPE_CAP_STREAM_OUTPUT_PAUSE_RESUME:
		return rscreen->has_streamout ? 1 : 0;
	case PIPE_CAP_MAX_STREAM_OUTPUT_SEPARATE_COMPONENTS:
	case PIPE_CAP_MAX_STREAM_OUTPUT_INTERLEAVED_COMPONENTS:
		return 32*4;

	/* Texturing. */
	case PIPE_CAP_MAX_TEXTURE_2D_LEVELS:
	case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
	case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
		if (family >= CHIP_CEDAR)
			return 15;
		else
			return 14;
	case PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS:
		return rscreen->b.info.drm_minor >= 9 ?
			(family >= CHIP_CEDAR ? 16384 : 8192) : 0;
	case PIPE_CAP_MAX_COMBINED_SAMPLERS:
		return 32;

	/* Render targets. */
	case PIPE_CAP_MAX_RENDER_TARGETS:
		/* XXX some r6xx are buggy and can only do 4 */
		return 8;

	case PIPE_CAP_MAX_VIEWPORTS:
		return 1;

	/* Timer queries, present when the clock frequency is non zero. */
	case PIPE_CAP_QUERY_TIME_ELAPSED:
		return rscreen->b.info.r600_clock_crystal_freq != 0;
	case PIPE_CAP_QUERY_TIMESTAMP:
		return rscreen->b.info.drm_minor >= 20 &&
		       rscreen->b.info.r600_clock_crystal_freq != 0;

	case PIPE_CAP_MIN_TEXEL_OFFSET:
		return -8;

	case PIPE_CAP_MAX_TEXEL_OFFSET:
		return 7;

	case PIPE_CAP_TEXTURE_BORDER_COLOR_QUIRK:
		return PIPE_QUIRK_TEXTURE_BORDER_COLOR_SWIZZLE_R600;
	case PIPE_CAP_ENDIANNESS:
		return PIPE_ENDIAN_LITTLE;
	}
	return 0;
}

static float r600_get_paramf(struct pipe_screen* pscreen,
			     enum pipe_capf param)
{
	struct r600_screen *rscreen = (struct r600_screen *)pscreen;
	enum radeon_family family = rscreen->b.family;

	switch (param) {
	case PIPE_CAPF_MAX_LINE_WIDTH:
	case PIPE_CAPF_MAX_LINE_WIDTH_AA:
	case PIPE_CAPF_MAX_POINT_WIDTH:
	case PIPE_CAPF_MAX_POINT_WIDTH_AA:
		if (family >= CHIP_CEDAR)
			return 16384.0f;
		else
			return 8192.0f;
	case PIPE_CAPF_MAX_TEXTURE_ANISOTROPY:
		return 16.0f;
	case PIPE_CAPF_MAX_TEXTURE_LOD_BIAS:
		return 16.0f;
	case PIPE_CAPF_GUARD_BAND_LEFT:
	case PIPE_CAPF_GUARD_BAND_TOP:
	case PIPE_CAPF_GUARD_BAND_RIGHT:
	case PIPE_CAPF_GUARD_BAND_BOTTOM:
		return 0.0f;
	}
	return 0.0f;
}

static int r600_get_shader_param(struct pipe_screen* pscreen, unsigned shader, enum pipe_shader_cap param)
{
	switch (shader) {
	case PIPE_SHADER_FRAGMENT:
	case PIPE_SHADER_VERTEX:
	case PIPE_SHADER_COMPUTE:
		break;
	case PIPE_SHADER_GEOMETRY:
		/* XXX: support and enable geometry programs */
		return 0;
	default:
		/* XXX: support tessellation on Evergreen */
		return 0;
	}

	switch (param) {
	case PIPE_SHADER_CAP_MAX_INSTRUCTIONS:
	case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS:
	case PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS:
	case PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS:
		return 16384;
	case PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH:
		return 32;
	case PIPE_SHADER_CAP_MAX_INPUTS:
		return 32;
	case PIPE_SHADER_CAP_MAX_TEMPS:
		return 256; /* Max native temporaries. */
	case PIPE_SHADER_CAP_MAX_ADDRS:
		/* XXX Isn't this equal to TEMPS? */
		return 1; /* Max native address registers */
	case PIPE_SHADER_CAP_MAX_CONSTS:
		return R600_MAX_CONST_BUFFER_SIZE;
	case PIPE_SHADER_CAP_MAX_CONST_BUFFERS:
		return R600_MAX_USER_CONST_BUFFERS;
	case PIPE_SHADER_CAP_MAX_PREDS:
		return 0; /* nothing uses this */
	case PIPE_SHADER_CAP_TGSI_CONT_SUPPORTED:
		return 1;
	case PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED:
		return 1;
	case PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR:
	case PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR:
	case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
	case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR:
		return 1;
	case PIPE_SHADER_CAP_SUBROUTINES:
		return 0;
	case PIPE_SHADER_CAP_INTEGERS:
		return 1;
	case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS:
		return 16;
	case PIPE_SHADER_CAP_PREFERRED_IR:
		if (shader == PIPE_SHADER_COMPUTE) {
			return PIPE_SHADER_IR_LLVM;
		} else {
			return PIPE_SHADER_IR_TGSI;
		}
	}
	return 0;
}

static int r600_get_video_param(struct pipe_screen *screen,
				enum pipe_video_profile profile,
				enum pipe_video_entrypoint entrypoint,
				enum pipe_video_cap param)
{
	switch (param) {
	case PIPE_VIDEO_CAP_SUPPORTED:
		return vl_profile_supported(screen, profile, entrypoint);
	case PIPE_VIDEO_CAP_NPOT_TEXTURES:
		return 1;
	case PIPE_VIDEO_CAP_MAX_WIDTH:
	case PIPE_VIDEO_CAP_MAX_HEIGHT:
		return vl_video_buffer_max_size(screen);
	case PIPE_VIDEO_CAP_PREFERED_FORMAT:
		return PIPE_FORMAT_NV12;
	case PIPE_VIDEO_CAP_PREFERS_INTERLACED:
		return false;
	case PIPE_VIDEO_CAP_SUPPORTS_INTERLACED:
		return false;
	case PIPE_VIDEO_CAP_SUPPORTS_PROGRESSIVE:
		return true;
	case PIPE_VIDEO_CAP_MAX_LEVEL:
		return vl_level_supported(screen, profile);
	default:
		return 0;
	}
}

const char * r600_llvm_gpu_string(enum radeon_family family)
{
	const char * gpu_family;

	switch (family) {
	case CHIP_RS880:
		gpu_family = "rs880";
		break;
	case CHIP_RV710:
		gpu_family = "rv710";
		break;
	case CHIP_RV730:
		gpu_family = "rv730";
		break;
	case CHIP_RV770:
		gpu_family = "rv770";
		break;
	case CHIP_CEDAR:
		gpu_family = "cedar";
		break;
	case CHIP_REDWOOD:
		gpu_family = "redwood";
		break;
	case CHIP_JUNIPER:
		gpu_family = "juniper";
		break;
	case CHIP_CYPRESS:
		gpu_family = "cypress";
		break;
	case CHIP_BARTS:
		gpu_family = "barts";
		break;
	case CHIP_TURKS:
		gpu_family = "turks";
		break;
	case CHIP_CAICOS:
		gpu_family = "caicos";
		break;
	case CHIP_CAYMAN:
		gpu_family = "cayman";
		break;
	default:
		gpu_family = "";
		fprintf(stderr, "Chip not supported by r600 llvm "
			"backend, please file a bug at " PACKAGE_BUGREPORT "\n");
		break;
	}
	return gpu_family;
}

static int r600_get_compute_param(struct pipe_screen *screen,
				  enum pipe_compute_cap param,
				  void *ret)
{
	struct r600_screen *rscreen = (struct r600_screen *)screen;
	//TODO: select these params by asic
	switch (param) {
	case PIPE_COMPUTE_CAP_IR_TARGET: {
		const char *gpu = r600_llvm_gpu_string(rscreen->b.family);
		if (ret) {
			sprintf(ret, "%s-r600--", gpu);
		}
		return (8 + strlen(gpu)) * sizeof(char);
	}
	case PIPE_COMPUTE_CAP_GRID_DIMENSION:
		if (ret) {
			uint64_t * grid_dimension = ret;
			grid_dimension[0] = 3;
		}
		return 1 * sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_GRID_SIZE:
		if (ret) {
			uint64_t * grid_size = ret;
			grid_size[0] = 65535;
			grid_size[1] = 65535;
			grid_size[2] = 65535;
		}
		return 3 * sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_BLOCK_SIZE:
		if (ret) {
			uint64_t * block_size = ret;
			block_size[0] = 256;
			block_size[1] = 256;
			block_size[2] = 256;
		}
		return 3 * sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_THREADS_PER_BLOCK:
		if (ret) {
			uint64_t * max_threads_per_block = ret;
			*max_threads_per_block = 256;
		}
		return sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_GLOBAL_SIZE:
		if (ret) {
			uint64_t * max_global_size = ret;
			/* XXX: This is what the proprietary driver reports, we
			 * may want to use a different value. */
			*max_global_size = 201326592;
		}
		return sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_INPUT_SIZE:
		if (ret) {
			uint64_t * max_input_size = ret;
			*max_input_size = 1024;
		}
		return sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_LOCAL_SIZE:
		if (ret) {
			uint64_t * max_local_size = ret;
			/* XXX: This is what the proprietary driver reports, we
			 * may want to use a different value. */
			*max_local_size = 32768;
		}
		return sizeof(uint64_t);

	case PIPE_COMPUTE_CAP_MAX_MEM_ALLOC_SIZE:
		if (ret) {
			uint64_t max_global_size;
			uint64_t * max_mem_alloc_size = ret;
			r600_get_compute_param(screen,
					       PIPE_COMPUTE_CAP_MAX_GLOBAL_SIZE,
					       &max_global_size);
			/* OpenCL requires this value to be at least
			 * max(MAX_GLOBAL_SIZE / 4, 128 * 1024 * 1024).
			 * I'm really not sure what value to report here, but
			 * MAX_GLOBAL_SIZE / 4 seems reasonable.
			 */
			*max_mem_alloc_size = max_global_size / 4;
		}
		return sizeof(uint64_t);
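
	/* For scale: 201326592 bytes (the MAX_GLOBAL_SIZE above) is 192 MiB, so
	 * the MAX_MEM_ALLOC_SIZE reported here works out to 192 / 4 = 48 MiB,
	 * which is below the 128 MiB floor mentioned in the OpenCL note above. */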

	default:
		fprintf(stderr, "unknown PIPE_COMPUTE_CAP %d\n", param);
		return 0;
	}
}

static void r600_destroy_screen(struct pipe_screen* pscreen)
{
	struct r600_screen *rscreen = (struct r600_screen *)pscreen;

	if (!radeon_winsys_unref(rscreen->b.ws))
		return;

	r600_common_screen_cleanup(&rscreen->b);

	if (rscreen->global_pool) {
		compute_memory_pool_delete(rscreen->global_pool);
	}

	if (rscreen->fences.bo) {
		struct r600_fence_block *entry, *tmp;

		LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, &rscreen->fences.blocks, head) {
			LIST_DEL(&entry->head);
			FREE(entry);
		}

		rscreen->b.ws->buffer_unmap(rscreen->fences.bo->cs_buf);
		pipe_resource_reference((struct pipe_resource**)&rscreen->fences.bo, NULL);
	}
	if (rscreen->trace_bo) {
		rscreen->b.ws->buffer_unmap(rscreen->trace_bo->cs_buf);
		pipe_resource_reference((struct pipe_resource**)&rscreen->trace_bo, NULL);
	}
	pipe_mutex_destroy(rscreen->fences.mutex);

	rscreen->b.ws->destroy(rscreen->b.ws);
	FREE(rscreen);
}

static void r600_fence_reference(struct pipe_screen *pscreen,
				 struct pipe_fence_handle **ptr,
				 struct pipe_fence_handle *fence)
{
	struct r600_fence **oldf = (struct r600_fence**)ptr;
	struct r600_fence *newf = (struct r600_fence*)fence;

	if (pipe_reference(&(*oldf)->reference, &newf->reference)) {
		struct r600_screen *rscreen = (struct r600_screen *)pscreen;
		pipe_mutex_lock(rscreen->fences.mutex);
		pipe_resource_reference((struct pipe_resource**)&(*oldf)->sleep_bo, NULL);
		LIST_ADDTAIL(&(*oldf)->head, &rscreen->fences.pool);
		pipe_mutex_unlock(rscreen->fences.mutex);
	}

	*ptr = fence;
}

static boolean r600_fence_signalled(struct pipe_screen *pscreen,
				    struct pipe_fence_handle *fence)
{
	struct r600_screen *rscreen = (struct r600_screen *)pscreen;
	struct r600_fence *rfence = (struct r600_fence*)fence;

	return rscreen->fences.data[rfence->index] != 0;
}

static boolean r600_fence_finish(struct pipe_screen *pscreen,
				 struct pipe_fence_handle *fence,
				 uint64_t timeout)
{
	struct r600_screen *rscreen = (struct r600_screen *)pscreen;
	struct r600_fence *rfence = (struct r600_fence*)fence;
	int64_t start_time = 0;

	if (timeout != PIPE_TIMEOUT_INFINITE) {
		start_time = os_time_get();

		/* Convert to microseconds. */
		timeout /= 1000;
	}

	while (rscreen->fences.data[rfence->index] == 0) {
		/* Special-case infinite timeout - wait for the dummy BO to become idle */
		if (timeout == PIPE_TIMEOUT_INFINITE) {
			rscreen->b.ws->buffer_wait(rfence->sleep_bo->buf, RADEON_USAGE_READWRITE);
			break;
		}

		/* The dummy BO will be busy until the CS including the fence has completed, or
		 * the GPU is reset. Don't bother continuing to spin when the BO is idle. */
		if (!rscreen->b.ws->buffer_is_busy(rfence->sleep_bo->buf, RADEON_USAGE_READWRITE))
			break;

		if (timeout != PIPE_TIMEOUT_INFINITE &&
		    os_time_get() - start_time >= timeout) {
			break;
		}
	}

	return rscreen->fences.data[rfence->index] != 0;
}
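
/* The wait above is driven entirely by the CPU re-reading the mapped fence
 * slot.  An infinite timeout instead parks the caller in the winsys on the
 * fence's dummy sleep_bo (busy until the submission carrying the fence
 * retires), while a finite timeout is converted to microseconds and checked
 * against os_time_get(). */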

static uint64_t r600_get_timestamp(struct pipe_screen *screen)
{
	struct r600_screen *rscreen = (struct r600_screen*)screen;

	return 1000000 * rscreen->b.ws->query_value(rscreen->b.ws, RADEON_TIMESTAMP) /
			rscreen->b.info.r600_clock_crystal_freq;
}
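
/* Unit note (assuming r600_clock_crystal_freq is reported by the kernel in
 * kHz, which is not verified here): ticks / (freq_kHz * 1000) is seconds, so
 * 1000000 * ticks / freq_kHz yields nanoseconds, the unit get_timestamp() is
 * expected to return. */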

static int r600_get_driver_query_info(struct pipe_screen *screen,
				      unsigned index,
				      struct pipe_driver_query_info *info)
{
	struct r600_screen *rscreen = (struct r600_screen *)screen;
	struct pipe_driver_query_info list[] = {
		{"draw-calls", R600_QUERY_DRAW_CALLS, 0},
		{"requested-VRAM", R600_QUERY_REQUESTED_VRAM, rscreen->b.info.vram_size, TRUE},
		{"requested-GTT", R600_QUERY_REQUESTED_GTT, rscreen->b.info.gart_size, TRUE},
		{"buffer-wait-time", R600_QUERY_BUFFER_WAIT_TIME, 0, FALSE}
	};

	if (!info)
		return Elements(list);

	if (index >= Elements(list))
		return 0;

	*info = list[index];
	return 1;
}

struct pipe_screen *r600_screen_create(struct radeon_winsys *ws)
{
	struct r600_screen *rscreen = CALLOC_STRUCT(r600_screen);

	if (rscreen == NULL) {
		return NULL;
	}

	/* Set functions first. */
	rscreen->b.b.context_create = r600_create_context;
	rscreen->b.b.destroy = r600_destroy_screen;
	rscreen->b.b.get_name = r600_get_name;
	rscreen->b.b.get_vendor = r600_get_vendor;
	rscreen->b.b.get_param = r600_get_param;
	rscreen->b.b.get_shader_param = r600_get_shader_param;
	rscreen->b.b.get_paramf = r600_get_paramf;
	rscreen->b.b.get_compute_param = r600_get_compute_param;
	rscreen->b.b.get_timestamp = r600_get_timestamp;
	if (rscreen->b.chip_class >= EVERGREEN) {
		rscreen->b.b.is_format_supported = evergreen_is_format_supported;
	} else {
		rscreen->b.b.is_format_supported = r600_is_format_supported;
	}
	rscreen->b.b.fence_reference = r600_fence_reference;
	rscreen->b.b.fence_signalled = r600_fence_signalled;
	rscreen->b.b.fence_finish = r600_fence_finish;
	rscreen->b.b.get_driver_query_info = r600_get_driver_query_info;
	if (rscreen->b.info.has_uvd) {
		rscreen->b.b.get_video_param = ruvd_get_video_param;
		rscreen->b.b.is_video_format_supported = ruvd_is_format_supported;
	} else {
		rscreen->b.b.get_video_param = r600_get_video_param;
		rscreen->b.b.is_video_format_supported = vl_video_buffer_is_format_supported;
	}
	r600_init_screen_resource_functions(&rscreen->b.b);

	if (!r600_common_screen_init(&rscreen->b, ws)) {
		FREE(rscreen);
		return NULL;
	}

	rscreen->b.debug_flags |= debug_get_flags_option("R600_DEBUG", r600_debug_options, 0);
	if (debug_get_bool_option("R600_DEBUG_COMPUTE", FALSE))
		rscreen->b.debug_flags |= DBG_COMPUTE;
	if (debug_get_bool_option("R600_DUMP_SHADERS", FALSE))
		rscreen->b.debug_flags |= DBG_FS | DBG_VS | DBG_GS | DBG_PS | DBG_CS;
	if (!debug_get_bool_option("R600_HYPERZ", TRUE))
		rscreen->b.debug_flags |= DBG_NO_HYPERZ;
	if (!debug_get_bool_option("R600_LLVM", TRUE))
		rscreen->b.debug_flags |= DBG_NO_LLVM;

	if (rscreen->b.family == CHIP_UNKNOWN) {
		fprintf(stderr, "r600: Unknown chipset 0x%04X\n", rscreen->b.info.pci_id);
		FREE(rscreen);
		return NULL;
	}

	/* Figure out streamout kernel support. */
	switch (rscreen->b.chip_class) {
	case R600:
		if (rscreen->b.family < CHIP_RS780) {
			rscreen->has_streamout = rscreen->b.info.drm_minor >= 14;
		} else {
			rscreen->has_streamout = rscreen->b.info.drm_minor >= 23;
		}
		break;
	case R700:
		rscreen->has_streamout = rscreen->b.info.drm_minor >= 17;
		break;
	case EVERGREEN:
	case CAYMAN:
		rscreen->has_streamout = rscreen->b.info.drm_minor >= 14;
		break;
	default:
		rscreen->has_streamout = FALSE;
		break;
	}

	/* MSAA support. */
	switch (rscreen->b.chip_class) {
	case R600:
	case R700:
		rscreen->has_msaa = rscreen->b.info.drm_minor >= 22;
		rscreen->has_compressed_msaa_texturing = false;
		break;
	case EVERGREEN:
		rscreen->has_msaa = rscreen->b.info.drm_minor >= 19;
		rscreen->has_compressed_msaa_texturing = rscreen->b.info.drm_minor >= 24;
		break;
	case CAYMAN:
		rscreen->has_msaa = rscreen->b.info.drm_minor >= 19;
		rscreen->has_compressed_msaa_texturing = true;
		break;
	default:
		rscreen->has_msaa = FALSE;
		rscreen->has_compressed_msaa_texturing = false;
		break;
	}

	rscreen->has_cp_dma = rscreen->b.info.drm_minor >= 27 &&
			      !(rscreen->b.debug_flags & DBG_NO_CP_DMA);

	rscreen->fences.bo = NULL;
	rscreen->fences.data = NULL;
	rscreen->fences.next_index = 0;
	LIST_INITHEAD(&rscreen->fences.pool);
	LIST_INITHEAD(&rscreen->fences.blocks);
	pipe_mutex_init(rscreen->fences.mutex);

	rscreen->global_pool = compute_memory_pool_new(rscreen);

	rscreen->cs_count = 0;
	if (rscreen->b.info.drm_minor >= 28 && (rscreen->b.debug_flags & DBG_TRACE_CS)) {
		rscreen->trace_bo = (struct r600_resource*)pipe_buffer_create(&rscreen->b.b,
									       PIPE_BIND_CUSTOM,
									       PIPE_USAGE_STAGING,
									       4096);
		if (rscreen->trace_bo) {
			rscreen->trace_ptr = rscreen->b.ws->buffer_map(rscreen->trace_bo->cs_buf, NULL,
								       PIPE_TRANSFER_UNSYNCHRONIZED);
		}
	}

#if 0 /* This is for testing whether aux_context and buffer clearing work correctly. */
	struct pipe_resource templ = {};
	templ.width0 = 2048;
	templ.height0 = 2048;
	templ.depth0 = 1;
	templ.array_size = 1;
	templ.target = PIPE_TEXTURE_2D;
	templ.format = PIPE_FORMAT_R8G8B8A8_UNORM;
	templ.usage = PIPE_USAGE_STATIC;

	struct r600_resource *res = r600_resource(rscreen->screen.resource_create(&rscreen->screen, &templ));
	unsigned char *map = ws->buffer_map(res->cs_buf, NULL, PIPE_TRANSFER_WRITE);
	unsigned i;

	memset(map, 0, 256);

	r600_screen_clear_buffer(rscreen, &res->b.b, 4, 4, 0xCC);
	r600_screen_clear_buffer(rscreen, &res->b.b, 8, 4, 0xDD);
	r600_screen_clear_buffer(rscreen, &res->b.b, 12, 4, 0xEE);
	r600_screen_clear_buffer(rscreen, &res->b.b, 20, 4, 0xFF);
	r600_screen_clear_buffer(rscreen, &res->b.b, 32, 20, 0x87);

	ws->buffer_wait(res->buf, RADEON_USAGE_WRITE);

	for (i = 0; i < 256; i++) {
		printf("%02X", map[i]);
	}
	printf("\n");
#endif

	return &rscreen->b.b;
}