2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
29 #include "util/u_blitter.h"
30 #include "util/u_slab.h"
31 #include "util/u_suballoc.h"
33 #include "r600_llvm.h"
34 #include "r600_public.h"
35 #include "r600_resource.h"
36 #include "evergreen_compute.h"
38 #define R600_NUM_ATOMS 37
40 #define R600_TRACE_CS 0
42 #define R600_MAX_USER_CONST_BUFFERS 13
43 #define R600_MAX_DRIVER_CONST_BUFFERS 2
44 #define R600_MAX_CONST_BUFFERS (R600_MAX_USER_CONST_BUFFERS + R600_MAX_DRIVER_CONST_BUFFERS)
46 /* start driver buffers after user buffers */
47 #define R600_UCP_CONST_BUFFER (R600_MAX_USER_CONST_BUFFERS)
48 #define R600_TXQ_CONST_BUFFER (R600_MAX_USER_CONST_BUFFERS + 1)
50 #define R600_MAX_CONST_BUFFER_SIZE 4096
52 #ifdef PIPE_ARCH_BIG_ENDIAN
53 #define R600_BIG_ENDIAN 1
55 #define R600_BIG_ENDIAN 0
58 #define R600_MAP_BUFFER_ALIGNMENT 64
61 struct r600_shader_key
;
63 /* This encapsulates a state or an operation which can emitted into the GPU
64 * command stream. It's not limited to states only, it can be used for anything
65 * that wants to write commands into the CS (e.g. cache flushes). */
67 void (*emit
)(struct r600_context
*ctx
, struct r600_atom
*state
);
73 /* This is an atom containing GPU commands that never change.
74 * This is supposed to be copied directly into the CS. */
75 struct r600_command_buffer
{
82 struct r600_db_state
{
83 struct r600_atom atom
;
84 struct r600_surface
*rsurf
;
87 struct r600_db_misc_state
{
88 struct r600_atom atom
;
89 bool occlusion_query_enabled
;
90 bool flush_depthstencil_through_cb
;
91 bool flush_depthstencil_in_place
;
92 bool copy_depth
, copy_stencil
;
95 unsigned db_shader_control
;
99 struct r600_cb_misc_state
{
100 struct r600_atom atom
;
101 unsigned cb_color_control
; /* this comes from blend state */
102 unsigned blend_colormask
; /* 8*4 bits for 8 RGBA colorbuffers */
104 unsigned nr_ps_color_outputs
;
109 struct r600_clip_misc_state
{
110 struct r600_atom atom
;
111 unsigned pa_cl_clip_cntl
; /* from rasterizer */
112 unsigned pa_cl_vs_out_cntl
; /* from vertex shader */
113 unsigned clip_plane_enable
; /* from rasterizer */
114 unsigned clip_dist_write
; /* from vertex shader */
117 struct r600_alphatest_state
{
118 struct r600_atom atom
;
119 unsigned sx_alpha_test_control
; /* this comes from dsa state */
120 unsigned sx_alpha_ref
; /* this comes from dsa state */
122 bool cb0_export_16bpc
; /* from set_framebuffer_state */
125 struct r600_vgt_state
{
126 struct r600_atom atom
;
127 uint32_t vgt_multi_prim_ib_reset_en
;
128 uint32_t vgt_multi_prim_ib_reset_indx
;
131 struct r600_vgt2_state
{
132 struct r600_atom atom
;
133 uint32_t vgt_indx_offset
;
136 struct r600_blend_color
{
137 struct r600_atom atom
;
138 struct pipe_blend_color state
;
141 struct r600_clip_state
{
142 struct r600_atom atom
;
143 struct pipe_clip_state state
;
146 struct r600_cs_shader_state
{
147 struct r600_atom atom
;
148 unsigned kernel_index
;
149 struct r600_pipe_compute
*shader
;
152 struct r600_framebuffer
{
153 struct r600_atom atom
;
154 struct pipe_framebuffer_state state
;
155 unsigned compressed_cb_mask
;
159 bool is_msaa_resolve
;
162 struct r600_sample_mask
{
163 struct r600_atom atom
;
164 uint16_t sample_mask
; /* there are only 8 bits on EG, 16 bits on Cayman */
167 struct r600_config_state
{
168 struct r600_atom atom
;
169 unsigned sq_gpr_resource_mgmt_1
;
172 struct r600_stencil_ref
179 struct r600_stencil_ref_state
{
180 struct r600_atom atom
;
181 struct r600_stencil_ref state
;
182 struct pipe_stencil_ref pipe_state
;
185 struct r600_viewport_state
{
186 struct r600_atom atom
;
187 struct pipe_viewport_state state
;
190 struct compute_memory_pool
;
191 void compute_memory_pool_delete(struct compute_memory_pool
* pool
);
192 struct compute_memory_pool
* compute_memory_pool_new(
193 struct r600_screen
*rscreen
);
195 struct r600_pipe_fences
{
196 struct r600_resource
*bo
;
199 /* linked list of preallocated blocks */
200 struct list_head blocks
;
201 /* linked list of freed fences */
202 struct list_head pool
;
206 enum r600_msaa_texture_mode
{
207 /* If the hw can fetch the first sample only (no decompression available).
208 * This means MSAA texturing is not fully implemented. */
209 MSAA_TEXTURE_SAMPLE_ZERO
,
211 /* If the hw can fetch decompressed MSAA textures.
212 * Supported families: R600, R700, Evergreen.
213 * Cayman cannot use this, because it cannot do the decompression. */
214 MSAA_TEXTURE_DECOMPRESSED
,
216 /* If the hw can fetch compressed MSAA textures, which means shaders can
217 * read resolved FMASK. This yields the best performance.
218 * Supported families: Evergreen, Cayman. */
219 MSAA_TEXTURE_COMPRESSED
223 struct pipe_screen screen
;
224 struct radeon_winsys
*ws
;
226 enum chip_class chip_class
;
227 struct radeon_info info
;
230 enum r600_msaa_texture_mode msaa_texture_support
;
232 struct r600_tiling_info tiling_info
;
233 struct r600_pipe_fences fences
;
235 /*for compute global memory binding, we allocate stuff here, instead of
237 * XXX: Not sure if this is the best place for global_pool. Also,
238 * it's not thread safe, so it won't work with multiple contexts. */
239 struct compute_memory_pool
*global_pool
;
241 struct r600_resource
*trace_bo
;
247 struct r600_pipe_sampler_view
{
248 struct pipe_sampler_view base
;
249 struct r600_resource
*tex_resource
;
250 uint32_t tex_resource_words
[8];
251 bool skip_mip_address_reloc
;
254 struct r600_rasterizer_state
{
255 struct r600_command_buffer buffer
;
258 unsigned sprite_coord_enable
;
259 unsigned clip_plane_enable
;
260 unsigned pa_sc_line_stipple
;
261 unsigned pa_cl_clip_cntl
;
266 bool multisample_enable
;
269 struct r600_poly_offset_state
{
270 struct r600_atom atom
;
271 enum pipe_format zs_format
;
276 struct r600_blend_state
{
277 struct r600_command_buffer buffer
;
278 struct r600_command_buffer buffer_no_blend
;
279 unsigned cb_target_mask
;
280 unsigned cb_color_control
;
281 unsigned cb_color_control_no_blend
;
286 struct r600_dsa_state
{
287 struct r600_command_buffer buffer
;
291 unsigned sx_alpha_test_control
;
294 struct r600_pipe_shader
;
296 struct r600_pipe_shader_selector
{
297 struct r600_pipe_shader
*current
;
299 struct tgsi_token
*tokens
;
300 struct pipe_stream_output_info so
;
302 unsigned num_shaders
;
304 /* PIPE_SHADER_[VERTEX|FRAGMENT|...] */
307 unsigned nr_ps_max_color_exports
;
310 struct r600_pipe_sampler_state
{
311 uint32_t tex_sampler_words
[3];
312 union pipe_color_union border_color
;
313 bool border_color_use
;
314 bool seamless_cube_map
;
317 /* needed for blitter save */
318 #define NUM_TEX_UNITS 16
320 struct r600_seamless_cube_map
{
321 struct r600_atom atom
;
325 struct r600_samplerview_state
{
326 struct r600_atom atom
;
327 struct r600_pipe_sampler_view
*views
[NUM_TEX_UNITS
];
328 uint32_t enabled_mask
;
330 uint32_t compressed_depthtex_mask
; /* which textures are depth */
331 uint32_t compressed_colortex_mask
;
332 boolean dirty_txq_constants
;
335 struct r600_sampler_states
{
336 struct r600_atom atom
;
337 struct r600_pipe_sampler_state
*states
[NUM_TEX_UNITS
];
338 uint32_t enabled_mask
;
340 uint32_t has_bordercolor_mask
; /* which states contain the border color */
343 struct r600_textures_info
{
344 struct r600_samplerview_state views
;
345 struct r600_sampler_states states
;
346 bool is_array_sampler
[NUM_TEX_UNITS
];
348 /* cube array txq workaround */
349 uint32_t *txq_constants
;
353 struct pipe_reference reference
;
354 unsigned index
; /* in the shared bo */
355 struct r600_resource
*sleep_bo
;
356 struct list_head head
;
359 #define FENCE_BLOCK_SIZE 16
361 struct r600_fence_block
{
362 struct r600_fence fences
[FENCE_BLOCK_SIZE
];
363 struct list_head head
;
366 #define R600_CONSTANT_ARRAY_SIZE 256
367 #define R600_RESOURCE_ARRAY_SIZE 160
369 struct r600_constbuf_state
371 struct r600_atom atom
;
372 struct pipe_constant_buffer cb
[PIPE_MAX_CONSTANT_BUFFERS
];
373 uint32_t enabled_mask
;
377 struct r600_vertexbuf_state
379 struct r600_atom atom
;
380 struct pipe_vertex_buffer vb
[PIPE_MAX_ATTRIBS
];
381 uint32_t enabled_mask
; /* non-NULL buffers */
385 /* CSO (constant state object, in other words, immutable state). */
386 struct r600_cso_state
388 struct r600_atom atom
;
389 void *cso
; /* e.g. r600_blend_state */
390 struct r600_command_buffer
*cb
;
393 struct r600_scissor_state
395 struct r600_atom atom
;
396 struct pipe_scissor_state scissor
;
397 bool enable
; /* r6xx only */
400 struct r600_fetch_shader
{
401 struct r600_resource
*buffer
;
405 struct r600_context
{
406 struct pipe_context context
;
407 struct r600_screen
*screen
;
408 struct radeon_winsys
*ws
;
409 struct radeon_winsys_cs
*cs
;
410 struct blitter_context
*blitter
;
411 struct u_upload_mgr
*uploader
;
412 struct u_suballocator
*allocator_so_filled_size
;
413 struct u_suballocator
*allocator_fetch_shader
;
414 struct util_slab_mempool pool_transfers
;
417 enum radeon_family family
;
418 enum chip_class chip_class
;
419 boolean has_vertex_cache
;
420 boolean keep_tiling_flags
;
421 unsigned default_ps_gprs
, default_vs_gprs
;
422 unsigned r6xx_num_clause_temp_gprs
;
423 unsigned backend_mask
;
424 unsigned max_db
; /* for OQ */
426 /* Miscellaneous state objects. */
427 void *custom_dsa_flush
;
428 void *custom_blend_resolve
;
429 void *custom_blend_decompress
;
430 void *custom_blend_fmask_decompress
;
431 /* With rasterizer discard, there doesn't have to be a pixel shader.
432 * In that case, we bind this one: */
433 void *dummy_pixel_shader
;
434 /* These dummy CMASK and FMASK buffers are used to get around the R6xx hardware
435 * bug where valid CMASK and FMASK are required to be present to avoid
436 * a hardlock in certain operations but aren't actually used
437 * for anything useful. */
438 struct r600_resource
*dummy_fmask
;
439 struct r600_resource
*dummy_cmask
;
441 /* State binding slots are here. */
442 struct r600_atom
*atoms
[R600_NUM_ATOMS
];
443 /* States for CS initialization. */
444 struct r600_command_buffer start_cs_cmd
; /* invariant state mostly */
445 /** Compute specific registers initializations. The start_cs_cmd atom
446 * must be emitted before start_compute_cs_cmd. */
447 struct r600_command_buffer start_compute_cs_cmd
;
448 /* Register states. */
449 struct r600_alphatest_state alphatest_state
;
450 struct r600_cso_state blend_state
;
451 struct r600_blend_color blend_color
;
452 struct r600_cb_misc_state cb_misc_state
;
453 struct r600_clip_misc_state clip_misc_state
;
454 struct r600_clip_state clip_state
;
455 struct r600_db_misc_state db_misc_state
;
456 struct r600_db_state db_state
;
457 struct r600_cso_state dsa_state
;
458 struct r600_framebuffer framebuffer
;
459 struct r600_poly_offset_state poly_offset_state
;
460 struct r600_cso_state rasterizer_state
;
461 struct r600_sample_mask sample_mask
;
462 struct r600_scissor_state scissor
;
463 struct r600_seamless_cube_map seamless_cube_map
;
464 struct r600_config_state config_state
;
465 struct r600_stencil_ref_state stencil_ref
;
466 struct r600_vgt_state vgt_state
;
467 struct r600_vgt2_state vgt2_state
;
468 struct r600_viewport_state viewport
;
469 /* Shaders and shader resources. */
470 struct r600_cso_state vertex_fetch_shader
;
471 struct r600_cs_shader_state cs_shader_state
;
472 struct r600_constbuf_state constbuf_state
[PIPE_SHADER_TYPES
];
473 struct r600_textures_info samplers
[PIPE_SHADER_TYPES
];
474 /** Vertex buffers for fetch shaders */
475 struct r600_vertexbuf_state vertex_buffer_state
;
476 /** Vertex buffers for compute shaders */
477 struct r600_vertexbuf_state cs_vertex_buffer_state
;
479 /* Additional context states. */
481 unsigned compute_cb_target_mask
;
482 struct r600_pipe_shader_selector
*ps_shader
;
483 struct r600_pipe_shader_selector
*vs_shader
;
484 struct r600_rasterizer_state
*rasterizer
;
486 bool force_blend_disable
;
487 boolean dual_src_blend
;
490 struct pipe_index_buffer index_buffer
;
492 /* Last draw state (-1 = unset). */
493 int last_primitive_type
; /* Last primitive type used in draw_vbo. */
494 int last_start_instance
;
497 /* The list of active queries. Only one query of each type can be active. */
498 int num_occlusion_queries
;
499 /* Keep track of non-timer queries, because they should be suspended
500 * during context flushing.
501 * The timer queries (TIME_ELAPSED) shouldn't be suspended. */
502 struct list_head active_nontimer_queries
;
503 unsigned num_cs_dw_nontimer_queries_suspend
;
504 /* If queries have been suspended. */
505 bool nontimer_queries_suspended
;
507 /* Render condition. */
508 struct pipe_query
*current_render_cond
;
509 unsigned current_render_cond_mode
;
510 boolean predicate_drawing
;
512 /* Streamout state. */
513 unsigned num_cs_dw_streamout_end
;
514 unsigned num_so_targets
;
515 struct r600_so_target
*so_targets
[PIPE_MAX_SO_BUFFERS
];
516 boolean streamout_start
;
517 unsigned streamout_append_bitmask
;
518 bool streamout_suspended
;
520 /* Deprecated state management. */
521 struct r600_range
*range
;
523 struct r600_block
**blocks
;
524 struct list_head dirty
;
525 struct list_head enable_list
;
526 unsigned pm4_dirty_cdwords
;
529 static INLINE
void r600_emit_command_buffer(struct radeon_winsys_cs
*cs
,
530 struct r600_command_buffer
*cb
)
532 assert(cs
->cdw
+ cb
->num_dw
<= RADEON_MAX_CMDBUF_DWORDS
);
533 memcpy(cs
->buf
+ cs
->cdw
, cb
->buf
, 4 * cb
->num_dw
);
534 cs
->cdw
+= cb
->num_dw
;
538 void r600_trace_emit(struct r600_context
*rctx
);
541 static INLINE
void r600_emit_atom(struct r600_context
*rctx
, struct r600_atom
*atom
)
543 atom
->emit(rctx
, atom
);
546 if (rctx
->screen
->trace_bo
) {
547 r600_trace_emit(rctx
);
552 static INLINE
void r600_set_cso_state(struct r600_cso_state
*state
, void *cso
)
555 state
->atom
.dirty
= cso
!= NULL
;
558 static INLINE
void r600_set_cso_state_with_cb(struct r600_cso_state
*state
, void *cso
,
559 struct r600_command_buffer
*cb
)
562 state
->atom
.num_dw
= cb
->num_dw
;
563 r600_set_cso_state(state
, cso
);
566 /* evergreen_state.c */
567 struct pipe_sampler_view
*
568 evergreen_create_sampler_view_custom(struct pipe_context
*ctx
,
569 struct pipe_resource
*texture
,
570 const struct pipe_sampler_view
*state
,
571 unsigned width0
, unsigned height0
);
572 void evergreen_init_common_regs(struct r600_command_buffer
*cb
,
573 enum chip_class ctx_chip_class
,
574 enum radeon_family ctx_family
,
576 void cayman_init_common_regs(struct r600_command_buffer
*cb
,
577 enum chip_class ctx_chip_class
,
578 enum radeon_family ctx_family
,
581 void evergreen_init_state_functions(struct r600_context
*rctx
);
582 void evergreen_init_atom_start_cs(struct r600_context
*rctx
);
583 void evergreen_pipe_shader_ps(struct pipe_context
*ctx
, struct r600_pipe_shader
*shader
);
584 void evergreen_pipe_shader_vs(struct pipe_context
*ctx
, struct r600_pipe_shader
*shader
);
585 void *evergreen_create_db_flush_dsa(struct r600_context
*rctx
);
586 void *evergreen_create_resolve_blend(struct r600_context
*rctx
);
587 void *evergreen_create_decompress_blend(struct r600_context
*rctx
);
588 void *evergreen_create_fmask_decompress_blend(struct r600_context
*rctx
);
589 boolean
evergreen_is_format_supported(struct pipe_screen
*screen
,
590 enum pipe_format format
,
591 enum pipe_texture_target target
,
592 unsigned sample_count
,
594 void evergreen_init_color_surface(struct r600_context
*rctx
,
595 struct r600_surface
*surf
);
596 void evergreen_init_color_surface_rat(struct r600_context
*rctx
,
597 struct r600_surface
*surf
);
598 void evergreen_update_db_shader_control(struct r600_context
* rctx
);
601 void r600_copy_buffer(struct pipe_context
*ctx
, struct pipe_resource
*dst
, unsigned dstx
,
602 struct pipe_resource
*src
, const struct pipe_box
*src_box
);
603 void r600_init_blit_functions(struct r600_context
*rctx
);
604 void r600_blit_decompress_depth(struct pipe_context
*ctx
,
605 struct r600_texture
*texture
,
606 struct r600_texture
*staging
,
607 unsigned first_level
, unsigned last_level
,
608 unsigned first_layer
, unsigned last_layer
,
609 unsigned first_sample
, unsigned last_sample
);
610 void r600_decompress_depth_textures(struct r600_context
*rctx
,
611 struct r600_samplerview_state
*textures
);
612 void r600_decompress_color_textures(struct r600_context
*rctx
,
613 struct r600_samplerview_state
*textures
);
616 bool r600_init_resource(struct r600_screen
*rscreen
,
617 struct r600_resource
*res
,
618 unsigned size
, unsigned alignment
,
619 bool use_reusable_pool
, unsigned usage
);
620 struct pipe_resource
*r600_buffer_create(struct pipe_screen
*screen
,
621 const struct pipe_resource
*templ
,
625 void r600_flush(struct pipe_context
*ctx
, struct pipe_fence_handle
**fence
,
629 void r600_init_query_functions(struct r600_context
*rctx
);
630 void r600_suspend_nontimer_queries(struct r600_context
*ctx
);
631 void r600_resume_nontimer_queries(struct r600_context
*ctx
);
633 /* r600_resource.c */
634 void r600_init_context_resource_functions(struct r600_context
*r600
);
637 int r600_pipe_shader_create(struct pipe_context
*ctx
,
638 struct r600_pipe_shader
*shader
,
639 struct r600_shader_key key
);
641 int r600_compute_shader_create(struct pipe_context
* ctx
,
642 LLVMModuleRef mod
, struct r600_bytecode
* bytecode
);
644 void r600_pipe_shader_destroy(struct pipe_context
*ctx
, struct r600_pipe_shader
*shader
);
647 struct pipe_sampler_view
*
648 r600_create_sampler_view_custom(struct pipe_context
*ctx
,
649 struct pipe_resource
*texture
,
650 const struct pipe_sampler_view
*state
,
651 unsigned width_first_level
, unsigned height_first_level
);
652 void r600_init_state_functions(struct r600_context
*rctx
);
653 void r600_init_atom_start_cs(struct r600_context
*rctx
);
654 void r600_pipe_shader_ps(struct pipe_context
*ctx
, struct r600_pipe_shader
*shader
);
655 void r600_pipe_shader_vs(struct pipe_context
*ctx
, struct r600_pipe_shader
*shader
);
656 void *r600_create_db_flush_dsa(struct r600_context
*rctx
);
657 void *r600_create_resolve_blend(struct r600_context
*rctx
);
658 void *r700_create_resolve_blend(struct r600_context
*rctx
);
659 void *r600_create_decompress_blend(struct r600_context
*rctx
);
660 bool r600_adjust_gprs(struct r600_context
*rctx
);
661 boolean
r600_is_format_supported(struct pipe_screen
*screen
,
662 enum pipe_format format
,
663 enum pipe_texture_target target
,
664 unsigned sample_count
,
666 void r600_update_db_shader_control(struct r600_context
* rctx
);
669 void r600_init_screen_texture_functions(struct pipe_screen
*screen
);
670 void r600_init_surface_functions(struct r600_context
*r600
);
671 uint32_t r600_translate_texformat(struct pipe_screen
*screen
, enum pipe_format format
,
672 const unsigned char *swizzle_view
,
673 uint32_t *word4_p
, uint32_t *yuv_format_p
);
674 unsigned r600_texture_get_offset(struct r600_texture
*rtex
,
675 unsigned level
, unsigned layer
);
676 struct pipe_surface
*r600_create_surface_custom(struct pipe_context
*pipe
,
677 struct pipe_resource
*texture
,
678 const struct pipe_surface
*templ
,
679 unsigned width
, unsigned height
);
681 /* r600_state_common.c */
682 void r600_init_common_state_functions(struct r600_context
*rctx
);
683 void r600_emit_cso_state(struct r600_context
*rctx
, struct r600_atom
*atom
);
684 void r600_emit_alphatest_state(struct r600_context
*rctx
, struct r600_atom
*atom
);
685 void r600_emit_blend_color(struct r600_context
*rctx
, struct r600_atom
*atom
);
686 void r600_emit_vgt_state(struct r600_context
*rctx
, struct r600_atom
*atom
);
687 void r600_emit_vgt2_state(struct r600_context
*rctx
, struct r600_atom
*atom
);
688 void r600_emit_clip_misc_state(struct r600_context
*rctx
, struct r600_atom
*atom
);
689 void r600_emit_stencil_ref(struct r600_context
*rctx
, struct r600_atom
*atom
);
690 void r600_emit_viewport_state(struct r600_context
*rctx
, struct r600_atom
*atom
);
691 void r600_init_atom(struct r600_context
*rctx
, struct r600_atom
*atom
, unsigned id
,
692 void (*emit
)(struct r600_context
*ctx
, struct r600_atom
*state
),
694 void r600_vertex_buffers_dirty(struct r600_context
*rctx
);
695 void r600_sampler_views_dirty(struct r600_context
*rctx
,
696 struct r600_samplerview_state
*state
);
697 void r600_sampler_states_dirty(struct r600_context
*rctx
,
698 struct r600_sampler_states
*state
);
699 void r600_constant_buffers_dirty(struct r600_context
*rctx
, struct r600_constbuf_state
*state
);
700 void r600_draw_rectangle(struct blitter_context
*blitter
,
701 int x1
, int y1
, int x2
, int y2
, float depth
,
702 enum blitter_attrib_type type
, const union pipe_color_union
*attrib
);
703 uint32_t r600_translate_stencil_op(int s_op
);
704 uint32_t r600_translate_fill(uint32_t func
);
705 unsigned r600_tex_wrap(unsigned wrap
);
706 unsigned r600_tex_filter(unsigned filter
);
707 unsigned r600_tex_mipfilter(unsigned filter
);
708 unsigned r600_tex_compare(unsigned compare
);
709 bool sampler_state_needs_border_color(const struct pipe_sampler_state
*state
);
712 * Helpers for building command buffers
715 #define PKT3_SET_CONFIG_REG 0x68
716 #define PKT3_SET_CONTEXT_REG 0x69
717 #define PKT3_SET_CTL_CONST 0x6F
718 #define PKT3_SET_LOOP_CONST 0x6C
720 #define R600_CONFIG_REG_OFFSET 0x08000
721 #define R600_CONTEXT_REG_OFFSET 0x28000
722 #define R600_CTL_CONST_OFFSET 0x3CFF0
723 #define R600_LOOP_CONST_OFFSET 0X0003E200
724 #define EG_LOOP_CONST_OFFSET 0x0003A200
726 #define PKT_TYPE_S(x) (((x) & 0x3) << 30)
727 #define PKT_COUNT_S(x) (((x) & 0x3FFF) << 16)
728 #define PKT3_IT_OPCODE_S(x) (((x) & 0xFF) << 8)
729 #define PKT3_PREDICATE(x) (((x) >> 0) & 0x1)
730 #define PKT3(op, count, predicate) (PKT_TYPE_S(3) | PKT_COUNT_S(count) | PKT3_IT_OPCODE_S(op) | PKT3_PREDICATE(predicate))
732 #define RADEON_CP_PACKET3_COMPUTE_MODE 0x00000002
734 /*Evergreen Compute packet3*/
735 #define PKT3C(op, count, predicate) (PKT_TYPE_S(3) | PKT3_IT_OPCODE_S(op) | PKT_COUNT_S(count) | PKT3_PREDICATE(predicate) | RADEON_CP_PACKET3_COMPUTE_MODE)
737 static INLINE
void r600_store_value(struct r600_command_buffer
*cb
, unsigned value
)
739 cb
->buf
[cb
->num_dw
++] = value
;
742 static INLINE
void r600_store_config_reg_seq(struct r600_command_buffer
*cb
, unsigned reg
, unsigned num
)
744 assert(reg
< R600_CONTEXT_REG_OFFSET
);
745 assert(cb
->num_dw
+2+num
<= cb
->max_num_dw
);
746 cb
->buf
[cb
->num_dw
++] = PKT3(PKT3_SET_CONFIG_REG
, num
, 0);
747 cb
->buf
[cb
->num_dw
++] = (reg
- R600_CONFIG_REG_OFFSET
) >> 2;
751 * Needs cb->pkt_flags set to RADEON_CP_PACKET3_COMPUTE_MODE for compute
754 static INLINE
void r600_store_context_reg_seq(struct r600_command_buffer
*cb
, unsigned reg
, unsigned num
)
756 assert(reg
>= R600_CONTEXT_REG_OFFSET
&& reg
< R600_CTL_CONST_OFFSET
);
757 assert(cb
->num_dw
+2+num
<= cb
->max_num_dw
);
758 cb
->buf
[cb
->num_dw
++] = PKT3(PKT3_SET_CONTEXT_REG
, num
, 0) | cb
->pkt_flags
;
759 cb
->buf
[cb
->num_dw
++] = (reg
- R600_CONTEXT_REG_OFFSET
) >> 2;
763 * Needs cb->pkt_flags set to RADEON_CP_PACKET3_COMPUTE_MODE for compute
766 static INLINE
void r600_store_ctl_const_seq(struct r600_command_buffer
*cb
, unsigned reg
, unsigned num
)
768 assert(reg
>= R600_CTL_CONST_OFFSET
);
769 assert(cb
->num_dw
+2+num
<= cb
->max_num_dw
);
770 cb
->buf
[cb
->num_dw
++] = PKT3(PKT3_SET_CTL_CONST
, num
, 0) | cb
->pkt_flags
;
771 cb
->buf
[cb
->num_dw
++] = (reg
- R600_CTL_CONST_OFFSET
) >> 2;
774 static INLINE
void r600_store_loop_const_seq(struct r600_command_buffer
*cb
, unsigned reg
, unsigned num
)
776 assert(reg
>= R600_LOOP_CONST_OFFSET
);
777 assert(cb
->num_dw
+2+num
<= cb
->max_num_dw
);
778 cb
->buf
[cb
->num_dw
++] = PKT3(PKT3_SET_LOOP_CONST
, num
, 0);
779 cb
->buf
[cb
->num_dw
++] = (reg
- R600_LOOP_CONST_OFFSET
) >> 2;
783 * Needs cb->pkt_flags set to RADEON_CP_PACKET3_COMPUTE_MODE for compute
786 static INLINE
void eg_store_loop_const_seq(struct r600_command_buffer
*cb
, unsigned reg
, unsigned num
)
788 assert(reg
>= EG_LOOP_CONST_OFFSET
);
789 assert(cb
->num_dw
+2+num
<= cb
->max_num_dw
);
790 cb
->buf
[cb
->num_dw
++] = PKT3(PKT3_SET_LOOP_CONST
, num
, 0) | cb
->pkt_flags
;
791 cb
->buf
[cb
->num_dw
++] = (reg
- EG_LOOP_CONST_OFFSET
) >> 2;
794 static INLINE
void r600_store_config_reg(struct r600_command_buffer
*cb
, unsigned reg
, unsigned value
)
796 r600_store_config_reg_seq(cb
, reg
, 1);
797 r600_store_value(cb
, value
);
800 static INLINE
void r600_store_context_reg(struct r600_command_buffer
*cb
, unsigned reg
, unsigned value
)
802 r600_store_context_reg_seq(cb
, reg
, 1);
803 r600_store_value(cb
, value
);
806 static INLINE
void r600_store_ctl_const(struct r600_command_buffer
*cb
, unsigned reg
, unsigned value
)
808 r600_store_ctl_const_seq(cb
, reg
, 1);
809 r600_store_value(cb
, value
);
812 static INLINE
void r600_store_loop_const(struct r600_command_buffer
*cb
, unsigned reg
, unsigned value
)
814 r600_store_loop_const_seq(cb
, reg
, 1);
815 r600_store_value(cb
, value
);
818 static INLINE
void eg_store_loop_const(struct r600_command_buffer
*cb
, unsigned reg
, unsigned value
)
820 eg_store_loop_const_seq(cb
, reg
, 1);
821 r600_store_value(cb
, value
);
824 void r600_init_command_buffer(struct r600_command_buffer
*cb
, unsigned num_dw
);
825 void r600_release_command_buffer(struct r600_command_buffer
*cb
);
828 * Helpers for emitting state into a command stream directly.
831 static INLINE
unsigned r600_context_bo_reloc(struct r600_context
*ctx
, struct r600_resource
*rbo
,
832 enum radeon_bo_usage usage
)
835 return ctx
->ws
->cs_add_reloc(ctx
->cs
, rbo
->cs_buf
, usage
, rbo
->domains
) * 4;
838 static INLINE
void r600_write_value(struct radeon_winsys_cs
*cs
, unsigned value
)
840 cs
->buf
[cs
->cdw
++] = value
;
843 static INLINE
void r600_write_array(struct radeon_winsys_cs
*cs
, unsigned num
, unsigned *ptr
)
845 assert(cs
->cdw
+num
<= RADEON_MAX_CMDBUF_DWORDS
);
846 memcpy(&cs
->buf
[cs
->cdw
], ptr
, num
* sizeof(ptr
[0]));
850 static INLINE
void r600_write_config_reg_seq(struct radeon_winsys_cs
*cs
, unsigned reg
, unsigned num
)
852 assert(reg
< R600_CONTEXT_REG_OFFSET
);
853 assert(cs
->cdw
+2+num
<= RADEON_MAX_CMDBUF_DWORDS
);
854 cs
->buf
[cs
->cdw
++] = PKT3(PKT3_SET_CONFIG_REG
, num
, 0);
855 cs
->buf
[cs
->cdw
++] = (reg
- R600_CONFIG_REG_OFFSET
) >> 2;
858 static INLINE
void r600_write_context_reg_seq(struct radeon_winsys_cs
*cs
, unsigned reg
, unsigned num
)
860 assert(reg
>= R600_CONTEXT_REG_OFFSET
&& reg
< R600_CTL_CONST_OFFSET
);
861 assert(cs
->cdw
+2+num
<= RADEON_MAX_CMDBUF_DWORDS
);
862 cs
->buf
[cs
->cdw
++] = PKT3(PKT3_SET_CONTEXT_REG
, num
, 0);
863 cs
->buf
[cs
->cdw
++] = (reg
- R600_CONTEXT_REG_OFFSET
) >> 2;
866 static INLINE
void r600_write_compute_context_reg_seq(struct radeon_winsys_cs
*cs
, unsigned reg
, unsigned num
)
868 r600_write_context_reg_seq(cs
, reg
, num
);
869 /* Set the compute bit on the packet header */
870 cs
->buf
[cs
->cdw
- 2] |= RADEON_CP_PACKET3_COMPUTE_MODE
;
873 static INLINE
void r600_write_ctl_const_seq(struct radeon_winsys_cs
*cs
, unsigned reg
, unsigned num
)
875 assert(reg
>= R600_CTL_CONST_OFFSET
);
876 assert(cs
->cdw
+2+num
<= RADEON_MAX_CMDBUF_DWORDS
);
877 cs
->buf
[cs
->cdw
++] = PKT3(PKT3_SET_CTL_CONST
, num
, 0);
878 cs
->buf
[cs
->cdw
++] = (reg
- R600_CTL_CONST_OFFSET
) >> 2;
881 static INLINE
void r600_write_config_reg(struct radeon_winsys_cs
*cs
, unsigned reg
, unsigned value
)
883 r600_write_config_reg_seq(cs
, reg
, 1);
884 r600_write_value(cs
, value
);
887 static INLINE
void r600_write_context_reg(struct radeon_winsys_cs
*cs
, unsigned reg
, unsigned value
)
889 r600_write_context_reg_seq(cs
, reg
, 1);
890 r600_write_value(cs
, value
);
893 static INLINE
void r600_write_compute_context_reg(struct radeon_winsys_cs
*cs
, unsigned reg
, unsigned value
)
895 r600_write_compute_context_reg_seq(cs
, reg
, 1);
896 r600_write_value(cs
, value
);
899 static INLINE
void r600_write_ctl_const(struct radeon_winsys_cs
*cs
, unsigned reg
, unsigned value
)
901 r600_write_ctl_const_seq(cs
, reg
, 1);
902 r600_write_value(cs
, value
);
/* Convert a float to unsigned fixed point with 'frac_bits' fractional
 * bits, i.e. return value * 2^frac_bits truncated to an integer.
 *
 * Fix: the original computed the scale with "1 << frac_bits", which
 * left-shifts a signed int and is undefined behavior for
 * frac_bits >= 31; using the unsigned literal 1u makes frac_bits == 31
 * well-defined. frac_bits must still be < 32, and 'value' is expected
 * to be non-negative (float -> uint32_t conversion of a negative value
 * is undefined) — same contract as before. */
static inline uint32_t S_FIXED(float value, uint32_t frac_bits)
{
	return value * (1u << frac_bits);
}
912 #define ALIGN_DIVUP(x, y) (((x) + (y) - 1) / (y))
914 static inline unsigned r600_tex_aniso_filter(unsigned filter
)
916 if (filter
<= 1) return 0;
917 if (filter
<= 2) return 1;
918 if (filter
<= 4) return 2;
919 if (filter
<= 8) return 3;
923 /* 12.4 fixed-point */
924 static INLINE
unsigned r600_pack_float_12p4(float x
)
927 x
>= 4096 ? 0xffff : x
* 16;
930 static INLINE
uint64_t r600_resource_va(struct pipe_screen
*screen
, struct pipe_resource
*resource
)
932 struct r600_screen
*rscreen
= (struct r600_screen
*)screen
;
933 struct r600_resource
*rresource
= (struct r600_resource
*)resource
;
935 return rscreen
->ws
->buffer_get_virtual_address(rresource
->cs_buf
);
938 static INLINE
unsigned u_max_layer(struct pipe_resource
*r
, unsigned level
)
941 case PIPE_TEXTURE_CUBE
:
943 case PIPE_TEXTURE_3D
:
944 return u_minify(r
->depth0
, level
) - 1;
945 case PIPE_TEXTURE_1D_ARRAY
:
946 case PIPE_TEXTURE_2D_ARRAY
:
947 case PIPE_TEXTURE_CUBE_ARRAY
:
948 return r
->array_size
- 1;