/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 */
#include "r600_formats.h"
#include "r600_shader.h"

#include "util/u_format_s3tc.h"
#include "util/u_index_modify.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "util/u_math.h"
#include "tgsi/tgsi_parse.h"
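/* State management shared by the r600 and evergreen backends: command
 * buffers, state atoms, CSO binding, shader variant selection and the
 * common draw path. */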
void r600_init_command_buffer(struct r600_command_buffer *cb, unsigned num_dw)
{
	cb->buf = CALLOC(1, 4 * num_dw);
	cb->max_num_dw = num_dw;
}

void r600_release_command_buffer(struct r600_command_buffer *cb)
{
	FREE(cb->buf);
}
void r600_init_atom(struct r600_context *rctx,
		    struct r600_atom *atom,
		    unsigned id,
		    void (*emit)(struct r600_context *ctx, struct r600_atom *state),
		    unsigned num_dw)
{
	assert(id < R600_NUM_ATOMS);
	assert(rctx->atoms[id] == NULL);
	rctx->atoms[id] = atom;
	atom->emit = (void*)emit;
	atom->num_dw = num_dw;
}
void r600_emit_cso_state(struct r600_context *rctx, struct r600_atom *atom)
{
	r600_emit_command_buffer(rctx->b.rings.gfx.cs, ((struct r600_cso_state*)atom)->cb);
}
void r600_emit_alphatest_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
	struct r600_alphatest_state *a = (struct r600_alphatest_state*)atom;
	unsigned alpha_ref = a->sx_alpha_ref;

	if (rctx->b.chip_class >= EVERGREEN && a->cb0_export_16bpc) {
		alpha_ref &= ~0x1FFF;
	}

	r600_write_context_reg(cs, R_028410_SX_ALPHA_TEST_CONTROL,
			       a->sx_alpha_test_control |
			       S_028410_ALPHA_TEST_BYPASS(a->bypass));
	r600_write_context_reg(cs, R_028438_SX_ALPHA_REF, alpha_ref);
}
static void r600_texture_barrier(struct pipe_context *ctx)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
			 R600_CONTEXT_FLUSH_AND_INV_CB |
			 R600_CONTEXT_FLUSH_AND_INV |
			 R600_CONTEXT_WAIT_3D_IDLE;
}
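/* Translate a gallium PIPE_PRIM_* primitive type into the matching
 * VGT_PRIMITIVE_TYPE (DI_PT_*) value; the table below is indexed by the
 * gallium enum. */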
static unsigned r600_conv_pipe_prim(unsigned prim)
{
	static const unsigned prim_conv[] = {
		V_008958_DI_PT_POINTLIST,
		V_008958_DI_PT_LINELIST,
		V_008958_DI_PT_LINELOOP,
		V_008958_DI_PT_LINESTRIP,
		V_008958_DI_PT_TRILIST,
		V_008958_DI_PT_TRISTRIP,
		V_008958_DI_PT_TRIFAN,
		V_008958_DI_PT_QUADLIST,
		V_008958_DI_PT_QUADSTRIP,
		V_008958_DI_PT_POLYGON,
		V_008958_DI_PT_LINELIST_ADJ,
		V_008958_DI_PT_LINESTRIP_ADJ,
		V_008958_DI_PT_TRILIST_ADJ,
		V_008958_DI_PT_TRISTRIP_ADJ,
		V_008958_DI_PT_RECTLIST
	};
	return prim_conv[prim];
}
/* common state between evergreen and r600 */

static void r600_bind_blend_state_internal(struct r600_context *rctx,
					   struct r600_blend_state *blend, bool blend_disable)
{
	unsigned color_control;
	bool update_cb = false;

	rctx->alpha_to_one = blend->alpha_to_one;
	rctx->dual_src_blend = blend->dual_src_blend;

	if (!blend_disable) {
		r600_set_cso_state_with_cb(&rctx->blend_state, blend, &blend->buffer);
		color_control = blend->cb_color_control;
	} else {
		/* Blending is disabled. */
		r600_set_cso_state_with_cb(&rctx->blend_state, blend, &blend->buffer_no_blend);
		color_control = blend->cb_color_control_no_blend;
	}

	/* Update derived states. */
	if (rctx->cb_misc_state.blend_colormask != blend->cb_target_mask) {
		rctx->cb_misc_state.blend_colormask = blend->cb_target_mask;
		update_cb = true;
	}
	if (rctx->b.chip_class <= R700 &&
	    rctx->cb_misc_state.cb_color_control != color_control) {
		rctx->cb_misc_state.cb_color_control = color_control;
		update_cb = true;
	}
	if (rctx->cb_misc_state.dual_src_blend != blend->dual_src_blend) {
		rctx->cb_misc_state.dual_src_blend = blend->dual_src_blend;
		update_cb = true;
	}
	if (update_cb) {
		rctx->cb_misc_state.atom.dirty = true;
	}
}
static void r600_bind_blend_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_blend_state *blend = (struct r600_blend_state *)state;

	if (!blend) {
		r600_set_cso_state_with_cb(&rctx->blend_state, NULL, NULL);
		return;
	}

	r600_bind_blend_state_internal(rctx, blend, rctx->force_blend_disable);
}
static void r600_set_blend_color(struct pipe_context *ctx,
				 const struct pipe_blend_color *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->blend_color.state = *state;
	rctx->blend_color.atom.dirty = true;
}
void r600_emit_blend_color(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
	struct pipe_blend_color *state = &rctx->blend_color.state;

	r600_write_context_reg_seq(cs, R_028414_CB_BLEND_RED, 4);
	radeon_emit(cs, fui(state->color[0])); /* R_028414_CB_BLEND_RED */
	radeon_emit(cs, fui(state->color[1])); /* R_028418_CB_BLEND_GREEN */
	radeon_emit(cs, fui(state->color[2])); /* R_02841C_CB_BLEND_BLUE */
	radeon_emit(cs, fui(state->color[3])); /* R_028420_CB_BLEND_ALPHA */
}
void r600_emit_vgt_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
	struct r600_vgt_state *a = (struct r600_vgt_state *)atom;

	r600_write_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, a->vgt_multi_prim_ib_reset_en);
	r600_write_context_reg_seq(cs, R_028408_VGT_INDX_OFFSET, 2);
	radeon_emit(cs, a->vgt_indx_offset); /* R_028408_VGT_INDX_OFFSET */
	radeon_emit(cs, a->vgt_multi_prim_ib_reset_indx); /* R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX */
}
static void r600_set_clip_state(struct pipe_context *ctx,
				const struct pipe_clip_state *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct pipe_constant_buffer cb;

	rctx->clip_state.state = *state;
	rctx->clip_state.atom.dirty = true;

	cb.buffer = NULL;
	cb.user_buffer = state->ucp;
	cb.buffer_offset = 0;
	cb.buffer_size = 4*4*8;
	ctx->set_constant_buffer(ctx, PIPE_SHADER_VERTEX, R600_UCP_CONST_BUFFER, &cb);
	pipe_resource_reference(&cb.buffer, NULL);
}
static void r600_set_stencil_ref(struct pipe_context *ctx,
				 const struct r600_stencil_ref *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->stencil_ref.state = *state;
	rctx->stencil_ref.atom.dirty = true;
}
void r600_emit_stencil_ref(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
	struct r600_stencil_ref_state *a = (struct r600_stencil_ref_state *)atom;

	r600_write_context_reg_seq(cs, R_028430_DB_STENCILREFMASK, 2);
	radeon_emit(cs, /* R_028430_DB_STENCILREFMASK */
		    S_028430_STENCILREF(a->state.ref_value[0]) |
		    S_028430_STENCILMASK(a->state.valuemask[0]) |
		    S_028430_STENCILWRITEMASK(a->state.writemask[0]));
	radeon_emit(cs, /* R_028434_DB_STENCILREFMASK_BF */
		    S_028434_STENCILREF_BF(a->state.ref_value[1]) |
		    S_028434_STENCILMASK_BF(a->state.valuemask[1]) |
		    S_028434_STENCILWRITEMASK_BF(a->state.writemask[1]));
}
static void r600_set_pipe_stencil_ref(struct pipe_context *ctx,
				      const struct pipe_stencil_ref *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_dsa_state *dsa = (struct r600_dsa_state *)rctx->dsa_state.cso;
	struct r600_stencil_ref ref;

	rctx->stencil_ref.pipe_state = *state;

	if (!dsa)
		return;

	ref.ref_value[0] = state->ref_value[0];
	ref.ref_value[1] = state->ref_value[1];
	ref.valuemask[0] = dsa->valuemask[0];
	ref.valuemask[1] = dsa->valuemask[1];
	ref.writemask[0] = dsa->writemask[0];
	ref.writemask[1] = dsa->writemask[1];

	r600_set_stencil_ref(ctx, &ref);
}
static void r600_bind_dsa_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_dsa_state *dsa = state;
	struct r600_stencil_ref ref;

	if (!state) {
		r600_set_cso_state_with_cb(&rctx->dsa_state, NULL, NULL);
		return;
	}

	r600_set_cso_state_with_cb(&rctx->dsa_state, dsa, &dsa->buffer);

	ref.ref_value[0] = rctx->stencil_ref.pipe_state.ref_value[0];
	ref.ref_value[1] = rctx->stencil_ref.pipe_state.ref_value[1];
	ref.valuemask[0] = dsa->valuemask[0];
	ref.valuemask[1] = dsa->valuemask[1];
	ref.writemask[0] = dsa->writemask[0];
	ref.writemask[1] = dsa->writemask[1];
	if (rctx->zwritemask != dsa->zwritemask) {
		rctx->zwritemask = dsa->zwritemask;
		if (rctx->b.chip_class >= EVERGREEN) {
			/* Work around an issue when not writing to the zbuffer:
			 * we see lockups on Evergreen, so do not enable
			 * HyperZ when the zbuffer is not written. */
			rctx->db_misc_state.atom.dirty = true;
		}
	}

	r600_set_stencil_ref(ctx, &ref);

	/* Update alphatest state. */
	if (rctx->alphatest_state.sx_alpha_test_control != dsa->sx_alpha_test_control ||
	    rctx->alphatest_state.sx_alpha_ref != dsa->alpha_ref) {
		rctx->alphatest_state.sx_alpha_test_control = dsa->sx_alpha_test_control;
		rctx->alphatest_state.sx_alpha_ref = dsa->alpha_ref;
		rctx->alphatest_state.atom.dirty = true;
	}
}
static void r600_bind_rs_state(struct pipe_context *ctx, void *state)
{
	struct r600_rasterizer_state *rs = (struct r600_rasterizer_state *)state;
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (!state)
		return;

	rctx->rasterizer = rs;

	r600_set_cso_state_with_cb(&rctx->rasterizer_state, rs, &rs->buffer);

	if (rs->offset_enable &&
	    (rs->offset_units != rctx->poly_offset_state.offset_units ||
	     rs->offset_scale != rctx->poly_offset_state.offset_scale)) {
		rctx->poly_offset_state.offset_units = rs->offset_units;
		rctx->poly_offset_state.offset_scale = rs->offset_scale;
		rctx->poly_offset_state.atom.dirty = true;
	}

	/* Update clip_misc_state. */
	if (rctx->clip_misc_state.pa_cl_clip_cntl != rs->pa_cl_clip_cntl ||
	    rctx->clip_misc_state.clip_plane_enable != rs->clip_plane_enable) {
		rctx->clip_misc_state.pa_cl_clip_cntl = rs->pa_cl_clip_cntl;
		rctx->clip_misc_state.clip_plane_enable = rs->clip_plane_enable;
		rctx->clip_misc_state.atom.dirty = true;
	}

	/* Workaround for a missing scissor enable on r600. */
	if (rctx->b.chip_class == R600 &&
	    rs->scissor_enable != rctx->scissor[0].enable) {
		rctx->scissor[0].enable = rs->scissor_enable;
		rctx->scissor[0].atom.dirty = true;
	}

	/* Re-emit PA_SC_LINE_STIPPLE. */
	rctx->last_primitive_type = -1;
}
static void r600_delete_rs_state(struct pipe_context *ctx, void *state)
{
	struct r600_rasterizer_state *rs = (struct r600_rasterizer_state *)state;

	r600_release_command_buffer(&rs->buffer);
	FREE(rs);
}
static void r600_sampler_view_destroy(struct pipe_context *ctx,
				      struct pipe_sampler_view *state)
{
	struct r600_pipe_sampler_view *view = (struct r600_pipe_sampler_view *)state;

	if (view->tex_resource->gpu_address &&
	    view->tex_resource->b.b.target == PIPE_BUFFER)
		LIST_DELINIT(&view->list);

	pipe_resource_reference(&state->texture, NULL);
	FREE(view);
}
void r600_sampler_states_dirty(struct r600_context *rctx,
			       struct r600_sampler_states *state)
{
	if (state->dirty_mask) {
		if (state->dirty_mask & state->has_bordercolor_mask) {
			rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
		}
		state->atom.num_dw =
			util_bitcount(state->dirty_mask & state->has_bordercolor_mask) * 11 +
			util_bitcount(state->dirty_mask & ~state->has_bordercolor_mask) * 5;
		state->atom.dirty = true;
	}
}
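/* Sampler-state binding keeps per-unit bitmasks (enabled_mask, dirty_mask,
 * has_bordercolor_mask).  disable_mask below starts with a 1-bit for every
 * unit at index >= count; e.g. with count == 3, ~((1ull << 3) - 1) is
 * ...11111000, so units 3 and up get unbound. */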
static void r600_bind_sampler_states(struct pipe_context *pipe,
				     unsigned shader,
				     unsigned start,
				     unsigned count, void **states)
{
	struct r600_context *rctx = (struct r600_context *)pipe;
	struct r600_textures_info *dst = &rctx->samplers[shader];
	struct r600_pipe_sampler_state **rstates = (struct r600_pipe_sampler_state **)states;
	int seamless_cube_map = -1;
	unsigned i;
	/* This sets 1-bit for states with index >= count. */
	uint32_t disable_mask = ~((1ull << count) - 1);
	/* These are the new states set by this function. */
	uint32_t new_mask = 0;

	assert(start == 0); /* XXX fix below */

	if (shader != PIPE_SHADER_VERTEX &&
	    shader != PIPE_SHADER_FRAGMENT) {
		return;
	}

	for (i = 0; i < count; i++) {
		struct r600_pipe_sampler_state *rstate = rstates[i];

		if (rstate == dst->states.states[i]) {
			continue;
		}

		if (rstate) {
			if (rstate->border_color_use) {
				dst->states.has_bordercolor_mask |= 1 << i;
			} else {
				dst->states.has_bordercolor_mask &= ~(1 << i);
			}
			seamless_cube_map = rstate->seamless_cube_map;

			new_mask |= 1 << i;
		} else {
			disable_mask |= 1 << i;
		}
	}

	memcpy(dst->states.states, rstates, sizeof(void*) * count);
	memset(dst->states.states + count, 0, sizeof(void*) * (NUM_TEX_UNITS - count));

	dst->states.enabled_mask &= ~disable_mask;
	dst->states.dirty_mask &= dst->states.enabled_mask;
	dst->states.enabled_mask |= new_mask;
	dst->states.dirty_mask |= new_mask;
	dst->states.has_bordercolor_mask &= dst->states.enabled_mask;

	r600_sampler_states_dirty(rctx, &dst->states);

	/* Seamless cubemap state. */
	if (rctx->b.chip_class <= R700 &&
	    seamless_cube_map != -1 &&
	    seamless_cube_map != rctx->seamless_cube_map.enabled) {
		/* change in TA_CNTL_AUX need a pipeline flush */
		rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
		rctx->seamless_cube_map.enabled = seamless_cube_map;
		rctx->seamless_cube_map.atom.dirty = true;
	}
}
static void r600_delete_sampler_state(struct pipe_context *ctx, void *state)
{
	free(state);
}
static void r600_delete_blend_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_blend_state *blend = (struct r600_blend_state*)state;

	if (rctx->blend_state.cso == state) {
		ctx->bind_blend_state(ctx, NULL);
	}

	r600_release_command_buffer(&blend->buffer);
	r600_release_command_buffer(&blend->buffer_no_blend);
	FREE(blend);
}
static void r600_delete_dsa_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_dsa_state *dsa = (struct r600_dsa_state *)state;

	if (rctx->dsa_state.cso == state) {
		ctx->bind_depth_stencil_alpha_state(ctx, NULL);
	}

	r600_release_command_buffer(&dsa->buffer);
	FREE(dsa);
}
static void r600_bind_vertex_elements(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	r600_set_cso_state(&rctx->vertex_fetch_shader, state);
}
static void r600_delete_vertex_elements(struct pipe_context *ctx, void *state)
{
	struct r600_fetch_shader *shader = (struct r600_fetch_shader*)state;
	pipe_resource_reference((struct pipe_resource**)&shader->buffer, NULL);
	FREE(shader);
}
static void r600_set_index_buffer(struct pipe_context *ctx,
				  const struct pipe_index_buffer *ib)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (ib) {
		pipe_resource_reference(&rctx->index_buffer.buffer, ib->buffer);
		memcpy(&rctx->index_buffer, ib, sizeof(*ib));
		r600_context_add_resource_size(ctx, ib->buffer);
	} else {
		pipe_resource_reference(&rctx->index_buffer.buffer, NULL);
	}
}
void r600_vertex_buffers_dirty(struct r600_context *rctx)
{
	if (rctx->vertex_buffer_state.dirty_mask) {
		rctx->b.flags |= R600_CONTEXT_INV_VERTEX_CACHE;
		rctx->vertex_buffer_state.atom.num_dw = (rctx->b.chip_class >= EVERGREEN ? 12 : 11) *
				util_bitcount(rctx->vertex_buffer_state.dirty_mask);
		rctx->vertex_buffer_state.atom.dirty = true;
	}
}
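/* Compare each incoming vertex buffer slot against the cached copy and only
 * reference/dirty the slots that actually changed; slots past `count` (or
 * all slots when `input` is NULL) are unbound via disable_mask. */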
static void r600_set_vertex_buffers(struct pipe_context *ctx,
				    unsigned start_slot, unsigned count,
				    const struct pipe_vertex_buffer *input)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_vertexbuf_state *state = &rctx->vertex_buffer_state;
	struct pipe_vertex_buffer *vb = state->vb + start_slot;
	unsigned i;
	uint32_t disable_mask = 0;
	/* These are the new buffers set by this function. */
	uint32_t new_buffer_mask = 0;

	/* Set vertex buffers. */
	if (input) {
		for (i = 0; i < count; i++) {
			if (memcmp(&input[i], &vb[i], sizeof(struct pipe_vertex_buffer))) {
				if (input[i].buffer) {
					vb[i].stride = input[i].stride;
					vb[i].buffer_offset = input[i].buffer_offset;
					pipe_resource_reference(&vb[i].buffer, input[i].buffer);
					new_buffer_mask |= 1 << i;
					r600_context_add_resource_size(ctx, input[i].buffer);
				} else {
					pipe_resource_reference(&vb[i].buffer, NULL);
					disable_mask |= 1 << i;
				}
			}
		}
	} else {
		for (i = 0; i < count; i++) {
			pipe_resource_reference(&vb[i].buffer, NULL);
		}
		disable_mask = ((1ull << count) - 1);
	}

	disable_mask <<= start_slot;
	new_buffer_mask <<= start_slot;

	rctx->vertex_buffer_state.enabled_mask &= ~disable_mask;
	rctx->vertex_buffer_state.dirty_mask &= rctx->vertex_buffer_state.enabled_mask;
	rctx->vertex_buffer_state.enabled_mask |= new_buffer_mask;
	rctx->vertex_buffer_state.dirty_mask |= new_buffer_mask;

	r600_vertex_buffers_dirty(rctx);
}
void r600_sampler_views_dirty(struct r600_context *rctx,
			      struct r600_samplerview_state *state)
{
	if (state->dirty_mask) {
		rctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE;
		state->atom.num_dw = (rctx->b.chip_class >= EVERGREEN ? 14 : 13) *
				     util_bitcount(state->dirty_mask);
		state->atom.dirty = true;
	}
}
static void r600_set_sampler_views(struct pipe_context *pipe, unsigned shader,
				   unsigned start, unsigned count,
				   struct pipe_sampler_view **views)
{
	struct r600_context *rctx = (struct r600_context *) pipe;
	struct r600_textures_info *dst = &rctx->samplers[shader];
	struct r600_pipe_sampler_view **rviews = (struct r600_pipe_sampler_view **)views;
	uint32_t dirty_sampler_states_mask = 0;
	unsigned i;
	/* This sets 1-bit for textures with index >= count. */
	uint32_t disable_mask = ~((1ull << count) - 1);
	/* These are the new textures set by this function. */
	uint32_t new_mask = 0;

	/* Set textures with index >= count to NULL. */
	uint32_t remaining_mask;

	assert(start == 0); /* XXX fix below */

	if (shader == PIPE_SHADER_COMPUTE) {
		evergreen_set_cs_sampler_view(pipe, start, count, views);
		return;
	}

	remaining_mask = dst->views.enabled_mask & disable_mask;

	while (remaining_mask) {
		i = u_bit_scan(&remaining_mask);
		assert(dst->views.views[i]);

		pipe_sampler_view_reference((struct pipe_sampler_view **)&dst->views.views[i], NULL);
	}

	for (i = 0; i < count; i++) {
		if (rviews[i] == dst->views.views[i]) {
			continue;
		}

		if (rviews[i]) {
			struct r600_texture *rtex =
				(struct r600_texture*)rviews[i]->base.texture;

			if (rviews[i]->base.texture->target != PIPE_BUFFER) {
				if (rtex->is_depth && !rtex->is_flushing_texture) {
					dst->views.compressed_depthtex_mask |= 1 << i;
				} else {
					dst->views.compressed_depthtex_mask &= ~(1 << i);
				}

				/* Track compressed colorbuffers. */
				if (rtex->cmask.size) {
					dst->views.compressed_colortex_mask |= 1 << i;
				} else {
					dst->views.compressed_colortex_mask &= ~(1 << i);
				}
			}

			/* Changing from array to non-arrays textures and vice versa requires
			 * updating TEX_ARRAY_OVERRIDE in sampler states on R6xx-R7xx. */
			if (rctx->b.chip_class <= R700 &&
			    (dst->states.enabled_mask & (1 << i)) &&
			    (rviews[i]->base.texture->target == PIPE_TEXTURE_1D_ARRAY ||
			     rviews[i]->base.texture->target == PIPE_TEXTURE_2D_ARRAY) != dst->is_array_sampler[i]) {
				dirty_sampler_states_mask |= 1 << i;
			}

			pipe_sampler_view_reference((struct pipe_sampler_view **)&dst->views.views[i], views[i]);
			new_mask |= 1 << i;
			r600_context_add_resource_size(pipe, views[i]->texture);
		} else {
			pipe_sampler_view_reference((struct pipe_sampler_view **)&dst->views.views[i], NULL);
			disable_mask |= 1 << i;
		}
	}

	dst->views.enabled_mask &= ~disable_mask;
	dst->views.dirty_mask &= dst->views.enabled_mask;
	dst->views.enabled_mask |= new_mask;
	dst->views.dirty_mask |= new_mask;
	dst->views.compressed_depthtex_mask &= dst->views.enabled_mask;
	dst->views.compressed_colortex_mask &= dst->views.enabled_mask;
	dst->views.dirty_buffer_constants = TRUE;
	r600_sampler_views_dirty(rctx, &dst->views);

	if (dirty_sampler_states_mask) {
		dst->states.dirty_mask |= dirty_sampler_states_mask;
		r600_sampler_states_dirty(rctx, &dst->states);
	}
}
static void r600_set_viewport_states(struct pipe_context *ctx,
				     unsigned start_slot,
				     unsigned num_viewports,
				     const struct pipe_viewport_state *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	unsigned i;

	for (i = start_slot; i < start_slot + num_viewports; i++) {
		rctx->viewport[i].state = state[i - start_slot];
		rctx->viewport[i].atom.dirty = true;
	}
}
void r600_emit_viewport_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
	struct r600_viewport_state *rstate = (struct r600_viewport_state *)atom;
	struct pipe_viewport_state *state = &rstate->state;
	int offset = rstate->idx * 6 * 4;

	r600_write_context_reg_seq(cs, R_02843C_PA_CL_VPORT_XSCALE_0 + offset, 6);
	radeon_emit(cs, fui(state->scale[0]));     /* R_02843C_PA_CL_VPORT_XSCALE_0 */
	radeon_emit(cs, fui(state->translate[0])); /* R_028440_PA_CL_VPORT_XOFFSET_0 */
	radeon_emit(cs, fui(state->scale[1]));     /* R_028444_PA_CL_VPORT_YSCALE_0 */
	radeon_emit(cs, fui(state->translate[1])); /* R_028448_PA_CL_VPORT_YOFFSET_0 */
	radeon_emit(cs, fui(state->scale[2]));     /* R_02844C_PA_CL_VPORT_ZSCALE_0 */
	radeon_emit(cs, fui(state->translate[2])); /* R_028450_PA_CL_VPORT_ZOFFSET_0 */
}
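/* A shader selector can cache several compiled variants of the same TGSI;
 * the key below captures the non-TGSI state the generated code depends on
 * (two-sided color, alpha-to-one, number of color buffers, dual-source
 * blending, VS-as-ES), so a state change only triggers a recompile when one
 * of those bits actually differs. */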
/* Compute the key for the hw shader variant */
static INLINE struct r600_shader_key r600_shader_selector_key(struct pipe_context * ctx,
		struct r600_pipe_shader_selector * sel)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_shader_key key;
	memset(&key, 0, sizeof(key));

	if (sel->type == PIPE_SHADER_FRAGMENT) {
		key.color_two_side = rctx->rasterizer && rctx->rasterizer->two_side;
		key.alpha_to_one = rctx->alpha_to_one &&
				   rctx->rasterizer && rctx->rasterizer->multisample_enable &&
				   !rctx->framebuffer.cb0_is_integer;
		key.nr_cbufs = rctx->framebuffer.state.nr_cbufs;
		/* Dual-source blending only makes sense with nr_cbufs == 1. */
		if (key.nr_cbufs == 1 && rctx->dual_src_blend)
			key.nr_cbufs = 2;
	} else if (sel->type == PIPE_SHADER_VERTEX) {
		key.vs_as_es = (rctx->gs_shader != NULL);
	}
	return key;
}
/* Select the hw shader variant depending on the current state.
 * (*dirty) is set to 1 if current variant was changed */
static int r600_shader_select(struct pipe_context *ctx,
		struct r600_pipe_shader_selector* sel,
		bool *dirty)
{
	struct r600_shader_key key;
	struct r600_pipe_shader * shader = NULL;
	int r;

	memset(&key, 0, sizeof(key));
	key = r600_shader_selector_key(ctx, sel);

	/* Check if we don't need to change anything.
	 * This path is also used for most shaders that don't need multiple
	 * variants, it will cost just a computation of the key and this
	 * test. */
	if (likely(sel->current && memcmp(&sel->current->key, &key, sizeof(key)) == 0)) {
		return 0;
	}

	/* lookup if we have other variants in the list */
	if (sel->num_shaders > 1) {
		struct r600_pipe_shader *p = sel->current, *c = p->next_variant;

		while (c && memcmp(&c->key, &key, sizeof(key)) != 0) {
			p = c;
			c = c->next_variant;
		}

		if (c) {
			p->next_variant = c->next_variant;
			shader = c;
		}
	}

	if (unlikely(!shader)) {
		shader = CALLOC(1, sizeof(struct r600_pipe_shader));
		shader->selector = sel;

		r = r600_pipe_shader_create(ctx, shader, key);
		if (unlikely(r)) {
			R600_ERR("Failed to build shader variant (type=%u) %d\n",
				 sel->type, r);
			sel->current = NULL;
			FREE(shader);
			return r;
		}

		/* We don't know the value of nr_ps_max_color_exports until we built
		 * at least one variant, so we may need to recompute the key after
		 * building first variant. */
		if (sel->type == PIPE_SHADER_FRAGMENT &&
		    sel->num_shaders == 0) {
			sel->nr_ps_max_color_exports = shader->shader.nr_ps_max_color_exports;
			key = r600_shader_selector_key(ctx, sel);
		}

		memcpy(&shader->key, &key, sizeof(key));
		sel->num_shaders++;
	}

	if (dirty)
		*dirty = true;

	shader->next_variant = sel->current;
	sel->current = shader;

	return 0;
}
static void *r600_create_shader_state(struct pipe_context *ctx,
				      const struct pipe_shader_state *state,
				      unsigned pipe_shader_type)
{
	struct r600_pipe_shader_selector *sel = CALLOC_STRUCT(r600_pipe_shader_selector);

	sel->type = pipe_shader_type;
	sel->tokens = tgsi_dup_tokens(state->tokens);
	sel->so = state->stream_output;
	return sel;
}
static void *r600_create_ps_state(struct pipe_context *ctx,
				  const struct pipe_shader_state *state)
{
	return r600_create_shader_state(ctx, state, PIPE_SHADER_FRAGMENT);
}

static void *r600_create_vs_state(struct pipe_context *ctx,
				  const struct pipe_shader_state *state)
{
	return r600_create_shader_state(ctx, state, PIPE_SHADER_VERTEX);
}

static void *r600_create_gs_state(struct pipe_context *ctx,
				  const struct pipe_shader_state *state)
{
	return r600_create_shader_state(ctx, state, PIPE_SHADER_GEOMETRY);
}
static void r600_bind_ps_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (!state)
		state = rctx->dummy_pixel_shader;

	rctx->ps_shader = (struct r600_pipe_shader_selector *)state;
}
static void r600_bind_vs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (!state)
		return;

	rctx->vs_shader = (struct r600_pipe_shader_selector *)state;
	rctx->b.streamout.stride_in_dw = rctx->vs_shader->so.stride;
}
static void r600_bind_gs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->gs_shader = (struct r600_pipe_shader_selector *)state;

	if (!state)
		return;
	rctx->b.streamout.stride_in_dw = rctx->gs_shader->so.stride;
}
static void r600_delete_shader_selector(struct pipe_context *ctx,
					struct r600_pipe_shader_selector *sel)
{
	struct r600_pipe_shader *p = sel->current, *c;

	while (p) {
		c = p->next_variant;
		r600_pipe_shader_destroy(ctx, p);
		free(p);
		p = c;
	}

	free(sel->tokens);
	free(sel);
}
static void r600_delete_ps_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader_selector *sel = (struct r600_pipe_shader_selector *)state;

	if (rctx->ps_shader == sel) {
		rctx->ps_shader = NULL;
	}

	r600_delete_shader_selector(ctx, sel);
}

static void r600_delete_vs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader_selector *sel = (struct r600_pipe_shader_selector *)state;

	if (rctx->vs_shader == sel) {
		rctx->vs_shader = NULL;
	}

	r600_delete_shader_selector(ctx, sel);
}

static void r600_delete_gs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader_selector *sel = (struct r600_pipe_shader_selector *)state;

	if (rctx->gs_shader == sel) {
		rctx->gs_shader = NULL;
	}

	r600_delete_shader_selector(ctx, sel);
}
void r600_constant_buffers_dirty(struct r600_context *rctx, struct r600_constbuf_state *state)
{
	if (state->dirty_mask) {
		rctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE;
		state->atom.num_dw = rctx->b.chip_class >= EVERGREEN ? util_bitcount(state->dirty_mask)*20
								     : util_bitcount(state->dirty_mask)*19;
		state->atom.dirty = true;
	}
}
static void r600_set_constant_buffer(struct pipe_context *ctx, uint shader, uint index,
				     struct pipe_constant_buffer *input)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_constbuf_state *state = &rctx->constbuf_state[shader];
	struct pipe_constant_buffer *cb;
	const uint8_t *ptr;

	/* Note that the state tracker can unbind constant buffers by
	 * passing NULL here. */
	if (unlikely(!input || (!input->buffer && !input->user_buffer))) {
		state->enabled_mask &= ~(1 << index);
		state->dirty_mask &= ~(1 << index);
		pipe_resource_reference(&state->cb[index].buffer, NULL);
		return;
	}

	cb = &state->cb[index];
	cb->buffer_size = input->buffer_size;

	ptr = input->user_buffer;

	if (ptr) {
		/* Upload the user buffer. */
		if (R600_BIG_ENDIAN) {
			uint32_t *tmpPtr;
			unsigned i, size = input->buffer_size;

			if (!(tmpPtr = malloc(size))) {
				R600_ERR("Failed to allocate BE swap buffer.\n");
				return;
			}

			for (i = 0; i < size / 4; ++i) {
				tmpPtr[i] = util_cpu_to_le32(((uint32_t *)ptr)[i]);
			}

			u_upload_data(rctx->b.uploader, 0, size, tmpPtr, &cb->buffer_offset, &cb->buffer);
			free(tmpPtr);
		} else {
			u_upload_data(rctx->b.uploader, 0, input->buffer_size, ptr, &cb->buffer_offset, &cb->buffer);
		}
		/* account it in gtt */
		rctx->b.gtt += input->buffer_size;
	} else {
		/* Setup the hw buffer. */
		cb->buffer_offset = input->buffer_offset;
		pipe_resource_reference(&cb->buffer, input->buffer);
		r600_context_add_resource_size(ctx, input->buffer);
	}

	state->enabled_mask |= 1 << index;
	state->dirty_mask |= 1 << index;
	r600_constant_buffers_dirty(rctx, state);
}
static void r600_set_sample_mask(struct pipe_context *pipe, unsigned sample_mask)
{
	struct r600_context *rctx = (struct r600_context*)pipe;

	if (rctx->sample_mask.sample_mask == (uint16_t)sample_mask)
		return;

	rctx->sample_mask.sample_mask = sample_mask;
	rctx->sample_mask.atom.dirty = true;
}
/*
 * On r600/700 hw we don't have vertex fetch swizzle; although TBO doesn't
 * require full swizzles, it does need masking and setting alpha to one, so
 * we set up a set of 5 constants with the masks + alpha value.  In the
 * shader we then AND the 4 components with 0xffffffff or 0 and OR the alpha
 * with the value given here.
 * The 6th constant stores the txq buffer size, and the 7th slot the number
 * of cube layers in a cube map array.
 */
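/* Resulting per-sampler layout in buffer_constants[]:
 *   [0..3] component masks (0xffffffff or 0x0)
 *   [4]    alpha value to OR in (1 for pure integers, 0x3f800000 otherwise,
 *          0 when the format already has 4 channels)
 *   [5]    buffer size in elements, for TXQ
 *   [6]    number of cube map array layers (array_size / 6)
 */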
static void r600_setup_buffer_constants(struct r600_context *rctx, int shader_type)
{
	struct r600_textures_info *samplers = &rctx->samplers[shader_type];
	int bits;
	uint32_t array_size;
	struct pipe_constant_buffer cb;
	int i, j;

	if (!samplers->views.dirty_buffer_constants)
		return;

	samplers->views.dirty_buffer_constants = FALSE;

	bits = util_last_bit(samplers->views.enabled_mask);
	array_size = bits * 8 * sizeof(uint32_t) * 4;
	samplers->buffer_constants = realloc(samplers->buffer_constants, array_size);
	memset(samplers->buffer_constants, 0, array_size);
	for (i = 0; i < bits; i++) {
		if (samplers->views.enabled_mask & (1 << i)) {
			int offset = i * 8;
			const struct util_format_description *desc;
			desc = util_format_description(samplers->views.views[i]->base.format);

			for (j = 0; j < 4; j++)
				if (j < desc->nr_channels)
					samplers->buffer_constants[offset+j] = 0xffffffff;
				else
					samplers->buffer_constants[offset+j] = 0x0;
			if (desc->nr_channels < 4) {
				if (desc->channel[0].pure_integer)
					samplers->buffer_constants[offset+4] = 1;
				else
					samplers->buffer_constants[offset+4] = 0x3f800000;
			} else
				samplers->buffer_constants[offset + 4] = 0;

			samplers->buffer_constants[offset + 5] = samplers->views.views[i]->base.texture->width0 / util_format_get_blocksize(samplers->views.views[i]->base.format);
			samplers->buffer_constants[offset + 6] = samplers->views.views[i]->base.texture->array_size / 6;
		}
	}

	cb.buffer = NULL;
	cb.user_buffer = samplers->buffer_constants;
	cb.buffer_offset = 0;
	cb.buffer_size = array_size;
	rctx->b.b.set_constant_buffer(&rctx->b.b, shader_type, R600_BUFFER_INFO_CONST_BUFFER, &cb);
	pipe_resource_reference(&cb.buffer, NULL);
}
/* On evergreen we store two values:
 * 1. buffer size for TXQ
 * 2. number of cube layers in a cube map array.
 */
static void eg_setup_buffer_constants(struct r600_context *rctx, int shader_type)
{
	struct r600_textures_info *samplers = &rctx->samplers[shader_type];
	int bits;
	uint32_t array_size;
	struct pipe_constant_buffer cb;
	int i;

	if (!samplers->views.dirty_buffer_constants)
		return;

	samplers->views.dirty_buffer_constants = FALSE;

	bits = util_last_bit(samplers->views.enabled_mask);
	array_size = bits * 2 * sizeof(uint32_t) * 4;
	samplers->buffer_constants = realloc(samplers->buffer_constants, array_size);
	memset(samplers->buffer_constants, 0, array_size);
	for (i = 0; i < bits; i++) {
		if (samplers->views.enabled_mask & (1 << i)) {
			uint32_t offset = i * 2;
			samplers->buffer_constants[offset] = samplers->views.views[i]->base.texture->width0 / util_format_get_blocksize(samplers->views.views[i]->base.format);
			samplers->buffer_constants[offset + 1] = samplers->views.views[i]->base.texture->array_size / 6;
		}
	}

	cb.buffer = NULL;
	cb.user_buffer = samplers->buffer_constants;
	cb.buffer_offset = 0;
	cb.buffer_size = array_size;
	rctx->b.b.set_constant_buffer(&rctx->b.b, shader_type, R600_BUFFER_INFO_CONST_BUFFER, &cb);
	pipe_resource_reference(&cb.buffer, NULL);
}
/* set sample xy locations as array of fragment shader constants */
void r600_set_sample_locations_constant_buffer(struct r600_context *rctx)
{
	struct pipe_constant_buffer constbuf = {0};
	float values[4*16] = {0.0f};
	unsigned i;
	struct pipe_context *ctx = &rctx->b.b;

	assert(rctx->framebuffer.nr_samples <= Elements(values)/4);
	for (i = 0; i < rctx->framebuffer.nr_samples; i++) {
		ctx->get_sample_position(ctx, rctx->framebuffer.nr_samples, i, &values[4*i]);
		/* Also fill in center-zeroed positions used for interpolateAtSample */
		values[4*i + 2] = values[4*i + 0] - 0.5f;
		values[4*i + 3] = values[4*i + 1] - 0.5f;
	}

	constbuf.user_buffer = values;
	constbuf.buffer_size = rctx->framebuffer.nr_samples * 4 * 4;
	ctx->set_constant_buffer(ctx, PIPE_SHADER_FRAGMENT,
				 R600_SAMPLE_POSITIONS_CONST_BUFFER, &constbuf);
	pipe_resource_reference(&constbuf.buffer, NULL);
}
static void update_shader_atom(struct pipe_context *ctx,
			       struct r600_shader_state *state,
			       struct r600_pipe_shader *shader)
{
	state->shader = shader;
	if (shader) {
		state->atom.num_dw = shader->command_buffer.num_dw;
		state->atom.dirty = true;
		r600_context_add_resource_size(ctx, (struct pipe_resource *)shader->bo);
	} else {
		state->atom.num_dw = 0;
		state->atom.dirty = false;
	}
}
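/* Toggle the GS pipeline: flag the shader-stage and GS-ring atoms, lazily
 * allocate the ESGS/GSVS ring buffers the first time a geometry shader is
 * bound, and (un)bind them as ring constant buffers for the GS and VS
 * stages. */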
static void update_gs_block_state(struct r600_context *rctx, unsigned enable)
{
	if (rctx->shader_stages.geom_enable != enable) {
		rctx->shader_stages.geom_enable = enable;
		rctx->shader_stages.atom.dirty = true;
	}

	if (rctx->gs_rings.enable != enable) {
		rctx->gs_rings.enable = enable;
		rctx->gs_rings.atom.dirty = true;

		if (enable && !rctx->gs_rings.esgs_ring.buffer) {
			unsigned size = 0x1C000;
			rctx->gs_rings.esgs_ring.buffer =
					pipe_buffer_create(rctx->b.b.screen, PIPE_BIND_CUSTOM,
							   PIPE_USAGE_DEFAULT, size);
			rctx->gs_rings.esgs_ring.buffer_size = size;

			rctx->gs_rings.gsvs_ring.buffer =
					pipe_buffer_create(rctx->b.b.screen, PIPE_BIND_CUSTOM,
							   PIPE_USAGE_DEFAULT, size);
			rctx->gs_rings.gsvs_ring.buffer_size = size;
		}

		if (enable) {
			r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_GEOMETRY,
					R600_GS_RING_CONST_BUFFER, &rctx->gs_rings.esgs_ring);
			r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_VERTEX,
					R600_GS_RING_CONST_BUFFER, &rctx->gs_rings.gsvs_ring);
		} else {
			r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_GEOMETRY,
					R600_GS_RING_CONST_BUFFER, NULL);
			r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_VERTEX,
					R600_GS_RING_CONST_BUFFER, NULL);
		}
	}
}
static bool r600_update_derived_state(struct r600_context *rctx)
{
	struct pipe_context * ctx = (struct pipe_context*)rctx;
	bool ps_dirty = false, vs_dirty = false, gs_dirty = false;
	bool blend_disable;
	bool need_buf_const;
	if (!rctx->blitter->running) {
		unsigned i;

		/* Decompress textures if needed. */
		for (i = 0; i < PIPE_SHADER_TYPES; i++) {
			struct r600_samplerview_state *views = &rctx->samplers[i].views;
			if (views->compressed_depthtex_mask) {
				r600_decompress_depth_textures(rctx, views);
			}
			if (views->compressed_colortex_mask) {
				r600_decompress_color_textures(rctx, views);
			}
		}
	}

	update_gs_block_state(rctx, rctx->gs_shader != NULL);

	if (rctx->gs_shader) {
		r600_shader_select(ctx, rctx->gs_shader, &gs_dirty);
		if (unlikely(!rctx->gs_shader->current))
			return false;

		if (!rctx->shader_stages.geom_enable) {
			rctx->shader_stages.geom_enable = true;
			rctx->shader_stages.atom.dirty = true;
		}

		/* gs_shader provides GS and VS (copy shader) */
		if (unlikely(rctx->geometry_shader.shader != rctx->gs_shader->current)) {
			update_shader_atom(ctx, &rctx->geometry_shader, rctx->gs_shader->current);
			update_shader_atom(ctx, &rctx->vertex_shader, rctx->gs_shader->current->gs_copy_shader);
			/* Update clip misc state. */
			if (rctx->gs_shader->current->gs_copy_shader->pa_cl_vs_out_cntl != rctx->clip_misc_state.pa_cl_vs_out_cntl ||
			    rctx->gs_shader->current->gs_copy_shader->shader.clip_dist_write != rctx->clip_misc_state.clip_dist_write ||
			    rctx->clip_misc_state.clip_disable != rctx->gs_shader->current->shader.vs_position_window_space) {
				rctx->clip_misc_state.pa_cl_vs_out_cntl = rctx->gs_shader->current->gs_copy_shader->pa_cl_vs_out_cntl;
				rctx->clip_misc_state.clip_dist_write = rctx->gs_shader->current->gs_copy_shader->shader.clip_dist_write;
				rctx->clip_misc_state.clip_disable = rctx->gs_shader->current->shader.vs_position_window_space;
				rctx->clip_misc_state.atom.dirty = true;
			}
		}

		r600_shader_select(ctx, rctx->vs_shader, &vs_dirty);
		if (unlikely(!rctx->vs_shader->current))
			return false;

		/* vs_shader is used as ES */
		if (unlikely(vs_dirty || rctx->export_shader.shader != rctx->vs_shader->current)) {
			update_shader_atom(ctx, &rctx->export_shader, rctx->vs_shader->current);
		}
	} else {
		if (unlikely(rctx->geometry_shader.shader)) {
			update_shader_atom(ctx, &rctx->geometry_shader, NULL);
			update_shader_atom(ctx, &rctx->export_shader, NULL);
			rctx->shader_stages.geom_enable = false;
			rctx->shader_stages.atom.dirty = true;
		}

		r600_shader_select(ctx, rctx->vs_shader, &vs_dirty);
		if (unlikely(!rctx->vs_shader->current))
			return false;

		if (unlikely(vs_dirty || rctx->vertex_shader.shader != rctx->vs_shader->current)) {
			update_shader_atom(ctx, &rctx->vertex_shader, rctx->vs_shader->current);

			/* Update clip misc state. */
			if (rctx->vs_shader->current->pa_cl_vs_out_cntl != rctx->clip_misc_state.pa_cl_vs_out_cntl ||
			    rctx->vs_shader->current->shader.clip_dist_write != rctx->clip_misc_state.clip_dist_write ||
			    rctx->clip_misc_state.clip_disable != rctx->vs_shader->current->shader.vs_position_window_space) {
				rctx->clip_misc_state.pa_cl_vs_out_cntl = rctx->vs_shader->current->pa_cl_vs_out_cntl;
				rctx->clip_misc_state.clip_dist_write = rctx->vs_shader->current->shader.clip_dist_write;
				rctx->clip_misc_state.clip_disable = rctx->vs_shader->current->shader.vs_position_window_space;
				rctx->clip_misc_state.atom.dirty = true;
			}
		}
	}

	r600_shader_select(ctx, rctx->ps_shader, &ps_dirty);
	if (unlikely(!rctx->ps_shader->current))
		return false;

	if (unlikely(ps_dirty || rctx->pixel_shader.shader != rctx->ps_shader->current ||
		     rctx->rasterizer->sprite_coord_enable != rctx->ps_shader->current->sprite_coord_enable ||
		     rctx->rasterizer->flatshade != rctx->ps_shader->current->flatshade)) {

		if (rctx->cb_misc_state.nr_ps_color_outputs != rctx->ps_shader->current->nr_ps_color_outputs) {
			rctx->cb_misc_state.nr_ps_color_outputs = rctx->ps_shader->current->nr_ps_color_outputs;
			rctx->cb_misc_state.atom.dirty = true;
		}

		if (rctx->b.chip_class <= R700) {
			bool multiwrite = rctx->ps_shader->current->shader.fs_write_all;

			if (rctx->cb_misc_state.multiwrite != multiwrite) {
				rctx->cb_misc_state.multiwrite = multiwrite;
				rctx->cb_misc_state.atom.dirty = true;
			}
		}

		if (unlikely(!ps_dirty && rctx->ps_shader && rctx->rasterizer &&
			     ((rctx->rasterizer->sprite_coord_enable != rctx->ps_shader->current->sprite_coord_enable) ||
			      (rctx->rasterizer->flatshade != rctx->ps_shader->current->flatshade)))) {

			if (rctx->b.chip_class >= EVERGREEN)
				evergreen_update_ps_state(ctx, rctx->ps_shader->current);
			else
				r600_update_ps_state(ctx, rctx->ps_shader->current);
		}

		update_shader_atom(ctx, &rctx->pixel_shader, rctx->ps_shader->current);
	}

	if (rctx->b.chip_class >= EVERGREEN) {
		evergreen_update_db_shader_control(rctx);
	} else {
		r600_update_db_shader_control(rctx);
	}

	/* on R600 we stuff masks + txq info into one constant buffer */
	/* on evergreen we only need a txq info one */
	if (rctx->ps_shader) {
		need_buf_const = rctx->ps_shader->current->shader.uses_tex_buffers || rctx->ps_shader->current->shader.has_txq_cube_array_z_comp;
		if (need_buf_const) {
			if (rctx->b.chip_class < EVERGREEN)
				r600_setup_buffer_constants(rctx, PIPE_SHADER_FRAGMENT);
			else
				eg_setup_buffer_constants(rctx, PIPE_SHADER_FRAGMENT);
		}
	}

	if (rctx->vs_shader) {
		need_buf_const = rctx->vs_shader->current->shader.uses_tex_buffers || rctx->vs_shader->current->shader.has_txq_cube_array_z_comp;
		if (need_buf_const) {
			if (rctx->b.chip_class < EVERGREEN)
				r600_setup_buffer_constants(rctx, PIPE_SHADER_VERTEX);
			else
				eg_setup_buffer_constants(rctx, PIPE_SHADER_VERTEX);
		}
	}

	if (rctx->gs_shader) {
		need_buf_const = rctx->gs_shader->current->shader.uses_tex_buffers || rctx->gs_shader->current->shader.has_txq_cube_array_z_comp;
		if (need_buf_const) {
			if (rctx->b.chip_class < EVERGREEN)
				r600_setup_buffer_constants(rctx, PIPE_SHADER_GEOMETRY);
			else
				eg_setup_buffer_constants(rctx, PIPE_SHADER_GEOMETRY);
		}
	}

	if (rctx->b.chip_class < EVERGREEN && rctx->ps_shader && rctx->vs_shader) {
		if (!r600_adjust_gprs(rctx)) {
			/* discard rendering */
			return false;
		}
	}

	blend_disable = (rctx->dual_src_blend &&
			 rctx->ps_shader->current->nr_ps_color_outputs < 2);

	if (blend_disable != rctx->force_blend_disable) {
		rctx->force_blend_disable = blend_disable;
		r600_bind_blend_state_internal(rctx,
					       rctx->blend_state.cso,
					       blend_disable);
	}

	return true;
}
void r600_emit_clip_misc_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
	struct r600_clip_misc_state *state = &rctx->clip_misc_state;

	r600_write_context_reg(cs, R_028810_PA_CL_CLIP_CNTL,
			       state->pa_cl_clip_cntl |
			       (state->clip_dist_write ? 0 : state->clip_plane_enable & 0x3F) |
			       S_028810_CLIP_DISABLE(state->clip_disable));
	r600_write_context_reg(cs, R_02881C_PA_CL_VS_OUT_CNTL,
			       state->pa_cl_vs_out_cntl |
			       (state->clip_plane_enable & state->clip_dist_write));
}
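/* The main draw entry point: flush a pending DMA ring, update derived state
 * (shader variants, GS rings, buffer constants), set up or translate the
 * index buffer, emit every dirty state atom and finally the draw packets
 * (DRAW_INDEX_IMMD, DRAW_INDEX or DRAW_INDEX_AUTO). */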
static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *dinfo)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct pipe_draw_info info = *dinfo;
	struct pipe_index_buffer ib = {};
	unsigned i;
	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;

	if (!info.count && (info.indexed || !info.count_from_stream_output)) {
		return;
	}

	if (!rctx->vs_shader || !rctx->ps_shader) {
		return;
	}

	/* make sure that the gfx ring is only one active */
	if (rctx->b.rings.dma.cs && rctx->b.rings.dma.cs->cdw) {
		rctx->b.rings.dma.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
	}

	if (!r600_update_derived_state(rctx)) {
		/* useless to render because current rendering command
		 * can't be achieved */
		return;
	}

	if (info.indexed) {
		/* Initialize the index buffer struct. */
		pipe_resource_reference(&ib.buffer, rctx->index_buffer.buffer);
		ib.user_buffer = rctx->index_buffer.user_buffer;
		ib.index_size = rctx->index_buffer.index_size;
		ib.offset = rctx->index_buffer.offset + info.start * ib.index_size;

		/* Translate 8-bit indices to 16-bit. */
		if (ib.index_size == 1) {
			struct pipe_resource *out_buffer = NULL;
			unsigned out_offset;
			void *ptr;

			u_upload_alloc(rctx->b.uploader, 0, info.count * 2,
				       &out_offset, &out_buffer, &ptr);

			util_shorten_ubyte_elts_to_userptr(
					&rctx->b.b, &ib, 0, ib.offset, info.count, ptr);

			pipe_resource_reference(&ib.buffer, NULL);
			ib.user_buffer = NULL;
			ib.buffer = out_buffer;
			ib.offset = out_offset;
			ib.index_size = 2;
		}

		/* Upload the index buffer.
		 * The upload is skipped for small index counts on little-endian machines
		 * and the indices are emitted via PKT3_DRAW_INDEX_IMMD.
		 * Note: Instanced rendering in combination with immediate indices hangs. */
		if (ib.user_buffer && (R600_BIG_ENDIAN || info.instance_count > 1 ||
				       info.count*ib.index_size > 20)) {
			u_upload_data(rctx->b.uploader, 0, info.count * ib.index_size,
				      ib.user_buffer, &ib.offset, &ib.buffer);
			ib.user_buffer = NULL;
		}
	} else {
		info.index_bias = info.start;
	}

	/* Set the index offset and primitive restart. */
	if (rctx->vgt_state.vgt_multi_prim_ib_reset_en != info.primitive_restart ||
	    rctx->vgt_state.vgt_multi_prim_ib_reset_indx != info.restart_index ||
	    rctx->vgt_state.vgt_indx_offset != info.index_bias) {
		rctx->vgt_state.vgt_multi_prim_ib_reset_en = info.primitive_restart;
		rctx->vgt_state.vgt_multi_prim_ib_reset_indx = info.restart_index;
		rctx->vgt_state.vgt_indx_offset = info.index_bias;
		rctx->vgt_state.atom.dirty = true;
	}

	/* Workaround for hardware deadlock on certain R600 ASICs: write into a CB register. */
	if (rctx->b.chip_class == R600) {
		rctx->b.flags |= R600_CONTEXT_PS_PARTIAL_FLUSH;
		rctx->cb_misc_state.atom.dirty = true;
	}

	r600_need_cs_space(rctx, ib.user_buffer ? 5 : 0, TRUE);
	r600_flush_emit(rctx);

	for (i = 0; i < R600_NUM_ATOMS; i++) {
		if (rctx->atoms[i] == NULL || !rctx->atoms[i]->dirty) {
			continue;
		}
		r600_emit_atom(rctx, rctx->atoms[i]);
	}

	if (rctx->b.chip_class == CAYMAN) {
		/* Copied from radeonsi. */
		unsigned primgroup_size = 128; /* recommended without a GS */
		bool ia_switch_on_eop = false;
		bool partial_vs_wave = false;

		if (rctx->gs_shader)
			primgroup_size = 64; /* recommended with a GS */

		if ((rctx->rasterizer && rctx->rasterizer->pa_sc_line_stipple) ||
		    (rctx->b.screen->debug_flags & DBG_SWITCH_ON_EOP)) {
			ia_switch_on_eop = true;
		}

		if (rctx->b.streamout.streamout_enabled ||
		    rctx->b.streamout.prims_gen_query_enabled)
			partial_vs_wave = true;

		r600_write_context_reg(cs, CM_R_028AA8_IA_MULTI_VGT_PARAM,
				       S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
				       S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
				       S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1));
	}

	/* On R6xx, CULL_FRONT=1 culls all points, lines, and rectangles,
	 * even though it should have no effect on those. */
	if (rctx->b.chip_class == R600 && rctx->rasterizer) {
		unsigned su_sc_mode_cntl = rctx->rasterizer->pa_su_sc_mode_cntl;
		unsigned prim = info.mode;

		if (rctx->gs_shader) {
			prim = rctx->gs_shader->current->shader.gs_output_prim;
		}
		prim = r600_conv_prim_to_gs_out(prim); /* decrease the number of types to 3 */

		if (prim == V_028A6C_OUTPRIM_TYPE_POINTLIST ||
		    prim == V_028A6C_OUTPRIM_TYPE_LINESTRIP ||
		    info.mode == R600_PRIM_RECTANGLE_LIST) {
			su_sc_mode_cntl &= C_028814_CULL_FRONT;
		}
		r600_write_context_reg(cs, R_028814_PA_SU_SC_MODE_CNTL, su_sc_mode_cntl);
	}

	/* Update start instance. */
	if (rctx->last_start_instance != info.start_instance) {
		r600_write_ctl_const(cs, R_03CFF4_SQ_VTX_START_INST_LOC, info.start_instance);
		rctx->last_start_instance = info.start_instance;
	}

	/* Update the primitive type. */
	if (rctx->last_primitive_type != info.mode) {
		unsigned ls_mask = 0;

		if (info.mode == PIPE_PRIM_LINES)
			ls_mask = 1;
		else if (info.mode == PIPE_PRIM_LINE_STRIP ||
			 info.mode == PIPE_PRIM_LINE_LOOP)
			ls_mask = 2;

		r600_write_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
				       S_028A0C_AUTO_RESET_CNTL(ls_mask) |
				       (rctx->rasterizer ? rctx->rasterizer->pa_sc_line_stipple : 0));
		r600_write_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE,
				      r600_conv_pipe_prim(info.mode));

		rctx->last_primitive_type = info.mode;
	}

	cs->buf[cs->cdw++] = PKT3(PKT3_NUM_INSTANCES, 0, rctx->b.predicate_drawing);
	cs->buf[cs->cdw++] = info.instance_count;
	if (info.indexed) {
		cs->buf[cs->cdw++] = PKT3(PKT3_INDEX_TYPE, 0, rctx->b.predicate_drawing);
		cs->buf[cs->cdw++] = ib.index_size == 4 ?
				(VGT_INDEX_32 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_32_BIT : 0)) :
				(VGT_INDEX_16 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_16_BIT : 0));

		if (ib.user_buffer) {
			unsigned size_bytes = info.count*ib.index_size;
			unsigned size_dw = align(size_bytes, 4) / 4;
			cs->buf[cs->cdw++] = PKT3(PKT3_DRAW_INDEX_IMMD, 1 + size_dw, rctx->b.predicate_drawing);
			cs->buf[cs->cdw++] = info.count;
			cs->buf[cs->cdw++] = V_0287F0_DI_SRC_SEL_IMMEDIATE;
			memcpy(cs->buf+cs->cdw, ib.user_buffer, size_bytes);
			cs->cdw += size_dw;
		} else {
			uint64_t va = r600_resource(ib.buffer)->gpu_address + ib.offset;
			cs->buf[cs->cdw++] = PKT3(PKT3_DRAW_INDEX, 3, rctx->b.predicate_drawing);
			cs->buf[cs->cdw++] = va;
			cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;
			cs->buf[cs->cdw++] = info.count;
			cs->buf[cs->cdw++] = V_0287F0_DI_SRC_SEL_DMA;
			cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, rctx->b.predicate_drawing);
			cs->buf[cs->cdw++] = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx,
								   (struct r600_resource*)ib.buffer,
								   RADEON_USAGE_READ, RADEON_PRIO_MIN);
		}
	} else {
		if (info.count_from_stream_output) {
			struct r600_so_target *t = (struct r600_so_target*)info.count_from_stream_output;
			uint64_t va = t->buf_filled_size->gpu_address + t->buf_filled_size_offset;

			r600_write_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE, t->stride_in_dw);

			cs->buf[cs->cdw++] = PKT3(PKT3_COPY_DW, 4, 0);
			cs->buf[cs->cdw++] = COPY_DW_SRC_IS_MEM | COPY_DW_DST_IS_REG;
			cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL;     /* src address lo */
			cs->buf[cs->cdw++] = (va >> 32UL) & 0xFFUL; /* src address hi */
			cs->buf[cs->cdw++] = R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2; /* dst register */
			cs->buf[cs->cdw++] = 0; /* unused */

			cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
			cs->buf[cs->cdw++] = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx,
								   t->buf_filled_size, RADEON_USAGE_READ,
								   RADEON_PRIO_MIN);
		}

		cs->buf[cs->cdw++] = PKT3(PKT3_DRAW_INDEX_AUTO, 1, rctx->b.predicate_drawing);
		cs->buf[cs->cdw++] = info.count;
		cs->buf[cs->cdw++] = V_0287F0_DI_SRC_SEL_AUTO_INDEX |
				     (info.count_from_stream_output ? S_0287F0_USE_OPAQUE(1) : 0);
	}

	if (rctx->screen->b.trace_bo) {
		r600_trace_emit(rctx);
	}

	/* Set the depth buffer as dirty. */
	if (rctx->framebuffer.state.zsbuf) {
		struct pipe_surface *surf = rctx->framebuffer.state.zsbuf;
		struct r600_texture *rtex = (struct r600_texture *)surf->texture;

		rtex->dirty_level_mask |= 1 << surf->u.tex.level;
	}
	if (rctx->framebuffer.compressed_cb_mask) {
		struct pipe_surface *surf;
		struct r600_texture *rtex;
		unsigned mask = rctx->framebuffer.compressed_cb_mask;

		do {
			unsigned i = u_bit_scan(&mask);
			surf = rctx->framebuffer.state.cbufs[i];
			rtex = (struct r600_texture*)surf->texture;

			rtex->dirty_level_mask |= 1 << surf->u.tex.level;
		} while (mask);
	}

	pipe_resource_reference(&ib.buffer, NULL);
	rctx->b.num_draw_calls++;
}
uint32_t r600_translate_stencil_op(int s_op)
{
	switch (s_op) {
	case PIPE_STENCIL_OP_KEEP:
		return V_028800_STENCIL_KEEP;
	case PIPE_STENCIL_OP_ZERO:
		return V_028800_STENCIL_ZERO;
	case PIPE_STENCIL_OP_REPLACE:
		return V_028800_STENCIL_REPLACE;
	case PIPE_STENCIL_OP_INCR:
		return V_028800_STENCIL_INCR;
	case PIPE_STENCIL_OP_DECR:
		return V_028800_STENCIL_DECR;
	case PIPE_STENCIL_OP_INCR_WRAP:
		return V_028800_STENCIL_INCR_WRAP;
	case PIPE_STENCIL_OP_DECR_WRAP:
		return V_028800_STENCIL_DECR_WRAP;
	case PIPE_STENCIL_OP_INVERT:
		return V_028800_STENCIL_INVERT;
	default:
		R600_ERR("Unknown stencil op %d", s_op);
		break;
	}
	return 0;
}

uint32_t r600_translate_fill(uint32_t func)
{
	switch (func) {
	case PIPE_POLYGON_MODE_FILL:
		return 2;
	case PIPE_POLYGON_MODE_LINE:
		return 1;
	case PIPE_POLYGON_MODE_POINT:
		return 0;
	default:
		return 0;
	}
}

unsigned r600_tex_wrap(unsigned wrap)
{
	switch (wrap) {
	default:
	case PIPE_TEX_WRAP_REPEAT:
		return V_03C000_SQ_TEX_WRAP;
	case PIPE_TEX_WRAP_CLAMP:
		return V_03C000_SQ_TEX_CLAMP_HALF_BORDER;
	case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
		return V_03C000_SQ_TEX_CLAMP_LAST_TEXEL;
	case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
		return V_03C000_SQ_TEX_CLAMP_BORDER;
	case PIPE_TEX_WRAP_MIRROR_REPEAT:
		return V_03C000_SQ_TEX_MIRROR;
	case PIPE_TEX_WRAP_MIRROR_CLAMP:
		return V_03C000_SQ_TEX_MIRROR_ONCE_HALF_BORDER;
	case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
		return V_03C000_SQ_TEX_MIRROR_ONCE_LAST_TEXEL;
	case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
		return V_03C000_SQ_TEX_MIRROR_ONCE_BORDER;
	}
}

unsigned r600_tex_filter(unsigned filter)
{
	switch (filter) {
	default:
	case PIPE_TEX_FILTER_NEAREST:
		return V_03C000_SQ_TEX_XY_FILTER_POINT;
	case PIPE_TEX_FILTER_LINEAR:
		return V_03C000_SQ_TEX_XY_FILTER_BILINEAR;
	}
}

unsigned r600_tex_mipfilter(unsigned filter)
{
	switch (filter) {
	case PIPE_TEX_MIPFILTER_NEAREST:
		return V_03C000_SQ_TEX_Z_FILTER_POINT;
	case PIPE_TEX_MIPFILTER_LINEAR:
		return V_03C000_SQ_TEX_Z_FILTER_LINEAR;
	default:
	case PIPE_TEX_MIPFILTER_NONE:
		return V_03C000_SQ_TEX_Z_FILTER_NONE;
	}
}

unsigned r600_tex_compare(unsigned compare)
{
	switch (compare) {
	default:
	case PIPE_FUNC_NEVER:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_NEVER;
	case PIPE_FUNC_LESS:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_LESS;
	case PIPE_FUNC_EQUAL:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_EQUAL;
	case PIPE_FUNC_LEQUAL:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_LESSEQUAL;
	case PIPE_FUNC_GREATER:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_GREATER;
	case PIPE_FUNC_NOTEQUAL:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_NOTEQUAL;
	case PIPE_FUNC_GEQUAL:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_GREATEREQUAL;
	case PIPE_FUNC_ALWAYS:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_ALWAYS;
	}
}
static bool wrap_mode_uses_border_color(unsigned wrap, bool linear_filter)
{
	return wrap == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
	       wrap == PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER ||
	       (linear_filter &&
		(wrap == PIPE_TEX_WRAP_CLAMP ||
		 wrap == PIPE_TEX_WRAP_MIRROR_CLAMP));
}
bool sampler_state_needs_border_color(const struct pipe_sampler_state *state)
{
	bool linear_filter = state->min_img_filter != PIPE_TEX_FILTER_NEAREST ||
			     state->mag_img_filter != PIPE_TEX_FILTER_NEAREST;

	return (state->border_color.ui[0] || state->border_color.ui[1] ||
		state->border_color.ui[2] || state->border_color.ui[3]) &&
	       (wrap_mode_uses_border_color(state->wrap_s, linear_filter) ||
		wrap_mode_uses_border_color(state->wrap_t, linear_filter) ||
		wrap_mode_uses_border_color(state->wrap_r, linear_filter));
}

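/*
 * Illustrative sketch (not part of the original file): a sampler CSO create
 * callback would combine the helpers above roughly as follows, only keeping
 * track of border colors when they can actually be sampled:
 *
 *     word0 = S_03C000_CLAMP_X(r600_tex_wrap(state->wrap_s)) |
 *             S_03C000_CLAMP_Y(r600_tex_wrap(state->wrap_t)) |
 *             S_03C000_XY_MAG_FILTER(r600_tex_filter(state->mag_img_filter)) |
 *             S_03C000_MIP_FILTER(r600_tex_mipfilter(state->min_mip_filter));
 *     ss->border_color_use = sampler_state_needs_border_color(state);
 *
 * Field macros and the ss->border_color_use member are named here for
 * illustration; see the per-chip create_sampler_state implementations for
 * the real code.
 */
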
void r600_emit_shader(struct r600_context *rctx, struct r600_atom *a)
{
	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
	struct r600_pipe_shader *shader = ((struct r600_shader_state*)a)->shader;

	if (!shader)
		return;

	r600_emit_command_buffer(cs, &shader->command_buffer);
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, shader->bo,
					      RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA));
}

unsigned r600_get_swizzle_combined(const unsigned char *swizzle_format,
				   const unsigned char *swizzle_view,
				   boolean vtx)
{
	unsigned i;
	unsigned char swizzle[4];
	unsigned result = 0;
	const uint32_t tex_swizzle_shift[4] = {
		16, 19, 22, 25,
	};
	const uint32_t vtx_swizzle_shift[4] = {
		3, 6, 9, 12,
	};
	const uint32_t swizzle_bit[4] = {
		0, 1, 2, 3,
	};
	const uint32_t *swizzle_shift = tex_swizzle_shift;

	if (vtx)
		swizzle_shift = vtx_swizzle_shift;

	if (swizzle_view) {
		util_format_compose_swizzles(swizzle_format, swizzle_view, swizzle);
	} else {
		memcpy(swizzle, swizzle_format, 4);
	}

	/* Get swizzle. */
	for (i = 0; i < 4; i++) {
		switch (swizzle[i]) {
		case UTIL_FORMAT_SWIZZLE_Y:
			result |= swizzle_bit[1] << swizzle_shift[i];
			break;
		case UTIL_FORMAT_SWIZZLE_Z:
			result |= swizzle_bit[2] << swizzle_shift[i];
			break;
		case UTIL_FORMAT_SWIZZLE_W:
			result |= swizzle_bit[3] << swizzle_shift[i];
			break;
		case UTIL_FORMAT_SWIZZLE_0:
			result |= V_038010_SQ_SEL_0 << swizzle_shift[i];
			break;
		case UTIL_FORMAT_SWIZZLE_1:
			result |= V_038010_SQ_SEL_1 << swizzle_shift[i];
			break;
		default: /* UTIL_FORMAT_SWIZZLE_X */
			result |= swizzle_bit[0] << swizzle_shift[i];
		}
	}
	return result;
}

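/*
 * Illustrative example (not part of the original file): with an identity
 * format swizzle {X,Y,Z,W} and no view override, the loop above packs the
 * four selectors into bits 16/19/22/25 of SQ_TEX_RESOURCE_WORD4 (or bits
 * 3/6/9/12 of the vertex-fetch word when vtx is TRUE), i.e. for the texture
 * case:
 *
 *     result == (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25)
 */
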
/* texture format translate */
uint32_t r600_translate_texformat(struct pipe_screen *screen,
				  enum pipe_format format,
				  const unsigned char *swizzle_view,
				  uint32_t *word4_p, uint32_t *yuv_format_p)
{
	struct r600_screen *rscreen = (struct r600_screen *)screen;
	uint32_t result = 0, word4 = 0, yuv_format = 0;
	const struct util_format_description *desc;
	boolean uniform = TRUE;
	bool enable_s3tc = rscreen->b.info.drm_minor >= 9;
	bool is_srgb_valid = FALSE;
	const unsigned char swizzle_xxxx[4] = {0, 0, 0, 0};
	const unsigned char swizzle_yyyy[4] = {1, 1, 1, 1};

	int i;
	const uint32_t sign_bit[4] = {
		S_038010_FORMAT_COMP_X(V_038010_SQ_FORMAT_COMP_SIGNED),
		S_038010_FORMAT_COMP_Y(V_038010_SQ_FORMAT_COMP_SIGNED),
		S_038010_FORMAT_COMP_Z(V_038010_SQ_FORMAT_COMP_SIGNED),
		S_038010_FORMAT_COMP_W(V_038010_SQ_FORMAT_COMP_SIGNED)
	};
	desc = util_format_description(format);

	/* Depth and stencil swizzling is handled separately. */
	if (desc->colorspace != UTIL_FORMAT_COLORSPACE_ZS) {
		word4 |= r600_get_swizzle_combined(desc->swizzle, swizzle_view, FALSE);
	}

	/* Colorspace (return non-RGB formats directly). */
	switch (desc->colorspace) {
	/* Depth stencil formats */
	case UTIL_FORMAT_COLORSPACE_ZS:
		switch (format) {
		/* Depth sampler formats. */
		case PIPE_FORMAT_Z16_UNORM:
			word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE);
			result = FMT_16;
			goto out_word4;

		case PIPE_FORMAT_Z24X8_UNORM:
		case PIPE_FORMAT_Z24_UNORM_S8_UINT:
			word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE);
			result = FMT_8_24;
			goto out_word4;

		case PIPE_FORMAT_X8Z24_UNORM:
		case PIPE_FORMAT_S8_UINT_Z24_UNORM:
			if (rscreen->b.chip_class < EVERGREEN)
				goto out_unknown;
			word4 |= r600_get_swizzle_combined(swizzle_yyyy, swizzle_view, FALSE);
			result = FMT_24_8;
			goto out_word4;

		case PIPE_FORMAT_Z32_FLOAT:
			word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE);
			result = FMT_32_FLOAT;
			goto out_word4;
		case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
			word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE);
			result = FMT_X24_8_32_FLOAT;
			goto out_word4;
		/* Stencil sampler formats. */
		case PIPE_FORMAT_S8_UINT:
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
			word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE);
			result = FMT_8;
			goto out_word4;
		case PIPE_FORMAT_X24S8_UINT:
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
			word4 |= r600_get_swizzle_combined(swizzle_yyyy, swizzle_view, FALSE);
			result = FMT_8_24;
			goto out_word4;
		case PIPE_FORMAT_S8X24_UINT:
			if (rscreen->b.chip_class < EVERGREEN)
				goto out_unknown;
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
			word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE);
			result = FMT_24_8;
			goto out_word4;
		case PIPE_FORMAT_X32_S8X24_UINT:
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
			word4 |= r600_get_swizzle_combined(swizzle_yyyy, swizzle_view, FALSE);
			result = FMT_X24_8_32_FLOAT;
			goto out_word4;
		default:
			goto out_unknown;
		}

	case UTIL_FORMAT_COLORSPACE_YUV:
		yuv_format |= (1 << 30);
		switch (format) {
		case PIPE_FORMAT_UYVY:
		case PIPE_FORMAT_YUYV:
		default:
			break;
		}
		goto out_unknown; /* XXX */

	case UTIL_FORMAT_COLORSPACE_SRGB:
		word4 |= S_038010_FORCE_DEGAMMA(1);
		break;

	default:
		break;
	}

	if (desc->layout == UTIL_FORMAT_LAYOUT_RGTC) {
		if (!enable_s3tc)
			goto out_unknown;

		switch (format) {
		case PIPE_FORMAT_RGTC1_SNORM:
		case PIPE_FORMAT_LATC1_SNORM:
			word4 |= sign_bit[0];
		case PIPE_FORMAT_RGTC1_UNORM:
		case PIPE_FORMAT_LATC1_UNORM:
			result = FMT_BC4;
			goto out_word4;
		case PIPE_FORMAT_RGTC2_SNORM:
		case PIPE_FORMAT_LATC2_SNORM:
			word4 |= sign_bit[0] | sign_bit[1];
		case PIPE_FORMAT_RGTC2_UNORM:
		case PIPE_FORMAT_LATC2_UNORM:
			result = FMT_BC5;
			goto out_word4;
		default:
			goto out_unknown;
		}
	}

	if (desc->layout == UTIL_FORMAT_LAYOUT_S3TC) {

		if (!enable_s3tc)
			goto out_unknown;

		if (!util_format_s3tc_enabled) {
			goto out_unknown;
		}

		switch (format) {
		case PIPE_FORMAT_DXT1_RGB:
		case PIPE_FORMAT_DXT1_RGBA:
		case PIPE_FORMAT_DXT1_SRGB:
		case PIPE_FORMAT_DXT1_SRGBA:
			result = FMT_BC1;
			is_srgb_valid = TRUE;
			goto out_word4;
		case PIPE_FORMAT_DXT3_RGBA:
		case PIPE_FORMAT_DXT3_SRGBA:
			result = FMT_BC2;
			is_srgb_valid = TRUE;
			goto out_word4;
		case PIPE_FORMAT_DXT5_RGBA:
		case PIPE_FORMAT_DXT5_SRGBA:
			result = FMT_BC3;
			is_srgb_valid = TRUE;
			goto out_word4;
		default:
			goto out_unknown;
		}
	}

	if (desc->layout == UTIL_FORMAT_LAYOUT_BPTC) {
		if (!enable_s3tc)
			goto out_unknown;

		if (rscreen->b.chip_class < EVERGREEN)
			goto out_unknown;

		switch (format) {
		case PIPE_FORMAT_BPTC_RGBA_UNORM:
		case PIPE_FORMAT_BPTC_SRGBA:
			result = FMT_BC7;
			is_srgb_valid = TRUE;
			goto out_word4;
		case PIPE_FORMAT_BPTC_RGB_FLOAT:
			word4 |= sign_bit[0] | sign_bit[1] | sign_bit[2];
			/* fall through */
		case PIPE_FORMAT_BPTC_RGB_UFLOAT:
			result = FMT_BC6;
			goto out_word4;
		default:
			goto out_unknown;
		}
	}

	if (desc->layout == UTIL_FORMAT_LAYOUT_SUBSAMPLED) {
		switch (format) {
		case PIPE_FORMAT_R8G8_B8G8_UNORM:
		case PIPE_FORMAT_G8R8_B8R8_UNORM:
			result = FMT_GB_GR;
			goto out_word4;
		case PIPE_FORMAT_G8R8_G8B8_UNORM:
		case PIPE_FORMAT_R8G8_R8B8_UNORM:
			result = FMT_BG_RG;
			goto out_word4;
		default:
			goto out_unknown;
		}
	}

	if (format == PIPE_FORMAT_R9G9B9E5_FLOAT) {
		result = FMT_5_9_9_9_SHAREDEXP;
		goto out_word4;
	} else if (format == PIPE_FORMAT_R11G11B10_FLOAT) {
		result = FMT_10_11_11_FLOAT;
		goto out_word4;
	}

	for (i = 0; i < desc->nr_channels; i++) {
		if (desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
			word4 |= sign_bit[i];
		}
	}

	/* R8G8Bx_SNORM - XXX CxV8U8 */

	/* See whether the components are of the same size. */
	for (i = 1; i < desc->nr_channels; i++) {
		uniform = uniform && desc->channel[0].size == desc->channel[i].size;
	}

	/* Non-uniform formats. */
	if (!uniform) {
		if (desc->colorspace != UTIL_FORMAT_COLORSPACE_SRGB &&
		    desc->channel[0].pure_integer)
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
		switch(desc->nr_channels) {
		case 3:
			if (desc->channel[0].size == 5 &&
			    desc->channel[1].size == 6 &&
			    desc->channel[2].size == 5) {
				result = FMT_5_6_5;
				goto out_word4;
			}
			goto out_unknown;
		case 4:
			if (desc->channel[0].size == 5 &&
			    desc->channel[1].size == 5 &&
			    desc->channel[2].size == 5 &&
			    desc->channel[3].size == 1) {
				result = FMT_1_5_5_5;
				goto out_word4;
			}
			if (desc->channel[0].size == 10 &&
			    desc->channel[1].size == 10 &&
			    desc->channel[2].size == 10 &&
			    desc->channel[3].size == 2) {
				result = FMT_2_10_10_10;
				goto out_word4;
			}
			goto out_unknown;
		}
		goto out_unknown;
	}

	/* Find the first non-VOID channel. */
	for (i = 0; i < 4; i++) {
		if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
			break;
		}
	}

	if (i == 4)
		goto out_unknown;

	/* uniform formats */
	switch (desc->channel[i].type) {
	case UTIL_FORMAT_TYPE_UNSIGNED:
	case UTIL_FORMAT_TYPE_SIGNED:
#if 0
		if (!desc->channel[i].normalized &&
		    desc->colorspace != UTIL_FORMAT_COLORSPACE_SRGB) {
			goto out_unknown;
		}
#endif
		if (desc->colorspace != UTIL_FORMAT_COLORSPACE_SRGB &&
		    desc->channel[i].pure_integer)
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);

		switch (desc->channel[i].size) {
		case 4:
			switch (desc->nr_channels) {
			case 2:
				result = FMT_4_4;
				goto out_word4;
			case 4:
				result = FMT_4_4_4_4;
				goto out_word4;
			}
			goto out_unknown;
		case 8:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_8;
				goto out_word4;
			case 2:
				result = FMT_8_8;
				goto out_word4;
			case 4:
				result = FMT_8_8_8_8;
				is_srgb_valid = TRUE;
				goto out_word4;
			}
			goto out_unknown;
		case 16:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_16;
				goto out_word4;
			case 2:
				result = FMT_16_16;
				goto out_word4;
			case 4:
				result = FMT_16_16_16_16;
				goto out_word4;
			}
			goto out_unknown;
		case 32:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_32;
				goto out_word4;
			case 2:
				result = FMT_32_32;
				goto out_word4;
			case 4:
				result = FMT_32_32_32_32;
				goto out_word4;
			}
		}
		goto out_unknown;

	case UTIL_FORMAT_TYPE_FLOAT:
		switch (desc->channel[i].size) {
		case 16:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_16_FLOAT;
				goto out_word4;
			case 2:
				result = FMT_16_16_FLOAT;
				goto out_word4;
			case 4:
				result = FMT_16_16_16_16_FLOAT;
				goto out_word4;
			}
			goto out_unknown;
		case 32:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_32_FLOAT;
				goto out_word4;
			case 2:
				result = FMT_32_32_FLOAT;
				goto out_word4;
			case 4:
				result = FMT_32_32_32_32_FLOAT;
				goto out_word4;
			}
		}
		goto out_unknown;
	}

out_word4:

	if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB && !is_srgb_valid)
		return ~0;
	if (word4_p)
		*word4_p = word4;
	if (yuv_format_p)
		*yuv_format_p = yuv_format;
	return result;
out_unknown:
	/* R600_ERR("Unable to handle texformat %d %s\n", format, util_format_name(format)); */
	return ~0;
}

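/*
 * Illustrative usage (not part of the original file): sampler-view creation
 * code typically calls the translator like this and treats ~0 as
 * "unsupported":
 *
 *     uint32_t word4 = 0, yuv_format = 0;
 *     uint32_t fmt = r600_translate_texformat(screen, pformat, view_swizzle,
 *                                             &word4, &yuv_format);
 *     if (fmt == ~0U)
 *         return NULL;
 *
 * where fmt is the FMT_* data format and word4 carries the swizzle and
 * number-format bits for SQ_TEX_RESOURCE_WORD4.
 */
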
uint32_t r600_translate_colorformat(enum chip_class chip, enum pipe_format format)
{
	const struct util_format_description *desc = util_format_description(format);
	int channel = util_format_get_first_non_void_channel(format);
	bool is_float;

#define HAS_SIZE(x,y,z,w) \
	(desc->channel[0].size == (x) && desc->channel[1].size == (y) && \
	 desc->channel[2].size == (z) && desc->channel[3].size == (w))

	if (format == PIPE_FORMAT_R11G11B10_FLOAT) /* isn't plain */
		return V_0280A0_COLOR_10_11_11_FLOAT;

	if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN ||
	    channel == -1)
		return ~0U;

	is_float = desc->channel[channel].type == UTIL_FORMAT_TYPE_FLOAT;

	switch (desc->nr_channels) {
	case 1:
		switch (desc->channel[0].size) {
		case 8:
			return V_0280A0_COLOR_8;
		case 16:
			if (is_float)
				return V_0280A0_COLOR_16_FLOAT;
			else
				return V_0280A0_COLOR_16;
		case 32:
			if (is_float)
				return V_0280A0_COLOR_32_FLOAT;
			else
				return V_0280A0_COLOR_32;
		}
		break;
	case 2:
		if (desc->channel[0].size == desc->channel[1].size) {
			switch (desc->channel[0].size) {
			case 4:
				if (chip <= R700)
					return V_0280A0_COLOR_4_4;
				else
					return ~0U; /* removed on Evergreen */
			case 8:
				return V_0280A0_COLOR_8_8;
			case 16:
				if (is_float)
					return V_0280A0_COLOR_16_16_FLOAT;
				else
					return V_0280A0_COLOR_16_16;
			case 32:
				if (is_float)
					return V_0280A0_COLOR_32_32_FLOAT;
				else
					return V_0280A0_COLOR_32_32;
			}
		} else if (HAS_SIZE(8,24,0,0)) {
			return V_0280A0_COLOR_24_8;
		} else if (HAS_SIZE(24,8,0,0)) {
			return V_0280A0_COLOR_8_24;
		}
		break;
	case 3:
		if (HAS_SIZE(5,6,5,0)) {
			return V_0280A0_COLOR_5_6_5;
		} else if (HAS_SIZE(32,8,24,0)) {
			return V_0280A0_COLOR_X24_8_32_FLOAT;
		}
		break;
	case 4:
		if (desc->channel[0].size == desc->channel[1].size &&
		    desc->channel[0].size == desc->channel[2].size &&
		    desc->channel[0].size == desc->channel[3].size) {
			switch (desc->channel[0].size) {
			case 4:
				return V_0280A0_COLOR_4_4_4_4;
			case 8:
				return V_0280A0_COLOR_8_8_8_8;
			case 16:
				if (is_float)
					return V_0280A0_COLOR_16_16_16_16_FLOAT;
				else
					return V_0280A0_COLOR_16_16_16_16;
			case 32:
				if (is_float)
					return V_0280A0_COLOR_32_32_32_32_FLOAT;
				else
					return V_0280A0_COLOR_32_32_32_32;
			}
		} else if (HAS_SIZE(5,5,5,1)) {
			return V_0280A0_COLOR_1_5_5_5;
		} else if (HAS_SIZE(10,10,10,2)) {
			return V_0280A0_COLOR_2_10_10_10;
		}
		break;
	}
	return ~0U;
}

uint32_t r600_colorformat_endian_swap(uint32_t colorformat)
{
	if (R600_BIG_ENDIAN) {
		switch(colorformat) {
		/* 8-bit buffers. */
		case V_0280A0_COLOR_4_4:
		case V_0280A0_COLOR_8:
			return ENDIAN_NONE;

		/* 16-bit buffers. */
		case V_0280A0_COLOR_5_6_5:
		case V_0280A0_COLOR_1_5_5_5:
		case V_0280A0_COLOR_4_4_4_4:
		case V_0280A0_COLOR_16:
		case V_0280A0_COLOR_8_8:
			return ENDIAN_8IN16;

		/* 32-bit buffers. */
		case V_0280A0_COLOR_8_8_8_8:
		case V_0280A0_COLOR_2_10_10_10:
		case V_0280A0_COLOR_8_24:
		case V_0280A0_COLOR_24_8:
		case V_0280A0_COLOR_32_FLOAT:
		case V_0280A0_COLOR_16_16_FLOAT:
		case V_0280A0_COLOR_16_16:
			return ENDIAN_8IN32;

		/* 64-bit buffers. */
		case V_0280A0_COLOR_16_16_16_16:
		case V_0280A0_COLOR_16_16_16_16_FLOAT:
			return ENDIAN_8IN16;

		case V_0280A0_COLOR_32_32_FLOAT:
		case V_0280A0_COLOR_32_32:
		case V_0280A0_COLOR_X24_8_32_FLOAT:
			return ENDIAN_8IN32;

		/* 128-bit buffers. */
		case V_0280A0_COLOR_32_32_32_32_FLOAT:
		case V_0280A0_COLOR_32_32_32_32:
			return ENDIAN_8IN32;
		default:
			return ENDIAN_NONE; /* Unsupported. */
		}
	} else {
		return ENDIAN_NONE;
	}
}

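/*
 * Illustrative note (not part of the original file): the swap mode above
 * follows the channel width of the colorbuffer format rather than its total
 * size.  COLOR_16_16_16_16 is four 16-bit channels, so bytes are swapped
 * within each 16-bit unit (ENDIAN_8IN16), while COLOR_32_32 swaps within
 * full 32-bit dwords (ENDIAN_8IN32).  On little-endian builds
 * R600_BIG_ENDIAN is 0 and this helper always returns ENDIAN_NONE.
 */
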
static void r600_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
{
	struct r600_context *rctx = (struct r600_context*)ctx;
	struct r600_resource *rbuffer = r600_resource(buf);
	unsigned i, shader, mask, alignment = rbuffer->buf->alignment;
	struct r600_pipe_sampler_view *view;

	/* Reallocate the buffer in the same pipe_resource. */
	r600_init_resource(&rctx->screen->b, rbuffer, rbuffer->b.b.width0,
			   alignment, TRUE);

	/* We changed the buffer, now we need to bind it where the old one was bound. */
	/* Vertex buffers. */
	mask = rctx->vertex_buffer_state.enabled_mask;
	while (mask) {
		i = u_bit_scan(&mask);
		if (rctx->vertex_buffer_state.vb[i].buffer == &rbuffer->b.b) {
			rctx->vertex_buffer_state.dirty_mask |= 1 << i;
			r600_vertex_buffers_dirty(rctx);
		}
	}
	/* Streamout buffers. */
	for (i = 0; i < rctx->b.streamout.num_targets; i++) {
		if (rctx->b.streamout.targets[i]->b.buffer == &rbuffer->b.b) {
			if (rctx->b.streamout.begin_emitted) {
				r600_emit_streamout_end(&rctx->b);
			}
			rctx->b.streamout.append_bitmask = rctx->b.streamout.enabled_mask;
			r600_streamout_buffers_dirty(&rctx->b);
		}
	}

	/* Constant buffers. */
	for (shader = 0; shader < PIPE_SHADER_TYPES; shader++) {
		struct r600_constbuf_state *state = &rctx->constbuf_state[shader];
		bool found = false;
		uint32_t mask = state->enabled_mask;

		while (mask) {
			unsigned i = u_bit_scan(&mask);
			if (state->cb[i].buffer == &rbuffer->b.b) {
				found = true;
				state->dirty_mask |= 1 << i;
			}
		}
		if (found) {
			r600_constant_buffers_dirty(rctx, state);
		}
	}

	/* Texture buffer objects - update the virtual addresses in descriptors. */
	LIST_FOR_EACH_ENTRY(view, &rctx->b.texture_buffers, list) {
		if (view->base.texture == &rbuffer->b.b) {
			unsigned stride = util_format_get_blocksize(view->base.format);
			uint64_t offset = (uint64_t)view->base.u.buf.first_element * stride;
			uint64_t va = rbuffer->gpu_address + offset;

			view->tex_resource_words[0] = va;
			view->tex_resource_words[2] &= C_038008_BASE_ADDRESS_HI;
			view->tex_resource_words[2] |= S_038008_BASE_ADDRESS_HI(va >> 32);
		}
	}
	/* Texture buffer objects - make bindings dirty if needed. */
	for (shader = 0; shader < PIPE_SHADER_TYPES; shader++) {
		struct r600_samplerview_state *state = &rctx->samplers[shader].views;
		bool found = false;
		uint32_t mask = state->enabled_mask;

		while (mask) {
			unsigned i = u_bit_scan(&mask);
			if (state->views[i]->base.texture == &rbuffer->b.b) {
				found = true;
				state->dirty_mask |= 1 << i;
			}
		}
		if (found) {
			r600_sampler_views_dirty(rctx, state);
		}
	}
}

static void r600_set_occlusion_query_state(struct pipe_context *ctx, bool enable)
{
	struct r600_context *rctx = (struct r600_context*)ctx;

	if (rctx->db_misc_state.occlusion_query_enabled != enable) {
		rctx->db_misc_state.occlusion_query_enabled = enable;
		rctx->db_misc_state.atom.dirty = true;
	}
}

static void r600_need_gfx_cs_space(struct pipe_context *ctx, unsigned num_dw,
				   bool include_draw_vbo)
{
	r600_need_cs_space((struct r600_context*)ctx, num_dw, include_draw_vbo);
}

/* keep this at the end of this file, please */
void r600_init_common_state_functions(struct r600_context *rctx)
{
	rctx->b.b.create_fs_state = r600_create_ps_state;
	rctx->b.b.create_vs_state = r600_create_vs_state;
	rctx->b.b.create_gs_state = r600_create_gs_state;
	rctx->b.b.create_vertex_elements_state = r600_create_vertex_fetch_shader;
	rctx->b.b.bind_blend_state = r600_bind_blend_state;
	rctx->b.b.bind_depth_stencil_alpha_state = r600_bind_dsa_state;
	rctx->b.b.bind_sampler_states = r600_bind_sampler_states;
	rctx->b.b.bind_fs_state = r600_bind_ps_state;
	rctx->b.b.bind_rasterizer_state = r600_bind_rs_state;
	rctx->b.b.bind_vertex_elements_state = r600_bind_vertex_elements;
	rctx->b.b.bind_vs_state = r600_bind_vs_state;
	rctx->b.b.bind_gs_state = r600_bind_gs_state;
	rctx->b.b.delete_blend_state = r600_delete_blend_state;
	rctx->b.b.delete_depth_stencil_alpha_state = r600_delete_dsa_state;
	rctx->b.b.delete_fs_state = r600_delete_ps_state;
	rctx->b.b.delete_rasterizer_state = r600_delete_rs_state;
	rctx->b.b.delete_sampler_state = r600_delete_sampler_state;
	rctx->b.b.delete_vertex_elements_state = r600_delete_vertex_elements;
	rctx->b.b.delete_vs_state = r600_delete_vs_state;
	rctx->b.b.delete_gs_state = r600_delete_gs_state;
	rctx->b.b.set_blend_color = r600_set_blend_color;
	rctx->b.b.set_clip_state = r600_set_clip_state;
	rctx->b.b.set_constant_buffer = r600_set_constant_buffer;
	rctx->b.b.set_sample_mask = r600_set_sample_mask;
	rctx->b.b.set_stencil_ref = r600_set_pipe_stencil_ref;
	rctx->b.b.set_viewport_states = r600_set_viewport_states;
	rctx->b.b.set_vertex_buffers = r600_set_vertex_buffers;
	rctx->b.b.set_index_buffer = r600_set_index_buffer;
	rctx->b.b.set_sampler_views = r600_set_sampler_views;
	rctx->b.b.sampler_view_destroy = r600_sampler_view_destroy;
	rctx->b.b.texture_barrier = r600_texture_barrier;
	rctx->b.b.set_stream_output_targets = r600_set_streamout_targets;
	rctx->b.b.draw_vbo = r600_draw_vbo;
	rctx->b.invalidate_buffer = r600_invalidate_buffer;
	rctx->b.set_occlusion_query_state = r600_set_occlusion_query_state;
	rctx->b.need_gfx_cs_space = r600_need_gfx_cs_space;
}

void r600_trace_emit(struct r600_context *rctx)
{
	struct r600_screen *rscreen = rctx->screen;
	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
	uint64_t va;
	uint32_t reloc;

	va = rscreen->b.trace_bo->gpu_address;
	reloc = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rscreen->b.trace_bo,
				      RADEON_USAGE_READWRITE, RADEON_PRIO_MIN);
	radeon_emit(cs, PKT3(PKT3_MEM_WRITE, 3, 0));
	radeon_emit(cs, va & 0xFFFFFFFFUL);
	radeon_emit(cs, (va >> 32UL) & 0xFFUL);
	radeon_emit(cs, cs->cdw);
	radeon_emit(cs, rscreen->b.cs_count);
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, reloc);
}