/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 */
#include "r600_formats.h"
#include "r600d.h"

#include "util/u_blitter.h"
#include "util/u_upload_mgr.h"
#include "tgsi/tgsi_parse.h"
static void r600_emit_command_buffer(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->cs;
	struct r600_command_buffer *cb = (struct r600_command_buffer*)atom;

	assert(cs->cdw + cb->atom.num_dw <= RADEON_MAX_CMDBUF_DWORDS);
	memcpy(cs->buf + cs->cdw, cb->buf, 4 * cb->atom.num_dw);
	cs->cdw += cb->atom.num_dw;
}

void r600_init_command_buffer(struct r600_command_buffer *cb, unsigned num_dw, enum r600_atom_flags flags)
{
	cb->atom.emit = r600_emit_command_buffer;
	cb->atom.num_dw = 0;
	cb->atom.flags = flags;
	cb->buf = CALLOC(1, 4 * num_dw);
	cb->max_num_dw = num_dw;
}

void r600_release_command_buffer(struct r600_command_buffer *cb)
{
	FREE(cb->buf);
}

static void r600_emit_surface_sync(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->cs;
	struct r600_surface_sync_cmd *a = (struct r600_surface_sync_cmd*)atom;

	cs->buf[cs->cdw++] = PKT3(PKT3_SURFACE_SYNC, 3, 0);
	cs->buf[cs->cdw++] = a->flush_flags;  /* CP_COHER_CNTL */
	cs->buf[cs->cdw++] = 0xffffffff;      /* CP_COHER_SIZE */
	cs->buf[cs->cdw++] = 0;               /* CP_COHER_BASE */
	cs->buf[cs->cdw++] = 0x0000000A;      /* POLL_INTERVAL */

	a->flush_flags = 0;
}

static void r600_emit_r6xx_flush_and_inv(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->cs;
	cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
	cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0);
}

void r600_init_atom(struct r600_atom *atom,
		    void (*emit)(struct r600_context *ctx, struct r600_atom *state),
		    unsigned num_dw, enum r600_atom_flags flags)
{
	atom->emit = emit;
	atom->num_dw = num_dw;
	atom->flags = flags;
}

static void r600_emit_alphatest_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->cs;
	struct r600_alphatest_state *a = (struct r600_alphatest_state*)atom;
	unsigned alpha_ref = a->sx_alpha_ref;

	if (rctx->chip_class >= EVERGREEN && a->cb0_export_16bpc) {
		/* Drop the low mantissa bits of the reference value so it matches
		 * the reduced precision of 16-bit-per-channel exports. */
		alpha_ref &= ~0x1FFF;
	}

	r600_write_context_reg(cs, R_028410_SX_ALPHA_TEST_CONTROL,
			       a->sx_alpha_test_control |
			       S_028410_ALPHA_TEST_BYPASS(a->bypass));
	r600_write_context_reg(cs, R_028438_SX_ALPHA_REF, alpha_ref);
}

void r600_init_common_atoms(struct r600_context *rctx)
{
	r600_init_atom(&rctx->surface_sync_cmd.atom, r600_emit_surface_sync, 5, EMIT_EARLY);
	r600_init_atom(&rctx->r6xx_flush_and_inv_cmd, r600_emit_r6xx_flush_and_inv, 2, EMIT_EARLY);
	r600_init_atom(&rctx->alphatest_state.atom, r600_emit_alphatest_state, 3, 0);
	r600_atom_dirty(rctx, &rctx->alphatest_state.atom);
}
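
/* For reference, the first two num_dw budgets above match the emit callbacks
 * earlier in this file: r600_emit_surface_sync writes exactly 5 dwords (the
 * SURFACE_SYNC header plus CP_COHER_CNTL/SIZE/BASE and POLL_INTERVAL) and
 * r600_emit_r6xx_flush_and_inv writes 2 (the EVENT_WRITE header plus the
 * event dword). */
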
unsigned r600_get_cb_flush_flags(struct r600_context *rctx)
{
	unsigned flags = 0;

	if (rctx->framebuffer.nr_cbufs) {
		flags |= S_0085F0_CB_ACTION_ENA(1) |
			 (((1 << rctx->framebuffer.nr_cbufs) - 1) << S_0085F0_CB0_DEST_BASE_ENA_SHIFT);
	}

	/* Workaround for broken flushing on some R6xx chipsets. */
	if (rctx->family == CHIP_RV670 ||
	    rctx->family == CHIP_RS780 ||
	    rctx->family == CHIP_RS880) {
		flags |= S_0085F0_CB1_DEST_BASE_ENA(1) |
			 S_0085F0_DEST_BASE_0_ENA(1);
	}

	return flags;
}
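
/* Worked example (illustrative only): with framebuffer.nr_cbufs == 2, the
 * expression ((1 << 2) - 1) << S_0085F0_CB0_DEST_BASE_ENA_SHIFT sets two
 * consecutive CBn_DEST_BASE_ENA bits in CP_COHER_CNTL, so only the two bound
 * color buffers are covered by the following SURFACE_SYNC. */
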
void r600_texture_barrier(struct pipe_context *ctx)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->surface_sync_cmd.flush_flags |= S_0085F0_TC_ACTION_ENA(1) | r600_get_cb_flush_flags(rctx);
	r600_atom_dirty(rctx, &rctx->surface_sync_cmd.atom);
}

static bool r600_conv_pipe_prim(unsigned pprim, unsigned *prim)
{
	static const int prim_conv[] = {
		V_008958_DI_PT_POINTLIST,
		V_008958_DI_PT_LINELIST,
		V_008958_DI_PT_LINELOOP,
		V_008958_DI_PT_LINESTRIP,
		V_008958_DI_PT_TRILIST,
		V_008958_DI_PT_TRISTRIP,
		V_008958_DI_PT_TRIFAN,
		V_008958_DI_PT_QUADLIST,
		V_008958_DI_PT_QUADSTRIP,
		V_008958_DI_PT_POLYGON,
		-1,
		-1,
		-1,
		-1
	};

	*prim = prim_conv[pprim];
	if (*prim == -1) {
		fprintf(stderr, "%s:%d unsupported %d\n", __func__, __LINE__, pprim);
		return false;
	}
	return true;
}
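
/* The prim_conv table above is indexed by the gallium PIPE_PRIM_* value, so
 * for example PIPE_PRIM_TRIANGLES (4) maps to V_008958_DI_PT_TRILIST and
 * PIPE_PRIM_QUAD_STRIP (8) maps to V_008958_DI_PT_QUADSTRIP; primitives that
 * fall into the -1 slots are rejected above. */
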
/* common state between evergreen and r600 */
void r600_bind_blend_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_blend *blend = (struct r600_pipe_blend *)state;
	struct r600_pipe_state *rstate;
	bool update_cb = false;

	if (state == NULL)
		return;
	rstate = &blend->rstate;
	rctx->states[rstate->id] = rstate;
	rctx->dual_src_blend = blend->dual_src_blend;
	rctx->alpha_to_one = blend->alpha_to_one;
	r600_context_pipe_state_set(rctx, rstate);

	if (rctx->cb_misc_state.blend_colormask != blend->cb_target_mask) {
		rctx->cb_misc_state.blend_colormask = blend->cb_target_mask;
		update_cb = true;
	}
	if (rctx->chip_class <= R700 &&
	    rctx->cb_misc_state.cb_color_control != blend->cb_color_control) {
		rctx->cb_misc_state.cb_color_control = blend->cb_color_control;
		update_cb = true;
	}
	if (rctx->cb_misc_state.dual_src_blend != blend->dual_src_blend) {
		rctx->cb_misc_state.dual_src_blend = blend->dual_src_blend;
		update_cb = true;
	}
	if (update_cb) {
		r600_atom_dirty(rctx, &rctx->cb_misc_state.atom);
	}
}

void r600_set_blend_color(struct pipe_context *ctx,
			  const struct pipe_blend_color *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_state *rstate = CALLOC_STRUCT(r600_pipe_state);

	if (rstate == NULL)
		return;

	rstate->id = R600_PIPE_STATE_BLEND_COLOR;
	r600_pipe_state_add_reg(rstate, R_028414_CB_BLEND_RED, fui(state->color[0]));
	r600_pipe_state_add_reg(rstate, R_028418_CB_BLEND_GREEN, fui(state->color[1]));
	r600_pipe_state_add_reg(rstate, R_02841C_CB_BLEND_BLUE, fui(state->color[2]));
	r600_pipe_state_add_reg(rstate, R_028420_CB_BLEND_ALPHA, fui(state->color[3]));

	free(rctx->states[R600_PIPE_STATE_BLEND_COLOR]);
	rctx->states[R600_PIPE_STATE_BLEND_COLOR] = rstate;
	r600_context_pipe_state_set(rctx, rstate);
}

static void r600_set_stencil_ref(struct pipe_context *ctx,
				 const struct r600_stencil_ref *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_state *rstate = CALLOC_STRUCT(r600_pipe_state);

	if (rstate == NULL)
		return;

	rstate->id = R600_PIPE_STATE_STENCIL_REF;
	r600_pipe_state_add_reg(rstate,
				R_028430_DB_STENCILREFMASK,
				S_028430_STENCILREF(state->ref_value[0]) |
				S_028430_STENCILMASK(state->valuemask[0]) |
				S_028430_STENCILWRITEMASK(state->writemask[0]));
	r600_pipe_state_add_reg(rstate,
				R_028434_DB_STENCILREFMASK_BF,
				S_028434_STENCILREF_BF(state->ref_value[1]) |
				S_028434_STENCILMASK_BF(state->valuemask[1]) |
				S_028434_STENCILWRITEMASK_BF(state->writemask[1]));

	free(rctx->states[R600_PIPE_STATE_STENCIL_REF]);
	rctx->states[R600_PIPE_STATE_STENCIL_REF] = rstate;
	r600_context_pipe_state_set(rctx, rstate);
}

void r600_set_pipe_stencil_ref(struct pipe_context *ctx,
			       const struct pipe_stencil_ref *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_dsa *dsa = (struct r600_pipe_dsa *)rctx->states[R600_PIPE_STATE_DSA];
	struct r600_stencil_ref ref;

	rctx->stencil_ref = *state;

	if (!dsa)
		return;

	ref.ref_value[0] = state->ref_value[0];
	ref.ref_value[1] = state->ref_value[1];
	ref.valuemask[0] = dsa->valuemask[0];
	ref.valuemask[1] = dsa->valuemask[1];
	ref.writemask[0] = dsa->writemask[0];
	ref.writemask[1] = dsa->writemask[1];

	r600_set_stencil_ref(ctx, &ref);
}

void r600_bind_dsa_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_dsa *dsa = state;
	struct r600_pipe_state *rstate;
	struct r600_stencil_ref ref;

	if (state == NULL)
		return;

	rstate = &dsa->rstate;
	rctx->states[rstate->id] = rstate;
	r600_context_pipe_state_set(rctx, rstate);

	ref.ref_value[0] = rctx->stencil_ref.ref_value[0];
	ref.ref_value[1] = rctx->stencil_ref.ref_value[1];
	ref.valuemask[0] = dsa->valuemask[0];
	ref.valuemask[1] = dsa->valuemask[1];
	ref.writemask[0] = dsa->writemask[0];
	ref.writemask[1] = dsa->writemask[1];

	r600_set_stencil_ref(ctx, &ref);

	/* Update alphatest state. */
	if (rctx->alphatest_state.sx_alpha_test_control != dsa->sx_alpha_test_control ||
	    rctx->alphatest_state.sx_alpha_ref != dsa->alpha_ref) {
		rctx->alphatest_state.sx_alpha_test_control = dsa->sx_alpha_test_control;
		rctx->alphatest_state.sx_alpha_ref = dsa->alpha_ref;
		r600_atom_dirty(rctx, &rctx->alphatest_state.atom);
	}
}

void r600_set_max_scissor(struct r600_context *rctx)
{
	/* Set a scissor state such that it doesn't do anything. */
	struct pipe_scissor_state scissor;
	scissor.minx = 0;
	scissor.miny = 0;
	scissor.maxx = 8192;
	scissor.maxy = 8192;

	r600_set_scissor_state(rctx, &scissor);
}

void r600_bind_rs_state(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_rasterizer *rs = (struct r600_pipe_rasterizer *)state;
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (state == NULL)
		return;

	rctx->sprite_coord_enable = rs->sprite_coord_enable;
	rctx->two_side = rs->two_side;
	rctx->pa_sc_line_stipple = rs->pa_sc_line_stipple;
	rctx->pa_cl_clip_cntl = rs->pa_cl_clip_cntl;
	rctx->multisample_enable = rs->multisample_enable;

	rctx->rasterizer = rs;

	rctx->states[rs->rstate.id] = &rs->rstate;
	r600_context_pipe_state_set(rctx, &rs->rstate);

	if (rctx->chip_class >= EVERGREEN) {
		evergreen_polygon_offset_update(rctx);
	} else {
		r600_polygon_offset_update(rctx);
	}

	/* Workaround for a missing scissor enable on r600. */
	if (rctx->chip_class == R600) {
		if (rs->scissor_enable != rctx->scissor_enable) {
			rctx->scissor_enable = rs->scissor_enable;

			if (rs->scissor_enable) {
				r600_set_scissor_state(rctx, &rctx->scissor_state);
			} else {
				r600_set_max_scissor(rctx);
			}
		}
	}
}

void r600_delete_rs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_rasterizer *rs = (struct r600_pipe_rasterizer *)state;

	if (rctx->rasterizer == rs) {
		rctx->rasterizer = NULL;
	}
	if (rctx->states[rs->rstate.id] == &rs->rstate) {
		rctx->states[rs->rstate.id] = NULL;
	}
	free(rs);
}

void r600_sampler_view_destroy(struct pipe_context *ctx,
			       struct pipe_sampler_view *state)
{
	struct r600_pipe_sampler_view *resource = (struct r600_pipe_sampler_view *)state;

	pipe_resource_reference(&state->texture, NULL);
	FREE(resource);
}

static void r600_bind_samplers(struct pipe_context *pipe,
			       unsigned shader,
			       unsigned start,
			       unsigned count, void **states)
{
	struct r600_context *rctx = (struct r600_context *)pipe;
	struct r600_textures_info *dst;
	int seamless_cube_map = -1;
	unsigned i;

	assert(start == 0); /* XXX fix below */

	switch (shader) {
	case PIPE_SHADER_VERTEX:
		dst = &rctx->vs_samplers;
		break;
	case PIPE_SHADER_FRAGMENT:
		dst = &rctx->ps_samplers;
		break;
	default:
		debug_error("bad shader in r600_bind_samplers()");
		return;
	}

	memcpy(dst->samplers, states, sizeof(void*) * count);
	dst->n_samplers = count;
	dst->atom_sampler.num_dw = 0;

	for (i = 0; i < count; i++) {
		struct r600_pipe_sampler_state *sampler = states[i];

		if (sampler == NULL) {
			continue;
		}
		if (sampler->border_color_use) {
			dst->atom_sampler.num_dw += 11;
			rctx->flags |= R600_PARTIAL_FLUSH;
		} else {
			dst->atom_sampler.num_dw += 5;
		}
		seamless_cube_map = sampler->seamless_cube_map;
	}
	if (rctx->chip_class <= R700 && seamless_cube_map != -1 && seamless_cube_map != rctx->seamless_cube_map.enabled) {
		/* change in TA_CNTL_AUX need a pipeline flush */
		rctx->flags |= R600_PARTIAL_FLUSH;
		rctx->seamless_cube_map.enabled = seamless_cube_map;
		r600_atom_dirty(rctx, &rctx->seamless_cube_map.atom);
	}
	if (dst->atom_sampler.num_dw) {
		r600_atom_dirty(rctx, &dst->atom_sampler);
	}
}

void r600_bind_vs_samplers(struct pipe_context *ctx, unsigned count, void **states)
{
	r600_bind_samplers(ctx, PIPE_SHADER_VERTEX, 0, count, states);
}

void r600_bind_ps_samplers(struct pipe_context *ctx, unsigned count, void **states)
{
	r600_bind_samplers(ctx, PIPE_SHADER_FRAGMENT, 0, count, states);
}

void r600_delete_sampler(struct pipe_context *ctx, void *state)
{
	free(state);
}

void r600_delete_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_state *rstate = (struct r600_pipe_state *)state;

	if (rctx->states[rstate->id] == rstate) {
		rctx->states[rstate->id] = NULL;
	}
	for (int i = 0; i < rstate->nregs; i++) {
		pipe_resource_reference((struct pipe_resource **)&rstate->regs[i].bo, NULL);
	}
	free(rstate);
}

void r600_bind_vertex_elements(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_vertex_element *v = (struct r600_vertex_element*)state;

	rctx->vertex_elements = v;
	if (v) {
		r600_inval_shader_cache(rctx);

		rctx->states[v->rstate.id] = &v->rstate;
		r600_context_pipe_state_set(rctx, &v->rstate);
	}
}

void r600_delete_vertex_element(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_vertex_element *v = (struct r600_vertex_element*)state;

	if (rctx->states[v->rstate.id] == &v->rstate) {
		rctx->states[v->rstate.id] = NULL;
	}
	if (rctx->vertex_elements == state)
		rctx->vertex_elements = NULL;

	pipe_resource_reference((struct pipe_resource**)&v->fetch_shader, NULL);
	FREE(state);
}

void r600_set_index_buffer(struct pipe_context *ctx,
			   const struct pipe_index_buffer *ib)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (ib) {
		pipe_resource_reference(&rctx->index_buffer.buffer, ib->buffer);
		memcpy(&rctx->index_buffer, ib, sizeof(*ib));
	} else {
		pipe_resource_reference(&rctx->index_buffer.buffer, NULL);
	}
}

void r600_vertex_buffers_dirty(struct r600_context *rctx)
{
	if (rctx->vertex_buffer_state.dirty_mask) {
		r600_inval_vertex_cache(rctx);
		rctx->vertex_buffer_state.atom.num_dw = (rctx->chip_class >= EVERGREEN ? 12 : 11) *
							util_bitcount(rctx->vertex_buffer_state.dirty_mask);
		r600_atom_dirty(rctx, &rctx->vertex_buffer_state.atom);
	}
}

void r600_set_vertex_buffers(struct pipe_context *ctx, unsigned count,
			     const struct pipe_vertex_buffer *input)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_vertexbuf_state *state = &rctx->vertex_buffer_state;
	struct pipe_vertex_buffer *vb = state->vb;
	unsigned i;
	/* This sets 1-bit for buffers with index >= count. */
	uint32_t disable_mask = ~((1ull << count) - 1);
	/* These are the new buffers set by this function. */
	uint32_t new_buffer_mask = 0;

	/* Set buffers with index >= count to NULL. */
	uint32_t remaining_buffers_mask =
		rctx->vertex_buffer_state.enabled_mask & disable_mask;

	while (remaining_buffers_mask) {
		i = u_bit_scan(&remaining_buffers_mask);
		pipe_resource_reference(&vb[i].buffer, NULL);
	}

	/* Set vertex buffers. */
	for (i = 0; i < count; i++) {
		if (memcmp(&input[i], &vb[i], sizeof(struct pipe_vertex_buffer))) {
			if (input[i].buffer) {
				vb[i].stride = input[i].stride;
				vb[i].buffer_offset = input[i].buffer_offset;
				pipe_resource_reference(&vb[i].buffer, input[i].buffer);
				new_buffer_mask |= 1 << i;
			} else {
				pipe_resource_reference(&vb[i].buffer, NULL);
				disable_mask |= 1 << i;
			}
		}
	}

	rctx->vertex_buffer_state.enabled_mask &= ~disable_mask;
	rctx->vertex_buffer_state.dirty_mask &= rctx->vertex_buffer_state.enabled_mask;
	rctx->vertex_buffer_state.enabled_mask |= new_buffer_mask;
	rctx->vertex_buffer_state.dirty_mask |= new_buffer_mask;

	r600_vertex_buffers_dirty(rctx);
}
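
/* Mask bookkeeping example (illustrative only): a call that binds count == 2
 * buffers while three were previously enabled gives
 * disable_mask = ~((1ull << 2) - 1) = ...11111100, so slot 2 is dereferenced
 * and dropped from enabled_mask, while new_buffer_mask collects the slots
 * actually (re)bound by this call and is OR'ed into both enabled_mask and
 * dirty_mask. */
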
void r600_sampler_views_dirty(struct r600_context *rctx,
			      struct r600_samplerview_state *state)
{
	if (state->dirty_mask) {
		r600_inval_texture_cache(rctx);
		state->atom.num_dw = (rctx->chip_class >= EVERGREEN ? 14 : 13) *
				     util_bitcount(state->dirty_mask);
		r600_atom_dirty(rctx, &state->atom);
	}
}

void r600_set_sampler_views(struct pipe_context *pipe,
			    unsigned shader,
			    unsigned start,
			    unsigned count,
			    struct pipe_sampler_view **views)
{
	struct r600_context *rctx = (struct r600_context *) pipe;
	struct r600_textures_info *dst;
	struct r600_pipe_sampler_view **rviews = (struct r600_pipe_sampler_view **)views;
	unsigned i;
	/* This sets 1-bit for textures with index >= count. */
	uint32_t disable_mask = ~((1ull << count) - 1);
	/* These are the new textures set by this function. */
	uint32_t new_mask = 0;

	/* Set textures with index >= count to NULL. */
	uint32_t remaining_mask;

	assert(start == 0); /* XXX fix below */

	switch (shader) {
	case PIPE_SHADER_VERTEX:
		dst = &rctx->vs_samplers;
		break;
	case PIPE_SHADER_FRAGMENT:
		dst = &rctx->ps_samplers;
		break;
	default:
		debug_error("bad shader in r600_set_sampler_views()");
		return;
	}

	remaining_mask = dst->views.enabled_mask & disable_mask;

	while (remaining_mask) {
		i = u_bit_scan(&remaining_mask);
		assert(dst->views.views[i]);

		pipe_sampler_view_reference((struct pipe_sampler_view **)&dst->views.views[i], NULL);
	}

	for (i = 0; i < count; i++) {
		if (rviews[i] == dst->views.views[i]) {
			continue;
		}

		if (rviews[i]) {
			struct r600_texture *rtex =
				(struct r600_texture*)rviews[i]->base.texture;

			if (rtex->is_depth && !rtex->is_flushing_texture) {
				dst->views.depth_texture_mask |= 1 << i;
			} else {
				dst->views.depth_texture_mask &= ~(1 << i);
			}

			/* Changing from array to non-arrays textures and vice
			 * versa requires updating TEX_ARRAY_OVERRIDE on R6xx-R7xx. */
			if (rctx->chip_class <= R700 &&
			    (rviews[i]->base.texture->target == PIPE_TEXTURE_1D_ARRAY ||
			     rviews[i]->base.texture->target == PIPE_TEXTURE_2D_ARRAY) != dst->is_array_sampler[i]) {
				r600_atom_dirty(rctx, &dst->atom_sampler);
			}

			pipe_sampler_view_reference((struct pipe_sampler_view **)&dst->views.views[i], views[i]);
			new_mask |= 1 << i;
		} else {
			pipe_sampler_view_reference((struct pipe_sampler_view **)&dst->views.views[i], NULL);
			disable_mask |= 1 << i;
		}
	}

	dst->views.enabled_mask &= ~disable_mask;
	dst->views.dirty_mask &= dst->views.enabled_mask;
	dst->views.enabled_mask |= new_mask;
	dst->views.dirty_mask |= new_mask;
	dst->views.depth_texture_mask &= dst->views.enabled_mask;

	r600_sampler_views_dirty(rctx, &dst->views);
}

void *r600_create_vertex_elements(struct pipe_context *ctx,
				  unsigned count,
				  const struct pipe_vertex_element *elements)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_vertex_element *v = CALLOC_STRUCT(r600_vertex_element);

	if (!v)
		return NULL;

	v->count = count;
	memcpy(v->elements, elements, sizeof(struct pipe_vertex_element) * count);

	if (r600_vertex_elements_build_fetch_shader(rctx, v)) {
		FREE(v);
		return NULL;
	}

	return v;
}

/* Compute the key for the hw shader variant */
static INLINE unsigned r600_shader_selector_key(struct pipe_context * ctx,
						struct r600_pipe_shader_selector * sel)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	unsigned key;

	if (sel->type == PIPE_SHADER_FRAGMENT) {
		key = rctx->two_side |
		      ((rctx->alpha_to_one && rctx->multisample_enable && !rctx->cb0_is_integer) << 1) |
		      (MIN2(sel->nr_ps_max_color_exports, rctx->nr_cbufs + rctx->dual_src_blend) << 2);
	} else
		key = 0;

	return key;
}
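
/* Fragment-shader key layout, as built above: bit 0 = two-sided lighting,
 * bit 1 = alpha-to-one lowering, bits 2 and up = clamped number of color
 * exports. For example, two_side = 1, alpha-to-one disabled and 2 color
 * exports give key = 1 | (0 << 1) | (2 << 2) = 9. Other shader types
 * currently use key 0 and therefore a single variant. */
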
/* Select the hw shader variant depending on the current state.
 * (*dirty) is set to 1 if current variant was changed */
static int r600_shader_select(struct pipe_context *ctx,
			      struct r600_pipe_shader_selector* sel,
			      unsigned *dirty)
{
	unsigned key;
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader * shader = NULL;
	int r;

	key = r600_shader_selector_key(ctx, sel);

	/* Check if we don't need to change anything.
	 * This path is also used for most shaders that don't need multiple
	 * variants, it will cost just a computation of the key and this
	 * test. */
	if (likely(sel->current && sel->current->key == key)) {
		return 0;
	}

	/* lookup if we have other variants in the list */
	if (sel->num_shaders > 1) {
		struct r600_pipe_shader *p = sel->current, *c = p->next_variant;

		while (c && c->key != key) {
			p = c;
			c = c->next_variant;
		}

		if (c) {
			p->next_variant = c->next_variant;
			shader = c;
		}
	}

	if (unlikely(!shader)) {
		shader = CALLOC(1, sizeof(struct r600_pipe_shader));
		shader->selector = sel;

		r = r600_pipe_shader_create(ctx, shader);
		if (unlikely(r)) {
			R600_ERR("Failed to build shader variant (type=%u, key=%u) %d\n",
				 sel->type, key, r);
			sel->current = NULL;
			FREE(shader);
			return r;
		}

		/* We don't know the value of nr_ps_max_color_exports until we built
		 * at least one variant, so we may need to recompute the key after
		 * building first variant. */
		if (sel->type == PIPE_SHADER_FRAGMENT &&
		    sel->num_shaders == 0) {
			sel->nr_ps_max_color_exports = shader->shader.nr_ps_max_color_exports;
			key = r600_shader_selector_key(ctx, sel);
		}

		shader->key = key;
		sel->num_shaders++;
	}

	if (dirty)
		*dirty = 1;

	shader->next_variant = sel->current;
	sel->current = shader;

	if (rctx->chip_class < EVERGREEN && rctx->ps_shader && rctx->vs_shader) {
		r600_adjust_gprs(rctx);
	}

	if (rctx->ps_shader &&
	    rctx->cb_misc_state.nr_ps_color_outputs != rctx->ps_shader->current->nr_ps_color_outputs) {
		rctx->cb_misc_state.nr_ps_color_outputs = rctx->ps_shader->current->nr_ps_color_outputs;
		r600_atom_dirty(rctx, &rctx->cb_misc_state.atom);
	}
	return 0;
}

static void *r600_create_shader_state(struct pipe_context *ctx,
				      const struct pipe_shader_state *state,
				      unsigned pipe_shader_type)
{
	struct r600_pipe_shader_selector *sel = CALLOC_STRUCT(r600_pipe_shader_selector);
	int r;

	sel->type = pipe_shader_type;
	sel->tokens = tgsi_dup_tokens(state->tokens);
	sel->so = state->stream_output;

	r = r600_shader_select(ctx, sel, NULL);
	if (r)
		return NULL;

	return sel;
}

void *r600_create_shader_state_ps(struct pipe_context *ctx,
				  const struct pipe_shader_state *state)
{
	return r600_create_shader_state(ctx, state, PIPE_SHADER_FRAGMENT);
}

void *r600_create_shader_state_vs(struct pipe_context *ctx,
				  const struct pipe_shader_state *state)
{
	return r600_create_shader_state(ctx, state, PIPE_SHADER_VERTEX);
}

void r600_bind_ps_shader(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (!state)
		state = rctx->dummy_pixel_shader;

	rctx->ps_shader = (struct r600_pipe_shader_selector *)state;
	r600_context_pipe_state_set(rctx, &rctx->ps_shader->current->rstate);

	if (rctx->chip_class <= R700) {
		bool multiwrite = rctx->ps_shader->current->shader.fs_write_all;

		if (rctx->cb_misc_state.multiwrite != multiwrite) {
			rctx->cb_misc_state.multiwrite = multiwrite;
			r600_atom_dirty(rctx, &rctx->cb_misc_state.atom);
		}

		if (rctx->vs_shader)
			r600_adjust_gprs(rctx);
	}

	if (rctx->cb_misc_state.nr_ps_color_outputs != rctx->ps_shader->current->nr_ps_color_outputs) {
		rctx->cb_misc_state.nr_ps_color_outputs = rctx->ps_shader->current->nr_ps_color_outputs;
		r600_atom_dirty(rctx, &rctx->cb_misc_state.atom);
	}
}

void r600_bind_vs_shader(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->vs_shader = (struct r600_pipe_shader_selector *)state;
	if (state) {
		r600_context_pipe_state_set(rctx, &rctx->vs_shader->current->rstate);

		if (rctx->chip_class < EVERGREEN && rctx->ps_shader)
			r600_adjust_gprs(rctx);
	}
}

static void r600_delete_shader_selector(struct pipe_context *ctx,
					struct r600_pipe_shader_selector *sel)
{
	struct r600_pipe_shader *p = sel->current, *c;

	while (p) {
		c = p->next_variant;
		r600_pipe_shader_destroy(ctx, p);
		free(p);
		p = c;
	}

	free(sel->tokens);
	free(sel);
}

void r600_delete_ps_shader(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader_selector *sel = (struct r600_pipe_shader_selector *)state;

	if (rctx->ps_shader == sel) {
		rctx->ps_shader = NULL;
	}

	r600_delete_shader_selector(ctx, sel);
}

void r600_delete_vs_shader(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader_selector *sel = (struct r600_pipe_shader_selector *)state;

	if (rctx->vs_shader == sel) {
		rctx->vs_shader = NULL;
	}

	r600_delete_shader_selector(ctx, sel);
}

void r600_constant_buffers_dirty(struct r600_context *rctx, struct r600_constbuf_state *state)
{
	if (state->dirty_mask) {
		r600_inval_shader_cache(rctx);
		state->atom.num_dw = rctx->chip_class >= EVERGREEN ? util_bitcount(state->dirty_mask)*20
								   : util_bitcount(state->dirty_mask)*19;
		r600_atom_dirty(rctx, &state->atom);
	}
}

void r600_set_constant_buffer(struct pipe_context *ctx, uint shader, uint index,
			      struct pipe_constant_buffer *input)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_constbuf_state *state;
	struct pipe_constant_buffer *cb;
	const uint8_t *ptr;

	switch (shader) {
	case PIPE_SHADER_VERTEX:
		state = &rctx->vs_constbuf_state;
		break;
	case PIPE_SHADER_FRAGMENT:
		state = &rctx->ps_constbuf_state;
		break;
	default:
		return;
	}

	/* Note that the state tracker can unbind constant buffers by
	 * passing NULL here. */
	if (unlikely(!input)) {
		state->enabled_mask &= ~(1 << index);
		state->dirty_mask &= ~(1 << index);
		pipe_resource_reference(&state->cb[index].buffer, NULL);
		return;
	}

	cb = &state->cb[index];
	cb->buffer_size = input->buffer_size;

	ptr = input->user_buffer;

	if (ptr) {
		/* Upload the user buffer. */
		if (R600_BIG_ENDIAN) {
			uint32_t *tmpPtr;
			unsigned i, size = input->buffer_size;

			if (!(tmpPtr = malloc(size))) {
				R600_ERR("Failed to allocate BE swap buffer.\n");
				return;
			}

			for (i = 0; i < size / 4; ++i) {
				tmpPtr[i] = bswap_32(((uint32_t *)ptr)[i]);
			}

			u_upload_data(rctx->uploader, 0, size, tmpPtr, &cb->buffer_offset, &cb->buffer);
			free(tmpPtr);
		} else {
			u_upload_data(rctx->uploader, 0, input->buffer_size, ptr, &cb->buffer_offset, &cb->buffer);
		}
	} else {
		/* Setup the hw buffer. */
		cb->buffer_offset = input->buffer_offset;
		pipe_resource_reference(&cb->buffer, input->buffer);
	}

	state->enabled_mask |= 1 << index;
	state->dirty_mask |= 1 << index;
	r600_constant_buffers_dirty(rctx, state);
}

struct pipe_stream_output_target *
r600_create_so_target(struct pipe_context *ctx,
		      struct pipe_resource *buffer,
		      unsigned buffer_offset,
		      unsigned buffer_size)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_so_target *t;
	void *ptr;

	t = CALLOC_STRUCT(r600_so_target);
	if (!t) {
		return NULL;
	}

	t->b.reference.count = 1;
	t->b.context = ctx;
	pipe_resource_reference(&t->b.buffer, buffer);
	t->b.buffer_offset = buffer_offset;
	t->b.buffer_size = buffer_size;

	t->filled_size = (struct r600_resource*)
		pipe_buffer_create(ctx->screen, PIPE_BIND_CUSTOM, PIPE_USAGE_STATIC, 4);
	ptr = rctx->ws->buffer_map(t->filled_size->cs_buf, rctx->cs, PIPE_TRANSFER_WRITE);
	memset(ptr, 0, t->filled_size->buf->size);
	rctx->ws->buffer_unmap(t->filled_size->cs_buf);

	return &t->b;
}

void r600_so_target_destroy(struct pipe_context *ctx,
			    struct pipe_stream_output_target *target)
{
	struct r600_so_target *t = (struct r600_so_target*)target;
	pipe_resource_reference(&t->b.buffer, NULL);
	pipe_resource_reference((struct pipe_resource**)&t->filled_size, NULL);
	FREE(t);
}

void r600_set_so_targets(struct pipe_context *ctx,
			 unsigned num_targets,
			 struct pipe_stream_output_target **targets,
			 unsigned append_bitmask)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	unsigned i;

	/* Stop streamout. */
	if (rctx->num_so_targets && !rctx->streamout_start) {
		r600_context_streamout_end(rctx);
	}

	/* Set the new targets. */
	for (i = 0; i < num_targets; i++) {
		pipe_so_target_reference((struct pipe_stream_output_target**)&rctx->so_targets[i], targets[i]);
	}
	for (; i < rctx->num_so_targets; i++) {
		pipe_so_target_reference((struct pipe_stream_output_target**)&rctx->so_targets[i], NULL);
	}

	rctx->num_so_targets = num_targets;
	rctx->streamout_start = num_targets != 0;
	rctx->streamout_append_bitmask = append_bitmask;
}

void r600_set_sample_mask(struct pipe_context *pipe, unsigned sample_mask)
{
	struct r600_context *rctx = (struct r600_context*)pipe;

	if (rctx->sample_mask.sample_mask == (uint16_t)sample_mask)
		return;

	rctx->sample_mask.sample_mask = sample_mask;
	r600_atom_dirty(rctx, &rctx->sample_mask.atom);
}

static void r600_update_derived_state(struct r600_context *rctx)
{
	struct pipe_context * ctx = (struct pipe_context*)rctx;
	unsigned ps_dirty = 0;

	if (!rctx->blitter->running) {
		/* Flush depth textures which need to be flushed. */
		if (rctx->vs_samplers.views.depth_texture_mask) {
			r600_flush_depth_textures(rctx, &rctx->vs_samplers.views);
		}
		if (rctx->ps_samplers.views.depth_texture_mask) {
			r600_flush_depth_textures(rctx, &rctx->ps_samplers.views);
		}
	}

	r600_shader_select(ctx, rctx->ps_shader, &ps_dirty);

	if (rctx->ps_shader && ((rctx->sprite_coord_enable &&
		(rctx->ps_shader->current->sprite_coord_enable != rctx->sprite_coord_enable)) ||
		(rctx->rasterizer && rctx->rasterizer->flatshade != rctx->ps_shader->current->flatshade))) {

		if (rctx->chip_class >= EVERGREEN)
			evergreen_pipe_shader_ps(ctx, rctx->ps_shader->current);
		else
			r600_pipe_shader_ps(ctx, rctx->ps_shader->current);

		ps_dirty = 1;
	}

	if (ps_dirty)
		r600_context_pipe_state_set(rctx, &rctx->ps_shader->current->rstate);

	if (rctx->chip_class >= EVERGREEN) {
		evergreen_update_dual_export_state(rctx);
	} else {
		r600_update_dual_export_state(rctx);
	}
}

static unsigned r600_conv_prim_to_gs_out(unsigned mode)
{
	static const int prim_conv[] = {
		V_028A6C_OUTPRIM_TYPE_POINTLIST,
		V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		V_028A6C_OUTPRIM_TYPE_TRISTRIP
	};
	assert(mode < Elements(prim_conv));

	return prim_conv[mode];
}

void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *dinfo)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct pipe_draw_info info = *dinfo;
	struct pipe_index_buffer ib = {};
	unsigned prim, ls_mask = 0;
	struct r600_block *dirty_block = NULL, *next_block = NULL;
	struct r600_atom *state = NULL, *next_state = NULL;
	struct radeon_winsys_cs *cs = rctx->cs;
	uint64_t va;
	uint8_t *ptr;

	if ((!info.count && (info.indexed || !info.count_from_stream_output)) ||
	    !r600_conv_pipe_prim(info.mode, &prim)) {
		assert(0);
		return;
	}

	if (!rctx->vs_shader) {
		assert(0);
		return;
	}

	r600_update_derived_state(rctx);

	/* partial flush triggered by border color change */
	if (rctx->flags & R600_PARTIAL_FLUSH) {
		rctx->flags &= ~R600_PARTIAL_FLUSH;
		r600_write_value(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		r600_write_value(cs, EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	}

	if (info.indexed) {
		/* Initialize the index buffer struct. */
		pipe_resource_reference(&ib.buffer, rctx->index_buffer.buffer);
		ib.user_buffer = rctx->index_buffer.user_buffer;
		ib.index_size = rctx->index_buffer.index_size;
		ib.offset = rctx->index_buffer.offset + info.start * ib.index_size;

		/* Translate or upload, if needed. */
		r600_translate_index_buffer(rctx, &ib, info.count);

		ptr = (uint8_t*)ib.user_buffer;
		if (!ib.buffer && ptr) {
			u_upload_data(rctx->uploader, 0, info.count * ib.index_size,
				      ptr, &ib.offset, &ib.buffer);
		}
	} else {
		info.index_bias = info.start;
	}

	if (rctx->vgt.id != R600_PIPE_STATE_VGT) {
		rctx->vgt.id = R600_PIPE_STATE_VGT;
		rctx->vgt.nregs = 0;
		r600_pipe_state_add_reg(&rctx->vgt, R_008958_VGT_PRIMITIVE_TYPE, prim);
		r600_pipe_state_add_reg(&rctx->vgt, R_028A6C_VGT_GS_OUT_PRIM_TYPE, 0);
		r600_pipe_state_add_reg(&rctx->vgt, R_028408_VGT_INDX_OFFSET, info.index_bias);
		r600_pipe_state_add_reg(&rctx->vgt, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX, info.restart_index);
		r600_pipe_state_add_reg(&rctx->vgt, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, info.primitive_restart);
		r600_pipe_state_add_reg(&rctx->vgt, R_03CFF4_SQ_VTX_START_INST_LOC, info.start_instance);
		r600_pipe_state_add_reg(&rctx->vgt, R_028A0C_PA_SC_LINE_STIPPLE, 0);
		r600_pipe_state_add_reg(&rctx->vgt, R_02881C_PA_CL_VS_OUT_CNTL, 0);
		r600_pipe_state_add_reg(&rctx->vgt, R_028810_PA_CL_CLIP_CNTL, 0);
	}

	rctx->vgt.nregs = 0;
	r600_pipe_state_mod_reg(&rctx->vgt, prim);
	r600_pipe_state_mod_reg(&rctx->vgt, r600_conv_prim_to_gs_out(info.mode));
	r600_pipe_state_mod_reg(&rctx->vgt, info.index_bias);
	r600_pipe_state_mod_reg(&rctx->vgt, info.restart_index);
	r600_pipe_state_mod_reg(&rctx->vgt, info.primitive_restart);
	r600_pipe_state_mod_reg(&rctx->vgt, info.start_instance);

	if (prim == V_008958_DI_PT_LINELIST)
		ls_mask = 1;
	else if (prim == V_008958_DI_PT_LINESTRIP ||
		 prim == V_008958_DI_PT_LINELOOP)
		ls_mask = 2;
	r600_pipe_state_mod_reg(&rctx->vgt, S_028A0C_AUTO_RESET_CNTL(ls_mask) | rctx->pa_sc_line_stipple);
	r600_pipe_state_mod_reg(&rctx->vgt,
				rctx->vs_shader->current->pa_cl_vs_out_cntl |
				(rctx->rasterizer->clip_plane_enable & rctx->vs_shader->current->shader.clip_dist_write));
	r600_pipe_state_mod_reg(&rctx->vgt,
				rctx->pa_cl_clip_cntl |
				(rctx->vs_shader->current->shader.clip_dist_write ||
				 rctx->vs_shader->current->shader.vs_prohibit_ucps ?
				 0 : rctx->rasterizer->clip_plane_enable & 0x3F));

	r600_context_pipe_state_set(rctx, &rctx->vgt);

	/* Enable stream out if needed. */
	if (rctx->streamout_start) {
		r600_context_streamout_begin(rctx);
		rctx->streamout_start = FALSE;
	}

	/* Emit states (the function expects that we emit at most 17 dwords here). */
	r600_need_cs_space(rctx, 0, TRUE);

	LIST_FOR_EACH_ENTRY_SAFE(state, next_state, &rctx->dirty_states, head) {
		r600_emit_atom(rctx, state);
	}
	LIST_FOR_EACH_ENTRY_SAFE(dirty_block, next_block, &rctx->dirty,list) {
		r600_context_block_emit_dirty(rctx, dirty_block, 0 /* pkt_flags */);
	}
	rctx->pm4_dirty_cdwords = 0;

	/* Emit the draw packets. */
	cs->buf[cs->cdw++] = PKT3(PKT3_NUM_INSTANCES, 0, rctx->predicate_drawing);
	cs->buf[cs->cdw++] = info.instance_count;
	if (info.indexed) {
		cs->buf[cs->cdw++] = PKT3(PKT3_INDEX_TYPE, 0, rctx->predicate_drawing);
		cs->buf[cs->cdw++] = ib.index_size == 4 ?
				(VGT_INDEX_32 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_32_BIT : 0)) :
				(VGT_INDEX_16 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_16_BIT : 0));

		va = r600_resource_va(ctx->screen, ib.buffer);
		va += ib.offset;
		cs->buf[cs->cdw++] = PKT3(PKT3_DRAW_INDEX, 3, rctx->predicate_drawing);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;
		cs->buf[cs->cdw++] = info.count;
		cs->buf[cs->cdw++] = V_0287F0_DI_SRC_SEL_DMA;
		cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, rctx->predicate_drawing);
		cs->buf[cs->cdw++] = r600_context_bo_reloc(rctx, (struct r600_resource*)ib.buffer, RADEON_USAGE_READ);
	} else {
		if (info.count_from_stream_output) {
			struct r600_so_target *t = (struct r600_so_target*)info.count_from_stream_output;
			uint64_t va = r600_resource_va(&rctx->screen->screen, (void*)t->filled_size);

			r600_write_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE, t->stride_in_dw);

			cs->buf[cs->cdw++] = PKT3(PKT3_COPY_DW, 4, 0);
			cs->buf[cs->cdw++] = COPY_DW_SRC_IS_MEM | COPY_DW_DST_IS_REG;
			cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL;     /* src address lo */
			cs->buf[cs->cdw++] = (va >> 32UL) & 0xFFUL; /* src address hi */
			cs->buf[cs->cdw++] = R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2; /* dst register */
			cs->buf[cs->cdw++] = 0; /* unused */

			cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
			cs->buf[cs->cdw++] = r600_context_bo_reloc(rctx, t->filled_size, RADEON_USAGE_READ);
		}

		cs->buf[cs->cdw++] = PKT3(PKT3_DRAW_INDEX_AUTO, 1, rctx->predicate_drawing);
		cs->buf[cs->cdw++] = info.count;
		cs->buf[cs->cdw++] = V_0287F0_DI_SRC_SEL_AUTO_INDEX |
					(info.count_from_stream_output ? S_0287F0_USE_OPAQUE(1) : 0);
	}

	rctx->flags |= R600_CONTEXT_DST_CACHES_DIRTY | R600_CONTEXT_DRAW_PENDING;

	/* Set the depth buffer as dirty. */
	if (rctx->framebuffer.zsbuf) {
		struct pipe_surface *surf = rctx->framebuffer.zsbuf;
		struct r600_texture *rtex = (struct r600_texture *)surf->texture;

		rtex->dirty_db_mask |= 1 << surf->u.tex.level;
	}

	pipe_resource_reference(&ib.buffer, NULL);
}

void _r600_pipe_state_add_reg_bo(struct r600_context *ctx,
				 struct r600_pipe_state *state,
				 uint32_t offset, uint32_t value,
				 uint32_t range_id, uint32_t block_id,
				 struct r600_resource *bo,
				 enum radeon_bo_usage usage)
{
	struct r600_range *range;
	struct r600_block *block;

	if (bo) assert(usage);

	range = &ctx->range[range_id];
	block = range->blocks[block_id];
	state->regs[state->nregs].block = block;
	state->regs[state->nregs].id = (offset - block->start_offset) >> 2;

	state->regs[state->nregs].value = value;
	state->regs[state->nregs].bo = bo;
	state->regs[state->nregs].bo_usage = usage;

	state->nregs++;
	assert(state->nregs < R600_BLOCK_MAX_REG);
}

void _r600_pipe_state_add_reg(struct r600_context *ctx,
			      struct r600_pipe_state *state,
			      uint32_t offset, uint32_t value,
			      uint32_t range_id, uint32_t block_id)
{
	_r600_pipe_state_add_reg_bo(ctx, state, offset, value,
				    range_id, block_id, NULL, 0);
}

void r600_pipe_state_add_reg_noblock(struct r600_pipe_state *state,
				     uint32_t offset, uint32_t value,
				     struct r600_resource *bo,
				     enum radeon_bo_usage usage)
{
	if (bo) assert(usage);

	state->regs[state->nregs].id = offset;
	state->regs[state->nregs].block = NULL;
	state->regs[state->nregs].value = value;
	state->regs[state->nregs].bo = bo;
	state->regs[state->nregs].bo_usage = usage;

	state->nregs++;
	assert(state->nregs < R600_BLOCK_MAX_REG);
}

uint32_t r600_translate_stencil_op(int s_op)
{
	switch (s_op) {
	case PIPE_STENCIL_OP_KEEP:
		return V_028800_STENCIL_KEEP;
	case PIPE_STENCIL_OP_ZERO:
		return V_028800_STENCIL_ZERO;
	case PIPE_STENCIL_OP_REPLACE:
		return V_028800_STENCIL_REPLACE;
	case PIPE_STENCIL_OP_INCR:
		return V_028800_STENCIL_INCR;
	case PIPE_STENCIL_OP_DECR:
		return V_028800_STENCIL_DECR;
	case PIPE_STENCIL_OP_INCR_WRAP:
		return V_028800_STENCIL_INCR_WRAP;
	case PIPE_STENCIL_OP_DECR_WRAP:
		return V_028800_STENCIL_DECR_WRAP;
	case PIPE_STENCIL_OP_INVERT:
		return V_028800_STENCIL_INVERT;
	default:
		R600_ERR("Unknown stencil op %d", s_op);
		assert(0);
		break;
	}
	return 0;
}

uint32_t r600_translate_fill(uint32_t func)
{
	switch(func) {
	case PIPE_POLYGON_MODE_FILL:
		return 2;
	case PIPE_POLYGON_MODE_LINE:
		return 1;
	case PIPE_POLYGON_MODE_POINT:
		return 0;
	default:
		assert(0);
		return 0;
	}
}

unsigned r600_tex_wrap(unsigned wrap)
{
	switch (wrap) {
	default:
	case PIPE_TEX_WRAP_REPEAT:
		return V_03C000_SQ_TEX_WRAP;
	case PIPE_TEX_WRAP_CLAMP:
		return V_03C000_SQ_TEX_CLAMP_HALF_BORDER;
	case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
		return V_03C000_SQ_TEX_CLAMP_LAST_TEXEL;
	case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
		return V_03C000_SQ_TEX_CLAMP_BORDER;
	case PIPE_TEX_WRAP_MIRROR_REPEAT:
		return V_03C000_SQ_TEX_MIRROR;
	case PIPE_TEX_WRAP_MIRROR_CLAMP:
		return V_03C000_SQ_TEX_MIRROR_ONCE_HALF_BORDER;
	case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
		return V_03C000_SQ_TEX_MIRROR_ONCE_LAST_TEXEL;
	case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
		return V_03C000_SQ_TEX_MIRROR_ONCE_BORDER;
	}
}

unsigned r600_tex_filter(unsigned filter)
{
	switch (filter) {
	default:
	case PIPE_TEX_FILTER_NEAREST:
		return V_03C000_SQ_TEX_XY_FILTER_POINT;
	case PIPE_TEX_FILTER_LINEAR:
		return V_03C000_SQ_TEX_XY_FILTER_BILINEAR;
	}
}

unsigned r600_tex_mipfilter(unsigned filter)
{
	switch (filter) {
	case PIPE_TEX_MIPFILTER_NEAREST:
		return V_03C000_SQ_TEX_Z_FILTER_POINT;
	case PIPE_TEX_MIPFILTER_LINEAR:
		return V_03C000_SQ_TEX_Z_FILTER_LINEAR;
	default:
	case PIPE_TEX_MIPFILTER_NONE:
		return V_03C000_SQ_TEX_Z_FILTER_NONE;
	}
}

unsigned r600_tex_compare(unsigned compare)
{
	switch (compare) {
	default:
	case PIPE_FUNC_NEVER:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_NEVER;
	case PIPE_FUNC_LESS:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_LESS;
	case PIPE_FUNC_EQUAL:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_EQUAL;
	case PIPE_FUNC_LEQUAL:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_LESSEQUAL;
	case PIPE_FUNC_GREATER:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_GREATER;
	case PIPE_FUNC_NOTEQUAL:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_NOTEQUAL;
	case PIPE_FUNC_GEQUAL:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_GREATEREQUAL;
	case PIPE_FUNC_ALWAYS:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_ALWAYS;
	}
}