/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 */
#include "util/u_blitter.h"
#include "util/u_memory.h"
#include "util/u_format.h"
#include "pipebuffer/pb_buffer.h"
#include "pipe/p_shader_tokens.h"
#include "tgsi/tgsi_parse.h"
#include "r600_hw_context_priv.h"
#include "radeonsi_pipe.h"

static void r600_emit_surface_sync(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->cs;
	struct r600_atom_surface_sync *a = (struct r600_atom_surface_sync*)atom;

	cs->buf[cs->cdw++] = PKT3(PKT3_SURFACE_SYNC, 3, 0);
	cs->buf[cs->cdw++] = a->flush_flags;  /* CP_COHER_CNTL */
	cs->buf[cs->cdw++] = 0xffffffff;      /* CP_COHER_SIZE */
	cs->buf[cs->cdw++] = 0;               /* CP_COHER_BASE */
	cs->buf[cs->cdw++] = 0x0000000A;      /* POLL_INTERVAL */
}

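/* A sketch of the resulting packet, assuming the usual PKT3 encoding where a
 * count field of 3 means four data dwords follow the header:
 *
 *   SURFACE_SYNC: [header][CP_COHER_CNTL][CP_COHER_SIZE][CP_COHER_BASE][POLL_INTERVAL]
 *
 * CP_COHER_SIZE = 0xffffffff together with CP_COHER_BASE = 0 makes the sync
 * cover the whole address range rather than a single surface. */
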
static void r600_emit_r6xx_flush_and_inv(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->cs;

	cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
	cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0);
}

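/* A single EVENT_WRITE asking the CP to flush and invalidate the color/depth
 * block caches. Assuming the same PKT3 count semantics as above (count 0
 * means one data dword), the packet's only payload is the event-type dword;
 * unlike SURFACE_SYNC it carries no address or size. */
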
static void r600_init_atom(struct r600_atom *atom,
			   void (*emit)(struct r600_context *ctx, struct r600_atom *state),
			   unsigned num_dw,
			   enum r600_atom_flags flags)
{
	atom->emit = emit;
	atom->num_dw = num_dw;
	atom->flags = flags;
}

void r600_init_common_atoms(struct r600_context *rctx)
{
	r600_init_atom(&rctx->atom_surface_sync.atom, r600_emit_surface_sync, 5, EMIT_EARLY);
	r600_init_atom(&rctx->atom_r6xx_flush_and_inv, r600_emit_r6xx_flush_and_inv, 2, EMIT_EARLY);
}

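/* The num_dw arguments (5 and 2) must match the number of dwords the emit
 * callbacks above actually write: r600_emit_surface_sync writes a header
 * plus four data dwords, r600_emit_r6xx_flush_and_inv writes two dwords. */
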
unsigned r600_get_cb_flush_flags(struct r600_context *rctx)
{
	unsigned flags = 0;

	if (rctx->framebuffer.nr_cbufs) {
		flags |= S_0085F0_CB_ACTION_ENA(1) |
			 (((1 << rctx->framebuffer.nr_cbufs) - 1) << S_0085F0_CB0_DEST_BASE_ENA_SHIFT);
	}

	return flags;
}

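/* Example: with nr_cbufs == 2, ((1 << 2) - 1) == 0x3 shifted to the
 * CB0_DEST_BASE_ENA position sets one dest-base-enable bit per bound color
 * buffer, so exactly the first two color buffers are flushed. */
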
void r600_texture_barrier(struct pipe_context *ctx)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->atom_surface_sync.flush_flags |= S_0085F0_TC_ACTION_ENA(1) | r600_get_cb_flush_flags(rctx);
	r600_atom_dirty(rctx, &rctx->atom_surface_sync.atom);
}

static bool r600_conv_pipe_prim(unsigned pprim, unsigned *prim)
{
	static const int prim_conv[] = {
		V_008958_DI_PT_POINTLIST,
		V_008958_DI_PT_LINELIST,
		V_008958_DI_PT_LINELOOP,
		V_008958_DI_PT_LINESTRIP,
		V_008958_DI_PT_TRILIST,
		V_008958_DI_PT_TRISTRIP,
		V_008958_DI_PT_TRIFAN,
		V_008958_DI_PT_QUADLIST,
		V_008958_DI_PT_QUADSTRIP,
		V_008958_DI_PT_POLYGON
	};

	if (pprim <= PIPE_PRIM_POLYGON) {
		*prim = prim_conv[pprim];
		return true;
	}

	fprintf(stderr, "%s:%d unsupported %d\n", __func__, __LINE__, pprim);
	return false;
}

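/* The table relies on the gallium PIPE_PRIM_* values being contiguous,
 * starting at PIPE_PRIM_POINTS == 0 and ending at PIPE_PRIM_POLYGON, so the
 * pipe primitive can be used directly as the table index. */
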
/* common state between evergreen and r600 */
void r600_bind_blend_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_blend *blend = (struct r600_pipe_blend *)state;
	struct r600_pipe_state *rstate;

	if (state == NULL)
		return;

	rstate = &blend->rstate;
	rctx->states[rstate->id] = rstate;
	rctx->cb_target_mask = blend->cb_target_mask;
	rctx->cb_color_control = blend->cb_color_control;

	r600_context_pipe_state_set(rctx, rstate);
}

static void r600_set_stencil_ref(struct pipe_context *ctx,
				 const struct r600_stencil_ref *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_state *rstate = CALLOC_STRUCT(r600_pipe_state);

	if (rstate == NULL)
		return;

	rstate->id = R600_PIPE_STATE_STENCIL_REF;
	r600_pipe_state_add_reg(rstate,
				R_028430_DB_STENCILREFMASK,
				S_028430_STENCILTESTVAL(state->ref_value[0]) |
				S_028430_STENCILMASK(state->valuemask[0]) |
				S_028430_STENCILWRITEMASK(state->writemask[0]),
				NULL, 0);
	r600_pipe_state_add_reg(rstate,
				R_028434_DB_STENCILREFMASK_BF,
				S_028434_STENCILTESTVAL_BF(state->ref_value[1]) |
				S_028434_STENCILMASK_BF(state->valuemask[1]) |
				S_028434_STENCILWRITEMASK_BF(state->writemask[1]),
				NULL, 0);

	free(rctx->states[R600_PIPE_STATE_STENCIL_REF]);
	rctx->states[R600_PIPE_STATE_STENCIL_REF] = rstate;
	r600_context_pipe_state_set(rctx, rstate);
}

void r600_set_pipe_stencil_ref(struct pipe_context *ctx,
			       const struct pipe_stencil_ref *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_dsa *dsa = (struct r600_pipe_dsa *)rctx->states[R600_PIPE_STATE_DSA];
	struct r600_stencil_ref ref;

	rctx->stencil_ref = *state;

	if (!dsa)
		return;

	ref.ref_value[0] = state->ref_value[0];
	ref.ref_value[1] = state->ref_value[1];
	ref.valuemask[0] = dsa->valuemask[0];
	ref.valuemask[1] = dsa->valuemask[1];
	ref.writemask[0] = dsa->writemask[0];
	ref.writemask[1] = dsa->writemask[1];

	r600_set_stencil_ref(ctx, &ref);
}

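/* The reference values come from the new pipe_stencil_ref state while the
 * compare/write masks come from the currently bound DSA state; merging them
 * into one r600_stencil_ref lets r600_set_stencil_ref() program each
 * DB_STENCILREFMASK register with a single packed value. */
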
void r600_bind_dsa_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_dsa *dsa = state;
	struct r600_pipe_state *rstate;
	struct r600_stencil_ref ref;

	if (state == NULL)
		return;

	rstate = &dsa->rstate;
	rctx->states[rstate->id] = rstate;
	rctx->alpha_ref = dsa->alpha_ref;
	rctx->alpha_ref_dirty = true;
	r600_context_pipe_state_set(rctx, rstate);

	ref.ref_value[0] = rctx->stencil_ref.ref_value[0];
	ref.ref_value[1] = rctx->stencil_ref.ref_value[1];
	ref.valuemask[0] = dsa->valuemask[0];
	ref.valuemask[1] = dsa->valuemask[1];
	ref.writemask[0] = dsa->writemask[0];
	ref.writemask[1] = dsa->writemask[1];

	r600_set_stencil_ref(ctx, &ref);
}

void r600_bind_rs_state(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_rasterizer *rs = (struct r600_pipe_rasterizer *)state;
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (state == NULL)
		return;

	rctx->sprite_coord_enable = rs->sprite_coord_enable;
	rctx->pa_sc_line_stipple = rs->pa_sc_line_stipple;
	rctx->pa_su_sc_mode_cntl = rs->pa_su_sc_mode_cntl;
	rctx->pa_cl_clip_cntl = rs->pa_cl_clip_cntl;
	rctx->pa_cl_vs_out_cntl = rs->pa_cl_vs_out_cntl;

	rctx->rasterizer = rs;

	rctx->states[rs->rstate.id] = &rs->rstate;
	r600_context_pipe_state_set(rctx, &rs->rstate);

	if (rctx->chip_class >= CAYMAN) {
		cayman_polygon_offset_update(rctx);
	}
}

void r600_delete_rs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_rasterizer *rs = (struct r600_pipe_rasterizer *)state;

	if (rctx->rasterizer == rs) {
		rctx->rasterizer = NULL;
	}
	if (rctx->states[rs->rstate.id] == &rs->rstate) {
		rctx->states[rs->rstate.id] = NULL;
	}
	free(rs);
}

void r600_sampler_view_destroy(struct pipe_context *ctx,
			       struct pipe_sampler_view *state)
{
	struct r600_pipe_sampler_view *resource = (struct r600_pipe_sampler_view *)state;

	pipe_resource_reference(&state->texture, NULL);
	FREE(resource);
}

void r600_delete_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_state *rstate = (struct r600_pipe_state *)state;

	if (rctx->states[rstate->id] == rstate) {
		rctx->states[rstate->id] = NULL;
	}
	for (int i = 0; i < rstate->nregs; i++) {
		pipe_resource_reference((struct pipe_resource **)&rstate->regs[i].bo, NULL);
	}
	free(rstate);
}

void r600_bind_vertex_elements(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_vertex_element *v = (struct r600_vertex_element *)state;

	rctx->vertex_elements = v;
	if (v) {
		r600_inval_shader_cache(rctx);

		rctx->states[v->rstate.id] = &v->rstate;
		r600_context_pipe_state_set(rctx, &v->rstate);
	}
}

void r600_delete_vertex_element(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_vertex_element *v = (struct r600_vertex_element *)state;

	if (rctx->states[v->rstate.id] == &v->rstate) {
		rctx->states[v->rstate.id] = NULL;
	}
	if (rctx->vertex_elements == state)
		rctx->vertex_elements = NULL;

	FREE(state);
}

void r600_set_index_buffer(struct pipe_context *ctx,
			   const struct pipe_index_buffer *ib)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (ib) {
		pipe_resource_reference(&rctx->index_buffer.buffer, ib->buffer);
		memcpy(&rctx->index_buffer, ib, sizeof(*ib));
	} else {
		pipe_resource_reference(&rctx->index_buffer.buffer, NULL);
	}
}

void r600_set_vertex_buffers(struct pipe_context *ctx, unsigned count,
			     const struct pipe_vertex_buffer *buffers)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	util_copy_vertex_buffers(rctx->vertex_buffer, &rctx->nr_vertex_buffers, buffers, count);
}

void *si_create_vertex_elements(struct pipe_context *ctx,
				unsigned count,
				const struct pipe_vertex_element *elements)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_vertex_element *v = CALLOC_STRUCT(r600_vertex_element);

	if (v == NULL)
		return NULL;

	v->count = count;
	memcpy(v->elements, elements, sizeof(struct pipe_vertex_element) * count);

	return v;
}

void *si_create_shader_state(struct pipe_context *ctx,
			     const struct pipe_shader_state *state)
{
	struct si_pipe_shader *shader = CALLOC_STRUCT(si_pipe_shader);

	shader->tokens = tgsi_dup_tokens(state->tokens);
	shader->so = state->stream_output;

	return shader;
}

void r600_bind_ps_shader(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (rctx->ps_shader != state)
		rctx->shader_dirty = true;

	/* TODO delete old shader */
	rctx->ps_shader = (struct si_pipe_shader *)state;
	if (state) {
		r600_inval_shader_cache(rctx);
		r600_context_pipe_state_set(rctx, &rctx->ps_shader->rstate);
	}
}

void r600_bind_vs_shader(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (rctx->vs_shader != state)
		rctx->shader_dirty = true;

	/* TODO delete old shader */
	rctx->vs_shader = (struct si_pipe_shader *)state;
	if (state) {
		r600_inval_shader_cache(rctx);
		r600_context_pipe_state_set(rctx, &rctx->vs_shader->rstate);
	}
}

void r600_delete_ps_shader(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct si_pipe_shader *shader = (struct si_pipe_shader *)state;

	if (rctx->ps_shader == shader) {
		rctx->ps_shader = NULL;
	}

	free(shader->tokens);
	si_pipe_shader_destroy(ctx, shader);
	free(shader);
}

void r600_delete_vs_shader(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct si_pipe_shader *shader = (struct si_pipe_shader *)state;

	if (rctx->vs_shader == shader) {
		rctx->vs_shader = NULL;
	}

	free(shader->tokens);
	si_pipe_shader_destroy(ctx, shader);
	free(shader);
}

static void r600_update_alpha_ref(struct r600_context *rctx)
{
	unsigned alpha_ref;
	struct r600_pipe_state rstate;

	alpha_ref = rctx->alpha_ref;
	rstate.nregs = 0;
	if (rctx->export_16bpc)
		alpha_ref &= ~0x1FFF;
	r600_pipe_state_add_reg(&rstate, R_028438_SX_ALPHA_REF, alpha_ref, NULL, 0);

	r600_context_pipe_state_set(rctx, &rstate);
	rctx->alpha_ref_dirty = false;
}

void r600_set_constant_buffer(struct pipe_context *ctx, uint shader, uint index,
			      struct pipe_constant_buffer *cb)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_resource *rbuffer = cb ? r600_resource(cb->buffer) : NULL;
	struct r600_pipe_state *rstate;
	uint64_t va_offset;
	uint32_t offset;

	/* Note that the state tracker can unbind constant buffers by
	 * passing NULL here. */
	if (cb == NULL)
		return;

	r600_inval_shader_cache(rctx);

	r600_upload_const_buffer(rctx, &rbuffer, cb->user_buffer, cb->buffer_size, &offset);

	va_offset = r600_resource_va(ctx->screen, (void*)rbuffer);
	va_offset += offset;

	switch (shader) {
	case PIPE_SHADER_VERTEX:
		rstate = &rctx->vs_const_buffer;
		rstate->nregs = 0;
		r600_pipe_state_add_reg(rstate,
					R_00B130_SPI_SHADER_USER_DATA_VS_0,
					va_offset, rbuffer, RADEON_USAGE_READ);
		r600_pipe_state_add_reg(rstate,
					R_00B134_SPI_SHADER_USER_DATA_VS_1,
					va_offset >> 32, NULL, 0);
		break;
	case PIPE_SHADER_FRAGMENT:
		rstate = &rctx->ps_const_buffer;
		rstate->nregs = 0;
		r600_pipe_state_add_reg(rstate,
					R_00B030_SPI_SHADER_USER_DATA_PS_0,
					va_offset, rbuffer, RADEON_USAGE_READ);
		r600_pipe_state_add_reg(rstate,
					R_00B034_SPI_SHADER_USER_DATA_PS_1,
					va_offset >> 32, NULL, 0);
		break;
	default:
		R600_ERR("unsupported %d\n", shader);
		return;
	}

	r600_context_pipe_state_set(rctx, rstate);

	if (cb->buffer != &rbuffer->b.b)
		pipe_resource_reference((struct pipe_resource **)&rbuffer, NULL);
}

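/* The constant buffer is addressed through a user-data SGPR pair: the low
 * 32 bits of the GPU virtual address go into SPI_SHADER_USER_DATA_*_0 and
 * the high 32 bits (va_offset >> 32) into *_1, which is why each shader
 * stage needs two register writes. */
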
struct pipe_stream_output_target *
r600_create_so_target(struct pipe_context *ctx,
		      struct pipe_resource *buffer,
		      unsigned buffer_offset,
		      unsigned buffer_size)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_so_target *t;
	void *ptr;

	t = CALLOC_STRUCT(r600_so_target);
	if (!t) {
		return NULL;
	}

	t->b.reference.count = 1;
	t->b.context = ctx;
	pipe_resource_reference(&t->b.buffer, buffer);
	t->b.buffer_offset = buffer_offset;
	t->b.buffer_size = buffer_size;

	t->filled_size = (struct r600_resource *)
		pipe_buffer_create(ctx->screen, PIPE_BIND_CUSTOM, PIPE_USAGE_STATIC, 4);
	ptr = rctx->ws->buffer_map(t->filled_size->cs_buf, rctx->cs, PIPE_TRANSFER_WRITE);
	memset(ptr, 0, t->filled_size->buf->size);
	rctx->ws->buffer_unmap(t->filled_size->cs_buf);

	return &t->b;
}

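/* filled_size is a small (4 byte) GPU buffer used to track how much of the
 * target has been written by streamout; it is zeroed here so a freshly
 * created target starts out empty. */
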
void r600_so_target_destroy(struct pipe_context *ctx,
			    struct pipe_stream_output_target *target)
{
	struct r600_so_target *t = (struct r600_so_target *)target;
	pipe_resource_reference(&t->b.buffer, NULL);
	pipe_resource_reference((struct pipe_resource **)&t->filled_size, NULL);
	FREE(t);
}

void r600_set_so_targets(struct pipe_context *ctx,
			 unsigned num_targets,
			 struct pipe_stream_output_target **targets,
			 unsigned append_bitmask)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	unsigned i;

	/* Stop streamout. */
	if (rctx->num_so_targets) {
		r600_context_streamout_end(rctx);
	}

	/* Set the new targets. */
	for (i = 0; i < num_targets; i++) {
		pipe_so_target_reference((struct pipe_stream_output_target **)&rctx->so_targets[i], targets[i]);
	}
	for (; i < rctx->num_so_targets; i++) {
		pipe_so_target_reference((struct pipe_stream_output_target **)&rctx->so_targets[i], NULL);
	}

	rctx->num_so_targets = num_targets;
	rctx->streamout_start = num_targets != 0;
	rctx->streamout_append_bitmask = append_bitmask;
}

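/* The second loop unreferences entries past the new target count, so stale
 * so_targets[] slots from a previous, larger set do not keep their buffers
 * alive. */
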
static void r600_vertex_buffer_update(struct r600_context *rctx)
{
	struct pipe_context *ctx = &rctx->context;
	struct r600_pipe_state *rstate = &rctx->vs_user_data;
	struct r600_resource *rbuffer, *t_list_buffer;
	struct pipe_vertex_buffer *vertex_buffer;
	unsigned i, count, offset;
	uint32_t *ptr;
	uint64_t va;

	r600_inval_vertex_cache(rctx);

	if (rctx->vertex_elements->vbuffer_need_offset) {
		/* one resource per vertex elements */
		count = rctx->vertex_elements->count;
	} else {
		/* bind vertex buffer once */
		count = rctx->nr_vertex_buffers;
	}

	assert(count <= 256 / 4);

	t_list_buffer = (struct r600_resource *)
		pipe_buffer_create(ctx->screen, PIPE_BIND_CUSTOM,
				   PIPE_USAGE_IMMUTABLE, 4 * 4 * count);
	if (t_list_buffer == NULL)
		return;

	ptr = (uint32_t*)rctx->ws->buffer_map(t_list_buffer->cs_buf,
					      rctx->cs,
					      PIPE_TRANSFER_WRITE);

	for (i = 0; i < count; i++, ptr += 4) {
		struct pipe_vertex_element *velem = &rctx->vertex_elements->elements[i];
		const struct util_format_description *desc;
		unsigned data_format, num_format;
		int first_non_void;

		if (rctx->vertex_elements->vbuffer_need_offset) {
			/* one resource per vertex elements */
			unsigned vbuffer_index;
			vbuffer_index = rctx->vertex_elements->elements[i].vertex_buffer_index;
			vertex_buffer = &rctx->vertex_buffer[vbuffer_index];
			rbuffer = (struct r600_resource *)vertex_buffer->buffer;
			offset = rctx->vertex_elements->vbuffer_offset[i];
		} else {
			/* bind vertex buffer once */
			vertex_buffer = &rctx->vertex_buffer[i];
			rbuffer = (struct r600_resource *)vertex_buffer->buffer;
			offset = 0;
		}
		if (vertex_buffer == NULL || rbuffer == NULL)
			continue;
		offset += vertex_buffer->buffer_offset;

		va = r600_resource_va(ctx->screen, (void*)rbuffer);
		va += offset;

		desc = util_format_description(velem->src_format);
		first_non_void = util_format_get_first_non_void_channel(velem->src_format);
		data_format = si_translate_vertexformat(ctx->screen,
							velem->src_format,
							desc, first_non_void);

		switch (desc->channel[first_non_void].type) {
		case UTIL_FORMAT_TYPE_FIXED:
			num_format = V_008F0C_BUF_NUM_FORMAT_USCALED; /* XXX */
			break;
		case UTIL_FORMAT_TYPE_SIGNED:
			num_format = V_008F0C_BUF_NUM_FORMAT_SNORM;
			break;
		case UTIL_FORMAT_TYPE_UNSIGNED:
			num_format = V_008F0C_BUF_NUM_FORMAT_UNORM;
			break;
		case UTIL_FORMAT_TYPE_FLOAT:
		default:
			num_format = V_008F14_IMG_NUM_FORMAT_FLOAT;
		}

		/* Fill in T# buffer resource description */
		ptr[0] = va & 0xFFFFFFFF;
		ptr[1] = (S_008F04_BASE_ADDRESS_HI(va >> 32) |
			  S_008F04_STRIDE(vertex_buffer->stride));
		if (vertex_buffer->stride > 0)
			ptr[2] = ((vertex_buffer->buffer->width0 - offset) /
				  vertex_buffer->stride);
		else
			ptr[2] = vertex_buffer->buffer->width0 - offset;
		ptr[3] = (S_008F0C_DST_SEL_X(si_map_swizzle(desc->swizzle[0])) |
			  S_008F0C_DST_SEL_Y(si_map_swizzle(desc->swizzle[1])) |
			  S_008F0C_DST_SEL_Z(si_map_swizzle(desc->swizzle[2])) |
			  S_008F0C_DST_SEL_W(si_map_swizzle(desc->swizzle[3])) |
			  S_008F0C_NUM_FORMAT(num_format) |
			  S_008F0C_DATA_FORMAT(data_format));

		r600_context_bo_reloc(rctx, rbuffer, RADEON_USAGE_READ);
	}

	rctx->ws->buffer_unmap(t_list_buffer->cs_buf);

	rstate->nregs = 0;
	va = r600_resource_va(ctx->screen, (void*)t_list_buffer);
	r600_pipe_state_add_reg(rstate,
				R_00B148_SPI_SHADER_USER_DATA_VS_6,
				va, t_list_buffer, RADEON_USAGE_READ);
	r600_pipe_state_add_reg(rstate,
				R_00B14C_SPI_SHADER_USER_DATA_VS_7,
				va >> 32, NULL, 0);

	r600_context_pipe_state_set(rctx, rstate);
}

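/* Each vertex element consumes one 4-dword T# (buffer resource) descriptor,
 * hence the 4 * 4 * count allocation above:
 *   dword 0: base address, low 32 bits
 *   dword 1: base address, high bits + record stride
 *   dword 2: number of records (or remaining bytes when stride == 0)
 *   dword 3: destination swizzle, number format and data format
 * The descriptor table's own address is then handed to the vertex shader
 * through the USER_DATA_VS_6/7 SGPR pair, split into low/high halves just
 * like the constant buffer address. */
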
static void si_update_derived_state(struct r600_context *rctx)
{
	struct pipe_context *ctx = (struct pipe_context *)rctx;

	if (!rctx->blitter->running) {
		if (rctx->have_depth_fb || rctx->have_depth_texture)
			r600_flush_depth_textures(rctx);
	}

	if ((rctx->ps_shader->shader.fs_write_all &&
	     (rctx->ps_shader->shader.nr_cbufs != rctx->nr_cbufs)) ||
	    (rctx->sprite_coord_enable &&
	     (rctx->ps_shader->sprite_coord_enable != rctx->sprite_coord_enable))) {
		si_pipe_shader_destroy(&rctx->context, rctx->ps_shader);
	}

	if (rctx->alpha_ref_dirty) {
		r600_update_alpha_ref(rctx);
	}

	if (!rctx->vs_shader->bo) {
		si_pipe_shader_vs(ctx, rctx->vs_shader);
	}
	r600_context_pipe_state_set(rctx, &rctx->vs_shader->rstate);

	if (!rctx->ps_shader->bo) {
		si_pipe_shader_ps(ctx, rctx->ps_shader);
	}
	r600_context_pipe_state_set(rctx, &rctx->ps_shader->rstate);

	if (rctx->shader_dirty) {
		si_update_spi_map(rctx);
		rctx->shader_dirty = false;
	}
}

void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *dinfo)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_dsa *dsa = (struct r600_pipe_dsa *)rctx->states[R600_PIPE_STATE_DSA];
	struct pipe_draw_info info = *dinfo;
	struct r600_draw rdraw = {};
	struct pipe_index_buffer ib = {};
	unsigned prim, mask, ls_mask = 0;
	struct r600_block *dirty_block = NULL, *next_block = NULL;
	struct r600_atom *state = NULL, *next_state = NULL;
	int i;

	if ((!info.count && (info.indexed || !info.count_from_stream_output)) ||
	    (info.indexed && !rctx->index_buffer.buffer) ||
	    !r600_conv_pipe_prim(info.mode, &prim)) {
		return;
	}

	if (!rctx->ps_shader || !rctx->vs_shader)
		return;

	si_update_derived_state(rctx);

	r600_vertex_buffer_update(rctx);

	rdraw.vgt_num_indices = info.count;
	rdraw.vgt_num_instances = info.instance_count;

	if (info.indexed) {
		/* Initialize the index buffer struct. */
		pipe_resource_reference(&ib.buffer, rctx->index_buffer.buffer);
		ib.index_size = rctx->index_buffer.index_size;
		ib.offset = rctx->index_buffer.offset + info.start * ib.index_size;

		/* Translate or upload, if needed. */
		r600_translate_index_buffer(rctx, &ib, info.count);

		if (ib.user_buffer) {
			r600_upload_index_buffer(rctx, &ib, info.count);
		}

		/* Initialize the r600_draw struct with index buffer info. */
		if (ib.index_size == 4) {
			rdraw.vgt_index_type = V_028A7C_VGT_INDEX_32 |
				(R600_BIG_ENDIAN ? V_028A7C_VGT_DMA_SWAP_32_BIT : 0);
		} else {
			rdraw.vgt_index_type = V_028A7C_VGT_INDEX_16 |
				(R600_BIG_ENDIAN ? V_028A7C_VGT_DMA_SWAP_16_BIT : 0);
		}
		rdraw.indices = (struct r600_resource *)ib.buffer;
		rdraw.indices_bo_offset = ib.offset;
		rdraw.vgt_draw_initiator = V_0287F0_DI_SRC_SEL_DMA;
	} else {
		info.index_bias = info.start;
		rdraw.vgt_draw_initiator = V_0287F0_DI_SRC_SEL_AUTO_INDEX;
		if (info.count_from_stream_output) {
			rdraw.vgt_draw_initiator |= S_0287F0_USE_OPAQUE(1);

			r600_context_draw_opaque_count(rctx, (struct r600_so_target *)info.count_from_stream_output);
		}
	}

	rctx->vs_shader_so_strides = rctx->vs_shader->so_strides;

	mask = (1ULL << ((unsigned)rctx->framebuffer.nr_cbufs * 4)) - 1;

	if (rctx->vgt.id != R600_PIPE_STATE_VGT) {
		rctx->vgt.id = R600_PIPE_STATE_VGT;
		rctx->vgt.nregs = 0;
		r600_pipe_state_add_reg(&rctx->vgt, R_008958_VGT_PRIMITIVE_TYPE, prim, NULL, 0);
		r600_pipe_state_add_reg(&rctx->vgt, R_028238_CB_TARGET_MASK, rctx->cb_target_mask & mask, NULL, 0);
		r600_pipe_state_add_reg(&rctx->vgt, R_028400_VGT_MAX_VTX_INDX, ~0, NULL, 0);
		r600_pipe_state_add_reg(&rctx->vgt, R_028404_VGT_MIN_VTX_INDX, 0, NULL, 0);
		r600_pipe_state_add_reg(&rctx->vgt, R_028408_VGT_INDX_OFFSET, info.index_bias, NULL, 0);
		r600_pipe_state_add_reg(&rctx->vgt, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX, info.restart_index, NULL, 0);
		r600_pipe_state_add_reg(&rctx->vgt, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, info.primitive_restart, NULL, 0);
		r600_pipe_state_add_reg(&rctx->vgt, R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0, NULL, 0);
		r600_pipe_state_add_reg(&rctx->vgt, R_03CFF4_SQ_VTX_START_INST_LOC, info.start_instance, NULL, 0);
		r600_pipe_state_add_reg(&rctx->vgt, R_028A0C_PA_SC_LINE_STIPPLE, 0, NULL, 0);
		r600_pipe_state_add_reg(&rctx->vgt, R_028814_PA_SU_SC_MODE_CNTL, 0, NULL, 0);
		r600_pipe_state_add_reg(&rctx->vgt, R_02881C_PA_CL_VS_OUT_CNTL, 0, NULL, 0);
		r600_pipe_state_add_reg(&rctx->vgt, R_028810_PA_CL_CLIP_CNTL, 0x0, NULL, 0);
	}

	rctx->vgt.nregs = 0;
	r600_pipe_state_mod_reg(&rctx->vgt, prim);
	r600_pipe_state_mod_reg(&rctx->vgt, rctx->cb_target_mask & mask);
	r600_pipe_state_mod_reg(&rctx->vgt, ~0);
	r600_pipe_state_mod_reg(&rctx->vgt, 0);
	r600_pipe_state_mod_reg(&rctx->vgt, info.index_bias);
	r600_pipe_state_mod_reg(&rctx->vgt, info.restart_index);
	r600_pipe_state_mod_reg(&rctx->vgt, info.primitive_restart);
	r600_pipe_state_mod_reg(&rctx->vgt, 0);
	r600_pipe_state_mod_reg(&rctx->vgt, info.start_instance);

	if (prim == V_008958_DI_PT_LINELIST)
		ls_mask = 1;
	else if (prim == V_008958_DI_PT_LINESTRIP)
		ls_mask = 2;
	r600_pipe_state_mod_reg(&rctx->vgt, S_028A0C_AUTO_RESET_CNTL(ls_mask) | rctx->pa_sc_line_stipple);

	if (info.mode == PIPE_PRIM_QUADS || info.mode == PIPE_PRIM_QUAD_STRIP || info.mode == PIPE_PRIM_POLYGON) {
		r600_pipe_state_mod_reg(&rctx->vgt, S_028814_PROVOKING_VTX_LAST(1) | rctx->pa_su_sc_mode_cntl);
	} else {
		r600_pipe_state_mod_reg(&rctx->vgt, rctx->pa_su_sc_mode_cntl);
	}
	r600_pipe_state_mod_reg(&rctx->vgt,
				prim == PIPE_PRIM_POINTS ? rctx->pa_cl_vs_out_cntl : 0
				/*| (rctx->rasterizer->clip_plane_enable &
				rctx->vs_shader->shader.clip_dist_write)*/);
	r600_pipe_state_mod_reg(&rctx->vgt,
				rctx->pa_cl_clip_cntl /*|
				(rctx->vs_shader->shader.clip_dist_write ||
				rctx->vs_shader->shader.vs_prohibit_ucps ?
				0 : rctx->rasterizer->clip_plane_enable & 0x3F)*/);

	r600_context_pipe_state_set(rctx, &rctx->vgt);

	rdraw.db_render_override = dsa->db_render_override;
	rdraw.db_render_control = dsa->db_render_control;

	r600_need_cs_space(rctx, 0, TRUE);

	LIST_FOR_EACH_ENTRY_SAFE(state, next_state, &rctx->dirty_states, head) {
		r600_emit_atom(rctx, state);
	}
	LIST_FOR_EACH_ENTRY_SAFE(dirty_block, next_block, &rctx->dirty, list) {
		r600_context_block_emit_dirty(rctx, dirty_block);
	}
	rctx->pm4_dirty_cdwords = 0;

	/* Enable stream out if needed. */
	if (rctx->streamout_start) {
		r600_context_streamout_begin(rctx);
		rctx->streamout_start = FALSE;
	}

	for (i = 0; i < NUM_TEX_UNITS; i++) {
		if (rctx->ps_samplers.views[i])
			r600_context_bo_reloc(rctx,
					      (struct r600_resource *)rctx->ps_samplers.views[i]->base.texture,
					      RADEON_USAGE_READ);
	}

	si_context_draw(rctx, &rdraw);

	rctx->flags |= R600_CONTEXT_DST_CACHES_DIRTY | R600_CONTEXT_DRAW_PENDING;

	if (rctx->framebuffer.zsbuf) {
		struct pipe_resource *tex = rctx->framebuffer.zsbuf->texture;
		((struct r600_resource_texture *)tex)->dirty_db = TRUE;
	}

	pipe_resource_reference(&ib.buffer, NULL);
}

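/* The emission order above matters: atoms and dirty register blocks are
 * flushed to the CS first, streamout is enabled if requested, sampler
 * texture relocations are recorded, and only then is the draw itself
 * emitted via si_context_draw(). */
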
void _r600_pipe_state_add_reg(struct r600_context *ctx,
			      struct r600_pipe_state *state,
			      uint32_t offset, uint32_t value,
			      uint32_t range_id, uint32_t block_id,
			      struct r600_resource *bo,
			      enum radeon_bo_usage usage)
{
	struct r600_range *range;
	struct r600_block *block;

	if (bo) assert(usage);

	range = &ctx->range[range_id];
	block = range->blocks[block_id];
	state->regs[state->nregs].block = block;
	state->regs[state->nregs].id = (offset - block->start_offset) >> 2;

	state->regs[state->nregs].value = value;
	state->regs[state->nregs].bo = bo;
	state->regs[state->nregs].bo_usage = usage;

	state->nregs++;
	assert(state->nregs < R600_BLOCK_MAX_REG);
}

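/* Example (hypothetical offsets): for a register at byte offset 0x28430 in
 * a block whose start_offset is 0x28400, the stored id is
 * (0x28430 - 0x28400) >> 2 == 0xC, i.e. the dword index of the register
 * within its block. */
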
void r600_pipe_state_add_reg_noblock(struct r600_pipe_state *state,
				     uint32_t offset, uint32_t value,
				     struct r600_resource *bo,
				     enum radeon_bo_usage usage)
{
	if (bo) assert(usage);

	state->regs[state->nregs].id = offset;
	state->regs[state->nregs].block = NULL;
	state->regs[state->nregs].value = value;
	state->regs[state->nregs].bo = bo;
	state->regs[state->nregs].bo_usage = usage;

	state->nregs++;
	assert(state->nregs < R600_BLOCK_MAX_REG);
}