r600g: move streamout state to drivers/radeon
[mesa.git] src/gallium/drivers/r600/r600_state_common.c
/*
 * Copyright 2010 Red Hat Inc.
 *           2010 Jerome Glisse
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 */
#include "r600_formats.h"
#include "r600_shader.h"
#include "r600d.h"

#include "util/u_draw_quad.h"
#include "util/u_index_modify.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "util/u_math.h"
#include "tgsi/tgsi_parse.h"

#define R600_PRIM_RECTANGLE_LIST PIPE_PRIM_MAX
void r600_init_command_buffer(struct r600_command_buffer *cb, unsigned num_dw)
{
        assert(!cb->buf);
        cb->buf = CALLOC(1, 4 * num_dw);
        cb->max_num_dw = num_dw;
}

void r600_release_command_buffer(struct r600_command_buffer *cb)
{
        FREE(cb->buf);
}

void r600_init_atom(struct r600_context *rctx,
                    struct r600_atom *atom,
                    unsigned id,
                    void (*emit)(struct r600_context *ctx, struct r600_atom *state),
                    unsigned num_dw)
{
        assert(id < R600_NUM_ATOMS);
        assert(rctx->atoms[id] == NULL);
        rctx->atoms[id] = atom;
        atom->emit = (void*)emit;
        atom->num_dw = num_dw;
        atom->dirty = false;
}
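
/* State atoms: each atom is a small piece of GPU state with an emit
 * callback and a dirty flag; r600_draw_vbo() walks rctx->atoms[] and
 * re-emits only the dirty ones, so flagging atom.dirty = true is how a
 * state change reaches the command stream. An illustrative registration
 * (foo_state, FOO_ATOM_ID and r600_emit_foo_state are made-up names)
 * would look like:
 *
 *   r600_init_atom(rctx, &rctx->foo_state.atom, FOO_ATOM_ID,
 *                  r600_emit_foo_state, 3);
 *   ...
 *   rctx->foo_state.atom.dirty = true;   // picked up by the next draw
 */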

void r600_emit_cso_state(struct r600_context *rctx, struct r600_atom *atom)
{
        r600_emit_command_buffer(rctx->b.rings.gfx.cs, ((struct r600_cso_state*)atom)->cb);
}

void r600_emit_alphatest_state(struct r600_context *rctx, struct r600_atom *atom)
{
        struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
        struct r600_alphatest_state *a = (struct r600_alphatest_state*)atom;
        unsigned alpha_ref = a->sx_alpha_ref;

        if (rctx->b.chip_class >= EVERGREEN && a->cb0_export_16bpc) {
                alpha_ref &= ~0x1FFF;
        }

        r600_write_context_reg(cs, R_028410_SX_ALPHA_TEST_CONTROL,
                               a->sx_alpha_test_control |
                               S_028410_ALPHA_TEST_BYPASS(a->bypass));
        r600_write_context_reg(cs, R_028438_SX_ALPHA_REF, alpha_ref);
}

static void r600_texture_barrier(struct pipe_context *ctx)
{
        struct r600_context *rctx = (struct r600_context *)ctx;

        rctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
                         R600_CONTEXT_FLUSH_AND_INV_CB |
                         R600_CONTEXT_FLUSH_AND_INV |
                         R600_CONTEXT_WAIT_3D_IDLE;
}

static unsigned r600_conv_pipe_prim(unsigned prim)
{
        static const unsigned prim_conv[] = {
                V_008958_DI_PT_POINTLIST,
                V_008958_DI_PT_LINELIST,
                V_008958_DI_PT_LINELOOP,
                V_008958_DI_PT_LINESTRIP,
                V_008958_DI_PT_TRILIST,
                V_008958_DI_PT_TRISTRIP,
                V_008958_DI_PT_TRIFAN,
                V_008958_DI_PT_QUADLIST,
                V_008958_DI_PT_QUADSTRIP,
                V_008958_DI_PT_POLYGON,
                V_008958_DI_PT_LINELIST_ADJ,
                V_008958_DI_PT_LINESTRIP_ADJ,
                V_008958_DI_PT_TRILIST_ADJ,
                V_008958_DI_PT_TRISTRIP_ADJ,
                V_008958_DI_PT_RECTLIST
        };
        return prim_conv[prim];
}
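
/* prim_conv is indexed by the gallium PIPE_PRIM_* enum, so the entries
 * must stay in that declaration order. The extra RECTLIST entry sits at
 * index PIPE_PRIM_MAX, reachable only through the driver-internal
 * R600_PRIM_RECTANGLE_LIST define used by r600_draw_rectangle() below:
 * r600_conv_pipe_prim(R600_PRIM_RECTANGLE_LIST) == V_008958_DI_PT_RECTLIST.
 */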

/* common state between evergreen and r600 */

static void r600_bind_blend_state_internal(struct r600_context *rctx,
                                           struct r600_blend_state *blend, bool blend_disable)
{
        unsigned color_control;
        bool update_cb = false;

        rctx->alpha_to_one = blend->alpha_to_one;
        rctx->dual_src_blend = blend->dual_src_blend;

        if (!blend_disable) {
                r600_set_cso_state_with_cb(&rctx->blend_state, blend, &blend->buffer);
                color_control = blend->cb_color_control;
        } else {
                /* Blending is disabled. */
                r600_set_cso_state_with_cb(&rctx->blend_state, blend, &blend->buffer_no_blend);
                color_control = blend->cb_color_control_no_blend;
        }

        /* Update derived states. */
        if (rctx->cb_misc_state.blend_colormask != blend->cb_target_mask) {
                rctx->cb_misc_state.blend_colormask = blend->cb_target_mask;
                update_cb = true;
        }
        if (rctx->b.chip_class <= R700 &&
            rctx->cb_misc_state.cb_color_control != color_control) {
                rctx->cb_misc_state.cb_color_control = color_control;
                update_cb = true;
        }
        if (rctx->cb_misc_state.dual_src_blend != blend->dual_src_blend) {
                rctx->cb_misc_state.dual_src_blend = blend->dual_src_blend;
                update_cb = true;
        }
        if (update_cb) {
                rctx->cb_misc_state.atom.dirty = true;
        }
}
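
/* Note the pattern here: the CSO's pre-baked command buffer is swapped in
 * directly, while anything that also depends on other state (the color
 * mask, dual-source blending, and on R600-R700 the color control word) is
 * copied into cb_misc_state and emitted by its own atom, and only when a
 * field actually changed.
 */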

static void r600_bind_blend_state(struct pipe_context *ctx, void *state)
{
        struct r600_context *rctx = (struct r600_context *)ctx;
        struct r600_blend_state *blend = (struct r600_blend_state *)state;

        if (blend == NULL)
                return;

        r600_bind_blend_state_internal(rctx, blend, rctx->force_blend_disable);
}

static void r600_set_blend_color(struct pipe_context *ctx,
                                 const struct pipe_blend_color *state)
{
        struct r600_context *rctx = (struct r600_context *)ctx;

        rctx->blend_color.state = *state;
        rctx->blend_color.atom.dirty = true;
}

void r600_emit_blend_color(struct r600_context *rctx, struct r600_atom *atom)
{
        struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
        struct pipe_blend_color *state = &rctx->blend_color.state;

        r600_write_context_reg_seq(cs, R_028414_CB_BLEND_RED, 4);
        radeon_emit(cs, fui(state->color[0])); /* R_028414_CB_BLEND_RED */
        radeon_emit(cs, fui(state->color[1])); /* R_028418_CB_BLEND_GREEN */
        radeon_emit(cs, fui(state->color[2])); /* R_02841C_CB_BLEND_BLUE */
        radeon_emit(cs, fui(state->color[3])); /* R_028420_CB_BLEND_ALPHA */
}

void r600_emit_vgt_state(struct r600_context *rctx, struct r600_atom *atom)
{
        struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
        struct r600_vgt_state *a = (struct r600_vgt_state *)atom;

        r600_write_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, a->vgt_multi_prim_ib_reset_en);
        r600_write_context_reg_seq(cs, R_028408_VGT_INDX_OFFSET, 2);
        radeon_emit(cs, a->vgt_indx_offset); /* R_028408_VGT_INDX_OFFSET */
        radeon_emit(cs, a->vgt_multi_prim_ib_reset_indx); /* R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX */
}

static void r600_set_clip_state(struct pipe_context *ctx,
                                const struct pipe_clip_state *state)
{
        struct r600_context *rctx = (struct r600_context *)ctx;
        struct pipe_constant_buffer cb;

        rctx->clip_state.state = *state;
        rctx->clip_state.atom.dirty = true;

        cb.buffer = NULL;
        cb.user_buffer = state->ucp;
        cb.buffer_offset = 0;
        cb.buffer_size = 4*4*8;
        ctx->set_constant_buffer(ctx, PIPE_SHADER_VERTEX, R600_UCP_CONST_BUFFER, &cb);
        pipe_resource_reference(&cb.buffer, NULL);
}

static void r600_set_stencil_ref(struct pipe_context *ctx,
                                 const struct r600_stencil_ref *state)
{
        struct r600_context *rctx = (struct r600_context *)ctx;

        rctx->stencil_ref.state = *state;
        rctx->stencil_ref.atom.dirty = true;
}

void r600_emit_stencil_ref(struct r600_context *rctx, struct r600_atom *atom)
{
        struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
        struct r600_stencil_ref_state *a = (struct r600_stencil_ref_state*)atom;

        r600_write_context_reg_seq(cs, R_028430_DB_STENCILREFMASK, 2);
        radeon_emit(cs, /* R_028430_DB_STENCILREFMASK */
                    S_028430_STENCILREF(a->state.ref_value[0]) |
                    S_028430_STENCILMASK(a->state.valuemask[0]) |
                    S_028430_STENCILWRITEMASK(a->state.writemask[0]));
        radeon_emit(cs, /* R_028434_DB_STENCILREFMASK_BF */
                    S_028434_STENCILREF_BF(a->state.ref_value[1]) |
                    S_028434_STENCILMASK_BF(a->state.valuemask[1]) |
                    S_028434_STENCILWRITEMASK_BF(a->state.writemask[1]));
}
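
/* The S_028430_* helpers pack a value into its field of the register
 * word; in r600d.h DB_STENCILREFMASK carries three byte-wide fields, so
 * the ORed expression above effectively assembles
 * (ref << 0) | (valuemask << 8) | (writemask << 16), and the _BF variants
 * do the same for the back-face register.
 */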

static void r600_set_pipe_stencil_ref(struct pipe_context *ctx,
                                      const struct pipe_stencil_ref *state)
{
        struct r600_context *rctx = (struct r600_context *)ctx;
        struct r600_dsa_state *dsa = (struct r600_dsa_state*)rctx->dsa_state.cso;
        struct r600_stencil_ref ref;

        rctx->stencil_ref.pipe_state = *state;

        if (!dsa)
                return;

        ref.ref_value[0] = state->ref_value[0];
        ref.ref_value[1] = state->ref_value[1];
        ref.valuemask[0] = dsa->valuemask[0];
        ref.valuemask[1] = dsa->valuemask[1];
        ref.writemask[0] = dsa->writemask[0];
        ref.writemask[1] = dsa->writemask[1];

        r600_set_stencil_ref(ctx, &ref);
}

static void r600_bind_dsa_state(struct pipe_context *ctx, void *state)
{
        struct r600_context *rctx = (struct r600_context *)ctx;
        struct r600_dsa_state *dsa = state;
        struct r600_stencil_ref ref;

        if (state == NULL)
                return;

        r600_set_cso_state_with_cb(&rctx->dsa_state, dsa, &dsa->buffer);

        ref.ref_value[0] = rctx->stencil_ref.pipe_state.ref_value[0];
        ref.ref_value[1] = rctx->stencil_ref.pipe_state.ref_value[1];
        ref.valuemask[0] = dsa->valuemask[0];
        ref.valuemask[1] = dsa->valuemask[1];
        ref.writemask[0] = dsa->writemask[0];
        ref.writemask[1] = dsa->writemask[1];
        if (rctx->zwritemask != dsa->zwritemask) {
                rctx->zwritemask = dsa->zwritemask;
                if (rctx->b.chip_class >= EVERGREEN) {
                        /* Work around an issue when the zbuffer is not written:
                         * we see lockups on Evergreen, so do not enable HyperZ
                         * when zbuffer writes are disabled.
                         */
                        rctx->db_misc_state.atom.dirty = true;
                }
        }

        r600_set_stencil_ref(ctx, &ref);

        /* Update alphatest state. */
        if (rctx->alphatest_state.sx_alpha_test_control != dsa->sx_alpha_test_control ||
            rctx->alphatest_state.sx_alpha_ref != dsa->alpha_ref) {
                rctx->alphatest_state.sx_alpha_test_control = dsa->sx_alpha_test_control;
                rctx->alphatest_state.sx_alpha_ref = dsa->alpha_ref;
                rctx->alphatest_state.atom.dirty = true;
                if (rctx->b.chip_class >= EVERGREEN) {
                        evergreen_update_db_shader_control(rctx);
                } else {
                        r600_update_db_shader_control(rctx);
                }
        }
}

static void r600_bind_rs_state(struct pipe_context *ctx, void *state)
{
        struct r600_rasterizer_state *rs = (struct r600_rasterizer_state *)state;
        struct r600_context *rctx = (struct r600_context *)ctx;

        if (state == NULL)
                return;

        rctx->rasterizer = rs;

        r600_set_cso_state_with_cb(&rctx->rasterizer_state, rs, &rs->buffer);

        if (rs->offset_enable &&
            (rs->offset_units != rctx->poly_offset_state.offset_units ||
             rs->offset_scale != rctx->poly_offset_state.offset_scale)) {
                rctx->poly_offset_state.offset_units = rs->offset_units;
                rctx->poly_offset_state.offset_scale = rs->offset_scale;
                rctx->poly_offset_state.atom.dirty = true;
        }

        /* Update clip_misc_state. */
        if (rctx->clip_misc_state.pa_cl_clip_cntl != rs->pa_cl_clip_cntl ||
            rctx->clip_misc_state.clip_plane_enable != rs->clip_plane_enable) {
                rctx->clip_misc_state.pa_cl_clip_cntl = rs->pa_cl_clip_cntl;
                rctx->clip_misc_state.clip_plane_enable = rs->clip_plane_enable;
                rctx->clip_misc_state.atom.dirty = true;
        }

        /* Workaround for a missing scissor enable on r600. */
        if (rctx->b.chip_class == R600 &&
            rs->scissor_enable != rctx->scissor.enable) {
                rctx->scissor.enable = rs->scissor_enable;
                rctx->scissor.atom.dirty = true;
        }

        /* Re-emit PA_SC_LINE_STIPPLE. */
        rctx->last_primitive_type = -1;
}

static void r600_delete_rs_state(struct pipe_context *ctx, void *state)
{
        struct r600_rasterizer_state *rs = (struct r600_rasterizer_state *)state;

        r600_release_command_buffer(&rs->buffer);
        FREE(rs);
}

static void r600_sampler_view_destroy(struct pipe_context *ctx,
                                      struct pipe_sampler_view *state)
{
        struct r600_pipe_sampler_view *resource = (struct r600_pipe_sampler_view *)state;

        pipe_resource_reference(&state->texture, NULL);
        FREE(resource);
}

void r600_sampler_states_dirty(struct r600_context *rctx,
                               struct r600_sampler_states *state)
{
        if (state->dirty_mask) {
                if (state->dirty_mask & state->has_bordercolor_mask) {
                        rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
                }
                state->atom.num_dw =
                        util_bitcount(state->dirty_mask & state->has_bordercolor_mask) * 11 +
                        util_bitcount(state->dirty_mask & ~state->has_bordercolor_mask) * 5;
                state->atom.dirty = true;
        }
}
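
/* The atom size is recomputed on every change: each dirty sampler costs
 * 5 dwords of register writes, or 11 if it also programs a border color
 * (the extra dwords presumably cover the border-color registers, which is
 * also why border-color changes are preceded by a WAIT_3D_IDLE above).
 * For example, two dirty samplers of which one uses a border color give
 * num_dw = 11 + 5 = 16.
 */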

static void r600_bind_sampler_states(struct pipe_context *pipe,
                                     unsigned shader,
                                     unsigned start,
                                     unsigned count, void **states)
{
        struct r600_context *rctx = (struct r600_context *)pipe;
        struct r600_textures_info *dst = &rctx->samplers[shader];
        struct r600_pipe_sampler_state **rstates = (struct r600_pipe_sampler_state**)states;
        int seamless_cube_map = -1;
        unsigned i;
        /* This sets a 1 bit for each state with index >= count. */
        uint32_t disable_mask = ~((1ull << count) - 1);
        /* These are the new states set by this function. */
        uint32_t new_mask = 0;

        assert(start == 0); /* XXX fix below */

        for (i = 0; i < count; i++) {
                struct r600_pipe_sampler_state *rstate = rstates[i];

                if (rstate == dst->states.states[i]) {
                        continue;
                }

                if (rstate) {
                        if (rstate->border_color_use) {
                                dst->states.has_bordercolor_mask |= 1 << i;
                        } else {
                                dst->states.has_bordercolor_mask &= ~(1 << i);
                        }
                        seamless_cube_map = rstate->seamless_cube_map;

                        new_mask |= 1 << i;
                } else {
                        disable_mask |= 1 << i;
                }
        }

        memcpy(dst->states.states, rstates, sizeof(void*) * count);
        memset(dst->states.states + count, 0, sizeof(void*) * (NUM_TEX_UNITS - count));

        dst->states.enabled_mask &= ~disable_mask;
        dst->states.dirty_mask &= dst->states.enabled_mask;
        dst->states.enabled_mask |= new_mask;
        dst->states.dirty_mask |= new_mask;
        dst->states.has_bordercolor_mask &= dst->states.enabled_mask;

        r600_sampler_states_dirty(rctx, &dst->states);

        /* Seamless cubemap state. */
        if (rctx->b.chip_class <= R700 &&
            seamless_cube_map != -1 &&
            seamless_cube_map != rctx->seamless_cube_map.enabled) {
                /* A change in TA_CNTL_AUX needs a pipeline flush. */
                rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
                rctx->seamless_cube_map.enabled = seamless_cube_map;
                rctx->seamless_cube_map.atom.dirty = true;
        }
}
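
/* The four mask updates above must run in this order: clear the disabled
 * slots from enabled_mask, trim dirty_mask so stale bits on now-disabled
 * slots cannot trigger an emit, then OR the newly bound slots into both.
 * Worked example: with enabled_mask = 0b0111 and count = 2, binding a new
 * state to slot 0 and NULL to slot 1 gives a disable_mask covering slot 1
 * and everything >= 2, new_mask = 0b0001, and a final enabled_mask of
 * 0b0001 with slot 0 marked dirty.
 */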

static void r600_bind_vs_sampler_states(struct pipe_context *ctx, unsigned count, void **states)
{
        r600_bind_sampler_states(ctx, PIPE_SHADER_VERTEX, 0, count, states);
}

static void r600_bind_ps_sampler_states(struct pipe_context *ctx, unsigned count, void **states)
{
        r600_bind_sampler_states(ctx, PIPE_SHADER_FRAGMENT, 0, count, states);
}

static void r600_delete_sampler_state(struct pipe_context *ctx, void *state)
{
        free(state);
}

static void r600_delete_blend_state(struct pipe_context *ctx, void *state)
{
        struct r600_blend_state *blend = (struct r600_blend_state*)state;

        r600_release_command_buffer(&blend->buffer);
        r600_release_command_buffer(&blend->buffer_no_blend);
        FREE(blend);
}

static void r600_delete_dsa_state(struct pipe_context *ctx, void *state)
{
        struct r600_dsa_state *dsa = (struct r600_dsa_state *)state;

        r600_release_command_buffer(&dsa->buffer);
        free(dsa);
}

static void r600_bind_vertex_elements(struct pipe_context *ctx, void *state)
{
        struct r600_context *rctx = (struct r600_context *)ctx;

        r600_set_cso_state(&rctx->vertex_fetch_shader, state);
}

static void r600_delete_vertex_elements(struct pipe_context *ctx, void *state)
{
        struct r600_fetch_shader *shader = (struct r600_fetch_shader*)state;
        pipe_resource_reference((struct pipe_resource**)&shader->buffer, NULL);
        FREE(shader);
}

static void r600_set_index_buffer(struct pipe_context *ctx,
                                  const struct pipe_index_buffer *ib)
{
        struct r600_context *rctx = (struct r600_context *)ctx;

        if (ib) {
                pipe_resource_reference(&rctx->index_buffer.buffer, ib->buffer);
                memcpy(&rctx->index_buffer, ib, sizeof(*ib));
                r600_context_add_resource_size(ctx, ib->buffer);
        } else {
                pipe_resource_reference(&rctx->index_buffer.buffer, NULL);
        }
}

void r600_vertex_buffers_dirty(struct r600_context *rctx)
{
        if (rctx->vertex_buffer_state.dirty_mask) {
                rctx->b.flags |= R600_CONTEXT_INV_VERTEX_CACHE;
                rctx->vertex_buffer_state.atom.num_dw = (rctx->b.chip_class >= EVERGREEN ? 12 : 11) *
                                                        util_bitcount(rctx->vertex_buffer_state.dirty_mask);
                rctx->vertex_buffer_state.atom.dirty = true;
        }
}

static void r600_set_vertex_buffers(struct pipe_context *ctx,
                                    unsigned start_slot, unsigned count,
                                    const struct pipe_vertex_buffer *input)
{
        struct r600_context *rctx = (struct r600_context *)ctx;
        struct r600_vertexbuf_state *state = &rctx->vertex_buffer_state;
        struct pipe_vertex_buffer *vb = state->vb + start_slot;
        unsigned i;
        uint32_t disable_mask = 0;
        /* These are the new buffers set by this function. */
        uint32_t new_buffer_mask = 0;

        /* Set vertex buffers. */
        if (input) {
                for (i = 0; i < count; i++) {
                        if (memcmp(&input[i], &vb[i], sizeof(struct pipe_vertex_buffer))) {
                                if (input[i].buffer) {
                                        vb[i].stride = input[i].stride;
                                        vb[i].buffer_offset = input[i].buffer_offset;
                                        pipe_resource_reference(&vb[i].buffer, input[i].buffer);
                                        new_buffer_mask |= 1 << i;
                                        r600_context_add_resource_size(ctx, input[i].buffer);
                                } else {
                                        pipe_resource_reference(&vb[i].buffer, NULL);
                                        disable_mask |= 1 << i;
                                }
                        }
                }
        } else {
                for (i = 0; i < count; i++) {
                        pipe_resource_reference(&vb[i].buffer, NULL);
                }
                disable_mask = ((1ull << count) - 1);
        }

        disable_mask <<= start_slot;
        new_buffer_mask <<= start_slot;

        rctx->vertex_buffer_state.enabled_mask &= ~disable_mask;
        rctx->vertex_buffer_state.dirty_mask &= rctx->vertex_buffer_state.enabled_mask;
        rctx->vertex_buffer_state.enabled_mask |= new_buffer_mask;
        rctx->vertex_buffer_state.dirty_mask |= new_buffer_mask;

        r600_vertex_buffers_dirty(rctx);
}
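
/* Unlike the sampler paths above, this entry point honors start_slot: the
 * per-slot masks are built relative to the window [start_slot, start_slot
 * + count) and shifted into place afterwards, so slots outside the window
 * keep their current bindings, as the gallium set_vertex_buffers contract
 * requires.
 */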

void r600_sampler_views_dirty(struct r600_context *rctx,
                              struct r600_samplerview_state *state)
{
        if (state->dirty_mask) {
                rctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE;
                state->atom.num_dw = (rctx->b.chip_class >= EVERGREEN ? 14 : 13) *
                                     util_bitcount(state->dirty_mask);
                state->atom.dirty = true;
        }
}

static void r600_set_sampler_views(struct pipe_context *pipe, unsigned shader,
                                   unsigned start, unsigned count,
                                   struct pipe_sampler_view **views)
{
        struct r600_context *rctx = (struct r600_context *) pipe;
        struct r600_textures_info *dst = &rctx->samplers[shader];
        struct r600_pipe_sampler_view **rviews = (struct r600_pipe_sampler_view **)views;
        uint32_t dirty_sampler_states_mask = 0;
        unsigned i;
        /* This sets a 1 bit for each texture with index >= count. */
        uint32_t disable_mask = ~((1ull << count) - 1);
        /* These are the new textures set by this function. */
        uint32_t new_mask = 0;

        /* Set textures with index >= count to NULL. */
        uint32_t remaining_mask;

        assert(start == 0); /* XXX fix below */

        remaining_mask = dst->views.enabled_mask & disable_mask;

        while (remaining_mask) {
                i = u_bit_scan(&remaining_mask);
                assert(dst->views.views[i]);

                pipe_sampler_view_reference((struct pipe_sampler_view **)&dst->views.views[i], NULL);
        }

        for (i = 0; i < count; i++) {
                if (rviews[i] == dst->views.views[i]) {
                        continue;
                }

                if (rviews[i]) {
                        struct r600_texture *rtex =
                                (struct r600_texture*)rviews[i]->base.texture;

                        if (rviews[i]->base.texture->target != PIPE_BUFFER) {
                                if (rtex->is_depth && !rtex->is_flushing_texture) {
                                        dst->views.compressed_depthtex_mask |= 1 << i;
                                } else {
                                        dst->views.compressed_depthtex_mask &= ~(1 << i);
                                }

                                /* Track compressed colorbuffers. */
                                if (rtex->cmask_size && rtex->fmask_size) {
                                        dst->views.compressed_colortex_mask |= 1 << i;
                                } else {
                                        dst->views.compressed_colortex_mask &= ~(1 << i);
                                }
                        }
                        /* Changing between array and non-array textures requires
                         * updating TEX_ARRAY_OVERRIDE in sampler states on R6xx-R7xx. */
                        if (rctx->b.chip_class <= R700 &&
                            (dst->states.enabled_mask & (1 << i)) &&
                            (rviews[i]->base.texture->target == PIPE_TEXTURE_1D_ARRAY ||
                             rviews[i]->base.texture->target == PIPE_TEXTURE_2D_ARRAY) != dst->is_array_sampler[i]) {
                                dirty_sampler_states_mask |= 1 << i;
                        }

                        pipe_sampler_view_reference((struct pipe_sampler_view **)&dst->views.views[i], views[i]);
                        new_mask |= 1 << i;
                        r600_context_add_resource_size(pipe, views[i]->texture);
                } else {
                        pipe_sampler_view_reference((struct pipe_sampler_view **)&dst->views.views[i], NULL);
                        disable_mask |= 1 << i;
                }
        }

        dst->views.enabled_mask &= ~disable_mask;
        dst->views.dirty_mask &= dst->views.enabled_mask;
        dst->views.enabled_mask |= new_mask;
        dst->views.dirty_mask |= new_mask;
        dst->views.compressed_depthtex_mask &= dst->views.enabled_mask;
        dst->views.compressed_colortex_mask &= dst->views.enabled_mask;
        dst->views.dirty_txq_constants = TRUE;
        dst->views.dirty_buffer_constants = TRUE;
        r600_sampler_views_dirty(rctx, &dst->views);

        if (dirty_sampler_states_mask) {
                dst->states.dirty_mask |= dirty_sampler_states_mask;
                r600_sampler_states_dirty(rctx, &dst->states);
        }
}
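
/* compressed_depthtex_mask and compressed_colortex_mask record which bound
 * views still reference compressed (not yet flushed/expanded) depth or
 * CMASK/FMASK color data; r600_update_derived_state() checks these masks
 * before each draw and runs the corresponding decompress blits so the
 * texture units only ever sample resolved data.
 */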

static void r600_set_vs_sampler_views(struct pipe_context *ctx, unsigned count,
                                      struct pipe_sampler_view **views)
{
        r600_set_sampler_views(ctx, PIPE_SHADER_VERTEX, 0, count, views);
}

static void r600_set_ps_sampler_views(struct pipe_context *ctx, unsigned count,
                                      struct pipe_sampler_view **views)
{
        r600_set_sampler_views(ctx, PIPE_SHADER_FRAGMENT, 0, count, views);
}

static void r600_set_viewport_states(struct pipe_context *ctx,
                                     unsigned start_slot,
                                     unsigned num_viewports,
                                     const struct pipe_viewport_state *state)
{
        struct r600_context *rctx = (struct r600_context *)ctx;

        rctx->viewport.state = *state;
        rctx->viewport.atom.dirty = true;
}

void r600_emit_viewport_state(struct r600_context *rctx, struct r600_atom *atom)
{
        struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
        struct pipe_viewport_state *state = &rctx->viewport.state;

        r600_write_context_reg_seq(cs, R_02843C_PA_CL_VPORT_XSCALE_0, 6);
        radeon_emit(cs, fui(state->scale[0])); /* R_02843C_PA_CL_VPORT_XSCALE_0 */
        radeon_emit(cs, fui(state->translate[0])); /* R_028440_PA_CL_VPORT_XOFFSET_0 */
        radeon_emit(cs, fui(state->scale[1])); /* R_028444_PA_CL_VPORT_YSCALE_0 */
        radeon_emit(cs, fui(state->translate[1])); /* R_028448_PA_CL_VPORT_YOFFSET_0 */
        radeon_emit(cs, fui(state->scale[2])); /* R_02844C_PA_CL_VPORT_ZSCALE_0 */
        radeon_emit(cs, fui(state->translate[2])); /* R_028450_PA_CL_VPORT_ZOFFSET_0 */
}

/* Compute the key for the hw shader variant */
static INLINE struct r600_shader_key r600_shader_selector_key(struct pipe_context *ctx,
                                                              struct r600_pipe_shader_selector *sel)
{
        struct r600_context *rctx = (struct r600_context *)ctx;
        struct r600_shader_key key;
        memset(&key, 0, sizeof(key));

        if (sel->type == PIPE_SHADER_FRAGMENT) {
                key.color_two_side = rctx->rasterizer && rctx->rasterizer->two_side;
                key.alpha_to_one = rctx->alpha_to_one &&
                                   rctx->rasterizer && rctx->rasterizer->multisample_enable &&
                                   !rctx->framebuffer.cb0_is_integer;
                key.nr_cbufs = rctx->framebuffer.state.nr_cbufs;
                /* Dual-source blending only makes sense with nr_cbufs == 1. */
                if (key.nr_cbufs == 1 && rctx->dual_src_blend)
                        key.nr_cbufs = 2;
        }
        return key;
}

/* Select the hw shader variant depending on the current state.
 * (*dirty) is set to true if the current variant was changed. */
static int r600_shader_select(struct pipe_context *ctx,
                              struct r600_pipe_shader_selector *sel,
                              bool *dirty)
{
        struct r600_shader_key key;
        struct r600_context *rctx = (struct r600_context *)ctx;
        struct r600_pipe_shader *shader = NULL;
        int r;

        memset(&key, 0, sizeof(key));
        key = r600_shader_selector_key(ctx, sel);

        /* Check whether we need to change anything at all.
         * This path is also taken by most shaders that never need multiple
         * variants; it costs just the computation of the key and this
         * comparison. */
        if (likely(sel->current && memcmp(&sel->current->key, &key, sizeof(key)) == 0)) {
                return 0;
        }

        /* Look up whether we already have a matching variant in the list. */
        if (sel->num_shaders > 1) {
                struct r600_pipe_shader *p = sel->current, *c = p->next_variant;

                while (c && memcmp(&c->key, &key, sizeof(key)) != 0) {
                        p = c;
                        c = c->next_variant;
                }

                if (c) {
                        p->next_variant = c->next_variant;
                        shader = c;
                }
        }

        if (unlikely(!shader)) {
                shader = CALLOC(1, sizeof(struct r600_pipe_shader));
                shader->selector = sel;

                r = r600_pipe_shader_create(ctx, shader, key);
                if (unlikely(r)) {
                        R600_ERR("Failed to build shader variant (type=%u) %d\n",
                                 sel->type, r);
                        sel->current = NULL;
                        FREE(shader);
                        return r;
                }

                /* We don't know the value of nr_ps_max_color_exports until we
                 * have built at least one variant, so we may need to recompute
                 * the key after building the first variant. */
                if (sel->type == PIPE_SHADER_FRAGMENT &&
                    sel->num_shaders == 0) {
                        sel->nr_ps_max_color_exports = shader->shader.nr_ps_max_color_exports;
                        key = r600_shader_selector_key(ctx, sel);
                }

                memcpy(&shader->key, &key, sizeof(key));
                sel->num_shaders++;
        }

        if (dirty)
                *dirty = true;

        shader->next_variant = sel->current;
        sel->current = shader;

        if (rctx->ps_shader &&
            rctx->cb_misc_state.nr_ps_color_outputs != rctx->ps_shader->current->nr_ps_color_outputs) {
                rctx->cb_misc_state.nr_ps_color_outputs = rctx->ps_shader->current->nr_ps_color_outputs;
                rctx->cb_misc_state.atom.dirty = true;
        }
        return 0;
}
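
/* The variant list is kept in most-recently-used order: sel->current is
 * the head, and a hit deeper in the ->next_variant chain is unlinked and
 * re-inserted at the head. For example, with variants A -> B -> C and a
 * key matching C, the list becomes C -> A -> B, so the common case of a
 * repeated state combination is the O(1) memcmp against sel->current at
 * the top of r600_shader_select().
 */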

static void *r600_create_shader_state(struct pipe_context *ctx,
                                      const struct pipe_shader_state *state,
                                      unsigned pipe_shader_type)
{
        struct r600_pipe_shader_selector *sel = CALLOC_STRUCT(r600_pipe_shader_selector);
        int r;

        sel->type = pipe_shader_type;
        sel->tokens = tgsi_dup_tokens(state->tokens);
        sel->so = state->stream_output;

        r = r600_shader_select(ctx, sel, NULL);
        if (r)
                return NULL;

        return sel;
}

static void *r600_create_ps_state(struct pipe_context *ctx,
                                  const struct pipe_shader_state *state)
{
        return r600_create_shader_state(ctx, state, PIPE_SHADER_FRAGMENT);
}

static void *r600_create_vs_state(struct pipe_context *ctx,
                                  const struct pipe_shader_state *state)
{
        return r600_create_shader_state(ctx, state, PIPE_SHADER_VERTEX);
}

static void r600_bind_ps_state(struct pipe_context *ctx, void *state)
{
        struct r600_context *rctx = (struct r600_context *)ctx;

        if (!state)
                state = rctx->dummy_pixel_shader;

        rctx->pixel_shader.shader = rctx->ps_shader = (struct r600_pipe_shader_selector *)state;
        rctx->pixel_shader.atom.num_dw = rctx->ps_shader->current->command_buffer.num_dw;
        rctx->pixel_shader.atom.dirty = true;

        r600_context_add_resource_size(ctx, (struct pipe_resource *)rctx->ps_shader->current->bo);

        if (rctx->b.chip_class <= R700) {
                bool multiwrite = rctx->ps_shader->current->shader.fs_write_all;

                if (rctx->cb_misc_state.multiwrite != multiwrite) {
                        rctx->cb_misc_state.multiwrite = multiwrite;
                        rctx->cb_misc_state.atom.dirty = true;
                }
        }

        if (rctx->cb_misc_state.nr_ps_color_outputs != rctx->ps_shader->current->nr_ps_color_outputs) {
                rctx->cb_misc_state.nr_ps_color_outputs = rctx->ps_shader->current->nr_ps_color_outputs;
                rctx->cb_misc_state.atom.dirty = true;
        }

        if (rctx->b.chip_class >= EVERGREEN) {
                evergreen_update_db_shader_control(rctx);
        } else {
                r600_update_db_shader_control(rctx);
        }
}

static void r600_bind_vs_state(struct pipe_context *ctx, void *state)
{
        struct r600_context *rctx = (struct r600_context *)ctx;

        if (!state)
                return;

        rctx->vertex_shader.shader = rctx->vs_shader = (struct r600_pipe_shader_selector *)state;
        rctx->vertex_shader.atom.dirty = true;
        rctx->b.streamout.stride_in_dw = rctx->vs_shader->so.stride;

        r600_context_add_resource_size(ctx, (struct pipe_resource *)rctx->vs_shader->current->bo);

        /* Update clip misc state. */
        if (rctx->vs_shader->current->pa_cl_vs_out_cntl != rctx->clip_misc_state.pa_cl_vs_out_cntl ||
            rctx->vs_shader->current->shader.clip_dist_write != rctx->clip_misc_state.clip_dist_write) {
                rctx->clip_misc_state.pa_cl_vs_out_cntl = rctx->vs_shader->current->pa_cl_vs_out_cntl;
                rctx->clip_misc_state.clip_dist_write = rctx->vs_shader->current->shader.clip_dist_write;
                rctx->clip_misc_state.atom.dirty = true;
        }
}

static void r600_delete_shader_selector(struct pipe_context *ctx,
                                        struct r600_pipe_shader_selector *sel)
{
        struct r600_pipe_shader *p = sel->current, *c;
        while (p) {
                c = p->next_variant;
                r600_pipe_shader_destroy(ctx, p);
                free(p);
                p = c;
        }

        free(sel->tokens);
        free(sel);
}

static void r600_delete_ps_state(struct pipe_context *ctx, void *state)
{
        struct r600_context *rctx = (struct r600_context *)ctx;
        struct r600_pipe_shader_selector *sel = (struct r600_pipe_shader_selector *)state;

        if (rctx->ps_shader == sel) {
                rctx->ps_shader = NULL;
        }

        r600_delete_shader_selector(ctx, sel);
}

static void r600_delete_vs_state(struct pipe_context *ctx, void *state)
{
        struct r600_context *rctx = (struct r600_context *)ctx;
        struct r600_pipe_shader_selector *sel = (struct r600_pipe_shader_selector *)state;

        if (rctx->vs_shader == sel) {
                rctx->vs_shader = NULL;
        }

        r600_delete_shader_selector(ctx, sel);
}

void r600_constant_buffers_dirty(struct r600_context *rctx, struct r600_constbuf_state *state)
{
        if (state->dirty_mask) {
                rctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE;
                state->atom.num_dw = rctx->b.chip_class >= EVERGREEN ? util_bitcount(state->dirty_mask)*20
                                                                     : util_bitcount(state->dirty_mask)*19;
                state->atom.dirty = true;
        }
}

static void r600_set_constant_buffer(struct pipe_context *ctx, uint shader, uint index,
                                     struct pipe_constant_buffer *input)
{
        struct r600_context *rctx = (struct r600_context *)ctx;
        struct r600_constbuf_state *state = &rctx->constbuf_state[shader];
        struct pipe_constant_buffer *cb;
        const uint8_t *ptr;

        /* Note that the state tracker can unbind constant buffers by
         * passing NULL here.
         */
        if (unlikely(!input || (!input->buffer && !input->user_buffer))) {
                state->enabled_mask &= ~(1 << index);
                state->dirty_mask &= ~(1 << index);
                pipe_resource_reference(&state->cb[index].buffer, NULL);
                return;
        }

        cb = &state->cb[index];
        cb->buffer_size = input->buffer_size;

        ptr = input->user_buffer;

        if (ptr) {
                /* Upload the user buffer. */
                if (R600_BIG_ENDIAN) {
                        uint32_t *tmpPtr;
                        unsigned i, size = input->buffer_size;

                        if (!(tmpPtr = malloc(size))) {
                                R600_ERR("Failed to allocate BE swap buffer.\n");
                                return;
                        }

                        for (i = 0; i < size / 4; ++i) {
                                tmpPtr[i] = util_bswap32(((uint32_t *)ptr)[i]);
                        }

                        u_upload_data(rctx->uploader, 0, size, tmpPtr, &cb->buffer_offset, &cb->buffer);
                        free(tmpPtr);
                } else {
                        u_upload_data(rctx->uploader, 0, input->buffer_size, ptr, &cb->buffer_offset, &cb->buffer);
                }
                /* Account for it in the GTT. */
                rctx->b.gtt += input->buffer_size;
        } else {
                /* Set up the hw buffer. */
                cb->buffer_offset = input->buffer_offset;
                pipe_resource_reference(&cb->buffer, input->buffer);
                r600_context_add_resource_size(ctx, input->buffer);
        }

        state->enabled_mask |= 1 << index;
        state->dirty_mask |= 1 << index;
        r600_constant_buffers_dirty(rctx, state);
}

static void r600_set_sample_mask(struct pipe_context *pipe, unsigned sample_mask)
{
        struct r600_context *rctx = (struct r600_context*)pipe;

        if (rctx->sample_mask.sample_mask == (uint16_t)sample_mask)
                return;

        rctx->sample_mask.sample_mask = sample_mask;
        rctx->sample_mask.atom.dirty = true;
}

/*
 * On r600/700 hw we don't have vertex fetch swizzle. TBOs don't require
 * full swizzles, but they do need masking and setting alpha to one, so we
 * set up a group of 5 constants per buffer: the four component masks plus
 * the alpha value. In the shader we then AND the 4 components with
 * 0xffffffff or 0, and OR the alpha with the value given here.
 * A 6th constant stores the buffer size for TXQ.
 */
static void r600_setup_buffer_constants(struct r600_context *rctx, int shader_type)
{
        struct r600_textures_info *samplers = &rctx->samplers[shader_type];
        int bits;
        uint32_t array_size;
        struct pipe_constant_buffer cb;
        int i, j;

        if (!samplers->views.dirty_buffer_constants)
                return;

        samplers->views.dirty_buffer_constants = FALSE;

        bits = util_last_bit(samplers->views.enabled_mask);
        array_size = bits * 8 * sizeof(uint32_t) * 4;
        samplers->buffer_constants = realloc(samplers->buffer_constants, array_size);
        memset(samplers->buffer_constants, 0, array_size);
        for (i = 0; i < bits; i++) {
                if (samplers->views.enabled_mask & (1 << i)) {
                        int offset = i * 8;
                        const struct util_format_description *desc;
                        desc = util_format_description(samplers->views.views[i]->base.format);

                        for (j = 0; j < 4; j++)
                                if (j < desc->nr_channels)
                                        samplers->buffer_constants[offset+j] = 0xffffffff;
                                else
                                        samplers->buffer_constants[offset+j] = 0x0;
                        if (desc->nr_channels < 4) {
                                if (desc->channel[0].pure_integer)
                                        samplers->buffer_constants[offset+4] = 1;
                                else
                                        samplers->buffer_constants[offset+4] = 0x3f800000;
                        } else
                                samplers->buffer_constants[offset + 4] = 0;

                        samplers->buffer_constants[offset + 5] = samplers->views.views[i]->base.texture->width0 / util_format_get_blocksize(samplers->views.views[i]->base.format);
                }
        }

        cb.buffer = NULL;
        cb.user_buffer = samplers->buffer_constants;
        cb.buffer_offset = 0;
        cb.buffer_size = array_size;
        rctx->b.b.set_constant_buffer(&rctx->b.b, shader_type, R600_BUFFER_INFO_CONST_BUFFER, &cb);
        pipe_resource_reference(&cb.buffer, NULL);
}
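
/* The resulting layout is 8 dwords per texture slot i (hence offset = i*8):
 *
 *   dwords 0-3: per-component AND masks (0xffffffff or 0)
 *   dword  4:   alpha replacement value (1 for pure integers, the bit
 *               pattern of 1.0f otherwise; 0 when the format already has
 *               4 channels)
 *   dword  5:   buffer width in elements, for TXQ
 *   dwords 6-7: unused padding
 *
 * e.g. an R32_FLOAT buffer of 256 elements in slot 0 yields
 * { ~0u, 0, 0, 0, 0x3f800000, 256, 0, 0 }.
 */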

/* On evergreen we only need to store the buffer size for TXQ */
static void eg_setup_buffer_constants(struct r600_context *rctx, int shader_type)
{
        struct r600_textures_info *samplers = &rctx->samplers[shader_type];
        int bits;
        uint32_t array_size;
        struct pipe_constant_buffer cb;
        int i;

        if (!samplers->views.dirty_buffer_constants)
                return;

        samplers->views.dirty_buffer_constants = FALSE;

        bits = util_last_bit(samplers->views.enabled_mask);
        array_size = bits * sizeof(uint32_t) * 4;
        samplers->buffer_constants = realloc(samplers->buffer_constants, array_size);
        memset(samplers->buffer_constants, 0, array_size);
        for (i = 0; i < bits; i++)
                if (samplers->views.enabled_mask & (1 << i))
                        samplers->buffer_constants[i] = samplers->views.views[i]->base.texture->width0 / util_format_get_blocksize(samplers->views.views[i]->base.format);

        cb.buffer = NULL;
        cb.user_buffer = samplers->buffer_constants;
        cb.buffer_offset = 0;
        cb.buffer_size = array_size;
        rctx->b.b.set_constant_buffer(&rctx->b.b, shader_type, R600_BUFFER_INFO_CONST_BUFFER, &cb);
        pipe_resource_reference(&cb.buffer, NULL);
}

static void r600_setup_txq_cube_array_constants(struct r600_context *rctx, int shader_type)
{
        struct r600_textures_info *samplers = &rctx->samplers[shader_type];
        int bits;
        uint32_t array_size;
        struct pipe_constant_buffer cb;
        int i;

        if (!samplers->views.dirty_txq_constants)
                return;

        samplers->views.dirty_txq_constants = FALSE;

        bits = util_last_bit(samplers->views.enabled_mask);
        array_size = bits * sizeof(uint32_t) * 4;
        samplers->txq_constants = realloc(samplers->txq_constants, array_size);
        memset(samplers->txq_constants, 0, array_size);
        for (i = 0; i < bits; i++)
                if (samplers->views.enabled_mask & (1 << i))
                        samplers->txq_constants[i] = samplers->views.views[i]->base.texture->array_size / 6;

        cb.buffer = NULL;
        cb.user_buffer = samplers->txq_constants;
        cb.buffer_offset = 0;
        cb.buffer_size = array_size;
        rctx->b.b.set_constant_buffer(&rctx->b.b, shader_type, R600_TXQ_CONST_BUFFER, &cb);
        pipe_resource_reference(&cb.buffer, NULL);
}

static bool r600_update_derived_state(struct r600_context *rctx)
{
        struct pipe_context *ctx = (struct pipe_context*)rctx;
        bool ps_dirty = false;
        bool blend_disable;

        if (!rctx->blitter->running) {
                unsigned i;

                /* Decompress textures if needed. */
                for (i = 0; i < PIPE_SHADER_TYPES; i++) {
                        struct r600_samplerview_state *views = &rctx->samplers[i].views;
                        if (views->compressed_depthtex_mask) {
                                r600_decompress_depth_textures(rctx, views);
                        }
                        if (views->compressed_colortex_mask) {
                                r600_decompress_color_textures(rctx, views);
                        }
                }
        }

        r600_shader_select(ctx, rctx->ps_shader, &ps_dirty);

        if (rctx->ps_shader && rctx->rasterizer &&
            ((rctx->rasterizer->sprite_coord_enable != rctx->ps_shader->current->sprite_coord_enable) ||
             (rctx->rasterizer->flatshade != rctx->ps_shader->current->flatshade))) {

                if (rctx->b.chip_class >= EVERGREEN)
                        evergreen_update_ps_state(ctx, rctx->ps_shader->current);
                else
                        r600_update_ps_state(ctx, rctx->ps_shader->current);

                ps_dirty = true;
        }

        if (ps_dirty) {
                rctx->pixel_shader.atom.num_dw = rctx->ps_shader->current->command_buffer.num_dw;
                rctx->pixel_shader.atom.dirty = true;
        }

        /* On R600 we stuff the masks + TXQ info into one constant buffer;
         * on Evergreen we only need the TXQ info buffer. */
        if (rctx->b.chip_class < EVERGREEN) {
                if (rctx->ps_shader && rctx->ps_shader->current->shader.uses_tex_buffers)
                        r600_setup_buffer_constants(rctx, PIPE_SHADER_FRAGMENT);
                if (rctx->vs_shader && rctx->vs_shader->current->shader.uses_tex_buffers)
                        r600_setup_buffer_constants(rctx, PIPE_SHADER_VERTEX);
        } else {
                if (rctx->ps_shader && rctx->ps_shader->current->shader.uses_tex_buffers)
                        eg_setup_buffer_constants(rctx, PIPE_SHADER_FRAGMENT);
                if (rctx->vs_shader && rctx->vs_shader->current->shader.uses_tex_buffers)
                        eg_setup_buffer_constants(rctx, PIPE_SHADER_VERTEX);
        }

        if (rctx->ps_shader && rctx->ps_shader->current->shader.has_txq_cube_array_z_comp)
                r600_setup_txq_cube_array_constants(rctx, PIPE_SHADER_FRAGMENT);
        if (rctx->vs_shader && rctx->vs_shader->current->shader.has_txq_cube_array_z_comp)
                r600_setup_txq_cube_array_constants(rctx, PIPE_SHADER_VERTEX);

        if (rctx->b.chip_class < EVERGREEN && rctx->ps_shader && rctx->vs_shader) {
                if (!r600_adjust_gprs(rctx)) {
                        /* Discard rendering. */
                        return false;
                }
        }

        blend_disable = (rctx->dual_src_blend &&
                         rctx->ps_shader->current->nr_ps_color_outputs < 2);

        if (blend_disable != rctx->force_blend_disable) {
                rctx->force_blend_disable = blend_disable;
                r600_bind_blend_state_internal(rctx,
                                               rctx->blend_state.cso,
                                               blend_disable);
        }
        return true;
}

static unsigned r600_conv_prim_to_gs_out(unsigned mode)
{
        static const int prim_conv[] = {
                V_028A6C_OUTPRIM_TYPE_POINTLIST,
                V_028A6C_OUTPRIM_TYPE_LINESTRIP,
                V_028A6C_OUTPRIM_TYPE_LINESTRIP,
                V_028A6C_OUTPRIM_TYPE_LINESTRIP,
                V_028A6C_OUTPRIM_TYPE_TRISTRIP,
                V_028A6C_OUTPRIM_TYPE_TRISTRIP,
                V_028A6C_OUTPRIM_TYPE_TRISTRIP,
                V_028A6C_OUTPRIM_TYPE_TRISTRIP,
                V_028A6C_OUTPRIM_TYPE_TRISTRIP,
                V_028A6C_OUTPRIM_TYPE_TRISTRIP,
                V_028A6C_OUTPRIM_TYPE_LINESTRIP,
                V_028A6C_OUTPRIM_TYPE_LINESTRIP,
                V_028A6C_OUTPRIM_TYPE_TRISTRIP,
                V_028A6C_OUTPRIM_TYPE_TRISTRIP,
                V_028A6C_OUTPRIM_TYPE_TRISTRIP
        };
        assert(mode < Elements(prim_conv));

        return prim_conv[mode];
}

void r600_emit_clip_misc_state(struct r600_context *rctx, struct r600_atom *atom)
{
        struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
        struct r600_clip_misc_state *state = &rctx->clip_misc_state;

        r600_write_context_reg(cs, R_028810_PA_CL_CLIP_CNTL,
                               state->pa_cl_clip_cntl |
                               (state->clip_dist_write ? 0 : state->clip_plane_enable & 0x3F));
        r600_write_context_reg(cs, R_02881C_PA_CL_VS_OUT_CNTL,
                               state->pa_cl_vs_out_cntl |
                               (state->clip_plane_enable & state->clip_dist_write));
}
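
/* The two writes above express how user clip planes interact with
 * shader-written clip distances: if the VS writes clip distances
 * (clip_dist_write != 0), the fixed-function UCP enables in
 * PA_CL_CLIP_CNTL are left clear and the enabled planes are instead
 * masked into the clip-distance enables of PA_CL_VS_OUT_CNTL; otherwise
 * the low six UCP enable bits are set directly.
 */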

static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *dinfo)
{
        struct r600_context *rctx = (struct r600_context *)ctx;
        struct pipe_draw_info info = *dinfo;
        struct pipe_index_buffer ib = {};
        unsigned i;
        struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;

        if (!info.count && (info.indexed || !info.count_from_stream_output)) {
                assert(0);
                return;
        }

        if (!rctx->vs_shader) {
                assert(0);
                return;
        }

        /* Make sure that the gfx ring is the only one active. */
        if (rctx->b.rings.dma.cs) {
                rctx->b.rings.dma.flush(rctx, RADEON_FLUSH_ASYNC);
        }

        if (!r600_update_derived_state(rctx)) {
                /* There is no point in rendering: the current draw
                 * cannot be carried out.
                 */
                return;
        }

        if (info.indexed) {
                /* Initialize the index buffer struct. */
                pipe_resource_reference(&ib.buffer, rctx->index_buffer.buffer);
                ib.user_buffer = rctx->index_buffer.user_buffer;
                ib.index_size = rctx->index_buffer.index_size;
                ib.offset = rctx->index_buffer.offset + info.start * ib.index_size;

                /* Translate 8-bit indices to 16-bit. */
                if (ib.index_size == 1) {
                        struct pipe_resource *out_buffer = NULL;
                        unsigned out_offset;
                        void *ptr;

                        u_upload_alloc(rctx->uploader, 0, info.count * 2,
                                       &out_offset, &out_buffer, &ptr);

                        util_shorten_ubyte_elts_to_userptr(
                                        &rctx->b.b, &ib, 0, ib.offset, info.count, ptr);

                        pipe_resource_reference(&ib.buffer, NULL);
                        ib.user_buffer = NULL;
                        ib.buffer = out_buffer;
                        ib.offset = out_offset;
                        ib.index_size = 2;
                }

                /* Upload the index buffer.
                 * The upload is skipped for small index counts on little-endian machines
                 * and the indices are emitted via PKT3_DRAW_INDEX_IMMD.
                 * Note: Instanced rendering in combination with immediate indices hangs. */
                if (ib.user_buffer && (R600_BIG_ENDIAN || info.instance_count > 1 ||
                                       info.count*ib.index_size > 20)) {
                        u_upload_data(rctx->uploader, 0, info.count * ib.index_size,
                                      ib.user_buffer, &ib.offset, &ib.buffer);
                        ib.user_buffer = NULL;
                }
        } else {
                info.index_bias = info.start;
        }

        /* Set the index offset and primitive restart. */
        if (rctx->vgt_state.vgt_multi_prim_ib_reset_en != info.primitive_restart ||
            rctx->vgt_state.vgt_multi_prim_ib_reset_indx != info.restart_index ||
            rctx->vgt_state.vgt_indx_offset != info.index_bias) {
                rctx->vgt_state.vgt_multi_prim_ib_reset_en = info.primitive_restart;
                rctx->vgt_state.vgt_multi_prim_ib_reset_indx = info.restart_index;
                rctx->vgt_state.vgt_indx_offset = info.index_bias;
                rctx->vgt_state.atom.dirty = true;
        }

        /* Workaround for hardware deadlock on certain R600 ASICs: write into a CB register. */
        if (rctx->b.chip_class == R600) {
                rctx->b.flags |= R600_CONTEXT_PS_PARTIAL_FLUSH;
                rctx->cb_misc_state.atom.dirty = true;
        }

        /* Emit states. */
        r600_need_cs_space(rctx, ib.user_buffer ? 5 : 0, TRUE);
        r600_flush_emit(rctx);

        for (i = 0; i < R600_NUM_ATOMS; i++) {
                if (rctx->atoms[i] == NULL || !rctx->atoms[i]->dirty) {
                        continue;
                }
                r600_emit_atom(rctx, rctx->atoms[i]);
        }

        /* Update start instance. */
        if (rctx->last_start_instance != info.start_instance) {
                r600_write_ctl_const(cs, R_03CFF4_SQ_VTX_START_INST_LOC, info.start_instance);
                rctx->last_start_instance = info.start_instance;
        }

        /* Update the primitive type. */
        if (rctx->last_primitive_type != info.mode) {
                unsigned ls_mask = 0;

                if (info.mode == PIPE_PRIM_LINES)
                        ls_mask = 1;
                else if (info.mode == PIPE_PRIM_LINE_STRIP ||
                         info.mode == PIPE_PRIM_LINE_LOOP)
                        ls_mask = 2;

                r600_write_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
                                       S_028A0C_AUTO_RESET_CNTL(ls_mask) |
                                       (rctx->rasterizer ? rctx->rasterizer->pa_sc_line_stipple : 0));
                r600_write_context_reg(cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE,
                                       r600_conv_prim_to_gs_out(info.mode));
                r600_write_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE,
                                      r600_conv_pipe_prim(info.mode));

                rctx->last_primitive_type = info.mode;
        }

        /* Draw packets. */
        cs->buf[cs->cdw++] = PKT3(PKT3_NUM_INSTANCES, 0, rctx->predicate_drawing);
        cs->buf[cs->cdw++] = info.instance_count;
        if (info.indexed) {
                cs->buf[cs->cdw++] = PKT3(PKT3_INDEX_TYPE, 0, rctx->predicate_drawing);
                cs->buf[cs->cdw++] = ib.index_size == 4 ?
                                     (VGT_INDEX_32 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_32_BIT : 0)) :
                                     (VGT_INDEX_16 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_16_BIT : 0));

                if (ib.user_buffer) {
                        unsigned size_bytes = info.count*ib.index_size;
                        unsigned size_dw = align(size_bytes, 4) / 4;
                        cs->buf[cs->cdw++] = PKT3(PKT3_DRAW_INDEX_IMMD, 1 + size_dw, rctx->predicate_drawing);
                        cs->buf[cs->cdw++] = info.count;
                        cs->buf[cs->cdw++] = V_0287F0_DI_SRC_SEL_IMMEDIATE;
                        memcpy(cs->buf+cs->cdw, ib.user_buffer, size_bytes);
                        cs->cdw += size_dw;
                } else {
                        uint64_t va = r600_resource_va(ctx->screen, ib.buffer) + ib.offset;
                        cs->buf[cs->cdw++] = PKT3(PKT3_DRAW_INDEX, 3, rctx->predicate_drawing);
                        cs->buf[cs->cdw++] = va;
                        cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;
                        cs->buf[cs->cdw++] = info.count;
                        cs->buf[cs->cdw++] = V_0287F0_DI_SRC_SEL_DMA;
                        cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, rctx->predicate_drawing);
                        cs->buf[cs->cdw++] = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, (struct r600_resource*)ib.buffer, RADEON_USAGE_READ);
                }
        } else {
                if (info.count_from_stream_output) {
                        struct r600_so_target *t = (struct r600_so_target*)info.count_from_stream_output;
                        uint64_t va = r600_resource_va(&rctx->screen->b.b, (void*)t->buf_filled_size) + t->buf_filled_size_offset;

                        r600_write_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE, t->stride_in_dw);

                        cs->buf[cs->cdw++] = PKT3(PKT3_COPY_DW, 4, 0);
                        cs->buf[cs->cdw++] = COPY_DW_SRC_IS_MEM | COPY_DW_DST_IS_REG;
                        cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL; /* src address lo */
                        cs->buf[cs->cdw++] = (va >> 32UL) & 0xFFUL; /* src address hi */
                        cs->buf[cs->cdw++] = R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2; /* dst register */
                        cs->buf[cs->cdw++] = 0; /* unused */

                        cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
                        cs->buf[cs->cdw++] = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, t->buf_filled_size, RADEON_USAGE_READ);
                }

                cs->buf[cs->cdw++] = PKT3(PKT3_DRAW_INDEX_AUTO, 1, rctx->predicate_drawing);
                cs->buf[cs->cdw++] = info.count;
                cs->buf[cs->cdw++] = V_0287F0_DI_SRC_SEL_AUTO_INDEX |
                                     (info.count_from_stream_output ? S_0287F0_USE_OPAQUE(1) : 0);
        }

        if (rctx->screen->trace_bo) {
                r600_trace_emit(rctx);
        }

        /* Set the depth buffer as dirty. */
        if (rctx->framebuffer.state.zsbuf) {
                struct pipe_surface *surf = rctx->framebuffer.state.zsbuf;
                struct r600_texture *rtex = (struct r600_texture *)surf->texture;

                rtex->dirty_level_mask |= 1 << surf->u.tex.level;
        }
        if (rctx->framebuffer.compressed_cb_mask) {
                struct pipe_surface *surf;
                struct r600_texture *rtex;
                unsigned mask = rctx->framebuffer.compressed_cb_mask;

                do {
                        unsigned i = u_bit_scan(&mask);
                        surf = rctx->framebuffer.state.cbufs[i];
                        rtex = (struct r600_texture*)surf->texture;

                        rtex->dirty_level_mask |= 1 << surf->u.tex.level;

                } while (mask);
        }

        pipe_resource_reference(&ib.buffer, NULL);
        rctx->num_draw_calls++;
}
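
/* A note on the raw packet writes above: every buffer referenced by a
 * draw packet is followed by a PKT3_NOP whose payload is the relocation
 * handle returned by r600_context_bo_reloc(); the kernel patches in the
 * actual GPU address at submission time and uses the relocation to keep
 * the buffer resident. The same pattern appears in r600_emit_shader() and
 * r600_trace_emit() below.
 */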

void r600_draw_rectangle(struct blitter_context *blitter,
                         int x1, int y1, int x2, int y2, float depth,
                         enum blitter_attrib_type type, const union pipe_color_union *attrib)
{
        struct r600_context *rctx = (struct r600_context*)util_blitter_get_pipe(blitter);
        struct pipe_viewport_state viewport;
        struct pipe_resource *buf = NULL;
        unsigned offset = 0;
        float *vb;

        if (type == UTIL_BLITTER_ATTRIB_TEXCOORD) {
                util_blitter_draw_rectangle(blitter, x1, y1, x2, y2, depth, type, attrib);
                return;
        }

        /* Some operations (like color resolve on r6xx) don't work
         * with the conventional primitive types.
         * One that works is PT_RECTLIST, which we use here. */

        /* Set up the viewport. */
        viewport.scale[0] = 1.0f;
        viewport.scale[1] = 1.0f;
        viewport.scale[2] = 1.0f;
        viewport.scale[3] = 1.0f;
        viewport.translate[0] = 0.0f;
        viewport.translate[1] = 0.0f;
        viewport.translate[2] = 0.0f;
        viewport.translate[3] = 0.0f;
        rctx->b.b.set_viewport_states(&rctx->b.b, 0, 1, &viewport);

        /* Upload vertices. The hw rectangle has only 3 vertices;
         * the 4th one is presumably derived from the first 3.
         * The vertex specification should match u_blitter's vertex element state. */
        u_upload_alloc(rctx->uploader, 0, sizeof(float) * 24, &offset, &buf, (void**)&vb);
        vb[0] = x1;
        vb[1] = y1;
        vb[2] = depth;
        vb[3] = 1;

        vb[8] = x1;
        vb[9] = y2;
        vb[10] = depth;
        vb[11] = 1;

        vb[16] = x2;
        vb[17] = y1;
        vb[18] = depth;
        vb[19] = 1;

        if (attrib) {
                memcpy(vb+4, attrib->f, sizeof(float)*4);
                memcpy(vb+12, attrib->f, sizeof(float)*4);
                memcpy(vb+20, attrib->f, sizeof(float)*4);
        }

        /* Draw. */
        util_draw_vertex_buffer(&rctx->b.b, NULL, buf, rctx->blitter->vb_slot, offset,
                                R600_PRIM_RECTANGLE_LIST, 3, 2);
        pipe_resource_reference(&buf, NULL);
}
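
/* The 24 uploaded floats are three vertices of 8 floats each: a vec4
 * position (x, y, depth, 1) at float offsets 0, 8 and 16, with an optional
 * vec4 attribute (color or the like) in the 4 floats following each
 * position. The final argument pair of util_draw_vertex_buffer()
 * (3, 2) is the vertex count and the number of vec4 attributes per vertex.
 */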

uint32_t r600_translate_stencil_op(int s_op)
{
        switch (s_op) {
        case PIPE_STENCIL_OP_KEEP:
                return V_028800_STENCIL_KEEP;
        case PIPE_STENCIL_OP_ZERO:
                return V_028800_STENCIL_ZERO;
        case PIPE_STENCIL_OP_REPLACE:
                return V_028800_STENCIL_REPLACE;
        case PIPE_STENCIL_OP_INCR:
                return V_028800_STENCIL_INCR;
        case PIPE_STENCIL_OP_DECR:
                return V_028800_STENCIL_DECR;
        case PIPE_STENCIL_OP_INCR_WRAP:
                return V_028800_STENCIL_INCR_WRAP;
        case PIPE_STENCIL_OP_DECR_WRAP:
                return V_028800_STENCIL_DECR_WRAP;
        case PIPE_STENCIL_OP_INVERT:
                return V_028800_STENCIL_INVERT;
        default:
                R600_ERR("Unknown stencil op %d\n", s_op);
                assert(0);
                break;
        }
        return 0;
}

uint32_t r600_translate_fill(uint32_t func)
{
        switch(func) {
        case PIPE_POLYGON_MODE_FILL:
                return 2;
        case PIPE_POLYGON_MODE_LINE:
                return 1;
        case PIPE_POLYGON_MODE_POINT:
                return 0;
        default:
                assert(0);
                return 0;
        }
}

unsigned r600_tex_wrap(unsigned wrap)
{
        switch (wrap) {
        default:
        case PIPE_TEX_WRAP_REPEAT:
                return V_03C000_SQ_TEX_WRAP;
        case PIPE_TEX_WRAP_CLAMP:
                return V_03C000_SQ_TEX_CLAMP_HALF_BORDER;
        case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
                return V_03C000_SQ_TEX_CLAMP_LAST_TEXEL;
        case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
                return V_03C000_SQ_TEX_CLAMP_BORDER;
        case PIPE_TEX_WRAP_MIRROR_REPEAT:
                return V_03C000_SQ_TEX_MIRROR;
        case PIPE_TEX_WRAP_MIRROR_CLAMP:
                return V_03C000_SQ_TEX_MIRROR_ONCE_HALF_BORDER;
        case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
                return V_03C000_SQ_TEX_MIRROR_ONCE_LAST_TEXEL;
        case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
                return V_03C000_SQ_TEX_MIRROR_ONCE_BORDER;
        }
}

unsigned r600_tex_filter(unsigned filter)
{
        switch (filter) {
        default:
        case PIPE_TEX_FILTER_NEAREST:
                return V_03C000_SQ_TEX_XY_FILTER_POINT;
        case PIPE_TEX_FILTER_LINEAR:
                return V_03C000_SQ_TEX_XY_FILTER_BILINEAR;
        }
}

unsigned r600_tex_mipfilter(unsigned filter)
{
        switch (filter) {
        case PIPE_TEX_MIPFILTER_NEAREST:
                return V_03C000_SQ_TEX_Z_FILTER_POINT;
        case PIPE_TEX_MIPFILTER_LINEAR:
                return V_03C000_SQ_TEX_Z_FILTER_LINEAR;
        default:
        case PIPE_TEX_MIPFILTER_NONE:
                return V_03C000_SQ_TEX_Z_FILTER_NONE;
        }
}

unsigned r600_tex_compare(unsigned compare)
{
        switch (compare) {
        default:
        case PIPE_FUNC_NEVER:
                return V_03C000_SQ_TEX_DEPTH_COMPARE_NEVER;
        case PIPE_FUNC_LESS:
                return V_03C000_SQ_TEX_DEPTH_COMPARE_LESS;
        case PIPE_FUNC_EQUAL:
                return V_03C000_SQ_TEX_DEPTH_COMPARE_EQUAL;
        case PIPE_FUNC_LEQUAL:
                return V_03C000_SQ_TEX_DEPTH_COMPARE_LESSEQUAL;
        case PIPE_FUNC_GREATER:
                return V_03C000_SQ_TEX_DEPTH_COMPARE_GREATER;
        case PIPE_FUNC_NOTEQUAL:
                return V_03C000_SQ_TEX_DEPTH_COMPARE_NOTEQUAL;
        case PIPE_FUNC_GEQUAL:
                return V_03C000_SQ_TEX_DEPTH_COMPARE_GREATEREQUAL;
        case PIPE_FUNC_ALWAYS:
                return V_03C000_SQ_TEX_DEPTH_COMPARE_ALWAYS;
        }
}

static bool wrap_mode_uses_border_color(unsigned wrap, bool linear_filter)
{
        return wrap == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
               wrap == PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER ||
               (linear_filter &&
                (wrap == PIPE_TEX_WRAP_CLAMP ||
                 wrap == PIPE_TEX_WRAP_MIRROR_CLAMP));
}

bool sampler_state_needs_border_color(const struct pipe_sampler_state *state)
{
        bool linear_filter = state->min_img_filter != PIPE_TEX_FILTER_NEAREST ||
                             state->mag_img_filter != PIPE_TEX_FILTER_NEAREST;

        return (state->border_color.ui[0] || state->border_color.ui[1] ||
                state->border_color.ui[2] || state->border_color.ui[3]) &&
               (wrap_mode_uses_border_color(state->wrap_s, linear_filter) ||
                wrap_mode_uses_border_color(state->wrap_t, linear_filter) ||
                wrap_mode_uses_border_color(state->wrap_r, linear_filter));
}

void r600_emit_shader(struct r600_context *rctx, struct r600_atom *a)
{
        struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
        struct r600_pipe_shader *shader = ((struct r600_shader_state*)a)->shader->current;

        r600_emit_command_buffer(cs, &shader->command_buffer);

        radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
        radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, shader->bo, RADEON_USAGE_READ));
}

/* keep this at the end of this file, please */
void r600_init_common_state_functions(struct r600_context *rctx)
{
        rctx->b.b.create_fs_state = r600_create_ps_state;
        rctx->b.b.create_vs_state = r600_create_vs_state;
        rctx->b.b.create_vertex_elements_state = r600_create_vertex_fetch_shader;
        rctx->b.b.bind_blend_state = r600_bind_blend_state;
        rctx->b.b.bind_depth_stencil_alpha_state = r600_bind_dsa_state;
        rctx->b.b.bind_fragment_sampler_states = r600_bind_ps_sampler_states;
        rctx->b.b.bind_fs_state = r600_bind_ps_state;
        rctx->b.b.bind_rasterizer_state = r600_bind_rs_state;
        rctx->b.b.bind_vertex_elements_state = r600_bind_vertex_elements;
        rctx->b.b.bind_vertex_sampler_states = r600_bind_vs_sampler_states;
        rctx->b.b.bind_vs_state = r600_bind_vs_state;
        rctx->b.b.delete_blend_state = r600_delete_blend_state;
        rctx->b.b.delete_depth_stencil_alpha_state = r600_delete_dsa_state;
        rctx->b.b.delete_fs_state = r600_delete_ps_state;
        rctx->b.b.delete_rasterizer_state = r600_delete_rs_state;
        rctx->b.b.delete_sampler_state = r600_delete_sampler_state;
        rctx->b.b.delete_vertex_elements_state = r600_delete_vertex_elements;
        rctx->b.b.delete_vs_state = r600_delete_vs_state;
        rctx->b.b.set_blend_color = r600_set_blend_color;
        rctx->b.b.set_clip_state = r600_set_clip_state;
        rctx->b.b.set_constant_buffer = r600_set_constant_buffer;
        rctx->b.b.set_sample_mask = r600_set_sample_mask;
        rctx->b.b.set_stencil_ref = r600_set_pipe_stencil_ref;
        rctx->b.b.set_viewport_states = r600_set_viewport_states;
        rctx->b.b.set_vertex_buffers = r600_set_vertex_buffers;
        rctx->b.b.set_index_buffer = r600_set_index_buffer;
        rctx->b.b.set_fragment_sampler_views = r600_set_ps_sampler_views;
        rctx->b.b.set_vertex_sampler_views = r600_set_vs_sampler_views;
        rctx->b.b.sampler_view_destroy = r600_sampler_view_destroy;
        rctx->b.b.texture_barrier = r600_texture_barrier;
        rctx->b.b.set_stream_output_targets = r600_set_streamout_targets;
        rctx->b.b.draw_vbo = r600_draw_vbo;
}

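/* Debug breadcrumb: writes the current command-stream write pointer
 * (cs->cdw) and a per-screen submission counter into the trace BO with a
 * MEM_WRITE packet, presumably so a post-mortem read of the BO shows how
 * far the GPU got before a lockup. Only used when rctx->screen->trace_bo
 * exists (see the call in r600_draw_vbo()).
 */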
void r600_trace_emit(struct r600_context *rctx)
{
        struct r600_screen *rscreen = rctx->screen;
        struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
        uint64_t va;
        uint32_t reloc;

        va = r600_resource_va(&rscreen->b.b, (void*)rscreen->trace_bo);
        reloc = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rscreen->trace_bo, RADEON_USAGE_READWRITE);
        radeon_emit(cs, PKT3(PKT3_MEM_WRITE, 3, 0));
        radeon_emit(cs, va & 0xFFFFFFFFUL);
        radeon_emit(cs, (va >> 32UL) & 0xFFUL);
        radeon_emit(cs, cs->cdw);
        radeon_emit(cs, rscreen->cs_count);
        radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
        radeon_emit(cs, reloc);
}