/*
 * Copyright 2010 Red Hat Inc.
 *           2010 Jerome Glisse
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 */
#include <util/u_memory.h>
#include <util/u_format.h>
#include <pipebuffer/pb_buffer.h>
#include "pipe/p_shader_tokens.h"
#include "r600_formats.h"
#include "r600_pipe.h"
#include "r600d.h"

static void r600_spi_update(struct r600_pipe_context *rctx);

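/* Convert a gallium PIPE_PRIM_* value into the matching VGT DI primitive
 * type; entries left at -1 mark primitive types this path does not handle. */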
static int r600_conv_pipe_prim(unsigned pprim, unsigned *prim)
{
	static const int prim_conv[] = {
		V_008958_DI_PT_POINTLIST,
		V_008958_DI_PT_LINELIST,
		V_008958_DI_PT_LINELOOP,
		V_008958_DI_PT_LINESTRIP,
		V_008958_DI_PT_TRILIST,
		V_008958_DI_PT_TRISTRIP,
		V_008958_DI_PT_TRIFAN,
		V_008958_DI_PT_QUADLIST,
		V_008958_DI_PT_QUADSTRIP,
		V_008958_DI_PT_POLYGON,
		-1,
		-1,
		-1,
		-1
	};

	*prim = prim_conv[pprim];
	if (*prim == -1) {
		fprintf(stderr, "%s:%d unsupported %d\n", __func__, __LINE__, pprim);
		return -1;
	}
	return 0;
}

/* common state between evergreen and r600 */
void r600_bind_blend_state(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct r600_pipe_blend *blend = (struct r600_pipe_blend *)state;
	struct r600_pipe_state *rstate;

	if (state == NULL)
		return;
	rstate = &blend->rstate;
	rctx->states[rstate->id] = rstate;
	rctx->cb_target_mask = blend->cb_target_mask;
	r600_context_pipe_state_set(&rctx->ctx, rstate);
}

void r600_bind_dsa_state(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct r600_pipe_dsa *dsa = state;
	struct r600_pipe_state *rstate;

	if (state == NULL)
		return;
	rstate = &dsa->rstate;
	rctx->states[rstate->id] = rstate;
	rctx->alpha_ref = dsa->alpha_ref;
	rctx->alpha_ref_dirty = true;
	r600_context_pipe_state_set(&rctx->ctx, rstate);
}

void r600_bind_rs_state(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_rasterizer *rs = (struct r600_pipe_rasterizer *)state;
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;

	if (state == NULL)
		return;

	rctx->flatshade = rs->flatshade;
	rctx->sprite_coord_enable = rs->sprite_coord_enable;
	rctx->rasterizer = rs;

	rctx->states[rs->rstate.id] = &rs->rstate;
	r600_context_pipe_state_set(&rctx->ctx, &rs->rstate);

	if (rctx->family >= CHIP_CEDAR) {
		evergreen_polygon_offset_update(rctx);
	} else {
		r600_polygon_offset_update(rctx);
	}
	if (rctx->ps_shader && rctx->vs_shader)
		r600_spi_update(rctx);
}

void r600_delete_rs_state(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct r600_pipe_rasterizer *rs = (struct r600_pipe_rasterizer *)state;

	if (rctx->rasterizer == rs) {
		rctx->rasterizer = NULL;
	}
	if (rctx->states[rs->rstate.id] == &rs->rstate) {
		rctx->states[rs->rstate.id] = NULL;
	}
	free(rs);
}

void r600_sampler_view_destroy(struct pipe_context *ctx,
			       struct pipe_sampler_view *state)
{
	struct r600_pipe_sampler_view *resource = (struct r600_pipe_sampler_view *)state;

	pipe_resource_reference(&state->texture, NULL);
	FREE(resource);
}

void r600_delete_state(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct r600_pipe_state *rstate = (struct r600_pipe_state *)state;

	if (rctx->states[rstate->id] == rstate) {
		rctx->states[rstate->id] = NULL;
	}
	for (int i = 0; i < rstate->nregs; i++) {
		r600_bo_reference(rctx->radeon, &rstate->regs[i].bo, NULL);
	}
	free(rstate);
}

void r600_bind_vertex_elements(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct r600_vertex_element *v = (struct r600_vertex_element*)state;

	rctx->vertex_elements = v;
	if (v) {
		u_vbuf_mgr_bind_vertex_elements(rctx->vbuf_mgr, state,
						v->vmgr_elements);

		rctx->states[v->rstate.id] = &v->rstate;
		r600_context_pipe_state_set(&rctx->ctx, &v->rstate);
	}
}

void r600_delete_vertex_element(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct r600_vertex_element *v = (struct r600_vertex_element*)state;

	if (rctx->states[v->rstate.id] == &v->rstate) {
		rctx->states[v->rstate.id] = NULL;
	}
	if (rctx->vertex_elements == state)
		rctx->vertex_elements = NULL;

	r600_bo_reference(rctx->radeon, &v->fetch_shader, NULL);
	u_vbuf_mgr_destroy_vertex_elements(rctx->vbuf_mgr, v->vmgr_elements);
	FREE(state);
}


void r600_set_index_buffer(struct pipe_context *ctx,
			   const struct pipe_index_buffer *ib)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;

	if (ib) {
		pipe_resource_reference(&rctx->index_buffer.buffer, ib->buffer);
		memcpy(&rctx->index_buffer, ib, sizeof(rctx->index_buffer));
	} else {
		pipe_resource_reference(&rctx->index_buffer.buffer, NULL);
		memset(&rctx->index_buffer, 0, sizeof(rctx->index_buffer));
	}

	/* TODO make this more like a state */
}

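/* Bind the application's vertex buffers: slots passed in with a NULL buffer,
 * and previously bound slots beyond the new count, get their FS fetch
 * resources cleared before the buffers are handed to the u_vbuf manager. */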
void r600_set_vertex_buffers(struct pipe_context *ctx, unsigned count,
			     const struct pipe_vertex_buffer *buffers)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	int i;

	/* Zero states. */
	for (i = 0; i < count; i++) {
		if (!buffers[i].buffer) {
			if (rctx->family >= CHIP_CEDAR) {
				evergreen_context_pipe_state_set_fs_resource(&rctx->ctx, NULL, i);
			} else {
				r600_context_pipe_state_set_fs_resource(&rctx->ctx, NULL, i);
			}
		}
	}
	for (; i < rctx->vbuf_mgr->nr_real_vertex_buffers; i++) {
		if (rctx->family >= CHIP_CEDAR) {
			evergreen_context_pipe_state_set_fs_resource(&rctx->ctx, NULL, i);
		} else {
			r600_context_pipe_state_set_fs_resource(&rctx->ctx, NULL, i);
		}
	}

	u_vbuf_mgr_set_vertex_buffers(rctx->vbuf_mgr, count, buffers);
}

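/* Create a vertex elements CSO: the layout is registered with the u_vbuf
 * manager and a hardware fetch shader is built for it up front. */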
void *r600_create_vertex_elements(struct pipe_context *ctx,
				  unsigned count,
				  const struct pipe_vertex_element *elements)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct r600_vertex_element *v = CALLOC_STRUCT(r600_vertex_element);

	assert(count < 32);
	if (!v)
		return NULL;

	v->count = count;
	v->vmgr_elements =
		u_vbuf_mgr_create_vertex_elements(rctx->vbuf_mgr, count,
						  elements, v->elements);

	if (r600_vertex_elements_build_fetch_shader(rctx, v)) {
		/* don't leak the vbuf manager state if the fetch shader failed */
		u_vbuf_mgr_destroy_vertex_elements(rctx->vbuf_mgr, v->vmgr_elements);
		FREE(v);
		return NULL;
	}

	return v;
}

void *r600_create_shader_state(struct pipe_context *ctx,
			       const struct pipe_shader_state *state)
{
	struct r600_pipe_shader *shader = CALLOC_STRUCT(r600_pipe_shader);
	int r;

	if (!shader)
		return NULL;

	r = r600_pipe_shader_create(ctx, shader, state->tokens);
	if (r) {
		/* don't leak the CSO if shader compilation failed */
		free(shader);
		return NULL;
	}
	return shader;
}

void r600_bind_ps_shader(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;

	/* TODO delete old shader */
	rctx->ps_shader = (struct r600_pipe_shader *)state;
	if (state) {
		r600_context_pipe_state_set(&rctx->ctx, &rctx->ps_shader->rstate);
	}
	if (rctx->ps_shader && rctx->vs_shader) {
		r600_spi_update(rctx);
		r600_adjust_gprs(rctx);
	}
}

void r600_bind_vs_shader(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;

	/* TODO delete old shader */
	rctx->vs_shader = (struct r600_pipe_shader *)state;
	if (state) {
		r600_context_pipe_state_set(&rctx->ctx, &rctx->vs_shader->rstate);
	}
	if (rctx->ps_shader && rctx->vs_shader) {
		r600_spi_update(rctx);
		r600_adjust_gprs(rctx);
	}
}

void r600_delete_ps_shader(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct r600_pipe_shader *shader = (struct r600_pipe_shader *)state;

	if (rctx->ps_shader == shader) {
		rctx->ps_shader = NULL;
	}

	r600_pipe_shader_destroy(ctx, shader);
	free(shader);
}

void r600_delete_vs_shader(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct r600_pipe_shader *shader = (struct r600_pipe_shader *)state;

	if (rctx->vs_shader == shader) {
		rctx->vs_shader = NULL;
	}

	r600_pipe_shader_destroy(ctx, shader);
	free(shader);
}

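/* Re-emit SX_ALPHA_REF with the reference value from the bound DSA state.
 * When the PS exports 16-bit-per-channel colors, the low 13 mantissa bits of
 * the 32-bit float reference are masked off, which truncates it to roughly
 * the reduced export precision. */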
static void r600_update_alpha_ref(struct r600_pipe_context *rctx)
{
	unsigned alpha_ref;
	struct r600_pipe_state rstate;

	alpha_ref = rctx->alpha_ref;
	rstate.nregs = 0;
	if (rctx->export_16bpc)
		alpha_ref &= ~0x1FFF;
	r600_pipe_state_add_reg(&rstate, R_028438_SX_ALPHA_REF, alpha_ref, 0xFFFFFFFF, NULL);

	r600_context_pipe_state_set(&rctx->ctx, &rstate);
	rctx->alpha_ref_dirty = false;
}

/* FIXME optimize away spi update when it's not needed */
static void r600_spi_block_init(struct r600_pipe_context *rctx, struct r600_pipe_state *rstate)
{
	int i;
	rstate->nregs = 0;
	rstate->id = R600_PIPE_STATE_SPI;
	for (i = 0; i < 32; i++) {
		r600_pipe_state_add_reg(rstate, R_028644_SPI_PS_INPUT_CNTL_0 + i * 4, 0, 0xFFFFFFFF, NULL);
	}
}

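/* Program one SPI_PS_INPUT_CNTL register per PS input: route the matching VS
 * output semantic, apply flat-shading and point-sprite overrides, and on
 * pre-evergreen parts also select centroid/linear interpolation here. */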
static void r600_spi_update(struct r600_pipe_context *rctx)
{
	struct r600_pipe_shader *shader = rctx->ps_shader;
	struct r600_pipe_state *rstate = &rctx->spi;
	struct r600_shader *rshader = &shader->shader;
	unsigned i, tmp;

	if (rctx->spi.id == 0)
		r600_spi_block_init(rctx, &rctx->spi);

	rstate->nregs = 0;
	for (i = 0; i < rshader->ninput; i++) {
		tmp = S_028644_SEMANTIC(r600_find_vs_semantic_index(&rctx->vs_shader->shader, rshader, i));

		if (rshader->input[i].name == TGSI_SEMANTIC_COLOR ||
		    rshader->input[i].name == TGSI_SEMANTIC_BCOLOR ||
		    rshader->input[i].name == TGSI_SEMANTIC_POSITION) {
			tmp |= S_028644_FLAT_SHADE(rctx->flatshade);
		}

		if (rshader->input[i].name == TGSI_SEMANTIC_GENERIC &&
		    rctx->sprite_coord_enable & (1 << rshader->input[i].sid)) {
			tmp |= S_028644_PT_SPRITE_TEX(1);
		}

		if (rctx->family < CHIP_CEDAR) {
			if (rshader->input[i].centroid)
				tmp |= S_028644_SEL_CENTROID(1);

			if (rshader->input[i].interpolate == TGSI_INTERPOLATE_LINEAR)
				tmp |= S_028644_SEL_LINEAR(1);
		}

		r600_pipe_state_mod_reg(rstate, tmp);
	}

	r600_context_pipe_state_set(&rctx->ctx, rstate);
}

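/* Bind a constant buffer for the given shader stage: user buffers are
 * uploaded first, then the ALU const cache base/size registers and the
 * per-stage buffer resource are updated. */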
void r600_set_constant_buffer(struct pipe_context *ctx, uint shader, uint index,
			      struct pipe_resource *buffer)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct r600_resource_buffer *rbuffer = r600_buffer(buffer);
	struct r600_pipe_resource_state *rstate;
	uint32_t offset;

	/* Note that the state tracker can unbind constant buffers by
	 * passing NULL here.
	 */
	if (buffer == NULL) {
		return;
	}

	r600_upload_const_buffer(rctx, &rbuffer, &offset);
	offset += r600_bo_offset(rbuffer->r.bo);

	switch (shader) {
	case PIPE_SHADER_VERTEX:
		rctx->vs_const_buffer.nregs = 0;
		r600_pipe_state_add_reg(&rctx->vs_const_buffer,
					R_028180_ALU_CONST_BUFFER_SIZE_VS_0,
					ALIGN_DIVUP(buffer->width0 >> 4, 16),
					0xFFFFFFFF, NULL);
		r600_pipe_state_add_reg(&rctx->vs_const_buffer,
					R_028980_ALU_CONST_CACHE_VS_0,
					offset >> 8, 0xFFFFFFFF, rbuffer->r.bo);
		r600_context_pipe_state_set(&rctx->ctx, &rctx->vs_const_buffer);

		rstate = &rctx->vs_const_buffer_resource[index];
		if (!rstate->id) {
			if (rctx->family >= CHIP_CEDAR) {
				evergreen_pipe_init_buffer_resource(rctx, rstate);
			} else {
				r600_pipe_init_buffer_resource(rctx, rstate);
			}
		}

		if (rctx->family >= CHIP_CEDAR) {
			evergreen_pipe_mod_buffer_resource(rstate, &rbuffer->r, offset, 16);
			evergreen_context_pipe_state_set_vs_resource(&rctx->ctx, rstate, index);
		} else {
			r600_pipe_mod_buffer_resource(rstate, &rbuffer->r, offset, 16);
			r600_context_pipe_state_set_vs_resource(&rctx->ctx, rstate, index);
		}
		break;
	case PIPE_SHADER_FRAGMENT:
		rctx->ps_const_buffer.nregs = 0;
		r600_pipe_state_add_reg(&rctx->ps_const_buffer,
					R_028140_ALU_CONST_BUFFER_SIZE_PS_0,
					ALIGN_DIVUP(buffer->width0 >> 4, 16),
					0xFFFFFFFF, NULL);
		r600_pipe_state_add_reg(&rctx->ps_const_buffer,
					R_028940_ALU_CONST_CACHE_PS_0,
					offset >> 8, 0xFFFFFFFF, rbuffer->r.bo);
		r600_context_pipe_state_set(&rctx->ctx, &rctx->ps_const_buffer);

		rstate = &rctx->ps_const_buffer_resource[index];
		if (!rstate->id) {
			if (rctx->family >= CHIP_CEDAR) {
				evergreen_pipe_init_buffer_resource(rctx, rstate);
			} else {
				r600_pipe_init_buffer_resource(rctx, rstate);
			}
		}
		if (rctx->family >= CHIP_CEDAR) {
			evergreen_pipe_mod_buffer_resource(rstate, &rbuffer->r, offset, 16);
			evergreen_context_pipe_state_set_ps_resource(&rctx->ctx, rstate, index);
		} else {
			r600_pipe_mod_buffer_resource(rstate, &rbuffer->r, offset, 16);
			r600_context_pipe_state_set_ps_resource(&rctx->ctx, rstate, index);
		}
		break;
	default:
		R600_ERR("unsupported %d\n", shader);
		return;
	}

	if (buffer != &rbuffer->r.b.b.b)
		pipe_resource_reference((struct pipe_resource**)&rbuffer, NULL);
}

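/* Point the hardware FS fetch resources at the currently bound vertex
 * buffers; when the vertex layout needs per-element offsets, one resource is
 * set up per vertex element instead of per buffer. */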
static void r600_vertex_buffer_update(struct r600_pipe_context *rctx)
{
	struct r600_pipe_resource_state *rstate;
	struct r600_resource *rbuffer;
	struct pipe_vertex_buffer *vertex_buffer;
	unsigned i, count, offset;

	if (rctx->vertex_elements->vbuffer_need_offset) {
		/* one resource per vertex elements */
		count = rctx->vertex_elements->count;
	} else {
		/* bind vertex buffer once */
		count = rctx->vbuf_mgr->nr_real_vertex_buffers;
	}

	for (i = 0 ; i < count; i++) {
		rstate = &rctx->fs_resource[i];

		if (rctx->vertex_elements->vbuffer_need_offset) {
			/* one resource per vertex elements */
			unsigned vbuffer_index;
			vbuffer_index = rctx->vertex_elements->elements[i].vertex_buffer_index;
			vertex_buffer = &rctx->vbuf_mgr->vertex_buffer[vbuffer_index];
			rbuffer = (struct r600_resource*)rctx->vbuf_mgr->real_vertex_buffer[vbuffer_index];
			offset = rctx->vertex_elements->vbuffer_offset[i];
		} else {
			/* bind vertex buffer once */
			vertex_buffer = &rctx->vbuf_mgr->vertex_buffer[i];
			rbuffer = (struct r600_resource*)rctx->vbuf_mgr->real_vertex_buffer[i];
			offset = 0;
		}
		if (vertex_buffer == NULL || rbuffer == NULL)
			continue;
		offset += vertex_buffer->buffer_offset + r600_bo_offset(rbuffer->bo);

		if (!rstate->id) {
			if (rctx->family >= CHIP_CEDAR) {
				evergreen_pipe_init_buffer_resource(rctx, rstate);
			} else {
				r600_pipe_init_buffer_resource(rctx, rstate);
			}
		}

		if (rctx->family >= CHIP_CEDAR) {
			evergreen_pipe_mod_buffer_resource(rstate, rbuffer, offset, vertex_buffer->stride);
			evergreen_context_pipe_state_set_fs_resource(&rctx->ctx, rstate, i);
		} else {
			r600_pipe_mod_buffer_resource(rstate, rbuffer, offset, vertex_buffer->stride);
			r600_context_pipe_state_set_fs_resource(&rctx->ctx, rstate, i);
		}
	}
}

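/* Top-level draw entry point: flush dirty depth textures, update vertex
 * fetch resources, translate/upload the index buffer if needed, emit the
 * per-draw VGT state and finally kick off the draw packet. */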
void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
	struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx;
	struct r600_resource *rbuffer;
	u32 vgt_dma_index_type, vgt_dma_swap_mode, vgt_draw_initiator, mask;
	struct r600_draw rdraw;
	struct r600_drawl draw = {};
	unsigned prim;

	r600_flush_depth_textures(rctx);
	u_vbuf_mgr_draw_begin(rctx->vbuf_mgr, info, NULL, NULL);
	r600_vertex_buffer_update(rctx);

	draw.info = *info;
	draw.ctx = ctx;
	if (info->indexed && rctx->index_buffer.buffer) {
		draw.info.start += rctx->index_buffer.offset / rctx->index_buffer.index_size;
		pipe_resource_reference(&draw.index_buffer, rctx->index_buffer.buffer);

		r600_translate_index_buffer(rctx, &draw.index_buffer,
					    &rctx->index_buffer.index_size,
					    &draw.info.start,
					    info->count);

		draw.index_size = rctx->index_buffer.index_size;
		draw.index_buffer_offset = draw.info.start * draw.index_size;
		draw.info.start = 0;

		if (u_vbuf_resource(draw.index_buffer)->user_ptr) {
			r600_upload_index_buffer(rctx, &draw);
		}
	} else {
		draw.info.index_bias = info->start;
	}

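	/* Pick the draw initiator and DMA index type from the index size:
	 * 16-bit or 32-bit indices fetched through DMA (byte-swapped on
	 * big-endian hosts), or an auto-generated index draw when there is
	 * no index buffer. */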
	vgt_dma_swap_mode = 0;
	switch (draw.index_size) {
	case 2:
		vgt_draw_initiator = 0;
		vgt_dma_index_type = 0;
		if (R600_BIG_ENDIAN) {
			vgt_dma_swap_mode = ENDIAN_8IN16;
		}
		break;
	case 4:
		vgt_draw_initiator = 0;
		vgt_dma_index_type = 1;
		if (R600_BIG_ENDIAN) {
			vgt_dma_swap_mode = ENDIAN_8IN32;
		}
		break;
	case 0:
		vgt_draw_initiator = 2;
		vgt_dma_index_type = 0;
		break;
	default:
		R600_ERR("unsupported index size %d\n", draw.index_size);
		return;
	}
	if (r600_conv_pipe_prim(draw.info.mode, &prim))
		return;
	if (unlikely(rctx->ps_shader == NULL)) {
		R600_ERR("missing pixel shader\n");
		return;
	}
	if (unlikely(rctx->vs_shader == NULL)) {
		R600_ERR("missing vertex shader\n");
		return;
	}
	/* there should be enough input */
	if (rctx->vertex_elements->count < rctx->vs_shader->shader.bc.nresource) {
		R600_ERR("%d resources provided, expecting %d\n",
			 rctx->vertex_elements->count, rctx->vs_shader->shader.bc.nresource);
		return;
	}

	if (rctx->alpha_ref_dirty)
		r600_update_alpha_ref(rctx);

	mask = 0;
	for (int i = 0; i < rctx->framebuffer.nr_cbufs; i++) {
		mask |= (0xF << (i * 4));
	}

	if (rctx->vgt.id != R600_PIPE_STATE_VGT) {
		rctx->vgt.id = R600_PIPE_STATE_VGT;
		rctx->vgt.nregs = 0;
		r600_pipe_state_add_reg(&rctx->vgt, R_008958_VGT_PRIMITIVE_TYPE, prim, 0xFFFFFFFF, NULL);
		r600_pipe_state_add_reg(&rctx->vgt, R_028238_CB_TARGET_MASK, rctx->cb_target_mask & mask, 0xFFFFFFFF, NULL);
		r600_pipe_state_add_reg(&rctx->vgt, R_028400_VGT_MAX_VTX_INDX, draw.info.max_index, 0xFFFFFFFF, NULL);
		r600_pipe_state_add_reg(&rctx->vgt, R_028404_VGT_MIN_VTX_INDX, draw.info.min_index, 0xFFFFFFFF, NULL);
		r600_pipe_state_add_reg(&rctx->vgt, R_028408_VGT_INDX_OFFSET, draw.info.index_bias, 0xFFFFFFFF, NULL);
		r600_pipe_state_add_reg(&rctx->vgt, R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0, 0xFFFFFFFF, NULL);
		r600_pipe_state_add_reg(&rctx->vgt, R_03CFF4_SQ_VTX_START_INST_LOC, draw.info.start_instance, 0xFFFFFFFF, NULL);
		r600_pipe_state_add_reg(&rctx->vgt, R_028814_PA_SU_SC_MODE_CNTL,
					0,
					S_028814_PROVOKING_VTX_LAST(1), NULL);

	}

	rctx->vgt.nregs = 0;
	r600_pipe_state_mod_reg(&rctx->vgt, prim);
	r600_pipe_state_mod_reg(&rctx->vgt, rctx->cb_target_mask & mask);
	r600_pipe_state_mod_reg(&rctx->vgt, draw.info.max_index);
	r600_pipe_state_mod_reg(&rctx->vgt, draw.info.min_index);
	r600_pipe_state_mod_reg(&rctx->vgt, draw.info.index_bias);
	r600_pipe_state_mod_reg(&rctx->vgt, 0);
	r600_pipe_state_mod_reg(&rctx->vgt, draw.info.start_instance);
	if (draw.info.mode == PIPE_PRIM_QUADS || draw.info.mode == PIPE_PRIM_QUAD_STRIP || draw.info.mode == PIPE_PRIM_POLYGON) {
		r600_pipe_state_mod_reg(&rctx->vgt, S_028814_PROVOKING_VTX_LAST(1));
	}

	r600_context_pipe_state_set(&rctx->ctx, &rctx->vgt);

	rdraw.vgt_num_indices = draw.info.count;
	rdraw.vgt_num_instances = draw.info.instance_count;
	rdraw.vgt_index_type = vgt_dma_index_type | (vgt_dma_swap_mode << 2);
	rdraw.vgt_draw_initiator = vgt_draw_initiator;
	rdraw.indices = NULL;
	if (draw.index_buffer) {
		rbuffer = (struct r600_resource*)draw.index_buffer;
		rdraw.indices = rbuffer->bo;
		rdraw.indices_bo_offset = draw.index_buffer_offset;
	}

	if (rctx->family >= CHIP_CEDAR) {
		evergreen_context_draw(&rctx->ctx, &rdraw);
	} else {
		r600_context_draw(&rctx->ctx, &rdraw);
	}

	if (rctx->framebuffer.zsbuf)
	{
		struct pipe_resource *tex = rctx->framebuffer.zsbuf->texture;
		((struct r600_resource_texture *)tex)->dirty_db = TRUE;
	}

	pipe_resource_reference(&draw.index_buffer, NULL);

	u_vbuf_mgr_draw_end(rctx->vbuf_mgr);
}

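/* Append a register write to a pipe state, resolving its dirty-tracking
 * block from the range/block ids so later emission can merge it with other
 * writes to the same block. */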
void _r600_pipe_state_add_reg(struct r600_context *ctx,
			      struct r600_pipe_state *state,
			      u32 offset, u32 value, u32 mask,
			      u32 range_id, u32 block_id,
			      struct r600_bo *bo)
{
	struct r600_range *range;
	struct r600_block *block;

	range = &ctx->range[range_id];
	block = range->blocks[block_id];
	state->regs[state->nregs].block = block;
	state->regs[state->nregs].id = (offset - block->start_offset) >> 2;

	state->regs[state->nregs].value = value;
	state->regs[state->nregs].mask = mask;
	state->regs[state->nregs].bo = bo;

	state->nregs++;
	assert(state->nregs < R600_BLOCK_MAX_REG);
}

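/* Variant used for registers that are not part of a tracked block: the raw
 * register offset is stored instead of a block-relative id. */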
void r600_pipe_state_add_reg_noblock(struct r600_pipe_state *state,
				     u32 offset, u32 value, u32 mask,
				     struct r600_bo *bo)
{
	state->regs[state->nregs].id = offset;
	state->regs[state->nregs].block = NULL;
	state->regs[state->nregs].value = value;
	state->regs[state->nregs].mask = mask;
	state->regs[state->nregs].bo = bo;

	state->nregs++;
	assert(state->nregs < R600_BLOCK_MAX_REG);
}