r600g: split add_reg into add_reg and add_reg_bo variants
[mesa.git] src/gallium/drivers/r600/r600_state_common.c
/*
 * Copyright 2010 Red Hat Inc.
 *           2010 Jerome Glisse
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 */
#include "r600_formats.h"
#include "r600d.h"

#include "util/u_blitter.h"
#include "util/u_upload_mgr.h"
#include "tgsi/tgsi_parse.h"
#include <byteswap.h>

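/* An r600_command_buffer is a pre-built run of packets stored in an atom;
 * emitting it is just a bounds-checked memcpy into the current CS. */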
static void r600_emit_command_buffer(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->cs;
	struct r600_command_buffer *cb = (struct r600_command_buffer*)atom;

	assert(cs->cdw + cb->atom.num_dw <= RADEON_MAX_CMDBUF_DWORDS);
	memcpy(cs->buf + cs->cdw, cb->buf, 4 * cb->atom.num_dw);
	cs->cdw += cb->atom.num_dw;
}

void r600_init_command_buffer(struct r600_command_buffer *cb, unsigned num_dw, enum r600_atom_flags flags)
{
	cb->atom.emit = r600_emit_command_buffer;
	cb->atom.num_dw = 0;
	cb->atom.flags = flags;
	cb->buf = CALLOC(1, 4 * num_dw);
	cb->max_num_dw = num_dw;
}

void r600_release_command_buffer(struct r600_command_buffer *cb)
{
	FREE(cb->buf);
}

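/* Emit a SURFACE_SYNC packet covering the whole address range with whatever
 * flush bits have accumulated in flush_flags, then clear them so the same
 * flush is not replayed on the next emit. */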
static void r600_emit_surface_sync(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->cs;
	struct r600_surface_sync_cmd *a = (struct r600_surface_sync_cmd*)atom;

	cs->buf[cs->cdw++] = PKT3(PKT3_SURFACE_SYNC, 3, 0);
	cs->buf[cs->cdw++] = a->flush_flags;  /* CP_COHER_CNTL */
	cs->buf[cs->cdw++] = 0xffffffff;      /* CP_COHER_SIZE */
	cs->buf[cs->cdw++] = 0;               /* CP_COHER_BASE */
	cs->buf[cs->cdw++] = 0x0000000A;      /* POLL_INTERVAL */

	a->flush_flags = 0;
}

static void r600_emit_r6xx_flush_and_inv(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->cs;
	cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
	cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0);
}

void r600_init_atom(struct r600_atom *atom,
		    void (*emit)(struct r600_context *ctx, struct r600_atom *state),
		    unsigned num_dw, enum r600_atom_flags flags)
{
	atom->emit = emit;
	atom->num_dw = num_dw;
	atom->flags = flags;
}

void r600_init_common_atoms(struct r600_context *rctx)
{
	r600_init_atom(&rctx->surface_sync_cmd.atom, r600_emit_surface_sync, 5, EMIT_EARLY);
	r600_init_atom(&rctx->r6xx_flush_and_inv_cmd, r600_emit_r6xx_flush_and_inv, 2, EMIT_EARLY);
}

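/* Build the CP_COHER_CNTL bits needed to flush the currently bound color
 * buffers; RV670/RS780/RS880 additionally need extra DEST_BASE bits to work
 * around broken flushing (see the comment below). */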
unsigned r600_get_cb_flush_flags(struct r600_context *rctx)
{
	unsigned flags = 0;

	if (rctx->framebuffer.nr_cbufs) {
		flags |= S_0085F0_CB_ACTION_ENA(1) |
			 (((1 << rctx->framebuffer.nr_cbufs) - 1) << S_0085F0_CB0_DEST_BASE_ENA_SHIFT);
	}

	/* Workaround for broken flushing on some R6xx chipsets. */
	if (rctx->family == CHIP_RV670 ||
	    rctx->family == CHIP_RS780 ||
	    rctx->family == CHIP_RS880) {
		flags |= S_0085F0_CB1_DEST_BASE_ENA(1) |
			 S_0085F0_DEST_BASE_0_ENA(1);
	}
	return flags;
}

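/* pipe_context::texture_barrier: accumulate texture-cache and color-buffer
 * flush bits into the surface_sync command and mark it dirty; it goes out
 * with the other dirty atoms at the next draw. */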
void r600_texture_barrier(struct pipe_context *ctx)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->surface_sync_cmd.flush_flags |= S_0085F0_TC_ACTION_ENA(1) | r600_get_cb_flush_flags(rctx);
	r600_atom_dirty(rctx, &rctx->surface_sync_cmd.atom);
}

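/* Map a gallium PIPE_PRIM_* value to the VGT DI primitive type. The trailing
 * -1 entries cover primitive types this path does not handle; those fail the
 * conversion and the draw is rejected. */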
static bool r600_conv_pipe_prim(unsigned pprim, unsigned *prim)
{
	static const int prim_conv[] = {
		V_008958_DI_PT_POINTLIST,
		V_008958_DI_PT_LINELIST,
		V_008958_DI_PT_LINELOOP,
		V_008958_DI_PT_LINESTRIP,
		V_008958_DI_PT_TRILIST,
		V_008958_DI_PT_TRISTRIP,
		V_008958_DI_PT_TRIFAN,
		V_008958_DI_PT_QUADLIST,
		V_008958_DI_PT_QUADSTRIP,
		V_008958_DI_PT_POLYGON,
		-1,
		-1,
		-1,
		-1
	};

	*prim = prim_conv[pprim];
	if (*prim == -1) {
		fprintf(stderr, "%s:%d unsupported %d\n", __func__, __LINE__, pprim);
		return false;
	}
	return true;
}

/* common state between evergreen and r600 */
void r600_bind_blend_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_blend *blend = (struct r600_pipe_blend *)state;
	struct r600_pipe_state *rstate;

	if (state == NULL)
		return;
	rstate = &blend->rstate;
	rctx->states[rstate->id] = rstate;
	rctx->cb_target_mask = blend->cb_target_mask;
	/* Replace every bit except MULTIWRITE_ENABLE. */
	rctx->cb_color_control &= ~C_028808_MULTIWRITE_ENABLE;
	rctx->cb_color_control |= blend->cb_color_control & C_028808_MULTIWRITE_ENABLE;
	rctx->dual_src_blend = blend->dual_src_blend;
	r600_context_pipe_state_set(rctx, rstate);
}

void r600_set_blend_color(struct pipe_context *ctx,
			  const struct pipe_blend_color *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_state *rstate = CALLOC_STRUCT(r600_pipe_state);

	if (rstate == NULL)
		return;

	rstate->id = R600_PIPE_STATE_BLEND_COLOR;
	r600_pipe_state_add_reg(rstate, R_028414_CB_BLEND_RED, fui(state->color[0]));
	r600_pipe_state_add_reg(rstate, R_028418_CB_BLEND_GREEN, fui(state->color[1]));
	r600_pipe_state_add_reg(rstate, R_02841C_CB_BLEND_BLUE, fui(state->color[2]));
	r600_pipe_state_add_reg(rstate, R_028420_CB_BLEND_ALPHA, fui(state->color[3]));

	free(rctx->states[R600_PIPE_STATE_BLEND_COLOR]);
	rctx->states[R600_PIPE_STATE_BLEND_COLOR] = rstate;
	r600_context_pipe_state_set(rctx, rstate);
}

static void r600_set_stencil_ref(struct pipe_context *ctx,
				 const struct r600_stencil_ref *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_state *rstate = CALLOC_STRUCT(r600_pipe_state);

	if (rstate == NULL)
		return;

	rstate->id = R600_PIPE_STATE_STENCIL_REF;
	r600_pipe_state_add_reg(rstate,
				R_028430_DB_STENCILREFMASK,
				S_028430_STENCILREF(state->ref_value[0]) |
				S_028430_STENCILMASK(state->valuemask[0]) |
				S_028430_STENCILWRITEMASK(state->writemask[0]));
	r600_pipe_state_add_reg(rstate,
				R_028434_DB_STENCILREFMASK_BF,
				S_028434_STENCILREF_BF(state->ref_value[1]) |
				S_028434_STENCILMASK_BF(state->valuemask[1]) |
				S_028434_STENCILWRITEMASK_BF(state->writemask[1]));

	free(rctx->states[R600_PIPE_STATE_STENCIL_REF]);
	rctx->states[R600_PIPE_STATE_STENCIL_REF] = rstate;
	r600_context_pipe_state_set(rctx, rstate);
}

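/* The hardware packs the stencil reference value together with the compare
 * and write masks in DB_STENCILREFMASK, so the new reference values are
 * combined with the masks of the currently bound DSA state before being
 * programmed. */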
void r600_set_pipe_stencil_ref(struct pipe_context *ctx,
			       const struct pipe_stencil_ref *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_dsa *dsa = (struct r600_pipe_dsa*)rctx->states[R600_PIPE_STATE_DSA];
	struct r600_stencil_ref ref;

	rctx->stencil_ref = *state;

	if (!dsa)
		return;

	ref.ref_value[0] = state->ref_value[0];
	ref.ref_value[1] = state->ref_value[1];
	ref.valuemask[0] = dsa->valuemask[0];
	ref.valuemask[1] = dsa->valuemask[1];
	ref.writemask[0] = dsa->writemask[0];
	ref.writemask[1] = dsa->writemask[1];

	r600_set_stencil_ref(ctx, &ref);
}

void r600_bind_dsa_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_dsa *dsa = state;
	struct r600_pipe_state *rstate;
	struct r600_stencil_ref ref;

	if (state == NULL)
		return;
	rstate = &dsa->rstate;
	rctx->states[rstate->id] = rstate;
	rctx->alpha_ref = dsa->alpha_ref;
	rctx->alpha_ref_dirty = true;
	r600_context_pipe_state_set(rctx, rstate);

	ref.ref_value[0] = rctx->stencil_ref.ref_value[0];
	ref.ref_value[1] = rctx->stencil_ref.ref_value[1];
	ref.valuemask[0] = dsa->valuemask[0];
	ref.valuemask[1] = dsa->valuemask[1];
	ref.writemask[0] = dsa->writemask[0];
	ref.writemask[1] = dsa->writemask[1];

	r600_set_stencil_ref(ctx, &ref);

	if (rctx->db_misc_state.flush_depthstencil_enabled != dsa->is_flush) {
		rctx->db_misc_state.flush_depthstencil_enabled = dsa->is_flush;
		r600_atom_dirty(rctx, &rctx->db_misc_state.atom);
	}
}

void r600_set_max_scissor(struct r600_context *rctx)
{
	/* Set a scissor state such that it doesn't do anything. */
	struct pipe_scissor_state scissor;
	scissor.minx = 0;
	scissor.miny = 0;
	scissor.maxx = 8192;
	scissor.maxy = 8192;

	r600_set_scissor_state(rctx, &scissor);
}

void r600_bind_rs_state(struct pipe_context *ctx, void *state)
{
	struct r600_pipe_rasterizer *rs = (struct r600_pipe_rasterizer *)state;
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (state == NULL)
		return;

	rctx->sprite_coord_enable = rs->sprite_coord_enable;
	rctx->two_side = rs->two_side;
	rctx->pa_sc_line_stipple = rs->pa_sc_line_stipple;
	rctx->pa_cl_clip_cntl = rs->pa_cl_clip_cntl;

	rctx->rasterizer = rs;

	rctx->states[rs->rstate.id] = &rs->rstate;
	r600_context_pipe_state_set(rctx, &rs->rstate);

	if (rctx->chip_class >= EVERGREEN) {
		evergreen_polygon_offset_update(rctx);
	} else {
		r600_polygon_offset_update(rctx);
	}

	/* Workaround for a missing scissor enable on r600. */
	if (rctx->chip_class == R600) {
		if (rs->scissor_enable != rctx->scissor_enable) {
			rctx->scissor_enable = rs->scissor_enable;

			if (rs->scissor_enable) {
				r600_set_scissor_state(rctx, &rctx->scissor_state);
			} else {
				r600_set_max_scissor(rctx);
			}
		}
	}
}

void r600_delete_rs_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_rasterizer *rs = (struct r600_pipe_rasterizer *)state;

	if (rctx->rasterizer == rs) {
		rctx->rasterizer = NULL;
	}
	if (rctx->states[rs->rstate.id] == &rs->rstate) {
		rctx->states[rs->rstate.id] = NULL;
	}
	free(rs);
}

void r600_sampler_view_destroy(struct pipe_context *ctx,
			       struct pipe_sampler_view *state)
{
	struct r600_pipe_sampler_view *resource = (struct r600_pipe_sampler_view *)state;

	pipe_resource_reference(&state->texture, NULL);
	FREE(resource);
}

void r600_delete_state(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_state *rstate = (struct r600_pipe_state *)state;

	if (rctx->states[rstate->id] == rstate) {
		rctx->states[rstate->id] = NULL;
	}
	for (int i = 0; i < rstate->nregs; i++) {
		pipe_resource_reference((struct pipe_resource**)&rstate->regs[i].bo, NULL);
	}
	free(rstate);
}

void r600_bind_vertex_elements(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_vertex_element *v = (struct r600_vertex_element*)state;

	rctx->vertex_elements = v;
	if (v) {
		r600_inval_shader_cache(rctx);
		u_vbuf_bind_vertex_elements(rctx->vbuf_mgr, state,
					    v->vmgr_elements);

		rctx->states[v->rstate.id] = &v->rstate;
		r600_context_pipe_state_set(rctx, &v->rstate);
	}
}

void r600_delete_vertex_element(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_vertex_element *v = (struct r600_vertex_element*)state;

	if (rctx->states[v->rstate.id] == &v->rstate) {
		rctx->states[v->rstate.id] = NULL;
	}
	if (rctx->vertex_elements == state)
		rctx->vertex_elements = NULL;

	pipe_resource_reference((struct pipe_resource**)&v->fetch_shader, NULL);
	u_vbuf_destroy_vertex_elements(rctx->vbuf_mgr, v->vmgr_elements);
	FREE(state);
}


void r600_set_index_buffer(struct pipe_context *ctx,
			   const struct pipe_index_buffer *ib)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	u_vbuf_set_index_buffer(rctx->vbuf_mgr, ib);
}

void r600_set_vertex_buffers(struct pipe_context *ctx, unsigned count,
			     const struct pipe_vertex_buffer *buffers)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	u_vbuf_set_vertex_buffers(rctx->vbuf_mgr, count, buffers);
	rctx->vertex_buffers_dirty = true;
}

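/* Vertex elements are translated into a fetch shader at creation time;
 * binding the state later just sets the associated register state (see
 * r600_bind_vertex_elements above). */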
void *r600_create_vertex_elements(struct pipe_context *ctx,
				  unsigned count,
				  const struct pipe_vertex_element *elements)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_vertex_element *v = CALLOC_STRUCT(r600_vertex_element);

	assert(count < 32);
	if (!v)
		return NULL;

	v->count = count;
	v->vmgr_elements =
		u_vbuf_create_vertex_elements(rctx->vbuf_mgr, count,
					      elements, v->elements);

	if (r600_vertex_elements_build_fetch_shader(rctx, v)) {
		FREE(v);
		return NULL;
	}

	return v;
}

void *r600_create_shader_state(struct pipe_context *ctx,
			       const struct pipe_shader_state *state)
{
	struct r600_pipe_shader *shader = CALLOC_STRUCT(r600_pipe_shader);
	int r;

	shader->tokens = tgsi_dup_tokens(state->tokens);
	shader->so = state->stream_output;

	r = r600_pipe_shader_create(ctx, shader);
	if (r) {
		return NULL;
	}
	return shader;
}

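/* Binding a NULL pixel shader falls back to the dummy shader so a valid
 * program is always bound. MULTIWRITE_ENABLE mirrors the shader's
 * fs_write_all flag, and GPR allocation is rebalanced whenever both shader
 * stages are present. */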
void r600_bind_ps_shader(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (!state) {
		state = rctx->dummy_pixel_shader;
	}

	rctx->ps_shader = (struct r600_pipe_shader *)state;

	r600_inval_shader_cache(rctx);
	r600_context_pipe_state_set(rctx, &rctx->ps_shader->rstate);

	rctx->cb_color_control &= C_028808_MULTIWRITE_ENABLE;
	rctx->cb_color_control |= S_028808_MULTIWRITE_ENABLE(!!rctx->ps_shader->shader.fs_write_all);

	if (rctx->ps_shader && rctx->vs_shader) {
		r600_adjust_gprs(rctx);
	}
}

void r600_bind_vs_shader(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	rctx->vs_shader = (struct r600_pipe_shader *)state;
	if (state) {
		r600_inval_shader_cache(rctx);
		r600_context_pipe_state_set(rctx, &rctx->vs_shader->rstate);
	}
	if (rctx->ps_shader && rctx->vs_shader) {
		r600_adjust_gprs(rctx);
	}
}

void r600_delete_ps_shader(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader *shader = (struct r600_pipe_shader *)state;

	if (rctx->ps_shader == shader) {
		rctx->ps_shader = NULL;
	}

	free(shader->tokens);
	r600_pipe_shader_destroy(ctx, shader);
	free(shader);
}

void r600_delete_vs_shader(struct pipe_context *ctx, void *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_pipe_shader *shader = (struct r600_pipe_shader *)state;

	if (rctx->vs_shader == shader) {
		rctx->vs_shader = NULL;
	}

	free(shader->tokens);
	r600_pipe_shader_destroy(ctx, shader);
	free(shader);
}

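/* Program SX_ALPHA_REF from the value cached off the DSA state. When the
 * shader exports 16 bits per channel, the low bits of the reference are
 * masked off, presumably to match the reduced export precision. */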
static void r600_update_alpha_ref(struct r600_context *rctx)
{
	unsigned alpha_ref;
	struct r600_pipe_state rstate;

	alpha_ref = rctx->alpha_ref;
	rstate.nregs = 0;
	if (rctx->export_16bpc)
		alpha_ref &= ~0x1FFF;
	r600_pipe_state_add_reg(&rstate, R_028438_SX_ALPHA_REF, alpha_ref);

	r600_context_pipe_state_set(rctx, &rstate);
	rctx->alpha_ref_dirty = false;
}

void r600_constant_buffers_dirty(struct r600_context *rctx, struct r600_constbuf_state *state)
{
	r600_inval_shader_cache(rctx);
	state->atom.num_dw = rctx->chip_class >= EVERGREEN ? util_bitcount(state->dirty_mask)*20
							   : util_bitcount(state->dirty_mask)*19;
	r600_atom_dirty(rctx, &state->atom);
}

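/* Constant buffers backed by user memory are copied into a GPU buffer through
 * the uploader (with a dword byte swap on big-endian hosts); buffers that are
 * already resources are referenced directly. Either way the slot is flagged
 * in enabled_mask/dirty_mask so the constants get re-emitted. */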
void r600_set_constant_buffer(struct pipe_context *ctx, uint shader, uint index,
			      struct pipe_resource *buffer)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_constbuf_state *state;
	struct r600_constant_buffer *cb;
	uint8_t *ptr;

	switch (shader) {
	case PIPE_SHADER_VERTEX:
		state = &rctx->vs_constbuf_state;
		break;
	case PIPE_SHADER_FRAGMENT:
		state = &rctx->ps_constbuf_state;
		break;
	default:
		return;
	}

	/* Note that the state tracker can unbind constant buffers by
	 * passing NULL here.
	 */
	if (unlikely(!buffer)) {
		state->enabled_mask &= ~(1 << index);
		state->dirty_mask &= ~(1 << index);
		pipe_resource_reference(&state->cb[index].buffer, NULL);
		return;
	}

	cb = &state->cb[index];
	cb->buffer_size = buffer->width0;

	ptr = u_vbuf_resource(buffer)->user_ptr;

	if (ptr) {
		/* Upload the user buffer. */
		if (R600_BIG_ENDIAN) {
			uint32_t *tmpPtr;
			unsigned i, size = buffer->width0;

			if (!(tmpPtr = malloc(size))) {
				R600_ERR("Failed to allocate BE swap buffer.\n");
				return;
			}

			for (i = 0; i < size / 4; ++i) {
				tmpPtr[i] = bswap_32(((uint32_t *)ptr)[i]);
			}

			u_upload_data(rctx->vbuf_mgr->uploader, 0, size, tmpPtr, &cb->buffer_offset, &cb->buffer);
			free(tmpPtr);
		} else {
			u_upload_data(rctx->vbuf_mgr->uploader, 0, buffer->width0, ptr, &cb->buffer_offset, &cb->buffer);
		}
	} else {
		/* Setup the hw buffer. */
		cb->buffer_offset = 0;
		pipe_resource_reference(&cb->buffer, buffer);
	}

	state->enabled_mask |= 1 << index;
	state->dirty_mask |= 1 << index;
	r600_constant_buffers_dirty(rctx, state);
}

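/* A stream-output target carries a small 4-byte "filled_size" buffer used to
 * track how much of the target has been written; it is zeroed here so that
 * appends start at the beginning of the buffer. */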
struct pipe_stream_output_target *
r600_create_so_target(struct pipe_context *ctx,
		      struct pipe_resource *buffer,
		      unsigned buffer_offset,
		      unsigned buffer_size)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_so_target *t;
	void *ptr;

	t = CALLOC_STRUCT(r600_so_target);
	if (!t) {
		return NULL;
	}

	t->b.reference.count = 1;
	t->b.context = ctx;
	pipe_resource_reference(&t->b.buffer, buffer);
	t->b.buffer_offset = buffer_offset;
	t->b.buffer_size = buffer_size;

	t->filled_size = (struct r600_resource*)
		pipe_buffer_create(ctx->screen, PIPE_BIND_CUSTOM, PIPE_USAGE_STATIC, 4);
	ptr = rctx->ws->buffer_map(t->filled_size->buf, rctx->cs, PIPE_TRANSFER_WRITE);
	memset(ptr, 0, t->filled_size->buf->size);
	rctx->ws->buffer_unmap(t->filled_size->buf);

	return &t->b;
}

void r600_so_target_destroy(struct pipe_context *ctx,
			    struct pipe_stream_output_target *target)
{
	struct r600_so_target *t = (struct r600_so_target*)target;
	pipe_resource_reference(&t->b.buffer, NULL);
	pipe_resource_reference((struct pipe_resource**)&t->filled_size, NULL);
	FREE(t);
}

void r600_set_so_targets(struct pipe_context *ctx,
			 unsigned num_targets,
			 struct pipe_stream_output_target **targets,
			 unsigned append_bitmask)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	unsigned i;

	/* Stop streamout. */
	if (rctx->num_so_targets) {
		r600_context_streamout_end(rctx);
	}

	/* Set the new targets. */
	for (i = 0; i < num_targets; i++) {
		pipe_so_target_reference((struct pipe_stream_output_target**)&rctx->so_targets[i], targets[i]);
	}
	for (; i < rctx->num_so_targets; i++) {
		pipe_so_target_reference((struct pipe_stream_output_target**)&rctx->so_targets[i], NULL);
	}

	rctx->num_so_targets = num_targets;
	rctx->streamout_start = num_targets != 0;
	rctx->streamout_append_bitmask = append_bitmask;
}

static int r600_shader_rebuild(struct pipe_context * ctx, struct r600_pipe_shader * shader)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	int r;

	r600_pipe_shader_destroy(ctx, shader);
	r = r600_pipe_shader_create(ctx, shader);
	if (r) {
		return r;
	}
	r600_context_pipe_state_set(rctx, &shader->rstate);

	return 0;
}

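/* Resolve state that depends on several bound objects at once: flush depth
 * textures, rebuild the pixel shader when two-sided color or colorbuffer
 * write-all requirements changed, refresh the alpha reference, and pick the
 * CB shader mask for dual-source blending. Called at the start of each draw. */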
static void r600_update_derived_state(struct r600_context *rctx)
{
	struct pipe_context * ctx = (struct pipe_context*)rctx;

	if (!rctx->blitter->running) {
		if (rctx->have_depth_fb || rctx->have_depth_texture)
			r600_flush_depth_textures(rctx);
	}

	if (rctx->chip_class < EVERGREEN) {
		r600_update_sampler_states(rctx);
	}

	if ((rctx->ps_shader->shader.two_side != rctx->two_side) ||
	    ((rctx->chip_class >= EVERGREEN) && rctx->ps_shader->shader.fs_write_all &&
	     (rctx->ps_shader->shader.nr_cbufs != rctx->nr_cbufs))) {
		r600_shader_rebuild(&rctx->context, rctx->ps_shader);
	}

	if (rctx->alpha_ref_dirty) {
		r600_update_alpha_ref(rctx);
	}

	if (rctx->ps_shader && ((rctx->sprite_coord_enable &&
				 (rctx->ps_shader->sprite_coord_enable != rctx->sprite_coord_enable)) ||
				(rctx->rasterizer && rctx->rasterizer->flatshade != rctx->ps_shader->flatshade))) {

		if (rctx->chip_class >= EVERGREEN)
			evergreen_pipe_shader_ps(ctx, rctx->ps_shader);
		else
			r600_pipe_shader_ps(ctx, rctx->ps_shader);

		r600_context_pipe_state_set(rctx, &rctx->ps_shader->rstate);
	}

	if (rctx->dual_src_blend)
		rctx->cb_shader_mask = rctx->ps_shader->ps_cb_shader_mask | rctx->fb_cb_shader_mask;
	else
		rctx->cb_shader_mask = rctx->fb_cb_shader_mask;
}

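/* Reduce a primitive type to the point/line/triangle strip class programmed
 * into VGT_GS_OUT_PRIM_TYPE. */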
static unsigned r600_conv_prim_to_gs_out(unsigned mode)
{
	static const int prim_conv[] = {
		V_028A6C_OUTPRIM_TYPE_POINTLIST,
		V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		V_028A6C_OUTPRIM_TYPE_TRISTRIP
	};
	assert(mode < Elements(prim_conv));

	return prim_conv[mode];
}

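/* Top-level draw entry point: validate the draw info, update derived state
 * and vertex/index buffers, (re)build the rctx->vgt register group (registers
 * are added once, then only their values are modified in place), emit all
 * dirty atoms and register blocks, and finally emit the draw packets. */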
void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *dinfo)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct pipe_draw_info info = *dinfo;
	struct pipe_index_buffer ib = {};
	unsigned prim, mask, ls_mask = 0;
	struct r600_block *dirty_block = NULL, *next_block = NULL;
	struct r600_atom *state = NULL, *next_state = NULL;
	struct radeon_winsys_cs *cs = rctx->cs;
	uint64_t va;
	uint8_t *ptr;

	if ((!info.count && (info.indexed || !info.count_from_stream_output)) ||
	    (info.indexed && !rctx->vbuf_mgr->index_buffer.buffer) ||
	    !r600_conv_pipe_prim(info.mode, &prim)) {
		assert(0);
		return;
	}

	if (!rctx->vs_shader) {
		assert(0);
		return;
	}

	r600_update_derived_state(rctx);

	/* Update vertex buffers. */
	if ((u_vbuf_draw_begin(rctx->vbuf_mgr, &info) & U_VBUF_BUFFERS_UPDATED) ||
	    rctx->vertex_buffers_dirty) {
		r600_inval_vertex_cache(rctx);
		rctx->vertex_buffer_state.num_dw = (rctx->chip_class >= EVERGREEN ? 12 : 10) *
						   rctx->vbuf_mgr->nr_real_vertex_buffers;
		r600_atom_dirty(rctx, &rctx->vertex_buffer_state);
		rctx->vertex_buffers_dirty = FALSE;
	}

	if (info.indexed) {
		/* Initialize the index buffer struct. */
		pipe_resource_reference(&ib.buffer, rctx->vbuf_mgr->index_buffer.buffer);
		ib.index_size = rctx->vbuf_mgr->index_buffer.index_size;
		ib.offset = rctx->vbuf_mgr->index_buffer.offset + info.start * ib.index_size;

		/* Translate or upload, if needed. */
		r600_translate_index_buffer(rctx, &ib, info.count);

		ptr = u_vbuf_resource(ib.buffer)->user_ptr;
		if (ptr) {
			u_upload_data(rctx->vbuf_mgr->uploader, 0, info.count * ib.index_size,
				      ptr, &ib.offset, &ib.buffer);
		}
	} else {
		info.index_bias = info.start;
		if (info.count_from_stream_output) {
			r600_context_draw_opaque_count(rctx, (struct r600_so_target*)info.count_from_stream_output);
		}
	}

	mask = (1ULL << ((unsigned)rctx->framebuffer.nr_cbufs * 4)) - 1;

	if (rctx->vgt.id != R600_PIPE_STATE_VGT) {
		rctx->vgt.id = R600_PIPE_STATE_VGT;
		rctx->vgt.nregs = 0;
		r600_pipe_state_add_reg(&rctx->vgt, R_008958_VGT_PRIMITIVE_TYPE, prim);
		r600_pipe_state_add_reg(&rctx->vgt, R_028A6C_VGT_GS_OUT_PRIM_TYPE, 0);
		r600_pipe_state_add_reg(&rctx->vgt, R_028238_CB_TARGET_MASK, rctx->cb_target_mask & mask);
		r600_pipe_state_add_reg(&rctx->vgt, R_02823C_CB_SHADER_MASK, 0);
		r600_pipe_state_add_reg(&rctx->vgt, R_028408_VGT_INDX_OFFSET, info.index_bias);
		r600_pipe_state_add_reg(&rctx->vgt, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX, info.restart_index);
		r600_pipe_state_add_reg(&rctx->vgt, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, info.primitive_restart);
		r600_pipe_state_add_reg(&rctx->vgt, R_03CFF4_SQ_VTX_START_INST_LOC, info.start_instance);
		r600_pipe_state_add_reg(&rctx->vgt, R_028A0C_PA_SC_LINE_STIPPLE, 0);
		if (rctx->chip_class <= R700)
			r600_pipe_state_add_reg(&rctx->vgt, R_028808_CB_COLOR_CONTROL, rctx->cb_color_control);
		r600_pipe_state_add_reg(&rctx->vgt, R_02881C_PA_CL_VS_OUT_CNTL, 0);
		r600_pipe_state_add_reg(&rctx->vgt, R_028810_PA_CL_CLIP_CNTL, 0);

		if (rctx->chip_class <= R700)
			r600_pipe_state_add_reg(&rctx->vgt, R_0280A4_CB_COLOR1_INFO, 0);
		else
			r600_pipe_state_add_reg(&rctx->vgt, 0x28CAC, 0);
	}

	rctx->vgt.nregs = 0;
	r600_pipe_state_mod_reg(&rctx->vgt, prim);
	r600_pipe_state_mod_reg(&rctx->vgt, r600_conv_prim_to_gs_out(info.mode));
	r600_pipe_state_mod_reg(&rctx->vgt, rctx->cb_target_mask & mask);
	r600_pipe_state_mod_reg(&rctx->vgt, rctx->cb_shader_mask);
	r600_pipe_state_mod_reg(&rctx->vgt, info.index_bias);
	r600_pipe_state_mod_reg(&rctx->vgt, info.restart_index);
	r600_pipe_state_mod_reg(&rctx->vgt, info.primitive_restart);
	r600_pipe_state_mod_reg(&rctx->vgt, info.start_instance);

	if (prim == V_008958_DI_PT_LINELIST)
		ls_mask = 1;
	else if (prim == V_008958_DI_PT_LINESTRIP)
		ls_mask = 2;
	r600_pipe_state_mod_reg(&rctx->vgt, S_028A0C_AUTO_RESET_CNTL(ls_mask) | rctx->pa_sc_line_stipple);
	if (rctx->chip_class <= R700)
		r600_pipe_state_mod_reg(&rctx->vgt, rctx->cb_color_control);
	r600_pipe_state_mod_reg(&rctx->vgt,
				rctx->vs_shader->pa_cl_vs_out_cntl |
				(rctx->rasterizer->clip_plane_enable & rctx->vs_shader->shader.clip_dist_write));
	r600_pipe_state_mod_reg(&rctx->vgt,
				rctx->pa_cl_clip_cntl |
				(rctx->vs_shader->shader.clip_dist_write ||
				 rctx->vs_shader->shader.vs_prohibit_ucps ?
				 0 : rctx->rasterizer->clip_plane_enable & 0x3F));

	if (rctx->dual_src_blend) {
		r600_pipe_state_mod_reg(&rctx->vgt,
					rctx->color0_format);
	}

	r600_context_pipe_state_set(rctx, &rctx->vgt);

	/* Emit states (the function expects that we emit at most 17 dwords here). */
	r600_need_cs_space(rctx, 0, TRUE);

	LIST_FOR_EACH_ENTRY_SAFE(state, next_state, &rctx->dirty_states, head) {
		r600_emit_atom(rctx, state);
	}
	LIST_FOR_EACH_ENTRY_SAFE(dirty_block, next_block, &rctx->dirty, list) {
		r600_context_block_emit_dirty(rctx, dirty_block);
	}
	LIST_FOR_EACH_ENTRY_SAFE(dirty_block, next_block, &rctx->resource_dirty, list) {
		r600_context_block_resource_emit_dirty(rctx, dirty_block);
	}
	rctx->pm4_dirty_cdwords = 0;

	/* Enable stream out if needed. */
	if (rctx->streamout_start) {
		r600_context_streamout_begin(rctx);
		rctx->streamout_start = FALSE;
	}

	/* draw packet */
	cs->buf[cs->cdw++] = PKT3(PKT3_INDEX_TYPE, 0, rctx->predicate_drawing);
	cs->buf[cs->cdw++] = ib.index_size == 4 ?
				(VGT_INDEX_32 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_32_BIT : 0)) :
				(VGT_INDEX_16 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_16_BIT : 0));
	cs->buf[cs->cdw++] = PKT3(PKT3_NUM_INSTANCES, 0, rctx->predicate_drawing);
	cs->buf[cs->cdw++] = info.instance_count;
	if (info.indexed) {
		va = r600_resource_va(ctx->screen, ib.buffer);
		va += ib.offset;
		cs->buf[cs->cdw++] = PKT3(PKT3_DRAW_INDEX, 3, rctx->predicate_drawing);
		cs->buf[cs->cdw++] = va;
		cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;
		cs->buf[cs->cdw++] = info.count;
		cs->buf[cs->cdw++] = V_0287F0_DI_SRC_SEL_DMA;
		cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, rctx->predicate_drawing);
		cs->buf[cs->cdw++] = r600_context_bo_reloc(rctx, (struct r600_resource*)ib.buffer, RADEON_USAGE_READ);
	} else {
		cs->buf[cs->cdw++] = PKT3(PKT3_DRAW_INDEX_AUTO, 1, rctx->predicate_drawing);
		cs->buf[cs->cdw++] = info.count;
		cs->buf[cs->cdw++] = V_0287F0_DI_SRC_SEL_AUTO_INDEX |
					(info.count_from_stream_output ? S_0287F0_USE_OPAQUE(1) : 0);
	}

	rctx->flags |= R600_CONTEXT_DST_CACHES_DIRTY | R600_CONTEXT_DRAW_PENDING;

	if (rctx->framebuffer.zsbuf) {
		struct pipe_resource *tex = rctx->framebuffer.zsbuf->texture;
		((struct r600_resource_texture *)tex)->dirty_db = TRUE;
	}

	pipe_resource_reference(&ib.buffer, NULL);
	u_vbuf_draw_end(rctx->vbuf_mgr);
}

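/* Append a register write to a pipe state block. The _bo variant also records
 * the buffer and its usage so a relocation is emitted along with the value;
 * the plain variant below simply forwards with a NULL buffer, which is the
 * split referred to in the commit subject. Illustrative use, taken from
 * r600_set_blend_color above:
 *
 *     r600_pipe_state_add_reg(rstate, R_028414_CB_BLEND_RED, fui(state->color[0]));
 *
 * a caller that needs a relocation would instead pass an r600_resource and a
 * RADEON_USAGE_* flag through the *_add_reg_bo variant. */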
void _r600_pipe_state_add_reg_bo(struct r600_context *ctx,
				 struct r600_pipe_state *state,
				 uint32_t offset, uint32_t value,
				 uint32_t range_id, uint32_t block_id,
				 struct r600_resource *bo,
				 enum radeon_bo_usage usage)
{
	struct r600_range *range;
	struct r600_block *block;

	if (bo) assert(usage);

	range = &ctx->range[range_id];
	block = range->blocks[block_id];
	state->regs[state->nregs].block = block;
	state->regs[state->nregs].id = (offset - block->start_offset) >> 2;

	state->regs[state->nregs].value = value;
	state->regs[state->nregs].bo = bo;
	state->regs[state->nregs].bo_usage = usage;

	state->nregs++;
	assert(state->nregs < R600_BLOCK_MAX_REG);
}

void _r600_pipe_state_add_reg(struct r600_context *ctx,
			      struct r600_pipe_state *state,
			      uint32_t offset, uint32_t value,
			      uint32_t range_id, uint32_t block_id)
{
	_r600_pipe_state_add_reg_bo(ctx, state, offset, value,
				    range_id, block_id, NULL, 0);
}

void r600_pipe_state_add_reg_noblock(struct r600_pipe_state *state,
				     uint32_t offset, uint32_t value,
				     struct r600_resource *bo,
				     enum radeon_bo_usage usage)
{
	if (bo) assert(usage);

	state->regs[state->nregs].id = offset;
	state->regs[state->nregs].block = NULL;
	state->regs[state->nregs].value = value;
	state->regs[state->nregs].bo = bo;
	state->regs[state->nregs].bo_usage = usage;

	state->nregs++;
	assert(state->nregs < R600_BLOCK_MAX_REG);
}

uint32_t r600_translate_stencil_op(int s_op)
{
	switch (s_op) {
	case PIPE_STENCIL_OP_KEEP:
		return V_028800_STENCIL_KEEP;
	case PIPE_STENCIL_OP_ZERO:
		return V_028800_STENCIL_ZERO;
	case PIPE_STENCIL_OP_REPLACE:
		return V_028800_STENCIL_REPLACE;
	case PIPE_STENCIL_OP_INCR:
		return V_028800_STENCIL_INCR;
	case PIPE_STENCIL_OP_DECR:
		return V_028800_STENCIL_DECR;
	case PIPE_STENCIL_OP_INCR_WRAP:
		return V_028800_STENCIL_INCR_WRAP;
	case PIPE_STENCIL_OP_DECR_WRAP:
		return V_028800_STENCIL_DECR_WRAP;
	case PIPE_STENCIL_OP_INVERT:
		return V_028800_STENCIL_INVERT;
	default:
		R600_ERR("Unknown stencil op %d", s_op);
		assert(0);
		break;
	}
	return 0;
}

uint32_t r600_translate_fill(uint32_t func)
{
	switch (func) {
	case PIPE_POLYGON_MODE_FILL:
		return 2;
	case PIPE_POLYGON_MODE_LINE:
		return 1;
	case PIPE_POLYGON_MODE_POINT:
		return 0;
	default:
		assert(0);
		return 0;
	}
}

unsigned r600_tex_wrap(unsigned wrap)
{
	switch (wrap) {
	default:
	case PIPE_TEX_WRAP_REPEAT:
		return V_03C000_SQ_TEX_WRAP;
	case PIPE_TEX_WRAP_CLAMP:
		return V_03C000_SQ_TEX_CLAMP_HALF_BORDER;
	case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
		return V_03C000_SQ_TEX_CLAMP_LAST_TEXEL;
	case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
		return V_03C000_SQ_TEX_CLAMP_BORDER;
	case PIPE_TEX_WRAP_MIRROR_REPEAT:
		return V_03C000_SQ_TEX_MIRROR;
	case PIPE_TEX_WRAP_MIRROR_CLAMP:
		return V_03C000_SQ_TEX_MIRROR_ONCE_HALF_BORDER;
	case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
		return V_03C000_SQ_TEX_MIRROR_ONCE_LAST_TEXEL;
	case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
		return V_03C000_SQ_TEX_MIRROR_ONCE_BORDER;
	}
}

unsigned r600_tex_filter(unsigned filter)
{
	switch (filter) {
	default:
	case PIPE_TEX_FILTER_NEAREST:
		return V_03C000_SQ_TEX_XY_FILTER_POINT;
	case PIPE_TEX_FILTER_LINEAR:
		return V_03C000_SQ_TEX_XY_FILTER_BILINEAR;
	}
}

unsigned r600_tex_mipfilter(unsigned filter)
{
	switch (filter) {
	case PIPE_TEX_MIPFILTER_NEAREST:
		return V_03C000_SQ_TEX_Z_FILTER_POINT;
	case PIPE_TEX_MIPFILTER_LINEAR:
		return V_03C000_SQ_TEX_Z_FILTER_LINEAR;
	default:
	case PIPE_TEX_MIPFILTER_NONE:
		return V_03C000_SQ_TEX_Z_FILTER_NONE;
	}
}

unsigned r600_tex_compare(unsigned compare)
{
	switch (compare) {
	default:
	case PIPE_FUNC_NEVER:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_NEVER;
	case PIPE_FUNC_LESS:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_LESS;
	case PIPE_FUNC_EQUAL:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_EQUAL;
	case PIPE_FUNC_LEQUAL:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_LESSEQUAL;
	case PIPE_FUNC_GREATER:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_GREATER;
	case PIPE_FUNC_NOTEQUAL:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_NOTEQUAL;
	case PIPE_FUNC_GEQUAL:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_GREATEREQUAL;
	case PIPE_FUNC_ALWAYS:
		return V_03C000_SQ_TEX_DEPTH_COMPARE_ALWAYS;
	}
}