ilo: introduce vertex element CSO
[mesa.git] src/gallium/drivers/ilo/ilo_3d_pipeline_gen6.c
1 /*
2 * Mesa 3-D graphics library
3 *
4 * Copyright (C) 2013 LunarG, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included
14 * in all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Authors:
25 * Chia-I Wu <olv@lunarg.com>
26 */
27
28 #include "util/u_dual_blend.h"
29 #include "util/u_prim.h"
30 #include "intel_reg.h"
31
32 #include "ilo_context.h"
33 #include "ilo_cp.h"
34 #include "ilo_gpe_gen6.h"
35 #include "ilo_shader.h"
36 #include "ilo_state.h"
37 #include "ilo_3d_pipeline.h"
38 #include "ilo_3d_pipeline_gen6.h"
39
40 /**
41 * This should be called before any depth stall flush (including those
42 * produced by non-pipelined state commands) or cache flush on GEN6.
43 *
44 * \see intel_emit_post_sync_nonzero_flush()
45 */
46 static void
47 gen6_wa_pipe_control_post_sync(struct ilo_3d_pipeline *p,
48 bool caller_post_sync)
49 {
50 assert(p->dev->gen == ILO_GEN(6));
51
52 /* emit once */
53 if (p->state.has_gen6_wa_pipe_control)
54 return;
55
56 p->state.has_gen6_wa_pipe_control = true;
57
58 /*
59 * From the Sandy Bridge PRM, volume 2 part 1, page 60:
60 *
61 * "Pipe-control with CS-stall bit set must be sent BEFORE the
62 * pipe-control with a post-sync op and no write-cache flushes."
63 *
64     * The post-sync workaround emitted below necessitates this one first.
65 */
66 p->gen6_PIPE_CONTROL(p->dev,
67 PIPE_CONTROL_CS_STALL |
68 PIPE_CONTROL_STALL_AT_SCOREBOARD,
69 NULL, 0, false, p->cp);
70
71 /* the caller will emit the post-sync op */
72 if (caller_post_sync)
73 return;
74
75 /*
76 * From the Sandy Bridge PRM, volume 2 part 1, page 60:
77 *
78 * "Before any depth stall flush (including those produced by
79 * non-pipelined state commands), software needs to first send a
80 * PIPE_CONTROL with no bits set except Post-Sync Operation != 0."
81 *
82 * "Before a PIPE_CONTROL with Write Cache Flush Enable =1, a
83 * PIPE_CONTROL with any non-zero post-sync-op is required."
84 */
85 p->gen6_PIPE_CONTROL(p->dev,
86 PIPE_CONTROL_WRITE_IMMEDIATE,
87 p->workaround_bo, 0, false, p->cp);
88 }
89
90 static void
91 gen6_wa_pipe_control_wm_multisample_flush(struct ilo_3d_pipeline *p)
92 {
93 assert(p->dev->gen == ILO_GEN(6));
94
95 gen6_wa_pipe_control_post_sync(p, false);
96
97 /*
98 * From the Sandy Bridge PRM, volume 2 part 1, page 305:
99 *
100 * "Driver must guarentee that all the caches in the depth pipe are
101 * flushed before this command (3DSTATE_MULTISAMPLE) is parsed. This
102 * requires driver to send a PIPE_CONTROL with a CS stall along with a
103 * Depth Flush prior to this command."
104 */
105 p->gen6_PIPE_CONTROL(p->dev,
106 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
107 PIPE_CONTROL_CS_STALL,
108 0, 0, false, p->cp);
109 }
110
111 static void
112 gen6_wa_pipe_control_wm_depth_flush(struct ilo_3d_pipeline *p)
113 {
114 assert(p->dev->gen == ILO_GEN(6));
115
116 gen6_wa_pipe_control_post_sync(p, false);
117
118 /*
119 * According to intel_emit_depth_stall_flushes() of classic i965, we need
120 * to emit a sequence of PIPE_CONTROLs prior to emitting depth related
121 * commands.
122 */
123 p->gen6_PIPE_CONTROL(p->dev,
124 PIPE_CONTROL_DEPTH_STALL,
125 NULL, 0, false, p->cp);
126
127 p->gen6_PIPE_CONTROL(p->dev,
128 PIPE_CONTROL_DEPTH_CACHE_FLUSH,
129 NULL, 0, false, p->cp);
130
131 p->gen6_PIPE_CONTROL(p->dev,
132 PIPE_CONTROL_DEPTH_STALL,
133 NULL, 0, false, p->cp);
134 }
135
136 static void
137 gen6_wa_pipe_control_wm_max_threads_stall(struct ilo_3d_pipeline *p)
138 {
139 assert(p->dev->gen == ILO_GEN(6));
140
141 /* the post-sync workaround should cover this already */
142 if (p->state.has_gen6_wa_pipe_control)
143 return;
144
145 /*
146 * From the Sandy Bridge PRM, volume 2 part 1, page 274:
147 *
148 * "A PIPE_CONTROL command, with only the Stall At Pixel Scoreboard
149 * field set (DW1 Bit 1), must be issued prior to any change to the
150 * value in this field (Maximum Number of Threads in 3DSTATE_WM)"
151 */
152 p->gen6_PIPE_CONTROL(p->dev,
153 PIPE_CONTROL_STALL_AT_SCOREBOARD,
154 NULL, 0, false, p->cp);
155
156 }
157
158 static void
159 gen6_wa_pipe_control_vs_const_flush(struct ilo_3d_pipeline *p)
160 {
161 assert(p->dev->gen == ILO_GEN(6));
162
163 gen6_wa_pipe_control_post_sync(p, false);
164
165 /*
166     * According to upload_vs_state() of classic i965, we need to emit a
167     * PIPE_CONTROL after 3DSTATE_CONSTANT_VS; otherwise the command gets
168     * buffered by the VS FF, to the point that the FF dies.
169 */
170 p->gen6_PIPE_CONTROL(p->dev,
171 PIPE_CONTROL_DEPTH_STALL |
172 PIPE_CONTROL_INSTRUCTION_FLUSH |
173 PIPE_CONTROL_STATE_CACHE_INVALIDATE,
174 NULL, 0, false, p->cp);
175 }
176
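/* check whether a piece of pipe state has been marked dirty for this session */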
177 #define DIRTY(state) (session->pipe_dirty & ILO_DIRTY_ ## state)
178
179 void
180 gen6_pipeline_common_select(struct ilo_3d_pipeline *p,
181 const struct ilo_context *ilo,
182 struct gen6_pipeline_session *session)
183 {
184 /* PIPELINE_SELECT */
185 if (session->hw_ctx_changed) {
186 if (p->dev->gen == ILO_GEN(6))
187 gen6_wa_pipe_control_post_sync(p, false);
188
189 p->gen6_PIPELINE_SELECT(p->dev, 0x0, p->cp);
190 }
191 }
192
193 void
194 gen6_pipeline_common_sip(struct ilo_3d_pipeline *p,
195 const struct ilo_context *ilo,
196 struct gen6_pipeline_session *session)
197 {
198 /* STATE_SIP */
199 if (session->hw_ctx_changed) {
200 if (p->dev->gen == ILO_GEN(6))
201 gen6_wa_pipe_control_post_sync(p, false);
202
203 p->gen6_STATE_SIP(p->dev, 0, p->cp);
204 }
205 }
206
207 void
208 gen6_pipeline_common_base_address(struct ilo_3d_pipeline *p,
209 const struct ilo_context *ilo,
210 struct gen6_pipeline_session *session)
211 {
212 /* STATE_BASE_ADDRESS */
213 if (session->state_bo_changed || session->instruction_bo_changed) {
214 if (p->dev->gen == ILO_GEN(6))
215 gen6_wa_pipe_control_post_sync(p, false);
216
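      /* states are uploaded to the batch bo and kernels to the shader cache bo */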
217 p->gen6_STATE_BASE_ADDRESS(p->dev,
218 NULL, p->cp->bo, p->cp->bo, NULL, ilo->shader_cache->bo,
219 0, 0, 0, 0, p->cp);
220
221 /*
222 * From the Sandy Bridge PRM, volume 1 part 1, page 28:
223 *
224 * "The following commands must be reissued following any change to
225 * the base addresses:
226 *
227 * * 3DSTATE_BINDING_TABLE_POINTERS
228 * * 3DSTATE_SAMPLER_STATE_POINTERS
229 * * 3DSTATE_VIEWPORT_STATE_POINTERS
230 * * 3DSTATE_CC_POINTERS
231 * * MEDIA_STATE_POINTERS"
232 *
233 * 3DSTATE_SCISSOR_STATE_POINTERS is not on the list, but it is
234     * reasonable to also reissue the command.  The same goes for the PCBs.
235 */
236 session->viewport_state_changed = true;
237
238 session->cc_state_blend_changed = true;
239 session->cc_state_dsa_changed = true;
240 session->cc_state_cc_changed = true;
241
242 session->scissor_state_changed = true;
243
244 session->binding_table_vs_changed = true;
245 session->binding_table_gs_changed = true;
246 session->binding_table_fs_changed = true;
247
248 session->sampler_state_vs_changed = true;
249 session->sampler_state_gs_changed = true;
250 session->sampler_state_fs_changed = true;
251
252 session->pcb_state_vs_changed = true;
253 session->pcb_state_gs_changed = true;
254 session->pcb_state_fs_changed = true;
255 }
256 }
257
258 static void
259 gen6_pipeline_common_urb(struct ilo_3d_pipeline *p,
260 const struct ilo_context *ilo,
261 struct gen6_pipeline_session *session)
262 {
263 /* 3DSTATE_URB */
264 if (DIRTY(VERTEX_ELEMENTS) || DIRTY(VS) || DIRTY(GS)) {
265 const struct ilo_shader *vs = (ilo->vs) ? ilo->vs->shader : NULL;
266 const struct ilo_shader *gs = (ilo->gs) ? ilo->gs->shader : NULL;
267 const bool gs_active = (gs || (vs && vs->stream_output));
268 int vs_entry_size, gs_entry_size;
269 int vs_total_size, gs_total_size;
270
271 vs_entry_size = (vs) ? vs->out.count : 0;
272
273 /*
274 * As indicated by 2e712e41db0c0676e9f30fc73172c0e8de8d84d4, VF and VS
275 * share VUE handles. The VUE allocation size must be large enough to
276     * store either the VF outputs (number of VERTEX_ELEMENTs) or the VS outputs.
277 *
278 * I am not sure if the PRM explicitly states that VF and VS share VUE
279 * handles. But here is a citation that implies so:
280 *
281 * From the Sandy Bridge PRM, volume 2 part 1, page 44:
282 *
283 * "Once a FF stage that spawn threads has sufficient input to
284 * initiate a thread, it must guarantee that it is safe to request
285 * the thread initiation. For all these FF stages, this check is
286 * based on :
287 *
288 * - The availability of output URB entries:
289 * - VS: As the input URB entries are overwritten with the
290 * VS-generated output data, output URB availability isn't a
291 * factor."
292 */
293 if (vs_entry_size < ilo->ve->count)
294 vs_entry_size = ilo->ve->count;
295
296 gs_entry_size = (gs) ? gs->out.count :
297 (vs && vs->stream_output) ? vs_entry_size : 0;
298
299 /* in bytes */
300 vs_entry_size *= sizeof(float) * 4;
301 gs_entry_size *= sizeof(float) * 4;
302 vs_total_size = ilo->dev->urb_size;
303
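      /* the URB is split evenly between VS and GS when the GS (or VS stream output) is active */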
304 if (gs_active) {
305 vs_total_size /= 2;
306 gs_total_size = vs_total_size;
307 }
308 else {
309 gs_total_size = 0;
310 }
311
312 p->gen6_3DSTATE_URB(p->dev, vs_total_size, gs_total_size,
313 vs_entry_size, gs_entry_size, p->cp);
314
315 /*
316 * From the Sandy Bridge PRM, volume 2 part 1, page 27:
317 *
318 * "Because of a urb corruption caused by allocating a previous
319 * gsunit's urb entry to vsunit software is required to send a
320 * "GS NULL Fence" (Send URB fence with VS URB size == 1 and GS URB
321 * size == 0) plus a dummy DRAW call before any case where VS will
322 * be taking over GS URB space."
323 */
324 if (p->state.gs.active && !gs_active)
325 ilo_3d_pipeline_emit_flush_gen6(p);
326
327 p->state.gs.active = gs_active;
328 }
329 }
330
331 static void
332 gen6_pipeline_common_pointers_1(struct ilo_3d_pipeline *p,
333 const struct ilo_context *ilo,
334 struct gen6_pipeline_session *session)
335 {
336 /* 3DSTATE_VIEWPORT_STATE_POINTERS */
337 if (session->viewport_state_changed) {
338 p->gen6_3DSTATE_VIEWPORT_STATE_POINTERS(p->dev,
339 p->state.CLIP_VIEWPORT,
340 p->state.SF_VIEWPORT,
341 p->state.CC_VIEWPORT, p->cp);
342 }
343 }
344
345 static void
346 gen6_pipeline_common_pointers_2(struct ilo_3d_pipeline *p,
347 const struct ilo_context *ilo,
348 struct gen6_pipeline_session *session)
349 {
350 /* 3DSTATE_CC_STATE_POINTERS */
351 if (session->cc_state_blend_changed ||
352 session->cc_state_dsa_changed ||
353 session->cc_state_cc_changed) {
354 p->gen6_3DSTATE_CC_STATE_POINTERS(p->dev,
355 p->state.BLEND_STATE,
356 p->state.DEPTH_STENCIL_STATE,
357 p->state.COLOR_CALC_STATE, p->cp);
358 }
359
360 /* 3DSTATE_SAMPLER_STATE_POINTERS */
361 if (session->sampler_state_vs_changed ||
362 session->sampler_state_gs_changed ||
363 session->sampler_state_fs_changed) {
364 p->gen6_3DSTATE_SAMPLER_STATE_POINTERS(p->dev,
365 p->state.vs.SAMPLER_STATE,
366 0,
367 p->state.wm.SAMPLER_STATE, p->cp);
368 }
369 }
370
371 static void
372 gen6_pipeline_common_pointers_3(struct ilo_3d_pipeline *p,
373 const struct ilo_context *ilo,
374 struct gen6_pipeline_session *session)
375 {
376 /* 3DSTATE_SCISSOR_STATE_POINTERS */
377 if (session->scissor_state_changed) {
378 p->gen6_3DSTATE_SCISSOR_STATE_POINTERS(p->dev,
379 p->state.SCISSOR_RECT, p->cp);
380 }
381
382 /* 3DSTATE_BINDING_TABLE_POINTERS */
383 if (session->binding_table_vs_changed ||
384 session->binding_table_gs_changed ||
385 session->binding_table_fs_changed) {
386 p->gen6_3DSTATE_BINDING_TABLE_POINTERS(p->dev,
387 p->state.vs.BINDING_TABLE_STATE,
388 p->state.gs.BINDING_TABLE_STATE,
389 p->state.wm.BINDING_TABLE_STATE, p->cp);
390 }
391 }
392
393 void
394 gen6_pipeline_vf(struct ilo_3d_pipeline *p,
395 const struct ilo_context *ilo,
396 struct gen6_pipeline_session *session)
397 {
398 /* 3DSTATE_INDEX_BUFFER */
399 if (DIRTY(INDEX_BUFFER)) {
400 p->gen6_3DSTATE_INDEX_BUFFER(p->dev,
401 &ilo->ib.state, session->info->primitive_restart, p->cp);
402 }
403
404 /* 3DSTATE_VERTEX_BUFFERS */
405 if (DIRTY(VERTEX_BUFFERS) || DIRTY(VERTEX_ELEMENTS)) {
406 p->gen6_3DSTATE_VERTEX_BUFFERS(p->dev,
407 ilo->vb.states, ilo->vb.enabled_mask, ilo->ve, p->cp);
408 }
409
410 /* 3DSTATE_VERTEX_ELEMENTS */
411 if (DIRTY(VERTEX_ELEMENTS) || DIRTY(VS)) {
412 const struct ilo_ve_state *ve = ilo->ve;
413 bool last_velement_edgeflag = false;
414 bool prepend_generate_ids = false;
415
416 if (ilo->vs) {
417 const struct ilo_shader_info *info = &ilo->vs->info;
418
419 if (info->edgeflag_in >= 0) {
420 /* we rely on the state tracker here */
421 assert(info->edgeflag_in == ve->count - 1);
422 last_velement_edgeflag = true;
423 }
424
425 prepend_generate_ids = (info->has_instanceid || info->has_vertexid);
426 }
427
428 p->gen6_3DSTATE_VERTEX_ELEMENTS(p->dev, ve,
429 last_velement_edgeflag, prepend_generate_ids, p->cp);
430 }
431 }
432
433 void
434 gen6_pipeline_vf_statistics(struct ilo_3d_pipeline *p,
435 const struct ilo_context *ilo,
436 struct gen6_pipeline_session *session)
437 {
438 /* 3DSTATE_VF_STATISTICS */
439 if (session->hw_ctx_changed)
440 p->gen6_3DSTATE_VF_STATISTICS(p->dev, false, p->cp);
441 }
442
443 void
444 gen6_pipeline_vf_draw(struct ilo_3d_pipeline *p,
445 const struct ilo_context *ilo,
446 struct gen6_pipeline_session *session)
447 {
448 /* 3DPRIMITIVE */
449 p->gen6_3DPRIMITIVE(p->dev, session->info, false, p->cp);
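   /* reset so that the post-sync workaround is emitted again for the state commands that follow this draw */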
450 p->state.has_gen6_wa_pipe_control = false;
451 }
452
453 void
454 gen6_pipeline_vs(struct ilo_3d_pipeline *p,
455 const struct ilo_context *ilo,
456 struct gen6_pipeline_session *session)
457 {
458 const bool emit_3dstate_vs = (DIRTY(VS) || DIRTY(VERTEX_SAMPLERS));
459 const bool emit_3dstate_constant_vs = session->pcb_state_vs_changed;
460
461 /*
462 * the classic i965 does this in upload_vs_state(), citing a spec that I
463 * cannot find
464 */
465 if (emit_3dstate_vs && p->dev->gen == ILO_GEN(6))
466 gen6_wa_pipe_control_post_sync(p, false);
467
468 /* 3DSTATE_CONSTANT_VS */
469 if (emit_3dstate_constant_vs) {
470 p->gen6_3DSTATE_CONSTANT_VS(p->dev,
471 &p->state.vs.PUSH_CONSTANT_BUFFER,
472 &p->state.vs.PUSH_CONSTANT_BUFFER_size,
473 1, p->cp);
474 }
475
476 /* 3DSTATE_VS */
477 if (emit_3dstate_vs) {
478 const struct ilo_shader *vs = (ilo->vs)? ilo->vs->shader : NULL;
479 const int num_samplers = ilo->sampler[PIPE_SHADER_VERTEX].count;
480
481 p->gen6_3DSTATE_VS(p->dev, vs, num_samplers, p->cp);
482 }
483
484 if (emit_3dstate_constant_vs && p->dev->gen == ILO_GEN(6))
485 gen6_wa_pipe_control_vs_const_flush(p);
486 }
487
488 static void
489 gen6_pipeline_gs(struct ilo_3d_pipeline *p,
490 const struct ilo_context *ilo,
491 struct gen6_pipeline_session *session)
492 {
493 /* 3DSTATE_CONSTANT_GS */
494 if (session->pcb_state_gs_changed)
495 p->gen6_3DSTATE_CONSTANT_GS(p->dev, NULL, NULL, 0, p->cp);
496
497 /* 3DSTATE_GS */
498 if (DIRTY(GS) || DIRTY(VS) || session->prim_changed) {
499 const struct ilo_shader *gs = (ilo->gs)? ilo->gs->shader : NULL;
500 const struct ilo_shader *vs = (ilo->vs)? ilo->vs->shader : NULL;
501 const int num_vertices = u_vertices_per_prim(session->reduced_prim);
502
503 if (gs)
504 assert(!gs->pcb.clip_state_size);
505
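      /* the offset presumably selects the VS-generated stream-output kernel matching the vertex count of the reduced primitive */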
506 p->gen6_3DSTATE_GS(p->dev, gs, vs,
507 (vs) ? vs->cache_offset + vs->gs_offsets[num_vertices - 1] : 0,
508 p->cp);
509 }
510 }
511
512 bool
513 gen6_pipeline_update_max_svbi(struct ilo_3d_pipeline *p,
514 const struct ilo_context *ilo,
515 struct gen6_pipeline_session *session)
516 {
517 if (DIRTY(VS) || DIRTY(GS) || DIRTY(STREAM_OUTPUT_TARGETS)) {
518 const struct pipe_stream_output_info *so_info =
519 (ilo->gs) ? &ilo->gs->info.stream_output :
520 (ilo->vs) ? &ilo->vs->info.stream_output : NULL;
521 unsigned max_svbi = 0xffffffff;
522 int i;
523
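      /* MAX_SVBI is limited by the smallest SO buffer: for each output, count how many vertices fit in its buffer and take the minimum */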
524       for (i = 0; so_info && i < so_info->num_outputs; i++) {
525 const int output_buffer = so_info->output[i].output_buffer;
526 const struct pipe_stream_output_target *so =
527 ilo->so.states[output_buffer];
528 const int struct_size = so_info->stride[output_buffer] * 4;
529 const int elem_size = so_info->output[i].num_components * 4;
530 int buf_size, count;
531
532 if (!so) {
533 max_svbi = 0;
534 break;
535 }
536
537 buf_size = so->buffer_size - so_info->output[i].dst_offset * 4;
538
539 count = buf_size / struct_size;
540 if (buf_size % struct_size >= elem_size)
541 count++;
542
543 if (count < max_svbi)
544 max_svbi = count;
545 }
546
547 if (p->state.so_max_vertices != max_svbi) {
548 p->state.so_max_vertices = max_svbi;
549 return true;
550 }
551 }
552
553 return false;
554 }
555
556 static void
557 gen6_pipeline_gs_svbi(struct ilo_3d_pipeline *p,
558 const struct ilo_context *ilo,
559 struct gen6_pipeline_session *session)
560 {
561 const bool emit = gen6_pipeline_update_max_svbi(p, ilo, session);
562
563 /* 3DSTATE_GS_SVB_INDEX */
564 if (emit) {
565 if (p->dev->gen == ILO_GEN(6))
566 gen6_wa_pipe_control_post_sync(p, false);
567
568 p->gen6_3DSTATE_GS_SVB_INDEX(p->dev,
569 0, p->state.so_num_vertices, p->state.so_max_vertices,
570 false, p->cp);
571
572 if (session->hw_ctx_changed) {
573 int i;
574
575 /*
576 * From the Sandy Bridge PRM, volume 2 part 1, page 148:
577 *
578 * "If a buffer is not enabled then the SVBI must be set to 0x0
579 * in order to not cause overflow in that SVBI."
580 *
581 * "If a buffer is not enabled then the MaxSVBI must be set to
582 * 0xFFFFFFFF in order to not cause overflow in that SVBI."
583 */
584 for (i = 1; i < 4; i++) {
585 p->gen6_3DSTATE_GS_SVB_INDEX(p->dev,
586 i, 0, 0xffffffff, false, p->cp);
587 }
588 }
589 }
590 }
591
592 void
593 gen6_pipeline_clip(struct ilo_3d_pipeline *p,
594 const struct ilo_context *ilo,
595 struct gen6_pipeline_session *session)
596 {
597 /* 3DSTATE_CLIP */
598 if (DIRTY(RASTERIZER) || DIRTY(FS) ||
599 DIRTY(VIEWPORT) || DIRTY(FRAMEBUFFER)) {
600 bool enable_guardband = true;
601 unsigned i;
602
603 /*
604        * We do not do 2D clipping yet.  The guard band test should only be
605        * enabled when every viewport is at least as large as the framebuffer.
606 */
607 for (i = 0; i < ilo->viewport.count; i++) {
608 const struct ilo_viewport_cso *vp = &ilo->viewport.cso[i];
609
610 if (vp->min_x > 0.0f || vp->max_x < ilo->fb.state.width ||
611 vp->min_y > 0.0f || vp->max_y < ilo->fb.state.height) {
612 enable_guardband = false;
613 break;
614 }
615 }
616
617 p->gen6_3DSTATE_CLIP(p->dev,
618 &ilo->rasterizer->state,
619 (ilo->fs && ilo->fs->shader->in.has_linear_interp),
620 enable_guardband, 1, p->cp);
621 }
622 }
623
624 static void
625 gen6_pipeline_sf(struct ilo_3d_pipeline *p,
626 const struct ilo_context *ilo,
627 struct gen6_pipeline_session *session)
628 {
629 /* 3DSTATE_SF */
630 if (DIRTY(RASTERIZER) || DIRTY(VS) || DIRTY(GS) || DIRTY(FS)) {
631 const struct ilo_shader *fs = (ilo->fs)? ilo->fs->shader : NULL;
632 const struct ilo_shader *last_sh =
633 (ilo->gs)? ilo->gs->shader :
634 (ilo->vs)? ilo->vs->shader : NULL;
635
636 p->gen6_3DSTATE_SF(p->dev,
637 &ilo->rasterizer->state, fs, last_sh, p->cp);
638 }
639 }
640
641 void
642 gen6_pipeline_sf_rect(struct ilo_3d_pipeline *p,
643 const struct ilo_context *ilo,
644 struct gen6_pipeline_session *session)
645 {
646 /* 3DSTATE_DRAWING_RECTANGLE */
647 if (DIRTY(FRAMEBUFFER)) {
648 if (p->dev->gen == ILO_GEN(6))
649 gen6_wa_pipe_control_post_sync(p, false);
650
651 p->gen6_3DSTATE_DRAWING_RECTANGLE(p->dev, 0, 0,
652 ilo->fb.state.width, ilo->fb.state.height, p->cp);
653 }
654 }
655
656 static void
657 gen6_pipeline_wm(struct ilo_3d_pipeline *p,
658 const struct ilo_context *ilo,
659 struct gen6_pipeline_session *session)
660 {
661 /* 3DSTATE_CONSTANT_PS */
662 if (session->pcb_state_fs_changed)
663 p->gen6_3DSTATE_CONSTANT_PS(p->dev, NULL, NULL, 0, p->cp);
664
665 /* 3DSTATE_WM */
666 if (DIRTY(FS) || DIRTY(FRAGMENT_SAMPLERS) ||
667 DIRTY(BLEND) || DIRTY(DEPTH_STENCIL_ALPHA) ||
668 DIRTY(RASTERIZER)) {
669 const struct ilo_shader *fs = (ilo->fs)? ilo->fs->shader : NULL;
670 const int num_samplers = ilo->sampler[PIPE_SHADER_FRAGMENT].count;
671 const bool dual_blend = ilo->blend->dual_blend;
672 const bool cc_may_kill = (ilo->dsa->state.alpha.enabled ||
673 ilo->blend->alpha_to_coverage);
674
675 if (fs)
676 assert(!fs->pcb.clip_state_size);
677
678 if (p->dev->gen == ILO_GEN(6) && session->hw_ctx_changed)
679 gen6_wa_pipe_control_wm_max_threads_stall(p);
680
681 p->gen6_3DSTATE_WM(p->dev, fs, num_samplers,
682 &ilo->rasterizer->state, dual_blend, cc_may_kill, p->cp);
683 }
684 }
685
686 static void
687 gen6_pipeline_wm_multisample(struct ilo_3d_pipeline *p,
688 const struct ilo_context *ilo,
689 struct gen6_pipeline_session *session)
690 {
691 /* 3DSTATE_MULTISAMPLE and 3DSTATE_SAMPLE_MASK */
692 if (DIRTY(SAMPLE_MASK) || DIRTY(FRAMEBUFFER)) {
693 const uint32_t *packed_sample_pos;
694
695 packed_sample_pos = (ilo->fb.num_samples > 1) ?
696 &p->packed_sample_position_4x : &p->packed_sample_position_1x;
697
698 if (p->dev->gen == ILO_GEN(6)) {
699 gen6_wa_pipe_control_post_sync(p, false);
700 gen6_wa_pipe_control_wm_multisample_flush(p);
701 }
702
703 p->gen6_3DSTATE_MULTISAMPLE(p->dev,
704 ilo->fb.num_samples, packed_sample_pos,
705 ilo->rasterizer->state.half_pixel_center, p->cp);
706
707 p->gen6_3DSTATE_SAMPLE_MASK(p->dev,
708 (ilo->fb.num_samples > 1) ? ilo->sample_mask : 0x1, p->cp);
709 }
710 }
711
712 static void
713 gen6_pipeline_wm_depth(struct ilo_3d_pipeline *p,
714 const struct ilo_context *ilo,
715 struct gen6_pipeline_session *session)
716 {
717 /* 3DSTATE_DEPTH_BUFFER and 3DSTATE_CLEAR_PARAMS */
718 if (DIRTY(FRAMEBUFFER)) {
719 if (p->dev->gen == ILO_GEN(6)) {
720 gen6_wa_pipe_control_post_sync(p, false);
721 gen6_wa_pipe_control_wm_depth_flush(p);
722 }
723
724 p->gen6_3DSTATE_DEPTH_BUFFER(p->dev, ilo->fb.state.zsbuf, p->cp);
725
726 /* TODO */
727 p->gen6_3DSTATE_CLEAR_PARAMS(p->dev, 0, p->cp);
728 }
729 }
730
731 void
732 gen6_pipeline_wm_raster(struct ilo_3d_pipeline *p,
733 const struct ilo_context *ilo,
734 struct gen6_pipeline_session *session)
735 {
736 /* 3DSTATE_POLY_STIPPLE_PATTERN and 3DSTATE_POLY_STIPPLE_OFFSET */
737 if ((DIRTY(RASTERIZER) || DIRTY(POLY_STIPPLE)) &&
738 ilo->rasterizer->state.poly_stipple_enable) {
739 if (p->dev->gen == ILO_GEN(6))
740 gen6_wa_pipe_control_post_sync(p, false);
741
742 p->gen6_3DSTATE_POLY_STIPPLE_PATTERN(p->dev,
743 &ilo->poly_stipple, p->cp);
744
745 p->gen6_3DSTATE_POLY_STIPPLE_OFFSET(p->dev, 0, 0, p->cp);
746 }
747
748 /* 3DSTATE_LINE_STIPPLE */
749 if (DIRTY(RASTERIZER) && ilo->rasterizer->state.line_stipple_enable) {
750 if (p->dev->gen == ILO_GEN(6))
751 gen6_wa_pipe_control_post_sync(p, false);
752
753 p->gen6_3DSTATE_LINE_STIPPLE(p->dev,
754 ilo->rasterizer->state.line_stipple_pattern,
755 ilo->rasterizer->state.line_stipple_factor + 1, p->cp);
756 }
757
758 /* 3DSTATE_AA_LINE_PARAMETERS */
759 if (DIRTY(RASTERIZER) && ilo->rasterizer->state.line_smooth) {
760 if (p->dev->gen == ILO_GEN(6))
761 gen6_wa_pipe_control_post_sync(p, false);
762
763 p->gen6_3DSTATE_AA_LINE_PARAMETERS(p->dev, p->cp);
764 }
765 }
766
767 static void
768 gen6_pipeline_state_viewports(struct ilo_3d_pipeline *p,
769 const struct ilo_context *ilo,
770 struct gen6_pipeline_session *session)
771 {
772 /* SF_CLIP_VIEWPORT and CC_VIEWPORT */
773 if (p->dev->gen >= ILO_GEN(7) && DIRTY(VIEWPORT)) {
774 p->state.SF_CLIP_VIEWPORT = p->gen7_SF_CLIP_VIEWPORT(p->dev,
775 ilo->viewport.cso, ilo->viewport.count, p->cp);
776
777 p->state.CC_VIEWPORT = p->gen6_CC_VIEWPORT(p->dev,
778 ilo->viewport.cso, ilo->viewport.count, p->cp);
779
780 session->viewport_state_changed = true;
781 }
782 /* SF_VIEWPORT, CLIP_VIEWPORT, and CC_VIEWPORT */
783 else if (DIRTY(VIEWPORT)) {
784 p->state.CLIP_VIEWPORT = p->gen6_CLIP_VIEWPORT(p->dev,
785 ilo->viewport.cso, ilo->viewport.count, p->cp);
786
787 p->state.SF_VIEWPORT = p->gen6_SF_VIEWPORT(p->dev,
788 ilo->viewport.cso, ilo->viewport.count, p->cp);
789
790 p->state.CC_VIEWPORT = p->gen6_CC_VIEWPORT(p->dev,
791 ilo->viewport.cso, ilo->viewport.count, p->cp);
792
793 session->viewport_state_changed = true;
794 }
795 }
796
797 static void
798 gen6_pipeline_state_cc(struct ilo_3d_pipeline *p,
799 const struct ilo_context *ilo,
800 struct gen6_pipeline_session *session)
801 {
802 /* BLEND_STATE */
803 if (DIRTY(BLEND) || DIRTY(FRAMEBUFFER) || DIRTY(DEPTH_STENCIL_ALPHA)) {
804 p->state.BLEND_STATE = p->gen6_BLEND_STATE(p->dev,
805 ilo->blend, &ilo->fb, &ilo->dsa->state.alpha, p->cp);
806
807 session->cc_state_blend_changed = true;
808 }
809
810 /* COLOR_CALC_STATE */
811 if (DIRTY(DEPTH_STENCIL_ALPHA) || DIRTY(STENCIL_REF) || DIRTY(BLEND_COLOR)) {
812 p->state.COLOR_CALC_STATE =
813 p->gen6_COLOR_CALC_STATE(p->dev, &ilo->stencil_ref,
814 ilo->dsa->state.alpha.ref_value, &ilo->blend_color, p->cp);
815
816 session->cc_state_cc_changed = true;
817 }
818
819 /* DEPTH_STENCIL_STATE */
820 if (DIRTY(DEPTH_STENCIL_ALPHA)) {
821 p->state.DEPTH_STENCIL_STATE =
822 p->gen6_DEPTH_STENCIL_STATE(p->dev, &ilo->dsa->state, p->cp);
823
824 session->cc_state_dsa_changed = true;
825 }
826 }
827
828 static void
829 gen6_pipeline_state_scissors(struct ilo_3d_pipeline *p,
830 const struct ilo_context *ilo,
831 struct gen6_pipeline_session *session)
832 {
833 /* SCISSOR_RECT */
834 if (DIRTY(SCISSOR) || DIRTY(VIEWPORT)) {
835 /* there should be as many scissors as there are viewports */
836 p->state.SCISSOR_RECT = p->gen6_SCISSOR_RECT(p->dev,
837 &ilo->scissor, ilo->viewport.count, p->cp);
838
839 session->scissor_state_changed = true;
840 }
841 }
842
843 static void
844 gen6_pipeline_state_surfaces_rt(struct ilo_3d_pipeline *p,
845 const struct ilo_context *ilo,
846 struct gen6_pipeline_session *session)
847 {
848 /* SURFACE_STATEs for render targets */
849 if (DIRTY(FRAMEBUFFER)) {
850 const int offset = ILO_WM_DRAW_SURFACE(0);
851 uint32_t *surface_state = &p->state.wm.SURFACE_STATE[offset];
852 int i;
853
854 for (i = 0; i < ilo->fb.state.nr_cbufs; i++) {
855 const struct pipe_surface *surface = ilo->fb.state.cbufs[i];
856
857 assert(surface);
858 surface_state[i] =
859 p->gen6_surf_SURFACE_STATE(p->dev, surface, p->cp);
860 }
861
862 /*
863 * Upload at least one render target, as
864 * brw_update_renderbuffer_surfaces() does. I don't know why.
865 */
866 if (i == 0) {
867 struct pipe_surface null_surface;
868
869 memset(&null_surface, 0, sizeof(null_surface));
870 null_surface.width = ilo->fb.state.width;
871 null_surface.height = ilo->fb.state.height;
872
873 surface_state[i] =
874 p->gen6_surf_SURFACE_STATE(p->dev, &null_surface, p->cp);
875
876 i++;
877 }
878
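      /* clear the remaining, unused entries */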
879 memset(&surface_state[i], 0, (ILO_MAX_DRAW_BUFFERS - i) * 4);
880
881 if (i && session->num_surfaces[PIPE_SHADER_FRAGMENT] < offset + i)
882 session->num_surfaces[PIPE_SHADER_FRAGMENT] = offset + i;
883
884 session->binding_table_fs_changed = true;
885 }
886 }
887
888 static void
889 gen6_pipeline_state_surfaces_so(struct ilo_3d_pipeline *p,
890 const struct ilo_context *ilo,
891 struct gen6_pipeline_session *session)
892 {
893 const struct ilo_shader_state *vs = ilo->vs;
894 const struct ilo_shader_state *gs = ilo->gs;
895 const struct pipe_stream_output_target **so_targets =
896 (const struct pipe_stream_output_target **) ilo->so.states;
897 const int num_so_targets = ilo->so.count;
898
899 if (p->dev->gen != ILO_GEN(6))
900 return;
901
902 /* SURFACE_STATEs for stream output targets */
903 if (DIRTY(VS) || DIRTY(GS) || DIRTY(STREAM_OUTPUT_TARGETS)) {
904 const struct pipe_stream_output_info *so_info =
905 (gs) ? &gs->info.stream_output :
906 (vs) ? &vs->info.stream_output : NULL;
907 const int offset = ILO_GS_SO_SURFACE(0);
908 uint32_t *surface_state = &p->state.gs.SURFACE_STATE[offset];
909 int i;
910
911 for (i = 0; so_info && i < so_info->num_outputs; i++) {
912 const int target = so_info->output[i].output_buffer;
913 const struct pipe_stream_output_target *so_target =
914 (target < num_so_targets) ? so_targets[target] : NULL;
915
916 if (so_target) {
917 surface_state[i] = p->gen6_so_SURFACE_STATE(p->dev,
918 so_target, so_info, i, p->cp);
919 }
920 else {
921 surface_state[i] = 0;
922 }
923 }
924
925 memset(&surface_state[i], 0, (ILO_MAX_SO_BINDINGS - i) * 4);
926
927 if (i && session->num_surfaces[PIPE_SHADER_GEOMETRY] < offset + i)
928 session->num_surfaces[PIPE_SHADER_GEOMETRY] = offset + i;
929
930 session->binding_table_gs_changed = true;
931 }
932 }
933
934 static void
935 gen6_pipeline_state_surfaces_view(struct ilo_3d_pipeline *p,
936 const struct ilo_context *ilo,
937 int shader_type,
938 struct gen6_pipeline_session *session)
939 {
940 const struct pipe_sampler_view **views =
941 (const struct pipe_sampler_view **) ilo->view[shader_type].states;
942 const int num_views = ilo->view[shader_type].count;
943 uint32_t *surface_state;
944 int offset, i;
945 bool skip = false;
946
947 /* SURFACE_STATEs for sampler views */
948 switch (shader_type) {
949 case PIPE_SHADER_VERTEX:
950 if (DIRTY(VERTEX_SAMPLER_VIEWS)) {
951 offset = ILO_VS_TEXTURE_SURFACE(0);
952 surface_state = &p->state.vs.SURFACE_STATE[offset];
953
954 session->binding_table_vs_changed = true;
955 }
956 else {
957 skip = true;
958 }
959 break;
960 case PIPE_SHADER_FRAGMENT:
961 if (DIRTY(FRAGMENT_SAMPLER_VIEWS)) {
962 offset = ILO_WM_TEXTURE_SURFACE(0);
963 surface_state = &p->state.wm.SURFACE_STATE[offset];
964
965 session->binding_table_fs_changed = true;
966 }
967 else {
968 skip = true;
969 }
970 break;
971 default:
972 skip = true;
973 break;
974 }
975
976 if (skip)
977 return;
978
979 for (i = 0; i < num_views; i++) {
980 if (views[i]) {
981 surface_state[i] =
982 p->gen6_view_SURFACE_STATE(p->dev, views[i], p->cp);
983 }
984 else {
985 surface_state[i] = 0;
986 }
987 }
988
989 memset(&surface_state[i], 0, (ILO_MAX_SAMPLER_VIEWS - i) * 4);
990
991 if (i && session->num_surfaces[shader_type] < offset + i)
992 session->num_surfaces[shader_type] = offset + i;
993 }
994
995 static void
996 gen6_pipeline_state_surfaces_const(struct ilo_3d_pipeline *p,
997 const struct ilo_context *ilo,
998 int shader_type,
999 struct gen6_pipeline_session *session)
1000 {
1001 const struct pipe_constant_buffer *buffers =
1002 ilo->cbuf[shader_type].states;
1003 const int num_buffers = ilo->cbuf[shader_type].count;
1004 uint32_t *surface_state;
1005 int offset, i;
1006 bool skip = false;
1007
1008 /* SURFACE_STATEs for constant buffers */
1009 switch (shader_type) {
1010 case PIPE_SHADER_VERTEX:
1011 if (DIRTY(CONSTANT_BUFFER)) {
1012 offset = ILO_VS_CONST_SURFACE(0);
1013 surface_state = &p->state.vs.SURFACE_STATE[offset];
1014
1015 session->binding_table_vs_changed = true;
1016 }
1017 else {
1018 skip = true;
1019 }
1020 break;
1021 case PIPE_SHADER_FRAGMENT:
1022 if (DIRTY(CONSTANT_BUFFER)) {
1023 offset = ILO_WM_CONST_SURFACE(0);
1024 surface_state = &p->state.wm.SURFACE_STATE[offset];
1025
1026 session->binding_table_fs_changed = true;
1027 }
1028 else {
1029 skip = true;
1030 }
1031 break;
1032 default:
1033 skip = true;
1034 break;
1035 }
1036
1037 if (skip)
1038 return;
1039
1040 for (i = 0; i < num_buffers; i++) {
1041 if (buffers[i].buffer) {
1042 surface_state[i] =
1043 p->gen6_cbuf_SURFACE_STATE(p->dev, &buffers[i], p->cp);
1044 }
1045 else {
1046 surface_state[i] = 0;
1047 }
1048 }
1049
1050 memset(&surface_state[i], 0, (ILO_MAX_CONST_BUFFERS - i) * 4);
1051
1052 if (i && session->num_surfaces[shader_type] < offset + i)
1053 session->num_surfaces[shader_type] = offset + i;
1054 }
1055
1056 static void
1057 gen6_pipeline_state_binding_tables(struct ilo_3d_pipeline *p,
1058 const struct ilo_context *ilo,
1059 int shader_type,
1060 struct gen6_pipeline_session *session)
1061 {
1062 uint32_t *binding_table_state, *surface_state;
1063 int *binding_table_state_size, size;
1064 bool skip = false;
1065
1066 /* BINDING_TABLE_STATE */
1067 switch (shader_type) {
1068 case PIPE_SHADER_VERTEX:
1069 surface_state = p->state.vs.SURFACE_STATE;
1070 binding_table_state = &p->state.vs.BINDING_TABLE_STATE;
1071 binding_table_state_size = &p->state.vs.BINDING_TABLE_STATE_size;
1072
1073 skip = !session->binding_table_vs_changed;
1074 break;
1075 case PIPE_SHADER_GEOMETRY:
1076 surface_state = p->state.gs.SURFACE_STATE;
1077 binding_table_state = &p->state.gs.BINDING_TABLE_STATE;
1078 binding_table_state_size = &p->state.gs.BINDING_TABLE_STATE_size;
1079
1080 skip = !session->binding_table_gs_changed;
1081 break;
1082 case PIPE_SHADER_FRAGMENT:
1083 surface_state = p->state.wm.SURFACE_STATE;
1084 binding_table_state = &p->state.wm.BINDING_TABLE_STATE;
1085 binding_table_state_size = &p->state.wm.BINDING_TABLE_STATE_size;
1086
1087 skip = !session->binding_table_fs_changed;
1088 break;
1089 default:
1090 skip = true;
1091 break;
1092 }
1093
1094 if (skip)
1095 return;
1096
1097 /*
1098     * If we seemingly have fewer SURFACE_STATEs than before, it could be that
1099     * we did not touch those residing at the tail in this upload.  Loop over
1100     * them to figure out the real number of SURFACE_STATEs.
1101 */
1102 for (size = *binding_table_state_size;
1103 size > session->num_surfaces[shader_type]; size--) {
1104 if (surface_state[size - 1])
1105 break;
1106 }
1107 if (size < session->num_surfaces[shader_type])
1108 size = session->num_surfaces[shader_type];
1109
1110 *binding_table_state = p->gen6_BINDING_TABLE_STATE(p->dev,
1111 surface_state, size, p->cp);
1112 *binding_table_state_size = size;
1113 }
1114
1115 static void
1116 gen6_pipeline_state_samplers(struct ilo_3d_pipeline *p,
1117 const struct ilo_context *ilo,
1118 int shader_type,
1119 struct gen6_pipeline_session *session)
1120 {
1121 const struct ilo_sampler_cso * const *samplers =
1122 ilo->sampler[shader_type].cso;
1123 const struct pipe_sampler_view **views =
1124 (const struct pipe_sampler_view **) ilo->view[shader_type].states;
1125 const int num_samplers = ilo->sampler[shader_type].count;
1126 const int num_views = ilo->view[shader_type].count;
1127 uint32_t *sampler_state, *border_color_state;
1128 bool emit_border_color = false;
1129 bool skip = false;
1130
1131 /* SAMPLER_BORDER_COLOR_STATE and SAMPLER_STATE */
1132 switch (shader_type) {
1133 case PIPE_SHADER_VERTEX:
1134 if (DIRTY(VERTEX_SAMPLERS) || DIRTY(VERTEX_SAMPLER_VIEWS)) {
1135 sampler_state = &p->state.vs.SAMPLER_STATE;
1136 border_color_state = p->state.vs.SAMPLER_BORDER_COLOR_STATE;
1137
1138 if (DIRTY(VERTEX_SAMPLERS))
1139 emit_border_color = true;
1140
1141 session->sampler_state_vs_changed = true;
1142 }
1143 else {
1144 skip = true;
1145 }
1146 break;
1147 case PIPE_SHADER_FRAGMENT:
1148 if (DIRTY(FRAGMENT_SAMPLERS) || DIRTY(FRAGMENT_SAMPLER_VIEWS)) {
1149 sampler_state = &p->state.wm.SAMPLER_STATE;
1150 border_color_state = p->state.wm.SAMPLER_BORDER_COLOR_STATE;
1151
1152 if (DIRTY(FRAGMENT_SAMPLERS))
1153 emit_border_color = true;
1154
1155 session->sampler_state_fs_changed = true;
1156 }
1157 else {
1158 skip = true;
1159 }
1160 break;
1161 default:
1162 skip = true;
1163 break;
1164 }
1165
1166 if (skip)
1167 return;
1168
1169 if (emit_border_color) {
1170 int i;
1171
1172 for (i = 0; i < num_samplers; i++) {
1173 border_color_state[i] = (samplers[i]) ?
1174 p->gen6_SAMPLER_BORDER_COLOR_STATE(p->dev,
1175 samplers[i], p->cp) : 0;
1176 }
1177 }
1178
1179 /* should we take the minimum of num_samplers and num_views? */
1180 *sampler_state = p->gen6_SAMPLER_STATE(p->dev,
1181 samplers, views,
1182 border_color_state,
1183 MIN2(num_samplers, num_views), p->cp);
1184 }
1185
1186 static void
1187 gen6_pipeline_state_pcb(struct ilo_3d_pipeline *p,
1188 const struct ilo_context *ilo,
1189 struct gen6_pipeline_session *session)
1190 {
1191 /* push constant buffer for VS */
1192 if (DIRTY(VS) || DIRTY(CLIP)) {
1193 const struct ilo_shader *vs = (ilo->vs)? ilo->vs->shader : NULL;
1194
1195 if (vs && vs->pcb.clip_state_size) {
1196 void *pcb;
1197
1198 p->state.vs.PUSH_CONSTANT_BUFFER_size = vs->pcb.clip_state_size;
1199 p->state.vs.PUSH_CONSTANT_BUFFER =
1200 p->gen6_push_constant_buffer(p->dev,
1201 p->state.vs.PUSH_CONSTANT_BUFFER_size, &pcb, p->cp);
1202
1203 memcpy(pcb, &ilo->clip, vs->pcb.clip_state_size);
1204 }
1205 else {
1206 p->state.vs.PUSH_CONSTANT_BUFFER_size = 0;
1207 p->state.vs.PUSH_CONSTANT_BUFFER = 0;
1208 }
1209
1210 session->pcb_state_vs_changed = true;
1211 }
1212 }
1213
1214 #undef DIRTY
1215
1216 static void
1217 gen6_pipeline_commands(struct ilo_3d_pipeline *p,
1218 const struct ilo_context *ilo,
1219 struct gen6_pipeline_session *session)
1220 {
1221 /*
1222     * We try to make the order of the commands match, as closely as possible,
1223     * that of the classic i965 driver.  That allows the command streams to be
1224     * compared easily.
1225 */
1226 gen6_pipeline_common_select(p, ilo, session);
1227 gen6_pipeline_gs_svbi(p, ilo, session);
1228 gen6_pipeline_common_sip(p, ilo, session);
1229 gen6_pipeline_vf_statistics(p, ilo, session);
1230 gen6_pipeline_common_base_address(p, ilo, session);
1231 gen6_pipeline_common_pointers_1(p, ilo, session);
1232 gen6_pipeline_common_urb(p, ilo, session);
1233 gen6_pipeline_common_pointers_2(p, ilo, session);
1234 gen6_pipeline_wm_multisample(p, ilo, session);
1235 gen6_pipeline_vs(p, ilo, session);
1236 gen6_pipeline_gs(p, ilo, session);
1237 gen6_pipeline_clip(p, ilo, session);
1238 gen6_pipeline_sf(p, ilo, session);
1239 gen6_pipeline_wm(p, ilo, session);
1240 gen6_pipeline_common_pointers_3(p, ilo, session);
1241 gen6_pipeline_wm_depth(p, ilo, session);
1242 gen6_pipeline_wm_raster(p, ilo, session);
1243 gen6_pipeline_sf_rect(p, ilo, session);
1244 gen6_pipeline_vf(p, ilo, session);
1245 gen6_pipeline_vf_draw(p, ilo, session);
1246 }
1247
1248 void
1249 gen6_pipeline_states(struct ilo_3d_pipeline *p,
1250 const struct ilo_context *ilo,
1251 struct gen6_pipeline_session *session)
1252 {
1253 int shader_type;
1254
1255 gen6_pipeline_state_viewports(p, ilo, session);
1256 gen6_pipeline_state_cc(p, ilo, session);
1257 gen6_pipeline_state_scissors(p, ilo, session);
1258 gen6_pipeline_state_pcb(p, ilo, session);
1259
1260 /*
1261     * upload all SURFACE_STATEs together so that we know there is minimal
1262     * padding
1263 */
1264 gen6_pipeline_state_surfaces_rt(p, ilo, session);
1265 gen6_pipeline_state_surfaces_so(p, ilo, session);
1266 for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
1267 gen6_pipeline_state_surfaces_view(p, ilo, shader_type, session);
1268 gen6_pipeline_state_surfaces_const(p, ilo, shader_type, session);
1269 }
1270
1271 for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
1272 gen6_pipeline_state_samplers(p, ilo, shader_type, session);
1273 /* this must be called after all SURFACE_STATEs are uploaded */
1274 gen6_pipeline_state_binding_tables(p, ilo, shader_type, session);
1275 }
1276 }
1277
1278 void
1279 gen6_pipeline_prepare(const struct ilo_3d_pipeline *p,
1280 const struct ilo_context *ilo,
1281 const struct pipe_draw_info *info,
1282 struct gen6_pipeline_session *session)
1283 {
1284 memset(session, 0, sizeof(*session));
1285 session->info = info;
1286 session->pipe_dirty = ilo->dirty;
1287 session->reduced_prim = u_reduced_prim(info->mode);
1288
1289 /* available space before the session */
1290 session->init_cp_space = ilo_cp_space(p->cp);
1291
1292 session->hw_ctx_changed =
1293 (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_HW);
1294
1295 if (session->hw_ctx_changed) {
1296       /* these should be enough to force everything to be uploaded */
1297 session->state_bo_changed = true;
1298 session->instruction_bo_changed = true;
1299 session->prim_changed = true;
1300 }
1301 else {
1302 session->state_bo_changed =
1303 (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_STATE_BO);
1304 session->instruction_bo_changed =
1305 (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_KERNEL_BO);
1306 session->prim_changed = (p->state.reduced_prim != session->reduced_prim);
1307 }
1308 }
1309
1310 void
1311 gen6_pipeline_draw(struct ilo_3d_pipeline *p,
1312 const struct ilo_context *ilo,
1313 struct gen6_pipeline_session *session)
1314 {
1315 /* force all states to be uploaded if the state bo changed */
1316 if (session->state_bo_changed)
1317 session->pipe_dirty = ILO_DIRTY_ALL;
1318 else
1319 session->pipe_dirty = ilo->dirty;
1320
1321 session->emit_draw_states(p, ilo, session);
1322
1323 /* force all commands to be uploaded if the HW context changed */
1324 if (session->hw_ctx_changed)
1325 session->pipe_dirty = ILO_DIRTY_ALL;
1326 else
1327 session->pipe_dirty = ilo->dirty;
1328
1329 session->emit_draw_commands(p, ilo, session);
1330 }
1331
1332 void
1333 gen6_pipeline_end(struct ilo_3d_pipeline *p,
1334 const struct ilo_context *ilo,
1335 struct gen6_pipeline_session *session)
1336 {
1337 int used, estimate;
1338
1339 /* sanity check size estimation */
1340 used = session->init_cp_space - ilo_cp_space(p->cp);
1341 estimate = ilo_3d_pipeline_estimate_size(p, ILO_3D_PIPELINE_DRAW, ilo);
1342 assert(used <= estimate);
1343
1344 p->state.reduced_prim = session->reduced_prim;
1345 }
1346
1347 static void
1348 ilo_3d_pipeline_emit_draw_gen6(struct ilo_3d_pipeline *p,
1349 const struct ilo_context *ilo,
1350 const struct pipe_draw_info *info)
1351 {
1352 struct gen6_pipeline_session session;
1353
1354 gen6_pipeline_prepare(p, ilo, info, &session);
1355
1356 session.emit_draw_states = gen6_pipeline_states;
1357 session.emit_draw_commands = gen6_pipeline_commands;
1358
1359 gen6_pipeline_draw(p, ilo, &session);
1360 gen6_pipeline_end(p, ilo, &session);
1361 }
1362
1363 void
1364 ilo_3d_pipeline_emit_flush_gen6(struct ilo_3d_pipeline *p)
1365 {
1366 if (p->dev->gen == ILO_GEN(6))
1367 gen6_wa_pipe_control_post_sync(p, false);
1368
1369 p->gen6_PIPE_CONTROL(p->dev,
1370 PIPE_CONTROL_INSTRUCTION_FLUSH |
1371 PIPE_CONTROL_WRITE_FLUSH |
1372 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
1373 PIPE_CONTROL_VF_CACHE_INVALIDATE |
1374 PIPE_CONTROL_TC_FLUSH |
1375 PIPE_CONTROL_NO_WRITE |
1376 PIPE_CONTROL_CS_STALL,
1377 0, 0, false, p->cp);
1378 }
1379
1380 void
1381 ilo_3d_pipeline_emit_write_timestamp_gen6(struct ilo_3d_pipeline *p,
1382 struct intel_bo *bo, int index)
1383 {
1384 if (p->dev->gen == ILO_GEN(6))
1385 gen6_wa_pipe_control_post_sync(p, true);
1386
1387 p->gen6_PIPE_CONTROL(p->dev,
1388 PIPE_CONTROL_WRITE_TIMESTAMP,
1389 bo, index * sizeof(uint64_t) | PIPE_CONTROL_GLOBAL_GTT_WRITE,
1390 true, p->cp);
1391 }
1392
1393 void
1394 ilo_3d_pipeline_emit_write_depth_count_gen6(struct ilo_3d_pipeline *p,
1395 struct intel_bo *bo, int index)
1396 {
1397 if (p->dev->gen == ILO_GEN(6))
1398 gen6_wa_pipe_control_post_sync(p, false);
1399
1400 p->gen6_PIPE_CONTROL(p->dev,
1401 PIPE_CONTROL_DEPTH_STALL |
1402 PIPE_CONTROL_WRITE_DEPTH_COUNT,
1403 bo, index * sizeof(uint64_t) | PIPE_CONTROL_GLOBAL_GTT_WRITE,
1404 true, p->cp);
1405 }
1406
1407 static int
1408 gen6_pipeline_estimate_commands(const struct ilo_3d_pipeline *p,
1409 const struct ilo_gpe_gen6 *gen6,
1410 const struct ilo_context *ilo)
1411 {
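   /* the command sizes do not depend on the context; estimate once and cache the result */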
1412 static int size;
1413 enum ilo_gpe_gen6_command cmd;
1414
1415 if (size)
1416 return size;
1417
1418 for (cmd = 0; cmd < ILO_GPE_GEN6_COMMAND_COUNT; cmd++) {
1419 int count;
1420
1421 switch (cmd) {
1422 case ILO_GPE_GEN6_PIPE_CONTROL:
1423 /* for the workaround */
1424 count = 2;
1425 /* another one after 3DSTATE_URB */
1426 count += 1;
1427 /* and another one after 3DSTATE_CONSTANT_VS */
1428 count += 1;
1429 break;
1430 case ILO_GPE_GEN6_3DSTATE_GS_SVB_INDEX:
1431 /* there are 4 SVBIs */
1432 count = 4;
1433 break;
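      /* worst case: assume the maximum numbers of vertex buffers and vertex elements */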
1434 case ILO_GPE_GEN6_3DSTATE_VERTEX_BUFFERS:
1435 count = 33;
1436 break;
1437 case ILO_GPE_GEN6_3DSTATE_VERTEX_ELEMENTS:
1438 count = 34;
1439 break;
1440 case ILO_GPE_GEN6_MEDIA_VFE_STATE:
1441 case ILO_GPE_GEN6_MEDIA_CURBE_LOAD:
1442 case ILO_GPE_GEN6_MEDIA_INTERFACE_DESCRIPTOR_LOAD:
1443 case ILO_GPE_GEN6_MEDIA_GATEWAY_STATE:
1444 case ILO_GPE_GEN6_MEDIA_STATE_FLUSH:
1445 case ILO_GPE_GEN6_MEDIA_OBJECT_WALKER:
1446 /* media commands */
1447 count = 0;
1448 break;
1449 default:
1450 count = 1;
1451 break;
1452 }
1453
1454 if (count)
1455 size += gen6->estimate_command_size(p->dev, cmd, count);
1456 }
1457
1458 return size;
1459 }
1460
1461 static int
1462 gen6_pipeline_estimate_states(const struct ilo_3d_pipeline *p,
1463 const struct ilo_gpe_gen6 *gen6,
1464 const struct ilo_context *ilo)
1465 {
1466 static int static_size;
1467 int shader_type, count, size;
1468
1469 if (!static_size) {
1470 struct {
1471 enum ilo_gpe_gen6_state state;
1472 int count;
1473 } static_states[] = {
1474 /* viewports */
1475 { ILO_GPE_GEN6_SF_VIEWPORT, 1 },
1476 { ILO_GPE_GEN6_CLIP_VIEWPORT, 1 },
1477 { ILO_GPE_GEN6_CC_VIEWPORT, 1 },
1478 /* cc */
1479 { ILO_GPE_GEN6_COLOR_CALC_STATE, 1 },
1480 { ILO_GPE_GEN6_BLEND_STATE, ILO_MAX_DRAW_BUFFERS },
1481 { ILO_GPE_GEN6_DEPTH_STENCIL_STATE, 1 },
1482 /* scissors */
1483 { ILO_GPE_GEN6_SCISSOR_RECT, 1 },
1484 /* binding table (vs, gs, fs) */
1485 { ILO_GPE_GEN6_BINDING_TABLE_STATE, ILO_MAX_VS_SURFACES },
1486 { ILO_GPE_GEN6_BINDING_TABLE_STATE, ILO_MAX_GS_SURFACES },
1487 { ILO_GPE_GEN6_BINDING_TABLE_STATE, ILO_MAX_WM_SURFACES },
1488 };
1489 int i;
1490
1491 for (i = 0; i < Elements(static_states); i++) {
1492 static_size += gen6->estimate_state_size(p->dev,
1493 static_states[i].state,
1494 static_states[i].count);
1495 }
1496 }
1497
1498 size = static_size;
1499
1500 /*
1501 * render targets (fs)
1502 * stream outputs (gs)
1503 * sampler views (vs, fs)
1504 * constant buffers (vs, fs)
1505 */
1506 count = ilo->fb.state.nr_cbufs;
1507
1508 if (ilo->gs)
1509 count += ilo->gs->info.stream_output.num_outputs;
1510 else if (ilo->vs)
1511 count += ilo->vs->info.stream_output.num_outputs;
1512
1513 for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
1514 count += ilo->view[shader_type].count;
1515 count += ilo->cbuf[shader_type].count;
1516 }
1517
1518 if (count) {
1519 size += gen6->estimate_state_size(p->dev,
1520 ILO_GPE_GEN6_SURFACE_STATE, count);
1521 }
1522
1523 /* samplers (vs, fs) */
1524 for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
1525 count = ilo->sampler[shader_type].count;
1526 if (count) {
1527 size += gen6->estimate_state_size(p->dev,
1528 ILO_GPE_GEN6_SAMPLER_BORDER_COLOR_STATE, count);
1529 size += gen6->estimate_state_size(p->dev,
1530 ILO_GPE_GEN6_SAMPLER_STATE, count);
1531 }
1532 }
1533
1534 /* pcb (vs) */
1535 if (ilo->vs && ilo->vs->shader->pcb.clip_state_size) {
1536 const int pcb_size = ilo->vs->shader->pcb.clip_state_size;
1537
1538 size += gen6->estimate_state_size(p->dev,
1539 ILO_GPE_GEN6_PUSH_CONSTANT_BUFFER, pcb_size);
1540 }
1541
1542 return size;
1543 }
1544
1545 static int
1546 ilo_3d_pipeline_estimate_size_gen6(struct ilo_3d_pipeline *p,
1547 enum ilo_3d_pipeline_action action,
1548 const void *arg)
1549 {
1550 const struct ilo_gpe_gen6 *gen6 = ilo_gpe_gen6_get();
1551 int size;
1552
1553 switch (action) {
1554 case ILO_3D_PIPELINE_DRAW:
1555 {
1556 const struct ilo_context *ilo = arg;
1557
1558 size = gen6_pipeline_estimate_commands(p, gen6, ilo) +
1559 gen6_pipeline_estimate_states(p, gen6, ilo);
1560 }
1561 break;
1562 case ILO_3D_PIPELINE_FLUSH:
1563 size = gen6->estimate_command_size(p->dev,
1564 ILO_GPE_GEN6_PIPE_CONTROL, 1) * 3;
1565 break;
1566 case ILO_3D_PIPELINE_WRITE_TIMESTAMP:
1567 size = gen6->estimate_command_size(p->dev,
1568 ILO_GPE_GEN6_PIPE_CONTROL, 1) * 2;
1569 break;
1570 case ILO_3D_PIPELINE_WRITE_DEPTH_COUNT:
1571 size = gen6->estimate_command_size(p->dev,
1572 ILO_GPE_GEN6_PIPE_CONTROL, 1) * 3;
1573 break;
1574 default:
1575 assert(!"unknown 3D pipeline action");
1576 size = 0;
1577 break;
1578 }
1579
1580 return size;
1581 }
1582
1583 void
1584 ilo_3d_pipeline_init_gen6(struct ilo_3d_pipeline *p)
1585 {
1586 const struct ilo_gpe_gen6 *gen6 = ilo_gpe_gen6_get();
1587
1588 p->estimate_size = ilo_3d_pipeline_estimate_size_gen6;
1589 p->emit_draw = ilo_3d_pipeline_emit_draw_gen6;
1590 p->emit_flush = ilo_3d_pipeline_emit_flush_gen6;
1591 p->emit_write_timestamp = ilo_3d_pipeline_emit_write_timestamp_gen6;
1592 p->emit_write_depth_count = ilo_3d_pipeline_emit_write_depth_count_gen6;
1593
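/* hook up the GEN6 command and state emission functions provided by the GPE */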
1594 #define GEN6_USE(p, name, from) \
1595 p->gen6_ ## name = from->emit_ ## name
1596 GEN6_USE(p, STATE_BASE_ADDRESS, gen6);
1597 GEN6_USE(p, STATE_SIP, gen6);
1598 GEN6_USE(p, PIPELINE_SELECT, gen6);
1599 GEN6_USE(p, 3DSTATE_BINDING_TABLE_POINTERS, gen6);
1600 GEN6_USE(p, 3DSTATE_SAMPLER_STATE_POINTERS, gen6);
1601 GEN6_USE(p, 3DSTATE_URB, gen6);
1602 GEN6_USE(p, 3DSTATE_VERTEX_BUFFERS, gen6);
1603 GEN6_USE(p, 3DSTATE_VERTEX_ELEMENTS, gen6);
1604 GEN6_USE(p, 3DSTATE_INDEX_BUFFER, gen6);
1605 GEN6_USE(p, 3DSTATE_VF_STATISTICS, gen6);
1606 GEN6_USE(p, 3DSTATE_VIEWPORT_STATE_POINTERS, gen6);
1607 GEN6_USE(p, 3DSTATE_CC_STATE_POINTERS, gen6);
1608 GEN6_USE(p, 3DSTATE_SCISSOR_STATE_POINTERS, gen6);
1609 GEN6_USE(p, 3DSTATE_VS, gen6);
1610 GEN6_USE(p, 3DSTATE_GS, gen6);
1611 GEN6_USE(p, 3DSTATE_CLIP, gen6);
1612 GEN6_USE(p, 3DSTATE_SF, gen6);
1613 GEN6_USE(p, 3DSTATE_WM, gen6);
1614 GEN6_USE(p, 3DSTATE_CONSTANT_VS, gen6);
1615 GEN6_USE(p, 3DSTATE_CONSTANT_GS, gen6);
1616 GEN6_USE(p, 3DSTATE_CONSTANT_PS, gen6);
1617 GEN6_USE(p, 3DSTATE_SAMPLE_MASK, gen6);
1618 GEN6_USE(p, 3DSTATE_DRAWING_RECTANGLE, gen6);
1619 GEN6_USE(p, 3DSTATE_DEPTH_BUFFER, gen6);
1620 GEN6_USE(p, 3DSTATE_POLY_STIPPLE_OFFSET, gen6);
1621 GEN6_USE(p, 3DSTATE_POLY_STIPPLE_PATTERN, gen6);
1622 GEN6_USE(p, 3DSTATE_LINE_STIPPLE, gen6);
1623 GEN6_USE(p, 3DSTATE_AA_LINE_PARAMETERS, gen6);
1624 GEN6_USE(p, 3DSTATE_GS_SVB_INDEX, gen6);
1625 GEN6_USE(p, 3DSTATE_MULTISAMPLE, gen6);
1626 GEN6_USE(p, 3DSTATE_STENCIL_BUFFER, gen6);
1627 GEN6_USE(p, 3DSTATE_HIER_DEPTH_BUFFER, gen6);
1628 GEN6_USE(p, 3DSTATE_CLEAR_PARAMS, gen6);
1629 GEN6_USE(p, PIPE_CONTROL, gen6);
1630 GEN6_USE(p, 3DPRIMITIVE, gen6);
1631 GEN6_USE(p, INTERFACE_DESCRIPTOR_DATA, gen6);
1632 GEN6_USE(p, SF_VIEWPORT, gen6);
1633 GEN6_USE(p, CLIP_VIEWPORT, gen6);
1634 GEN6_USE(p, CC_VIEWPORT, gen6);
1635 GEN6_USE(p, COLOR_CALC_STATE, gen6);
1636 GEN6_USE(p, BLEND_STATE, gen6);
1637 GEN6_USE(p, DEPTH_STENCIL_STATE, gen6);
1638 GEN6_USE(p, SCISSOR_RECT, gen6);
1639 GEN6_USE(p, BINDING_TABLE_STATE, gen6);
1640 GEN6_USE(p, surf_SURFACE_STATE, gen6);
1641 GEN6_USE(p, view_SURFACE_STATE, gen6);
1642 GEN6_USE(p, cbuf_SURFACE_STATE, gen6);
1643 GEN6_USE(p, so_SURFACE_STATE, gen6);
1644 GEN6_USE(p, SAMPLER_STATE, gen6);
1645 GEN6_USE(p, SAMPLER_BORDER_COLOR_STATE, gen6);
1646 GEN6_USE(p, push_constant_buffer, gen6);
1647 #undef GEN6_USE
1648 }