72e87d4efa74d0d6b7d32348cee60d76e037c8c4
[mesa.git] / src / gallium / drivers / ilo / ilo_3d_pipeline_gen6.c
1 /*
2 * Mesa 3-D graphics library
3 *
4 * Copyright (C) 2013 LunarG, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included
14 * in all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Authors:
25 * Chia-I Wu <olv@lunarg.com>
26 */
27
28 #include "util/u_dual_blend.h"
29 #include "util/u_prim.h"
30 #include "intel_reg.h"
31
32 #include "shader/ilo_shader_internal.h"
33 #include "ilo_3d.h"
34 #include "ilo_context.h"
35 #include "ilo_cp.h"
36 #include "ilo_gpe_gen6.h"
37 #include "ilo_shader.h"
38 #include "ilo_state.h"
39 #include "ilo_3d_pipeline.h"
40 #include "ilo_3d_pipeline_gen6.h"
41
42 /**
43 * This should be called before any depth stall flush (including those
44 * produced by non-pipelined state commands) or cache flush on GEN6.
45 *
46 * \see intel_emit_post_sync_nonzero_flush()
47 */
/**
 * Emit the GEN6 "post-sync nonzero" PIPE_CONTROL workaround sequence.
 *
 * This should be called before any depth stall flush (including those
 * produced by non-pipelined state commands) or cache flush on GEN6.
 *
 * When \p caller_post_sync is true, the caller emits the post-sync op
 * itself and only the CS-stall prelude is emitted here.
 *
 * \see intel_emit_post_sync_nonzero_flush()
 */
static void
gen6_wa_pipe_control_post_sync(struct ilo_3d_pipeline *p,
                               bool caller_post_sync)
{
   assert(p->dev->gen == ILO_GEN(6));

   /* emit once; the flag is cleared again after each 3DPRIMITIVE */
   if (p->state.has_gen6_wa_pipe_control)
      return;

   p->state.has_gen6_wa_pipe_control = true;

   /*
    * From the Sandy Bridge PRM, volume 2 part 1, page 60:
    *
    *     "Pipe-control with CS-stall bit set must be sent BEFORE the
    *      pipe-control with a post-sync op and no write-cache flushes."
    *
    * The workaround below necessitates this workaround.
    */
   p->gen6_PIPE_CONTROL(p->dev,
         PIPE_CONTROL_CS_STALL |
         PIPE_CONTROL_STALL_AT_SCOREBOARD,
         NULL, 0, false, p->cp);

   /* the caller will emit the post-sync op */
   if (caller_post_sync)
      return;

   /*
    * From the Sandy Bridge PRM, volume 2 part 1, page 60:
    *
    *     "Before any depth stall flush (including those produced by
    *      non-pipelined state commands), software needs to first send a
    *      PIPE_CONTROL with no bits set except Post-Sync Operation != 0."
    *
    *     "Before a PIPE_CONTROL with Write Cache Flush Enable =1, a
    *      PIPE_CONTROL with any non-zero post-sync-op is required."
    */
   p->gen6_PIPE_CONTROL(p->dev,
         PIPE_CONTROL_WRITE_IMMEDIATE,
         p->workaround_bo, 0, false, p->cp);
}
91
92 static void
93 gen6_wa_pipe_control_wm_multisample_flush(struct ilo_3d_pipeline *p)
94 {
95 assert(p->dev->gen == ILO_GEN(6));
96
97 gen6_wa_pipe_control_post_sync(p, false);
98
99 /*
100 * From the Sandy Bridge PRM, volume 2 part 1, page 305:
101 *
102 * "Driver must guarentee that all the caches in the depth pipe are
103 * flushed before this command (3DSTATE_MULTISAMPLE) is parsed. This
104 * requires driver to send a PIPE_CONTROL with a CS stall along with a
105 * Depth Flush prior to this command."
106 */
107 p->gen6_PIPE_CONTROL(p->dev,
108 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
109 PIPE_CONTROL_CS_STALL,
110 0, 0, false, p->cp);
111 }
112
113 static void
114 gen6_wa_pipe_control_wm_depth_flush(struct ilo_3d_pipeline *p)
115 {
116 assert(p->dev->gen == ILO_GEN(6));
117
118 gen6_wa_pipe_control_post_sync(p, false);
119
120 /*
121 * According to intel_emit_depth_stall_flushes() of classic i965, we need
122 * to emit a sequence of PIPE_CONTROLs prior to emitting depth related
123 * commands.
124 */
125 p->gen6_PIPE_CONTROL(p->dev,
126 PIPE_CONTROL_DEPTH_STALL,
127 NULL, 0, false, p->cp);
128
129 p->gen6_PIPE_CONTROL(p->dev,
130 PIPE_CONTROL_DEPTH_CACHE_FLUSH,
131 NULL, 0, false, p->cp);
132
133 p->gen6_PIPE_CONTROL(p->dev,
134 PIPE_CONTROL_DEPTH_STALL,
135 NULL, 0, false, p->cp);
136 }
137
138 static void
139 gen6_wa_pipe_control_wm_max_threads_stall(struct ilo_3d_pipeline *p)
140 {
141 assert(p->dev->gen == ILO_GEN(6));
142
143 /* the post-sync workaround should cover this already */
144 if (p->state.has_gen6_wa_pipe_control)
145 return;
146
147 /*
148 * From the Sandy Bridge PRM, volume 2 part 1, page 274:
149 *
150 * "A PIPE_CONTROL command, with only the Stall At Pixel Scoreboard
151 * field set (DW1 Bit 1), must be issued prior to any change to the
152 * value in this field (Maximum Number of Threads in 3DSTATE_WM)"
153 */
154 p->gen6_PIPE_CONTROL(p->dev,
155 PIPE_CONTROL_STALL_AT_SCOREBOARD,
156 NULL, 0, false, p->cp);
157
158 }
159
160 static void
161 gen6_wa_pipe_control_vs_const_flush(struct ilo_3d_pipeline *p)
162 {
163 assert(p->dev->gen == ILO_GEN(6));
164
165 gen6_wa_pipe_control_post_sync(p, false);
166
167 /*
168 * According to upload_vs_state() of classic i965, we need to emit
169 * PIPE_CONTROL after 3DSTATE_CONSTANT_VS so that the command is kept being
170 * buffered by VS FF, to the point that the FF dies.
171 */
172 p->gen6_PIPE_CONTROL(p->dev,
173 PIPE_CONTROL_DEPTH_STALL |
174 PIPE_CONTROL_INSTRUCTION_FLUSH |
175 PIPE_CONTROL_STATE_CACHE_INVALIDATE,
176 NULL, 0, false, p->cp);
177 }
178
179 #define DIRTY(state) (session->pipe_dirty & ILO_DIRTY_ ## state)
180
181 void
182 gen6_pipeline_common_select(struct ilo_3d_pipeline *p,
183 const struct ilo_context *ilo,
184 struct gen6_pipeline_session *session)
185 {
186 /* PIPELINE_SELECT */
187 if (session->hw_ctx_changed) {
188 if (p->dev->gen == ILO_GEN(6))
189 gen6_wa_pipe_control_post_sync(p, false);
190
191 p->gen6_PIPELINE_SELECT(p->dev, 0x0, p->cp);
192 }
193 }
194
195 void
196 gen6_pipeline_common_sip(struct ilo_3d_pipeline *p,
197 const struct ilo_context *ilo,
198 struct gen6_pipeline_session *session)
199 {
200 /* STATE_SIP */
201 if (session->hw_ctx_changed) {
202 if (p->dev->gen == ILO_GEN(6))
203 gen6_wa_pipe_control_post_sync(p, false);
204
205 p->gen6_STATE_SIP(p->dev, 0, p->cp);
206 }
207 }
208
209 void
210 gen6_pipeline_common_base_address(struct ilo_3d_pipeline *p,
211 const struct ilo_context *ilo,
212 struct gen6_pipeline_session *session)
213 {
214 /* STATE_BASE_ADDRESS */
215 if (session->state_bo_changed || session->kernel_bo_changed ||
216 session->batch_bo_changed) {
217 if (p->dev->gen == ILO_GEN(6))
218 gen6_wa_pipe_control_post_sync(p, false);
219
220 p->gen6_STATE_BASE_ADDRESS(p->dev,
221 NULL, p->cp->bo, p->cp->bo, NULL, ilo->hw3d->kernel.bo,
222 0, 0, 0, 0, p->cp);
223
224 /*
225 * From the Sandy Bridge PRM, volume 1 part 1, page 28:
226 *
227 * "The following commands must be reissued following any change to
228 * the base addresses:
229 *
230 * * 3DSTATE_BINDING_TABLE_POINTERS
231 * * 3DSTATE_SAMPLER_STATE_POINTERS
232 * * 3DSTATE_VIEWPORT_STATE_POINTERS
233 * * 3DSTATE_CC_POINTERS
234 * * MEDIA_STATE_POINTERS"
235 *
236 * 3DSTATE_SCISSOR_STATE_POINTERS is not on the list, but it is
237 * reasonable to also reissue the command. Same to PCB.
238 */
239 session->viewport_state_changed = true;
240
241 session->cc_state_blend_changed = true;
242 session->cc_state_dsa_changed = true;
243 session->cc_state_cc_changed = true;
244
245 session->scissor_state_changed = true;
246
247 session->binding_table_vs_changed = true;
248 session->binding_table_gs_changed = true;
249 session->binding_table_fs_changed = true;
250
251 session->sampler_state_vs_changed = true;
252 session->sampler_state_gs_changed = true;
253 session->sampler_state_fs_changed = true;
254
255 session->pcb_state_vs_changed = true;
256 session->pcb_state_gs_changed = true;
257 session->pcb_state_fs_changed = true;
258 }
259 }
260
/**
 * Emit 3DSTATE_URB, partitioning the URB between the VS and GS units.
 *
 * The URB is split in half when the GS unit is active; otherwise the VS
 * gets the whole URB.  Also emits the "GS NULL Fence" flush when the VS
 * takes over URB space previously owned by the GS.
 */
static void
gen6_pipeline_common_urb(struct ilo_3d_pipeline *p,
                         const struct ilo_context *ilo,
                         struct gen6_pipeline_session *session)
{
   /* 3DSTATE_URB */
   if (DIRTY(VERTEX_ELEMENTS) || DIRTY(VS) || DIRTY(GS)) {
      const struct ilo_shader *vs = (ilo->vs) ? ilo->vs->shader : NULL;
      const struct ilo_shader *gs = (ilo->gs) ? ilo->gs->shader : NULL;
      /* the GS unit is also needed when the VS does stream output */
      const bool gs_active = (gs || (vs && vs->stream_output));
      int vs_entry_size, gs_entry_size;
      int vs_total_size, gs_total_size;

      vs_entry_size = (vs) ? vs->out.count : 0;

      /*
       * As indicated by 2e712e41db0c0676e9f30fc73172c0e8de8d84d4, VF and VS
       * share VUE handles.  The VUE allocation size must be large enough to
       * store either VF outputs (number of VERTEX_ELEMENTs) and VS outputs.
       *
       * I am not sure if the PRM explicitly states that VF and VS share VUE
       * handles.  But here is a citation that implies so:
       *
       * From the Sandy Bridge PRM, volume 2 part 1, page 44:
       *
       *     "Once a FF stage that spawn threads has sufficient input to
       *      initiate a thread, it must guarantee that it is safe to request
       *      the thread initiation. For all these FF stages, this check is
       *      based on :
       *
       *      - The availability of output URB entries:
       *        - VS: As the input URB entries are overwritten with the
       *          VS-generated output data, output URB availability isn't a
       *          factor."
       */
      if (vs_entry_size < ilo->ve->count)
         vs_entry_size = ilo->ve->count;

      /* a stream-output-only GS passes the VS outputs through */
      gs_entry_size = (gs) ? gs->out.count :
         (vs && vs->stream_output) ? vs_entry_size : 0;

      /* convert attribute counts to bytes: 4 floats per attribute */
      vs_entry_size *= sizeof(float) * 4;
      gs_entry_size *= sizeof(float) * 4;
      vs_total_size = ilo->dev->urb_size;

      if (gs_active) {
         vs_total_size /= 2;
         gs_total_size = vs_total_size;
      }
      else {
         gs_total_size = 0;
      }

      p->gen6_3DSTATE_URB(p->dev, vs_total_size, gs_total_size,
            vs_entry_size, gs_entry_size, p->cp);

      /*
       * From the Sandy Bridge PRM, volume 2 part 1, page 27:
       *
       *     "Because of a urb corruption caused by allocating a previous
       *      gsunit's urb entry to vsunit software is required to send a
       *      "GS NULL Fence" (Send URB fence with VS URB size == 1 and GS URB
       *      size == 0) plus a dummy DRAW call before any case where VS will
       *      be taking over GS URB space."
       */
      if (p->state.gs.active && !gs_active)
         ilo_3d_pipeline_emit_flush_gen6(p);

      p->state.gs.active = gs_active;
   }
}
333
334 static void
335 gen6_pipeline_common_pointers_1(struct ilo_3d_pipeline *p,
336 const struct ilo_context *ilo,
337 struct gen6_pipeline_session *session)
338 {
339 /* 3DSTATE_VIEWPORT_STATE_POINTERS */
340 if (session->viewport_state_changed) {
341 p->gen6_3DSTATE_VIEWPORT_STATE_POINTERS(p->dev,
342 p->state.CLIP_VIEWPORT,
343 p->state.SF_VIEWPORT,
344 p->state.CC_VIEWPORT, p->cp);
345 }
346 }
347
348 static void
349 gen6_pipeline_common_pointers_2(struct ilo_3d_pipeline *p,
350 const struct ilo_context *ilo,
351 struct gen6_pipeline_session *session)
352 {
353 /* 3DSTATE_CC_STATE_POINTERS */
354 if (session->cc_state_blend_changed ||
355 session->cc_state_dsa_changed ||
356 session->cc_state_cc_changed) {
357 p->gen6_3DSTATE_CC_STATE_POINTERS(p->dev,
358 p->state.BLEND_STATE,
359 p->state.DEPTH_STENCIL_STATE,
360 p->state.COLOR_CALC_STATE, p->cp);
361 }
362
363 /* 3DSTATE_SAMPLER_STATE_POINTERS */
364 if (session->sampler_state_vs_changed ||
365 session->sampler_state_gs_changed ||
366 session->sampler_state_fs_changed) {
367 p->gen6_3DSTATE_SAMPLER_STATE_POINTERS(p->dev,
368 p->state.vs.SAMPLER_STATE,
369 0,
370 p->state.wm.SAMPLER_STATE, p->cp);
371 }
372 }
373
374 static void
375 gen6_pipeline_common_pointers_3(struct ilo_3d_pipeline *p,
376 const struct ilo_context *ilo,
377 struct gen6_pipeline_session *session)
378 {
379 /* 3DSTATE_SCISSOR_STATE_POINTERS */
380 if (session->scissor_state_changed) {
381 p->gen6_3DSTATE_SCISSOR_STATE_POINTERS(p->dev,
382 p->state.SCISSOR_RECT, p->cp);
383 }
384
385 /* 3DSTATE_BINDING_TABLE_POINTERS */
386 if (session->binding_table_vs_changed ||
387 session->binding_table_gs_changed ||
388 session->binding_table_fs_changed) {
389 p->gen6_3DSTATE_BINDING_TABLE_POINTERS(p->dev,
390 p->state.vs.BINDING_TABLE_STATE,
391 p->state.gs.BINDING_TABLE_STATE,
392 p->state.wm.BINDING_TABLE_STATE, p->cp);
393 }
394 }
395
396 void
397 gen6_pipeline_vf(struct ilo_3d_pipeline *p,
398 const struct ilo_context *ilo,
399 struct gen6_pipeline_session *session)
400 {
401 /* 3DSTATE_INDEX_BUFFER */
402 if (DIRTY(INDEX_BUFFER) || session->batch_bo_changed) {
403 p->gen6_3DSTATE_INDEX_BUFFER(p->dev,
404 &ilo->ib.state, session->info->primitive_restart, p->cp);
405 }
406
407 /* 3DSTATE_VERTEX_BUFFERS */
408 if (DIRTY(VERTEX_BUFFERS) || DIRTY(VERTEX_ELEMENTS) ||
409 session->batch_bo_changed) {
410 p->gen6_3DSTATE_VERTEX_BUFFERS(p->dev,
411 ilo->vb.states, ilo->vb.enabled_mask, ilo->ve, p->cp);
412 }
413
414 /* 3DSTATE_VERTEX_ELEMENTS */
415 if (DIRTY(VERTEX_ELEMENTS) || DIRTY(VS)) {
416 const struct ilo_ve_state *ve = ilo->ve;
417 bool last_velement_edgeflag = false;
418 bool prepend_generate_ids = false;
419
420 if (ilo->vs) {
421 const struct ilo_shader_info *info = &ilo->vs->info;
422
423 if (info->edgeflag_in >= 0) {
424 /* we rely on the state tracker here */
425 assert(info->edgeflag_in == ve->count - 1);
426 last_velement_edgeflag = true;
427 }
428
429 prepend_generate_ids = (info->has_instanceid || info->has_vertexid);
430 }
431
432 p->gen6_3DSTATE_VERTEX_ELEMENTS(p->dev, ve,
433 last_velement_edgeflag, prepend_generate_ids, p->cp);
434 }
435 }
436
437 void
438 gen6_pipeline_vf_statistics(struct ilo_3d_pipeline *p,
439 const struct ilo_context *ilo,
440 struct gen6_pipeline_session *session)
441 {
442 /* 3DSTATE_VF_STATISTICS */
443 if (session->hw_ctx_changed)
444 p->gen6_3DSTATE_VF_STATISTICS(p->dev, false, p->cp);
445 }
446
447 void
448 gen6_pipeline_vf_draw(struct ilo_3d_pipeline *p,
449 const struct ilo_context *ilo,
450 struct gen6_pipeline_session *session)
451 {
452 /* 3DPRIMITIVE */
453 p->gen6_3DPRIMITIVE(p->dev, session->info, false, p->cp);
454 p->state.has_gen6_wa_pipe_control = false;
455 }
456
/**
 * Emit the VS stage commands: 3DSTATE_CONSTANT_VS and 3DSTATE_VS, with
 * the GEN6 workarounds around them.
 */
void
gen6_pipeline_vs(struct ilo_3d_pipeline *p,
                 const struct ilo_context *ilo,
                 struct gen6_pipeline_session *session)
{
   const bool emit_3dstate_vs = (DIRTY(VS) || DIRTY(VERTEX_SAMPLERS) ||
                                 session->kernel_bo_changed);
   const bool emit_3dstate_constant_vs = session->pcb_state_vs_changed;

   /*
    * the classic i965 does this in upload_vs_state(), citing a spec that I
    * cannot find
    */
   if (emit_3dstate_vs && p->dev->gen == ILO_GEN(6))
      gen6_wa_pipe_control_post_sync(p, false);

   /* 3DSTATE_CONSTANT_VS: upload the VS push constant buffer */
   if (emit_3dstate_constant_vs) {
      p->gen6_3DSTATE_CONSTANT_VS(p->dev,
            &p->state.vs.PUSH_CONSTANT_BUFFER,
            &p->state.vs.PUSH_CONSTANT_BUFFER_size,
            1, p->cp);
   }

   /* 3DSTATE_VS */
   if (emit_3dstate_vs) {
      const int num_samplers = ilo->sampler[PIPE_SHADER_VERTEX].count;

      p->gen6_3DSTATE_VS(p->dev, ilo->vs, num_samplers, p->cp);
   }

   /* flush so 3DSTATE_CONSTANT_VS does not stay buffered in the VS FF */
   if (emit_3dstate_constant_vs && p->dev->gen == ILO_GEN(6))
      gen6_wa_pipe_control_vs_const_flush(p);
}
491
492 static void
493 gen6_pipeline_gs(struct ilo_3d_pipeline *p,
494 const struct ilo_context *ilo,
495 struct gen6_pipeline_session *session)
496 {
497 /* 3DSTATE_CONSTANT_GS */
498 if (session->pcb_state_gs_changed)
499 p->gen6_3DSTATE_CONSTANT_GS(p->dev, NULL, NULL, 0, p->cp);
500
501 /* 3DSTATE_GS */
502 if (DIRTY(GS) || DIRTY(VS) ||
503 session->prim_changed || session->kernel_bo_changed) {
504 const int verts_per_prim = u_vertices_per_prim(session->reduced_prim);
505
506 p->gen6_3DSTATE_GS(p->dev, ilo->gs, ilo->vs, verts_per_prim, p->cp);
507 }
508 }
509
510 bool
511 gen6_pipeline_update_max_svbi(struct ilo_3d_pipeline *p,
512 const struct ilo_context *ilo,
513 struct gen6_pipeline_session *session)
514 {
515 if (DIRTY(VS) || DIRTY(GS) || DIRTY(STREAM_OUTPUT_TARGETS)) {
516 const struct pipe_stream_output_info *so_info =
517 (ilo->gs) ? &ilo->gs->info.stream_output :
518 (ilo->vs) ? &ilo->vs->info.stream_output : NULL;
519 unsigned max_svbi = 0xffffffff;
520 int i;
521
522 for (i = 0; i < so_info->num_outputs; i++) {
523 const int output_buffer = so_info->output[i].output_buffer;
524 const struct pipe_stream_output_target *so =
525 ilo->so.states[output_buffer];
526 const int struct_size = so_info->stride[output_buffer] * 4;
527 const int elem_size = so_info->output[i].num_components * 4;
528 int buf_size, count;
529
530 if (!so) {
531 max_svbi = 0;
532 break;
533 }
534
535 buf_size = so->buffer_size - so_info->output[i].dst_offset * 4;
536
537 count = buf_size / struct_size;
538 if (buf_size % struct_size >= elem_size)
539 count++;
540
541 if (count < max_svbi)
542 max_svbi = count;
543 }
544
545 if (p->state.so_max_vertices != max_svbi) {
546 p->state.so_max_vertices = max_svbi;
547 return true;
548 }
549 }
550
551 return false;
552 }
553
/**
 * Emit 3DSTATE_GS_SVB_INDEX when the maximum SVBI changed, and initialize
 * the unused SVB indices when the hardware context changed.
 */
static void
gen6_pipeline_gs_svbi(struct ilo_3d_pipeline *p,
                      const struct ilo_context *ilo,
                      struct gen6_pipeline_session *session)
{
   const bool emit = gen6_pipeline_update_max_svbi(p, ilo, session);

   /* 3DSTATE_GS_SVB_INDEX */
   if (emit) {
      if (p->dev->gen == ILO_GEN(6))
         gen6_wa_pipe_control_post_sync(p, false);

      /* program index 0 with the current count and the new maximum */
      p->gen6_3DSTATE_GS_SVB_INDEX(p->dev,
            0, p->state.so_num_vertices, p->state.so_max_vertices,
            false, p->cp);

      if (session->hw_ctx_changed) {
         int i;

         /*
          * From the Sandy Bridge PRM, volume 2 part 1, page 148:
          *
          *     "If a buffer is not enabled then the SVBI must be set to 0x0
          *      in order to not cause overflow in that SVBI."
          *
          *     "If a buffer is not enabled then the MaxSVBI must be set to
          *      0xFFFFFFFF in order to not cause overflow in that SVBI."
          */
         for (i = 1; i < 4; i++) {
            p->gen6_3DSTATE_GS_SVB_INDEX(p->dev,
                  i, 0, 0xffffffff, false, p->cp);
         }
      }
   }
}
589
590 void
591 gen6_pipeline_clip(struct ilo_3d_pipeline *p,
592 const struct ilo_context *ilo,
593 struct gen6_pipeline_session *session)
594 {
595 /* 3DSTATE_CLIP */
596 if (DIRTY(RASTERIZER) || DIRTY(FS) ||
597 DIRTY(VIEWPORT) || DIRTY(FRAMEBUFFER)) {
598 bool enable_guardband = true;
599 unsigned i;
600
601 /*
602 * We do not do 2D clipping yet. Guard band test should only be enabled
603 * when the viewport is larger than the framebuffer.
604 */
605 for (i = 0; i < ilo->viewport.count; i++) {
606 const struct ilo_viewport_cso *vp = &ilo->viewport.cso[i];
607
608 if (vp->min_x > 0.0f || vp->max_x < ilo->fb.state.width ||
609 vp->min_y > 0.0f || vp->max_y < ilo->fb.state.height) {
610 enable_guardband = false;
611 break;
612 }
613 }
614
615 p->gen6_3DSTATE_CLIP(p->dev,
616 ilo->rasterizer,
617 (ilo->fs && ilo->fs->shader->in.has_linear_interp),
618 enable_guardband, 1, p->cp);
619 }
620 }
621
622 static void
623 gen6_pipeline_sf(struct ilo_3d_pipeline *p,
624 const struct ilo_context *ilo,
625 struct gen6_pipeline_session *session)
626 {
627 /* 3DSTATE_SF */
628 if (DIRTY(RASTERIZER) || DIRTY(VS) || DIRTY(GS) || DIRTY(FS)) {
629 const struct ilo_shader *fs = (ilo->fs)? ilo->fs->shader : NULL;
630 const struct ilo_shader *last_sh =
631 (ilo->gs)? ilo->gs->shader :
632 (ilo->vs)? ilo->vs->shader : NULL;
633
634 p->gen6_3DSTATE_SF(p->dev, ilo->rasterizer, fs, last_sh, p->cp);
635 }
636 }
637
638 void
639 gen6_pipeline_sf_rect(struct ilo_3d_pipeline *p,
640 const struct ilo_context *ilo,
641 struct gen6_pipeline_session *session)
642 {
643 /* 3DSTATE_DRAWING_RECTANGLE */
644 if (DIRTY(FRAMEBUFFER)) {
645 if (p->dev->gen == ILO_GEN(6))
646 gen6_wa_pipe_control_post_sync(p, false);
647
648 p->gen6_3DSTATE_DRAWING_RECTANGLE(p->dev, 0, 0,
649 ilo->fb.state.width, ilo->fb.state.height, p->cp);
650 }
651 }
652
/**
 * Emit the WM stage commands: 3DSTATE_CONSTANT_PS and 3DSTATE_WM, with
 * the GEN6 max-threads stall workaround.
 */
static void
gen6_pipeline_wm(struct ilo_3d_pipeline *p,
                 const struct ilo_context *ilo,
                 struct gen6_pipeline_session *session)
{
   /* 3DSTATE_CONSTANT_PS; no FS push constants are uploaded */
   if (session->pcb_state_fs_changed)
      p->gen6_3DSTATE_CONSTANT_PS(p->dev, NULL, NULL, 0, p->cp);

   /* 3DSTATE_WM */
   if (DIRTY(FS) || DIRTY(FRAGMENT_SAMPLERS) ||
       DIRTY(BLEND) || DIRTY(DEPTH_STENCIL_ALPHA) ||
       DIRTY(RASTERIZER) || session->kernel_bo_changed) {
      const struct ilo_shader *fs = (ilo->fs)? ilo->fs->shader : NULL;
      const int num_samplers = ilo->sampler[PIPE_SHADER_FRAGMENT].count;
      const bool dual_blend = ilo->blend->dual_blend;
      /* alpha test or alpha-to-coverage may kill pixels in the CC stage */
      const bool cc_may_kill = (ilo->dsa->alpha.enabled ||
                                ilo->blend->alpha_to_coverage);

      /* NOTE(review): presumably the FS PCB never holds clip states here,
       * matching the NULL 3DSTATE_CONSTANT_PS above — confirm
       */
      if (fs)
         assert(!fs->pcb.clip_state_size);

      /* changing WM max threads requires a scoreboard stall on GEN6 */
      if (p->dev->gen == ILO_GEN(6) && session->hw_ctx_changed)
         gen6_wa_pipe_control_wm_max_threads_stall(p);

      p->gen6_3DSTATE_WM(p->dev, fs, num_samplers,
            &ilo->rasterizer->state, dual_blend, cc_may_kill, p->cp);
   }
}
682
683 static void
684 gen6_pipeline_wm_multisample(struct ilo_3d_pipeline *p,
685 const struct ilo_context *ilo,
686 struct gen6_pipeline_session *session)
687 {
688 /* 3DSTATE_MULTISAMPLE and 3DSTATE_SAMPLE_MASK */
689 if (DIRTY(SAMPLE_MASK) || DIRTY(FRAMEBUFFER)) {
690 const uint32_t *packed_sample_pos;
691
692 packed_sample_pos = (ilo->fb.num_samples > 1) ?
693 &p->packed_sample_position_4x : &p->packed_sample_position_1x;
694
695 if (p->dev->gen == ILO_GEN(6)) {
696 gen6_wa_pipe_control_post_sync(p, false);
697 gen6_wa_pipe_control_wm_multisample_flush(p);
698 }
699
700 p->gen6_3DSTATE_MULTISAMPLE(p->dev,
701 ilo->fb.num_samples, packed_sample_pos,
702 ilo->rasterizer->state.half_pixel_center, p->cp);
703
704 p->gen6_3DSTATE_SAMPLE_MASK(p->dev,
705 (ilo->fb.num_samples > 1) ? ilo->sample_mask : 0x1, p->cp);
706 }
707 }
708
/**
 * Emit 3DSTATE_DEPTH_BUFFER and 3DSTATE_CLEAR_PARAMS, with the GEN6 depth
 * stall/flush workaround sequence.
 */
static void
gen6_pipeline_wm_depth(struct ilo_3d_pipeline *p,
                       const struct ilo_context *ilo,
                       struct gen6_pipeline_session *session)
{
   /* 3DSTATE_DEPTH_BUFFER and 3DSTATE_CLEAR_PARAMS */
   if (DIRTY(FRAMEBUFFER) || session->batch_bo_changed) {
      const struct ilo_zs_surface *zs;

      if (ilo->fb.state.zsbuf) {
         const struct ilo_surface_cso *surface =
            (const struct ilo_surface_cso *) ilo->fb.state.zsbuf;

         /* the zsbuf must be a depth/stencil surface, not a render target */
         assert(!surface->is_rt);
         zs = &surface->u.zs;
      }
      else {
         /* no depth buffer bound; use the null depth/stencil surface */
         zs = &ilo->fb.null_zs;
      }

      if (p->dev->gen == ILO_GEN(6)) {
         /* depth related commands require these workarounds first */
         gen6_wa_pipe_control_post_sync(p, false);
         gen6_wa_pipe_control_wm_depth_flush(p);
      }

      p->gen6_3DSTATE_DEPTH_BUFFER(p->dev, zs, p->cp);

      /* TODO: emit the real depth clear value instead of 0 */
      p->gen6_3DSTATE_CLEAR_PARAMS(p->dev, 0, p->cp);
   }
}
740
/**
 * Emit the rasterizer stipple/line commands.  Each of these is preceded by
 * the GEN6 post-sync workaround.
 */
void
gen6_pipeline_wm_raster(struct ilo_3d_pipeline *p,
                        const struct ilo_context *ilo,
                        struct gen6_pipeline_session *session)
{
   /* 3DSTATE_POLY_STIPPLE_PATTERN and 3DSTATE_POLY_STIPPLE_OFFSET */
   if ((DIRTY(RASTERIZER) || DIRTY(POLY_STIPPLE)) &&
       ilo->rasterizer->state.poly_stipple_enable) {
      if (p->dev->gen == ILO_GEN(6))
         gen6_wa_pipe_control_post_sync(p, false);

      p->gen6_3DSTATE_POLY_STIPPLE_PATTERN(p->dev,
            &ilo->poly_stipple, p->cp);

      /* no stipple offset is applied */
      p->gen6_3DSTATE_POLY_STIPPLE_OFFSET(p->dev, 0, 0, p->cp);
   }

   /* 3DSTATE_LINE_STIPPLE */
   if (DIRTY(RASTERIZER) && ilo->rasterizer->state.line_stipple_enable) {
      if (p->dev->gen == ILO_GEN(6))
         gen6_wa_pipe_control_post_sync(p, false);

      /* gallium's factor is one less than the hardware repeat count */
      p->gen6_3DSTATE_LINE_STIPPLE(p->dev,
            ilo->rasterizer->state.line_stipple_pattern,
            ilo->rasterizer->state.line_stipple_factor + 1, p->cp);
   }

   /* 3DSTATE_AA_LINE_PARAMETERS */
   if (DIRTY(RASTERIZER) && ilo->rasterizer->state.line_smooth) {
      if (p->dev->gen == ILO_GEN(6))
         gen6_wa_pipe_control_post_sync(p, false);

      p->gen6_3DSTATE_AA_LINE_PARAMETERS(p->dev, p->cp);
   }
}
776
777 static void
778 gen6_pipeline_state_viewports(struct ilo_3d_pipeline *p,
779 const struct ilo_context *ilo,
780 struct gen6_pipeline_session *session)
781 {
782 /* SF_CLIP_VIEWPORT and CC_VIEWPORT */
783 if (p->dev->gen >= ILO_GEN(7) && DIRTY(VIEWPORT)) {
784 p->state.SF_CLIP_VIEWPORT = p->gen7_SF_CLIP_VIEWPORT(p->dev,
785 ilo->viewport.cso, ilo->viewport.count, p->cp);
786
787 p->state.CC_VIEWPORT = p->gen6_CC_VIEWPORT(p->dev,
788 ilo->viewport.cso, ilo->viewport.count, p->cp);
789
790 session->viewport_state_changed = true;
791 }
792 /* SF_VIEWPORT, CLIP_VIEWPORT, and CC_VIEWPORT */
793 else if (DIRTY(VIEWPORT)) {
794 p->state.CLIP_VIEWPORT = p->gen6_CLIP_VIEWPORT(p->dev,
795 ilo->viewport.cso, ilo->viewport.count, p->cp);
796
797 p->state.SF_VIEWPORT = p->gen6_SF_VIEWPORT(p->dev,
798 ilo->viewport.cso, ilo->viewport.count, p->cp);
799
800 p->state.CC_VIEWPORT = p->gen6_CC_VIEWPORT(p->dev,
801 ilo->viewport.cso, ilo->viewport.count, p->cp);
802
803 session->viewport_state_changed = true;
804 }
805 }
806
/**
 * Upload the color-calculator related state buffers (BLEND_STATE,
 * COLOR_CALC_STATE, DEPTH_STENCIL_STATE) and flag 3DSTATE_CC_STATE_POINTERS
 * for re-emission.
 */
static void
gen6_pipeline_state_cc(struct ilo_3d_pipeline *p,
                       const struct ilo_context *ilo,
                       struct gen6_pipeline_session *session)
{
   /* BLEND_STATE */
   if (DIRTY(BLEND) || DIRTY(FRAMEBUFFER) || DIRTY(DEPTH_STENCIL_ALPHA)) {
      p->state.BLEND_STATE = p->gen6_BLEND_STATE(p->dev,
            ilo->blend, &ilo->fb, &ilo->dsa->alpha, p->cp);

      session->cc_state_blend_changed = true;
   }

   /* COLOR_CALC_STATE */
   if (DIRTY(DEPTH_STENCIL_ALPHA) || DIRTY(STENCIL_REF) || DIRTY(BLEND_COLOR)) {
      p->state.COLOR_CALC_STATE =
         p->gen6_COLOR_CALC_STATE(p->dev, &ilo->stencil_ref,
               ilo->dsa->alpha.ref_value, &ilo->blend_color, p->cp);

      session->cc_state_cc_changed = true;
   }

   /* DEPTH_STENCIL_STATE */
   if (DIRTY(DEPTH_STENCIL_ALPHA)) {
      p->state.DEPTH_STENCIL_STATE =
         p->gen6_DEPTH_STENCIL_STATE(p->dev, ilo->dsa, p->cp);

      session->cc_state_dsa_changed = true;
   }
}
837
838 static void
839 gen6_pipeline_state_scissors(struct ilo_3d_pipeline *p,
840 const struct ilo_context *ilo,
841 struct gen6_pipeline_session *session)
842 {
843 /* SCISSOR_RECT */
844 if (DIRTY(SCISSOR) || DIRTY(VIEWPORT)) {
845 /* there should be as many scissors as there are viewports */
846 p->state.SCISSOR_RECT = p->gen6_SCISSOR_RECT(p->dev,
847 &ilo->scissor, ilo->viewport.count, p->cp);
848
849 session->scissor_state_changed = true;
850 }
851 }
852
/**
 * Upload SURFACE_STATEs for the render targets, emitting a null surface
 * when no color buffer is bound.
 */
static void
gen6_pipeline_state_surfaces_rt(struct ilo_3d_pipeline *p,
                                const struct ilo_context *ilo,
                                struct gen6_pipeline_session *session)
{
   /* SURFACE_STATEs for render targets */
   if (DIRTY(FRAMEBUFFER)) {
      const int offset = ILO_WM_DRAW_SURFACE(0);
      uint32_t *surface_state = &p->state.wm.SURFACE_STATE[offset];
      int i;

      for (i = 0; i < ilo->fb.state.nr_cbufs; i++) {
         const struct ilo_surface_cso *surface =
            (const struct ilo_surface_cso *) ilo->fb.state.cbufs[i];

         /* color buffers must be render-target surfaces */
         assert(surface && surface->is_rt);
         surface_state[i] =
            p->gen6_SURFACE_STATE(p->dev, &surface->u.rt, true, p->cp);
      }

      /*
       * Upload at least one render target, as
       * brw_update_renderbuffer_surfaces() does.  I don't know why.
       */
      if (i == 0) {
         struct ilo_view_surface null_surface;

         ilo_gpe_init_view_surface_null(p->dev,
               ilo->fb.state.width, ilo->fb.state.height,
               1, 0, &null_surface);

         surface_state[i] =
            p->gen6_SURFACE_STATE(p->dev, &null_surface, true, p->cp);

         i++;
      }

      /* zero the unused slots */
      memset(&surface_state[i], 0, (ILO_MAX_DRAW_BUFFERS - i) * 4);

      if (i && session->num_surfaces[PIPE_SHADER_FRAGMENT] < offset + i)
         session->num_surfaces[PIPE_SHADER_FRAGMENT] = offset + i;

      session->binding_table_fs_changed = true;
   }
}
898
/**
 * Upload SURFACE_STATEs for the stream output targets.  This path is taken
 * only on GEN6 (see the early return).
 */
static void
gen6_pipeline_state_surfaces_so(struct ilo_3d_pipeline *p,
                                const struct ilo_context *ilo,
                                struct gen6_pipeline_session *session)
{
   const struct ilo_shader_state *vs = ilo->vs;
   const struct ilo_shader_state *gs = ilo->gs;
   const struct pipe_stream_output_target **so_targets =
      (const struct pipe_stream_output_target **) ilo->so.states;
   const int num_so_targets = ilo->so.count;

   if (p->dev->gen != ILO_GEN(6))
      return;

   /* SURFACE_STATEs for stream output targets */
   if (DIRTY(VS) || DIRTY(GS) || DIRTY(STREAM_OUTPUT_TARGETS)) {
      /* stream output comes from the GS when bound, else from the VS */
      const struct pipe_stream_output_info *so_info =
         (gs) ? &gs->info.stream_output :
         (vs) ? &vs->info.stream_output : NULL;
      const int offset = ILO_GS_SO_SURFACE(0);
      uint32_t *surface_state = &p->state.gs.SURFACE_STATE[offset];
      int i;

      /* note the so_info NULL guard in the loop condition */
      for (i = 0; so_info && i < so_info->num_outputs; i++) {
         const int target = so_info->output[i].output_buffer;
         const struct pipe_stream_output_target *so_target =
            (target < num_so_targets) ? so_targets[target] : NULL;

         if (so_target) {
            surface_state[i] = p->gen6_so_SURFACE_STATE(p->dev,
                  so_target, so_info, i, p->cp);
         }
         else {
            surface_state[i] = 0;
         }
      }

      /* zero the unused slots */
      memset(&surface_state[i], 0, (ILO_MAX_SO_BINDINGS - i) * 4);

      if (i && session->num_surfaces[PIPE_SHADER_GEOMETRY] < offset + i)
         session->num_surfaces[PIPE_SHADER_GEOMETRY] = offset + i;

      session->binding_table_gs_changed = true;
   }
}
944
/**
 * Upload SURFACE_STATEs for the sampler views of a shader stage.
 *
 * Only the VS and FS stages are handled; other stages (and stages with no
 * dirty views) are skipped.
 */
static void
gen6_pipeline_state_surfaces_view(struct ilo_3d_pipeline *p,
                                  const struct ilo_context *ilo,
                                  int shader_type,
                                  struct gen6_pipeline_session *session)
{
   const struct pipe_sampler_view * const *views =
      (const struct pipe_sampler_view **) ilo->view[shader_type].states;
   const int num_views = ilo->view[shader_type].count;
   uint32_t *surface_state;
   int offset, i;
   bool skip = false;

   /* SURFACE_STATEs for sampler views */
   switch (shader_type) {
   case PIPE_SHADER_VERTEX:
      if (DIRTY(VERTEX_SAMPLER_VIEWS)) {
         offset = ILO_VS_TEXTURE_SURFACE(0);
         surface_state = &p->state.vs.SURFACE_STATE[offset];

         session->binding_table_vs_changed = true;
      }
      else {
         skip = true;
      }
      break;
   case PIPE_SHADER_FRAGMENT:
      if (DIRTY(FRAGMENT_SAMPLER_VIEWS)) {
         offset = ILO_WM_TEXTURE_SURFACE(0);
         surface_state = &p->state.wm.SURFACE_STATE[offset];

         session->binding_table_fs_changed = true;
      }
      else {
         skip = true;
      }
      break;
   default:
      /* no sampler views for other stages */
      skip = true;
      break;
   }

   if (skip)
      return;

   for (i = 0; i < num_views; i++) {
      if (views[i]) {
         const struct ilo_view_cso *cso =
            (const struct ilo_view_cso *) views[i];

         surface_state[i] =
            p->gen6_SURFACE_STATE(p->dev, &cso->surface, false, p->cp);
      }
      else {
         surface_state[i] = 0;
      }
   }

   /* zero the unused slots */
   memset(&surface_state[i], 0, (ILO_MAX_SAMPLER_VIEWS - i) * 4);

   if (i && session->num_surfaces[shader_type] < offset + i)
      session->num_surfaces[shader_type] = offset + i;
}
1008
/**
 * Upload SURFACE_STATEs for the constant buffers of a shader stage.
 *
 * Only the VS and FS stages are handled; other stages (and stages with no
 * dirty constant buffers) are skipped.
 */
static void
gen6_pipeline_state_surfaces_const(struct ilo_3d_pipeline *p,
                                   const struct ilo_context *ilo,
                                   int shader_type,
                                   struct gen6_pipeline_session *session)
{
   const struct ilo_cbuf_cso *buffers = ilo->cbuf[shader_type].cso;
   const int num_buffers = ilo->cbuf[shader_type].count;
   uint32_t *surface_state;
   int offset, i;
   bool skip = false;

   /* SURFACE_STATEs for constant buffers */
   switch (shader_type) {
   case PIPE_SHADER_VERTEX:
      if (DIRTY(CONSTANT_BUFFER)) {
         offset = ILO_VS_CONST_SURFACE(0);
         surface_state = &p->state.vs.SURFACE_STATE[offset];

         session->binding_table_vs_changed = true;
      }
      else {
         skip = true;
      }
      break;
   case PIPE_SHADER_FRAGMENT:
      if (DIRTY(CONSTANT_BUFFER)) {
         offset = ILO_WM_CONST_SURFACE(0);
         surface_state = &p->state.wm.SURFACE_STATE[offset];

         session->binding_table_fs_changed = true;
      }
      else {
         skip = true;
      }
      break;
   default:
      /* no constant buffer surfaces for other stages */
      skip = true;
      break;
   }

   if (skip)
      return;

   for (i = 0; i < num_buffers; i++) {
      if (buffers[i].resource) {
         const struct ilo_view_surface *surf = &buffers[i].surface;

         surface_state[i] =
            p->gen6_SURFACE_STATE(p->dev, surf, false, p->cp);
      }
      else {
         surface_state[i] = 0;
      }
   }

   /* zero the unused slots */
   memset(&surface_state[i], 0, (ILO_MAX_CONST_BUFFERS - i) * 4);

   if (i && session->num_surfaces[shader_type] < offset + i)
      session->num_surfaces[shader_type] = offset + i;
}
1070
/**
 * Upload the BINDING_TABLE_STATE of a shader stage, pointing at the
 * stage's SURFACE_STATE array.
 *
 * This must run after all SURFACE_STATEs of the stage have been uploaded
 * in this session, since it reads the per-stage SURFACE_STATE array and
 * the session's surface counts.  Stages whose binding table did not change
 * are skipped.
 */
static void
gen6_pipeline_state_binding_tables(struct ilo_3d_pipeline *p,
                                   const struct ilo_context *ilo,
                                   int shader_type,
                                   struct gen6_pipeline_session *session)
{
   uint32_t *binding_table_state, *surface_state;
   int *binding_table_state_size, size;
   bool skip = false;

   /* BINDING_TABLE_STATE: pick the per-stage storage */
   switch (shader_type) {
   case PIPE_SHADER_VERTEX:
      surface_state = p->state.vs.SURFACE_STATE;
      binding_table_state = &p->state.vs.BINDING_TABLE_STATE;
      binding_table_state_size = &p->state.vs.BINDING_TABLE_STATE_size;

      skip = !session->binding_table_vs_changed;
      break;
   case PIPE_SHADER_GEOMETRY:
      surface_state = p->state.gs.SURFACE_STATE;
      binding_table_state = &p->state.gs.BINDING_TABLE_STATE;
      binding_table_state_size = &p->state.gs.BINDING_TABLE_STATE_size;

      skip = !session->binding_table_gs_changed;
      break;
   case PIPE_SHADER_FRAGMENT:
      surface_state = p->state.wm.SURFACE_STATE;
      binding_table_state = &p->state.wm.BINDING_TABLE_STATE;
      binding_table_state_size = &p->state.wm.BINDING_TABLE_STATE_size;

      skip = !session->binding_table_fs_changed;
      break;
   default:
      /* no binding table for the other stages */
      skip = true;
      break;
   }

   if (skip)
      return;

   /*
    * If we have seemingly less SURFACE_STATEs than before, it could be that
    * we did not touch those residing at the tail in this upload.  Loop over
    * them to figure out the real number of SURFACE_STATEs.
    */
   for (size = *binding_table_state_size;
        size > session->num_surfaces[shader_type]; size--) {
      if (surface_state[size - 1])
         break;
   }
   /* never report fewer surfaces than were uploaded this session */
   if (size < session->num_surfaces[shader_type])
      size = session->num_surfaces[shader_type];

   *binding_table_state = p->gen6_BINDING_TABLE_STATE(p->dev,
         surface_state, size, p->cp);
   *binding_table_state_size = size;
}
1129
1130 static void
1131 gen6_pipeline_state_samplers(struct ilo_3d_pipeline *p,
1132 const struct ilo_context *ilo,
1133 int shader_type,
1134 struct gen6_pipeline_session *session)
1135 {
1136 const struct ilo_sampler_cso * const *samplers =
1137 ilo->sampler[shader_type].cso;
1138 const struct pipe_sampler_view * const *views =
1139 (const struct pipe_sampler_view **) ilo->view[shader_type].states;
1140 const int num_samplers = ilo->sampler[shader_type].count;
1141 const int num_views = ilo->view[shader_type].count;
1142 uint32_t *sampler_state, *border_color_state;
1143 bool emit_border_color = false;
1144 bool skip = false;
1145
1146 /* SAMPLER_BORDER_COLOR_STATE and SAMPLER_STATE */
1147 switch (shader_type) {
1148 case PIPE_SHADER_VERTEX:
1149 if (DIRTY(VERTEX_SAMPLERS) || DIRTY(VERTEX_SAMPLER_VIEWS)) {
1150 sampler_state = &p->state.vs.SAMPLER_STATE;
1151 border_color_state = p->state.vs.SAMPLER_BORDER_COLOR_STATE;
1152
1153 if (DIRTY(VERTEX_SAMPLERS))
1154 emit_border_color = true;
1155
1156 session->sampler_state_vs_changed = true;
1157 }
1158 else {
1159 skip = true;
1160 }
1161 break;
1162 case PIPE_SHADER_FRAGMENT:
1163 if (DIRTY(FRAGMENT_SAMPLERS) || DIRTY(FRAGMENT_SAMPLER_VIEWS)) {
1164 sampler_state = &p->state.wm.SAMPLER_STATE;
1165 border_color_state = p->state.wm.SAMPLER_BORDER_COLOR_STATE;
1166
1167 if (DIRTY(FRAGMENT_SAMPLERS))
1168 emit_border_color = true;
1169
1170 session->sampler_state_fs_changed = true;
1171 }
1172 else {
1173 skip = true;
1174 }
1175 break;
1176 default:
1177 skip = true;
1178 break;
1179 }
1180
1181 if (skip)
1182 return;
1183
1184 if (emit_border_color) {
1185 int i;
1186
1187 for (i = 0; i < num_samplers; i++) {
1188 border_color_state[i] = (samplers[i]) ?
1189 p->gen6_SAMPLER_BORDER_COLOR_STATE(p->dev,
1190 samplers[i], p->cp) : 0;
1191 }
1192 }
1193
1194 /* should we take the minimum of num_samplers and num_views? */
1195 *sampler_state = p->gen6_SAMPLER_STATE(p->dev,
1196 samplers, views,
1197 border_color_state,
1198 MIN2(num_samplers, num_views), p->cp);
1199 }
1200
1201 static void
1202 gen6_pipeline_state_pcb(struct ilo_3d_pipeline *p,
1203 const struct ilo_context *ilo,
1204 struct gen6_pipeline_session *session)
1205 {
1206 /* push constant buffer for VS */
1207 if (DIRTY(VS) || DIRTY(CLIP)) {
1208 const struct ilo_shader *vs = (ilo->vs)? ilo->vs->shader : NULL;
1209
1210 if (vs && vs->pcb.clip_state_size) {
1211 void *pcb;
1212
1213 p->state.vs.PUSH_CONSTANT_BUFFER_size = vs->pcb.clip_state_size;
1214 p->state.vs.PUSH_CONSTANT_BUFFER =
1215 p->gen6_push_constant_buffer(p->dev,
1216 p->state.vs.PUSH_CONSTANT_BUFFER_size, &pcb, p->cp);
1217
1218 memcpy(pcb, &ilo->clip, vs->pcb.clip_state_size);
1219 }
1220 else {
1221 p->state.vs.PUSH_CONSTANT_BUFFER_size = 0;
1222 p->state.vs.PUSH_CONSTANT_BUFFER = 0;
1223 }
1224
1225 session->pcb_state_vs_changed = true;
1226 }
1227 }
1228
1229 #undef DIRTY
1230
/**
 * Emit the 3D pipeline commands for a draw, ending with 3DPRIMITIVE.
 * The emission order is significant and intentionally fixed.
 */
static void
gen6_pipeline_commands(struct ilo_3d_pipeline *p,
                       const struct ilo_context *ilo,
                       struct gen6_pipeline_session *session)
{
   /*
    * We try to keep the order of the commands match, as closely as possible,
    * that of the classic i965 driver.  It allows us to compare the command
    * streams easily.
    */
   gen6_pipeline_common_select(p, ilo, session);
   gen6_pipeline_gs_svbi(p, ilo, session);
   gen6_pipeline_common_sip(p, ilo, session);
   gen6_pipeline_vf_statistics(p, ilo, session);
   gen6_pipeline_common_base_address(p, ilo, session);
   gen6_pipeline_common_pointers_1(p, ilo, session);
   gen6_pipeline_common_urb(p, ilo, session);
   gen6_pipeline_common_pointers_2(p, ilo, session);
   gen6_pipeline_wm_multisample(p, ilo, session);
   gen6_pipeline_vs(p, ilo, session);
   gen6_pipeline_gs(p, ilo, session);
   gen6_pipeline_clip(p, ilo, session);
   gen6_pipeline_sf(p, ilo, session);
   gen6_pipeline_wm(p, ilo, session);
   gen6_pipeline_common_pointers_3(p, ilo, session);
   gen6_pipeline_wm_depth(p, ilo, session);
   gen6_pipeline_wm_raster(p, ilo, session);
   gen6_pipeline_sf_rect(p, ilo, session);
   gen6_pipeline_vf(p, ilo, session);
   gen6_pipeline_vf_draw(p, ilo, session);
}
1262
/**
 * Upload all dynamic states needed by a draw: viewports, CC, scissors,
 * the VS push constant buffer, SURFACE_STATEs, samplers, and the binding
 * tables that reference the surfaces.
 */
void
gen6_pipeline_states(struct ilo_3d_pipeline *p,
                     const struct ilo_context *ilo,
                     struct gen6_pipeline_session *session)
{
   int shader_type;

   gen6_pipeline_state_viewports(p, ilo, session);
   gen6_pipeline_state_cc(p, ilo, session);
   gen6_pipeline_state_scissors(p, ilo, session);
   gen6_pipeline_state_pcb(p, ilo, session);

   /*
    * upload all SURFACE_STATEs together so that we know there are minimal
    * paddings
    */
   gen6_pipeline_state_surfaces_rt(p, ilo, session);
   gen6_pipeline_state_surfaces_so(p, ilo, session);
   for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
      gen6_pipeline_state_surfaces_view(p, ilo, shader_type, session);
      gen6_pipeline_state_surfaces_const(p, ilo, shader_type, session);
   }

   for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
      gen6_pipeline_state_samplers(p, ilo, shader_type, session);
      /* this must be called after all SURFACE_STATEs are uploaded */
      gen6_pipeline_state_binding_tables(p, ilo, shader_type, session);
   }
}
1292
/**
 * Initialize the session for a draw: record the draw info and dirty
 * flags, reduce the primitive type, snapshot the available command
 * parser space, and translate the pipeline's invalidate flags into
 * per-session change flags.
 *
 * Note this memset()s the whole session; any hooks (emit_draw_states,
 * emit_draw_commands) must be assigned afterwards by the caller.
 */
void
gen6_pipeline_prepare(const struct ilo_3d_pipeline *p,
                      const struct ilo_context *ilo,
                      const struct pipe_draw_info *info,
                      struct gen6_pipeline_session *session)
{
   memset(session, 0, sizeof(*session));
   session->info = info;
   session->pipe_dirty = ilo->dirty;
   session->reduced_prim = u_reduced_prim(info->mode);

   /* available space before the session */
   session->init_cp_space = ilo_cp_space(p->cp);

   session->hw_ctx_changed =
      (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_HW);

   if (session->hw_ctx_changed) {
      /* these should be enough to make everything uploaded */
      session->batch_bo_changed = true;
      session->state_bo_changed = true;
      session->kernel_bo_changed = true;
      session->prim_changed = true;
   }
   else {
      /*
       * Any state that involves resources needs to be re-emitted when the
       * batch bo changed.  This is because we do not pin the resources and
       * their offsets (or existence) may change between batch buffers.
       *
       * Since we messed around with ILO_3D_PIPELINE_INVALIDATE_BATCH_BO in
       * handle_invalid_batch_bo(), use ILO_3D_PIPELINE_INVALIDATE_STATE_BO as
       * a temporary workaround.
       */
      session->batch_bo_changed =
         (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_STATE_BO);

      session->state_bo_changed =
         (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_STATE_BO);
      session->kernel_bo_changed =
         (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_KERNEL_BO);
      session->prim_changed = (p->state.reduced_prim != session->reduced_prim);
   }
}
1337
1338 void
1339 gen6_pipeline_draw(struct ilo_3d_pipeline *p,
1340 const struct ilo_context *ilo,
1341 struct gen6_pipeline_session *session)
1342 {
1343 /* force all states to be uploaded if the state bo changed */
1344 if (session->state_bo_changed)
1345 session->pipe_dirty = ILO_DIRTY_ALL;
1346 else
1347 session->pipe_dirty = ilo->dirty;
1348
1349 session->emit_draw_states(p, ilo, session);
1350
1351 /* force all commands to be uploaded if the HW context changed */
1352 if (session->hw_ctx_changed)
1353 session->pipe_dirty = ILO_DIRTY_ALL;
1354 else
1355 session->pipe_dirty = ilo->dirty;
1356
1357 session->emit_draw_commands(p, ilo, session);
1358 }
1359
1360 void
1361 gen6_pipeline_end(struct ilo_3d_pipeline *p,
1362 const struct ilo_context *ilo,
1363 struct gen6_pipeline_session *session)
1364 {
1365 int used, estimate;
1366
1367 /* sanity check size estimation */
1368 used = session->init_cp_space - ilo_cp_space(p->cp);
1369 estimate = ilo_3d_pipeline_estimate_size(p, ILO_3D_PIPELINE_DRAW, ilo);
1370 assert(used <= estimate);
1371
1372 p->state.reduced_prim = session->reduced_prim;
1373 }
1374
/**
 * The GEN6 emit_draw entry point: prepare a session, hook up the GEN6
 * state/command emitters, and run the draw.
 *
 * The hooks must be assigned after gen6_pipeline_prepare(), which
 * memset()s the session.
 */
static void
ilo_3d_pipeline_emit_draw_gen6(struct ilo_3d_pipeline *p,
                               const struct ilo_context *ilo,
                               const struct pipe_draw_info *info)
{
   struct gen6_pipeline_session session;

   gen6_pipeline_prepare(p, ilo, info, &session);

   session.emit_draw_states = gen6_pipeline_states;
   session.emit_draw_commands = gen6_pipeline_commands;

   gen6_pipeline_draw(p, ilo, &session);
   gen6_pipeline_end(p, ilo, &session);
}
1390
1391 void
1392 ilo_3d_pipeline_emit_flush_gen6(struct ilo_3d_pipeline *p)
1393 {
1394 if (p->dev->gen == ILO_GEN(6))
1395 gen6_wa_pipe_control_post_sync(p, false);
1396
1397 p->gen6_PIPE_CONTROL(p->dev,
1398 PIPE_CONTROL_INSTRUCTION_FLUSH |
1399 PIPE_CONTROL_WRITE_FLUSH |
1400 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
1401 PIPE_CONTROL_VF_CACHE_INVALIDATE |
1402 PIPE_CONTROL_TC_FLUSH |
1403 PIPE_CONTROL_NO_WRITE |
1404 PIPE_CONTROL_CS_STALL,
1405 0, 0, false, p->cp);
1406 }
1407
1408 void
1409 ilo_3d_pipeline_emit_write_timestamp_gen6(struct ilo_3d_pipeline *p,
1410 struct intel_bo *bo, int index)
1411 {
1412 if (p->dev->gen == ILO_GEN(6))
1413 gen6_wa_pipe_control_post_sync(p, true);
1414
1415 p->gen6_PIPE_CONTROL(p->dev,
1416 PIPE_CONTROL_WRITE_TIMESTAMP,
1417 bo, index * sizeof(uint64_t) | PIPE_CONTROL_GLOBAL_GTT_WRITE,
1418 true, p->cp);
1419 }
1420
1421 void
1422 ilo_3d_pipeline_emit_write_depth_count_gen6(struct ilo_3d_pipeline *p,
1423 struct intel_bo *bo, int index)
1424 {
1425 if (p->dev->gen == ILO_GEN(6))
1426 gen6_wa_pipe_control_post_sync(p, false);
1427
1428 p->gen6_PIPE_CONTROL(p->dev,
1429 PIPE_CONTROL_DEPTH_STALL |
1430 PIPE_CONTROL_WRITE_DEPTH_COUNT,
1431 bo, index * sizeof(uint64_t) | PIPE_CONTROL_GLOBAL_GTT_WRITE,
1432 true, p->cp);
1433 }
1434
/**
 * Return a conservative size estimate of all commands the pipeline may
 * emit for a single draw.
 *
 * NOTE(review): the result is cached in a function-local static on the
 * assumption that it is identical for every device/context and that this
 * is never called concurrently — confirm before reusing elsewhere.
 */
static int
gen6_pipeline_estimate_commands(const struct ilo_3d_pipeline *p,
                                const struct ilo_gpe_gen6 *gen6,
                                const struct ilo_context *ilo)
{
   static int size;
   enum ilo_gpe_gen6_command cmd;

   /* return the cached estimate when already computed */
   if (size)
      return size;

   /* sum up the worst-case count of each command kind */
   for (cmd = 0; cmd < ILO_GPE_GEN6_COMMAND_COUNT; cmd++) {
      int count;

      switch (cmd) {
      case ILO_GPE_GEN6_PIPE_CONTROL:
         /* for the workaround */
         count = 2;
         /* another one after 3DSTATE_URB */
         count += 1;
         /* and another one after 3DSTATE_CONSTANT_VS */
         count += 1;
         break;
      case ILO_GPE_GEN6_3DSTATE_GS_SVB_INDEX:
         /* there are 4 SVBIs */
         count = 4;
         break;
      case ILO_GPE_GEN6_3DSTATE_VERTEX_BUFFERS:
         count = 33;
         break;
      case ILO_GPE_GEN6_3DSTATE_VERTEX_ELEMENTS:
         count = 34;
         break;
      case ILO_GPE_GEN6_MEDIA_VFE_STATE:
      case ILO_GPE_GEN6_MEDIA_CURBE_LOAD:
      case ILO_GPE_GEN6_MEDIA_INTERFACE_DESCRIPTOR_LOAD:
      case ILO_GPE_GEN6_MEDIA_GATEWAY_STATE:
      case ILO_GPE_GEN6_MEDIA_STATE_FLUSH:
      case ILO_GPE_GEN6_MEDIA_OBJECT_WALKER:
         /* media commands */
         count = 0;
         break;
      default:
         count = 1;
         break;
      }

      if (count)
         size += gen6->estimate_command_size(p->dev, cmd, count);
   }

   return size;
}
1488
/**
 * Return a conservative size estimate of all dynamic states the pipeline
 * may upload for a single draw: a fixed part (viewports, cc, scissors,
 * binding tables) plus a part that scales with the bound resources
 * (SURFACE_STATEs, samplers, the VS push constant buffer).
 *
 * NOTE(review): the fixed part is cached in a function-local static on
 * the assumption that it is identical for every device/context and that
 * this is never called concurrently — confirm before reusing elsewhere.
 */
static int
gen6_pipeline_estimate_states(const struct ilo_3d_pipeline *p,
                              const struct ilo_gpe_gen6 *gen6,
                              const struct ilo_context *ilo)
{
   static int static_size;
   int shader_type, count, size;

   if (!static_size) {
      struct {
         enum ilo_gpe_gen6_state state;
         int count;
      } static_states[] = {
         /* viewports */
         { ILO_GPE_GEN6_SF_VIEWPORT, 1 },
         { ILO_GPE_GEN6_CLIP_VIEWPORT, 1 },
         { ILO_GPE_GEN6_CC_VIEWPORT, 1 },
         /* cc */
         { ILO_GPE_GEN6_COLOR_CALC_STATE, 1 },
         { ILO_GPE_GEN6_BLEND_STATE, ILO_MAX_DRAW_BUFFERS },
         { ILO_GPE_GEN6_DEPTH_STENCIL_STATE, 1 },
         /* scissors */
         { ILO_GPE_GEN6_SCISSOR_RECT, 1 },
         /* binding table (vs, gs, fs) */
         { ILO_GPE_GEN6_BINDING_TABLE_STATE, ILO_MAX_VS_SURFACES },
         { ILO_GPE_GEN6_BINDING_TABLE_STATE, ILO_MAX_GS_SURFACES },
         { ILO_GPE_GEN6_BINDING_TABLE_STATE, ILO_MAX_WM_SURFACES },
      };
      int i;

      for (i = 0; i < Elements(static_states); i++) {
         static_size += gen6->estimate_state_size(p->dev,
               static_states[i].state,
               static_states[i].count);
      }
   }

   size = static_size;

   /*
    * render targets (fs)
    * stream outputs (gs)
    * sampler views (vs, fs)
    * constant buffers (vs, fs)
    */
   count = ilo->fb.state.nr_cbufs;

   if (ilo->gs)
      count += ilo->gs->info.stream_output.num_outputs;
   else if (ilo->vs)
      count += ilo->vs->info.stream_output.num_outputs;

   for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
      count += ilo->view[shader_type].count;
      count += ilo->cbuf[shader_type].count;
   }

   if (count) {
      size += gen6->estimate_state_size(p->dev,
            ILO_GPE_GEN6_SURFACE_STATE, count);
   }

   /* samplers (vs, fs) */
   for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
      count = ilo->sampler[shader_type].count;
      if (count) {
         size += gen6->estimate_state_size(p->dev,
               ILO_GPE_GEN6_SAMPLER_BORDER_COLOR_STATE, count);
         size += gen6->estimate_state_size(p->dev,
               ILO_GPE_GEN6_SAMPLER_STATE, count);
      }
   }

   /* pcb (vs) */
   if (ilo->vs && ilo->vs->shader->pcb.clip_state_size) {
      const int pcb_size = ilo->vs->shader->pcb.clip_state_size;

      size += gen6->estimate_state_size(p->dev,
            ILO_GPE_GEN6_PUSH_CONSTANT_BUFFER, pcb_size);
   }

   return size;
}
1572
1573 static int
1574 ilo_3d_pipeline_estimate_size_gen6(struct ilo_3d_pipeline *p,
1575 enum ilo_3d_pipeline_action action,
1576 const void *arg)
1577 {
1578 const struct ilo_gpe_gen6 *gen6 = ilo_gpe_gen6_get();
1579 int size;
1580
1581 switch (action) {
1582 case ILO_3D_PIPELINE_DRAW:
1583 {
1584 const struct ilo_context *ilo = arg;
1585
1586 size = gen6_pipeline_estimate_commands(p, gen6, ilo) +
1587 gen6_pipeline_estimate_states(p, gen6, ilo);
1588 }
1589 break;
1590 case ILO_3D_PIPELINE_FLUSH:
1591 size = gen6->estimate_command_size(p->dev,
1592 ILO_GPE_GEN6_PIPE_CONTROL, 1) * 3;
1593 break;
1594 case ILO_3D_PIPELINE_WRITE_TIMESTAMP:
1595 size = gen6->estimate_command_size(p->dev,
1596 ILO_GPE_GEN6_PIPE_CONTROL, 1) * 2;
1597 break;
1598 case ILO_3D_PIPELINE_WRITE_DEPTH_COUNT:
1599 size = gen6->estimate_command_size(p->dev,
1600 ILO_GPE_GEN6_PIPE_CONTROL, 1) * 3;
1601 break;
1602 default:
1603 assert(!"unknown 3D pipeline action");
1604 size = 0;
1605 break;
1606 }
1607
1608 return size;
1609 }
1610
/**
 * Initialize the pipeline for GEN6: install the top-level callbacks and
 * hook up every GEN6 command/state emitter from the GPE vtable.
 */
void
ilo_3d_pipeline_init_gen6(struct ilo_3d_pipeline *p)
{
   const struct ilo_gpe_gen6 *gen6 = ilo_gpe_gen6_get();

   p->estimate_size = ilo_3d_pipeline_estimate_size_gen6;
   p->emit_draw = ilo_3d_pipeline_emit_draw_gen6;
   p->emit_flush = ilo_3d_pipeline_emit_flush_gen6;
   p->emit_write_timestamp = ilo_3d_pipeline_emit_write_timestamp_gen6;
   p->emit_write_depth_count = ilo_3d_pipeline_emit_write_depth_count_gen6;

/* map p->gen6_<name> to gen6->emit_<name> */
#define GEN6_USE(p, name, from) \
   p->gen6_ ## name = from->emit_ ## name
   GEN6_USE(p, STATE_BASE_ADDRESS, gen6);
   GEN6_USE(p, STATE_SIP, gen6);
   GEN6_USE(p, PIPELINE_SELECT, gen6);
   GEN6_USE(p, 3DSTATE_BINDING_TABLE_POINTERS, gen6);
   GEN6_USE(p, 3DSTATE_SAMPLER_STATE_POINTERS, gen6);
   GEN6_USE(p, 3DSTATE_URB, gen6);
   GEN6_USE(p, 3DSTATE_VERTEX_BUFFERS, gen6);
   GEN6_USE(p, 3DSTATE_VERTEX_ELEMENTS, gen6);
   GEN6_USE(p, 3DSTATE_INDEX_BUFFER, gen6);
   GEN6_USE(p, 3DSTATE_VF_STATISTICS, gen6);
   GEN6_USE(p, 3DSTATE_VIEWPORT_STATE_POINTERS, gen6);
   GEN6_USE(p, 3DSTATE_CC_STATE_POINTERS, gen6);
   GEN6_USE(p, 3DSTATE_SCISSOR_STATE_POINTERS, gen6);
   GEN6_USE(p, 3DSTATE_VS, gen6);
   GEN6_USE(p, 3DSTATE_GS, gen6);
   GEN6_USE(p, 3DSTATE_CLIP, gen6);
   GEN6_USE(p, 3DSTATE_SF, gen6);
   GEN6_USE(p, 3DSTATE_WM, gen6);
   GEN6_USE(p, 3DSTATE_CONSTANT_VS, gen6);
   GEN6_USE(p, 3DSTATE_CONSTANT_GS, gen6);
   GEN6_USE(p, 3DSTATE_CONSTANT_PS, gen6);
   GEN6_USE(p, 3DSTATE_SAMPLE_MASK, gen6);
   GEN6_USE(p, 3DSTATE_DRAWING_RECTANGLE, gen6);
   GEN6_USE(p, 3DSTATE_DEPTH_BUFFER, gen6);
   GEN6_USE(p, 3DSTATE_POLY_STIPPLE_OFFSET, gen6);
   GEN6_USE(p, 3DSTATE_POLY_STIPPLE_PATTERN, gen6);
   GEN6_USE(p, 3DSTATE_LINE_STIPPLE, gen6);
   GEN6_USE(p, 3DSTATE_AA_LINE_PARAMETERS, gen6);
   GEN6_USE(p, 3DSTATE_GS_SVB_INDEX, gen6);
   GEN6_USE(p, 3DSTATE_MULTISAMPLE, gen6);
   GEN6_USE(p, 3DSTATE_STENCIL_BUFFER, gen6);
   GEN6_USE(p, 3DSTATE_HIER_DEPTH_BUFFER, gen6);
   GEN6_USE(p, 3DSTATE_CLEAR_PARAMS, gen6);
   GEN6_USE(p, PIPE_CONTROL, gen6);
   GEN6_USE(p, 3DPRIMITIVE, gen6);
   GEN6_USE(p, INTERFACE_DESCRIPTOR_DATA, gen6);
   GEN6_USE(p, SF_VIEWPORT, gen6);
   GEN6_USE(p, CLIP_VIEWPORT, gen6);
   GEN6_USE(p, CC_VIEWPORT, gen6);
   GEN6_USE(p, COLOR_CALC_STATE, gen6);
   GEN6_USE(p, BLEND_STATE, gen6);
   GEN6_USE(p, DEPTH_STENCIL_STATE, gen6);
   GEN6_USE(p, SCISSOR_RECT, gen6);
   GEN6_USE(p, BINDING_TABLE_STATE, gen6);
   GEN6_USE(p, SURFACE_STATE, gen6);
   GEN6_USE(p, so_SURFACE_STATE, gen6);
   GEN6_USE(p, SAMPLER_STATE, gen6);
   GEN6_USE(p, SAMPLER_BORDER_COLOR_STATE, gen6);
   GEN6_USE(p, push_constant_buffer, gen6);
#undef GEN6_USE
}